TensorFlow: Upstream latest changes to Git.
Changes:
- Updates to installation instructions.
- Updates to documentation.
- Minor modifications and tests for word2vec.

Base CL: 107284192
This commit is contained in:
parent f41959ccb2
commit cd9e60c1cd
README.md (61 lines changed)

@@ -11,7 +11,66 @@ organization for the purposes of conducting machine learning and deep neural
 networks research. The system is general enough to be applicable in a wide
 variety of other domains, as well.
 
+# Download and Setup
+
+For detailed installation instructions, see
+[here](g3doc/get_started/os_setup.md).
+
+## Binary Installation
+
+### Ubuntu/Linux
+
+Make sure you have [pip](https://pypi.python.org/pypi/pip) installed:
+
+```sh
+$ sudo apt-get install python-pip
+```
+
+Install TensorFlow:
+
+```sh
+# For CPU-only version
+$ sudo pip install https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
+
+# For GPU-enabled version
+$ sudo pip install https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
+```
+
+### Mac OS X
+
+Make sure you have [pip](https://pypi.python.org/pypi/pip) installed:
+
+If using `easy_install`:
+
+```sh
+$ sudo easy_install pip
+```
+
+Install TensorFlow (only CPU binary version is currently available).
+
+```sh
+$ sudo pip install https://storage.googleapis.com/tensorflow/mac/tensorflow-0.5.0-py2-none-any.whl
+```
+
+### Try your first TensorFlow program
+
+```sh
+$ python
+
+>>> import tensorflow as tf
+>>> hello = tf.constant('Hello, TensorFlow!')
+>>> sess = tf.Session()
+>>> print sess.run(hello)
+Hello, TensorFlow!
+>>> a = tf.constant(10)
+>>> b = tf.constant(32)
+>>> print sess.run(a+b)
+42
+>>>
+
+```
+
 ##For more information
 
+* [Installation and setup instructions](/tensorflow/g3doc/get_started/os_setup.md)
 * [TensorFlow website](http://tensorflow.org)
@@ -10,7 +10,7 @@ import "tensorflow/core/framework/types.proto";
 message TensorProto {
   DataType dtype = 1;
 
-  // Shape of the tensor. TODO(mdevin): sort out the 0-rank issues.
+  // Shape of the tensor. TODO(touts): sort out the 0-rank issues.
   TensorShapeProto tensor_shape = 2;
 
   // Only one of the representations below is set, one of "tensor_contents" and
@@ -46,7 +46,7 @@ struct SoftmaxEigenImpl {
   Eigen::IndexList<Eigen::type2index<1>, int> one_by_class;
   one_by_class.set(1, num_classes);
 #endif
-  // NOTE(mdevin): If you modify this implementation please run
+  // NOTE(touts): If you modify this implementation please run
   // the ImageNetSoftmaxFwd benchmark in core_ops_test.cc.
   //
   // softmax = exp(logits - max(logits along classes));
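The comment being touched here spells out what the kernel computes: the per-class maximum is subtracted from the logits before exponentiating, so `exp()` cannot overflow. A minimal NumPy sketch of that same formula, for reference only (NumPy is assumed purely for illustration; this is not the Eigen kernel being patched):

```python
import numpy as np

def softmax(logits):
    """Numerically stable softmax over the class axis, per the formula above."""
    # logits: array of shape [batch, num_classes]
    shifted = logits - logits.max(axis=1, keepdims=True)  # logits - max(logits along classes)
    exps = np.exp(shifted)
    return exps / exps.sum(axis=1, keepdims=True)

# Rows still sum to 1 even when the raw logits would overflow exp() directly.
print(softmax(np.array([[1000.0, 1001.0, 1002.0]])))
```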
@@ -35,7 +35,7 @@ struct XentEigenImpl {
                      typename TTypes<T>::Matrix scratch,
                      typename TTypes<T>::Vec loss,
                      typename TTypes<T>::Matrix backprop) {
-    // NOTE(mdevin): This duplicates some of the computations in softmax_op
+    // NOTE(touts): This duplicates some of the computations in softmax_op
     // because we need the intermediate (logits -max(logits)) values to
     // avoid a log(exp()) in the computation of the loss.
 
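The note above explains why the cross-entropy kernel recomputes the shifted logits instead of reusing the softmax output: keeping `logits - max(logits)` lets the loss be written with a log-sum-exp rather than `log(exp(...))`. A hedged NumPy sketch of that arrangement (illustration only, not the Eigen implementation):

```python
import numpy as np

def softmax_cross_entropy(logits, labels):
    """Loss and backprop computed from shifted logits, avoiding log(exp(...))."""
    # logits, labels: [batch, num_classes]; labels are one-hot or a distribution.
    shifted = logits - logits.max(axis=1, keepdims=True)           # logits - max(logits)
    log_sum_exp = np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    loss = (labels * (log_sum_exp - shifted)).sum(axis=1)          # -sum(labels * log(softmax))
    backprop = np.exp(shifted - log_sum_exp) - labels              # softmax - labels
    return loss, backprop
```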
@@ -97,7 +97,7 @@ class ThreadSafeHistogram {
 
   void Clear();
 
-  // TODO(mdevin): It might be a good idea to provide a AddN(<many values>)
+  // TODO(touts): It might be a good idea to provide a AddN(<many values>)
   // method to avoid grabbing/releasing the lock when adding many values.
   void Add(double value);
 
@@ -63,7 +63,7 @@ class TensorShape {
 
   /// \brief Returns the number of elements in dimension "d".
   /// REQUIRES: "0 <= d < dims()"
-  // TODO(mdevin): Rename to dimension() to match Eigen::Tensor::dimension()?
+  // TODO(touts): Rename to dimension() to match Eigen::Tensor::dimension()?
   int64 dim_size(int d) const {
     DCHECK_GE(d, 0);
     DCHECK_LT(d, dims());
@@ -12,7 +12,7 @@ message Event {
   // Timestamp of the event.
   double wall_time = 1;
 
-  // Globale step of the event.
+  // Global step of the event.
   int64 step = 2;
 
   oneof what {
@@ -20,7 +20,6 @@ installed the NDK and SDK. Otherwise an error such as:
 "The external label '//external:android/sdk' is not bound to anything" will
 be reported.
 
-
 To build the APK, run this from your workspace root:
 ```
 bazel build //tensorflow/examples/android:tensorflow_demo -c opt --copt=-mfpu=neon
@@ -29,11 +28,19 @@ Note that "-c opt" is currently required; if not set, an assert (for an
 otherwise non-problematic issue) in Eigen will halt the application during
 execution. This issue will be corrected in an upcoming release.
 
-If adb debugging is enabled on your device, you may instead use the following
-command from your workspace root to automatically build and install:
+If adb debugging is enabled on your Android 5.0 or later device, you may then
+use the following command from your workspace root to install the APK once
+built:
+'''
+adb install -r -g bazel-bin/tensorflow/examples/android/tensorflow_demo_incremental.apk
+'''
+
+Alternatively, a streamlined means of building, installing and running in one
+command is:
 ```
-bazel mobile-install //tensorflow/examples/android:tensorflow_demo -c opt --copt=-mfpu=neon
+bazel mobile-install //tensorflow/examples/android:tensorflow_demo -c opt --start_app --copt=-mfpu=neon
 ```
 
-Add the "--start_app" flag if you wish to automatically start the app after
-installing. Otherwise, find the application icon labeled "Tensorflow Demo".
+If camera permission errors are encountered (possible on Android Marshmallow or
+above), then the adb install command above should be used instead, as it
+automatically grants the required camera permissions with '-g'.
@@ -9,7 +9,7 @@ Over time, we hope that the TensorFlow community will develop front ends for
 languages like Go, Java, Javascript, Lua R, and perhaps others. With SWIG, it's
 relatively easy to contribute a TensorFlow interface to your favorite language.
 
-Note: Many practical aspects of ssage are covered in the Mechanics tab, and
+Note: Many practical aspects of usage are covered in the Mechanics tab, and
 some additional documentation not specific to any particular language API is
 available in the Resources tab.
 
@@ -1,6 +1,10 @@
 <!-- This file is machine generated: DO NOT EDIT! -->
 
 # Tensor Transformations
 
+Note: Functions taking `Tensor` arguments can also take anything
+accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
+
 <!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
 ## Contents
 * [Casting](#AUTOGENERATED-casting)
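The note added to these generated docs says that anything accepted by `tf.convert_to_tensor` can be passed where a `Tensor` argument is expected. A small hedged illustration (the values and the choice of `tf.reshape` are arbitrary examples, not part of this commit):

```python
import tensorflow as tf

# A plain Python list is accepted where a Tensor is expected; it is converted
# internally via tf.convert_to_tensor.
x = tf.reshape([1, 2, 3, 4], [2, 2])

# The same conversion done explicitly.
t = tf.convert_to_tensor([1, 2, 3, 4])
```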
@@ -299,7 +299,8 @@ The operation that failed, if known.
 or `Recv` op, there will be no corresponding
 [`Operation`](framework.md#Operation) object. In that case, this
 will return `None`, and you should instead use the
-[`node_def`](OpError.node_def) to discover information about the op.
+[`OpError.node_def`](#OpError.node_def) to discover information about the
+op.
 
 ##### Returns:
 
@@ -536,7 +537,7 @@ The operation was aborted, typically due to a concurrent action.
 
 For example, running a [`queue.enqueue()`](io_ops.md#QueueBase.enqueue)
 operation may raise `AbortedError` if a
-[`queue.close()`](io_ops.md@QueueBase.close) operation previously ran.
+[`queue.close()`](io_ops.md#QueueBase.close) operation previously ran.
 
 - - -
 
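The `AbortedError` passage in this hunk describes an enqueue that runs after the queue has been closed. A hedged sketch of that situation using the queue classes these docs list (a minimal illustration; the exact exception raised can depend on the version and on how the queue was closed):

```python
import tensorflow as tf

q = tf.FIFOQueue(capacity=10, dtypes=[tf.int32])
enqueue_op = q.enqueue([1])
close_op = q.close()

sess = tf.Session()
sess.run(close_op)            # close the queue first
try:
    sess.run(enqueue_op)      # an enqueue after close may raise AbortedError
except tf.errors.AbortedError as e:
    print("enqueue after close failed:", e)
```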
@@ -1,6 +1,10 @@
 <!-- This file is machine generated: DO NOT EDIT! -->
 
 # Constants, Sequences, and Random Values
 
+Note: Functions taking `Tensor` arguments can also take anything
+accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
+
 <!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
 ## Contents
 * [Constant Value Tensors](#AUTOGENERATED-constant-value-tensors)
@@ -1,6 +1,10 @@
 <!-- This file is machine generated: DO NOT EDIT! -->
 
 # Control Flow
 
+Note: Functions taking `Tensor` arguments can also take anything
+accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
+
 <!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
 ## Contents
 * [Control Flow Operations](#AUTOGENERATED-control-flow-operations)
@@ -33,7 +33,7 @@
 
 <!-- TOC-END This section was generated by neural network, THANKS FOR READING! -->
 
-Import names from the framework library.
+Classes and functions for building TensorFlow graphs.
 
 ## Core graph data structures <div class="md-anchor" id="AUTOGENERATED-core-graph-data-structures">{#AUTOGENERATED-core-graph-data-structures}</div>
 
@@ -126,6 +126,10 @@ with tf.Graph().as_default() as g:
 
 Returns a serialized `GraphDef` representation of this graph.
 
+The serialized `GraphDef` can be imported into another `Graph`
+(using [`import_graph_def()`](#import_graph_def)) or used with the
+[C++ Session API](../cc/index.md).
+
 This method is thread-safe.
 
 ##### Args:
@@ -137,8 +141,7 @@ This method is thread-safe.
 
 ##### Returns:
 
-  A
-  [`GraphDef`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/framework/graph.proto)
+  A [`GraphDef`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/framework/graph.proto)
   protocol buffer.
 
 
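The sentence added here documents the round trip: a graph's serialized `GraphDef` can be re-imported into another graph with `import_graph_def()`. A hedged sketch of that usage (the names "answer" and "imported" are illustrative only):

```python
import tensorflow as tf

g1 = tf.Graph()
with g1.as_default():
    tf.constant(42, name="answer")

graph_def = g1.as_graph_def()          # serialized GraphDef protocol buffer

g2 = tf.Graph()
with g2.as_default():
    # Recreate the ops recorded in graph_def inside a second graph.
    tf.import_graph_def(graph_def, name="imported")
```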
@@ -1,32 +1,36 @@
 <!-- This file is machine generated: DO NOT EDIT! -->
 
 # Images
 
+Note: Functions taking `Tensor` arguments can also take anything
+accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
+
 <!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
 ## Contents
-* [Encoding and Decoding.](#AUTOGENERATED-encoding-and-decoding.)
+* [Encoding and Decoding](#AUTOGENERATED-encoding-and-decoding)
   * [tf.image.decode_jpeg(contents, channels=None, ratio=None, fancy_upscaling=None, try_recover_truncated=None, acceptable_fraction=None, name=None)](#decode_jpeg)
   * [tf.image.encode_jpeg(image, format=None, quality=None, progressive=None, optimize_size=None, chroma_downsampling=None, density_unit=None, x_density=None, y_density=None, xmp_metadata=None, name=None)](#encode_jpeg)
   * [tf.image.decode_png(contents, channels=None, name=None)](#decode_png)
   * [tf.image.encode_png(image, compression=None, name=None)](#encode_png)
-* [Resizing.](#AUTOGENERATED-resizing.)
+* [Resizing](#AUTOGENERATED-resizing)
   * [tf.image.resize_images(images, new_height, new_width, method=0)](#resize_images)
   * [tf.image.resize_area(images, size, name=None)](#resize_area)
   * [tf.image.resize_bicubic(images, size, name=None)](#resize_bicubic)
   * [tf.image.resize_bilinear(images, size, name=None)](#resize_bilinear)
   * [tf.image.resize_nearest_neighbor(images, size, name=None)](#resize_nearest_neighbor)
-* [Cropping.](#AUTOGENERATED-cropping.)
+* [Cropping](#AUTOGENERATED-cropping)
   * [tf.image.resize_image_with_crop_or_pad(image, target_height, target_width)](#resize_image_with_crop_or_pad)
   * [tf.image.pad_to_bounding_box(image, offset_height, offset_width, target_height, target_width)](#pad_to_bounding_box)
   * [tf.image.crop_to_bounding_box(image, offset_height, offset_width, target_height, target_width)](#crop_to_bounding_box)
   * [tf.image.random_crop(image, size, seed=None, name=None)](#random_crop)
   * [tf.image.extract_glimpse(input, size, offsets, centered=None, normalized=None, uniform_noise=None, name=None)](#extract_glimpse)
-* [Flipping and Transposing.](#AUTOGENERATED-flipping-and-transposing.)
+* [Flipping and Transposing](#AUTOGENERATED-flipping-and-transposing)
   * [tf.image.flip_up_down(image)](#flip_up_down)
   * [tf.image.random_flip_up_down(image, seed=None)](#random_flip_up_down)
   * [tf.image.flip_left_right(image)](#flip_left_right)
   * [tf.image.random_flip_left_right(image, seed=None)](#random_flip_left_right)
   * [tf.image.transpose_image(image)](#transpose_image)
-* [Image Adjustments.](#AUTOGENERATED-image-adjustments.)
+* [Image Adjustments](#AUTOGENERATED-image-adjustments)
   * [tf.image.adjust_brightness(image, delta, min_value=None, max_value=None)](#adjust_brightness)
   * [tf.image.random_brightness(image, max_delta, seed=None)](#random_brightness)
   * [tf.image.adjust_contrast(images, contrast_factor, min_value=None, max_value=None)](#adjust_contrast)
@@ -36,7 +40,7 @@
 
 <!-- TOC-END This section was generated by neural network, THANKS FOR READING! -->
 
-## Encoding and Decoding. <div class="md-anchor" id="AUTOGENERATED-encoding-and-decoding.">{#AUTOGENERATED-encoding-and-decoding.}</div>
+## Encoding and Decoding <div class="md-anchor" id="AUTOGENERATED-encoding-and-decoding">{#AUTOGENERATED-encoding-and-decoding}</div>
 
 TensorFlow provides Ops to decode and encode JPEG and PNG formats. Encoded
 images are represented by scalar string Tensors, decoded images by 3-D uint8
@@ -211,7 +215,7 @@ the smallest output, but is slower.
 
 
 
-## Resizing. <div class="md-anchor" id="AUTOGENERATED-resizing.">{#AUTOGENERATED-resizing.}</div>
+## Resizing <div class="md-anchor" id="AUTOGENERATED-resizing">{#AUTOGENERATED-resizing}</div>
 
 The resizing Ops accept input images as tensors of several types. They always
 output resized images as float32 tensors.
@@ -376,7 +380,7 @@ Input images can be of different types but output images are always float.
 
 
 
-## Cropping. <div class="md-anchor" id="AUTOGENERATED-cropping.">{#AUTOGENERATED-cropping.}</div>
+## Cropping <div class="md-anchor" id="AUTOGENERATED-cropping">{#AUTOGENERATED-cropping}</div>
 
 - - -
 
@@ -555,7 +559,7 @@ The argument `normalized` and `centered` controls how the windows are built:
 
 
 
-## Flipping and Transposing. <div class="md-anchor" id="AUTOGENERATED-flipping-and-transposing.">{#AUTOGENERATED-flipping-and-transposing.}</div>
+## Flipping and Transposing <div class="md-anchor" id="AUTOGENERATED-flipping-and-transposing">{#AUTOGENERATED-flipping-and-transposing}</div>
 
 - - -
 
@@ -687,7 +691,7 @@ See also `transpose()`.
 
 
 
-## Image Adjustments. <div class="md-anchor" id="AUTOGENERATED-image-adjustments.">{#AUTOGENERATED-image-adjustments.}</div>
+## Image Adjustments <div class="md-anchor" id="AUTOGENERATED-image-adjustments">{#AUTOGENERATED-image-adjustments}</div>
 
 TensorFlow provides functions to adjust images in various ways: brightness,
 contrast, hue, and saturation. Each adjustment can be done with predefined
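The image.md excerpts above describe encoded images as scalar string tensors, decoded images as 3-D uint8 tensors, and resized output as float32. A hedged sketch tying those together, using only signatures listed in the contents above (the file name is illustrative):

```python
import tensorflow as tf

raw = tf.read_file("example.jpg")             # scalar string tensor: the encoded image
img = tf.image.decode_jpeg(raw, channels=3)   # 3-D uint8 tensor [height, width, 3]
small = tf.image.resize_images(img, 64, 64)   # resizing ops output float32 images
```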
@@ -2,336 +2,349 @@
 
 # TensorFlow Python reference documentation
 
-* <b>[Building Graphs](framework.md)</b>: [class DType](framework.md#DType),
-  [class Dimension](framework.md#Dimension),
-  [class Graph](framework.md#Graph),
-  [class GraphKeys](framework.md#GraphKeys),
-  [NoGradient](framework.md#NoGradient),
-  [class Operation](framework.md#Operation),
-  [class RegisterGradient](framework.md#RegisterGradient),
-  [class RegisterShape](framework.md#RegisterShape),
-  [class Tensor](framework.md#Tensor),
-  [class TensorShape](framework.md#TensorShape),
-  [add_to_collection](framework.md#add_to_collection),
-  [as_dtype](framework.md#as_dtype),
-  [control_dependencies](framework.md#control_dependencies),
-  [convert_to_tensor](framework.md#convert_to_tensor),
-  [device](framework.md#device),
-  [get_collection](framework.md#get_collection),
-  [get_default_graph](framework.md#get_default_graph),
-  [get_seed](framework.md#get_seed),
-  [import_graph_def](framework.md#import_graph_def),
-  [name_scope](framework.md#name_scope),
-  [op_scope](framework.md#op_scope)
+* **[Building Graphs](framework.md)**:
+  * [`add_to_collection`](framework.md#add_to_collection)
+  * [`as_dtype`](framework.md#as_dtype)
+  * [`control_dependencies`](framework.md#control_dependencies)
+  * [`convert_to_tensor`](framework.md#convert_to_tensor)
+  * [`device`](framework.md#device)
+  * [`Dimension`](framework.md#Dimension)
+  * [`DType`](framework.md#DType)
+  * [`get_collection`](framework.md#get_collection)
+  * [`get_default_graph`](framework.md#get_default_graph)
+  * [`get_seed`](framework.md#get_seed)
+  * [`Graph`](framework.md#Graph)
+  * [`GraphKeys`](framework.md#GraphKeys)
+  * [`import_graph_def`](framework.md#import_graph_def)
+  * [`name_scope`](framework.md#name_scope)
+  * [`NoGradient`](framework.md#NoGradient)
+  * [`op_scope`](framework.md#op_scope)
+  * [`Operation`](framework.md#Operation)
+  * [`RegisterGradient`](framework.md#RegisterGradient)
+  * [`RegisterShape`](framework.md#RegisterShape)
+  * [`Tensor`](framework.md#Tensor)
+  * [`TensorShape`](framework.md#TensorShape)
 
-* <b>[Constants, Sequences, and Random Values](constant_op.md)</b>: [constant](constant_op.md#constant),
-  [fill](constant_op.md#fill),
-  [linspace](constant_op.md#linspace),
-  [ones](constant_op.md#ones),
-  [ones_like](constant_op.md#ones_like),
-  [random_normal](constant_op.md#random_normal),
-  [random_shuffle](constant_op.md#random_shuffle),
-  [random_uniform](constant_op.md#random_uniform),
-  [range](constant_op.md#range),
-  [set_random_seed](constant_op.md#set_random_seed),
-  [truncated_normal](constant_op.md#truncated_normal),
-  [zeros](constant_op.md#zeros),
-  [zeros_like](constant_op.md#zeros_like)
+* **[Constants, Sequences, and Random Values](constant_op.md)**:
+  * [`constant`](constant_op.md#constant)
+  * [`fill`](constant_op.md#fill)
+  * [`linspace`](constant_op.md#linspace)
+  * [`ones`](constant_op.md#ones)
+  * [`ones_like`](constant_op.md#ones_like)
+  * [`random_normal`](constant_op.md#random_normal)
+  * [`random_shuffle`](constant_op.md#random_shuffle)
+  * [`random_uniform`](constant_op.md#random_uniform)
+  * [`range`](constant_op.md#range)
+  * [`set_random_seed`](constant_op.md#set_random_seed)
+  * [`truncated_normal`](constant_op.md#truncated_normal)
+  * [`zeros`](constant_op.md#zeros)
+  * [`zeros_like`](constant_op.md#zeros_like)
 
-* <b>[Variables](state_ops.md)</b>: [class IndexedSlices](state_ops.md#IndexedSlices),
-  [class Saver](state_ops.md#Saver),
-  [class Variable](state_ops.md#Variable),
-  [all_variables](state_ops.md#all_variables),
-  [assert_variables_initialized](state_ops.md#assert_variables_initialized),
-  [assign](state_ops.md#assign),
-  [assign_add](state_ops.md#assign_add),
-  [assign_sub](state_ops.md#assign_sub),
-  [constant_initializer](state_ops.md#constant_initializer),
-  [count_up_to](state_ops.md#count_up_to),
-  [device](state_ops.md#device),
-  [get_checkpoint_state](state_ops.md#get_checkpoint_state),
-  [get_variable](state_ops.md#get_variable),
-  [get_variable_scope](state_ops.md#get_variable_scope),
-  [initialize_all_variables](state_ops.md#initialize_all_variables),
-  [initialize_variables](state_ops.md#initialize_variables),
-  [latest_checkpoint](state_ops.md#latest_checkpoint),
-  [random_normal_initializer](state_ops.md#random_normal_initializer),
-  [random_uniform_initializer](state_ops.md#random_uniform_initializer),
-  [scatter_add](state_ops.md#scatter_add),
-  [scatter_sub](state_ops.md#scatter_sub),
-  [scatter_update](state_ops.md#scatter_update),
-  [sparse_mask](state_ops.md#sparse_mask),
-  [trainable_variables](state_ops.md#trainable_variables),
-  [truncated_normal_initializer](state_ops.md#truncated_normal_initializer),
-  [uniform_unit_scaling_initializer](state_ops.md#uniform_unit_scaling_initializer),
-  [update_checkpoint_state](state_ops.md#update_checkpoint_state),
-  [variable_scope](state_ops.md#variable_scope),
-  [zeros_initializer](state_ops.md#zeros_initializer)
+* **[Variables](state_ops.md)**:
+  * [`all_variables`](state_ops.md#all_variables)
+  * [`assert_variables_initialized`](state_ops.md#assert_variables_initialized)
+  * [`assign`](state_ops.md#assign)
+  * [`assign_add`](state_ops.md#assign_add)
+  * [`assign_sub`](state_ops.md#assign_sub)
+  * [`constant_initializer`](state_ops.md#constant_initializer)
+  * [`count_up_to`](state_ops.md#count_up_to)
+  * [`device`](state_ops.md#device)
+  * [`get_checkpoint_state`](state_ops.md#get_checkpoint_state)
+  * [`get_variable`](state_ops.md#get_variable)
+  * [`get_variable_scope`](state_ops.md#get_variable_scope)
+  * [`IndexedSlices`](state_ops.md#IndexedSlices)
+  * [`initialize_all_variables`](state_ops.md#initialize_all_variables)
+  * [`initialize_variables`](state_ops.md#initialize_variables)
+  * [`latest_checkpoint`](state_ops.md#latest_checkpoint)
+  * [`random_normal_initializer`](state_ops.md#random_normal_initializer)
+  * [`random_uniform_initializer`](state_ops.md#random_uniform_initializer)
+  * [`Saver`](state_ops.md#Saver)
+  * [`scatter_add`](state_ops.md#scatter_add)
+  * [`scatter_sub`](state_ops.md#scatter_sub)
+  * [`scatter_update`](state_ops.md#scatter_update)
+  * [`sparse_mask`](state_ops.md#sparse_mask)
+  * [`trainable_variables`](state_ops.md#trainable_variables)
+  * [`truncated_normal_initializer`](state_ops.md#truncated_normal_initializer)
+  * [`uniform_unit_scaling_initializer`](state_ops.md#uniform_unit_scaling_initializer)
+  * [`update_checkpoint_state`](state_ops.md#update_checkpoint_state)
+  * [`Variable`](state_ops.md#Variable)
+  * [`variable_scope`](state_ops.md#variable_scope)
+  * [`zeros_initializer`](state_ops.md#zeros_initializer)
 
-* <b>[Tensor Transformations](array_ops.md)</b>: [cast](array_ops.md#cast),
-  [concat](array_ops.md#concat),
-  [dynamic_partition](array_ops.md#dynamic_partition),
-  [dynamic_stitch](array_ops.md#dynamic_stitch),
-  [expand_dims](array_ops.md#expand_dims),
-  [gather](array_ops.md#gather),
-  [pack](array_ops.md#pack),
-  [pad](array_ops.md#pad),
-  [rank](array_ops.md#rank),
-  [reshape](array_ops.md#reshape),
-  [reverse](array_ops.md#reverse),
-  [reverse_sequence](array_ops.md#reverse_sequence),
-  [shape](array_ops.md#shape),
-  [size](array_ops.md#size),
-  [slice](array_ops.md#slice),
-  [split](array_ops.md#split),
-  [squeeze](array_ops.md#squeeze),
-  [string_to_number](array_ops.md#string_to_number),
-  [tile](array_ops.md#tile),
-  [to_bfloat16](array_ops.md#to_bfloat16),
-  [to_double](array_ops.md#to_double),
-  [to_float](array_ops.md#to_float),
-  [to_int32](array_ops.md#to_int32),
-  [to_int64](array_ops.md#to_int64),
-  [transpose](array_ops.md#transpose),
-  [unpack](array_ops.md#unpack)
+* **[Tensor Transformations](array_ops.md)**:
+  * [`cast`](array_ops.md#cast)
+  * [`concat`](array_ops.md#concat)
+  * [`dynamic_partition`](array_ops.md#dynamic_partition)
+  * [`dynamic_stitch`](array_ops.md#dynamic_stitch)
+  * [`expand_dims`](array_ops.md#expand_dims)
+  * [`gather`](array_ops.md#gather)
+  * [`pack`](array_ops.md#pack)
+  * [`pad`](array_ops.md#pad)
+  * [`rank`](array_ops.md#rank)
+  * [`reshape`](array_ops.md#reshape)
+  * [`reverse`](array_ops.md#reverse)
+  * [`reverse_sequence`](array_ops.md#reverse_sequence)
+  * [`shape`](array_ops.md#shape)
+  * [`size`](array_ops.md#size)
+  * [`slice`](array_ops.md#slice)
+  * [`split`](array_ops.md#split)
+  * [`squeeze`](array_ops.md#squeeze)
+  * [`string_to_number`](array_ops.md#string_to_number)
+  * [`tile`](array_ops.md#tile)
+  * [`to_bfloat16`](array_ops.md#to_bfloat16)
+  * [`to_double`](array_ops.md#to_double)
+  * [`to_float`](array_ops.md#to_float)
+  * [`to_int32`](array_ops.md#to_int32)
+  * [`to_int64`](array_ops.md#to_int64)
+  * [`transpose`](array_ops.md#transpose)
+  * [`unpack`](array_ops.md#unpack)
 
-* <b>[Math](math_ops.md)</b>: [abs](math_ops.md#abs),
-  [accumulate_n](math_ops.md#accumulate_n),
-  [add](math_ops.md#add),
-  [add_n](math_ops.md#add_n),
-  [argmax](math_ops.md#argmax),
-  [argmin](math_ops.md#argmin),
-  [batch_cholesky](math_ops.md#batch_cholesky),
-  [batch_matmul](math_ops.md#batch_matmul),
-  [batch_matrix_determinant](math_ops.md#batch_matrix_determinant),
-  [batch_matrix_inverse](math_ops.md#batch_matrix_inverse),
-  [ceil](math_ops.md#ceil),
-  [cholesky](math_ops.md#cholesky),
-  [complex](math_ops.md#complex),
-  [complex_abs](math_ops.md#complex_abs),
-  [conj](math_ops.md#conj),
-  [cos](math_ops.md#cos),
-  [diag](math_ops.md#diag),
-  [div](math_ops.md#div),
-  [edit_distance](math_ops.md#edit_distance),
-  [exp](math_ops.md#exp),
-  [floor](math_ops.md#floor),
-  [imag](math_ops.md#imag),
-  [inv](math_ops.md#inv),
-  [invert_permutation](math_ops.md#invert_permutation),
-  [listdiff](math_ops.md#listdiff),
-  [log](math_ops.md#log),
-  [matmul](math_ops.md#matmul),
-  [matrix_determinant](math_ops.md#matrix_determinant),
-  [matrix_inverse](math_ops.md#matrix_inverse),
-  [maximum](math_ops.md#maximum),
-  [minimum](math_ops.md#minimum),
-  [mod](math_ops.md#mod),
-  [mul](math_ops.md#mul),
-  [neg](math_ops.md#neg),
-  [pow](math_ops.md#pow),
-  [real](math_ops.md#real),
-  [reduce_all](math_ops.md#reduce_all),
-  [reduce_any](math_ops.md#reduce_any),
-  [reduce_max](math_ops.md#reduce_max),
-  [reduce_mean](math_ops.md#reduce_mean),
-  [reduce_min](math_ops.md#reduce_min),
-  [reduce_prod](math_ops.md#reduce_prod),
-  [reduce_sum](math_ops.md#reduce_sum),
-  [round](math_ops.md#round),
-  [rsqrt](math_ops.md#rsqrt),
-  [segment_max](math_ops.md#segment_max),
-  [segment_mean](math_ops.md#segment_mean),
-  [segment_min](math_ops.md#segment_min),
-  [segment_prod](math_ops.md#segment_prod),
-  [segment_sum](math_ops.md#segment_sum),
-  [sign](math_ops.md#sign),
-  [sin](math_ops.md#sin),
-  [sparse_segment_mean](math_ops.md#sparse_segment_mean),
-  [sparse_segment_sum](math_ops.md#sparse_segment_sum),
-  [sqrt](math_ops.md#sqrt),
-  [square](math_ops.md#square),
-  [sub](math_ops.md#sub),
-  [transpose](math_ops.md#transpose),
-  [unique](math_ops.md#unique),
-  [unsorted_segment_sum](math_ops.md#unsorted_segment_sum),
-  [where](math_ops.md#where)
+* **[Math](math_ops.md)**:
+  * [`abs`](math_ops.md#abs)
+  * [`accumulate_n`](math_ops.md#accumulate_n)
+  * [`add`](math_ops.md#add)
+  * [`add_n`](math_ops.md#add_n)
+  * [`argmax`](math_ops.md#argmax)
+  * [`argmin`](math_ops.md#argmin)
+  * [`batch_cholesky`](math_ops.md#batch_cholesky)
+  * [`batch_matmul`](math_ops.md#batch_matmul)
+  * [`batch_matrix_determinant`](math_ops.md#batch_matrix_determinant)
+  * [`batch_matrix_inverse`](math_ops.md#batch_matrix_inverse)
+  * [`ceil`](math_ops.md#ceil)
+  * [`cholesky`](math_ops.md#cholesky)
+  * [`complex`](math_ops.md#complex)
+  * [`complex_abs`](math_ops.md#complex_abs)
+  * [`conj`](math_ops.md#conj)
+  * [`cos`](math_ops.md#cos)
+  * [`diag`](math_ops.md#diag)
+  * [`div`](math_ops.md#div)
+  * [`edit_distance`](math_ops.md#edit_distance)
+  * [`exp`](math_ops.md#exp)
+  * [`floor`](math_ops.md#floor)
+  * [`imag`](math_ops.md#imag)
+  * [`inv`](math_ops.md#inv)
+  * [`invert_permutation`](math_ops.md#invert_permutation)
+  * [`listdiff`](math_ops.md#listdiff)
+  * [`log`](math_ops.md#log)
+  * [`matmul`](math_ops.md#matmul)
+  * [`matrix_determinant`](math_ops.md#matrix_determinant)
+  * [`matrix_inverse`](math_ops.md#matrix_inverse)
+  * [`maximum`](math_ops.md#maximum)
+  * [`minimum`](math_ops.md#minimum)
+  * [`mod`](math_ops.md#mod)
+  * [`mul`](math_ops.md#mul)
+  * [`neg`](math_ops.md#neg)
+  * [`pow`](math_ops.md#pow)
+  * [`real`](math_ops.md#real)
+  * [`reduce_all`](math_ops.md#reduce_all)
+  * [`reduce_any`](math_ops.md#reduce_any)
+  * [`reduce_max`](math_ops.md#reduce_max)
+  * [`reduce_mean`](math_ops.md#reduce_mean)
+  * [`reduce_min`](math_ops.md#reduce_min)
+  * [`reduce_prod`](math_ops.md#reduce_prod)
+  * [`reduce_sum`](math_ops.md#reduce_sum)
+  * [`round`](math_ops.md#round)
+  * [`rsqrt`](math_ops.md#rsqrt)
+  * [`segment_max`](math_ops.md#segment_max)
+  * [`segment_mean`](math_ops.md#segment_mean)
+  * [`segment_min`](math_ops.md#segment_min)
+  * [`segment_prod`](math_ops.md#segment_prod)
+  * [`segment_sum`](math_ops.md#segment_sum)
+  * [`sign`](math_ops.md#sign)
+  * [`sin`](math_ops.md#sin)
+  * [`sparse_segment_mean`](math_ops.md#sparse_segment_mean)
+  * [`sparse_segment_sum`](math_ops.md#sparse_segment_sum)
+  * [`sqrt`](math_ops.md#sqrt)
+  * [`square`](math_ops.md#square)
+  * [`sub`](math_ops.md#sub)
+  * [`transpose`](math_ops.md#transpose)
+  * [`unique`](math_ops.md#unique)
+  * [`unsorted_segment_sum`](math_ops.md#unsorted_segment_sum)
+  * [`where`](math_ops.md#where)
 
-* <b>[Control Flow](control_flow_ops.md)</b>: [Assert](control_flow_ops.md#Assert),
-  [Print](control_flow_ops.md#Print),
-  [add_check_numerics_ops](control_flow_ops.md#add_check_numerics_ops),
-  [check_numerics](control_flow_ops.md#check_numerics),
-  [count_up_to](control_flow_ops.md#count_up_to),
-  [equal](control_flow_ops.md#equal),
-  [greater](control_flow_ops.md#greater),
-  [greater_equal](control_flow_ops.md#greater_equal),
-  [group](control_flow_ops.md#group),
-  [identity](control_flow_ops.md#identity),
-  [is_finite](control_flow_ops.md#is_finite),
-  [is_inf](control_flow_ops.md#is_inf),
-  [is_nan](control_flow_ops.md#is_nan),
-  [less](control_flow_ops.md#less),
-  [less_equal](control_flow_ops.md#less_equal),
-  [logical_and](control_flow_ops.md#logical_and),
-  [logical_not](control_flow_ops.md#logical_not),
-  [logical_or](control_flow_ops.md#logical_or),
-  [logical_xor](control_flow_ops.md#logical_xor),
-  [no_op](control_flow_ops.md#no_op),
-  [not_equal](control_flow_ops.md#not_equal),
-  [select](control_flow_ops.md#select),
-  [tuple](control_flow_ops.md#tuple),
-  [verify_tensor_all_finite](control_flow_ops.md#verify_tensor_all_finite),
-  [where](control_flow_ops.md#where)
+* **[Control Flow](control_flow_ops.md)**:
+  * [`add_check_numerics_ops`](control_flow_ops.md#add_check_numerics_ops)
+  * [`Assert`](control_flow_ops.md#Assert)
+  * [`check_numerics`](control_flow_ops.md#check_numerics)
+  * [`count_up_to`](control_flow_ops.md#count_up_to)
+  * [`equal`](control_flow_ops.md#equal)
+  * [`greater`](control_flow_ops.md#greater)
+  * [`greater_equal`](control_flow_ops.md#greater_equal)
+  * [`group`](control_flow_ops.md#group)
+  * [`identity`](control_flow_ops.md#identity)
+  * [`is_finite`](control_flow_ops.md#is_finite)
+  * [`is_inf`](control_flow_ops.md#is_inf)
+  * [`is_nan`](control_flow_ops.md#is_nan)
+  * [`less`](control_flow_ops.md#less)
+  * [`less_equal`](control_flow_ops.md#less_equal)
+  * [`logical_and`](control_flow_ops.md#logical_and)
+  * [`logical_not`](control_flow_ops.md#logical_not)
+  * [`logical_or`](control_flow_ops.md#logical_or)
+  * [`logical_xor`](control_flow_ops.md#logical_xor)
+  * [`no_op`](control_flow_ops.md#no_op)
+  * [`not_equal`](control_flow_ops.md#not_equal)
+  * [`Print`](control_flow_ops.md#Print)
+  * [`select`](control_flow_ops.md#select)
+  * [`tuple`](control_flow_ops.md#tuple)
+  * [`verify_tensor_all_finite`](control_flow_ops.md#verify_tensor_all_finite)
+  * [`where`](control_flow_ops.md#where)
 
-* <b>[Images](image.md)</b>: [adjust_brightness](image.md#adjust_brightness),
-  [adjust_contrast](image.md#adjust_contrast),
-  [crop_to_bounding_box](image.md#crop_to_bounding_box),
-  [decode_jpeg](image.md#decode_jpeg),
-  [decode_png](image.md#decode_png),
-  [encode_jpeg](image.md#encode_jpeg),
-  [encode_png](image.md#encode_png),
-  [extract_glimpse](image.md#extract_glimpse),
-  [flip_left_right](image.md#flip_left_right),
-  [flip_up_down](image.md#flip_up_down),
-  [pad_to_bounding_box](image.md#pad_to_bounding_box),
-  [per_image_whitening](image.md#per_image_whitening),
-  [random_brightness](image.md#random_brightness),
-  [random_contrast](image.md#random_contrast),
-  [random_crop](image.md#random_crop),
-  [random_flip_left_right](image.md#random_flip_left_right),
-  [random_flip_up_down](image.md#random_flip_up_down),
-  [resize_area](image.md#resize_area),
-  [resize_bicubic](image.md#resize_bicubic),
-  [resize_bilinear](image.md#resize_bilinear),
-  [resize_image_with_crop_or_pad](image.md#resize_image_with_crop_or_pad),
-  [resize_images](image.md#resize_images),
-  [resize_nearest_neighbor](image.md#resize_nearest_neighbor),
-  [transpose_image](image.md#transpose_image)
+* **[Images](image.md)**:
+  * [`adjust_brightness`](image.md#adjust_brightness)
+  * [`adjust_contrast`](image.md#adjust_contrast)
+  * [`crop_to_bounding_box`](image.md#crop_to_bounding_box)
+  * [`decode_jpeg`](image.md#decode_jpeg)
+  * [`decode_png`](image.md#decode_png)
+  * [`encode_jpeg`](image.md#encode_jpeg)
+  * [`encode_png`](image.md#encode_png)
+  * [`extract_glimpse`](image.md#extract_glimpse)
+  * [`flip_left_right`](image.md#flip_left_right)
+  * [`flip_up_down`](image.md#flip_up_down)
+  * [`pad_to_bounding_box`](image.md#pad_to_bounding_box)
+  * [`per_image_whitening`](image.md#per_image_whitening)
+  * [`random_brightness`](image.md#random_brightness)
+  * [`random_contrast`](image.md#random_contrast)
+  * [`random_crop`](image.md#random_crop)
+  * [`random_flip_left_right`](image.md#random_flip_left_right)
+  * [`random_flip_up_down`](image.md#random_flip_up_down)
+  * [`resize_area`](image.md#resize_area)
+  * [`resize_bicubic`](image.md#resize_bicubic)
+  * [`resize_bilinear`](image.md#resize_bilinear)
+  * [`resize_image_with_crop_or_pad`](image.md#resize_image_with_crop_or_pad)
+  * [`resize_images`](image.md#resize_images)
+  * [`resize_nearest_neighbor`](image.md#resize_nearest_neighbor)
+  * [`transpose_image`](image.md#transpose_image)
 
-* <b>[Sparse Tensors](sparse_ops.md)</b>: [class SparseTensor](sparse_ops.md#SparseTensor),
-  [class SparseTensorValue](sparse_ops.md#SparseTensorValue),
-  [shape](sparse_ops.md#shape),
-  [sparse_concat](sparse_ops.md#sparse_concat),
-  [sparse_fill_empty_rows](sparse_ops.md#sparse_fill_empty_rows),
-  [sparse_reorder](sparse_ops.md#sparse_reorder),
-  [sparse_retain](sparse_ops.md#sparse_retain),
-  [sparse_tensor_to_dense](sparse_ops.md#sparse_tensor_to_dense),
-  [sparse_to_dense](sparse_ops.md#sparse_to_dense),
-  [sparse_to_indicator](sparse_ops.md#sparse_to_indicator)
+* **[Sparse Tensors](sparse_ops.md)**:
+  * [`shape`](sparse_ops.md#shape)
+  * [`sparse_concat`](sparse_ops.md#sparse_concat)
+  * [`sparse_fill_empty_rows`](sparse_ops.md#sparse_fill_empty_rows)
+  * [`sparse_reorder`](sparse_ops.md#sparse_reorder)
+  * [`sparse_retain`](sparse_ops.md#sparse_retain)
+  * [`sparse_tensor_to_dense`](sparse_ops.md#sparse_tensor_to_dense)
+  * [`sparse_to_dense`](sparse_ops.md#sparse_to_dense)
+  * [`sparse_to_indicator`](sparse_ops.md#sparse_to_indicator)
+  * [`SparseTensor`](sparse_ops.md#SparseTensor)
+  * [`SparseTensorValue`](sparse_ops.md#SparseTensorValue)
 
-* <b>[Inputs and Readers](io_ops.md)</b>: [class FIFOQueue](io_ops.md#FIFOQueue),
-  [class FixedLengthRecordReader](io_ops.md#FixedLengthRecordReader),
-  [class IdentityReader](io_ops.md#IdentityReader),
-  [class QueueBase](io_ops.md#QueueBase),
-  [class RandomShuffleQueue](io_ops.md#RandomShuffleQueue),
-  [class ReaderBase](io_ops.md#ReaderBase),
-  [class TFRecordReader](io_ops.md#TFRecordReader),
-  [class TextLineReader](io_ops.md#TextLineReader),
-  [class WholeFileReader](io_ops.md#WholeFileReader),
-  [batch](io_ops.md#batch),
-  [batch_join](io_ops.md#batch_join),
-  [decode_csv](io_ops.md#decode_csv),
-  [decode_raw](io_ops.md#decode_raw),
-  [limit_epochs](io_ops.md#limit_epochs),
-  [match_filenames_once](io_ops.md#match_filenames_once),
-  [matching_files](io_ops.md#matching_files),
-  [parse_example](io_ops.md#parse_example),
-  [parse_single_example](io_ops.md#parse_single_example),
-  [placeholder](io_ops.md#placeholder),
-  [range_input_producer](io_ops.md#range_input_producer),
-  [read_file](io_ops.md#read_file),
-  [shuffle_batch](io_ops.md#shuffle_batch),
-  [shuffle_batch_join](io_ops.md#shuffle_batch_join),
-  [size](io_ops.md#size),
-  [slice_input_producer](io_ops.md#slice_input_producer),
-  [string_input_producer](io_ops.md#string_input_producer)
+* **[Inputs and Readers](io_ops.md)**:
+  * [`batch`](io_ops.md#batch)
+  * [`batch_join`](io_ops.md#batch_join)
+  * [`decode_csv`](io_ops.md#decode_csv)
+  * [`decode_raw`](io_ops.md#decode_raw)
+  * [`FIFOQueue`](io_ops.md#FIFOQueue)
+  * [`FixedLengthRecordReader`](io_ops.md#FixedLengthRecordReader)
+  * [`IdentityReader`](io_ops.md#IdentityReader)
+  * [`limit_epochs`](io_ops.md#limit_epochs)
+  * [`match_filenames_once`](io_ops.md#match_filenames_once)
+  * [`matching_files`](io_ops.md#matching_files)
+  * [`parse_example`](io_ops.md#parse_example)
+  * [`parse_single_example`](io_ops.md#parse_single_example)
+  * [`placeholder`](io_ops.md#placeholder)
+  * [`QueueBase`](io_ops.md#QueueBase)
+  * [`RandomShuffleQueue`](io_ops.md#RandomShuffleQueue)
+  * [`range_input_producer`](io_ops.md#range_input_producer)
+  * [`read_file`](io_ops.md#read_file)
+  * [`ReaderBase`](io_ops.md#ReaderBase)
+  * [`shuffle_batch`](io_ops.md#shuffle_batch)
+  * [`shuffle_batch_join`](io_ops.md#shuffle_batch_join)
+  * [`size`](io_ops.md#size)
+  * [`slice_input_producer`](io_ops.md#slice_input_producer)
+  * [`string_input_producer`](io_ops.md#string_input_producer)
+  * [`TextLineReader`](io_ops.md#TextLineReader)
+  * [`TFRecordReader`](io_ops.md#TFRecordReader)
+  * [`WholeFileReader`](io_ops.md#WholeFileReader)
 
-* <b>[Data IO (Python functions)](python_io.md)</b>: [class TFRecordWriter](python_io.md#TFRecordWriter),
-  [tf_record_iterator](python_io.md#tf_record_iterator)
+* **[Data IO (Python functions)](python_io.md)**:
+  * [`tf_record_iterator`](python_io.md#tf_record_iterator)
+  * [`TFRecordWriter`](python_io.md#TFRecordWriter)
 
-* <b>[Neural Network](nn.md)</b>: [avg_pool](nn.md#avg_pool),
-  [bias_add](nn.md#bias_add),
-  [compute_accidental_hits](nn.md#compute_accidental_hits),
-  [conv2d](nn.md#conv2d),
-  [depthwise_conv2d](nn.md#depthwise_conv2d),
-  [dropout](nn.md#dropout),
-  [embedding_lookup](nn.md#embedding_lookup),
-  [embedding_lookup_sparse](nn.md#embedding_lookup_sparse),
-  [fixed_unigram_candidate_sampler](nn.md#fixed_unigram_candidate_sampler),
-  [in_top_k](nn.md#in_top_k),
-  [l2_loss](nn.md#l2_loss),
-  [l2_normalize](nn.md#l2_normalize),
-  [learned_unigram_candidate_sampler](nn.md#learned_unigram_candidate_sampler),
-  [local_response_normalization](nn.md#local_response_normalization),
-  [log_uniform_candidate_sampler](nn.md#log_uniform_candidate_sampler),
-  [max_pool](nn.md#max_pool),
-  [max_pool_with_argmax](nn.md#max_pool_with_argmax),
-  [moments](nn.md#moments),
-  [nce_loss](nn.md#nce_loss),
-  [relu](nn.md#relu),
-  [relu6](nn.md#relu6),
-  [sampled_softmax_loss](nn.md#sampled_softmax_loss),
-  [separable_conv2d](nn.md#separable_conv2d),
-  [sigmoid](nn.md#sigmoid),
-  [sigmoid_cross_entropy_with_logits](nn.md#sigmoid_cross_entropy_with_logits),
-  [softmax](nn.md#softmax),
-  [softmax_cross_entropy_with_logits](nn.md#softmax_cross_entropy_with_logits),
-  [softplus](nn.md#softplus),
-  [tanh](nn.md#tanh),
-  [top_k](nn.md#top_k),
-  [uniform_candidate_sampler](nn.md#uniform_candidate_sampler)
+* **[Neural Network](nn.md)**:
  * [`avg_pool`](nn.md#avg_pool)
+  * [`bias_add`](nn.md#bias_add)
+  * [`compute_accidental_hits`](nn.md#compute_accidental_hits)
+  * [`conv2d`](nn.md#conv2d)
+  * [`depthwise_conv2d`](nn.md#depthwise_conv2d)
+  * [`dropout`](nn.md#dropout)
+  * [`embedding_lookup`](nn.md#embedding_lookup)
+  * [`embedding_lookup_sparse`](nn.md#embedding_lookup_sparse)
+  * [`fixed_unigram_candidate_sampler`](nn.md#fixed_unigram_candidate_sampler)
+  * [`in_top_k`](nn.md#in_top_k)
+  * [`l2_loss`](nn.md#l2_loss)
+  * [`l2_normalize`](nn.md#l2_normalize)
+  * [`learned_unigram_candidate_sampler`](nn.md#learned_unigram_candidate_sampler)
+  * [`local_response_normalization`](nn.md#local_response_normalization)
+  * [`log_uniform_candidate_sampler`](nn.md#log_uniform_candidate_sampler)
+  * [`max_pool`](nn.md#max_pool)
+  * [`max_pool_with_argmax`](nn.md#max_pool_with_argmax)
+  * [`moments`](nn.md#moments)
+  * [`nce_loss`](nn.md#nce_loss)
+  * [`relu`](nn.md#relu)
+  * [`relu6`](nn.md#relu6)
+  * [`sampled_softmax_loss`](nn.md#sampled_softmax_loss)
+  * [`separable_conv2d`](nn.md#separable_conv2d)
+  * [`sigmoid`](nn.md#sigmoid)
+  * [`sigmoid_cross_entropy_with_logits`](nn.md#sigmoid_cross_entropy_with_logits)
+  * [`softmax`](nn.md#softmax)
+  * [`softmax_cross_entropy_with_logits`](nn.md#softmax_cross_entropy_with_logits)
+  * [`softplus`](nn.md#softplus)
+  * [`tanh`](nn.md#tanh)
+  * [`top_k`](nn.md#top_k)
+  * [`uniform_candidate_sampler`](nn.md#uniform_candidate_sampler)
 
-* <b>[Running Graphs](client.md)</b>: [class AbortedError](client.md#AbortedError),
-  [class AlreadyExistsError](client.md#AlreadyExistsError),
-  [class CancelledError](client.md#CancelledError),
-  [class DataLossError](client.md#DataLossError),
-  [class DeadlineExceededError](client.md#DeadlineExceededError),
-  [class FailedPreconditionError](client.md#FailedPreconditionError),
-  [class InternalError](client.md#InternalError),
-  [class InvalidArgumentError](client.md#InvalidArgumentError),
-  [class NotFoundError](client.md#NotFoundError),
-  [class OpError](client.md#OpError),
-  [class OutOfRangeError](client.md#OutOfRangeError),
-  [class PermissionDeniedError](client.md#PermissionDeniedError),
-  [class ResourceExhaustedError](client.md#ResourceExhaustedError),
-  [class Session](client.md#Session),
-  [class UnauthenticatedError](client.md#UnauthenticatedError),
-  [class UnavailableError](client.md#UnavailableError),
-  [class UnimplementedError](client.md#UnimplementedError),
-  [class UnknownError](client.md#UnknownError),
-  [get_default_session](client.md#get_default_session)
+* **[Running Graphs](client.md)**:
+  * [`AbortedError`](client.md#AbortedError)
+  * [`AlreadyExistsError`](client.md#AlreadyExistsError)
+  * [`CancelledError`](client.md#CancelledError)
+  * [`DataLossError`](client.md#DataLossError)
+  * [`DeadlineExceededError`](client.md#DeadlineExceededError)
+  * [`FailedPreconditionError`](client.md#FailedPreconditionError)
+  * [`get_default_session`](client.md#get_default_session)
+  * [`InternalError`](client.md#InternalError)
+  * [`InvalidArgumentError`](client.md#InvalidArgumentError)
+  * [`NotFoundError`](client.md#NotFoundError)
+  * [`OpError`](client.md#OpError)
+  * [`OutOfRangeError`](client.md#OutOfRangeError)
+  * [`PermissionDeniedError`](client.md#PermissionDeniedError)
+  * [`ResourceExhaustedError`](client.md#ResourceExhaustedError)
+  * [`Session`](client.md#Session)
+  * [`UnauthenticatedError`](client.md#UnauthenticatedError)
+  * [`UnavailableError`](client.md#UnavailableError)
+  * [`UnimplementedError`](client.md#UnimplementedError)
+  * [`UnknownError`](client.md#UnknownError)
 
-* <b>[Training](train.md)</b>: [class AdagradOptimizer](train.md#AdagradOptimizer),
-  [class AdamOptimizer](train.md#AdamOptimizer),
-  [class AggregationMethod](train.md#AggregationMethod),
-  [class Coordinator](train.md#Coordinator),
-  [class ExponentialMovingAverage](train.md#ExponentialMovingAverage),
-  [class FtrlOptimizer](train.md#FtrlOptimizer),
-  [class GradientDescentOptimizer](train.md#GradientDescentOptimizer),
-  [class MomentumOptimizer](train.md#MomentumOptimizer),
-  [class Optimizer](train.md#Optimizer),
-  [class QueueRunner](train.md#QueueRunner),
-  [class RMSPropOptimizer](train.md#RMSPropOptimizer),
-  [class SummaryWriter](train.md#SummaryWriter),
-  [add_queue_runner](train.md#add_queue_runner),
-  [clip_by_average_norm](train.md#clip_by_average_norm),
-  [clip_by_global_norm](train.md#clip_by_global_norm),
-  [clip_by_norm](train.md#clip_by_norm),
-  [clip_by_value](train.md#clip_by_value),
-  [exponential_decay](train.md#exponential_decay),
-  [global_norm](train.md#global_norm),
-  [global_step](train.md#global_step),
-  [gradients](train.md#gradients),
-  [histogram_summary](train.md#histogram_summary),
-  [image_summary](train.md#image_summary),
-  [merge_all_summaries](train.md#merge_all_summaries),
-  [merge_summary](train.md#merge_summary),
-  [scalar_summary](train.md#scalar_summary),
-  [start_queue_runners](train.md#start_queue_runners),
-  [stop_gradient](train.md#stop_gradient),
-  [summary_iterator](train.md#summary_iterator),
-  [write_graph](train.md#write_graph),
-  [zero_fraction](train.md#zero_fraction)
+* **[Training](train.md)**:
+  * [`AdagradOptimizer`](train.md#AdagradOptimizer)
+  * [`AdamOptimizer`](train.md#AdamOptimizer)
+  * [`add_queue_runner`](train.md#add_queue_runner)
+  * [`AggregationMethod`](train.md#AggregationMethod)
+  * [`clip_by_average_norm`](train.md#clip_by_average_norm)
+  * [`clip_by_global_norm`](train.md#clip_by_global_norm)
+  * [`clip_by_norm`](train.md#clip_by_norm)
+  * [`clip_by_value`](train.md#clip_by_value)
+  * [`Coordinator`](train.md#Coordinator)
+  * [`exponential_decay`](train.md#exponential_decay)
+  * [`ExponentialMovingAverage`](train.md#ExponentialMovingAverage)
+  * [`FtrlOptimizer`](train.md#FtrlOptimizer)
+  * [`global_norm`](train.md#global_norm)
+  * [`global_step`](train.md#global_step)
+  * [`GradientDescentOptimizer`](train.md#GradientDescentOptimizer)
+  * [`gradients`](train.md#gradients)
+  * [`histogram_summary`](train.md#histogram_summary)
+  * [`image_summary`](train.md#image_summary)
+  * [`merge_all_summaries`](train.md#merge_all_summaries)
+  * [`merge_summary`](train.md#merge_summary)
+  * [`MomentumOptimizer`](train.md#MomentumOptimizer)
+  * [`Optimizer`](train.md#Optimizer)
+  * [`QueueRunner`](train.md#QueueRunner)
+  * [`RMSPropOptimizer`](train.md#RMSPropOptimizer)
+  * [`scalar_summary`](train.md#scalar_summary)
+  * [`start_queue_runners`](train.md#start_queue_runners)
+  * [`stop_gradient`](train.md#stop_gradient)
+  * [`summary_iterator`](train.md#summary_iterator)
+  * [`SummaryWriter`](train.md#SummaryWriter)
+  * [`write_graph`](train.md#write_graph)
+  * [`zero_fraction`](train.md#zero_fraction)
 
 <div class="sections-order" style="display: none;">
 <!--
@@ -1,6 +1,10 @@
 <!-- This file is machine generated: DO NOT EDIT! -->
 
 # Inputs and Readers
 
+Note: Functions taking `Tensor` arguments can also take anything
+accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
+
 <!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
 ## Contents
 * [Placeholders](#AUTOGENERATED-placeholders)
|
|||||||
* [Converting](#AUTOGENERATED-converting)
|
* [Converting](#AUTOGENERATED-converting)
|
||||||
* [tf.decode_csv(records, record_defaults, field_delim=None, name=None)](#decode_csv)
|
* [tf.decode_csv(records, record_defaults, field_delim=None, name=None)](#decode_csv)
|
||||||
* [tf.decode_raw(bytes, out_type, little_endian=None, name=None)](#decode_raw)
|
* [tf.decode_raw(bytes, out_type, little_endian=None, name=None)](#decode_raw)
|
||||||
|
* [Example protocol buffer](#AUTOGENERATED-example-protocol-buffer)
|
||||||
* [tf.parse_example(serialized, names=None, sparse_keys=None, sparse_types=None, dense_keys=None, dense_types=None, dense_defaults=None, dense_shapes=None, name='ParseExample')](#parse_example)
|
* [tf.parse_example(serialized, names=None, sparse_keys=None, sparse_types=None, dense_keys=None, dense_types=None, dense_defaults=None, dense_shapes=None, name='ParseExample')](#parse_example)
|
||||||
* [tf.parse_single_example(serialized, names=None, sparse_keys=None, sparse_types=None, dense_keys=None, dense_types=None, dense_defaults=None, dense_shapes=None, name='ParseSingleExample')](#parse_single_example)
|
* [tf.parse_single_example(serialized, names=None, sparse_keys=None, sparse_types=None, dense_keys=None, dense_types=None, dense_defaults=None, dense_shapes=None, name='ParseSingleExample')](#parse_single_example)
|
||||||
* [Queues](#AUTOGENERATED-queues)
|
* [Queues](#AUTOGENERATED-queues)
|
||||||
@@ -1061,109 +1066,106 @@ Reinterpret the bytes of a string as a vector of numbers.
of bytes divided by the number of bytes to represent out_type.


- - -

### Example protocol buffer <div class="md-anchor" id="AUTOGENERATED-example-protocol-buffer">{#AUTOGENERATED-example-protocol-buffer}</div>

TensorFlow's [recommended format for training
examples](../../how_tos/reading_data/index.md#standard-tensorflow-format)
is serialized `Example` protocol buffers, [described
here](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/example/example.proto).
They contain `Features`, [described
here](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/example/feature.proto).

- - -

### tf.parse_example(serialized, names=None, sparse_keys=None, sparse_types=None, dense_keys=None, dense_types=None, dense_defaults=None, dense_shapes=None, name='ParseExample') <div class="md-anchor" id="parse_example">{#parse_example}</div>

Parses `Example` protos.

Parses a number of serialized [`Example`]
(https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/example/example.proto)
protos given in `serialized`.

`names` may contain descriptive names for the corresponding serialized protos.
These may be useful for debugging purposes, but they have no effect on the
output. If not `None`, `names` must be the same length as `serialized`.

This op parses serialized examples into a dictionary mapping keys to `Tensor`
and `SparseTensor` objects respectively, depending on whether the keys appear
in `sparse_keys` or `dense_keys`.

The key `dense_keys[j]` is mapped to a `Tensor` of type `dense_types[j]` and
of shape `(serialized.size(),) + dense_shapes[j]`.

`dense_defaults` provides defaults for values referenced using `dense_keys`.
If a key is not present in this dictionary, the corresponding dense `Feature`
is required in all elements of `serialized`.

`dense_shapes[j]` provides the shape of each `Feature` entry referenced by
`dense_keys[j]`. The number of elements in the `Feature` corresponding to
`dense_key[j]` must always have `np.prod(dense_shapes[j])` entries. The
returned `Tensor` for `dense_key[j]` has shape `[N] + dense_shape[j]`, where
`N` is the number of `Example`s in `serialized`.

The key `sparse_keys[j]` is mapped to a `SparseTensor` of type
`sparse_types[j]`. The `SparseTensor` represents a ragged matrix.
Its indices are `[batch, index]` where `batch` is the batch entry the value
is from, and `index` is the value's index in the list of values associated
with that feature and example.

Examples:

For example, if one expects a `tf.float32` sparse feature `ft` and three
serialized `Example`s are provided:

```
serialized = [
  features:
    { feature: [ key: { "ft" value: float_list: { value: [1.0, 2.0] } } ] },
  features:
    { feature: [] },
  features:
    { feature: [ key: { "ft" value: float_list: { value: [3.0] } } ] }
]
```

then the output will look like:

```
{"ft": SparseTensor(indices=[[0, 0], [0, 1], [2, 0]],
                    values=[1.0, 2.0, 3.0],
                    shape=(3, 2)) }
```

Given two `Example` input protos in `serialized`:

```
[
  features: {
    feature: { key: "kw" value: { bytes_list: { value: [ "knit", "big" ] } } }
    feature: { key: "gps" value: { float_list: { value: [] } } }
  },
  features: {
    feature: { key: "kw" value: { bytes_list: { value: [ "emmy" ] } } }
    feature: { key: "dank" value: { int64_list: { value: [ 42 ] } } }
    feature: { key: "gps" value: { } }
  }
]
```

And arguments

```
names: ["input0", "input1"],
sparse_keys: ["kw", "dank", "gps"]
sparse_types: [DT_STRING, DT_INT64, DT_FLOAT]
```

Then the output is a dictionary:

```python
{
  "kw": SparseTensor(
      indices=[[0, 0], [0, 1], [1, 0]],
@@ -1178,78 +1180,96 @@ Then the expected output is a dictionary:
      values=[],
      shape=[2, 0]),
}
```

For dense results in two serialized `Example`s:

```
[
  features: {
    feature: { key: "age" value: { int64_list: { value: [ 0 ] } } }
    feature: { key: "gender" value: { bytes_list: { value: [ "f" ] } } }
  },
  features: {
    feature: { key: "age" value: { int64_list: { value: [] } } }
    feature: { key: "gender" value: { bytes_list: { value: [ "f" ] } } }
  }
]
```

We can use arguments:

```
names: ["input0", "input1"],
dense_keys: np.array(["age", "gender"]),
dense_types: [tf.int64, tf.string],
dense_defaults: {
  "age": -1  # "age" defaults to -1 if missing
  # "gender" has no specified default so it's required
}
dense_shapes: [(1,), (1,)],  # age, gender
```

And the expected output is:

```python
{
  "age": [[0], [-1]],
  "gender": [["f"], ["f"]],
}
```

##### Args:


* <b>serialized</b>: A list of strings, a batch of binary serialized `Example`
  protos.
* <b>names</b>: A list of strings, the names of the serialized protos.
* <b>sparse_keys</b>: A list of string keys in the examples' features.
  The results for these keys will be returned as `SparseTensor` objects.
* <b>sparse_types</b>: A list of `DTypes` of the same length as `sparse_keys`.
  Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
  and `tf.string` (`BytesList`) are supported.
* <b>dense_keys</b>: A list of string keys in the examples' features.
  The results for these keys will be returned as `Tensor`s.
* <b>dense_types</b>: A list of DTypes of the same length as `dense_keys`.
  Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
  and `tf.string` (`BytesList`) are supported.
* <b>dense_defaults</b>: A dict mapping string keys to `Tensor`s.
  The keys of the dict must match the dense_keys of the feature.
* <b>dense_shapes</b>: A list of tuples with the same length as `dense_keys`.
  The shape of the data for each dense feature referenced by `dense_keys`.
* <b>name</b>: A name for this operation (optional).

##### Returns:

A `dict` mapping keys to `Tensor`s and `SparseTensor`s.

##### Raises:


* <b>ValueError</b>: If sparse and dense key sets intersect, or input lengths do not
  match up.
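As a concrete sketch of how a call to this op fits together (illustrative only, not part of the documented change; the key names, shapes, and the placeholder input below are made up):

```python
import tensorflow as tf

# A batch of serialized Example protos, e.g. as read from a record file.
serialized = tf.placeholder(tf.string, shape=[None])

parsed = tf.parse_example(
    serialized,
    sparse_keys=["kw"], sparse_types=[tf.string],
    dense_keys=["age"], dense_types=[tf.int64],
    dense_defaults={"age": -1},   # "age" falls back to -1 when missing
    dense_shapes=[(1,)])

# parsed["kw"]  is a SparseTensor of strings (ragged across the batch).
# parsed["age"] is a Tensor of shape [batch_size, 1].
```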
- - -

### tf.parse_single_example(serialized, names=None, sparse_keys=None, sparse_types=None, dense_keys=None, dense_types=None, dense_defaults=None, dense_shapes=None, name='ParseSingleExample') <div class="md-anchor" id="parse_single_example">{#parse_single_example}</div>

Parses a single `Example` proto.

Similar to `parse_example`, except:

For dense tensors, the returned `Tensor` is identical to the output of
`parse_example`, except there is no batch dimension: the output shape is the
same as the shape given in `dense_shape`.

For `SparseTensor`s, the first (batch) column of the indices matrix is removed
(the indices matrix is a column vector), the values vector is unchanged, and
the first (batch_size) entry of the shape vector is removed (it is now a
single element vector).

See also `parse_example`.

##### Args:

@@ -1264,22 +1284,12 @@ Identical to parse_example but for scalar serialized and names.
* <b>dense_types</b>: See parse_example documentation for more details.
* <b>dense_defaults</b>: See parse_example documentation for more details.
* <b>dense_shapes</b>: See parse_example documentation for more details.
* <b>name</b>: A name for this operation (optional).

##### Returns:

A dictionary mapping keys to Tensors and SparseTensors.

##### Raises:

@@ -1632,7 +1642,7 @@ Reads and outputs the entire contents of the input filename.

## Input pipeline <div class="md-anchor" id="AUTOGENERATED-input-pipeline">{#AUTOGENERATED-input-pipeline}</div>

TensorFlow functions for setting up an input-prefetching pipeline.
Please see the [reading data how-to](../../how_tos/reading_data/index.md)
for context.

### Beginning of an input pipeline <div class="md-anchor" id="AUTOGENERATED-beginning-of-an-input-pipeline">{#AUTOGENERATED-beginning-of-an-input-pipeline}</div>
@@ -1822,41 +1832,45 @@ is added to the current Graph's QUEUE_RUNNER collection.

Run a list of tensors to fill a queue to create batches of examples.

Enqueues a different list of tensors in different threads.
Implemented using a queue -- a `QueueRunner` for the queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.

`len(tensor_list_list)` threads will be started, with thread `i` enqueuing
the tensors from `tensor_list[i]`. `tensor_list[i1][j]` must match
`tensor_list[i2][j]` in type and shape, except in the first dimension if
`enqueue_many` is true.

If `enqueue_many` is false, each `tensor_list_list[i]` is assumed to
represent a single example. Otherwise, `tensor_list_list[i]` is assumed to
represent a batch of examples, where the first dimension is indexed by
example, and all members of `tensor_list_list[i]` should have the same size
in the first dimension.

If `enqueue_many` is false, then an input tensor `x` will be output as a
tensor with shape `[batch_size] + x.shape`. If `enqueue_many` is true, the
slices of any input tensor `x` are treated as examples, and the output tensors
will have shape `[batch_size] + x.shape[1:]`.

The `capacity` argument controls how long the prefetching
is allowed to grow the queues.

##### Args:


* <b>tensor_list_list</b>: A list of tuples of tensors to enqueue.
* <b>batch_size</b>: An integer. The new batch size pulled from the queue.
* <b>capacity</b>: An integer. The maximum number of elements in the queue.
* <b>enqueue_many</b>: Whether each tensor in `tensor_list_list` is a single
  example.
* <b>shapes</b>: (Optional) The shapes for each example. Defaults to the
  inferred shapes for `tensor_list_list[i]`.
* <b>name</b>: A name for the operations (optional).

##### Returns:

A list of tensors with the same number and types as
`tensor_list_list[i]`.

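A minimal sketch of how this op is typically wired up (illustrative only, assuming the function is exposed as `tf.train.batch_join` as in the input-pipeline listing above; `read_one_example` and the source names are hypothetical):

```python
import tensorflow as tf

def read_one_example(source_name):
    # Hypothetical reader: a real pipeline would use a Reader op plus
    # tf.parse_single_example; here we just return dummy tensors.
    example = tf.constant([0.0, 0.0])
    label = tf.constant(0)
    return [example, label]

# One [example, label] list per input source; each list is enqueued by
# its own thread into a shared queue.
tensor_list_list = [read_one_example(s) for s in ["source_a", "source_b"]]

example_batch, label_batch = tf.train.batch_join(
    tensor_list_list, batch_size=32, capacity=1000)

# Batches are only produced once queue-runner threads are started,
# e.g. with tf.train.start_queue_runners() inside a Session.
```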
- - -

@@ -1933,15 +1947,15 @@ It adds:
* <b>min_after_dequeue</b>: Minimum number of elements in the queue after a
  dequeue, used to ensure a level of mixing of elements.
* <b>seed</b>: Seed for the random shuffling within the queue.
* <b>enqueue_many</b>: If `False`, each tensor_list_list[i] is assumed to
  represent a single example. If `True`, tensor_list_list[i] is
  assumed to represent a batch of examples, where the first
  dimension is indexed by example, and all members of
  tensor_list_list[i] should have the same size in the first
  dimension.
* <b>shapes</b>: Optional. The shapes for each example. Defaults to the
  inferred shapes for `tensor_list_list[i]` (which must match, after
  leaving off the first dimension if enqueue_many is `True`).
* <b>name</b>: A name for the operations (optional).

##### Returns:

@@ -1,6 +1,10 @@
<!-- This file is machine generated: DO NOT EDIT! -->

# Math

Note: Functions taking `Tensor` arguments can also take anything
accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).

<!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
## Contents
* [Arithmetic Operators](#AUTOGENERATED-arithmetic-operators)

@@ -1,6 +1,10 @@
<!-- This file is machine generated: DO NOT EDIT! -->

# Neural Network

Note: Functions taking `Tensor` arguments can also take anything
accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).

<!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
## Contents
* [Activation Functions](#AUTOGENERATED-activation-functions)

@@ -899,7 +903,7 @@ only considering a small randomly-chosen subset of contrastive classes
(called candidates) for each batch of training examples.

See our [Candidate Sampling Algorithms Reference]
(../../extras/candidate_sampling.pdf)

### Sampled Loss Functions <div class="md-anchor" id="AUTOGENERATED-sampled-loss-functions">{#AUTOGENERATED-sampled-loss-functions}</div>

@@ -1,16 +1,20 @@
<!-- This file is machine generated: DO NOT EDIT! -->

# Sparse Tensors

Note: Functions taking `Tensor` arguments can also take anything
accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).

<!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
## Contents
* [Sparse Tensor Representation](#AUTOGENERATED-sparse-tensor-representation)
* [class tf.SparseTensor](#SparseTensor)
* [class tf.SparseTensorValue](#SparseTensorValue)
* [Sparse to Dense Conversion](#AUTOGENERATED-sparse-to-dense-conversion)
* [tf.sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value, name=None)](#sparse_to_dense)
* [tf.sparse_tensor_to_dense(sp_input, default_value, name=None)](#sparse_tensor_to_dense)
* [tf.sparse_to_indicator(sp_input, vocab_size, name=None)](#sparse_to_indicator)
* [Manipulation](#AUTOGENERATED-manipulation)
* [tf.sparse_concat(concat_dim, sp_inputs, name=None)](#sparse_concat)
* [tf.sparse_reorder(sp_input, name=None)](#sparse_reorder)
* [tf.sparse_retain(sp_input, to_retain)](#sparse_retain)

@@ -19,7 +23,7 @@

<!-- TOC-END This section was generated by neural network, THANKS FOR READING! -->

## Sparse Tensor Representation <div class="md-anchor" id="AUTOGENERATED-sparse-tensor-representation">{#AUTOGENERATED-sparse-tensor-representation}</div>

Tensorflow supports a `SparseTensor` representation for data that is sparse
in multiple dimensions. Contrast this representation with `IndexedSlices`,

@@ -157,7 +161,7 @@ Alias for field number 1

## Sparse to Dense Conversion <div class="md-anchor" id="AUTOGENERATED-sparse-to-dense-conversion">{#AUTOGENERATED-sparse-to-dense-conversion}</div>

- - -

@@ -296,7 +300,7 @@ The input `SparseTensor` must be in row-major order.

## Manipulation <div class="md-anchor" id="AUTOGENERATED-manipulation">{#AUTOGENERATED-manipulation}</div>

- - -

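As an illustrative sketch of the sparse-to-dense conversion listed above (the indices, values, and shape below are made up; `tf.sparse_to_dense` follows the signature shown in the contents list):

```python
import tensorflow as tf

# Two non-zero entries of a 3x4 matrix: [0, 0] -> 1 and [2, 3] -> 2.
dense = tf.sparse_to_dense(sparse_indices=[[0, 0], [2, 3]],
                           output_shape=[3, 4],
                           sparse_values=[1, 2],
                           default_value=0)

with tf.Session() as sess:
    print sess.run(dense)
    # ==> [[1 0 0 0]
    #      [0 0 0 0]
    #      [0 0 0 2]]
```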
@@ -1,6 +1,10 @@
<!-- This file is machine generated: DO NOT EDIT! -->

# Variables

Note: Functions taking `Tensor` arguments can also take anything
accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).

<!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
## Contents
* [Variables](#AUTOGENERATED-variables)

@@ -11,7 +15,7 @@
* [tf.initialize_all_variables()](#initialize_all_variables)
* [tf.initialize_variables(var_list, name='init')](#initialize_variables)
* [tf.assert_variables_initialized(var_list=None)](#assert_variables_initialized)
* [Saving and Restoring Variables](#AUTOGENERATED-saving-and-restoring-variables)
* [class tf.train.Saver](#Saver)
* [tf.train.latest_checkpoint(checkpoint_dir, latest_filename=None)](#latest_checkpoint)
* [tf.train.get_checkpoint_state(checkpoint_dir, latest_filename=None)](#get_checkpoint_state)

@@ -325,7 +329,7 @@ This is not a graph construction method, it does not add ops to the graph.

This convenience method requires a session where the graph containing this
variable has been launched. If no session is passed, the default session is
used. See the [Session class](client.md#Session) for more information on
launching a graph and on sessions.

```python

@@ -506,7 +510,7 @@ logged by the C++ runtime. This is expected.

## Saving and Restoring Variables <div class="md-anchor" id="AUTOGENERATED-saving-and-restoring-variables">{#AUTOGENERATED-saving-and-restoring-variables}</div>

- - -

@@ -3,7 +3,7 @@
# Training
<!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
## Contents
* [Optimizers](#AUTOGENERATED-optimizers)
* [class tf.train.Optimizer](#Optimizer)
* [Usage](#AUTOGENERATED-usage)
* [Processing gradients before applying them.](#AUTOGENERATED-processing-gradients-before-applying-them.)

@@ -15,7 +15,7 @@
* [class tf.train.AdamOptimizer](#AdamOptimizer)
* [class tf.train.FtrlOptimizer](#FtrlOptimizer)
* [class tf.train.RMSPropOptimizer](#RMSPropOptimizer)
* [Gradient Computation](#AUTOGENERATED-gradient-computation)
* [tf.gradients(ys, xs, grad_ys=None, name='gradients', colocate_gradients_with_ops=False, gate_gradients=False, aggregation_method=None)](#gradients)
* [class tf.AggregationMethod](#AggregationMethod)
* [tf.stop_gradient(input, name=None)](#stop_gradient)

@@ -25,26 +25,26 @@
* [tf.clip_by_average_norm(t, clip_norm, name=None)](#clip_by_average_norm)
* [tf.clip_by_global_norm(t_list, clip_norm, use_norm=None, name=None)](#clip_by_global_norm)
* [tf.global_norm(t_list, name=None)](#global_norm)
* [Decaying the learning rate](#AUTOGENERATED-decaying-the-learning-rate)
* [tf.train.exponential_decay(learning_rate, global_step, decay_steps, decay_rate, staircase=False, name=None)](#exponential_decay)
* [Moving Averages](#AUTOGENERATED-moving-averages)
* [class tf.train.ExponentialMovingAverage](#ExponentialMovingAverage)
* [Coordinator and QueueRunner](#AUTOGENERATED-coordinator-and-queuerunner)
* [class tf.train.Coordinator](#Coordinator)
* [class tf.train.QueueRunner](#QueueRunner)
* [tf.train.add_queue_runner(qr, collection='queue_runners')](#add_queue_runner)
* [tf.train.start_queue_runners(sess=None, coord=None, daemon=True, start=True, collection='queue_runners')](#start_queue_runners)
* [Summary Operations](#AUTOGENERATED-summary-operations)
* [tf.scalar_summary(tags, values, collections=None, name=None)](#scalar_summary)
* [tf.image_summary(tag, tensor, max_images=None, collections=None, name=None)](#image_summary)
* [tf.histogram_summary(tag, values, collections=None, name=None)](#histogram_summary)
* [tf.nn.zero_fraction(value, name=None)](#zero_fraction)
* [tf.merge_summary(inputs, collections=None, name=None)](#merge_summary)
* [tf.merge_all_summaries(key='summaries')](#merge_all_summaries)
* [Adding Summaries to Event Files](#AUTOGENERATED-adding-summaries-to-event-files)
* [class tf.train.SummaryWriter](#SummaryWriter)
* [tf.train.summary_iterator(path)](#summary_iterator)
* [Training utilities](#AUTOGENERATED-training-utilities)
* [tf.train.global_step(sess, global_step_tensor)](#global_step)
* [tf.train.write_graph(graph_def, logdir, name, as_text=True)](#write_graph)

@@ -53,7 +53,7 @@

This library provides a set of classes and functions that help train models.

## Optimizers <div class="md-anchor" id="AUTOGENERATED-optimizers">{#AUTOGENERATED-optimizers}</div>

The Optimizer base class provides methods to compute gradients for a loss and
apply gradients to variables. A collection of subclasses implement classic
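A minimal usage sketch of the optimizer interface described above (a toy quadratic loss invented for illustration, not taken from the original docs):

```python
import tensorflow as tf

w = tf.Variable([0.5])
loss = tf.reduce_sum(tf.square(w))   # toy loss: minimized at w == 0

opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train_op = opt.minimize(loss)        # computes and applies gradients

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    for _ in range(5):
        sess.run(train_op)
    print sess.run(w)                # w has moved toward 0
```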
@@ -523,7 +523,7 @@ Construct a new RMSProp optimizer.

## Gradient Computation <div class="md-anchor" id="AUTOGENERATED-gradient-computation">{#AUTOGENERATED-gradient-computation}</div>

TensorFlow provides functions to compute the derivatives for a given
TensorFlow computation graph, adding operations to the graph. The
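A small sketch of `tf.gradients` on a toy graph (illustrative only; the constants below are made up):

```python
import tensorflow as tf

x = tf.constant(3.0)
y = x * x

# dy/dx; tf.gradients returns one gradient tensor per entry in the xs list.
grad = tf.gradients(y, [x])

with tf.Session() as sess:
    print sess.run(grad)   # ==> [6.0]
```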
@@ -816,7 +816,7 @@ Any entries in `t_list` that are of type None are ignored.

## Decaying the learning rate <div class="md-anchor" id="AUTOGENERATED-decaying-the-learning-rate">{#AUTOGENERATED-decaying-the-learning-rate}</div>
- - -

### tf.train.exponential_decay(learning_rate, global_step, decay_steps, decay_rate, staircase=False, name=None) <div class="md-anchor" id="exponential_decay">{#exponential_decay}</div>
@@ -873,7 +873,7 @@ optimizer.minimize(...my loss..., global_step=global_step)

## Moving Averages <div class="md-anchor" id="AUTOGENERATED-moving-averages">{#AUTOGENERATED-moving-averages}</div>

Some training algorithms, such as GradientDescent and Momentum often benefit
from maintaining a moving average of variables during optimization. Using the
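A brief sketch of maintaining such an average with `ExponentialMovingAverage` (the variable, decay value, and update loop are invented for illustration):

```python
import tensorflow as tf

w = tf.Variable(0.0)
ema = tf.train.ExponentialMovingAverage(decay=0.9)
maintain_avg_op = ema.apply([w])   # creates and updates a shadow variable for w

update_w = w.assign_add(1.0)

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    for _ in range(3):
        sess.run(update_w)
        sess.run(maintain_avg_op)
    print sess.run(ema.average(w))  # smoothed value, lags behind w
```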
@@ -1075,7 +1075,7 @@ Returns the `Variable` holding the average of `var`.

## Coordinator and QueueRunner <div class="md-anchor" id="AUTOGENERATED-coordinator-and-queuerunner">{#AUTOGENERATED-coordinator-and-queuerunner}</div>

See [Threading and Queues](../../how_tos/threading_and_queues/index.md)
for how to use threads and queues. For documentation on the Queue API,
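A common pattern, sketched here with a stand-in op (illustrative only; a real graph would have an input pipeline whose ops register `QueueRunner`s):

```python
import tensorflow as tf

# Stand-in for a real training op fed by a queue-based input pipeline.
train_op = tf.constant(0.0)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    # Launches threads for all QueueRunners collected in the graph.
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    for step in range(100):
        if coord.should_stop():
            break
        sess.run(train_op)
    coord.request_stop()
    coord.join(threads)
```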
@@ -1399,17 +1399,21 @@ the list of all threads.

## Summary Operations <div class="md-anchor" id="AUTOGENERATED-summary-operations">{#AUTOGENERATED-summary-operations}</div>

The following ops output
[`Summary`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/framework/summary.proto)
protocol buffers as serialized string tensors.

You can fetch the output of a summary op in a session, and pass it to
a [SummaryWriter](train.md#SummaryWriter) to append it to an event
file. Event files contain
[`Event`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/util/event.proto)
protos that can contain `Summary` protos along with the timestamp and
step. You can then use TensorBoard to visualize the contents of the
event files. See [TensorBoard and
Summaries](../../how_tos/summaries_and_tensorboard/index.md) for more
details.

- - -

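A minimal end-to-end sketch of that fetch-and-append flow (the summary tag, value, and log directory are made up for illustration):

```python
import tensorflow as tf

loss = tf.constant(0.37)
loss_summary = tf.scalar_summary("loss", loss)

merged = tf.merge_all_summaries()
writer = tf.train.SummaryWriter("/tmp/train_logs")   # hypothetical log dir

with tf.Session() as sess:
    summary_str = sess.run(merged)        # serialized Summary proto
    writer.add_summary(summary_str, 0)    # appended to the event file at step 0
    writer.close()
```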
@@ -1587,7 +1591,7 @@ Merges all summaries collected in the default graph.

## Adding Summaries to Event Files <div class="md-anchor" id="AUTOGENERATED-adding-summaries-to-event-files">{#AUTOGENERATED-adding-summaries-to-event-files}</div>

See [Summaries and
TensorBoard](../../how_tos/summaries_and_tensorboard/index.md) for an
@@ -1768,7 +1772,7 @@ for more information about their attributes.

## Training utilities <div class="md-anchor" id="AUTOGENERATED-training-utilities">{#AUTOGENERATED-training-utilities}</div>

- - -

@@ -77,7 +77,7 @@ The default graph now has three nodes: two `constant()` ops and one `matmul()`
op. To actually multiply the matrices, and get the result of the multiplication,
you must launch the graph in a session.

### Launching the graph in a session

Launching follows construction. To launch a graph, create a `Session` object.
Without arguments the session constructor launches the default graph.

@@ -102,20 +102,16 @@ sess = tf.Session()
# The output of the op is returned in 'result' as a numpy `ndarray` object.
result = sess.run(product)
print result
# ==> [[ 12.]]

# Close the Session when we're done.
sess.close()
```

Sessions should be closed to release resources. You can also enter a `Session`
with a "with" block. The `Session` closes automatically at the end of the
`with` block.

```python
with tf.Session() as sess:
  result = sess.run([product])
@@ -150,6 +146,37 @@ Devices are specified with strings. The currently supported devices are:
See [Using GPUs](../how_tos/using_gpu/index.md) for more information about GPUs
and TensorFlow.

## Interactive Usage

The Python examples in the documentation launch the graph with a
[`Session`](../api_docs/python/client.md#Session) and use the
[`Session.run()`](../api_docs/python/client.md#Session.run) method to execute
operations.

For ease of use in interactive Python environments, such as
[IPython](http://ipython.org), you can instead use the
[`InteractiveSession`](../api_docs/python/client.md#InteractiveSession) class,
and the [`Tensor.eval()`](../api_docs/python/framework.md#Tensor.eval) and
[`Operation.run()`](../api_docs/python/framework.md#Operation.run) methods. This
avoids having to keep a variable holding the session.

```python
# Enter an interactive TensorFlow Session.
import tensorflow as tf
sess = tf.InteractiveSession()

x = tf.Variable([1.0, 2.0])
a = tf.constant([3.0, 3.0])

# Initialize 'x' using the run() method of its initializer op.
x.initializer.run()

# Add an op to subtract 'a' from 'x'. Run it and print the result.
sub = tf.sub(x, a)
print sub.eval()
# ==> [-2. -1.]
```

## Tensors

TensorFlow programs use a tensor data structure to represent all data -- only
|
|||||||
these concepts, see the [Rank, Shape, and Type](../resources/dims_types.md)
|
these concepts, see the [Rank, Shape, and Type](../resources/dims_types.md)
|
||||||
reference.
|
reference.
|
||||||
|
|
||||||
|
|
||||||
# output:
|
|
||||||
# [array([ 21.], dtype=float32), array([ 7.], dtype=float32)]
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## Variables
|
## Variables
|
||||||
|
|
||||||
Variables maintain state across executions of the graph. The following example
|
Variables maintain state across executions of the graph. The following example
|
||||||
|
@@ -52,17 +52,15 @@ other software packages, please take the red pill. If you've never even heard
of MNIST, definitely take the blue pill. If you're somewhere in between, we
suggest skimming blue, then red.

<div style="width:100%; margin:auto; margin-bottom:10px; margin-top:20px; display: flex; flex-direction: row">
  <a href="../tutorials/mnist/beginners/index.md">
    <img style="flex-grow:1; flex-shrink:1;border: 1px solid black;" src="./blue_pill.png">
  </a>
  <a href="../tutorials/mnist/pros/index.md">
    <img style="flex-grow:1; flex-shrink:1; border: 1px solid black;" src="./red_pill.png">
  </a>
</div>
<p style="font-size:10px;">Images licensed CC BY-SA 4.0; original by W. Carter</p>

If you're already sure you want to learn and install TensorFlow you can skip
these and charge ahead. Don't worry, you'll still get to see MNIST -- we'll
@@ -4,10 +4,10 @@

### Ubuntu/Linux

Make sure you have [pip](https://pypi.python.org/pypi/pip) installed:

```sh
$ sudo apt-get install python-pip
```

Install TensorFlow:

@@ -22,7 +22,7 @@ $ sudo pip install https://storage.googleapis.com/tensorflow/linux/gpu/tensorflo

### Mac OS X

Make sure you have [pip](https://pypi.python.org/pypi/pip) installed:

If using `easy_install`:

@@ -90,16 +90,16 @@ Validation error: 84.6%

```

## Installing from sources {#source}

### Clone the TensorFlow repository

```sh
$ git clone --recurse-submodules https://tensorflow.googlesource.com/tensorflow
```

`--recurse-submodules` is required to fetch the protobuf library that TensorFlow
depends on.

### Installation for Linux

@@ -162,11 +162,9 @@ GPU support will be enabled for TensorFlow

Please specify the location where CUDA 7.0 toolkit is installed. Refer to
README.md for more details. [default is: /usr/local/cuda]: /usr/local/cuda

Please specify the location where CUDNN 6.5 V2 library is installed. Refer to
README.md for more details. [default is: /usr/local/cuda]: /usr/local/cuda

Setting up Cuda include
Setting up Cuda lib64

@@ -191,9 +189,6 @@ $ bazel-bin/tensorflow/cc/tutorials_example_trainer --use_gpu
000009/000005 lambda = 2.000000 x = [0.894427 -0.447214] y = [1.788854 -0.894427]
000006/000001 lambda = 2.000000 x = [0.894427 -0.447214] y = [1.788854 -0.894427]
000009/000009 lambda = 2.000000 x = [0.894427 -0.447214] y = [1.788854 -0.894427]
```

Note that "--config=cuda" is needed to enable the GPU support.
@@ -231,14 +226,24 @@ Notes : You need to install

Follow installation instructions [here](http://docs.scipy.org/doc/numpy/user/install.html).

-### Build and train your first TensorFlow neural net model
+### Create the pip package and install

```sh
-$ cd tf3
+$ bazel build -c opt //tensorflow/tools/pip_package:build_pip_package

-$ bazel build tensorflow/models/image/mnist:convolutional
+$ bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg

-$ bazel-bin/tensorflow/models/image/mnist/convolutional
+# The name of the .whl file will depend on your platform.
+$ pip install /tmp/tensorflow_pkg/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
+```

+### Train your first TensorFlow neural net model

+From the root of your source tree, run:

+```sh
+$ python tensorflow/models/image/mnist/convolutional.py
Succesfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
Succesfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
Succesfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
@@ -8,8 +8,11 @@ your TensorFlow graph, quantitative metrics about the execution of your graph,
and even additional data like images that pass through it. When TensorBoard is
fully configured, it looks like this:

-TODO(danmane): Enable a live TensorBoard
-
+ If you're on
+desktop Chrome or FF, try playing around with [this live
+TensorBoard](/tensorboard/cifar.html).


## Serializing the data

@@ -1,3 +1,5 @@
+# BibTex Citation
+```
@misc{tensorflow2015-whitepaper,
title={{TensorFlow}: Large-Scale Machine Learning on Heterogeneous Systems},
url={http://www.tensorflow.org/extras/tensorflow-whitepaper2015.pdf},
@@ -9,7 +11,7 @@ author={
Eugene~Brevdo and
Zhifeng~Chen and
Craig~Citro and
-Greg~Corrado and
+Greg~S-Corrado and
Andy~Davis and
Jeffrey~Dean and
Matthieu~Devin and
@@ -43,3 +45,4 @@ author={
Xiaoqiang~Zheng},
year={2015},
}
+```
@@ -1,21 +1,28 @@
# Frequently Asked Questions

This document provides answers to some of the frequently asked questions about
-TensorFlow. If you have a question that is not covered here, please
-[get in touch](index.md).
+TensorFlow. If you have a question that is not covered here, you might find an
+answer on one of the TensorFlow [community resources](index.md).

<!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
## Contents
+* [Building a TensorFlow graph](#AUTOGENERATED-building-a-tensorflow-graph)
+* [Running a TensorFlow computation](#AUTOGENERATED-running-a-tensorflow-computation)
+* [Variables](#AUTOGENERATED-variables)
+* [Tensor shapes](#AUTOGENERATED-tensor-shapes)
+* [TensorBoard](#AUTOGENERATED-tensorboard)
+* [Extending TensorFlow](#AUTOGENERATED-extending-tensorflow)
+* [Miscellaneous](#AUTOGENERATED-miscellaneous)


<!-- TOC-END This section was generated by neural network, THANKS FOR READING! -->

-#### Building a TensorFlow graph
+### Building a TensorFlow graph <div class="md-anchor" id="AUTOGENERATED-building-a-tensorflow-graph">{#AUTOGENERATED-building-a-tensorflow-graph}</div>

See also the
[API documentation on building graphs](../api_docs/python/framework.md).

-##### Why does `c = tf.matmul(a, b)` not execute the matrix multiplication immediately?
+#### Why does `c = tf.matmul(a, b)` not execute the matrix multiplication immediately?

In the TensorFlow Python API, `a`, `b`, and `c` are
[`Tensor`](../api_docs/python/framework.md#Tensor) objects. A `Tensor` object is
@@ -28,12 +35,12 @@ a dataflow graph. You then offload the computation of the entire dataflow graph
whole computation much more efficiently than executing the operations
one-by-one.
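As a rough illustration of the deferred execution described above (a sketch, not part of this change; the constant values are made up), building the graph and running it are two separate steps:

```python
import tensorflow as tf

a = tf.constant([[1.0, 2.0]])    # graph construction only; nothing runs yet
b = tf.constant([[3.0], [4.0]])
c = tf.matmul(a, b)              # c is a symbolic Tensor, not the product

sess = tf.Session()
print(sess.run(c))               # the matrix multiplication executes here
```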

-##### How are devices named?
+#### How are devices named?

The supported device names are `"/device:CPU:0"` (or `"/cpu:0"`) for the CPU
device, and `"/device:GPU:i"` (or `"/gpu:i"`) for the *i*th GPU device.

-##### How do I place operations on a particular device?
+#### How do I place operations on a particular device?

To place a group of operations on a device, create them within a
[`with tf.device(name):`](../api_docs/python/framework.md#device) context. See
@@ -43,17 +50,17 @@ TensorFlow assigns operations to devices, and the
[CIFAR-10 tutorial](../tutorials/deep_cnn/index.md) for an example model that
uses multiple GPUs.
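A minimal sketch of the `tf.device()` usage described above (illustrative only; it assumes a machine with at least one GPU, otherwise use `"/cpu:0"`):

```python
import tensorflow as tf

# Pin these ops to the first GPU device.
with tf.device("/gpu:0"):
  a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
  b = tf.matmul(a, a)

sess = tf.Session()
print(sess.run(b))
```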

-##### What are the different types of tensors that are available?
+#### What are the different types of tensors that are available?

TensorFlow supports a variety of different data types and tensor shapes. See the
[ranks, shapes, and types reference](dims_types.md) for more details.

-#### Running a TensorFlow computation
+### Running a TensorFlow computation <div class="md-anchor" id="AUTOGENERATED-running-a-tensorflow-computation">{#AUTOGENERATED-running-a-tensorflow-computation}</div>

See also the
[API documentation on running graphs](../api_docs/python/client.md).

-##### What's the deal with feeding and placeholders?
+#### What's the deal with feeding and placeholders?

Feeding is a mechanism in the TensorFlow Session API that allows you to
substitute different values for one or more tensors at run time. The `feed_dict`
@@ -69,7 +76,7 @@ optionally allows you to constrain their shape as well. See the
example of how placeholders and feeding can be used to provide the training data
for a neural network.
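A small sketch of the feeding mechanism (not part of this change; the array is stand-in data):

```python
import numpy as np
import tensorflow as tf

x = tf.placeholder("float", shape=[None, 2])   # value supplied at run time
y = tf.reduce_sum(x)

sess = tf.Session()
batch = np.ones((4, 2), dtype="float32")
print(sess.run(y, feed_dict={x: batch}))       # prints 8.0
```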

-##### What is the difference between `Session.run()` and `Tensor.eval()`?
+#### What is the difference between `Session.run()` and `Tensor.eval()`?

If `t` is a [`Tensor`](../api_docs/python/framework.md#Tensor) object,
[`t.eval()`](../api_docs/python/framework.md#Tensor.eval) is shorthand for
@@ -96,7 +103,7 @@ the `with` block. The context manager approach can lead to more concise code for
simple use cases (like unit tests); if your code deals with multiple graphs and
sessions, it may be more straightforward to explicit calls to `Session.run()`.
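The equivalence described above, as a short sketch (illustrative only):

```python
import tensorflow as tf

t = tf.constant(42.0)
sess = tf.Session()

print(sess.run(t))        # explicit: pass the tensor to Session.run()

with sess.as_default():   # t.eval() looks up the default session
  print(t.eval())
```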

-##### Do Sessions have a lifetime? What about intermediate tensors?
+#### Do Sessions have a lifetime? What about intermediate tensors?

Sessions can own resources, such
[variables](../api_docs/python/state_ops.md#Variable),
@@ -110,13 +117,13 @@ The intermediate tensors that are created as part of a call to
[`Session.run()`](../api_docs/python/client.md) will be freed at or before the
end of the call.

-##### Can I run distributed training on multiple computers?
+#### Can I run distributed training on multiple computers?

The initial open-source release of TensorFlow supports multiple devices (CPUs
and GPUs) in a single computer. We are working on a distributed version as well:
if you are interested, please let us know so we can prioritize accordingly.

-##### Does the runtime parallelize parts of graph execution?
+#### Does the runtime parallelize parts of graph execution?

The TensorFlow runtime parallelizes graph execution across many different
dimensions:
@@ -131,7 +138,7 @@ dimensions:
  enables the runtime to get higher throughput, if a single step does not use
  all of the resources in your computer.

-##### Which client languages are supported in TensorFlow?
+#### Which client languages are supported in TensorFlow?

TensorFlow is designed to support multiple client languages. Currently, the
best-supported client language is [Python](../api_docs/python/index.md). The
@@ -145,7 +152,7 @@ interest. TensorFlow has a
that makes it easy to build a client in many different languages. We invite
contributions of new language bindings.

-##### Does TensorFlow make use of all the devices (GPUs and CPUs) available on my machine?
+#### Does TensorFlow make use of all the devices (GPUs and CPUs) available on my machine?

TensorFlow supports multiple GPUs and CPUs. See the how-to documentation on
[using GPUs with TensorFlow](../how_tos/using_gpu/index.md) for details of how
@@ -156,10 +163,10 @@ uses multiple GPUs.
Note that TensorFlow only uses GPU devices with a compute capability greater
than 3.5.

-##### Why does `Session.run()` hang when using a reader or a queue?
+#### Why does `Session.run()` hang when using a reader or a queue?

-The [reader](../api_docs/io_ops.md#ReaderBase) and
-[queue](../api_docs/io_ops.md#QueueBase) classes provide special operations that
+The [reader](../api_docs/python/io_ops.md#ReaderBase) and
+[queue](../api_docs/python/io_ops.md#QueueBase) classes provide special operations that
can *block* until input (or free space in a bounded queue) becomes
available. These operations allow you to build sophisticated
[input pipelines](../how_tos/reading_data/index.md), at the cost of making the
@@ -168,20 +175,20 @@ for
[using `QueueRunner` objects to drive queues and readers](../how_tos/reading_data/index.md#QueueRunners)
for more information on how to use them.

-#### Variables
+### Variables <div class="md-anchor" id="AUTOGENERATED-variables">{#AUTOGENERATED-variables}</div>

See also the how-to documentation on [variables](../how_tos/variables/index.md)
and [variable scopes](../how_tos/variable_scope/index.md), and
[the API documentation for variables](../api_docs/python/state_ops.md).

-##### What is the lifetime of a variable?
+#### What is the lifetime of a variable?

A variable is created when you first run the
[`tf.Variable.initializer`](../api_docs/python/state_ops.md#Variable.initializer)
operation for that variable in a session. It is destroyed when that
[`session is closed`](../api_docs/python/client.md#Session.close).
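A minimal sketch of that lifetime (illustrative, not part of this change):

```python
import tensorflow as tf

v = tf.Variable(0, name="counter")

sess = tf.Session()
sess.run(v.initializer)   # the variable comes into existence here
print(sess.run(v))
sess.close()              # ...and is destroyed when the session is closed
```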

-##### How do variables behave when they are concurrently accessed?
+#### How do variables behave when they are concurrently accessed?

Variables allow concurrent read and write operations. The value read from a
variable may change it is concurrently updated. By default, concurrent assigment
@@ -189,12 +196,12 @@ operations to a variable are allowed to run with no mutual exclusion. To acquire
a lock when assigning to a variable, pass `use_locking=True` to
[`Variable.assign()`](../api_docs/python/state_ops.md#Variable.assign).
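For illustration (a sketch, not part of this change), an assignment that takes the variable's lock looks like:

```python
import tensorflow as tf

v = tf.Variable(10)
assign_op = v.assign(20, use_locking=True)   # serialized against other assigns

sess = tf.Session()
sess.run(v.initializer)
sess.run(assign_op)
print(sess.run(v))   # 20
```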

-#### Tensor shapes
+### Tensor shapes <div class="md-anchor" id="AUTOGENERATED-tensor-shapes">{#AUTOGENERATED-tensor-shapes}</div>

See also the
[`TensorShape` API documentation](../api_docs/python/framework.md#TensorShape).

-##### How can I determine the shape of a tensor in Python?
+#### How can I determine the shape of a tensor in Python?

In TensorFlow, a tensor has both a static (inferred) shape and a dynamic (true)
shape. The static shape can be read using the
@@ -205,7 +212,7 @@ tensor, and may be
shape is not fully defined, the dynamic shape of a `Tensor` `t` can be
determined by evaluating [`tf.shape(t)`](../api_docs/python/array_ops.md#shape).
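A short sketch of the two kinds of shape (illustrative only):

```python
import tensorflow as tf

x = tf.placeholder("float", shape=[None, 10])
print(x.get_shape())             # static shape, inferred at graph-construction time

dynamic_shape = tf.shape(x)      # dynamic shape, a Tensor evaluated at run time
sess = tf.Session()
print(sess.run(dynamic_shape, feed_dict={x: [[0.0] * 10] * 3}))   # [3 10]
```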

-##### What is the difference between `x.set_shape()` and `x = tf.reshape(x)`?
+#### What is the difference between `x.set_shape()` and `x = tf.reshape(x)`?

The [`tf.Tensor.set_shape()`](../api_docs/python/framework.md) method updates
the static shape of a `Tensor` object, and it is typically used to provide
@@ -215,7 +222,7 @@ change the dynamic shape of the tensor.
The [`tf.reshape()`](../api_docs/python/array_ops.md#reshape) operation creates
a new tensor with a different dynamic shape.
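The distinction in a couple of lines (a sketch, not part of this change):

```python
import tensorflow as tf

x = tf.placeholder("float")    # static shape initially unknown
x.set_shape([28, 28])          # updates the static shape only; values untouched

y = tf.reshape(x, [784])       # a new tensor with a different dynamic shape
```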

-##### How do I build a graph that works with variable batch sizes?
+#### How do I build a graph that works with variable batch sizes?

It is often useful to build a graph that works with variable batch sizes, for
example so that the same code can be used for (mini-)batch training, and
@@ -241,31 +248,31 @@ to encode the batch size as a Python constant, but instead to use a symbolic
[`tf.placeholder(..., shape=[None, ...])`](../api_docs/python/io_ops.md#placeholder). The
`None` element of the shape corresponds to a variable-sized dimension.
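For example (a sketch; the 784/10 sizes are just the usual MNIST dimensions):

```python
import tensorflow as tf

# `None` leaves the batch dimension unspecified, so the same graph accepts
# batches of any size at run time.
x = tf.placeholder("float", shape=[None, 784])
W = tf.Variable(tf.zeros([784, 10]))
y = tf.nn.softmax(tf.matmul(x, W))
```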

-#### TensorBoard
+### TensorBoard <div class="md-anchor" id="AUTOGENERATED-tensorboard">{#AUTOGENERATED-tensorboard}</div>

See also the
[how-to documentation on TensorBoard](../how_tos/graph_viz/index.md).

-##### What is the simplest way to send data to tensorboard? # TODO(danmane)
+#### What is the simplest way to send data to tensorboard? # TODO(danmane)

Add summary_ops to your TensorFlow graph, and use a SummaryWriter to write all
of these summaries to a log directory. Then, startup TensorBoard using
<SOME_COMMAND> and pass the --logdir flag so that it points to your
log directory. For more details, see <YET_UNWRITTEN_TENSORBOARD_TUTORIAL>.

-#### Extending TensorFlow
+### Extending TensorFlow <div class="md-anchor" id="AUTOGENERATED-extending-tensorflow">{#AUTOGENERATED-extending-tensorflow}</div>

See also the how-to documentation for
[adding a new operation to TensorFlow](../how_tos/adding_an_op/index.md).

-##### My data is in a custom format. How do I read it using TensorFlow?
+#### My data is in a custom format. How do I read it using TensorFlow?

There are two main options for dealing with data in a custom format.

The easier option is to write parsing code in Python that transforms the data
-into a numpy array, then feed a
-[tf.placeholder()](../api_docs/python/io_ops.md#placeholder) a tensor with that
-data. See the documentation on
+into a numpy array, then feed a [`tf.placeholder()`]
+(../api_docs/python/io_ops.md#placeholder) a tensor with that data. See the
+documentation on
[using placeholders for input](../how_tos/reading_data/index.md#Feeding) for
more details. This approach is easy to get up and running, but the parsing can
be a performance bottleneck.
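A sketch of that "parse in Python, then feed" approach (the file name and parser are hypothetical, not part of this change):

```python
import numpy as np
import tensorflow as tf

def load_my_format(path):
  # Hypothetical parser: turn a custom file into a float32 numpy array.
  return np.loadtxt(path, delimiter=",").astype("float32")

data = load_my_format("my_data.csv")

x = tf.placeholder("float", shape=data.shape)
total = tf.reduce_sum(x)

sess = tf.Session()
print(sess.run(total, feed_dict={x: data}))
```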
@@ -276,7 +283,7 @@ data format. The
[guide to handling new data formats](../how_tos/new_data_formats/index.md) has
more information about the steps for doing this.

-##### How do I define an operation that takes a variable number of inputs?
+#### How do I define an operation that takes a variable number of inputs?

The TensorFlow op registration mechanism allows you to define inputs that are a
single tensor, a list of tensors with the same type (for example when adding
@@ -286,15 +293,15 @@ how-to documentation for
[adding an op with a list of inputs or outputs](../how_tos/adding_an_op/index.md#list-input-output)
for more details of how to define these different input types.

-#### Miscellaneous
+### Miscellaneous <div class="md-anchor" id="AUTOGENERATED-miscellaneous">{#AUTOGENERATED-miscellaneous}</div>

-##### Does TensorFlow work with Python 3?
+#### Does TensorFlow work with Python 3?

We have only tested TensorFlow using Python 2.7. We are aware of some changes
that will be required for Python 3 compatibility, and welcome contributions
towards this effort.

-##### What is TensorFlow's coding style convention?
+#### What is TensorFlow's coding style convention?

The TensorFlow Python API adheres to the
[PEP8](https://www.python.org/dev/peps/pep-0008/) conventions.<sup>*</sup> In
@@ -1,5 +1,7 @@
# Glossary

+TODO(someone): Fix several broken links in Glossary

**Broadcasting operation**

An operation that uses [numpy-style broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
@@ -12,25 +12,33 @@ implementation can be found in out white paper:

If you use TensorFlow in your research and would like to cite the TensorFlow
system, we suggest you cite the paper above. You can use this [BibTeX
-entry](../extras/tensorflow-whitepaper2015.bib). As the project progresses, we
+entry](bib.md). As the project progresses, we
may update the suggested citation with new papers.


## Community

-TODO(rajatmonga): Write this!
-
-* NO - google group
-* YES, ASAP - internal support mailing list
-* YES, ASAP - stack overflow presence
-* SOON - slack
+### Discuss
+
+* GitHub: <https://github.com/tensorflow/tensorflow>
+* Stack Overflow: <https://stackoverflow.com/questions/tagged/tensorflow>
+* [TensorFlow discuss mailing list](
+  https://groups.google.com/forum/#!forum/tensorflow-discuss)
+
+### Report Issues
+
+* [TensorFlow issues](https://github.com/tensorflow/tensorflow/issues)
+
+### Development
+
+* If you are interested in contributing to TensorFlow please
+  [review the contributing guide](
+  https://github.com/tensorflow/tensorflow/blob/master/CONTRIBUTING.md).


<div class='sections-order' style="display: none;">
<!--
+<!-- bib.md -->
<!-- uses.md -->
<!-- faq.md -->
<!-- glossary.md -->
@@ -36,7 +36,3 @@ Listed below are some of the many uses of TensorFlow.
* **Description**: On-device computer vision model to do optical character recoignition to enable real-time translation.
* **More info**: [Google Research blog post](http://googleresearch.blogspot.com/2015/07/how-google-translate-squeezes-deep.html)
}

-* TODO(opensource): Add several other research projects
-* TODO(opensource): Pointer Sets?
-* TODO(opensource): Others
@@ -198,7 +198,8 @@ loss and all these weight decay terms, as returned by the `loss()` function.

We visualize it in TensorBoard with a [scalar_summary](../../api_docs/python/train.md?#scalar_summary):

-[](#TODO(migmigmig)#TODO(danmane))
+
+###### [View this TensorBoard live! (Chrome/FF)](/tensorboard/cifar.html)

We train the model using standard
[gradient descent](https://en.wikipedia.org/wiki/Gradient_descent)
@@ -207,7 +208,8 @@ with a learning rate that
[exponentially decays](../../api_docs/python/train.md#exponential_decay)
over time.

-[](#TODO(migmigmig)#TODO(danmane))
+
+###### [View this TensorBoard live! (Chrome/FF)](/tensorboard/cifar.html)

The `train()` function adds the operations needed to minimize the objective by
calculating the gradient and updating the learned variables (see
@@ -90,15 +90,6 @@ stuff.
[View Tutorial](mnist/download/index.md)


-## Sparse Linear Regression
-
-In many practical machine learning settings we have a large number input
-features, only very few of which are active for any given example. TensorFlow
-has great tools for learning predictive models in these settings.
-
-COMING SOON
-

## Visual Object Recognition

We will be releasing our state-of-the-art Inception object recognition model,
@@ -116,14 +107,6 @@ visual hallucination software.
COMING SOON


-## Automated Image Captioning
-
-TODO(vinyals): Write me, three lines max.
-
-COMING SOON
-


<div class='sections-order' style="display: none;">
<!--
<!-- mnist/beginners/index.md -->
@@ -212,19 +212,12 @@ from Python, TensorFlow lets us describe a graph of interacting operations that
run entirely outside Python. (Approaches like this can be seen in a few
machine learning libraries.)

-To run computations, TensorFlow needs to connect to its backend. This connection
-is called a `Session`. To use TensorFlow, we need to import it and create a
-session.
+To use TensorFlow, we need to import it.

```python
import tensorflow as tf
-sess = tf.InteractiveSession()
```

-(Using an `InteractiveSession` makes TensorFlow a bit more flexible about how
-you structure your code. In particular, it's helpful for work in interactive
-contexts like iPython.)
-
We describe these interacting operations by manipulating symbolic variables.
Let's create one:

@@ -350,11 +343,19 @@ implement backpropagation and gradient descent. Then it gives you back a
single operation which, when run, will do a step of gradient descent training,
slightly tweaking your variables to reduce the cost.

-Now we have our model set up to train. But before we start, we need to
-initialize the variables we created:
+Now we have our model set up to train. One last thing before we launch it,
+we have to add an operation to initialize the variables we created:

```python
-tf.initialize_all_variables().run()
+init = tf.initialize_all_variables()
+```
+
+We can now launch the model in a `Session`, and run the operation that
+initializes the variables:
+
+```python
+sess = tf.Session()
+sess.run(init)
```

Let's train -- we'll run the training step 1000 times!

@@ -362,7 +363,7 @@ Let's train -- we'll run the training step 1000 times!

```python
for i in range(1000):
  batch_xs, batch_ys = mnist.train.next_batch(100)
-  train_step.run({x: batch_xs, y_: batch_ys})
+  sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
```
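For reference, the pieces introduced so far assemble into roughly the following script (a sketch, not part of this change; it assumes the `input_data` MNIST helper used earlier in the tutorial is importable):

```python
import tensorflow as tf
import input_data  # MNIST helper module from earlier in the tutorial

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

x = tf.placeholder("float", [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)

y_ = tf.placeholder("float", [None, 10])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)

init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)

for i in range(1000):
  batch_xs, batch_ys = mnist.train.next_batch(100)
  sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
```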

Each step of the loop, we get a "batch" of one hundred random data points from
@@ -403,7 +404,7 @@ accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
Finally, we ask for our accuracy on our test data.

```python
-print accuracy.eval({x: mnist.test.images, y_: mnist.test.labels})
+print sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
```

This should be about 91%.
@@ -32,18 +32,14 @@ testing sets as NumPy arrays.
It also provides a function for iterating through data minibatches, which we
will use below.

-### Start TensorFlow Session
+### Start TensorFlow InteractiveSession

Tensorflow relies on a highly efficient C++ backend to do its computation. The
-connection to this backend is called a session. We will need to create a session
-before we can do any computation.
-
-```python
-import tensorflow as tf
-sess = tf.InteractiveSession()
-```
-
-Using an `InteractiveSession` makes TensorFlow more flexible about how you
+connection to this backend is called a session. The common usage for TensorFlow
+programs is to first create a graph and then launch it in a session.
+
+Here we instead use the convenience `InteractiveSession` class, which
+makes TensorFlow more flexible about how you
structure your code.
It allows you to interleave operations which build a
[computation graph](../../../get_started/basic_usage.md#the-computation-graph)
@@ -54,6 +50,11 @@ If you are not using an `InteractiveSession`, then you should build
the entire computation graph before starting a session and [launching the
graph](../../../get_started/basic_usage.md#launching-the-graph-in-a-session).

+```python
+import tensorflow as tf
+sess = tf.InteractiveSession()
+```

#### Computation Graph

To do efficient numerical computing in Python, we typically use libraries like
@@ -373,14 +373,14 @@ less time). For example, the naive code we used in this tutorial would suffer
compromised speed because we use Python for reading and feeding data items --
each of which require very little work on the TensorFlow back-end. If you find
your model is seriously bottlenecked on input data, you may want to implement a
-custom data reader for your problem, as described in [New Data
-Formats](../how_tos/new_data_formats/index.md). For the case of Skip-Gram
+custom data reader for your problem, as described in
+[New Data Formats](../../how_tos/new_data_formats/index.md). For the case of Skip-Gram
modeling, we've actually already done this for you as an example in
[tensorflow/models/embedding/word2vec.py](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/models/embedding/word2vec.py).

If your model is no longer I/O bound but you want still more performance, you
can take things further by writing your own TensorFlow Ops, as described in
-[Adding a New Op](../how_tos/adding_an_op/index.md). Again we've provided an
+[Adding a New Op](../../how_tos/adding_an_op/index.md). Again we've provided an
example of this for the Skip-Gram case
[tensorflow/models/embedding/word2vec_optimized.py](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/models/embedding/word2vec_optimized.py).
Feel free to benchmark these against each other to measure performance
@@ -1,3 +1,5 @@
+import tensorflow.python.platform
+
import collections
import math
import numpy as np
@@ -216,4 +218,3 @@ try:

except ImportError:
  print "Please install sklearn and matplotlib to visualize embeddings."
-
@@ -31,6 +31,26 @@ py_binary(
    ],
)

+py_test(
+    name = "word2vec_test",
+    size = "small",
+    srcs = ["word2vec_test.py"],
+    deps = [
+        ":word2vec",
+        "//tensorflow:tensorflow_py",
+    ],
+)
+
+py_test(
+    name = "word2vec_optimized_test",
+    size = "small",
+    srcs = ["word2vec_optimized_test.py"],
+    deps = [
+        ":word2vec_optimized",
+        "//tensorflow:tensorflow_py",
+    ],
+)
+
cc_library(
    name = "word2vec_ops",
    srcs = [
@@ -402,7 +402,7 @@ class Word2Vec(object):
      if now - last_checkpoint_time > opts.checkpoint_interval:
        self.saver.save(self._session,
                        opts.save_path + "model",
-                        global_step=step)
+                        global_step=step.astype(int))
        last_checkpoint_time = now
      if epoch != initial_epoch:
        break
@@ -482,6 +482,9 @@ def _start_shell(local_ns=None):

def main(_):
  """Train a word2vec model."""
+  if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
+    print "--train_data --eval_data and --save_path must be specified."
+    sys.exit(1)
  opts = Options()
  with tf.Graph().as_default(), tf.Session() as session:
    model = Word2Vec(opts, session)
@@ -386,6 +386,9 @@ def _start_shell(local_ns=None):

def main(_):
  """Train a word2vec model."""
+  if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
+    print "--train_data --eval_data and --save_path must be specified."
+    sys.exit(1)
  opts = Options()
  with tf.Graph().as_default(), tf.Session() as session:
    model = Word2Vec(opts, session)
tensorflow/models/embedding/word2vec_optimized_test.py (new file, 44 lines)
@@ -0,0 +1,44 @@
"""Tests for word2vec_optimized module."""

import os

import tensorflow.python.platform
import tensorflow as tf

from tensorflow.models.embedding import word2vec_optimized as word2vec_optimized

flags = tf.app.flags

FLAGS = flags.FLAGS


class Word2VecTest(tf.test.TestCase):

  def setUp(self):
    FLAGS.train_data = os.path.join(self.get_temp_dir() + "test-text.txt")
    FLAGS.eval_data = os.path.join(self.get_temp_dir() + "eval-text.txt")
    FLAGS.save_path = self.get_temp_dir()
    with open(FLAGS.train_data, "w") as f:
      f.write(
          """alice was beginning to get very tired of sitting by her sister on
the bank, and of having nothing to do: once or twice she had peeped
into the book her sister was reading, but it had no pictures or
conversations in it, 'and what is the use of a book,' thought alice
'without pictures or conversations?' So she was considering in her own
mind (as well as she could, for the hot day made her feel very sleepy
and stupid), whether the pleasure of making a daisy-chain would be
worth the trouble of getting up and picking the daisies, when suddenly
a White rabbit with pink eyes ran close by her.\n""")
    with open(FLAGS.eval_data, "w") as f:
      f.write("alice she rabbit once\n")

  def testWord2VecOptimized(self):
    FLAGS.batch_size = 5
    FLAGS.num_neg_samples = 10
    FLAGS.epochs_to_train = 1
    FLAGS.min_count = 0
    word2vec_optimized.main([])


if __name__ == "__main__":
  tf.test.main()
tensorflow/models/embedding/word2vec_test.py (new file, 44 lines)
@@ -0,0 +1,44 @@
"""Tests for word2vec module."""

import os

import tensorflow.python.platform
import tensorflow as tf

from tensorflow.models.embedding import word2vec as word2vec

flags = tf.app.flags

FLAGS = flags.FLAGS


class Word2VecTest(tf.test.TestCase):

  def setUp(self):
    FLAGS.train_data = os.path.join(self.get_temp_dir() + "test-text.txt")
    FLAGS.eval_data = os.path.join(self.get_temp_dir() + "eval-text.txt")
    FLAGS.save_path = self.get_temp_dir()
    with open(FLAGS.train_data, "w") as f:
      f.write(
          """alice was beginning to get very tired of sitting by her sister on
the bank, and of having nothing to do: once or twice she had peeped
into the book her sister was reading, but it had no pictures or
conversations in it, 'and what is the use of a book,' thought alice
'without pictures or conversations?' So she was considering in her own
mind (as well as she could, for the hot day made her feel very sleepy
and stupid), whether the pleasure of making a daisy-chain would be
worth the trouble of getting up and picking the daisies, when suddenly
a White rabbit with pink eyes ran close by her.\n""")
    with open(FLAGS.eval_data, "w") as f:
      f.write("alice she rabbit once\n")

  def testWord2Vec(self):
    FLAGS.batch_size = 5
    FLAGS.num_neg_samples = 10
    FLAGS.epochs_to_train = 1
    FLAGS.min_count = 0
    word2vec.main([])


if __name__ == "__main__":
  tf.test.main()
@@ -90,7 +90,7 @@ class Device(object):
    for y in splits:
      ly = len(y)
      if y:
-        # NOTE(mdevin): we use the property getters here.
+        # NOTE(touts): we use the property getters here.
        if ly == 2 and y[0] == "job":
          self.job = y[1]
        elif ly == 2 and y[0] == "replica":
@@ -60,18 +60,18 @@ class Index(Document):
    print >>f, ""
    print >>f, "# TensorFlow Python reference documentation"
    print >>f, ""
+    fullname_f = lambda name: self._members[name][0]
+    anchor_f = lambda name: _get_anchor(self._module_to_name, fullname_f(name))
+
    for filename, library in self._filename_to_library_map:
-      per_symbol_links = []
-      for name in sorted(library.mentioned):
-        if name in self._members:
-          fullname, member = self._members[name]
-          anchor = _get_anchor(self._module_to_name, fullname)
-          prefix = "class " * inspect.isclass(member)
-          per_symbol_links.append("[%s%s](%s#%s)" %
-                                  (prefix, name, filename, anchor))
-      if per_symbol_links:
-        print >>f, "* <b>[%s](%s)</b>: %s" % (library.title, filename,
-                                               ",\n  ".join(per_symbol_links))
+      sorted_names = sorted(library.mentioned, key=str.lower)
+      member_names = [n for n in sorted_names if n in self._members]
+      links = ["[`%s`](%s#%s)" % (name, filename, anchor_f(name))
+               for name in member_names]
+      if links:
+        print >>f, "* **[%s](%s)**:" % (library.title, filename)
+        for link in links:
+          print >>f, "  * %s" % link
    print >>f, ""

    # actually include the files right here
@@ -146,7 +146,7 @@ class Library(Document):
               members,
               documented,
               exclude_symbols=(),
-               catch_all=False):
+               prefix=None):
    """Creates a new Library.

    Args:
@@ -157,6 +157,7 @@ class Library(Document):
      members: Dictionary mapping member name to (fullname, member).
      documented: Set of documented names to update.
      exclude_symbols: A list of specific symbols to exclude.
+      prefix: A string to include at the beginning of the page.
    """
    self._title = title
    self._module = module
@@ -166,6 +167,7 @@ class Library(Document):
    documented.update(exclude_symbols)
    self._documented = documented
    self._mentioned = set()
+    self._prefix = prefix or ""

  @property
  def title(self):
@@ -400,7 +402,7 @@ class Library(Document):
    # defined by the class itself (not inherited). If NO methods were
    # described, describe all methods.
    #
-    # TODO(mdevin): when all methods have been categorized make it an error
+    # TODO(touts): when all methods have been categorized make it an error
    # if some methods are not categorized.
    any_method_called_out = (len(methods) != num_methods)
    if any_method_called_out:
@@ -429,9 +431,11 @@ class Library(Document):
    """
    print >>f, "<!-- This file is machine generated: DO NOT EDIT! -->"
    print >>f, ""
-    # TODO(mdevin): Do not insert these. Let the doc writer put them in
+    # TODO(touts): Do not insert these. Let the doc writer put them in
    # the module docstring explicitly.
    print >>f, "#", self._title
+    if self._prefix:
+      print >>f, self._prefix
    print >>f, "[TOC]"
    print >>f, ""
    if self._module is not None:
@@ -43,7 +43,8 @@ class OpError(Exception):
    or `Recv` op, there will be no corresponding
    [`Operation`](framework.md#Operation) object. In that case, this
    will return `None`, and you should instead use the
-    [`node_def`](OpError.node_def) to discover information about the op.
+    [`OpError.node_def`](#OpError.node_def) to discover information about the
+    op.

    Returns:
      The `Operation` that failed, or None.
@@ -293,7 +294,7 @@ class AbortedError(OpError):

  For example, running a [`queue.enqueue()`](io_ops.md#QueueBase.enqueue)
  operation may raise `AbortedError` if a
-  [`queue.close()`](io_ops.md@QueueBase.close) operation previously ran.
+  [`queue.close()`](io_ops.md#QueueBase.close) operation previously ran.

  @@__init__
  """
@@ -1,5 +1,5 @@
# pylint: disable=wildcard-import,unused-import,g-bad-import-order,line-too-long
-"""Import names from the framework library.
+"""Classes and functions for building TensorFlow graphs.

## Core graph data structures

@@ -18,6 +18,12 @@ tf.flags.DEFINE_boolean("print_hidden_regex", False,
FLAGS = tf.flags.FLAGS


+PREFIX_TEXT = """
+Note: Functions taking `Tensor` arguments can also take anything
+accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
+"""
+
+
def get_module_to_name():
  return {tf: 'tf',
          tf.errors: 'tf.errors',
@@ -42,20 +48,24 @@ def all_libraries(module_to_name, members, documented):
  return [
      # Splits of module 'tf'.
      library("framework", "Building Graphs", framework_lib),
-      library("constant_op", "Constants, Sequences, and Random Values"),
-      library("state_ops", "Variables"),
+      library("constant_op", "Constants, Sequences, and Random Values",
+              prefix=PREFIX_TEXT),
+      library("state_ops", "Variables", prefix=PREFIX_TEXT),
      library("array_ops", "Tensor Transformations",
-              exclude_symbols=["list_diff"]),
+              exclude_symbols=["list_diff"], prefix=PREFIX_TEXT),
      library("math_ops", "Math",
              exclude_symbols=["sparse_matmul", "arg_min", "arg_max",
-                               "lin_space", "sparse_segment_mean_grad"]),
-      library("control_flow_ops", "Control Flow"),
-      library("image", "Images", tf.image, exclude_symbols=["ResizeMethod"]),
-      library("sparse_ops", "Sparse Tensors"),
+                               "lin_space", "sparse_segment_mean_grad"],
+              prefix=PREFIX_TEXT),
+      library("control_flow_ops", "Control Flow", prefix=PREFIX_TEXT),
+      library("image", "Images", tf.image, exclude_symbols=["ResizeMethod"],
+              prefix=PREFIX_TEXT),
+      library("sparse_ops", "Sparse Tensors", prefix=PREFIX_TEXT),
      library("io_ops", "Inputs and Readers",
              exclude_symbols=["LookupTableBase", "HashTable",
                               "initialize_all_tables",
-                               "string_to_hash_bucket"]),
+                               "string_to_hash_bucket"],
+              prefix=PREFIX_TEXT),
      library("python_io", "Data IO (Python functions)", tf.python_io),
      library("nn", "Neural Network", tf.nn,
              exclude_symbols=["deconv2d", "conv2d_backprop_input",
@@ -66,7 +76,8 @@ def all_libraries(module_to_name, members, documented):
                               "xw_plus_b", "relu_layer", "lrn",
                               "batch_norm_with_global_normalization",
                               "batch_norm_with_global_normalization_grad",
-                               "all_candidate_sampler"]),
+                               "all_candidate_sampler"],
+              prefix=PREFIX_TEXT),
      library('client', "Running Graphs", client_lib,
              exclude_symbols=["InteractiveSession"]),
      library("train", "Training", tf.train,
@@ -1604,6 +1604,10 @@ class Graph(object):
  def as_graph_def(self, from_version=None):
    """Returns a serialized `GraphDef` representation of this graph.

+   The serialized `GraphDef` can be imported into another `Graph`
+   (using [`import_graph_def()`](#import_graph_def)) or used with the
+   [C++ Session API](../cc/index.md).
+
    This method is thread-safe.

    Args:
@@ -1612,8 +1616,7 @@ class Graph(object):
        its `version` property had the given value.

    Returns:
-     A
-     [`GraphDef`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/framework/graph.proto)
+     A [`GraphDef`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/framework/graph.proto)
      protocol buffer.
    """
    graph = graph_pb2.GraphDef()
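As a hedged illustration (not part of this change), the call documented above can be exercised like this; the snippet assumes only the 0.5-era Python API that appears elsewhere in this commit:

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    c = tf.constant(3.0, name="c")   # add one node so the graph is non-empty

# Serialize the graph to a GraphDef protocol buffer, as described above.
graph_def = g.as_graph_def()
print len(graph_def.node)            # number of NodeDefs recorded so far
```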
@@ -2116,7 +2119,7 @@ class Graph(object):
      self._names_in_use[name] = 1
    return name

- # TODO(mdevin): remove
+ # TODO(touts): remove
  def _plain_name(self, name):
    """Return the fully scoped 'name'.

@@ -39,7 +39,7 @@ if _FAST_TENSOR_UTIL_AVAILABLE:
          fast_tensor_util.AppendUInt8ArrayToTensorProto,
      types.qint32.as_numpy_dtype:
          fast_tensor_util.AppendInt32ArrayToTensorProto,
-     # NOTE(mdevin): Intentionally no way to feed a DT_BFLOAT16.
+     # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
  }
else:

@@ -81,7 +81,7 @@ else:
      types.qint8.as_numpy_dtype: SlowAppendIntArrayToTensorProto,
      types.quint8.as_numpy_dtype: SlowAppendIntArrayToTensorProto,
      types.qint32.as_numpy_dtype: SlowAppendIntArrayToTensorProto,
-     # NOTE(mdevin): Intentionally no way to feed a DT_BFLOAT16.
+     # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
  }

@@ -472,7 +472,7 @@ def ConstantValue(tensor):
  Raises:
    TypeError: if tensor is not an ops.Tensor.
  """
- # TODO(mdevin): Support Variables?
+ # TODO(touts): Support Variables?
  if not isinstance(tensor, ops.Tensor):
    raise TypeError("tensor is not a Tensor")
  if tensor.op.type == "Const":
@@ -332,7 +332,7 @@ _NP_TO_TF = frozenset([
    (_np_qint8, qint8),
    (_np_quint8, quint8),
    (_np_qint32, qint32),
-   # NOTE(mdevin): Intentionally no way to feed a DT_BFLOAT16.
+   # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
])
_TF_TO_NP = {
    types_pb2.DT_FLOAT: np.float32,
@@ -341,7 +341,7 @@ _TF_TO_NP = {
    types_pb2.DT_UINT8: np.uint8,
    types_pb2.DT_INT16: np.int16,
    types_pb2.DT_INT8: np.int8,
-   # NOTE(mdevin): For strings we use np.object as it supports variable length
+   # NOTE(touts): For strings we use np.object as it supports variable length
    # strings.
    types_pb2.DT_STRING: np.object,
    types_pb2.DT_COMPLEX64: np.complex64,
@@ -34,7 +34,7 @@ class TypesTest(test_util.TensorFlowTestCase):
      numpy_dtype = dtype.as_numpy_dtype
      _ = np.empty((1, 1, 1, 1), dtype=numpy_dtype)
      if dtype.base_dtype != types.bfloat16:
-       # NOTE(mdevin): Intentionally no way to feed a DT_BFLOAT16.
+       # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
        self.assertEqual(
            types.as_dtype(datatype_enum).base_dtype, types.as_dtype(numpy_dtype))

@@ -98,7 +98,7 @@ class PadOpTest(tf.test.TestCase):
                  tf.constant([-1, 0], shape=[1, 2]))

  def testIntTypes(self):
-   # TODO(mdevin): Figure out why the padding tests do not work on GPU
+   # TODO(touts): Figure out why the padding tests do not work on GPU
    # for int types and rank > 2.
    for t in [np.int32, np.int64]:
      self._testPad((np.random.rand(4, 3, 3) * 100).astype(t),
@@ -33,7 +33,7 @@ class VariableOpTest(tf.test.TestCase):
    x = vals.astype(dtype)
    tftype = _NP_TO_TF[dtype]
    self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=False))
-   # NOTE(mdevin): the GPU test should pass for all types, whether the
+   # NOTE(touts): the GPU test should pass for all types, whether the
    # Variable op has an implementation for that type on GPU as we expect
    # that Variable and Assign have GPU implementations for matching tf.
    self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=True))
@@ -1192,7 +1192,7 @@ def _GroupControlDeps(dev, deps, name=None):
    return no_op(name=name)


-# TODO(mdevin): Accept "inputs" as a list.
+# TODO(touts): Accept "inputs" as a list.
def group(*inputs, **kwargs):
  """Create an op that groups multiple operations.

@@ -1217,7 +1217,7 @@ def group(*inputs, **kwargs):
  if kwargs:
    raise ValueError("Unknown keyword arguments: " + ", ".join(kwargs.keys()))
  if not inputs:
-   # TODO(mdevin): Would make sense to return a NoOp.
+   # TODO(touts): Would make sense to return a NoOp.
    raise ValueError("No inputs provided")
  with ops.op_scope(inputs, name, "group_deps") as name:
    # Sorts *inputs according to their devices.
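A small usage sketch for the `group()` op touched above (not from this commit; the variables are illustrative only):

```python
import tensorflow as tf

a = tf.Variable(1)
b = tf.Variable(2)

# One op that, when run, runs both assignments; ordering between them is unspecified.
update = tf.group(a.assign(10), b.assign(20))

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    sess.run(update)
    print sess.run([a, b])   # [10, 20]
```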
@@ -37,7 +37,7 @@ def _OpsBetween(graph, to_ops, from_ops):
    The list of operations between "from_ops" and "to_ops", sorted by
    decreasing operation id. This list contains all elements of to_ops.

-   TODO(mdevin): Think about returning an empty list if from_ops are not
+   TODO(touts): Think about returning an empty list if from_ops are not
    reachable from to_ops. Presently it returns to_ops in that case.
  """
  # List of booleans, indexed by operation id, indicating if
@@ -1,4 +1,5 @@
-"""## Encoding and Decoding.
+# pylint: disable=g-short-docstring-punctuation
+"""## Encoding and Decoding

TensorFlow provides Ops to decode and encode JPEG and PNG formats. Encoded
images are represented by scalar string Tensors, decoded images by 3-D uint8
@@ -17,7 +18,7 @@ presently only support RGB, HSV, and GrayScale.
@@decode_png
@@encode_png

-## Resizing.
+## Resizing

The resizing Ops accept input images as tensors of several types. They always
output resized images as float32 tensors.
@@ -51,7 +52,7 @@ images from the Queue.</i>
@@resize_nearest_neighbor


-## Cropping.
+## Cropping

@@resize_image_with_crop_or_pad

@@ -60,7 +61,7 @@ images from the Queue.</i>
@@random_crop
@@extract_glimpse

-## Flipping and Transposing.
+## Flipping and Transposing

@@flip_up_down
@@random_flip_up_down
@@ -70,7 +71,7 @@ images from the Queue.</i>

@@transpose_image

-## Image Adjustments.
+## Image Adjustments

TensorFlow provides functions to adjust images in various ways: brightness,
contrast, hue, and saturation. Each adjustment can be done with predefined
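For orientation, a hedged sketch of the decode/crop/flip flow the docstring above describes; the placeholder stands in for any scalar string tensor holding an encoded PNG:

```python
import tensorflow as tf

raw_png = tf.placeholder(tf.string)                              # encoded image bytes
image = tf.image.decode_png(raw_png)                             # 3-D uint8 tensor
image = tf.image.resize_image_with_crop_or_pad(image, 224, 224)  # crop or pad to 224x224
image = tf.image.flip_up_down(image)
```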
@@ -1,3 +1,4 @@
+# pylint: disable=line-too-long
"""## Placeholders

TensorFlow provides a placeholder operation that must be fed with data
@@ -26,6 +27,18 @@ formats into tensors.

@@decode_csv
@@decode_raw

+- - -
+
+### Example protocol buffer
+
+TensorFlow's [recommended format for training
+examples](../../how_tos/reading_data/index.md#standard-tensorflow-format)
+is serialized `Example` protocol buffers, [described
+here](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/example/example.proto).
+They contain `Features`, [described
+here](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/example/feature.proto).
+
@@parse_example
@@parse_single_example

@@ -49,7 +62,7 @@ Queues](../../how_tos/threading_and_queues/index.md).
## Input pipeline

TensorFlow functions for setting up an input-prefetching pipeline.
-Please see the [reading data how-to](../../how_tos/reading_data.md)
+Please see the [reading data how-to](../../how_tos/reading_data/index.md)
for context.

### Beginning of an input pipeline
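Not part of the change, but a rough sketch of building one of the `Example` protos referenced above from Python; the field layout is assumed from the linked example.proto and feature.proto, so treat it as illustrative:

```python
from tensorflow.core.example import example_pb2

# One training example with a float feature "ft" holding two values.
ex = example_pb2.Example()
ex.features.feature["ft"].float_list.value.extend([1.0, 2.0])

serialized = ex.SerializeToString()   # what parse_example consumes
```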
@@ -289,7 +289,7 @@ def cast(x, dtype, name=None):
    values_cast = cast(x.values, dtype, name=name)
    return ops.SparseTensor(x.indices, values_cast, x.shape)
  else:
-   # TODO(mdevin): Handle what Josh said.
+   # TODO(touts): Handle what Josh said.
    #
    # Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
    # allows some conversions that cast() can't do, e.g. casting numbers to
@@ -801,7 +801,7 @@ def _as_indexed_slices(x):
  Raises:
    TypeError: If 'x' is not a Tensor or an IndexedSlices object.
  """
- # TODO(mdevin): op_scope
+ # TODO(touts): op_scope
  if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):
    raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
  if isinstance(x, ops.IndexedSlices):
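A quick sketch of the dense path of `cast()` shown above (the SparseTensor branch simply casts `x.values` and rebuilds the SparseTensor); not part of the commit itself:

```python
import tensorflow as tf

x = tf.constant([1.8, 2.2])
y = tf.cast(x, tf.int32)      # values are truncated toward zero

with tf.Session() as sess:
    print sess.run(y)         # [1 2]
```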
@@ -143,7 +143,7 @@ only considering a small randomly-chosen subset of contrastive classes
(called candidates) for each batch of training examples.

See our [Candidate Sampling Algorithms Reference]
-(http://www.tensorflow.org/extras/candidate_sampling.pdf)
+(../../extras/candidate_sampling.pdf)

### Sampled Loss Functions

@@ -28,56 +28,45 @@ def parse_example(serialized,
                  dense_defaults=None,
                  dense_shapes=None,
                  name="ParseExample"):
-  """Parse Example protos.
+  """Parses `Example` protos.

-  Args:
-    serialized: string vector, a batch of binary serialized Example protos.
-    names: A string vector, the names of the serialized protos.
-      "names" may contain, e.g., table key (descriptive) names for the
-      corresponding serialized protos. These are purely useful for debugging
-      purposes, and the presence of values here has no effect on the output.
-      "names" may be an empty vector, if no names are available.
-      If non-empty, this vector must be the same length as "serialized".
-    sparse_keys: A string list of keys in the Examples' features.
-      These keys are associated with sparse values.
-    sparse_types: A list of DTypes.
-      This list's length must match that of sparse_keys. Currently
-      parse_example supports tf.float32 (FloatList), tf.int64 (Int64List),
-      and tf.string (BytesList).
-    dense_keys: A string list of keys in the Examples' features.
-      These keys are associated with dense values.
-    dense_types: A list of DTypes.
-      This list's length must match that of dense_keys. Currently
-      parse_example supports tf.float32 (FloatList), tf.int64 (Int64List),
-      and tf.string (BytesList).
-    dense_defaults: A dict of {key:Tensor} (some may be missing).
-      The keys of the dict must match the dense_keys of the feature.
-      If a key is not present in this dictionary, the corresponding dense
-      Feature is required in all elements of serialized.
-    dense_shapes: A list of tuples.
-      Entries provide the shape of data in each dense Feature in features.
-      The length of dense_shapes must be the same as the length of dense_keys.
-      The number of elements in the Feature corresponding to dense_key[j]
-      must always have np.prod(dense_shapes[j]) entries.
-      If dense_shapes[j] == (D0, D1, ..., DN) then the the shape of output
-      Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):
-      The dense outputs are just the inputs row-stacked by batch.
-    name: (Optional) Name of Op in the graph.
-
-  Returns:
-    A dictionary mapping keys to Tensors and SparseTensors.
-
-  The key dense_keys[j] is mapped to a tensor of type dense_types[j] and
-  of shape (serialized.size(),) + dense_shapes[j] (i.e., the dense outputs are
-  inputs, reshaped in row-major format and then row-stacked by batch).
-
-  The key sparse_keys[j] is mapped to a SparseTensor of type sparse_types[j].
-  The SparseTensor represents a ragged matrix. Its indices are [batch, index]
-  where "batch" is is the batch entry the value is from, and "index" is the
-  value's index in the list of values associated with that feature
-  and example. For example, if one expects a tf.float32 sparse feature "ft"
-  and three serialized examples are provided:
+  Parses a number of serialized [`Example`]
+  (https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/example/example.proto)
+  protos given in `serialized`.
+
+  `names` may contain descriptive names for the corresponding serialized protos.
+  These may be useful for debugging purposes, but they have no effect on the
+  output. If not `None`, `names` must be the same length as `serialized`.
+
+  This op parses serialized examples into a dictionary mapping keys to `Tensor`
+  and `SparseTensor` objects respectively, depending on whether the keys appear
+  in `sparse_keys` or `dense_keys`.
+
+  The key `dense_keys[j]` is mapped to a `Tensor` of type `dense_types[j]` and
+  of shape `(serialized.size(),) + dense_shapes[j]`.
+
+  `dense_defaults` provides defaults for values referenced using `dense_keys`.
+  If a key is not present in this dictionary, the corresponding dense `Feature`
+  is required in all elements of `serialized`.
+
+  `dense_shapes[j]` provides the shape of each `Feature` entry referenced by
+  `dense_keys[j]`. The number of elements in the `Feature` corresponding to
+  `dense_key[j]` must always have `np.prod(dense_shapes[j])` entries. The
+  returned `Tensor` for `dense_key[j]` has shape `[N] + dense_shape[j]`, where
+  `N` is the number of `Example`s in `serialized`.
+
+  The key `sparse_keys[j]` is mapped to a `SparseTensor` of type
+  `sparse_types[j]`. The `SparseTensor` represents a ragged matrix.
+  Its indices are `[batch, index]` where `batch` is the batch entry the value
+  is from, and `index` is the value's index in the list of values associated
+  with that feature and example.
+
+  Examples:
+
+  For example, if one expects a `tf.float32` sparse feature `ft` and three
+  serialized `Example`s are provided:
+
+  ```
  serialized = [
    features:
      { feature: [ key: { "ft" value: float_list: { value: [1.0, 2.0] } } ] },
@@ -86,25 +75,21 @@ def parse_example(serialized,
    features:
      { feature: [ key: { "ft" value: float_list: { value: [3.0] } } ] }
  ]
+  ```

  then the output will look like:

+  ```
  {"ft": SparseTensor(indices=[[0, 0], [0, 1], [2, 0]],
                      values=[1.0, 2.0, 3.0],
                      shape=(3, 2)) }
+  ```

-  Raises:
-    ValueError: If sparse and dense keys intersect, or input lengths do not
-      match up for sparse_* (similarly for dense_*).
-    TypeError: If an input is malformed.
-
-  Example input, format, and output: Just Sparse Inputs
-  ================================================
-
-  Given two brain.Example input protos:
-
-  serialized: // serialized versions of the protos below
-  [features: {
+  Given two `Example` input protos in `serialized`:
+
+  ```
+  [
+  features: {
    feature: { key: "kw" value: { bytes_list: { value: [ "knit", "big" ] } } }
    feature: { key: "gps" value: { float_list: { value: [] } } }
  },
@@ -112,12 +97,21 @@ def parse_example(serialized,
    feature: { key: "kw" value: { bytes_list: { value: [ "emmy" ] } } }
    feature: { key: "dank" value: { int64_list: { value: [ 42 ] } } }
    feature: { key: "gps" value: { } }
-  }]
+  }
+  ]
+  ```
+
+  And arguments
+
+  ```
  names: ["input0", "input1"],
  sparse_keys: ["kw", "dank", "gps"]
  sparse_types: [DT_STRING, DT_INT64, DT_FLOAT]
+  ```

-  Then the expected output is a dictionary:
+  Then the output is a dictionary:

+  ```python
  {
    "kw": SparseTensor(
        indices=[[0, 0], [0, 1], [1, 0]],
@@ -132,63 +126,71 @@ def parse_example(serialized,
        values=[],
        shape=[2, 0]),
  }
+  ```
+
+  For dense results in two serialized `Example`s:

-  Example input, format, and output: Dense Inputs (without defaults)
-  ==================================================================
-
-  Given two brain.Example input protos:
-
-  serialized: // serialized versions of the protos below
-  [features: {
+  ```
+  [
+  features: {
    feature: { key: "age" value: { int64_list: { value: [ 0 ] } } }
    feature: { key: "gender" value: { bytes_list: { value: [ "f" ] } } }
  },
  features: {
    feature: { key: "age" value: { int64_list: { value: [] } } }
    feature: { key: "gender" value: { bytes_list: { value: [ "f" ] } } }
-  }]
+  }
+  ]
+  ```
+
+  We can use arguments:
+
+  ```
  names: ["input0", "input1"],
-  dense_keys: np.array(["age", "gender"])
-  dense_types: [tf.int64, tf.string]
+  dense_keys: np.array(["age", "gender"]),
+  dense_types: [tf.int64, tf.string],
  dense_defaults: {
-    "age": -1  # defaults to -1 if missing
+    "age": -1  # "age" defaults to -1 if missing
      # "gender" has no specified default so it's required
  }
-  dense_shapes: [(1,), (1,)]  # age, gender, label, weight
+  dense_shapes: [(1,), (1,)],  # age, gender, label, weight
+  ```

-  Then the expected output is a dictionary:
+  And the expected output is:

+  ```python
  {
    "age": [[0], [-1]],
    "gender": [["f"], ["f"]],
  }
+  ```
+
+  Args:
+    serialized: A list of strings, a batch of binary serialized `Example`
+      protos.
+    names: A list of strings, the names of the serialized protos.
+    sparse_keys: A list of string keys in the examples' features.
+      The results for these keys will be returned as `SparseTensor` objects.
+    sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
+      Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
+      and `tf.string` (`BytesList`) are supported.
+    dense_keys: A list of string keys in the examples' features.
+      The results for these keys will be returned as `Tensor`s
+    dense_types: A list of DTypes of the same length as `dense_keys`.
+      Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
+      and `tf.string` (`BytesList`) are supported.
+    dense_defaults: A dict mapping string keys to `Tensor`s.
+      The keys of the dict must match the dense_keys of the feature.
+    dense_shapes: A list of tuples with the same length as `dense_keys`.
+      The shape of the data for each dense feature referenced by `dense_keys`.
+    name: A name for this operation (optional).

-  Example input, format, and output: Dense Inputs (with defaults)
-  ===============================================================
-
-  Given two brain.Example input protos:
-
-  serialized: // serialized versions of the protos below
-  [features: {
-    feature: { key: "weight" value: { float_list: { value: [ 1.0 ] } } }
-  },
-  features: {
-    feature: { key: "label" value: { float_list: { value: [ -1.0, 0.0 ] } } }
-  }]
-  names: ["input0", "input1"],
-  dense_keys: np.array(["label", "weight"])
-  dense_defaults: {
-    "label": [1.0, 2.0],  # float (default: vector)
-    "weight": 5.0         # float (default: scalar, 5.0)
-  }
-  dense_shapes: [(2,), (1,)]  # age, gender, label, weight
-
-  Then the expected output is a dictionary:
-  {
-    "label": [[1.0, 2.0], [-1.0, 0.0]],
-    "weight": [[1.0], [5.0]],
-  }
+  Returns:
+    A `dict` mapping keys to `Tensor`s and `SparseTensor`s.
+
+  Raises:
+    ValueError: If sparse and dense key sets intersect, or input lengths do not
+      match up.
  """
  names = [] if names is None else names
  dense_defaults = {} if dense_defaults is None else dense_defaults
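A hedged usage sketch assembled from the docstring above; the keyword arguments mirror the documented parameters, but the full signature is not shown in this hunk, so treat the call form as an assumption:

```python
import tensorflow as tf

serialized = tf.placeholder(tf.string)   # batch of serialized Example protos

parsed = tf.parse_example(
    serialized,
    names=None,
    sparse_keys=["ft"], sparse_types=[tf.float32],
    dense_keys=["age"], dense_types=[tf.int64],
    dense_defaults={"age": -1}, dense_shapes=[(1,)])

# parsed["ft"] is a SparseTensor; parsed["age"] is a dense Tensor of shape [N, 1].
```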
@@ -262,7 +264,20 @@ def parse_single_example(serialized,  # pylint: disable=invalid-name
                         dense_defaults=None,
                         dense_shapes=None,
                         name="ParseSingleExample"):
-  """Identical to parse_example but for scalar serialized and names.
+  """Parses a single `Example` proto.
+
+  Similar to `parse_example`, except:
+
+  For dense tensors, the returned `Tensor` is identical to the output of
+  `parse_example`, except there is no batch dimension, the output shape is the
+  same as the shape given in `dense_shape`.
+
+  For `SparseTensor`s, the first (batch) column of the indices matrix is removed
+  (the indices matrix is a column vector), the values vector is unchanged, and
+  the first (batch_size) entry of the shape vector is removed (it is now a
+  single element vector).
+
+  See also `parse_example`.

  Args:
    serialized: A scalar string, a single serialized Example.
@@ -275,21 +290,11 @@ def parse_single_example(serialized,  # pylint: disable=invalid-name
    dense_types: See parse_example documentation for more details.
    dense_defaults: See parse_example documentation for more details.
    dense_shapes: See parse_example documentation for more details.
-   name: Optional op name.
+   name: A name for this operation (optional).

  Returns:
    A dictionary mapping keys to Tensors and SparseTensors.

-  For dense tensors, the Tensor is identical to the output of parse_example,
-  except it is one less dimension (the first, batch, dimension is removed).
-
-  For SparseTensors:
-  The first (batch) column of the indices matrix is removed
-  (it is now a column vector).
-  The values vector is unchanged.
-  The first (batch_size) entry of the shape vector is removed
-  (it is now a single element vector).
-
  Raises:
    ValueError: if "scalar" or "names" have known shapes, and are not scalars.
  """
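Correspondingly, a hedged sketch for the single-example variant; as above, the argument names follow the docstring rather than a verified signature:

```python
import tensorflow as tf

one_proto = tf.placeholder(tf.string, shape=[])   # a single serialized Example

single = tf.parse_single_example(
    one_proto,
    names=None,
    sparse_keys=["ft"], sparse_types=[tf.float32],
    dense_keys=["age"], dense_types=[tf.int64],
    dense_defaults={"age": -1}, dense_shapes=[(1,)])

# single["age"] has shape (1,): the batch dimension described above is gone.
```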
@@ -1,4 +1,5 @@
-"""## Sparse Tensor Representation.
+# pylint: disable=g-short-docstring-punctuation
+"""## Sparse Tensor Representation

Tensorflow supports a `SparseTensor` representation for data that is sparse
in multiple dimensions. Contrast this representation with `IndexedSlices`,
@@ -8,13 +9,13 @@ dimension, and dense along all other dimensions.
@@SparseTensor
@@SparseTensorValue

-## Sparse to Dense Conversion.
+## Sparse to Dense Conversion

@@sparse_to_dense
@@sparse_tensor_to_dense
@@sparse_to_indicator

-## Manipulation.
+## Manipulation

@@sparse_concat
@@sparse_reorder
@@ -14,7 +14,7 @@ collected in the graph.
@@initialize_variables
@@assert_variables_initialized

-## Saving and Restoring Variables.
+## Saving and Restoring Variables

@@Saver

@@ -207,7 +207,7 @@ class Variable(object):

    This convenience method requires a session where the graph containing this
    variable has been launched. If no session is passed, the default session is
-   used. See the [Session class](../client.md#Session) for more information on
+   used. See the [Session class](client.md#Session) for more information on
    launching a graph and on sessions.

    ```python
@@ -543,7 +543,7 @@ def assert_variables_initialized(var_list=None):
  """
  if var_list is None:
    var_list = all_variables()
- # Backwards compatibility for old-style variables. TODO(mdevin): remove.
+ # Backwards compatibility for old-style variables. TODO(touts): remove.
  if not var_list:
    var_list = []
    for op in ops.get_default_graph().get_operations():
@@ -5,7 +5,7 @@ package tensorflow;

// Protocol buffer representing the checkpoint state.
//
-// TODO(mdevin): Add other attributes as needed.
+// TODO(touts): Add other attributes as needed.
message CheckpointState {
  // Path to the most-recent model checkpoint.
  string model_checkpoint_path = 1;
@@ -329,38 +329,42 @@ def batch_join(tensor_list_list, batch_size, capacity=32, enqueue_many=False,
               shapes=None, name=None):
  """Run a list of tensors to fill a queue to create batches of examples.

-  This version enqueues a different list of tensors in different threads.
-  Implemented using a queue -- a QueueRunner for the queue
-  is added to the current Graph's QUEUE_RUNNER collection.
+  Enqueues a different list of tensors in different threads.
+  Implemented using a queue -- a `QueueRunner` for the queue
+  is added to the current `Graph`'s `QUEUE_RUNNER` collection.
+
+  `len(tensor_list_list)` threads will be started, with thread `i` enqueuing
+  the tensors from tensor_list[i]. `tensor_list[i1][j]` must match
+  `tensor_list[i2][j]` in type and shape, except in the first dimension if
+  `enqueue_many` is true.
+
+  If `enqueue_many` is false, each `tensor_list_list[i]` is assumed to
+  represent a single example. Otherwise, `tensor_list_list[i]` is assumed to
+  represent a batch of examples, where the first dimension is indexed by
+  example, and all members of `tensor_list_list[i]` should have the same size
+  in the first dimension.
+
+  If `enqueue_many` is false, then an input tensor `x` will be output as a
+  tensor with shape `[batch_size] + x.shape`. If `enqueue_many` is true, the
+  slices of any input tensor `x` are treated as examples, and the output tensors
+  will have shape `[batch_size] + x.shape[1:]`.
+
+  The `capacity` argument controls the how long the prefetching
+  is allowed to grow the queues.

  Args:
    tensor_list_list: A list of tuples of tensors to enqueue.
-     len(tensor_list_list) threads will be started, with the i-th
-     thread enqueuing the tensors from tensor_list[i].
-     tensor_list[i1][j] must match tensor_list[i2][j] in type and
-     shape (except in the first dimension if enqueue_many is true).
-   batch_size: The new batch size pulled from the queue.
-   capacity: Maximum number of elements in the queue, controls the
-     how far ahead the prefetching allowed is allowed to get and
-     memory usage.
-   enqueue_many: If False, each tensor_list_list[i] is assumed to
-     represent a single example. If True, tensor_list_list[i] is
-     assumed to represent a batch of examples, where the first
-     dimension is indexed by example, and all members of
-     tensor_list_list[i] should have the same size in the first
-     dimension.
-   shapes: Optional. The shapes for each example. Defaults to the
-     inferred shapes for tensor_list_list[i] (which must match, after
-     leaving off the first dimension if enqueue_many is True).
+   batch_size: An integer. The new batch size pulled from the queue.
+   capacity: An integer. The maximum number of elements in the queue.
+   enqueue_many: Whether each tensor in `tensor_list_list` is a single
+     example.
+   shapes: (Optional) The shapes for each example. Defaults to the
+     inferred shapes for `tensor_list_list[i]`.
    name: A name for the operations (optional).

  Returns:
    A list of tensors with the same number and types as
-   tensor_list_list[i]. If enqueue_many is false, then an input
-   tensor with shape `[x, y, z]` will be output as a tensor with
-   shape `[batch_size, x, y, z]`. If enqueue_many is True, and an
-   input tensor has shape `[*, x, y, z]`, the the output will have
-   shape `[batch_size, x, y, z]`.
+   `tensor_list_list[i]`.
  """
  with ops.op_scope(_flatten(tensor_list_list), name, "batch_join") as name:
    tensor_list_list = _validate_join(tensor_list_list)
@@ -462,15 +466,15 @@ def shuffle_batch_join(tensor_list_list, batch_size, capacity,
    min_after_dequeue: Minimum number elements in the queue after a
      dequeue, used to ensure a level of mixing of elements.
    seed: Seed for the random shuffling within the queue.
-   enqueue_many: If False, each tensor_list_list[i] is assumed to
-     represent a single example. If True, tensor_list_list[i] is
+   enqueue_many: If `False`, each tensor_list_list[i] is assumed to
+     represent a single example. If `True`, tensor_list_list[i] is
      assumed to represent a batch of examples, where the first
      dimension is indexed by example, and all members of
      tensor_list_list[i] should have the same size in the first
      dimension.
    shapes: Optional. The shapes for each example. Defaults to the
-     inferred shapes for tensor_list_list[i] (which must match, after
-     leaving off the first dimension if enqueue_many is True).
+     inferred shapes for `tensor_list_list[i]` (which must match, after
+     leaving off the first dimension if enqueue_many is `True`).
    name: A name for the operations (optional).

  Returns:
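A minimal sketch of `batch_join` as documented above (not from this commit); the constants stand in for per-thread reader outputs, which is where a real pipeline would plug in its decoders:

```python
import tensorflow as tf

example_0, label_0 = tf.constant([1.0]), tf.constant(0)   # "thread 0" tensors
example_1, label_1 = tf.constant([2.0]), tf.constant(1)   # "thread 1" tensors

# Two enqueuing threads; each output batch mixes elements from both lists.
batched_example, batched_label = tf.train.batch_join(
    [[example_0, label_0], [example_1, label_1]],
    batch_size=32, capacity=64)
```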
@@ -9,7 +9,7 @@ from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables


-# TODO(mdevin): switch to variables.Variable.
+# TODO(touts): switch to variables.Variable.
def assign_moving_average(variable, value, decay, name=None):
  """Compute the moving average of a variable.

@@ -182,7 +182,7 @@ class ExponentialMovingAverage(object):
    ValueError: If the moving average of one of the variables is already
      being computed.
  """
- # TODO(mdevin): op_scope
+ # TODO(touts): op_scope
  if var_list is None:
    var_list = variables.trainable_variables()
  for var in var_list:
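A short, hedged sketch of the `ExponentialMovingAverage` class touched above, assuming its documented `apply()`/`average()` interface:

```python
import tensorflow as tf

w = tf.Variable(tf.zeros([10]))
ema = tf.train.ExponentialMovingAverage(decay=0.999)

maintain_averages_op = ema.apply([w])   # op that updates the shadow copy of w
averaged_w = ema.average(w)             # variable holding the moving average
```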
@@ -159,7 +159,7 @@ class BaseSaverBuilder(object):
    restore_control_inputs = assign_ops[-1:] if restore_sequentially else []
    # Load and optionally reshape on the CPU, as string tensors are not
    # available on the GPU.
-   # TODO(mdevin): Re-enable restore on GPU when we can support annotating
+   # TODO(touts): Re-enable restore on GPU when we can support annotating
    # string tensors as "HostMemory" inputs.
    with ops.device(graph_util.set_cpu0(v.device) if v.device else None):
      with ops.control_dependencies(restore_control_inputs):
@@ -214,7 +214,7 @@ class BaseSaverBuilder(object):
  def _GroupByDevices(self, vars_to_save):
    """Group Variable tensor slices per device.

-   TODO(mdevin): Make sure that all the devices found are on different
+   TODO(touts): Make sure that all the devices found are on different
    job/replica/task/cpu|gpu. It would be bad if 2 were on the same device.
    It can happen if the devices as unspecified.

@@ -1,7 +1,7 @@
# pylint: disable=wildcard-import,unused-import,g-bad-import-order,line-too-long
"""This library provides a set of classes and functions that helps train models.

-## Optimizers.
+## Optimizers

The Optimizer base class provides methods to compute gradients for a loss and
apply gradients to variables. A collection of subclasses implement classic
@@ -19,7 +19,7 @@ of the subclasses.
@@FtrlOptimizer
@@RMSPropOptimizer

-## Gradient Computation.
+## Gradient Computation

TensorFlow provides functions to compute the derivatives for a given
TensorFlow computation graph, adding operations to the graph. The
@@ -46,10 +46,10 @@ gradients.
@@clip_by_global_norm
@@global_norm

-## Decaying the learning rate.
+## Decaying the learning rate
@@exponential_decay

-## Moving Averages.
+## Moving Averages

Some training algorithms, such as GradientDescent and Momentum often benefit
from maintaining a moving average of variables during optimization. Using the
@@ -57,7 +57,7 @@ moving averages for evaluations often improve results significantly.

@@ExponentialMovingAverage

-## Coordinator and QueueRunner.
+## Coordinator and QueueRunner

See [Threading and Queues](../../how_tos/threading_and_queues/index.md)
for how to use threads and queues. For documentation on the Queue API,
@@ -68,17 +68,21 @@ see [Queues](../../api_docs/python/io_ops.md#queues).
@@add_queue_runner
@@start_queue_runners

-## Summary Operations.
+## Summary Operations

The following ops output
[`Summary`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/framework/summary.proto)
protocol buffers as serialized string tensors.

-You can fetch the output of a summary op in a session, and pass it to a
-[SummaryWriter](train.md#SummaryWriter) to append it to an event file. You can
-then use TensorBoard to visualize the contents of the event files. See
-[TensorBoard and Summaries](../../how_tos/summaries_and_tensorboard/index.md)
-for more details.
+You can fetch the output of a summary op in a session, and pass it to
+a [SummaryWriter](train.md#SummaryWriter) to append it to an event
+file. Event files contain
+[`Event`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/util/event.proto)
+protos that can contain `Summary` protos along with the timestamp and
+step. You can then use TensorBoard to visualize the contents of the
+event files. See [TensorBoard and
+Summaries](../../how_tos/summaries_and_tensorboard/index.md) for more
+details.

@@scalar_summary
@@image_summary
@@ -88,7 +92,7 @@ for more details.
@@merge_summary
@@merge_all_summaries

-## Adding Summaries to Event Files.
+## Adding Summaries to Event Files

See [Summaries and
TensorBoard](../../how_tos/summaries_and_tensorboard/index.md) for an
@@ -97,10 +101,11 @@ overview of summaries, event files, and visualization in TensorBoard.
@@SummaryWriter
@@summary_iterator

-## Training utilities.
+## Training utilities

@@global_step
@@write_graph

"""

# Optimizers.
@@ -134,5 +139,5 @@ from tensorflow.python.training.training_util import global_step
from tensorflow.core.example.example_pb2 import *
from tensorflow.core.example.feature_pb2 import *

-# Utility op. Open Source. TODO(mdevin): move to nn?
+# Utility op. Open Source. TODO(touts): move to nn?
from tensorflow.python.training.learning_rate_decay import exponential_decay
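To make the summary-op flow described above concrete, a hedged sketch (the log directory and tag are placeholders):

```python
import tensorflow as tf

loss = tf.constant(0.5)
loss_summary = tf.scalar_summary("loss", loss)   # op producing a serialized Summary proto

writer = tf.train.SummaryWriter("/tmp/logs")     # appends Event protos to an event file
with tf.Session() as sess:
    summary_str = sess.run(loss_summary)
    writer.add_summary(summary_str, global_step=0)
```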
@@ -194,7 +194,7 @@ Extract3DPatches(
      return Extract3DPatches(input, patchPlanes, patchRows, patchCols,
                              stridePlanes, strideRows, strideCols,
                              0, 0, 0, 0, 0, 0, padding_value);
-   case PADDING_SAME:
+   case PADDING_SAME: {
      // The side of the tensor before striding should be just the expected
      // output times the stride.
      const TensorIndex size_z = ceil(inputPlanes / static_cast<float>(stridePlanes)) * stridePlanes;
@@ -215,6 +215,13 @@ Extract3DPatches(
                              dx - dx / 2, dx / 2,
                              padding_value);
    }
+   default:
+     eigen_assert(false && "unexpected padding");
+     // unreachable code to avoid missing return warning.
+     return Extract3DPatches(input, patchPlanes, patchRows, patchCols,
+                             stridePlanes, strideRows, strideCols,
+                             0, 0, 0, 0, 0, 0, padding_value);
+  }
  }

// TODO(mjanusz): Switch this to a 'using' alias once CUDA supports C++11.