TensorFlow: Upstream latest changes to Git.

Changes:
- Updates to installation instructions.
- Updates to documentation.
- Minor modifications and tests for word2vec.

Base CL: 107284192
Manjunath Kudlur 2015-11-06 18:37:11 -08:00
parent f41959ccb2
commit cd9e60c1cd
71 changed files with 1237 additions and 906 deletions

View File

@ -11,7 +11,66 @@ organization for the purposes of conducting machine learning and deep neural
networks research. The system is general enough to be applicable in a wide
variety of other domains, as well.
# Download and Setup
For detailed installation instructions, see
[here](g3doc/get_started/os_setup.md).
## Binary Installation
### Ubuntu/Linux
Make sure you have [pip](https://pypi.python.org/pypi/pip) installed:
```sh
$ sudo apt-get install python-pip
```
Install TensorFlow:
```sh
# For CPU-only version
$ sudo pip install https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
# For GPU-enabled version
$ sudo pip install https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
```
### Mac OS X
Make sure you have [pip](https://pypi.python.org/pypi/pip) installed.
If using `easy_install`:
```sh
$ sudo easy_install pip
```
Install TensorFlow (only the CPU binary version is currently available):
```sh
$ sudo pip install https://storage.googleapis.com/tensorflow/mac/tensorflow-0.5.0-py2-none-any.whl
```
### Try your first TensorFlow program
```sh
$ python
>>> import tensorflow as tf
>>> hello = tf.constant('Hello, TensorFlow!')
>>> sess = tf.Session()
>>> print sess.run(hello)
Hello, TensorFlow!
>>> a = tf.constant(10)
>>> b = tf.constant(32)
>>> print sess.run(a+b)
42
>>>
```
## For more information
* [Installation and setup instructions](/tensorflow/g3doc/get_started/os_setup.md)
* [TensorFlow website](http://tensorflow.org)

View File

@ -10,7 +10,7 @@ import "tensorflow/core/framework/types.proto";
message TensorProto {
DataType dtype = 1;
// Shape of the tensor. TODO(mdevin): sort out the 0-rank issues.
// Shape of the tensor. TODO(touts): sort out the 0-rank issues.
TensorShapeProto tensor_shape = 2;
// Only one of the representations below is set, one of "tensor_contents" and

View File

@ -46,7 +46,7 @@ struct SoftmaxEigenImpl {
Eigen::IndexList<Eigen::type2index<1>, int> one_by_class;
one_by_class.set(1, num_classes);
#endif
// NOTE(mdevin): If you modify this implementation please run
// NOTE(touts): If you modify this implementation please run
// the ImageNetSoftmaxFwd benchmark in core_ops_test.cc.
//
// softmax = exp(logits - max(logits along classes));
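The trick this comment describes is the standard max-subtraction for numerical stability. A minimal NumPy sketch of the formula (an illustration, not the Eigen implementation itself):

```python
import numpy as np

def softmax(logits):
    # Subtract the per-row max first: exp() of large raw logits would
    # overflow, but exp(logits - max) always lies in (0, 1].
    shifted = logits - logits.max(axis=-1, keepdims=True)
    exps = np.exp(shifted)
    return exps / exps.sum(axis=-1, keepdims=True)

print(softmax(np.array([[1000.0, 1001.0, 1002.0]])))  # finite, rows sum to 1
```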

View File

@ -35,7 +35,7 @@ struct XentEigenImpl {
typename TTypes<T>::Matrix scratch,
typename TTypes<T>::Vec loss,
typename TTypes<T>::Matrix backprop) {
// NOTE(mdevin): This duplicates some of the computations in softmax_op
// NOTE(touts): This duplicates some of the computations in softmax_op
// because we need the intermediate (logits -max(logits)) values to
// avoid a log(exp()) in the computation of the loss.
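To illustrate why keeping the intermediate `(logits - max(logits))` values matters, here is a NumPy sketch (assuming each row of `labels` sums to 1) that computes the loss from the shifted logits directly, so `log(exp(x))` never appears:

```python
import numpy as np

def softmax_cross_entropy(logits, labels):
    # loss = log(sum(exp(shifted))) - sum(labels * shifted), derived from
    # -sum(labels * log_softmax) without ever forming log(exp(...)).
    shifted = logits - logits.max(axis=-1, keepdims=True)
    log_sum_exp = np.log(np.exp(shifted).sum(axis=-1))
    return log_sum_exp - (labels * shifted).sum(axis=-1)
```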

View File

@ -97,7 +97,7 @@ class ThreadSafeHistogram {
void Clear();
// TODO(mdevin): It might be a good idea to provide a AddN(<many values>)
// TODO(touts): It might be a good idea to provide a AddN(<many values>)
// method to avoid grabbing/releasing the lock when adding many values.
void Add(double value);

View File

@ -63,7 +63,7 @@ class TensorShape {
/// \brief Returns the number of elements in dimension "d".
/// REQUIRES: "0 <= d < dims()"
// TODO(mdevin): Rename to dimension() to match Eigen::Tensor::dimension()?
// TODO(touts): Rename to dimension() to match Eigen::Tensor::dimension()?
int64 dim_size(int d) const {
DCHECK_GE(d, 0);
DCHECK_LT(d, dims());

View File

@ -12,7 +12,7 @@ message Event {
// Timestamp of the event.
double wall_time = 1;
// Globale step of the event.
// Global step of the event.
int64 step = 2;
oneof what {
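As an illustration of these fields in use, a sketch that reads an event file with `tf.train.summary_iterator` (the file path is hypothetical):

```python
import tensorflow as tf

# Print the wall time and global step of every event in an event file.
for event in tf.train.summary_iterator("/tmp/train/events.out.tfevents"):
    print(event.wall_time, event.step)
```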

View File

@ -20,7 +20,6 @@ installed the NDK and SDK. Otherwise an error such as:
"The external label '//external:android/sdk' is not bound to anything" will
be reported.
To build the APK, run this from your workspace root:
```
bazel build //tensorflow/examples/android:tensorflow_demo -c opt --copt=-mfpu=neon
@ -29,11 +28,19 @@ Note that "-c opt" is currently required; if not set, an assert (for an
otherwise non-problematic issue) in Eigen will halt the application during
execution. This issue will be corrected in an upcoming release.
If adb debugging is enabled on your device, you may instead use the following
command from your workspace root to automatically build and install:
If adb debugging is enabled on your Android 5.0 or later device, you may then
use the following command from your workspace root to install the APK once
built:
```
adb install -r -g bazel-bin/tensorflow/examples/android/tensorflow_demo_incremental.apk
```
Alternatively, a streamlined means of building, installing and running in one
command is:
```
bazel mobile-install //tensorflow/examples/android:tensorflow_demo -c opt --copt=-mfpu=neon
bazel mobile-install //tensorflow/examples/android:tensorflow_demo -c opt --start_app --copt=-mfpu=neon
```
Add the "--start_app" flag if you wish to automatically start the app after
installing. Otherwise, find the application icon labeled "Tensorflow Demo".
If camera permission errors are encountered (possible on Android Marshmallow or
above), then the adb install command above should be used instead, as it
automatically grants the required camera permissions with '-g'.

View File

@ -9,7 +9,7 @@ Over time, we hope that the TensorFlow community will develop front ends for
languages like Go, Java, Javascript, Lua, R, and perhaps others. With SWIG, it's
relatively easy to contribute a TensorFlow interface to your favorite language.
Note: Many practical aspects of ssage are covered in the Mechanics tab, and
Note: Many practical aspects of usage are covered in the Mechanics tab, and
some additional documentation not specific to any particular language API is
available in the Resources tab.

View File

@ -1,6 +1,10 @@
<!-- This file is machine generated: DO NOT EDIT! -->
# Tensor Transformations
Note: Functions taking `Tensor` arguments can also take anything
accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
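As a small illustration of this note (the values are arbitrary), a plain Python list is accepted wherever a `Tensor` is expected:

```python
import tensorflow as tf

# The list is converted implicitly via tf.convert_to_tensor.
t = tf.cast([1.0, 2.5, 3.9], tf.int32)
```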
<!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
## Contents
* [Casting](#AUTOGENERATED-casting)

View File

@ -299,7 +299,8 @@ The operation that failed, if known.
or `Recv` op, there will be no corresponding
[`Operation`](framework.md#Operation) object. In that case, this
will return `None`, and you should instead use the
[`node_def`](OpError.node_def) to discover information about the op.
[`OpError.node_def`](#OpError.node_def) to discover information about the
op.
##### Returns:
@ -536,7 +537,7 @@ The operation was aborted, typically due to a concurrent action.
For example, running a [`queue.enqueue()`](io_ops.md#QueueBase.enqueue)
operation may raise `AbortedError` if a
[`queue.close()`](io_ops.md@QueueBase.close) operation previously ran.
[`queue.close()`](io_ops.md#QueueBase.close) operation previously ran.
- - -

View File

@ -1,6 +1,10 @@
<!-- This file is machine generated: DO NOT EDIT! -->
# Constants, Sequences, and Random Values
Note: Functions taking `Tensor` arguments can also take anything
accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
<!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
## Contents
* [Constant Value Tensors](#AUTOGENERATED-constant-value-tensors)

View File

@ -1,6 +1,10 @@
<!-- This file is machine generated: DO NOT EDIT! -->
# Control Flow
Note: Functions taking `Tensor` arguments can also take anything
accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
<!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
## Contents
* [Control Flow Operations](#AUTOGENERATED-control-flow-operations)

View File

@ -33,7 +33,7 @@
<!-- TOC-END This section was generated by neural network, THANKS FOR READING! -->
Import names from the framework library.
Classes and functions for building TensorFlow graphs.
## Core graph data structures <div class="md-anchor" id="AUTOGENERATED-core-graph-data-structures">{#AUTOGENERATED-core-graph-data-structures}</div>
@ -126,6 +126,10 @@ with tf.Graph().as_default() as g:
Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using [`import_graph_def()`](#import_graph_def)) or used with the
[C++ Session API](../cc/index.md).
This method is thread-safe.
##### Args:
@ -137,8 +141,7 @@ This method is thread-safe.
##### Returns:
A
[`GraphDef`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/framework/graph.proto)
A [`GraphDef`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/framework/graph.proto)
protocol buffer.
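A minimal sketch of serializing a graph as described above (the node name is illustrative):

```python
import tensorflow as tf

with tf.Graph().as_default() as g:
    tf.constant(1.0, name="one")
    graph_def = g.as_graph_def()   # serialized `GraphDef` representation

print([node.name for node in graph_def.node])  # ['one']
```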

View File

@ -1,32 +1,36 @@
<!-- This file is machine generated: DO NOT EDIT! -->
# Images
Note: Functions taking `Tensor` arguments can also take anything
accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
<!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
## Contents
* [Encoding and Decoding.](#AUTOGENERATED-encoding-and-decoding.)
* [Encoding and Decoding](#AUTOGENERATED-encoding-and-decoding)
* [tf.image.decode_jpeg(contents, channels=None, ratio=None, fancy_upscaling=None, try_recover_truncated=None, acceptable_fraction=None, name=None)](#decode_jpeg)
* [tf.image.encode_jpeg(image, format=None, quality=None, progressive=None, optimize_size=None, chroma_downsampling=None, density_unit=None, x_density=None, y_density=None, xmp_metadata=None, name=None)](#encode_jpeg)
* [tf.image.decode_png(contents, channels=None, name=None)](#decode_png)
* [tf.image.encode_png(image, compression=None, name=None)](#encode_png)
* [Resizing.](#AUTOGENERATED-resizing.)
* [Resizing](#AUTOGENERATED-resizing)
* [tf.image.resize_images(images, new_height, new_width, method=0)](#resize_images)
* [tf.image.resize_area(images, size, name=None)](#resize_area)
* [tf.image.resize_bicubic(images, size, name=None)](#resize_bicubic)
* [tf.image.resize_bilinear(images, size, name=None)](#resize_bilinear)
* [tf.image.resize_nearest_neighbor(images, size, name=None)](#resize_nearest_neighbor)
* [Cropping.](#AUTOGENERATED-cropping.)
* [Cropping](#AUTOGENERATED-cropping)
* [tf.image.resize_image_with_crop_or_pad(image, target_height, target_width)](#resize_image_with_crop_or_pad)
* [tf.image.pad_to_bounding_box(image, offset_height, offset_width, target_height, target_width)](#pad_to_bounding_box)
* [tf.image.crop_to_bounding_box(image, offset_height, offset_width, target_height, target_width)](#crop_to_bounding_box)
* [tf.image.random_crop(image, size, seed=None, name=None)](#random_crop)
* [tf.image.extract_glimpse(input, size, offsets, centered=None, normalized=None, uniform_noise=None, name=None)](#extract_glimpse)
* [Flipping and Transposing.](#AUTOGENERATED-flipping-and-transposing.)
* [Flipping and Transposing](#AUTOGENERATED-flipping-and-transposing)
* [tf.image.flip_up_down(image)](#flip_up_down)
* [tf.image.random_flip_up_down(image, seed=None)](#random_flip_up_down)
* [tf.image.flip_left_right(image)](#flip_left_right)
* [tf.image.random_flip_left_right(image, seed=None)](#random_flip_left_right)
* [tf.image.transpose_image(image)](#transpose_image)
* [Image Adjustments.](#AUTOGENERATED-image-adjustments.)
* [Image Adjustments](#AUTOGENERATED-image-adjustments)
* [tf.image.adjust_brightness(image, delta, min_value=None, max_value=None)](#adjust_brightness)
* [tf.image.random_brightness(image, max_delta, seed=None)](#random_brightness)
* [tf.image.adjust_contrast(images, contrast_factor, min_value=None, max_value=None)](#adjust_contrast)
@ -36,7 +40,7 @@
<!-- TOC-END This section was generated by neural network, THANKS FOR READING! -->
## Encoding and Decoding. <div class="md-anchor" id="AUTOGENERATED-encoding-and-decoding.">{#AUTOGENERATED-encoding-and-decoding.}</div>
## Encoding and Decoding <div class="md-anchor" id="AUTOGENERATED-encoding-and-decoding">{#AUTOGENERATED-encoding-and-decoding}</div>
TensorFlow provides Ops to decode and encode JPEG and PNG formats. Encoded
images are represented by scalar string Tensors, decoded images by 3-D uint8
@ -211,7 +215,7 @@ the smallest output, but is slower.
## Resizing. <div class="md-anchor" id="AUTOGENERATED-resizing.">{#AUTOGENERATED-resizing.}</div>
## Resizing <div class="md-anchor" id="AUTOGENERATED-resizing">{#AUTOGENERATED-resizing}</div>
The resizing Ops accept input images as tensors of several types. They always
output resized images as float32 tensors.
@ -376,7 +380,7 @@ Input images can be of different types but output images are always float.
## Cropping. <div class="md-anchor" id="AUTOGENERATED-cropping.">{#AUTOGENERATED-cropping.}</div>
## Cropping <div class="md-anchor" id="AUTOGENERATED-cropping">{#AUTOGENERATED-cropping}</div>
- - -
@ -555,7 +559,7 @@ The argument `normalized` and `centered` controls how the windows are built:
## Flipping and Transposing. <div class="md-anchor" id="AUTOGENERATED-flipping-and-transposing.">{#AUTOGENERATED-flipping-and-transposing.}</div>
## Flipping and Transposing <div class="md-anchor" id="AUTOGENERATED-flipping-and-transposing">{#AUTOGENERATED-flipping-and-transposing}</div>
- - -
@ -687,7 +691,7 @@ See also `transpose()`.
## Image Adjustments. <div class="md-anchor" id="AUTOGENERATED-image-adjustments.">{#AUTOGENERATED-image-adjustments.}</div>
## Image Adjustments <div class="md-anchor" id="AUTOGENERATED-image-adjustments">{#AUTOGENERATED-image-adjustments}</div>
TensorFlow provides functions to adjust images in various ways: brightness,
contrast, hue, and saturation. Each adjustment can be done with predefined

View File

@ -2,336 +2,349 @@
# TensorFlow Python reference documentation
* <b>[Building Graphs](framework.md)</b>: [class DType](framework.md#DType),
[class Dimension](framework.md#Dimension),
[class Graph](framework.md#Graph),
[class GraphKeys](framework.md#GraphKeys),
[NoGradient](framework.md#NoGradient),
[class Operation](framework.md#Operation),
[class RegisterGradient](framework.md#RegisterGradient),
[class RegisterShape](framework.md#RegisterShape),
[class Tensor](framework.md#Tensor),
[class TensorShape](framework.md#TensorShape),
[add_to_collection](framework.md#add_to_collection),
[as_dtype](framework.md#as_dtype),
[control_dependencies](framework.md#control_dependencies),
[convert_to_tensor](framework.md#convert_to_tensor),
[device](framework.md#device),
[get_collection](framework.md#get_collection),
[get_default_graph](framework.md#get_default_graph),
[get_seed](framework.md#get_seed),
[import_graph_def](framework.md#import_graph_def),
[name_scope](framework.md#name_scope),
[op_scope](framework.md#op_scope)
* **[Building Graphs](framework.md)**:
* [`add_to_collection`](framework.md#add_to_collection)
* [`as_dtype`](framework.md#as_dtype)
* [`control_dependencies`](framework.md#control_dependencies)
* [`convert_to_tensor`](framework.md#convert_to_tensor)
* [`device`](framework.md#device)
* [`Dimension`](framework.md#Dimension)
* [`DType`](framework.md#DType)
* [`get_collection`](framework.md#get_collection)
* [`get_default_graph`](framework.md#get_default_graph)
* [`get_seed`](framework.md#get_seed)
* [`Graph`](framework.md#Graph)
* [`GraphKeys`](framework.md#GraphKeys)
* [`import_graph_def`](framework.md#import_graph_def)
* [`name_scope`](framework.md#name_scope)
* [`NoGradient`](framework.md#NoGradient)
* [`op_scope`](framework.md#op_scope)
* [`Operation`](framework.md#Operation)
* [`RegisterGradient`](framework.md#RegisterGradient)
* [`RegisterShape`](framework.md#RegisterShape)
* [`Tensor`](framework.md#Tensor)
* [`TensorShape`](framework.md#TensorShape)
* <b>[Constants, Sequences, and Random Values](constant_op.md)</b>: [constant](constant_op.md#constant),
[fill](constant_op.md#fill),
[linspace](constant_op.md#linspace),
[ones](constant_op.md#ones),
[ones_like](constant_op.md#ones_like),
[random_normal](constant_op.md#random_normal),
[random_shuffle](constant_op.md#random_shuffle),
[random_uniform](constant_op.md#random_uniform),
[range](constant_op.md#range),
[set_random_seed](constant_op.md#set_random_seed),
[truncated_normal](constant_op.md#truncated_normal),
[zeros](constant_op.md#zeros),
[zeros_like](constant_op.md#zeros_like)
* **[Constants, Sequences, and Random Values](constant_op.md)**:
* [`constant`](constant_op.md#constant)
* [`fill`](constant_op.md#fill)
* [`linspace`](constant_op.md#linspace)
* [`ones`](constant_op.md#ones)
* [`ones_like`](constant_op.md#ones_like)
* [`random_normal`](constant_op.md#random_normal)
* [`random_shuffle`](constant_op.md#random_shuffle)
* [`random_uniform`](constant_op.md#random_uniform)
* [`range`](constant_op.md#range)
* [`set_random_seed`](constant_op.md#set_random_seed)
* [`truncated_normal`](constant_op.md#truncated_normal)
* [`zeros`](constant_op.md#zeros)
* [`zeros_like`](constant_op.md#zeros_like)
* <b>[Variables](state_ops.md)</b>: [class IndexedSlices](state_ops.md#IndexedSlices),
[class Saver](state_ops.md#Saver),
[class Variable](state_ops.md#Variable),
[all_variables](state_ops.md#all_variables),
[assert_variables_initialized](state_ops.md#assert_variables_initialized),
[assign](state_ops.md#assign),
[assign_add](state_ops.md#assign_add),
[assign_sub](state_ops.md#assign_sub),
[constant_initializer](state_ops.md#constant_initializer),
[count_up_to](state_ops.md#count_up_to),
[device](state_ops.md#device),
[get_checkpoint_state](state_ops.md#get_checkpoint_state),
[get_variable](state_ops.md#get_variable),
[get_variable_scope](state_ops.md#get_variable_scope),
[initialize_all_variables](state_ops.md#initialize_all_variables),
[initialize_variables](state_ops.md#initialize_variables),
[latest_checkpoint](state_ops.md#latest_checkpoint),
[random_normal_initializer](state_ops.md#random_normal_initializer),
[random_uniform_initializer](state_ops.md#random_uniform_initializer),
[scatter_add](state_ops.md#scatter_add),
[scatter_sub](state_ops.md#scatter_sub),
[scatter_update](state_ops.md#scatter_update),
[sparse_mask](state_ops.md#sparse_mask),
[trainable_variables](state_ops.md#trainable_variables),
[truncated_normal_initializer](state_ops.md#truncated_normal_initializer),
[uniform_unit_scaling_initializer](state_ops.md#uniform_unit_scaling_initializer),
[update_checkpoint_state](state_ops.md#update_checkpoint_state),
[variable_scope](state_ops.md#variable_scope),
[zeros_initializer](state_ops.md#zeros_initializer)
* **[Variables](state_ops.md)**:
* [`all_variables`](state_ops.md#all_variables)
* [`assert_variables_initialized`](state_ops.md#assert_variables_initialized)
* [`assign`](state_ops.md#assign)
* [`assign_add`](state_ops.md#assign_add)
* [`assign_sub`](state_ops.md#assign_sub)
* [`constant_initializer`](state_ops.md#constant_initializer)
* [`count_up_to`](state_ops.md#count_up_to)
* [`device`](state_ops.md#device)
* [`get_checkpoint_state`](state_ops.md#get_checkpoint_state)
* [`get_variable`](state_ops.md#get_variable)
* [`get_variable_scope`](state_ops.md#get_variable_scope)
* [`IndexedSlices`](state_ops.md#IndexedSlices)
* [`initialize_all_variables`](state_ops.md#initialize_all_variables)
* [`initialize_variables`](state_ops.md#initialize_variables)
* [`latest_checkpoint`](state_ops.md#latest_checkpoint)
* [`random_normal_initializer`](state_ops.md#random_normal_initializer)
* [`random_uniform_initializer`](state_ops.md#random_uniform_initializer)
* [`Saver`](state_ops.md#Saver)
* [`scatter_add`](state_ops.md#scatter_add)
* [`scatter_sub`](state_ops.md#scatter_sub)
* [`scatter_update`](state_ops.md#scatter_update)
* [`sparse_mask`](state_ops.md#sparse_mask)
* [`trainable_variables`](state_ops.md#trainable_variables)
* [`truncated_normal_initializer`](state_ops.md#truncated_normal_initializer)
* [`uniform_unit_scaling_initializer`](state_ops.md#uniform_unit_scaling_initializer)
* [`update_checkpoint_state`](state_ops.md#update_checkpoint_state)
* [`Variable`](state_ops.md#Variable)
* [`variable_scope`](state_ops.md#variable_scope)
* [`zeros_initializer`](state_ops.md#zeros_initializer)
* <b>[Tensor Transformations](array_ops.md)</b>: [cast](array_ops.md#cast),
[concat](array_ops.md#concat),
[dynamic_partition](array_ops.md#dynamic_partition),
[dynamic_stitch](array_ops.md#dynamic_stitch),
[expand_dims](array_ops.md#expand_dims),
[gather](array_ops.md#gather),
[pack](array_ops.md#pack),
[pad](array_ops.md#pad),
[rank](array_ops.md#rank),
[reshape](array_ops.md#reshape),
[reverse](array_ops.md#reverse),
[reverse_sequence](array_ops.md#reverse_sequence),
[shape](array_ops.md#shape),
[size](array_ops.md#size),
[slice](array_ops.md#slice),
[split](array_ops.md#split),
[squeeze](array_ops.md#squeeze),
[string_to_number](array_ops.md#string_to_number),
[tile](array_ops.md#tile),
[to_bfloat16](array_ops.md#to_bfloat16),
[to_double](array_ops.md#to_double),
[to_float](array_ops.md#to_float),
[to_int32](array_ops.md#to_int32),
[to_int64](array_ops.md#to_int64),
[transpose](array_ops.md#transpose),
[unpack](array_ops.md#unpack)
* **[Tensor Transformations](array_ops.md)**:
* [`cast`](array_ops.md#cast)
* [`concat`](array_ops.md#concat)
* [`dynamic_partition`](array_ops.md#dynamic_partition)
* [`dynamic_stitch`](array_ops.md#dynamic_stitch)
* [`expand_dims`](array_ops.md#expand_dims)
* [`gather`](array_ops.md#gather)
* [`pack`](array_ops.md#pack)
* [`pad`](array_ops.md#pad)
* [`rank`](array_ops.md#rank)
* [`reshape`](array_ops.md#reshape)
* [`reverse`](array_ops.md#reverse)
* [`reverse_sequence`](array_ops.md#reverse_sequence)
* [`shape`](array_ops.md#shape)
* [`size`](array_ops.md#size)
* [`slice`](array_ops.md#slice)
* [`split`](array_ops.md#split)
* [`squeeze`](array_ops.md#squeeze)
* [`string_to_number`](array_ops.md#string_to_number)
* [`tile`](array_ops.md#tile)
* [`to_bfloat16`](array_ops.md#to_bfloat16)
* [`to_double`](array_ops.md#to_double)
* [`to_float`](array_ops.md#to_float)
* [`to_int32`](array_ops.md#to_int32)
* [`to_int64`](array_ops.md#to_int64)
* [`transpose`](array_ops.md#transpose)
* [`unpack`](array_ops.md#unpack)
* <b>[Math](math_ops.md)</b>: [abs](math_ops.md#abs),
[accumulate_n](math_ops.md#accumulate_n),
[add](math_ops.md#add),
[add_n](math_ops.md#add_n),
[argmax](math_ops.md#argmax),
[argmin](math_ops.md#argmin),
[batch_cholesky](math_ops.md#batch_cholesky),
[batch_matmul](math_ops.md#batch_matmul),
[batch_matrix_determinant](math_ops.md#batch_matrix_determinant),
[batch_matrix_inverse](math_ops.md#batch_matrix_inverse),
[ceil](math_ops.md#ceil),
[cholesky](math_ops.md#cholesky),
[complex](math_ops.md#complex),
[complex_abs](math_ops.md#complex_abs),
[conj](math_ops.md#conj),
[cos](math_ops.md#cos),
[diag](math_ops.md#diag),
[div](math_ops.md#div),
[edit_distance](math_ops.md#edit_distance),
[exp](math_ops.md#exp),
[floor](math_ops.md#floor),
[imag](math_ops.md#imag),
[inv](math_ops.md#inv),
[invert_permutation](math_ops.md#invert_permutation),
[listdiff](math_ops.md#listdiff),
[log](math_ops.md#log),
[matmul](math_ops.md#matmul),
[matrix_determinant](math_ops.md#matrix_determinant),
[matrix_inverse](math_ops.md#matrix_inverse),
[maximum](math_ops.md#maximum),
[minimum](math_ops.md#minimum),
[mod](math_ops.md#mod),
[mul](math_ops.md#mul),
[neg](math_ops.md#neg),
[pow](math_ops.md#pow),
[real](math_ops.md#real),
[reduce_all](math_ops.md#reduce_all),
[reduce_any](math_ops.md#reduce_any),
[reduce_max](math_ops.md#reduce_max),
[reduce_mean](math_ops.md#reduce_mean),
[reduce_min](math_ops.md#reduce_min),
[reduce_prod](math_ops.md#reduce_prod),
[reduce_sum](math_ops.md#reduce_sum),
[round](math_ops.md#round),
[rsqrt](math_ops.md#rsqrt),
[segment_max](math_ops.md#segment_max),
[segment_mean](math_ops.md#segment_mean),
[segment_min](math_ops.md#segment_min),
[segment_prod](math_ops.md#segment_prod),
[segment_sum](math_ops.md#segment_sum),
[sign](math_ops.md#sign),
[sin](math_ops.md#sin),
[sparse_segment_mean](math_ops.md#sparse_segment_mean),
[sparse_segment_sum](math_ops.md#sparse_segment_sum),
[sqrt](math_ops.md#sqrt),
[square](math_ops.md#square),
[sub](math_ops.md#sub),
[transpose](math_ops.md#transpose),
[unique](math_ops.md#unique),
[unsorted_segment_sum](math_ops.md#unsorted_segment_sum),
[where](math_ops.md#where)
* **[Math](math_ops.md)**:
* [`abs`](math_ops.md#abs)
* [`accumulate_n`](math_ops.md#accumulate_n)
* [`add`](math_ops.md#add)
* [`add_n`](math_ops.md#add_n)
* [`argmax`](math_ops.md#argmax)
* [`argmin`](math_ops.md#argmin)
* [`batch_cholesky`](math_ops.md#batch_cholesky)
* [`batch_matmul`](math_ops.md#batch_matmul)
* [`batch_matrix_determinant`](math_ops.md#batch_matrix_determinant)
* [`batch_matrix_inverse`](math_ops.md#batch_matrix_inverse)
* [`ceil`](math_ops.md#ceil)
* [`cholesky`](math_ops.md#cholesky)
* [`complex`](math_ops.md#complex)
* [`complex_abs`](math_ops.md#complex_abs)
* [`conj`](math_ops.md#conj)
* [`cos`](math_ops.md#cos)
* [`diag`](math_ops.md#diag)
* [`div`](math_ops.md#div)
* [`edit_distance`](math_ops.md#edit_distance)
* [`exp`](math_ops.md#exp)
* [`floor`](math_ops.md#floor)
* [`imag`](math_ops.md#imag)
* [`inv`](math_ops.md#inv)
* [`invert_permutation`](math_ops.md#invert_permutation)
* [`listdiff`](math_ops.md#listdiff)
* [`log`](math_ops.md#log)
* [`matmul`](math_ops.md#matmul)
* [`matrix_determinant`](math_ops.md#matrix_determinant)
* [`matrix_inverse`](math_ops.md#matrix_inverse)
* [`maximum`](math_ops.md#maximum)
* [`minimum`](math_ops.md#minimum)
* [`mod`](math_ops.md#mod)
* [`mul`](math_ops.md#mul)
* [`neg`](math_ops.md#neg)
* [`pow`](math_ops.md#pow)
* [`real`](math_ops.md#real)
* [`reduce_all`](math_ops.md#reduce_all)
* [`reduce_any`](math_ops.md#reduce_any)
* [`reduce_max`](math_ops.md#reduce_max)
* [`reduce_mean`](math_ops.md#reduce_mean)
* [`reduce_min`](math_ops.md#reduce_min)
* [`reduce_prod`](math_ops.md#reduce_prod)
* [`reduce_sum`](math_ops.md#reduce_sum)
* [`round`](math_ops.md#round)
* [`rsqrt`](math_ops.md#rsqrt)
* [`segment_max`](math_ops.md#segment_max)
* [`segment_mean`](math_ops.md#segment_mean)
* [`segment_min`](math_ops.md#segment_min)
* [`segment_prod`](math_ops.md#segment_prod)
* [`segment_sum`](math_ops.md#segment_sum)
* [`sign`](math_ops.md#sign)
* [`sin`](math_ops.md#sin)
* [`sparse_segment_mean`](math_ops.md#sparse_segment_mean)
* [`sparse_segment_sum`](math_ops.md#sparse_segment_sum)
* [`sqrt`](math_ops.md#sqrt)
* [`square`](math_ops.md#square)
* [`sub`](math_ops.md#sub)
* [`transpose`](math_ops.md#transpose)
* [`unique`](math_ops.md#unique)
* [`unsorted_segment_sum`](math_ops.md#unsorted_segment_sum)
* [`where`](math_ops.md#where)
* <b>[Control Flow](control_flow_ops.md)</b>: [Assert](control_flow_ops.md#Assert),
[Print](control_flow_ops.md#Print),
[add_check_numerics_ops](control_flow_ops.md#add_check_numerics_ops),
[check_numerics](control_flow_ops.md#check_numerics),
[count_up_to](control_flow_ops.md#count_up_to),
[equal](control_flow_ops.md#equal),
[greater](control_flow_ops.md#greater),
[greater_equal](control_flow_ops.md#greater_equal),
[group](control_flow_ops.md#group),
[identity](control_flow_ops.md#identity),
[is_finite](control_flow_ops.md#is_finite),
[is_inf](control_flow_ops.md#is_inf),
[is_nan](control_flow_ops.md#is_nan),
[less](control_flow_ops.md#less),
[less_equal](control_flow_ops.md#less_equal),
[logical_and](control_flow_ops.md#logical_and),
[logical_not](control_flow_ops.md#logical_not),
[logical_or](control_flow_ops.md#logical_or),
[logical_xor](control_flow_ops.md#logical_xor),
[no_op](control_flow_ops.md#no_op),
[not_equal](control_flow_ops.md#not_equal),
[select](control_flow_ops.md#select),
[tuple](control_flow_ops.md#tuple),
[verify_tensor_all_finite](control_flow_ops.md#verify_tensor_all_finite),
[where](control_flow_ops.md#where)
* **[Control Flow](control_flow_ops.md)**:
* [`add_check_numerics_ops`](control_flow_ops.md#add_check_numerics_ops)
* [`Assert`](control_flow_ops.md#Assert)
* [`check_numerics`](control_flow_ops.md#check_numerics)
* [`count_up_to`](control_flow_ops.md#count_up_to)
* [`equal`](control_flow_ops.md#equal)
* [`greater`](control_flow_ops.md#greater)
* [`greater_equal`](control_flow_ops.md#greater_equal)
* [`group`](control_flow_ops.md#group)
* [`identity`](control_flow_ops.md#identity)
* [`is_finite`](control_flow_ops.md#is_finite)
* [`is_inf`](control_flow_ops.md#is_inf)
* [`is_nan`](control_flow_ops.md#is_nan)
* [`less`](control_flow_ops.md#less)
* [`less_equal`](control_flow_ops.md#less_equal)
* [`logical_and`](control_flow_ops.md#logical_and)
* [`logical_not`](control_flow_ops.md#logical_not)
* [`logical_or`](control_flow_ops.md#logical_or)
* [`logical_xor`](control_flow_ops.md#logical_xor)
* [`no_op`](control_flow_ops.md#no_op)
* [`not_equal`](control_flow_ops.md#not_equal)
* [`Print`](control_flow_ops.md#Print)
* [`select`](control_flow_ops.md#select)
* [`tuple`](control_flow_ops.md#tuple)
* [`verify_tensor_all_finite`](control_flow_ops.md#verify_tensor_all_finite)
* [`where`](control_flow_ops.md#where)
* <b>[Images](image.md)</b>: [adjust_brightness](image.md#adjust_brightness),
[adjust_contrast](image.md#adjust_contrast),
[crop_to_bounding_box](image.md#crop_to_bounding_box),
[decode_jpeg](image.md#decode_jpeg),
[decode_png](image.md#decode_png),
[encode_jpeg](image.md#encode_jpeg),
[encode_png](image.md#encode_png),
[extract_glimpse](image.md#extract_glimpse),
[flip_left_right](image.md#flip_left_right),
[flip_up_down](image.md#flip_up_down),
[pad_to_bounding_box](image.md#pad_to_bounding_box),
[per_image_whitening](image.md#per_image_whitening),
[random_brightness](image.md#random_brightness),
[random_contrast](image.md#random_contrast),
[random_crop](image.md#random_crop),
[random_flip_left_right](image.md#random_flip_left_right),
[random_flip_up_down](image.md#random_flip_up_down),
[resize_area](image.md#resize_area),
[resize_bicubic](image.md#resize_bicubic),
[resize_bilinear](image.md#resize_bilinear),
[resize_image_with_crop_or_pad](image.md#resize_image_with_crop_or_pad),
[resize_images](image.md#resize_images),
[resize_nearest_neighbor](image.md#resize_nearest_neighbor),
[transpose_image](image.md#transpose_image)
* **[Images](image.md)**:
* [`adjust_brightness`](image.md#adjust_brightness)
* [`adjust_contrast`](image.md#adjust_contrast)
* [`crop_to_bounding_box`](image.md#crop_to_bounding_box)
* [`decode_jpeg`](image.md#decode_jpeg)
* [`decode_png`](image.md#decode_png)
* [`encode_jpeg`](image.md#encode_jpeg)
* [`encode_png`](image.md#encode_png)
* [`extract_glimpse`](image.md#extract_glimpse)
* [`flip_left_right`](image.md#flip_left_right)
* [`flip_up_down`](image.md#flip_up_down)
* [`pad_to_bounding_box`](image.md#pad_to_bounding_box)
* [`per_image_whitening`](image.md#per_image_whitening)
* [`random_brightness`](image.md#random_brightness)
* [`random_contrast`](image.md#random_contrast)
* [`random_crop`](image.md#random_crop)
* [`random_flip_left_right`](image.md#random_flip_left_right)
* [`random_flip_up_down`](image.md#random_flip_up_down)
* [`resize_area`](image.md#resize_area)
* [`resize_bicubic`](image.md#resize_bicubic)
* [`resize_bilinear`](image.md#resize_bilinear)
* [`resize_image_with_crop_or_pad`](image.md#resize_image_with_crop_or_pad)
* [`resize_images`](image.md#resize_images)
* [`resize_nearest_neighbor`](image.md#resize_nearest_neighbor)
* [`transpose_image`](image.md#transpose_image)
* <b>[Sparse Tensors](sparse_ops.md)</b>: [class SparseTensor](sparse_ops.md#SparseTensor),
[class SparseTensorValue](sparse_ops.md#SparseTensorValue),
[shape](sparse_ops.md#shape),
[sparse_concat](sparse_ops.md#sparse_concat),
[sparse_fill_empty_rows](sparse_ops.md#sparse_fill_empty_rows),
[sparse_reorder](sparse_ops.md#sparse_reorder),
[sparse_retain](sparse_ops.md#sparse_retain),
[sparse_tensor_to_dense](sparse_ops.md#sparse_tensor_to_dense),
[sparse_to_dense](sparse_ops.md#sparse_to_dense),
[sparse_to_indicator](sparse_ops.md#sparse_to_indicator)
* **[Sparse Tensors](sparse_ops.md)**:
* [`shape`](sparse_ops.md#shape)
* [`sparse_concat`](sparse_ops.md#sparse_concat)
* [`sparse_fill_empty_rows`](sparse_ops.md#sparse_fill_empty_rows)
* [`sparse_reorder`](sparse_ops.md#sparse_reorder)
* [`sparse_retain`](sparse_ops.md#sparse_retain)
* [`sparse_tensor_to_dense`](sparse_ops.md#sparse_tensor_to_dense)
* [`sparse_to_dense`](sparse_ops.md#sparse_to_dense)
* [`sparse_to_indicator`](sparse_ops.md#sparse_to_indicator)
* [`SparseTensor`](sparse_ops.md#SparseTensor)
* [`SparseTensorValue`](sparse_ops.md#SparseTensorValue)
* <b>[Inputs and Readers](io_ops.md)</b>: [class FIFOQueue](io_ops.md#FIFOQueue),
[class FixedLengthRecordReader](io_ops.md#FixedLengthRecordReader),
[class IdentityReader](io_ops.md#IdentityReader),
[class QueueBase](io_ops.md#QueueBase),
[class RandomShuffleQueue](io_ops.md#RandomShuffleQueue),
[class ReaderBase](io_ops.md#ReaderBase),
[class TFRecordReader](io_ops.md#TFRecordReader),
[class TextLineReader](io_ops.md#TextLineReader),
[class WholeFileReader](io_ops.md#WholeFileReader),
[batch](io_ops.md#batch),
[batch_join](io_ops.md#batch_join),
[decode_csv](io_ops.md#decode_csv),
[decode_raw](io_ops.md#decode_raw),
[limit_epochs](io_ops.md#limit_epochs),
[match_filenames_once](io_ops.md#match_filenames_once),
[matching_files](io_ops.md#matching_files),
[parse_example](io_ops.md#parse_example),
[parse_single_example](io_ops.md#parse_single_example),
[placeholder](io_ops.md#placeholder),
[range_input_producer](io_ops.md#range_input_producer),
[read_file](io_ops.md#read_file),
[shuffle_batch](io_ops.md#shuffle_batch),
[shuffle_batch_join](io_ops.md#shuffle_batch_join),
[size](io_ops.md#size),
[slice_input_producer](io_ops.md#slice_input_producer),
[string_input_producer](io_ops.md#string_input_producer)
* **[Inputs and Readers](io_ops.md)**:
* [`batch`](io_ops.md#batch)
* [`batch_join`](io_ops.md#batch_join)
* [`decode_csv`](io_ops.md#decode_csv)
* [`decode_raw`](io_ops.md#decode_raw)
* [`FIFOQueue`](io_ops.md#FIFOQueue)
* [`FixedLengthRecordReader`](io_ops.md#FixedLengthRecordReader)
* [`IdentityReader`](io_ops.md#IdentityReader)
* [`limit_epochs`](io_ops.md#limit_epochs)
* [`match_filenames_once`](io_ops.md#match_filenames_once)
* [`matching_files`](io_ops.md#matching_files)
* [`parse_example`](io_ops.md#parse_example)
* [`parse_single_example`](io_ops.md#parse_single_example)
* [`placeholder`](io_ops.md#placeholder)
* [`QueueBase`](io_ops.md#QueueBase)
* [`RandomShuffleQueue`](io_ops.md#RandomShuffleQueue)
* [`range_input_producer`](io_ops.md#range_input_producer)
* [`read_file`](io_ops.md#read_file)
* [`ReaderBase`](io_ops.md#ReaderBase)
* [`shuffle_batch`](io_ops.md#shuffle_batch)
* [`shuffle_batch_join`](io_ops.md#shuffle_batch_join)
* [`size`](io_ops.md#size)
* [`slice_input_producer`](io_ops.md#slice_input_producer)
* [`string_input_producer`](io_ops.md#string_input_producer)
* [`TextLineReader`](io_ops.md#TextLineReader)
* [`TFRecordReader`](io_ops.md#TFRecordReader)
* [`WholeFileReader`](io_ops.md#WholeFileReader)
* <b>[Data IO (Python functions)](python_io.md)</b>: [class TFRecordWriter](python_io.md#TFRecordWriter),
[tf_record_iterator](python_io.md#tf_record_iterator)
* **[Data IO (Python functions)](python_io.md)**:
* [`tf_record_iterator`](python_io.md#tf_record_iterator)
* [`TFRecordWriter`](python_io.md#TFRecordWriter)
* <b>[Neural Network](nn.md)</b>: [avg_pool](nn.md#avg_pool),
[bias_add](nn.md#bias_add),
[compute_accidental_hits](nn.md#compute_accidental_hits),
[conv2d](nn.md#conv2d),
[depthwise_conv2d](nn.md#depthwise_conv2d),
[dropout](nn.md#dropout),
[embedding_lookup](nn.md#embedding_lookup),
[embedding_lookup_sparse](nn.md#embedding_lookup_sparse),
[fixed_unigram_candidate_sampler](nn.md#fixed_unigram_candidate_sampler),
[in_top_k](nn.md#in_top_k),
[l2_loss](nn.md#l2_loss),
[l2_normalize](nn.md#l2_normalize),
[learned_unigram_candidate_sampler](nn.md#learned_unigram_candidate_sampler),
[local_response_normalization](nn.md#local_response_normalization),
[log_uniform_candidate_sampler](nn.md#log_uniform_candidate_sampler),
[max_pool](nn.md#max_pool),
[max_pool_with_argmax](nn.md#max_pool_with_argmax),
[moments](nn.md#moments),
[nce_loss](nn.md#nce_loss),
[relu](nn.md#relu),
[relu6](nn.md#relu6),
[sampled_softmax_loss](nn.md#sampled_softmax_loss),
[separable_conv2d](nn.md#separable_conv2d),
[sigmoid](nn.md#sigmoid),
[sigmoid_cross_entropy_with_logits](nn.md#sigmoid_cross_entropy_with_logits),
[softmax](nn.md#softmax),
[softmax_cross_entropy_with_logits](nn.md#softmax_cross_entropy_with_logits),
[softplus](nn.md#softplus),
[tanh](nn.md#tanh),
[top_k](nn.md#top_k),
[uniform_candidate_sampler](nn.md#uniform_candidate_sampler)
* **[Neural Network](nn.md)**:
* [`avg_pool`](nn.md#avg_pool)
* [`bias_add`](nn.md#bias_add)
* [`compute_accidental_hits`](nn.md#compute_accidental_hits)
* [`conv2d`](nn.md#conv2d)
* [`depthwise_conv2d`](nn.md#depthwise_conv2d)
* [`dropout`](nn.md#dropout)
* [`embedding_lookup`](nn.md#embedding_lookup)
* [`embedding_lookup_sparse`](nn.md#embedding_lookup_sparse)
* [`fixed_unigram_candidate_sampler`](nn.md#fixed_unigram_candidate_sampler)
* [`in_top_k`](nn.md#in_top_k)
* [`l2_loss`](nn.md#l2_loss)
* [`l2_normalize`](nn.md#l2_normalize)
* [`learned_unigram_candidate_sampler`](nn.md#learned_unigram_candidate_sampler)
* [`local_response_normalization`](nn.md#local_response_normalization)
* [`log_uniform_candidate_sampler`](nn.md#log_uniform_candidate_sampler)
* [`max_pool`](nn.md#max_pool)
* [`max_pool_with_argmax`](nn.md#max_pool_with_argmax)
* [`moments`](nn.md#moments)
* [`nce_loss`](nn.md#nce_loss)
* [`relu`](nn.md#relu)
* [`relu6`](nn.md#relu6)
* [`sampled_softmax_loss`](nn.md#sampled_softmax_loss)
* [`separable_conv2d`](nn.md#separable_conv2d)
* [`sigmoid`](nn.md#sigmoid)
* [`sigmoid_cross_entropy_with_logits`](nn.md#sigmoid_cross_entropy_with_logits)
* [`softmax`](nn.md#softmax)
* [`softmax_cross_entropy_with_logits`](nn.md#softmax_cross_entropy_with_logits)
* [`softplus`](nn.md#softplus)
* [`tanh`](nn.md#tanh)
* [`top_k`](nn.md#top_k)
* [`uniform_candidate_sampler`](nn.md#uniform_candidate_sampler)
* <b>[Running Graphs](client.md)</b>: [class AbortedError](client.md#AbortedError),
[class AlreadyExistsError](client.md#AlreadyExistsError),
[class CancelledError](client.md#CancelledError),
[class DataLossError](client.md#DataLossError),
[class DeadlineExceededError](client.md#DeadlineExceededError),
[class FailedPreconditionError](client.md#FailedPreconditionError),
[class InternalError](client.md#InternalError),
[class InvalidArgumentError](client.md#InvalidArgumentError),
[class NotFoundError](client.md#NotFoundError),
[class OpError](client.md#OpError),
[class OutOfRangeError](client.md#OutOfRangeError),
[class PermissionDeniedError](client.md#PermissionDeniedError),
[class ResourceExhaustedError](client.md#ResourceExhaustedError),
[class Session](client.md#Session),
[class UnauthenticatedError](client.md#UnauthenticatedError),
[class UnavailableError](client.md#UnavailableError),
[class UnimplementedError](client.md#UnimplementedError),
[class UnknownError](client.md#UnknownError),
[get_default_session](client.md#get_default_session)
* **[Running Graphs](client.md)**:
* [`AbortedError`](client.md#AbortedError)
* [`AlreadyExistsError`](client.md#AlreadyExistsError)
* [`CancelledError`](client.md#CancelledError)
* [`DataLossError`](client.md#DataLossError)
* [`DeadlineExceededError`](client.md#DeadlineExceededError)
* [`FailedPreconditionError`](client.md#FailedPreconditionError)
* [`get_default_session`](client.md#get_default_session)
* [`InternalError`](client.md#InternalError)
* [`InvalidArgumentError`](client.md#InvalidArgumentError)
* [`NotFoundError`](client.md#NotFoundError)
* [`OpError`](client.md#OpError)
* [`OutOfRangeError`](client.md#OutOfRangeError)
* [`PermissionDeniedError`](client.md#PermissionDeniedError)
* [`ResourceExhaustedError`](client.md#ResourceExhaustedError)
* [`Session`](client.md#Session)
* [`UnauthenticatedError`](client.md#UnauthenticatedError)
* [`UnavailableError`](client.md#UnavailableError)
* [`UnimplementedError`](client.md#UnimplementedError)
* [`UnknownError`](client.md#UnknownError)
* <b>[Training](train.md)</b>: [class AdagradOptimizer](train.md#AdagradOptimizer),
[class AdamOptimizer](train.md#AdamOptimizer),
[class AggregationMethod](train.md#AggregationMethod),
[class Coordinator](train.md#Coordinator),
[class ExponentialMovingAverage](train.md#ExponentialMovingAverage),
[class FtrlOptimizer](train.md#FtrlOptimizer),
[class GradientDescentOptimizer](train.md#GradientDescentOptimizer),
[class MomentumOptimizer](train.md#MomentumOptimizer),
[class Optimizer](train.md#Optimizer),
[class QueueRunner](train.md#QueueRunner),
[class RMSPropOptimizer](train.md#RMSPropOptimizer),
[class SummaryWriter](train.md#SummaryWriter),
[add_queue_runner](train.md#add_queue_runner),
[clip_by_average_norm](train.md#clip_by_average_norm),
[clip_by_global_norm](train.md#clip_by_global_norm),
[clip_by_norm](train.md#clip_by_norm),
[clip_by_value](train.md#clip_by_value),
[exponential_decay](train.md#exponential_decay),
[global_norm](train.md#global_norm),
[global_step](train.md#global_step),
[gradients](train.md#gradients),
[histogram_summary](train.md#histogram_summary),
[image_summary](train.md#image_summary),
[merge_all_summaries](train.md#merge_all_summaries),
[merge_summary](train.md#merge_summary),
[scalar_summary](train.md#scalar_summary),
[start_queue_runners](train.md#start_queue_runners),
[stop_gradient](train.md#stop_gradient),
[summary_iterator](train.md#summary_iterator),
[write_graph](train.md#write_graph),
[zero_fraction](train.md#zero_fraction)
* **[Training](train.md)**:
* [`AdagradOptimizer`](train.md#AdagradOptimizer)
* [`AdamOptimizer`](train.md#AdamOptimizer)
* [`add_queue_runner`](train.md#add_queue_runner)
* [`AggregationMethod`](train.md#AggregationMethod)
* [`clip_by_average_norm`](train.md#clip_by_average_norm)
* [`clip_by_global_norm`](train.md#clip_by_global_norm)
* [`clip_by_norm`](train.md#clip_by_norm)
* [`clip_by_value`](train.md#clip_by_value)
* [`Coordinator`](train.md#Coordinator)
* [`exponential_decay`](train.md#exponential_decay)
* [`ExponentialMovingAverage`](train.md#ExponentialMovingAverage)
* [`FtrlOptimizer`](train.md#FtrlOptimizer)
* [`global_norm`](train.md#global_norm)
* [`global_step`](train.md#global_step)
* [`GradientDescentOptimizer`](train.md#GradientDescentOptimizer)
* [`gradients`](train.md#gradients)
* [`histogram_summary`](train.md#histogram_summary)
* [`image_summary`](train.md#image_summary)
* [`merge_all_summaries`](train.md#merge_all_summaries)
* [`merge_summary`](train.md#merge_summary)
* [`MomentumOptimizer`](train.md#MomentumOptimizer)
* [`Optimizer`](train.md#Optimizer)
* [`QueueRunner`](train.md#QueueRunner)
* [`RMSPropOptimizer`](train.md#RMSPropOptimizer)
* [`scalar_summary`](train.md#scalar_summary)
* [`start_queue_runners`](train.md#start_queue_runners)
* [`stop_gradient`](train.md#stop_gradient)
* [`summary_iterator`](train.md#summary_iterator)
* [`SummaryWriter`](train.md#SummaryWriter)
* [`write_graph`](train.md#write_graph)
* [`zero_fraction`](train.md#zero_fraction)
<div class="sections-order" style="display: none;">
<!--

View File

@ -1,6 +1,10 @@
<!-- This file is machine generated: DO NOT EDIT! -->
# Inputs and Readers
Note: Functions taking `Tensor` arguments can also take anything
accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
<!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
## Contents
* [Placeholders](#AUTOGENERATED-placeholders)
@ -15,6 +19,7 @@
* [Converting](#AUTOGENERATED-converting)
* [tf.decode_csv(records, record_defaults, field_delim=None, name=None)](#decode_csv)
* [tf.decode_raw(bytes, out_type, little_endian=None, name=None)](#decode_raw)
* [Example protocol buffer](#AUTOGENERATED-example-protocol-buffer)
* [tf.parse_example(serialized, names=None, sparse_keys=None, sparse_types=None, dense_keys=None, dense_types=None, dense_defaults=None, dense_shapes=None, name='ParseExample')](#parse_example)
* [tf.parse_single_example(serialized, names=None, sparse_keys=None, sparse_types=None, dense_keys=None, dense_types=None, dense_defaults=None, dense_shapes=None, name='ParseSingleExample')](#parse_single_example)
* [Queues](#AUTOGENERATED-queues)
@ -1061,109 +1066,106 @@ Reinterpret the bytes of a string as a vector of numbers.
of bytes divided by the number of bytes to represent out_type.
- - -
### Example protocol buffer <div class="md-anchor" id="AUTOGENERATED-example-protocol-buffer">{#AUTOGENERATED-example-protocol-buffer}</div>
TensorFlow's [recommended format for training
examples](../../how_tos/reading_data/index.md#standard-tensorflow-format)
is serialized `Example` protocol buffers, [described
here](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/example/example.proto).
They contain `Features`, [described
here](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/example/feature.proto).
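As a sketch of the producer side (the feature names are illustrative, not a fixed schema), one serialized `Example` can be built in Python like so:

```python
import tensorflow as tf

example = tf.train.Example(features=tf.train.Features(feature={
    "age": tf.train.Feature(int64_list=tf.train.Int64List(value=[29])),
    "gender": tf.train.Feature(bytes_list=tf.train.BytesList(value=[b"f"])),
}))
serialized = example.SerializeToString()  # what the parsing ops consume
```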
- - -
### tf.parse_example(serialized, names=None, sparse_keys=None, sparse_types=None, dense_keys=None, dense_types=None, dense_defaults=None, dense_shapes=None, name='ParseExample') <div class="md-anchor" id="parse_example">{#parse_example}</div>
Parse Example protos.
Parses `Example` protos.
##### Args:
Parses a number of serialized [`Example`]
(https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/example/example.proto)
protos given in `serialized`.
`names` may contain descriptive names for the corresponding serialized protos.
These may be useful for debugging purposes, but they have no effect on the
output. If not `None`, `names` must be the same length as `serialized`.
* <b>serialized</b>: string vector, a batch of binary serialized Example protos.
* <b>names</b>: A string vector, the names of the serialized protos.
"names" may contain, e.g., table key (descriptive) names for the
corresponding serialized protos. These are purely for debugging
purposes, and the presence of values here has no effect on the output.
"names" may be an empty vector, if no names are available.
If non-empty, this vector must be the same length as "serialized".
* <b>sparse_keys</b>: A string list of keys in the Examples' features.
These keys are associated with sparse values.
* <b>sparse_types</b>: A list of DTypes.
This list's length must match that of sparse_keys. Currently
parse_example supports tf.float32 (FloatList), tf.int64 (Int64List),
and tf.string (BytesList).
* <b>dense_keys</b>: A string list of keys in the Examples' features.
These keys are associated with dense values.
* <b>dense_types</b>: A list of DTypes.
This list's length must match that of dense_keys. Currently
parse_example supports tf.float32 (FloatList), tf.int64 (Int64List),
and tf.string (BytesList).
* <b>dense_defaults</b>: A dict of {key:Tensor} (some may be missing).
The keys of the dict must match the dense_keys of the feature.
If a key is not present in this dictionary, the corresponding dense
Feature is required in all elements of serialized.
* <b>dense_shapes</b>: A list of tuples.
Entries provide the shape of data in each dense Feature in features.
The length of dense_shapes must be the same as the length of dense_keys.
The number of elements in the Feature corresponding to dense_key[j]
must always have np.prod(dense_shapes[j]) entries.
If dense_shapes[j] == (D0, D1, ..., DN) then the shape of the output
Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):
The dense outputs are just the inputs row-stacked by batch.
* <b>name</b>: (Optional) Name of Op in the graph.
This op parses serialized examples into a dictionary mapping keys to `Tensor`
and `SparseTensor` objects respectively, depending on whether the keys appear
in `sparse_keys` or `dense_keys`.
##### Returns:
The key `dense_keys[j]` is mapped to a `Tensor` of type `dense_types[j]` and
of shape `(serialized.size(),) + dense_shapes[j]`.
A dictionary mapping keys to Tensors and SparseTensors.
`dense_defaults` provides defaults for values referenced using `dense_keys`.
If a key is not present in this dictionary, the corresponding dense `Feature`
is required in all elements of `serialized`.
The key dense_keys[j] is mapped to a tensor of type dense_types[j] and
of shape (serialized.size(),) + dense_shapes[j] (i.e., the dense outputs are
inputs, reshaped in row-major format and then row-stacked by batch).
`dense_shapes[j]` provides the shape of each `Feature` entry referenced by
`dense_keys[j]`. The number of elements in the `Feature` corresponding to
`dense_keys[j]` must always have `np.prod(dense_shapes[j])` entries. The
returned `Tensor` for `dense_keys[j]` has shape `[N] + dense_shapes[j]`, where
`N` is the number of `Example`s in `serialized`.
The key sparse_keys[j] is mapped to a SparseTensor of type sparse_types[j].
The SparseTensor represents a ragged matrix. Its indices are [batch, index]
where "batch" is is the batch entry the value is from, and "index" is the
value's index in the list of values associated with that feature
and example. For example, if one expects a tf.float32 sparse feature "ft"
and three serialized examples are provided:
The key `sparse_keys[j]` is mapped to a `SparseTensor` of type
`sparse_types[j]`. The `SparseTensor` represents a ragged matrix.
Its indices are `[batch, index]` where `batch` is the batch entry the value
is from, and `index` is the value's index in the list of values associated
with that feature and example.
serialized = [
Examples:
* <b>features</b>:
{ feature: [ key: { "ft" value: float_list: { value: [1.0, 2.0] } } ] },
* <b>features</b>:
{ feature: [] },
* <b>features</b>:
{ feature: [ key: { "ft" value: float_list: { value: [3.0] } } ] }
]
For example, if one expects a `tf.float32` sparse feature `ft` and three
serialized `Example`s are provided:
then the output will look like:
```
serialized = [
features:
{ feature: [ key: { "ft" value: float_list: { value: [1.0, 2.0] } } ] },
features:
{ feature: [] },
features:
{ feature: [ key: { "ft" value: float_list: { value: [3.0] } } ] }
]
```
{"ft": SparseTensor(indices=[[0, 0], [0, 1], [2, 0]],
values=[1.0, 2.0, 3.0],
shape=(3, 2)) }
then the output will look like:
##### Raises:
```
{"ft": SparseTensor(indices=[[0, 0], [0, 1], [2, 0]],
values=[1.0, 2.0, 3.0],
shape=(3, 2)) }
```
Given two `Example` input protos in `serialized`:
* <b>ValueError</b>: If sparse and dense keys intersect, or input lengths do not
match up for sparse_* (similarly for dense_*).
* <b>TypeError</b>: If an input is malformed.
```
[
features: {
feature: { key: "kw" value: { bytes_list: { value: [ "knit", "big" ] } } }
feature: { key: "gps" value: { float_list: { value: [] } } }
},
features: {
feature: { key: "kw" value: { bytes_list: { value: [ "emmy" ] } } }
feature: { key: "dank" value: { int64_list: { value: [ 42 ] } } }
feature: { key: "gps" value: { } }
}
]
```
Example input, format, and output: Just Sparse Inputs
================================================
And arguments
Given two brain.Example input protos:
```
names: ["input0", "input1"],
sparse_keys: ["kw", "dank", "gps"]
sparse_types: [DT_STRING, DT_INT64, DT_FLOAT]
```
Then the output is a dictionary:
* <b>serialized</b>: // serialized versions of the protos below
[features: {
* <b>feature</b>: { key: "kw" value: { bytes_list: { value: [ "knit", "big" ] } } }
* <b>feature</b>: { key: "gps" value: { float_list: { value: [] } } }
},
* <b>features</b>: {
* <b>feature</b>: { key: "kw" value: { bytes_list: { value: [ "emmy" ] } } }
* <b>feature</b>: { key: "dank" value: { int64_list: { value: [ 42 ] } } }
* <b>feature</b>: { key: "gps" value: { } }
}]
* <b>names</b>: ["input0", "input1"],
* <b>sparse_keys</b>: ["kw", "dank", "gps"]
* <b>sparse_types</b>: [DT_STRING, DT_INT64, DT_FLOAT]
Then the expected output is a dictionary:
```python
{
"kw": SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
@ -1178,78 +1180,96 @@ Then the expected output is a dictionary:
values=[],
shape=[2, 0]),
}
```
For dense results in two serialized `Example`s:
Example input, format, and output: Dense Inputs (without defaults)
==================================================================
Given two brain.Example input protos:
* <b>serialized</b>: // serialized versions of the protos below
[features: {
* <b>feature</b>: { key: "age" value: { int64_list: { value: [ 0 ] } } }
* <b>feature</b>: { key: "gender" value: { bytes_list: { value: [ "f" ] } } }
```
[
features: {
feature: { key: "age" value: { int64_list: { value: [ 0 ] } } }
feature: { key: "gender" value: { bytes_list: { value: [ "f" ] } } }
},
* <b>features</b>: {
* <b>feature</b>: { key: "age" value: { int64_list: { value: [] } } }
* <b>feature</b>: { key: "gender" value: { bytes_list: { value: [ "f" ] } } }
}]
features: {
feature: { key: "age" value: { int64_list: { value: [] } } }
feature: { key: "gender" value: { bytes_list: { value: [ "f" ] } } }
}
]
```
* <b>names</b>: ["input0", "input1"],
* <b>dense_keys</b>: np.array(["age", "gender"])
* <b>dense_types</b>: [tf.int64, tf.string]
* <b>dense_defaults</b>: {
"age": -1 # defaults to -1 if missing
We can use arguments:
```
names: ["input0", "input1"],
dense_keys: np.array(["age", "gender"]),
dense_types: [tf.int64, tf.string],
dense_defaults: {
"age": -1 # "age" defaults to -1 if missing
# "gender" has no specified default so it's required
}
dense_shapes: [(1,), (1,)], # age, gender
```
* <b>dense_shapes</b>: [(1,), (1,)] # age, gender, label, weight
And the expected output is:
Then the expected output is a dictionary:
```python
{
"age": [[0], [-1]],
"gender": [["f"], ["f"]],
}
```
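Tying the dense example together, a sketch of the corresponding call (assuming the signature documented above; `serialized_batch` stands in for the actual input):

```python
import tensorflow as tf

serialized_batch = tf.placeholder(tf.string, shape=[None])
parsed = tf.parse_example(
    serialized_batch,
    dense_keys=["age", "gender"],
    dense_types=[tf.int64, tf.string],
    dense_defaults={"age": -1},   # "gender" has no default, so it is required
    dense_shapes=[(1,), (1,)])
age, gender = parsed["age"], parsed["gender"]
```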
##### Args:
Example input, format, and output: Dense Inputs (with defaults)
===============================================================
* <b>serialized</b>: A list of strings, a batch of binary serialized `Example`
protos.
* <b>names</b>: A list of strings, the names of the serialized protos.
* <b>sparse_keys</b>: A list of string keys in the examples' features.
The results for these keys will be returned as `SparseTensor` objects.
* <b>sparse_types</b>: A list of `DTypes` of the same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
* <b>dense_keys</b>: A list of string keys in the examples' features.
The results for these keys will be returned as `Tensor`s
* <b>dense_types</b>: A list of DTypes of the same length as `dense_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
* <b>dense_defaults</b>: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the dense_keys of the feature.
* <b>dense_shapes</b>: A list of tuples with the same length as `dense_keys`.
The shape of the data for each dense feature referenced by `dense_keys`.
* <b>name</b>: A name for this operation (optional).
Given two brain.Example input protos:
##### Returns:
A `dict` mapping keys to `Tensor`s and `SparseTensor`s.
##### Raises:
* <b>serialized</b>: // serialized versions of the protos below
[features: {
* <b>feature</b>: { key: "weight" value: { float_list: { value: [ 1.0 ] } } }
},
* <b>features</b>: {
* <b>feature</b>: { key: "label" value: { float_list: { value: [ -1.0, 0.0 ] } } }
}]
* <b>names</b>: ["input0", "input1"],
* <b>dense_keys</b>: np.array(["label", "weight"])
* <b>dense_defaults</b>: {
"label": [1.0, 2.0], # float (default: vector)
"weight": 5.0 # float (default: scalar, 5.0)
}
* <b>dense_shapes</b>: [(2,), (1,)] # label, weight
Then the expected output is a dictionary:
{
"label": [[1.0, 2.0], [-1.0, 0.0]],
"weight": [[1.0], [5.0]],
}
* <b>ValueError</b>: If sparse and dense key sets intersect, or input lengths do not
match up.
- - -
### tf.parse_single_example(serialized, names=None, sparse_keys=None, sparse_types=None, dense_keys=None, dense_types=None, dense_defaults=None, dense_shapes=None, name='ParseSingleExample') <div class="md-anchor" id="parse_single_example">{#parse_single_example}</div>
Identical to parse_example but for scalar serialized and names.
Parses a single `Example` proto.
Similar to `parse_example`, except:
For dense tensors, the returned `Tensor` is identical to the output of
`parse_example`, except there is no batch dimension: the output shape is the
same as the shape given in `dense_shape`.
For `SparseTensor`s, the first (batch) column of the indices matrix is removed
(the indices matrix is a column vector), the values vector is unchanged, and
the first (batch_size) entry of the shape vector is removed (it is now a
single element vector).
See also `parse_example`.
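A sketch of parsing a single record (signature assumed from the documentation above; the feature name is illustrative):

```python
import tensorflow as tf

serialized = tf.placeholder(tf.string, shape=[])  # one Example, not a batch
parsed = tf.parse_single_example(
    serialized,
    dense_keys=["age"],
    dense_types=[tf.int64],
    dense_shapes=[(1,)])
age = parsed["age"]  # shape (1,): no batch dimension
```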
##### Args:
@ -1264,22 +1284,12 @@ Identical to parse_example but for scalar serialized and names.
* <b>dense_types</b>: See parse_example documentation for more details.
* <b>dense_defaults</b>: See parse_example documentation for more details.
* <b>dense_shapes</b>: See parse_example documentation for more details.
* <b>name</b>: Optional op name.
* <b>name</b>: A name for this operation (optional).
##### Returns:
A dictionary mapping keys to Tensors and SparseTensors.
For dense tensors, the Tensor is identical to the output of parse_example,
except it has one fewer dimension (the first, batch, dimension is removed).
For SparseTensors:
The first (batch) column of the indices matrix is removed
(it is now a column vector).
The values vector is unchanged.
The first (batch_size) entry of the shape vector is removed
(it is now a single element vector).
##### Raises:
@ -1632,7 +1642,7 @@ Reads and outputs the entire contents of the input filename.
## Input pipeline <div class="md-anchor" id="AUTOGENERATED-input-pipeline">{#AUTOGENERATED-input-pipeline}</div>
TensorFlow functions for setting up an input-prefetching pipeline.
Please see the [reading data how-to](../../how_tos/reading_data.md)
Please see the [reading data how-to](../../how_tos/reading_data/index.md)
for context.
### Beginning of an input pipeline <div class="md-anchor" id="AUTOGENERATED-beginning-of-an-input-pipeline">{#AUTOGENERATED-beginning-of-an-input-pipeline}</div>
@ -1822,41 +1832,45 @@ is added to the current Graph's QUEUE_RUNNER collection.
Run a list of tensors to fill a queue to create batches of examples.
This version enqueues a different list of tensors in different threads.
Implemented using a queue -- a QueueRunner for the queue
is added to the current Graph's QUEUE_RUNNER collection.
Enqueues a different list of tensors in different threads.
Implemented using a queue -- a `QueueRunner` for the queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
`len(tensor_list_list)` threads will be started, with thread `i` enqueuing
the tensors from `tensor_list_list[i]`. `tensor_list_list[i1][j]` must match
`tensor_list_list[i2][j]` in type and shape, except in the first dimension if
`enqueue_many` is true.
If `enqueue_many` is false, each `tensor_list_list[i]` is assumed to
represent a single example. Otherwise, `tensor_list_list[i]` is assumed to
represent a batch of examples, where the first dimension is indexed by
example, and all members of `tensor_list_list[i]` should have the same size
in the first dimension.
If `enqueue_many` is false, then an input tensor `x` will be output as a
tensor with shape `[batch_size] + x.shape`. If `enqueue_many` is true, the
slices of any input tensor `x` are treated as examples, and the output tensors
will have shape `[batch_size] + x.shape[1:]`.
The `capacity` argument controls how far the prefetching is allowed to
grow the queue.
##### Args:
* <b>tensor_list_list</b>: A list of tuples of tensors to enqueue.
len(tensor_list_list) threads will be started, with the i-th
thread enqueuing the tensors from tensor_list_list[i].
tensor_list_list[i1][j] must match tensor_list_list[i2][j] in type and
shape (except in the first dimension if enqueue_many is true).
* <b>batch_size</b>: The new batch size pulled from the queue.
* <b>capacity</b>: Maximum number of elements in the queue; controls how
far ahead the prefetching is allowed to get, and hence memory usage.
* <b>enqueue_many</b>: If False, each tensor_list_list[i] is assumed to
represent a single example. If True, tensor_list_list[i] is
assumed to represent a batch of examples, where the first
dimension is indexed by example, and all members of
tensor_list_list[i] should have the same size in the first
dimension.
* <b>shapes</b>: Optional. The shapes for each example. Defaults to the
inferred shapes for tensor_list_list[i] (which must match, after
leaving off the first dimension if enqueue_many is True).
* <b>batch_size</b>: An integer. The new batch size pulled from the queue.
* <b>capacity</b>: An integer. The maximum number of elements in the queue.
* <b>enqueue_many</b>: Whether each tensor in `tensor_list_list` is a single
example.
* <b>shapes</b>: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list_list[i]`.
* <b>name</b>: A name for the operations (optional).
##### Returns:
A list of tensors with the same number and types as
tensor_list_list[i]. If enqueue_many is false, then an input
tensor with shape `[x, y, z]` will be output as a tensor with
shape `[batch_size, x, y, z]`. If enqueue_many is True, and an
input tensor has shape `[*, x, y, z]`, the output will have
shape `[batch_size, x, y, z]`.
`tensor_list_list[i]`.
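For example, a sketch of the pattern described above, assuming this entry documents `tf.train.batch_join` and that the hypothetical `read_example_i()` helpers each return a matching `(image, label)` pair:
```python
# Sketch only: three threads, each enqueuing its own (image, label) pair.
example_lists = [read_example_0(), read_example_1(), read_example_2()]
images, labels = tf.train.batch_join(example_lists,
                                     batch_size=32,
                                     capacity=1000)
# images has shape [32] + image.shape; labels has shape [32] + label.shape.
```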
- - -
@ -1933,15 +1947,15 @@ It adds:
* <b>min_after_dequeue</b>: Minimum number of elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
* <b>seed</b>: Seed for the random shuffling within the queue.
* <b>enqueue_many</b>: If False, each tensor_list_list[i] is assumed to
represent a single example. If True, tensor_list_list[i] is
* <b>enqueue_many</b>: If `False`, each tensor_list_list[i] is assumed to
represent a single example. If `True`, tensor_list_list[i] is
assumed to represent a batch of examples, where the first
dimension is indexed by example, and all members of
tensor_list_list[i] should have the same size in the first
dimension.
* <b>shapes</b>: Optional. The shapes for each example. Defaults to the
inferred shapes for tensor_list_list[i] (which must match, after
leaving off the first dimension if enqueue_many is True).
inferred shapes for `tensor_list_list[i]` (which must match, after
leaving off the first dimension if enqueue_many is `True`).
* <b>name</b>: A name for the operations (optional).
##### Returns:

View File

@ -1,6 +1,10 @@
<!-- This file is machine generated: DO NOT EDIT! -->
# Math
Note: Functions taking `Tensor` arguments can also take anything
accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
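For example, both a plain Python list and a NumPy array are accepted here:
```python
import numpy as np
import tensorflow as tf

# Both operands are converted with tf.convert_to_tensor under the hood.
y = tf.matmul([[1.0, 2.0]], np.array([[3.0], [4.0]], dtype=np.float32))
```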
<!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
## Contents
* [Arithmetic Operators](#AUTOGENERATED-arithmetic-operators)

View File

@ -1,6 +1,10 @@
<!-- This file is machine generated: DO NOT EDIT! -->
# Neural Network
Note: Functions taking `Tensor` arguments can also take anything
accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
<!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
## Contents
* [Activation Functions](#AUTOGENERATED-activation-functions)
@ -899,7 +903,7 @@ only considering a small randomly-chosen subset of contrastive classes
(called candidates) for each batch of training examples.
See our [Candidate Sampling Algorithms Reference]
(http://www.tensorflow.org/extras/candidate_sampling.pdf)
(../../extras/candidate_sampling.pdf)
### Sampled Loss Functions <div class="md-anchor" id="AUTOGENERATED-sampled-loss-functions">{#AUTOGENERATED-sampled-loss-functions}</div>

View File

@ -1,16 +1,20 @@
<!-- This file is machine generated: DO NOT EDIT! -->
# Sparse Tensors
Note: Functions taking `Tensor` arguments can also take anything
accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
<!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
## Contents
* [Sparse Tensor Representation.](#AUTOGENERATED-sparse-tensor-representation.)
* [Sparse Tensor Representation](#AUTOGENERATED-sparse-tensor-representation)
* [class tf.SparseTensor](#SparseTensor)
* [class tf.SparseTensorValue](#SparseTensorValue)
* [Sparse to Dense Conversion.](#AUTOGENERATED-sparse-to-dense-conversion.)
* [Sparse to Dense Conversion](#AUTOGENERATED-sparse-to-dense-conversion)
* [tf.sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value, name=None)](#sparse_to_dense)
* [tf.sparse_tensor_to_dense(sp_input, default_value, name=None)](#sparse_tensor_to_dense)
* [tf.sparse_to_indicator(sp_input, vocab_size, name=None)](#sparse_to_indicator)
* [Manipulation.](#AUTOGENERATED-manipulation.)
* [Manipulation](#AUTOGENERATED-manipulation)
* [tf.sparse_concat(concat_dim, sp_inputs, name=None)](#sparse_concat)
* [tf.sparse_reorder(sp_input, name=None)](#sparse_reorder)
* [tf.sparse_retain(sp_input, to_retain)](#sparse_retain)
@ -19,7 +23,7 @@
<!-- TOC-END This section was generated by neural network, THANKS FOR READING! -->
## Sparse Tensor Representation. <div class="md-anchor" id="AUTOGENERATED-sparse-tensor-representation.">{#AUTOGENERATED-sparse-tensor-representation.}</div>
## Sparse Tensor Representation <div class="md-anchor" id="AUTOGENERATED-sparse-tensor-representation">{#AUTOGENERATED-sparse-tensor-representation}</div>
TensorFlow supports a `SparseTensor` representation for data that is sparse
in multiple dimensions. Contrast this representation with `IndexedSlices`,
@ -157,7 +161,7 @@ Alias for field number 1
## Sparse to Dense Conversion. <div class="md-anchor" id="AUTOGENERATED-sparse-to-dense-conversion.">{#AUTOGENERATED-sparse-to-dense-conversion.}</div>
## Sparse to Dense Conversion <div class="md-anchor" id="AUTOGENERATED-sparse-to-dense-conversion">{#AUTOGENERATED-sparse-to-dense-conversion}</div>
- - -
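A small sketch of the conversion, using the `tf.sparse_to_dense` signature listed in the contents above:
```python
# Scatter two values into an otherwise-default 3x4 dense tensor.
dense = tf.sparse_to_dense([[0, 0], [2, 3]],  # sparse_indices
                           [3, 4],            # output_shape
                           [1, 2],            # sparse_values
                           0)                 # default_value
# ==> [[1, 0, 0, 0],
#      [0, 0, 0, 0],
#      [0, 0, 0, 2]]
```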
@ -296,7 +300,7 @@ The input `SparseTensor` must be in row-major order.
## Manipulation. <div class="md-anchor" id="AUTOGENERATED-manipulation.">{#AUTOGENERATED-manipulation.}</div>
## Manipulation <div class="md-anchor" id="AUTOGENERATED-manipulation">{#AUTOGENERATED-manipulation}</div>
- - -

View File

@ -1,6 +1,10 @@
<!-- This file is machine generated: DO NOT EDIT! -->
# Variables
Note: Functions taking `Tensor` arguments can also take anything
accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
<!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
## Contents
* [Variables](#AUTOGENERATED-variables)
@ -11,7 +15,7 @@
* [tf.initialize_all_variables()](#initialize_all_variables)
* [tf.initialize_variables(var_list, name='init')](#initialize_variables)
* [tf.assert_variables_initialized(var_list=None)](#assert_variables_initialized)
* [Saving and Restoring Variables.](#AUTOGENERATED-saving-and-restoring-variables.)
* [Saving and Restoring Variables](#AUTOGENERATED-saving-and-restoring-variables)
* [class tf.train.Saver](#Saver)
* [tf.train.latest_checkpoint(checkpoint_dir, latest_filename=None)](#latest_checkpoint)
* [tf.train.get_checkpoint_state(checkpoint_dir, latest_filename=None)](#get_checkpoint_state)
@ -325,7 +329,7 @@ This is not a graph construction method, it does not add ops to the graph.
This convenience method requires a session where the graph containing this
variable has been launched. If no session is passed, the default session is
used. See the [Session class](../client.md#Session) for more information on
used. See the [Session class](client.md#Session) for more information on
launching a graph and on sessions.
```python
@ -506,7 +510,7 @@ logged by the C++ runtime. This is expected.
## Saving and Restoring Variables. <div class="md-anchor" id="AUTOGENERATED-saving-and-restoring-variables.">{#AUTOGENERATED-saving-and-restoring-variables.}</div>
## Saving and Restoring Variables <div class="md-anchor" id="AUTOGENERATED-saving-and-restoring-variables">{#AUTOGENERATED-saving-and-restoring-variables}</div>
- - -

View File

@ -3,7 +3,7 @@
# Training
<!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
## Contents
* [Optimizers.](#AUTOGENERATED-optimizers.)
* [Optimizers](#AUTOGENERATED-optimizers)
* [class tf.train.Optimizer](#Optimizer)
* [Usage](#AUTOGENERATED-usage)
* [Processing gradients before applying them.](#AUTOGENERATED-processing-gradients-before-applying-them.)
@ -15,7 +15,7 @@
* [class tf.train.AdamOptimizer](#AdamOptimizer)
* [class tf.train.FtrlOptimizer](#FtrlOptimizer)
* [class tf.train.RMSPropOptimizer](#RMSPropOptimizer)
* [Gradient Computation.](#AUTOGENERATED-gradient-computation.)
* [Gradient Computation](#AUTOGENERATED-gradient-computation)
* [tf.gradients(ys, xs, grad_ys=None, name='gradients', colocate_gradients_with_ops=False, gate_gradients=False, aggregation_method=None)](#gradients)
* [class tf.AggregationMethod](#AggregationMethod)
* [tf.stop_gradient(input, name=None)](#stop_gradient)
@ -25,26 +25,26 @@
* [tf.clip_by_average_norm(t, clip_norm, name=None)](#clip_by_average_norm)
* [tf.clip_by_global_norm(t_list, clip_norm, use_norm=None, name=None)](#clip_by_global_norm)
* [tf.global_norm(t_list, name=None)](#global_norm)
* [Decaying the learning rate.](#AUTOGENERATED-decaying-the-learning-rate.)
* [Decaying the learning rate](#AUTOGENERATED-decaying-the-learning-rate)
* [tf.train.exponential_decay(learning_rate, global_step, decay_steps, decay_rate, staircase=False, name=None)](#exponential_decay)
* [Moving Averages.](#AUTOGENERATED-moving-averages.)
* [Moving Averages](#AUTOGENERATED-moving-averages)
* [class tf.train.ExponentialMovingAverage](#ExponentialMovingAverage)
* [Coordinator and QueueRunner.](#AUTOGENERATED-coordinator-and-queuerunner.)
* [Coordinator and QueueRunner](#AUTOGENERATED-coordinator-and-queuerunner)
* [class tf.train.Coordinator](#Coordinator)
* [class tf.train.QueueRunner](#QueueRunner)
* [tf.train.add_queue_runner(qr, collection='queue_runners')](#add_queue_runner)
* [tf.train.start_queue_runners(sess=None, coord=None, daemon=True, start=True, collection='queue_runners')](#start_queue_runners)
* [Summary Operations.](#AUTOGENERATED-summary-operations.)
* [Summary Operations](#AUTOGENERATED-summary-operations)
* [tf.scalar_summary(tags, values, collections=None, name=None)](#scalar_summary)
* [tf.image_summary(tag, tensor, max_images=None, collections=None, name=None)](#image_summary)
* [tf.histogram_summary(tag, values, collections=None, name=None)](#histogram_summary)
* [tf.nn.zero_fraction(value, name=None)](#zero_fraction)
* [tf.merge_summary(inputs, collections=None, name=None)](#merge_summary)
* [tf.merge_all_summaries(key='summaries')](#merge_all_summaries)
* [Adding Summaries to Event Files.](#AUTOGENERATED-adding-summaries-to-event-files.)
* [Adding Summaries to Event Files](#AUTOGENERATED-adding-summaries-to-event-files)
* [class tf.train.SummaryWriter](#SummaryWriter)
* [tf.train.summary_iterator(path)](#summary_iterator)
* [Training utilities.](#AUTOGENERATED-training-utilities.)
* [Training utilities](#AUTOGENERATED-training-utilities)
* [tf.train.global_step(sess, global_step_tensor)](#global_step)
* [tf.train.write_graph(graph_def, logdir, name, as_text=True)](#write_graph)
@ -53,7 +53,7 @@
This library provides a set of classes and functions that helps train models.
## Optimizers. <div class="md-anchor" id="AUTOGENERATED-optimizers.">{#AUTOGENERATED-optimizers.}</div>
## Optimizers <div class="md-anchor" id="AUTOGENERATED-optimizers">{#AUTOGENERATED-optimizers}</div>
The Optimizer base class provides methods to compute gradients for a loss and
apply gradients to variables. A collection of subclasses implement classic
@ -523,7 +523,7 @@ Construct a new RMSProp optimizer.
## Gradient Computation. <div class="md-anchor" id="AUTOGENERATED-gradient-computation.">{#AUTOGENERATED-gradient-computation.}</div>
## Gradient Computation <div class="md-anchor" id="AUTOGENERATED-gradient-computation">{#AUTOGENERATED-gradient-computation}</div>
TensorFlow provides functions to compute the derivatives for a given
TensorFlow computation graph, adding operations to the graph. The
@ -816,7 +816,7 @@ Any entries in `t_list` that are of type None are ignored.
## Decaying the learning rate. <div class="md-anchor" id="AUTOGENERATED-decaying-the-learning-rate.">{#AUTOGENERATED-decaying-the-learning-rate.}</div>
## Decaying the learning rate <div class="md-anchor" id="AUTOGENERATED-decaying-the-learning-rate">{#AUTOGENERATED-decaying-the-learning-rate}</div>
- - -
### tf.train.exponential_decay(learning_rate, global_step, decay_steps, decay_rate, staircase=False, name=None) <div class="md-anchor" id="exponential_decay">{#exponential_decay}</div>
@ -873,7 +873,7 @@ optimizer.minimize(...my loss..., global_step=global_step)
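Putting the pieces together, a sketch using the signature above (the `loss` tensor is assumed to exist):
```python
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(0.1, global_step,
                                           100000, 0.96, staircase=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
# Passing global_step makes minimize() increment it on every training step.
train_op = optimizer.minimize(loss, global_step=global_step)
```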
## Moving Averages. <div class="md-anchor" id="AUTOGENERATED-moving-averages.">{#AUTOGENERATED-moving-averages.}</div>
## Moving Averages <div class="md-anchor" id="AUTOGENERATED-moving-averages">{#AUTOGENERATED-moving-averages}</div>
Some training algorithms, such as GradientDescent and Momentum, often benefit
from maintaining a moving average of variables during optimization. Using the
@ -1075,7 +1075,7 @@ Returns the `Variable` holding the average of `var`.
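A sketch of the usual pattern (assuming `var0`, `var1`, and `train_op` already exist):
```python
ema = tf.train.ExponentialMovingAverage(decay=0.999)
maintain_averages_op = ema.apply([var0, var1])

# Update the shadow averages after each training step.
with tf.control_dependencies([train_op]):
    training_op = tf.group(maintain_averages_op)
```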
## Coordinator and QueueRunner. <div class="md-anchor" id="AUTOGENERATED-coordinator-and-queuerunner.">{#AUTOGENERATED-coordinator-and-queuerunner.}</div>
## Coordinator and QueueRunner <div class="md-anchor" id="AUTOGENERATED-coordinator-and-queuerunner">{#AUTOGENERATED-coordinator-and-queuerunner}</div>
See [Threading and Queues](../../how_tos/threading_and_queues/index.md)
for how to use threads and queues. For documentation on the Queue API,
@ -1399,17 +1399,21 @@ the list of all threads.
## Summary Operations. <div class="md-anchor" id="AUTOGENERATED-summary-operations.">{#AUTOGENERATED-summary-operations.}</div>
## Summary Operations <div class="md-anchor" id="AUTOGENERATED-summary-operations">{#AUTOGENERATED-summary-operations}</div>
The following ops output
[`Summary`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/framework/summary.proto)
protocol buffers as serialized string tensors.
You can fetch the output of a summary op in a session, and pass it to a
[SummaryWriter](train.md#SummaryWriter) to append it to an event file. You can
then use TensorBoard to visualize the contents of the event files. See
[TensorBoard and Summaries](../../how_tos/summaries_and_tensorboard/index.md)
for more details.
You can fetch the output of a summary op in a session, and pass it to
a [SummaryWriter](train.md#SummaryWriter) to append it to an event
file. Event files contain
[`Event`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/util/event.proto)
protos that can contain `Summary` protos along with the timestamp and
step. You can then use TensorBoard to visualize the contents of the
event files. See [TensorBoard and
Summaries](../../how_tos/summaries_and_tensorboard/index.md) for more
details.
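For instance, a sketch of the round trip from summary op to event file (`loss`, `sess`, and `step` are assumed to exist):
```python
loss_summary = tf.scalar_summary("loss", loss)
merged = tf.merge_all_summaries()
writer = tf.train.SummaryWriter("/tmp/logs", sess.graph_def)

summary_str = sess.run(merged)
writer.add_summary(summary_str, step)
```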
- - -
@ -1587,7 +1591,7 @@ Merges all summaries collected in the default graph.
## Adding Summaries to Event Files. <div class="md-anchor" id="AUTOGENERATED-adding-summaries-to-event-files.">{#AUTOGENERATED-adding-summaries-to-event-files.}</div>
## Adding Summaries to Event Files <div class="md-anchor" id="AUTOGENERATED-adding-summaries-to-event-files">{#AUTOGENERATED-adding-summaries-to-event-files}</div>
See [Summaries and
TensorBoard](../../how_tos/summaries_and_tensorboard/index.md) for an
@ -1768,7 +1772,7 @@ for more information about their attributes.
## Training utilities. <div class="md-anchor" id="AUTOGENERATED-training-utilities.">{#AUTOGENERATED-training-utilities.}</div>
## Training utilities <div class="md-anchor" id="AUTOGENERATED-training-utilities">{#AUTOGENERATED-training-utilities}</div>
- - -

View File

@ -77,7 +77,7 @@ The default graph now has three nodes: two `constant()` ops and one `matmul()`
op. To actually multiply the matrices, and get the result of the multiplication,
you must launch the graph in a session.
## Launching the graph in a Session
### Launching the graph in a session
Launching follows construction. To launch a graph, create a `Session` object.
Without arguments the session constructor launches the default graph.
@ -102,20 +102,16 @@ sess = tf.Session()
# The output of the op is returned in 'result' as a numpy `ndarray` object.
result = sess.run(product)
print result
# ==> [[ 12.]]
# Close the Session when we're done.
sess.close()
# Stdout output ==> [[ 12.]]
```
Sessions should be closed to release resources. You can also enter a `Session`
with a "with" block. The `Session` closes automatically at the end of the
`with` block.
```python
with tf.Session() as sess:
result = sess.run([product])
@ -150,6 +146,37 @@ Devices are specified with strings. The currently supported devices are:
See [Using GPUs](../how_tos/using_gpu/index.md) for more information about GPUs
and TensorFlow.
## Interactive Usage
The Python examples in the documentation launch the graph with a
[`Session`](../api_docs/python/client.md#Session) and use the
[`Session.run()`](../api_docs/python/client.md#Session.run) method to execute
operations.
For ease of use in interactive Python environments, such as
[IPython](http://ipython.org), you can instead use the
[`InteractiveSession`](../api_docs/python/client.md#InteractiveSession) class,
and the [`Tensor.eval()`](../api_docs/python/framework.md#Tensor.eval) and
[`Operation.run()`](../api_docs/python/framework.md#Operation.run) methods. This
avoids having to keep a variable holding the session.
```python
# Enter an interactive TensorFlow Session.
import tensorflow as tf
sess = tf.InteractiveSession()
x = tf.Variable([1.0, 2.0])
a = tf.constant([3.0, 3.0])
# Initialize 'x' using the run() method of its initializer op.
x.initializer.run()
# Add an op to subtract 'a' from 'x'. Run it and print the result.
sub = tf.sub(x, a)
print sub.eval()
# ==> [-2. -1.]
```
## Tensors
TensorFlow programs use a tensor data structure to represent all data -- only
@ -159,14 +186,6 @@ static type, a rank, and a shape. To learn more about how TensorFlow handles
these concepts, see the [Rank, Shape, and Type](../resources/dims_types.md)
reference.
# output:
# [array([ 21.], dtype=float32), array([ 7.], dtype=float32)]
## Variables
Variables maintain state across executions of the graph. The following example

View File

@ -52,17 +52,15 @@ other software packages, please take the red pill. If you've never even heard
of MNIST, definitely take the blue pill. If you're somewhere in between, we
suggest skimming blue, then red.
TODO(danmane): Add in creative commons attribution for these images.
Also, make sure the sizes are precisely the same.
<div style="width:100%; margin:auto; margin-bottom:10px; margin-top:20px; display: flex; flex-direction: row">
<a href="../tutorials/mnist/beginners/index.md">
<img style="flex-grow:1; flex-shrink:1;border: 1px solid black;" src="./blue_pill.jpg">
<img style="flex-grow:1; flex-shrink:1;border: 1px solid black;" src="./blue_pill.png">
</a>
<a href="../tutorials/mnist/pros/index.md">
<img style="flex-grow:1; flex-shrink:1; border: 1px solid black;" src="./red_pill.jpg">
<img style="flex-grow:1; flex-shrink:1; border: 1px solid black;" src="./red_pill.png">
</a>
</div>
<p style="font-size:10px;">Images licensed CC BY-SA 4.0; original by W. Carter</p>
If you're already sure you want to learn and install TensorFlow you can skip
these and charge ahead. Don't worry, you'll still get to see MNIST -- we'll

View File

@ -4,10 +4,10 @@
### Ubuntu/Linux
Make sure you have `pip` and `numpy` installed :
Make sure you have [pip](https://pypi.python.org/pypi/pip) installed:
```sh
$ sudo apt-get install python-pip python-numpy
$ sudo apt-get install python-pip
```
Install TensorFlow:
@ -22,7 +22,7 @@ $ sudo pip install https://storage.googleapis.com/tensorflow/linux/gpu/tensorflo
### Mac OS X
Make sure you have `pip` installed:
Make sure you have [pip](https://pypi.python.org/pypi/pip) installed:
If using `easy_install`:
@ -90,16 +90,16 @@ Validation error: 84.6%
```
## Source Installation {#source}
## Installing from sources {#source}
### Clone the TensorFlow repository
TODO(keveman): Supply clone command for external users.
```sh
$ git clone --recurse-submodules https://YOUR_WHITELISTED_EMAIL_WITH_AT_REPLACED_BY_DOT@tensorflow.googlesource.com/tf3
$ git clone --recurse-submodules https://tensorflow.googlesource.com/tensorflow
```
`--recurse-submodules` is required to fetch the protobuf library that TensorFlow
depends on.
### Installation for Linux
@ -162,11 +162,9 @@ GPU support will be enabled for TensorFlow
Please specify the location where CUDA 7.0 toolkit is installed. Refer to
README.md for more details. [default is: /usr/local/cuda]: /usr/local/cuda
CUDA 7.0 toolkit found
Please specify the location where CUDNN 6.5 V2 library is installed. Refer to
README.md for more details. [default is: /usr/local/cuda]: /usr/local/cuda
CUDNN 6.5 V2 library found
Setting up Cuda include
Setting up Cuda lib64
@ -191,9 +189,6 @@ $ bazel-bin/tensorflow/cc/tutorials_example_trainer --use_gpu
000009/000005 lambda = 2.000000 x = [0.894427 -0.447214] y = [1.788854 -0.894427]
000006/000001 lambda = 2.000000 x = [0.894427 -0.447214] y = [1.788854 -0.894427]
000009/000009 lambda = 2.000000 x = [0.894427 -0.447214] y = [1.788854 -0.894427]
000006/000008 lambda = 2.000000 x = [0.894427 -0.447214] y = [1.788854 -0.894427]
000009/000003 lambda = 2.000000 x = [0.894427 -0.447214] y = [1.788854 -0.894427]
000006/000006 lambda = 2.000000 x = [0.894427 -0.447214] y = [1.788854 -0.894427]
```
Note that "--config=cuda" is needed to enable the GPU support.
@ -231,14 +226,24 @@ Note: You need to install
Follow installation instructions [here](http://docs.scipy.org/doc/numpy/user/install.html).
### Build and train your first TensorFlow neural net model
### Create the pip package and install
```sh
$ cd tf3
$ bazel build -c opt //tensorflow/tools/pip_package:build_pip_package
$ bazel build tensorflow/models/image/mnist:convolutional
$ bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg
$ bazel-bin/tensorflow/models/image/mnist/convolutional
# The name of the .whl file will depend on your platform.
$ pip install /tmp/tensorflow_pkg/tensorflow-0.5.0-cp27-none-linux_x86_64.whl
```
### Train your first TensorFlow neural net model
From the root of your source tree, run:
```sh
$ python tensorflow/models/image/mnist/convolutional.py
Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.

View File

@ -8,8 +8,11 @@ your TensorFlow graph, quantitative metrics about the execution of your graph,
and even additional data like images that pass through it. When TensorBoard is
fully configured, it looks like this:
TODO(danmane): Enable a live TensorBoard
![MNIST TensorBoard](./mnist_tensorboard.png "MNIST TensorBoard")
![MNIST TensorBoard](./mnist_tensorboard.png "MNIST TensorBoard") If you're on
desktop Chrome or FF, try playing around with [this live
TensorBoard](/tensorboard/cifar.html).
## Serializing the data

View File

@ -1,3 +1,5 @@
# BibTex Citation
```
@misc{tensorflow2015-whitepaper,
title={{TensorFlow}: Large-Scale Machine Learning on Heterogeneous Systems},
url={http://www.tensorflow.org/extras/tensorflow-whitepaper2015.pdf},
@ -9,7 +11,7 @@ author={
Eugene~Brevdo and
Zhifeng~Chen and
Craig~Citro and
Greg~Corrado and
Greg~S-Corrado and
Andy~Davis and
Jeffrey~Dean and
Matthieu~Devin and
@ -43,3 +45,4 @@ author={
Xiaoqiang~Zheng},
year={2015},
}
```

View File

@ -1,21 +1,28 @@
# Frequently Asked Questions
This document provides answers to some of the frequently asked questions about
TensorFlow. If you have a question that is not covered here, please
[get in touch](index.md).
TensorFlow. If you have a question that is not covered here, you might find an
answer on one of the TensorFlow [community resources](index.md).
<!-- TOC-BEGIN This section is generated by neural network: DO NOT EDIT! -->
## Contents
* [Building a TensorFlow graph](#AUTOGENERATED-building-a-tensorflow-graph)
* [Running a TensorFlow computation](#AUTOGENERATED-running-a-tensorflow-computation)
* [Variables](#AUTOGENERATED-variables)
* [Tensor shapes](#AUTOGENERATED-tensor-shapes)
* [TensorBoard](#AUTOGENERATED-tensorboard)
* [Extending TensorFlow](#AUTOGENERATED-extending-tensorflow)
* [Miscellaneous](#AUTOGENERATED-miscellaneous)
<!-- TOC-END This section was generated by neural network, THANKS FOR READING! -->
#### Building a TensorFlow graph
### Building a TensorFlow graph <div class="md-anchor" id="AUTOGENERATED-building-a-tensorflow-graph">{#AUTOGENERATED-building-a-tensorflow-graph}</div>
See also the
[API documentation on building graphs](../api_docs/python/framework.md).
##### Why does `c = tf.matmul(a, b)` not execute the matrix multiplication immediately?
#### Why does `c = tf.matmul(a, b)` not execute the matrix multiplication immediately?
In the TensorFlow Python API, `a`, `b`, and `c` are
[`Tensor`](../api_docs/python/framework.md#Tensor) objects. A `Tensor` object is
@ -28,12 +35,12 @@ a dataflow graph. You then offload the computation of the entire dataflow graph
whole computation much more efficiently than executing the operations
one-by-one.
##### How are devices named?
#### How are devices named?
The supported device names are `"/device:CPU:0"` (or `"/cpu:0"`) for the CPU
device, and `"/device:GPU:i"` (or `"/gpu:i"`) for the *i*th GPU device.
##### How do I place operations on a particular device?
#### How do I place operations on a particular device?
To place a group of operations on a device, create them within a
[`with tf.device(name):`](../api_docs/python/framework.md#device) context. See
@ -43,17 +50,17 @@ TensorFlow assigns operations to devices, and the
[CIFAR-10 tutorial](../tutorials/deep_cnn/index.md) for an example model that
uses multiple GPUs.
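For example, a sketch that pins a few ops to the second GPU:
```python
with tf.device("/gpu:1"):
    a = tf.constant([1.0, 2.0])
    b = tf.constant([3.0, 4.0])
    c = a + b  # all three ops are placed on GPU 1
```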
##### What are the different types of tensors that are available?
#### What are the different types of tensors that are available?
TensorFlow supports a variety of different data types and tensor shapes. See the
[ranks, shapes, and types reference](dims_types.md) for more details.
#### Running a TensorFlow computation
### Running a TensorFlow computation <div class="md-anchor" id="AUTOGENERATED-running-a-tensorflow-computation">{#AUTOGENERATED-running-a-tensorflow-computation}</div>
See also the
[API documentation on running graphs](../api_docs/python/client.md).
##### What's the deal with feeding and placeholders?
#### What's the deal with feeding and placeholders?
Feeding is a mechanism in the TensorFlow Session API that allows you to
substitute different values for one or more tensors at run time. The `feed_dict`
@ -69,7 +76,7 @@ optionally allows you to constrain their shape as well. See the
example of how placeholders and feeding can be used to provide the training data
for a neural network.
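A minimal sketch (`sess` and the `images` array are assumed to exist):
```python
x = tf.placeholder(tf.float32, shape=[None, 784])
total = tf.reduce_sum(x)
# The placeholder must be fed, or run() raises an error.
print sess.run(total, feed_dict={x: images})
```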
##### What is the difference between `Session.run()` and `Tensor.eval()`?
#### What is the difference between `Session.run()` and `Tensor.eval()`?
If `t` is a [`Tensor`](../api_docs/python/framework.md#Tensor) object,
[`t.eval()`](../api_docs/python/framework.md#Tensor.eval) is shorthand for
@ -96,7 +103,7 @@ the `with` block. The context manager approach can lead to more concise code for
simple use cases (like unit tests); if your code deals with multiple graphs and
sessions, it may be more straightforward to make explicit calls to `Session.run()`.
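For example, the two calls below are interchangeable once a default session is installed:
```python
sess = tf.Session()
t = tf.constant(42.0)
with sess.as_default():
    assert t.eval() == sess.run(t)
```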
##### Do Sessions have a lifetime? What about intermediate tensors?
#### Do Sessions have a lifetime? What about intermediate tensors?
Sessions can own resources, such as
[variables](../api_docs/python/state_ops.md#Variable),
@ -110,13 +117,13 @@ The intermediate tensors that are created as part of a call to
[`Session.run()`](../api_docs/python/client.md) will be freed at or before the
end of the call.
##### Can I run distributed training on multiple computers?
#### Can I run distributed training on multiple computers?
The initial open-source release of TensorFlow supports multiple devices (CPUs
and GPUs) in a single computer. We are working on a distributed version as well:
if you are interested, please let us know so we can prioritize accordingly.
##### Does the runtime parallelize parts of graph execution?
#### Does the runtime parallelize parts of graph execution?
The TensorFlow runtime parallelizes graph execution across many different
dimensions:
@ -131,7 +138,7 @@ dimensions:
enables the runtime to get higher throughput, if a single step does not use
all of the resources in your computer.
##### Which client languages are supported in TensorFlow?
#### Which client languages are supported in TensorFlow?
TensorFlow is designed to support multiple client languages. Currently, the
best-supported client language is [Python](../api_docs/python/index.md). The
@ -145,7 +152,7 @@ interest. TensorFlow has a
that makes it easy to build a client in many different languages. We invite
contributions of new language bindings.
##### Does TensorFlow make use of all the devices (GPUs and CPUs) available on my machine?
#### Does TensorFlow make use of all the devices (GPUs and CPUs) available on my machine?
TensorFlow supports multiple GPUs and CPUs. See the how-to documentation on
[using GPUs with TensorFlow](../how_tos/using_gpu/index.md) for details of how
@ -156,10 +163,10 @@ uses multiple GPUs.
Note that TensorFlow only uses GPU devices with a compute capability greater
than 3.5.
##### Why does `Session.run()` hang when using a reader or a queue?
#### Why does `Session.run()` hang when using a reader or a queue?
The [reader](../api_docs/io_ops.md#ReaderBase) and
[queue](../api_docs/io_ops.md#QueueBase) classes provide special operations that
The [reader](../api_docs/python/io_ops.md#ReaderBase) and
[queue](../api_docs/python/io_ops.md#QueueBase) classes provide special operations that
can *block* until input (or free space in a bounded queue) becomes
available. These operations allow you to build sophisticated
[input pipelines](../how_tos/reading_data/index.md), at the cost of making the
@ -168,20 +175,20 @@ for
[using `QueueRunner` objects to drive queues and readers](../how_tos/reading_data/index.md#QueueRunners)
for more information on how to use them.
#### Variables
### Variables <div class="md-anchor" id="AUTOGENERATED-variables">{#AUTOGENERATED-variables}</div>
See also the how-to documentation on [variables](../how_tos/variables/index.md)
and [variable scopes](../how_tos/variable_scope/index.md), and
[the API documentation for variables](../api_docs/python/state_ops.md).
##### What is the lifetime of a variable?
#### What is the lifetime of a variable?
A variable is created when you first run the
[`tf.Variable.initializer`](../api_docs/python/state_ops.md#Variable.initializer)
operation for that variable in a session. It is destroyed when that
[`session is closed`](../api_docs/python/client.md#Session.close).
##### How do variables behave when they are concurrently accessed?
#### How do variables behave when they are concurrently accessed?
Variables allow concurrent read and write operations. The value read from a
variable may change if it is concurrently updated. By default, concurrent assignment
@ -189,12 +196,12 @@ operations to a variable are allowed to run with no mutual exclusion. To acquire
a lock when assigning to a variable, pass `use_locking=True` to
[`Variable.assign()`](../api_docs/python/state_ops.md#Variable.assign).
#### Tensor shapes
### Tensor shapes <div class="md-anchor" id="AUTOGENERATED-tensor-shapes">{#AUTOGENERATED-tensor-shapes}</div>
See also the
[`TensorShape` API documentation](../api_docs/python/framework.md#TensorShape).
##### How can I determine the shape of a tensor in Python?
#### How can I determine the shape of a tensor in Python?
In TensorFlow, a tensor has both a static (inferred) shape and a dynamic (true)
shape. The static shape can be read using the
@ -205,7 +212,7 @@ tensor, and may be
shape is not fully defined, the dynamic shape of a `Tensor` `t` can be
determined by evaluating [`tf.shape(t)`](../api_docs/python/array_ops.md#shape).
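A short sketch contrasting the two:
```python
x = tf.placeholder(tf.float32, shape=[None, 10])
print x.get_shape()  # static (inferred) shape; first dimension unknown
dyn = tf.shape(x)    # dynamic shape: a 1-D int32 tensor, known at run time
```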
##### What is the difference between `x.set_shape()` and `x = tf.reshape(x)`?
#### What is the difference between `x.set_shape()` and `x = tf.reshape(x)`?
The [`tf.Tensor.set_shape()`](../api_docs/python/framework.md) method updates
the static shape of a `Tensor` object, and it is typically used to provide
@ -215,7 +222,7 @@ change the dynamic shape of the tensor.
The [`tf.reshape()`](../api_docs/python/array_ops.md#reshape) operation creates
a new tensor with a different dynamic shape.
##### How do I build a graph that works with variable batch sizes?
#### How do I build a graph that works with variable batch sizes?
It is often useful to build a graph that works with variable batch sizes, for
example so that the same code can be used for (mini-)batch training, and
@ -241,31 +248,31 @@ to encode the batch size as a Python constant, but instead to use a symbolic
[`tf.placeholder(..., shape=[None, ...])`](../api_docs/python/io_ops.md#placeholder). The
`None` element of the shape corresponds to a variable-sized dimension.
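For example, a sketch that recovers the batch size at run time (assuming indexing a `tf.shape()` result is supported; `tf.slice` works as well):
```python
x = tf.placeholder(tf.float32, shape=[None, 784])
batch_size = tf.shape(x)[0]  # a scalar tensor, known only when x is fed
```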
#### TensorBoard
### TensorBoard <div class="md-anchor" id="AUTOGENERATED-tensorboard">{#AUTOGENERATED-tensorboard}</div>
See also the
[how-to documentation on TensorBoard](../how_tos/graph_viz/index.md).
##### What is the simplest way to send data to tensorboard? # TODO(danmane)
#### What is the simplest way to send data to tensorboard? # TODO(danmane)
Add summary_ops to your TensorFlow graph, and use a SummaryWriter to write all
of these summaries to a log directory. Then, start up TensorBoard using
<SOME_COMMAND> and pass the --logdir flag so that it points to your
log directory. For more details, see <YET_UNWRITTEN_TENSORBOARD_TUTORIAL>.
#### Extending TensorFlow
### Extending TensorFlow <div class="md-anchor" id="AUTOGENERATED-extending-tensorflow">{#AUTOGENERATED-extending-tensorflow}</div>
See also the how-to documentation for
[adding a new operation to TensorFlow](../how_tos/adding_an_op/index.md).
##### My data is in a custom format. How do I read it using TensorFlow?
#### My data is in a custom format. How do I read it using TensorFlow?
There are two main options for dealing with data in a custom format.
The easier option is to write parsing code in Python that transforms the data
into a numpy array, then feed a
[tf.placeholder()](../api_docs/python/io_ops.md#placeholder) a tensor with that
data. See the documentation on
into a numpy array, then feed a
[`tf.placeholder()`](../api_docs/python/io_ops.md#placeholder) a tensor with
that data. See the documentation on
[using placeholders for input](../how_tos/reading_data/index.md#Feeding) for
more details. This approach is easy to get up and running, but the parsing can
be a performance bottleneck.
@ -276,7 +283,7 @@ data format. The
[guide to handling new data formats](../how_tos/new_data_formats/index.md) has
more information about the steps for doing this.
##### How do I define an operation that takes a variable number of inputs?
#### How do I define an operation that takes a variable number of inputs?
The TensorFlow op registration mechanism allows you to define inputs that are a
single tensor, a list of tensors with the same type (for example when adding
@ -286,15 +293,15 @@ how-to documentation for
[adding an op with a list of inputs or outputs](../how_tos/adding_an_op/index.md#list-input-output)
for more details of how to define these different input types.
#### Miscellaneous
### Miscellaneous <div class="md-anchor" id="AUTOGENERATED-miscellaneous">{#AUTOGENERATED-miscellaneous}</div>
##### Does TensorFlow work with Python 3?
#### Does TensorFlow work with Python 3?
We have only tested TensorFlow using Python 2.7. We are aware of some changes
that will be required for Python 3 compatibility, and welcome contributions
towards this effort.
##### What is TensorFlow's coding style convention?
#### What is TensorFlow's coding style convention?
The TensorFlow Python API adheres to the
[PEP8](https://www.python.org/dev/peps/pep-0008/) conventions.<sup>*</sup> In

View File

@ -1,5 +1,7 @@
# Glossary
TODO(someone): Fix several broken links in Glossary
**Broadcasting operation**
An operation that uses [numpy-style broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

View File

@ -12,25 +12,33 @@ implementation can be found in our white paper:
If you use TensorFlow in your research and would like to cite the TensorFlow
system, we suggest you cite the paper above. You can use this [BibTeX
entry](../extras/tensorflow-whitepaper2015.bib). As the project progresses, we
entry](bib.md). As the project progresses, we
may update the suggested citation with new papers.
## Community
TODO(rajatmonga): Write this!
### Discuss
* NO - google group
* YES, ASAP - internal support mailing list
* YES, ASAP - stack overflow presence
* SOON - slack
* GitHub: <https://github.com/tensorflow/tensorflow>
* Stack Overflow: <https://stackoverflow.com/questions/tagged/tensorflow>
* [TensorFlow discuss mailing list](
https://groups.google.com/forum/#!forum/tensorflow-discuss)
### Report Issues
* [TensorFlow issues](https://github.com/tensorflow/tensorflow/issues)
### Development
* If you are interested in contributing to TensorFlow please
[review the contributing guide](
https://github.com/tensorflow/tensorflow/blob/master/CONTRIBUTING.md).
<div class='sections-order' style="display: none;">
<!--
<!-- bib.md -->
<!-- uses.md -->
<!-- faq.md -->
<!-- glossary.md -->

View File

@ -36,7 +36,3 @@ Listed below are some of the many uses of TensorFlow.
* **Description**: On-device computer vision model to do optical character recognition to enable real-time translation.
* **More info**: [Google Research blog post](http://googleresearch.blogspot.com/2015/07/how-google-translate-squeezes-deep.html)
}
* TODO(opensource): Add several other research projects
* TODO(opensource): Pointer Sets?
* TODO(opensource): Others

View File

@ -198,7 +198,8 @@ loss and all these weight decay terms, as returned by the `loss()` function.
We visualize it in TensorBoard with a [scalar_summary](../../api_docs/python/train.md?#scalar_summary):
[![CIFAR-10 Loss](./cifar_loss.png "CIFAR-10 Total Loss")](#TODO(migmigmig)#TODO(danmane))
![CIFAR-10 Loss](./cifar_loss.png "CIFAR-10 Total Loss")
###### [View this TensorBoard live! (Chrome/FF)](/tensorboard/cifar.html)
We train the model using standard
[gradient descent](https://en.wikipedia.org/wiki/Gradient_descent)
@ -207,7 +208,8 @@ with a learning rate that
[exponentially decays](../../api_docs/python/train.md#exponential_decay)
over time.
[![CIFAR-10 Learning Rate Decay](./cifar_lr_decay.png "CIFAR-10 Learning Rate Decay")](#TODO(migmigmig)#TODO(danmane))
![CIFAR-10 Learning Rate Decay](./cifar_lr_decay.png "CIFAR-10 Learning Rate Decay")
###### [View this TensorBoard live! (Chrome/FF)](/tensorboard/cifar.html)
The `train()` function adds the operations needed to minimize the objective by
calculating the gradient and updating the learned variables (see

View File

@ -90,15 +90,6 @@ stuff.
[View Tutorial](mnist/download/index.md)
## Sparse Linear Regression
In many practical machine learning settings we have a large number of input
features, only very few of which are active for any given example. TensorFlow
has great tools for learning predictive models in these settings.
COMING SOON
## Visual Object Recognition
We will be releasing our state-of-the-art Inception object recognition model,
@ -116,14 +107,6 @@ visual hallucination software.
COMING SOON
## Automated Image Captioning
TODO(vinyals): Write me, three lines max.
COMING SOON
<div class='sections-order' style="display: none;">
<!--
<!-- mnist/beginners/index.md -->

View File

@ -212,19 +212,12 @@ from Python, TensorFlow lets us describe a graph of interacting operations that
run entirely outside Python. (Approaches like this can be seen in a few
machine learning libraries.)
To run computations, TensorFlow needs to connect to its backend. This connection
is called a `Session`. To use TensorFlow, we need to import it and create a
session.
To use TensorFlow, we need to import it.
```python
import tensorflow as tf
sess = tf.InteractiveSession()
```
(Using an `InteractiveSession` makes TensorFlow a bit more flexible about how
you structure your code. In particular, it's helpful for work in interactive
contexts like iPython.)
We describe these interacting operations by manipulating symbolic variables.
Let's create one:
@ -350,11 +343,19 @@ implement backpropagation and gradient descent. Then it gives you back a
single operation which, when run, will do a step of gradient descent training,
slightly tweaking your variables to reduce the cost.
Now we have our model set up to train. But before we start, we need to
initialize the variables we created:
Now we have our model set up to train. One last thing before we launch it,
we have to add an operation to initialize the variables we created:
```python
tf.initialize_all_variables().run()
init = tf.initialize_all_variables()
```
We can now launch the model in a `Session`, and run the operation that
initializes the variables:
```python
sess = tf.Session()
sess.run(init)
```
Let's train -- we'll run the training step 1000 times!
@ -362,7 +363,7 @@ Let's train -- we'll run the training step 1000 times!
```python
for i in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
train_step.run({x: batch_xs, y_: batch_ys})
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
```
Each step of the loop, we get a "batch" of one hundred random data points from
@ -403,7 +404,7 @@ accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
Finally, we ask for our accuracy on our test data.
```python
print accuracy.eval({x: mnist.test.images, y_: mnist.test.labels})
print sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})
```
This should be about 91%.

View File

@ -32,18 +32,14 @@ testing sets as NumPy arrays.
It also provides a function for iterating through data minibatches, which we
will use below.
### Start TensorFlow Session
### Start TensorFlow InteractiveSession
TensorFlow relies on a highly efficient C++ backend to do its computation. The
connection to this backend is called a session. We will need to create a session
before we can do any computation.
connection to this backend is called a session. The common usage for TensorFlow
programs is to first create a graph and then launch it in a session.
```python
import tensorflow as tf
sess = tf.InteractiveSession()
```
Using an `InteractiveSession` makes TensorFlow more flexible about how you
Here we instead use the convenience `InteractiveSession` class, which
makes TensorFlow more flexible about how you
structure your code.
It allows you to interleave operations which build a
[computation graph](../../../get_started/basic_usage.md#the-computation-graph)
@ -54,6 +50,11 @@ If you are not using an `InteractiveSession`, then you should build
the entire computation graph before starting a session and [launching the
graph](../../../get_started/basic_usage.md#launching-the-graph-in-a-session).
```python
import tensorflow as tf
sess = tf.InteractiveSession()
```
#### Computation Graph
To do efficient numerical computing in Python, we typically use libraries like

View File

@ -373,14 +373,14 @@ less time). For example, the naive code we used in this tutorial would suffer
compromised speed because we use Python for reading and feeding data items --
each of which require very little work on the TensorFlow back-end. If you find
your model is seriously bottlenecked on input data, you may want to implement a
custom data reader for your problem, as described in [New Data
Formats](../how_tos/new_data_formats/index.md). For the case of Skip-Gram
custom data reader for your problem, as described in
[New Data Formats](../../how_tos/new_data_formats/index.md). For the case of Skip-Gram
modeling, we've actually already done this for you as an example in
[tensorflow/models/embedding/word2vec.py](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/models/embedding/word2vec.py).
If your model is no longer I/O bound but you want still more performance, you
can take things further by writing your own TensorFlow Ops, as described in
[Adding a New Op](../how_tos/adding_an_op/index.md). Again we've provided an
[Adding a New Op](../../how_tos/adding_an_op/index.md). Again we've provided an
example of this for the Skip-Gram case
[tensorflow/models/embedding/word2vec_optimized.py](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/models/embedding/word2vec_optimized.py).
Feel free to benchmark these against each other to measure performance

View File

@ -1,3 +1,5 @@
import tensorflow.python.platform
import collections
import math
import numpy as np
@ -216,4 +218,3 @@ try:
except ImportError:
print "Please install sklearn and matplotlib to visualize embeddings."

View File

@ -31,6 +31,26 @@ py_binary(
],
)
py_test(
name = "word2vec_test",
size = "small",
srcs = ["word2vec_test.py"],
deps = [
":word2vec",
"//tensorflow:tensorflow_py",
],
)
py_test(
name = "word2vec_optimized_test",
size = "small",
srcs = ["word2vec_optimized_test.py"],
deps = [
":word2vec_optimized",
"//tensorflow:tensorflow_py",
],
)
cc_library(
name = "word2vec_ops",
srcs = [

View File

@ -402,7 +402,7 @@ class Word2Vec(object):
if now - last_checkpoint_time > opts.checkpoint_interval:
self.saver.save(self._session,
opts.save_path + "model",
global_step=step)
global_step=step.astype(int))
last_checkpoint_time = now
if epoch != initial_epoch:
break
@ -482,6 +482,9 @@ def _start_shell(local_ns=None):
def main(_):
"""Train a word2vec model."""
if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
print "--train_data --eval_data and --save_path must be specified."
sys.exit(1)
opts = Options()
with tf.Graph().as_default(), tf.Session() as session:
model = Word2Vec(opts, session)

View File

@ -386,6 +386,9 @@ def _start_shell(local_ns=None):
def main(_):
"""Train a word2vec model."""
if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
print "--train_data --eval_data and --save_path must be specified."
sys.exit(1)
opts = Options()
with tf.Graph().as_default(), tf.Session() as session:
model = Word2Vec(opts, session)

View File

@ -0,0 +1,44 @@
"""Tests for word2vec_optimized module."""
import os
import tensorflow.python.platform
import tensorflow as tf
from tensorflow.models.embedding import word2vec_optimized as word2vec_optimized
flags = tf.app.flags
FLAGS = flags.FLAGS
class Word2VecTest(tf.test.TestCase):
def setUp(self):
FLAGS.train_data = os.path.join(self.get_temp_dir(), "test-text.txt")
FLAGS.eval_data = os.path.join(self.get_temp_dir(), "eval-text.txt")
FLAGS.save_path = self.get_temp_dir()
with open(FLAGS.train_data, "w") as f:
f.write(
"""alice was beginning to get very tired of sitting by her sister on
the bank, and of having nothing to do: once or twice she had peeped
into the book her sister was reading, but it had no pictures or
conversations in it, 'and what is the use of a book,' thought alice
'without pictures or conversations?' So she was considering in her own
mind (as well as she could, for the hot day made her feel very sleepy
and stupid), whether the pleasure of making a daisy-chain would be
worth the trouble of getting up and picking the daisies, when suddenly
a White rabbit with pink eyes ran close by her.\n""")
with open(FLAGS.eval_data, "w") as f:
f.write("alice she rabbit once\n")
def testWord2VecOptimized(self):
FLAGS.batch_size = 5
FLAGS.num_neg_samples = 10
FLAGS.epochs_to_train = 1
FLAGS.min_count = 0
word2vec_optimized.main([])
if __name__ == "__main__":
tf.test.main()

View File

@ -0,0 +1,44 @@
"""Tests for word2vec module."""
import os
import tensorflow.python.platform
import tensorflow as tf
from tensorflow.models.embedding import word2vec as word2vec
flags = tf.app.flags
FLAGS = flags.FLAGS
class Word2VecTest(tf.test.TestCase):
def setUp(self):
FLAGS.train_data = os.path.join(self.get_temp_dir(), "test-text.txt")
FLAGS.eval_data = os.path.join(self.get_temp_dir(), "eval-text.txt")
FLAGS.save_path = self.get_temp_dir()
with open(FLAGS.train_data, "w") as f:
f.write(
"""alice was beginning to get very tired of sitting by her sister on
the bank, and of having nothing to do: once or twice she had peeped
into the book her sister was reading, but it had no pictures or
conversations in it, 'and what is the use of a book,' thought alice
'without pictures or conversations?' So she was considering in her own
mind (as well as she could, for the hot day made her feel very sleepy
and stupid), whether the pleasure of making a daisy-chain would be
worth the trouble of getting up and picking the daisies, when suddenly
a White rabbit with pink eyes ran close by her.\n""")
with open(FLAGS.eval_data, "w") as f:
f.write("alice she rabbit once\n")
def testWord2Vec(self):
FLAGS.batch_size = 5
FLAGS.num_neg_samples = 10
FLAGS.epochs_to_train = 1
FLAGS.min_count = 0
word2vec.main([])
if __name__ == "__main__":
tf.test.main()

View File

@ -90,7 +90,7 @@ class Device(object):
for y in splits:
ly = len(y)
if y:
# NOTE(mdevin): we use the property getters here.
# NOTE(touts): we use the property getters here.
if ly == 2 and y[0] == "job":
self.job = y[1]
elif ly == 2 and y[0] == "replica":

View File

@ -60,18 +60,18 @@ class Index(Document):
print >>f, ""
print >>f, "# TensorFlow Python reference documentation"
print >>f, ""
fullname_f = lambda name: self._members[name][0]
anchor_f = lambda name: _get_anchor(self._module_to_name, fullname_f(name))
for filename, library in self._filename_to_library_map:
per_symbol_links = []
for name in sorted(library.mentioned):
if name in self._members:
fullname, member = self._members[name]
anchor = _get_anchor(self._module_to_name, fullname)
prefix = "class " * inspect.isclass(member)
per_symbol_links.append("[%s%s](%s#%s)" %
(prefix, name, filename, anchor))
if per_symbol_links:
print >>f, "* <b>[%s](%s)</b>: %s" % (library.title, filename,
",\n ".join(per_symbol_links))
sorted_names = sorted(library.mentioned, key=str.lower)
member_names = [n for n in sorted_names if n in self._members]
links = ["[`%s`](%s#%s)" % (name, filename, anchor_f(name))
for name in member_names]
if links:
print >>f, "* **[%s](%s)**:" % (library.title, filename)
for link in links:
print >>f, " * %s" % link
print >>f, ""
# actually include the files right here
@ -146,7 +146,7 @@ class Library(Document):
members,
documented,
exclude_symbols=(),
catch_all=False):
prefix=None):
"""Creates a new Library.
Args:
@ -157,6 +157,7 @@ class Library(Document):
members: Dictionary mapping member name to (fullname, member).
documented: Set of documented names to update.
exclude_symbols: A list of specific symbols to exclude.
prefix: A string to include at the beginning of the page.
"""
self._title = title
self._module = module
@ -166,6 +167,7 @@ class Library(Document):
documented.update(exclude_symbols)
self._documented = documented
self._mentioned = set()
self._prefix = prefix or ""
@property
def title(self):
@ -400,7 +402,7 @@ class Library(Document):
# defined by the class itself (not inherited). If NO methods were
# described, describe all methods.
#
# TODO(mdevin): when all methods have been categorized make it an error
# TODO(touts): when all methods have been categorized make it an error
# if some methods are not categorized.
any_method_called_out = (len(methods) != num_methods)
if any_method_called_out:
@ -429,9 +431,11 @@ class Library(Document):
"""
print >>f, "<!-- This file is machine generated: DO NOT EDIT! -->"
print >>f, ""
# TODO(mdevin): Do not insert these. Let the doc writer put them in
# TODO(touts): Do not insert these. Let the doc writer put them in
# the module docstring explicitly.
print >>f, "#", self._title
if self._prefix:
print >>f, self._prefix
print >>f, "[TOC]"
print >>f, ""
if self._module is not None:

View File

@ -43,7 +43,8 @@ class OpError(Exception):
or `Recv` op, there will be no corresponding
[`Operation`](framework.md#Operation) object. In that case, this
will return `None`, and you should instead use the
[`node_def`](OpError.node_def) to discover information about the op.
[`OpError.node_def`](#OpError.node_def) to discover information about the
op.
Returns:
The `Operation` that failed, or None.
@ -293,7 +294,7 @@ class AbortedError(OpError):
For example, running a [`queue.enqueue()`](io_ops.md#QueueBase.enqueue)
operation may raise `AbortedError` if a
[`queue.close()`](io_ops.md@QueueBase.close) operation previously ran.
[`queue.close()`](io_ops.md#QueueBase.close) operation previously ran.
@@__init__
"""

View File

@ -1,5 +1,5 @@
# pylint: disable=wildcard-import,unused-import,g-bad-import-order,line-too-long
"""Import names from the framework library.
"""Classes and functions for building TensorFlow graphs.
## Core graph data structures

View File

@ -18,6 +18,12 @@ tf.flags.DEFINE_boolean("print_hidden_regex", False,
FLAGS = tf.flags.FLAGS
PREFIX_TEXT = """
Note: Functions taking `Tensor` arguments can also take anything
accepted by [`tf.convert_to_tensor`](framework.md#convert_to_tensor).
"""
def get_module_to_name():
return {tf: 'tf',
tf.errors: 'tf.errors',
@ -42,20 +48,24 @@ def all_libraries(module_to_name, members, documented):
return [
# Splits of module 'tf'.
library("framework", "Building Graphs", framework_lib),
library("constant_op", "Constants, Sequences, and Random Values"),
library("state_ops", "Variables"),
library("constant_op", "Constants, Sequences, and Random Values",
prefix=PREFIX_TEXT),
library("state_ops", "Variables", prefix=PREFIX_TEXT),
library("array_ops", "Tensor Transformations",
exclude_symbols=["list_diff"]),
exclude_symbols=["list_diff"], prefix=PREFIX_TEXT),
library("math_ops", "Math",
exclude_symbols=["sparse_matmul", "arg_min", "arg_max",
"lin_space", "sparse_segment_mean_grad"]),
library("control_flow_ops", "Control Flow"),
library("image", "Images", tf.image, exclude_symbols=["ResizeMethod"]),
library("sparse_ops", "Sparse Tensors"),
"lin_space", "sparse_segment_mean_grad"],
prefix=PREFIX_TEXT),
library("control_flow_ops", "Control Flow", prefix=PREFIX_TEXT),
library("image", "Images", tf.image, exclude_symbols=["ResizeMethod"],
prefix=PREFIX_TEXT),
library("sparse_ops", "Sparse Tensors", prefix=PREFIX_TEXT),
library("io_ops", "Inputs and Readers",
exclude_symbols=["LookupTableBase", "HashTable",
"initialize_all_tables",
"string_to_hash_bucket"]),
"string_to_hash_bucket"],
prefix=PREFIX_TEXT),
library("python_io", "Data IO (Python functions)", tf.python_io),
library("nn", "Neural Network", tf.nn,
exclude_symbols=["deconv2d", "conv2d_backprop_input",
@@ -66,7 +76,8 @@ def all_libraries(module_to_name, members, documented):
"xw_plus_b", "relu_layer", "lrn",
"batch_norm_with_global_normalization",
"batch_norm_with_global_normalization_grad",
"all_candidate_sampler"]),
"all_candidate_sampler"],
prefix=PREFIX_TEXT),
library('client', "Running Graphs", client_lib,
exclude_symbols=["InteractiveSession"]),
library("train", "Training", tf.train,

View File

@@ -1604,6 +1604,10 @@ class Graph(object):
def as_graph_def(self, from_version=None):
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using [`import_graph_def()`](#import_graph_def)) or used with the
[C++ Session API](../cc/index.md).
This method is thread-safe.
Args:
@@ -1612,8 +1616,7 @@ class Graph(object):
its `version` property had the given value.
Returns:
A
[`GraphDef`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/framework/graph.proto)
A [`GraphDef`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/framework/graph.proto)
protocol buffer.
"""
graph = graph_pb2.GraphDef()
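A minimal usage sketch (graph contents arbitrary):

```python
# Serializing a graph to a GraphDef protocol buffer.
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    tf.constant(42, name="answer")
graph_def = g.as_graph_def()   # a GraphDef proto
print graph_def                # text-format listing of the nodes
```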
@@ -2116,7 +2119,7 @@ class Graph(object):
self._names_in_use[name] = 1
return name
# TODO(mdevin): remove
# TODO(touts): remove
def _plain_name(self, name):
"""Return the fully scoped 'name'.

View File

@@ -39,7 +39,7 @@ if _FAST_TENSOR_UTIL_AVAILABLE:
fast_tensor_util.AppendUInt8ArrayToTensorProto,
types.qint32.as_numpy_dtype:
fast_tensor_util.AppendInt32ArrayToTensorProto,
# NOTE(mdevin): Intentionally no way to feed a DT_BFLOAT16.
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
}
else:
@@ -81,7 +81,7 @@ else:
types.qint8.as_numpy_dtype: SlowAppendIntArrayToTensorProto,
types.quint8.as_numpy_dtype: SlowAppendIntArrayToTensorProto,
types.qint32.as_numpy_dtype: SlowAppendIntArrayToTensorProto,
# NOTE(mdevin): Intentionally no way to feed a DT_BFLOAT16.
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
}
@@ -472,7 +472,7 @@ def ConstantValue(tensor):
Raises:
TypeError: if tensor is not an ops.Tensor.
"""
# TODO(mdevin): Support Variables?
# TODO(touts): Support Variables?
if not isinstance(tensor, ops.Tensor):
raise TypeError("tensor is not a Tensor")
if tensor.op.type == "Const":
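A hedged sketch of how this helper behaves (module path as in this file; values arbitrary):

```python
# ConstantValue returns the numpy value of a Const tensor, else None.
import tensorflow as tf
from tensorflow.python.framework import tensor_util

c = tf.constant([1, 2, 3])
print tensor_util.ConstantValue(c)       # => [1 2 3]
print tensor_util.ConstantValue(c * 2)   # => None (op type is not "Const")
```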

View File

@@ -332,7 +332,7 @@ _NP_TO_TF = frozenset([
(_np_qint8, qint8),
(_np_quint8, quint8),
(_np_qint32, qint32),
# NOTE(mdevin): Intentionally no way to feed a DT_BFLOAT16.
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
])
_TF_TO_NP = {
types_pb2.DT_FLOAT: np.float32,
@@ -341,7 +341,7 @@ _TF_TO_NP = {
types_pb2.DT_UINT8: np.uint8,
types_pb2.DT_INT16: np.int16,
types_pb2.DT_INT8: np.int8,
# NOTE(mdevin): For strings we use np.object as it supports variable length
# NOTE(touts): For strings we use np.object as it supports variable length
# strings.
types_pb2.DT_STRING: np.object,
types_pb2.DT_COMPLEX64: np.complex64,

View File

@@ -34,7 +34,7 @@ class TypesTest(test_util.TensorFlowTestCase):
numpy_dtype = dtype.as_numpy_dtype
_ = np.empty((1, 1, 1, 1), dtype=numpy_dtype)
if dtype.base_dtype != types.bfloat16:
# NOTE(mdevin): Intentionally no way to feed a DT_BFLOAT16.
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
self.assertEqual(
types.as_dtype(datatype_enum).base_dtype, types.as_dtype(numpy_dtype))

View File

@@ -98,7 +98,7 @@ class PadOpTest(tf.test.TestCase):
tf.constant([-1, 0], shape=[1, 2]))
def testIntTypes(self):
# TODO(mdevin): Figure out why the padding tests do not work on GPU
# TODO(touts): Figure out why the padding tests do not work on GPU
# for int types and rank > 2.
for t in [np.int32, np.int64]:
self._testPad((np.random.rand(4, 3, 3) * 100).astype(t),

View File

@@ -33,7 +33,7 @@ class VariableOpTest(tf.test.TestCase):
x = vals.astype(dtype)
tftype = _NP_TO_TF[dtype]
self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=False))
# NOTE(mdevin): the GPU test should pass for all types, whether the
# NOTE(touts): the GPU test should pass for all types, whether the
# Variable op has an implementation for that type on GPU as we expect
# that Variable and Assign have GPU implementations for matching tf.
self.assertAllEqual(x, self._initFetch(x, tftype, use_gpu=True))

View File

@@ -1192,7 +1192,7 @@ def _GroupControlDeps(dev, deps, name=None):
return no_op(name=name)
# TODO(mdevin): Accept "inputs" as a list.
# TODO(touts): Accept "inputs" as a list.
def group(*inputs, **kwargs):
"""Create an op that groups multiple operations.
@@ -1217,7 +1217,7 @@ def group(*inputs, **kwargs):
if kwargs:
raise ValueError("Unknown keyword arguments: " + ", ".join(kwargs.keys()))
if not inputs:
# TODO(mdevin): Would make sense to return a NoOp.
# TODO(touts): Would make sense to return a NoOp.
raise ValueError("No inputs provided")
with ops.op_scope(inputs, name, "group_deps") as name:
# Sorts *inputs according to their devices.
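A usage sketch (variables and values arbitrary): `group()` returns a single op with no output that runs only after all of its inputs have run.

```python
import tensorflow as tf

v = tf.Variable(0)
w = tf.Variable(0)
updates = tf.group(v.assign(1), w.assign(2))
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    sess.run(updates)           # runs both assigns as one op
    print sess.run([v, w])      # => [1, 2]
```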

View File

@@ -37,7 +37,7 @@ def _OpsBetween(graph, to_ops, from_ops):
The list of operations between "from_ops" and "to_ops", sorted by
decreasing operation id. This list contains all elements of to_ops.
TODO(mdevin): Think about returning an empty list if from_ops are not
TODO(touts): Think about returning an empty list if from_ops are not
reachable from to_ops. Presently it returns to_ops in that case.
"""
# List of booleans, indexed by operation id, indicating if

View File

@@ -1,4 +1,5 @@
"""## Encoding and Decoding.
# pylint: disable=g-short-docstring-punctuation
"""## Encoding and Decoding
TensorFlow provides Ops to decode and encode JPEG and PNG formats. Encoded
images are represented by scalar string Tensors, decoded images by 3-D uint8
@@ -17,7 +18,7 @@ presently only support RGB, HSV, and GrayScale.
@@decode_png
@@encode_png
## Resizing.
## Resizing
The resizing Ops accept input images as tensors of several types. They always
output resized images as float32 tensors.
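For illustration, a hedged sketch of the decode-then-resize flow (the placeholder feeding and the target size are arbitrary; `resize_bilinear` expects a 4-D batch):

```python
import tensorflow as tf

encoded = tf.placeholder(tf.string)                    # a JPEG-encoded string
image = tf.image.decode_jpeg(encoded, channels=3)      # 3-D uint8 tensor
batch = tf.expand_dims(tf.cast(image, tf.float32), 0)  # add batch dimension
resized = tf.image.resize_bilinear(batch, [128, 128])  # float32 output
```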
@@ -51,7 +52,7 @@ images from the Queue.</i>
@@resize_nearest_neighbor
## Cropping.
## Cropping
@@resize_image_with_crop_or_pad
@@ -60,7 +61,7 @@ images from the Queue.</i>
@@random_crop
@@extract_glimpse
## Flipping and Transposing.
## Flipping and Transposing
@@flip_up_down
@@random_flip_up_down
@@ -70,7 +71,7 @@ images from the Queue.</i>
@@transpose_image
## Image Adjustments.
## Image Adjustments
TensorFlow provides functions to adjust images in various ways: brightness,
contrast, hue, and saturation. Each adjustment can be done with predefined

View File

@@ -1,3 +1,4 @@
# pylint: disable=line-too-long
"""## Placeholders
TensorFlow provides a placeholder operation that must be fed with data
@@ -26,6 +27,18 @@ formats into tensors.
@@decode_csv
@@decode_raw
- - -
### Example protocol buffer
TensorFlow's [recommended format for training
examples](../../how_tos/reading_data/index.md#standard-tensorflow-format)
is serialized `Example` protocol buffers, [described
here](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/example/example.proto).
They contain `Features`, [described
here](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/example/feature.proto).
@@parse_example
@@parse_single_example
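A minimal sketch of building such a record with the generated protobuf classes (feature names arbitrary):

```python
from tensorflow.core.example import example_pb2

ex = example_pb2.Example()
ex.features.feature["age"].int64_list.value.append(29)
ex.features.feature["name"].bytes_list.value.append("alice")
serialized = ex.SerializeToString()   # feed this to parse_example
```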
@@ -49,7 +62,7 @@ Queues](../../how_tos/threading_and_queues/index.md).
## Input pipeline
TensorFlow functions for setting up an input-prefetching pipeline.
Please see the [reading data how-to](../../how_tos/reading_data.md)
Please see the [reading data how-to](../../how_tos/reading_data/index.md)
for context.
### Beginning of an input pipeline

View File

@@ -289,7 +289,7 @@ def cast(x, dtype, name=None):
values_cast = cast(x.values, dtype, name=name)
return ops.SparseTensor(x.indices, values_cast, x.shape)
else:
# TODO(mdevin): Handle what Josh said.
# TODO(touts): Handle what Josh said.
#
# Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
# allows some conversions that cast() can't do, e.g. casting numbers to
@@ -801,7 +801,7 @@ def _as_indexed_slices(x):
Raises:
TypeError: If 'x' is not a Tensor or an IndexedSlices object.
"""
# TODO(mdevin): op_scope
# TODO(touts): op_scope
if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):
raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
if isinstance(x, ops.IndexedSlices):

View File

@@ -143,7 +143,7 @@ only considering a small randomly-chosen subset of contrastive classes
(called candidates) for each batch of training examples.
See our [Candidate Sampling Algorithms Reference]
(http://www.tensorflow.org/extras/candidate_sampling.pdf)
(../../extras/candidate_sampling.pdf)
### Sampled Loss Functions

View File

@@ -28,96 +28,90 @@ def parse_example(serialized,
dense_defaults=None,
dense_shapes=None,
name="ParseExample"):
"""Parse Example protos.
"""Parses `Example` protos.
Args:
serialized: string vector, a batch of binary serialized Example protos.
names: A string vector, the names of the serialized protos.
"names" may contain, e.g., table key (descriptive) names for the
corresponding serialized protos. These are purely useful for debugging
purposes, and the presence of values here has no effect on the output.
"names" may be an empty vector, if no names are available.
If non-empty, this vector must be the same length as "serialized".
sparse_keys: A string list of keys in the Examples' features.
These keys are associated with sparse values.
sparse_types: A list of DTypes.
This list's length must match that of sparse_keys. Currently
parse_example supports tf.float32 (FloatList), tf.int64 (Int64List),
and tf.string (BytesList).
dense_keys: A string list of keys in the Examples' features.
These keys are associated with dense values.
dense_types: A list of DTypes.
This list's length must match that of dense_keys. Currently
parse_example supports tf.float32 (FloatList), tf.int64 (Int64List),
and tf.string (BytesList).
dense_defaults: A dict of {key:Tensor} (some may be missing).
The keys of the dict must match the dense_keys of the feature.
If a key is not present in this dictionary, the corresponding dense
Feature is required in all elements of serialized.
dense_shapes: A list of tuples.
Entries provide the shape of data in each dense Feature in features.
The length of dense_shapes must be the same as the length of dense_keys.
The number of elements in the Feature corresponding to dense_key[j]
must always have np.prod(dense_shapes[j]) entries.
If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output
Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN):
The dense outputs are just the inputs row-stacked by batch.
name: (Optional) Name of Op in the graph.
Parses a number of serialized [`Example`]
(https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/example/example.proto)
protos given in `serialized`.
Returns:
A dictionary mapping keys to Tensors and SparseTensors.
`names` may contain descriptive names for the corresponding serialized protos.
These may be useful for debugging purposes, but they have no effect on the
output. If not `None`, `names` must be the same length as `serialized`.
The key dense_keys[j] is mapped to a tensor of type dense_types[j] and
of shape (serialized.size(),) + dense_shapes[j] (i.e., the dense outputs are
inputs, reshaped in row-major format and then row-stacked by batch).
This op parses serialized examples into a dictionary mapping keys to `Tensor`
and `SparseTensor` objects respectively, depending on whether the keys appear
in `dense_keys` or `sparse_keys`.
The key sparse_keys[j] is mapped to a SparseTensor of type sparse_types[j].
The SparseTensor represents a ragged matrix. Its indices are [batch, index]
where "batch" is is the batch entry the value is from, and "index" is the
value's index in the list of values associated with that feature
and example. For example, if one expects a tf.float32 sparse feature "ft"
and three serialized examples are provided:
The key `dense_keys[j]` is mapped to a `Tensor` of type `dense_types[j]` and
of shape `(serialized.size(),) + dense_shapes[j]`.
serialized = [
features:
{ feature: [ key: { "ft" value: float_list: { value: [1.0, 2.0] } } ] },
features:
{ feature: [] },
features:
{ feature: [ key: { "ft" value: float_list: { value: [3.0] } } ] }
]
`dense_defaults` provides defaults for values referenced using `dense_keys`.
If a key is not present in this dictionary, the corresponding dense `Feature`
is required in all elements of `serialized`.
then the output will look like:
`dense_shapes[j]` provides the shape of each `Feature` entry referenced by
`dense_keys[j]`. The `Feature` corresponding to `dense_keys[j]` must always
have `np.prod(dense_shapes[j])` entries. The returned `Tensor` for
`dense_keys[j]` has shape `[N] + dense_shapes[j]`, where
`N` is the number of `Example`s in `serialized`.
{"ft": SparseTensor(indices=[[0, 0], [0, 1], [2, 0]],
values=[1.0, 2.0, 3.0],
shape=(3, 2)) }
The key `sparse_keys[j]` is mapped to a `SparseTensor` of type
`sparse_types[j]`. The `SparseTensor` represents a ragged matrix.
Its indices are `[batch, index]` where `batch` is the batch entry the value
is from, and `index` is the value's index in the list of values associated
with that feature and example.
Raises:
ValueError: If sparse and dense keys intersect, or input lengths do not
match up for sparse_* (similarly for dense_*).
TypeError: If an input is malformed.
Examples:
Example input, format, and output: Just Sparse Inputs
================================================
For example, if one expects a `tf.float32` sparse feature `ft` and three
serialized `Example`s are provided:
Given two brain.Example input protos:
```
serialized = [
features:
{ feature: [ key: { "ft" value: float_list: { value: [1.0, 2.0] } } ] },
features:
{ feature: [] },
features:
{ feature: [ key: { "ft" value: float_list: { value: [3.0] } } ] }
]
```
serialized: // serialized versions of the protos below
[features: {
then the output will look like:
```
{"ft": SparseTensor(indices=[[0, 0], [0, 1], [2, 0]],
values=[1.0, 2.0, 3.0],
shape=(3, 2)) }
```
Given two `Example` input protos in `serialized`:
```
[
features: {
feature: { key: "kw" value: { bytes_list: { value: [ "knit", "big" ] } } }
feature: { key: "gps" value: { float_list: { value: [] } } }
},
features: {
},
features: {
feature: { key: "kw" value: { bytes_list: { value: [ "emmy" ] } } }
feature: { key: "dank" value: { int64_list: { value: [ 42 ] } } }
feature: { key: "gps" value: { } }
}]
names: ["input0", "input1"],
sparse_keys: ["kw", "dank", "gps"]
sparse_types: [DT_STRING, DT_INT64, DT_FLOAT]
}
]
```
Then the expected output is a dictionary:
And arguments
```
names: ["input0", "input1"],
sparse_keys: ["kw", "dank", "gps"]
sparse_types: [DT_STRING, DT_INT64, DT_FLOAT]
```
Then the output is a dictionary:
```python
{
"kw": SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
@@ -132,63 +126,71 @@ def parse_example(serialized,
values=[],
shape=[2, 0]),
}
```
For dense results in two serialized `Example`s:
Example input, format, and output: Dense Inputs (without defaults)
==================================================================
Given two brain.Example input protos:
serialized: // serialized versions of the protos below
[features: {
```
[
features: {
feature: { key: "age" value: { int64_list: { value: [ 0 ] } } }
feature: { key: "gender" value: { bytes_list: { value: [ "f" ] } } }
},
features: {
feature: { key: "age" value: { int64_list: { value: [] } } }
feature: { key: "gender" value: { bytes_list: { value: [ "f" ] } } }
}]
}
]
```
We can use arguments:
```
names: ["input0", "input1"],
dense_keys: np.array(["age", "gender"])
dense_types: [tf.int64, tf.string]
dense_keys: np.array(["age", "gender"]),
dense_types: [tf.int64, tf.string],
dense_defaults: {
"age": -1 # defaults to -1 if missing
"age": -1 # "age" defaults to -1 if missing
# "gender" has no specified default so it's required
}
dense_shapes: [(1,), (1,)] # age, gender, label, weight
dense_shapes: [(1,), (1,)], # age, gender
```
Then the expected output is a dictionary:
And the expected output is:
```python
{
"age": [[0], [-1]],
"gender": [["f"], ["f"]],
}
```
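For completeness, a sketch of wiring such a call into a graph (keys and shapes as in the example above; the default-value tensor and the empty sparse-key lists are assumptions, not part of the example):

```python
import tensorflow as tf

serialized = tf.placeholder(tf.string, shape=[None])
parsed = tf.parse_example(
    serialized, names=None,
    sparse_keys=[], sparse_types=[],
    dense_keys=["age", "gender"],
    dense_types=[tf.int64, tf.string],
    dense_defaults={"age": tf.constant([-1], dtype=tf.int64)},
    dense_shapes=[(1,), (1,)])
age = parsed["age"]   # shape [batch_size, 1]
```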
Args:
serialized: A list of strings, a batch of binary serialized `Example`
protos.
names: A list of strings, the names of the serialized protos.
sparse_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `SparseTensor` objects.
sparse_types: A list of `DTypes` of the same length as `sparse_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_keys: A list of string keys in the examples' features.
The results for these keys will be returned as `Tensor`s
dense_types: A list of DTypes of the same length as `dense_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_defaults: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the dense_keys of the feature.
dense_shapes: A list of tuples with the same length as `dense_keys`.
The shape of the data for each dense feature referenced by `dense_keys`.
name: A name for this operation (optional).
Example input, format, and output: Dense Inputs (with defaults)
===============================================================
Returns:
A `dict` mapping keys to `Tensor`s and `SparseTensor`s.
Given two brain.Example input protos:
serialized: // serialized versions of the protos below
[features: {
feature: { key: "weight" value: { float_list: { value: [ 1.0 ] } } }
},
features: {
feature: { key: "label" value: { float_list: { value: [ -1.0, 0.0 ] } } }
}]
names: ["input0", "input1"],
dense_keys: np.array(["label", "weight"])
dense_defaults: {
"label": [1.0, 2.0], # float (default: vector)
"weight": 5.0 # float (default: scalar, 5.0)
}
dense_shapes: [(2,), (1,)] # label, weight
Then the expected output is a dictionary:
{
"label": [[1.0, 2.0], [-1.0, 0.0]],
"weight": [[1.0], [5.0]],
}
Raises:
ValueError: If sparse and dense key sets intersect, or input lengths do not
match up.
"""
names = [] if names is None else names
dense_defaults = {} if dense_defaults is None else dense_defaults
@@ -262,7 +264,20 @@ def parse_single_example(serialized, # pylint: disable=invalid-name
dense_defaults=None,
dense_shapes=None,
name="ParseSingleExample"):
"""Identical to parse_example but for scalar serialized and names.
"""Parses a single `Example` proto.
Similar to `parse_example`, except:
For dense tensors, the returned `Tensor` is identical to the output of
`parse_example`, except there is no batch dimension: the output shape is the
same as the shape given in `dense_shapes`.
For `SparseTensor`s, the first (batch) column of the indices matrix is removed
(the indices matrix is now a column vector), the values vector is unchanged,
and the first (`batch_size`) entry of the shape vector is removed (it is now a
single-element vector).
See also `parse_example`.
Args:
serialized: A scalar string, a single serialized Example.
@@ -275,21 +290,11 @@ def parse_single_example(serialized, # pylint: disable=invalid-name
dense_types: See parse_example documentation for more details.
dense_defaults: See parse_example documentation for more details.
dense_shapes: See parse_example documentation for more details.
name: Optional op name.
name: A name for this operation (optional).
Returns:
A dictionary mapping keys to Tensors and SparseTensors.
For dense tensors, the Tensor is identical to the output of parse_example,
except it is one less dimension (the first, batch, dimension is removed).
For SparseTensors:
The first (batch) column of the indices matrix is removed
(it is now a column vector).
The values vector is unchanged.
The first (batch_size) entry of the shape vector is removed
(it is now a single element vector).
Raises:
ValueError: if "scalar" or "names" have known shapes, and are not scalars.
"""

View File

@@ -1,4 +1,5 @@
"""## Sparse Tensor Representation.
# pylint: disable=g-short-docstring-punctuation
"""## Sparse Tensor Representation
TensorFlow supports a `SparseTensor` representation for data that is sparse
in multiple dimensions. Contrast this representation with `IndexedSlices`,
@@ -8,13 +9,13 @@ dimension, and dense along all other dimensions.
@@SparseTensor
@@SparseTensorValue
## Sparse to Dense Conversion.
## Sparse to Dense Conversion
@@sparse_to_dense
@@sparse_tensor_to_dense
@@sparse_to_indicator
## Manipulation.
## Manipulation
@@sparse_concat
@@sparse_reorder

View File

@@ -14,7 +14,7 @@ collected in the graph.
@@initialize_variables
@@assert_variables_initialized
## Saving and Restoring Variables.
## Saving and Restoring Variables
@@Saver

View File

@@ -207,7 +207,7 @@ class Variable(object):
This convenience method requires a session where the graph containing this
variable has been launched. If no session is passed, the default session is
used. See the [Session class](../client.md#Session) for more information on
used. See the [Session class](client.md#Session) for more information on
launching a graph and on sessions.
```python
@@ -543,7 +543,7 @@ def assert_variables_initialized(var_list=None):
"""
if var_list is None:
var_list = all_variables()
# Backwards compatibility for old-style variables. TODO(mdevin): remove.
# Backwards compatibility for old-style variables. TODO(touts): remove.
if not var_list:
var_list = []
for op in ops.get_default_graph().get_operations():

View File

@@ -5,7 +5,7 @@ package tensorflow;
// Protocol buffer representing the checkpoint state.
//
// TODO(mdevin): Add other attributes as needed.
// TODO(touts): Add other attributes as needed.
message CheckpointState {
// Path to the most-recent model checkpoint.
string model_checkpoint_path = 1;

View File

@@ -329,38 +329,42 @@ def batch_join(tensor_list_list, batch_size, capacity=32, enqueue_many=False,
shapes=None, name=None):
"""Run a list of tensors to fill a queue to create batches of examples.
This version enqueues a different list of tensors in different threads.
Implemented using a queue -- a QueueRunner for the queue
is added to the current Graph's QUEUE_RUNNER collection.
Enqueues a different list of tensors in different threads.
Implemented using a queue -- a `QueueRunner` for the queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
`len(tensor_list_list)` threads will be started, with thread `i` enqueuing
the tensors from `tensor_list_list[i]`. `tensor_list_list[i1][j]` must match
`tensor_list_list[i2][j]` in type and shape, except in the first dimension if
`enqueue_many` is true.
If `enqueue_many` is false, each `tensor_list_list[i]` is assumed to
represent a single example. Otherwise, `tensor_list_list[i]` is assumed to
represent a batch of examples, where the first dimension is indexed by
example, and all members of `tensor_list_list[i]` should have the same size
in the first dimension.
If `enqueue_many` is false, then an input tensor `x` will be output as a
tensor with shape `[batch_size] + x.shape`. If `enqueue_many` is true, the
slices of any input tensor `x` are treated as examples, and the output tensors
will have shape `[batch_size] + x.shape[1:]`.
The `capacity` argument controls how long the prefetching is allowed to
grow the queues.
Args:
tensor_list_list: A list of tuples of tensors to enqueue.
len(tensor_list_list) threads will be started, with the i-th
thread enqueuing the tensors from tensor_list[i].
tensor_list[i1][j] must match tensor_list[i2][j] in type and
shape (except in the first dimension if enqueue_many is true).
batch_size: The new batch size pulled from the queue.
capacity: Maximum number of elements in the queue; controls how far
ahead the prefetching is allowed to get, and memory usage.
enqueue_many: If False, each tensor_list_list[i] is assumed to
represent a single example. If True, tensor_list_list[i] is
assumed to represent a batch of examples, where the first
dimension is indexed by example, and all members of
tensor_list_list[i] should have the same size in the first
dimension.
shapes: Optional. The shapes for each example. Defaults to the
inferred shapes for tensor_list_list[i] (which must match, after
leaving off the first dimension if enqueue_many is True).
batch_size: An integer. The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
enqueue_many: Whether each tensor in `tensor_list_list` is a batch of
examples.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list_list[i]`.
name: A name for the operations (optional).
Returns:
A list of tensors with the same number and types as
tensor_list_list[i]. If enqueue_many is false, then an input
tensor with shape `[x, y, z]` will be output as a tensor with
shape `[batch_size, x, y, z]`. If enqueue_many is True, and an
input tensor has shape `[*, x, y, z]`, the output will have
shape `[batch_size, x, y, z]`.
`tensor_list_list[i]`.
"""
with ops.op_scope(_flatten(tensor_list_list), name, "batch_join") as name:
tensor_list_list = _validate_join(tensor_list_list)
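A construction-only usage sketch (the constant sources are stand-ins; real code would use one reader per thread, and the queue runners must be started before dequeuing):

```python
import tensorflow as tf

# Two example sources; batch_join starts one enqueuing thread per source.
sources = [[tf.constant([1.0]), tf.constant(0)],
           [tf.constant([2.0]), tf.constant(1)]]
value_batch, label_batch = tf.train.batch_join(sources, batch_size=4)
# value_batch has shape [4, 1]; label_batch has shape [4].
# Run tf.train.start_queue_runners(sess) before evaluating the batches.
```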
@@ -462,15 +466,15 @@ def shuffle_batch_join(tensor_list_list, batch_size, capacity,
min_after_dequeue: Minimum number elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
seed: Seed for the random shuffling within the queue.
enqueue_many: If False, each tensor_list_list[i] is assumed to
represent a single example. If True, tensor_list_list[i] is
enqueue_many: If `False`, each tensor_list_list[i] is assumed to
represent a single example. If `True`, tensor_list_list[i] is
assumed to represent a batch of examples, where the first
dimension is indexed by example, and all members of
tensor_list_list[i] should have the same size in the first
dimension.
shapes: Optional. The shapes for each example. Defaults to the
inferred shapes for tensor_list_list[i] (which must match, after
leaving off the first dimension if enqueue_many is True).
inferred shapes for `tensor_list_list[i]` (which must match, after
leaving off the first dimension if enqueue_many is `True`).
name: A name for the operations (optional).
Returns:

View File

@@ -9,7 +9,7 @@ from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
# TODO(mdevin): switch to variables.Variable.
# TODO(touts): switch to variables.Variable.
def assign_moving_average(variable, value, decay, name=None):
"""Compute the moving average of a variable.
@@ -182,7 +182,7 @@ class ExponentialMovingAverage(object):
ValueError: If the moving average of one of the variables is already
being computed.
"""
# TODO(mdevin): op_scope
# TODO(touts): op_scope
if var_list is None:
var_list = variables.trainable_variables()
for var in var_list:
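The update rule is equivalent to the plain arithmetic below (a sketch of the math, not the TensorFlow implementation):

```python
# assign_moving_average computes:
#   variable -= (1 - decay) * (variable - value)
# which is the same as: decay * variable + (1 - decay) * value.
def update_moving_average(moving_avg, value, decay=0.999):
    return decay * moving_avg + (1 - decay) * value

avg = 0.0
for v in [1.0, 1.0, 1.0]:
    avg = update_moving_average(avg, v, decay=0.9)
print avg   # => 0.271; approaches 1.0 as more values arrive
```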

View File

@@ -159,7 +159,7 @@ class BaseSaverBuilder(object):
restore_control_inputs = assign_ops[-1:] if restore_sequentially else []
# Load and optionally reshape on the CPU, as string tensors are not
# available on the GPU.
# TODO(mdevin): Re-enable restore on GPU when we can support annotating
# TODO(touts): Re-enable restore on GPU when we can support annotating
# string tensors as "HostMemory" inputs.
with ops.device(graph_util.set_cpu0(v.device) if v.device else None):
with ops.control_dependencies(restore_control_inputs):
@@ -214,7 +214,7 @@ class BaseSaverBuilder(object):
def _GroupByDevices(self, vars_to_save):
"""Group Variable tensor slices per device.
TODO(mdevin): Make sure that all the devices found are on different
TODO(touts): Make sure that all the devices found are on different
job/replica/task/cpu|gpu. It would be bad if 2 were on the same device.
It can happen if the devices are unspecified.

View File

@@ -1,7 +1,7 @@
# pylint: disable=wildcard-import,unused-import,g-bad-import-order,line-too-long
"""This library provides a set of classes and functions that helps train models.
## Optimizers.
## Optimizers
The Optimizer base class provides methods to compute gradients for a loss and
apply gradients to variables. A collection of subclasses implement classic
@@ -19,7 +19,7 @@ of the subclasses.
@@FtrlOptimizer
@@RMSPropOptimizer
## Gradient Computation.
## Gradient Computation
TensorFlow provides functions to compute the derivatives for a given
TensorFlow computation graph, adding operations to the graph. The
@@ -46,10 +46,10 @@ gradients.
@@clip_by_global_norm
@@global_norm
## Decaying the learning rate.
## Decaying the learning rate
@@exponential_decay
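The decayed rate follows `learning_rate * decay_rate ^ (global_step / decay_steps)`; a worked sketch of the arithmetic with arbitrary numbers:

```python
learning_rate, decay_rate = 0.1, 0.96
decay_steps, global_step = 1000, 2000
decayed = learning_rate * decay_rate ** (global_step / float(decay_steps))
print decayed   # => 0.09216 (0.1 * 0.96 ** 2)
```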
## Moving Averages.
## Moving Averages
Some training algorithms, such as GradientDescent and Momentum, often benefit
from maintaining a moving average of variables during optimization. Using the
@@ -57,7 +57,7 @@ moving averages for evaluations often improve results significantly.
@@ExponentialMovingAverage
## Coordinator and QueueRunner.
## Coordinator and QueueRunner
See [Threading and Queues](../../how_tos/threading_and_queues/index.md)
for how to use threads and queues. For documentation on the Queue API,
@@ -68,17 +68,21 @@ see [Queues](../../api_docs/python/io_ops.md#queues).
@@add_queue_runner
@@start_queue_runners
## Summary Operations.
## Summary Operations
The following ops output
[`Summary`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/framework/summary.proto)
protocol buffers as serialized string tensors.
You can fetch the output of a summary op in a session, and pass it to a
[SummaryWriter](train.md#SummaryWriter) to append it to an event file. You can
then use TensorBoard to visualize the contents of the event files. See
[TensorBoard and Summaries](../../how_tos/summaries_and_tensorboard/index.md)
for more details.
You can fetch the output of a summary op in a session, and pass it to
a [SummaryWriter](train.md#SummaryWriter) to append it to an event
file. Event files contain
[`Event`](https://tensorflow.googlesource.com/tensorflow/+/master/tensorflow/core/util/event.proto)
protos that can contain `Summary` protos along with the timestamp and
step. You can then use TensorBoard to visualize the contents of the
event files. See [TensorBoard and
Summaries](../../how_tos/summaries_and_tensorboard/index.md) for more
details.
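A minimal sketch of that flow (the log directory is arbitrary):

```python
import tensorflow as tf

x = tf.constant(3.0)
summary_op = tf.scalar_summary("x", x)
writer = tf.train.SummaryWriter("/tmp/logs")
with tf.Session() as sess:
    summary_str = sess.run(summary_op)   # a serialized Summary proto
    writer.add_summary(summary_str, global_step=0)
```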
@@scalar_summary
@@image_summary
@@ -88,7 +92,7 @@ for more details.
@@merge_summary
@@merge_all_summaries
## Adding Summaries to Event Files.
## Adding Summaries to Event Files
See [Summaries and
TensorBoard](../../how_tos/summaries_and_tensorboard/index.md) for an
@@ -97,10 +101,11 @@ overview of summaries, event files, and visualization in TensorBoard.
@@SummaryWriter
@@summary_iterator
## Training utilities.
## Training utilities
@@global_step
@@write_graph
"""
# Optimizers.
@@ -134,5 +139,5 @@ from tensorflow.python.training.training_util import global_step
from tensorflow.core.example.example_pb2 import *
from tensorflow.core.example.feature_pb2 import *
# Utility op. Open Source. TODO(mdevin): move to nn?
# Utility op. Open Source. TODO(touts): move to nn?
from tensorflow.python.training.learning_rate_decay import exponential_decay

View File

@@ -194,7 +194,7 @@ Extract3DPatches(
return Extract3DPatches(input, patchPlanes, patchRows, patchCols,
stridePlanes, strideRows, strideCols,
0, 0, 0, 0, 0, 0, padding_value);
case PADDING_SAME:
case PADDING_SAME: {
// The side of the tensor before striding should be just the expected
// output times the stride.
const TensorIndex size_z = ceil(inputPlanes / static_cast<float>(stridePlanes)) * stridePlanes;
@@ -214,6 +214,13 @@ Extract3DPatches(
dy - dy / 2, dy / 2,
dx - dx / 2, dx / 2,
padding_value);
}
default:
eigen_assert(false && "unexpected padding");
// unreachable code to avoid missing return warning.
return Extract3DPatches(input, patchPlanes, patchRows, patchCols,
stridePlanes, strideRows, strideCols,
0, 0, 0, 0, 0, 0, padding_value);
}
}