diff --git a/tensorflow/contrib/layers/python/layers/layers.py b/tensorflow/contrib/layers/python/layers/layers.py
index c920764803d..32ca0c38d91 100644
--- a/tensorflow/contrib/layers/python/layers/layers.py
+++ b/tensorflow/contrib/layers/python/layers/layers.py
@@ -844,7 +844,7 @@ def convolution(inputs,
       variable would be created and added the activations. Finally, if
       `activation_fn` is not `None`, it is applied to the activations as well.
 
-  Performs a'trous convolution with input stride/dilation rate equal to `rate`
+  Performs atrous convolution with input stride/dilation rate equal to `rate`
   if a value > 1 for any dimension of `rate` is specified.  In this case
   `stride` values != 1 are not supported.
 
@@ -870,7 +870,7 @@ def convolution(inputs,
       "NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
       For N=3, the valid values are "NDHWC" (default) and "NCDHW".
     rate: A sequence of N positive integers specifying the dilation rate to use
-      for a'trous convolution.  Can be a single integer to specify the same
+      for atrous convolution.  Can be a single integer to specify the same
       value for all spatial dimensions.  Specifying any `rate` value != 1 is
       incompatible with specifying any `stride` value != 1.
     activation_fn: Activation function. The default value is a ReLU function.
@@ -1865,7 +1865,7 @@ def separable_convolution2d(
       depthwise convolution stride. Can be an int if both strides are the same.
     padding: One of 'VALID' or 'SAME'.
     rate: A list of length 2: [rate_height, rate_width], specifying the dilation
-      rates for a'trous convolution. Can be an int if both rates are the same.
+      rates for atrous convolution. Can be an int if both rates are the same.
       If any value is larger than one, then both stride values need to be one.
     activation_fn: Activation function. The default value is a ReLU function.
       Explicitly set it to None to skip it and maintain a linear activation.
diff --git a/tensorflow/docs_src/performance/index.md b/tensorflow/docs_src/performance/index.md
index 746dc0c74fe..7c1cd152d37 100644
--- a/tensorflow/docs_src/performance/index.md
+++ b/tensorflow/docs_src/performance/index.md
@@ -9,7 +9,7 @@ deeper with techniques detailed in @{$performance_models$High-Performance Models
   practices for optimizing your TensorFlow code.
 
 * @{$performance_models$High-Performance Models}, which contains a collection
-  advanced techniques to build highly scalable models targeting different
+  of advanced techniques to build highly scalable models targeting different
   system types and network topologies.
 
 * @{$benchmarks$Benchmarks}, which contains a collection of benchmark
diff --git a/tensorflow/docs_src/programmers_guide/variable_scope.md b/tensorflow/docs_src/programmers_guide/variable_scope.md
index 5084acbab97..f4d2b3f37b8 100644
--- a/tensorflow/docs_src/programmers_guide/variable_scope.md
+++ b/tensorflow/docs_src/programmers_guide/variable_scope.md
@@ -5,7 +5,7 @@ in the way described in the @{$variables$Variables HowTo}.
 But when building complex models you often need to share large sets
 of variables and you might want to initialize all of them in one place.
 This tutorial shows how this can be done using `tf.variable_scope()` and
-the `tf.get_variable()`.
+`tf.get_variable()`.
 
 ## The Problem
 
@@ -368,6 +368,6 @@ sequence-to-sequence models.
 
 File | What's in it?
 --- | ---
-`models/tutorials/image/cifar10/cifar10.py` | Model for detecting objects in images.
-`models/tutorials/rnn/rnn_cell.py` | Cell functions for recurrent neural networks.
-`models/tutorials/rnn/seq2seq.py` | Functions for building sequence-to-sequence models.
+`tutorials/image/cifar10/cifar10.py` | Model for detecting objects in images.
+`tutorials/rnn/rnn_cell.py` | Cell functions for recurrent neural networks.
+`tutorials/rnn/seq2seq.py` | Functions for building sequence-to-sequence models.
diff --git a/tensorflow/docs_src/tutorials/deep_cnn.md b/tensorflow/docs_src/tutorials/deep_cnn.md
index d6a136fee47..f60c8fd7701 100644
--- a/tensorflow/docs_src/tutorials/deep_cnn.md
+++ b/tensorflow/docs_src/tutorials/deep_cnn.md
@@ -83,7 +83,7 @@ for details. It consists of 1,068,298 learnable parameters and requires about
 ## Code Organization
 
 The code for this tutorial resides in
-[`tensorflow_models/tutorials/image/cifar10/`](https://github.com/tensorflow/models/tree/master/tutorials/image/cifar10/).
+[`models/tutorials/image/cifar10/`](https://github.com/tensorflow/models/tree/master/tutorials/image/cifar10/).
 
 File | Purpose
 --- | ---
diff --git a/tensorflow/python/layers/normalization.py b/tensorflow/python/layers/normalization.py
index 871f840c529..f92ea9b05f5 100644
--- a/tensorflow/python/layers/normalization.py
+++ b/tensorflow/python/layers/normalization.py
@@ -400,7 +400,9 @@ def batch_normalization(inputs,
     training: Either a Python boolean, or a TensorFlow boolean scalar tensor
       (e.g. a placeholder). Whether to return the output in training mode
       (normalized with statistics of the current batch) or in inference mode
-      (normalized with moving statistics).
+      (normalized with moving statistics). **NOTE**: make sure to set this
+      parameter correctly, or else your training/inference will not work
+      properly.
     trainable: Boolean, if `True` also add variables to the graph
       collection `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
     name: String, the name of the layer.
diff --git a/tensorflow/python/ops/variable_scope.py b/tensorflow/python/ops/variable_scope.py
index 43addbe5a52..a29ddfa9f2f 100644
--- a/tensorflow/python/ops/variable_scope.py
+++ b/tensorflow/python/ops/variable_scope.py
@@ -1292,7 +1292,7 @@ def _pure_variable_scope(name_or_scope,
     well-defined semantics. Defaults to False (will later change to True).
 
   Yields:
-    A scope that can be to captured and reused.
+    A scope that can be captured and reused.
 
   Raises:
     ValueError: when trying to reuse within a create scope, or create within
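For reviewers, a quick illustration of the atrous-convolution behavior whose spelling the first hunks fix: with `rate > 1` the filter taps are spaced apart (dilated), and every `stride` value must then stay at 1. This is a minimal sketch against the TF 1.x contrib API touched by this patch; the tensor names are illustrative, not from the patch.

```python
import tensorflow as tf

# A 3x3 kernel with rate=2 samples the input with gaps, giving a 5x5
# effective receptive field without extra parameters. Per the docstring,
# any rate != 1 requires stride == 1.
images = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])
net = tf.contrib.layers.conv2d(
    images, num_outputs=16, kernel_size=3, rate=2, stride=1)
```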
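Likewise, a minimal sketch of the pitfall the new NOTE in `batch_normalization` warns about: feed `training=True` while optimizing and `training=False` at inference, and run the `UPDATE_OPS` that maintain the moving statistics alongside the train step (the update-ops dependency is standard TF 1.x practice; the placeholder names and loss here are illustrative only).

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 32])
is_training = tf.placeholder(tf.bool, name='is_training')

# training=True normalizes with the current batch's statistics;
# training=False uses the accumulated moving statistics. Feeding the
# wrong value is the failure mode the NOTE describes.
net = tf.layers.dense(x, 64)
net = tf.layers.batch_normalization(net, training=is_training)

# The moving-average updates live in the UPDATE_OPS collection and must
# run with the train step, or inference statistics never get updated.
loss = tf.reduce_mean(tf.square(net))
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
  train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
```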