From 72e2918ccd615691591c70c2200d3c25b8fd7d35 Mon Sep 17 00:00:00 2001 From: Taehoon Lee <taehoonlee@snu.ac.kr> Date: Thu, 22 Jun 2017 13:57:31 +0900 Subject: [PATCH] Fix typos --- tensorflow/compiler/tests/ftrl_test.py | 2 +- tensorflow/compiler/xla/xla_data.proto | 2 +- .../distributions/python/ops/relaxed_bernoulli.py | 2 +- .../contrib/distributions/python/ops/sample_stats.py | 2 +- tensorflow/contrib/graph_editor/transform.py | 2 +- .../contrib/training/python/training/evaluation.py | 2 +- tensorflow/core/lib/gtl/optional.h | 2 +- .../docs_src/api_guides/python/contrib.losses.md | 10 +++++----- tensorflow/python/debug/cli/analyzer_cli.py | 2 +- 9 files changed, 13 insertions(+), 13 deletions(-) diff --git a/tensorflow/compiler/tests/ftrl_test.py b/tensorflow/compiler/tests/ftrl_test.py index 6b328fb618b..a75a5cd2cf4 100644 --- a/tensorflow/compiler/tests/ftrl_test.py +++ b/tensorflow/compiler/tests/ftrl_test.py @@ -218,7 +218,7 @@ class FtrlOptimizerTest(XLATestCase): self.assertAllClose(np.array([-0.24059935, -0.46829352]), var0.eval()) self.assertAllClose(np.array([-0.02406147, -0.04830509]), var1.eval()) - # When variables are intialized with Zero, FTRL-Proximal has two properties: + # When variables are initialized with Zero, FTRL-Proximal has two properties: # 1. Without L1&L2 but with fixed learning rate, FTRL-Proximal is identical # with GradientDescent. # 2. Without L1&L2 but with adaptive learning rate, FTRL-Proximal is idential diff --git a/tensorflow/compiler/xla/xla_data.proto b/tensorflow/compiler/xla/xla_data.proto index b53bf98e1c0..86c72b34496 100644 --- a/tensorflow/compiler/xla/xla_data.proto +++ b/tensorflow/compiler/xla/xla_data.proto @@ -200,7 +200,7 @@ message OpMetadata { string op_name = 2; // Indicate a file and line that this op is associated to in a user's program. // - // e.g. it could be be the file and line of user code that generated the op. + // e.g. it could be the file and line of user code that generated the op. string source_file = 3; int32 source_line = 4; } diff --git a/tensorflow/contrib/distributions/python/ops/relaxed_bernoulli.py b/tensorflow/contrib/distributions/python/ops/relaxed_bernoulli.py index 5b57a95c55e..b5258090155 100644 --- a/tensorflow/contrib/distributions/python/ops/relaxed_bernoulli.py +++ b/tensorflow/contrib/distributions/python/ops/relaxed_bernoulli.py @@ -52,7 +52,7 @@ class RelaxedBernoulli(transformed_distribution.TransformedDistribution): the RelaxedBernoulli can suffer from underflow issues. In many case loss functions such as these are invariant under invertible transformations of the random variables. The KL divergence, found in the variational autoencoder - loss, is an example. Because RelaxedBernoullis are sampled by by a Logistic + loss, is an example. Because RelaxedBernoullis are sampled by a Logistic random variable followed by a `tf.sigmoid` op, one solution is to treat the Logistic as the random variable and `tf.sigmoid` as downstream. The KL divergences of two Logistics, which are always followed by a `tf.sigmoid` diff --git a/tensorflow/contrib/distributions/python/ops/sample_stats.py b/tensorflow/contrib/distributions/python/ops/sample_stats.py index 26cf922d0af..2a4b92c7290 100644 --- a/tensorflow/contrib/distributions/python/ops/sample_stats.py +++ b/tensorflow/contrib/distributions/python/ops/sample_stats.py @@ -47,7 +47,7 @@ def percentile(x, """Compute the `q`-th percentile of `x`. 
Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the - way from the minimum to the maximum in in a sorted copy of `x`. + way from the minimum to the maximum in a sorted copy of `x`. The values and distances of the two nearest neighbors as well as the `interpolation` parameter will determine the percentile if the normalized diff --git a/tensorflow/contrib/graph_editor/transform.py b/tensorflow/contrib/graph_editor/transform.py index 762bc448141..2234400fdcb 100644 --- a/tensorflow/contrib/graph_editor/transform.py +++ b/tensorflow/contrib/graph_editor/transform.py @@ -446,7 +446,7 @@ class Transformer(object): # TODO(fkp): return a subgraph? op_, op_outputs_ = self.transform_op_handler(info, op) if op is op_: - raise ValueError("In-place tranformation not allowed.") + raise ValueError("In-place transformation not allowed.") # Process op. info.transformed_ops[op] = op_ diff --git a/tensorflow/contrib/training/python/training/evaluation.py b/tensorflow/contrib/training/python/training/evaluation.py index 24b733dd29c..a895d90b8e5 100644 --- a/tensorflow/contrib/training/python/training/evaluation.py +++ b/tensorflow/contrib/training/python/training/evaluation.py @@ -226,7 +226,7 @@ def checkpoints_iterator(checkpoint_dir, This behavior gives control to callers on what to do if checkpoints do not come fast enough or stop being generated. For example, if callers have a way - to detect that the training has stopped and know that no new new checkpoints + to detect that the training has stopped and know that no new checkpoints will be generated, they can provide a `timeout_fn` that returns `True` when the training has stopped. If they know that the training is still going on they return `False` instead. diff --git a/tensorflow/core/lib/gtl/optional.h b/tensorflow/core/lib/gtl/optional.h index 8ba4b091434..2ff8b9c7d1a 100644 --- a/tensorflow/core/lib/gtl/optional.h +++ b/tensorflow/core/lib/gtl/optional.h @@ -656,7 +656,7 @@ class optional : private internal_optional::optional_data<T>, constexpr const T& reference() const { return *this->pointer(); } T& reference() { return *(this->pointer()); } - // T constaint checks. You can't have an optional of nullopt_t, in_place_t or + // T constraint checks. You can't have an optional of nullopt_t, in_place_t or // a reference. static_assert( !std::is_same<nullopt_t, typename std::remove_cv<T>::type>::value, diff --git a/tensorflow/docs_src/api_guides/python/contrib.losses.md b/tensorflow/docs_src/api_guides/python/contrib.losses.md index 8c289dd5563..30123e367f3 100644 --- a/tensorflow/docs_src/api_guides/python/contrib.losses.md +++ b/tensorflow/docs_src/api_guides/python/contrib.losses.md @@ -13,8 +13,8 @@ of samples in the batch and `d1` ... `dN` are the remaining dimensions. It is common, when training with multiple loss functions, to adjust the relative strengths of individual losses. This is performed by rescaling the losses via a `weight` parameter passed to the loss functions. 
For example, if we were -training with both log_loss and mean_square_error, and we wished that the -log_loss penalty be twice as severe as the mean_square_error, we would +training with both log_loss and mean_squared_error, and we wished that the +log_loss penalty be twice as severe as the mean_squared_error, we would implement this as: ```python @@ -22,7 +22,7 @@ implement this as: tf.contrib.losses.log(predictions, labels, weight=2.0) # Uses default weight of 1.0 - tf.contrib.losses.mean_square_error(predictions, labels) + tf.contrib.losses.mean_squared_error(predictions, labels) # All the losses are collected into the `GraphKeys.LOSSES` collection. losses = tf.get_collection(tf.GraphKeys.LOSSES) @@ -74,7 +74,7 @@ these predictions. predictions = MyModelPredictions(images) weight = tf.cast(tf.greater(depths, 0), tf.float32) - loss = tf.contrib.losses.mean_square_error(predictions, depths, weight) + loss = tf.contrib.losses.mean_squared_error(predictions, depths, weight) ``` Note that when using weights for the losses, the final average is computed @@ -100,7 +100,7 @@ weighted average over the individual prediction errors: weight = MyComplicatedWeightingFunction(labels) weight = tf.div(weight, tf.size(weight)) - loss = tf.contrib.losses.mean_square_error(predictions, depths, weight) + loss = tf.contrib.losses.mean_squared_error(predictions, depths, weight) ``` @{tf.contrib.losses.absolute_difference} diff --git a/tensorflow/python/debug/cli/analyzer_cli.py b/tensorflow/python/debug/cli/analyzer_cli.py index da27f4cebea..e863c2ddb9d 100644 --- a/tensorflow/python/debug/cli/analyzer_cli.py +++ b/tensorflow/python/debug/cli/analyzer_cli.py @@ -1312,7 +1312,7 @@ class DebugAnalyzer(object): all_inputs = copy.copy(tracker(node_name, is_control=False)) is_ctrl = [False] * len(all_inputs) if include_control: - # Sort control inputs or recipients in in alphabetical order of the node + # Sort control inputs or recipients in alphabetical order of the node # names. ctrl_inputs = sorted(tracker(node_name, is_control=True)) all_inputs.extend(ctrl_inputs)
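
Note (not part of the patch): the `checkpoints_iterator` docstring touched above describes supplying a `timeout_fn` that returns `True` once training has stopped and no new checkpoints will appear. A minimal sketch of that pattern follows; the checkpoint directory and the sentinel-file convention used to detect the end of training are illustrative assumptions, not TensorFlow API.

    import os

    import tensorflow as tf

    checkpoint_dir = "/tmp/my_model"  # assumed location of training checkpoints


    def training_has_stopped():
      # Hypothetical convention: the training job writes a sentinel file when it
      # finishes, so no further checkpoints will be produced once this is True.
      return os.path.exists(os.path.join(checkpoint_dir, "STOPPED"))


    # Yields each new checkpoint path; waits up to `timeout` seconds for the next
    # checkpoint and stops iterating once `timeout_fn()` returns True.
    for checkpoint_path in tf.contrib.training.checkpoints_iterator(
        checkpoint_dir, timeout=60, timeout_fn=training_has_stopped):
      print("New checkpoint ready for evaluation:", checkpoint_path)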