diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c36ef1ecd3b..c78b6b1a150 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -40,7 +40,7 @@ TensorFlow coding style.
 * Include unit tests when you contribute new features, as they help to
   a) prove that your code works correctly, b) guard against future breaking
   changes to lower the maintenance cost.
-* Bug fixes also generally require unit tests, because the presense of bugs
+* Bug fixes also generally require unit tests, because the presence of bugs
   usually indicates insufficient test coverage.
 * Keep API compatibility in mind when you change code in core TensorFlow,
   e.g., code in [tensorflow/core](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core) and [tensorflow/python](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/python).
diff --git a/tensorflow/contrib/distributions/python/ops/operator_pd_identity.py b/tensorflow/contrib/distributions/python/ops/operator_pd_identity.py
index e6947bf6093..4cee2997909 100644
--- a/tensorflow/contrib/distributions/python/ops/operator_pd_identity.py
+++ b/tensorflow/contrib/distributions/python/ops/operator_pd_identity.py
@@ -115,7 +115,7 @@ class OperatorPDIdentity(operator_pd.OperatorPDBase):
     """Static check that the argument `x` is proper `shape`, `dtype`."""
     # x is a typical argument e.g. to matmul or solve.  In both cases, x should
     # have the same type/shape since this is a square matrix.  These checks are
-    # ususally not needed since we ususally have some tensor backing this
+    # usually not needed since we usually have some tensor backing this
     # distribution, and the calls to tf.matmul do a shape/type check.
     #
     # Static checks only for efficiency, the identity should be fast.
diff --git a/tensorflow/contrib/keras/python/keras/datasets/imdb.py b/tensorflow/contrib/keras/python/keras/datasets/imdb.py
index bafd92aca69..5c087fe63f5 100644
--- a/tensorflow/contrib/keras/python/keras/datasets/imdb.py
+++ b/tensorflow/contrib/keras/python/keras/datasets/imdb.py
@@ -41,7 +41,7 @@ def load_data(path='imdb.npz',
       num_words: max number of words to include. Words are ranked
           by how often they occur (in the training set) and only
           the most frequent words are kept
-      skip_top: skip the top N most frequently occuring words
+      skip_top: skip the top N most frequently occurring words
           (which may not be informative).
       maxlen: truncate sequences after this length.
       seed: random seed for sample shuffling.
diff --git a/tensorflow/contrib/keras/python/keras/datasets/reuters.py b/tensorflow/contrib/keras/python/keras/datasets/reuters.py
index 81e940a8463..b1c22fee63d 100644
--- a/tensorflow/contrib/keras/python/keras/datasets/reuters.py
+++ b/tensorflow/contrib/keras/python/keras/datasets/reuters.py
@@ -43,7 +43,7 @@ def load_data(path='reuters.npz',
       num_words: max number of words to include. Words are ranked
          by how often they occur (in the training set) and only
          the most frequent words are kept
-      skip_top: skip the top N most frequently occuring words
+      skip_top: skip the top N most frequently occurring words
          (which may not be informative).
      maxlen: truncate sequences after this length.
      test_split: Fraction of the dataset to be used as test data.
diff --git a/tensorflow/contrib/keras/python/keras/engine/topology.py b/tensorflow/contrib/keras/python/keras/engine/topology.py
index 0336fc4bf4b..3d9ed51a1c0 100644
--- a/tensorflow/contrib/keras/python/keras/engine/topology.py
+++ b/tensorflow/contrib/keras/python/keras/engine/topology.py
@@ -649,7 +649,7 @@ class Layer(tf_base_layers.Layer):
                            'but was passed an input_mask: ' + str(mask))
       # masking not explicitly supported: return None as mask
       return None
-    # if masking is explictly supported, by default
+    # if masking is explicitly supported, by default
     # carry over the input mask
     return mask
 
diff --git a/tensorflow/contrib/keras/python/keras/engine/training.py b/tensorflow/contrib/keras/python/keras/engine/training.py
index ba6201713ed..96d1c2f2622 100644
--- a/tensorflow/contrib/keras/python/keras/engine/training.py
+++ b/tensorflow/contrib/keras/python/keras/engine/training.py
@@ -245,7 +245,7 @@ def _check_array_lengths(inputs, targets, weights):
 
 
 def _check_loss_and_target_compatibility(targets, loss_fns, output_shapes):
-  """Does validation on the compatiblity of targets and loss functions.
+  """Does validation on the compatibility of targets and loss functions.
 
   This helps prevent users from using loss functions incorrectly.
diff --git a/tensorflow/contrib/keras/python/keras/layers/normalization.py b/tensorflow/contrib/keras/python/keras/layers/normalization.py
index df77401aee0..ea229fdce1f 100644
--- a/tensorflow/contrib/keras/python/keras/layers/normalization.py
+++ b/tensorflow/contrib/keras/python/keras/layers/normalization.py
@@ -169,7 +169,7 @@ class BatchNormalization(Layer):
 
     def normalize_inference():
       if needs_broadcasting:
-        # In this case we must explictly broadcast all parameters.
+        # In this case we must explicitly broadcast all parameters.
         broadcast_moving_mean = K.reshape(self.moving_mean, broadcast_shape)
         broadcast_moving_variance = K.reshape(self.moving_variance,
                                               broadcast_shape)
diff --git a/tensorflow/contrib/keras/python/keras/models.py b/tensorflow/contrib/keras/python/keras/models.py
index 52456a4bb54..1c041091fc1 100644
--- a/tensorflow/contrib/keras/python/keras/models.py
+++ b/tensorflow/contrib/keras/python/keras/models.py
@@ -221,7 +221,7 @@ def load_model(filepath, custom_objects=None):
       obj: object, dict, or list.
 
     Returns:
-      The same structure, where occurences
+      The same structure, where occurrences
           of a custom object name have been replaced
           with the custom object.
     """
diff --git a/tensorflow/contrib/keras/python/keras/preprocessing/sequence.py b/tensorflow/contrib/keras/python/keras/preprocessing/sequence.py
index 5a24a63b014..692a359ead3 100644
--- a/tensorflow/contrib/keras/python/keras/preprocessing/sequence.py
+++ b/tensorflow/contrib/keras/python/keras/preprocessing/sequence.py
@@ -156,7 +156,7 @@ def skipgrams(sequence,
           of word indices (integers). If using a `sampling_table`,
           word indices are expected to match the rank
           of the words in a reference dataset (e.g. 10 would encode
-          the 10-th most frequently occuring token).
+          the 10-th most frequently occurring token).
           Note that index 0 is expected to be a non-word and will be skipped.
       vocabulary_size: int. maximum possible word index + 1
       window_size: int. actually half-window.
diff --git a/tensorflow/contrib/labeled_tensor/python/ops/core.py b/tensorflow/contrib/labeled_tensor/python/ops/core.py
index 393c7f93f36..e6aded92ca5 100644
--- a/tensorflow/contrib/labeled_tensor/python/ops/core.py
+++ b/tensorflow/contrib/labeled_tensor/python/ops/core.py
@@ -810,7 +810,7 @@ def axis_order_scope(axis_order=None):
   Example usage:
 
     with lt.axis_order_scope(['x', 'y', 'z']):
-      # result is guranteed to have the correct axis order
+      # result is guaranteed to have the correct axis order
       result = w + b
 
   You can nest scopes, in which case only the inner-most scope applies, e.g.,
diff --git a/tensorflow/contrib/layers/python/layers/feature_column.py b/tensorflow/contrib/layers/python/layers/feature_column.py
index 95a4b032b04..451137190f1 100644
--- a/tensorflow/contrib/layers/python/layers/feature_column.py
+++ b/tensorflow/contrib/layers/python/layers/feature_column.py
@@ -451,7 +451,7 @@ class _SparseColumn(
     return input_tensor
 
   def is_compatible(self, other_column):
-    """Check compatability of two sparse columns."""
+    """Check compatibility of two sparse columns."""
     if self.lookup_config and other_column.lookup_config:
       return self.lookup_config == other_column.lookup_config
     compatible = (self.length == other_column.length and
@@ -2069,7 +2069,7 @@ class _CrossedColumn(_FeatureColumn,
                          "hash_key",
                          "combiner", "ckpt_to_load_from",
                          "tensor_name_in_ckpt"])):
-  """Represents a cross transformation also known as conjuction or combination.
+  """Represents a cross transformation also known as conjunction or combination.
 
   Instances of this class are immutable. It crosses given `columns`. Crossed
   column output will be hashed to hash_bucket_size.
diff --git a/tensorflow/contrib/layers/python/layers/feature_column_ops_test.py b/tensorflow/contrib/layers/python/layers/feature_column_ops_test.py
index 01239212664..7590ed4ca0d 100644
--- a/tensorflow/contrib/layers/python/layers/feature_column_ops_test.py
+++ b/tensorflow/contrib/layers/python/layers/feature_column_ops_test.py
@@ -1467,8 +1467,8 @@ class SequenceInputFromFeatureColumnTest(test.TestCase):
     expected_input_shape = [4, 3, embedding_dimension]
     self.assertAllEqual(expected_input_shape, model_input.shape)
 
-    # `ids_tensor` consists of 7 instances of , 3 occurences of "b",
-    # 2 occurences of "c" and 1 instance of "a".
+    # `ids_tensor` consists of 7 instances of , 3 occurrences of "b",
+    # 2 occurrences of "c" and 1 instance of "a".
     expected_gradient_values = sorted([0., 3., 2., 1.] * embedding_dimension)
     actual_gradient_values = np.sort(gradients[0].values, axis=None)
     self.assertAllClose(expected_gradient_values, actual_gradient_values)
diff --git a/tensorflow/contrib/layers/python/layers/feature_column_test.py b/tensorflow/contrib/layers/python/layers/feature_column_test.py
index aa3912a4088..0f606a787d2 100644
--- a/tensorflow/contrib/layers/python/layers/feature_column_test.py
+++ b/tensorflow/contrib/layers/python/layers/feature_column_test.py
@@ -173,7 +173,7 @@ class FeatureColumnTest(test.TestCase):
       for i in range(len(b1_value)):
         self.assertAllClose(b1_value[i], b2_value[i])
 
-    # Test the case when a shared_embedding_name is explictly specified.
+    # Test the case when a shared_embedding_name is explicitly specified.
     d = fc.shared_embedding_columns(
         [a1, a2],
         dimension=4,
diff --git a/tensorflow/contrib/layers/python/layers/layers.py b/tensorflow/contrib/layers/python/layers/layers.py
index 3681829f657..7a429f75bbf 100644
--- a/tensorflow/contrib/layers/python/layers/layers.py
+++ b/tensorflow/contrib/layers/python/layers/layers.py
@@ -278,7 +278,7 @@ def _fused_batch_norm(
           trainable=trainable_gamma)
 
       # Create moving_mean and moving_variance variables and add them to the
-      # appropiate collections.
+      # appropriate collections.
       moving_mean_collections = utils.get_variable_collections(
           variables_collections, 'moving_mean')
       moving_mean_initializer = param_initializers.get(
@@ -632,7 +632,7 @@ def batch_norm(inputs,
           trainable=trainable)
 
       # Create moving_mean and moving_variance variables and add them to the
-      # appropiate collections. We disable variable partitioning while creating
+      # appropriate collections. We disable variable partitioning while creating
       # them, because assign_moving_average is not yet supported for partitioned
       # variables.
       partitioner = variable_scope.get_variable_scope().partitioner
diff --git a/tensorflow/contrib/learn/python/learn/estimators/dynamic_rnn_estimator.py b/tensorflow/contrib/learn/python/learn/estimators/dynamic_rnn_estimator.py
index d86ef8d477d..fc092fccd78 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/dynamic_rnn_estimator.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/dynamic_rnn_estimator.py
@@ -115,7 +115,7 @@ def dict_to_state_tuple(input_dict, cell):
 
 
 def _concatenate_context_input(sequence_input, context_input):
-  """Replicates `context_input` accross all timesteps of `sequence_input`.
+  """Replicates `context_input` across all timesteps of `sequence_input`.
 
   Expands dimension 1 of `context_input` then tiles it `sequence_length` times.
   This value is appended to `sequence_input` on dimension 2 and the result is
@@ -177,7 +177,7 @@ def build_sequence_input(features,
       describing sequence features. All items in the set should be instances
       of classes derived from `FeatureColumn`.
     context_feature_columns: An iterable containing all the feature columns
-      describing context features i.e. features that apply accross all time
+      describing context features i.e. features that apply across all time
       steps. All items in the set should be instances of classes derived from
       `FeatureColumn`.
     weight_collections: List of graph collections to which weights are added.
@@ -419,7 +419,7 @@ def _get_dynamic_rnn_model_fn(
       describing sequence features. All items in the set should be instances
       of classes derived from `FeatureColumn`.
    context_feature_columns: An iterable containing all the feature columns
-      describing context features, i.e., features that apply accross all time
+      describing context features, i.e., features that apply across all time
      steps. All items in the set should be instances of classes derived from
      `FeatureColumn`.
    predict_probabilities: A boolean indicating whether to predict probabilities
@@ -603,7 +603,7 @@ class DynamicRnnEstimator(estimator.Estimator):
        describing sequence features. All items in the iterable should be
        instances of classes derived from `FeatureColumn`.
      context_feature_columns: An iterable containing all the feature columns
-        describing context features, i.e., features that apply accross all time
+        describing context features, i.e., features that apply across all time
        steps. All items in the set should be instances of classes derived from
        `FeatureColumn`.
      num_classes: the number of classes for a classification problem. Only
diff --git a/tensorflow/contrib/learn/python/learn/estimators/head.py b/tensorflow/contrib/learn/python/learn/estimators/head.py
index 25f2922bf8e..52b42134630 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/head.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/head.py
@@ -163,7 +163,7 @@ class Head(object):
         ModeFnOps.loss to compute and apply gradients.
       logits: logits `Tensor` to be used by the head.
       logits_input: `Tensor` from which to build logits, often needed when you
-        don't want to compute the logits. Typicaly this is the activation of the
+        don't want to compute the logits. Typically this is the activation of the
         last hidden layer in a DNN. Some heads (like the ones responsible for
         candidate sampling) intrinsically avoid computing full logits and only
         accepts logits_input.
diff --git a/tensorflow/contrib/learn/python/learn/estimators/head_test.py b/tensorflow/contrib/learn/python/learn/estimators/head_test.py
index d5777088de7..f7934fc1889 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/head_test.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/head_test.py
@@ -895,7 +895,7 @@ class BinaryClassificationHeadTest(test.TestCase):
       _assert_summary_tags(self, ["loss"])
       # logloss: z:label, x:logit
       # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
-      # expected_loss is (total_weighted_loss)/1 since htere is 1 nonzero
+      # expected_loss is (total_weighted_loss)/1 since there is 1 nonzero
       # weight.
       expected_loss = 0.062652342
       _assert_metrics(
diff --git a/tensorflow/contrib/learn/python/learn/estimators/state_saving_rnn_estimator.py b/tensorflow/contrib/learn/python/learn/estimators/state_saving_rnn_estimator.py
index 02acd708123..9cb4c3515a9 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/state_saving_rnn_estimator.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/state_saving_rnn_estimator.py
@@ -144,7 +144,7 @@ def _prepare_features_for_sqss(features, labels, mode,
       describing sequence features. All items in the set should be instances
       of classes derived from `FeatureColumn`.
     context_feature_columns: An iterable containing all the feature columns
-      describing context features, i.e., features that apply accross all time
+      describing context features, i.e., features that apply across all time
       steps. All items in the set should be instances of classes derived from
       `FeatureColumn`.
 
@@ -261,7 +261,7 @@ def _read_batch(cell,
       describing sequence features. All items in the set should be instances
       of classes derived from `FeatureColumn`.
     context_feature_columns: An iterable containing all the feature columns
-      describing context features, i.e., features that apply accross all time
+      describing context features, i.e., features that apply across all time
       steps. All items in the set should be instances of classes derived from
       `FeatureColumn`.
     num_threads: The Python integer number of threads enqueuing input examples
@@ -420,7 +420,7 @@ def _get_rnn_model_fn(cell_type,
       describing sequence features. All items in the set should be instances
       of classes derived from `FeatureColumn`.
     context_feature_columns: An iterable containing all the feature columns
-      describing context features, i.e., features that apply accross all time
+      describing context features, i.e., features that apply across all time
       steps. All items in the set should be instances of classes derived from
       `FeatureColumn`.
     predict_probabilities: A boolean indicating whether to predict probabilities
@@ -563,7 +563,7 @@ class StateSavingRnnEstimator(estimator.Estimator):
        describing sequence features. All items in the set should be instances
        of classes derived from `FeatureColumn`.
      context_feature_columns: An iterable containing all the feature columns
-        describing context features, i.e., features that apply accross all time
+        describing context features, i.e., features that apply across all time
        steps. All items in the set should be instances of classes derived from
        `FeatureColumn`.
      num_classes: The number of classes for categorization. Used only and
diff --git a/tensorflow/contrib/learn/python/learn/monitors.py b/tensorflow/contrib/learn/python/learn/monitors.py
index 9f133926660..4d8f53c1709 100644
--- a/tensorflow/contrib/learn/python/learn/monitors.py
+++ b/tensorflow/contrib/learn/python/learn/monitors.py
@@ -473,7 +473,7 @@ class LoggingTrainable(EveryN):
 
   def every_n_step_begin(self, step):
     super(LoggingTrainable, self).every_n_step_begin(step)
-    # Get a list of trainable variables at the begining of every N steps.
+    # Get a list of trainable variables at the beginning of every N steps.
     # We cannot get this in __init__ because train_op has not been generated.
     trainables = ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES,
                                     scope=self._scope)
diff --git a/tensorflow/contrib/makefile/README.md b/tensorflow/contrib/makefile/README.md
index f061b58775e..9ba5c035a26 100644
--- a/tensorflow/contrib/makefile/README.md
+++ b/tensorflow/contrib/makefile/README.md
@@ -295,7 +295,7 @@ itself, you'll see it's broken up into host and target sections. If you are
 cross-compiling, you should look at customizing the target settings to match
 what you need for your desired system.
 
-## Dependency Managment
+## Dependency Management
 
 The Makefile loads in a list of dependencies stored in text files. These files
 are generated from the main Bazel build by running
diff --git a/tensorflow/contrib/session_bundle/example/export_half_plus_two.py b/tensorflow/contrib/session_bundle/example/export_half_plus_two.py
index 08ca47058c8..4a56509e596 100644
--- a/tensorflow/contrib/session_bundle/example/export_half_plus_two.py
+++ b/tensorflow/contrib/session_bundle/example/export_half_plus_two.py
@@ -97,7 +97,7 @@ def Export(export_dir, use_checkpoint_v2):
     }
 
     # Create two filename assets and corresponding tensors.
-    # TODO(b/26254158) Consider adding validation of file existance as well as
+    # TODO(b/26254158) Consider adding validation of file existence as well as
    # hashes (e.g. sha1) for consistency.
    original_filename1 = tf.constant("hello1.txt")
    tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, original_filename1)
diff --git a/tensorflow/contrib/slim/python/slim/learning_test.py b/tensorflow/contrib/slim/python/slim/learning_test.py
index cf3a878450d..83d45f6f5ad 100644
--- a/tensorflow/contrib/slim/python/slim/learning_test.py
+++ b/tensorflow/contrib/slim/python/slim/learning_test.py
@@ -840,7 +840,7 @@ class TrainTest(test.TestCase):
       # Initialize the variables.
       sess.run(variables_lib.global_variables_initializer())
 
-      # Get the intial weights and biases values.
+      # Get the initial weights and biases values.
       weights_values, biases_values = sess.run([weights, biases])
       self.assertGreater(np.linalg.norm(weights_values), 0)
       self.assertAlmostEqual(np.linalg.norm(biases_values), 0)
diff --git a/tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_loss_test.py b/tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_loss_test.py
index 89dbcd96f86..c8b4e472c99 100644
--- a/tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_loss_test.py
+++ b/tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_loss_test.py
@@ -159,7 +159,7 @@ class SparsemaxLossTest(test.TestCase):
     self.assertShapeEqual(q, tf_sparsemax_op)
 
   def _test_gradient_against_estimate(self, dtype, random, use_gpu):
-    """check sparsemax-loss Rop, aginst estimated-loss Rop"""
+    """check sparsemax-loss Rop, against estimated-loss Rop"""
     z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
     q = np.zeros((test_obs, 10)).astype(dtype)
     q[np.arange(0, test_obs), np.random.randint(0, 10, size=test_obs)] = 1
@@ -178,7 +178,7 @@ class SparsemaxLossTest(test.TestCase):
     self.assertLess(err, 1e-4)
 
   def _test_gradient_against_numpy(self, dtype, random, use_gpu):
-    """check sparsemax-loss Rop, aginst numpy Rop"""
+    """check sparsemax-loss Rop, against numpy Rop"""
     z = random.uniform(low=-3, high=3, size=(test_obs, 10))
     q = np.zeros((test_obs, 10))
     q[np.arange(0, test_obs), np.random.randint(0, 10, size=test_obs)] = 1
diff --git a/tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_test.py b/tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_test.py
index eafac1b9ae7..82d36ee9cb2 100644
--- a/tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_test.py
+++ b/tensorflow/contrib/sparsemax/python/kernel_tests/sparsemax_test.py
@@ -188,7 +188,7 @@ class SparsemaxTest(test.TestCase):
     self.assertShapeEqual(z, tf_sparsemax_op)
 
   def _test_gradient_against_estimate(self, dtype, random, use_gpu):
-    """check sparsemax Rop, aginst estimated Rop"""
+    """check sparsemax Rop, against estimated Rop"""
     z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
 
     logits = array_ops.placeholder(dtype, name='z')
@@ -204,7 +204,7 @@ class SparsemaxTest(test.TestCase):
     self.assertLess(err, 1e-4)
 
   def _test_gradient_against_numpy(self, dtype, random, use_gpu):
-    """check sparsemax Rop, aginst numpy Rop"""
+    """check sparsemax Rop, against numpy Rop"""
     z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
 
     logits = constant_op.constant(z, name='z')
diff --git a/tensorflow/contrib/tensor_forest/client/random_forest.py b/tensorflow/contrib/tensor_forest/client/random_forest.py
index 01697df086d..5f6e8119ac9 100644
--- a/tensorflow/contrib/tensor_forest/client/random_forest.py
+++ b/tensorflow/contrib/tensor_forest/client/random_forest.py
@@ -79,7 +79,7 @@ class TensorForestLossHook(session_run_hook.SessionRunHook):
     current_loss = run_values.results['current_loss']
     current_step = run_values.results['global_step']
     self.steps += 1
-    # Gaurd against the global step going backwards, which might happen
+    # Guard against the global step going backwards, which might happen
     # if we recover from something.
     if self.last_step == -1 or self.last_step > current_step:
       logging.info('TensorForestLossHook resetting last_step.')
diff --git a/tensorflow/contrib/training/python/training/evaluation.py b/tensorflow/contrib/training/python/training/evaluation.py
index 8a985fe0e2f..bc0c60c85ce 100644
--- a/tensorflow/contrib/training/python/training/evaluation.py
+++ b/tensorflow/contrib/training/python/training/evaluation.py
@@ -254,7 +254,7 @@ def checkpoints_iterator(checkpoint_dir,
         logging.info('Timed-out waiting for a checkpoint.')
         return
       if timeout_fn():
-        # The timeout_fn indicated that we are truely done.
+        # The timeout_fn indicated that we are truly done.
         return
       else:
         # The timeout_fn indicated that more checkpoints may come.
diff --git a/tensorflow/contrib/training/python/training/feeder.py b/tensorflow/contrib/training/python/training/feeder.py
index a7f43cc07e9..a5cd7c5c947 100644
--- a/tensorflow/contrib/training/python/training/feeder.py
+++ b/tensorflow/contrib/training/python/training/feeder.py
@@ -18,7 +18,7 @@
 
 This helper handles the plumbing in order to set up a feeder task to
 push generated inputs to a pool of remote consumers; or to run an
-identical feeding mechanism in a seperate thread in the same process.
+identical feeding mechanism in a separate thread in the same process.
 
 Example usage for distributed feeding:
 
@@ -331,7 +331,7 @@ class Feeder(object):
       they never close their queue. Second, they are added to the
       `Feeder.REMOTE_QUEUE_RUNNERS` collection, rather than
       `ops.GraphKeys.QUEUE_RUNNERS`, so they can be started/stopped
-      seperately.
+      separately.
 
     Args:
       queue: The queue.
diff --git a/tensorflow/contrib/training/python/training/feeder_test.py b/tensorflow/contrib/training/python/training/feeder_test.py
index 4d5cf9eff26..f3a2fee0463 100644
--- a/tensorflow/contrib/training/python/training/feeder_test.py
+++ b/tensorflow/contrib/training/python/training/feeder_test.py
@@ -156,7 +156,7 @@ class FeederTest(test.TestCase):
     coord.join()
 
   def testFeederSeparateThread(self):
-    # Start a feeder on a seperate thread, but with a shared local queue
+    # Start a feeder on a separate thread, but with a shared local queue
     servers = self._create_local_cluster(worker=1)
     coord = coordinator.Coordinator()
     feed_thread = FeederThread(self, coord, servers, 'worker', 0)
diff --git a/tensorflow/contrib/training/python/training/hparam.py b/tensorflow/contrib/training/python/training/hparam.py
index 1d177828207..2e085936997 100644
--- a/tensorflow/contrib/training/python/training/hparam.py
+++ b/tensorflow/contrib/training/python/training/hparam.py
@@ -164,7 +164,7 @@ class HParams(object):
     import argparse
     parser = argparse.ArgumentParser(description='Train my model.')
     parser.add_argument('--hparams', type=str,
-                        help='Comma seperated list of "name=value" pairs.')
+                        help='Comma separated list of "name=value" pairs.')
     args = parser.parse_args()
     ...
     def my_program():
diff --git a/tensorflow/contrib/training/python/training/training_test.py b/tensorflow/contrib/training/python/training/training_test.py
index e7c8fcd2a09..0af79cf2e36 100644
--- a/tensorflow/contrib/training/python/training/training_test.py
+++ b/tensorflow/contrib/training/python/training/training_test.py
@@ -508,7 +508,7 @@ class TrainTest(test.TestCase):
       # Initialize the variables.
       session.run(variables_lib2.global_variables_initializer())
 
-      # Get the intial weights and biases values.
+      # Get the initial weights and biases values.
       weights_values, biases_values = session.run([weights, biases])
       self.assertGreater(np.linalg.norm(weights_values), 0)
       self.assertAlmostEqual(np.linalg.norm(biases_values), 0)
diff --git a/tensorflow/core/common_runtime/visitable_allocator.h b/tensorflow/core/common_runtime/visitable_allocator.h
index c83e4a4e3a1..8edf922d11e 100644
--- a/tensorflow/core/common_runtime/visitable_allocator.h
+++ b/tensorflow/core/common_runtime/visitable_allocator.h
@@ -44,7 +44,7 @@ class VisitableAllocator : public Allocator {
 };
 
 // Needed for cases when a VisitableAllocator gets wrapped for tracking.
-// Multiple-inheritance is considered acceptible in this case because
+// Multiple-inheritance is considered acceptable in this case because
 // VisitableAllocator is a pure virtual interface and only TrackingAllocator
 // has default implementation.
 class TrackingVisitableAllocator : public TrackingAllocator,
diff --git a/tensorflow/docs_src/install/install_java.md b/tensorflow/docs_src/install/install_java.md
index 55d9c2c08f3..bf7f6db82f1 100644
--- a/tensorflow/docs_src/install/install_java.md
+++ b/tensorflow/docs_src/install/install_java.md
@@ -106,7 +106,7 @@ As an example, these steps will create a Maven project that uses TensorFlow:
 
    The preceding command should output Hello from version.  If it
-   does, you've succesfully set up TensorFlow for Java and are ready to use it in
+   does, you've successfully set up TensorFlow for Java and are ready to use it in
    Maven projects.  If not, check
    [Stack Overflow](http://stackoverflow.com/questions/tagged/tensorflow)
    for possible solutions.  You can skip reading the rest of this document.
diff --git a/tensorflow/python/debug/cli/debugger_cli_common.py b/tensorflow/python/debug/cli/debugger_cli_common.py
index 64a22e6be4a..889fc6a8f64 100644
--- a/tensorflow/python/debug/cli/debugger_cli_common.py
+++ b/tensorflow/python/debug/cli/debugger_cli_common.py
@@ -648,7 +648,7 @@ class CommandHandlerRegistry(object):
         3) the handler is found for the prefix, but it fails to return a
           RichTextLines or raise any exception.
       CommandLineExit:
-        If the command handler raises this type of exception, tihs method will
+        If the command handler raises this type of exception, this method will
         simply pass it along.
     """
     if not prefix:
diff --git a/tensorflow/python/debug/cli/profile_analyzer_cli.py b/tensorflow/python/debug/cli/profile_analyzer_cli.py
index 42440521eba..7de2c6329e0 100644
--- a/tensorflow/python/debug/cli/profile_analyzer_cli.py
+++ b/tensorflow/python/debug/cli/profile_analyzer_cli.py
@@ -55,7 +55,7 @@ class ProfileDatum(object):
 
   @property
   def exec_time(self):
-    """Measures compute function exection time plus pre- and post-processing."""
+    """Measures compute function execution time plus pre- and post-processing."""
     return self.node_exec_stats.all_end_rel_micros
 
 
diff --git a/tensorflow/python/debug/lib/debug_data.py b/tensorflow/python/debug/lib/debug_data.py
index ce4bc82e0a8..de1e1ce017d 100644
--- a/tensorflow/python/debug/lib/debug_data.py
+++ b/tensorflow/python/debug/lib/debug_data.py
@@ -975,7 +975,7 @@ class DebugDumpDir(object):
       slot = datum.output_slot
       # In some cases (e.g., system clocks with insufficient precision),
       # the upstream and downstream tensors may have identical timestamps, the
-      # following check examines this possibilty and avoids raising an error if
+      # following check examines this possibility and avoids raising an error if
       # that is the case.
       if not self._satisfied_at_timestamp(
           pending_inputs[node], datum.timestamp, start_i=i + 1):
diff --git a/tensorflow/python/debug/wrappers/hooks.py b/tensorflow/python/debug/wrappers/hooks.py
index 65713f0b714..b0344a5029e 100644
--- a/tensorflow/python/debug/wrappers/hooks.py
+++ b/tensorflow/python/debug/wrappers/hooks.py
@@ -66,7 +66,7 @@ class LocalCLIDebugHook(session_run_hook.SessionRunHook,
    """Add a tensor filter.
 
    See doc of `LocalCLIDebugWrapperSession.add_tensor_filter()` for details.
-    Override default behavior to accomodate the possibility of this method being
+    Override default behavior to accommodate the possibility of this method being
    called prior to the initialization of the underlying
    `LocalCLIDebugWrapperSession` object.
 
diff --git a/tensorflow/python/framework/subscribe.py b/tensorflow/python/framework/subscribe.py
index 91c6e33f22c..2654bca31c8 100644
--- a/tensorflow/python/framework/subscribe.py
+++ b/tensorflow/python/framework/subscribe.py
@@ -276,7 +276,7 @@ def subscribe(tensors, side_effects):
     Subscribed tensors, which are identity copies of the passed in tensors
       in the same passed in structure, but the graph has been modified
       such that these are downstream of the control dependencies for
-      the side effect graphs. Use these functionally equivelant tensors
+      the side effect graphs. Use these functionally equivalent tensors
      instead of the passed in tensors for further construction or running.
   """
   if not hasattr(side_effects, '__iter__'):
diff --git a/tensorflow/python/framework/test_util.py b/tensorflow/python/framework/test_util.py
index c3169e23a5c..ac551a6e1a4 100644
--- a/tensorflow/python/framework/test_util.py
+++ b/tensorflow/python/framework/test_util.py
@@ -250,7 +250,7 @@ class TensorFlowTestCase(googletest.TestCase):
     """Returns a unique temporary directory for the test to use.
 
     If you call this method multiple times during in a test, it will return the
-    same folder. However, accross different runs the directories will be
+    same folder. However, across different runs the directories will be
    different. This will ensure that across different runs tests will not be able
    to pollute each others environment. If you need multiple unique directories
    within a single test, you should
diff --git a/tensorflow/python/kernel_tests/linalg_ops_test.py b/tensorflow/python/kernel_tests/linalg_ops_test.py
index 153d4ab6623..2d31ac85b02 100644
--- a/tensorflow/python/kernel_tests/linalg_ops_test.py
+++ b/tensorflow/python/kernel_tests/linalg_ops_test.py
@@ -28,7 +28,7 @@ from tensorflow.python.platform import test
 
 
 def _random_pd_matrix(n, rng):
-  """Random postive definite matrix."""
+  """Random positive definite matrix."""
   temp = rng.randn(n, n)
   return temp.dot(temp.T)
 
diff --git a/tensorflow/python/layers/normalization.py b/tensorflow/python/layers/normalization.py
index f92ea9b05f5..b3d28f010f7 100644
--- a/tensorflow/python/layers/normalization.py
+++ b/tensorflow/python/layers/normalization.py
@@ -322,7 +322,7 @@ class BatchNormalization(base.Layer):
 
     def _broadcast(v):
      if needs_broadcasting and v is not None:
-        # In this case we must explictly broadcast all parameters.
+        # In this case we must explicitly broadcast all parameters.
        return array_ops.reshape(v, broadcast_shape)
      return v
 
diff --git a/tensorflow/python/ops/math_ops.py b/tensorflow/python/ops/math_ops.py
index b762205cf70..158016ff37d 100644
--- a/tensorflow/python/ops/math_ops.py
+++ b/tensorflow/python/ops/math_ops.py
@@ -2331,7 +2331,7 @@ def tensordot(a, b, axes, name=None):
     using `array_ops.transpose` and `array_ops.reshape`. The method takes a
     tensor and performs the correct transpose and reshape operation for a given
     set of indices. It returns the reshaped tensor as well as a list of indices
-    necesary to reshape the tensor again after matrix multiplication.
+    necessary to reshape the tensor again after matrix multiplication.
 
     Args:
       a: `Tensor`.
diff --git a/tensorflow/python/ops/metrics_impl.py b/tensorflow/python/ops/metrics_impl.py
index 28ed3af9d73..0d35f50894f 100644
--- a/tensorflow/python/ops/metrics_impl.py
+++ b/tensorflow/python/ops/metrics_impl.py
@@ -1735,7 +1735,7 @@ def _streaming_sparse_true_positive_at_k(labels,
     A tuple of `Variable` and update `Operation`.
 
   Raises:
-    ValueError: If `weights` is not `None` and has an incomptable shape.
+    ValueError: If `weights` is not `None` and has an incompatible shape.
   """
   with ops.name_scope(
       name, _at_k_name('true_positive', k, class_id=class_id),
@@ -1831,7 +1831,7 @@ def _streaming_sparse_false_negative_at_k(labels,
     A tuple of `Variable` and update `Operation`.
 
   Raises:
-    ValueError: If `weights` is not `None` and has an incomptable shape.
+    ValueError: If `weights` is not `None` and has an incompatible shape.
   """
   with ops.name_scope(
       name, _at_k_name('false_negative', k, class_id=class_id),
@@ -2653,7 +2653,7 @@ def _streaming_sparse_false_positive_at_k(labels,
     A tuple of `Variable` and update `Operation`.
 
   Raises:
-    ValueError: If `weights` is not `None` and has an incomptable shape.
+    ValueError: If `weights` is not `None` and has an incompatible shape.
   """
   with ops.name_scope(
       name, _at_k_name('false_positive', k, class_id=class_id),
diff --git a/tensorflow/python/ops/parsing_ops.py b/tensorflow/python/ops/parsing_ops.py
index 796ea20eb76..c2f99617316 100644
--- a/tensorflow/python/ops/parsing_ops.py
+++ b/tensorflow/python/ops/parsing_ops.py
@@ -58,7 +58,7 @@ class SparseFeature(
         ["index_key", "value_key", "dtype", "size", "already_sorted"])):
   """Configuration for parsing a sparse input feature from an `Example`.
 
-  Note, preferrably use `VarLenFeature` (possibly in combination with a
+  Note, preferably use `VarLenFeature` (possibly in combination with a
   `SequenceExample`) in order to parse out `SparseTensor`s instead of
   `SparseFeature` due to its simplicity.
 
diff --git a/tensorflow/python/training/coordinator.py b/tensorflow/python/training/coordinator.py
index fea2f8240ee..d234df71c10 100644
--- a/tensorflow/python/training/coordinator.py
+++ b/tensorflow/python/training/coordinator.py
@@ -366,7 +366,7 @@ class Coordinator(object):
     # If any thread is still alive, wait for the grace period to expire.
     # By the time this check is executed, threads may still be shutting down,
     # so we add a sleep of increasing duration to give them a chance to shut
-    # down without loosing too many cycles.
+    # down without losing too many cycles.
     # The sleep duration is limited to the remaining grace duration.
     stop_wait_secs = 0.001
     while any(t.is_alive() for t in threads) and stop_grace_period_secs >= 0.0:
diff --git a/tensorflow/python/util/deprecation.py b/tensorflow/python/util/deprecation.py
index 73fc3e24087..1e1599afb4b 100644
--- a/tensorflow/python/util/deprecation.py
+++ b/tensorflow/python/util/deprecation.py
@@ -182,7 +182,7 @@ def deprecated_args(date, instructions, *deprecated_arg_names_or_tuples):
     return d
 
   def _get_deprecated_positional_arguments(names_to_ok_vals, arg_spec):
-    """Builds a dictionary from deprecated arguments to thier spec.
+    """Builds a dictionary from deprecated arguments to their spec.
 
     Returned dict is keyed by argument name. Each value is a DeprecatedArgSpec
     with the following fields:
diff --git a/tensorflow/stream_executor/lib/statusor.h b/tensorflow/stream_executor/lib/statusor.h
index 06278d51525..bb423e390aa 100644
--- a/tensorflow/stream_executor/lib/statusor.h
+++ b/tensorflow/stream_executor/lib/statusor.h
@@ -135,7 +135,7 @@ class StatusOr {
   // operators, to support move-only types and avoid unnecessary copying.
   StatusOr(T&& value);  // NOLINT
 
-  // Move conversion operator to avoid unecessary copy.
+  // Move conversion operator to avoid unnecessary copy.
   // T must be assignable from U.
   // Not marked with explicit so the implicit conversion can happen.
   template <typename U>
diff --git a/tensorflow/tools/docs/py_guide_parser.py b/tensorflow/tools/docs/py_guide_parser.py
index 3ca6d11b847..245643cb32e 100644
--- a/tensorflow/tools/docs/py_guide_parser.py
+++ b/tensorflow/tools/docs/py_guide_parser.py
@@ -34,7 +34,7 @@ def md_files_in_dir(py_guide_src_dir):
 class PyGuideParser(object):
   """Simple parsing of a guide .md file.
 
-  Decendents can override the process_*() functions (called by process())
+  Descendants can override the process_*() functions (called by process())
   to either record infromation from the guide, or call replace_line()
   to affect the return value of process().
   """