diff --git a/tensorflow/compiler/xla/service/cpu/parallel_cpu_executable.cc b/tensorflow/compiler/xla/service/cpu/parallel_cpu_executable.cc
index 19909f4bed8..598858d4ed9 100644
--- a/tensorflow/compiler/xla/service/cpu/parallel_cpu_executable.cc
+++ b/tensorflow/compiler/xla/service/cpu/parallel_cpu_executable.cc
@@ -440,7 +440,7 @@ Status ParallelCpuExecutable::ExecuteComputeFunctions(
   // TODO(b/27458679) Manage scheduling based on in-flight concurrency limits.
   // For example, if we expect a library conv/matmul call to run at max
   // concurrency, we should not dispatch runnable instructions until the
-  // libary call is finished (to avoid expensive cache invalidation).
+  // library call is finished (to avoid expensive cache invalidation).
   Executor executor(functions, run_options, &pending, &results,
                     buffer_pointers.data(), profile_counters.data(),
                     assignment_.get());
diff --git a/tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc b/tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc
index 442585772f1..d67b48dff0c 100644
--- a/tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc
+++ b/tensorflow/compiler/xla/service/hlo_alias_analysis_test.cc
@@ -495,7 +495,7 @@ TEST_F(HloAliasAnalysisTest, NestedWhiles) {
   };
   // Build separate condition computations so the call graph is flat. The
   // callgraph is always flattened in the compiler pipeline, and the flattened
-  // callgraph enables representive interference analysis.
+  // callgraph enables representative interference analysis.
   HloComputation* condition1 =
       module_->AddEmbeddedComputation(build_cond_computation());
   HloComputation* condition2 =
diff --git a/tensorflow/contrib/boosted_trees/lib/learner/stochastic/handlers/categorical-feature-column-handler.cc b/tensorflow/contrib/boosted_trees/lib/learner/stochastic/handlers/categorical-feature-column-handler.cc
index 2f76224ff06..3a6c409f846 100644
--- a/tensorflow/contrib/boosted_trees/lib/learner/stochastic/handlers/categorical-feature-column-handler.cc
+++ b/tensorflow/contrib/boosted_trees/lib/learner/stochastic/handlers/categorical-feature-column-handler.cc
@@ -106,7 +106,7 @@ void CategoricalFeatureColumnHandler::GenerateFeatureSplitCandidates(
     NodeStats left_node_stats(learner_config, left_gradient_stats);
     NodeStats right_node_stats(learner_config, right_gradient_stats);

-    // Generate split candiate and update best split candidate for the
+    // Generate split candidate and update best split candidate for the
     // current root if needed.
     FeatureSplitCandidate split_candidate(
         slot_id_,
diff --git a/tensorflow/contrib/boosted_trees/lib/learner/stochastic/handlers/dense-quantized-feature-column-handler.cc b/tensorflow/contrib/boosted_trees/lib/learner/stochastic/handlers/dense-quantized-feature-column-handler.cc
index f43112c8a45..ca7bb71e7d0 100644
--- a/tensorflow/contrib/boosted_trees/lib/learner/stochastic/handlers/dense-quantized-feature-column-handler.cc
+++ b/tensorflow/contrib/boosted_trees/lib/learner/stochastic/handlers/dense-quantized-feature-column-handler.cc
@@ -93,7 +93,7 @@ void DenseQuantizedFeatureColumnHandler::GenerateFeatureSplitCandidates(
     NodeStats left_node_stats(learner_config, left_gradient_stats);
     NodeStats right_node_stats(learner_config, right_gradient_stats);

-    // Generate split candiate.
+    // Generate split candidate.
    const float threshold = dense_quantiles_(bucket_id);
     FeatureSplitCandidate split_candidate(
         slot_id_, CreateDenseSplitNode(dense_feature_column_, threshold),
diff --git a/tensorflow/contrib/boosted_trees/lib/learner/stochastic/handlers/sparse-quantized-feature-column-handler.cc b/tensorflow/contrib/boosted_trees/lib/learner/stochastic/handlers/sparse-quantized-feature-column-handler.cc
index c7f49a46990..a0e9efbbc50 100644
--- a/tensorflow/contrib/boosted_trees/lib/learner/stochastic/handlers/sparse-quantized-feature-column-handler.cc
+++ b/tensorflow/contrib/boosted_trees/lib/learner/stochastic/handlers/sparse-quantized-feature-column-handler.cc
@@ -109,7 +109,7 @@ void SparseQuantizedFeatureColumnHandler::GenerateFeatureSplitCandidates(
     NodeStats left_node_stats(learner_config, left_gradient_stats);
     NodeStats right_node_stats(learner_config, right_gradient_stats);

-    // Generate split candiate.
+    // Generate split candidate.
     const float threshold = sparse_quantiles_(bucket_id);
     FeatureSplitCandidate split_candidate(
         slot_id_,
@@ -147,7 +147,7 @@ void SparseQuantizedFeatureColumnHandler::GenerateFeatureSplitCandidates(
     NodeStats left_node_stats(learner_config, left_gradient_stats);
     NodeStats right_node_stats(learner_config, right_gradient_stats);

-    // Generate split candiate.
+    // Generate split candidate.
     const float threshold = sparse_quantiles_(bucket_id - 1);
     FeatureSplitCandidate split_candidate(
         slot_id_,
diff --git a/tensorflow/contrib/factorization/python/ops/factorization_ops.py b/tensorflow/contrib/factorization/python/ops/factorization_ops.py
index 14a2311a424..fb400dbceaf 100644
--- a/tensorflow/contrib/factorization/python/ops/factorization_ops.py
+++ b/tensorflow/contrib/factorization/python/ops/factorization_ops.py
@@ -800,7 +800,7 @@ class WALSModel(object):
       regularization: A tensor (scalar) that contains the normalized
         regularization term for the minibatch loss corresponding to sp_input.
       sum_weights: The sum of the weights corresponding to sp_input. This
-        can be used with unregularized loss to caluclate the root weighted
+        can be used with unregularized loss to calculate the root weighted
        squared error.
     """
     assert isinstance(sp_input, sparse_tensor.SparseTensor)
diff --git a/tensorflow/contrib/timeseries/python/timeseries/math_utils.py b/tensorflow/contrib/timeseries/python/timeseries/math_utils.py
index b7832a10cdf..2d3d8c3b1e8 100644
--- a/tensorflow/contrib/timeseries/python/timeseries/math_utils.py
+++ b/tensorflow/contrib/timeseries/python/timeseries/math_utils.py
@@ -689,11 +689,11 @@ class InputStatisticsFromMiniBatch(object):
       values = features[TrainEvalFeatures.VALUES]
     else:
       # times and values may not be available, for example during prediction. We
-      # still need to retreive our variables so that they can be read from, even
+      # still need to retrieve our variables so that they can be read from, even
       # if we're not going to update them.
       times = None
       values = None
-    # Create/retreive variables representing input statistics, initialized
+    # Create/retrieve variables representing input statistics, initialized
     # without data to avoid deadlocking if variables are initialized before
     # queue runners are started.
    with variable_scope.variable_scope("input_statistics"):
diff --git a/tensorflow/contrib/timeseries/python/timeseries/state_management.py b/tensorflow/contrib/timeseries/python/timeseries/state_management.py
index ba51a82f4c5..dc8f8fdb92b 100644
--- a/tensorflow/contrib/timeseries/python/timeseries/state_management.py
+++ b/tensorflow/contrib/timeseries/python/timeseries/state_management.py
@@ -196,7 +196,7 @@ class ChainingStateManager(_OverridableStateManager):
     return time // self._state_saving_interval

   def _get_cached_states(self, times):
-    """Retreive cached states for a batch of times."""
+    """Retrieve cached states for a batch of times."""
     read_chunk_numbers = self._get_chunk_number(times)
     looked_up_state = list(self._cached_states.lookup(
         math_ops.cast(read_chunk_numbers, dtypes.int64)))
@@ -242,7 +242,7 @@ class ChainingStateManager(_OverridableStateManager):
     # written to the next bucket). This assumes fixed missing times (i.e. if we
     # were presented with times [10, 50] we will never see times [30, 50]).
     #
-    # TODO(allenl): Retreive the highest time less than the current time rather
+    # TODO(allenl): Retrieve the highest time less than the current time rather
     # than relying on fixed bucketing.
     write_chunk_numbers = math_ops.maximum(
         self._get_chunk_number(array_ops.concat(
diff --git a/tensorflow/contrib/timeseries/python/timeseries/state_space_models/filtering_postprocessor.py b/tensorflow/contrib/timeseries/python/timeseries/state_space_models/filtering_postprocessor.py
index 163819ae2f6..7fa538a16ec 100644
--- a/tensorflow/contrib/timeseries/python/timeseries/state_space_models/filtering_postprocessor.py
+++ b/tensorflow/contrib/timeseries/python/timeseries/state_space_models/filtering_postprocessor.py
@@ -150,7 +150,7 @@ class StateInterpolatingAnomalyDetector(FilteringStepPostprocessor):
   This is simply Bayes' theorem, where p(data | anomaly) is the
   alternative/anomaly distribution, p(data | not anomaly) is the model's
   predicted distribution, and anomaly_prior_probability is the prior probability
-  of an anomaly occuring (user-specified, defaulting to 1%).
+  of an anomaly occurring (user-specified, defaulting to 1%).

   Rather than computing p(anomaly | data) directly, we use the odds ratio:

diff --git a/tensorflow/contrib/timeseries/python/timeseries/state_space_models/structural_ensemble.py b/tensorflow/contrib/timeseries/python/timeseries/state_space_models/structural_ensemble.py
index 26ab7265911..a7a80a8e3ef 100644
--- a/tensorflow/contrib/timeseries/python/timeseries/state_space_models/structural_ensemble.py
+++ b/tensorflow/contrib/timeseries/python/timeseries/state_space_models/structural_ensemble.py
@@ -70,7 +70,7 @@ class StructuralEnsemble(state_space_model.StateSpaceIndependentEnsemble):
   `observation_noise`, `level_noise`, `trend noise`, `seasonality_noise`, and
   `transient` are (typically scalar) Gaussian random variables whose variance is
-  learned from data, and that variance is not time dependant in this
+  learned from data, and that variance is not time dependent in this
   implementation. Level noise is optional due to its similarity with
   observation noise in some cases.
   Seasonality is enforced by constraining a full cycle of seasonal variables
   to have zero expectation, allowing seasonality to adapt
diff --git a/tensorflow/contrib/tpu/python/tpu/tpu_feed.py b/tensorflow/contrib/tpu/python/tpu/tpu_feed.py
index c955de524ca..3d0a532010e 100644
--- a/tensorflow/contrib/tpu/python/tpu/tpu_feed.py
+++ b/tensorflow/contrib/tpu/python/tpu/tpu_feed.py
@@ -298,7 +298,7 @@ class InfeedQueue(object):
     input_tensors is a list of lists of Tensors whose types and shapes are
     used to set the queue configuration. The length of the outer list is the
     number of shards required, and each inner list is the tuple of Tensors to
-    determine the types and shapes of the correponding shard. This method
+    determine the types and shapes of the corresponding shard. This method
     depends on the shard dimension, and calling it freezes the shard policy.

     Args:
diff --git a/tensorflow/core/framework/op_gen_lib.cc b/tensorflow/core/framework/op_gen_lib.cc
index cedfd6fc9c0..143da996a1e 100644
--- a/tensorflow/core/framework/op_gen_lib.cc
+++ b/tensorflow/core/framework/op_gen_lib.cc
@@ -73,7 +73,7 @@ bool ConsumeEquals(StringPiece* description) {
   return false;
 }

-// Split `*orig` into two pieces at the first occurence of `split_ch`.
+// Split `*orig` into two pieces at the first occurrence of `split_ch`.
// Returns whether `split_ch` was found. Afterwards, `*before_split`
// contains the maximum prefix of the input `*orig` that doesn't
// contain `split_ch`, and `*orig` contains everything after the
diff --git a/tensorflow/examples/ios/README.md b/tensorflow/examples/ios/README.md
index a412381196b..7974b8c879a 100644
--- a/tensorflow/examples/ios/README.md
+++ b/tensorflow/examples/ios/README.md
@@ -66,7 +66,7 @@ target 'YourProjectName'
 ```

 - Then you run ```pod install``` to download and install the
-   TensorFlow-experimental pod, and finaly perform
+   TensorFlow-experimental pod, and finally perform
   ```open YourProjectName.xcworkspace``` and add your code.

 - In your apps "Build Settings", make sure to add $(inherited) to sections
diff --git a/tensorflow/python/util/nest.py b/tensorflow/python/util/nest.py
index aeb44054213..b93842b3a79 100644
--- a/tensorflow/python/util/nest.py
+++ b/tensorflow/python/util/nest.py
@@ -180,7 +180,7 @@ def assert_same_structure(nest1, nest2, check_types=True):
     nest1: an arbitrarily nested structure.
     nest2: an arbitrarily nested structure.
     check_types: if `True` (default) types of sequences are checked as
-      well, incuding the keys of dictionaries. If set to `False`, for example
+      well, including the keys of dictionaries. If set to `False`, for example
       a list and a tuple of objects will look the same if they have the same
       size.

diff --git a/tensorflow/tools/docs/parser.py b/tensorflow/tools/docs/parser.py
index 1bf8a2979c8..18c3c98dc2b 100644
--- a/tensorflow/tools/docs/parser.py
+++ b/tensorflow/tools/docs/parser.py
@@ -54,7 +54,7 @@ class _Errors(object):
     """Add an error to the collection.

     Args:
-      full_name: The path to the file in which the error occured.
+      full_name: The path to the file in which the error occurred.
       message: The message to display with the error.
     """
     self._errors.append((full_name, message))
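
The WALSModel docstring touched above pairs `sum_weights` with the unregularized loss to calculate a root weighted squared error. A minimal sketch of that arithmetic, with illustrative stand-in values (the names `unregularized_loss` and `sum_weights` follow the docstring; the numbers are hypothetical):

```python
unregularized_loss = 12.5  # weighted sum of squared errors (illustrative)
sum_weights = 50.0         # total weight for the minibatch (illustrative)

# Normalizing the weighted squared error by the total weight before taking
# the square root yields an RMSE-style metric, as the docstring describes.
root_weighted_squared_error = (unregularized_loss / sum_weights) ** 0.5
print(root_weighted_squared_error)  # 0.5
```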
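ChainingStateManager buckets times into chunks with `time // self._state_saving_interval`, which is what the second state_management.py hunk's TODO is about. A standalone sketch of that bucketing, assuming an illustrative interval (the real one is configurable):

```python
state_saving_interval = 20  # illustrative; not the library's default

def get_chunk_number(time):
  # Mirrors ChainingStateManager._get_chunk_number: fixed-width bucketing
  # by integer division.
  return time // state_saving_interval

# All times inside one interval land in the same cached-state bucket.
assert get_chunk_number(0) == get_chunk_number(19) == 0
assert get_chunk_number(20) == 1
```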
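The StateInterpolatingAnomalyDetector docstring computes p(anomaly | data) through an odds ratio rather than directly. A small NumPy sketch of that identity with hypothetical log-likelihood inputs; the library itself does this inside the TensorFlow graph:

```python
import numpy as np

def anomaly_posterior(log_p_data_given_anomaly, log_p_data_given_normal,
                      anomaly_prior_probability=0.01):
  # Bayes' theorem in odds form:
  #   odds(anomaly | data) = likelihood_ratio * prior_odds
  prior_odds = anomaly_prior_probability / (1.0 - anomaly_prior_probability)
  # Take the likelihood ratio in log space for numerical stability.
  likelihood_ratio = np.exp(log_p_data_given_anomaly - log_p_data_given_normal)
  posterior_odds = likelihood_ratio * prior_odds
  # Convert odds back to a probability.
  return posterior_odds / (1.0 + posterior_odds)

# Data 100x more likely under the anomaly distribution: posterior ~= 0.5.
print(anomaly_posterior(np.log(1e-2), np.log(1e-4)))
```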
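The StructuralEnsemble docstring constrains a full cycle of seasonal variables to have zero expectation. One common parameterization that achieves this, shown as a sketch of the general technique rather than the model's exact state transition:

```python
import numpy as np

num_seasons = 4
# Track num_seasons - 1 free seasonal effects; the remaining season's effect
# is the negated sum of the others, so every full cycle sums to zero.
free_effects = np.array([0.5, -0.2, 0.1])
current_effect = -free_effects.sum()
full_cycle = np.concatenate([[current_effect], free_effects])
assert abs(full_cycle.sum()) < 1e-12  # zero expectation over one cycle
```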
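The op_gen_lib.cc comment describes splitting a string at the first occurrence of a character, leaving the suffix in `*orig`. A Python analog of that contract; note the hunk cuts off before specifying the not-found case, so that branch is an assumption:

```python
def split_at_first(orig, split_ch):
  # Returns (found, before_split, remaining), mirroring the C++ comment:
  # before_split gets the maximal prefix without split_ch, and the "orig"
  # slot gets everything after the first occurrence.
  idx = orig.find(split_ch)
  if idx == -1:
    return False, orig, orig  # not-found behavior assumed, not documented
  return True, orig[:idx], orig[idx + 1:]

assert split_at_first("name=value", "=") == (True, "name", "value")
```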
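The assert_same_structure docstring says `check_types=False` lets a list and a tuple of the same size match. A quick usage check of that documented behavior:

```python
from tensorflow.python.util import nest

# Same nesting and sizes, different sequence types.
nest.assert_same_structure([1, 2, 3], (4, 5, 6), check_types=False)  # passes

try:
  nest.assert_same_structure([1, 2, 3], (4, 5, 6))  # check_types defaults True
except TypeError:
  print("list vs. tuple rejected when sequence types are checked")
```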