diff --git a/tensorflow/c/eager/c_api_experimental.h b/tensorflow/c/eager/c_api_experimental.h
index 92132b078d7..0165eb3b781 100644
--- a/tensorflow/c/eager/c_api_experimental.h
+++ b/tensorflow/c/eager/c_api_experimental.h
@@ -27,7 +27,7 @@ extern "C" {
 // creating a new op every time. If `raw_device_name` is `NULL` or empty, it
 // does not set the device name. If it's not `NULL`, then it attempts to parse
 // and set the device name. It's effectively `TFE_OpSetDevice`, but it is faster
-// than seperately calling it because if the existing op has the same
+// than separately calling it because if the existing op has the same
 // `raw_device_name`, it skips parsing and just leave as it is.
 TF_CAPI_EXPORT extern void TFE_OpReset(TFE_Op* op_to_reset,
                                        const char* op_or_function_name,
diff --git a/tensorflow/c/experimental/filesystem/modular_filesystem_test.cc b/tensorflow/c/experimental/filesystem/modular_filesystem_test.cc
index 1755b1a14f0..53e247cd038 100644
--- a/tensorflow/c/experimental/filesystem/modular_filesystem_test.cc
+++ b/tensorflow/c/experimental/filesystem/modular_filesystem_test.cc
@@ -1569,7 +1569,7 @@ TEST_P(ModularFileSystemTest, TestRoundTrip) {
   if (!status.ok())
     GTEST_SKIP() << "NewRandomAccessFile() not supported: " << status;
 
-  char scratch[64 /* big enough to accomodate test_data */] = {0};
+  char scratch[64 /* big enough to accommodate test_data */] = {0};
   StringPiece result;
   status = read_file->Read(0, test_data.size(), &result, scratch);
   EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
diff --git a/tensorflow/compiler/mlir/xla/transforms/legalize_tf.cc b/tensorflow/compiler/mlir/xla/transforms/legalize_tf.cc
index 68b14a63ef4..da9815e3a90 100644
--- a/tensorflow/compiler/mlir/xla/transforms/legalize_tf.cc
+++ b/tensorflow/compiler/mlir/xla/transforms/legalize_tf.cc
@@ -937,7 +937,7 @@ class ConvertFusedBatchNormGradBase
     // Gets the result values.
     Value x_backprop, scale_backprop, offset_backprop;
     if (op.is_training()) {  // training
-      // TODO(b/145536565): handle GPU logic seperately.
+      // TODO(b/145536565): handle GPU logic separately.
       // Infers the output type with the converted `act`.
       Type feature_type = RankedTensorType::get(
           {GetDimSize(act_type, feature_dim)}, kernel_type);
diff --git a/tensorflow/core/grappler/optimizers/data/auto_shard.cc b/tensorflow/core/grappler/optimizers/data/auto_shard.cc
index 7ed80a1056b..ddcd77d0a28 100644
--- a/tensorflow/core/grappler/optimizers/data/auto_shard.cc
+++ b/tensorflow/core/grappler/optimizers/data/auto_shard.cc
@@ -405,7 +405,7 @@ Status OptimizeGraph(const GrapplerItem& item, int64 num_workers, int64 index,
   // the latest occurrence of a ReaderDataset (e.g. CSVDataset, TFRecordDataset,
   // etc...). We then add a shard after that dataset to shard the outputs of
   // that dataset, in effect giving a piece to each worker. Finally, we remove
-  // occurences from randomness from before that point in the graph (e.g. things
+  // occurrences of randomness from before that point in the graph (e.g. things
   // like ShuffleDataset) to ensure that `shard` returns a sensible result.
   switch (policy) {
     case AutoShardPolicy::OFF:
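Note: the rewrite described in the comment above is the graph-level half of the user-facing auto-shard option in `tf.data`. A minimal sketch of selecting the policy from Python, assuming the TF 2.x public API (the file names are invented):

    import tensorflow as tf

    # Hypothetical reader-based input pipeline; under a distribution strategy,
    # the auto-shard rewrite inserts a shard after the reader so each worker
    # receives a distinct piece of the data.
    dataset = tf.data.TFRecordDataset(["train-0.tfrecord", "train-1.tfrecord"])

    options = tf.data.Options()
    options.experimental_distribute.auto_shard_policy = (
        tf.data.experimental.AutoShardPolicy.AUTO)
    dataset = dataset.with_options(options)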
diff --git a/tensorflow/core/protobuf/eager_service.proto b/tensorflow/core/protobuf/eager_service.proto
index 4335d87309a..bc773e14605 100644
--- a/tensorflow/core/protobuf/eager_service.proto
+++ b/tensorflow/core/protobuf/eager_service.proto
@@ -248,7 +248,7 @@ service EagerService {
 
   // Contexts are always created with a deadline and no RPCs within a deadline
   // will trigger a context garbage collection. KeepAlive calls can be used to
-  // delay this. It can also be used to validate the existance of a context ID
+  // delay this. It can also be used to validate the existence of a context ID
   // on remote eager worker. If the context is on remote worker, return the same
   // ID and the current context view ID. This is useful for checking if the
   // remote worker (potentially with the same task name and hostname / port) is
diff --git a/tensorflow/lite/experimental/ruy/prepacked_cache_test.cc b/tensorflow/lite/experimental/ruy/prepacked_cache_test.cc
index b584cb8da7e..63cb855192c 100644
--- a/tensorflow/lite/experimental/ruy/prepacked_cache_test.cc
+++ b/tensorflow/lite/experimental/ruy/prepacked_cache_test.cc
@@ -156,7 +156,7 @@ TEST(PrepackedCacheTest, TestCacheOnCacheable) {
   dst.data = dst_data;
   ruy::BasicSpec spec;
 
-  // Perform the multiplication and confirm no caching occured.
+  // Perform the multiplication and confirm no caching occurred.
   ruy::Mul(lhs, rhs, spec, &context, &dst);
   EXPECT_EQ(cache->TotalSize(), 0);
diff --git a/tensorflow/lite/micro/tools/make/transform_arduino_source.py b/tensorflow/lite/micro/tools/make/transform_arduino_source.py
index c5c74b7a131..9883fc13bc1 100644
--- a/tensorflow/lite/micro/tools/make/transform_arduino_source.py
+++ b/tensorflow/lite/micro/tools/make/transform_arduino_source.py
@@ -41,7 +41,7 @@ def replace_includes(line, supplied_headers_list):
 
 
 def replace_main(line):
-  """Updates any occurences of a bare main definition to the Arduino equivalent."""
+  """Updates any occurrences of a bare main definition to the Arduino equivalent."""
   main_match = re.match(r'(.*int )(main)(\(.*)', line)
   if main_match:
     line = main_match.group(1) + 'tflite_micro_main' + main_match.group(3)
diff --git a/tensorflow/lite/micro/tools/make/transform_source.py b/tensorflow/lite/micro/tools/make/transform_source.py
index 7957476121e..def6eab80ff 100644
--- a/tensorflow/lite/micro/tools/make/transform_source.py
+++ b/tensorflow/lite/micro/tools/make/transform_source.py
@@ -48,7 +48,7 @@ def replace_arduino_includes(line, supplied_headers_list):
 
 
 def replace_arduino_main(line):
-  """Updates any occurences of a bare main definition to the Arduino equivalent."""
+  """Updates any occurrences of a bare main definition to the Arduino equivalent."""
   main_match = re.match(r'(.*int )(main)(\(.*)', line)
   if main_match:
     line = main_match.group(1) + 'tflite_micro_main' + main_match.group(3)
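Note: the two transform scripts above carry identical copies of the helper whose docstring is being fixed. A standalone sketch of what that helper does, taken from the code shown in the hunks (the sample input line is invented):

    import re

    def replace_main(line):
      """Updates any occurrences of a bare main definition to the Arduino equivalent."""
      main_match = re.match(r'(.*int )(main)(\(.*)', line)
      if main_match:
        line = main_match.group(1) + 'tflite_micro_main' + main_match.group(3)
      return line

    # The bare entry point is renamed so the Arduino core can supply its own main().
    print(replace_main('int main(int argc, char** argv) {'))
    # -> int tflite_micro_main(int argc, char** argv) {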
diff --git a/tensorflow/lite/toco/logging/conversion_log_util.h b/tensorflow/lite/toco/logging/conversion_log_util.h
index 0cd1a537b08..9ed688085b6 100644
--- a/tensorflow/lite/toco/logging/conversion_log_util.h
+++ b/tensorflow/lite/toco/logging/conversion_log_util.h
@@ -32,7 +32,7 @@ std::vector<std::string> GetOperatorNames(const Model& model);
 // Counts the number of different types of operators in the model:
 // Built-in ops, custom ops and select ops.
 // Each map is mapping from the name of the operator (such as 'Conv') to its
-// total number of occurences in the model.
+// total number of occurrences in the model.
 void CountOperatorsByType(const Model& model,
                           std::map<std::string, int>* built_in_ops,
                           std::map<std::string, int>* custom_ops,
diff --git a/tensorflow/python/data/experimental/kernel_tests/bucket_by_sequence_length_test.py b/tensorflow/python/data/experimental/kernel_tests/bucket_by_sequence_length_test.py
index d829863b994..0dd7ae1f083 100644
--- a/tensorflow/python/data/experimental/kernel_tests/bucket_by_sequence_length_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/bucket_by_sequence_length_test.py
@@ -107,7 +107,7 @@ class BucketBySequenceLengthTest(test_base.DatasetTestBase,
       # Calculate the expected occurrence of individual batch sizes.
       expected_batch_sizes[length] = \
           [batch_size] * (bucket_elements // batch_size)
-      # Calculate the expected occurence of individual sequence lengths.
+      # Calculate the expected occurrence of individual sequence lengths.
       expected_lengths.extend([length] * (bucket_elements // batch_size))
 
     def build_dataset(sparse):
diff --git a/tensorflow/python/keras/layers/preprocessing/categorical_encoding.py b/tensorflow/python/keras/layers/preprocessing/categorical_encoding.py
index 3d45fb41b3c..87276943255 100644
--- a/tensorflow/python/keras/layers/preprocessing/categorical_encoding.py
+++ b/tensorflow/python/keras/layers/preprocessing/categorical_encoding.py
@@ -307,7 +307,7 @@ class _CategoricalEncodingCombiner(Combiner):
          # Any newly created token counts in 'base_accumulator''s
          # per_doc_count_dict will have a last_doc_id of -1. This is always
          # less than the next doc id (which are strictly positive), so any
-         # future occurences are guaranteed to be counted.
+         # future occurrences are guaranteed to be counted.
          base_accumulator.per_doc_count_dict[token]["count"] += value["count"]
 
     return base_accumulator
diff --git a/tensorflow/python/keras/layers/preprocessing/text_vectorization.py b/tensorflow/python/keras/layers/preprocessing/text_vectorization.py
index a315df00194..4e1e9b4aca5 100644
--- a/tensorflow/python/keras/layers/preprocessing/text_vectorization.py
+++ b/tensorflow/python/keras/layers/preprocessing/text_vectorization.py
@@ -756,7 +756,7 @@ class _TextVectorizationCombiner(Combiner):
          # Any newly created token counts in 'base_accumulator''s
          # per_doc_count_dict will have a last_doc_id of -1. This is always
          # less than the next doc id (which are strictly positive), so any
-         # future occurences are guaranteed to be counted.
+         # future occurrences are guaranteed to be counted.
          base_accumulator.per_doc_count_dict[token]["count"] += value["count"]
 
     return base_accumulator
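Note: the two preprocessing hunks above fix the same comment about merging combiner accumulators. A simplified sketch of the merge convention that comment documents (the dict layout is assumed from the comment, not taken from the actual classes):

    def merge_per_doc_counts(base_per_doc_count_dict, new_counts):
      """Simplified: merge token counts, seeding new tokens with last_doc_id -1."""
      for token, value in new_counts.items():
        if token not in base_per_doc_count_dict:
          # A last_doc_id of -1 is less than any real (strictly positive) doc
          # id, so future occurrences of the token are guaranteed to be counted.
          base_per_doc_count_dict[token] = {"count": 0, "last_doc_id": -1}
        base_per_doc_count_dict[token]["count"] += value["count"]
      return base_per_doc_count_dict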
diff --git a/tensorflow/python/ops/image_grad.py b/tensorflow/python/ops/image_grad.py
index 3b3bd015f13..23b7e81ec75 100644
--- a/tensorflow/python/ops/image_grad.py
+++ b/tensorflow/python/ops/image_grad.py
@@ -173,8 +173,8 @@ def _RGBToHSVGrad(op, grad):
   This function is a piecewise continuous function as defined here:
   https://en.wikipedia.org/wiki/HSL_and_HSV#From_RGB
 
-  We perform the multi variate derivative and compute all partial derivates
-  seperately before adding them in the end. Formulas are given before each
+  We perform the multivariate derivative and compute all partial derivatives
+  separately before adding them at the end. Formulas are given before each
   partial derivative calculation.
 
   Args:
diff --git a/tensorflow/python/tpu/client/pip_package/README b/tensorflow/python/tpu/client/pip_package/README
index 301365c906e..b8877a506e3 100644
--- a/tensorflow/python/tpu/client/pip_package/README
+++ b/tensorflow/python/tpu/client/pip_package/README
@@ -1,3 +1,3 @@
-Client responsible for communicating the Cloud TPU API. Released seperately from tensorflow.
+Client responsible for communicating with the Cloud TPU API. Released separately from tensorflow.
 
 https://pypi.org/project/cloud-tpu-client/
\ No newline at end of file
diff --git a/tensorflow/python/training/tracking/data_structures.py b/tensorflow/python/training/tracking/data_structures.py
index 53f6eacd886..76a920eb122 100644
--- a/tensorflow/python/training/tracking/data_structures.py
+++ b/tensorflow/python/training/tracking/data_structures.py
@@ -434,8 +434,8 @@ class ListWrapper(
 
   @_non_append_mutation.setter
   def _non_append_mutation(self, value):
-    # Trackable only cares that a mutation occured at some point; when
-    # attempting to save it checks whether a mutation occured and the object is
+    # Trackable only cares that a mutation occurred at some point; when
+    # attempting to save it checks whether a mutation occurred and the object is
     # in a "dirty" state but otherwise the specifics of how it got to that state
     # are ignored. By contrast, the attribute cache needs to signal the mutation
     # immediately since a caller could query the value of an attribute (And
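Note: a rough illustration of the dirty-state tracking the last hunk's comment describes, assuming TF 2.x attribute tracking (the attribute name is invented, and the wrapper type is an implementation detail):

    import tensorflow as tf

    m = tf.Module()
    m.values = [tf.Variable(1.0), tf.Variable(2.0)]
    # Attribute assignment wraps the plain list so Trackable can observe mutations.
    print(type(m.values).__name__)  # ListWrapper

    # A non-append mutation such as a deletion is the kind of change the setter
    # above records; attempting to checkpoint the module afterwards should fail
    # with an error about the mutated list.
    del m.values[0]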