From bd8e308b4cff8941a7abc0424f29fcb23c0849e9 Mon Sep 17 00:00:00 2001 From: Kazuaki Ishizaki Date: Tue, 10 Dec 2019 15:02:40 +0900 Subject: [PATCH] minor spelling tweaks --- .../converters/conditional_expressions.py | 2 +- .../autograph/converters/logical_expressions.py | 2 +- .../autograph/converters/return_statements.py | 2 +- .../converters/return_statements_test.py | 2 +- tensorflow/python/autograph/core/converter.py | 4 ++-- .../python/autograph/core/function_wrappers.py | 2 +- .../autograph/core/function_wrappers_test.py | 2 +- tensorflow/python/autograph/core/naming.py | 2 +- tensorflow/python/autograph/impl/api.py | 2 +- tensorflow/python/autograph/impl/api_test.py | 2 +- tensorflow/python/autograph/impl/conversion.py | 4 ++-- .../python/autograph/operators/control_flow.py | 4 ++-- .../python/autograph/operators/py_builtins.py | 6 +++--- tensorflow/python/autograph/pyct/cfg.py | 2 +- .../pyct/common_transformers/anf_test.py | 4 ++-- tensorflow/python/autograph/pyct/origin_info.py | 2 +- tensorflow/python/autograph/pyct/parser.py | 2 +- .../autograph/pyct/static_analysis/activity.py | 2 +- .../pyct/static_analysis/reaching_definitions.py | 2 +- tensorflow/python/autograph/pyct/transformer.py | 2 +- tensorflow/python/client/virtual_gpu_test.py | 2 +- tensorflow/python/compat/compat.py | 2 +- tensorflow/python/compat/compat_test.py | 2 +- .../python/compat/disable_v2_behavior_test.py | 2 +- .../tensorrt/test/biasadd_matmul_test.py | 2 +- .../tensorrt/test/dynamic_input_shapes_test.py | 2 +- .../tensorrt/test/identity_output_test.py | 2 +- .../python/compiler/tensorrt/test/int32_test.py | 2 +- .../python/compiler/tensorrt/trt_convert.py | 8 ++++---- .../python/compiler/tensorrt/trt_convert_test.py | 2 +- .../python/data/benchmarks/map_benchmark.py | 2 +- .../python/data/experimental/ops/readers.py | 2 +- .../data/kernel_tests/from_generator_test.py | 2 +- tensorflow/python/data/ops/dataset_ops.py | 6 +++--- tensorflow/python/data/util/sparse.py | 2 +- tensorflow/python/data/util/structure_test.py | 2 +- tensorflow/python/debug/cli/curses_ui.py | 2 +- .../python/debug/cli/debugger_cli_common.py | 2 +- .../python/debug/cli/debugger_cli_common_test.py | 8 ++++---- tensorflow/python/debug/cli/evaluator.py | 2 +- .../debug/cli/profile_analyzer_cli_test.py | 4 ++-- .../python/debug/cli/tensor_format_test.py | 2 +- .../python/debug/lib/check_numerics_callback.py | 2 +- .../debug/lib/check_numerics_callback_test.py | 6 +++--- tensorflow/python/debug/lib/debug_data.py | 2 +- tensorflow/python/debug/lib/debug_gradients.py | 6 +++--- .../debug/lib/debug_graph_reconstruction_test.py | 2 +- tensorflow/python/debug/lib/debug_v2_ops_test.py | 2 +- tensorflow/python/debug/lib/dumping_callback.py | 2 +- .../python/debug/lib/dumping_callback_test.py | 2 +- .../python/debug/lib/session_debug_grpc_test.py | 4 ++-- tensorflow/python/debug/lib/source_remote.py | 2 +- tensorflow/python/debug/lib/source_utils.py | 2 +- tensorflow/python/debug/lib/source_utils_test.py | 2 +- .../python/debug/wrappers/dumping_wrapper.py | 2 +- tensorflow/python/debug/wrappers/framework.py | 4 ++-- .../python/debug/wrappers/local_cli_wrapper.py | 2 +- .../debug/wrappers/local_cli_wrapper_test.py | 2 +- tensorflow/python/distribute/all_reduce.py | 2 +- .../cluster_resolver/slurm_cluster_resolver.py | 4 ++-- .../distribute/collective_all_reduce_strategy.py | 4 ++-- tensorflow/python/distribute/cross_device_ops.py | 2 +- .../python/distribute/cross_device_utils.py | 2 +- tensorflow/python/distribute/device_util.py | 2 +- 
.../python/distribute/distribute_coordinator.py | 8 ++++---- .../distribute/distribute_coordinator_test.py | 4 ++-- tensorflow/python/distribute/distribute_lib.py | 2 +- .../python/distribute/estimator_training.py | 2 +- tensorflow/python/distribute/input_lib.py | 2 +- .../python/distribute/mirrored_strategy.py | 2 +- .../python/distribute/mirrored_strategy_test.py | 2 +- .../model_collection/model_collection_base.py | 2 +- .../distribute/model_collection/simple_models.py | 2 +- .../distribute/parameter_server_strategy.py | 2 +- .../distribute/parameter_server_strategy_test.py | 4 ++-- tensorflow/python/distribute/reduce_util.py | 2 +- .../python/distribute/saved_model_test_base.py | 2 +- tensorflow/python/distribute/values_test.py | 2 +- tensorflow/python/eager/benchmarks_test.py | 2 +- tensorflow/python/eager/def_function.py | 6 +++--- tensorflow/python/eager/forwardprop_test.py | 2 +- tensorflow/python/eager/forwardprop_util.py | 2 +- tensorflow/python/eager/function.py | 16 ++++++++-------- tensorflow/python/eager/function_test.py | 2 +- tensorflow/python/eager/pywrap_tensor.cc | 8 ++++---- tensorflow/python/eager/pywrap_tfe.h | 4 ++-- tensorflow/python/eager/tape.py | 2 +- tensorflow/python/eager/wrap_function.py | 6 +++--- .../python/feature_column/feature_column_test.py | 6 +++--- .../python/feature_column/feature_column_v2.py | 2 +- .../feature_column/feature_column_v2_test.py | 6 +++--- .../python/feature_column/serialization.py | 2 +- tensorflow/python/framework/config_test.py | 2 +- .../python/framework/convert_to_constants.py | 2 +- tensorflow/python/framework/device_spec.py | 4 ++-- tensorflow/python/framework/func_graph.py | 4 ++-- tensorflow/python/framework/function.py | 2 +- .../python/framework/function_def_to_graph.py | 2 +- tensorflow/python/framework/graph_util_impl.py | 2 +- tensorflow/python/framework/meta_graph.py | 2 +- tensorflow/python/framework/op_callbacks.py | 2 +- tensorflow/python/framework/op_callbacks_test.py | 8 ++++---- .../python/framework/op_def_library_test.py | 2 +- tensorflow/python/framework/ops.py | 10 +++++----- tensorflow/python/framework/random_seed.py | 2 +- tensorflow/python/framework/smart_cond.py | 2 +- tensorflow/python/framework/subscribe.py | 2 +- tensorflow/python/framework/tensor_like.py | 2 +- tensorflow/python/framework/tensor_shape.py | 2 +- tensorflow/python/framework/tensor_shape_test.py | 4 ++-- tensorflow/python/framework/tensor_util.py | 2 +- tensorflow/python/framework/test_combinations.py | 2 +- tensorflow/python/framework/test_util.py | 8 ++++---- tensorflow/python/framework/test_util_test.py | 8 ++++---- tensorflow/python/framework/type_spec.py | 4 ++-- tensorflow/python/grappler/cost_analyzer.cc | 4 ++-- tensorflow/python/grappler/item_test.py | 2 +- tensorflow/python/keras/backend_config_test.py | 2 +- .../distribute/distributed_training_utils.py | 4 ++-- .../distribute/keras_correctness_test_base.py | 6 +++--- .../distribute/multi_worker_callback_tf1_test.py | 2 +- .../multi_worker_fault_tolerance_test.py | 8 ++++---- tensorflow/python/keras/engine/base_layer.py | 2 +- tensorflow/python/keras/engine/data_adapter.py | 4 ++-- tensorflow/python/keras/engine/network_test.py | 4 ++-- tensorflow/python/keras/engine/training.py | 4 ++-- .../python/keras/engine/training_distributed.py | 2 +- tensorflow/python/keras/engine/training_utils.py | 4 ++-- tensorflow/python/keras/engine/training_v1.py | 6 +++--- tensorflow/python/keras/layers/normalization.py | 4 ++-- .../layers/preprocessing/image_preprocessing.py | 2 +- 
.../layers/preprocessing/text_vectorization.py | 2 +- .../preprocessing/text_vectorization_test.py | 2 +- tensorflow/python/keras/layers/recurrent.py | 2 +- tensorflow/python/keras/layers/recurrent_test.py | 4 ++-- .../keras/layers/rnn_cell_wrapper_v2_test.py | 2 +- .../python/keras/layers/serialization_test.py | 2 +- tensorflow/python/keras/metrics.py | 4 ++-- .../python/keras/optimizer_v2/optimizer_v2.py | 4 ++-- .../keras/optimizer_v2/optimizer_v2_test.py | 2 +- tensorflow/python/keras/premade/wide_deep.py | 2 +- .../python/keras/saving/saved_model/save_impl.py | 2 +- .../keras/saving/saved_model/saved_model_test.py | 2 +- .../saving/saved_model/serialized_attributes.py | 4 ++-- .../python/keras/tests/integration_test.py | 2 +- tensorflow/python/keras/utils/conv_utils.py | 2 +- tensorflow/python/keras/utils/io_utils_test.py | 2 +- tensorflow/python/keras/utils/metrics_utils.py | 2 +- .../kernel_tests/bias_op_deterministic_test.py | 2 +- tensorflow/python/kernel_tests/clip_ops_test.py | 4 ++-- .../python/kernel_tests/critical_section_test.py | 2 +- .../distributions/special_math_test.py | 4 ++-- tensorflow/python/kernel_tests/eig_op_test.py | 2 +- tensorflow/python/kernel_tests/einsum_op_test.py | 2 +- .../kernel_tests/fractional_max_pool_op_test.py | 4 ++-- .../linalg/linear_operator_diag_test.py | 2 +- .../linalg/linear_operator_full_matrix_test.py | 2 +- .../linear_operator_low_rank_update_test.py | 2 +- .../linear_operator_lower_triangular_test.py | 2 +- .../python/kernel_tests/linalg_grad_test.py | 4 ++-- tensorflow/python/kernel_tests/metrics_test.py | 4 ++-- tensorflow/python/kernel_tests/numerics_test.py | 6 +++--- .../python/kernel_tests/pooling_ops_test.py | 4 ++-- tensorflow/python/kernel_tests/py_func_test.py | 2 +- tensorflow/python/kernel_tests/rnn_test.py | 2 +- .../python/kernel_tests/scatter_nd_ops_test.py | 2 +- .../kernel_tests/segment_reduction_ops_test.py | 2 +- .../kernel_tests/self_adjoint_eig_op_test.py | 2 +- tensorflow/python/kernel_tests/svd_op_test.py | 2 +- tensorflow/python/kernel_tests/template_test.py | 2 +- tensorflow/python/kernel_tests/while_v2_test.py | 2 +- tensorflow/python/layers/base_test.py | 2 +- tensorflow/python/layers/utils.py | 4 ++-- tensorflow/python/lib/core/py_func.cc | 2 +- tensorflow/python/lib/core/pybind11_lib.h | 2 +- tensorflow/python/ops/array_grad.py | 4 ++-- tensorflow/python/ops/clip_ops.py | 4 ++-- tensorflow/python/ops/clip_ops_test.py | 6 +++--- tensorflow/python/ops/collective_ops.py | 2 +- tensorflow/python/ops/control_flow_ops.py | 2 +- tensorflow/python/ops/control_flow_util.py | 2 +- tensorflow/python/ops/control_flow_util_v2.py | 2 +- tensorflow/python/ops/control_flow_v2_toggles.py | 2 +- tensorflow/python/ops/ctc_ops.py | 2 +- tensorflow/python/ops/data_flow_ops.py | 2 +- .../python/ops/distributions/bijector_impl.py | 2 +- .../python/ops/distributions/distribution.py | 4 ++-- .../python/ops/distributions/special_math.py | 2 +- tensorflow/python/ops/embedding_ops.py | 4 ++-- tensorflow/python/ops/image_grad.py | 2 +- tensorflow/python/ops/image_ops_impl.py | 2 +- .../ops/linalg/linear_operator_circulant.py | 2 +- .../ops/linalg/linear_operator_test_util.py | 2 +- .../python/ops/linalg/linear_operator_util.py | 2 +- tensorflow/python/ops/linalg_grad.py | 2 +- tensorflow/python/ops/linalg_ops.py | 2 +- tensorflow/python/ops/lookup_ops.py | 4 ++-- tensorflow/python/ops/math_ops.py | 2 +- tensorflow/python/ops/metrics_impl.py | 4 ++-- tensorflow/python/ops/nn_impl.py | 4 ++-- tensorflow/python/ops/nn_ops.py | 4 ++-- 
tensorflow/python/ops/nn_test.py | 10 +++++----- tensorflow/python/ops/op_selector_test.py | 2 +- tensorflow/python/ops/parallel_for/gradients.py | 2 +- tensorflow/python/ops/parallel_for/pfor.py | 8 ++++---- .../python/ops/ragged/ragged_concat_op_test.py | 2 +- .../python/ops/ragged/ragged_concat_ops.py | 2 +- tensorflow/python/ops/ragged/ragged_config.py | 2 +- .../python/ops/ragged/ragged_conversion_ops.py | 2 +- tensorflow/python/ops/ragged/ragged_getitem.py | 2 +- tensorflow/python/ops/ragged/ragged_tensor.py | 2 +- .../python/ops/ragged/ragged_tensor_shape.py | 4 ++-- .../python/ops/ragged/ragged_tensor_test.py | 2 +- .../ops/ragged/ragged_to_tensor_op_test.py | 4 ++-- tensorflow/python/ops/random_ops.py | 4 ++-- tensorflow/python/ops/rnn_cell_wrapper_impl.py | 4 ++-- tensorflow/python/ops/script_ops.py | 4 ++-- tensorflow/python/ops/special_math_ops_test.py | 2 +- tensorflow/python/ops/stateful_random_ops.py | 2 +- .../python/ops/structured/structured_tensor.py | 2 +- tensorflow/python/ops/summary_ops_v2.py | 2 +- tensorflow/python/ops/template.py | 2 +- tensorflow/python/ops/tensor_array_grad.py | 2 +- tensorflow/python/ops/while_v2.py | 2 +- .../ops/while_v2_indexed_slices_rewriter.py | 8 ++++---- .../python/profiler/internal/flops_registry.py | 4 ++-- tensorflow/python/profiler/model_analyzer.py | 2 +- .../python/profiler/model_analyzer_test.py | 2 +- tensorflow/python/saved_model/builder_impl.py | 2 +- .../saved_model/function_deserialization.py | 6 +++--- tensorflow/python/saved_model/load_test.py | 2 +- .../saved_model/model_utils/export_test.py | 4 ++-- tensorflow/python/saved_model/utils_impl.py | 4 ++-- tensorflow/python/tools/freeze_graph.py | 4 ++-- tensorflow/python/tools/saved_model_cli.py | 2 +- tensorflow/python/tpu/bfloat16_test.py | 2 +- tensorflow/python/tpu/feature_column.py | 2 +- tensorflow/python/tpu/ops/tpu_ops.py | 2 +- tensorflow/python/tpu/session_support.py | 2 +- tensorflow/python/tpu/tensor_tracer.py | 4 ++-- tensorflow/python/tpu/tensor_tracer_flags.py | 2 +- tensorflow/python/tpu/tpu.py | 2 +- tensorflow/python/tpu/tpu_embedding.py | 8 ++++---- tensorflow/python/tpu/tpu_embedding_gradient.py | 4 ++-- tensorflow/python/tpu/tpu_system_metadata.py | 2 +- .../python/training/checkpoint_management.py | 2 +- .../experimental/loss_scale_optimizer.py | 12 ++++++------ .../python/training/monitored_session_test.py | 4 ++-- tensorflow/python/training/optimizer_test.py | 2 +- tensorflow/python/training/saver.py | 2 +- tensorflow/python/training/saver_test.py | 2 +- .../python/training/sync_replicas_optimizer.py | 4 ++-- .../training/sync_replicas_optimizer_test.py | 2 +- .../python/training/tracking/tracking_test.py | 2 +- .../tracking/util_with_v1_optimizers_test.py | 2 +- .../python/util/example_parser_configuration.py | 2 +- tensorflow/python/util/tf_inspect.py | 2 +- tensorflow/python/util/util.h | 2 +- 258 files changed, 392 insertions(+), 392 deletions(-) diff --git a/tensorflow/python/autograph/converters/conditional_expressions.py b/tensorflow/python/autograph/converters/conditional_expressions.py index 125ef5375be..44ab6dee926 100644 --- a/tensorflow/python/autograph/converters/conditional_expressions.py +++ b/tensorflow/python/autograph/converters/conditional_expressions.py @@ -23,7 +23,7 @@ from tensorflow.python.autograph.pyct import templates class ConditionalExpressionTransformer(converter.Base): - """Converts contitional expressions to functional form.""" + """Converts conditional expressions to functional form.""" def visit_IfExp(self, node): 
return templates.replace_as_expression( diff --git a/tensorflow/python/autograph/converters/logical_expressions.py b/tensorflow/python/autograph/converters/logical_expressions.py index 6f2c0ca029b..615dc21052f 100644 --- a/tensorflow/python/autograph/converters/logical_expressions.py +++ b/tensorflow/python/autograph/converters/logical_expressions.py @@ -24,7 +24,7 @@ from tensorflow.python.autograph.core import converter from tensorflow.python.autograph.pyct import parser from tensorflow.python.autograph.pyct import templates -# TODO(mdan): Properly extrack boolean ops according to lazy eval rules. +# TODO(mdan): Properly extract boolean ops according to lazy eval rules. # Note that this isn't completely safe either, because tensors may have control # dependencies. # Note that for loops that should be done after the loop was converted to diff --git a/tensorflow/python/autograph/converters/return_statements.py b/tensorflow/python/autograph/converters/return_statements.py index 8fa6b3f8308..89f72ce1863 100644 --- a/tensorflow/python/autograph/converters/return_statements.py +++ b/tensorflow/python/autograph/converters/return_statements.py @@ -39,7 +39,7 @@ class _RewriteBlock(object): class ConditionalReturnRewriter(converter.Base): - """Rewrites a a pattern where it's unbovious that all paths return a value. + """Rewrites a pattern where it's unobvious that all paths return a value. This rewrite allows avoiding intermediate None return values. diff --git a/tensorflow/python/autograph/converters/return_statements_test.py b/tensorflow/python/autograph/converters/return_statements_test.py index 1b8e82ff7bd..df687927638 100644 --- a/tensorflow/python/autograph/converters/return_statements_test.py +++ b/tensorflow/python/autograph/converters/return_statements_test.py @@ -67,7 +67,7 @@ class SingleReturnTest(converter_testing.TestCase): self.assertTransformedEquivalent(test_fn, 2) self.assertTransformedEquivalent(test_fn, -2) - def test_contitional_missing_else(self): + def test_conditional_missing_else(self): def test_fn(x): if x > 0: diff --git a/tensorflow/python/autograph/core/converter.py b/tensorflow/python/autograph/core/converter.py index e286e38d855..bc79d5fe506 100644 --- a/tensorflow/python/autograph/core/converter.py +++ b/tensorflow/python/autograph/core/converter.py @@ -23,10 +23,10 @@ The class hierarchy is as follows: [extends] converter.Base [extends] transformer.Base [extends] gast.nodeTransformer - [uses] transfomer.SourceInfo + [uses] transformer.SourceInfo [uses] converter.EntityContext [uses] converter.ProgramContext - [uses] transfomer.SourceInfo + [uses] transformer.SourceInfo converter.Base is a specialization of transformer.Base for AutoGraph.
It's a very lightweight subclass that adds a `ctx` attribute holding the corresponding diff --git a/tensorflow/python/autograph/core/function_wrappers.py b/tensorflow/python/autograph/core/function_wrappers.py index 55b1071b029..cc0e7b98de5 100644 --- a/tensorflow/python/autograph/core/function_wrappers.py +++ b/tensorflow/python/autograph/core/function_wrappers.py @@ -34,7 +34,7 @@ class FunctionScope(object): * optional TF name scopes - these name scopes match the name of the function, for easy visualization in tensorBoard; * optional automatic control dependencies - this adds the same mechanism - for control dependenecies that is used by `@tf.function`; it can be + for control dependencies that is used by `@tf.function`; it can be optionally enabled when using `tf.autograph.to_graph`; * tracking of autograph conversion state (whether it's enabled by the user, conversion options; diff --git a/tensorflow/python/autograph/core/function_wrappers_test.py b/tensorflow/python/autograph/core/function_wrappers_test.py index 01918007bbd..917a5358633 100644 --- a/tensorflow/python/autograph/core/function_wrappers_test.py +++ b/tensorflow/python/autograph/core/function_wrappers_test.py @@ -39,7 +39,7 @@ class FunctionWrappersTest(test.TestCase): t = constant_op.constant(1) self.assertIn('test_name', t.name) - def test_auto_cotrol_deps(self): + def test_auto_control_deps(self): v = variables.Variable(1) with function_wrappers.FunctionScope( '_', None, diff --git a/tensorflow/python/autograph/core/naming.py b/tensorflow/python/autograph/core/naming.py index aa23779dfb5..67a565a9270 100644 --- a/tensorflow/python/autograph/core/naming.py +++ b/tensorflow/python/autograph/core/naming.py @@ -30,7 +30,7 @@ class _NamingStyle(enum.Enum): class Namer(object): - """Symbol name generartor.""" + """Symbol name generator.""" def __init__(self, global_namespace): self.global_namespace = global_namespace diff --git a/tensorflow/python/autograph/impl/api.py b/tensorflow/python/autograph/impl/api.py index adbdf147653..616a74e4f2a 100644 --- a/tensorflow/python/autograph/impl/api.py +++ b/tensorflow/python/autograph/impl/api.py @@ -192,7 +192,7 @@ def tf_convert(f, ctx, convert_by_default=True, user_requested=False): # TODO(mdan): Grab features from context. # Note: we pass the original context through to convert to properly handle the - # following scenario, which can be used insite TF implementations: + # following scenario, which can be used inside TF implementations: # # ctx = ag_ctx.control_status_ctx() # @function(autograph=False) # Low-level graph code diff --git a/tensorflow/python/autograph/impl/api_test.py b/tensorflow/python/autograph/impl/api_test.py index 8586df3012d..4365edaaa8e 100644 --- a/tensorflow/python/autograph/impl/api_test.py +++ b/tensorflow/python/autograph/impl/api_test.py @@ -1021,7 +1021,7 @@ class ApiTest(test.TestCase): ag_ctx.Status.ENABLED) return 0 - # Note: the autograph=False sets the contect to Status.DISABLED. The test + # Note: the autograph=False sets the context to Status.DISABLED. The test # verifies that to_graph overrides that.
@def_function.function(autograph=False) def f(): diff --git a/tensorflow/python/autograph/impl/conversion.py b/tensorflow/python/autograph/impl/conversion.py index 24ed97468d8..9e222c30335 100644 --- a/tensorflow/python/autograph/impl/conversion.py +++ b/tensorflow/python/autograph/impl/conversion.py @@ -167,7 +167,7 @@ class _UnboundInstanceCache(_FunctionCache): # Using a re-entrant lock to guard against the unlikely possibility that the -# conversion process tiggers additional code execution. +# conversion process triggers additional code execution. _CACHE_LOCK = threading.RLock() @@ -253,7 +253,7 @@ def _wrap_into_dynamic_factory(nodes, entity_name, factory_factory_name, def _convert_with_cache(entity, program_ctx, free_nonglobal_var_names): """Returns a (possibly cached) factory for the converted result of entity.""" - # The cache subkey encompases any conversion options on which the generated + # The cache subkey encompasses any conversion options on which the generated # code may depend. # The cached factory includes the necessary definitions to distinguish # between the global and non-global free variables. For this reason, the diff --git a/tensorflow/python/autograph/operators/control_flow.py b/tensorflow/python/autograph/operators/control_flow.py index c75356eaa3f..15cf53de8aa 100644 --- a/tensorflow/python/autograph/operators/control_flow.py +++ b/tensorflow/python/autograph/operators/control_flow.py @@ -773,7 +773,7 @@ class _PythonLoopChecker(object): self.check_op_count_after_iteration = False self.ops_before_iteration = None - def _verify_ineffcient_unroll(self): + def _verify_inefficient_unroll(self): """Checks for possibly-inefficient creation of ops in a Python loop.""" assert self.ops_before_iteration is not None ops_after_iteration = self._get_ops() @@ -810,7 +810,7 @@ class _PythonLoopChecker(object): self._check_unroll_limits() if self.check_op_count_after_iteration: - did_warn = self._verify_ineffcient_unroll() + did_warn = self._verify_inefficient_unroll() if did_warn: self._stop_checking_inefficient_unroll() # Only warn once. elif self.iterations > INEFFICIENT_UNROLL_MIN_ITERATIONS + 3: diff --git a/tensorflow/python/autograph/operators/py_builtins.py b/tensorflow/python/autograph/operators/py_builtins.py index 20565f28277..c6ae65ff412 100644 --- a/tensorflow/python/autograph/operators/py_builtins.py +++ b/tensorflow/python/autograph/operators/py_builtins.py @@ -54,7 +54,7 @@ UNSPECIFIED = object() def overload_of(f): if f in SUPPORTED_BUILTINS: - return BUILTIN_FUINCTIONS_MAP[f.__name__] + return BUILTIN_FUNCTIONS_MAP[f.__name__] return f @@ -441,7 +441,7 @@ def all_(iterable): return _py_all(iterable) -# all() operation is similiar to any() and could be translated +# all() operation is similar to any() and could be translated # to `filter(False)` then `take(1)`, and check if `False` exists. 
def _tf_dataset_all(iterable): # check and make sure iterable.element_spec only consists of one @@ -467,7 +467,7 @@ SUPPORTED_BUILTINS = (abs, float, int, len, print, range, enumerate, zip, map, if six.PY2: SUPPORTED_BUILTINS += (xrange,) -BUILTIN_FUINCTIONS_MAP = { +BUILTIN_FUNCTIONS_MAP = { 'abs': abs_, 'float': float_, 'int': int_, diff --git a/tensorflow/python/autograph/pyct/cfg.py b/tensorflow/python/autograph/pyct/cfg.py index ec67d9b23bd..71145802ed9 100644 --- a/tensorflow/python/autograph/pyct/cfg.py +++ b/tensorflow/python/autograph/pyct/cfg.py @@ -248,7 +248,7 @@ class GraphBuilder(object): This builder ignores the flow generated by exceptions, which are assumed to always be catastrophic and present purely for diagnostic purposes (e.g. to print debug information). Statements like raise and try/catch sections are - allowed and will generate control flow edges, but ordinaty statements are + allowed and will generate control flow edges, but ordinary statements are assumed not to raise exceptions. Finally sections are also correctly interleaved between break/continue/return diff --git a/tensorflow/python/autograph/pyct/common_transformers/anf_test.py b/tensorflow/python/autograph/pyct/common_transformers/anf_test.py index a8bf0e6fe05..80715f115be 100644 --- a/tensorflow/python/autograph/pyct/common_transformers/anf_test.py +++ b/tensorflow/python/autograph/pyct/common_transformers/anf_test.py @@ -47,7 +47,7 @@ class DummyGensym(object): # These two test functions have to be top-level, not nested, for compatibility # with some unknown version of Python 2.7 preceding 2.7.15. Why? Because -# `exec` and nested function definitions _incomaptibly_ change the +# `exec` and nested function definitions _incompatibly_ change the # representation of local variables, such that `exec` inside a nested function # definition is a syntax error in that version. 
The tuple form of `exec` fixes # this problem, but apparently that was introduced in some unknown version of @@ -465,7 +465,7 @@ class AnfNonTransformationTest(AnfTransformerTest): node, _ = parser.parse_entity(test_fn, future_features=()) orig_source = parser.unparse(node, indentation=' ') orig_str = textwrap.dedent(orig_source).strip() - config = [(anf.ANY, anf.LEAVE)] # Configuration to trasform nothing + config = [(anf.ANY, anf.LEAVE)] # Configuration to transform nothing node = anf.transform( node, self._simple_context(), config=config, gensym_source=DummyGensym) diff --git a/tensorflow/python/autograph/pyct/origin_info.py b/tensorflow/python/autograph/pyct/origin_info.py index 3501754d729..32f0462cb9a 100644 --- a/tensorflow/python/autograph/pyct/origin_info.py +++ b/tensorflow/python/autograph/pyct/origin_info.py @@ -252,7 +252,7 @@ def resolve(node, source, context_filepath, context_lineno, context_col_offset): def resolve_entity(node, source, entity): - """Like resolve, but extracts the context informartion from an entity.""" + """Like resolve, but extracts the context information from an entity.""" lines, lineno = tf_inspect.getsourcelines(entity) filepath = tf_inspect.getsourcefile(entity) diff --git a/tensorflow/python/autograph/pyct/parser.py b/tensorflow/python/autograph/pyct/parser.py index 88b0e163929..747d56e401d 100644 --- a/tensorflow/python/autograph/pyct/parser.py +++ b/tensorflow/python/autograph/pyct/parser.py @@ -91,7 +91,7 @@ def dedent_block(code_string): # See: # https://docs.python.org/3/reference/lexical_analysis.html#indentation raise errors.UnsupportedLanguageElementError( - 'code mixing tabs and spaces for intentation is not allowed') + 'code mixing tabs and spaces for indentation is not allowed') if len(tok_string) >= block_level: tok_string = tok_string[block_level:] tokens[i] = (tok_type, tok_string) diff --git a/tensorflow/python/autograph/pyct/static_analysis/activity.py b/tensorflow/python/autograph/pyct/static_analysis/activity.py index 8614dcacfea..274fb40fbec 100644 --- a/tensorflow/python/autograph/pyct/static_analysis/activity.py +++ b/tensorflow/python/autograph/pyct/static_analysis/activity.py @@ -44,7 +44,7 @@ class Scope(object): Scope objects are mutable during construction only, and must be frozen using `Scope.finalize()` before use. Furthermore, a scope is consistent only after - all its chiledren have been frozen. While analysing code blocks, scopes are + all its children have been frozen. While analysing code blocks, scopes are being gradually built, from the innermost scope outward. Freezing indicates that the analysis of a code block is complete. Once frozen, mutation is no longer allowed. `is_final` tracks whether the scope is frozen or not. Certain diff --git a/tensorflow/python/autograph/pyct/static_analysis/reaching_definitions.py b/tensorflow/python/autograph/pyct/static_analysis/reaching_definitions.py index 1c502a1d99a..dda132c9114 100644 --- a/tensorflow/python/autograph/pyct/static_analysis/reaching_definitions.py +++ b/tensorflow/python/autograph/pyct/static_analysis/reaching_definitions.py @@ -234,7 +234,7 @@ class TreeAnnotator(transformer.Base): # Recursively process any remaining subfunctions. self.current_analyzer = analyzer # Note: not visiting name, decorator_list and returns because they don't - # apply to this anlysis. + # apply to this analysis. # TODO(mdan): Should we still process the function name? 
node.args = self.visit(node.args) node.body = self.visit_block(node.body) diff --git a/tensorflow/python/autograph/pyct/transformer.py b/tensorflow/python/autograph/pyct/transformer.py index ffd881a0a34..d8b8b6e7168 100644 --- a/tensorflow/python/autograph/pyct/transformer.py +++ b/tensorflow/python/autograph/pyct/transformer.py @@ -253,7 +253,7 @@ class Base(gast.NodeTransformer): self.enter_local_scope() # Allows scoping of local variables to keep state across calls to visit_* - # methods. Multiple scope hierchies may exist and are keyed by tag. A scope + # methods. Multiple scope hierarchies may exist and are keyed by tag. A scope # is valid at one or more nodes and all its children. Scopes created in # child nodes supersede their parent. Scopes are isolated from one another. self.state = _State() diff --git a/tensorflow/python/client/virtual_gpu_test.py b/tensorflow/python/client/virtual_gpu_test.py index f6dee3bfd8e..f6591f977ad 100644 --- a/tensorflow/python/client/virtual_gpu_test.py +++ b/tensorflow/python/client/virtual_gpu_test.py @@ -97,7 +97,7 @@ class VirtualGpuTestUtil(object): # Generates a list of 3-tuples, each tuple contains the source and destination # device index for a binary operation like 'add', like: - # (src_devcie_1, src_device_2, dst_device) + # (src_device_1, src_device_2, dst_device) def _GenerateOperationPlacement(self): result = [] for unused_i in range(self._num_ops): diff --git a/tensorflow/python/compat/compat.py b/tensorflow/python/compat/compat.py index 5edea94a4e4..0dea53423b0 100644 --- a/tensorflow/python/compat/compat.py +++ b/tensorflow/python/compat/compat.py @@ -96,7 +96,7 @@ def forward_compatible(year, month, day): if compat.forward_compatible(year, month, day): # Can use the awesome new implementation. return gen_math_ops.my_new_awesome_add(inputs, name) - # To maintain forward compatibiltiy, use the old implementation. + # To maintain forward compatibility, use the old implementation. return gen_math_ops.add(inputs, name) ``` diff --git a/tensorflow/python/compat/compat_test.py b/tensorflow/python/compat/compat_test.py index 72bf27c14a5..3d06649ede8 100644 --- a/tensorflow/python/compat/compat_test.py +++ b/tensorflow/python/compat/compat_test.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Tests for forward and backwards compatibility utilties.""" +"""Tests for forward and backwards compatibility utilities.""" from __future__ import absolute_import from __future__ import division diff --git a/tensorflow/python/compat/disable_v2_behavior_test.py b/tensorflow/python/compat/disable_v2_behavior_test.py index c247eac395e..4b955d3f46d 100644 --- a/tensorflow/python/compat/disable_v2_behavior_test.py +++ b/tensorflow/python/compat/disable_v2_behavior_test.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# ============================================================================== -"""Tests for forward and backwards compatibility utilties.""" +"""Tests for forward and backwards compatibility utilities.""" from __future__ import absolute_import from __future__ import division diff --git a/tensorflow/python/compiler/tensorrt/test/biasadd_matmul_test.py b/tensorflow/python/compiler/tensorrt/test/biasadd_matmul_test.py index 5e35711fb2a..933232c37ce 100644 --- a/tensorflow/python/compiler/tensorrt/test/biasadd_matmul_test.py +++ b/tensorflow/python/compiler/tensorrt/test/biasadd_matmul_test.py @@ -114,7 +114,7 @@ class BiasaddMatMulTest(trt_test.TfTrtIntegrationTestBase): run_params=run_params, conversion_params=conversion_params, # Disable layout optimizer, since it will convert BiasAdd with NHWC - # format to NCHW format under four dimentional input. + # format to NCHW format under four dimensional input. disable_non_trt_optimizers=True) return conversion_params._replace( rewriter_config_template=rewrite_config_with_trt) diff --git a/tensorflow/python/compiler/tensorrt/test/dynamic_input_shapes_test.py b/tensorflow/python/compiler/tensorrt/test/dynamic_input_shapes_test.py index 2a291a62c37..9af3cf3a779 100644 --- a/tensorflow/python/compiler/tensorrt/test/dynamic_input_shapes_test.py +++ b/tensorflow/python/compiler/tensorrt/test/dynamic_input_shapes_test.py @@ -89,7 +89,7 @@ class DynamicInputShapesTest(trt_test.TfTrtIntegrationTestBase): run_params=run_params, conversion_params=conversion_params, # Disable layout optimizer, since it will convert BiasAdd with NHWC - # format to NCHW format under four dimentional input. + # format to NCHW format under four dimensional input. disable_non_trt_optimizers=True) return conversion_params._replace( rewriter_config_template=rewrite_config_with_trt) diff --git a/tensorflow/python/compiler/tensorrt/test/identity_output_test.py b/tensorflow/python/compiler/tensorrt/test/identity_output_test.py index 80aced35035..8cf1c1ef268 100644 --- a/tensorflow/python/compiler/tensorrt/test/identity_output_test.py +++ b/tensorflow/python/compiler/tensorrt/test/identity_output_test.py @@ -14,7 +14,7 @@ # ============================================================================== """This test checks a situation where the same tensor is considered as an output -multiple times because it has been duplicated by 2+ indentity ops. Previously, +multiple times because it has been duplicated by 2+ identity ops. Previously, the tensor would be renamed multiple times, overwriting the output binding name which resulted in a runtime error when the binding would not be found. """ diff --git a/tensorflow/python/compiler/tensorrt/test/int32_test.py b/tensorflow/python/compiler/tensorrt/test/int32_test.py index ba50f0c81fa..81a3b01828d 100644 --- a/tensorflow/python/compiler/tensorrt/test/int32_test.py +++ b/tensorflow/python/compiler/tensorrt/test/int32_test.py @@ -55,7 +55,7 @@ class ExcludeUnsupportedInt32Test(trt_test.TfTrtIntegrationTestBase): run_params=run_params, conversion_params=conversion_params, # Disable layout optimizer, since it will convert BiasAdd with NHWC - # format to NCHW format under four dimentional input. + # format to NCHW format under four dimensional input. 
disable_non_trt_optimizers=True) return conversion_params._replace( rewriter_config_template=rewrite_config_with_trt) diff --git a/tensorflow/python/compiler/tensorrt/trt_convert.py b/tensorflow/python/compiler/tensorrt/trt_convert.py index f50446f1289..2ea22ebba49 100644 --- a/tensorflow/python/compiler/tensorrt/trt_convert.py +++ b/tensorflow/python/compiler/tensorrt/trt_convert.py @@ -145,7 +145,7 @@ class TrtConversionParams(collections.namedtuple("TrtConversionParams", [ missing ranges. The calibration graph must be converted to an inference graph by running calibration with calibrate(). If set to False, quantization nodes will be expected for every tensor in the graph - (exlcuding those which will be fused). If a range is missing, an error + (excluding those which will be fused). If a range is missing, an error will occur. Please note that accuracy may be negatively affected if there is a mismatch between which tensors TRT quantizes and which tensors were trained with fake quantization. @@ -207,7 +207,7 @@ def _check_conversion_params(conversion_params, is_v2=False): "Found more than one TensorRTOptimizer in " "rewriter_config_template while only one is allowed.") trt_optimizer = optimizer - # If rewriter_config_template is set, it should inculde TensorRTOptimizer. + # If rewriter_config_template is set, it should include TensorRTOptimizer. # It is possible to remove this requirement if needed. if not trt_optimizer: raise ValueError( @@ -327,7 +327,7 @@ def get_tensorrt_rewriter_config(conversion_params, rewriter_config_with_trt.CopyFrom( conversion_params.rewriter_config_template) - # Disabling optimizers should happen after CopyFrom the temaplte + # Disabling optimizers should happen after CopyFrom the template # otherwise the template can overwrite the disablement. if disable_non_trt_optimizers: off = rewriter_config_pb2.RewriterConfig.OFF @@ -443,7 +443,7 @@ class TrtGraphConverter(object): missing ranges. The calibration graph must be converted to an inference graph by running calibration with calibrate(). If set to False, quantization nodes will be expected for every tensor in the graph - (exlcuding those which will be fused). If a range is missing, an error + (excluding those which will be fused). If a range is missing, an error will occur. Please note that accuracy may be negatively affected if there is a mismatch between which tensors TRT quantizes and which tensors were trained with fake quantization. diff --git a/tensorflow/python/compiler/tensorrt/trt_convert_test.py b/tensorflow/python/compiler/tensorrt/trt_convert_test.py index f168ee2f3be..8d64d5e3ba9 100644 --- a/tensorflow/python/compiler/tensorrt/trt_convert_test.py +++ b/tensorflow/python/compiler/tensorrt/trt_convert_test.py @@ -500,7 +500,7 @@ class TrtConvertTest(test_util.TensorFlowTestCase, parameterized.TestCase): # Load and verify the converted model. # # TODO(laigd): the name of the new input_signature of the - # `root_with_trt.run` function is empty string (originaly was None), + # `root_with_trt.run` function is empty string (originally was None), # investigate why. 
root_with_trt = load.load(output_saved_model_dir) # TODO(laigd): `root_with_trt.run` is still using the original graph without diff --git a/tensorflow/python/data/benchmarks/map_benchmark.py b/tensorflow/python/data/benchmarks/map_benchmark.py index 04cf93b48f3..6638842bd56 100644 --- a/tensorflow/python/data/benchmarks/map_benchmark.py +++ b/tensorflow/python/data/benchmarks/map_benchmark.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Bechmarks for `tf.data.Dataset.map()`.""" +"""Benchmarks for `tf.data.Dataset.map()`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function diff --git a/tensorflow/python/data/experimental/ops/readers.py b/tensorflow/python/data/experimental/ops/readers.py index be81a835d11..8795a206bb1 100644 --- a/tensorflow/python/data/experimental/ops/readers.py +++ b/tensorflow/python/data/experimental/ops/readers.py @@ -440,7 +440,7 @@ def make_csv_dataset_v2( if compression_type is not None: compression_type_value = tensor_util.constant_value(compression_type) if compression_type_value is None: - raise ValueError("Received unkown compression_type") + raise ValueError("Received unknown compression_type") if compression_type_value == "GZIP": file_io_fn = lambda filename: gzip.open(filename, "rt") elif compression_type_value == "ZLIB": diff --git a/tensorflow/python/data/kernel_tests/from_generator_test.py b/tensorflow/python/data/kernel_tests/from_generator_test.py index 49753babacb..d320b281136 100644 --- a/tensorflow/python/data/kernel_tests/from_generator_test.py +++ b/tensorflow/python/data/kernel_tests/from_generator_test.py @@ -122,7 +122,7 @@ class FromGeneratorTest(test_base.DatasetTestBase, parameterized.TestCase): # The interleave transformation is essentially a flat map that # draws from multiple input datasets concurrently (in a cyclic - # fashion). By placing `Datsaet.from_generator()` inside an + # fashion). By placing `Dataset.from_generator()` inside an # interleave, we test its behavior when multiple iterators are # active at the same time; by additionally prefetching inside the # interleave, we create the possibility of parallel (modulo GIL) diff --git a/tensorflow/python/data/ops/dataset_ops.py b/tensorflow/python/data/ops/dataset_ops.py index f796556202e..febc8da4512 100644 --- a/tensorflow/python/data/ops/dataset_ops.py +++ b/tensorflow/python/data/ops/dataset_ops.py @@ -2526,7 +2526,7 @@ def get_structure(dataset_or_iterator): Returns: A nested structure of `tf.TypeSpec` objects matching the structure of an - element of `dataset_or_iterator` and spacifying the type of individal + element of `dataset_or_iterator` and specifying the type of individual components. Raises: @@ -3013,7 +3013,7 @@ class StructuredFunctionWrapper(object): use_legacy_function: (Optional.) A boolean that determines whether the function be created using `tensorflow.python.eager.function.defun` (default behavior) or `tensorflow.python.framework.function.Defun` - (legacy beheavior). + (legacy behavior). defun_kwargs: (Optional.) A dictionary mapping string argument names to values. If supplied, will be passed to `function` as keyword arguments. @@ -3049,7 +3049,7 @@ class StructuredFunctionWrapper(object): # There is no graph to add in eager mode. 
add_to_graph &= not context.executing_eagerly() # There are some lifetime issues when a legacy function is not added to a - # out-living graph. It's already deprecated so de-priotizing the fix. + # out-living graph. It's already deprecated so de-prioritizing the fix. add_to_graph |= use_legacy_function if defun_kwargs is None: diff --git a/tensorflow/python/data/util/sparse.py b/tensorflow/python/data/util/sparse.py index f2e22fefd31..d7e516e24f9 100644 --- a/tensorflow/python/data/util/sparse.py +++ b/tensorflow/python/data/util/sparse.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Python dataset sparse tensor utility functitons.""" +"""Python dataset sparse tensor utility functions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function diff --git a/tensorflow/python/data/util/structure_test.py b/tensorflow/python/data/util/structure_test.py index 5b5fd2553bc..c4f9af69eb4 100644 --- a/tensorflow/python/data/util/structure_test.py +++ b/tensorflow/python/data/util/structure_test.py @@ -398,7 +398,7 @@ class StructureTest(test_base.DatasetTestBase, parameterized.TestCase, # Define three mutually incompatible values/structures, and assert that: # 1. Using one structure to flatten a value with an incompatible structure # fails. - # 2. Using one structure to restructre a flattened value with an + # 2. Using one structure to restructure a flattened value with an # incompatible structure fails. value_tensor = constant_op.constant(42.0) s_tensor = structure.type_spec_from_value(value_tensor) diff --git a/tensorflow/python/debug/cli/curses_ui.py b/tensorflow/python/debug/cli/curses_ui.py index 7b87972d694..87885450058 100644 --- a/tensorflow/python/debug/cli/curses_ui.py +++ b/tensorflow/python/debug/cli/curses_ui.py @@ -1214,7 +1214,7 @@ class CursesUI(base_ui.BaseUI): """Pad the whitespace at the end of a line with the default color pair. Prevents spurious color pairs from appearing at the end of the lines in - certain text terimnals. + certain text terminals. Args: pad: The curses pad object to operate on. diff --git a/tensorflow/python/debug/cli/debugger_cli_common.py b/tensorflow/python/debug/cli/debugger_cli_common.py index d3d20024a7b..9b9955300fc 100644 --- a/tensorflow/python/debug/cli/debugger_cli_common.py +++ b/tensorflow/python/debug/cli/debugger_cli_common.py @@ -115,7 +115,7 @@ def rich_text_lines_from_rich_line_list(rich_text_list, annotations=None): Args: rich_text_list: a list of RichLine objects or strings - annotations: annotatoins for the resultant RichTextLines object. + annotations: annotations for the resultant RichTextLines object. Returns: A corresponding RichTextLines object. diff --git a/tensorflow/python/debug/cli/debugger_cli_common_test.py b/tensorflow/python/debug/cli/debugger_cli_common_test.py index 774f49f65d1..eb46a0a4062 100644 --- a/tensorflow/python/debug/cli/debugger_cli_common_test.py +++ b/tensorflow/python/debug/cli/debugger_cli_common_test.py @@ -476,7 +476,7 @@ class CommandHandlerRegistryTest(test_util.TensorFlowTestCase): help_lines = registry.get_help().lines # The help info should list commands in alphabetically sorted order, - # regardless of order in which the commands are reigstered. + # regardless of order in which the commands are registered. 
self.assertEqual("cols", help_lines[0]) self.assertTrue(help_lines[1].endswith("Aliases: c")) self.assertFalse(help_lines[2]) @@ -790,7 +790,7 @@ class SliceRichTextLinesTest(test_util.TensorFlowTestCase): self.assertEqual(["Roses are red"], sliced.lines) self.assertEqual({0: [(0, 5, "red")]}, sliced.font_attr_segs) - # Non-line-number metadata should be preseved. + # Non-line-number metadata should be preserved. self.assertEqual({ 0: "longer wavelength", "foo_metadata": "bar" @@ -1024,7 +1024,7 @@ class CommandHistoryTest(test_util.TensorFlowTestCase): self.assertEqual( ["help 2\n", "help 3\n", "help 4\n"], f.readlines()) - def testCommandHistoryHandlesReadingIOErrorGracoiusly(self): + def testCommandHistoryHandlesReadingIOErrorGraciously(self): with open(self._history_file_path, "wt") as f: f.write("help\n") @@ -1037,7 +1037,7 @@ class CommandHistoryTest(test_util.TensorFlowTestCase): self._restoreFileReadWritePermissions(self._history_file_path) - def testCommandHistoryHandlesWritingIOErrorGracoiusly(self): + def testCommandHistoryHandlesWritingIOErrorGraciously(self): with open(self._history_file_path, "wt") as f: f.write("help\n") diff --git a/tensorflow/python/debug/cli/evaluator.py b/tensorflow/python/debug/cli/evaluator.py index e474332f74c..3e5e0deea04 100644 --- a/tensorflow/python/debug/cli/evaluator.py +++ b/tensorflow/python/debug/cli/evaluator.py @@ -55,7 +55,7 @@ def _parse_debug_tensor_name(debug_tensor_name): `None`. node_name: Name of the node. output_slot: Output slot index as an `int`. - debug_op: If the debug op suffix exists, the debug op name; otheriwse, + debug_op: If the debug op suffix exists, the debug op name; otherwise, `None`. exec_index: Execution index (applicable to cases in which a debug tensor is computed multiple times in a `tf.Session.run` call, e.g., due to diff --git a/tensorflow/python/debug/cli/profile_analyzer_cli_test.py b/tensorflow/python/debug/cli/profile_analyzer_cli_test.py index d6d2b58b5f8..8f159a0a740 100644 --- a/tensorflow/python/debug/cli/profile_analyzer_cli_test.py +++ b/tensorflow/python/debug/cli/profile_analyzer_cli_test.py @@ -273,11 +273,11 @@ class ProfileAnalyzerListProfileTest(test_util.TensorFlowTestCase): prof_output = prof_analyzer.list_profile(["-f", ".*file2"]).lines _assert_at_least_one_line_matches(r"Add/123", prof_output) _assert_no_lines_match(r"Mul/456", prof_output) - # Fitler by execution time. + # Filter by execution time. prof_output = prof_analyzer.list_profile(["-e", "[5, 10]"]).lines _assert_at_least_one_line_matches(r"Mul/456", prof_output) _assert_no_lines_match(r"Add/123", prof_output) - # Fitler by op time. + # Filter by op time. prof_output = prof_analyzer.list_profile(["-o", ">=2"]).lines _assert_at_least_one_line_matches(r"Add/123", prof_output) _assert_no_lines_match(r"Mul/456", prof_output) diff --git a/tensorflow/python/debug/cli/tensor_format_test.py b/tensorflow/python/debug/cli/tensor_format_test.py index 18ddbb6437c..804b6c0143d 100644 --- a/tensorflow/python/debug/cli/tensor_format_test.py +++ b/tensorflow/python/debug/cli/tensor_format_test.py @@ -78,7 +78,7 @@ class RichTextLinesTest(test_util.TensorFlowTestCase): def _checkTensorElementLocations(self, out, a): """Check the results of locate_tensor_element on an ndarray representation. - that represents a numpy.ndaray. + that represents a numpy.ndarray. Args: out: An instance of RichTextLines representing a numpy.ndarray. 
diff --git a/tensorflow/python/debug/lib/check_numerics_callback.py b/tensorflow/python/debug/lib/check_numerics_callback.py index 4b48dd6c874..fa9a16dd8ab 100644 --- a/tensorflow/python/debug/lib/check_numerics_callback.py +++ b/tensorflow/python/debug/lib/check_numerics_callback.py @@ -426,7 +426,7 @@ def disable_check_numerics(): """Disable the eager/graph unified numerics checking mechanism. This method can be used after a call to `tf.debugging.enable_check_numerics()` - to disable the numerics-checking mechanism that catches inifnity and NaN + to disable the numerics-checking mechanism that catches infinity and NaN values output by ops executed eagerly or in tf.function-compiled graphs. This method is idempotent. Calling it multiple times has the same effect diff --git a/tensorflow/python/debug/lib/check_numerics_callback_test.py b/tensorflow/python/debug/lib/check_numerics_callback_test.py index bf1d3127e40..2c655e864a3 100644 --- a/tensorflow/python/debug/lib/check_numerics_callback_test.py +++ b/tensorflow/python/debug/lib/check_numerics_callback_test.py @@ -430,7 +430,7 @@ class CheckNumericsCallbackUnhealthyTest(test_util.TensorFlowTestCase): self.assertIn("one_over_x = 1.0 / x", message) @test_util.run_in_graph_and_eager_modes - def testInfInCustomKerasLayerWithoutTfFuntionPredictCall(self): + def testInfInCustomKerasLayerWithoutTfFunctionPredictCall(self): """Test catching Infinity in a custom layer, w/o tf.function.""" check_numerics_callback.enable_check_numerics() @@ -483,7 +483,7 @@ class CheckNumericsCallbackUnhealthyTest(test_util.TensorFlowTestCase): check_numerics_callback.enable_check_numerics() def generate_nan(x): - """Intetionally generates NaNs by taking log of negative number.""" + """Intentionally generates NaNs by taking log of negative number.""" casted_x = math_ops.cast(x, dtypes.float32) return math_ops.log([[-1.0, 1.0], [3.0, 5.0]]) + casted_x @@ -503,7 +503,7 @@ class CheckNumericsCallbackUnhealthyTest(test_util.TensorFlowTestCase): message) @test_util.run_in_graph_and_eager_modes - def testCustomGradietWithNaNWithTfFunction(self): + def testCustomGradientWithNaNWithTfFunction(self): """Test that callback catches NaN in a gradient function during backprop.""" check_numerics_callback.enable_check_numerics() diff --git a/tensorflow/python/debug/lib/debug_data.py b/tensorflow/python/debug/lib/debug_data.py index ceabd2e86d4..0178f1f5c9e 100644 --- a/tensorflow/python/debug/lib/debug_data.py +++ b/tensorflow/python/debug/lib/debug_data.py @@ -296,7 +296,7 @@ class DebugTensorDatum(object): directory is `/tmp/tfdbg_1` and the dump file is at `/tmp/tfdbg_1//>ns_1/node_a_0_DebugIdentity_123456789`, then the value of the debug_dump_rel_path should be - `/ns_1/node_a_0_DebugIdenity_1234456789`. + `/ns_1/node_a_0_DebugIdentity_123456789`.
Raises: ValueError: If the base file name of the dump file does not conform to diff --git a/tensorflow/python/debug/lib/debug_gradients.py b/tensorflow/python/debug/lib/debug_gradients.py index 2108cbd9f8c..e7c17eb54b8 100644 --- a/tensorflow/python/debug/lib/debug_gradients.py +++ b/tensorflow/python/debug/lib/debug_gradients.py @@ -116,7 +116,7 @@ class GradientsDebugger(object): The side effect of this method is that when gradient tensor(s) are created with respect to the any paths that include the `input_tensor`, the gradient - tensor(s) with repsect to `input_tensor` will be registered with this + tensor(s) with respect to `input_tensor` will be registered with this `GradientsDebugger` instance and can later be retrieved, with the methods `gradient_tensor` and `gradient_tensors`. @@ -141,7 +141,7 @@ Args: input_tensor: the input `tf.Tensor` object whose related gradient tensors - are to be reigstered with this `GradientsDebugger` instance when they + are to be registered with this `GradientsDebugger` instance when they are created, e.g., during `tf.gradients` calls or the construction of optimization (training) op that uses `tf.gradients`. @@ -173,7 +173,7 @@ The side effect of this method is that when gradient tensor(s) are created with respect to the any paths that include the `x_tensor`s, the gradient - tensor(s) with repsect to the tensor will be registered with this + tensor(s) with respect to the tensor will be registered with this `GradientsDebugger` instance and can later be retrieved, with the methods `gradient_tensor` and `gradient_tensors`. diff --git a/tensorflow/python/debug/lib/debug_graph_reconstruction_test.py b/tensorflow/python/debug/lib/debug_graph_reconstruction_test.py index 85d52503b7f..fb722efab4e 100644 --- a/tensorflow/python/debug/lib/debug_graph_reconstruction_test.py +++ b/tensorflow/python/debug/lib/debug_graph_reconstruction_test.py @@ -144,7 +144,7 @@ class ReconstructNonDebugGraphTest(test_util.TensorFlowTestCase): self._compareOriginalAndReconstructedGraphDefs( sess, c, expected_output=400.0) - def testReonstructGraphWithCond(self): + def testReconstructGraphWithCond(self): with session.Session(config=self._no_rewrite_session_config()) as sess: x = variables.Variable(10.0, name="x") y = variables.Variable(20.0, name="y") diff --git a/tensorflow/python/debug/lib/debug_v2_ops_test.py b/tensorflow/python/debug/lib/debug_v2_ops_test.py index d6f0d4310a2..c76cbeeac6c 100644 --- a/tensorflow/python/debug/lib/debug_v2_ops_test.py +++ b/tensorflow/python/debug/lib/debug_v2_ops_test.py @@ -658,7 +658,7 @@ class DebugIdentityV2OpTest(dumping_callback_test_lib.DumpingCallbackTestBase): tensor_id=x._id, output_dtype=dtypes.float64)), x._id - # Assert the same op is returns a consistant value + # Assert the same op returns a consistent value x = np.zeros([100, 100], dtype=np.float16) x[32, 47] = np.nan x[0:4, 3] = np.inf diff --git a/tensorflow/python/debug/lib/dumping_callback.py b/tensorflow/python/debug/lib/dumping_callback.py index 8b0f75574af..c3c0c973412 100644 --- a/tensorflow/python/debug/lib/dumping_callback.py +++ b/tensorflow/python/debug/lib/dumping_callback.py @@ -67,7 +67,7 @@ def _concrete_tensor_to_proto(tensor): class _DumpingCallback(object): - """An object holding the states surrouding the dumping callback.""" + """An object holding the states surrounding the dumping callback.""" def __init__(self, dump_root, diff --git
a/tensorflow/python/debug/lib/dumping_callback_test.py b/tensorflow/python/debug/lib/dumping_callback_test.py index 9eee3a59e02..5382965ebc4 100644 --- a/tensorflow/python/debug/lib/dumping_callback_test.py +++ b/tensorflow/python/debug/lib/dumping_callback_test.py @@ -1352,7 +1352,7 @@ class TracingCallbackTest( ("FullTensor", "FULL_TENSOR"), ) @test_util.run_in_graph_and_eager_modes - def testMobiletNetV2Fit(self, tensor_debug_mode): + def testMobileNetV2Fit(self, tensor_debug_mode): """Test training Keras MobileNetV2 works with dumping.""" # Use a large circular-buffer to make sure we capture all the executed ops. writer = dumping_callback.enable_dump_debug_info( diff --git a/tensorflow/python/debug/lib/session_debug_grpc_test.py b/tensorflow/python/debug/lib/session_debug_grpc_test.py index a0bfa543345..196c34695e4 100644 --- a/tensorflow/python/debug/lib/session_debug_grpc_test.py +++ b/tensorflow/python/debug/lib/session_debug_grpc_test.py @@ -574,7 +574,7 @@ class SessionDebugGrpcGatingTest(test_util.TensorFlowTestCase): if i in (0, 2): # During runs 0 and 2, the server should have received the published # debug tensor delta:0:DebugIdentity. The breakpoint should have been - # unblocked by EventReply reponses from the server. + # unblocked by EventReply responses from the server. self.assertAllClose( [5.0], self._server_1.debug_tensor_values["delta_1:0:DebugIdentity"]) @@ -628,7 +628,7 @@ class SessionDebugGrpcGatingTest(test_util.TensorFlowTestCase): if i in (0, 2): # During runs 0 and 2, the server should have received the published # debug tensor delta:0:DebugIdentity. The breakpoint should have been - # unblocked by EventReply reponses from the server. + # unblocked by EventReply responses from the server. self.assertAllClose( [5.0], self._server_1.debug_tensor_values["delta_1:0:DebugIdentity"]) diff --git a/tensorflow/python/debug/lib/source_remote.py b/tensorflow/python/debug/lib/source_remote.py index e0a3695df43..2264f1ff203 100644 --- a/tensorflow/python/debug/lib/source_remote.py +++ b/tensorflow/python/debug/lib/source_remote.py @@ -116,7 +116,7 @@ def _send_call_tracebacks(destinations, origin_stack: The traceback stack for the origin of the execution call. For graph execution, this is the traceback of the `tf.Session.run()` invocation. For eager execution, this is the traceback of the Python - line that executes the eager opertion. + line that executes the eager operation. is_eager_execution: (`bool`) whether an eager execution call (i.e., not a `tf.Session.run` or derived methods) is being sent. call_key: The key of the execution call, as a string. For graph execution, diff --git a/tensorflow/python/debug/lib/source_utils.py b/tensorflow/python/debug/lib/source_utils.py index ecfee1aa2c2..033e9c4361e 100644 --- a/tensorflow/python/debug/lib/source_utils.py +++ b/tensorflow/python/debug/lib/source_utils.py @@ -73,7 +73,7 @@ def guess_is_tensorflow_py_library(py_file_path): Raises: ValueError: if the extension name of py_file_path does not indicate a Python - source file (compiled or uncomplied). + source file (compiled or uncompiled). 
""" if (not is_extension_uncompiled_python_source(py_file_path) and not is_extension_compiled_python_source(py_file_path)): diff --git a/tensorflow/python/debug/lib/source_utils_test.py b/tensorflow/python/debug/lib/source_utils_test.py index cddad99b56b..faf2365fc9c 100644 --- a/tensorflow/python/debug/lib/source_utils_test.py +++ b/tensorflow/python/debug/lib/source_utils_test.py @@ -341,7 +341,7 @@ class ListSourceAgainstDumpTest(test_util.TensorFlowTestCase): # while/Less:0 4 # while/LoopCond:0 4 # while/Switch:0 1 - # while/Swtich:1 3 + # while/Switch:1 3 # while/Identity:0 3 # while/Add/y:0 3 # while/Add:0 3 diff --git a/tensorflow/python/debug/wrappers/dumping_wrapper.py b/tensorflow/python/debug/wrappers/dumping_wrapper.py index c02d5f66ec9..3cd163918b3 100644 --- a/tensorflow/python/debug/wrappers/dumping_wrapper.py +++ b/tensorflow/python/debug/wrappers/dumping_wrapper.py @@ -90,7 +90,7 @@ class DumpingDebugWrapperSession(framework.NonInteractiveDebugWrapperSession): self._run_counter_lock = threading.Lock() def prepare_run_debug_urls(self, fetches, feed_dict): - """Implementation of abstrat method in superclass. + """Implementation of abstract method in superclass. See doc of `NonInteractiveDebugWrapperSession.prepare_run_debug_urls()` for details. This implementation creates a run-specific subdirectory under diff --git a/tensorflow/python/debug/wrappers/framework.py b/tensorflow/python/debug/wrappers/framework.py index d67c801bcb4..9b107fe9a2b 100644 --- a/tensorflow/python/debug/wrappers/framework.py +++ b/tensorflow/python/debug/wrappers/framework.py @@ -44,7 +44,7 @@ c) (To be implemented in a future CL) Enter an instruction loop to let an 3) The callback handles the request and returns a OnSessionInitResponse object with an action field, directing the wrapper session what to do next. -If the action field in the OnSessionInitResponse is PROCEED, the constuctor +If the action field in the OnSessionInitResponse is PROCEED, the constructor returns. Control is released back to the caller of the constructor, which can invoke run() method of wrapper session with the same syntax as a non-wrapped session, e.g.,: @@ -69,7 +69,7 @@ A1) Right at the start of each run() call, the on_run_start() callback is A2) Right before the run() returns, the on_run_end() callback is invoked, with an OnRunEndRequest object as the argument, which carries information - including the actual action performed in the warpper run() call and the + including the actual action performed in the wrapper run() call and the run_metadata from the run() call. However, if the action field in OnSessionInitResponse is diff --git a/tensorflow/python/debug/wrappers/local_cli_wrapper.py b/tensorflow/python/debug/wrappers/local_cli_wrapper.py index d8f4b3b8e8a..0d8c71396f0 100644 --- a/tensorflow/python/debug/wrappers/local_cli_wrapper.py +++ b/tensorflow/python/debug/wrappers/local_cli_wrapper.py @@ -393,7 +393,7 @@ class LocalCLIDebugWrapperSession(framework.BaseDebugWrapperSession): and caused the preparation of this run-end CLI (if any). passed_filter_exclude_node_names: (None or str) Regular expression used with the tensor filter to exclude ops with names matching the regular - expresssion. + expression. 
""" if tf_error: diff --git a/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py b/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py index f252ba9bf9e..30bb99387b2 100644 --- a/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py +++ b/tensorflow/python/debug/wrappers/local_cli_wrapper_test.py @@ -345,7 +345,7 @@ class LocalCLIDebugWrapperSessionTest(test_util.TensorFlowTestCase): self.assertEqual(0, len(wrapped_sess.observers["debug_dumps"])) self.assertEqual([], wrapped_sess.observers["tf_errors"]) - def testRunMixingDebugModeAndMultpleTimes(self): + def testRunMixingDebugModeAndMultipleTimes(self): wrapped_sess = LocalCLIDebuggerWrapperSessionForTest( [["run", "-n"], ["run", "-t", "2"], ["run"], ["run"]], self.sess, dump_root=self._tmp_dir) diff --git a/tensorflow/python/distribute/all_reduce.py b/tensorflow/python/distribute/all_reduce.py index ab5e62106d3..1e9055fd55b 100644 --- a/tensorflow/python/distribute/all_reduce.py +++ b/tensorflow/python/distribute/all_reduce.py @@ -519,7 +519,7 @@ def _build_recursive_hd_gather(input_tensors, devices, red_op): def _build_recursive_hd_scatter(input_tensors, devices): - """Construct the scatter phase of recursive halving-doublng all-reduce. + """Construct the scatter phase of recursive halving-doubling all-reduce. Args: input_tensors: list of T `tf.Tensor` that are fully-reduced shards. diff --git a/tensorflow/python/distribute/cluster_resolver/slurm_cluster_resolver.py b/tensorflow/python/distribute/cluster_resolver/slurm_cluster_resolver.py index 1d6d346ddf2..78589762ae5 100644 --- a/tensorflow/python/distribute/cluster_resolver/slurm_cluster_resolver.py +++ b/tensorflow/python/distribute/cluster_resolver/slurm_cluster_resolver.py @@ -190,7 +190,7 @@ class SlurmClusterResolver(ClusterResolver): defaults to None. Returns: - A string specifying job name the process belongs to and an integner + A string specifying job name the process belongs to and an integer specifying the task index the process belongs to in that job. """ return self.task_type, self.task_id @@ -200,7 +200,7 @@ class SlurmClusterResolver(ClusterResolver): Args: task_type: (Optional) Overrides the default auto-selected task type. - task_id: (Optional) Overrides the default auto-slected task index. + task_id: (Optional) Overrides the default auto-selected task index. rpc_layer: (Optional) Overrides the default RPC protocol TensorFlow uses to communicate across nodes. diff --git a/tensorflow/python/distribute/collective_all_reduce_strategy.py b/tensorflow/python/distribute/collective_all_reduce_strategy.py index 89d13c0777f..e77fce0f2a5 100644 --- a/tensorflow/python/distribute/collective_all_reduce_strategy.py +++ b/tensorflow/python/distribute/collective_all_reduce_strategy.py @@ -63,7 +63,7 @@ class CollectiveAllReduceStrategy(distribute_lib.Strategy): When 'TF_CONFIG' environment variable is set, it parses cluster_spec, task_type and task_id from 'TF_CONFIG' and turns into a multi-worker strategy - which mirrores models on GPUs of all machines in a cluster. In the current + which mirrored models on GPUs of all machines in a cluster. In the current implementation, it uses all GPUs in a cluster and it assumes all workers have the same number of GPUs. 
@@ -111,7 +111,7 @@ class CollectiveAllReduceStrategy(distribute_lib.Strategy): @classmethod def _from_local_devices(cls, devices): - """A convenience method to create an obejct with a list of devices.""" + """A convenience method to create an object with a list of devices.""" obj = cls() obj.extended._initialize_local(TFConfigClusterResolver(), devices=devices) # pylint: disable=protected-access return obj diff --git a/tensorflow/python/distribute/cross_device_ops.py b/tensorflow/python/distribute/cross_device_ops.py index 7225f2fce5c..4b2814eca3e 100644 --- a/tensorflow/python/distribute/cross_device_ops.py +++ b/tensorflow/python/distribute/cross_device_ops.py @@ -435,7 +435,7 @@ def _group_value_by_device(per_replica_values): ] Args: - per_replica_values: a list of PerReplica obejcts. + per_replica_values: a list of PerReplica objects. Returns: a list of lists, each sublist has components for its corresponding device of diff --git a/tensorflow/python/distribute/cross_device_utils.py b/tensorflow/python/distribute/cross_device_utils.py index ea61ec3e74b..3afb8b55b24 100644 --- a/tensorflow/python/distribute/cross_device_utils.py +++ b/tensorflow/python/distribute/cross_device_utils.py @@ -761,7 +761,7 @@ def stitch_values(values_and_indices_list): Args: values_and_indices_list: a list of tuples of values and indices indicating - the values and postions in the returned list. + the values and positions in the returned list. Returns: a stitched list of values. diff --git a/tensorflow/python/distribute/device_util.py b/tensorflow/python/distribute/device_util.py index db6009d1a45..7f32ed39aed 100644 --- a/tensorflow/python/distribute/device_util.py +++ b/tensorflow/python/distribute/device_util.py @@ -62,7 +62,7 @@ def canonicalize(d, default=None): result = result.make_merged_spec( tf_device.DeviceSpec.from_string(default)) - # Apply `d` last, so that it's values take precidence over the defaults. + # Apply `d` last, so that its values take precedence over the defaults. result = result.make_merged_spec(d) return result.to_string() diff --git a/tensorflow/python/distribute/distribute_coordinator.py b/tensorflow/python/distribute/distribute_coordinator.py index 8f73fdb3a7e..eb1cc5217fc 100644 --- a/tensorflow/python/distribute/distribute_coordinator.py +++ b/tensorflow/python/distribute/distribute_coordinator.py @@ -284,12 +284,12 @@ class _WorkerContext(object): @property def task_type(self): - """Returns the role of the corresponing task.""" + """Returns the role of the corresponding task.""" return self._task_type @property def task_id(self): - """Returns the id or index of the corresponing task.""" + """Returns the id or index of the corresponding task.""" return self._task_id @property @@ -364,7 +364,7 @@ def _split_cluster_for_evaluator(cluster_spec, task_type): """Split the cluster for evaluator since it needn't talk to other tasks.""" # Splitting the cluster is important to prevent the evaluator from talking to # other tasks in the cluster. Since we allow evaluator not to use - # distribution strategies and as a result ops in the evalauator task may have + # distribution strategies and as a result ops in the evaluator task may have # unspecified devices. Those ops may end up on other tasks if we don't split # the cluster. # Note: if you bypass distribute coordinator and bring the cluster yourself, @@ -694,7 +694,7 @@ def run_distribute_coordinator(worker_fn, operations.
This method is intended to be invoked by high-level APIs so that users don't - have to explictly call it to run this coordinator. For those who don't use + have to explicitly call it to run this coordinator. For those who don't use high-level APIs, to change a program to use this coordinator, wrap everything in a the program after global data definitions such as commandline flag definition into the `worker_fn` and get task-specific configurations from diff --git a/tensorflow/python/distribute/distribute_coordinator_test.py b/tensorflow/python/distribute/distribute_coordinator_test.py index 3e4d2e09253..4bc6f34262a 100644 --- a/tensorflow/python/distribute/distribute_coordinator_test.py +++ b/tensorflow/python/distribute/distribute_coordinator_test.py @@ -593,7 +593,7 @@ class DistributeCoordinatorTestStandaloneMode(DistributeCoordinatorTestBase): ("fake_evaluator", 3, True, False)) -class DistributeCoordinatorTestInpendentWorkerMode( +class DistributeCoordinatorTestIndependentWorkerMode( DistributeCoordinatorTestBase): def testInGraph(self): @@ -946,7 +946,7 @@ class RunStandardTensorflowServerTest(test.TestCase): if __name__ == "__main__": - # TODO(yuefengz): find a smart way to terminite std server threads. + # TODO(yuefengz): find a smart way to terminate std server threads. with test.mock.patch.object(sys, "exit", os._exit): # Reduce `recovery_wait_secs` from 30 seconds so the test completes quickly. orig_init = session_manager.SessionManager.__init__ diff --git a/tensorflow/python/distribute/distribute_lib.py b/tensorflow/python/distribute/distribute_lib.py index 65d176185bd..9b8c0b742d4 100644 --- a/tensorflow/python/distribute/distribute_lib.py +++ b/tensorflow/python/distribute/distribute_lib.py @@ -1998,7 +1998,7 @@ class StrategyExtendedV1(StrategyExtendedV2): - last_step_outputs: A dictionary containing tensors set using `context.set_last_step_output`. Evaluating this returns the value of the tensors after the last iteration. - - non_tensor_outputs: A dictionatry containing anything that was set by + - non_tensor_outputs: A dictionary containing anything that was set by `fn` by calling `context.set_non_tensor_output`. """ _require_cross_replica_or_default_context_extended(self) diff --git a/tensorflow/python/distribute/estimator_training.py b/tensorflow/python/distribute/estimator_training.py index 65447dd9841..af90f7c3c31 100644 --- a/tensorflow/python/distribute/estimator_training.py +++ b/tensorflow/python/distribute/estimator_training.py @@ -158,7 +158,7 @@ def init_run_config(config, tf_config): return # Don't use distribute coordinator if it is local training or cluster has a - # MASTER job or `train_distribute` is not specifed. + # MASTER job or `train_distribute` is not specified. if (not cluster_spec or 'master' in cluster_spec.jobs or not config._train_distribute): config._distribute_coordinator_mode = None diff --git a/tensorflow/python/distribute/input_lib.py b/tensorflow/python/distribute/input_lib.py index afaf642be5b..59590ae36c2 100644 --- a/tensorflow/python/distribute/input_lib.py +++ b/tensorflow/python/distribute/input_lib.py @@ -176,7 +176,7 @@ def _get_next_as_optional(iterator, strategy, name=None): with ops.device(worker): worker_has_value, next_element = ( iterator._iterators[i].get_next_as_list(new_name)) # pylint: disable=protected-access - # Collective all-reduce requires explict devices for inputs. + # Collective all-reduce requires explicit devices for inputs. with ops.device("/cpu:0"): # Converting to integers for all-reduce. 
worker_has_value = math_ops.cast(worker_has_value, dtypes.int32) diff --git a/tensorflow/python/distribute/mirrored_strategy.py b/tensorflow/python/distribute/mirrored_strategy.py index 0102d8ebb17..20b1274f81f 100644 --- a/tensorflow/python/distribute/mirrored_strategy.py +++ b/tensorflow/python/distribute/mirrored_strategy.py @@ -260,7 +260,7 @@ def _group_device_list(devices): Returns: a dict of list of device strings mapping from task_type to a list of devices - for the task_type in the asceding order of task_id. + for the task_type in the ascending order of task_id. """ assert not _is_device_list_single_worker(devices) device_dict = {} diff --git a/tensorflow/python/distribute/mirrored_strategy_test.py b/tensorflow/python/distribute/mirrored_strategy_test.py index f31a7d17386..b2ab4bb6ec6 100644 --- a/tensorflow/python/distribute/mirrored_strategy_test.py +++ b/tensorflow/python/distribute/mirrored_strategy_test.py @@ -1319,7 +1319,7 @@ class MirroredVariableStopGradientTest(test.TestCase, parameterized.TestCase): class FunctionTest(test.TestCase): - def testBackwardFuctionDevicePlacement(self): + def testBackwardFunctionDevicePlacement(self): if context.num_gpus() < 1: self.skipTest("At least one GPU is required.") devices = [device_util.resolve("/device:GPU:0"), diff --git a/tensorflow/python/distribute/model_collection/model_collection_base.py b/tensorflow/python/distribute/model_collection/model_collection_base.py index bbfae297c68..17af48ce679 100644 --- a/tensorflow/python/distribute/model_collection/model_collection_base.py +++ b/tensorflow/python/distribute/model_collection/model_collection_base.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""A base class to provid a model and corresponding input data for testing.""" +"""A base class to provide a model and corresponding input data for testing.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function diff --git a/tensorflow/python/distribute/model_collection/simple_models.py b/tensorflow/python/distribute/model_collection/simple_models.py index 407f3149e05..63a2bfcb520 100644 --- a/tensorflow/python/distribute/model_collection/simple_models.py +++ b/tensorflow/python/distribute/model_collection/simple_models.py @@ -42,7 +42,7 @@ def _get_data_for_simple_models(): class SimpleFunctionalModel(model_collection_base.ModelAndInput): - """A simple functinal model and its inputs.""" + """A simple functional model and its inputs.""" def get_model(self, **kwargs): output_name = 'output_layer' diff --git a/tensorflow/python/distribute/parameter_server_strategy.py b/tensorflow/python/distribute/parameter_server_strategy.py index 2791dfa9a7b..b9dca636726 100644 --- a/tensorflow/python/distribute/parameter_server_strategy.py +++ b/tensorflow/python/distribute/parameter_server_strategy.py @@ -550,7 +550,7 @@ class ParameterServerStrategyExtended(distribute_lib.StrategyExtendedV1): cluster_spec=None, task_type=None, task_id=None): - """Configures the strategy class with `cluser_spec`. + """Configures the strategy class with `cluster_spec`. The strategy object will be re-initialized if `cluster_spec` is passed to `configure` but was not passed when instantiating the strategy. 
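Several of the docstrings corrected above (CollectiveAllReduceStrategy, run_distribute_coordinator, ParameterServerStrategyExtended.configure) revolve around the cluster_spec/task_type/task_id triple that TensorFlow parses from the 'TF_CONFIG' environment variable. As a reference point for those fixes, here is a minimal sketch of that layout; the host:port addresses and job sizes are hypothetical, not taken from the patch:

```python
# Minimal TF_CONFIG sketch: "cluster" maps job names to address lists, and
# "task" names this process's task_type and task_id within that cluster.
# All addresses below are hypothetical placeholders.
import json
import os

os.environ["TF_CONFIG"] = json.dumps({
    "cluster": {
        "worker": ["worker0.example.com:2222", "worker1.example.com:2222"],
        "ps": ["ps0.example.com:2222"],
    },
    "task": {"type": "worker", "index": 0},  # this process is worker 0
})
```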
diff --git a/tensorflow/python/distribute/parameter_server_strategy_test.py b/tensorflow/python/distribute/parameter_server_strategy_test.py index ce289a7e1e3..f2c662b6713 100644 --- a/tensorflow/python/distribute/parameter_server_strategy_test.py +++ b/tensorflow/python/distribute/parameter_server_strategy_test.py @@ -186,7 +186,7 @@ class ParameterServerStrategyTestBase( g = e + 1.0 self.assertEqual(g.device, worker_device + '/device:CPU:1') - # Ths ops.colocate_with will be ignored when defining a variale but not + # This ops.colocate_with will be ignored when defining a variable but not # for a normal tensor. with ops.colocate_with(x): u = variable_scope.get_variable('u', initializer=30.0) @@ -340,7 +340,7 @@ class ParameterServerStrategyTestBase( g = e + 1.0 self.assertEqual(g.device, device_util.canonicalize('/device:CPU:1')) - # Ths ops.colocate_with will be ignored when defining a variale but not + # This ops.colocate_with will be ignored when defining a variable but not # for a normal tensor. with ops.colocate_with(x): u = variable_scope.get_variable('u', initializer=30.0) diff --git a/tensorflow/python/distribute/reduce_util.py b/tensorflow/python/distribute/reduce_util.py index 5fe1bd4cfa1..16600f54e80 100644 --- a/tensorflow/python/distribute/reduce_util.py +++ b/tensorflow/python/distribute/reduce_util.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Utilites for reduce operations.""" +"""Utilities for reduce operations.""" from __future__ import absolute_import from __future__ import division diff --git a/tensorflow/python/distribute/saved_model_test_base.py b/tensorflow/python/distribute/saved_model_test_base.py index fab4aad2338..832bb4f1dbd 100644 --- a/tensorflow/python/distribute/saved_model_test_base.py +++ b/tensorflow/python/distribute/saved_model_test_base.py @@ -142,7 +142,7 @@ class TestSavedModelBase(test.TestCase, parameterized.TestCase): def _save_model(self, model, saved_dir): """Save the given model to the given saved_dir. - This method needs to be implemeted by the subclasses. + This method needs to be implemented by the subclasses. Args: model: a keras model object to save. diff --git a/tensorflow/python/distribute/values_test.py b/tensorflow/python/distribute/values_test.py index c2624a10a9e..425930f722a 100644 --- a/tensorflow/python/distribute/values_test.py +++ b/tensorflow/python/distribute/values_test.py @@ -1409,7 +1409,7 @@ class PerReplicaTest(test.TestCase, parameterized.TestCase): vals = (constant_op.constant(1.), constant_op.constant([5., 6.0]),) per_replica = values.PerReplica(vals) - # Note: nest.map_structutre exercises nest.flatten and + # Note: nest.map_structure exercises nest.flatten and # nest.pack_sequence_as. result = nest.map_structure( lambda t: t + 10, per_replica, expand_composites=True) diff --git a/tensorflow/python/eager/benchmarks_test.py b/tensorflow/python/eager/benchmarks_test.py index e7b90a19308..056e7f765ec 100644 --- a/tensorflow/python/eager/benchmarks_test.py +++ b/tensorflow/python/eager/benchmarks_test.py @@ -25,7 +25,7 @@ To run a subset of benchmarks using --benchmarks flag. --benchmarks: the list of benchmarks to run. The specified value is interpreted as a regular expression and any benchmark whose name contains a partial match to the regular expression is executed. -e.g. --benchmarks=".*matmul*." will run all matmul related benmarks. +e.g.
--benchmarks=".*matmul*." will run all matmul related benchmarks. """ from __future__ import absolute_import diff --git a/tensorflow/python/eager/def_function.py b/tensorflow/python/eager/def_function.py index a13e4a68142..a2bcb91918b 100644 --- a/tensorflow/python/eager/def_function.py +++ b/tensorflow/python/eager/def_function.py @@ -65,7 +65,7 @@ class _CallCounter(object): break def called_without_tracing(self): - # We don't count tracing when users load a concrete function dicretly or + # We don't count tracing when users load a concrete function directly or # call get_concrete_function, so the first call can be not a tracing call. if not self._calls_per_tracings: self._calls_per_tracings = [0] @@ -380,7 +380,7 @@ class Function(object): tensorflow.autograph.Feature values. Allows enabling additional conversion options when autograph is set to True. experimental_relax_shapes: When true, argument shapes may be relaxed to - avoid unecessary retracing. + avoid unnecessary retracing. experimental_compile: If false, execute the function in a regular way. The function is optimized by some graph rewrite passes (some ops might be clustered into a single op) and interpreted by the standard TensorFlow @@ -728,7 +728,7 @@ class Function(object): @function_lib.defun(autograph=False) def initialize_variables(): op_map = object_identity.ObjectIdentityDictionary() - # Stack all the var_is_initialized values into one tensor and intepret the + # Stack all the var_is_initialized values into one tensor and interpret the # numpy value. This will reduce the number of RPCs between client and # worker in the remote case. with ops.init_scope(): diff --git a/tensorflow/python/eager/forwardprop_test.py b/tensorflow/python/eager/forwardprop_test.py index 0f88ee2d4a6..79c0714c720 100644 --- a/tensorflow/python/eager/forwardprop_test.py +++ b/tensorflow/python/eager/forwardprop_test.py @@ -936,7 +936,7 @@ class ForwardpropTest(test.TestCase, parameterized.TestCase): # NOTE: assert_no_new_pyobjects_executing_eagerly fails flakily on this # test... could be something wrong with the test decorator, or some sort of - # nondeterminstic caching. + # nondeterministic caching. def testMirroredVariableWatched(self): def _replicated(input_tangent): diff --git a/tensorflow/python/eager/forwardprop_util.py b/tensorflow/python/eager/forwardprop_util.py index f618525d01b..bb1b087a53b 100644 --- a/tensorflow/python/eager/forwardprop_util.py +++ b/tensorflow/python/eager/forwardprop_util.py @@ -65,7 +65,7 @@ def push_forwardprop_state(): temporarily reset its state. This is useful when building forwardprop versions of functions, where an accumulator will trigger function building and then must process captured symbolic tensors while building it. Without pushing and - poping, accumulators ignore operations executed as a direct result of their + popping, accumulators ignore operations executed as a direct result of their own jvp computations. Yields: diff --git a/tensorflow/python/eager/function.py b/tensorflow/python/eager/function.py index 33fdc3a1758..3f5d2d8f0bf 100644 --- a/tensorflow/python/eager/function.py +++ b/tensorflow/python/eager/function.py @@ -358,7 +358,7 @@ def add_function_callback(function_callback): wherein `function` is the just-created _EagerDefinedFunction. The callback is invoked immediately after a new `_EagerDefinedFunction` - is created. The return value(s) of the callback fucntion (if any) is ignored. + is created. The return value(s) of the callback function (if any) is ignored. 
Repeated registration of the same callback function is idempotent. After a callback is added, it can be removed with the @@ -850,7 +850,7 @@ class _DelayedRewriteGradientFunctions(object): higher-order symbolic gradients (tf.gradients). Args: - flat_outputs: The restult of running `forward`. + flat_outputs: The result of running `forward`. inference_args: A flat list of Tensors with inference inputs to the operation. input_tangents: A flat list of Tensors with input tangents consumed by the @@ -1314,7 +1314,7 @@ class _TapeGradientFunctions(object): have produced tangents which need to be recorded. Args: - flat_outputs: The restult of running `forward`. + flat_outputs: The result of running `forward`. inference_args: A flat list of Tensors with inference inputs to the operation. input_tangents: A flat list of Tensors with input tangents consumed by the @@ -1757,7 +1757,7 @@ class ConcreteFunction(object): return self._build_call_outputs(flat_outputs) def _experimental_with_cancellation_manager(self, cancellation_manager): - """Returns a callable that invokes a cancelable version of this function. + """Returns a callable that invokes a cancellable version of this function. Args: cancellation_manager: A `CancellationManager` object that can be used to @@ -2376,7 +2376,7 @@ class Function(object): `when autograph=True`. See https://www.tensorflow.org/guide/autograph for more information. experimental_relax_shapes: When true, argument shapes may be relaxed to - avoid unecessary retracing. + avoid unnecessary retracing. capture_by_value: Experimental. Whether to capture resource variables by value or reference. If None, will inherit from a parent context or default to False. @@ -2668,7 +2668,7 @@ class Function(object): return graph_function def _define_function_with_shape_relaxation(self, args, kwargs): - """Define a function, relaxing arg shapes to avoid unecessary retracing.""" + """Define a function, relaxing arg shapes to avoid unnecessary retracing.""" rank_only_cache_key = self._cache_key( args, kwargs, include_tensor_ranks_only=True) @@ -2824,7 +2824,7 @@ def defun(func=None, the values of its non-Tensor Python objects. When eager execution is enabled, the ability to create graphs from Python - functions makes it possible to incrementally trade off debugability and + functions makes it possible to incrementally trade off debuggability and interactivity for performance. Functions compiled with `defun` cannot be inspected with `pdb`; however, executing a graph generated by `defun` sometimes takes less time and memory than eagerly @@ -3130,7 +3130,7 @@ def defun(func=None, of tensorflow.autograph.Feature values) to control behavior when autograph=True. experimental_relax_shapes: When true, argument shapes may be relaxed to - avoid unecessary retracing. + avoid unnecessary retracing. 
Returns: If `func` is not None, returns a callable that will execute the compiled diff --git a/tensorflow/python/eager/function_test.py b/tensorflow/python/eager/function_test.py index b03586633ee..0a34d4a3852 100644 --- a/tensorflow/python/eager/function_test.py +++ b/tensorflow/python/eager/function_test.py @@ -3508,7 +3508,7 @@ class MultiDeviceTest(test.TestCase, parameterized.TestCase): self.assertEqual(r2.numpy(), 34000.0 + 13.0 * 7.0) @test_util.run_gpu_only - def testArgumentPrunning(self): + def testArgumentPruning(self): """Tests functions taking unnecessary arguments.""" with ops.device('/device:CPU:0'): c1 = constant_op.constant(5.0) diff --git a/tensorflow/python/eager/pywrap_tensor.cc b/tensorflow/python/eager/pywrap_tensor.cc index 723d4d69887..b5c9bfb6824 100644 --- a/tensorflow/python/eager/pywrap_tensor.cc +++ b/tensorflow/python/eager/pywrap_tensor.cc @@ -359,7 +359,7 @@ typedef struct EagerTensor { TFE_TensorHandle* handle; int64_t id; // This mirrors tensorflow.core.framework.ops.Tensor._handle_data Which will - // be None for tensors of type other than DT_REOSURCE. For DT_RESOURCE + // be None for tensors of type other than DT_RESOURCE. For DT_RESOURCE // tensors, this will contain a serialized HandleData proto with shape // inference metadata about shapes and dtypes of resources accessible from // this handle. @@ -660,7 +660,7 @@ static PyObject* EagerTensor_backing_device(EagerTensor* self) { #endif } -static PyGetSetDef EagerTensor_getseters[] = { +static PyGetSetDef EagerTensor_getsetters[] = { {const_cast("_id"), (getter)EagerTensor_getid, nullptr, const_cast("Tensor ID."), nullptr}, {const_cast("device"), (getter)EagerTensor_device, nullptr, @@ -758,7 +758,7 @@ PyTypeObject* EagerTensorType = nullptr; static PyType_Slot EagerTensor_Type_slots[] = { {Py_tp_dealloc, reinterpret_cast(EagerTensor_dealloc)}, {Py_tp_methods, reinterpret_cast(EagerTensor_methods)}, - {Py_tp_getset, reinterpret_cast(EagerTensor_getseters)}, + {Py_tp_getset, reinterpret_cast(EagerTensor_getsetters)}, {Py_tp_init, reinterpret_cast(EagerTensor_init)}, {0, nullptr}, }; @@ -799,7 +799,7 @@ static PyTypeObject _EagerTensorType = { nullptr, /* tp_iternext */ EagerTensor_methods, /* tp_methods */ EagerTensor_members, /* tp_members */ - EagerTensor_getseters, /* tp_getset */ + EagerTensor_getsetters, /* tp_getset */ nullptr, /* tp_base */ nullptr, /* tp_dict */ nullptr, /* tp_descr_get */ diff --git a/tensorflow/python/eager/pywrap_tfe.h b/tensorflow/python/eager/pywrap_tfe.h index 53d658039b1..398c8aa14a8 100755 --- a/tensorflow/python/eager/pywrap_tfe.h +++ b/tensorflow/python/eager/pywrap_tfe.h @@ -197,7 +197,7 @@ PyObject* TFE_Py_TapeSetIsStopped(); // forwardprop to, given the gradients of the output tensors, produce the // gradients of the input tensors. This function is automatically transposed // during forwardprop. -// - forward_function is an optional special-case for fowardprop, taking input +// - forward_function is an optional special-case for forwardprop, taking input // jvps and returning output jvps. // // Records an operation both for backprop (gradient tape) and forwardprop @@ -307,7 +307,7 @@ PyObject* TFE_Py_ForwardAccumulatorJVP(PyObject* accumulator, PyObject* tensor); // temporarily reset its state. This is useful when building forwardprop // versions of functions, where an accumulator will trigger function building // and then must process captured symbolic tensors while building it. 
Without -// pushing and poping, accumulators ignore operations executed as a direct +// pushing and popping, accumulators ignore operations executed as a direct // result of their own jvp computations. PyObject* TFE_Py_ForwardAccumulatorPushState(); PyObject* TFE_Py_ForwardAccumulatorPopState(); diff --git a/tensorflow/python/eager/tape.py b/tensorflow/python/eager/tape.py index 70a48c8b0da..2ecac8bbb73 100644 --- a/tensorflow/python/eager/tape.py +++ b/tensorflow/python/eager/tape.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Gradient tape utilites.""" +"""Gradient tape utilities.""" from __future__ import absolute_import from __future__ import division diff --git a/tensorflow/python/eager/wrap_function.py b/tensorflow/python/eager/wrap_function.py index 3c3adec61f7..2601570a01e 100644 --- a/tensorflow/python/eager/wrap_function.py +++ b/tensorflow/python/eager/wrap_function.py @@ -270,7 +270,7 @@ class WrappedFunction(function.ConcreteFunction): tensor_fetches = [] tensor_infos = [] - def _fetch_preprocesing_callback(fetch): + def _fetch_preprocessing_callback(fetch): """Extract out lists of ops, tensors, and tensor type info. Turns TensorInfos into Tensors in the original `fetches` structure. @@ -300,9 +300,9 @@ class WrappedFunction(function.ConcreteFunction): return fetch else: graph_element = self.graph.as_graph_element(fetch) - return _fetch_preprocesing_callback(graph_element) + return _fetch_preprocessing_callback(graph_element) - fetches = nest.map_structure(_fetch_preprocesing_callback, fetches) + fetches = nest.map_structure(_fetch_preprocessing_callback, fetches) # Expand composite tensors into their component dense Tensors. tensor_fetches = nest.flatten(tensor_fetches, expand_composites=True) diff --git a/tensorflow/python/feature_column/feature_column_test.py b/tensorflow/python/feature_column/feature_column_test.py index 3ff9a7dbc15..b9206f40ba0 100644 --- a/tensorflow/python/feature_column/feature_column_test.py +++ b/tensorflow/python/feature_column/feature_column_test.py @@ -63,7 +63,7 @@ def _initialized_session(config=None): class LazyColumnTest(test.TestCase): - def test_transormations_called_once(self): + def test_transformations_called_once(self): class TransformCounter(_FeatureColumn): @@ -1131,7 +1131,7 @@ class CrossedColumnTest(test.TestCase): def test_linear_model(self): """Tests linear_model. - Uses data from test_get_sparse_tesnsors_simple. + Uses data from test_get_sparse_tensors_simple. """ a = fc._numeric_column('a', dtype=dtypes.int32, shape=(2,)) b = fc._bucketized_column(a, boundaries=(0, 1)) @@ -1213,7 +1213,7 @@ class CrossedColumnTest(test.TestCase): def test_keras_linear_model(self): """Tests _LinearModel. - Uses data from test_get_sparse_tesnsors_simple. + Uses data from test_get_sparse_tensors_simple. 
""" a = fc._numeric_column('a', dtype=dtypes.int32, shape=(2,)) b = fc._bucketized_column(a, boundaries=(0, 1)) diff --git a/tensorflow/python/feature_column/feature_column_v2.py b/tensorflow/python/feature_column/feature_column_v2.py index fc02360e226..0b1453119d0 100644 --- a/tensorflow/python/feature_column/feature_column_v2.py +++ b/tensorflow/python/feature_column/feature_column_v2.py @@ -683,7 +683,7 @@ class LinearModel(training.Model): } ``` - with `sparse_combiner` as "mean", the linear model outputs conceptly are + with `sparse_combiner` as "mean", the linear model outputs conceptually are ``` y_0 = 1.0 / 2.0 * ( w_a + w_ b) + w_c + b_0 y_1 = w_d + 1.0 / 3.0 * ( w_e + w_ f + w_g) + b_1 diff --git a/tensorflow/python/feature_column/feature_column_v2_test.py b/tensorflow/python/feature_column/feature_column_v2_test.py index cc9420cfe40..8c9aee722a6 100644 --- a/tensorflow/python/feature_column/feature_column_v2_test.py +++ b/tensorflow/python/feature_column/feature_column_v2_test.py @@ -1369,7 +1369,7 @@ class CrossedColumnTest(test.TestCase): def test_linear_model(self): """Tests linear_model. - Uses data from test_get_sparse_tesnsors_simple. + Uses data from test_get_sparse_tensors_simple. """ a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,)) b = fc.bucketized_column(a, boundaries=(0, 1)) @@ -1463,7 +1463,7 @@ class CrossedColumnTest(test.TestCase): def test_old_linear_model(self): """Tests linear_model. - Uses data from test_get_sparse_tesnsors_simple. + Uses data from test_get_sparse_tensors_simple. """ a = fc.numeric_column('a', dtype=dtypes.int32, shape=(2,)) b = fc.bucketized_column(a, boundaries=(0, 1)) @@ -1573,7 +1573,7 @@ class CrossedColumnTest(test.TestCase): def test_old_linear_model_old_numeric(self): """Tests linear_model. - Uses data from test_get_sparse_tesnsors_simple. + Uses data from test_get_sparse_tensors_simple. """ a = fc_old._numeric_column('a', dtype=dtypes.int32, shape=(2,)) b = fc.bucketized_column(a, boundaries=(0, 1)) diff --git a/tensorflow/python/feature_column/serialization.py b/tensorflow/python/feature_column/serialization.py index eb183d7bbcd..970227c4e75 100644 --- a/tensorflow/python/feature_column/serialization.py +++ b/tensorflow/python/feature_column/serialization.py @@ -197,7 +197,7 @@ def _column_name_with_class_name(fc): Without this two FeatureColumns that have the same name and where one wraps the other, such as an IndicatorColumn wrapping a SequenceCategoricalColumn, will fail to deserialize because they will have the - same name in colums_by_name, causing the wrong column to be returned. + same name in columns_by_name, causing the wrong column to be returned. Args: fc: A FeatureColumn. 
diff --git a/tensorflow/python/framework/config_test.py b/tensorflow/python/framework/config_test.py index f956d8cf170..72612a21cbf 100644 --- a/tensorflow/python/framework/config_test.py +++ b/tensorflow/python/framework/config_test.py @@ -614,7 +614,7 @@ class DeviceTest(test.TestCase): self.assertIsNotNone(gpu.name) @reset_eager - def testV1CompatibilityDummyInivisibleDeviceList(self): + def testV1CompatibilityDummyInvisibleDeviceList(self): gpus = config.list_physical_devices('GPU') if gpus: self.skipTest('Test requires no GPUs') diff --git a/tensorflow/python/framework/convert_to_constants.py b/tensorflow/python/framework/convert_to_constants.py index f990dae9966..5c260b9983f 100644 --- a/tensorflow/python/framework/convert_to_constants.py +++ b/tensorflow/python/framework/convert_to_constants.py @@ -249,7 +249,7 @@ def _get_control_flow_function_data(node_defs, tensor_data, name_to_node): def get_source_node_name_through_identities(node_name): # Trace the source node along with a chain of Identity nodes. - # For example, given Plaecholder -> Identity -> Identity -> node_name + # For example, given Placeholder -> Identity -> Identity -> node_name # The function will return the name of the Placeholder. while name_to_node[node_name].op == "Identity": node_name = _get_tensor_name(name_to_node[node_name].input[0]) diff --git a/tensorflow/python/framework/device_spec.py b/tensorflow/python/framework/device_spec.py index 08875ad9452..36d30450e92 100644 --- a/tensorflow/python/framework/device_spec.py +++ b/tensorflow/python/framework/device_spec.py @@ -212,7 +212,7 @@ class DeviceSpecV2(object): def make_merged_spec(self, dev): """Returns a new DeviceSpec which incorporates `dev`. - When combining specs, `dev` will take precidence over the current spec. + When combining specs, `dev` will take precedence over the current spec. So for instance: ``` first_spec = tf.DeviceSpec(job=0, device_type="CPU") @@ -253,7 +253,7 @@ class DeviceSpecV2(object): job=self.job, replica=self.replica, task=self.task, device_type=self.device_type, device_index=self.device_index) - # Explicitly provided kwargs take precidence. + # Explicitly provided kwargs take precedence. init_kwargs.update(kwargs) return self.__class__(**init_kwargs) diff --git a/tensorflow/python/framework/func_graph.py b/tensorflow/python/framework/func_graph.py index e4b086dc152..d686df562a6 100644 --- a/tensorflow/python/framework/func_graph.py +++ b/tensorflow/python/framework/func_graph.py @@ -77,7 +77,7 @@ def convert_structure_to_signature(structure, arg_names=None): Returns: Identical structure that has TensorSpec objects instead of Tensors and - UknownArgument instead of any unsupported types. + UnknownArgument instead of any unsupported types. """ def encode_arg(arg, path): """A representation for this argument, for converting into signatures.""" @@ -1197,7 +1197,7 @@ def _get_defun_inputs(args, names, structure, flat_shapes=None): "either zero or all names have to be specified.") for arg in flattened: - # We have a shape entry for each arg, regadless of whether it's a real + # We have a shape entry for each arg, regardless of whether it's a real # Tensor or not. For non-tensor entries it should be None. 
shape = next(shapes_iter) if isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec)): diff --git a/tensorflow/python/framework/function.py b/tensorflow/python/framework/function.py index b44a4b85c6c..82ddac50c4d 100644 --- a/tensorflow/python/framework/function.py +++ b/tensorflow/python/framework/function.py @@ -290,7 +290,7 @@ class _DefinedFunction(object): device_funcs = ops.get_default_graph()._device_functions_outer_to_inner # pylint: enable=protected-access - # Get the innermost device if possbile. + # Get the innermost device if possible. self._caller_device = device_funcs[-1] if device_funcs else None # Cached OpDef for this function. When C API is enabled, this is diff --git a/tensorflow/python/framework/function_def_to_graph.py b/tensorflow/python/framework/function_def_to_graph.py index 76ffc41ecee..69aa38dade3 100644 --- a/tensorflow/python/framework/function_def_to_graph.py +++ b/tensorflow/python/framework/function_def_to_graph.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= -"""Utlity to convert FunctionDef to GraphDef and Graph.""" +"""Utility to convert FunctionDef to GraphDef and Graph.""" from __future__ import absolute_import from __future__ import division diff --git a/tensorflow/python/framework/graph_util_impl.py b/tensorflow/python/framework/graph_util_impl.py index b8d434278e3..a658dfe0143 100644 --- a/tensorflow/python/framework/graph_util_impl.py +++ b/tensorflow/python/framework/graph_util_impl.py @@ -124,7 +124,7 @@ def _node_name(n): def _get_colocated_node_name(colocated_node_name): - """Decodes colocated node name and returns it without loc:@ preprended.""" + """Decodes colocated node name and returns it without loc:@ prepended.""" colocated_node_decoded = colocated_node_name.decode("utf-8") if colocated_node_decoded.startswith("loc:@"): return colocated_node_decoded[5:] diff --git a/tensorflow/python/framework/meta_graph.py b/tensorflow/python/framework/meta_graph.py index af31affa431..327b476c576 100644 --- a/tensorflow/python/framework/meta_graph.py +++ b/tensorflow/python/framework/meta_graph.py @@ -1045,7 +1045,7 @@ def export_scoped_meta_graph(filename=None, name, _ = os.path.splitext(filename) debug_filename = "{name}{ext}".format(name=name, ext=".debug") - # Gets the operation from the graph by the name. Exludes variable nodes, + # Gets the operation from the graph by the name. Excludes variable nodes, # so only the nodes in the frozen models are included. # TODO(liufengdb): fix this for functions. ops_to_export = [] diff --git a/tensorflow/python/framework/op_callbacks.py b/tensorflow/python/framework/op_callbacks.py index 29f8dfb2c16..bfd41f0465a 100644 --- a/tensorflow/python/framework/op_callbacks.py +++ b/tensorflow/python/framework/op_callbacks.py @@ -107,7 +107,7 @@ def add_op_callback(callback_fn): if not callable(callback_fn): raise ValueError( "Callback function passed to op_callback() is expected to be callable, " - "but is not. Recevied %s" % callback_fn) + "but is not. 
Received %s" % callback_fn) ctx = context.context() ctx.add_op_callback(callback_fn) if ctx.executing_eagerly(): diff --git a/tensorflow/python/framework/op_callbacks_test.py b/tensorflow/python/framework/op_callbacks_test.py index 109b749449f..0253b0444ed 100644 --- a/tensorflow/python/framework/op_callbacks_test.py +++ b/tensorflow/python/framework/op_callbacks_test.py @@ -62,13 +62,13 @@ _MUL_OP = b"Mul" _NEXT_ITERATION_OP = b"NextIteration" _PLACEHOLDER_OP = b"Placeholder" _POW_OP = b"Pow" -_READ_VARIALBE_OP = b"ReadVariableOp" +_READ_VARIABLE_OP = b"ReadVariableOp" _SIN_OP = b"Sin" _SPARSE_TENSOR_DENSE_MATMUL_OP = b"SparseTensorDenseMatMul" _SQRT_OP = b"Sqrt" _SQUARE_OP = b"Square" _STATELESS_IF_OP = b"StatelessIf" -_SWTICH_OP = b"Switch" +_SWITCH_OP = b"Switch" _UNIQUE_OP = b"Unique" _VAR_HANDLE_OP = b"VarHandleOp" _WHILE_OP = b"While" @@ -109,7 +109,7 @@ class _NumpyFunctionCallback(object): for output in outputs: if compat.as_bytes(op_type) in ( _ENTER_OP, _EXIT_OP, _IF_OP, _MERGE_OP, _NEXT_ITERATION_OP, - _STATELESS_IF_OP, _SWTICH_OP, _WHILE_OP, _IDENTITY_OP, + _STATELESS_IF_OP, _SWITCH_OP, _WHILE_OP, _IDENTITY_OP, _VAR_HANDLE_OP, _PLACEHOLDER_OP): # TODO(cais): Overriding the output of StatelessIf, If and While ops # currently fails with error. Investigate (b/139668453). @@ -660,7 +660,7 @@ class OpCallbacksTest(test_util.TensorFlowTestCase): # Check the graph internal ndarrays recorded at runtime. read_variable_op_outputs = instrument.graph_internal_ndarrays[ - _READ_VARIALBE_OP] + _READ_VARIABLE_OP] self.assertAllClose(read_variable_op_outputs, [1.0, 2.0, 4.0, 8.0]) less_op_outputs = instrument.graph_internal_ndarrays[_LESS_OP] self.assertAllClose(less_op_outputs, [True, True, True, True, False]) diff --git a/tensorflow/python/framework/op_def_library_test.py b/tensorflow/python/framework/op_def_library_test.py index 5c810d29bee..4c84741105f 100644 --- a/tensorflow/python/framework/op_def_library_test.py +++ b/tensorflow/python/framework/op_def_library_test.py @@ -1094,7 +1094,7 @@ class OpDefLibraryTest(test_util.TensorFlowTestCase): attr { key: 'M' value { i: 2 } } """, op.node_def) - # Empty input lists: assume defaut type for T. + # Empty input lists: assume default type for T. op = op_def_library.apply_op( "InPolymorphicTwice", a=[], b=[], name="r") self.assertProtoEquals(""" diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py index 2c8cdc325e2..ddff8da8a93 100644 --- a/tensorflow/python/framework/ops.py +++ b/tensorflow/python/framework/ops.py @@ -3640,7 +3640,7 @@ class Graph(object): """Returns the `Operation` with the given `name`. This is a internal unsafe version of get_operation_by_name. It skips many - checks and does not have user friedly error messages but runs considerably + checks and does not have user friendly error messages but runs considerably faster. This method may be called concurrently from multiple threads. Args: @@ -5161,7 +5161,7 @@ def control_dependencies(control_inputs): """ if context.executing_eagerly(): if control_inputs: - # Excute any pending callables. + # Execute any pending callables. for control in control_inputs: if callable(control): control() @@ -6178,8 +6178,8 @@ def name_scope(name, default_name=None, values=None, skip_on_eager=True): values: The list of `Tensor` arguments that are passed to the op function. skip_on_eager: Indicates to return NullContextmanager if executing eagerly. 
By default this is True since naming tensors and operations in eager mode - have little use and cause unecessary performance overhead. However, it is - important to preseve variable names since they are often useful for + has little use and causes unnecessary performance overhead. However, it is + important to preserve variable names since they are often useful for debugging and saved models. Returns: @@ -6650,7 +6650,7 @@ def add_exit_callback_to_default_func_graph(fn): To be executed when exiting func graph scope. Raises: - RuntimeError: If executed when the current defualt graph is not a FuncGraph, + RuntimeError: If executed when the current default graph is not a FuncGraph, or not currently executing in function creation mode (e.g., if inside an init_scope). """ diff --git a/tensorflow/python/framework/random_seed.py b/tensorflow/python/framework/random_seed.py index 7a204e41a52..a5e727816c0 100644 --- a/tensorflow/python/framework/random_seed.py +++ b/tensorflow/python/framework/random_seed.py @@ -291,7 +291,7 @@ def set_seed(seed): ``` The reason we get 'A2' instead 'A1' on the second call of `tf.random.uniform` - above is because the same `tf.random.uniform` kernel (i.e. internel + above is because the same `tf.random.uniform` kernel (i.e. internal representation) is used by TensorFlow for all calls of it with the same arguments, and the kernel maintains an internal counter which is incremented every time it is executed, generating different results. diff --git a/tensorflow/python/framework/smart_cond.py b/tensorflow/python/framework/smart_cond.py index d4af130184c..cecd3a113a4 100644 --- a/tensorflow/python/framework/smart_cond.py +++ b/tensorflow/python/framework/smart_cond.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""smart_cond and related utilties.""" +"""smart_cond and related utilities.""" from __future__ import absolute_import from __future__ import division diff --git a/tensorflow/python/framework/subscribe.py b/tensorflow/python/framework/subscribe.py index 80e3b360771..8c3f91f62d8 100644 --- a/tensorflow/python/framework/subscribe.py +++ b/tensorflow/python/framework/subscribe.py @@ -187,7 +187,7 @@ def _is_subscribed_identity(tensor): tensor: A `tf.Tensor` to check. Returns: - True if the given tensor matches the criteria for subscription identies: + True if the given tensor matches the criteria for subscription identities: its op type is `Identity`, its name matches the name of its input and conforms to the convention for subscribed nodes. False otherwise. diff --git a/tensorflow/python/framework/tensor_like.py b/tensorflow/python/framework/tensor_like.py index ef3c201d2a1..4cca711d6fe 100644 --- a/tensorflow/python/framework/tensor_like.py +++ b/tensorflow/python/framework/tensor_like.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License.
# ============================================================================== -"""Base class for tensor-like ojects.""" +"""Base class for tensor-like objects.""" from __future__ import absolute_import from __future__ import division diff --git a/tensorflow/python/framework/tensor_shape.py b/tensorflow/python/framework/tensor_shape.py index e6470bf815d..7c741380636 100644 --- a/tensorflow/python/framework/tensor_shape.py +++ b/tensorflow/python/framework/tensor_shape.py @@ -556,7 +556,7 @@ class Dimension(object): def __mod__(self, other): """Returns `self` modulo `other`. - Dimension moduli are computed as follows: + Dimension modulo is computed as follows: ```python tf.compat.v1.Dimension(m) % tf.compat.v1.Dimension(n) == diff --git a/tensorflow/python/framework/tensor_shape_test.py b/tensorflow/python/framework/tensor_shape_test.py index bc179acfc55..e1bc6d5e8aa 100644 --- a/tensorflow/python/framework/tensor_shape_test.py +++ b/tensorflow/python/framework/tensor_shape_test.py @@ -142,7 +142,7 @@ class DimensionTest(test_util.TensorFlowTestCase): tensor_shape.Dimension(None) == tensor_shape.Dimension(None)) # None indicates ambiguous comparison, but comparison vs the wrong type - # is unambigously False. + # is unambiguously False. self.assertIsNotNone(tensor_shape.Dimension(None) == 12.99) self.assertNotEqual(tensor_shape.Dimension(None), 12.99) @@ -162,7 +162,7 @@ class DimensionTest(test_util.TensorFlowTestCase): tensor_shape.Dimension(None) != tensor_shape.Dimension(None)) # None indicates ambiguous comparison, but comparison vs the wrong type - # is unambigously False. + # is unambiguously False. self.assertIsNotNone(tensor_shape.Dimension(None) != 12.99) self.assertNotEqual(tensor_shape.Dimension(None), 12.99) diff --git a/tensorflow/python/framework/tensor_util.py b/tensorflow/python/framework/tensor_util.py index ffe3a8cb845..7504f7e27cd 100644 --- a/tensorflow/python/framework/tensor_util.py +++ b/tensorflow/python/framework/tensor_util.py @@ -1010,7 +1010,7 @@ def shape_tensor(shape): # pylint: disable=invalid-name # If there are Dimension objects in the shape, unwrap them. This can be a # problem if v1 and v2 TensorShape objects get mixed up in partial # conversions, leading to shapes such as (1, 2, Dimension(5)), which are - # not convertible to Tensors becasue of mixed content. + # not convertible to Tensors because of mixed content. shape = tuple(map(tensor_shape.dimension_value, shape)) return ops.convert_to_tensor(shape, dtype=dtype, name="shape") diff --git a/tensorflow/python/framework/test_combinations.py b/tensorflow/python/framework/test_combinations.py index a58520ecd38..5a43704c548 100644 --- a/tensorflow/python/framework/test_combinations.py +++ b/tensorflow/python/framework/test_combinations.py @@ -37,7 +37,7 @@ results. The execution of generated tests can be customized in a number of ways: - The test can be skipped if it is not running in the correct environment. -- The arguments that are passed to the test can be additionaly transformed. +- The arguments that are passed to the test can be additionally transformed. - The test can be run with specific Python context managers. These behaviors can be customized by providing instances of `TestCombination` to `generate()`.
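The `Dimension.__mod__` docstring touched above documents shape-dimension modulo. A quick sketch of the documented behavior, assuming a TF 2.x install where the v1 `Dimension` class is reachable as `tf.compat.v1.Dimension`:

```python
# Known % known gives a known Dimension; once an unknown dimension (None)
# is involved, the result is unknown, as the docstring above describes.
import tensorflow as tf

d = tf.compat.v1.Dimension(8) % tf.compat.v1.Dimension(3)
print(d.value)  # 2

u = tf.compat.v1.Dimension(8) % tf.compat.v1.Dimension(None)
print(u.value)  # None
```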
diff --git a/tensorflow/python/framework/test_util.py b/tensorflow/python/framework/test_util.py index b57a68eb059..a225fd94100 100644 --- a/tensorflow/python/framework/test_util.py +++ b/tensorflow/python/framework/test_util.py @@ -636,7 +636,7 @@ def assert_no_new_pyobjects_executing_eagerly(func=None, warmup_iters=2): # There should be no new Python objects hanging around. obj_count_by_type = _get_object_count_by_type() - obj_count_by_type - # In some cases (specifacally on MacOS), new_count is somehow + # In some cases (specifically on MacOS), new_count is somehow # smaller than previous_count. # Using plain assert because not all classes using this decorator # have assertLessEqual @@ -766,7 +766,7 @@ def _find_reference_cycle(objects, idx): return "{}, {}".format(type(obj), id(obj)) def build_ref_graph(obj, graph, reprs, blacklist): - """Builds a reference graph as -> . + """Builds a reference graph as -> . Args: obj: The object to start from. The graph will be built by recursively @@ -1394,7 +1394,7 @@ def run_gpu_only(func=None): def run_cuda_only(func=None): """Execute the decorated test only if a GPU is available. - This function is intended to be applied to tests that require the precense + This function is intended to be applied to tests that require the presence of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped. Args: @@ -1583,7 +1583,7 @@ class FakeEagerSession(object): self._test_case = test_case def run(self, fetches, *args, **kwargs): - """Evalaute `fetches`. + """Evaluate `fetches`. Fail if additional args are specified. diff --git a/tensorflow/python/framework/test_util_test.py b/tensorflow/python/framework/test_util_test.py index f18e6e9cb21..eec7010fbdf 100644 --- a/tensorflow/python/framework/test_util_test.py +++ b/tensorflow/python/framework/test_util_test.py @@ -621,7 +621,7 @@ class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase): @test_util.run_deprecated_v1 def testRandomSeed(self): - # Call setUp again for WithCApi case (since it makes a new defeault graph + # Call setUp again for WithCApi case (since it makes a new default graph # after setup). # TODO(skyewm): remove this when C API is permanently enabled. self.setUp() @@ -744,10 +744,10 @@ class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase): def test_build_as_function_and_v1_graph(self): - class GraphModeAndFuncionTest(parameterized.TestCase): + class GraphModeAndFunctionTest(parameterized.TestCase): def __init__(inner_self): # pylint: disable=no-self-argument - super(GraphModeAndFuncionTest, inner_self).__init__() + super(GraphModeAndFunctionTest, inner_self).__init__() inner_self.graph_mode_tested = False inner_self.inside_function_tested = False @@ -763,7 +763,7 @@ class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase): self.assertFalse(inner_self.graph_mode_tested) inner_self.graph_mode_tested = True - test_object = GraphModeAndFuncionTest() + test_object = GraphModeAndFunctionTest() test_object.test_modes_v1_graph() test_object.test_modes_function() self.assertTrue(test_object.graph_mode_tested) diff --git a/tensorflow/python/framework/type_spec.py b/tensorflow/python/framework/type_spec.py index ce61290eb68..550678a72bb 100644 --- a/tensorflow/python/framework/type_spec.py +++ b/tensorflow/python/framework/type_spec.py @@ -150,7 +150,7 @@ class TypeSpec(object): Args: components: A nested structure of `tf.Tensor` or `tf.CompositeTensor`, - compatible with `self._component_specs`. 
(Caller is repsonsible for + compatible with `self._component_specs`. (Caller is responsible for ensuring compatibility.) Returns: @@ -373,7 +373,7 @@ class TypeSpec(object): * If they are both dicts with the same keys, then recursively combine the respective dict elements. * If they are both TypeSpecs, then combine using - TypeSpec.most_specific_comptible_type. + TypeSpec.most_specific_compatible_type. * If they are both TensorShapes, then combine using TensorShape.most_specific_compatible_shape. * If they are both TensorSpecs with the same dtype, then combine using diff --git a/tensorflow/python/grappler/cost_analyzer.cc b/tensorflow/python/grappler/cost_analyzer.cc index de4b82c84dc..79aba5f3409 100644 --- a/tensorflow/python/grappler/cost_analyzer.cc +++ b/tensorflow/python/grappler/cost_analyzer.cc @@ -110,7 +110,7 @@ void CostAnalyzer::PreprocessCosts() { double analytical_compute_cost = analytical.compute_time(); if (analytical_compute_cost == 0) { - // Negative infinity indidates unavailable data. + // Negative infinity indicates unavailable data. perf->set_compute_efficiency(-INFINITY); } else { perf->set_compute_efficiency(analytical_compute_cost / measured_cost); @@ -118,7 +118,7 @@ void CostAnalyzer::PreprocessCosts() { double analytical_memory_cost = analytical.memory_time(); if (analytical_memory_cost == 0) { - // Negative infinity indidates unavailable data. + // Negative infinity indicates unavailable data. perf->set_memory_efficiency(-INFINITY); } else { perf->set_memory_efficiency(analytical_memory_cost / measured_cost); diff --git a/tensorflow/python/grappler/item_test.py b/tensorflow/python/grappler/item_test.py index 3ec901a15ea..eaba87c45f5 100644 --- a/tensorflow/python/grappler/item_test.py +++ b/tensorflow/python/grappler/item_test.py @@ -109,7 +109,7 @@ class ItemTest(test.TestCase): self.assertEqual(new_tf_item, newest_tf_item) @test_util.run_v1_only('b/120545219') - def testColocationContraints(self): + def testColocationConstraints(self): with ops.Graph().as_default() as g: c = constant_op.constant([10]) v = variables.VariableV1([3], dtype=dtypes.int32) diff --git a/tensorflow/python/keras/backend_config_test.py b/tensorflow/python/keras/backend_config_test.py index 59e003d81e1..1e17c27c513 100644 --- a/tensorflow/python/keras/backend_config_test.py +++ b/tensorflow/python/keras/backend_config_test.py @@ -28,7 +28,7 @@ class BackendConfigTest(test.TestCase): def test_backend(self): self.assertEqual(keras.backend.backend(), 'tensorflow') - def test_espilon(self): + def test_epsilon(self): epsilon = 1e-2 keras.backend_config.set_epsilon(epsilon) self.assertEqual(keras.backend_config.epsilon(), epsilon) diff --git a/tensorflow/python/keras/distribute/distributed_training_utils.py b/tensorflow/python/keras/distribute/distributed_training_utils.py index 662fd9ec7de..05613fdfcb0 100644 --- a/tensorflow/python/keras/distribute/distributed_training_utils.py +++ b/tensorflow/python/keras/distribute/distributed_training_utils.py @@ -712,7 +712,7 @@ def _build_network_on_replica(model, mode, inputs=None, targets=None): placeholders for the input and the output that are not accessible till we call iterator.get_next() inside the step_fn for `fit`/`evaluate`/`predict`. - The sharing of weights and layers between the old and the new model gaurantee + The sharing of weights and layers between the old and the new model guarantees that we're using Strategy variables and any updates on either model are reflected correctly in callbacks and loop iterations.
@@ -936,7 +936,7 @@ def _make_execution_function_with_cloning(model, mode): distributed_model = get_distributed_model(model, mode) assert distributed_model - # Also create an execution fuction on that distributed model. + # Also create an execution function on that distributed model. if context.executing_eagerly(): distributed_function = _make_eager_execution_function(model, mode) else: diff --git a/tensorflow/python/keras/distribute/keras_correctness_test_base.py b/tensorflow/python/keras/distribute/keras_correctness_test_base.py index b9527127a5b..a6db8e533f5 100644 --- a/tensorflow/python/keras/distribute/keras_correctness_test_base.py +++ b/tensorflow/python/keras/distribute/keras_correctness_test_base.py @@ -301,7 +301,7 @@ def compare_results(results_with_ds, default_tolerance = 1e-2 relaxed_tolerance = 1e-2 elif partial_last_batch == 'train_and_eval': - # We relax the tolerence a lot in the partial last batch case as + # We relax the tolerance a lot in the partial last batch case as # 1. the examples in uneven batches may have different weights when # applying the gradients in the distributed case. # 2. TF Keras and TF Keras DS have different ways to handle the case when @@ -418,7 +418,7 @@ class TestDistributionStrategyCorrectnessBase(test.TestCase, **kwargs: key word arguments about how to create the input dictionaries Returns: - Three dictionaries representing the input for fit(), evalutate() and + Three dictionaries representing the input for fit(), evaluate() and predict() """ @@ -530,7 +530,7 @@ class TestDistributionStrategyCorrectnessBase(test.TestCase, **kwargs: key word arguments about how to create the input dictionaries Returns: - Three dictionaries representing the input for fit(), evalutate() and + Three dictionaries representing the input for fit(), evaluate() and predict() """ diff --git a/tensorflow/python/keras/distribute/multi_worker_callback_tf1_test.py b/tensorflow/python/keras/distribute/multi_worker_callback_tf1_test.py index 550b5ebfc27..95a235e7b33 100644 --- a/tensorflow/python/keras/distribute/multi_worker_callback_tf1_test.py +++ b/tensorflow/python/keras/distribute/multi_worker_callback_tf1_test.py @@ -87,7 +87,7 @@ def generate_callback_test_function(custom_callable): threading_local=kwargs['threading_local']) # Pass saving_filepath from the parent thread to ensure every worker has the - # same fileapth to save. + # same filepath to save. saving_filepath = os.path.join(self.get_temp_dir(), 'checkpoint.' + file_format) barrier = dc._Barrier(2) diff --git a/tensorflow/python/keras/distribute/multi_worker_fault_tolerance_test.py b/tensorflow/python/keras/distribute/multi_worker_fault_tolerance_test.py index c64e5ce61bf..fa58d2479ac 100644 --- a/tensorflow/python/keras/distribute/multi_worker_fault_tolerance_test.py +++ b/tensorflow/python/keras/distribute/multi_worker_fault_tolerance_test.py @@ -49,14 +49,14 @@ class KerasMultiWorkerFaultToleranceTest(test_base.IndependentWorkerTestBase, parameterized.TestCase): class PreemptionAtBatchBoundarySimulatingCallback(callbacks.Callback): - """Callback to simulate preemtion at batch boundary.""" + """Callback to simulate preemption at batch boundary.""" def on_epoch_begin(self, epoch, logs=None): self._current_epoch = epoch def on_batch_begin(self, batch, logs=None): if self._current_epoch == 1 and batch == 1 and not test_base.is_chief(): - # Simulate preemtion at the start of second batch of second epoch. + # Simulate preemption at the start of second batch of second epoch. 
raise RuntimeError('Preemption!') def on_batch_end(self, batch, logs=None): @@ -67,11 +67,11 @@ class KerasMultiWorkerFaultToleranceTest(test_base.IndependentWorkerTestBase, # TODO(rchao): Add tests for checking 0th and 2nd epoch boundary. class PreemptionAtEpochBoundarySimulatingCallback(callbacks.Callback): - """Callback to simulate preemtion at epoch boundary.""" + """Callback to simulate preemption at epoch boundary.""" def on_epoch_begin(self, epoch, logs=None): if epoch == 1 and not test_base.is_chief(): - # Simulate preemtion at the start of second epoch. + # Simulate preemption at the start of second epoch. raise RuntimeError('Preemption!') def on_epoch_end(self, epoch, logs=None): diff --git a/tensorflow/python/keras/engine/base_layer.py b/tensorflow/python/keras/engine/base_layer.py index 6281005087d..3e93a469f9e 100644 --- a/tensorflow/python/keras/engine/base_layer.py +++ b/tensorflow/python/keras/engine/base_layer.py @@ -1022,7 +1022,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector): x = tf.keras.layers.Dense(10)(inputs) outputs = tf.keras.layers.Dense(1)(x) model = tf.keras.Model(inputs, outputs) - # Actvity regularization. + # Activity regularization. model.add_loss(tf.abs(tf.reduce_mean(x))) ``` diff --git a/tensorflow/python/keras/engine/data_adapter.py b/tensorflow/python/keras/engine/data_adapter.py index ebcf0db5d10..87380e7853d 100644 --- a/tensorflow/python/keras/engine/data_adapter.py +++ b/tensorflow/python/keras/engine/data_adapter.py @@ -877,7 +877,7 @@ class GeneratorDataAdapter(DataAdapter): return peek, itertools.chain([peek], x) def _make_callable(self, x, workers, use_multiprocessing, max_queue_size): - """Create a callable, and possilbly include an Enqueuer.""" + """Create a callable, and possibly include an Enqueuer.""" if workers > 1 or (workers > 0 and use_multiprocessing): if use_multiprocessing: logging.warning( @@ -1094,7 +1094,7 @@ def is_none_or_empty(inputs): def broadcast_sample_weight_modes(target_structure, sample_weight_modes): - """Match sample_weigt_modes structure with output structure.""" + """Match sample_weight_modes structure with output structure.""" if target_structure is None or not nest.flatten(target_structure): return sample_weight_modes diff --git a/tensorflow/python/keras/engine/network_test.py b/tensorflow/python/keras/engine/network_test.py index 8013e01b172..68eb3d3c2f6 100644 --- a/tensorflow/python/keras/engine/network_test.py +++ b/tensorflow/python/keras/engine/network_test.py @@ -1357,10 +1357,10 @@ class DefaultShapeInferenceBehaviorTest(keras_parameterized.TestCase): self.assertEqual(output.shape, (1, 3)) @test_util.run_in_graph_and_eager_modes() - def testNoneInShapeWithFunctinalAPI(self): + def testNoneInShapeWithFunctionalAPI(self): class BasicBlock(keras.Model): - # Inherting from keras.layers.Layer since we are calling this layer + # Inheriting from keras.layers.Layer since we are calling this layer # inside a model created using functional API. def __init__(self): diff --git a/tensorflow/python/keras/engine/training.py b/tensorflow/python/keras/engine/training.py index 3e775c19c21..567cb6b5e6e 100644 --- a/tensorflow/python/keras/engine/training.py +++ b/tensorflow/python/keras/engine/training.py @@ -1843,7 +1843,7 @@ class Model(network.Network, version_utils.ModelVersionSelector): 'optimizer.') # If we have re-compiled the loss/weighted metric sub-graphs then create # train function even if one exists already. This is because - # `_feed_sample_weights` list has been updated on re-copmpile. 
+ # `_feed_sample_weights` list has been updated on re-compile. if getattr(self, 'train_function', None) is None or has_recompiled: # Restore the compiled trainable state. current_trainable_state = self._get_trainable_state() @@ -1885,7 +1885,7 @@ class Model(network.Network, version_utils.ModelVersionSelector): has_recompiled = self._recompile_weights_loss_and_weighted_metrics() # If we have re-compiled the loss/weighted metric sub-graphs then create # test function even if one exists already. This is because - # `_feed_sample_weights` list has been updated on re-copmpile. + # `_feed_sample_weights` list has been updated on re-compile. if getattr(self, 'test_function', None) is None or has_recompiled: inputs = (self._feed_inputs + self._feed_targets + diff --git a/tensorflow/python/keras/engine/training_distributed.py b/tensorflow/python/keras/engine/training_distributed.py index 342ad133cd4..e0f5028ab72 100644 --- a/tensorflow/python/keras/engine/training_distributed.py +++ b/tensorflow/python/keras/engine/training_distributed.py @@ -80,7 +80,7 @@ def _make_train_step_fn(model, mode, strategy, output_labels): # When input feature is a dictionary of tensors, dictionary is flattended # to an array and passed as a model input. This results in input mismatch # when model input layer names are not sorted in alphabetical order as - # `nest.flatten()`sorts dictioary elements by keys. As so, transform input + # `nest.flatten()`sorts dictionary elements by keys. As so, transform input # tensors into an array and order it along `model._feed_input_names`. if isinstance(inputs, dict): inputs = [inputs[input_name] for input_name in model._feed_input_names] diff --git a/tensorflow/python/keras/engine/training_utils.py b/tensorflow/python/keras/engine/training_utils.py index 3b42bff8d30..4690a14d8a4 100644 --- a/tensorflow/python/keras/engine/training_utils.py +++ b/tensorflow/python/keras/engine/training_utils.py @@ -229,7 +229,7 @@ class SliceAggregator(Aggregator): There is, however, some scheduling and context switching overhead which will offset the gains from pipelining the slice assignment. Below a given threshold it is faster to simply assign in the main thread rather than enqueue the - assigmnet in a side thread. The exact threshold will vary from system to + assignment in a side thread. The exact threshold will vary from system to system, but the time is not very sensitive to the exact transition so a value of 2 ** 14 was chosen which should be reasonable on most systems. """ @@ -1506,7 +1506,7 @@ def prepare_loss_functions(loss, output_names): def prepare_loss_weights(training_endpoints, loss_weights=None): """Converts loss weights to a list of loss weights. - The result loss weights will be populated on the trainging endpoint. + The result loss weights will be populated on the training endpoint. Arguments: training_endpoints: List of model training endpoints. diff --git a/tensorflow/python/keras/engine/training_v1.py b/tensorflow/python/keras/engine/training_v1.py index 29ddc2c1a90..67840a505e9 100644 --- a/tensorflow/python/keras/engine/training_v1.py +++ b/tensorflow/python/keras/engine/training_v1.py @@ -1995,7 +1995,7 @@ class Model(training_lib.Model): 'optimizer.') # If we have re-compiled the loss/weighted metric sub-graphs then create # train function even if one exists already. This is because - # `_feed_sample_weights` list has been updated on re-copmpile. + # `_feed_sample_weights` list has been updated on re-compile. 
if getattr(self, 'train_function', None) is None or has_recompiled: # Restore the compiled trainable state. current_trainable_state = self._get_trainable_state() @@ -2038,7 +2038,7 @@ class Model(training_lib.Model): has_recompiled = self._recompile_weights_loss_and_weighted_metrics() # If we have re-compiled the loss/weighted metric sub-graphs then create # test function even if one exists already. This is because - # `_feed_sample_weights` list has been updated on re-copmpile. + # `_feed_sample_weights` list has been updated on re-compile. if getattr(self, 'test_function', None) is None or has_recompiled: inputs = (self._feed_inputs + self._feed_targets + @@ -2853,7 +2853,7 @@ class DistributedCallbackModel(Model): orig_model_weights) def __getattr__(self, item): - # Whitelisted atttributes of the model that can be accessed by the user + # Whitelisted attributes of the model that can be accessed by the user # during a callback. if item not in ('_setattr_tracking', '_layers'): logging.warning('You are accessing attribute ' + item + ' of the ' diff --git a/tensorflow/python/keras/layers/normalization.py b/tensorflow/python/keras/layers/normalization.py index 0c1fc43463b..a0432690bc2 100644 --- a/tensorflow/python/keras/layers/normalization.py +++ b/tensorflow/python/keras/layers/normalization.py @@ -60,7 +60,7 @@ class BatchNormalizationBase(Layer): 3) When performing inference using a model containing batch normalization, it is generally (though not always) desirable to use accumulated statistics - rather than mini-batch statistics. This is acomplished by passing + rather than mini-batch statistics. This is accomplished by passing `training=False` when calling the model, or using `model.predict`. Arguments: @@ -1123,7 +1123,7 @@ class LayerNormalization(Layer): # self.gamma and self.beta have the wrong shape for fused_batch_norm, so # we cannot pass them as the scale and offset parameters. Therefore, we # create two constant tensors in correct shapes for fused_batch_norm and - # later constuct a separate calculation on the scale and offset. + # later construct a separate calculation on the scale and offset. scale = _set_const_tensor(1.0, self.dtype, [pre_dim]) offset = _set_const_tensor(0.0, self.dtype, [pre_dim]) diff --git a/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py b/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py index 0cc56ba92b0..730bd6de20d 100644 --- a/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py +++ b/tensorflow/python/keras/layers/preprocessing/image_preprocessing.py @@ -178,7 +178,7 @@ class RandomCrop(Layer): This layer will crop all the images in the same batch to the same cropping location. - By default, random croppping is only applied during training. At inference + By default, random cropping is only applied during training. At inference time, the images will be first rescaled to preserve the shorter side, and center cropped. If you need to apply random cropping at inference time, set `training` to True when calling the layer. 
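[Editor's note: the training/inference split documented in the RandomCrop docstring above can be exercised roughly as follows. This is a minimal sketch assuming the layer takes the `height`/`width` constructor arguments implied by its docstring; the import path is the in-tree module this hunk edits, and none of this code is part of the patch:

    import tensorflow as tf
    from tensorflow.python.keras.layers.preprocessing import image_preprocessing

    layer = image_preprocessing.RandomCrop(height=64, width=64)
    images = tf.random.uniform([8, 100, 120, 3])

    out_train = layer(images, training=True)  # one random crop location shared by the batch
    out_infer = layer(images)                 # rescale shorter side, then center-crop
    # Both outputs have shape [8, 64, 64, 3].
]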
diff --git a/tensorflow/python/keras/layers/preprocessing/text_vectorization.py b/tensorflow/python/keras/layers/preprocessing/text_vectorization.py index 5ab28cc7b3d..da5fb687eb2 100644 --- a/tensorflow/python/keras/layers/preprocessing/text_vectorization.py +++ b/tensorflow/python/keras/layers/preprocessing/text_vectorization.py @@ -69,7 +69,7 @@ _OOV_IDF_NAME = "oov_idf" _ACCUMULATOR_VOCAB_NAME = "vocab" # The total counts of each token in the vocabulary _ACCUMULATOR_COUNTS_NAME = "counts" -# The number of doccumeents / examples that each token appears in. +# The number of documents / examples that each token appears in. _ACCUMULATOR_DOCUMENT_COUNTS = "document_counts" # The total number of documents / examples in the dataset. _ACCUMULATOR_NUM_DOCUMENTS = "num_documents" diff --git a/tensorflow/python/keras/layers/preprocessing/text_vectorization_test.py b/tensorflow/python/keras/layers/preprocessing/text_vectorization_test.py index 38658f9e201..899e24283bf 100644 --- a/tensorflow/python/keras/layers/preprocessing/text_vectorization_test.py +++ b/tensorflow/python/keras/layers/preprocessing/text_vectorization_test.py @@ -534,7 +534,7 @@ class TextVectorizationPreprocessingTest( def test_splitting_with_invalid_split_arg(self): input_data = keras.Input(shape=(1,), dtype=dtypes.string) layer = get_layer_class()() - layer._split = "unsuppported" + layer._split = "unsupported" with self.assertRaisesRegex(ValueError, ".*is not a supported splitting.*"): _ = layer(input_data) diff --git a/tensorflow/python/keras/layers/recurrent.py b/tensorflow/python/keras/layers/recurrent.py index 6d20bfd6154..e30527a73ca 100644 --- a/tensorflow/python/keras/layers/recurrent.py +++ b/tensorflow/python/keras/layers/recurrent.py @@ -862,7 +862,7 @@ class RNN(Layer): 'make sure that there is no mask passed in by upstream ' 'layers.') if self.unroll: - raise ValueError('The input received constains RaggedTensors and does ' + raise ValueError('The input received contains RaggedTensors and does ' 'not support unrolling. 
Disable unrolling by passing ' '`unroll=False` in the RNN Layer constructor.') diff --git a/tensorflow/python/keras/layers/recurrent_test.py b/tensorflow/python/keras/layers/recurrent_test.py index a03bc5d1fbf..4098c1e8799 100644 --- a/tensorflow/python/keras/layers/recurrent_test.py +++ b/tensorflow/python/keras/layers/recurrent_test.py @@ -1646,7 +1646,7 @@ class RNNTest(keras_parameterized.TestCase): # Must raise error when unroll is set to True unroll_rnn_layer = layer(3, unroll=True) with self.assertRaisesRegexp( - ValueError, 'The input received constains RaggedTensors *'): + ValueError, 'The input received contains RaggedTensors *'): unroll_rnn_layer(inputs) # Check if return sequences outputs are correct @@ -1666,7 +1666,7 @@ class RNNTest(keras_parameterized.TestCase): model_2 = keras.models.Model(x_dense, y_dense) dense_data = ragged_data.to_tensor() output_dense = model_2.predict(dense_data, steps=1) - # Convert the output here to ragged for value comparision + # Convert the output here to ragged for value comparison output_dense = ragged_tensor.RaggedTensor.from_tensor( output_dense, lengths=row_lengths) self.assertAllClose(output_ragged, output_dense) diff --git a/tensorflow/python/keras/layers/rnn_cell_wrapper_v2_test.py b/tensorflow/python/keras/layers/rnn_cell_wrapper_v2_test.py index 52c5fa2e5c8..8f817afdd47 100644 --- a/tensorflow/python/keras/layers/rnn_cell_wrapper_v2_test.py +++ b/tensorflow/python/keras/layers/rnn_cell_wrapper_v2_test.py @@ -222,7 +222,7 @@ class RNNCellWrapperTest(test.TestCase, parameterized.TestCase): reconstructed_wrapper = wrapper_cls.from_config(config) self.assertFalse(reconstructed_wrapper._dropout_state_filter(None)) - def testDroputWrapperWithKerasLSTMCell(self): + def testDropoutWrapperWithKerasLSTMCell(self): wrapper_cls = rnn_cell_wrapper_v2.DropoutWrapper cell = layers.LSTMCell(10) diff --git a/tensorflow/python/keras/layers/serialization_test.py b/tensorflow/python/keras/layers/serialization_test.py index 74b43a740cf..cd2668840f6 100644 --- a/tensorflow/python/keras/layers/serialization_test.py +++ b/tensorflow/python/keras/layers/serialization_test.py @@ -120,7 +120,7 @@ class LayerSerializationTest(parameterized.TestCase, test.TestCase): @parameterized.parameters( [batchnorm_v1.BatchNormalization, batchnorm_v2.BatchNormalization]) - def test_deserialize_batchnorm_backwards_compatiblity(self, batchnorm_layer): + def test_deserialize_batchnorm_backwards_compatibility(self, batchnorm_layer): layer = batchnorm_layer( momentum=0.9, beta_initializer='zeros', gamma_regularizer='l2') config = keras.layers.serialize(layer) diff --git a/tensorflow/python/keras/metrics.py b/tensorflow/python/keras/metrics.py index 9acb37f7da2..cd6094b6fb4 100644 --- a/tensorflow/python/keras/metrics.py +++ b/tensorflow/python/keras/metrics.py @@ -1848,7 +1848,7 @@ class AUC(Metric): summation_method) super(AUC, self).__init__(name=name, dtype=dtype) - # Handle multilable arguments. + # Handle multilabel arguments. self.multi_label = multi_label if label_weights is not None: label_weights = constant_op.constant(label_weights, dtype=self.dtype) @@ -1941,7 +1941,7 @@ class AUC(Metric): (self.false_positives, ('T', 'L')), (self.false_negatives, ('T', 'L'))]) if self.label_weights is not None: - # label_weights should be of lenght equal to the number of labels. + # label_weights should be of length equal to the number of labels. 
shapes.append((self.label_weights, ('L',))) deps = [ check_ops.assert_shapes( diff --git a/tensorflow/python/keras/optimizer_v2/optimizer_v2.py b/tensorflow/python/keras/optimizer_v2/optimizer_v2.py index ed4504136df..30bf526de73 100644 --- a/tensorflow/python/keras/optimizer_v2/optimizer_v2.py +++ b/tensorflow/python/keras/optimizer_v2/optimizer_v2.py @@ -693,7 +693,7 @@ class OptimizerV2(trackable.Trackable): @abc.abstractmethod def get_config(self): - """Returns the config of the optimimizer. + """Returns the config of the optimizer. An optimizer config is a Python dictionary (serializable) containing the configuration of an optimizer. @@ -1149,7 +1149,7 @@ class RestoredOptimizer(OptimizerV2): def get_config(self): # TODO(allenl): Save and restore the Optimizer's config raise NotImplementedError( - "Restoring functional Optimzers from SavedModels is not currently " + "Restoring functional Optimizers from SavedModels is not currently " "supported. Please file a feature request if this limitation bothers " "you.") diff --git a/tensorflow/python/keras/optimizer_v2/optimizer_v2_test.py b/tensorflow/python/keras/optimizer_v2/optimizer_v2_test.py index 95f4c55dff5..da3e4e4acb4 100644 --- a/tensorflow/python/keras/optimizer_v2/optimizer_v2_test.py +++ b/tensorflow/python/keras/optimizer_v2/optimizer_v2_test.py @@ -1076,7 +1076,7 @@ def make_model(): | Dense | --------- - This topology is chosen because it excercises both dense and sparse update + This topology is chosen because it exercises both dense and sparse update paths. Returns: diff --git a/tensorflow/python/keras/premade/wide_deep.py b/tensorflow/python/keras/premade/wide_deep.py index bf90314253c..ba524367bc6 100644 --- a/tensorflow/python/keras/premade/wide_deep.py +++ b/tensorflow/python/keras/premade/wide_deep.py @@ -129,7 +129,7 @@ class WideDeepModel(keras_training.Model): self._check_trainable_weights_consistency() # If we have re-compiled the loss/weighted metric sub-graphs then create # train function even if one exists already. This is because - # `_feed_sample_weights` list has been updated on re-copmpile. + # `_feed_sample_weights` list has been updated on re-compile. if getattr(self, 'train_function', None) is None or has_recompiled: # Restore the compiled trainable state. 
current_trainable_state = self._get_trainable_state() diff --git a/tensorflow/python/keras/saving/saved_model/save_impl.py b/tensorflow/python/keras/saving/saved_model/save_impl.py index 580e4527d60..3fcc649cba5 100644 --- a/tensorflow/python/keras/saving/saved_model/save_impl.py +++ b/tensorflow/python/keras/saving/saved_model/save_impl.py @@ -582,7 +582,7 @@ def _create_call_fn_decorator(layer, wrapped_call): def _wrap_unconditional_loss(loss_fn, index): - """Wraps callable/unconditonal loss, returning a serializable function.""" + """Wraps callable/unconditional loss, returning a serializable function.""" # Extract original loss function from partial function fn = loss_fn.args[0] if isinstance(loss_fn, functools.partial) else loss_fn if isinstance(fn, def_function.Function): diff --git a/tensorflow/python/keras/saving/saved_model/saved_model_test.py b/tensorflow/python/keras/saving/saved_model/saved_model_test.py index a9387c28f81..f39f217b682 100644 --- a/tensorflow/python/keras/saving/saved_model/saved_model_test.py +++ b/tensorflow/python/keras/saving/saved_model/saved_model_test.py @@ -227,7 +227,7 @@ class TestModelSavingAndLoadingV2(keras_parameterized.TestCase): loaded = keras_load.load(saved_model_dir) input_arr = array_ops.ones((4, 3)) - # Run the layer, and use the keras backend learing phase + # Run the layer, and use the keras backend learning phase keras.backend.set_learning_phase(0) self.assertAllEqual(input_arr, loaded(input_arr)) keras.backend.set_learning_phase(1) diff --git a/tensorflow/python/keras/saving/saved_model/serialized_attributes.py b/tensorflow/python/keras/saving/saved_model/serialized_attributes.py index 2153d4838e7..06c643945e7 100644 --- a/tensorflow/python/keras/saving/saved_model/serialized_attributes.py +++ b/tensorflow/python/keras/saving/saved_model/serialized_attributes.py @@ -114,7 +114,7 @@ class SerializedAttributes(object): checkpointable_objects: List of checkpointable objects to be serialized in the SavedModel. functions: List of functions to be serialized in the SavedModel. - copy_from: List of other SerializedAttributes subclasses. The returend + copy_from: List of other SerializedAttributes subclasses. The returned class will copy checkpoint objects/functions from each subclass. Returns: @@ -219,7 +219,7 @@ class CommonEndpoints(SerializedAttributes.with_attributes( variables: List of all variables in the model and its sublayers. trainable_variables: List of all trainable variables in the model and its sublayers. - regulariation_losses: List of all unconditional losses (losses not dependent + regularization_losses: List of all unconditional losses (losses not dependent on the inputs) in the model and its sublayers. __call__: Function that takes inputs and returns the outputs of the model call function. diff --git a/tensorflow/python/keras/tests/integration_test.py b/tensorflow/python/keras/tests/integration_test.py index 090eba8bd74..321fa205857 100644 --- a/tensorflow/python/keras/tests/integration_test.py +++ b/tensorflow/python/keras/tests/integration_test.py @@ -301,7 +301,7 @@ class ActivationV2IntegrationTest(keras_parameterized.TestCase): """Tests activation function V2 in model exporting and loading. This test is to verify in TF 2.x, when 'tf.nn.softmax' is used as an - activition function, its model exporting and loading work as expected. + activation function, its model exporting and loading work as expected. Check b/123041942 for details. 
""" diff --git a/tensorflow/python/keras/utils/conv_utils.py b/tensorflow/python/keras/utils/conv_utils.py index 74862102184..f38fdc18252 100644 --- a/tensorflow/python/keras/utils/conv_utils.py +++ b/tensorflow/python/keras/utils/conv_utils.py @@ -381,7 +381,7 @@ def conv_kernel_idxs(input_shape, kernel_shape, strides, padding, filters_in, elif data_format == 'channels_last': concat_idxs = lambda spatial_idx, filter_idx: spatial_idx + (filter_idx,) else: - raise ValueError('Data format %s not recignized.' + raise ValueError('Data format %s not recognized.' '`data_format` must be "channels_first" or ' '"channels_last".' % data_format) diff --git a/tensorflow/python/keras/utils/io_utils_test.py b/tensorflow/python/keras/utils/io_utils_test.py index f193718a33b..a8ba8835f63 100644 --- a/tensorflow/python/keras/utils/io_utils_test.py +++ b/tensorflow/python/keras/utils/io_utils_test.py @@ -91,7 +91,7 @@ class TestIOUtils(keras_parameterized.TestCase): # Note: you have to use shuffle='batch' or False with HDF5Matrix model.fit(x_train, y_train, batch_size=32, shuffle='batch', verbose=False) - # test that evalutation and prediction + # test that evaluation and prediction # don't crash and return reasonable results out_pred = model.predict(x_test, batch_size=32, verbose=False) out_eval = model.evaluate(x_test, y_test, batch_size=32, verbose=False) diff --git a/tensorflow/python/keras/utils/metrics_utils.py b/tensorflow/python/keras/utils/metrics_utils.py index f9676b48b75..f3ab32cd92c 100644 --- a/tensorflow/python/keras/utils/metrics_utils.py +++ b/tensorflow/python/keras/utils/metrics_utils.py @@ -507,7 +507,7 @@ def ragged_assert_compatible_and_get_flat_values(values, mask=None): values = [values] to_be_stripped = True - # NOTE: we leave the flat_values compatiblity to + # NOTE: we leave the flat_values compatibility to # tf.TensorShape `assert_is_compatible_with` # check if both dynamic dimensions are equal and then use the flat_values. nested_row_split_list = [rt.nested_row_splits for rt in values] diff --git a/tensorflow/python/kernel_tests/bias_op_deterministic_test.py b/tensorflow/python/kernel_tests/bias_op_deterministic_test.py index 738e6cf6fc6..766969c60d3 100644 --- a/tensorflow/python/kernel_tests/bias_op_deterministic_test.py +++ b/tensorflow/python/kernel_tests/bias_op_deterministic_test.py @@ -97,7 +97,7 @@ class BiasAddDeterministicTest(bias_op_base.BiasAddTestBase, data_type, shape=out_shape, name='upstream_gradients') gradient_injector_op = bias_add_op * upstream_gradients # The gradient function behaves as if grad_ys is multiplied by the op - # gradient result, not passing the upstram gradients through the op's + # gradient result, not passing the upstream gradients through the op's # gradient generation graph. 
This is the reason for using the # gradient_injector_op grad_ys = None diff --git a/tensorflow/python/kernel_tests/clip_ops_test.py b/tensorflow/python/kernel_tests/clip_ops_test.py index 93a3c384942..433ec689375 100644 --- a/tensorflow/python/kernel_tests/clip_ops_test.py +++ b/tensorflow/python/kernel_tests/clip_ops_test.py @@ -156,8 +156,8 @@ class ClipTest(test.TestCase): indices = constant_op.constant(indices) shape = constant_op.constant(shape) # IndexedSlices mode - indixed_slices = ops.IndexedSlices(values, indices, shape) - clipped = clip_ops.clip_by_value(indixed_slices, clip_value_min, + indexed_slices = ops.IndexedSlices(values, indices, shape) + clipped = clip_ops.clip_by_value(indexed_slices, clip_value_min, clip_value_max) # clipped should be IndexedSlices self.assertIsInstance(clipped, ops.IndexedSlices) diff --git a/tensorflow/python/kernel_tests/critical_section_test.py b/tensorflow/python/kernel_tests/critical_section_test.py index ecd70b1c29f..55c1219580a 100644 --- a/tensorflow/python/kernel_tests/critical_section_test.py +++ b/tensorflow/python/kernel_tests/critical_section_test.py @@ -347,7 +347,7 @@ class CriticalSectionTest(test.TestCase, parameterized.TestCase): # It's OK if neither requests exclusive resource access. cs1.execute(lambda: v2 + 1, exclusive_resource_access=False) - # It's not OK if the second request requires exlusive resource + # It's not OK if the second request requires exclusive resource # access. with self.assertRaisesRegexp( ValueError, "requested exclusive resource access"): diff --git a/tensorflow/python/kernel_tests/distributions/special_math_test.py b/tensorflow/python/kernel_tests/distributions/special_math_test.py index 14f8280c371..ce2cd8614f5 100644 --- a/tensorflow/python/kernel_tests/distributions/special_math_test.py +++ b/tensorflow/python/kernel_tests/distributions/special_math_test.py @@ -132,7 +132,7 @@ class NdtriTest(test.TestCase): 1. - np.exp(-32.), 1., ]).astype(dtype)) - # Not having the lambda sanitzer means we'd get an `IndexError` whenever + # Not having the lambda sanitizer means we'd get an `IndexError` whenever # the user supplied function has default args. _, grads = _value_and_gradient( lambda x: special_math.ndtri(x), p) # pylint: disable=unnecessary-lambda @@ -276,7 +276,7 @@ class NdtrGradientTest(test.TestCase): x = constant_op.constant([-100., 0., 100.], dtype=dtype) output = (sm.log_ndtr(x) if self._use_log else sm.ndtr(x)) fn = sm.log_ndtr if self._use_log else sm.ndtr - # Not having the lambda sanitzer means we'd get an `IndexError` whenever + # Not having the lambda sanitizer means we'd get an `IndexError` whenever # the user supplied function has default args. output, grad_output = _value_and_gradient( lambda x_: fn(x_), x) # pylint: disable=unnecessary-lambda diff --git a/tensorflow/python/kernel_tests/eig_op_test.py b/tensorflow/python/kernel_tests/eig_op_test.py index ffc61b7bcfe..ce7b5ed9d57 100644 --- a/tensorflow/python/kernel_tests/eig_op_test.py +++ b/tensorflow/python/kernel_tests/eig_op_test.py @@ -108,7 +108,7 @@ def EquilibrateEigenVectorPhases(x, y): """Equilibrate the phase of the Eigenvectors in the columns of `x` and `y`. Eigenvectors are only unique up to an arbitrary phase. This function rotates x - such that it matches y. Precondition: The coluns of x and y differ by a + such that it matches y. Precondition: The columns of x and y differ by a multiplicative complex phase factor only. 
Args: diff --git a/tensorflow/python/kernel_tests/einsum_op_test.py b/tensorflow/python/kernel_tests/einsum_op_test.py index 6065038ebe6..26cfb6abbb8 100644 --- a/tensorflow/python/kernel_tests/einsum_op_test.py +++ b/tensorflow/python/kernel_tests/einsum_op_test.py @@ -254,7 +254,7 @@ class EinsumOpTest(test.TestCase): output = self.evaluate(gen_linalg_ops.einsum(inputs, equation)) self.assertAllClose(output, np.zeros(output_shape), atol=1e-4, rtol=1e-4) - # Contractions along zero-sized dimensons. + # Contractions along zero-sized dimensions. check('ab,bc->ac', [(0, 10), (10, 10)], (0, 10)) # From transformer xl. check('ibnd,ijbn->jnd', [(1, 0, 5, 10), (1, 1, 0, 5)], (1, 5, 10)) diff --git a/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py b/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py index fa886cc215a..2b1e30a8bbd 100644 --- a/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py +++ b/tensorflow/python/kernel_tests/fractional_max_pool_op_test.py @@ -146,7 +146,7 @@ class FractionalMaxPoolTest(test.TestCase): Since _GetExpectedFractionalMaxPoolResult is 'automated', it feel safer to have a test case that you can see what's happening. This test will generate a small, random, int 2D matrix, and feed it to - FractinalMaxPool and _GetExpectedFractionalMaxPoolResult. + FractionalMaxPool and _GetExpectedFractionalMaxPoolResult. """ num_rows = 6 num_cols = 6 @@ -341,7 +341,7 @@ class FractionalMaxPoolGradTest(test.TestCase): _SEED = 123456 def _GenerateUniqueRandomInputTensor(self, shape): - """Generate 'unqiue' random input tensor. + """Generate 'unique' random input tensor. 'Unique' means there's no collision values in the tensor, all elements are different. This is done by generating sequence of integers with step of 1 diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_diag_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_diag_test.py index 8f96b112360..59f5fa20024 100644 --- a/tensorflow/python/kernel_tests/linalg/linear_operator_diag_test.py +++ b/tensorflow/python/kernel_tests/linalg/linear_operator_diag_test.py @@ -92,7 +92,7 @@ class LinearOperatorDiagTest( self.evaluate(linalg.LinearOperatorDiag(diag).assert_positive_definite()) def test_assert_non_singular_raises_if_zero_eigenvalue(self): - # Singlular matrix with one positive eigenvalue and one zero eigenvalue. + # Singular matrix with one positive eigenvalue and one zero eigenvalue. with self.cached_session(): diag = [1.0, 0.0] operator = linalg.LinearOperatorDiag(diag, is_self_adjoint=True) diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py index a276c0887c3..7e01626e1db 100644 --- a/tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py +++ b/tensorflow/python/kernel_tests/linalg/linear_operator_full_matrix_test.py @@ -130,7 +130,7 @@ class SquareLinearOperatorFullMatrixSymmetricPositiveDefiniteTest( def setUp(self): # Increase from 1e-6 to 1e-5. This reduction in tolerance happens, # presumably, because we are taking a different code path in the operator - # and the matrix. The operator uses a Choleksy, the matrix uses standard + # and the matrix. The operator uses a Cholesky, the matrix uses standard # solve. 
self._atol[dtypes.float32] = 1e-5 self._rtol[dtypes.float32] = 1e-5 diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py index fcff2697ac7..d27ab7d6ba5 100644 --- a/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py +++ b/tensorflow/python/kernel_tests/linalg/linear_operator_low_rank_update_test.py @@ -270,7 +270,7 @@ class LinearOperatorLowRankUpdatetestWithDiagNotSquare( _use_v = True -class LinearOpearatorLowRankUpdateBroadcastsShape(test.TestCase): +class LinearOperatorLowRankUpdateBroadcastsShape(test.TestCase): """Test that the operator's shape is the broadcast of arguments.""" def test_static_shape_broadcasts_up_from_operator_to_other_args(self): diff --git a/tensorflow/python/kernel_tests/linalg/linear_operator_lower_triangular_test.py b/tensorflow/python/kernel_tests/linalg/linear_operator_lower_triangular_test.py index 15f41105232..22a7aa798b5 100644 --- a/tensorflow/python/kernel_tests/linalg/linear_operator_lower_triangular_test.py +++ b/tensorflow/python/kernel_tests/linalg/linear_operator_lower_triangular_test.py @@ -66,7 +66,7 @@ class LinearOperatorLowerTriangularTest( return operator, matrix def test_assert_non_singular(self): - # Singlular matrix with one positive eigenvalue and one zero eigenvalue. + # Singular matrix with one positive eigenvalue and one zero eigenvalue. with self.cached_session(): tril = [[1., 0.], [1., 0.]] operator = linalg.LinearOperatorLowerTriangular(tril) diff --git a/tensorflow/python/kernel_tests/linalg_grad_test.py b/tensorflow/python/kernel_tests/linalg_grad_test.py index 64ecd491b3c..3e0676b0746 100644 --- a/tensorflow/python/kernel_tests/linalg_grad_test.py +++ b/tensorflow/python/kernel_tests/linalg_grad_test.py @@ -165,8 +165,8 @@ if __name__ == '__main__': test_lib.is_built_with_rocm(): # Skip this one particular subtest on the ROCm platform # It will fail because of 1 element in 10,000 mismatch, - # and the mismatch is minor (tolerance is 0.20, mismtach is 0,22) - # TODO(rocm) : investigate cause of mistmach and fix + # and the mismatch is minor (tolerance is 0.20, mismatch is 0,22) + # TODO(rocm) : investigate cause of mismatch and fix continue _AddTest(MatrixBinaryFunctorGradientTest, 'MatrixTriangularSolveGradient', name, diff --git a/tensorflow/python/kernel_tests/metrics_test.py b/tensorflow/python/kernel_tests/metrics_test.py index 2cf6e629339..5f7db2764cc 100644 --- a/tensorflow/python/kernel_tests/metrics_test.py +++ b/tensorflow/python/kernel_tests/metrics_test.py @@ -3773,7 +3773,7 @@ class MeanIOUTest(test.TestCase): @test_util.run_deprecated_v1 def testMultipleUpdatesWithMissingClass(self): - # Test the case where there are no predicions and labels for + # Test the case where there are no predictions and labels for # one class, and thus there is one row and one column with # zero entries in the confusion matrix. num_classes = 3 @@ -4094,7 +4094,7 @@ class MeanPerClassAccuracyTest(test.TestCase): @test_util.run_deprecated_v1 def testMultipleUpdatesWithMissingClass(self): - # Test the case where there are no predicions and labels for + # Test the case where there are no predictions and labels for # one class, and thus there is one row and one column with # zero entries in the confusion matrix. 
num_classes = 3 diff --git a/tensorflow/python/kernel_tests/numerics_test.py b/tensorflow/python/kernel_tests/numerics_test.py index 8a4cdff96c2..4c77028d8fd 100644 --- a/tensorflow/python/kernel_tests/numerics_test.py +++ b/tensorflow/python/kernel_tests/numerics_test.py @@ -130,7 +130,7 @@ class NumericsTest(test.TestCase): r"or `tf.while_loop\(\)`\."): numerics.add_check_numerics_ops() - def testCheckNumericsV2OpNegativeAndPositveInf(self): + def testCheckNumericsV2OpNegativeAndPositiveInf(self): """Test that CheckNumericsV2 op distinguishes negative and positive infs.""" with self.session(graph=ops.Graph()): t1 = constant_op.constant([-1.0, 1.0]) @@ -145,7 +145,7 @@ class NumericsTest(test.TestCase): self.assertIn("had -Inf and +Inf values", caught.message) self.assertIn("pass through test", caught.message) - def testCheckNumericsV2OpNegativeAndPositveInfAndNaN(self): + def testCheckNumericsV2OpNegativeAndPositiveInfAndNaN(self): """CheckNumericsV2 op distinguishes - & + infs when nan is present.""" with self.session(graph=ops.Graph()): t1 = constant_op.constant([-1.0, 1.0, 0.0]) @@ -160,7 +160,7 @@ class NumericsTest(test.TestCase): self.assertIn("had -Inf, +Inf, and NaN values", caught.message) self.assertIn("pass through test", caught.message) - def testCheckNumericsV2PositveInfAndNaN(self): + def testCheckNumericsV2PositiveInfAndNaN(self): """Test that CheckNumericsV2 op shows sign of inf when nan is present.""" with self.session(graph=ops.Graph()): t1 = constant_op.constant([0.0, 1.0]) diff --git a/tensorflow/python/kernel_tests/pooling_ops_test.py b/tensorflow/python/kernel_tests/pooling_ops_test.py index d369f8c6cf1..2e47c50acef 100644 --- a/tensorflow/python/kernel_tests/pooling_ops_test.py +++ b/tensorflow/python/kernel_tests/pooling_ops_test.py @@ -1465,7 +1465,7 @@ class PoolingTest(test.TestCase): # The functionality associated with TF_ENABLE_NANPROP is currently # not supported on the ROCm platform, so skip this part of the test # NANs in input lead to non-deterministic results, and hence skipping - # the remaining tests altogeher on the ROCm platform + # the remaining tests altogether on the ROCm platform if test.is_built_with_rocm(): return @@ -1552,7 +1552,7 @@ class PoolingTest(test.TestCase): # The functionality associated with TF_ENABLE_NANPROP is currently # not supported on the ROCm platform, so skip this part of the test # NANs in input lead to non-deterministic results, and hence skipping - # the remaining tests altogeher on the ROCm platform + # the remaining tests altogether on the ROCm platform if test.is_built_with_rocm(): return diff --git a/tensorflow/python/kernel_tests/py_func_test.py b/tensorflow/python/kernel_tests/py_func_test.py index 5383410f999..5365e9a490e 100644 --- a/tensorflow/python/kernel_tests/py_func_test.py +++ b/tensorflow/python/kernel_tests/py_func_test.py @@ -740,7 +740,7 @@ class EagerPyFuncTest(PyFuncTestBase): self.assertEqual(dy_dx, 2.0) @test_util.run_v1_only("b/120545219") - def testEagerRespectsDevicePlacmentOfOp(self): + def testEagerRespectsDevicePlacementOfOp(self): def f(x): return math_ops.square(x) diff --git a/tensorflow/python/kernel_tests/rnn_test.py b/tensorflow/python/kernel_tests/rnn_test.py index 7b35d7338b1..dc45599777b 100644 --- a/tensorflow/python/kernel_tests/rnn_test.py +++ b/tensorflow/python/kernel_tests/rnn_test.py @@ -649,7 +649,7 @@ class RNNTest(test.TestCase): save.restore(sess, save_path) self.assertAllEqual([10.] 
* 4, self.evaluate(lstm_cell._bias)) - # TODO(scottzhu): Look into updating for V2 Intializers. + # TODO(scottzhu): Look into updating for V2 Initializers. @test_util.run_deprecated_v1 def testRNNCellSerialization(self): for cell in [ diff --git a/tensorflow/python/kernel_tests/scatter_nd_ops_test.py b/tensorflow/python/kernel_tests/scatter_nd_ops_test.py index e1878dc1090..ed66a4f75ab 100644 --- a/tensorflow/python/kernel_tests/scatter_nd_ops_test.py +++ b/tensorflow/python/kernel_tests/scatter_nd_ops_test.py @@ -657,7 +657,7 @@ class ScatterNdTest(test.TestCase): self.assertAllEqual(expected_input_grad, self.evaluate(input_grad)) @test_util.run_deprecated_v1 - def testScatterNdRepatedIndicesAdd(self): + def testScatterNdRepeatedIndicesAdd(self): indices = array_ops.zeros([100000, 1], dtypes.int32) values = np.random.randn(100000) shape = [1] diff --git a/tensorflow/python/kernel_tests/segment_reduction_ops_test.py b/tensorflow/python/kernel_tests/segment_reduction_ops_test.py index b0208ce1e80..98a4c0c10ae 100644 --- a/tensorflow/python/kernel_tests/segment_reduction_ops_test.py +++ b/tensorflow/python/kernel_tests/segment_reduction_ops_test.py @@ -66,7 +66,7 @@ class SegmentReductionHelper(test.TestCase): output[index] = op1(output[index], x_flat[i]) else: output[index] = x_flat[i] - # zero initialize values that are still uncalcuated. + # zero initialize values that are still uncalculated. initial_value_slice = np.ones(slice_shape) * initial_value output = [o if o is not None else initial_value_slice for o in output] if op2 is not None: diff --git a/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py b/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py index 0ada446e84b..53677c15c84 100644 --- a/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py +++ b/tensorflow/python/kernel_tests/self_adjoint_eig_op_test.py @@ -104,7 +104,7 @@ def EquilibrateEigenVectorPhases(x, y): """Equilibrate the phase of the Eigenvectors in the columns of `x` and `y`. Eigenvectors are only unique up to an arbitrary phase. This function rotates x - such that it matches y. Precondition: The coluns of x and y differ by a + such that it matches y. Precondition: The columns of x and y differ by a multiplicative complex phase factor only. Args: diff --git a/tensorflow/python/kernel_tests/svd_op_test.py b/tensorflow/python/kernel_tests/svd_op_test.py index bbcab12a163..e64d0ee8139 100644 --- a/tensorflow/python/kernel_tests/svd_op_test.py +++ b/tensorflow/python/kernel_tests/svd_op_test.py @@ -405,7 +405,7 @@ if __name__ == "__main__": full_matrices) _AddTest(SvdGradOpTest, "SvdGrad", name, _GetSvdGradOpTest(dtype, shape, compute_uv, full_matrices)) - # The results are too inacurate for float32. + # The results are too inaccurate for float32. 
if dtype in (np.float64, np.complex128):
_AddTest(
SvdGradGradOpTest, "SvdGradGrad", name,
diff --git a/tensorflow/python/kernel_tests/template_test.py b/tensorflow/python/kernel_tests/template_test.py
index ab7eb4a5cad..d7ceaca2155 100644
--- a/tensorflow/python/kernel_tests/template_test.py
+++ b/tensorflow/python/kernel_tests/template_test.py
@@ -204,7 +204,7 @@ class TemplateTest(test.TestCase):
with context.eager_mode():
with self.assertRaisesRegexp(
ValueError,
- "unique_name_ cannot be used when eager exeuction is enabled."):
+ "unique_name_ cannot be used when eager execution is enabled."):
template.make_template(
"_", variable_scoped_function, unique_name_="s1")
diff --git a/tensorflow/python/kernel_tests/while_v2_test.py b/tensorflow/python/kernel_tests/while_v2_test.py
index d2e87b77301..15a389294e5 100644
--- a/tensorflow/python/kernel_tests/while_v2_test.py
+++ b/tensorflow/python/kernel_tests/while_v2_test.py
@@ -715,7 +715,7 @@ class WhileV2Test(test.TestCase, parameterized.TestCase):
# Skip over Identity.
while_op = r.op.inputs[0].op
# We can't directly use while_op.inputs.index() because Tensors are not
- # hashshable.
+ # hashable.
index = GetInputIndex(while_op, v)
self._assertNotAccumulated(while_op, index)
diff --git a/tensorflow/python/layers/base_test.py b/tensorflow/python/layers/base_test.py
index 4a426bebf5c..321a1854819 100644
--- a/tensorflow/python/layers/base_test.py
+++ b/tensorflow/python/layers/base_test.py
@@ -161,7 +161,7 @@ class BaseLayerTest(test.TestCase):
trainable=True)
@test_util.run_deprecated_v1
- def testReusePartitionedVaraiblesAndRegularizers(self):
+ def testReusePartitionedVariablesAndRegularizers(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
partitioner = partitioned_variables.fixed_size_partitioner(3)
for reuse in [False, True]:
diff --git a/tensorflow/python/layers/utils.py b/tensorflow/python/layers/utils.py
index af76ac50d61..cd431c5914d 100644
--- a/tensorflow/python/layers/utils.py
+++ b/tensorflow/python/layers/utils.py
@@ -13,7 +13,7 @@
# limitations under the License.
# =============================================================================
-"""Contains layer utilies for input validation and format conversion.
+"""Contains layer utilities for input validation and format conversion.
"""
from __future__ import absolute_import
from __future__ import division
@@ -214,7 +214,7 @@ def constant_value(pred):
Raises:
TypeError: If `pred` is not a Variable, Tensor or bool, or Python
- interger 1 or 0.
+ integer 1 or 0.
"""
# Allow integer booleans.
if isinstance(pred, int):
diff --git a/tensorflow/python/lib/core/py_func.cc b/tensorflow/python/lib/core/py_func.cc
index 59c68f5983d..281e9d61a4e 100644
--- a/tensorflow/python/lib/core/py_func.cc
+++ b/tensorflow/python/lib/core/py_func.cc
@@ -18,7 +18,7 @@ limitations under the License.
#include
// clang-format: off
-// Must be inlcluded first.
+// Must be included first.
#include "tensorflow/python/lib/core/numpy.h"
// clang-format: on
diff --git a/tensorflow/python/lib/core/pybind11_lib.h b/tensorflow/python/lib/core/pybind11_lib.h
index 080f50f30bd..93cae530337 100644
--- a/tensorflow/python/lib/core/pybind11_lib.h
+++ b/tensorflow/python/lib/core/pybind11_lib.h
@@ -45,7 +45,7 @@
inline py::object pyo(PyObject* ptr) { return py::reinterpret_steal<py::object>(ptr); }
-// Raise an exception if the PyErrOcurred flag is set or else return the Python
+// Raise an exception if the PyErrOccurred flag is set or else return the Python
// object.
inline py::object pyo_or_throw(PyObject* ptr) { diff --git a/tensorflow/python/ops/array_grad.py b/tensorflow/python/ops/array_grad.py index 4147ef207d1..d3b2f95d31b 100644 --- a/tensorflow/python/ops/array_grad.py +++ b/tensorflow/python/ops/array_grad.py @@ -59,7 +59,7 @@ def _ConcatGradHelper(op, grad, start_value_index, end_value_index, dim_index): each output of the op. start_value_index: An integer index of the first value in the op.inputs. end_value_index: An integer index of the last value in the op.inputs. - dim_index: An interger index of concat_dim or axis parameter in op.inputs. + dim_index: An integer index of concat_dim or axis parameter in op.inputs. Returns: Tensors representing the partial gradients with respect to each input @@ -651,7 +651,7 @@ def _GatherV2Grad(op, grad): params_grad = ops.IndexedSlices(values, indices, params_shape) else: # Handle axis by transposing the axis dimension to be the first non-batch - # dimension, compute the gradiend and transpose the result back. + # dimension, compute the gradient and transpose the result back. outer_shape = params_shape[:axis] inner_shape = params_shape[axis:][1:] values_shape = array_ops.concat([outer_shape, [-1], inner_shape], 0) diff --git a/tensorflow/python/ops/clip_ops.py b/tensorflow/python/ops/clip_ops.py index d316d6df6d7..36b5ec76a7d 100644 --- a/tensorflow/python/ops/clip_ops.py +++ b/tensorflow/python/ops/clip_ops.py @@ -120,12 +120,12 @@ def clip_by_value(t, clip_value_min, clip_value_max, t_max = ops.IndexedSlices(t_max, t.indices, t.dense_shape) return t_max - # TODO(scottzhu): switch to use new implmentation in 2 weeks. + # TODO(scottzhu): switch to use new implementation in 2 weeks. # return gen_math_ops.clip_by_value( # t, clip_value_min, clip_value_max, name=name) -# TODO(scottzhu): switch to use new implmentation in 2 weeks. +# TODO(scottzhu): switch to use new implementation in 2 weeks. 
# @ops.RegisterGradient("ClipByValue") def _clip_by_value_grad(op, grad): """Returns grad of clip_by_value.""" diff --git a/tensorflow/python/ops/clip_ops_test.py b/tensorflow/python/ops/clip_ops_test.py index a59a0c22d40..2d487b06291 100644 --- a/tensorflow/python/ops/clip_ops_test.py +++ b/tensorflow/python/ops/clip_ops_test.py @@ -46,14 +46,14 @@ class ClipOpsTest(test.TestCase): indices = constant_op.constant(indices) shape = constant_op.constant(shape) # IndexedSlices mode - indixed_slices = ops.IndexedSlices(values, indices, shape) - clipped = clip_ops.clip_by_norm(indixed_slices, max_norm, axes) + indexed_slices = ops.IndexedSlices(values, indices, shape) + clipped = clip_ops.clip_by_norm(indexed_slices, max_norm, axes) # clipped should be IndexedSlices self.assertIsInstance(clipped, ops.IndexedSlices) clipped = ops.convert_to_tensor(clipped) # Tensor mode - dense_tensor = ops.convert_to_tensor(indixed_slices) + dense_tensor = ops.convert_to_tensor(indexed_slices) dense_clipped = clip_ops.clip_by_norm(dense_tensor, max_norm, axes) result, expected = self.evaluate([clipped, dense_clipped]) self.assertAllClose(result, expected) diff --git a/tensorflow/python/ops/collective_ops.py b/tensorflow/python/ops/collective_ops.py index 61c74c6b07c..f34d6631783 100644 --- a/tensorflow/python/ops/collective_ops.py +++ b/tensorflow/python/ops/collective_ops.py @@ -129,7 +129,7 @@ def broadcast_send(t, shape, dtype, group_size, group_key, instance_key, 'Parameter group_size to broadcast_send must be at least 2.') if t.shape != shape: raise ValueError( - 'Shape of broadcast_send tensor not equal to delcared shape') + 'Shape of broadcast_send tensor not equal to declared shape') if t.dtype != dtype: raise ValueError( 'Type of broadcast_send tensor not equal to declared type') diff --git a/tensorflow/python/ops/control_flow_ops.py b/tensorflow/python/ops/control_flow_ops.py index 84603012261..989b3a4c338 100644 --- a/tensorflow/python/ops/control_flow_ops.py +++ b/tensorflow/python/ops/control_flow_ops.py @@ -573,7 +573,7 @@ def _EnforceShapeInvariant(merge_var, next_var): Raises: ValueError: If any tensor in `merge_var` has a more specific shape than - its correspnding tensor in `next_var`. + its corresponding tensor in `next_var`. """ if isinstance(merge_var, ops.Tensor): m_shape = merge_var.get_shape() diff --git a/tensorflow/python/ops/control_flow_util.py b/tensorflow/python/ops/control_flow_util.py index 0f984189aef..ba03c6304ed 100644 --- a/tensorflow/python/ops/control_flow_util.py +++ b/tensorflow/python/ops/control_flow_util.py @@ -13,7 +13,7 @@ # limitations under the License. # ============================================================================== -"""Utilty functions for control flow. +"""Utility functions for control flow. This file is necessary to avoid cyclic dependencies between ops.py and control_flow_ops.py. diff --git a/tensorflow/python/ops/control_flow_util_v2.py b/tensorflow/python/ops/control_flow_util_v2.py index 195a8694241..db03bd3e573 100644 --- a/tensorflow/python/ops/control_flow_util_v2.py +++ b/tensorflow/python/ops/control_flow_util_v2.py @@ -13,7 +13,7 @@ # limitations under the License. 
# ============================================================================== -"""Utilties for V2 control flow.""" +"""Utilities for V2 control flow.""" from __future__ import absolute_import from __future__ import division diff --git a/tensorflow/python/ops/control_flow_v2_toggles.py b/tensorflow/python/ops/control_flow_v2_toggles.py index 9bae4e37ed7..08e306a65aa 100644 --- a/tensorflow/python/ops/control_flow_v2_toggles.py +++ b/tensorflow/python/ops/control_flow_v2_toggles.py @@ -79,7 +79,7 @@ def output_all_intermediates(state): # pylint: disable=invalid-name to output the needed intermediates. We work around this by proactively outputting the needed intermediates when building the forward pass itself. Ideally any such extra tensors should be pruned out at runtime. However, if - for any reason this doesn't work for you or if you have an infernce-only model + for any reason this doesn't work for you or if you have an inference-only model you can turn this behavior off using `tf.compat.v1.experimental.output_all_intermediates(False)`. diff --git a/tensorflow/python/ops/ctc_ops.py b/tensorflow/python/ops/ctc_ops.py index dc4bec09df7..d0298fd8b6d 100644 --- a/tensorflow/python/ops/ctc_ops.py +++ b/tensorflow/python/ops/ctc_ops.py @@ -937,7 +937,7 @@ def ctc_loss_dense(labels, The dense implementation supports both CPU, GPU and TPU. A fast path is provided that significantly improves memory use for large vocabulary if the caller preprocesses label sequences to get unique label indices on the CPU - (eg. in the data input pipeline) using ctc_ops.unique and simplies this in + (eg. in the data input pipeline) using ctc_ops.unique and simplifies this in the optional "unique" kwarg. This is especially useful for TPU and GPU but also works with if used on CPU. diff --git a/tensorflow/python/ops/data_flow_ops.py b/tensorflow/python/ops/data_flow_ops.py index 303e02603df..94491b038fb 100644 --- a/tensorflow/python/ops/data_flow_ops.py +++ b/tensorflow/python/ops/data_flow_ops.py @@ -1699,7 +1699,7 @@ class BaseStagingArea(object): Returns: A (tensors, indices) tuple where `tensors` is a list of `Tensor` objects - and `indices` is a list of indices associed with the tensors. + and `indices` is a list of indices associated with the tensors. Raises: ValueError: If `vals` or `indices` is invalid. diff --git a/tensorflow/python/ops/distributions/bijector_impl.py b/tensorflow/python/ops/distributions/bijector_impl.py index fa78b2605d8..b49536ec350 100644 --- a/tensorflow/python/ops/distributions/bijector_impl.py +++ b/tensorflow/python/ops/distributions/bijector_impl.py @@ -442,7 +442,7 @@ class Bijector(object): Non injective maps `g` are supported, provided their domain `D` can be partitioned into `k` disjoint subsets, `Union{D1, ..., Dk}`, such that, ignoring sets of measure zero, the restriction of `g` to each subset is a - differentiable bijection onto `g(D)`. In particular, this imples that for + differentiable bijection onto `g(D)`. In particular, this implies that for `y in g(D)`, the set inverse, i.e. `g^{-1}(y) = {x in D : g(x) = y}`, always contains exactly `k` distinct points. 
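[Editor's note: a concrete instance of the k-subset convention spelled out in the bijector docstring above: take g(x) = x^2 on D = R. Then D partitions into D1 = (-inf, 0] and D2 = [0, inf); ignoring the measure-zero overlap at 0, g restricted to each subset is a differentiable bijection onto g(D) = [0, inf), and for every y > 0 the set inverse g^{-1}(y) = {-sqrt(y), +sqrt(y)} contains exactly k = 2 distinct points.]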
diff --git a/tensorflow/python/ops/distributions/distribution.py b/tensorflow/python/ops/distributions/distribution.py
index d551830fb84..26ab35a5263 100644
--- a/tensorflow/python/ops/distributions/distribution.py
+++ b/tensorflow/python/ops/distributions/distribution.py
@@ -242,12 +242,12 @@ class ReparameterizationType(object):
self._rep_type = rep_type
def __repr__(self):
- return "<Reparameteriation Type: %s>" % self._rep_type
+ return "<Reparameterization Type: %s>" % self._rep_type
def __eq__(self, other):
"""Determine if this `ReparameterizationType` is equal to another.
- Since RepaparameterizationType instances are constant static global
+ Since ReparameterizationType instances are constant static global
instances, equality checks if two instances' id() values are equal.
Args:
diff --git a/tensorflow/python/ops/distributions/special_math.py b/tensorflow/python/ops/distributions/special_math.py
index c529fb45d43..2c5647c59c5 100644
--- a/tensorflow/python/ops/distributions/special_math.py
+++ b/tensorflow/python/ops/distributions/special_math.py
@@ -479,7 +479,7 @@ def log_cdf_laplace(x, name="log_cdf_laplace"):
# exp{-x} --> inf, for x << -1
safe_exp_neg_x = math_ops.exp(-math_ops.abs(x))
- # log1p(z) = log(1 + z) approx z for |z| << 1. This approxmation is used
+ # log1p(z) = log(1 + z) approx z for |z| << 1. This approximation is used
# internally by log1p, rather than being done explicitly here.
upper_solution = math_ops.log1p(-0.5 * safe_exp_neg_x)
diff --git a/tensorflow/python/ops/embedding_ops.py b/tensorflow/python/ops/embedding_ops.py
index 7af8b2dc32a..731599dad05 100644
--- a/tensorflow/python/ops/embedding_ops.py
+++ b/tensorflow/python/ops/embedding_ops.py
@@ -137,7 +137,7 @@ def _embedding_lookup_and_transform(params,
array_ops.gather(params[0], ids, name=name), ids, max_norm)
if transform_fn:
result = transform_fn(result)
- # Make sure the final result does not have colocation contraints on the
+ # Make sure the final result does not have colocation constraints on the
# params. Similar to the case np > 1 where parallel_dynamic_stitch is
# outside the scioe of all with ops.colocate_with(params[p]).
return array_ops.identity(result)
@@ -208,7 +208,7 @@ def _embedding_lookup_and_transform(params,
if transform_fn:
# If transform_fn is provided, the clip_by_norm precedes
# the transform and hence must be co-located. See below
- # for the counterpart if transform_fn is not proveded.
+ # for the counterpart if transform_fn is not provided.
result = transform_fn(_clip(result, pids, max_norm))
partitioned_result.append(result)
# Stitch these back together
diff --git a/tensorflow/python/ops/image_grad.py b/tensorflow/python/ops/image_grad.py
index 23b7e81ec75..8deac46aa06 100644
--- a/tensorflow/python/ops/image_grad.py
+++ b/tensorflow/python/ops/image_grad.py
@@ -248,7 +248,7 @@ def _RGBToHSVGrad(op, grad):
# dh_dr_1 ->
# if red was MAX, then derivative = 60 * -1 * (G-B)/square(MAX-MIN) == 60 *\
# -1 * (greens-blues) * reciprocal(square(saturation)) * \
- # reciprical(square(value))
+ # reciprocal(square(value))
# elif green was MAX, there are two subcases
# ie when red was MIN and when red was NOT MIN
# dh_dr_2 ->
diff --git a/tensorflow/python/ops/image_ops_impl.py b/tensorflow/python/ops/image_ops_impl.py
index 3e2f7c814e6..94c3dc278b8 100644
--- a/tensorflow/python/ops/image_ops_impl.py
+++ b/tensorflow/python/ops/image_ops_impl.py
@@ -3547,7 +3547,7 @@ def _ssim_per_channel(img1,
# TODO(sjhwang): Try FFT.
# TODO(sjhwang): Gaussian kernel is separable in space.
diff --git a/tensorflow/python/ops/linalg/linear_operator_circulant.py b/tensorflow/python/ops/linalg/linear_operator_circulant.py
index 4c57271f6af..ace276900fc 100644
--- a/tensorflow/python/ops/linalg/linear_operator_circulant.py
+++ b/tensorflow/python/ops/linalg/linear_operator_circulant.py
@@ -152,7 +152,7 @@ class _BaseLinearOperatorCirculant(linear_operator.LinearOperator):
   |z y x w|
   ```
 
-  `block_depth = 2` means `A` is block symmetric circulant with symemtric
+  `block_depth = 2` means `A` is block symmetric circulant with symmetric
   circulant blocks.  For example, with `W`, `X`, `Y`, `Z` symmetric circulant,
 
   ```
diff --git a/tensorflow/python/ops/linalg/linear_operator_test_util.py b/tensorflow/python/ops/linalg/linear_operator_test_util.py
index cbdbe5b3eee..d49448f5cd8 100644
--- a/tensorflow/python/ops/linalg/linear_operator_test_util.py
+++ b/tensorflow/python/ops/linalg/linear_operator_test_util.py
@@ -1009,7 +1009,7 @@ def random_normal_correlated_columns(shape,
 
   If `M < N`, `A` is a random `M x N` [batch] matrix with iid Gaussian entries.
 
-  If `M >= N`, then the colums of `A` will be made almost dependent as follows:
+  If `M >= N`, then the columns of `A` will be made almost dependent as follows:
 
   ```
   L = random normal N x N-1 matrix, mean = 0, stddev = 1 / sqrt(N - 1)
diff --git a/tensorflow/python/ops/linalg/linear_operator_util.py b/tensorflow/python/ops/linalg/linear_operator_util.py
index 762b775db61..e176d140b8a 100644
--- a/tensorflow/python/ops/linalg/linear_operator_util.py
+++ b/tensorflow/python/ops/linalg/linear_operator_util.py
@@ -301,7 +301,7 @@ def broadcast_matrix_batch_dims(batch_matrices, name=None):
     name:  A string name to prepend to created ops.
 
   Returns:
-    bcast_matrices: List of `Tensor`s, with `bcast_matricies[i]` containing
+    bcast_matrices: List of `Tensor`s, with `bcast_matrices[i]` containing
       the values from `batch_matrices[i]`, with possibly broadcast batch dims.
 
   Raises:
diff --git a/tensorflow/python/ops/linalg_grad.py b/tensorflow/python/ops/linalg_grad.py
index 94ef2a9bff4..f456581ef60 100644
--- a/tensorflow/python/ops/linalg_grad.py
+++ b/tensorflow/python/ops/linalg_grad.py
@@ -745,7 +745,7 @@ def _SvdGrad(op, grad_s, grad_u, grad_v):
   # only defined up a (k-dimensional) subspace. In practice, this can
   # lead to numerical instability when singular values are close but not
   # exactly equal.
-  # To avoid nan in cases with degenrate sigular values or zero sigular values
+  # To avoid nan in cases with degenerate singular values or zero singular values
   # in calculating f and s_inv_mat, we introduce a Lorentz brodening.
 
   def _SafeReciprocal(x, epsilon=1E-20):
diff --git a/tensorflow/python/ops/linalg_ops.py b/tensorflow/python/ops/linalg_ops.py
index fcbfd51e394..52d216cfd71 100644
--- a/tensorflow/python/ops/linalg_ops.py
+++ b/tensorflow/python/ops/linalg_ops.py
@@ -200,7 +200,7 @@ def eye(num_rows,
       ==> [[1., 0.],
            [0., 1.]]
 
-  # Construct a batch of 3 identity matricies, each 2 x 2.
+  # Construct a batch of 3 identity matrices, each 2 x 2.
   # batch_identity[i, :, :] is a 2 x 2 identity matrix, i = 0, 1, 2.
   batch_identity = tf.eye(2, batch_shape=[3])
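The batched-identity behavior described in that `eye` docstring can be verified in a few lines of eager TF 2.x (shapes per the docstring):

```python
import tensorflow as tf

batch_identity = tf.eye(2, batch_shape=[3])
print(batch_identity.shape)  # (3, 2, 2)
for i in range(3):
    # Each leading-axis slice is a 2 x 2 identity matrix.
    assert (batch_identity[i].numpy() == [[1.0, 0.0], [0.0, 1.0]]).all()
```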
diff --git a/tensorflow/python/ops/lookup_ops.py b/tensorflow/python/ops/lookup_ops.py
index c043eca9103..395e6a7feaa 100644
--- a/tensorflow/python/ops/lookup_ops.py
+++ b/tensorflow/python/ops/lookup_ops.py
@@ -1533,7 +1533,7 @@ def index_to_string_table_from_file(vocabulary_file,
       value_column_index=value_column_index,
       delimiter=delimiter)
 
-  # TODO(yleon): Use a more effienct structure.
+  # TODO(yleon): Use a more efficient structure.
   return StaticHashTableV1(init, default_value)
 
 
@@ -1595,7 +1595,7 @@ def index_to_string_table_from_tensor(vocabulary_list,
   init = KeyValueTensorInitializer(
       keys, vocabulary_list, dtypes.int64, dtypes.string, name="table_init")
 
-  # TODO(yleon): Use a more effienct structure.
+  # TODO(yleon): Use a more efficient structure.
   return StaticHashTableV1(init, default_value)
 
diff --git a/tensorflow/python/ops/math_ops.py b/tensorflow/python/ops/math_ops.py
index 464edeaf021..8dbb65f81fe 100644
--- a/tensorflow/python/ops/math_ops.py
+++ b/tensorflow/python/ops/math_ops.py
@@ -1290,7 +1290,7 @@ def _mul_dispatch(x, y, name=None):
 
 # NOTE(aselle): When integer division is added for sparse_dense_cwise,
 # div, truediv, and floordiv should be delegated appropriately for
-# Python sematnics, analogous to dense cwise tensor operations.
+# Python semantics, analogous to dense cwise tensor operations.
 _OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
                               sparse_tensor.SparseTensor)
 _OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
diff --git a/tensorflow/python/ops/metrics_impl.py b/tensorflow/python/ops/metrics_impl.py
index d2b9274f42f..d233fd5d791 100644
--- a/tensorflow/python/ops/metrics_impl.py
+++ b/tensorflow/python/ops/metrics_impl.py
@@ -284,7 +284,7 @@ def _aggregate_across_replicas(metrics_collections, metric_value_fn, *args):
     if hasattr(distribution.extended, '_outer_control_flow_context'):
       # If there was an outer context captured before this method was called,
       # then we enter that context to create the metric value op. If the
-      # caputred context is `None`, ops.control_dependencies(None) gives the
+      # captured context is `None`, ops.control_dependencies(None) gives the
       # desired behavior. Else we use `Enter` and `Exit` to enter and exit the
       # captured context.
      # This special handling is needed because sometimes the metric is created
@@ -629,7 +629,7 @@ def _aggregate_variable(v, collections):
 @tf_export(v1=['metrics.auc'])
 @deprecated(None,
             'The value of AUC returned by this may race with the update so '
-            'this is deprected. Please use tf.keras.metrics.AUC instead.')
+            'this is deprecated. Please use tf.keras.metrics.AUC instead.')
 def auc(labels,
         predictions,
         weights=None,
diff --git a/tensorflow/python/ops/nn_impl.py b/tensorflow/python/ops/nn_impl.py
index 2c00e051db2..46ca9b1c087 100644
--- a/tensorflow/python/ops/nn_impl.py
+++ b/tensorflow/python/ops/nn_impl.py
@@ -934,7 +934,7 @@ def separable_conv2d(input,
     rate = [1, 1]
 
   # The layout of the ops in the graph are expected to be as follows:
-  # depthwise_conv2d  // Conv2D op corresponding to native deptwise conv.
+  # depthwise_conv2d  // Conv2D op corresponding to native depthwise conv.
   # separable_conv2d  // Conv2D op corresponding to the pointwise conv.
 
   def op(input_converted, _, padding):
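A minimal sketch of the depthwise-then-pointwise layout that comment describes, using the public `tf.nn.separable_conv2d` (all shapes here are illustrative):

```python
import tensorflow as tf

x = tf.random.normal([1, 8, 8, 3])               # NHWC input
depthwise = tf.random.normal([3, 3, 3, 2])       # 3x3 filters, multiplier 2 per channel
pointwise = tf.random.normal([1, 1, 3 * 2, 4])   # 1x1 conv mixing the 6 depthwise channels

# Internally: a depthwise conv followed by the pointwise (1x1) Conv2D.
y = tf.nn.separable_conv2d(x, depthwise, pointwise,
                           strides=[1, 1, 1, 1], padding="SAME")
print(y.shape)  # (1, 8, 8, 4)
```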
@@ -1295,7 +1295,7 @@ def weighted_moments(x, axes, frequency_weights, name=None, keep_dims=None,
       # The shape of the weights isn't necessarily the same as x's
       # shape, just broadcast-compatible with it -- so this expression
       # performs broadcasting to give a per-item weight, with the same
-      # shape as (freqency_weights * x). This avoids having to reason
+      # shape as (frequency_weights * x). This avoids having to reason
       # through all the broadcast logic to compute a correct
       # sum_of_weights.
       broadcasted_weights = frequency_weights + array_ops.zeros_like(x)
diff --git a/tensorflow/python/ops/nn_ops.py b/tensorflow/python/ops/nn_ops.py
index e4a477ecda4..f5446026d2f 100644
--- a/tensorflow/python/ops/nn_ops.py
+++ b/tensorflow/python/ops/nn_ops.py
@@ -533,7 +533,7 @@ class _WithSpaceToBatch(object):
     spatial_dims = sorted(set(int(x) for x in orig_spatial_dims))
     if spatial_dims != orig_spatial_dims or any(x < 1 for x in spatial_dims):
       raise ValueError(
-          "spatial_dims must be a montonically increasing sequence of positive "
+          "spatial_dims must be a monotonically increasing sequence of positive "
           "integers")
 
     if data_format is not None and data_format.startswith("NC"):
@@ -4359,7 +4359,7 @@ def dropout_v2(x, rate, noise_shape=None, seed=None, name=None):
   rely on the output of other nodes.
 
   More precisely: With probability `rate` elements of `x` are set to `0`.
-  The remaining elemenst are scaled up by `1.0 / (1 - rate)`, so that the
+  The remaining elements are scaled up by `1.0 / (1 - rate)`, so that the
   expected value is preserved.
 
   >>> tf.random.set_seed(0)
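The scaling rule in that `dropout_v2` docstring is easy to sanity-check outside TF; a NumPy sketch (illustrative, not the actual kernel):

```python
import numpy as np

rng = np.random.default_rng(0)
x = np.ones(1_000_000)
rate = 0.3

keep = rng.random(x.shape) >= rate
dropped = np.where(keep, x / (1.0 - rate), 0.0)

# Survivors are scaled by 1 / (1 - rate), so the mean is preserved.
print(x.mean(), dropped.mean())  # both approximately 1.0
```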
diff --git a/tensorflow/python/ops/nn_test.py b/tensorflow/python/ops/nn_test.py
index 30df5cf7c47..860bdc60387 100644
--- a/tensorflow/python/ops/nn_test.py
+++ b/tensorflow/python/ops/nn_test.py
@@ -1270,7 +1270,7 @@ class AvgPoolTest(test_lib.TestCase):
     self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
 
   def test1DNumpy(self):
-    # explicilty use float32 for ROCm, as MIOpen does not yet support float64
+    # explicitly use float32 for ROCm, as MIOpen does not yet support float64
     # np.ones defaults to using float64 when dtype is not explicitly specified
     dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
     x = np.ones([3, 6, 5], dtype=dtype)
@@ -1304,7 +1304,7 @@ class AvgPoolTest(test_lib.TestCase):
     self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
 
   def test2DNumpy(self):
-    # explicilty use float32 for ROCm, as MIOpen does not yet support float64
+    # explicitly use float32 for ROCm, as MIOpen does not yet support float64
     # np.ones defaults to using float64 when dtype is not explicitly specified
     dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
     x = np.ones([3, 6, 6, 5], dtype=dtype)
@@ -1355,7 +1355,7 @@ class MaxPoolTest(test_lib.TestCase):
     self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
 
   def test1DNumpy(self):
-    # explicilty use float32 for ROCm, as MIOpen does not yet support float64
+    # explicitly use float32 for ROCm, as MIOpen does not yet support float64
     # np.ones defaults to using float64 when dtype is not explicitly specified
     dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
     x = np.ones([3, 6, 5], dtype=dtype)
@@ -1389,7 +1389,7 @@ class MaxPoolTest(test_lib.TestCase):
     self.assertAllEqual(self.evaluate(y1), self.evaluate(y2))
 
   def test2DNumpy(self):
-    # explicilty use float32 for ROCm, as MIOpen does not yet support float64
+    # explicitly use float32 for ROCm, as MIOpen does not yet support float64
     # np.ones defaults to using float64 when dtype is not explicitly specified
     dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
     x = np.ones([3, 6, 6, 5], dtype=dtype)
@@ -1442,7 +1442,7 @@ class MaxPoolTest(test_lib.TestCase):
 class ConvolutionTest(test_lib.TestCase):
 
   def testUnknownSize(self):
-    # explicilty use float32 for ROCm, as MIOpen does not yet support float64
+    # explicitly use float32 for ROCm, as MIOpen does not yet support float64
     # np.ones defaults to using float64 when dtype is not explicitly specified
     dtype = np.float32 if test_lib.is_built_with_rocm() else np.float64
     x = tensor_spec.TensorSpec(None, dtypes.float32, name="x")
diff --git a/tensorflow/python/ops/op_selector_test.py b/tensorflow/python/ops/op_selector_test.py
index 2a70a843812..249c78bae3d 100644
--- a/tensorflow/python/ops/op_selector_test.py
+++ b/tensorflow/python/ops/op_selector_test.py
@@ -166,7 +166,7 @@ class SelectTest(test.TestCase):
             self.a.op, self.b.op, self.c.op, self.d.op, self.f.op, self.h.op
         ]))
 
-    # Vanially backward search via self.h.op includes everything excpet e.op.
+    # Vanilla backward search via self.h.op includes everything except e.op.
     ops = op_selector.get_backward_walk_ops(seed_ops, inclusive=True)
     self.assertEqual(
         set(ops),
diff --git a/tensorflow/python/ops/parallel_for/gradients.py b/tensorflow/python/ops/parallel_for/gradients.py
index 3ab60a327cb..94ab49951aa 100644
--- a/tensorflow/python/ops/parallel_for/gradients.py
+++ b/tensorflow/python/ops/parallel_for/gradients.py
@@ -37,7 +37,7 @@ def jacobian(output, inputs, use_pfor=True, parallel_iterations=None):
       parallel. This knob can be used to control the total memory usage.
 
   Returns:
-    A tensor or a nested strucutre of tensors with the same structure as
+    A tensor or a nested structure of tensors with the same structure as
     `inputs`. Each entry is the jacobian of `output` w.r.t. to the
     corresponding value in `inputs`. If output has shape [y_1, ..., y_n] and
     inputs_i has shape [x_1, ..., x_m], the corresponding jacobian has shape
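The shape contract in that `jacobian` docstring (output dims followed by input dims) can be checked with the public `tf.GradientTape.jacobian`, which follows the same convention (sketch only):

```python
import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])                # input shape [3]
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.stack([x[0] * x[1], x[1] * x[2]])    # output shape [2]

jac = tape.jacobian(y, x)
print(jac.shape)  # (2, 3): [y_1, ..., y_n] followed by [x_1, ..., x_m]
```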
diff --git a/tensorflow/python/ops/parallel_for/pfor.py b/tensorflow/python/ops/parallel_for/pfor.py
index 43632c8e062..e6284c2f99a 100644
--- a/tensorflow/python/ops/parallel_for/pfor.py
+++ b/tensorflow/python/ops/parallel_for/pfor.py
@@ -351,7 +351,7 @@ class WhileOp(object):
     return inp, stacked
 
   def _maybe_stacked(self, cache, inp):
-    """Heuristic to figue out if the coverting inp leads to a stacked value.
+    """Heuristic to figure out if converting inp leads to a stacked value.
 
 
     Args:
@@ -609,7 +609,7 @@ class WhileOp(object):
       return not_all_done
 
     def body(not_all_done, indices, *args):
-      # See documentatin for __call__ for the structure of *args.
+      # See documentation for __call__ for the structure of *args.
       num_enters = len(self._enters)
       inputs = args[:num_enters]
       output_tas = args[num_enters:]
@@ -1433,7 +1433,7 @@ class PFor(object):
       else:
         new_outputs = [wrap(x, False) for x in y_op.outputs]
     elif not (is_stateful or is_while_loop or some_input_stacked):
-      # All inputs are unstacked or uncoverted but some control inputs are
+      # All inputs are unstacked or unconverted but some control inputs are
       # converted.
       # TODO(rachelim): Handle the case where some inputs are sparsely
      # stacked (i.e. any(x.is_sparse_stacked for x in converted_inputs))
@@ -3378,7 +3378,7 @@ def _convert_stack_pop_v2(pfor_input):
   stack_cache_key = _stack_cache_key(pfor_input)
   stacked = _stack_cache.get(stack_cache_key, None)
   # If a StackPushV2 has not been converted yet, we default to unstacked since
-  # the push could be outside of pfor, or the covertor may not be called if the
+  # the push could be outside of pfor, or the convertor may not be called if the
   # inputs are unconverted.
   if stacked is None:
     stacked = False
diff --git a/tensorflow/python/ops/ragged/ragged_concat_op_test.py b/tensorflow/python/ops/ragged/ragged_concat_op_test.py
index 5802c0a939f..4661061de33 100644
--- a/tensorflow/python/ops/ragged/ragged_concat_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_concat_op_test.py
@@ -220,7 +220,7 @@ class RaggedConcatOpTest(test_util.TensorFlowTestCase,
           axis=0,
           expected=[1, 2, 3, 4, 5, 6]),
       dict(
-          descr='One input (so ragged_conat is a noop)',
+          descr='One input (so ragged_concat is a noop)',
          rt_inputs=([['a00', 'a01'], [], ['a20', 'a21']],),
          axis=0,
          expected=[[b'a00', b'a01'], [], [b'a20', b'a21']]),
diff --git a/tensorflow/python/ops/ragged/ragged_concat_ops.py b/tensorflow/python/ops/ragged/ragged_concat_ops.py
index fea0dc6fb0d..24dbab47fc0 100644
--- a/tensorflow/python/ops/ragged/ragged_concat_ops.py
+++ b/tensorflow/python/ops/ragged/ragged_concat_ops.py
@@ -260,7 +260,7 @@ def _ragged_stack_concat_axis_1(rt_inputs, stack_values):
   ]
 
   with ops.control_dependencies(nrows_checks):
-    # Concatentate the inputs together to put them in a single ragged tensor.
+    # Concatenate the inputs together to put them in a single ragged tensor.
     concatenated_rt = _ragged_stack_concat_axis_0(rt_inputs, stack_values=False)
 
     # Use ragged.gather to permute the rows of concatenated_rt. In particular,
diff --git a/tensorflow/python/ops/ragged/ragged_config.py b/tensorflow/python/ops/ragged/ragged_config.py
index 91056680912..03b0b466d16 100644
--- a/tensorflow/python/ops/ragged/ragged_config.py
+++ b/tensorflow/python/ops/ragged/ragged_config.py
@@ -20,7 +20,7 @@ from __future__ import print_function
 
 
 def auto_cast_partition_dtype():
-  """Whether incopmatible row-partitioning dtypes should be auto-converted.
+  """Whether incompatible row-partitioning dtypes should be auto-converted.
 
   If true, then operations that combine RaggedTensors but have different
   row-partitioning tensor dtypes will be automatically cast to a
diff --git a/tensorflow/python/ops/ragged/ragged_conversion_ops.py b/tensorflow/python/ops/ragged/ragged_conversion_ops.py
index b75790e7353..e8c625ccc73 100644
--- a/tensorflow/python/ops/ragged/ragged_conversion_ops.py
+++ b/tensorflow/python/ops/ragged/ragged_conversion_ops.py
@@ -108,7 +108,7 @@ def _ragged_tensor_to_tensor_grad(op, grad):
 
 
 def _rank_ignoring_leading_dims_with_size_1(value):
-  """Returns `rank(value)`, ignorning any leading dimesions with size 1."""
+  """Returns `rank(value)`, ignoring any leading dimensions with size 1."""
   # Compute the result using static shape, if possible.
   if value.shape.rank is not None:
     ndims = value.shape.rank
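What `_rank_ignoring_leading_dims_with_size_1` computes is simple to state in plain Python (illustrative; the real helper also handles shapes that are only known dynamically):

```python
def rank_ignoring_leading_size_1_dims(shape):
    # Drop leading 1s, count the rest: [1, 1, 3, 4] -> 2.
    rank = len(shape)
    for dim in shape:
        if dim != 1:
            break
        rank -= 1
    return rank

assert rank_ignoring_leading_size_1_dims([1, 1, 3, 4]) == 2
assert rank_ignoring_leading_size_1_dims([2, 1, 3]) == 3
```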
diff --git a/tensorflow/python/ops/ragged/ragged_getitem.py b/tensorflow/python/ops/ragged/ragged_getitem.py
index 2c8228770db..eca3cc3cdfa 100644
--- a/tensorflow/python/ops/ragged/ragged_getitem.py
+++ b/tensorflow/python/ops/ragged/ragged_getitem.py
@@ -56,7 +56,7 @@ def ragged_tensor_getitem(self, key):
         `Tensor`s
       * `Ellipsis`
       * `tf.newaxis`
-      * `tuple` containing any of the above (for multidimentional indexing)
+      * `tuple` containing any of the above (for multidimensional indexing)
 
   Returns:
     A `Tensor` or `RaggedTensor` object.  Values that include at least one
diff --git a/tensorflow/python/ops/ragged/ragged_tensor.py b/tensorflow/python/ops/ragged/ragged_tensor.py
index d5c04501476..dc0c90ef4b5 100644
--- a/tensorflow/python/ops/ragged/ragged_tensor.py
+++ b/tensorflow/python/ops/ragged/ragged_tensor.py
@@ -1688,7 +1688,7 @@ class RaggedTensor(composite_tensor.CompositeTensor):
       # If the padding isn't a scalar, then require that all values in the
       # padding match each item in the tensor.  After this block of code,
       # `has_default.shape = tensor.shape[:2]`.  (Unfortunately, we can't just
-      # use reduce_all for both cases, becaue when you pass an empty `axis`
+      # use reduce_all for both cases, because when you pass an empty `axis`
       # list to reduce_all, it reduces all axes; but we want it to reduce no
       # axes -- i.e., to be a no-op.)
       tensor_rank = array_ops.rank(tensor)
diff --git a/tensorflow/python/ops/ragged/ragged_tensor_shape.py b/tensorflow/python/ops/ragged/ragged_tensor_shape.py
index eacc397b2f7..5dd84c5387e 100644
--- a/tensorflow/python/ops/ragged/ragged_tensor_shape.py
+++ b/tensorflow/python/ops/ragged/ragged_tensor_shape.py
@@ -38,7 +38,7 @@ class RaggedTensorDynamicShape(object):
   Each `RaggedTensorDynamicShape` consists of an ordered list of dimension
   sizes.  There are two dimension types:
 
-    * "Uniform dimensions" are dimenisons where all slices have the same
+    * "Uniform dimensions" are dimensions where all slices have the same
       length.  `RaggedTensorDynamicShape` records the size of each uniform
       dimension using a single scalar integer.
 
@@ -60,7 +60,7 @@ class RaggedTensorDynamicShape(object):
   The sizes of partitioned dimensions are recorded using
   `partitioned_dim_sizes` and `inner_dim_sizes`:
 
-    * `paritioned_dim_sizes` is a list of tensors (one for each partitioned
+    * `partitioned_dim_sizes` is a list of tensors (one for each partitioned
       dimension).
       * For uniform dimensions, the tensor is an integer scalar specifying the
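The uniform-versus-ragged distinction in that docstring, shown on a concrete value with the public ragged API:

```python
import tensorflow as tf

rt = tf.ragged.constant([[1, 2, 3], [4], [5, 6]])
# Dimension 0 is uniform: a single scalar describes it.
print(rt.nrows())        # 3
# Dimension 1 is ragged: it needs a length per row, not one scalar.
print(rt.row_lengths())  # [3 1 2]
```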
diff --git a/tensorflow/python/ops/ragged/ragged_tensor_test.py b/tensorflow/python/ops/ragged/ragged_tensor_test.py
index 683c622d2ba..e0d11aea15d 100644
--- a/tensorflow/python/ops/ragged/ragged_tensor_test.py
+++ b/tensorflow/python/ops/ragged/ragged_tensor_test.py
@@ -1442,7 +1442,7 @@ class RaggedTensorTest(test_util.TensorFlowTestCase,
        'factory': RaggedTensor.from_row_lengths,
        'values': [1, 2, 3, 4],
        'row_lengths': [[1, 2], [1, 0]]},
-      {'descr': 'negatve row_lengths',
+      {'descr': 'negative row_lengths',
        'factory': RaggedTensor.from_row_lengths,
        'values': [1, 2, 3, 4],
        'row_lengths': [3, -1, 2]},
diff --git a/tensorflow/python/ops/ragged/ragged_to_tensor_op_test.py b/tensorflow/python/ops/ragged/ragged_to_tensor_op_test.py
index fef6a7947e4..fc2047de954 100644
--- a/tensorflow/python/ops/ragged/ragged_to_tensor_op_test.py
+++ b/tensorflow/python/ops/ragged/ragged_to_tensor_op_test.py
@@ -736,11 +736,11 @@ class RaggedToDenseBenchmark(googletest.Benchmark):
                     default_shape=(),
                     output_shape=None,
                     min_iters=1000):
-    """Run a benchmark with the specified configuraiton parameters.
+    """Run a benchmark with the specified configuration parameters.
 
     Args:
       shape: Bounding box for the input ragged tensor.
-      ragged_rank: Ragged rank for the input ragged tensor.  Defauts to
+      ragged_rank: Ragged rank for the input ragged tensor.  Defaults to
         `len(shape)-1`.
       dtype: Data type for the input ragged tensor.
       fill: How full each dimension should be (0-1).  Corresponds 1:1 with
diff --git a/tensorflow/python/ops/random_ops.py b/tensorflow/python/ops/random_ops.py
index 4fadc8d7561..5513f6fc1ab 100644
--- a/tensorflow/python/ops/random_ops.py
+++ b/tensorflow/python/ops/random_ops.py
@@ -55,7 +55,7 @@ def random_normal(shape,
 
   >>> tf.random.normal([4], 0, 1, tf.float32)
 
-  Example that outputs a reproduceable result:
+  Example that outputs a reproducible result:
 
   >>> tf.random.set_seed(5);
   >>> tf.random.normal([2,2], 0, 1, tf.float32, seed=1)
@@ -64,7 +64,7 @@ def random_normal(shape,
          [-0.169515  ,  1.0824056 ]], dtype=float32)>
 
   In this case, we are setting both the global and operation-level seed to
-  ensure this result is reproduceable.  See `tf.random.set_seed` for more
+  ensure this result is reproducible.  See `tf.random.set_seed` for more
   information.
 
   Args:
diff --git a/tensorflow/python/ops/rnn_cell_wrapper_impl.py b/tensorflow/python/ops/rnn_cell_wrapper_impl.py
index f2f17375fdd..06ff5daa811 100644
--- a/tensorflow/python/ops/rnn_cell_wrapper_impl.py
+++ b/tensorflow/python/ops/rnn_cell_wrapper_impl.py
@@ -387,7 +387,7 @@ class ResidualWrapperBase(object):
           self._residual_fn)
       config = {"residual_fn": function, "residual_fn_type": function_type,
-                "residule_fn_module": function_module}
+                "residual_fn_module": function_module}
     else:
       config = {}
     base_config = super(ResidualWrapperBase, self).get_config()
@@ -399,7 +399,7 @@ class ResidualWrapperBase(object):
       config = config.copy()
       residual_function = _parse_config_to_function(
           config, custom_objects, "residual_fn", "residual_fn_type",
-          "residule_fn_module")
+          "residual_fn_module")
       config["residual_fn"] = residual_function
     return super(ResidualWrapperBase, cls).from_config(
         config, custom_objects=custom_objects)
diff --git a/tensorflow/python/ops/script_ops.py b/tensorflow/python/ops/script_ops.py
index e3396852861..bee85dc4a5b 100644
--- a/tensorflow/python/ops/script_ops.py
+++ b/tensorflow/python/ops/script_ops.py
@@ -93,7 +93,7 @@ class EagerFunc(object):
 
     Returns:
       A tensor of type `dtype`, or a zeros tensor if value is None and
-      this function is in fact a grdient function.
+      this function is in fact a gradient function.
 
     Raises:
       RuntimeError: if `value` is a variable.
@@ -108,7 +108,7 @@ class EagerFunc(object):
           "question: %s" % value)
     if value is None and self._is_grad_func:
       # Gradient functions may legitimately return a list that contains
-      # both Tensors and Python Nones. Unfortuantely this breaks the
+      # both Tensors and Python Nones. Unfortunately this breaks the
       # OpKernel, so for now we replace None objects with zeros, which is
       # mathematically correct but will prevent short-circuiting gradient
       # computations.
diff --git a/tensorflow/python/ops/special_math_ops_test.py b/tensorflow/python/ops/special_math_ops_test.py
index 25b2fcc0694..788ff81cb4b 100644
--- a/tensorflow/python/ops/special_math_ops_test.py
+++ b/tensorflow/python/ops/special_math_ops_test.py
@@ -745,7 +745,7 @@ class EinsumTest(test.TestCase):
       output = self.evaluate(special_math_ops.einsum(equation, *input_tensors))
       self.assertAllClose(output, np.zeros(output_shape), atol=1e-4, rtol=1e-4)
 
-    # Contractions along zero-sized dimensons.
+    # Contractions along zero-sized dimensions.
     check('ab,bc->ac', [(0, 10), (10, 10)], (0, 10))
 
     # From transformer xl.
     check('ibnd,ijbn->jnd', [(1, 0, 5, 10), (1, 1, 0, 5)], (1, 5, 10))
diff --git a/tensorflow/python/ops/stateful_random_ops.py b/tensorflow/python/ops/stateful_random_ops.py
index 95f1ba54475..16ff41712f2 100644
--- a/tensorflow/python/ops/stateful_random_ops.py
+++ b/tensorflow/python/ops/stateful_random_ops.py
@@ -356,7 +356,7 @@ class Generator(tracking.AutoTrackable, composite_tensor.CompositeTensor):
         [https://www.thesalmons.org/john/random123/papers/random123sc11.pdf]).
         The string names `"philox"` and `"threefry"` can also be used.
         Note `PHILOX` guarantees the same numbers are produced (given
-        the same random state) across all architextures (CPU, GPU, XLA etc).
+        the same random state) across all architectures (CPU, GPU, XLA etc).
 
     Throws:
       ValueError: if the generator is created inside a synchronous
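A short usage sketch of the counter-based generator whose docstring is touched above. The constructor path is an assumption: in the TF 2.0 era this class was exposed as `tf.random.experimental.Generator`, later as `tf.random.Generator`.

```python
import tensorflow as tf

# Same seed + Philox => the same stream, which is what makes results
# portable across CPU, GPU and XLA, as the docstring says.
g1 = tf.random.Generator.from_seed(42, alg="philox")
g2 = tf.random.Generator.from_seed(42, alg="philox")

assert bool(tf.reduce_all(tf.equal(g1.normal([3]), g2.normal([3]))))
```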
diff --git a/tensorflow/python/ops/structured/structured_tensor.py b/tensorflow/python/ops/structured/structured_tensor.py
index b3c821301a2..ad56aa72975 100644
--- a/tensorflow/python/ops/structured/structured_tensor.py
+++ b/tensorflow/python/ops/structured/structured_tensor.py
@@ -450,7 +450,7 @@ class StructuredTensor(composite_tensor.CompositeTensor):
         value = value.to_list()
       elif isinstance(value, StructuredTensor):
         value = value.to_pyval()
-      # TODO(edloper): Throw an excpetion if value is an unexpected type.
+      # TODO(edloper): Throw an exception if value is an unexpected type.
       result[key] = value
 
     # If rank>0, then re-group each value from dict-of-list to list-of-dict.
diff --git a/tensorflow/python/ops/summary_ops_v2.py b/tensorflow/python/ops/summary_ops_v2.py
index a95af01fa31..6f4472dbc09 100644
--- a/tensorflow/python/ops/summary_ops_v2.py
+++ b/tensorflow/python/ops/summary_ops_v2.py
@@ -1107,7 +1107,7 @@ def keras_model(name, data, step=None):
 
   Writing the Keras model configuration allows the TensorBoard graph plugin to
   render a conceptual graph, as opposed to graph of ops. In case the model fails
-  to serialze as JSON, it ignores and returns False.
+  to serialize as JSON, it ignores and returns False.
 
   Args:
     name: A name for this summary. The summary tag used for TensorBoard will be
diff --git a/tensorflow/python/ops/template.py b/tensorflow/python/ops/template.py
index ff5919a8d50..780146ed0d2 100644
--- a/tensorflow/python/ops/template.py
+++ b/tensorflow/python/ops/template.py
@@ -215,7 +215,7 @@ def make_template_internal(name_,
 
   if context.executing_eagerly():
     if unique_name_ is not None:
       raise ValueError(
-          "unique_name_ cannot be used when eager exeuction is enabled.")
+          "unique_name_ cannot be used when eager execution is enabled.")
     return EagerTemplate(
         name_,
         func_,
diff --git a/tensorflow/python/ops/tensor_array_grad.py b/tensorflow/python/ops/tensor_array_grad.py
index 0ba452fa71b..b0549041466 100644
--- a/tensorflow/python/ops/tensor_array_grad.py
+++ b/tensorflow/python/ops/tensor_array_grad.py
@@ -44,7 +44,7 @@ def _GetGradSource(op_or_tensor):
 
   TensorArray gradient calls use an accumulator TensorArray object.  If
   multiple gradients are calculated and run in the same session, the multiple
-  gradient nodes may accidentally flow throuth the same accumulator TensorArray.
+  gradient nodes may accidentally flow through the same accumulator TensorArray.
   This double counting breaks the TensorArray gradient flow.
 
   The solution is to identify which gradient call this particular
diff --git a/tensorflow/python/ops/while_v2.py b/tensorflow/python/ops/while_v2.py
index d1ce8925839..4b8f45e2617 100644
--- a/tensorflow/python/ops/while_v2.py
+++ b/tensorflow/python/ops/while_v2.py
@@ -370,7 +370,7 @@ def _WhileGrad(op, *grads):  # pylint: disable=invalid-name
       [t.shape for t in new_outputs])
   _copy_handle_data(new_outputs, op.outputs[orig_num_params:])
 
-  # Do not ingore grads wrt extra outputs when computing higher order
+  # Do not ignore grads wrt extra outputs when computing higher order
   # derivatives.
   while_op._set_attr("_num_original_outputs",
                      attr_value_pb2.AttrValue(i=len(while_op.outputs)))
diff --git a/tensorflow/python/ops/while_v2_indexed_slices_rewriter.py b/tensorflow/python/ops/while_v2_indexed_slices_rewriter.py
index 8695b6d79c4..9637ee174d7 100644
--- a/tensorflow/python/ops/while_v2_indexed_slices_rewriter.py
+++ b/tensorflow/python/ops/while_v2_indexed_slices_rewriter.py
@@ -114,7 +114,7 @@ def _rewrite_input_as_indexed_slices(body_grad_graph, grad_output_slices,
   Args:
     body_grad_graph: _WhileBodyGradFuncGraph.
     grad_output_slices: IndexedSlices output of body_grad_graph.
-    forward_input: the corresonding Tensor input to the forward loop.
+    forward_input: the corresponding Tensor input to the forward loop.
     loop_vars: list of Tensors. The inputs to body_grad_graph.
 
   Returns:
@@ -161,7 +161,7 @@ def _create_grad_indexed_slices_init(grad_output_slices, forward_input):
   Args:
     grad_output_slices: IndexedSlices. The corresponding while grad function
       output.
-    forward_input: Tensor. The corresonding input to the forward while op.
+    forward_input: Tensor. The corresponding input to the forward while op.
 
   Returns:
     Zeros IndexedSlices, created in current Graph.
@@ -202,7 +202,7 @@ def _create_grad_indexed_slices_init(grad_output_slices, forward_input):
 
 
 def _rewrite_grad_indexed_slices_output(old_output_slices, new_input_slices):
-  """Creates a new verson of old_output_slices with new_input_slices as input.
+  """Creates a new version of old_output_slices with new_input_slices as input.
 
   This method assumes that old_output_slices.{values,indices} are produced by
   concatenating the incoming gradient Tensor input with the IndexedSlices
@@ -252,7 +252,7 @@ def _update_indexed_slices_param(graph, loop_vars, init_slices, input_slices,
     input_slices: the new IndexedSlices in graph that should be fed by
       init_slices.
     output_slices: the new IndexedSlices in graph that should be the
-      corresonding output to input_slices.
+      corresponding output to input_slices.
     old_output_slices: the IndexedSlices in graph that are currently being
       output.
diff --git a/tensorflow/python/profiler/internal/flops_registry.py b/tensorflow/python/profiler/internal/flops_registry.py
index d859c4853d4..356fa67a97f 100644
--- a/tensorflow/python/profiler/internal/flops_registry.py
+++ b/tensorflow/python/profiler/internal/flops_registry.py
@@ -124,7 +124,7 @@ def _l2_loss_flops(graph, node):
 @ops.RegisterStatistics("Softmax", "flops")
 def _softmax_flops(graph, node):
   """Compute flops for Softmax operation."""
-  # Softmax implenetation:
+  # Softmax implementation:
   #
   # Approximate flops breakdown:
   #   2*n -- compute shifted logits
@@ -313,7 +313,7 @@ def _pool_flops(graph, node):
   #   - padding
   #   - data_format
   #
-  # Pooling implenetation:
+  # Pooling implementation:
   out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
   out_shape.assert_is_fully_defined()
   kernel_shape = list(node.attr["ksize"].list.i)
diff --git a/tensorflow/python/profiler/model_analyzer.py b/tensorflow/python/profiler/model_analyzer.py
index aa876e6dafb..a62930af9c7 100644
--- a/tensorflow/python/profiler/model_analyzer.py
+++ b/tensorflow/python/profiler/model_analyzer.py
@@ -282,7 +282,7 @@ class Profiler(object):
     Args:
       options: A dict of options. See ALL_ADVICE example above.
     Returns:
-      A Advise proto that conains the reports from all checkers.
+      An Advise proto that contains the reports from all checkers.
     """
     advise_pb = tfprof_output_pb2.AdviceProto()
     opts = _build_advisor_options(options)
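For orientation, a usage sketch of the `advise` API whose docstring is fixed above, following the `ALL_ADVICE` example that docstring refers to (internal module paths; treat the details as illustrative):

```python
import tensorflow.compat.v1 as tf
from tensorflow.python.profiler import model_analyzer

graph = tf.Graph()
with graph.as_default():
    a = tf.Variable(tf.ones([100, 100]), name="a")
    b = tf.matmul(a, a, name="b")

profiler = model_analyzer.Profiler(graph)
advice = profiler.advise(model_analyzer.ALL_ADVICE)  # AdviceProto, one report per checker
```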
""" advise_pb = tfprof_output_pb2.AdviceProto() opts = _build_advisor_options(options) diff --git a/tensorflow/python/profiler/model_analyzer_test.py b/tensorflow/python/profiler/model_analyzer_test.py index aa0e8539246..64a2322d7b4 100644 --- a/tensorflow/python/profiler/model_analyzer_test.py +++ b/tensorflow/python/profiler/model_analyzer_test.py @@ -236,7 +236,7 @@ class PrintModelAnalysisTest(test.TestCase): self.assertLess(0, tfprof_node.total_exec_micros) self.assertEqual(2844, tfprof_node.total_parameters) - #The graph is modifed when MKL is enabled,total_float_ops will + #The graph is modified when MKL is enabled,total_float_ops will #be different if test_util.IsMklEnabled(): self.assertLess(101600, tfprof_node.total_float_ops) diff --git a/tensorflow/python/saved_model/builder_impl.py b/tensorflow/python/saved_model/builder_impl.py index c8668f2bd22..46b1fb57de2 100644 --- a/tensorflow/python/saved_model/builder_impl.py +++ b/tensorflow/python/saved_model/builder_impl.py @@ -189,7 +189,7 @@ class _SavedModelBuilder(object): Validation of entries in the signature def map includes ensuring that the `name` and `dtype` fields of the TensorInfo protos of the `inputs` and `outputs` of each `SignatureDef` are populated. Also ensures that reserved - SigantureDef keys for the initialization and train ops are not used. + SignatureDef keys for the initialization and train ops are not used. Args: signature_def_map: The map of signature defs to be validated. diff --git a/tensorflow/python/saved_model/function_deserialization.py b/tensorflow/python/saved_model/function_deserialization.py index 0a5ee9d04d5..2d3358c443d 100644 --- a/tensorflow/python/saved_model/function_deserialization.py +++ b/tensorflow/python/saved_model/function_deserialization.py @@ -78,7 +78,7 @@ def _call_concrete_function(function, inputs): def _try_convert_to_tensor_spec(arg, dtype_hint): """Returns None or TensorSpec obtained if `arg` is converted to tensor.""" try: - # Note: try conversion in a FuncGraph to avoid poluting current context. + # Note: try conversion in a FuncGraph to avoid polluting current context. with func_graph_lib.FuncGraph(name="guess_conversion").as_default(): result = ops.convert_to_tensor(arg, dtype_hint=dtype_hint) return tensor_spec.TensorSpec(shape=result.shape, dtype=result.dtype) @@ -446,7 +446,7 @@ def _list_function_deps(fdef, library_function_names): return deps -_FUNCTION_WARPPER_NAME_REGEX = r"^%s(.*)_\d+$" % ( +_FUNCTION_WRAPPER_NAME_REGEX = r"^%s(.*)_\d+$" % ( function_lib._INFERENCE_PREFIX) # pylint:disable=protected-access @@ -454,7 +454,7 @@ def _clean_function_name(name): """Vanity function to keep the function names comprehensible.""" # Note: each time a function is wrapped into `function_lib.ConcreteFunction` # its name becomes "__inference__xyz". 
diff --git a/tensorflow/python/saved_model/load_test.py b/tensorflow/python/saved_model/load_test.py
index 3168ab5354d..40f67bd4b5d 100644
--- a/tensorflow/python/saved_model/load_test.py
+++ b/tensorflow/python/saved_model/load_test.py
@@ -1340,7 +1340,7 @@ class LoadTest(test.TestCase, parameterized.TestCase):
       self.assertAllEqual([0, -1, -1, 2], sess.run(output1))
       self.assertAllEqual([2, 0, 1, -1], sess.run(output2))
 
-  def test_perserve_argspec(self, cycles):
+  def test_preserve_argspec(self, cycles):
 
     def f(a, b, c):  # pylint: disable=unused-argument
       return None
diff --git a/tensorflow/python/saved_model/model_utils/export_test.py b/tensorflow/python/saved_model/model_utils/export_test.py
index c5f7f404545..f7c8637080b 100644
--- a/tensorflow/python/saved_model/model_utils/export_test.py
+++ b/tensorflow/python/saved_model/model_utils/export_test.py
@@ -110,7 +110,7 @@ class ExportTest(test_util.TensorFlowTestCase):
                 {"some_output_3": output_3})
         # Note that the alternatives 'other:serving_default' and
-        # 'other:head-2' are invalid, because regession and classification
+        # 'other:head-2' are invalid, because regression and classification
         # signatures must take a single string input. Here we verify that
         # these invalid signatures are not included in the export_utils.
     }
@@ -167,7 +167,7 @@ class ExportTest(test_util.TensorFlowTestCase):
                 {"some_output_3": output_3})
         # Note that the alternatives 'other:serving_default' and 'other:head-2'
-        # are invalid, because regession and classification signatures must take
+        # are invalid, because regression and classification signatures must take
         # a single string input. Here we verify that these invalid signatures
         # are not included in the export_utils.
     }
diff --git a/tensorflow/python/saved_model/utils_impl.py b/tensorflow/python/saved_model/utils_impl.py
index 70e507a3c13..45dfca5617a 100644
--- a/tensorflow/python/saved_model/utils_impl.py
+++ b/tensorflow/python/saved_model/utils_impl.py
@@ -110,14 +110,14 @@ def build_tensor_info_from_op(op):
   is for the Op of the call site for the defunned function:
   ```python
     @function.defun
-    def some_vairable_initialiation_fn(value_a, value_b):
+    def some_variable_initialization_fn(value_a, value_b):
       a = value_a
       b = value_b
 
     value_a = constant_op.constant(1, name="a")
     value_b = constant_op.constant(2, name="b")
     op_info = utils.build_op_info(
-        some_vairable_initialiation_fn(value_a, value_b))
+        some_variable_initialization_fn(value_a, value_b))
   ```
 
   Args:
diff --git a/tensorflow/python/tools/freeze_graph.py b/tensorflow/python/tools/freeze_graph.py
index 9ffc98f1743..f1372f612c0 100644
--- a/tensorflow/python/tools/freeze_graph.py
+++ b/tensorflow/python/tools/freeze_graph.py
@@ -166,7 +166,7 @@ def freeze_graph_with_def_protos(input_graph_def,
 
       # List of all partition variables. Because the condition is heuristic
       # based, the list could include false positives.
-      all_parition_variable_names = [
+      all_partition_variable_names = [
          tensor.name.split(":")[0]
          for op in sess.graph.get_operations()
          for tensor in op.values()
@@ -177,7 +177,7 @@ def freeze_graph_with_def_protos(input_graph_def,
       for key in var_to_shape_map:
         try:
           tensor = sess.graph.get_tensor_by_name(key + ":0")
-          if any(key in name for name in all_parition_variable_names):
+          if any(key in name for name in all_partition_variable_names):
             has_partition_var = True
         except KeyError:
           # This tensor doesn't exist in the graph (for example it's
diff --git a/tensorflow/python/tools/saved_model_cli.py b/tensorflow/python/tools/saved_model_cli.py
index 01494bbf04f..e7641198955 100644
--- a/tensorflow/python/tools/saved_model_cli.py
+++ b/tensorflow/python/tools/saved_model_cli.py
@@ -713,7 +713,7 @@ def show(args):
   if args.all:
     _show_all(args.dir)
   else:
-    # If no tag is specified, display all tag_set, if no signaure_def key is
+    # If no tag is specified, display all tag_set, if no signature_def key is
     # specified, display all SignatureDef keys, else show input output tensor
     # information corresponding to the given SignatureDef key
     if args.tag_set is None:
diff --git a/tensorflow/python/tpu/bfloat16_test.py b/tensorflow/python/tpu/bfloat16_test.py
index e087dda3799..993e1458ab9 100644
--- a/tensorflow/python/tpu/bfloat16_test.py
+++ b/tensorflow/python/tpu/bfloat16_test.py
@@ -29,7 +29,7 @@ from tensorflow.python.tpu import bfloat16
 class BFloat16ScopeTest(test.TestCase):
 
   def testScopeName(self):
-    """Test if name for the variable scope is propogated correctly.
+    """Test if name for the variable scope is propagated correctly.
     """
     with bfloat16.bfloat16_scope() as bf:
       self.assertEqual(bf.name, "")
diff --git a/tensorflow/python/tpu/feature_column.py b/tensorflow/python/tpu/feature_column.py
index bd79fbb4464..5f9535dc3c6 100644
--- a/tensorflow/python/tpu/feature_column.py
+++ b/tensorflow/python/tpu/feature_column.py
@@ -148,7 +148,7 @@ def embedding_column(categorical_column,
       learning_rate_fn=learning_rate_fn,
       use_safe_embedding_lookup=use_safe_embedding_lookup)
   # For Embedding column, the initializer is hidden inside the creator Fn, which
-  # is not accessiable later. So, we attach it to a speicial field. Also note
+  # is not accessible later. So, we attach it to a special field. Also note
   # that non-TPU Embedding column and non-TPU shared Embedding column handle the
   # initializer differently. See shared_embedding_columns for details.
   column._tpu_initializer = initializer
diff --git a/tensorflow/python/tpu/ops/tpu_ops.py b/tensorflow/python/tpu/ops/tpu_ops.py
index 9264437b41e..c1ea3641757 100644
--- a/tensorflow/python/tpu/ops/tpu_ops.py
+++ b/tensorflow/python/tpu/ops/tpu_ops.py
@@ -79,7 +79,7 @@ def all_to_all(x,
 def _all_to_all_grad(op, grad):
   # The gradient of a all-to-all is also a all-to-all but the
   # split_dimension and concat_dimension is swapped.
-  # The graident with respect to group_assignment is None.
+  # The gradient with respect to group_assignment is None.
   return [
       gen_tpu_ops.all_to_all(
           grad,
diff --git a/tensorflow/python/tpu/session_support.py b/tensorflow/python/tpu/session_support.py
index e90f5043c82..2b4abfde447 100644
--- a/tensorflow/python/tpu/session_support.py
+++ b/tensorflow/python/tpu/session_support.py
@@ -358,7 +358,7 @@ class GracefulShutdownHook(session_run_hook.SessionRunHook):
         self._heartbeat_supported = False
     else:
       logging.warn(
-          'No workers support hearbeats. Failure handling will be disabled.')
+          'No workers support heartbeats. Failure handling will be disabled.')
 
   def saver(self):
     if self._saver:
diff --git a/tensorflow/python/tpu/tensor_tracer.py b/tensorflow/python/tpu/tensor_tracer.py
index 01294283a36..87a2309cedf 100644
--- a/tensorflow/python/tpu/tensor_tracer.py
+++ b/tensorflow/python/tpu/tensor_tracer.py
@@ -112,7 +112,7 @@ def op_priority(op_type):
   """
   if op_type in ('Const', 'Shape', 'BroadcastGradientArgs', 'Range',
                  'VariableShape', 'Fill', 'OneHot', 'ShapeN'):
-    # Lowest priority ops, e.g., constant ops accross different steps,
+    # Lowest priority ops, e.g., constant ops across different steps,
     # They will be traced only if trace_level>=7
     return 7
 
@@ -566,7 +566,7 @@ class TensorTracer(object):
       Cache update operation.
     """
     # state_ops.scatter_update allows updates only along the first dimension.
-    # Make a compact array by concantating different signatures, and update
+    # Make a compact array by concatenating different signatures, and update
     # them all together.
     sorted_update = []
     if self._num_signature_dimensions() > 1:
diff --git a/tensorflow/python/tpu/tensor_tracer_flags.py b/tensorflow/python/tpu/tensor_tracer_flags.py
index 57e54b1fb42..37f8ce408b1 100644
--- a/tensorflow/python/tpu/tensor_tracer_flags.py
+++ b/tensorflow/python/tpu/tensor_tracer_flags.py
@@ -315,7 +315,7 @@ class TTParameters(object):
     return {signature: idx for idx, signature in enumerate(tt_signatures)}
 
   def get_signature_to_agg_fn_map(self):
-    """Returns a map that contains the aggragate function for each signature."""
+    """Returns a map that contains the aggregate function for each signature."""
     return {TT_SUMMARY_NORM: linalg_ops.norm,
             TT_SUMMARY_MAX: math_ops.reduce_max,
             TT_SUMMARY_MIN: math_ops.reduce_min,
diff --git a/tensorflow/python/tpu/tpu.py b/tensorflow/python/tpu/tpu.py
index 3db5a31820d..0ae7926552b 100644
--- a/tensorflow/python/tpu/tpu.py
+++ b/tensorflow/python/tpu/tpu.py
@@ -1699,7 +1699,7 @@ def rewrite(computation,
     inputs: A list of input tensors or `None` (equivalent to an empty list).
       Each input can be a nested structure containing values that are
       convertible to tensors. Note that passing an N-dimension list of
-      compatible values will result in a N-dimention list of scalar tensors
+      compatible values will result in a N-dimension list of scalar tensors
      rather than a single Rank-N tensors. If you need different behavior,
      convert part of inputs to tensors with `tf.convert_to_tensor`.
    infeed_queue: If not `None`, the `InfeedQueue` from which to append a tuple
diff --git a/tensorflow/python/tpu/tpu_embedding.py b/tensorflow/python/tpu/tpu_embedding.py
index c4ae6c9bb46..a4ca0765f08 100644
--- a/tensorflow/python/tpu/tpu_embedding.py
+++ b/tensorflow/python/tpu/tpu_embedding.py
@@ -1080,13 +1080,13 @@ class TPUEmbedding(object):
               enqueue_data.embedding_indices.device):
             raise ValueError(
                 'Device of sample_indices does not agree with '
-                'that of emebdding_indices for feature {}.'.format(feature))
+                'that of embedding_indices for feature {}.'.format(feature))
          if (enqueue_data.aggregation_weights is not None and
              enqueue_data.aggregation_weights.device !=
              enqueue_data.embedding_indices.device):
            raise ValueError(
                'Device of aggregation_weights does not agree with '
-                'that of emebdding_indices for feature {}.'.format(feature))
+                'that of embedding_indices for feature {}.'.format(feature))
        # Check all features are on the same device.
        if device is None:
          device = enqueue_data.embedding_indices.device
@@ -1279,7 +1279,7 @@ def _validate_batch_size(batch_size, num_cores):
 def _validate_optimization_parameters(optimization_parameters):
   if not isinstance(optimization_parameters, _OptimizationParameters):
     raise ValueError('`optimization_parameters` must inherit from '
-                     '`_OptimizationPramaters`. '
+                     '`_OptimizationParameters`. '
                     '`type(optimization_parameters)`={}'.format(
                         type(optimization_parameters)))
 
@@ -1727,7 +1727,7 @@ def _create_partitioned_variables(name,
                                   embedding_dimension,
                                   initializer,
                                   collections=None):  # pylint: disable=redefined-outer-name
-  """Creates ParitionedVariables based on `num_hosts` for `table`."""
+  """Creates PartitionedVariables based on `num_hosts` for `table`."""
 
   num_slices = min(vocabulary_size, num_hosts)
 
diff --git a/tensorflow/python/tpu/tpu_embedding_gradient.py b/tensorflow/python/tpu/tpu_embedding_gradient.py
index 99e680406db..2e259bcab70 100644
--- a/tensorflow/python/tpu/tpu_embedding_gradient.py
+++ b/tensorflow/python/tpu/tpu_embedding_gradient.py
@@ -54,7 +54,7 @@ def create_dummy_table_variables(tpu_embedding):
   """Create dummy embedding table variables.
 
   The sole purpose of these dummy variables are to trigger gradient
-  calcuation wrt them so that the gradients wrt activation can be captured
+  calculation wrt them so that the gradients wrt activation can be captured
   and later sent to TPU embedding.
 
   Args:
@@ -159,7 +159,7 @@ def get_gradients_through_dummy_table_variables(tpu_embedding):
           'are sent back to TPUEmbedding instead. Gradients of zeros and no '
           'gradients are equivalent for SGD, AdaGrad, FTRL, momentum, etc, but '
           'might differ for other optimizers due to implementation of tpu '
-          'embedding optimziers.'
+          'embedding optimizers.'
           .format(table, table_id))
     for feature, gradient in zip(tpu_embedding.table_to_features_dict[table],
                                  table_gradients):
diff --git a/tensorflow/python/tpu/tpu_system_metadata.py b/tensorflow/python/tpu/tpu_system_metadata.py
index cc03f3e72dd..9a5bb0b8e28 100644
--- a/tensorflow/python/tpu/tpu_system_metadata.py
+++ b/tensorflow/python/tpu/tpu_system_metadata.py
@@ -177,7 +177,7 @@ def get_session_config_with_timeout(timeout_in_secs, cluster_def):
 
 
 def master_job(master, cluster_def):
-  """Returns the canonnical job name to use to place TPU computations on.
+  """Returns the canonical job name to use to place TPU computations on.
 
   Args:
     master: A `string` representing the TensorFlow master to use.
diff --git a/tensorflow/python/training/checkpoint_management.py b/tensorflow/python/training/checkpoint_management.py
index 338b5fea5f6..5a00e8f56c8 100644
--- a/tensorflow/python/training/checkpoint_management.py
+++ b/tensorflow/python/training/checkpoint_management.py
@@ -589,7 +589,7 @@ class CheckpointManager(object):
     self._maybe_delete = collections.OrderedDict()
     if recovered_state is None:
       self._latest_checkpoint = None
-      # Set the clock back slightly to avoid race conditions when quckly
+      # Set the clock back slightly to avoid race conditions when quickly
       # re-creating a CheckpointManager.
       self._last_preserved_timestamp = current_clock - 1.
     else:
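For context, a minimal usage sketch of the `CheckpointManager` touched above (public API; the directory is illustrative):

```python
import tensorflow as tf

ckpt = tf.train.Checkpoint(step=tf.Variable(0))
manager = tf.train.CheckpointManager(ckpt, directory="/tmp/ckpts", max_to_keep=3)

ckpt.step.assign_add(1)
path = manager.save()  # writes a checkpoint and prunes older ones
print(path, manager.latest_checkpoint)
```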
diff --git a/tensorflow/python/training/experimental/loss_scale_optimizer.py b/tensorflow/python/training/experimental/loss_scale_optimizer.py
index ae8d1e8c788..15a1dd565ed 100644
--- a/tensorflow/python/training/experimental/loss_scale_optimizer.py
+++ b/tensorflow/python/training/experimental/loss_scale_optimizer.py
@@ -88,7 +88,7 @@ class MixedPrecisionLossScaleOptimizer(optimizer.Optimizer):
 
   This adjusts the dynamic range of the gradient evaluation by scaling up
   the `loss` value. The gradient values are then scaled back down by the
-  recipricol of the loss scale. This is useful in reduced precision training
+  reciprocal of the loss scale. This is useful in reduced precision training
   where small gradient values would otherwise underflow the representable
   range.
 
@@ -137,17 +137,17 @@ class MixedPrecisionLossScaleOptimizer(optimizer.Optimizer):
 
   def _unscale_grads(self, grads):
     loss_scale = self._loss_scale()
-    loss_scale_reciprical = 1 / loss_scale
+    loss_scale_reciprocal = 1 / loss_scale
     return [
-        None if g is None else self._scale_grad(g, loss_scale_reciprical)
+        None if g is None else self._scale_grad(g, loss_scale_reciprocal)
         for g in grads
     ]
 
-  def _scale_grad(self, grad, loss_scale_reciprical):
+  def _scale_grad(self, grad, loss_scale_reciprocal):
     if isinstance(grad, ops.IndexedSlices):
-      grad_vals = grad.values * loss_scale_reciprical
+      grad_vals = grad.values * loss_scale_reciprocal
       return ops.IndexedSlices(grad_vals, grad.indices, grad.dense_shape)
-    return grad * loss_scale_reciprical
+    return grad * loss_scale_reciprocal
 
   def apply_gradients(self, grads_and_vars, global_step=None, name=None):
     """Apply gradients to variables.
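The scale-then-unscale round trip that those hunks rename can be verified numerically; a NumPy sketch (illustrative only):

```python
import numpy as np

loss_scale = 1024.0
true_grad = np.float32(1e-6)

# Backprop sees d(loss * scale)/dw == grad * scale ...
scaled_grad = true_grad * loss_scale
# ... and the optimizer multiplies by the reciprocal before applying.
unscaled = scaled_grad * (1.0 / loss_scale)

assert np.isclose(unscaled, true_grad)
```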
diff --git a/tensorflow/python/training/monitored_session_test.py b/tensorflow/python/training/monitored_session_test.py
index ee4105299ef..a78674a3d7c 100644
--- a/tensorflow/python/training/monitored_session_test.py
+++ b/tensorflow/python/training/monitored_session_test.py
@@ -2011,7 +2011,7 @@ class MonitoredSessionTest(test.TestCase):
     with ops.Graph().as_default():
       var = resource_variable_ops.ResourceVariable(0.0)
 
-      # This test higlights the interaction of hooks with
+      # This test highlights the interaction of hooks with
       # `Monitoredsession.run_step_fn`.  The order of execution of operations
       # below is:
       # 0.  stage_0
@@ -2025,7 +2025,7 @@ class MonitoredSessionTest(test.TestCase):
       # are complete. To obtain a consistent result of adding two different
       # constants to `var`, we rely on a control dependency and
       # `ResourceVariable`. Otherwise, it is possible that one of the
-      # additions overwites the result of the other addition.
+      # additions overwrites the result of the other addition.
       with ops.control_dependencies([stage_1_0]):
         stage_1_1 = state_ops.assign_add(var, 0.5)
       stage_2 = state_ops.assign_add(var, 1.1)
diff --git a/tensorflow/python/training/optimizer_test.py b/tensorflow/python/training/optimizer_test.py
index ac831cb6422..5775d0b8091 100644
--- a/tensorflow/python/training/optimizer_test.py
+++ b/tensorflow/python/training/optimizer_test.py
@@ -202,7 +202,7 @@ class OptimizerTest(test.TestCase):
       ]
 
       self.evaluate(variables.global_variables_initializer())
-      # Run convert_ops to achieve the gradietns converting
+      # Run convert_ops to achieve the gradients converting
      self.evaluate(convert_ops)
      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
diff --git a/tensorflow/python/training/saver.py b/tensorflow/python/training/saver.py
index bce270db8a5..4defeee3444 100644
--- a/tensorflow/python/training/saver.py
+++ b/tensorflow/python/training/saver.py
@@ -1654,7 +1654,7 @@ def saver_from_object_based_checkpoint(checkpoint_path,
       `var_list` will be set to all saveable objects.
     builder: a `BaseSaverBuilder` instance. If `None`, a new `BulkSaverBuilder`
       will be created.
-    names_to_keys: dict mapping string tensor names to checkpooint keys. If
+    names_to_keys: dict mapping string tensor names to checkpoint keys. If
       `None`, this dict will be generated from the checkpoint file.
     cached_saver: Cached `Saver` object with remapped variables.
 
diff --git a/tensorflow/python/training/saver_test.py b/tensorflow/python/training/saver_test.py
index 75698ea3e19..2c8bdadd5d7 100644
--- a/tensorflow/python/training/saver_test.py
+++ b/tensorflow/python/training/saver_test.py
@@ -2865,7 +2865,7 @@ class ScopedGraphTest(test.TestCase):
       self.assertAllClose(expected, sess.run("hidden1/relu:0"))
       self.assertAllClose(expected, sess.run("hidden2/relu:0"))
 
-    # Verifies copy to differen graph.
+    # Verifies copy to different graph.
     graph2 = ops_lib.Graph()
     new_var_list_1 = meta_graph.copy_scoped_meta_graph(
         from_scope="hidden1",
diff --git a/tensorflow/python/training/sync_replicas_optimizer.py b/tensorflow/python/training/sync_replicas_optimizer.py
index 3b2d8629aea..d7c94ba27b3 100644
--- a/tensorflow/python/training/sync_replicas_optimizer.py
+++ b/tensorflow/python/training/sync_replicas_optimizer.py
@@ -45,7 +45,7 @@ from tensorflow.python.util.tf_export import tf_export
 class SyncReplicasOptimizer(optimizer.Optimizer):
   """Class to synchronize, aggregate gradients and pass them to the optimizer.
 
-  This class is deprecated. For synchrononous training, please use [Distribution
+  This class is deprecated. For synchronous training, please use [Distribution
   Strategies](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/distribute).
 
   In a typical asynchronous training environment, it's common to have some
@@ -146,7 +146,7 @@ class SyncReplicasOptimizer(optimizer.Optimizer):
 
   @deprecation.deprecated(
       None,
-      "The `SyncReplicaOptimizer` class is deprecated. For synchrononous "
+      "The `SyncReplicaOptimizer` class is deprecated. For synchronous "
      "training, please use [Distribution Strategies](https://github.com/"
      "tensorflow/tensorflow/tree/master/tensorflow/contrib/distribute).",
      warn_once=True)
diff --git a/tensorflow/python/training/sync_replicas_optimizer_test.py b/tensorflow/python/training/sync_replicas_optimizer_test.py
index 428583d048a..03c07173252 100644
--- a/tensorflow/python/training/sync_replicas_optimizer_test.py
+++ b/tensorflow/python/training/sync_replicas_optimizer_test.py
@@ -197,7 +197,7 @@ class SyncReplicasOptimizerTest(test.TestCase):
     local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0")
     global_step = graphs[1].get_tensor_by_name("global_step:0")
 
-    # The steps should also be initilized.
+    # The steps should also be initialized.
     self.assertAllEqual(0, sessions[1].run(global_step))
     self.assertAllEqual(0, sessions[1].run(local_step_1))
 
diff --git a/tensorflow/python/training/tracking/tracking_test.py b/tensorflow/python/training/tracking/tracking_test.py
index 90e6c6cbd53..7fcd27f5624 100644
--- a/tensorflow/python/training/tracking/tracking_test.py
+++ b/tensorflow/python/training/tracking/tracking_test.py
@@ -313,7 +313,7 @@ class InterfaceTests(test.TestCase):
 
     # Note(taylorrobie): The reason that it is safe to time a unit test is that
     #                    a cache hit will be << 1 second, and a cache miss is
-    #                    guaranteed to be >= 1 second. Emperically confirmed by
+    #                    guaranteed to be >= 1 second. Empirically confirmed by
     #                    100,000 runs with no flakes.
     self.assertLess(total_time, 0.95)
 
diff --git a/tensorflow/python/training/tracking/util_with_v1_optimizers_test.py b/tensorflow/python/training/tracking/util_with_v1_optimizers_test.py
index f469e321c3a..653698f4e85 100644
--- a/tensorflow/python/training/tracking/util_with_v1_optimizers_test.py
+++ b/tensorflow/python/training/tracking/util_with_v1_optimizers_test.py
@@ -219,7 +219,7 @@ class CheckpointingTests(test.TestCase):
     on_create_model = MyModel()
     on_create_optimizer = adam.AdamOptimizer(
         0.001,
-        # Preserve beta1_power and beta2_power when appying gradients so we can
+        # Preserve beta1_power and beta2_power when applying gradients so we can
         # test that they've been restored correctly.
         beta1=1.0,
         beta2=1.0)
     on_create_root = trackable_utils.Checkpoint(
diff --git a/tensorflow/python/util/example_parser_configuration.py b/tensorflow/python/util/example_parser_configuration.py
index e7a5ae59ee4..25eea32e50c 100644
--- a/tensorflow/python/util/example_parser_configuration.py
+++ b/tensorflow/python/util/example_parser_configuration.py
@@ -40,7 +40,7 @@ def extract_example_parser_configuration(parse_example_op, sess):
   elif parse_example_op.type == "ParseExampleV2":
     return _extract_from_parse_example_v2(parse_example_op, sess)
   else:
-    raise ValueError("Unexpeected op type: %s" % parse_example_op.type)
+    raise ValueError("Unexpected op type: %s" % parse_example_op.type)
 
 
 def _extract_from_parse_example(parse_example_op, sess):
diff --git a/tensorflow/python/util/tf_inspect.py b/tensorflow/python/util/tf_inspect.py
index ee2df3cb21b..8f1b668539a 100644
--- a/tensorflow/python/util/tf_inspect.py
+++ b/tensorflow/python/util/tf_inspect.py
@@ -151,7 +151,7 @@ def _get_argspec_for_partial(obj):
   """Implements `getargspec` for `functools.partial` objects.
 
   Args:
-    obj: The `functools.partial` obeject
+    obj: The `functools.partial` object
   Returns:
     An `inspect.ArgSpec`
   Raises:
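What resolving an argspec for a `functools.partial` involves, shown with the standard library (independent of the TF helper):

```python
import functools
import inspect

def f(a, b, c=3):
    return a + b + c

partial_f = functools.partial(f, 1)   # binds `a` positionally
print(list(inspect.signature(partial_f).parameters))  # ['b', 'c']
```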
diff --git a/tensorflow/python/util/util.h b/tensorflow/python/util/util.h
index 7cd4b0cb495..154bdc1ba37 100644
--- a/tensorflow/python/util/util.h
+++ b/tensorflow/python/util/util.h
@@ -177,7 +177,7 @@ PyObject* SameNamedtuples(PyObject* o1, PyObject* o2);
 //
 // Note that namedtuples with identical name and fields are always considered
 // to have the same shallow structure (even with `check_types=True`).
-// For intance, this code will print `True`:
+// For instance, this code will print `True`:
 //
 // ```python
 // def nt(a, b):