From c55943eb30056dc94cf22be6a83fd71c47f541c4 Mon Sep 17 00:00:00 2001
From: mshr-h <mshr-h@users.noreply.github.com>
Date: Wed, 29 Apr 2020 21:10:09 +0900
Subject: [PATCH] Fix misspelling

---
 .../lite/tools/benchmark/benchmark_performance_options.cc | 4 ++--
 tensorflow/lite/tools/delegates/external_delegate_provider.cc | 2 +-
 .../lite/tools/evaluation/evaluation_delegate_provider.h | 2 +-
 .../tools/evaluation/evaluation_delegate_provider_test.cc | 2 +-
 tensorflow/lite/tools/make/Makefile | 2 +-
 tensorflow/lite/tools/optimize/operator_property.h | 2 +-
 .../lite/tools/optimize/python/modify_model_interface_lib.py | 2 +-
 tensorflow/lite/tools/versioning/op_version.cc | 2 +-
 8 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/tensorflow/lite/tools/benchmark/benchmark_performance_options.cc b/tensorflow/lite/tools/benchmark/benchmark_performance_options.cc
index 26fed5e279f..cafef6fa133 100644
--- a/tensorflow/lite/tools/benchmark/benchmark_performance_options.cc
+++ b/tensorflow/lite/tools/benchmark/benchmark_performance_options.cc
@@ -334,7 +334,7 @@ void BenchmarkPerformanceOptions::Run() {
   // profiling listener etc. in each Run() invoke because such listeners may be
   // reset and become invalid in the next Run(). As a result, we record the
   // number of externally-added listeners here to prevent they're cleared later.
-  const int num_external_listners = single_option_run_->NumListeners();
+  const int num_external_listeners = single_option_run_->NumListeners();
 
   // Now perform all runs, each with different performance-affecting parameters.
   for (const auto& run_params : all_run_params_) {
@@ -349,7 +349,7 @@ void BenchmarkPerformanceOptions::Run() {
 
     // Clear internally created listeners before each run but keep externally
     // created ones.
-    single_option_run_->RemoveListeners(num_external_listners);
+    single_option_run_->RemoveListeners(num_external_listeners);
 
     all_run_stats_->MarkBenchmarkStart(*single_option_run_params_);
     single_option_run_->Run();
diff --git a/tensorflow/lite/tools/delegates/external_delegate_provider.cc b/tensorflow/lite/tools/delegates/external_delegate_provider.cc
index 95b0e42802f..193860820b1 100644
--- a/tensorflow/lite/tools/delegates/external_delegate_provider.cc
+++ b/tensorflow/lite/tools/delegates/external_delegate_provider.cc
@@ -119,7 +119,7 @@ std::vector<Flag> ExternalDelegateProvider::CreateFlags(
                               "The library path for the underlying external."),
       CreateFlag<std::string>(
           "external_delegate_options", params,
-          "Comma-seperated options to be passed to the external delegate")};
+          "Comma-separated options to be passed to the external delegate")};
 
   return flags;
 }
diff --git a/tensorflow/lite/tools/evaluation/evaluation_delegate_provider.h b/tensorflow/lite/tools/evaluation/evaluation_delegate_provider.h
index 36f80469a97..9ff20d630ce 100644
--- a/tensorflow/lite/tools/evaluation/evaluation_delegate_provider.h
+++ b/tensorflow/lite/tools/evaluation/evaluation_delegate_provider.h
@@ -33,7 +33,7 @@ class DelegateProviders {
   DelegateProviders();
 
   // Initialize delegate-related parameters from commandline arguments and
-  // returns true if sucessful.
+  // returns true if successful.
   bool InitFromCmdlineArgs(int* argc, const char** argv);
 
   // Get all parameters from all registered delegate providers.
diff --git a/tensorflow/lite/tools/evaluation/evaluation_delegate_provider_test.cc b/tensorflow/lite/tools/evaluation/evaluation_delegate_provider_test.cc
index c2dfa8d0360..5d0a4dfa7d3 100644
--- a/tensorflow/lite/tools/evaluation/evaluation_delegate_provider_test.cc
+++ b/tensorflow/lite/tools/evaluation/evaluation_delegate_provider_test.cc
@@ -66,7 +66,7 @@ TEST(EvaluationDelegateProviderTest, GetAllParamsWithTfliteInferenceParams) {
   TfliteInferenceParams params;
   params.set_delegate(TfliteInferenceParams::NONE);
   params.set_num_threads(4);
-  // The same-meaning parameter in TfliteInferenceParams takes precendence.
+  // The same-meaning parameter in TfliteInferenceParams takes precedence.
   tools::ToolParams tool_params = providers.GetAllParams(params);
   EXPECT_EQ(4, tool_params.Get<int>("num_threads"));
   EXPECT_EQ(1, argc);
diff --git a/tensorflow/lite/tools/make/Makefile b/tensorflow/lite/tools/make/Makefile
index ad3832f9962..41f87fb033d 100644
--- a/tensorflow/lite/tools/make/Makefile
+++ b/tensorflow/lite/tools/make/Makefile
@@ -246,7 +246,7 @@ BENCHMARK_LIB_SRCS := $(filter-out \
 	$(BENCHMARK_ALL_SRCS))
 
 # These target-specific makefiles should modify or replace options like
-# CXXFLAGS or LIBS to work for a specific targetted architecture. All logic
+# CXXFLAGS or LIBS to work for a specific targeted architecture. All logic
 # based on platforms or architectures should happen within these files, to
 # keep this main makefile focused on the sources and dependencies.
 include $(wildcard $(MAKEFILE_DIR)/targets/*_makefile.inc)
diff --git a/tensorflow/lite/tools/optimize/operator_property.h b/tensorflow/lite/tools/optimize/operator_property.h
index 995595e7878..95b0e5000c3 100644
--- a/tensorflow/lite/tools/optimize/operator_property.h
+++ b/tensorflow/lite/tools/optimize/operator_property.h
@@ -86,7 +86,7 @@ struct OperatorProperty {
   bool restrict_same_input_output_scale = false;
 
   // Use same min of min and max of max for each group.
-  // Incompatable with restrict_same_input_output_scale and restricted_value.
+  // Incompatible with restrict_same_input_output_scale and restricted_value.
   // TODO(jianlijianli): make it compatible with other restrictions when there
   // is a use case.
   std::vector<std::vector<int>> restrict_scale = {};
diff --git a/tensorflow/lite/tools/optimize/python/modify_model_interface_lib.py b/tensorflow/lite/tools/optimize/python/modify_model_interface_lib.py
index 5e4bf99ccdf..782d88cbc9b 100644
--- a/tensorflow/lite/tools/optimize/python/modify_model_interface_lib.py
+++ b/tensorflow/lite/tools/optimize/python/modify_model_interface_lib.py
@@ -74,6 +74,6 @@ def modify_model_interface(input_file, output_file, input_type, output_type):
   # Throw an exception if the return status is an error.
   if status != 0:
     raise RuntimeError(
-        'Error occured when trying to modify the model input type from float '
+        'Error occurred when trying to modify the model input type from float '
         'to {input_type} and output type from float to {output_type}.'.format(
             input_type=input_type, output_type=output_type))
diff --git a/tensorflow/lite/tools/versioning/op_version.cc b/tensorflow/lite/tools/versioning/op_version.cc
index 0b892cf847f..e60865b85a8 100644
--- a/tensorflow/lite/tools/versioning/op_version.cc
+++ b/tensorflow/lite/tools/versioning/op_version.cc
@@ -110,7 +110,7 @@ int GetBuiltinOperatorVersion(const OpSignature& op_sig) {
       if (op_sig.input_types.size() == 2) {
        return 6;
      }
-      // `keep_num_dims` is supported at verison 5.
+      // `keep_num_dims` is supported at version 5.
      if (op_sig.options.fully_connected.keep_num_dims) {
        return 5;
      }