From f54b1374af8d1f47508e9b8159751e1b4653229b Mon Sep 17 00:00:00 2001
From: Kazuaki Ishizaki
Date: Thu, 10 Oct 2019 15:38:58 +0900
Subject: [PATCH 1/2] minor spelling tweaks

---
 tensorflow/compiler/mlir/g3doc/tfl_ops.md | 2 +-
 tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td | 2 +-
 .../api_def/base_api/api_def_QuantizeAndDequantizeV2.pbtxt | 2 +-
 tensorflow/core/profiler/g3doc/options.md | 2 +-
 .../core/profiler/g3doc/profile_model_architecture.md | 2 +-
 tensorflow/go/op/wrappers.go | 2 +-
 tensorflow/lite/experimental/examples/lstm/g3doc/README.md | 2 +-
 .../micro/examples/micro_speech/apollo3/README.md | 6 +++---
 .../experimental/micro/tools/make/targets/ecm3531/README.md | 2 +-
 .../lite/g3doc/models/image_classification/android.md | 2 +-
 tensorflow/lite/g3doc/performance/delegates.md | 2 +-
 11 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/tensorflow/compiler/mlir/g3doc/tfl_ops.md b/tensorflow/compiler/mlir/g3doc/tfl_ops.md
index 1ca745e539c..d2ae435cf2e 100644
--- a/tensorflow/compiler/mlir/g3doc/tfl_ops.md
+++ b/tensorflow/compiler/mlir/g3doc/tfl_ops.md
@@ -1217,7 +1217,7 @@ Softmax operator
 
 ### Description:
 
-Computes element-wise softmax activiations with the following formula
+Computes element-wise softmax activations with the following formula
 
   exp(input) / tf.reduce_sum(exp(input * beta), dim)
 
diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td b/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
index 8fc3ce2cca7..a0b201c2768 100644
--- a/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
+++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
@@ -3033,7 +3033,7 @@ This op determines the maximum scale_factor that would map the initial
 quantized range.
 
 It determines the scale from one of input_min and input_max, then updates the
-other one to maximize the respresentable range.
+other one to maximize the representable range.
 
 e.g.
 
diff --git a/tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantizeV2.pbtxt b/tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantizeV2.pbtxt
index eebdcbf93a7..1830e14442c 100644
--- a/tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantizeV2.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_QuantizeAndDequantizeV2.pbtxt
@@ -103,7 +103,7 @@ This op determines the maximum scale_factor that would map the initial
 quantized range.
 
 It determines the scale from one of input_min and input_max, then updates the
-other one to maximize the respresentable range.
+other one to maximize the representable range.
 
 e.g.
 
diff --git a/tensorflow/core/profiler/g3doc/options.md b/tensorflow/core/profiler/g3doc/options.md
index 8c4b45db689..0668813f787 100644
--- a/tensorflow/core/profiler/g3doc/options.md
+++ b/tensorflow/core/profiler/g3doc/options.md
@@ -100,7 +100,7 @@ accelerator_micros and cpu_micros. Note: cpu and accelerator can run in parallel
 
 `-order_by`: Order the results by [name|depth|bytes|peak_bytes|residual_bytes|output_bytes|micros|accelerator_micros|cpu_micros|params|float_ops|occurrence]
 
-`-account_type_regexes`: Account and display the nodes whose types match one of the type regexes specified. tfprof allow user to define extra operation types for graph nodes through tensorflow.tfprof.OpLogProto proto. regexes are comma-sperated.
+`-account_type_regexes`: Account and display the nodes whose types match one of the type regexes specified. tfprof allow user to define extra operation types for graph nodes through tensorflow.tfprof.OpLogProto proto. regexes are comma-separated.
 
 `-start_name_regexes`: Show node starting from the node that matches the regexes, recursively. regexes are comma-separated.
 
diff --git a/tensorflow/core/profiler/g3doc/profile_model_architecture.md b/tensorflow/core/profiler/g3doc/profile_model_architecture.md
index 4ccd43ce683..2fb3d849928 100644
--- a/tensorflow/core/profiler/g3doc/profile_model_architecture.md
+++ b/tensorflow/core/profiler/g3doc/profile_model_architecture.md
@@ -63,7 +63,7 @@ For an operation to have float operation statistics:
     run_count.
 
 ```python
-# To profile float opertions in commandline, you need to pass --graph_path
+# To profile float operations in commandline, you need to pass --graph_path
 # and --op_log_path.
 tfprof> scope -min_float_ops 1 -select float_ops -account_displayed_op_only
 node name | # float_ops
diff --git a/tensorflow/go/op/wrappers.go b/tensorflow/go/op/wrappers.go
index 75a20af6ef3..f1b2f3ee2e7 100644
--- a/tensorflow/go/op/wrappers.go
+++ b/tensorflow/go/op/wrappers.go
@@ -643,7 +643,7 @@ func QuantizeAndDequantizeV2NarrowRange(value bool) QuantizeAndDequantizeV2Attr
 // quantized range.
 //
 // It determines the scale from one of input_min and input_max, then updates the
-// other one to maximize the respresentable range.
+// other one to maximize the representable range.
 //
 // e.g.
 //
diff --git a/tensorflow/lite/experimental/examples/lstm/g3doc/README.md b/tensorflow/lite/experimental/examples/lstm/g3doc/README.md
index 63873fd9752..20179d18a34 100644
--- a/tensorflow/lite/experimental/examples/lstm/g3doc/README.md
+++ b/tensorflow/lite/experimental/examples/lstm/g3doc/README.md
@@ -316,7 +316,7 @@ def run_main(_):
       '--use_post_training_quantize',
       action='store_true',
       default=True,
-      help='Whether or not to use post_training_quatize.')
+      help='Whether or not to use post_training_quantize.')
   parsed_flags, _ = parser.parse_known_args()
   train_and_export(parsed_flags)
 
diff --git a/tensorflow/lite/experimental/micro/examples/micro_speech/apollo3/README.md b/tensorflow/lite/experimental/micro/examples/micro_speech/apollo3/README.md
index 10be9f136a9..be291c86af5 100644
--- a/tensorflow/lite/experimental/micro/examples/micro_speech/apollo3/README.md
+++ b/tensorflow/lite/experimental/micro/examples/micro_speech/apollo3/README.md
@@ -42,7 +42,7 @@
         *   cmsis_power.txt: the magnitude squared of the DFT
         *   cmsis_power_avg.txt: the 6-bin average of the magnitude squared of
             the DFT
-    *   Run both verisons of the 1KHz pre-processor test and then compare.
+    *   Run both versons of the 1KHz pre-processor test and then compare.
     *   These files can be plotted with "python compare\_1k.py"
 *   Also prints out the number of cycles the code took to execute (using the
     DWT->CYCCNT register)
@@ -60,7 +60,7 @@
         *   micro_power.txt: the magnitude squared of the DFT
         *   micro_power_avg.txt: the 6-bin average of the magnitude squared of
             the DFT
-    *   Run both verisons of the 1KHz pre-processor test and then compare.
+    *   Run both versons of the 1KHz pre-processor test and then compare.
     *   These files can be plotted with "python compare\_1k.py"
 *   Also prints out the number of cycles the code took to execute (using the
     DWT->CYCCNT register)
@@ -79,7 +79,7 @@ is the same: a 1 kHz sinusoid.
 
 *   **get\_yesno\_data.cmd**: A GDB command file that runs preprocessor_test
     (where TARGET=apollo3evb) and dumps the calculated data for the "yes" and
-    "no" input wavfeorms to text files
+    "no" input waveforms to text files
 *   **\_main.c**: Point of entry for the micro_speech test
 *   **preprocessor_1k.cc**: A version of preprocessor.cc where a 1 kHz sinusoid
     is provided as input to the preprocessor
diff --git a/tensorflow/lite/experimental/micro/tools/make/targets/ecm3531/README.md b/tensorflow/lite/experimental/micro/tools/make/targets/ecm3531/README.md
index 3e339fe635d..a92fc8312be 100644
--- a/tensorflow/lite/experimental/micro/tools/make/targets/ecm3531/README.md
+++ b/tensorflow/lite/experimental/micro/tools/make/targets/ecm3531/README.md
@@ -4,6 +4,6 @@ https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/experimenta
 
 CONTACT INFORMATION:
 Contact info@etacompute.com for more information on obtaining the Eta Compute
-SDK and evalution board.
+SDK and evaluation board.
 
 www.etacompute.com
diff --git a/tensorflow/lite/g3doc/models/image_classification/android.md b/tensorflow/lite/g3doc/models/image_classification/android.md
index 51e354e1834..05fbffdd010 100644
--- a/tensorflow/lite/g3doc/models/image_classification/android.md
+++ b/tensorflow/lite/g3doc/models/image_classification/android.md
@@ -186,7 +186,7 @@ protected void runInference() {
 
 The output of the inference is stored in a byte array `labelProbArray`, which is
 allocated in the subclass's constructor. It consists of a single outer element,
-containing one innner element for each label in the classification model.
+containing one inner element for each label in the classification model.
 
 To run inference, we call `run()` on the interpreter instance, passing the input
 and output buffers as arguments.
diff --git a/tensorflow/lite/g3doc/performance/delegates.md b/tensorflow/lite/g3doc/performance/delegates.md
index b1ccb9ef072..bf4a86a8a09 100644
--- a/tensorflow/lite/g3doc/performance/delegates.md
+++ b/tensorflow/lite/g3doc/performance/delegates.md
@@ -81,7 +81,7 @@ class MyDelegate {
 };
 
 // Create the TfLiteRegistration for the Kernel node which will replace
-// the subrgaph in the main TfLite graph.
+// the subgraph in the main TfLite graph.
 TfLiteRegistration GetMyDelegateNodeRegistration() {
   // This is the registration for the Delegate Node that gets added to
   // the TFLite graph instead of the subGraph it replaces.

From eff45711cc54a2909f9bc207406524b484d38cd4 Mon Sep 17 00:00:00 2001
From: Kazuaki Ishizaki
Date: Fri, 11 Oct 2019 00:44:56 +0900
Subject: [PATCH 2/2] address @jpienaar's comment

---
 .../micro/examples/micro_speech/apollo3/README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tensorflow/lite/experimental/micro/examples/micro_speech/apollo3/README.md b/tensorflow/lite/experimental/micro/examples/micro_speech/apollo3/README.md
index be291c86af5..dbdf2a31e02 100644
--- a/tensorflow/lite/experimental/micro/examples/micro_speech/apollo3/README.md
+++ b/tensorflow/lite/experimental/micro/examples/micro_speech/apollo3/README.md
@@ -42,7 +42,7 @@
         *   cmsis_power.txt: the magnitude squared of the DFT
         *   cmsis_power_avg.txt: the 6-bin average of the magnitude squared of
             the DFT
-    *   Run both versons of the 1KHz pre-processor test and then compare.
+    *   Run both versions of the 1KHz pre-processor test and then compare.
     *   These files can be plotted with "python compare\_1k.py"
 *   Also prints out the number of cycles the code took to execute (using the
     DWT->CYCCNT register)
@@ -60,7 +60,7 @@
         *   micro_power.txt: the magnitude squared of the DFT
         *   micro_power_avg.txt: the 6-bin average of the magnitude squared of
             the DFT
-    *   Run both versons of the 1KHz pre-processor test and then compare.
+    *   Run both versions of the 1KHz pre-processor test and then compare.
     *   These files can be plotted with "python compare\_1k.py"
 *   Also prints out the number of cycles the code took to execute (using the
     DWT->CYCCNT register)