From 49418bd074e9bbdd44d88e334d04c6039e93ccea Mon Sep 17 00:00:00 2001
From: Kazuaki Ishizaki
Date: Tue, 24 Mar 2020 12:34:02 +0900
Subject: [PATCH] minor spelling tweaks

---
 tensorflow/compiler/mlir/lite/ir/tfl_ops.td                 | 2 +-
 tensorflow/compiler/mlir/xla/ir/hlo_client_ops.td           | 2 +-
 .../compiler/mlir/xla/transforms/legalize_tf_patterns.td    | 6 +++---
 tensorflow/lite/delegates/xnnpack/README.md                 | 2 +-
 tensorflow/lite/experimental/ruy/profiler/README.md         | 2 +-
 tensorflow/lite/g3doc/convert/python_api.md                 | 4 ++--
 tensorflow/lite/g3doc/performance/best_practices.md         | 2 +-
 tensorflow/lite/micro/examples/micro_speech/README.md       | 2 +-
 tensorflow/lite/micro/examples/person_detection/README.md   | 2 +-
 .../lite/micro/examples/person_detection/esp/README_ESP.md  | 2 +-
 tensorflow/lite/tools/benchmark/android/README.md           | 4 ++--
 tensorflow/tools/ci_build/README.md                         | 2 +-
 12 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/tensorflow/compiler/mlir/lite/ir/tfl_ops.td b/tensorflow/compiler/mlir/lite/ir/tfl_ops.td
index c90fdfbfe1c..ae5cb5eb5e3 100644
--- a/tensorflow/compiler/mlir/lite/ir/tfl_ops.td
+++ b/tensorflow/compiler/mlir/lite/ir/tfl_ops.td
@@ -3397,7 +3397,7 @@ def TFL_BidirectionalSequenceLSTMOp :
   let summary = "Bidirectional sequence lstm operator";
 
   let description = [{
-    Bidirectional lstm is essentiallay two lstms, one running forward & the
+    Bidirectional lstm is essentially two lstms, one running forward & the
     other running backward. And the output is the concatenation of the two
     lstms.
   }];
diff --git a/tensorflow/compiler/mlir/xla/ir/hlo_client_ops.td b/tensorflow/compiler/mlir/xla/ir/hlo_client_ops.td
index 6a60a42861a..48b765f2299 100644
--- a/tensorflow/compiler/mlir/xla/ir/hlo_client_ops.td
+++ b/tensorflow/compiler/mlir/xla/ir/hlo_client_ops.td
@@ -51,7 +51,7 @@ class HLOClient_Op<string mnemonic, list<OpTrait> traits> :
 // broadcasting (via the broadcast_dimensions attribute) and implicit degenerate
 // shape broadcasting.
 //
-// These have 1:1 correspondance with same-named ops in the xla_hlo dialect;
+// These have 1:1 correspondence with same-named ops in the xla_hlo dialect;
 // however, those operations do not support broadcasting.
 //
 // See:
diff --git a/tensorflow/compiler/mlir/xla/transforms/legalize_tf_patterns.td b/tensorflow/compiler/mlir/xla/transforms/legalize_tf_patterns.td
index b9599201601..125d164bfb5 100644
--- a/tensorflow/compiler/mlir/xla/transforms/legalize_tf_patterns.td
+++ b/tensorflow/compiler/mlir/xla/transforms/legalize_tf_patterns.td
@@ -382,7 +382,7 @@ class createIotaOp: NativeCodeCall<
 def createConvertOp: NativeCodeCall<
   "CreateConvertOp(&($_builder), $0.getOwner()->getLoc(), $1, $2)">;
 
-// Performs a substitution of MatrixBandPartOp for XLA HLO ops. Psuedocode is
+// Performs a substitution of MatrixBandPartOp for XLA HLO ops. Pseudocode is
 // shown below, given a tensor `input` with k dimensions [I, J, K, ..., M, N]
 // and two integers, `num_lower` and `num_upper`:
 //
@@ -454,14 +454,14 @@ def : Pat<(TF_ConstOp:$res ElementsAttr:$value), (HLO_ConstOp $value),
 
 // TODO(hinsu): Make these patterns to TF to TF lowering. Relu6 lowering will
 // require HLO canonicalization of min and max on a tensor to ClampOp.
-// TODO(hinsu): Lower unsinged and quantized types after supporting
+// TODO(hinsu): Lower unsigned and quantized types after supporting
 // them in GetScalarOfType.
 def : Pat<(TF_ReluOp AnyRankedTensor:$input),
           (HLO_MaxOp (HLO_ConstOp:$zero (GetScalarOfType<0> $input)), $input,
                      (BinBroadcastDimensions $zero, $input)),
           [(TF_SintOrFpTensor $input)]>;
 
-// TODO(hinsu): Lower unsinged and quantized types after supporting
+// TODO(hinsu): Lower unsigned and quantized types after supporting
 // them in GetScalarOfType.
 def : Pat<(TF_Relu6Op AnyRankedTensor:$input),
           (HLO_ClampOp (HLO_ConstOp (GetScalarOfType<0> $input)), $input,
diff --git a/tensorflow/lite/delegates/xnnpack/README.md b/tensorflow/lite/delegates/xnnpack/README.md
index 5c2a8569fee..3f86a6143bc 100644
--- a/tensorflow/lite/delegates/xnnpack/README.md
+++ b/tensorflow/lite/delegates/xnnpack/README.md
@@ -40,7 +40,7 @@ interpreter->Invoke()
 
 ...
 
-// IMPORTANT: release the interpreter before destroing the delegate
+// IMPORTANT: release the interpreter before destroying the delegate
 interpreter.reset();
 TfLiteXNNPackDelegateDelete(xnnpack_delegate);
 ```
diff --git a/tensorflow/lite/experimental/ruy/profiler/README.md b/tensorflow/lite/experimental/ruy/profiler/README.md
index 28cc55020e5..8d7902566b3 100644
--- a/tensorflow/lite/experimental/ruy/profiler/README.md
+++ b/tensorflow/lite/experimental/ruy/profiler/README.md
@@ -133,7 +133,7 @@ But also the following advantages:
 The philosophy underlying this profiler is that software performance depends on
 software engineers profiling often, and a key factor limiting that in practice
 is the difficulty or cumbersome aspects of profiling with more serious profilers
-such as Linux's "perf", espectially in embedded/mobile development: multiple
+such as Linux's "perf", especially in embedded/mobile development: multiple
 command lines are involved to copy symbol files to devices, retrieve profile
 data from the device, etc. In that context, it is useful to make profiling as
 easy as benchmarking, even on embedded targets, even if the price to pay for
diff --git a/tensorflow/lite/g3doc/convert/python_api.md b/tensorflow/lite/g3doc/convert/python_api.md
index ef9bdf2c9ef..ba86eac25fd 100644
--- a/tensorflow/lite/g3doc/convert/python_api.md
+++ b/tensorflow/lite/g3doc/convert/python_api.md
@@ -171,7 +171,7 @@ TensorFlow Lite metadata provides a standard for model descriptions. The
 metadata is an important source of knowledge about what the model does and its
 input / output information. This makes it easier for other developers to
 understand the best practices and for code generators to create platform
-specific wrapper code. For more infomation, please refer to the
+specific wrapper code. For more information, please refer to the
 [TensorFlow Lite Metadata](metadata.md) section.
 
 ## Installing TensorFlow
@@ -192,7 +192,7 @@ either install the nightly build with
 [Docker](https://www.tensorflow.org/install/docker), or
 [build the pip package from source](https://www.tensorflow.org/install/source).
 
-### Custom ops in the experimenal new converter
+### Custom ops in the experimental new converter
 
 There is a behavior change in how models containing
 [custom ops](https://www.tensorflow.org/lite/guide/ops_custom) (those for which
diff --git a/tensorflow/lite/g3doc/performance/best_practices.md b/tensorflow/lite/g3doc/performance/best_practices.md
index 56093e63722..32f5ef485aa 100644
--- a/tensorflow/lite/g3doc/performance/best_practices.md
+++ b/tensorflow/lite/g3doc/performance/best_practices.md
@@ -52,7 +52,7 @@ operator is executed. Check out our
 
 Model optimization aims to create smaller models that are generally faster and
 more energy efficient, so that they can be deployed on mobile devices. There are
-multiple optimization techniques suppored by TensorFlow Lite, such as
+multiple optimization techniques supported by TensorFlow Lite, such as
 quantization. Check out our [model optimization docs](model_optimization.md) for
 details.
 
diff --git a/tensorflow/lite/micro/examples/micro_speech/README.md b/tensorflow/lite/micro/examples/micro_speech/README.md
index 368f8056e5e..593db8cebb1 100644
--- a/tensorflow/lite/micro/examples/micro_speech/README.md
+++ b/tensorflow/lite/micro/examples/micro_speech/README.md
@@ -420,7 +420,7 @@ using [ARM Mbed](https://github.com/ARMmbed/mbed-cli).
    ```
    mbed compile --target K66F --toolchain GCC_ARM --profile release
    ```
-8. For some mbed compliers, you may get compile error in mbed_rtc_time.cpp.
+8. For some mbed compilers, you may get compile error in mbed_rtc_time.cpp.
    Go to `mbed-os/platform/mbed_rtc_time.h` and comment line 32 and line 37:
 
    ```
diff --git a/tensorflow/lite/micro/examples/person_detection/README.md b/tensorflow/lite/micro/examples/person_detection/README.md
index 12c6b7b9b9f..5ee7bda9914 100644
--- a/tensorflow/lite/micro/examples/person_detection/README.md
+++ b/tensorflow/lite/micro/examples/person_detection/README.md
@@ -202,7 +202,7 @@ The next steps assume that the
 
 * The `IDF_PATH` environment variable is set
 * `idf.py` and Xtensa-esp32 tools (e.g. `xtensa-esp32-elf-gcc`) are in `$PATH`
-* `esp32-camera` should be downloaded in `comopnents/` dir of example as
+* `esp32-camera` should be downloaded in `components/` dir of example as
 explained in `Building the example`(below)
 
 ### Generate the examples
diff --git a/tensorflow/lite/micro/examples/person_detection/esp/README_ESP.md b/tensorflow/lite/micro/examples/person_detection/esp/README_ESP.md
index 35e974d985a..78a7561d5b5 100644
--- a/tensorflow/lite/micro/examples/person_detection/esp/README_ESP.md
+++ b/tensorflow/lite/micro/examples/person_detection/esp/README_ESP.md
@@ -16,7 +16,7 @@ The next steps assume that the
 [IDF environment variables are set](https://docs.espressif.com/projects/esp-idf/en/latest/get-started/index.html#step-4-set-up-the-environment-variables) :
 * The `IDF_PATH` environment variable is set.
 * `idf.py` and Xtensa-esp32 tools (e.g., `xtensa-esp32-elf-gcc`) are in `$PATH`.
 * `esp32-camera` should be
-downloaded in `comopnents/` dir of example as explained in `Build the
+downloaded in `components/` dir of example as explained in `Build the
 example`(below)
 
 ## Build the example
diff --git a/tensorflow/lite/tools/benchmark/android/README.md b/tensorflow/lite/tools/benchmark/android/README.md
index 3e66b7f13f1..40fb5a79553 100644
--- a/tensorflow/lite/tools/benchmark/android/README.md
+++ b/tensorflow/lite/tools/benchmark/android/README.md
@@ -37,7 +37,7 @@ bazel build -c opt \
 adb install -r -d -g bazel-bin/tensorflow/lite/tools/benchmark/android/benchmark_model.apk
 ```
 Note: Make sure to install with "-g" option to grant the permission for reading
-extenal storage.
+external storage.
 
 (3) Push the compute graph that you need to test.
 
@@ -119,6 +119,6 @@ a trace file, between tracing formats and
 [create](https://developer.android.com/topic/performance/tracing/on-device#create-html-report)
 an HTML report.
 
-Note that, the catured tracing file format is either in Perfetto format or in
+Note that, the captured tracing file format is either in Perfetto format or in
 Systrace format depending on the Android version of your device. Select the
 appropriate method to handle the generated file.
diff --git a/tensorflow/tools/ci_build/README.md b/tensorflow/tools/ci_build/README.md
index 988c6706c11..bf1993e8c7a 100644
--- a/tensorflow/tools/ci_build/README.md
+++ b/tensorflow/tools/ci_build/README.md
@@ -83,7 +83,7 @@ this UI, to see the logs for a failed build:
 
 * Submit special pull request (PR) comment to trigger CI: **bot:mlx:test**
 * Test session is run automatically.
-* Test results and artefacts (log files) are reported via PR comments
+* Test results and artifacts (log files) are reported via PR comments
 
 ##### CI Steps