diff --git a/tensorflow/lite/c/common.h b/tensorflow/lite/c/common.h
index e04e1a12cd4..389a08528f1 100644
--- a/tensorflow/lite/c/common.h
+++ b/tensorflow/lite/c/common.h
@@ -80,7 +80,7 @@ struct TfLiteRegistration;
 
 // An external context is a collection of information unrelated to the TF Lite
 // framework, but useful to a subset of the ops. TF Lite knows very little
-// about about the actual contexts, but it keeps a list of them, and is able to
+// about the actual contexts, but it keeps a list of them, and is able to
 // refresh them if configurations like the number of recommended threads
 // change.
 typedef struct TfLiteExternalContext {
diff --git a/tensorflow/lite/core/subgraph.cc b/tensorflow/lite/core/subgraph.cc
index 20d68590740..1deda07d397 100644
--- a/tensorflow/lite/core/subgraph.cc
+++ b/tensorflow/lite/core/subgraph.cc
@@ -219,7 +219,7 @@ Subgraph::Subgraph(ErrorReporter* error_reporter,
   // Reserve some space for the tensors to avoid excessive resizing.
   tensors_.reserve(kTensorsReservedCapacity);
   nodes_and_registration().reserve(kTensorsReservedCapacity);
-  // Invalid to call these these except from TfLiteDelegate
+  // Invalid to call these except from TfLiteDelegate
   SwitchToKernelContext();
 }
 
@@ -311,7 +311,7 @@ TfLiteDelegateParams* CreateDelegateParams(TfLiteDelegate* delegate,
   // Use `char*` for conveniently step through the allocated space by bytes.
   char* allocation = static_cast<char*>(malloc(allocation_size));
 
-  // Step 3: Fill all data structures structures.
+  // Step 3: Fill all data structures.
   TfLiteDelegateParams* params =
       reinterpret_cast<TfLiteDelegateParams*>(allocation);
   params->delegate = delegate;
diff --git a/tensorflow/lite/delegates/delegate_test.cc b/tensorflow/lite/delegates/delegate_test.cc
index a51d5bc431a..857a94b67b4 100644
--- a/tensorflow/lite/delegates/delegate_test.cc
+++ b/tensorflow/lite/delegates/delegate_test.cc
@@ -196,7 +196,7 @@ class TestDelegate : public ::testing::Test {
             kTfLiteOk);
 
         if (simple->min_ops_per_subset() > 0) {
-          // Build a new vector of ops from subsets with atleast the minimum
+          // Build a new vector of ops from subsets with at least the minimum
           // size.
           std::vector<int> allowed_ops;
           for (int idx = 0; idx < num_partitions; ++idx) {
@@ -1304,7 +1304,7 @@ TEST_F(TestDelegateWithDynamicTensors, ShapePropagation_FlagNotSet) {
 // Input: 0, Output:12.
 // All constants are 2, so the function is: (x + 2 + 2) * 2 + 2 = 2x + 10
 //
-// Delegate only supports ADD, so can have upto two delegated partitions.
+// Delegate only supports ADD, so can have up to two delegated partitions.
 // TODO(b/156707497): Add more cases here once we have landed CPU kernels
 // supporting FP16.
 class TestFP16Delegation : public ::testing::TestWithParam<int> {
diff --git a/tensorflow/lite/delegates/gpu/api.h b/tensorflow/lite/delegates/gpu/api.h
index 7892d0ce2f6..075e66bef50 100644
--- a/tensorflow/lite/delegates/gpu/api.h
+++ b/tensorflow/lite/delegates/gpu/api.h
@@ -364,7 +364,7 @@ struct InferenceOptions {
 };
 
 // Returns a position number for the priority. If priority is missing,
-// then it it would return 'max num priorities + 1'.
+// then it would return 'max num priorities + 1'.
 int GetPosition(const InferenceOptions& options, InferencePriority p);
 
 // Return true if options are valid.
diff --git a/tensorflow/lite/delegates/gpu/gl/node_shader.h b/tensorflow/lite/delegates/gpu/gl/node_shader.h
index 0575182f361..9abe41fa07b 100644
--- a/tensorflow/lite/delegates/gpu/gl/node_shader.h
+++ b/tensorflow/lite/delegates/gpu/gl/node_shader.h
@@ -44,10 +44,10 @@ enum class IOStructure {
   ONLY_DEFINITIONS,
 
   // For inputs:
-  //   Source code runs computations using 'vec4 value_N' declared by
-  //   the compiler, where where N is an index of the input. Each value comes
-  //   from inputs using coordinates set by GlobalInvocationID and a dispatch
-  //   method, therefore, source code should not explicitly read values.
+  //   Source code runs computations using 'vec4 value_N' declared by the
+  //   compiler, where N is an index of the input. Each value comes from inputs
+  //   using coordinates set by GlobalInvocationID and a dispatch method,
+  //   therefore, source code should not explicitly read values.
   //
   // For outputs:
   //   Source code runs computations and leaves results in 'vec4 value_N'
diff --git a/tensorflow/lite/delegates/nnapi/nnapi_delegate.cc b/tensorflow/lite/delegates/nnapi/nnapi_delegate.cc
index 913e35cb9d9..c12e9c0da9c 100644
--- a/tensorflow/lite/delegates/nnapi/nnapi_delegate.cc
+++ b/tensorflow/lite/delegates/nnapi/nnapi_delegate.cc
@@ -1359,7 +1359,7 @@ class NNAPIOpBuilder {
     if (tensor->allocation_type == kTfLiteMmapRo) {
       if (IsQuantized(tensor_type) && need_int8_conversion &&
           nn_type != ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
-        // We need to to add a tensor and convert the weights into uint8.
+        // We need to add a tensor and convert the weights into uint8.
         // Currently this is only needed for fully_connected. The new_tensor is
         // needed for lifetime management for the converted weights.
         int new_tensor_index = -1;
@@ -2488,7 +2488,7 @@ bool NNAPIDelegateKernel::Validate(
           context->tensors[node->inputs->data[1]].dims;
       Expect(TfLiteIntArrayEqual(condition_shape, input_shape),
              NNAPIValidationFailureType::kUnsupportedOperandValue,
-             "Condition and inputs tensors shuld have the same shape",
+             "Condition and input tensors should have the same shape",
              &val_ctx);
     } break;
     case kTfLiteBuiltinGather: {
diff --git a/tensorflow/lite/delegates/utils/dummy_delegate/README.md b/tensorflow/lite/delegates/utils/dummy_delegate/README.md
index d55ba421cba..6b394d12160 100644
--- a/tensorflow/lite/delegates/utils/dummy_delegate/README.md
+++ b/tensorflow/lite/delegates/utils/dummy_delegate/README.md
@@ -21,7 +21,7 @@ the ideas above. For more sophisticated examples, refer to [Flex delegate](https
 ## Testing & Tooling
 
 There are currently **two options** to plug in a newly created TFLite delegate
-to reuse existing TFLite kernel tests and and tooling:
+to reuse existing TFLite kernel tests and tooling:
 
 - Utilize the **[delegate registrar](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/tools/delegates)**
 mechanism
diff --git a/tensorflow/lite/experimental/writer/option_writer_generator.cc b/tensorflow/lite/experimental/writer/option_writer_generator.cc
index 14d7219f304..e8cef523c60 100644
--- a/tensorflow/lite/experimental/writer/option_writer_generator.cc
+++ b/tensorflow/lite/experimental/writer/option_writer_generator.cc
@@ -119,7 +119,7 @@ class OpOptionData {
   const std::unordered_map<std::string, std::string>& op_to_option() {
     return op_to_option_;
   }
-  // Maps from option to to C struct i.e. 'AddOptions' -> 'TfLiteAddOptions'
+  // Maps from option to C struct, e.g. 'AddOptions' -> 'TfLiteAddOptions'
   const std::unordered_map<std::string, std::string>& option_to_struct() {
     return option_to_struct_;
   }
diff --git a/tensorflow/lite/g3doc/guide/build_arm64.md b/tensorflow/lite/g3doc/guide/build_arm64.md
index c07c81cd69b..9a7bf12c1b8 100644
--- a/tensorflow/lite/g3doc/guide/build_arm64.md
+++ b/tensorflow/lite/g3doc/guide/build_arm64.md
@@ -126,7 +126,7 @@ page for the detail.
 bazel build --config=elinux_aarch64 -c opt //tensorflow/lite:libtensorflowlite.so
 ```
 
-You can find a shared library library in:
+You can find a shared library in:
 `bazel-bin/tensorflow/lite/libtensorflowlite.so`.
 
 Currently, there is no straightforward way to extract all header files needed,
diff --git a/tensorflow/lite/g3doc/guide/build_rpi.md b/tensorflow/lite/g3doc/guide/build_rpi.md
index f43a81dd268..408a0f11856 100644
--- a/tensorflow/lite/g3doc/guide/build_rpi.md
+++ b/tensorflow/lite/g3doc/guide/build_rpi.md
@@ -119,7 +119,7 @@ cd tensorflow_src && ./tensorflow/lite/tools/make/download_dependencies.sh
 
 You can use
 [ARM GCC toolchains](https://github.com/tensorflow/tensorflow/tree/master/third_party/toolchains/embedded/arm-linux)
-with Bazel to build an armhf shared library which is compatibile with Raspberry
+with Bazel to build an armhf shared library which is compatible with Raspberry
 Pi 2, 3 and 4.
 
 Note: The generated shared library requires glibc 2.28 or higher to run.
@@ -165,7 +165,7 @@ page for the detail.
 bazel build --config=elinux_armhf -c opt //tensorflow/lite:libtensorflowlite.so
 ```
 
-You can find a shared library library in:
+You can find a shared library in:
 `bazel-bin/tensorflow/lite/libtensorflowlite.so`.
 
 Currently, there is no straightforward way to extract all header files needed,
diff --git a/tensorflow/lite/g3doc/guide/ops_select.md b/tensorflow/lite/g3doc/guide/ops_select.md
index 3aa81528c1f..73466791078 100644
--- a/tensorflow/lite/g3doc/guide/ops_select.md
+++ b/tensorflow/lite/g3doc/guide/ops_select.md
@@ -222,7 +222,7 @@ pip package version since 2.3 for Linux and 2.4 for other environments.
 ### Performance
 
 When using a mixture of both builtin and select TensorFlow ops, all of the same
-TensorFlow Lite optimizations and optimized builtin ops will be be available and
+TensorFlow Lite optimizations and optimized builtin ops will be available and
 usable with the converted model.
 
 The following table describes the average time taken to run inference on
diff --git a/tensorflow/lite/g3doc/inference_with_metadata/task_library/customized_task_api.md b/tensorflow/lite/g3doc/inference_with_metadata/task_library/customized_task_api.md
index 04f0477552e..d7b8d315365 100644
--- a/tensorflow/lite/g3doc/inference_with_metadata/task_library/customized_task_api.md
+++ b/tensorflow/lite/g3doc/inference_with_metadata/task_library/customized_task_api.md
@@ -110,7 +110,7 @@ To build an API object,you must provide the following information by extending
                                   std::vector<QaAnswer>, // OutputType
                                   const std::string&, const std::string& // InputTypes
                                   > {
-      // Convert API input into into tensors
+      // Convert API input into tensors
       absl::Status BertQuestionAnswerer::Preprocess(
         const std::vector<TfLiteTensor*>& input_tensors, // input tensors of the model
         const std::string& context, const std::string& query // InputType of the API
@@ -230,7 +230,7 @@ following information by extending
 [`BaseTaskApi`](https://github.com/tensorflow/tflite-support/blob/master/tensorflow_lite_support/java/src/java/org/tensorflow/lite/task/core/BaseTaskApi.java),
 which provides JNI handlings for all Java Task APIs.
 
-*   __Determine the API I/O__ - This usually mirriors the native interfaces. e.g
+*   __Determine the API I/O__ - This usually mirrors the native interfaces, e.g.
     `BertQuestionAnswerer` takes `(String context, String question)` as input
     and outputs `List<QaAnswer>`. The implementation calls a private native
     function with similar signature, except it has an additional parameter `long
diff --git a/tensorflow/lite/g3doc/performance/gpu.md b/tensorflow/lite/g3doc/performance/gpu.md
index 077f88e1b12..e992518baf1 100644
--- a/tensorflow/lite/g3doc/performance/gpu.md
+++ b/tensorflow/lite/g3doc/performance/gpu.md
@@ -114,7 +114,7 @@ OR
 pod 'TensorFlowLiteSwift', '~> 0.0.1-nightly', :subspecs => ['Metal']
 ```
 
-You can do similiarly for `TensorFlowLiteC` if you want to use the C API.
+You can do the same for `TensorFlowLiteC` if you want to use the C API.
 
 #### Step 3. Enable the GPU delegate
 
@@ -154,9 +154,9 @@ Lastly make sure to select Release-only builds on 64-bit architecture. Under
 
 ### Android
 
-Note: The TensorFlow Lite Interpreter must be created on the same thread as when
-is is run. Otherwise, `TfLiteGpuDelegate Invoke: GpuDelegate must run on the
-same thread where it was initialized.` may occur.
+Note: The TensorFlow Lite Interpreter must be created on the same thread where
+it is run. Otherwise, `TfLiteGpuDelegate Invoke: GpuDelegate must run on the
+same thread where it was initialized.` may occur.
 
 Look at the demo to see how to add the delegate. In your application, add the
 AAR as above, import `org.tensorflow.lite.gpu.GpuDelegate` module, and use
diff --git a/tensorflow/lite/g3doc/tutorials/model_maker_image_classification.ipynb b/tensorflow/lite/g3doc/tutorials/model_maker_image_classification.ipynb
index ef650c6b05b..f0da8e4b91a 100644
--- a/tensorflow/lite/g3doc/tutorials/model_maker_image_classification.ipynb
+++ b/tensorflow/lite/g3doc/tutorials/model_maker_image_classification.ipynb
@@ -84,7 +84,7 @@
       "source": [
         "## Prerequisites\n",
         "\n",
-        "To run this example, we first need to install serveral required packages, including Model Maker package that in github [repo](https://github.com/tensorflow/examples/tree/master/tensorflow_examples/lite/model_maker)."
+        "To run this example, we first need to install several required packages, including the Model Maker package that is in the GitHub [repo](https://github.com/tensorflow/examples/tree/master/tensorflow_examples/lite/model_maker)."
       ]
     },
     {
@@ -346,7 +346,7 @@
         "id": "NNRNv_mloS89"
       },
       "source": [
-        "If you prefer not to upload your images to the cloud, you could try to run the library locally following the [guide](https://github.com/tensorflow/examples/tree/master/tensorflow_examples/lite/model_maker) in github."
+        "If you prefer not to upload your images to the cloud, you could try to run the library locally by following the [guide](https://github.com/tensorflow/examples/tree/master/tensorflow_examples/lite/model_maker) on GitHub."
       ]
     },
     {
@@ -783,7 +783,7 @@
         "id": "-4jQaxyT5_KV"
       },
       "source": [
-        "You can also evalute the tflite model with the `evaluate_tflite` method."
+        "You can also evaluate the tflite model with the `evaluate_tflite` method."
       ]
     },
     {
@@ -805,7 +805,7 @@
       "source": [
         "## Advanced Usage\n",
         "\n",
-        "The `create` function is the critical part of this library. It uses transfer learning with a pretrained model similiar to the [tutorial](https://www.tensorflow.org/tutorials/images/transfer_learning).\n",
+        "The `create` function is the critical part of this library. It uses transfer learning with a pretrained model similar to the [tutorial](https://www.tensorflow.org/tutorials/images/transfer_learning).\n",
         "\n",
         "The `create`function contains the following steps:\n",
         "\n",
@@ -843,7 +843,7 @@
         "id": "iyIo0d5TCzE2"
       },
       "source": [
-        "Model Maker supports multiple post-training quantization options. Let's take full integer quantization as an instance. First, define the quantization config to enforce enforce full integer quantization for all ops including the input and output. The input type and output type are `uint8` by default. You may also change them to other types like `int8` by setting `inference_input_type` and `inference_output_type` in config."
+        "Model Maker supports multiple post-training quantization options. Let's take full integer quantization as an example. First, define the quantization config to enforce full integer quantization for all ops including the input and output. The input type and output type are `uint8` by default. You may also change them to other types like `int8` by setting `inference_input_type` and `inference_output_type` in the config."
       ]
     },
     {
@@ -1018,7 +1018,7 @@
         "      `use_hub_library` is True. None by default.\n",
         "*   `shuffle`: Boolean, whether the data should be shuffled. False by default.\n",
         "*   `use_augmentation`: Boolean, use data augmentation for preprocessing. False by default.\n",
-        "*   `use_hub_library`: Boolean, use `make_image_classifier_lib` from tensorflow hub to retrain the model. This training pipline could achieve better performance for complicated dataset with many categories. True by default. \n",
+        "*   `use_hub_library`: Boolean, use `make_image_classifier_lib` from TensorFlow Hub to retrain the model. This training pipeline could achieve better performance for a complicated dataset with many categories. True by default. \n",
         "*   `warmup_steps`: Number of warmup steps for warmup schedule on learning rate. If None, the default warmup_steps is used which is the total training steps in two epochs. Only used when `use_hub_library` is False. None by default.\n",
         "*   `model_dir`: Optional, the location of the model checkpoint files. Only used when `use_hub_library` is False. None by default.\n",
         "\n",
diff --git a/tensorflow/lite/g3doc/tutorials/model_maker_question_answer.ipynb b/tensorflow/lite/g3doc/tutorials/model_maker_question_answer.ipynb
index 06f534522c7..328f9d0cb70 100644
--- a/tensorflow/lite/g3doc/tutorials/model_maker_question_answer.ipynb
+++ b/tensorflow/lite/g3doc/tutorials/model_maker_question_answer.ipynb
@@ -665,7 +665,7 @@
         "id": "HZKYthlVrTos"
       },
       "source": [
-        "You can also evalute the tflite model with the `evaluate_tflite` method. This step is expected to take a long time."
+        "You can also evaluate the tflite model with the `evaluate_tflite` method. This step is expected to take a long time."
       ]
     },
     {
diff --git a/tensorflow/lite/g3doc/tutorials/model_maker_text_classification.ipynb b/tensorflow/lite/g3doc/tutorials/model_maker_text_classification.ipynb
index ba6d266361b..2fc40f8a1f0 100644
--- a/tensorflow/lite/g3doc/tutorials/model_maker_text_classification.ipynb
+++ b/tensorflow/lite/g3doc/tutorials/model_maker_text_classification.ipynb
@@ -747,7 +747,7 @@
         "id": "HZKYthlVrTos"
       },
       "source": [
-        "You can evalute the tflite model with `evaluate_tflite` method to get its accuracy."
+        "You can evaluate the tflite model with the `evaluate_tflite` method to get its accuracy."
       ]
     },
     {
diff --git a/tensorflow/lite/java/BUILD b/tensorflow/lite/java/BUILD
index 9bceb939c02..c86872b18e3 100644
--- a/tensorflow/lite/java/BUILD
+++ b/tensorflow/lite/java/BUILD
@@ -91,9 +91,9 @@ tflite_flex_android_library(
     visibility = ["//visibility:public"],
 )
 
-# EXPERIMENTAL: Android target target for GPU acceleration. Note that this
-# library contains *only* the GPU delegate and its Java wrapper; clients must
-# also include the core `tensorflowlite` runtime.
+# EXPERIMENTAL: Android target for GPU acceleration. Note that this library
+# contains *only* the GPU delegate and its Java wrapper; clients must also
+# include the core `tensorflowlite` runtime.
 android_library(
     name = "tensorflowlite_gpu",
     srcs = ["//tensorflow/lite/delegates/gpu/java/src/main/java/org/tensorflow/lite/gpu:gpu_delegate"],
diff --git a/tensorflow/lite/kernels/transpose_conv.cc b/tensorflow/lite/kernels/transpose_conv.cc
index 52ee0414dd6..7a2b1a8dceb 100644
--- a/tensorflow/lite/kernels/transpose_conv.cc
+++ b/tensorflow/lite/kernels/transpose_conv.cc
@@ -22,10 +22,10 @@ limitations under the License.
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/kernels/cpu_backend_context.h"
 #include "tensorflow/lite/kernels/internal/compatibility.h"
-// NOLINTNEXTLINE - This header file should't go to the top.
+// NOLINTNEXTLINE - This header file shouldn't go to the top.
 #include "tensorflow/lite/kernels/internal/optimized/integer_ops/transpose_conv.h"
 #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
-// NOLINTNEXTLINE - This header file should't go to the top.
+// NOLINTNEXTLINE - This header file shouldn't go to the top.
 #include "tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h"
 #include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
 #include "tensorflow/lite/kernels/internal/tensor.h"
@@ -204,7 +204,7 @@ TfLiteStatus ResizeAndTransposeWeights(TfLiteContext* context,
   TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, transposed_weights,
                                               transposed_weights_shape_array));
 
-  // Transpose the weights from from OHWI order to HWOI order.
+  // Transpose the weights from OHWI order to HWOI order.
   TransposeParams transpose_params;
   transpose_params.perm_count = 4;
   transpose_params.perm[0] = 1;
diff --git a/tensorflow/lite/kernels/variable_ops_test.cc b/tensorflow/lite/kernels/variable_ops_test.cc
index 077a03df21d..1716f896805 100644
--- a/tensorflow/lite/kernels/variable_ops_test.cc
+++ b/tensorflow/lite/kernels/variable_ops_test.cc
@@ -44,7 +44,7 @@ class VariableOpsTest : public ::testing::Test {
   }
 
   void ConstructGraph() {
-    // Construct a graph like ths:
+    // Construct a graph like this:
     //   Input: %0, %1, %2
     //   Output: %3
     //   variable_assign(%0, %2)
diff --git a/tensorflow/lite/micro/examples/magic_wand/train/README.md b/tensorflow/lite/micro/examples/magic_wand/train/README.md
index f85ca015a9f..0b562a322de 100644
--- a/tensorflow/lite/micro/examples/magic_wand/train/README.md
+++ b/tensorflow/lite/micro/examples/magic_wand/train/README.md
@@ -84,7 +84,7 @@ $ python train.py --model CNN --person true
 
 #### Model type
 
-In the `--model` argument, you can can provide `CNN` or `LSTM`. The CNN
+In the `--model` argument, you can provide `CNN` or `LSTM`. The CNN
 model has a smaller size and lower latency.
 
 ## Collecting new data
diff --git a/tensorflow/lite/micro/examples/micro_speech/README.md b/tensorflow/lite/micro/examples/micro_speech/README.md
index f896e40de2e..8490458e3a9 100644
--- a/tensorflow/lite/micro/examples/micro_speech/README.md
+++ b/tensorflow/lite/micro/examples/micro_speech/README.md
@@ -223,7 +223,7 @@ make -f tensorflow/lite/micro/tools/make/Makefile TARGET=esp generate_micro_spee
 
 ### Building the example
 
-Go the the example project directory
+Go to the example project directory
 ```
 cd tensorflow/lite/micro/tools/make/gen/esp_xtensa-esp32/prj/micro_speech/esp-idf
 ```
@@ -577,7 +577,7 @@ using [ARM Mbed](https://github.com/ARMmbed/mbed-cli).
 
 The following instructions will help you build and deploy this example to
 [HIMAX WE1 EVB](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_board_brief)
-board. To undstand more about using this board, please check
+board. To learn more about using this board, please check
 [HIMAX WE1 EVB user guide](https://github.com/HimaxWiseEyePlus/bsp_tflu/tree/master/HIMAX_WE1_EVB_user_guide).
 
 ### Initial Setup
diff --git a/tensorflow/lite/micro/examples/micro_speech/esp/ringbuf.c b/tensorflow/lite/micro/examples/micro_speech/esp/ringbuf.c
index b297069e80c..6bf1585ea5a 100644
--- a/tensorflow/lite/micro/examples/micro_speech/esp/ringbuf.c
+++ b/tensorflow/lite/micro/examples/micro_speech/esp/ringbuf.c
@@ -291,7 +291,7 @@ void rb_abort(ringbuf_t *rb) {
 }
 
 /**
- * Reset the ringbuffer and keep keep rb_write aborted.
+ * Reset the ringbuffer and keep rb_write aborted.
  * Note that we are taking lock before even toggling `abort_write` variable.
  * This serves a special purpose to not allow this abort to be mixed with
  * rb_write.
diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_features/no_feature_data_slice.h b/tensorflow/lite/micro/examples/micro_speech/micro_features/no_feature_data_slice.h
index 7c27379f6de..01e6605b844 100644
--- a/tensorflow/lite/micro/examples/micro_speech/micro_features/no_feature_data_slice.h
+++ b/tensorflow/lite/micro/examples/micro_speech/micro_features/no_feature_data_slice.h
@@ -16,7 +16,7 @@ limitations under the License.
 // This data was extracted from the larger feature data held in
 // no_features_data.cc and consists of the 29th spectrogram slice of 43 values.
 // This is the expected result of running the sample data in
-// no_30ms_sample_data.cc through through the preprocessing pipeline.
+// no_30ms_sample_data.cc through the preprocessing pipeline.
 
 #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_FEATURE_DATA_SLICE_H_
 #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_NO_FEATURE_DATA_SLICE_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/micro_features/yes_feature_data_slice.h b/tensorflow/lite/micro/examples/micro_speech/micro_features/yes_feature_data_slice.h
index 2427ee70063..18faadcf971 100644
--- a/tensorflow/lite/micro/examples/micro_speech/micro_features/yes_feature_data_slice.h
+++ b/tensorflow/lite/micro/examples/micro_speech/micro_features/yes_feature_data_slice.h
@@ -16,7 +16,7 @@ limitations under the License.
 // This data was extracted from the larger feature data held in
 // no_micro_features_data.cc and consists of the 26th spectrogram slice of 40
 // values. This is the expected result of running the sample data in
-// yes_30ms_sample_data.cc through through the preprocessing pipeline.
+// yes_30ms_sample_data.cc through the preprocessing pipeline.
 
 #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_FEATURE_DATA_SLICE_H_
 #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_MICRO_FEATURES_YES_FEATURE_DATA_SLICE_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/no_power_spectrum_data.h b/tensorflow/lite/micro/examples/micro_speech/simple_features/no_power_spectrum_data.h
index 463a4951cf1..f20362349f2 100644
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/no_power_spectrum_data.h
+++ b/tensorflow/lite/micro/examples/micro_speech/simple_features/no_power_spectrum_data.h
@@ -16,7 +16,7 @@ limitations under the License.
 // This data was extracted from the larger feature data held in
 // no_features_data.cc and consists of the 29th spectrogram slice of 43 values.
 // This is the expected result of running the sample data in
-// no_30ms_sample_data.cc through through the preprocessing pipeline.
+// no_30ms_sample_data.cc through the preprocessing pipeline.
 
 #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_NO_POWER_SPECTRUM_DATA_H_
 #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_NO_POWER_SPECTRUM_DATA_H_
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.cc b/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.cc
index 0de36b48e41..204bfc857d0 100644
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.cc
+++ b/tensorflow/lite/micro/examples/micro_speech/simple_features/simple_features_generator.cc
@@ -136,7 +136,7 @@ TfLiteStatus GenerateSimpleFeatures(tflite::ErrorReporter* error_reporter,
     // Quantize the result into eight bits, effectively multiplying by two.
     // The 127.5 constant here has to match the features_max value defined in
     // tensorflow/examples/speech_commands/input_data.py, and this also assumes
-    // that features_min is zero. It it wasn't, we'd have to subtract it first.
+    // that features_min is zero. If it wasn't, we'd have to subtract it first.
     int quantized_average = roundf(average * (255.0f / 127.5f));
     if (quantized_average < 0) {
       quantized_average = 0;
diff --git a/tensorflow/lite/micro/examples/micro_speech/simple_features/yes_power_spectrum_data.h b/tensorflow/lite/micro/examples/micro_speech/simple_features/yes_power_spectrum_data.h
index 7e0c146ace0..5264e6262fc 100644
--- a/tensorflow/lite/micro/examples/micro_speech/simple_features/yes_power_spectrum_data.h
+++ b/tensorflow/lite/micro/examples/micro_speech/simple_features/yes_power_spectrum_data.h
@@ -16,7 +16,7 @@ limitations under the License.
 // This data was extracted from the larger feature data held in
 // no_features_data.cc and consists of the 26th spectrogram slice of 43 values.
 // This is the expected result of running the sample data in
-// yes_30ms_sample_data.cc through through the preprocessing pipeline.
+// yes_30ms_sample_data.cc through the preprocessing pipeline.
 
 #ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_YES_POWER_SPECTRUM_DATA_H_
 #define TENSORFLOW_LITE_MICRO_EXAMPLES_MICRO_SPEECH_SIMPLE_FEATURES_YES_POWER_SPECTRUM_DATA_H_
diff --git a/tensorflow/lite/micro/examples/person_detection/README.md b/tensorflow/lite/micro/examples/person_detection/README.md
index 8f437524ef0..7312582f9b9 100644
--- a/tensorflow/lite/micro/examples/person_detection/README.md
+++ b/tensorflow/lite/micro/examples/person_detection/README.md
@@ -311,7 +311,7 @@ make -f tensorflow/lite/micro/tools/make/Makefile TARGET=esp generate_person_det
 
 ### Building the example
 
-Go the the example project directory
+Go to the example project directory
 ```
 cd tensorflow/lite/micro/tools/make/gen/esp_xtensa-esp32/prj/person_detection/esp-idf
 ```
diff --git a/tensorflow/lite/micro/kernels/kernel_runner.h b/tensorflow/lite/micro/kernels/kernel_runner.h
index 45d107e7a37..064aabe97cf 100644
--- a/tensorflow/lite/micro/kernels/kernel_runner.h
+++ b/tensorflow/lite/micro/kernels/kernel_runner.h
@@ -23,12 +23,12 @@ limitations under the License.
 namespace tflite {
 namespace micro {
 
-// Helper class to perform a simulated kernel (i.e. TfLiteRegistration) lifecyle
-// (init, prepare, invoke). All internal allocations are handled by this class.
-// Simply pass in the registration, list of required tensors, inputs array,
-// outputs array, and any pre-builtin data. Calling Invoke() will automatically
-// walk the kernl and outputs will be ready on the the TfLiteTensor output
-// provided during construction.
+// Helper class to perform a simulated kernel (i.e. TfLiteRegistration)
+// lifecycle (init, prepare, invoke). All internal allocations are handled by
+// this class. Simply pass in the registration, list of required tensors, inputs
+// array, outputs array, and any pre-builtin data. Calling Invoke() will
+// automatically walk the kernel and outputs will be ready on the TfLiteTensor
+// output provided during construction.
 class KernelRunner {
  public:
   KernelRunner(const TfLiteRegistration& registration, TfLiteTensor* tensors,
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc
index 79a44e2c670..75eb2838034 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc
@@ -32,7 +32,7 @@ struct OpData {
 };
 
 // Number of unique int8_t and int16_t values.  Used in exponent lookup table
-// conputation.
+// computation.
 constexpr int kInt8Range =
     std::numeric_limits<int8_t>::max() - std::numeric_limits<int8_t>::min() + 1;
 constexpr int kInt16Range = std::numeric_limits<int16_t>::max() -
@@ -52,7 +52,7 @@ constexpr int kMaxExponentValue = (1 << kExpFractionalBits);
 TfLiteStatus Softmax(OpData op_data, const RuntimeShape& input_shape,
                      const int8_t* input_data, const RuntimeShape& output_shape,
                      int16_t* output_data) {
-  // The last dimension is depth.  Outer size is the the total input size
+  // The last dimension is depth.  Outer size is the total input size
   // divided by depth.
   const int trailing_dim = input_shape.DimensionsCount() - 1;
   const int outer_size =
@@ -75,7 +75,7 @@ TfLiteStatus Softmax(OpData op_data, const RuntimeShape& input_shape,
           input_diff == 0 ? kMaxExponentValue : op_data.exp_lut[input_diff];
     }
 
-    // Ensure we cannnot overflow the full_range_output value.  We need to
+    // Ensure we cannot overflow the full_range_output value.  We need to
     // guarantee that kInt16Range * max(input_data) / sum_of_exps < kInt16Range.
     TFLITE_DCHECK(sum_of_exps >= kMaxExponentValue);
 
diff --git a/tensorflow/lite/micro/tools/make/Makefile b/tensorflow/lite/micro/tools/make/Makefile
index b28fd19d15e..1b28152bb17 100644
--- a/tensorflow/lite/micro/tools/make/Makefile
+++ b/tensorflow/lite/micro/tools/make/Makefile
@@ -6,9 +6,9 @@ endif
 TENSORFLOW_ROOT :=
 MAKEFILE_DIR := tensorflow/lite/micro/tools/make
 
-#  Override this on make command line to to parse thirdy party downloads during project generation 
-#  make -f tensorflow/lite/micro/tools/make/Makefile PARSE_THIRD_PARTY=true TARGET=apollo3evb generate_hello_world_make_project 
-PARSE_THIRD_PARTY := 
+#  Override this on the make command line to parse third-party downloads during project generation
+#  make -f tensorflow/lite/micro/tools/make/Makefile PARSE_THIRD_PARTY=true TARGET=apollo3evb generate_hello_world_make_project
+PARSE_THIRD_PARTY :=
 
 
 # Pull in some convenience functions.
diff --git a/tensorflow/lite/micro/tools/make/templates/arc/README_ARC_EMSDP.md.tpl b/tensorflow/lite/micro/tools/make/templates/arc/README_ARC_EMSDP.md.tpl
index 9d2801ed6b7..766450253cc 100644
--- a/tensorflow/lite/micro/tools/make/templates/arc/README_ARC_EMSDP.md.tpl
+++ b/tensorflow/lite/micro/tools/make/templates/arc/README_ARC_EMSDP.md.tpl
@@ -1,6 +1,6 @@
 # TensorFlow Lite Micro ARC Make Project for EM SDP Board.
 
-This folder has been autogenerated by TensorFlow, and contains source, header, and project files needed to build a single TensorFlow Lite Micro target using make tool and and a Synopsys DesignWare ARC processor compatible toolchain, specifically the ARC MetaWare Development Toolkit (MWDT).  
+This folder has been autogenerated by TensorFlow, and contains source, header, and project files needed to build a single TensorFlow Lite Micro target using the make tool and a Synopsys DesignWare ARC processor compatible toolchain, specifically the ARC MetaWare Development Toolkit (MWDT).
 
 This project has been generated for the ARC EM Software Development Platform (EM SDP). The built application can be run only on this platform.
 
diff --git a/tensorflow/lite/profiling/profile_summary_formatter.h b/tensorflow/lite/profiling/profile_summary_formatter.h
index 8f6f9f33e46..d19dfc8fdfa 100644
--- a/tensorflow/lite/profiling/profile_summary_formatter.h
+++ b/tensorflow/lite/profiling/profile_summary_formatter.h
@@ -38,7 +38,7 @@ class ProfileSummaryFormatter {
       const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
           stats_calculator_map,
       const tensorflow::StatsCalculator& delegate_stats_calculator) const = 0;
-  // Returns a string detailing the short summary of the the accumulated runtime
+  // Returns a string detailing the short summary of the accumulated runtime
   // stats in StatsCalculator of ProfileSummarizer.
   virtual std::string GetShortSummary(
       const std::map<uint32_t, std::unique_ptr<tensorflow::StatsCalculator>>&
diff --git a/tensorflow/lite/python/optimize/calibrator.py b/tensorflow/lite/python/optimize/calibrator.py
index e1758e87eeb..0527104329c 100644
--- a/tensorflow/lite/python/optimize/calibrator.py
+++ b/tensorflow/lite/python/optimize/calibrator.py
@@ -32,7 +32,7 @@ _calibration_wrapper = LazyLoader(
 
 
 def add_intermediate_tensors(model_content):
-  """Adds intermedaite tensors to fused op if needed."""
+  """Adds intermediate tensors to fused op if needed."""
   return _calibration_wrapper.AddIntermediateTensors(model_content)
 
 
diff --git a/tensorflow/lite/tools/benchmark/experimental/c/c_api_types.h b/tensorflow/lite/tools/benchmark/experimental/c/c_api_types.h
index e04e1a12cd4..389a08528f1 100644
--- a/tensorflow/lite/tools/benchmark/experimental/c/c_api_types.h
+++ b/tensorflow/lite/tools/benchmark/experimental/c/c_api_types.h
@@ -80,7 +80,7 @@ struct TfLiteRegistration;
 
 // An external context is a collection of information unrelated to the TF Lite
 // framework, but useful to a subset of the ops. TF Lite knows very little
-// about about the actual contexts, but it keeps a list of them, and is able to
+// about the actual contexts, but it keeps a list of them, and is able to
 // refresh them if configurations like the number of recommended threads
 // change.
 typedef struct TfLiteExternalContext {
diff --git a/tensorflow/lite/tools/optimize/operator_property.cc b/tensorflow/lite/tools/optimize/operator_property.cc
index 6ec320c4144..75af0df34a4 100644
--- a/tensorflow/lite/tools/optimize/operator_property.cc
+++ b/tensorflow/lite/tools/optimize/operator_property.cc
@@ -239,7 +239,7 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
         property.quantizable = false;
         break;
       }
-      // TODO(jianlijianli): extend LSTM op spec to inlucde input, bias etc.
+      // TODO(jianlijianli): extend LSTM op spec to include input, bias etc.
       // LSTM needs 5 intermediate tensors. This agrees with the fully quantized
       // kernels in lstm_eval.cc
       if (op_variant.use_layer_norm && op_variant.use_projection &&
@@ -522,7 +522,7 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
         tensor_property_9.symmetric = true;
         // Without layer norm, we choose to quantize bias with the scale of
         // input and its corresponding weight. The other choice will
-        // be to ues the scale of recurrent and its corresponding weight but we
+        // be to use the scale of recurrent and its corresponding weight but we
         // choose to use the smaller scale, which means higher resolution.
         TensorProperty tensor_property_12;
         tensor_property_12.use_derived_scale = true;
@@ -574,7 +574,7 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
         property.outputs = {{0, {}}};
         property.intermediates = {
             // Without layer normalization, intermediate tensors 0, 1, 2, 3 are
-            // not used and and their quantization parameters are ignored.
+            // not used and their quantization parameters are ignored.
             {0, {}},
             {1, {}},
             {2, {}},
@@ -589,7 +589,7 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
           !op_variant.use_peephole) {
         // Without layer norm, we choose to quantize bias with the scale of
         // input and its corresponding weight. The other choice will
-        // be to ues the scale of recurrent and its corresponding weight but we
+        // be to use the scale of recurrent and its corresponding weight but we
         // choose to use the smaller scale, which means higher resolution.
         TensorProperty tensor_property_12;
         tensor_property_12.use_derived_scale = true;
@@ -656,7 +656,7 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
         tensor_property_9.symmetric = true;
         // Without layer norm, we choose to quantize bias with the scale of
         // input and its corresponding weight. The other choice will
-        // be to ues the scale of recurrent and its corresponding weight but we
+        // be to use the scale of recurrent and its corresponding weight but we
         // choose to use the smaller scale, which means higher resolution.
         TensorProperty tensor_property_12;
         tensor_property_12.use_derived_scale = true;
@@ -722,7 +722,7 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
           !op_variant.use_peephole) {
         // Without layer norm, we choose to quantize bias with the scale of
         // input and its corresponding weight. The other choice will
-        // be to ues the scale of recurrent and its corresponding weight but we
+        // be to use the scale of recurrent and its corresponding weight but we
         // choose to use the smaller scale, which means higher resolution.
         TensorProperty tensor_property_12;
         tensor_property_12.use_derived_scale = true;
@@ -949,7 +949,7 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
     case BuiltinOperator_SVDF: {
       TensorProperty tensor_property_time;
       // Only 10bits are needed because 6bits are reserved for the reduce
-      // operation after elemement-wise multiplication between state and time
+      // operation after element-wise multiplication between state and time
       // weights.
       tensor_property_time.number_of_bits = 10;
       TensorProperty tensor_property_bias;
diff --git a/tensorflow/lite/tools/versioning/op_version.cc b/tensorflow/lite/tools/versioning/op_version.cc
index 8627c492c70..5668bc06f8c 100644
--- a/tensorflow/lite/tools/versioning/op_version.cc
+++ b/tensorflow/lite/tools/versioning/op_version.cc
@@ -168,7 +168,7 @@ int GetBuiltinOperatorVersion(const OpSignature& op_sig) {
         return 3;
       }
       // For float and uint8 fixed point kernels, if the weight is
-      // Shuffled4x16Int8, is is version 2.
+      // Shuffled4x16Int8, it is version 2.
       if (op_sig.options.fully_connected.weights_format ==
           FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8) {
         return 2;