From 29d635bccc81222d51328afeddb562570f9984a9 Mon Sep 17 00:00:00 2001
From: Advait Jain <advaitjain@google.com>
Date: Thu, 23 Jul 2020 22:24:35 -0700
Subject: [PATCH] Prefer the standard integral types over custom type aliases.

PiperOrigin-RevId: 322937618
Change-Id: I0c0560a13856ee1df1ff187d30244a99cce04f86
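
As a minimal sketch of the convention being adopted (the AccumulateWithOffset
helper below is hypothetical and only mirrors the cast-and-accumulate pattern
touched in kernels such as activations.cc and add.cc), code now includes
<cstdint> and spells out the standard fixed-width types directly instead of
the TF-internal int8/int32/uint8 aliases:

    #include <cstdint>

    // Hypothetical example (not part of this patch): accumulate int8_t data
    // into an int32_t accumulator using only standard fixed-width types.
    int32_t AccumulateWithOffset(const int8_t* data, int len, int32_t offset) {
      int32_t acc = 0;
      for (int i = 0; i < len; ++i) {
        acc += static_cast<int32_t>(data[i]) - offset;
      }
      return acc;
    }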
---
 .../lite/micro/benchmarks/conv_benchmark.cc   |   8 +-
 .../benchmarks/depthwise_conv_benchmark.cc    |   8 +-
 .../micro_speech/recognize_commands.cc        |   2 +-
 .../main_functions.cc                         |   2 +-
 tensorflow/lite/micro/kernels/activations.cc  |  18 +--
 tensorflow/lite/micro/kernels/add.cc          |  16 +--
 tensorflow/lite/micro/kernels/arc_mli/conv.cc |  14 +-
 .../micro/kernels/arc_mli/depthwise_conv.cc   |  16 +--
 .../micro/kernels/arc_mli/fully_connected.cc  |   2 +-
 .../lite/micro/kernels/arc_mli/mli_tf_utils.h |   2 +-
 .../lite/micro/kernels/arc_mli/pooling.cc     |   2 +-
 .../kernels/arc_mli/pooling_slicing_test.cc   |   4 +-
 tensorflow/lite/micro/kernels/arg_min_max.cc  |   7 +-
 .../lite/micro/kernels/circular_buffer.cc     |   2 +-
 tensorflow/lite/micro/kernels/cmsis-nn/add.cc |  16 +--
 .../lite/micro/kernels/cmsis-nn/conv.cc       |  10 +-
 .../micro/kernels/cmsis-nn/depthwise_conv.cc  |  10 +-
 .../micro/kernels/cmsis-nn/fully_connected.cc |   6 +-
 .../lite/micro/kernels/cmsis-nn/softmax.cc    |   3 +-
 tensorflow/lite/micro/kernels/comparisons.cc  |   4 +-
 .../lite/micro/kernels/concatenation.cc       |   2 +-
 tensorflow/lite/micro/kernels/conv.cc         |   8 +-
 tensorflow/lite/micro/kernels/conv_test.cc    |  16 +--
 .../lite/micro/kernels/depthwise_conv.cc      |  10 +-
 .../lite/micro/kernels/depthwise_conv_test.cc |  16 +--
 tensorflow/lite/micro/kernels/hard_swish.cc   |   3 +-
 tensorflow/lite/micro/kernels/l2norm.cc       |  16 +--
 tensorflow/lite/micro/kernels/l2norm_test.cc  |   6 +-
 tensorflow/lite/micro/kernels/pad.cc          |   6 +-
 tensorflow/lite/micro/kernels/pooling_test.cc |   4 +-
 tensorflow/lite/micro/kernels/prelu.cc        |   2 +-
 .../micro/kernels/quantization_util_test.cc   |  12 +-
 tensorflow/lite/micro/kernels/quantize.cc     |   2 +-
 .../lite/micro/kernels/quantize_test.cc       |   2 +-
 tensorflow/lite/micro/kernels/reduce.cc       |   6 +-
 .../micro/kernels/resize_nearest_neighbor.cc  |  12 +-
 .../kernels/resize_nearest_neighbor_test.cc   | 120 +++++++++---------
 tensorflow/lite/micro/kernels/softmax.cc      |   3 +-
 tensorflow/lite/micro/kernels/sub.cc          |  16 +--
 tensorflow/lite/micro/kernels/svdf.cc         |   6 +-
 .../lite/micro/kernels/xtensa_hifi/add.cc     |  16 +--
 .../lite/micro/kernels/xtensa_hifi/conv.cc    |  14 +-
 .../kernels/xtensa_hifi/depthwise_conv.cc     |  16 +--
 .../lite/micro/kernels/xtensa_hifi/pooling.cc |  16 +--
 .../lite/micro/kernels/xtensa_hifi/softmax.cc |   3 +-
 .../lite/micro/kernels/xtensa_hifi/svdf.cc    |   6 +-
 .../micro/kernels/xtensa_hifimini/conv.cc     |  48 +++----
 .../kernels/xtensa_hifimini/depthwise_conv.cc |  70 +++++-----
 .../xtensa_hifimini/fully_connected.cc        |  14 +-
 .../micro/kernels/xtensa_hifimini/quantize.cc |   2 +-
 .../micro/kernels/xtensa_hifimini/softmax.cc  |  21 +--
 .../micro/kernels/xtensa_hifimini/svdf.cc     |  10 +-
 .../xtensa_hifimini_staging/quantize.cc       |   4 +-
 .../xtensa_hifimini_staging/softmax.cc        |   7 +-
 .../kernels/xtensa_hifimini_staging/svdf.cc   |   6 +-
 tensorflow/lite/micro/micro_utils.h           |   8 +-
 tensorflow/lite/micro/test_helpers.cc         |   8 +-
 tensorflow/lite/micro/test_helpers.h          |   2 +-
 tensorflow/lite/micro/testing/test_utils.cc   |   4 +-
 59 files changed, 351 insertions(+), 344 deletions(-)

diff --git a/tensorflow/lite/micro/benchmarks/conv_benchmark.cc b/tensorflow/lite/micro/benchmarks/conv_benchmark.cc
index cbddbd23f26..d64b31dd39a 100644
--- a/tensorflow/lite/micro/benchmarks/conv_benchmark.cc
+++ b/tensorflow/lite/micro/benchmarks/conv_benchmark.cc
@@ -151,7 +151,7 @@ int main() {
   // Output scale of 50 is needed to accomodate a float range of [-6400, 6350]
   float output_scale = 50.0f;
 
-  // Create per-tensor quantized int8 input tensor.
+  // Create per-tensor quantized int8_t input tensor.
   int8_t input_quantized[32];
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
       input_values, input_quantized, input_dims, input_scale, input_zero_point);
@@ -163,7 +163,7 @@ int main() {
       tflite::testing::IntArrayFromInts(input_zero_points)};
   input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant};
 
-  // Create per-tensor quantized int8 filter tensor.
+  // Create per-tensor quantized int8_t filter tensor.
   int8_t filter_quantized[32 * 32];
   TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
       filter_values, filter_quantized, filter_dims, filter_scale,
@@ -176,7 +176,7 @@ int main() {
       tflite::testing::IntArrayFromInts(filter_zero_points)};
   filter_tensor.quantization = {kTfLiteAffineQuantization, &filter_quant};
 
-  // Create per-tensor quantized int32 bias tensor.
+  // Create per-tensor quantized int32_t bias tensor.
   int32_t bias_quantized[32];
   tflite::SymmetricQuantize(bias_values, bias_quantized, 32,
                             input_scale * output_scale);
@@ -192,7 +192,7 @@ int main() {
       tflite::testing::IntArrayFromInts(bias_zero_points)};
   bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quant};
 
-  // Create per-tensor quantized int8 output tensor.
+  // Create per-tensor quantized int8_t output tensor.
   int8_t output_quantized[32];
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
       output_quantized, output_dims, output_scale, output_zero_point);
diff --git a/tensorflow/lite/micro/benchmarks/depthwise_conv_benchmark.cc b/tensorflow/lite/micro/benchmarks/depthwise_conv_benchmark.cc
index ddaea133221..a4133680b9f 100644
--- a/tensorflow/lite/micro/benchmarks/depthwise_conv_benchmark.cc
+++ b/tensorflow/lite/micro/benchmarks/depthwise_conv_benchmark.cc
@@ -157,7 +157,7 @@ int main() {
   TfLiteIntArray* bias_dims = tflite::testing::IntArrayFromInts(bias_shape);
   TfLiteIntArray* output_dims = tflite::testing::IntArrayFromInts(output_shape);
 
-  // Create per-tensor quantized int8 input tensor.
+  // Create per-tensor quantized int8_t input tensor.
   int8_t input_quantized[input_elements];
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
       input_values, input_quantized, input_dims, input_scale, input_zero_point);
@@ -170,7 +170,7 @@ int main() {
       tflite::testing::IntArrayFromInts(input_zero_points)};
   input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant};
 
-  // Create per-tensor quantized int8 filter tensor.
+  // Create per-tensor quantized int8_t filter tensor.
   int8_t filter_quantized[filter_elements];
   TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
       filter_values, filter_quantized, filter_dims, filter_scale, 0);
@@ -183,7 +183,7 @@ int main() {
       tflite::testing::IntArrayFromInts(filter_zero_points)};
   filter_tensor.quantization = {kTfLiteAffineQuantization, &filter_quant};
 
-  // Create per-tensor quantized int32 bias tensor.
+  // Create per-tensor quantized int32_t bias tensor.
   int32_t bias_quantized[bias_elements];
   // See https://www.tensorflow.org/lite/performance/quantization_spec for a
   // detailed explanation of why bias scale is input_scale * filter_scale.
@@ -200,7 +200,7 @@ int main() {
       tflite::testing::IntArrayFromInts(bias_zero_points)};
   bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quant};
 
-  // Create per-tensor quantized int8 output tensor.
+  // Create per-tensor quantized int8_t output tensor.
   int8_t output_quantized[output_elements];
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
       output_quantized, output_dims, output_scale, output_zero_point);
diff --git a/tensorflow/lite/micro/examples/micro_speech/recognize_commands.cc b/tensorflow/lite/micro/examples/micro_speech/recognize_commands.cc
index 47bd10074d3..265c494670d 100644
--- a/tensorflow/lite/micro/examples/micro_speech/recognize_commands.cc
+++ b/tensorflow/lite/micro/examples/micro_speech/recognize_commands.cc
@@ -50,7 +50,7 @@ TfLiteStatus RecognizeCommands::ProcessLatestResults(
   if (latest_results->type != kTfLiteInt8) {
     TF_LITE_REPORT_ERROR(
         error_reporter_,
-        "The results for recognition should be int8 elements, but are %d",
+        "The results for recognition should be int8_t elements, but are %d",
         latest_results->type);
     return kTfLiteError;
   }
diff --git a/tensorflow/lite/micro/examples/person_detection_experimental/main_functions.cc b/tensorflow/lite/micro/examples/person_detection_experimental/main_functions.cc
index 53b87bffb41..f1ded80d1b9 100644
--- a/tensorflow/lite/micro/examples/person_detection_experimental/main_functions.cc
+++ b/tensorflow/lite/micro/examples/person_detection_experimental/main_functions.cc
@@ -32,7 +32,7 @@ const tflite::Model* model = nullptr;
 tflite::MicroInterpreter* interpreter = nullptr;
 TfLiteTensor* input = nullptr;
 
-// In order to use optimized tensorflow lite kernels, a signed int8 quantized
+// In order to use optimized tensorflow lite kernels, a signed int8_t quantized
 // model is preferred over the legacy unsigned model format. This means that
 // throughout this project, input images must be converted from unisgned to
 // signed format. The easiest and quickest way to convert from unsigned to
diff --git a/tensorflow/lite/micro/kernels/activations.cc b/tensorflow/lite/micro/kernels/activations.cc
index 128ab8ecbd8..2bdc0b5169a 100644
--- a/tensorflow/lite/micro/kernels/activations.cc
+++ b/tensorflow/lite/micro/kernels/activations.cc
@@ -53,7 +53,7 @@ inline void ReluQuantized(const ReluOpData& data,
                           T* output_data) {
   const int flat_size = MatchingFlatSize(input_shape, output_shape);
   for (int i = 0; i < flat_size; ++i) {
-    const int32 val = static_cast<int32_t>(input_data[i]);
+    const int32_t val = static_cast<int32_t>(input_data[i]);
     int32_t clamped =
         data.params.output_offset +
         MultiplyByQuantizedMultiplier(val - data.params.input_offset,
@@ -79,17 +79,17 @@ inline void CalculateReluOpData(const TfLiteTensor* input, TfLiteTensor* output,
   QuantizeMultiplier(real_multiplier, &data->params.output_multiplier,
                      &data->params.output_shift);
 
-  data->params.quantized_activation_min =
-      std::max(static_cast<int32_t>(std::numeric_limits<T>::min()),
-               output->params.zero_point +
-                   static_cast<int32>(roundf(act_min / output->params.scale)));
+  data->params.quantized_activation_min = std::max(
+      static_cast<int32_t>(std::numeric_limits<T>::min()),
+      output->params.zero_point +
+          static_cast<int32_t>(roundf(act_min / output->params.scale)));
   data->params.quantized_activation_max =
       act_max == std::numeric_limits<float>::infinity()
           ? static_cast<int32_t>(std::numeric_limits<T>::max())
-          : std::min(
-                static_cast<int32_t>(std::numeric_limits<T>::max()),
-                output->params.zero_point +
-                    static_cast<int32>(roundf(act_max / output->params.scale)));
+          : std::min(static_cast<int32_t>(std::numeric_limits<T>::max()),
+                     output->params.zero_point +
+                         static_cast<int32_t>(
+                             roundf(act_max / output->params.scale)));
   data->params.input_offset = input->params.zero_point;
   data->params.output_offset = output->params.zero_point;
 }
diff --git a/tensorflow/lite/micro/kernels/add.cc b/tensorflow/lite/micro/kernels/add.cc
index 7190f2af548..79a04875def 100644
--- a/tensorflow/lite/micro/kernels/add.cc
+++ b/tensorflow/lite/micro/kernels/add.cc
@@ -42,18 +42,18 @@ struct OpData {
   // and the special 16-bit -> 16bit quantized path
   int input1_shift;
   int input2_shift;
-  int32 output_activation_min;
-  int32 output_activation_max;
+  int32_t output_activation_min;
+  int32_t output_activation_max;
 
   // These fields are used only in the general 8-bit -> 8bit quantized path
-  int32 input1_multiplier;
-  int32 input2_multiplier;
-  int32 output_multiplier;
+  int32_t input1_multiplier;
+  int32_t input2_multiplier;
+  int32_t output_multiplier;
   int output_shift;
   int left_shift;
-  int32 input1_offset;
-  int32 input2_offset;
-  int32 output_offset;
+  int32_t input1_offset;
+  int32_t input2_offset;
+  int32_t output_offset;
 
   // Used only for float evals:
   float output_activation_min_f32;
diff --git a/tensorflow/lite/micro/kernels/arc_mli/conv.cc b/tensorflow/lite/micro/kernels/arc_mli/conv.cc
index 6f137590b91..905feb1a529 100644
--- a/tensorflow/lite/micro/kernels/arc_mli/conv.cc
+++ b/tensorflow/lite/micro/kernels/arc_mli/conv.cc
@@ -78,8 +78,8 @@ bool IsMliApplicable(TfLiteContext* context, const TfLiteTensor* input,
                      const TfLiteConvParams* params) {
   const auto* affine_quantization =
       reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
-  // MLI optimized version only supports int8 dataype, dilation factor of 1 and
-  // per-axis quantization of weights (no broadcasting/per-tensor)
+  // MLI optimized version only supports int8_t dataype, dilation factor of 1
+  // and per-axis quantization of weights (no broadcasting/per-tensor)
   bool ret_val = (filter->type == kTfLiteInt8) &&
                  (input->type == kTfLiteInt8) && (bias->type == kTfLiteInt32) &&
                  (params->dilation_width_factor == 1) &&
@@ -176,7 +176,7 @@ TfLiteStatus EvalMliQuantizedPerChannel(
     OpData* data, const TfLiteTensor* input, const TfLiteTensor* filter,
     const TfLiteTensor* bias, TfLiteTensor* output) {
   // Run Conv MLI kernel
-  // MLI optimized version only supports int8 dataype and dilation factor of 1
+  // MLI optimized version only supports int8_t dataype and dilation factor of 1
   if ((input->type == kTfLiteInt8) && (params->dilation_width_factor == 1) &&
       (params->dilation_height_factor == 1)) {
     mli_tensor mli_in = {0};
@@ -353,10 +353,10 @@ TfLiteStatus EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
   reference_integer_ops::ConvPerChannel(
       op_params, data->per_channel_output_multiplier,
       data->per_channel_output_shift, GetTensorShape(input),
-      GetTensorData<int8>(input), GetTensorShape(filter),
-      GetTensorData<int8>(filter), GetTensorShape(bias),
-      GetTensorData<int32>(bias), GetTensorShape(output),
-      GetTensorData<int8>(output));
+      GetTensorData<int8_t>(input), GetTensorShape(filter),
+      GetTensorData<int8_t>(filter), GetTensorShape(bias),
+      GetTensorData<int32_t>(bias), GetTensorShape(output),
+      GetTensorData<int8_t>(output));
   return kTfLiteOk;
 #else
   TF_LITE_KERNEL_LOG(context,
diff --git a/tensorflow/lite/micro/kernels/arc_mli/depthwise_conv.cc b/tensorflow/lite/micro/kernels/arc_mli/depthwise_conv.cc
index 9a7edcb847c..9f8a6b4004c 100644
--- a/tensorflow/lite/micro/kernels/arc_mli/depthwise_conv.cc
+++ b/tensorflow/lite/micro/kernels/arc_mli/depthwise_conv.cc
@@ -71,10 +71,10 @@ bool IsMliApplicable(TfLiteContext* context, const TfLiteTensor* input,
   const int in_ch = SizeOfDimension(input, 3);
   const int filters_num = SizeOfDimension(filter, 3);
 
-  // MLI optimized version only supports int8 dataype, dilation factor of 1 and
-  // per-axis quantization of weights (no broadcasting/per-tensor)
-  // (in_ch == filters_num) || (in_ch == 1)) is a forbidding of
-  // channel multiplier logic for multichannel input.
+  // MLI optimized version only supports int8_t dataype, dilation factor of 1
+  // and per-axis quantization of weights (no broadcasting/per-tensor) (in_ch ==
+  // filters_num) || (in_ch == 1)) is a forbidding of channel multiplier logic
+  // for multichannel input.
   bool ret_val = (filter->type == kTfLiteInt8) &&
                  (input->type == kTfLiteInt8) && (bias->type == kTfLiteInt32) &&
                  (params->dilation_width_factor == 1) &&
@@ -373,10 +373,10 @@ TfLiteStatus EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
   reference_integer_ops::DepthwiseConvPerChannel(
       op_params, data->per_channel_output_multiplier,
       data->per_channel_output_shift, GetTensorShape(input),
-      GetTensorData<int8>(input), GetTensorShape(filter),
-      GetTensorData<int8>(filter), GetTensorShape(bias),
-      GetTensorData<int32>(bias), GetTensorShape(output),
-      GetTensorData<int8>(output));
+      GetTensorData<int8_t>(input), GetTensorShape(filter),
+      GetTensorData<int8_t>(filter), GetTensorShape(bias),
+      GetTensorData<int32_t>(bias), GetTensorShape(output),
+      GetTensorData<int8_t>(output));
   return kTfLiteOk;
 #else
   TF_LITE_KERNEL_LOG(context,
diff --git a/tensorflow/lite/micro/kernels/arc_mli/fully_connected.cc b/tensorflow/lite/micro/kernels/arc_mli/fully_connected.cc
index 41c65faafb2..24b3fed0998 100644
--- a/tensorflow/lite/micro/kernels/arc_mli/fully_connected.cc
+++ b/tensorflow/lite/micro/kernels/arc_mli/fully_connected.cc
@@ -55,7 +55,7 @@ constexpr int kOutputTensor = 0;
 bool IsMliApplicable(TfLiteContext* context, const TfLiteTensor* input,
                      const TfLiteTensor* filter, const TfLiteTensor* bias,
                      const TfLiteFullyConnectedParams* params) {
-  // MLI optimized version only supports int8 dataype and no fused Relu and
+  // MLI optimized version only supports int8_t dataype and no fused Relu and
   // symmetric per-tensor quantization of weights (not per-axis)
   bool ret_val = (filter->type == kTfLiteInt8) &&
                  (input->type == kTfLiteInt8) && (bias->type == kTfLiteInt32) &&
diff --git a/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h b/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h
index 3f9933ada47..1764f1fdf45 100644
--- a/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h
+++ b/tensorflow/lite/micro/kernels/arc_mli/mli_tf_utils.h
@@ -34,7 +34,7 @@ static void ConvertToMliTensorData(const TfLiteTensor* tfT, mli_tensor* mliT) {
   } else if (tfT->type == kTfLiteInt32) {
     mliT->el_type = MLI_EL_ASYM_I32;
   } else {
-    TF_LITE_FATAL("Wrong data type. Expected int8 or int32.");
+    TF_LITE_FATAL("Wrong data type. Expected int8_t or int32_t.");
   }
 
   mliT->capacity = tfT->bytes;
diff --git a/tensorflow/lite/micro/kernels/arc_mli/pooling.cc b/tensorflow/lite/micro/kernels/arc_mli/pooling.cc
index d59a673d925..44bc966a8e2 100644
--- a/tensorflow/lite/micro/kernels/arc_mli/pooling.cc
+++ b/tensorflow/lite/micro/kernels/arc_mli/pooling.cc
@@ -43,7 +43,7 @@ enum MliPoolingType { AveragePooling = 0, MaxPooling = 1 };
 
 bool IsMliApplicable(TfLiteContext* context, const TfLiteTensor* input,
                      const TfLitePoolParams* params) {
-  // MLI optimized version only supports int8 dataype and no fused Relu
+  // MLI optimized version only supports int8_t dataype and no fused Relu
   return (input->type == kTfLiteInt8 && params->activation == kTfLiteActNone);
 }
 
diff --git a/tensorflow/lite/micro/kernels/arc_mli/pooling_slicing_test.cc b/tensorflow/lite/micro/kernels/arc_mli/pooling_slicing_test.cc
index 7f21a67d9f7..516b1bf63d6 100644
--- a/tensorflow/lite/micro/kernels/arc_mli/pooling_slicing_test.cc
+++ b/tensorflow/lite/micro/kernels/arc_mli/pooling_slicing_test.cc
@@ -41,7 +41,7 @@ void TestAveragePoolingQuantized(
     const T* expected_output_data, const int* output_dims_data,
     float output_min, float output_max, TfLitePadding padding,
     TfLiteFusedActivation activation, T* output_data) {
-  static_assert(sizeof(T) == 1, "Only int8/uint8 data types allowed.");
+  static_assert(sizeof(T) == 1, "Only int8_t/uint8_t data types allowed.");
 
   TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
   TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
@@ -112,7 +112,7 @@ void TestMaxPoolQuantized(const int* input_dims_data, const T* input_data,
                           float output_min, float output_max,
                           const int* output_dims_data, TfLitePadding padding,
                           TfLiteFusedActivation activation, T* output_data) {
-  static_assert(sizeof(T) == 1, "Only int8/uint8 data types allowed.");
+  static_assert(sizeof(T) == 1, "Only int8_t/uint8_t data types allowed.");
 
   TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
   TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
diff --git a/tensorflow/lite/micro/kernels/arg_min_max.cc b/tensorflow/lite/micro/kernels/arg_min_max.cc
index 3baf9f04e61..12ac0019c05 100644
--- a/tensorflow/lite/micro/kernels/arg_min_max.cc
+++ b/tensorflow/lite/micro/kernels/arg_min_max.cc
@@ -74,18 +74,19 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node, bool is_arg_max) {
           break;
         default:
           TF_LITE_KERNEL_LOG(context,
-                             "Only float32, uint8 and int8 are "
+                             "Only float32, uint8_t and int8_t are "
                              "supported currently, got %s.",
                              TfLiteTypeGetName(input->type));
           return kTfLiteError;
       }
     } else {
-      TF_LITE_KERNEL_LOG(context, "Only int32 are supported currently, got %s.",
+      TF_LITE_KERNEL_LOG(context,
+                         "Only int32_t are supported currently, got %s.",
                          TfLiteTypeGetName(output->type));
       return kTfLiteError;
     }
   } else {
-    TF_LITE_KERNEL_LOG(context, "Only int32 are supported currently, got %s.",
+    TF_LITE_KERNEL_LOG(context, "Only int32_t are supported currently, got %s.",
                        TfLiteTypeGetName(axis->type));
     return kTfLiteError;
   }
diff --git a/tensorflow/lite/micro/kernels/circular_buffer.cc b/tensorflow/lite/micro/kernels/circular_buffer.cc
index 876ea569196..b5a8ae1be3b 100644
--- a/tensorflow/lite/micro/kernels/circular_buffer.cc
+++ b/tensorflow/lite/micro/kernels/circular_buffer.cc
@@ -92,7 +92,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 
   TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
 
-  // The circular buffer custom operator currently only supports int8.
+  // The circular buffer custom operator currently only supports int8_t.
   TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8);
 
   // TODO(b/132070898): Use statically slotted OpData structures until a
diff --git a/tensorflow/lite/micro/kernels/cmsis-nn/add.cc b/tensorflow/lite/micro/kernels/cmsis-nn/add.cc
index 4190e041d9e..c98e7a2c329 100644
--- a/tensorflow/lite/micro/kernels/cmsis-nn/add.cc
+++ b/tensorflow/lite/micro/kernels/cmsis-nn/add.cc
@@ -41,18 +41,18 @@ struct OpData {
   // and the special 16-bit -> 16bit quantized path
   int input1_shift;
   int input2_shift;
-  int32 output_activation_min;
-  int32 output_activation_max;
+  int32_t output_activation_min;
+  int32_t output_activation_max;
 
   // These fields are used only in the general 8-bit -> 8bit quantized path
-  int32 input1_multiplier;
-  int32 input2_multiplier;
-  int32 output_multiplier;
+  int32_t input1_multiplier;
+  int32_t input2_multiplier;
+  int32_t output_multiplier;
   int output_shift;
   int left_shift;
-  int32 input1_offset;
-  int32 input2_offset;
-  int32 output_offset;
+  int32_t input1_offset;
+  int32_t input2_offset;
+  int32_t output_offset;
 };
 
 TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteAddParams* params,
diff --git a/tensorflow/lite/micro/kernels/cmsis-nn/conv.cc b/tensorflow/lite/micro/kernels/cmsis-nn/conv.cc
index 64e0b22a5f5..834f107dad0 100644
--- a/tensorflow/lite/micro/kernels/cmsis-nn/conv.cc
+++ b/tensorflow/lite/micro/kernels/cmsis-nn/conv.cc
@@ -304,7 +304,7 @@ TfLiteStatus EvalQuantizedPerChannel(
   arm_status status = arm_convolve_wrapper_s8(
       &ctx, &conv_params, &quant_params, &input_dims,
       GetTensorData<int8_t>(input), &filter_dims, GetTensorData<int8_t>(filter),
-      &bias_dims, GetTensorData<int32>(bias), &output_dims,
+      &bias_dims, GetTensorData<int32_t>(bias), &output_dims,
       GetTensorData<int8_t>(output));
 
   if (status == ARM_MATH_SUCCESS) {
@@ -332,10 +332,10 @@ TfLiteStatus EvalQuantizedPerChannel(
   reference_integer_ops::ConvPerChannel(
       op_params, data->per_channel_output_multiplier,
       data->per_channel_output_shift, GetTensorShape(input),
-      GetTensorData<int8>(input), GetTensorShape(filter),
-      GetTensorData<int8>(filter), GetTensorShape(bias),
-      GetTensorData<int32>(bias), GetTensorShape(output),
-      GetTensorData<int8>(output));
+      GetTensorData<int8_t>(input), GetTensorShape(filter),
+      GetTensorData<int8_t>(filter), GetTensorShape(bias),
+      GetTensorData<int32_t>(bias), GetTensorShape(output),
+      GetTensorData<int8_t>(output));
 
 #endif
   return kTfLiteOk;
diff --git a/tensorflow/lite/micro/kernels/cmsis-nn/depthwise_conv.cc b/tensorflow/lite/micro/kernels/cmsis-nn/depthwise_conv.cc
index 53d2d5692ec..457b3f854de 100644
--- a/tensorflow/lite/micro/kernels/cmsis-nn/depthwise_conv.cc
+++ b/tensorflow/lite/micro/kernels/cmsis-nn/depthwise_conv.cc
@@ -304,7 +304,7 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
                          &ctx, &dw_conv_params, &quant_params, &input_dims,
                          GetTensorData<int8_t>(input), &filter_dims,
                          GetTensorData<int8_t>(filter), &bias_dims,
-                         GetTensorData<int32>(bias), &output_dims,
+                         GetTensorData<int32_t>(bias), &output_dims,
                          GetTensorData<int8_t>(output)),
                      ARM_MATH_SUCCESS);
   } else {
@@ -327,10 +327,10 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
     reference_integer_ops::DepthwiseConvPerChannel(
         op_params, data->per_channel_output_multiplier,
         data->per_channel_output_shift, GetTensorShape(input),
-        GetTensorData<int8>(input), GetTensorShape(filter),
-        GetTensorData<int8>(filter), GetTensorShape(bias),
-        GetTensorData<int32>(bias), GetTensorShape(output),
-        GetTensorData<int8>(output));
+        GetTensorData<int8_t>(input), GetTensorShape(filter),
+        GetTensorData<int8_t>(filter), GetTensorShape(bias),
+        GetTensorData<int32_t>(bias), GetTensorShape(output),
+        GetTensorData<int8_t>(output));
   }
 }
 
diff --git a/tensorflow/lite/micro/kernels/cmsis-nn/fully_connected.cc b/tensorflow/lite/micro/kernels/cmsis-nn/fully_connected.cc
index 1ea7f98ea1b..074f4a9f251 100644
--- a/tensorflow/lite/micro/kernels/cmsis-nn/fully_connected.cc
+++ b/tensorflow/lite/micro/kernels/cmsis-nn/fully_connected.cc
@@ -99,7 +99,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
                                         input->type, input, filter, bias,
                                         output, data));
 
-  if (input->type == kTfLiteInt8 && nullptr != GetTensorData<int32>(bias)) {
+  if (input->type == kTfLiteInt8 && nullptr != GetTensorData<int32_t>(bias)) {
     RuntimeShape filter_shape = GetTensorShape(filter);
     RuntimeShape output_shape = GetTensorShape(output);
 
@@ -130,7 +130,7 @@ TfLiteStatus EvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node,
                                const TfLiteTensor* bias, TfLiteTensor* output) {
   // The 'if' condition can be removed when null handling of bias is added to
   // arm_fully_connected_s8
-  if (nullptr != GetTensorData<int32>(bias)) {
+  if (nullptr != GetTensorData<int32_t>(bias)) {
     RuntimeShape output_shape = GetTensorShape(output);
     TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2);
     const int batches = output_shape.Dims(0);
@@ -189,7 +189,7 @@ TfLiteStatus EvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node,
         arm_fully_connected_s8(&ctx, &fc_params, &quant_params, &input_dims,
                                GetTensorData<int8_t>(input), &filter_dims,
                                GetTensorData<int8_t>(filter), &bias_dims,
-                               GetTensorData<int32>(bias), &output_dims,
+                               GetTensorData<int32_t>(bias), &output_dims,
                                GetTensorData<int8_t>(output)),
         ARM_MATH_SUCCESS);
   } else {
diff --git a/tensorflow/lite/micro/kernels/cmsis-nn/softmax.cc b/tensorflow/lite/micro/kernels/cmsis-nn/softmax.cc
index b18d1c9b1e8..790af35f217 100644
--- a/tensorflow/lite/micro/kernels/cmsis-nn/softmax.cc
+++ b/tensorflow/lite/micro/kernels/cmsis-nn/softmax.cc
@@ -38,7 +38,8 @@ TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context,
       TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8);
       if (output->type == kTfLiteInt16) {
         TF_LITE_ENSURE_EQ(context, output->params.zero_point, -32768);
-        // NOTE: Current int16 softmax output does not require symmetric scaling
+        // NOTE: Current int16_t softmax output does not require symmetric
+        // scaling
         // - so no need to verify scale here.
       } else {
         TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8);
diff --git a/tensorflow/lite/micro/kernels/comparisons.cc b/tensorflow/lite/micro/kernels/comparisons.cc
index ed814527e94..ed7a20086f8 100644
--- a/tensorflow/lite/micro/kernels/comparisons.cc
+++ b/tensorflow/lite/micro/kernels/comparisons.cc
@@ -626,12 +626,12 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
     auto input2_offset = -input2->params.zero_point;
     const int kLeftShift = 8;
 
-    int32 input1_multiplier;
+    int32_t input1_multiplier;
     int input1_shift;
     QuantizeMultiplierSmallerThanOneExp(
         static_cast<double>(input1->params.scale), &input1_multiplier,
         &input1_shift);
-    int32 input2_multiplier;
+    int32_t input2_multiplier;
     int input2_shift;
     QuantizeMultiplierSmallerThanOneExp(
         static_cast<double>(input2->params.scale), &input2_multiplier,
diff --git a/tensorflow/lite/micro/kernels/concatenation.cc b/tensorflow/lite/micro/kernels/concatenation.cc
index fb47349f283..f64362745be 100644
--- a/tensorflow/lite/micro/kernels/concatenation.cc
+++ b/tensorflow/lite/micro/kernels/concatenation.cc
@@ -122,7 +122,7 @@ void EvalQuantizedUInt8(TfLiteContext* context, TfLiteNode* node) {
   reference_ops::ConcatenationWithScaling(
       data->params, inputs_shape_ptr, inputs_data,
       tflite::micro::GetTensorShape(output),
-      tflite::micro::GetTensorData<uint8>(output));
+      tflite::micro::GetTensorData<uint8_t>(output));
 }
 
 void* Init(TfLiteContext* context, const char* buffer, size_t length) {
diff --git a/tensorflow/lite/micro/kernels/conv.cc b/tensorflow/lite/micro/kernels/conv.cc
index ff20cf684d6..b04906a147b 100644
--- a/tensorflow/lite/micro/kernels/conv.cc
+++ b/tensorflow/lite/micro/kernels/conv.cc
@@ -237,13 +237,13 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
   reference_integer_ops::ConvPerChannel(
       op_params, data.per_channel_output_multiplier,
       data.per_channel_output_shift, tflite::micro::GetTensorShape(input),
-      tflite::micro::GetTensorData<int8>(input),
+      tflite::micro::GetTensorData<int8_t>(input),
       tflite::micro::GetTensorShape(filter),
-      tflite::micro::GetTensorData<int8>(filter),
+      tflite::micro::GetTensorData<int8_t>(filter),
       tflite::micro::GetTensorShape(bias),
-      tflite::micro::GetTensorData<int32>(bias),
+      tflite::micro::GetTensorData<int32_t>(bias),
       tflite::micro::GetTensorShape(output),
-      tflite::micro::GetTensorData<int8>(output));
+      tflite::micro::GetTensorData<int8_t>(output));
 }
 
 void EvalFloat(TfLiteContext* context, TfLiteNode* node,
diff --git a/tensorflow/lite/micro/kernels/conv_test.cc b/tensorflow/lite/micro/kernels/conv_test.cc
index d73f03e34a1..be646d63659 100644
--- a/tensorflow/lite/micro/kernels/conv_test.cc
+++ b/tensorflow/lite/micro/kernels/conv_test.cc
@@ -601,7 +601,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
   TfLiteIntArray* output_dims =
       tflite::testing::IntArrayFromInts(tflite::testing::kOutputShape);
 
-  // Create per-layer quantized int8 input tensor.
+  // Create per-layer quantized int8_t input tensor.
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
       tflite::testing::kInputData, input_quantized, input_dims, input_scale, 0);
   int input_zero_points[2] = {1, 0};
@@ -611,7 +611,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
       tflite::testing::IntArrayFromInts(input_zero_points), 0};
   input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant};
 
-  // Create per-layer quantized int8 filter tensor.
+  // Create per-layer quantized int8_t filter tensor.
   TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
       tflite::testing::kFilterData, filter_quantized, filter_dims, filter_scale,
       0);
@@ -622,7 +622,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
       tflite::testing::IntArrayFromInts(filter_zero_points), 0};
   filter_tensor.quantization = {kTfLiteAffineQuantization, &filter_quant};
 
-  // Create per-layer quantized int32 bias tensor.
+  // Create per-layer quantized int32_t bias tensor.
   tflite::SymmetricQuantize(tflite::testing::kBiasData, bias_quantized,
                             tflite::testing::kBiasElements,
                             input_scale * output_scale);
@@ -636,7 +636,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
       tflite::testing::IntArrayFromInts(bias_zero_points), 0};
   bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quant};
 
-  // Create per-layer quantized int8 output tensor.
+  // Create per-layer quantized int8_t output tensor.
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
       output_data, output_dims, output_scale, 0 /* quantized dimension */);
   int output_zero_points[2] = {1, 0};
@@ -723,7 +723,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
   // Output scale of 50 is needed to accomodate a float range of [-6400, 6350]
   float output_scale = 50.0f;
 
-  // Create per-tensor quantized int8 input tensor.
+  // Create per-tensor quantized int8_t input tensor.
   int8_t input_quantized[kSampleSize];
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
       input_values, input_quantized, input_dims, input_scale, input_zero_point);
@@ -735,7 +735,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
       tflite::testing::IntArrayFromInts(input_zero_points), 0};
   input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant};
 
-  // Create per-tensor quantized int8 filter tensor.
+  // Create per-tensor quantized int8_t filter tensor.
   int8_t filter_quantized[kNumFilters * kSampleSize];
   TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
       filter_values, filter_quantized, filter_dims, filter_scale,
@@ -748,7 +748,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
       tflite::testing::IntArrayFromInts(filter_zero_points), 0};
   filter_tensor.quantization = {kTfLiteAffineQuantization, &filter_quant};
 
-  // Create per-tensor quantized int32 bias tensor.
+  // Create per-tensor quantized int32_t bias tensor.
   int32_t bias_quantized[kSampleSize];
   tflite::SymmetricQuantize(bias_values, bias_quantized, kSampleSize,
                             input_scale * output_scale);
@@ -764,7 +764,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
       tflite::testing::IntArrayFromInts(bias_zero_points), 0};
   bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quant};
 
-  // Create per-tensor quantized int8 output tensor.
+  // Create per-tensor quantized int8_t output tensor.
   int8_t output_quantized[kSampleSize];
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
       output_quantized, output_dims, output_scale, output_zero_point);
diff --git a/tensorflow/lite/micro/kernels/depthwise_conv.cc b/tensorflow/lite/micro/kernels/depthwise_conv.cc
index 687537e2c59..2f6083d56c1 100644
--- a/tensorflow/lite/micro/kernels/depthwise_conv.cc
+++ b/tensorflow/lite/micro/kernels/depthwise_conv.cc
@@ -123,7 +123,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   int filter_width = SizeOfDimension(filter, 2);
   int filter_height = SizeOfDimension(filter, 1);
 
-  // Per channel quantization is only needed for int8 inference. For other
+  // Per channel quantization is only needed for int8_t inference. For other
   // quantized types, only a single scale and zero point is needed.
   const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension];
   // Dynimically allocate per-channel quantization parameters.
@@ -221,13 +221,13 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
   reference_integer_ops::DepthwiseConvPerChannel(
       op_params, data.per_channel_output_multiplier,
       data.per_channel_output_shift, tflite::micro::GetTensorShape(input),
-      tflite::micro::GetTensorData<int8>(input),
+      tflite::micro::GetTensorData<int8_t>(input),
       tflite::micro::GetTensorShape(filter),
-      tflite::micro::GetTensorData<int8>(filter),
+      tflite::micro::GetTensorData<int8_t>(filter),
       tflite::micro::GetTensorShape(bias),
-      tflite::micro::GetTensorData<int32>(bias),
+      tflite::micro::GetTensorData<int32_t>(bias),
       tflite::micro::GetTensorShape(output),
-      tflite::micro::GetTensorData<int8>(output));
+      tflite::micro::GetTensorData<int8_t>(output));
 }
 
 void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
diff --git a/tensorflow/lite/micro/kernels/depthwise_conv_test.cc b/tensorflow/lite/micro/kernels/depthwise_conv_test.cc
index 5e35d54dcb1..e16e9f893cb 100644
--- a/tensorflow/lite/micro/kernels/depthwise_conv_test.cc
+++ b/tensorflow/lite/micro/kernels/depthwise_conv_test.cc
@@ -787,7 +787,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
   TfLiteIntArray* bias_dims = tflite::testing::IntArrayFromInts(bias_shape);
   TfLiteIntArray* output_dims = tflite::testing::IntArrayFromInts(output_shape);
 
-  // Create per-layer quantized int8 input tensor.
+  // Create per-layer quantized int8_t input tensor.
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
       input_values, input_quantized, input_dims, input_scale, 0);
   int input_zero_points[2] = {1, 0};
@@ -797,7 +797,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
       tflite::testing::IntArrayFromInts(input_zero_points), 0};
   input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant};
 
-  // Create per-layer quantized int8 filter tensor.
+  // Create per-layer quantized int8_t filter tensor.
   TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
       filter_values, filter_quantized, filter_dims, filter_scale, 0);
   int filter_zero_points[2] = {1, 0};
@@ -807,7 +807,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
       tflite::testing::IntArrayFromInts(filter_zero_points), 0};
   filter_tensor.quantization = {kTfLiteAffineQuantization, &filter_quant};
 
-  // Create per-layer quantized int32 bias tensor.
+  // Create per-layer quantized int32_t bias tensor.
   tflite::SymmetricQuantize(bias_values, bias_quantized, bias_elements,
                             input_scale * output_scale);
   TfLiteTensor bias_tensor =
@@ -820,7 +820,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
       tflite::testing::IntArrayFromInts(bias_zero_points), 0};
   bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quant};
 
-  // Create per-layer quantized int8 output tensor.
+  // Create per-layer quantized int8_t output tensor.
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
       output_data, output_dims, output_scale, 0);
   int output_zero_points[2] = {1, 0};
@@ -922,7 +922,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
   TfLiteIntArray* bias_dims = tflite::testing::IntArrayFromInts(bias_shape);
   TfLiteIntArray* output_dims = tflite::testing::IntArrayFromInts(output_shape);
 
-  // Create per-tensor quantized int8 input tensor.
+  // Create per-tensor quantized int8_t input tensor.
   int8_t input_quantized[input_elements];
   TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
       input_values, input_quantized, input_dims, input_scale, input_zero_point);
@@ -935,7 +935,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
       tflite::testing::IntArrayFromInts(input_zero_points), 0};
   input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant};
 
-  // Create per-tensor quantized int8 filter tensor.
+  // Create per-tensor quantized int8_t filter tensor.
   int8_t filter_quantized[filter_elements];
   TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
       filter_values, filter_quantized, filter_dims, filter_scale, 0);
@@ -948,7 +948,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
       tflite::testing::IntArrayFromInts(filter_zero_points), 0};
   filter_tensor.quantization = {kTfLiteAffineQuantization, &filter_quant};
 
-  // Create per-tensor quantized int32 bias tensor.
+  // Create per-tensor quantized int32_t bias tensor.
   int32_t bias_quantized[bias_elements];
   // See https://www.tensorflow.org/lite/performance/quantization_spec for a
   // detailed explanation of why bias scale is input_scale * filter_scale.
@@ -965,7 +965,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
       tflite::testing::IntArrayFromInts(bias_zero_points), 0};
   bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quant};
 
-  // Create per-tensor quantized int8 output tensor.
+  // Create per-tensor quantized int8_t output tensor.
   int8_t output_quantized[output_elements];
   TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
       output_quantized, output_dims, output_scale, output_zero_point);
diff --git a/tensorflow/lite/micro/kernels/hard_swish.cc b/tensorflow/lite/micro/kernels/hard_swish.cc
index fecb8bda409..3e8ecca7cc3 100644
--- a/tensorflow/lite/micro/kernels/hard_swish.cc
+++ b/tensorflow/lite/micro/kernels/hard_swish.cc
@@ -104,7 +104,8 @@ TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) {
     } break;
     default: {
       TF_LITE_KERNEL_LOG(
-          context, "Only float32/int8/uint8 are supported currently, got %s",
+          context,
+          "Only float32/int8_t/uint8_t are supported currently, got %s",
           TfLiteTypeGetName(input->type));
       return kTfLiteError;
     }
diff --git a/tensorflow/lite/micro/kernels/l2norm.cc b/tensorflow/lite/micro/kernels/l2norm.cc
index 16a982344e1..ab4067058a4 100644
--- a/tensorflow/lite/micro/kernels/l2norm.cc
+++ b/tensorflow/lite/micro/kernels/l2norm.cc
@@ -97,12 +97,12 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
     TF_LITE_L2NORM(reference_ops);
 #undef TF_LITE_L2NORM
   } else if (output->type == kTfLiteUInt8) {
-#define TF_LITE_L2NORM(type)                                                 \
-  tflite::L2NormalizationParams op_params;                                   \
-  op_params.input_zero_point = input->params.zero_point;                     \
-  type::L2Normalization(op_params, GetTensorShape(input),                    \
-                        GetTensorData<uint8>(input), GetTensorShape(output), \
-                        GetTensorData<uint8>(output))
+#define TF_LITE_L2NORM(type)                                                   \
+  tflite::L2NormalizationParams op_params;                                     \
+  op_params.input_zero_point = input->params.zero_point;                       \
+  type::L2Normalization(op_params, GetTensorShape(input),                      \
+                        GetTensorData<uint8_t>(input), GetTensorShape(output), \
+                        GetTensorData<uint8_t>(output))
 
     TF_LITE_L2NORM(reference_ops);
 #undef TF_LITE_L2NORM
@@ -115,8 +115,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
     const int outer_size =
         MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
     reference_integer_ops::L2Normalization(input->params.zero_point, outer_size,
-                                           depth, GetTensorData<int8>(input),
-                                           GetTensorData<int8>(output));
+                                           depth, GetTensorData<int8_t>(input),
+                                           GetTensorData<int8_t>(output));
   } else {
     TF_LITE_KERNEL_LOG(context, "Output type is %s, requires float.",
                        TfLiteTypeGetName(output->type));
diff --git a/tensorflow/lite/micro/kernels/l2norm_test.cc b/tensorflow/lite/micro/kernels/l2norm_test.cc
index 39eb92a8849..89029bb260a 100644
--- a/tensorflow/lite/micro/kernels/l2norm_test.cc
+++ b/tensorflow/lite/micro/kernels/l2norm_test.cc
@@ -23,7 +23,7 @@ namespace tflite {
 namespace testing {
 namespace {
 
-// used to set the quantization parameters for the int8 and uint8 tests
+// used to set the quantization parameters for the int8_t and uint8_t tests
 constexpr float kInputMin = -2.0;
 constexpr float kInputMax = 2.0;
 constexpr float kOutputMin = -1.0;
@@ -50,7 +50,7 @@ TfLiteTensor CreateL2NormTensor(const float* data, TfLiteIntArray* dims,
   return CreateFloatTensor(data, dims);
 }
 
-TfLiteTensor CreateL2NormTensor(const uint8* data, TfLiteIntArray* dims,
+TfLiteTensor CreateL2NormTensor(const uint8_t* data, TfLiteIntArray* dims,
                                 bool is_input) {
   TfLiteTensor tensor;
 
@@ -64,7 +64,7 @@ TfLiteTensor CreateL2NormTensor(const uint8* data, TfLiteIntArray* dims,
   return tensor;
 }
 
-TfLiteTensor CreateL2NormTensor(const int8* data, TfLiteIntArray* dims,
+TfLiteTensor CreateL2NormTensor(const int8_t* data, TfLiteIntArray* dims,
                                 bool is_input) {
   TfLiteTensor tensor;
 
diff --git a/tensorflow/lite/micro/kernels/pad.cc b/tensorflow/lite/micro/kernels/pad.cc
index 7ac39943c5c..b0ddcfda0de 100644
--- a/tensorflow/lite/micro/kernels/pad.cc
+++ b/tensorflow/lite/micro/kernels/pad.cc
@@ -50,7 +50,7 @@ struct PadContext {
 
     resizing_category = ResizingCategory::kGenericResize;
     const int paddings_total = GetTensorShape(paddings).FlatSize();
-    const int32* paddings_data = GetTensorData<int32>(paddings);
+    const int32_t* paddings_data = GetTensorData<int32_t>(paddings);
     // Paddings will be a n,2 array, and we need to detect 4D arrays with the
     // pattern { {0,0}, {a, b}, {c, d}, {0,0} }.
     if (IsConstantTensor(paddings) && paddings_total == 8 &&
@@ -83,7 +83,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
                     op_context.output->dims->size * 2);
 
   // On Micro, outputs must be properly sized by the converter.
-  const int32* paddings_data = GetTensorData<int32>(op_context.paddings);
+  const int32_t* paddings_data = GetTensorData<int32_t>(op_context.paddings);
   for (int i = 0; i < op_context.output->dims->size; i++) {
     int output_dim = op_context.output->dims->data[i];
     int expected_dim = op_context.input->dims->data[i] + paddings_data[i * 2] +
@@ -107,7 +107,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   }
 
   // Create before and after padding arrays that are accepted by the kernel.
-  const int32* paddings_data = GetTensorData<int32>(op_context.paddings);
+  const int32_t* paddings_data = GetTensorData<int32_t>(op_context.paddings);
 
   tflite::PadParams op_params;
   memset(&op_params, 0, sizeof(PadParams));
diff --git a/tensorflow/lite/micro/kernels/pooling_test.cc b/tensorflow/lite/micro/kernels/pooling_test.cc
index 23d4b506d8e..73d5d80de7c 100644
--- a/tensorflow/lite/micro/kernels/pooling_test.cc
+++ b/tensorflow/lite/micro/kernels/pooling_test.cc
@@ -105,7 +105,7 @@ void TestAveragePoolingQuantized(
     std::initializer_list<int> output_dims_data, float output_min,
     float output_max, TfLitePadding padding, TfLiteFusedActivation activation,
     T* output_data) {
-  static_assert(sizeof(T) == 1, "Only int8/uint8 data types allowed.");
+  static_assert(sizeof(T) == 1, "Only int8_t/uint8_t data types allowed.");
 
   TfLiteIntArray* input_dims = IntArrayFromInitializer(input_dims_data);
   TfLiteIntArray* output_dims = IntArrayFromInitializer(output_dims_data);
@@ -246,7 +246,7 @@ void TestMaxPoolQuantized(std::initializer_list<int> input_dims_data,
                           std::initializer_list<int> output_dims_data,
                           TfLitePadding padding,
                           TfLiteFusedActivation activation, T* output_data) {
-  static_assert(sizeof(T) == 1, "Only int8/uint8 data types allowed.");
+  static_assert(sizeof(T) == 1, "Only int8_t/uint8_t data types allowed.");
 
   TfLiteIntArray* input_dims = IntArrayFromInitializer(input_dims_data);
   TfLiteIntArray* output_dims = IntArrayFromInitializer(output_dims_data);
diff --git a/tensorflow/lite/micro/kernels/prelu.cc b/tensorflow/lite/micro/kernels/prelu.cc
index d1d8f977850..3adb63312af 100644
--- a/tensorflow/lite/micro/kernels/prelu.cc
+++ b/tensorflow/lite/micro/kernels/prelu.cc
@@ -120,7 +120,7 @@ TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) {
     } break;
     default:
       TF_LITE_KERNEL_LOG(
-          context, "Only float32 and uint8 are supported currently, got %d.",
+          context, "Only float32 and uint8_t are supported currently, got %d.",
           TfLiteTypeGetName(input->type));
       return kTfLiteError;
   }
diff --git a/tensorflow/lite/micro/kernels/quantization_util_test.cc b/tensorflow/lite/micro/kernels/quantization_util_test.cc
index 5929f5fd7b5..76ee9eefb7e 100644
--- a/tensorflow/lite/micro/kernels/quantization_util_test.cc
+++ b/tensorflow/lite/micro/kernels/quantization_util_test.cc
@@ -203,7 +203,7 @@ TF_LITE_MICRO_TEST(QuantizationUtilTest_SafeCast) {
 //  128       | 10.0
 TF_LITE_MICRO_TEST(QuantizationUtilTest_ChooseQuantizationParams) {
   tflite::QuantizationParams qp =
-      tflite::ChooseQuantizationParams<uint8>(-10.0, 30.0);
+      tflite::ChooseQuantizationParams<uint8_t>(-10.0, 30.0);
   TF_LITE_MICRO_EXPECT_NEAR(qp.scale, 0.156863, 1e-5);
   TF_LITE_MICRO_EXPECT_EQ(qp.zero_point, 64);
 }
@@ -211,7 +211,7 @@ TF_LITE_MICRO_TEST(QuantizationUtilTest_ChooseQuantizationParams) {
 TF_LITE_MICRO_TEST(
     QuantizationUtilTest_ChooseQuantizationParamsZeroPointOnMinBoundary) {
   tflite::QuantizationParams qp =
-      tflite::ChooseQuantizationParams<uint8>(0.0, 30.0);
+      tflite::ChooseQuantizationParams<uint8_t>(0.0, 30.0);
   TF_LITE_MICRO_EXPECT_NEAR(qp.scale, 0.117647, 1e-5);
   TF_LITE_MICRO_EXPECT_EQ(qp.zero_point, 0);
 }
@@ -219,7 +219,7 @@ TF_LITE_MICRO_TEST(
 TF_LITE_MICRO_TEST(
     QuantizationUtilTest_ChooseQuantizationParamsEmptyRangeZero) {
   tflite::QuantizationParams qp =
-      tflite::ChooseQuantizationParams<uint8>(0.0, 0.0);
+      tflite::ChooseQuantizationParams<uint8_t>(0.0, 0.0);
   TF_LITE_MICRO_EXPECT_NEAR(qp.scale, 0.0, 1e-5);
   TF_LITE_MICRO_EXPECT_EQ(qp.zero_point, 0);
 }
@@ -227,7 +227,7 @@ TF_LITE_MICRO_TEST(
 TF_LITE_MICRO_TEST(
     QuantizationUtilTest_ChooseQuantizationParamsZeroPointOnMaxBoundary) {
   tflite::QuantizationParams qp =
-      tflite::ChooseQuantizationParams<uint8>(-10.0, 0.0);
+      tflite::ChooseQuantizationParams<uint8_t>(-10.0, 0.0);
   TF_LITE_MICRO_EXPECT_NEAR(qp.scale, 0.039216, 1e-5);
   TF_LITE_MICRO_EXPECT_EQ(qp.zero_point, 255);
 }
@@ -418,11 +418,11 @@ TF_LITE_MICRO_TEST(QuantizationUtilTest_QuantizeMultiplierArray) {
                             0.125, 0.25, 0.5, 1,    2,     4};
 
   const int size = 13;
-  int32 effective_scale_significand[size];
+  int32_t effective_scale_significand[size];
   int effective_scale_shift[size];
   tflite::QuantizeMultiplierArray(weights, size, effective_scale_significand,
                                   effective_scale_shift);
-  const int32 expected_effective_scale_significand[] = {
+  const int32_t expected_effective_scale_significand[] = {
       -1073741824,  // float scale = -4
       -1073741824,  // float scale = -2
       -1073741824,  // float scale = -1
diff --git a/tensorflow/lite/micro/kernels/quantize.cc b/tensorflow/lite/micro/kernels/quantize.cc
index 2817697919f..309d2b59b7d 100644
--- a/tensorflow/lite/micro/kernels/quantize.cc
+++ b/tensorflow/lite/micro/kernels/quantize.cc
@@ -152,7 +152,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 
 // This Op (QUANTIZE) quantizes the input and produces quantized output.
 // AffineQuantize takes scale and zero point and quantizes the float value to
-// quantized output, in int8 or uint8 format.
+// quantized output, in int8_t or uint8_t format.
 TfLiteRegistration Register_QUANTIZE() {
   return {/*init=*/quantize::Init,
           /*free=*/nullptr,
diff --git a/tensorflow/lite/micro/kernels/quantize_test.cc b/tensorflow/lite/micro/kernels/quantize_test.cc
index b6f885d09e7..2e76fc566af 100644
--- a/tensorflow/lite/micro/kernels/quantize_test.cc
+++ b/tensorflow/lite/micro/kernels/quantize_test.cc
@@ -32,7 +32,7 @@ void ValidateQuantizeGoldens(TfLiteTensor* tensors, int tensors_size,
   TfLiteContext context;
   PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
 
-  // Version 1 of quantize supports int8 and uint8 quantization.
+  // Version 1 of quantize supports int8_t and uint8_t quantization.
   ::tflite::AllOpsResolver resolver;
   const TfLiteRegistration* registration =
       resolver.FindOp(tflite::BuiltinOperator_QUANTIZE);
diff --git a/tensorflow/lite/micro/kernels/reduce.cc b/tensorflow/lite/micro/kernels/reduce.cc
index 464b7faafad..8d0dbe1ad34 100644
--- a/tensorflow/lite/micro/kernels/reduce.cc
+++ b/tensorflow/lite/micro/kernels/reduce.cc
@@ -50,7 +50,7 @@ TfLiteStatus PrepareSimple(TfLiteContext* context, TfLiteNode* node) {
 
 TfLiteStatus PrepareMeanOrSum(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_OK(context, PrepareSimple(context, node));
-  // TODO(b/144955155): Support uint8(b/144955155) and int8(b/144955018)
+  // TODO(b/144955155): Support uint8_t(b/144955155) and int8_t(b/144955018)
   return kTfLiteOk;
 }
 
@@ -58,7 +58,7 @@ void ResolveAxis(const int* axis_data, int axis_count,
                  tflite::MeanParams* op_params) {
   int i = 0;
   for (; i < axis_count; ++i) {
-    op_params->axis[i] = static_cast<int16>(axis_data[i]);
+    op_params->axis[i] = static_cast<int16_t>(axis_data[i]);
   }
   for (; i < 4; ++i) {
     op_params->axis[i] = 1;
@@ -110,7 +110,7 @@ TfLiteStatus EvalMean(TfLiteContext* context, TfLiteNode* node) {
       }
     } break;
     default:
-      // TODO(b/144955155): Support uint8(b/144955155) and int8(b/144955018)
+      // TODO(b/144955155): Support uint8_t(b/144955155) and int8_t(b/144955018)
       TF_LITE_ENSURE_MSG(context, false,
                          "Currently, only float32 input type "
                          "is supported.");
diff --git a/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc b/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc
index dc39bfeebf0..38df726cada 100644
--- a/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc
+++ b/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc
@@ -71,22 +71,22 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 
   if (output->type == kTfLiteFloat32) {
     reference_ops::ResizeNearestNeighbor(
-        op_params, GetTensorShape(input), GetTensorData<int32>(input),
-        GetTensorShape(size), GetTensorData<int32>(size),
-        GetTensorShape(output), GetTensorData<int32>(output));
+        op_params, GetTensorShape(input), GetTensorData<int32_t>(input),
+        GetTensorShape(size), GetTensorData<int32_t>(size),
+        GetTensorShape(output), GetTensorData<int32_t>(output));
   } else if (output->type == kTfLiteUInt8) {
     reference_ops::ResizeNearestNeighbor(
         op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
-        GetTensorShape(size), GetTensorData<int32>(size),
+        GetTensorShape(size), GetTensorData<int32_t>(size),
         GetTensorShape(output), GetTensorData<uint8_t>(output));
   } else if (output->type == kTfLiteInt8) {
     reference_ops::ResizeNearestNeighbor(
         op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
-        GetTensorShape(size), GetTensorData<int32>(size),
+        GetTensorShape(size), GetTensorData<int32_t>(size),
         GetTensorShape(output), GetTensorData<int8_t>(output));
   } else {
     TF_LITE_KERNEL_LOG(context,
-                       "Output type is %d, requires float, uint8 or int8.",
+                       "Output type is %d, requires float, uint8_t or int8_t.",
                        output->type);
     return kTfLiteError;
   }
diff --git a/tensorflow/lite/micro/kernels/resize_nearest_neighbor_test.cc b/tensorflow/lite/micro/kernels/resize_nearest_neighbor_test.cc
index 1c2c22645e6..cbc68bbc9e1 100644
--- a/tensorflow/lite/micro/kernels/resize_nearest_neighbor_test.cc
+++ b/tensorflow/lite/micro/kernels/resize_nearest_neighbor_test.cc
@@ -22,18 +22,18 @@ namespace tflite {
 namespace testing {
 namespace {
 
-using uint8 = std::uint8_t;
-using int32 = std::int32_t;
+using uint8_t = std::uint8_t;
+using int32_t = std::int32_t;
 
 TfLiteTensor TestCreateTensor(const float* data, TfLiteIntArray* dims) {
   return CreateFloatTensor(data, dims);
 }
 
-TfLiteTensor TestCreateTensor(const uint8* data, TfLiteIntArray* dims) {
+TfLiteTensor TestCreateTensor(const uint8_t* data, TfLiteIntArray* dims) {
   return CreateQuantizedTensor(data, dims, 0, 255);
 }
 
-TfLiteTensor TestCreateTensor(const int8* data, TfLiteIntArray* dims) {
+TfLiteTensor TestCreateTensor(const int8_t* data, TfLiteIntArray* dims) {
   return CreateQuantizedTensor(data, dims, -128, 127);
 }
 
@@ -42,7 +42,7 @@ TfLiteTensor TestCreateTensor(const int8* data, TfLiteIntArray* dims) {
 // Expected sizes should be a 1-D tensor with 2 elements: new_height & new_width
 template <typename T>
 void TestResizeNearestNeighbor(const int* input_dims_data, const T* input_data,
-                               const int32* expected_size_data,
+                               const int32_t* expected_size_data,
                                const T* expected_output_data,
                                const int* output_dims_data, T* output_data) {
   TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
@@ -101,7 +101,7 @@ TF_LITE_MICRO_TESTS_BEGIN
 TF_LITE_MICRO_TEST(HorizontalResize) {
   const int input_dims[] = {4, 1, 1, 2, 1};
   const float input_data[] = {3, 6};
-  const int32 expected_size_data[] = {1, 3};
+  const int32_t expected_size_data[] = {1, 3};
   const float expected_output_data[] = {3, 3, 6};
   const int output_dims[] = {4, 1, 1, 3, 1};
   float output_data[3];
@@ -112,32 +112,32 @@ TF_LITE_MICRO_TEST(HorizontalResize) {
 }
 TF_LITE_MICRO_TEST(HorizontalResizeUInt8) {
   const int input_dims[] = {4, 1, 1, 2, 1};
-  const uint8 input_data[] = {3, 6};
-  const int32 expected_size_data[] = {1, 3};
-  const uint8 expected_output_data[] = {3, 3, 6};
+  const uint8_t input_data[] = {3, 6};
+  const int32_t expected_size_data[] = {1, 3};
+  const uint8_t expected_output_data[] = {3, 3, 6};
   const int output_dims[] = {4, 1, 1, 3, 1};
-  uint8 output_data[3];
+  uint8_t output_data[3];
 
-  tflite::testing::TestResizeNearestNeighbor<uint8>(
+  tflite::testing::TestResizeNearestNeighbor<uint8_t>(
       input_dims, input_data, expected_size_data, expected_output_data,
       output_dims, output_data);
 }
 TF_LITE_MICRO_TEST(HorizontalResizeInt8) {
   const int input_dims[] = {4, 1, 1, 2, 1};
-  const int8 input_data[] = {-3, 6};
-  const int32 expected_size_data[] = {1, 3};
-  const int8 expected_output_data[] = {-3, -3, 6};
+  const int8_t input_data[] = {-3, 6};
+  const int32_t expected_size_data[] = {1, 3};
+  const int8_t expected_output_data[] = {-3, -3, 6};
   const int output_dims[] = {4, 1, 1, 3, 1};
-  int8 output_data[3];
+  int8_t output_data[3];
 
-  tflite::testing::TestResizeNearestNeighbor<int8>(
+  tflite::testing::TestResizeNearestNeighbor<int8_t>(
       input_dims, input_data, expected_size_data, expected_output_data,
       output_dims, output_data);
 }
 TF_LITE_MICRO_TEST(VerticalResize) {
   const int input_dims[] = {4, 1, 2, 1, 1};
   const float input_data[] = {3, 9};
-  const int32 expected_size_data[] = {3, 1};
+  const int32_t expected_size_data[] = {3, 1};
   const float expected_output_data[] = {3, 3, 9};
   const int output_dims[] = {4, 1, 3, 1, 1};
   float output_data[3];
@@ -148,25 +148,25 @@ TF_LITE_MICRO_TEST(VerticalResize) {
 }
 TF_LITE_MICRO_TEST(VerticalResizeUInt8) {
   const int input_dims[] = {4, 1, 2, 1, 1};
-  const uint8 input_data[] = {3, 9};
-  const int32 expected_size_data[] = {3, 1};
-  const uint8 expected_output_data[] = {3, 3, 9};
+  const uint8_t input_data[] = {3, 9};
+  const int32_t expected_size_data[] = {3, 1};
+  const uint8_t expected_output_data[] = {3, 3, 9};
   const int output_dims[] = {4, 1, 3, 1, 1};
-  uint8 output_data[3];
+  uint8_t output_data[3];
 
-  tflite::testing::TestResizeNearestNeighbor<uint8>(
+  tflite::testing::TestResizeNearestNeighbor<uint8_t>(
       input_dims, input_data, expected_size_data, expected_output_data,
       output_dims, output_data);
 }
 TF_LITE_MICRO_TEST(VerticalResizeInt8) {
   const int input_dims[] = {4, 1, 2, 1, 1};
-  const int8 input_data[] = {3, -9};
-  const int32 expected_size_data[] = {3, 1};
-  const int8 expected_output_data[] = {3, 3, -9};
+  const int8_t input_data[] = {3, -9};
+  const int32_t expected_size_data[] = {3, 1};
+  const int8_t expected_output_data[] = {3, 3, -9};
   const int output_dims[] = {4, 1, 3, 1, 1};
-  int8 output_data[3];
+  int8_t output_data[3];
 
-  tflite::testing::TestResizeNearestNeighbor<int8>(
+  tflite::testing::TestResizeNearestNeighbor<int8_t>(
       input_dims, input_data, expected_size_data, expected_output_data,
       output_dims, output_data);
 }
@@ -176,7 +176,7 @@ TF_LITE_MICRO_TEST(TwoDimensionalResize) {
       3, 6,   //
       9, 12,  //
   };
-  const int32 expected_size_data[] = {3, 3};
+  const int32_t expected_size_data[] = {3, 3};
   const float expected_output_data[] = {
       3, 3, 6,  //
       3, 3, 6,  //
@@ -192,39 +192,39 @@ TF_LITE_MICRO_TEST(TwoDimensionalResize) {
 }
 TF_LITE_MICRO_TEST(TwoDimensionalResizeUInt8) {
   const int input_dims[] = {4, 1, 2, 2, 1};
-  const uint8 input_data[] = {
+  const uint8_t input_data[] = {
       3, 6,  //
       9, 12  //
   };
-  const int32 expected_size_data[] = {3, 3};
-  const uint8 expected_output_data[] = {
+  const int32_t expected_size_data[] = {3, 3};
+  const uint8_t expected_output_data[] = {
       3, 3, 6,  //
       3, 3, 6,  //
       9, 9, 12  //
   };
   const int output_dims[] = {4, 1, 3, 3, 1};
-  uint8 output_data[9];
+  uint8_t output_data[9];
 
-  tflite::testing::TestResizeNearestNeighbor<uint8>(
+  tflite::testing::TestResizeNearestNeighbor<uint8_t>(
       input_dims, input_data, expected_size_data, expected_output_data,
       output_dims, output_data);
 }
 TF_LITE_MICRO_TEST(TwoDimensionalResizeInt8) {
   const int input_dims[] = {4, 1, 2, 2, 1};
-  const int8 input_data[] = {
+  const int8_t input_data[] = {
       3, -6,  //
       9, 12,  //
   };
-  const int32 expected_size_data[] = {3, 3};
-  const int8 expected_output_data[] = {
+  const int32_t expected_size_data[] = {3, 3};
+  const int8_t expected_output_data[] = {
       3, 3, -6,  //
       3, 3, -6,  //
       9, 9, 12,  //
   };
   const int output_dims[] = {4, 1, 3, 3, 1};
-  int8 output_data[9];
+  int8_t output_data[9];
 
-  tflite::testing::TestResizeNearestNeighbor<int8>(
+  tflite::testing::TestResizeNearestNeighbor<int8_t>(
       input_dims, input_data, expected_size_data, expected_output_data,
       output_dims, output_data);
 }
@@ -236,7 +236,7 @@ TF_LITE_MICRO_TEST(TwoDimensionalResizeWithTwoBatches) {
       4,  10,  //
       10, 16   //
   };
-  const int32 expected_size_data[] = {3, 3};
+  const int32_t expected_size_data[] = {3, 3};
   const float expected_output_data[] = {
       3,  3,  6,   //
       3,  3,  6,   //
@@ -254,14 +254,14 @@ TF_LITE_MICRO_TEST(TwoDimensionalResizeWithTwoBatches) {
 }
 TF_LITE_MICRO_TEST(TwoDimensionalResizeWithTwoBatchesUInt8) {
   const int input_dims[] = {4, 2, 2, 2, 1};
-  const uint8 input_data[] = {
+  const uint8_t input_data[] = {
       3,  6,   //
       9,  12,  //
       4,  10,  //
       10, 16   //
   };
-  const int32 expected_size_data[] = {3, 3};
-  const uint8 expected_output_data[] = {
+  const int32_t expected_size_data[] = {3, 3};
+  const uint8_t expected_output_data[] = {
       3,  3,  6,   //
       3,  3,  6,   //
       9,  9,  12,  //
@@ -270,22 +270,22 @@ TF_LITE_MICRO_TEST(TwoDimensionalResizeWithTwoBatchesUInt8) {
       10, 10, 16,  //
   };
   const int output_dims[] = {4, 2, 3, 3, 1};
-  uint8 output_data[18];
+  uint8_t output_data[18];
 
-  tflite::testing::TestResizeNearestNeighbor<uint8>(
+  tflite::testing::TestResizeNearestNeighbor<uint8_t>(
       input_dims, input_data, expected_size_data, expected_output_data,
       output_dims, output_data);
 }
 TF_LITE_MICRO_TEST(TwoDimensionalResizeWithTwoBatchesInt8) {
   const int input_dims[] = {4, 2, 2, 2, 1};
-  const int8 input_data[] = {
+  const int8_t input_data[] = {
       3,  6,    //
       9,  -12,  //
       -4, 10,   //
       10, 16    //
   };
-  const int32 expected_size_data[] = {3, 3};
-  const int8 expected_output_data[] = {
+  const int32_t expected_size_data[] = {3, 3};
+  const int8_t expected_output_data[] = {
       3,  3,  6,    //
       3,  3,  6,    //
       9,  9,  -12,  //
@@ -294,9 +294,9 @@ TF_LITE_MICRO_TEST(TwoDimensionalResizeWithTwoBatchesInt8) {
       10, 10, 16,   //
   };
   const int output_dims[] = {4, 2, 3, 3, 1};
-  int8 output_data[18];
+  int8_t output_data[18];
 
-  tflite::testing::TestResizeNearestNeighbor<int8>(
+  tflite::testing::TestResizeNearestNeighbor<int8_t>(
       input_dims, input_data, expected_size_data, expected_output_data,
       output_dims, output_data);
 }
@@ -306,7 +306,7 @@ TF_LITE_MICRO_TEST(ThreeDimensionalResize) {
       3, 4,  6,  10,  //
       9, 10, 12, 16,  //
   };
-  const int32 expected_size_data[] = {3, 3};
+  const int32_t expected_size_data[] = {3, 3};
   const float expected_output_data[] = {
       3, 4,  3, 4,  6,  10,  //
       3, 4,  3, 4,  6,  10,  //
@@ -321,39 +321,39 @@ TF_LITE_MICRO_TEST(ThreeDimensionalResize) {
 }
 TF_LITE_MICRO_TEST(ThreeDimensionalResizeUInt8) {
   const int input_dims[] = {4, 1, 2, 2, 2};
-  const uint8 input_data[] = {
+  const uint8_t input_data[] = {
       3,  4,  6,  10,  //
       10, 12, 14, 16,  //
   };
-  const int32 expected_size_data[] = {3, 3};
-  const uint8 expected_output_data[] = {
+  const int32_t expected_size_data[] = {3, 3};
+  const uint8_t expected_output_data[] = {
       3,  4,  3,  4,  6,  10,  //
       3,  4,  3,  4,  6,  10,  //
       10, 12, 10, 12, 14, 16,  //
   };
   const int output_dims[] = {4, 1, 3, 3, 2};
-  uint8 output_data[18];
+  uint8_t output_data[18];
 
-  tflite::testing::TestResizeNearestNeighbor<uint8>(
+  tflite::testing::TestResizeNearestNeighbor<uint8_t>(
       input_dims, input_data, expected_size_data, expected_output_data,
       output_dims, output_data);
 }
 TF_LITE_MICRO_TEST(ThreeDimensionalResizeInt8) {
   const int input_dims[] = {4, 1, 2, 2, 2};
-  const int8 input_data[] = {
+  const int8_t input_data[] = {
       3,  4,  -6,  10,  //
       10, 12, -14, 16,  //
   };
-  const int32 expected_size_data[] = {3, 3};
-  const int8 expected_output_data[] = {
+  const int32_t expected_size_data[] = {3, 3};
+  const int8_t expected_output_data[] = {
       3,  4,  3,  4,  -6,  10,  //
       3,  4,  3,  4,  -6,  10,  //
       10, 12, 10, 12, -14, 16,  //
   };
   const int output_dims[] = {4, 1, 3, 3, 2};
-  int8 output_data[18];
+  int8_t output_data[18];
 
-  tflite::testing::TestResizeNearestNeighbor<int8>(
+  tflite::testing::TestResizeNearestNeighbor<int8_t>(
       input_dims, input_data, expected_size_data, expected_output_data,
       output_dims, output_data);
 }
diff --git a/tensorflow/lite/micro/kernels/softmax.cc b/tensorflow/lite/micro/kernels/softmax.cc
index 881efdae3e1..e806fe9ae29 100644
--- a/tensorflow/lite/micro/kernels/softmax.cc
+++ b/tensorflow/lite/micro/kernels/softmax.cc
@@ -42,7 +42,8 @@ TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context,
       TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8);
       if (output->type == kTfLiteInt16) {
         TF_LITE_ENSURE_EQ(context, output->params.zero_point, -32768);
-        // NOTE: Current int16 softmax output does not require symmetric scaling
+        // NOTE: Current int16_t softmax output does not require symmetric
+        // scaling
         // - so no need to verify scale here.
       } else {
         TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8);
diff --git a/tensorflow/lite/micro/kernels/sub.cc b/tensorflow/lite/micro/kernels/sub.cc
index ddc03d81856..6c3dc5f917b 100644
--- a/tensorflow/lite/micro/kernels/sub.cc
+++ b/tensorflow/lite/micro/kernels/sub.cc
@@ -40,18 +40,18 @@ struct OpData {
   // and the special 16-bit -> 16bit quantized path
   int input1_shift;
   int input2_shift;
-  int32 output_activation_min;
-  int32 output_activation_max;
+  int32_t output_activation_min;
+  int32_t output_activation_max;
 
   // These fields are used only in the general 8-bit -> 8bit quantized path
-  int32 input1_multiplier;
-  int32 input2_multiplier;
-  int32 output_multiplier;
+  int32_t input1_multiplier;
+  int32_t input2_multiplier;
+  int32_t output_multiplier;
   int output_shift;
   int left_shift;
-  int32 input1_offset;
-  int32 input2_offset;
-  int32 output_offset;
+  int32_t input1_offset;
+  int32_t input2_offset;
+  int32_t output_offset;
 };
 
 TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteSubParams* params,
diff --git a/tensorflow/lite/micro/kernels/svdf.cc b/tensorflow/lite/micro/kernels/svdf.cc
index c0bae4acc48..c3adb4d3782 100644
--- a/tensorflow/lite/micro/kernels/svdf.cc
+++ b/tensorflow/lite/micro/kernels/svdf.cc
@@ -32,8 +32,8 @@ namespace svdf {
 namespace {
 
 struct OpData {
-  int32 effective_scale_1_a;
-  int32 effective_scale_2_a;
+  int32_t effective_scale_1_a;
+  int32_t effective_scale_2_a;
   // b versions of each scale are kept at int since the numbers are just the
   // shift value - typically between [-32, 32].
   int effective_scale_1_b;
@@ -377,7 +377,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2);
 
   // Validate Tensor Output:
-  // [0] = float/int8, {2, batch_size, num_units}
+  // [0] = float/int8_t, {2, batch_size, num_units}
   TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
   TF_LITE_ENSURE_EQ(context, NumDimensions(output), 2);
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifi/add.cc b/tensorflow/lite/micro/kernels/xtensa_hifi/add.cc
index 0e911762981..90590ab0632 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifi/add.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifi/add.cc
@@ -42,18 +42,18 @@ struct OpData {
   // and the special 16-bit -> 16bit quantized path
   int input1_shift;
   int input2_shift;
-  int32 output_activation_min;
-  int32 output_activation_max;
+  int32_t output_activation_min;
+  int32_t output_activation_max;
 
   // These fields are used only in the general 8-bit -> 8bit quantized path
-  int32 input1_multiplier;
-  int32 input2_multiplier;
-  int32 output_multiplier;
+  int32_t input1_multiplier;
+  int32_t input2_multiplier;
+  int32_t output_multiplier;
   int output_shift;
   int left_shift;
-  int32 input1_offset;
-  int32 input2_offset;
-  int32 output_offset;
+  int32_t input1_offset;
+  int32_t input2_offset;
+  int32_t output_offset;
 };
 
 TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteAddParams* params,
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifi/conv.cc b/tensorflow/lite/micro/kernels/xtensa_hifi/conv.cc
index ca968f8ab1c..2de3345bcbf 100755
--- a/tensorflow/lite/micro/kernels/xtensa_hifi/conv.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifi/conv.cc
@@ -219,9 +219,9 @@ TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
     const int stride_height = params->stride_height;
     const int pad_width = data.padding.width;
     const int pad_height = data.padding.height;
-    const int32 output_activation_min = data.output_activation_min;
-    const int32 output_activation_max = data.output_activation_max;
-    const int32 output_multiplier = data.output_multiplier;
+    const int32_t output_activation_min = data.output_activation_min;
+    const int32_t output_activation_max = data.output_activation_max;
+    const int32_t output_multiplier = data.output_multiplier;
     const int output_shift = -data.output_shift;
     TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
     TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
@@ -362,10 +362,10 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
   reference_integer_ops::ConvPerChannel(
       op_params, data.per_channel_output_multiplier,
       data.per_channel_output_shift, GetTensorShape(input),
-      GetTensorData<int8>(input), GetTensorShape(filter),
-      GetTensorData<int8>(filter), GetTensorShape(bias),
-      GetTensorData<int32>(bias), GetTensorShape(output),
-      GetTensorData<int8>(output));
+      GetTensorData<int8_t>(input), GetTensorShape(filter),
+      GetTensorData<int8_t>(filter), GetTensorShape(bias),
+      GetTensorData<int32_t>(bias), GetTensorShape(output),
+      GetTensorData<int8_t>(output));
 }
 
 TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node,
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifi/depthwise_conv.cc b/tensorflow/lite/micro/kernels/xtensa_hifi/depthwise_conv.cc
index 0c5b484229b..2dd11ed060f 100755
--- a/tensorflow/lite/micro/kernels/xtensa_hifi/depthwise_conv.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifi/depthwise_conv.cc
@@ -142,7 +142,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   int filter_width = SizeOfDimension(filter, 2);
   int filter_height = SizeOfDimension(filter, 1);
 
-  // Per channel quantization is only needed for int8 inference. For other
+  // Per channel quantization is only needed for int8_t inference. For other
   // quantized types, only a single scale and zero point is needed.
   const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension];
   // Dynamically allocate per-channel quantization parameters.
@@ -335,10 +335,10 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
   reference_integer_ops::DepthwiseConvPerChannel(
       op_params, data->per_channel_output_multiplier,
       data->per_channel_output_shift, GetTensorShape(input),
-      GetTensorData<int8>(input), GetTensorShape(filter),
-      GetTensorData<int8>(filter), GetTensorShape(bias),
-      GetTensorData<int32>(bias), GetTensorShape(output),
-      GetTensorData<int8>(output));
+      GetTensorData<int8_t>(input), GetTensorShape(filter),
+      GetTensorData<int8_t>(filter), GetTensorShape(bias),
+      GetTensorData<int32_t>(bias), GetTensorShape(output),
+      GetTensorData<int8_t>(output));
 }
 
 TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
@@ -370,9 +370,9 @@ TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
     const int pad_width = data->padding.width;
     const int pad_height = data->padding.height;
     const int depth_multiplier = params->depth_multiplier;
-    const int32 output_activation_min = data->output_activation_min;
-    const int32 output_activation_max = data->output_activation_max;
-    const int32 output_multiplier = data->output_multiplier;
+    const int32_t output_activation_min = data->output_activation_min;
+    const int32_t output_activation_max = data->output_activation_max;
+    const int32_t output_multiplier = data->output_multiplier;
     // Legacy ops used mixed left and right shifts. Now all are +ve-means-left.
     const int output_shift = -data->output_shift;
     TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifi/pooling.cc b/tensorflow/lite/micro/kernels/xtensa_hifi/pooling.cc
index 0e6f0d0ab30..ccb3c11844f 100755
--- a/tensorflow/lite/micro/kernels/xtensa_hifi/pooling.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifi/pooling.cc
@@ -148,7 +148,7 @@ TfLiteStatus AverageEvalFloat(TfLiteContext* context, const TfLiteNode* node,
   }
 
   out_length = batches * output_height * output_width * depth;
-  uint32 p_unalign_val = (uint32)out_data_ptr, p_align_val;
+  uint32_t p_unalign_val = (uint32_t)out_data_ptr, p_align_val;
   p_align_val = (p_unalign_val + 7) & (~7);
 
   // pre loop for activation_min_max
@@ -215,8 +215,8 @@ TfLiteStatus AverageEvalQuantized(TfLiteContext* context,
     const int output_height = output_shape.Dims(1);
     const int output_width = output_shape.Dims(2);
 
-    const uint8* inp_data_ptr;
-    uint8* out_data_ptr;
+    const uint8_t* inp_data_ptr;
+    uint8_t* out_data_ptr;
     int inp_data_format = 0, out_data_format = 0, out_length;
     int inp_precision = PREC_ASYM8, out_precision = PREC_ASYM8;
     void* p_scratch;
@@ -262,7 +262,7 @@ TfLiteStatus AverageEvalQuantized(TfLiteContext* context,
     }
 
     out_length = batches * output_height * output_width * depth;
-    uint32 p_unalign_val = (uint32)out_data_ptr, p_align_val;
+    uint32_t p_unalign_val = (uint32_t)out_data_ptr, p_align_val;
     p_align_val = (p_unalign_val + 7) & (~7);
 
     // pre loop for activation_min_max
@@ -372,7 +372,7 @@ TfLiteStatus MaxEvalFloat(TfLiteContext* context, TfLiteNode* node,
   }
 
   out_length = batches * output_height * output_width * depth;
-  uint32 p_unalign_val = (uint32)out_data_ptr, p_align_val;
+  uint32_t p_unalign_val = (uint32_t)out_data_ptr, p_align_val;
   p_align_val = (p_unalign_val + 7) & (~7);
 
   // pre loop for activation_min_max
@@ -438,8 +438,8 @@ TfLiteStatus MaxEvalQuantized(TfLiteContext* context, TfLiteNode* node,
     const int output_height = output_shape.Dims(1);
     const int output_width = output_shape.Dims(2);
 
-    const uint8* inp_data_ptr;
-    uint8* out_data_ptr;
+    const uint8_t* inp_data_ptr;
+    uint8_t* out_data_ptr;
     int inp_data_format = 0, out_data_format = 0, out_length;
     int inp_precision = PREC_ASYM8, out_precision = PREC_ASYM8;
     void* p_scratch;
@@ -482,7 +482,7 @@ TfLiteStatus MaxEvalQuantized(TfLiteContext* context, TfLiteNode* node,
     }
 
     out_length = batches * output_height * output_width * depth;
-    uint32 p_unalign_val = (uint32)out_data_ptr, p_align_val;
+    uint32_t p_unalign_val = (uint32_t)out_data_ptr, p_align_val;
     p_align_val = (p_unalign_val + 7) & (~7);
 
     // pre loop for activation_min_max
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifi/softmax.cc b/tensorflow/lite/micro/kernels/xtensa_hifi/softmax.cc
index e4fa19671c2..9d256b3aecc 100755
--- a/tensorflow/lite/micro/kernels/xtensa_hifi/softmax.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifi/softmax.cc
@@ -63,7 +63,8 @@ TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context,
       TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8);
       if (output->type == kTfLiteInt16) {
         TF_LITE_ENSURE_EQ(context, output->params.zero_point, -32768);
-        // NOTE: Current int16 softmax output does not require symmetric scaling
+        // NOTE: Current int16_t softmax output does not require symmetric
+        // scaling
         // - so no need to verify scale here.
       } else {
         TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8);
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifi/svdf.cc b/tensorflow/lite/micro/kernels/xtensa_hifi/svdf.cc
index f9d846bf8b3..a208713fb9d 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifi/svdf.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifi/svdf.cc
@@ -53,8 +53,8 @@ namespace svdf {
 namespace {
 
 struct OpData {
-  int32 effective_scale_1_a;
-  int32 effective_scale_2_a;
+  int32_t effective_scale_1_a;
+  int32_t effective_scale_2_a;
   // b versions of each scale are kept at int since the numbers are just the
   // shift value - typically between [-32, 32].
   int effective_scale_1_b;
@@ -461,7 +461,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2);
 
   // Validate Tensor Output:
-  // [0] = float/int8, {2, batch_size, num_units}
+  // [0] = float/int8_t, {2, batch_size, num_units}
   TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
   TF_LITE_ENSURE_EQ(context, NumDimensions(output), 2);
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc
index 0e71bfbcb26..011cfc426a1 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc
@@ -33,22 +33,22 @@ namespace conv {
 namespace xtensa {
 namespace hifimini {
 
-void ConvPerChannel(const ConvParams& params, const int32* output_multiplier,
-                    const int32* output_shift, const RuntimeShape& input_shape,
-                    const int8* input_data, const RuntimeShape& filter_shape,
-                    const int8* filter_data, const RuntimeShape& bias_shape,
-                    const int32* bias_data, const RuntimeShape& output_shape,
-                    int8* output_data) {
+void ConvPerChannel(const ConvParams& params, const int32_t* output_multiplier,
+                    const int32_t* output_shift,
+                    const RuntimeShape& input_shape, const int8_t* input_data,
+                    const RuntimeShape& filter_shape, const int8_t* filter_data,
+                    const RuntimeShape& bias_shape, const int32_t* bias_data,
+                    const RuntimeShape& output_shape, int8_t* output_data) {
   const int stride_width = params.stride_width;
   const int stride_height = params.stride_height;
   const int dilation_width_factor = params.dilation_width_factor;
   const int dilation_height_factor = params.dilation_height_factor;
   const int pad_width = params.padding_values.width;
   const int pad_height = params.padding_values.height;
-  const int32 input_offset = params.input_offset;
-  const int32 output_offset = params.output_offset;
-  const int32 output_activation_min = params.quantized_activation_min;
-  const int32 output_activation_max = params.quantized_activation_max;
+  const int32_t input_offset = params.input_offset;
+  const int32_t output_offset = params.output_offset;
+  const int32_t output_activation_min = params.quantized_activation_min;
+  const int32_t output_activation_max = params.quantized_activation_max;
 
   const int batches = input_shape.Dims(0);
 
@@ -169,11 +169,11 @@ void ConvPerChannel(const ConvParams& params, const int32* output_multiplier,
 inline void Conv1x32Input32x32Filter(
     const int input_offset, const int output_offset,
     const int quantized_activation_min, const int quantized_activation_max,
-    const int32* output_multiplier, const int32* output_shift,
-    const RuntimeShape& input_shape, const int8* input_data,
-    const RuntimeShape& filter_shape, const int8* filter_data,
-    const RuntimeShape& bias_shape, const int32* bias_data,
-    const RuntimeShape& output_shape, int8* output_data) {
+    const int32_t* output_multiplier, const int32_t* output_shift,
+    const RuntimeShape& input_shape, const int8_t* input_data,
+    const RuntimeShape& filter_shape, const int8_t* filter_data,
+    const RuntimeShape& bias_shape, const int32_t* bias_data,
+    const RuntimeShape& output_shape, int8_t* output_data) {
   ae_p24x2s input_offset_24x2 = AE_MOVPA24(input_offset);
   ae_q56s output_offset_56 = AE_CVTQ48A32S(output_offset);
   ae_q56s output_activation_max_56 = AE_CVTQ48A32S(quantized_activation_max);
@@ -324,7 +324,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   int output_width = output->dims->data[2];
   int output_height = output->dims->data[1];
 
-  // Per channel quantization is only needed for int8 inference. For other
+  // Per channel quantization is only needed for int8_t inference. For other
   // quantized types, only a single scale and zero point is needed.
   const int num_channels = filter->dims->data[kConvQuantizedDimension];
   // Dynamically allocate per-channel quantization parameters.
@@ -382,10 +382,10 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
   xtensa::hifimini::ConvPerChannel(
       op_params, data->per_channel_output_multiplier,
       data->per_channel_output_shift, GetTensorShape(input),
-      GetTensorData<int8>(input), GetTensorShape(filter),
-      GetTensorData<int8>(filter), GetTensorShape(bias),
-      GetTensorData<int32>(bias), GetTensorShape(output),
-      GetTensorData<int8>(output));
+      GetTensorData<int8_t>(input), GetTensorShape(filter),
+      GetTensorData<int8_t>(filter), GetTensorShape(bias),
+      GetTensorData<int32_t>(bias), GetTensorShape(output),
+      GetTensorData<int8_t>(output));
 }
 
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
@@ -409,10 +409,10 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
         op_data->output_activation_min, op_data->output_activation_max,
         op_data->per_channel_output_multiplier,
         op_data->per_channel_output_shift, GetTensorShape(input),
-        GetTensorData<int8>(input), GetTensorShape(filter),
-        GetTensorData<int8>(filter), GetTensorShape(bias),
-        GetTensorData<int32>(bias), GetTensorShape(output),
-        GetTensorData<int8>(output));
+        GetTensorData<int8_t>(input), GetTensorShape(filter),
+        GetTensorData<int8_t>(filter), GetTensorShape(bias),
+        GetTensorData<int32_t>(bias), GetTensorShape(output),
+        GetTensorData<int8_t>(output));
     return kTfLiteOk;
   }
 
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc
index 656fb1b04cb..1f08b2c4ff4 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc
@@ -34,12 +34,12 @@ namespace xtensa {
 namespace hifimini {
 
 inline void DepthwiseConvPerChannel(
-    const DepthwiseParams& params, const int32* output_multiplier,
-    const int32* output_shift, const RuntimeShape& input_shape,
-    const int8* input_data, const RuntimeShape& filter_shape,
-    const int8* filter_data, const RuntimeShape& bias_shape,
-    const int32* bias_data, const RuntimeShape& output_shape,
-    int8* output_data) {
+    const DepthwiseParams& params, const int32_t* output_multiplier,
+    const int32_t* output_shift, const RuntimeShape& input_shape,
+    const int8_t* input_data, const RuntimeShape& filter_shape,
+    const int8_t* filter_data, const RuntimeShape& bias_shape,
+    const int32_t* bias_data, const RuntimeShape& output_shape,
+    int8_t* output_data) {
   // TODO(b/154032858): Investigate removing extra copies.
   const int stride_width = params.stride_width;
   const int stride_height = params.stride_height;
@@ -48,10 +48,10 @@ inline void DepthwiseConvPerChannel(
   const int pad_width = params.padding_values.width;
   const int pad_height = params.padding_values.height;
   const int depth_multiplier = params.depth_multiplier;
-  const int32 input_offset = params.input_offset;
-  const int32 output_offset = params.output_offset;
-  const int32 output_activation_min = params.quantized_activation_min;
-  const int32 output_activation_max = params.quantized_activation_max;
+  const int32_t input_offset = params.input_offset;
+  const int32_t output_offset = params.output_offset;
+  const int32_t output_activation_min = params.quantized_activation_min;
+  const int32_t output_activation_max = params.quantized_activation_max;
 
   const int batches = input_shape.Dims(0);
 
@@ -99,16 +99,16 @@ inline void DepthwiseConvPerChannel(
                       ((batch * input_height + in_y) * input_width + in_x) *
                           input_depth +
                       (in_channel);
-                  int32 input_val = input_data[input_idx];
+                  int32_t input_val = input_data[input_idx];
 
                   // Find current filter index, minus 2 for Xtensa load
                   // alignments:
                   int filter_idx =
                       ((filter_y)*filter_width + filter_x) * filter_depth +
                       (output_channel);
-                  int32 filter_val = filter_data[filter_idx];
+                  int32_t filter_val = filter_data[filter_idx];
 
-                  // Load 8bit value as int32 into a 24x24 register and right
+                  // Load 8bit value as int32_t into a 24x24 register and right
                   // shift into 24bit space. Note: value is duplicated in the HH
                   // and LL register - but all calculations are done on the HH
                   // side.
@@ -171,11 +171,11 @@ constexpr int kConvolutionalKernelDepth = 32;
 inline void DepthwiseConv4x32MatchingInputAndFilter(
     const int input_offset, const int output_offset,
     const int quantized_activation_min, const int quantized_activation_max,
-    const int32* output_multiplier, const int32* output_shift,
-    const RuntimeShape& input_shape, const int8* input_data,
-    const RuntimeShape& filter_shape, const int8* filter_data,
-    const RuntimeShape& bias_shape, const int32* bias_data,
-    const RuntimeShape& output_shape, int8* output_data) {
+    const int32_t* output_multiplier, const int32_t* output_shift,
+    const RuntimeShape& input_shape, const int8_t* input_data,
+    const RuntimeShape& filter_shape, const int8_t* filter_data,
+    const RuntimeShape& bias_shape, const int32_t* bias_data,
+    const RuntimeShape& output_shape, int8_t* output_data) {
   // Convert the (unsigned) 32-bit multiplier down to a 24-bit multiplier.
   const int32_t mult = output_multiplier[0] >> 8;
   const int32_t shift = output_shift[0];
@@ -189,16 +189,16 @@ inline void DepthwiseConv4x32MatchingInputAndFilter(
   const int stride_elements =
       (kConvolutionalKernelDepth / kConvolutionalKernelWidth);
 
-  const int8* input_0_ptr = (const int8*)(input_data - 2);
-  const int8* weight_0_ptr = (const int8*)(filter_data - 2);
+  const int8_t* input_0_ptr = (const int8_t*)(input_data - 2);
+  const int8_t* weight_0_ptr = (const int8_t*)(filter_data - 2);
   // Apply the kernels in blocks of 4 for all the channels.
-  const int8* input_1_ptr = input_0_ptr + stride_elements * 4;
-  const int8* input_2_ptr = input_1_ptr + stride_elements * 4;
-  const int8* input_3_ptr = input_2_ptr + stride_elements * 4;
+  const int8_t* input_1_ptr = input_0_ptr + stride_elements * 4;
+  const int8_t* input_2_ptr = input_1_ptr + stride_elements * 4;
+  const int8_t* input_3_ptr = input_2_ptr + stride_elements * 4;
 
-  const int8* weight_1_ptr = weight_0_ptr + stride_elements * 4;
-  const int8* weight_2_ptr = weight_1_ptr + stride_elements * 4;
-  const int8* weight_3_ptr = weight_2_ptr + stride_elements * 4;
+  const int8_t* weight_1_ptr = weight_0_ptr + stride_elements * 4;
+  const int8_t* weight_2_ptr = weight_1_ptr + stride_elements * 4;
+  const int8_t* weight_3_ptr = weight_2_ptr + stride_elements * 4;
 
   for (int i = 0; i < num_blocks; ++i) {
     ae_q56s block_0_acc = AE_ZEROQ56();
@@ -372,7 +372,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   int filter_width = SizeOfDimension(filter, 2);
   int filter_height = SizeOfDimension(filter, 1);
 
-  // Per channel quantization is only needed for int8 inference. For other
+  // Per channel quantization is only needed for int8_t inference. For other
   // quantized types, only a single scale and zero point is needed.
   const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension];
   // Dynamically allocate per-channel quantization parameters.
@@ -430,10 +430,10 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
   xtensa::hifimini::DepthwiseConvPerChannel(
       op_params, data->per_channel_output_multiplier,
       data->per_channel_output_shift, GetTensorShape(input),
-      GetTensorData<int8>(input), GetTensorShape(filter),
-      GetTensorData<int8>(filter), GetTensorShape(bias),
-      GetTensorData<int32>(bias), GetTensorShape(output),
-      GetTensorData<int8>(output));
+      GetTensorData<int8_t>(input), GetTensorShape(filter),
+      GetTensorData<int8_t>(filter), GetTensorShape(bias),
+      GetTensorData<int32_t>(bias), GetTensorShape(output),
+      GetTensorData<int8_t>(output));
 }
 
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
@@ -460,10 +460,10 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
         std::numeric_limits<int8_t>::min(), std::numeric_limits<int8_t>::max(),
         op_data->per_channel_output_multiplier,
         op_data->per_channel_output_shift, GetTensorShape(input),
-        GetTensorData<int8>(input), GetTensorShape(filter),
-        GetTensorData<int8>(filter), GetTensorShape(bias),
-        GetTensorData<int32>(bias), GetTensorShape(output),
-        GetTensorData<int8>(output));
+        GetTensorData<int8_t>(input), GetTensorShape(filter),
+        GetTensorData<int8_t>(filter), GetTensorShape(bias),
+        GetTensorData<int32_t>(bias), GetTensorShape(output),
+        GetTensorData<int8_t>(output));
     return kTfLiteOk;
   }
   switch (input->type) {  // Already know in/out types are same.
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/fully_connected.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/fully_connected.cc
index 6ebfbe75067..8383e02e598 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/fully_connected.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/fully_connected.cc
@@ -36,16 +36,16 @@ namespace hifimini {
 void FullyConnected(const FullyConnectedParams& params,
                     const RuntimeShape& input_shape, const int8_t* input_data,
                     const RuntimeShape& filter_shape, const int8_t* filter_data,
-                    const RuntimeShape& bias_shape, const int32* bias_data,
+                    const RuntimeShape& bias_shape, const int32_t* bias_data,
                     const RuntimeShape& output_shape, int8_t* output_data) {
   // TODO(b/154032858): Investigate removing extra copies.
-  const int32 input_offset = params.input_offset;
-  const int32 filter_offset = params.weights_offset;
-  const int32 output_offset = params.output_offset;
-  const int32 output_multiplier = params.output_multiplier;
+  const int32_t input_offset = params.input_offset;
+  const int32_t filter_offset = params.weights_offset;
+  const int32_t output_offset = params.output_offset;
+  const int32_t output_multiplier = params.output_multiplier;
   const int output_shift = params.output_shift;
-  const int32 output_activation_min = params.quantized_activation_min;
-  const int32 output_activation_max = params.quantized_activation_max;
+  const int32_t output_activation_min = params.quantized_activation_min;
+  const int32_t output_activation_max = params.quantized_activation_max;
 
   const int filter_dim_count = filter_shape.DimensionsCount();
   const int batches = output_shape.Dims(0);
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/quantize.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/quantize.cc
index e735214dd38..d46cc723114 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/quantize.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/quantize.cc
@@ -156,7 +156,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 
 // This Op (QUANTIZE) quantizes the input and produces quantized output.
 // AffineQuantize takes scale and zero point and quantizes the float value to
-// quantized output, in int8 or uint8 format.
+// quantized output, in int8_t or uint8_t format.
 TfLiteRegistration Register_QUANTIZE() {
   return {/*init=*/quantize::Init,
           /*free=*/nullptr,
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc
index f222387c831..83cddd49889 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc
@@ -33,12 +33,12 @@ struct OpData {
   uint16_t* exp_lut;
 };
 
-// Number of unique int8 and int16 values.  Used in exponent lookup table
+// Number of unique int8_t and int16_t values.  Used in exponent lookup table
 // computation.
 constexpr int kInt8Range =
-    std::numeric_limits<int8_t>::max() - std::numeric_limits<int8>::min() + 1;
-constexpr int kInt16Range =
-    std::numeric_limits<int16_t>::max() - std::numeric_limits<int16>::min() + 1;
+    std::numeric_limits<int8_t>::max() - std::numeric_limits<int8_t>::min() + 1;
+constexpr int kInt16Range = std::numeric_limits<int16_t>::max() -
+                            std::numeric_limits<int16_t>::min() + 1;
 // Each 16-bit precalculated exponent is expressed as a Q0.16 fixedpoint
 // value. We special-case e^0 since 1.0 requires 1 integer bit to
 // express.
@@ -47,7 +47,7 @@ constexpr int kExpFractionalBits = 16;
 // specially.
 constexpr int kMaxExponentValue = (1 << kExpFractionalBits);
 
-// Quantized softmax with int8 input and int16 output.
+// Quantized softmax with int8_t input and int16_t output.
 // Passing OpData by value does not have much savings in this op, but following
 // that as a best practice, at least for the xtensa kernels. See b/155656675 for
 // more details.
@@ -97,7 +97,7 @@ TfLiteStatus Softmax(OpData op_data, const RuntimeShape& input_shape,
       }
       output_data[i * depth + c] = static_cast<int16_t>(std::max(
           std::min(full_range_output,
-                   static_cast<int32>(std::numeric_limits<int16_t>::max())),
+                   static_cast<int32_t>(std::numeric_limits<int16_t>::max())),
           static_cast<int32_t>(std::numeric_limits<int16_t>::min())));
     }
   }
@@ -118,7 +118,8 @@ TfLiteStatus CalculateSoftmaxOpData(TfLiteContext* context,
       if (output->type == kTfLiteInt16) {
         TF_LITE_ENSURE_EQ(context, output->params.zero_point,
                           std::numeric_limits<int16_t>::min());
-        // NOTE: Current int16 softmax output does not require symmetric scaling
+        // NOTE: Current int16_t softmax output does not require symmetric
+        // scaling
         // - so no need to verify scale here.
       } else {
         TF_LITE_ENSURE_EQ(context, output->params.zero_point,
@@ -127,10 +128,10 @@ TfLiteStatus CalculateSoftmaxOpData(TfLiteContext* context,
       }
     }
 
-    // Precompute e^(-x * input_scale * beta) for every possible int8 input.
+    // Precompute e^(-x * input_scale * beta) for every possible int8_t input.
     // This computation is used for every iteration of Softmax.  We must compute
     // using pre-scaled inputs to avoid introducing additional error, while
-    // restricting our input range to the int8 range. This is valid since beta
+    // restricting our input range to the int8_t range. This is valid since beta
     // and input scale are constant for a given op in the graph. Skip index 0
     // since that is a special case which requires 1 integer bit instead of 0.
     for (int i = 1; i <= kInt8Range; i++) {
@@ -163,7 +164,7 @@ TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
   TFLITE_DCHECK(node->user_data != nullptr);
   OpData* op_data = static_cast<OpData*>(node->user_data);
 
-  // Allocate an array to precompute exponents over all int8 inputs, applying
+  // Allocate an array to precompute exponents over all int8_t inputs, applying
   // the scale and beta before calculating exp. It is mandatory to apply beta
   // and scale here, since each softmax op may have different beta and scale
   // values. Beta and scale will remain constant for a given softmax op.
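The softmax hunks above describe precomputing e^(-x * input_scale * beta) as a Q0.16 fixed-point value for every possible int8_t input, with e^0 handled separately because 1.0 needs one integer bit. A minimal standalone sketch of that idea follows, assuming a caller-provided table of kInt8Range + 1 uint16_t entries; the BuildExpLut name and the exact rounding are illustrative, not the kernel's actual helper.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>

constexpr int kExpFractionalBits = 16;
constexpr int kInt8Range = std::numeric_limits<int8_t>::max() -
                           std::numeric_limits<int8_t>::min() + 1;  // 256

// Fills exp_lut[1..kInt8Range] with e^(-i * input_scale * beta) expressed as
// unsigned Q0.16. Index 0 (e^0 == 1.0) is skipped, mirroring the comment in
// the diff, since 1.0 does not fit in a purely fractional format.
void BuildExpLut(float input_scale, float beta, uint16_t* exp_lut) {
  for (int i = 1; i <= kInt8Range; ++i) {
    const float value = std::exp(-input_scale * beta * static_cast<float>(i));
    const long q = std::lround(value * (1 << kExpFractionalBits));
    exp_lut[i] = static_cast<uint16_t>(
        std::min<long>(q, (1 << kExpFractionalBits) - 1));
  }
}
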
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/svdf.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/svdf.cc
index 8520dc2db72..3d6ad33cfcb 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/svdf.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/svdf.cc
@@ -33,8 +33,8 @@ namespace svdf {
 namespace {
 
 struct OpData {
-  int32 effective_scale_1_a;
-  int32 effective_scale_2_a;
+  int32_t effective_scale_1_a;
+  int32_t effective_scale_2_a;
   // b versions of each scale are kept at int since the numbers are just the
   // shift value - typically between [-32, 32].
   int effective_scale_1_b;
@@ -153,7 +153,7 @@ void EvalIntegerSVDF(TfLiteContext* context, TfLiteNode* node,
                 dot_prod_24x2, data.effective_scale_1_a,
                 data.effective_scale_1_b);
 
-        // Cap min/max and convert to int32:
+        // Cap min/max and convert to int32_t:
         dot_prod_56 = AE_MAXQ56S(dot_prod_56, output_int16_min_56);
         dot_prod_56 = AE_MINQ56S(dot_prod_56, output_int16_max_56);
         // Truncate immediately since the QR register is already 32 bit aligned:
@@ -246,7 +246,7 @@ void EvalIntegerSVDF(TfLiteContext* context, TfLiteNode* node,
               data.effective_scale_2_b);
       // Add output adjustment:
       x_56 = AE_ADDQ56(x_56, output_zp_56);
-      // Cap min/max and convert to int32 (already aligned to 32bit):
+      // Cap min/max and convert to int32_t (already aligned to 32bit):
       x_56 = AE_MAXQ56S(x_56, output_int8_min_56);
       x_56 = AE_MINQ56S(x_56, output_int8_max_56);
       GetTensorData<int8_t>(output_tensor)[i] =
@@ -308,7 +308,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2);
 
   // Validate Tensor Output:
-  // [0] = float/int8, {2, batch_size, num_units}
+  // [0] = float/int8_t, {2, batch_size, num_units}
   TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
   TF_LITE_ENSURE_EQ(context, NumDimensions(output), 2);
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini_staging/quantize.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini_staging/quantize.cc
index 513f926fae9..13c19cc6f34 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini_staging/quantize.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini_staging/quantize.cc
@@ -34,7 +34,7 @@ void AffineQuantize(int scale_multiplier,
                     const tflite::QuantizationParams& op_params,
                     const RuntimeShape& input_shape, const int16_t* input_data,
                     const RuntimeShape& output_shape, int8_t* output_data) {
-  const int32 zero_point = op_params.zero_point;
+  const int32_t zero_point = op_params.zero_point;
   const int flat_size = MatchingFlatSize(input_shape, output_shape);
   ae_q56s min_val_56 = AE_CVTQ48A32S(INT16_MIN);
   ae_q56s max_val_56 = AE_CVTQ48A32S(INT16_MAX);
@@ -155,7 +155,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 
 // This Op (QUANTIZE) quantizes the input and produces quantized output.
 // AffineQuantize takes scale and zero point and quantizes the float value to
-// quantized output, in int8 or uint8 format.
+// quantized output, in int8_t or uint8_t format.
 TfLiteRegistration Register_QUANTIZE() {
   return {/*init=*/quantize::Init,
           /*free=*/nullptr,
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini_staging/softmax.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini_staging/softmax.cc
index 90fc2cd9903..3e5ef198928 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini_staging/softmax.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini_staging/softmax.cc
@@ -72,7 +72,8 @@ TfLiteStatus CalculateSoftmaxOpData(TfLiteContext* context,
       if (output->type == kTfLiteInt16) {
         TF_LITE_ENSURE_EQ(context, output->params.zero_point,
                           std::numeric_limits<int16_t>::min());
-        // NOTE: Current int16 softmax output does not require symmetric scaling
+        // NOTE: Current int16_t softmax output does not require symmetric
+        // scaling
         // - so no need to verify scale here.
       } else {
         TF_LITE_ENSURE_EQ(context, output->params.zero_point,
@@ -124,7 +125,7 @@ TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteStatus scratch_status = context->RequestScratchBufferInArena(
       context, scratch_size, &(op_data->scratch_tensor_index));
   TF_LITE_ENSURE_OK(context, scratch_status);
-  // Allocate an array to precompute exponents over all int8 inputs, applying
+  // Allocate an array to precompute exponents over all int8_t inputs, applying
   // the scale and beta before calculating exp. It is mandatory to apply beta
   // and scale here, since each softmax op may have different beta and scale
   // values. Beta and scale will remain constant for a given softmax op.
@@ -145,7 +146,7 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
     const RuntimeShape& input_shape = GetTensorShape(input);
     const int8_t* input_data = GetTensorData<int8_t>(input);
     const RuntimeShape& output_shape = GetTensorShape(output);
-    int16* output_data = GetTensorData<int16>(output);
+    int16_t* output_data = GetTensorData<int16_t>(output);
     const int trailing_dim = input_shape.DimensionsCount() - 1;
     const int outer_size =
         MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini_staging/svdf.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini_staging/svdf.cc
index 537b48db8eb..05256f33306 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini_staging/svdf.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini_staging/svdf.cc
@@ -55,8 +55,8 @@ namespace svdf {
 namespace {
 
 struct OpData {
-  int32 effective_scale_1_a;
-  int32 effective_scale_2_a;
+  int32_t effective_scale_1_a;
+  int32_t effective_scale_2_a;
   // b versions of each scale are kept at int since the numbers are just the
   // shift value - typically between [-32, 32].
   int effective_scale_1_b;
@@ -239,7 +239,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2);
 
   // Validate Tensor Output:
-  // [0] = float/int8, {2, batch_size, num_units}
+  // [0] = float/int8_t, {2, batch_size, num_units}
   TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
   TF_LITE_ENSURE_EQ(context, NumDimensions(output), 2);
diff --git a/tensorflow/lite/micro/micro_utils.h b/tensorflow/lite/micro/micro_utils.h
index 1fc63d130e4..24aebad8a78 100644
--- a/tensorflow/lite/micro/micro_utils.h
+++ b/tensorflow/lite/micro/micro_utils.h
@@ -48,10 +48,10 @@ int32_t FloatToSymmetricQuantizedInt32(const float value, const float scale);
 //
 // There are several key flavors of quantization in TfLite:
 //        asymmetric symmetric  per channel
-// int8  |     X    |    X    |     X      |
-// uint8 |     X    |    X    |            |
-// int16 |     X    |         |            |
-// int32 |          |    X    |     X      |
+// int8_t  |     X    |    X    |     X      |
+// uint8_t |     X    |    X    |            |
+// int16_t |     X    |         |            |
+// int32_t |          |    X    |     X      |
 //
 // The per-op quantization spec can be found here:
 // https://www.tensorflow.org/lite/performance/quantization_spec
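The table above summarizes which quantization flavors (asymmetric, symmetric, per-channel) apply to each standard integral type. As a rough illustration of the asymmetric-versus-symmetric distinction, the sketch below quantizes a float value with an asymmetric int8_t scheme (scale plus zero point) and a symmetric int32_t scheme (zero point fixed at 0); the function names are invented for the example and are not the helpers declared in micro_utils.h.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Asymmetric int8_t: real_value ~= scale * (q - zero_point).
int8_t AsymmetricQuantizeInt8(float value, float scale, int zero_point) {
  const int q = static_cast<int>(std::round(value / scale)) + zero_point;
  return static_cast<int8_t>(std::min(127, std::max(-128, q)));
}

// Symmetric int32_t (typically bias tensors): zero point is always 0, so
// real_value ~= scale * q.
int32_t SymmetricQuantizeInt32(float value, float scale) {
  return static_cast<int32_t>(std::round(value / scale));
}
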
diff --git a/tensorflow/lite/micro/test_helpers.cc b/tensorflow/lite/micro/test_helpers.cc
index 7278fea48b3..2888a846e94 100644
--- a/tensorflow/lite/micro/test_helpers.cc
+++ b/tensorflow/lite/micro/test_helpers.cc
@@ -584,7 +584,7 @@ TfLiteStatus SimpleStatefulOp::Prepare(TfLiteContext* context,
                                        TfLiteNode* node) {
   OpData* data = reinterpret_cast<OpData*>(node->user_data);
 
-  // Make sure that the input is in uint8 with at least 1 data entry.
+  // Make sure that the input is in uint8_t with at least 1 data entry.
   const TfLiteTensor* input = tflite::GetInput(context, node, kInputTensor);
   if (input->type != kTfLiteUInt8) return kTfLiteError;
   if (NumElements(input->dims) == 0) return kTfLiteError;
@@ -925,8 +925,8 @@ TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
   TfLiteTensor result = CreateTensor(dims, is_variable);
   result.type = kTfLiteInt32;
   result.data.i32 = const_cast<int32_t*>(quantized);
-  // Quantized int32 tensors always have a zero point of 0, since the range of
-  // int32 values is large, and because zero point costs extra cycles during
+  // Quantized int32_t tensors always have a zero point of 0, since the range of
+  // int32_t values is large, and because zero point costs extra cycles during
   // processing.
   result.params = {bias_scale, 0};
   result.quantization = {kTfLiteAffineQuantization, nullptr};
@@ -934,7 +934,7 @@ TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
   return result;
 }
 
-// Quantizes int32 bias tensor with per-channel weights determined by input
+// Quantizes int32_t bias tensor with per-channel weights determined by input
 // scale multiplied by weight scale for each channel.
 TfLiteTensor CreatePerChannelQuantizedBiasTensor(
     const float* input, int32_t* quantized, TfLiteIntArray* dims,
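The bias-tensor comments above note that quantized int32_t bias tensors always use a zero point of 0 and, in the per-channel case, a scale equal to the input scale multiplied by each channel's weight scale. Below is a hedged sketch of that rule with an assumed one-bias-value-per-channel layout and an invented function name, not the test-helper's actual signature.

#include <cmath>
#include <cstdint>

// For each channel c, quantize bias[c] with scale = input_scale *
// weight_scales[c] and zero point 0, as described for int32_t bias tensors.
void QuantizeBiasPerChannel(const float* bias, const float* weight_scales,
                            float input_scale, int num_channels,
                            int32_t* quantized_bias) {
  for (int c = 0; c < num_channels; ++c) {
    const float scale = input_scale * weight_scales[c];
    quantized_bias[c] = static_cast<int32_t>(std::round(bias[c] / scale));
  }
}
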
diff --git a/tensorflow/lite/micro/test_helpers.h b/tensorflow/lite/micro/test_helpers.h
index 8941e394587..a7897145d26 100644
--- a/tensorflow/lite/micro/test_helpers.h
+++ b/tensorflow/lite/micro/test_helpers.h
@@ -164,7 +164,7 @@ TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
                                        float weights_scale,
                                        bool is_variable = false);
 
-// Quantizes int32 bias tensor with per-channel weights determined by input
+// Quantizes int32_t bias tensor with per-channel weights determined by input
 // scale multiplied by weight scale for each channel.
 TfLiteTensor CreatePerChannelQuantizedBiasTensor(
     const float* input, int32_t* quantized, TfLiteIntArray* dims,
diff --git a/tensorflow/lite/micro/testing/test_utils.cc b/tensorflow/lite/micro/testing/test_utils.cc
index ec5396e15e8..fe89e904769 100644
--- a/tensorflow/lite/micro/testing/test_utils.cc
+++ b/tensorflow/lite/micro/testing/test_utils.cc
@@ -248,8 +248,8 @@ TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims,
   result.type = kTfLiteInt32;
   result.data.i32 = const_cast<int32_t*>(data);
   result.dims = dims;
-  // Quantized int32 tensors always have a zero point of 0, since the range of
-  // int32 values is large, and because zero point costs extra cycles during
+  // Quantized int32_t tensors always have a zero point of 0, since the range of
+  // int32_t values is large, and because zero point costs extra cycles during
   // processing.
   result.params = {scale, 0};
   result.allocation_type = kTfLiteMemNone;