diff --git a/tensorflow/lite/kernels/internal/optimized/depthwiseconv_float.h b/tensorflow/lite/kernels/internal/optimized/depthwiseconv_float.h
index a8f41d5a108..a8903c1d275 100644
--- a/tensorflow/lite/kernels/internal/optimized/depthwiseconv_float.h
+++ b/tensorflow/lite/kernels/internal/optimized/depthwiseconv_float.h
@@ -769,7 +769,7 @@ void FloatDepthwiseConvAccumRow(int stride, int dilation_factor,
                                 int out_x_buffer_start, int out_x_buffer_end,
                                 int output_depth, float* acc_buffer) {
   ruy::profiler::ScopeLabel label(__PRETTY_FUNCTION__);
-  // Sanity check parameters. This is important in particular to ensure
+  // Consistency check parameters. This is important in particular to ensure
   // that we keep the number of template instantiations minimal, so we don't
   // increase binary size unnecessarily.
   static_assert(kFixedDepthMultiplier || !kFixedInputDepth, "");
diff --git a/tensorflow/lite/kernels/internal/optimized/depthwiseconv_uint8.h b/tensorflow/lite/kernels/internal/optimized/depthwiseconv_uint8.h
index 3f93a491862..8ec4af7b018 100644
--- a/tensorflow/lite/kernels/internal/optimized/depthwiseconv_uint8.h
+++ b/tensorflow/lite/kernels/internal/optimized/depthwiseconv_uint8.h
@@ -1478,7 +1478,7 @@ void QuantizedDepthwiseConvAccumRow(int stride, int dilation_factor,
                                     int out_x_buffer_end, int output_depth,
                                     int32* acc_buffer) {
   ruy::profiler::ScopeLabel label(__PRETTY_FUNCTION__);
-  // Sanity check parameters. This is important in particular to ensure
+  // Consistency check parameters. This is important in particular to ensure
   // that we keep the number of template instantiations minimal, so we don't
   // increase binary size unnecessarily.
   static_assert(kFixedDepthMultiplier || !kFixedInputDepth, "");
diff --git a/tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv.h b/tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv.h
index c84e7dc04d9..1b4b88fc622 100644
--- a/tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv.h
+++ b/tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv.h
@@ -1430,7 +1430,7 @@ void QuantizedDepthwiseConvAccumRow(int stride, int dilation_factor,
                                     int out_x_buffer_end, int output_depth,
                                     int32* acc_buffer) {
   ruy::profiler::ScopeLabel label(__PRETTY_FUNCTION__);
-  // Sanity check parameters. This is important in particular to ensure
+  // Consistency check parameters. This is important in particular to ensure
   // that we keep the number of template instantiations minimal, so we don't
   // increase binary size unnecessarily.
   static_assert(kFixedDepthMultiplier || !kFixedInputDepth, "");
diff --git a/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h b/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h
index 9131c7dbe57..df6b787338d 100644
--- a/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h
+++ b/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h
@@ -42,7 +42,7 @@ inline void ConvPerChannel(
   const int32 output_activation_min = params.quantized_activation_min;
   const int32 output_activation_max = params.quantized_activation_max;
 
-  // Sanity check.
+  // Consistency check.
   TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
   TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
   TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
@@ -143,7 +143,7 @@ inline void ConvPerChannel(
   const int32 output_activation_min = params.quantized_activation_min;
   const int32 output_activation_max = params.quantized_activation_max;
 
-  // Sanity check.
+  // Consistency check.
   TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
   TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
   TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
diff --git a/tensorflow/lite/kernels/internal/resize_nearest_neighbor_test.cc b/tensorflow/lite/kernels/internal/resize_nearest_neighbor_test.cc
index 4659d3a80e4..f8a455e7451 100644
--- a/tensorflow/lite/kernels/internal/resize_nearest_neighbor_test.cc
+++ b/tensorflow/lite/kernels/internal/resize_nearest_neighbor_test.cc
@@ -42,7 +42,7 @@ void TestReferenceResizeNearestNeighbor(
   ASSERT_EQ(expected_output_data, output_data);
 }
 
-// Sanity test values are from
+// Consistency test values are from
 // third_party/tensorflow/core/kernels/resize_nearest_neighbor_op_test.cc.
 TEST(ResizeNearestNeighborReference, Test2x2To1x1) {
diff --git a/tensorflow/lite/tools/optimize/modify_model_interface.cc b/tensorflow/lite/tools/optimize/modify_model_interface.cc
index 9451483b79d..91c9b7e8b74 100644
--- a/tensorflow/lite/tools/optimize/modify_model_interface.cc
+++ b/tensorflow/lite/tools/optimize/modify_model_interface.cc
@@ -211,7 +211,7 @@ TfLiteStatus SetOutputTypeToUINT8(ModelT* model,
 TfLiteStatus RemoveInputTensor(ModelT* model,
                                const std::vector<TensorOpTensor>& inputs,
                                int32 original_number_tensors) {
-  // Sanity check to make sure that erase start from the end.
+  // Consistency check to make sure that erase start from the end.
   int last_op_index = std::numeric_limits<int32_t>::max();
   int last_tensor_index = std::numeric_limits<int32_t>::max();
   for (auto tot : inputs) {
@@ -237,7 +237,7 @@ TfLiteStatus RemoveInputTensor(ModelT* model,
 TfLiteStatus RemoveOutputTensor(ModelT* model,
                                 const std::vector<TensorOpTensor>& outputs,
                                 int32 original_number_tensors) {
-  // Sanity check to make sure that erase start from the end.
+  // Consistency check to make sure that erase start from the end.
   int last_op_index = std::numeric_limits<int32_t>::max();
   int last_tensor_index = std::numeric_limits<int32_t>::max();
   for (auto tot : outputs) {
@@ -338,7 +338,7 @@ TfLiteStatus ModifyModelInterface(const string& input_file,
                                   const string& output_file,
                                   const TensorType& input_type,
                                   const TensorType& output_type) {
-  // Sanity Check
+  // Consistency Check
   if (input_type != tflite::TensorType_INT8 &&
       input_type != tflite::TensorType_UINT8) {
     return kTfLiteError;