Rename "sanity check" to "consistency check".

PiperOrigin-RevId: 318357382
Change-Id: I57fd8c9fd45efa5255e853a20b482111dfa0c274
Jian Li, 2020-06-25 15:12:03 -07:00 (committed by TensorFlower Gardener)
parent aa1499f356
commit 0e11504518
6 changed files with 9 additions and 9 deletions

@@ -769,7 +769,7 @@ void FloatDepthwiseConvAccumRow(int stride, int dilation_factor,
                                 int out_x_buffer_start, int out_x_buffer_end,
                                 int output_depth, float* acc_buffer) {
   ruy::profiler::ScopeLabel label(__PRETTY_FUNCTION__);
-  // Sanity check parameters. This is important in particular to ensure
+  // Consistency check parameters. This is important in particular to ensure
   // that we keep the number of template instantiations minimal, so we don't
   // increase binary size unnecessarily.
   static_assert(kFixedDepthMultiplier || !kFixedInputDepth, "");
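
The static_assert above is what the renamed comment is protecting: every compile-time-fixed parameter is a template argument, and combinations that have no dedicated fast path are rejected outright so they never become extra instantiations. A minimal sketch of the pattern, with hypothetical names (AccumRowSketch is not a TFLite symbol; a zero template argument stands for "dynamic"):

// Fixed parameters are template arguments; zero means "read the runtime
// value instead". The static_assert mirrors the TFLite one: a fixed
// input depth is only allowed together with a fixed depth multiplier.
#include <cstdio>

template <int kFixedInputDepth, int kFixedDepthMultiplier>
void AccumRowSketch(int input_depth, int depth_multiplier) {
  static_assert(kFixedDepthMultiplier || !kFixedInputDepth,
                "unsupported template instantiation");
  const int id = kFixedInputDepth ? kFixedInputDepth : input_depth;
  const int dm = kFixedDepthMultiplier ? kFixedDepthMultiplier : depth_multiplier;
  std::printf("input_depth=%d depth_multiplier=%d\n", id, dm);
}

int main() {
  AccumRowSketch<0, 0>(8, 2);  // fully dynamic variant: allowed
  AccumRowSketch<8, 2>(8, 2);  // fully fixed variant: allowed
  // AccumRowSketch<8, 0>(8, 2);  // fixed depth, dynamic multiplier:
  //                              // rejected at compile time
}

A rejected combination costs nothing at runtime; it simply fails to compile, which is how the kernel keeps binary size bounded.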

@@ -1478,7 +1478,7 @@ void QuantizedDepthwiseConvAccumRow(int stride, int dilation_factor,
                                     int out_x_buffer_end, int output_depth,
                                     int32* acc_buffer) {
   ruy::profiler::ScopeLabel label(__PRETTY_FUNCTION__);
-  // Sanity check parameters. This is important in particular to ensure
+  // Consistency check parameters. This is important in particular to ensure
   // that we keep the number of template instantiations minimal, so we don't
   // increase binary size unnecessarily.
   static_assert(kFixedDepthMultiplier || !kFixedInputDepth, "");

@@ -1430,7 +1430,7 @@ void QuantizedDepthwiseConvAccumRow(int stride, int dilation_factor,
                                     int out_x_buffer_end, int output_depth,
                                     int32* acc_buffer) {
   ruy::profiler::ScopeLabel label(__PRETTY_FUNCTION__);
-  // Sanity check parameters. This is important in particular to ensure
+  // Consistency check parameters. This is important in particular to ensure
   // that we keep the number of template instantiations minimal, so we don't
   // increase binary size unnecessarily.
   static_assert(kFixedDepthMultiplier || !kFixedInputDepth, "");

@@ -42,7 +42,7 @@ inline void ConvPerChannel(
   const int32 output_activation_min = params.quantized_activation_min;
   const int32 output_activation_max = params.quantized_activation_max;
-  // Sanity check.
+  // Consistency check.
   TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
   TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
   TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
@@ -143,7 +143,7 @@ inline void ConvPerChannel(
   const int32 output_activation_min = params.quantized_activation_min;
   const int32 output_activation_max = params.quantized_activation_max;
-  // Sanity check.
+  // Consistency check.
   TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
   TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
   TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
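
The TFLITE_DCHECK_* macros here are debug-build-only checks. A rough standalone sketch of what they verify, using plain assert and a hypothetical ShapeSketch type in place of TFLite's shape class (names are illustrative, not the library API):

// Debug-style consistency checks: activation bounds must be ordered and
// conv tensors must be 4-D, as in the ConvPerChannel preamble above.
#include <cassert>
#include <cstdint>

struct ShapeSketch {
  int dims;
  int DimensionsCount() const { return dims; }
};

void CheckConvInputs(int32_t activation_min, int32_t activation_max,
                     const ShapeSketch& input, const ShapeSketch& filter,
                     const ShapeSketch& output) {
  assert(activation_min <= activation_max);
  assert(input.DimensionsCount() == 4);
  assert(filter.DimensionsCount() == 4);
  assert(output.DimensionsCount() == 4);
}

int main() {
  CheckConvInputs(-128, 127, {4}, {4}, {4});  // passes silently
}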

@@ -42,7 +42,7 @@ void TestReferenceResizeNearestNeighbor(
   ASSERT_EQ(expected_output_data, output_data);
 }
-// Sanity test values are from
+// Consistency test values are from
 // third_party/tensorflow/core/kernels/resize_nearest_neighbor_op_test.cc.
 TEST(ResizeNearestNeighborReference, Test2x2To1x1) {
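
The Test2x2To1x1 expectation can be reproduced by hand. A hand-rolled illustration (not the TFLite reference kernel): with align_corners off, the source coordinate is floor(dst * in/out), so a 2x2 -> 1x1 resize keeps only the top-left pixel:

// Nearest-neighbor resize, minimal form: for 2x2 -> 1x1 the scale is 2
// and floor(0 * 2) = 0, so the single output pixel is input[0][0].
#include <cassert>
#include <cmath>
#include <vector>

std::vector<float> ResizeNearest(const std::vector<float>& in, int in_h,
                                 int in_w, int out_h, int out_w) {
  std::vector<float> out(out_h * out_w);
  const float h_scale = static_cast<float>(in_h) / out_h;
  const float w_scale = static_cast<float>(in_w) / out_w;
  for (int y = 0; y < out_h; ++y) {
    for (int x = 0; x < out_w; ++x) {
      const int sy = static_cast<int>(std::floor(y * h_scale));
      const int sx = static_cast<int>(std::floor(x * w_scale));
      out[y * out_w + x] = in[sy * in_w + sx];
    }
  }
  return out;
}

int main() {
  const std::vector<float> input = {1, 2, 3, 4};  // 2x2, row-major
  assert((ResizeNearest(input, 2, 2, 1, 1) == std::vector<float>{1}));
}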

@@ -211,7 +211,7 @@ TfLiteStatus SetOutputTypeToUINT8(ModelT* model,
 TfLiteStatus RemoveInputTensor(ModelT* model,
                                const std::vector<TensorOpTensor>& inputs,
                                int32 original_number_tensors) {
-  // Sanity check to make sure that erase start from the end.
+  // Consistency check to make sure that erasing starts from the end.
   int last_op_index = std::numeric_limits<int32_t>::max();
   int last_tensor_index = std::numeric_limits<int32_t>::max();
   for (auto tot : inputs) {
@@ -237,7 +237,7 @@ TfLiteStatus RemoveInputTensor(ModelT* model,
 TfLiteStatus RemoveOutputTensor(ModelT* model,
                                 const std::vector<TensorOpTensor>& outputs,
                                 int32 original_number_tensors) {
-  // Sanity check to make sure that erase start from the end.
+  // Consistency check to make sure that erasing starts from the end.
   int last_op_index = std::numeric_limits<int32_t>::max();
   int last_tensor_index = std::numeric_limits<int32_t>::max();
   for (auto tot : outputs) {
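
The check being renamed in these two hunks guards an ordering invariant: vector::erase shifts every later element down one slot, so removal by stored index is only safe from the highest index to the lowest. A standalone sketch of the pattern, with plain ints in place of TFLite's ModelT and TensorOpTensor:

// Erase elements by index, asserting the indices arrive strictly
// descending -- the same "erasing starts from the end" invariant the
// TFLite code tracks with last_op_index/last_tensor_index.
#include <cassert>
#include <limits>
#include <vector>

void EraseIndices(std::vector<int>& v, const std::vector<int>& indices) {
  int last_index = std::numeric_limits<int>::max();
  for (int i : indices) {
    assert(i < last_index);  // consistency check: strictly descending
    last_index = i;
    v.erase(v.begin() + i);
  }
}

int main() {
  std::vector<int> v = {10, 20, 30, 40};
  EraseIndices(v, {3, 1});  // descending order keeps indices valid
  assert((v == std::vector<int>{10, 30}));
  // EraseIndices(v, {0, 1}) would trip the assert: after erasing index
  // 0, index 1 no longer points at the element it was recorded for.
}
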
@@ -338,7 +338,7 @@ TfLiteStatus ModifyModelInterface(const string& input_file,
                                   const string& output_file,
                                   const TensorType& input_type,
                                   const TensorType& output_type) {
-  // Sanity Check
+  // Consistency check.
   if (input_type != tflite::TensorType_INT8 &&
       input_type != tflite::TensorType_UINT8) {
     return kTfLiteError;
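
The renamed comment marks an early-out validation: ModifyModelInterface accepts only INT8 and UINT8 interface types and returns an error before touching the model otherwise. A sketch of that pattern, with hypothetical stand-ins for tflite::TensorType and TfLiteStatus:

// Validate the requested type up front and bail out early; only the
// two quantized integer types are supported.
#include <cstdio>

enum class TensorTypeSketch { kInt8, kUint8, kFloat32 };
enum class StatusSketch { kOk, kError };

StatusSketch ModifyInterfaceSketch(TensorTypeSketch input_type) {
  // Consistency check: reject unsupported interface types immediately.
  if (input_type != TensorTypeSketch::kInt8 &&
      input_type != TensorTypeSketch::kUint8) {
    return StatusSketch::kError;
  }
  return StatusSketch::kOk;
}

int main() {
  const bool rejected =
      ModifyInterfaceSketch(TensorTypeSketch::kFloat32) == StatusSketch::kError;
  std::printf("float32 rejected: %d\n", rejected);  // prints 1
}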