From d1a0520270ee43b25204aa290512a20501cc3b85 Mon Sep 17 00:00:00 2001
From: ANSHUMAN TRIPATHY
Date: Mon, 18 Mar 2019 16:49:56 +0530
Subject: [PATCH] Lite: Kernel Util bug fix

---
 tensorflow/lite/kernels/kernel_util.cc      |   9 +-
 tensorflow/lite/kernels/kernel_util_test.cc | 179 ++++++++++++++++++++
 2 files changed, 185 insertions(+), 3 deletions(-)

diff --git a/tensorflow/lite/kernels/kernel_util.cc b/tensorflow/lite/kernels/kernel_util.cc
index 7ff61ac51ad..7f5ab194af3 100644
--- a/tensorflow/lite/kernels/kernel_util.cc
+++ b/tensorflow/lite/kernels/kernel_util.cc
@@ -110,11 +110,14 @@ TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
                                               TfLiteTensor* output,
                                               double* multiplier) {
   const double input_product_scale = input->params.scale * filter->params.scale;
-  const double bias_scale = bias->params.scale;
   // TODO(ahentz): The following conditions must be guaranteed by the training
   // pipeline.
-  TF_LITE_ENSURE(context, std::abs(input_product_scale - bias_scale) <=
-                              1e-6 * std::min(input_product_scale, bias_scale));
+  if (bias) {
+    const double bias_scale = bias->params.scale;
+    TF_LITE_ENSURE(context,
+                   std::abs(input_product_scale - bias_scale) <=
+                       1e-6 * std::min(input_product_scale, bias_scale));
+  }
   return GetQuantizedConvolutionMultipler(context, input, filter, output,
                                           multiplier);
 }
diff --git a/tensorflow/lite/kernels/kernel_util_test.cc b/tensorflow/lite/kernels/kernel_util_test.cc
index a31befbcd16..fdcd30255d4 100644
--- a/tensorflow/lite/kernels/kernel_util_test.cc
+++ b/tensorflow/lite/kernels/kernel_util_test.cc
@@ -360,6 +360,185 @@ TEST_F(KernelUtilTest, CheckAndPopulateZeroValue) {
   TfLiteTensorFree(&output);
 }
 
+TEST_F(KernelUtilTest, CheckAndPopulateUint8) {
+  // Create input.
+  TfLiteTensor input;
+  input.type = kTfLiteUInt8;
+  input.allocation_type = kTfLiteArenaRw;
+  input.dims = TfLiteIntArrayCreate(1);
+  input.dims->data[0] = 2;
+  TfLiteQuantizationParams input_quant = {1, 5};
+  input.params = input_quant;
+  input.quantization.type = kTfLiteAffineQuantization;
+  auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
+      malloc(sizeof(TfLiteAffineQuantization)));
+  input_params->scale = TfLiteFloatArrayCreate(1);
+  input_params->scale->data[0] = 1;
+  input_params->zero_point = TfLiteIntArrayCreate(1);
+  input_params->zero_point->data[0] = 5;
+  input.quantization.params = reinterpret_cast<void*>(input_params);
+
+  // Create filter.
+  TfLiteTensor filter;
+  filter.type = kTfLiteUInt8;
+  filter.allocation_type = kTfLiteArenaRw;
+  filter.dims = TfLiteIntArrayCreate(4);
+  filter.dims->data[0] = 3;
+  filter.dims->data[1] = 4;
+  filter.dims->data[2] = 5;
+  filter.dims->data[3] = 6;
+  TfLiteQuantizationParams filter_quant = {4.6566129e-10, 0};
+  filter.params = filter_quant;
+  filter.quantization.type = kTfLiteAffineQuantization;
+  auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
+      malloc(sizeof(TfLiteAffineQuantization)));
+  filter_params->scale = TfLiteFloatArrayCreate(1);
+  int32_t two_pow_neg_31 = 0x30000000;  // 2^-31 so shift = -30.
+  filter_params->scale->data[0] = *reinterpret_cast<float*>(&two_pow_neg_31);
+  filter_params->zero_point = TfLiteIntArrayCreate(1);
+  filter_params->zero_point->data[0] = 0;
+  filter_params->quantized_dimension = 0;
+  filter.quantization.params = reinterpret_cast<void*>(filter_params);
+
+  // Create bias.
+  TfLiteTensor bias;
+  bias.type = kTfLiteInt32;
+  bias.allocation_type = kTfLiteArenaRw;
+  bias.dims = TfLiteIntArrayCreate(4);
+  TfLiteQuantizationParams bias_quant = {4.6566129e-10, 9};
+  bias.params = bias_quant;
+  bias.quantization.type = kTfLiteAffineQuantization;
+  auto* bias_params = reinterpret_cast<TfLiteAffineQuantization*>(
+      malloc(sizeof(TfLiteAffineQuantization)));
+  bias_params->scale = TfLiteFloatArrayCreate(1);
+  bias_params->scale->data[0] = 4.6566129e-10;  // 2^-31
+  bias_params->zero_point = TfLiteIntArrayCreate(1);
+  bias_params->zero_point->data[0] = 11;
+  bias.quantization.params = reinterpret_cast<void*>(bias_params);
+
+  // Create output.
+  TfLiteTensor output;
+  output.type = kTfLiteUInt8;
+  output.allocation_type = kTfLiteArenaRw;
+  output.dims = nullptr;
+  TfLiteQuantizationParams output_quant = {1, -128};
+  output.params = output_quant;
+  output.quantization.type = kTfLiteAffineQuantization;
+  auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
+      malloc(sizeof(TfLiteAffineQuantization)));
+  output_params->scale = TfLiteFloatArrayCreate(1);
+  output_params->scale->data[0] = 1;
+  output_params->zero_point = TfLiteIntArrayCreate(1);
+  output_params->zero_point->data[0] = -128;
+  output.quantization.params = reinterpret_cast<void*>(output_params);
+
+  // Create call parameters.
+  TfLiteContext context;
+  int32_t multiplier;
+  int shift;
+  int32_t output_activation_min;
+  int32_t output_activation_max;
+  std::vector<int32_t> per_channel_multiplier(1);
+  std::vector<int> per_channel_shift(1);
+
+  // Call and verify results for per channel case.
+  EXPECT_EQ(
+      kTfLiteOk,
+      PopulateConvolutionQuantizationParams(
+          &context, &input, &filter, &bias, &output, kTfLiteActRelu,
+          &multiplier, &shift, &output_activation_min, &output_activation_max,
+          per_channel_multiplier.data(), per_channel_shift.data()));
+  EXPECT_THAT(per_channel_multiplier, ::testing::ElementsAre(1073741824));
+  EXPECT_THAT(per_channel_shift, ::testing::ElementsAre(-30));
+
+  // Release.
+  TfLiteTensorFree(&input);
+  TfLiteTensorFree(&filter);
+  TfLiteTensorFree(&bias);
+  TfLiteTensorFree(&output);
+}
+
+TEST_F(KernelUtilTest, CheckAndPopulateWithoutBias) {
+  // Create input.
+  TfLiteTensor input;
+  input.type = kTfLiteUInt8;
+  input.allocation_type = kTfLiteArenaRw;
+  input.dims = TfLiteIntArrayCreate(1);
+  input.dims->data[0] = 2;
+  TfLiteQuantizationParams input_quant = {1, 5};
+  input.params = input_quant;
+  input.quantization.type = kTfLiteAffineQuantization;
+  auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
+      malloc(sizeof(TfLiteAffineQuantization)));
+  input_params->scale = TfLiteFloatArrayCreate(1);
+  input_params->scale->data[0] = 1;
+  input_params->zero_point = TfLiteIntArrayCreate(1);
+  input_params->zero_point->data[0] = 5;
+  input.quantization.params = reinterpret_cast<void*>(input_params);
+
+  // Create filter.
+  TfLiteTensor filter;
+  filter.type = kTfLiteUInt8;
+  filter.allocation_type = kTfLiteArenaRw;
+  filter.dims = TfLiteIntArrayCreate(4);
+  filter.dims->data[0] = 3;
+  filter.dims->data[1] = 4;
+  filter.dims->data[2] = 5;
+  filter.dims->data[3] = 6;
+  TfLiteQuantizationParams filter_quant = {4.6566129e-10, 0};
+  filter.params = filter_quant;
+  filter.quantization.type = kTfLiteAffineQuantization;
+  auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
+      malloc(sizeof(TfLiteAffineQuantization)));
+  filter_params->scale = TfLiteFloatArrayCreate(1);
+  int32_t two_pow_neg_31 = 0x30000000;  // 2^-31 so shift = -30.
+  filter_params->scale->data[0] = *reinterpret_cast<float*>(&two_pow_neg_31);
+  filter_params->zero_point = TfLiteIntArrayCreate(1);
+  filter_params->zero_point->data[0] = 0;
+  filter_params->quantized_dimension = 0;
+  filter.quantization.params = reinterpret_cast<void*>(filter_params);
+
+  // Create output.
+  TfLiteTensor output;
+  output.type = kTfLiteUInt8;
+  output.allocation_type = kTfLiteArenaRw;
+  output.dims = nullptr;
+  TfLiteQuantizationParams output_quant = {1, -128};
+  output.params = output_quant;
+  output.quantization.type = kTfLiteAffineQuantization;
+  auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
+      malloc(sizeof(TfLiteAffineQuantization)));
+  output_params->scale = TfLiteFloatArrayCreate(1);
+  output_params->scale->data[0] = 1;
+  output_params->zero_point = TfLiteIntArrayCreate(1);
+  output_params->zero_point->data[0] = -128;
+  output.quantization.params = reinterpret_cast<void*>(output_params);
+
+  // Create call parameters.
+  TfLiteContext context;
+  int32_t multiplier;
+  int shift;
+  int32_t output_activation_min;
+  int32_t output_activation_max;
+  std::vector<int32_t> per_channel_multiplier(1);
+  std::vector<int> per_channel_shift(1);
+
+  // Call and verify results for per channel case.
+  EXPECT_EQ(
+      kTfLiteOk,
+      PopulateConvolutionQuantizationParams(
+          &context, &input, &filter, nullptr, &output, kTfLiteActRelu,
+          &multiplier, &shift, &output_activation_min, &output_activation_max,
+          per_channel_multiplier.data(), per_channel_shift.data()));
+  EXPECT_THAT(per_channel_multiplier, ::testing::ElementsAre(1073741824));
+  EXPECT_THAT(per_channel_shift, ::testing::ElementsAre(-30));
+
+  // Release.
+  TfLiteTensorFree(&input);
+  TfLiteTensorFree(&filter);
+  TfLiteTensorFree(&output);
+}
+
 }  // namespace
 }  // namespace tflite