From c1348607d126e9cb3ea8e226f640758152743f6a Mon Sep 17 00:00:00 2001
From: Thibaut Goetghebuer-Planchon
Date: Fri, 28 Aug 2020 09:51:01 +0100
Subject: [PATCH] Fix uint8 MUL operator with broadcast

The MulSimpleBroadcast function must use MultiplyByQuantizedMultiplier
instead of MultiplyByQuantizedMultiplierSmallerThanOneExp as the MUL
operator doesn't guarantee that the quantized multiplier is smaller than 1.
---
 .../internal/optimized/optimized_ops.h |  6 ++--
 tensorflow/lite/kernels/mul_test.cc    | 31 ++++++++++++-------
 2 files changed, 22 insertions(+), 15 deletions(-)

diff --git a/tensorflow/lite/kernels/internal/optimized/optimized_ops.h b/tensorflow/lite/kernels/internal/optimized/optimized_ops.h
index 5290a08a3cd..b6a6307b81d 100644
--- a/tensorflow/lite/kernels/internal/optimized/optimized_ops.h
+++ b/tensorflow/lite/kernels/internal/optimized/optimized_ops.h
@@ -2536,9 +2536,9 @@ inline void MulSimpleBroadcast(int size, const ArithmeticParams& params,
     const int32 input2_val = params.input2_offset + input2_data[i];
     const int32 unclamped_result =
         params.output_offset +
-        MultiplyByQuantizedMultiplierSmallerThanOneExp(input1_val * input2_val,
-                                                       params.output_multiplier,
-                                                       params.output_shift);
+        MultiplyByQuantizedMultiplier(input1_val * input2_val,
+                                      params.output_multiplier,
+                                      params.output_shift);
     const int32 clamped_output =
         std::min(params.quantized_activation_max,
                  std::max(params.quantized_activation_min, unclamped_result));
diff --git a/tensorflow/lite/kernels/mul_test.cc b/tensorflow/lite/kernels/mul_test.cc
index 9499fd40bea..cd6abe04624 100644
--- a/tensorflow/lite/kernels/mul_test.cc
+++ b/tensorflow/lite/kernels/mul_test.cc
@@ -395,19 +395,26 @@ void WithBroadcast() {
   float kQuantizedTolerance = GetTolerance(-3.0, 3.0);
   std::vector<std::vector<int>> test_shapes = {
       {6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
+  // Test with a smaller than 1 and greater than 1 quantization multiplier
+  std::vector<std::pair<float, float>> test_input_range = {{-3.0, 3.0},
+                                                           {-6.0, 6.0}};
   for (int i = 0; i < test_shapes.size(); ++i) {
-    QuantizedMulOpModel m({tensor_type, test_shapes[i], -3.0, 3.0},
-                          {tensor_type, {}, -3.0, 3.0},  // always a scalar
-                          {tensor_type, {}, -3.0, 3.0},
-                          ActivationFunctionType_NONE);
-    m.QuantizeAndPopulate<integer_dtype>(m.input1(),
-                                         {-2.0, 0.2, 0.7, 0.8, 1.1, 2.0});
-    m.QuantizeAndPopulate<integer_dtype>(m.input2(), {0.1});
-    m.Invoke();
-    EXPECT_THAT(m.GetDequantizedOutput<integer_dtype>(),
-                ElementsAreArray(ArrayFloatNear(
-                    {-0.2, 0.02, 0.07, 0.08, 0.11, 0.2}, kQuantizedTolerance)))
-        << "With shape number " << i;
+    for (int j = 0; j < test_input_range.size(); ++j) {
+      const std::pair<float, float>& input_range = test_input_range[j];
+      QuantizedMulOpModel m(
+          {tensor_type, test_shapes[i], input_range.first, input_range.second},
+          {tensor_type, {}, input_range.first, input_range.second},
+          {tensor_type, {}, -0.2, 0.2}, ActivationFunctionType_NONE);
+      m.QuantizeAndPopulate<integer_dtype>(m.input1(),
+                                           {-2.0, 0.2, 0.7, 0.8, 1.1, 2.0});
+      m.QuantizeAndPopulate<integer_dtype>(m.input2(), {0.1});
+      m.Invoke();
+      EXPECT_THAT(
+          m.GetDequantizedOutput<integer_dtype>(),
+          ElementsAreArray(ArrayFloatNear({-0.2, 0.02, 0.07, 0.08, 0.11, 0.2},
+                                          kQuantizedTolerance)))
+          << "With shape number " << i << " and range number " << j;
+    }
   }
 }