Fix uint8 MUL operator with broadcast

The MulSimpleBroadcast function must use MultiplyByQuantizedMultiplier instead of MultiplyByQuantizedMultiplierSmallerThanOneExp, because the MUL operator does not guarantee that the quantized output multiplier is smaller than one.
This commit is contained in:
Thibaut Goetghebuer-Planchon 2020-08-28 09:51:01 +01:00
parent cd035167ca
commit c1348607d1
2 changed files with 22 additions and 15 deletions

View File

@ -2536,9 +2536,9 @@ inline void MulSimpleBroadcast(int size, const ArithmeticParams& params,
const int32 input2_val = params.input2_offset + input2_data[i];
const int32 unclamped_result =
params.output_offset +
MultiplyByQuantizedMultiplierSmallerThanOneExp(input1_val * input2_val,
params.output_multiplier,
params.output_shift);
MultiplyByQuantizedMultiplier(input1_val * input2_val,
params.output_multiplier,
params.output_shift);
const int32 clamped_output =
std::min(params.quantized_activation_max,
std::max(params.quantized_activation_min, unclamped_result));

View File

@ -395,19 +395,26 @@ void WithBroadcast() {
float kQuantizedTolerance = GetTolerance(-3.0, 3.0);
std::vector<std::vector<int>> test_shapes = {
{6}, {2, 3}, {2, 1, 3}, {1, 3, 1, 2}};
// Test both with a quantized multiplier smaller than one and with one greater than one.
std::vector<std::pair<float, float>> test_input_range = {{-3.0, 3.0},
{-6.0, 6.0}};
for (int i = 0; i < test_shapes.size(); ++i) {
QuantizedMulOpModel m({tensor_type, test_shapes[i], -3.0, 3.0},
{tensor_type, {}, -3.0, 3.0}, // always a scalar
{tensor_type, {}, -3.0, 3.0},
ActivationFunctionType_NONE);
m.QuantizeAndPopulate<integer_dtype>(m.input1(),
{-2.0, 0.2, 0.7, 0.8, 1.1, 2.0});
m.QuantizeAndPopulate<integer_dtype>(m.input2(), {0.1});
m.Invoke();
EXPECT_THAT(m.GetDequantizedOutput<integer_dtype>(),
ElementsAreArray(ArrayFloatNear(
{-0.2, 0.02, 0.07, 0.08, 0.11, 0.2}, kQuantizedTolerance)))
<< "With shape number " << i;
for (int j = 0; j < test_input_range.size(); ++j) {
const std::pair<float, float>& input_range = test_input_range[j];
QuantizedMulOpModel m(
{tensor_type, test_shapes[i], input_range.first, input_range.second},
{tensor_type, {}, input_range.first, input_range.second},
{tensor_type, {}, -0.2, 0.2}, ActivationFunctionType_NONE);
m.QuantizeAndPopulate<integer_dtype>(m.input1(),
{-2.0, 0.2, 0.7, 0.8, 1.1, 2.0});
m.QuantizeAndPopulate<integer_dtype>(m.input2(), {0.1});
m.Invoke();
EXPECT_THAT(
m.GetDequantizedOutput<integer_dtype>(),
ElementsAreArray(ArrayFloatNear({-0.2, 0.02, 0.07, 0.08, 0.11, 0.2},
kQuantizedTolerance)))
<< "With shape number " << i << " and range number " << j;
}
}
}