From a5622fee575a238ecae9b70ff079d2f7a2f903a6 Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower" <gardener@tensorflow.org>
Date: Wed, 27 May 2020 11:18:31 -0700
Subject: [PATCH] Fix bugs in quantized PRELU operations.

PiperOrigin-RevId: 313426576
Change-Id: Ifb53ef0add80b5793e428fbfacbbea779bc9ae63
---
 tensorflow/lite/micro/kernels/prelu.cc      |  5 +++--
 tensorflow/lite/micro/kernels/prelu_test.cc | 22 +++++++++++-----------
 2 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/tensorflow/lite/micro/kernels/prelu.cc b/tensorflow/lite/micro/kernels/prelu.cc
index 801181abba4..921aa208ea2 100644
--- a/tensorflow/lite/micro/kernels/prelu.cc
+++ b/tensorflow/lite/micro/kernels/prelu.cc
@@ -68,8 +68,9 @@ TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) {
   int output_shift_1 = 0;
   int32_t output_multiplier_2 = 0;
   int output_shift_2 = 0;
-  if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt16) {
-    double real_multiplier_1 = static_cast<double>(input->params.scale) *
+  if (output->type == kTfLiteInt8 || output->type == kTfLiteUInt8 ||
+      output->type == kTfLiteInt16) {
+    double real_multiplier_1 = static_cast<double>(input->params.scale) /
                                static_cast<double>(output->params.scale);
     double real_multiplier_2 = static_cast<double>(input->params.scale) *
                                static_cast<double>(alpha->params.scale) /
diff --git a/tensorflow/lite/micro/kernels/prelu_test.cc b/tensorflow/lite/micro/kernels/prelu_test.cc
index 66c0a609e8a..4199ae69689 100644
--- a/tensorflow/lite/micro/kernels/prelu_test.cc
+++ b/tensorflow/lite/micro/kernels/prelu_test.cc
@@ -156,14 +156,14 @@ TF_LITE_MICRO_TESTS_BEGIN
 TF_LITE_MICRO_TEST(FloatPreluActivationsOpTest) {
   const int output_dims_count = 12;
   float output_data[output_dims_count];
-  tflite::testing::TestPreluFloat({1, 2, 2, 3},  // input shape
+  tflite::testing::TestPreluFloat({3, 2, 2, 3},  // input shape
                                   {
                                       0.0f, 0.0f, 0.0f,     // Row 1, Column 1
                                       1.0f, 1.0f, 1.0f,     // Row 1, Column 2
                                       -1.0f, -1.0f, -1.0f,  // Row 2, Column 1
                                       -2.0f, -2.0f, -2.0f,  // Row 1, Column 2
                                   },
-                                  {1, 1, 1, 3},        // alpha shape
+                                  {3, 1, 1, 3},        // alpha shape
                                   {0.0f, 1.0f, 2.0f},  // alpha values
                                   {
                                       0.0f, 0.0f, 0.0f,    // Row 1, Column 1
@@ -171,26 +171,26 @@ TF_LITE_MICRO_TEST(FloatPreluActivationsOpTest) {
                                       0.0f, -1.0f, -2.0f,  // Row 2, Column 1
                                       0.0f, -2.0f, -4.0f,  // Row 1, Column 2
                                   },
-                                  {1, 2, 2, 3},  // output shape
+                                  {3, 2, 2, 3},  // output shape
                                   output_data);
 }
 
 TF_LITE_MICRO_TEST(QuantizedUint8PreluActivationsOpTest) {
   using tflite::testing::F2Q;
-  const float kMin = -1;
-  const float kMax = 127.f / 128.f;
+  const float kMin = -4;
+  const float kMax = 127.f / 32.f;
   const float kAlphaMin = -0.5f;
   const float kAlphaMax = 0.5f;
   const int output_dims_count = 12;
   uint8_t output_data[output_dims_count];
   tflite::testing::TestPreluQuantized(
-      {1, 2, 2, 3},  // input shape
+      {3, 2, 2, 3},  // input shape
       {F2Q(0.0f, kMin, kMax), F2Q(0.0f, kMin, kMax), F2Q(0.0f, kMin, kMax),
        F2Q(0.5f, kMin, kMax), F2Q(0.5f, kMin, kMax), F2Q(0.5f, kMin, kMax),
        F2Q(-1.0f, kMin, kMax), F2Q(-1.0f, kMin, kMax),
        F2Q(-1.0f, kMin, kMax), F2Q(-0.25f, kMin, kMax),
        F2Q(-0.25f, kMin, kMax), F2Q(-0.25f, kMin, kMax)},
-      kMin, kMax, {1, 1, 1, 3},  // alpha shape
+      kMin, kMax, {3, 1, 1, 3},  // alpha shape
       {F2Q(0.0f, kMin, kMax), F2Q(0.5f, kMin, kMax), F2Q(-0.5f, kMin, kMax)},
       kMin, kMax,
       {F2Q(0.0f, kMin, kMax), F2Q(0.0f, kMin, kMax), F2Q(0.0f, kMin, kMax),
@@ -198,7 +198,7 @@ TF_LITE_MICRO_TEST(QuantizedUint8PreluActivationsOpTest) {
        F2Q(0.0f, kMin, kMax), F2Q(-0.5f, kMin, kMax), F2Q(0.5f, kMin, kMax),
        F2Q(0.0f, kMin, kMax), F2Q(-0.125f, kMin, kMax),
        F2Q(0.125f, kMin, kMax)},
-      {1, 2, 2, 3},  // output shape
+      {3, 2, 2, 3},  // output shape
       kMin, kMax, output_data);
 }
 
@@ -211,13 +211,13 @@ TF_LITE_MICRO_TEST(QuantizedInt8PreluActivationsOpTest) {
   const int output_dims_count = 12;
   int8_t output_data[output_dims_count];
   tflite::testing::TestPreluQuantized(
-      {1, 2, 2, 3},  // input shape
+      {3, 2, 2, 3},  // input shape
       {F2QS(0.0f, kMin, kMax), F2QS(0.0f, kMin, kMax), F2QS(0.0f, kMin, kMax),
        F2QS(0.5f, kMin, kMax), F2QS(0.5f, kMin, kMax), F2QS(0.5f, kMin, kMax),
        F2QS(-1.0f, kMin, kMax), F2QS(-1.0f, kMin, kMax),
        F2QS(-1.0f, kMin, kMax), F2QS(-0.25f, kMin, kMax),
        F2QS(-0.25f, kMin, kMax), F2QS(-0.25f, kMin, kMax)},
-      kMin, kMax, {1, 1, 1, 3},  // alpha shape
+      kMin, kMax, {3, 1, 1, 3},  // alpha shape
       {F2QS(0.0f, kMin, kMax), F2QS(0.5f, kMin, kMax), F2QS(-0.5f, kMin, kMax)},
       kMin, kMax,
       {F2QS(0.0f, kMin, kMax), F2QS(0.0f, kMin, kMax), F2QS(0.0f, kMin, kMax),
@@ -225,7 +225,7 @@ TF_LITE_MICRO_TEST(QuantizedInt8PreluActivationsOpTest) {
        F2QS(0.0f, kMin, kMax), F2QS(-0.5f, kMin, kMax), F2QS(0.5f, kMin, kMax),
        F2QS(0.0f, kMin, kMax), F2QS(-0.125f, kMin, kMax),
        F2QS(0.125f, kMin, kMax)},
-      {1, 2, 2, 3},  // output shape
+      {3, 2, 2, 3},  // output shape
       kMin, kMax, output_data);
 }
 TF_LITE_MICRO_TESTS_END
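
Annotation (a sketch for review, not part of the patch): the patch fixes two kernel bugs and repairs the tests. In prelu.cc, the type check omitted kTfLiteInt8, so int8 PRELU fell through with the zero-initialized multipliers, and real_multiplier_1 for the positive branch multiplied by the output scale instead of dividing by it. In prelu_test.cc, the shape literals use the TFLM {rank, dim0, dim1, ...} convention, so {1, 2, 2, 3} declared a rank-1 tensor; the corrected {3, 2, 2, 3} and {3, 1, 1, 3} declare the intended 2x2x3 and 1x1x3 tensors. The standalone C++ sketch below reproduces the requantization arithmetic with scales derived from the new uint8 test ranges; the scale formula (max - min) / 255 is an assumption for illustration, not code from the patch.

#include <cstdio>

int main() {
  // Scales implied by the uint8 test ranges kMin = -4, kMax = 127.f / 32.f:
  // (kMax - kMin) / 255 = (255 / 32) / 255 = 1 / 32. The test quantizes the
  // input, alpha, and output tensors with the same range, so all three scales
  // match here.
  const double input_scale = (127.0 / 32.0 + 4.0) / 255.0;
  const double alpha_scale = input_scale;
  const double output_scale = input_scale;

  // Positive branch: input_scale * q_in == output_scale * q_out, hence
  // q_out = q_in * (input_scale / output_scale).
  const double real_multiplier_1 = input_scale / output_scale;   // fixed: 1.0
  const double buggy_multiplier_1 = input_scale * output_scale;  // old code: ~0.00098

  // Negative branch: (input_scale * q_in) * (alpha_scale * q_alpha) ==
  // output_scale * q_out, hence
  // q_out = q_in * q_alpha * (input_scale * alpha_scale / output_scale).
  const double real_multiplier_2 = input_scale * alpha_scale / output_scale;

  std::printf("multiplier_1: fixed = %g, buggy = %g\n", real_multiplier_1,
              buggy_multiplier_1);
  std::printf("multiplier_2: %g\n", real_multiplier_2);
  return 0;
}

With equal input and output scales the positive branch should be an identity rescale (multiplier exactly 1.0); the multiply-instead-of-divide version is off by a factor of output_scale squared, roughly 1000x for these test ranges.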