Relax the symmetric quantization constraint on int16 input for Quantize. There are other valid use cases that have a non-zero zero point.

PiperOrigin-RevId: 358605601
Change-Id: I3102d1419cfc237fd465df264dd336e8041f2aad
commit ef27805770
parent a7e6b1b379
@@ -136,10 +136,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
                                      &data->output_shift);
   }
 
-  if (input->type == kTfLiteInt16) {
+  if (input->type == kTfLiteInt16 && output->type == kTfLiteInt16) {
     TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
-  }
-  if (output->type == kTfLiteInt16) {
     TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
   }
 
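For context (not part of this commit's diff): TFLite quantized tensors follow the affine scheme real = scale * (q - zero_point), and int16 tensors are normally used symmetrically with zero_point == 0. The relaxed check above keeps that requirement only for the int16 -> int16 case and admits a non-zero zero point otherwise. Below is a minimal standalone sketch of that affine arithmetic; AffineQuantize and AffineDequantize are hypothetical helpers for illustration, not TFLite APIs.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Quantize a real value into int16 with an affine (possibly asymmetric) scheme.
int16_t AffineQuantize(float real, float scale, int32_t zero_point) {
  int32_t q = static_cast<int32_t>(std::round(real / scale)) + zero_point;
  q = std::min<int32_t>(std::max<int32_t>(q, INT16_MIN), INT16_MAX);
  return static_cast<int16_t>(q);
}

// Recover the real value: real = scale * (q - zero_point).
float AffineDequantize(int16_t q, float scale, int32_t zero_point) {
  return scale * static_cast<float>(q - zero_point);
}

int main() {
  // With scale 0.5 and zero_point -1, real 3.0 maps to round(3.0 / 0.5) - 1 = 5
  // and dequantizes back to 0.5 * (5 - (-1)) = 3.0. A non-zero zero point only
  // shifts the representable range; the round trip stays exact on the grid.
  const int16_t q = AffineQuantize(3.0f, 0.5f, -1);
  std::printf("q = %d, real = %.1f\n", q, AffineDequantize(q, 0.5f, -1));
  return 0;
}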
@@ -423,6 +423,19 @@ TEST(QuantizeOpTest, Int16Int8SameScale) {
               ElementsAreArray({1, 3, 5, 7, 9, 11, 13, 15, 17, 19}));
 }
 
+// Input scale 0.500000, output scale 0.500000, input zeropoint -1, output
+// zeropoint -1.
+TEST(QuantizeOpTest, Int16ZeroPointInt8) {
+  QuantizeOpModel m({TensorType_INT16, {1, 1, 2, 5}, 0, 0, 0.5, -1},
+                    {TensorType_INT8, {1, 1, 2, 5}, 0, 0, 0.5, -1});
+
+  // Input will quantized to {2,4,6,8,10,12,14,16,18,20}.
+  m.SetInputAndQuantize<int16_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
+  m.Invoke();
+  EXPECT_THAT(m.GetOutput<int8_t>(),
+              ElementsAreArray({1, 3, 5, 7, 9, 11, 13, 15, 17, 19}));
+}
+
 // Input scale 0.500000, output scale 1.000000, input zeropoint 0, output
 // zeropoint -1
 TEST(QuantizeOpTest, Int16Int8LargerScale) {
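As a rough sanity check on the new Int16ZeroPointInt8 test (again illustrative and not from the commit; Requantize below is a hypothetical helper, not the TFLite kernel), dequantizing with the input parameters and re-quantizing with the output parameters, i.e. q_out = round(in_scale * (q_in - in_zp) / out_scale) + out_zp, with scale 0.5 and zero_point -1 on both sides yields exactly the int8 values {1, 3, 5, 7, 9, 11, 13, 15, 17, 19} expected by the EXPECT_THAT above.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Hypothetical helper: map a quantized value from one affine scheme to another.
int8_t Requantize(int16_t q_in, float in_scale, int32_t in_zp,
                  float out_scale, int32_t out_zp) {
  const float real = in_scale * static_cast<float>(q_in - in_zp);
  int32_t q_out = static_cast<int32_t>(std::round(real / out_scale)) + out_zp;
  q_out = std::min<int32_t>(std::max<int32_t>(q_out, INT8_MIN), INT8_MAX);
  return static_cast<int8_t>(q_out);
}

int main() {
  // Parameters taken from the new test: scale 0.5 and zero_point -1 on both sides.
  const float scale = 0.5f;
  const int32_t zp = -1;
  for (int real = 1; real <= 10; ++real) {
    // Quantize the real input to int16 first, then requantize to int8.
    const int16_t q_in = static_cast<int16_t>(std::lround(real / scale) + zp);
    std::printf("%d ", Requantize(q_in, scale, zp, scale, zp));
  }
  std::printf("\n");  // Prints: 1 3 5 7 9 11 13 15 17 19
  return 0;
}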