Fix quantize kernel to prepare quantization parameters for int16->int32 requantization.

Previously, the kernel would run, but the quantized multiplier and shift were not generated properly during Prepare.

PiperOrigin-RevId: 341496881
Change-Id: Id9d2534b09c91b8353e4364bfdb1af5ef3a81f82
Nat Jeffries 2020-11-09 15:23:30 -08:00 committed by TensorFlower Gardener
parent b7687e53d1
commit e6ffdb7c6c


@@ -70,9 +70,10 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
                       output->type == kTfLiteInt16 ||
                       output->type == kTfLiteInt32);
-  if (((input->type == kTfLiteInt16 || input->type == kTfLiteInt8) &&
-       output->type == kTfLiteInt8) ||
-      (input->type == kTfLiteInt16 && output->type == kTfLiteInt16)) {
+  if ((input->type == kTfLiteInt16 && output->type == kTfLiteInt8) ||
+      (input->type == kTfLiteInt8 && output->type == kTfLiteInt8) ||
+      (input->type == kTfLiteInt16 && output->type == kTfLiteInt16) ||
+      (input->type == kTfLiteInt16 && output->type == kTfLiteInt32)) {
     double effective_scale = static_cast<double>(input->params.scale) /
                              static_cast<double>(output->params.scale);
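
For context, once Prepare has computed this effective scale, it still has to be decomposed into an integer multiplier and a power-of-two shift that the integer requantization path consumes at Eval time; that decomposition is what was missing for the int16->int32 case. The following is a minimal, self-contained sketch of such a decomposition, not the TFLite implementation; the function name DecomposeScale and the example scales are hypothetical.

// Sketch: decompose a floating-point effective scale into a Q31 fixed-point
// multiplier and a power-of-two shift, the form an integer requantization
// kernel consumes. Not the TFLite implementation.
#include <cmath>
#include <cstdint>
#include <iostream>

void DecomposeScale(double effective_scale, int32_t* quantized_multiplier,
                    int* shift) {
  if (effective_scale == 0.0) {
    *quantized_multiplier = 0;
    *shift = 0;
    return;
  }
  // effective_scale == q * 2^shift, with q in [0.5, 1).
  const double q = std::frexp(effective_scale, shift);
  // Represent q in Q31 fixed point.
  int64_t q_fixed = static_cast<int64_t>(std::round(q * (1LL << 31)));
  if (q_fixed == (1LL << 31)) {  // Rounding pushed q up to 1.0: renormalize.
    q_fixed /= 2;
    ++*shift;
  }
  *quantized_multiplier = static_cast<int32_t>(q_fixed);
}

int main() {
  // Hypothetical scales: an int16 input with scale 0.25 and an int32 output
  // with scale 0.125 give an effective scale of 2.0.
  const double effective_scale = 0.25 / 0.125;
  int32_t multiplier = 0;
  int shift = 0;
  DecomposeScale(effective_scale, &multiplier, &shift);
  std::cout << "multiplier=" << multiplier << " shift=" << shift << "\n";
  return 0;
}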