Fix kernel_util: all we need to ensure is that input_product_scale & bias_scale are very similar.

The previous logic can fail if both input_product_scale & bias_scale are already very small numbers, because the relative tolerance shrinks along with them.

PiperOrigin-RevId: 306210519
Change-Id: Iad5f31d31f4e75dc0f569803c174bc265f36d699
Renjie Liu 2020-04-13 04:11:03 -07:00 committed by TensorFlower Gardener
parent f453f33156
commit dc8475e8c9


@@ -124,9 +124,11 @@ TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
   // pipeline.
   if (bias) {
     const double bias_scale = static_cast<double>(bias->params.scale);
-    TF_LITE_ENSURE(context,
-                   std::abs(input_product_scale - bias_scale) <=
-                       1e-6 * std::min(input_product_scale, bias_scale));
+    // Here we're making sure the input_product_scale & bias_scale are the
+    // same. Normally this should be guaranteed by the training pipeline; we
+    // are setting the threshold to be 2e-6 to allow some numeric stability
+    // difference.
+    TF_LITE_ENSURE(context, std::abs(input_product_scale - bias_scale) <= 2e-6);
   }
   return GetQuantizedConvolutionMultipler(context, input, filter, output,
                                           multiplier);
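
To make the failure mode concrete, here is a minimal standalone sketch (the scale values are hypothetical, not taken from a real model) comparing the old relative-tolerance check against the new absolute 2e-6 threshold. When both scales are tiny, the old tolerance 1e-6 * std::min(...) shrinks below any realistic export/serialization discrepancy, so the old check rejects scales that are effectively equal while the new check accepts them.

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  // Hypothetical values: both scales are already very small and differ only
  // by a tiny discrepancy introduced by the training/export pipeline.
  const double input_product_scale = 3.0e-10;  // input_scale * filter_scale
  const double bias_scale = 3.1e-10;           // scale stored on the bias tensor

  const double diff = std::abs(input_product_scale - bias_scale);

  // Old check: the allowed difference scales with the values themselves,
  // so for tiny scales it becomes far stricter than intended.
  const bool old_ok = diff <= 1e-6 * std::min(input_product_scale, bias_scale);

  // New check: a fixed absolute threshold of 2e-6.
  const bool new_ok = diff <= 2e-6;

  // Expected: old_ok=0 (rejected), new_ok=1 (accepted).
  std::printf("diff=%g old_ok=%d new_ok=%d\n", diff, old_ok, new_ok);
  return 0;
}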