From 7b6d3a04900b9e72daf21f13baa7315c58697b8f Mon Sep 17 00:00:00 2001
From: ANSHUMAN TRIPATHY
Date: Fri, 22 Mar 2019 10:33:31 +0530
Subject: [PATCH] Lite: Kernel_util refactored

Fold the per-type helpers CalculateActivationRangeUint8 and
CalculateActivationRangeInt8 into the existing
CalculateActivationRangeQuantized, which selects the quantized min/max
from output->type and reports failures through TfLiteStatus. Callers
that return TfLiteStatus propagate the result with
TF_LITE_ENSURE_STATUS; the void pooling eval paths discard it
explicitly.

---
 tensorflow/lite/kernels/add.cc         | 18 ++++++------------
 tensorflow/lite/kernels/kernel_util.cc | 25 +++----------------------
 tensorflow/lite/kernels/kernel_util.h  |  7 +------
 tensorflow/lite/kernels/mul.cc         | 14 +++-----------
 tensorflow/lite/kernels/pooling.cc     | 17 +++++++++--------
 tensorflow/lite/kernels/sub.cc         | 19 +++++++------------
 6 files changed, 29 insertions(+), 71 deletions(-)

diff --git a/tensorflow/lite/kernels/add.cc b/tensorflow/lite/kernels/add.cc
index 3d3e6b36533..d9b8c87eeb7 100644
--- a/tensorflow/lite/kernels/add.cc
+++ b/tensorflow/lite/kernels/add.cc
@@ -118,15 +118,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
     QuantizeMultiplierSmallerThanOneExp(
         real_output_multiplier, &data->output_multiplier, &data->output_shift);
 
-    if (output->type == kTfLiteUInt8) {
-      CalculateActivationRangeUint8(params->activation, output,
-                                    &data->output_activation_min,
-                                    &data->output_activation_max);
-    } else {
-      CalculateActivationRangeInt8(params->activation, output,
-                                   &data->output_activation_min,
-                                   &data->output_activation_max);
-    }
+    TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
+        context, params->activation, output, &data->output_activation_min,
+        &data->output_activation_max));
   } else if (output->type == kTfLiteInt16) {
     // 16bit -> 16bit special quantized path, supporting only a rather
     // narrow case of quantization parameters: zero_points must all be 0
@@ -164,9 +158,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
     TF_LITE_ENSURE(context, data->input1_shift <= 0);
     TF_LITE_ENSURE(context, data->input2_shift <= 0);
 
-    CalculateActivationRangeQuantized(context, params->activation, output,
-                                      &data->output_activation_min,
-                                      &data->output_activation_max);
+    TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
+        context, params->activation, output, &data->output_activation_min,
+        &data->output_activation_max));
   }
 
   return context->ResizeTensor(context, output, output_size);
diff --git a/tensorflow/lite/kernels/kernel_util.cc b/tensorflow/lite/kernels/kernel_util.cc
index 202140dea4a..715a530317e 100644
--- a/tensorflow/lite/kernels/kernel_util.cc
+++ b/tensorflow/lite/kernels/kernel_util.cc
@@ -84,8 +84,9 @@ TfLiteStatus PopulateConvolutionQuantizationParams(
     // Populate quantization parameters with multiplier and shift.
     QuantizeMultiplier(real_multiplier, multiplier, &exponent);
     *shift = -exponent;
-    CalculateActivationRangeUint8(activation, output, output_activation_min,
-                                  output_activation_max);
+    TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
+        context, activation, output, output_activation_min,
+        output_activation_max));
   }
   return kTfLiteOk;
 }
@@ -174,26 +175,6 @@ TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
   return kTfLiteOk;
 }
 
-void CalculateActivationRangeUint8(TfLiteFusedActivation activation,
-                                   TfLiteTensor* output, int32_t* act_min,
-                                   int32_t* act_max) {
-  const int32_t qmin = std::numeric_limits<uint8_t>::min();
-  const int32_t qmax = std::numeric_limits<uint8_t>::max();
-
-  CalculateActivationRangeQuantizedImpl(activation, qmin, qmax, output, act_min,
-                                        act_max);
-}
-
-void CalculateActivationRangeInt8(TfLiteFusedActivation activation,
-                                  TfLiteTensor* output, int32_t* act_min,
-                                  int32_t* act_max) {
-  const int32_t qmin = std::numeric_limits<int8_t>::min();
-  const int32_t qmax = std::numeric_limits<int8_t>::max();
-
-  CalculateActivationRangeQuantizedImpl(activation, qmin, qmax, output, act_min,
-                                        act_max);
-}
-
 bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) {
   return TfLiteIntArrayEqual(input1->dims, input2->dims);
 }
diff --git a/tensorflow/lite/kernels/kernel_util.h b/tensorflow/lite/kernels/kernel_util.h
index 6155ed6a862..b0caaa539d2 100644
--- a/tensorflow/lite/kernels/kernel_util.h
+++ b/tensorflow/lite/kernels/kernel_util.h
@@ -143,12 +143,7 @@ TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
                                                TfLiteTensor* output,
                                                int32_t* act_min,
                                                int32_t* act_max);
-void CalculateActivationRangeUint8(TfLiteFusedActivation activation,
-                                   TfLiteTensor* output, int32_t* act_min,
-                                   int32_t* act_max);
-void CalculateActivationRangeInt8(TfLiteFusedActivation activation,
-                                  TfLiteTensor* output, int32_t* act_min,
-                                  int32_t* act_max);
+
 // Calculates the useful range of an activation layer given its activation
 // tensor.
 template <typename T>
diff --git a/tensorflow/lite/kernels/mul.cc b/tensorflow/lite/kernels/mul.cc
index ab4cf0879c0..de50619ac6d 100644
--- a/tensorflow/lite/kernels/mul.cc
+++ b/tensorflow/lite/kernels/mul.cc
@@ -83,19 +83,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
     output_size = TfLiteIntArrayCopy(input1->dims);
   }
 
-  if (output->type == kTfLiteUInt8) {
-    CalculateActivationRangeUint8(params->activation, output,
-                                  &data->output_activation_min,
-                                  &data->output_activation_max);
-  }
-  if (output->type == kTfLiteInt8) {
-    CalculateActivationRangeInt8(params->activation, output,
-                                 &data->output_activation_min,
-                                 &data->output_activation_max);
-  }
-
   if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
       output->type == kTfLiteInt16) {
+    TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
+        context, params->activation, output, &data->output_activation_min,
+        &data->output_activation_max));
     double real_multiplier =
         input1->params.scale * input2->params.scale / output->params.scale;
     QuantizeMultiplier(real_multiplier, &data->output_multiplier,
diff --git a/tensorflow/lite/kernels/pooling.cc b/tensorflow/lite/kernels/pooling.cc
index 69342a16262..e871b72f4a1 100644
--- a/tensorflow/lite/kernels/pooling.cc
+++ b/tensorflow/lite/kernels/pooling.cc
@@ -144,8 +144,8 @@ void AverageEvalQuantizedUint8(TfLiteContext* context, TfLiteNode* node,
                                TfLiteTensor* output) {
   int32_t activation_min;
   int32_t activation_max;
-  CalculateActivationRangeUint8(params->activation, output, &activation_min,
-                                &activation_max);
+  (void)CalculateActivationRangeQuantized(context, params->activation, output,
+                                          &activation_min, &activation_max);
 #define TF_LITE_AVERAGE_POOL(type)                                      \
   tflite::PoolParams op_params;                                         \
   op_params.stride_height = params->stride_height;                     \
@@ -173,8 +173,9 @@ void AverageEvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node,
                               const TfLiteTensor* input, TfLiteTensor* output) {
   int32_t activation_min;
   int32_t activation_max;
-  CalculateActivationRangeInt8(params->activation, output, &activation_min,
-                               &activation_max);
+
+  (void)CalculateActivationRangeQuantized(context, params->activation, output,
+                                          &activation_min, &activation_max);
 #define TF_LITE_AVERAGE_POOL(type)                                      \
   tflite::PoolParams op_params;                                         \
   op_params.stride_height = params->stride_height;                     \
@@ -229,8 +230,8 @@ void MaxEvalQuantizedUInt8(TfLiteContext* context, TfLiteNode* node,
                            const TfLiteTensor* input, TfLiteTensor* output) {
   int32_t activation_min;
   int32_t activation_max;
-  CalculateActivationRangeUint8(params->activation, output, &activation_min,
-                                &activation_max);
+  (void)CalculateActivationRangeQuantized(context, params->activation, output,
+                                          &activation_min, &activation_max);
 #define TF_LITE_MAX_POOL(type)                                          \
   tflite::PoolParams op_params;                                         \
   op_params.stride_height = params->stride_height;                     \
@@ -258,8 +259,8 @@ void MaxEvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node,
                           const TfLiteTensor* input, TfLiteTensor* output) {
   int32_t activation_min;
   int32_t activation_max;
-  CalculateActivationRangeInt8(params->activation, output, &activation_min,
-                               &activation_max);
+  (void)CalculateActivationRangeQuantized(context, params->activation, output,
+                                          &activation_min, &activation_max);
 #define TF_LITE_MAX_POOL(type)                                          \
   tflite::PoolParams op_params;                                         \
   op_params.stride_height = params->stride_height;                     \
diff --git a/tensorflow/lite/kernels/sub.cc b/tensorflow/lite/kernels/sub.cc
index f3a81033cd6..f2913faeb76 100644
--- a/tensorflow/lite/kernels/sub.cc
+++ b/tensorflow/lite/kernels/sub.cc
@@ -131,15 +131,10 @@ TfLiteStatus Prepare8BitSubOp(TfLiteContext* context,
   tflite::QuantizeMultiplierSmallerThanOneExp(real_output_multiplier,
                                               &op_params->output_multiplier,
                                               &op_params->output_shift);
-  if (output->type == kTfLiteUInt8) {
-    CalculateActivationRangeUint8(params->activation, output,
-                                  &op_params->output_activation_min,
-                                  &op_params->output_activation_max);
-  } else {
-    CalculateActivationRangeInt8(params->activation, output,
-                                 &op_params->output_activation_min,
-                                 &op_params->output_activation_max);
-  }
+
+  TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
+      context, params->activation, output, &op_params->output_activation_min,
+      &op_params->output_activation_max));
   return kTfLiteOk;
 }
 
@@ -183,9 +178,9 @@ TfLiteStatus PrepareInt16SubOp(TfLiteContext* context,
   TF_LITE_ENSURE(context, data->input1_shift <= 0);
   TF_LITE_ENSURE(context, data->input2_shift <= 0);
 
-  CalculateActivationRangeQuantized(context, params->activation, output,
-                                    &data->output_activation_min,
-                                    &data->output_activation_max);
+  TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
+      context, params->activation, output, &data->output_activation_min,
+      &data->output_activation_max));
 
   return kTfLiteOk;
 }
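
Migration note for out-of-tree kernels (not part of the applied diff): the
rewrite this patch performs is mechanical. A minimal before/after sketch,
assuming a Prepare-style caller with hypothetical params and data pointers:

  // Before: the caller dispatches on the tensor type itself, and neither
  // helper can report an error (both return void).
  if (output->type == kTfLiteUInt8) {
    CalculateActivationRangeUint8(params->activation, output,
                                  &data->output_activation_min,
                                  &data->output_activation_max);
  } else {
    CalculateActivationRangeInt8(params->activation, output,
                                 &data->output_activation_min,
                                 &data->output_activation_max);
  }

  // After: one helper reads output->type (uint8/int8/int16) and returns a
  // TfLiteStatus, which TF_LITE_ENSURE_STATUS propagates to the caller.
  TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
      context, params->activation, output, &data->output_activation_min,
      &data->output_activation_max));

In eval paths that return void (the pooling kernels above), there is no
status to propagate, so the result is explicitly discarded with a (void)
cast.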