Lite: Kernel_util refactored

ANSHUMAN TRIPATHY 2019-03-22 10:33:31 +05:30 committed by ANSHUMAN TRIPATHY
parent aa8a34071d
commit 7b6d3a0490
6 changed files with 29 additions and 71 deletions
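
In short: the commit deletes the per-type helpers CalculateActivationRangeUint8 and CalculateActivationRangeInt8 and moves every call site onto the existing CalculateActivationRangeQuantized, which dispatches on output->type internally and reports failure through its TfLiteStatus return value. As orientation, here is a minimal sketch of what the unified helper plausibly looks like, reconstructed from the two removed variants (the actual body lives in kernel_util.cc and may differ in detail):

    TfLiteStatus CalculateActivationRangeQuantized(
        TfLiteContext* context, TfLiteFusedActivation activation,
        TfLiteTensor* output, int32_t* act_min, int32_t* act_max) {
      // Derive the raw quantized range from the output tensor's type instead
      // of baking it into per-type wrapper functions.
      int32_t qmin = 0;
      int32_t qmax = 0;
      if (output->type == kTfLiteUInt8) {
        qmin = std::numeric_limits<uint8_t>::min();
        qmax = std::numeric_limits<uint8_t>::max();
      } else if (output->type == kTfLiteInt8) {
        qmin = std::numeric_limits<int8_t>::min();
        qmax = std::numeric_limits<int8_t>::max();
      } else if (output->type == kTfLiteInt16) {
        qmin = std::numeric_limits<int16_t>::min();
        qmax = std::numeric_limits<int16_t>::max();
      } else {
        TF_LITE_ENSURE(context, false);  // Unsupported quantized output type.
      }
      // Narrow [qmin, qmax] according to the fused activation, e.g. Relu
      // raises the lower bound to the quantized zero point.
      CalculateActivationRangeQuantizedImpl(activation, qmin, qmax, output,
                                            act_min, act_max);
      return kTfLiteOk;
    }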


@@ -118,15 +118,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
     QuantizeMultiplierSmallerThanOneExp(
         real_output_multiplier, &data->output_multiplier, &data->output_shift);
 
-    if (output->type == kTfLiteUInt8) {
-      CalculateActivationRangeUint8(params->activation, output,
-                                    &data->output_activation_min,
-                                    &data->output_activation_max);
-    } else {
-      CalculateActivationRangeInt8(params->activation, output,
-                                   &data->output_activation_min,
-                                   &data->output_activation_max);
-    }
+    TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
+        context, params->activation, output, &data->output_activation_min,
+        &data->output_activation_max));
   } else if (output->type == kTfLiteInt16) {
     // 16bit -> 16bit special quantized path, supporting only a rather
     // narrow case of quantization parameters: zero_points must all be 0
@@ -164,9 +158,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
     TF_LITE_ENSURE(context, data->input1_shift <= 0);
     TF_LITE_ENSURE(context, data->input2_shift <= 0);
 
-    CalculateActivationRangeQuantized(context, params->activation, output,
-                                      &data->output_activation_min,
-                                      &data->output_activation_max);
+    TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
+        context, params->activation, output, &data->output_activation_min,
+        &data->output_activation_max));
   }
 
   return context->ResizeTensor(context, output, output_size);
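
Both call sites in this file now go through TF_LITE_ENSURE_STATUS, so a failure from the range calculation (e.g. an unsupported output type) aborts Prepare instead of being silently dropped, as the second hunk's old code did. For reference, the macro behaves roughly as below (a sketch; the real definition lives in the TFLite C headers):

    #define TF_LITE_ENSURE_STATUS(a) \
      do {                           \
        if ((a) != kTfLiteOk) {      \
          return kTfLiteError;       \
        }                            \
      } while (0)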


@@ -84,8 +84,9 @@ TfLiteStatus PopulateConvolutionQuantizationParams(
     // Populate quantization parameters with multiplier and shift.
     QuantizeMultiplier(real_multiplier, multiplier, &exponent);
     *shift = -exponent;
-    CalculateActivationRangeUint8(activation, output, output_activation_min,
-                                  output_activation_max);
+    TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
+        context, activation, output, output_activation_min,
+        output_activation_max));
   }
   return kTfLiteOk;
 }
@@ -174,26 +175,6 @@ TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
   return kTfLiteOk;
 }
 
-void CalculateActivationRangeUint8(TfLiteFusedActivation activation,
-                                   TfLiteTensor* output, int32_t* act_min,
-                                   int32_t* act_max) {
-  const int32_t qmin = std::numeric_limits<uint8_t>::min();
-  const int32_t qmax = std::numeric_limits<uint8_t>::max();
-
-  CalculateActivationRangeQuantizedImpl(activation, qmin, qmax, output, act_min,
-                                        act_max);
-}
-
-void CalculateActivationRangeInt8(TfLiteFusedActivation activation,
-                                  TfLiteTensor* output, int32_t* act_min,
-                                  int32_t* act_max) {
-  const int32_t qmin = std::numeric_limits<int8_t>::min();
-  const int32_t qmax = std::numeric_limits<int8_t>::max();
-
-  CalculateActivationRangeQuantizedImpl(activation, qmin, qmax, output, act_min,
-                                        act_max);
-}
-
 bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) {
   return TfLiteIntArrayEqual(input1->dims, input2->dims);
 }
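
The two deleted definitions differed only in the std::numeric_limits bounds they forwarded to CalculateActivationRangeQuantizedImpl. CalculateActivationRangeQuantized derives the same qmin/qmax from output->type (see the sketch above), so the wrappers were pure duplication, and removing them leaves one fewer place to update when another quantized type needs support.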


@@ -143,12 +143,7 @@ TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
                                                TfLiteTensor* output,
                                                int32_t* act_min,
                                                int32_t* act_max);
-void CalculateActivationRangeUint8(TfLiteFusedActivation activation,
-                                   TfLiteTensor* output, int32_t* act_min,
-                                   int32_t* act_max);
-void CalculateActivationRangeInt8(TfLiteFusedActivation activation,
-                                  TfLiteTensor* output, int32_t* act_min,
-                                  int32_t* act_max);
 
 // Calculates the useful range of an activation layer given its activation
 // tensor.
 template <typename T>
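
For any caller outside the files touched here, the migration is mechanical. A hedged before/after, using the parameter names from this diff (act_min/act_max stand in for whatever the caller passes):

    // Before: the caller picked a helper to match the output type.
    CalculateActivationRangeUint8(activation, output, act_min, act_max);

    // After: one call; the helper dispatches on output->type and can fail,
    // so the status should be checked or explicitly discarded.
    TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
        context, activation, output, act_min, act_max));

The signature change to watch for: the unified function additionally takes a TfLiteContext* and returns TfLiteStatus, which matters in void functions (see the pooling kernels below).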


@@ -83,19 +83,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
     output_size = TfLiteIntArrayCopy(input1->dims);
   }
 
-  if (output->type == kTfLiteUInt8) {
-    CalculateActivationRangeUint8(params->activation, output,
-                                  &data->output_activation_min,
-                                  &data->output_activation_max);
-  }
-  if (output->type == kTfLiteInt8) {
-    CalculateActivationRangeInt8(params->activation, output,
-                                 &data->output_activation_min,
-                                 &data->output_activation_max);
-  }
-
   if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
       output->type == kTfLiteInt16) {
+    TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
+        context, params->activation, output, &data->output_activation_min,
+        &data->output_activation_max));
     double real_multiplier =
         input1->params.scale * input2->params.scale / output->params.scale;
     QuantizeMultiplier(real_multiplier, &data->output_multiplier,
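
A behavioral detail in this hunk: the old code computed an activation range only for kTfLiteUInt8 and kTfLiteInt8, while the multiplier branch already accepted kTfLiteInt16; with the call moved inside the combined condition, kTfLiteInt16 outputs now get their activation range computed as well.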


@@ -144,8 +144,8 @@ void AverageEvalQuantizedUint8(TfLiteContext* context, TfLiteNode* node,
                                TfLiteTensor* output) {
   int32_t activation_min;
   int32_t activation_max;
-  CalculateActivationRangeUint8(params->activation, output, &activation_min,
-                                &activation_max);
+  (void)CalculateActivationRangeQuantized(context, params->activation, output,
+                                          &activation_min, &activation_max);
 #define TF_LITE_AVERAGE_POOL(type)                   \
   tflite::PoolParams op_params;                      \
   op_params.stride_height = params->stride_height;  \
@@ -173,8 +173,9 @@ void AverageEvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node,
                               const TfLiteTensor* input, TfLiteTensor* output) {
   int32_t activation_min;
   int32_t activation_max;
-  CalculateActivationRangeInt8(params->activation, output, &activation_min,
-                               &activation_max);
+  (void)CalculateActivationRangeQuantized(context, params->activation, output,
+                                          &activation_min, &activation_max);
 #define TF_LITE_AVERAGE_POOL(type)                   \
   tflite::PoolParams op_params;                      \
   op_params.stride_height = params->stride_height;  \
@@ -229,8 +230,8 @@ void MaxEvalQuantizedUInt8(TfLiteContext* context, TfLiteNode* node,
                            const TfLiteTensor* input, TfLiteTensor* output) {
   int32_t activation_min;
   int32_t activation_max;
-  CalculateActivationRangeUint8(params->activation, output, &activation_min,
-                                &activation_max);
+  (void)CalculateActivationRangeQuantized(context, params->activation, output,
+                                          &activation_min, &activation_max);
 #define TF_LITE_MAX_POOL(type)                       \
   tflite::PoolParams op_params;                      \
   op_params.stride_height = params->stride_height;  \
@@ -258,8 +259,8 @@ void MaxEvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node,
                           const TfLiteTensor* input, TfLiteTensor* output) {
   int32_t activation_min;
   int32_t activation_max;
-  CalculateActivationRangeInt8(params->activation, output, &activation_min,
-                               &activation_max);
+  (void)CalculateActivationRangeQuantized(context, params->activation, output,
+                                          &activation_min, &activation_max);
 #define TF_LITE_MAX_POOL(type)                       \
   tflite::PoolParams op_params;                      \
   op_params.stride_height = params->stride_height;  \
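
All four pooling hunks use a (void) cast rather than TF_LITE_ENSURE_STATUS because these Eval helpers return void, so the status cannot be propagated from here; the cast documents that the return value is discarded on purpose. Presumably the output types have already been validated by the time Eval runs, but returning the status up through Eval would be the stricter follow-up.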


@@ -131,15 +131,10 @@ TfLiteStatus Prepare8BitSubOp(TfLiteContext* context,
   tflite::QuantizeMultiplierSmallerThanOneExp(real_output_multiplier,
                                               &op_params->output_multiplier,
                                               &op_params->output_shift);
-  if (output->type == kTfLiteUInt8) {
-    CalculateActivationRangeUint8(params->activation, output,
-                                  &op_params->output_activation_min,
-                                  &op_params->output_activation_max);
-  } else {
-    CalculateActivationRangeInt8(params->activation, output,
-                                 &op_params->output_activation_min,
-                                 &op_params->output_activation_max);
-  }
+  TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
+      context, params->activation, output, &op_params->output_activation_min,
+      &op_params->output_activation_max));
 
   return kTfLiteOk;
 }
@@ -183,9 +178,9 @@ TfLiteStatus PrepareInt16SubOp(TfLiteContext* context,
   TF_LITE_ENSURE(context, data->input1_shift <= 0);
   TF_LITE_ENSURE(context, data->input2_shift <= 0);
 
-  CalculateActivationRangeQuantized(context, params->activation, output,
-                                    &data->output_activation_min,
-                                    &data->output_activation_max);
+  TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
+      context, params->activation, output, &data->output_activation_min,
+      &data->output_activation_max));
 
   return kTfLiteOk;
 }