Use persistent buffer in conv and xtensa_hifimini/conv.

PiperOrigin-RevId: 309825806
Change-Id: Ia5266ae0b0902ee3dc64f96955a76838ad96d45a
Author: Nat Jeffries, 2020-05-04 15:13:42 -07:00 (committed by TensorFlower Gardener)
Parent: 411ddcf013
Commit: 0a5a4f0d83
8 changed files with 135 additions and 96 deletions
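
The change replaces the fixed-size per-channel arrays (int32_t ...[kMaxChannels]) in both conv kernels, and the statically slotted kStaticOpData pool in the Xtensa kernel, with memory requested from the interpreter's persistent arena. Condensed from the conv.cc hunks below (a sketch only; the full error handling and validation are in the diffs):

    // Init: carve the op's OpData out of the persistent arena, once per
    // op instance.
    void* Init(TfLiteContext* context, const char* buffer, size_t length) {
      void* data = nullptr;
      if (context->AllocatePersistentBuffer(context, sizeof(OpData), &data) ==
          kTfLiteError) {
        return nullptr;
      }
      return data;
    }

    // Prepare: size the per-channel quantization arrays from the filter
    // tensor instead of a compile-time kMaxChannels cap.
    const int num_channels = filter->dims->data[kConvQuantizedDimension];
    TF_LITE_ENSURE_STATUS(context->AllocatePersistentBuffer(
        context, num_channels * sizeof(int32_t),
        reinterpret_cast<void**>(&data->per_channel_output_multiplier)));

Because these buffers now come out of the tensor arena, the example arena sizes below grow accordingly (45 to 50 KB, 73 to 93 KB, and 125 to 136 KB).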
All changed files are under tensorflow/lite/micro: the first six diffs below adjust example arena sizes under examples/, and the last two are kernels/conv.cc and kernels/xtensa_hifimini/conv.cc.


@@ -53,7 +53,7 @@ TF_LITE_MICRO_TEST(TestImageRecognitionInvoke) {
   micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_SOFTMAX,
                                tflite::ops::micro::Register_SOFTMAX());
 
-  const int tensor_arena_size = 45 * 1024;
+  const int tensor_arena_size = 50 * 1024;
   uint8_t tensor_arena[tensor_arena_size];
   tflite::MicroInterpreter interpreter(model, micro_op_resolver, tensor_arena,


@@ -67,7 +67,7 @@ int main(int argc, char** argv) {
   micro_op_resolver.AddBuiltin(tflite::BuiltinOperator_SOFTMAX,
                                tflite::ops::micro::Register_SOFTMAX());
 
-  constexpr int tensor_arena_size = 45 * 1024;
+  constexpr int tensor_arena_size = 50 * 1024;
   uint8_t tensor_arena[tensor_arena_size];
   tflite::MicroInterpreter interpreter(model, resolver, tensor_arena,
                                        tensor_arena_size, error_reporter);


@@ -34,7 +34,7 @@ tflite::MicroInterpreter* interpreter = nullptr;
 TfLiteTensor* input = nullptr;
 
 // An area of memory to use for input, output, and intermediate arrays.
-constexpr int kTensorArenaSize = 73 * 1024;
+constexpr int kTensorArenaSize = 93 * 1024;
 static uint8_t tensor_arena[kTensorArenaSize];
 }  // namespace


@@ -27,7 +27,7 @@ limitations under the License.
 #include "tensorflow/lite/version.h"
 
 // Create an area of memory to use for input, output, and intermediate arrays.
-constexpr int tensor_arena_size = 73 * 1024;
+constexpr int tensor_arena_size = 93 * 1024;
 uint8_t tensor_arena[tensor_arena_size];
 
 TF_LITE_MICRO_TESTS_BEGIN


@@ -41,7 +41,7 @@ TfLiteTensor* input = nullptr;
 // signed value.
 
 // An area of memory to use for input, output, and intermediate arrays.
-constexpr int kTensorArenaSize = 125 * 1024;
+constexpr int kTensorArenaSize = 136 * 1024;
 static uint8_t tensor_arena[kTensorArenaSize];
 }  // namespace


@@ -27,7 +27,7 @@ limitations under the License.
 #include "tensorflow/lite/version.h"
 
 // Create an area of memory to use for input, output, and intermediate arrays.
-constexpr int tensor_arena_size = 125 * 1024;
+constexpr int tensor_arena_size = 136 * 1024;
 uint8_t tensor_arena[tensor_arena_size];
 
 TF_LITE_MICRO_TESTS_BEGIN

tensorflow/lite/micro/kernels/conv.cc

@@ -33,7 +33,6 @@ constexpr int kInputTensor = 0;
 constexpr int kFilterTensor = 1;
 constexpr int kBiasTensor = 2;
 constexpr int kOutputTensor = 0;
-constexpr int kMaxChannels = 1024;
 
 // Conv is quantized along dimension 0:
 // https://www.tensorflow.org/lite/performance/quantization_spec
@@ -49,9 +48,8 @@ struct OpData {
   int output_shift;
 
   // Per channel output multiplier and shift.
-  // TODO(b/141139247): Allocate these dynamically when possible.
-  int32_t per_channel_output_multiplier[kMaxChannels];
-  int32_t per_channel_output_shift[kMaxChannels];
+  int32_t* per_channel_output_multiplier;
+  int32_t* per_channel_output_shift;
 
   // The range of the fused activation layer. For example for kNone and
   // uint8_t these would be 0 and 255.
@@ -72,10 +70,10 @@ inline PaddingType RuntimePaddingType(TfLitePadding padding) {
 }
 
 TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
-                             TfLiteConvParams* params, int width, int height,
-                             int filter_width, int filter_height, int out_width,
-                             int out_height, const TfLiteType data_type,
-                             OpData* data) {
+                             const TfLiteConvParams* params, int width,
+                             int height, int filter_width, int filter_height,
+                             int out_width, int out_height,
+                             const TfLiteType data_type, OpData* data) {
   bool has_bias = node->inputs->size == 3;
   // Check number of inputs/outputs
   TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2);
@@ -109,8 +107,69 @@ TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
   return kTfLiteOk;
 }
 
+void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
+  void* data = nullptr;
+  if (context->AllocatePersistentBuffer(context, sizeof(OpData), &data) ==
+      kTfLiteError) {
+    return nullptr;
+  }
+  return data;
+}
+
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+  TFLITE_DCHECK(node->user_data != nullptr);
+  TFLITE_DCHECK(node->builtin_data != nullptr);
+
+  OpData* data = static_cast<OpData*>(node->user_data);
+  const auto params = static_cast<const TfLiteConvParams*>(node->builtin_data);
+
+  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* filter = GetInput(context, node, kFilterTensor);
+
+  int input_width = input->dims->data[2];
+  int input_height = input->dims->data[1];
+  int filter_width = filter->dims->data[2];
+  int filter_height = filter->dims->data[1];
+  int output_width = output->dims->data[2];
+  int output_height = output->dims->data[1];
+
+  // Dynamically allocate per-channel quantization parameters.
+  const int num_channels = filter->dims->data[kConvQuantizedDimension];
+  TF_LITE_ENSURE_STATUS(context->AllocatePersistentBuffer(
+      context, num_channels * sizeof(int32_t),
+      reinterpret_cast<void**>(&data->per_channel_output_multiplier)));
+  TF_LITE_ENSURE_STATUS(context->AllocatePersistentBuffer(
+      context, num_channels * sizeof(int32_t),
+      reinterpret_cast<void**>(&data->per_channel_output_shift)));
+
+  // All per-channel quantized tensors need valid zero point and scale arrays.
+  if (input->type == kTfLiteInt8) {
+    TF_LITE_ENSURE_EQ(context, filter->quantization.type,
+                      kTfLiteAffineQuantization);
+
+    const auto* affine_quantization =
+        static_cast<TfLiteAffineQuantization*>(filter->quantization.params);
+    TF_LITE_ENSURE(context, affine_quantization);
+    TF_LITE_ENSURE(context, affine_quantization->scale);
+    TF_LITE_ENSURE(context, affine_quantization->zero_point);
+    TF_LITE_ENSURE(context,
+                   affine_quantization->scale->size == 1 ||
+                       affine_quantization->scale->size ==
+                           filter->dims->data[kConvQuantizedDimension]);
+    TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size,
+                      affine_quantization->zero_point->size);
+  }
+
+  return CalculateOpData(context, node, params, input_width, input_height,
+                         filter_width, filter_height, output_width,
+                         output_height, input->type, data);
+}  // namespace conv
+
 void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
-                   TfLiteConvParams* params, OpData* data,
+                   TfLiteConvParams* params, const OpData& data,
                    const TfLiteTensor* input, const TfLiteTensor* filter,
                    const TfLiteTensor* bias, TfLiteTensor* im2col,
                    TfLiteTensor* hwcn_weights, TfLiteTensor* output) {
@@ -118,10 +177,11 @@ void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
   const int32_t filter_offset = -filter->params.zero_point;
   const int32_t output_offset = output->params.zero_point;
 
+  // TODO(b/154032858): Investigate removing extra copies.
   ConvParams op_params;
   op_params.padding_type = RuntimePaddingType(params->padding);
-  op_params.padding_values.width = data->padding.width;
-  op_params.padding_values.height = data->padding.height;
+  op_params.padding_values.width = data.padding.width;
+  op_params.padding_values.height = data.padding.height;
   op_params.stride_width = params->stride_width;
   op_params.stride_height = params->stride_height;
   op_params.dilation_width_factor = params->dilation_width_factor;
@@ -129,10 +189,10 @@ void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
   op_params.input_offset = input_offset;
   op_params.weights_offset = filter_offset;
   op_params.output_offset = output_offset;
-  op_params.output_multiplier = data->output_multiplier;
-  op_params.output_shift = -data->output_shift;
-  op_params.quantized_activation_min = data->output_activation_min;
-  op_params.quantized_activation_max = data->output_activation_max;
+  op_params.output_multiplier = data.output_multiplier;
+  op_params.output_shift = -data.output_shift;
+  op_params.quantized_activation_min = data.output_activation_min;
+  op_params.quantized_activation_max = data.output_activation_max;
   reference_ops::Conv(op_params, GetTensorShape(input),
                       GetTensorData<uint8_t>(input), GetTensorShape(filter),
                       GetTensorData<uint8_t>(filter), GetTensorShape(bias),
@@ -142,11 +202,12 @@ void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
 }
 
 void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
-                             TfLiteConvParams* params, OpData* data,
+                             TfLiteConvParams* params, const OpData& data,
                              const TfLiteTensor* input,
                              const TfLiteTensor* filter,
                              const TfLiteTensor* bias, TfLiteTensor* output,
                              TfLiteTensor* im2col) {
+  // TODO(b/154032858): Investigate removing extra copies.
   ConvParams op_params;
   op_params.input_offset = -input->params.zero_point;
   op_params.output_offset = output->params.zero_point;
@@ -154,14 +215,14 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
   op_params.stride_width = params->stride_width;
   op_params.dilation_height_factor = params->dilation_height_factor;
   op_params.dilation_width_factor = params->dilation_width_factor;
-  op_params.padding_values.height = data->padding.height;
-  op_params.padding_values.width = data->padding.width;
-  op_params.quantized_activation_min = data->output_activation_min;
-  op_params.quantized_activation_max = data->output_activation_max;
+  op_params.padding_values.height = data.padding.height;
+  op_params.padding_values.width = data.padding.width;
+  op_params.quantized_activation_min = data.output_activation_min;
+  op_params.quantized_activation_max = data.output_activation_max;
 
   reference_integer_ops::ConvPerChannel(
-      op_params, data->per_channel_output_multiplier,
-      data->per_channel_output_shift, GetTensorShape(input),
+      op_params, data.per_channel_output_multiplier,
+      data.per_channel_output_shift, GetTensorShape(input),
       GetTensorData<int8>(input), GetTensorShape(filter),
       GetTensorData<int8>(filter), GetTensorShape(bias),
       GetTensorData<int32>(bias), GetTensorShape(output),
@@ -169,18 +230,18 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
 }
 
 void EvalFloat(TfLiteContext* context, TfLiteNode* node,
-               TfLiteConvParams* params, OpData* data,
+               TfLiteConvParams* params, const OpData& data,
                const TfLiteTensor* input, const TfLiteTensor* filter,
                const TfLiteTensor* bias, TfLiteTensor* im2col,
                TfLiteTensor* hwcn_weights, TfLiteTensor* output) {
   float output_activation_min, output_activation_max;
   CalculateActivationRange(params->activation, &output_activation_min,
                            &output_activation_max);
+  // TODO(b/154032858): Investigate removing extra copies.
   ConvParams op_params;
   op_params.padding_type = RuntimePaddingType(params->padding);
-  op_params.padding_values.width = data->padding.width;
-  op_params.padding_values.height = data->padding.height;
+  op_params.padding_values.width = data.padding.width;
+  op_params.padding_values.height = data.padding.height;
   op_params.stride_width = params->stride_width;
   op_params.stride_height = params->stride_height;
   op_params.dilation_width_factor = params->dilation_width_factor;
@@ -204,50 +265,20 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteTensor* filter = GetInput(context, node, kFilterTensor);
   const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);
 
-  int input_width = input->dims->data[2];
-  int input_height = input->dims->data[1];
-  int filter_width = filter->dims->data[2];
-  int filter_height = filter->dims->data[1];
-  int output_width = output->dims->data[2];
-  int output_height = output->dims->data[1];
-
-  OpData data;
-
-  // All per-channel quantized tensors need valid zero point and scale arrays.
-  if (input->type == kTfLiteInt8) {
-    TF_LITE_ENSURE_EQ(context, filter->quantization.type,
-                      kTfLiteAffineQuantization);
-
-    const auto* affine_quantization =
-        reinterpret_cast<TfLiteAffineQuantization*>(
-            filter->quantization.params);
-    TF_LITE_ENSURE(context, affine_quantization);
-    TF_LITE_ENSURE(context, affine_quantization->scale);
-    TF_LITE_ENSURE(context, affine_quantization->zero_point);
-    TF_LITE_ENSURE(context,
-                   affine_quantization->scale->size == 1 ||
-                       affine_quantization->scale->size ==
-                           filter->dims->data[kConvQuantizedDimension]);
-    TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size,
-                      affine_quantization->zero_point->size);
-  }
-
-  TF_LITE_ENSURE_STATUS(CalculateOpData(
-      context, node, params, input_width, input_height, filter_width,
-      filter_height, output_width, output_height, input->type, &data));
+  TFLITE_DCHECK(node->user_data != nullptr);
+  const OpData& data = *(static_cast<const OpData*>(node->user_data));
 
   switch (input->type) {  // Already know in/out types are same.
     case kTfLiteFloat32:
-      EvalFloat(context, node, params, &data, input, filter, bias, nullptr,
+      EvalFloat(context, node, params, data, input, filter, bias, nullptr,
                 nullptr, output);
       break;
    case kTfLiteInt8:
-      EvalQuantizedPerChannel(context, node, params, &data, input, filter, bias,
+      EvalQuantizedPerChannel(context, node, params, data, input, filter, bias,
                               output, nullptr);
      break;
    case kTfLiteUInt8:
-      EvalQuantized(context, node, params, &data, input, filter, bias, nullptr,
+      EvalQuantized(context, node, params, data, input, filter, bias, nullptr,
                     nullptr, output);
      break;
    default:
@@ -261,9 +292,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace conv
 
 TfLiteRegistration* Register_CONV_2D() {
-  static TfLiteRegistration r = {/*init=*/nullptr,
+  static TfLiteRegistration r = {/*init=*/conv::Init,
                                  /*free=*/nullptr,
-                                 /*prepare=*/nullptr,
+                                 /*prepare=*/conv::Prepare,
                                  /*invoke=*/conv::Eval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,

tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc

@@ -185,9 +185,6 @@ inline void Conv1x32Input32x32Filter(
       ae_q56s acc_56 = AE_ZEROQ56();
       const int8_t* input_vals_ptr = input_data - 2;
       for (int i = 0; i < kFilterDepth; i += 2) {
-        // Find current input index, minus 2 for Xtensa load
-        // alignments:
-
         // Load signed 2x 8bit values and right shift into 24bit
         // alignment:
         ae_p24x2s input_vals_24x2;
@@ -244,7 +241,6 @@ constexpr int kInputTensor = 0;
 constexpr int kFilterTensor = 1;
 constexpr int kBiasTensor = 2;
 constexpr int kOutputTensor = 0;
-constexpr int kMaxChannels = 32;
 
 // Conv is quantized along dimension 0:
 // https://www.tensorflow.org/lite/performance/quantization_spec
@@ -258,9 +254,8 @@ struct OpData {
   int output_shift;
 
   // Per channel output multiplier and shift.
-  // TODO(b/141139247): Allocate these dynamically when possible.
-  int32_t per_channel_output_multiplier[kMaxChannels];
-  int32_t per_channel_output_shift[kMaxChannels];
+  int32_t* per_channel_output_multiplier;
+  int32_t* per_channel_output_shift;
 
   // The range of the fused activation layer. For example for kNone and
   // uint8_t these would be 0 and 255.
@@ -268,12 +263,6 @@ struct OpData {
   int32_t output_activation_max;
 };
 
-// These constants represent constants specific to the music detect model.
-// They exist until (b/132070898) is fixed.
-static const int kMaxOpDataSize = 6;
-static int op_data_counter = 0;
-static OpData kStaticOpData[kMaxOpDataSize];
-
 TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
                              TfLiteConvParams* params, int width, int height,
                              int filter_width, int filter_height, int out_width,
@@ -301,30 +290,37 @@ TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
     TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
     int output_channels = filter->dims->data[kConvQuantizedDimension];
 
-    TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams(
+    return tflite::PopulateConvolutionQuantizationParams(
         context, input, filter, bias, output, params->activation,
         &data->output_multiplier, &data->output_shift,
         &data->output_activation_min, &data->output_activation_max,
         data->per_channel_output_multiplier,
         reinterpret_cast<int*>(data->per_channel_output_shift),
-        output_channels));
+        output_channels);
   }
   return kTfLiteOk;
 }
 
-void Free(TfLiteContext* context, void* buffer) { op_data_counter = 0; }
+void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
+  void* data = nullptr;
+  if (context->AllocatePersistentBuffer(context, sizeof(OpData), &data) ==
+      kTfLiteError) {
+    return nullptr;
+  }
+  return data;
+}
 
 TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+  TFLITE_DCHECK(node->user_data != nullptr);
+  TFLITE_DCHECK(node->builtin_data != nullptr);
+
   auto* params = reinterpret_cast<TfLiteConvParams*>(node->builtin_data);
 
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
   const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   const TfLiteTensor* filter = GetInput(context, node, kFilterTensor);
 
-  // TODO(b/132070898): Use statically slotted OpData structures until a
-  // scratch memory API is ready.
-  OpData* op_data = &kStaticOpData[op_data_counter++];
-  node->user_data = op_data;
+  auto* op_data = reinterpret_cast<OpData*>(node->user_data);
 
   int input_width = input->dims->data[2];
   int input_height = input->dims->data[1];
@@ -333,6 +329,17 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   int output_width = output->dims->data[2];
   int output_height = output->dims->data[1];
 
+  // Per channel quantization is only needed for int8 inference. For other
+  // quantized types, only a single scale and zero point is needed.
+  const int num_channels = filter->dims->data[kConvQuantizedDimension];
+  // Dynamically allocate per-channel quantization parameters.
+  TF_LITE_ENSURE_STATUS(context->AllocatePersistentBuffer(
+      context, num_channels * sizeof(int32_t),
+      reinterpret_cast<void**>(&op_data->per_channel_output_multiplier)));
+  TF_LITE_ENSURE_STATUS(context->AllocatePersistentBuffer(
+      context, num_channels * sizeof(int32_t),
+      reinterpret_cast<void**>(&op_data->per_channel_output_shift)));
+
   // All per-channel quantized tensors need valid zero point and scale arrays.
   if (input->type == kTfLiteInt8) {
     TF_LITE_ENSURE_EQ(context, filter->quantization.type,
@@ -353,11 +360,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
                       affine_quantization->zero_point->size);
   }
 
-  TF_LITE_ENSURE_STATUS(CalculateOpData(
-      context, node, params, input_width, input_height, filter_width,
-      filter_height, output_width, output_height, input->type, op_data));
-
-  return kTfLiteOk;
+  return CalculateOpData(context, node, params, input_width, input_height,
+                         filter_width, filter_height, output_width,
+                         output_height, input->type, op_data);
 }
void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node, void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
@ -366,6 +371,7 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* filter, const TfLiteTensor* filter,
const TfLiteTensor* bias, TfLiteTensor* output, const TfLiteTensor* bias, TfLiteTensor* output,
TfLiteTensor* im2col) { TfLiteTensor* im2col) {
// TODO(b/154032858): Investigate removing extra copies.
ConvParams op_params; ConvParams op_params;
op_params.input_offset = -input->params.zero_point; op_params.input_offset = -input->params.zero_point;
op_params.output_offset = output->params.zero_point; op_params.output_offset = output->params.zero_point;
@@ -388,6 +394,8 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
 }
 
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+  TFLITE_DCHECK(node->user_data != nullptr);
+  TFLITE_DCHECK(node->builtin_data != nullptr);
   auto* params = reinterpret_cast<TfLiteConvParams*>(node->builtin_data);
   auto* op_data = reinterpret_cast<OpData*>(node->user_data);
@@ -429,8 +437,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace conv
 
 TfLiteRegistration* Register_CONV_2D() {
-  static TfLiteRegistration r = {/*init=*/nullptr,
-                                 /*free=*/conv::Free,
+  static TfLiteRegistration r = {/*init=*/conv::Init,
+                                 /*free=*/nullptr,
                                  /*prepare=*/conv::Prepare,
                                  /*invoke=*/conv::Eval,
                                  /*profiling_string=*/nullptr,