Use persistent buffer in depthwise_conv and xtensa_hifimini/depthwise_conv.

PiperOrigin-RevId: 309830793
Change-Id: I5ee1ee93e3d85faf648ca8d4c938760f598bc8da
Nat Jeffries 2020-05-04 15:37:24 -07:00 committed by TensorFlower Gardener
parent bff3eb21dd
commit e03b0d0480
2 changed files with 107 additions and 66 deletions
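At a high level, both kernels move from fixed-size, statically allocated state to buffers requested from the interpreter's persistent arena: Init allocates the OpData struct, and Prepare sizes the per-channel quantization arrays for the model's actual channel count. A minimal sketch of that pattern follows, assuming the AllocatePersistentBuffer callback signature used in this commit; the OpData struct here is trimmed to the two per-channel arrays, num_channels is a placeholder for the value the real kernels read from the filter tensor, and error handling is reduced to plain status returns.

#include <cstddef>
#include <cstdint>

#include "tensorflow/lite/c/common.h"

struct OpData {
  // Per-channel quantization parameters, allocated from the arena instead of
  // fixed kMaxChannels-sized arrays.
  int32_t* per_channel_output_multiplier;
  int32_t* per_channel_output_shift;
};

// Called once per op instance; the micro interpreter stores the returned
// pointer in node->user_data for Prepare and Eval to use.
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  void* data = nullptr;
  if (context->AllocatePersistentBuffer(context, sizeof(OpData), &data) ==
      kTfLiteError) {
    return nullptr;
  }
  return data;
}

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = static_cast<OpData*>(node->user_data);
  // Placeholder: the real kernels read this from the filter tensor's
  // quantized dimension (dimension 3 for depthwise conv).
  const int num_channels = 8;
  // Size the per-channel arrays for this model rather than a compile-time cap.
  if (context->AllocatePersistentBuffer(
          context, num_channels * sizeof(int32_t),
          reinterpret_cast<void**>(&data->per_channel_output_multiplier)) !=
      kTfLiteOk) {
    return kTfLiteError;
  }
  if (context->AllocatePersistentBuffer(
          context, num_channels * sizeof(int32_t),
          reinterpret_cast<void**>(&data->per_channel_output_shift)) !=
      kTfLiteOk) {
    return kTfLiteError;
  }
  return kTfLiteOk;
}

Compared with the previous approach, this removes the per-kernel channel caps (kMaxChannels of 1024 in the reference kernel, 32 in the HiFi Mini kernel) and the statically slotted kStaticOpData array, so memory use scales with the model instead of a hard-coded limit.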

depthwise_conv.cc

@@ -35,7 +35,6 @@ constexpr int kInputTensor = 0;
constexpr int kFilterTensor = 1;
constexpr int kBiasTensor = 2;
constexpr int kOutputTensor = 0;
-constexpr int kMaxChannels = 1024;

// Depthwise conv is quantized along dimension 3:
// https://www.tensorflow.org/lite/performance/quantization_spec
@@ -49,10 +48,8 @@ struct OpData {
  int output_shift;

  // Per channel output multiplier and shift.
-  // TODO(b/141139247): Allocate these dynamically when possible.
-  int32_t per_channel_output_multiplier[kMaxChannels];
-  int32_t per_channel_output_shift[kMaxChannels];
+  int32_t* per_channel_output_multiplier;
+  int32_t* per_channel_output_shift;

  // The range of the fused activation layer. For example for kNone and
  // uint8_t these would be 0 and 255.
  int32_t output_activation_min;
@@ -84,20 +81,81 @@ TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
    TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
    int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension];
-    TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams(
+    return tflite::PopulateConvolutionQuantizationParams(
        context, input, filter, bias, output, params->activation,
        &data->output_multiplier, &data->output_shift,
        &data->output_activation_min, &data->output_activation_max,
        data->per_channel_output_multiplier,
-        reinterpret_cast<int*>(data->per_channel_output_shift), num_channels));
+        reinterpret_cast<int*>(data->per_channel_output_shift), num_channels);
  }
  return kTfLiteOk;
}

}  // namespace

+void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
+  void* data = nullptr;
+  if (context->AllocatePersistentBuffer(context, sizeof(OpData), &data) ==
+      kTfLiteError) {
+    return nullptr;
+  }
+  return data;
+}
+
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+  TFLITE_DCHECK(node->user_data != nullptr);
+  TFLITE_DCHECK(node->builtin_data != nullptr);
+
+  auto* params =
+      reinterpret_cast<TfLiteDepthwiseConvParams*>(node->builtin_data);
+  OpData* data = static_cast<OpData*>(node->user_data);
+
+  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
+  const TfLiteTensor* filter = GetInput(context, node, kFilterTensor);
+
+  const TfLiteType data_type = input->type;
+  int width = SizeOfDimension(input, 2);
+  int height = SizeOfDimension(input, 1);
+  int filter_width = SizeOfDimension(filter, 2);
+  int filter_height = SizeOfDimension(filter, 1);
+
+  // Per channel quantization is only needed for int8 inference. For other
+  // quantized types, only a single scale and zero point is needed.
+  const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension];
+  // Dynamically allocate per-channel quantization parameters.
+  TF_LITE_ENSURE_STATUS(context->AllocatePersistentBuffer(
+      context, num_channels * sizeof(int32_t),
+      reinterpret_cast<void**>(&data->per_channel_output_multiplier)));
+  TF_LITE_ENSURE_STATUS(context->AllocatePersistentBuffer(
+      context, num_channels * sizeof(int32_t),
+      reinterpret_cast<void**>(&data->per_channel_output_shift)));
+
+  // All per-channel quantized tensors need valid zero point and scale arrays.
+  if (input->type == kTfLiteInt8) {
+    TF_LITE_ENSURE_EQ(context, filter->quantization.type,
+                      kTfLiteAffineQuantization);
+
+    const auto* affine_quantization =
+        reinterpret_cast<TfLiteAffineQuantization*>(
+            filter->quantization.params);
+    TF_LITE_ENSURE(context, affine_quantization);
+    TF_LITE_ENSURE(context, affine_quantization->scale);
+    TF_LITE_ENSURE(context, affine_quantization->zero_point);
+    TF_LITE_ENSURE(
+        context, affine_quantization->scale->size == 1 ||
+                     affine_quantization->scale->size ==
+                         filter->dims->data[kDepthwiseConvQuantizedDimension]);
+    TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size,
+                      affine_quantization->zero_point->size);
+  }
+
+  return CalculateOpData(context, node, params, width, height, filter_width,
+                         filter_height, data_type, data);
+}

void EvalFloat(TfLiteContext* context, TfLiteNode* node,
-               TfLiteDepthwiseConvParams* params, OpData* data,
+               TfLiteDepthwiseConvParams* params, const OpData* data,
               const TfLiteTensor* input, const TfLiteTensor* filter,
               const TfLiteTensor* bias, TfLiteTensor* output) {
  float output_activation_min, output_activation_max;
@@ -125,8 +183,8 @@ void EvalFloat(TfLiteContext* context, TfLiteNode* node,
}

void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
-                             TfLiteDepthwiseConvParams* params, OpData* data,
-                             const TfLiteTensor* input,
+                             TfLiteDepthwiseConvParams* params,
+                             const OpData* data, const TfLiteTensor* input,
                             const TfLiteTensor* filter,
                             const TfLiteTensor* bias, TfLiteTensor* output) {
  DepthwiseParams op_params;
@@ -155,7 +213,7 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
}

void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
-                   TfLiteDepthwiseConvParams* params, OpData* data,
+                   TfLiteDepthwiseConvParams* params, const OpData* data,
                   const TfLiteTensor* input, const TfLiteTensor* filter,
                   const TfLiteTensor* bias, TfLiteTensor* output) {
  const int32_t input_offset = -input->params.zero_point;
@@ -189,8 +247,12 @@ void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+  TFLITE_DCHECK(node->user_data != nullptr);
+  TFLITE_DCHECK(node->builtin_data != nullptr);
+
  auto* params =
      reinterpret_cast<TfLiteDepthwiseConvParams*>(node->builtin_data);
+  const OpData& data = *(static_cast<const OpData*>(node->user_data));

  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
@@ -198,37 +260,6 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* bias =
      (NumInputs(node) == 3) ? GetInput(context, node, kBiasTensor) : nullptr;

-  const TfLiteType data_type = input->type;
-  int width = SizeOfDimension(input, 2);
-  int height = SizeOfDimension(input, 1);
-  int filter_width = SizeOfDimension(filter, 2);
-  int filter_height = SizeOfDimension(filter, 1);
-
-  OpData data;
-
-  // All per-channel quantized tensors need valid zero point and scale arrays.
-  if (input->type == kTfLiteInt8) {
-    TF_LITE_ENSURE_EQ(context, filter->quantization.type,
-                      kTfLiteAffineQuantization);
-
-    const auto* affine_quantization =
-        reinterpret_cast<TfLiteAffineQuantization*>(
-            filter->quantization.params);
-    TF_LITE_ENSURE(context, affine_quantization);
-    TF_LITE_ENSURE(context, affine_quantization->scale);
-    TF_LITE_ENSURE(context, affine_quantization->zero_point);
-    TF_LITE_ENSURE(
-        context, affine_quantization->scale->size == 1 ||
-                     affine_quantization->scale->size ==
-                         filter->dims->data[kDepthwiseConvQuantizedDimension]);
-    TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size,
-                      affine_quantization->zero_point->size);
-  }
-
-  TF_LITE_ENSURE_STATUS(CalculateOpData(context, node, params, width, height,
-                                        filter_width, filter_height, data_type,
-                                        &data));
-
  // TODO(aselle): Consider whether float conv and quantized conv should be
  // separate ops to avoid dispatch overhead here.
  switch (input->type) {  // Already know in/out types are same.
@@ -253,9 +284,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
}  // namespace depthwise_conv

TfLiteRegistration* Register_DEPTHWISE_CONV_2D() {
-  static TfLiteRegistration r = {/*init=*/nullptr,
+  static TfLiteRegistration r = {/*init=*/depthwise_conv::Init,
                                 /*free=*/nullptr,
-                                /*prepare=*/nullptr,
+                                /*prepare=*/depthwise_conv::Prepare,
                                 /*invoke=*/depthwise_conv::Eval,
                                 /*profiling_string=*/nullptr,
                                 /*builtin_code=*/0,

xtensa_hifimini/depthwise_conv.cc

@@ -40,8 +40,7 @@ inline void DepthwiseConvPerChannel(
    const int8* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape,
    int8* output_data) {
-  // Get parameters.
-  // TODO(b/141565753): Re-introduce ScopedProfilingLabel on Micro.
+  // TODO(b/154032858): Investigate removing extra copies.
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int dilation_width_factor = params.dilation_width_factor;
@@ -289,7 +288,6 @@ constexpr int kInputTensor = 0;
constexpr int kFilterTensor = 1;
constexpr int kBiasTensor = 2;
constexpr int kOutputTensor = 0;
-constexpr int kMaxChannels = 32;

// Depthwise conv is quantized along dimension 3:
// https://www.tensorflow.org/lite/performance/quantization_spec
@@ -304,8 +302,8 @@ struct OpData {
  // Per channel output multiplier and shift.
  // TODO(b/141139247): Allocate these dynamically when possible.
-  int32_t per_channel_output_multiplier[kMaxChannels];
-  int32_t per_channel_output_shift[kMaxChannels];
+  int32_t* per_channel_output_multiplier;
+  int32_t* per_channel_output_shift;

  // The range of the fused activation layer. For example for kNone and
  // uint8_t these would be 0 and 255.
@@ -313,12 +311,6 @@ struct OpData {
  int32_t output_activation_max;
};

-// These constants represent constants specific to the music detect model.
-// They exist until (b/132070898) is fixed.
-static const int kMaxOpDataSize = 6;
-static int op_data_counter = 0;
-static OpData kStaticOpData[kMaxOpDataSize];
-
TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
                             TfLiteDepthwiseConvParams* params, int width,
                             int height, int filter_width, int filter_height,
@@ -358,19 +350,26 @@ TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
}  // namespace

-void Free(TfLiteContext* context, void* buffer) { op_data_counter = 0; }
+void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
+  void* data = nullptr;
+  if (context->AllocatePersistentBuffer(context, sizeof(OpData), &data) ==
+      kTfLiteError) {
+    return nullptr;
+  }
+  return data;
+}

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+  TFLITE_DCHECK(node->user_data != nullptr);
+  TFLITE_DCHECK(node->builtin_data != nullptr);
+
  auto* params =
      reinterpret_cast<TfLiteDepthwiseConvParams*>(node->builtin_data);

  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  const TfLiteTensor* filter = GetInput(context, node, kFilterTensor);

-  // TODO(b/132070898): Use statically slotted OpData structures until a
-  // scratch memory API is ready.
-  OpData* op_data = &kStaticOpData[op_data_counter++];
-  node->user_data = op_data;
+  auto* op_data = reinterpret_cast<OpData*>(node->user_data);

  const TfLiteType data_type = input->type;
  int width = SizeOfDimension(input, 2);
@@ -378,6 +377,17 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  int filter_width = SizeOfDimension(filter, 2);
  int filter_height = SizeOfDimension(filter, 1);

+  // Per channel quantization is only needed for int8 inference. For other
+  // quantized types, only a single scale and zero point is needed.
+  const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension];
+  // Dynamically allocate per-channel quantization parameters.
+  TF_LITE_ENSURE_STATUS(context->AllocatePersistentBuffer(
+      context, num_channels * sizeof(int32_t),
+      reinterpret_cast<void**>(&op_data->per_channel_output_multiplier)));
+  TF_LITE_ENSURE_STATUS(context->AllocatePersistentBuffer(
+      context, num_channels * sizeof(int32_t),
+      reinterpret_cast<void**>(&op_data->per_channel_output_shift)));
+
  // All per-channel quantized tensors need valid zero point and scale arrays.
  if (input->type == kTfLiteInt8) {
    TF_LITE_ENSURE_EQ(context, filter->quantization.type,
@@ -397,10 +407,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
                      affine_quantization->zero_point->size);
  }

-  TF_LITE_ENSURE_STATUS(CalculateOpData(context, node, params, width, height,
-                                        filter_width, filter_height, data_type,
-                                        op_data));
-
-  return kTfLiteOk;
+  return CalculateOpData(context, node, params, width, height, filter_width,
+                         filter_height, data_type, op_data);
}

void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
@@ -434,6 +442,8 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+  TFLITE_DCHECK(node->user_data != nullptr);
+  TFLITE_DCHECK(node->builtin_data != nullptr);
  auto* params =
      reinterpret_cast<TfLiteDepthwiseConvParams*>(node->builtin_data);
  auto* op_data = reinterpret_cast<OpData*>(node->user_data);
@@ -477,8 +487,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
}  // namespace depthwise_conv

TfLiteRegistration* Register_DEPTHWISE_CONV_2D() {
-  static TfLiteRegistration r = {/*init=*/nullptr,
-                                 /*free=*/depthwise_conv::Free,
+  static TfLiteRegistration r = {/*init=*/depthwise_conv::Init,
+                                 /*free=*/nullptr,
                                 /*prepare=*/depthwise_conv::Prepare,
                                 /*invoke=*/depthwise_conv::Eval,
                                 /*profiling_string=*/nullptr,