After this change, compiled code is up to 100 bytes smaller per object on AArch64, and all of the corresponding runtime code is removed. Similar savings are expected on other platforms.

PiperOrigin-RevId: 304261011
Change-Id: I22536a10fd1379e06aea331263bb8deb121a02e1
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h"
|
|
|
|
#include "tensorflow/lite/c/builtin_op_data.h"
|
|
#include "tensorflow/lite/c/common.h"
|
|
#include "tensorflow/lite/kernels/internal/common.h"
|
|
#include "tensorflow/lite/kernels/internal/quantization_util.h"
|
|
#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h"
|
|
#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h"
|
|
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
|
|
#include "tensorflow/lite/kernels/kernel_util.h"
|
|
#include "tensorflow/lite/kernels/padding.h"
|
|
|
|
namespace tflite {
namespace ops {
namespace micro {
namespace depthwise_conv {
namespace {

constexpr int kInputTensor = 0;
constexpr int kFilterTensor = 1;
constexpr int kBiasTensor = 2;
constexpr int kOutputTensor = 0;
constexpr int kMaxChannels = 256;

// Depthwise conv is quantized along dimension 3:
// https://www.tensorflow.org/lite/performance/quantization_spec
constexpr int kDepthwiseConvQuantizedDimension = 3;
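// The depthwise filter tensor is laid out as
// [1, filter_height, filter_width, output_channels], so dimension 3 holds the
// output channels and therefore the per-channel quantization scales.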

struct OpData {
  TfLitePaddingValues padding;
  // The scaling factor from input to output (aka the 'real multiplier') can
  // be represented as a fixed point multiplier plus a left shift.
  int32_t output_multiplier;
  int output_shift;
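  // For example (illustrative values): a real multiplier of ~0.30 can be
  // expressed as the Q31 fixed point value round(0.60 * 2^31) combined with a
  // one-bit shift, since 0.60 * 2^-1 ~= 0.30.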

  // Per channel output multiplier and shift.
  // TODO(b/141139247): Allocate these dynamically when possible.
  int32_t per_channel_output_multiplier[kMaxChannels];
  int32_t per_channel_output_shift[kMaxChannels];

  // The range of the fused activation layer. For example for kNone and
  // uint8_t these would be 0 and 255.
  int32_t output_activation_min;
  int32_t output_activation_max;
};

TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
                             TfLiteDepthwiseConvParams* params, int width,
                             int height, int filter_width, int filter_height,
                             const TfLiteType data_type, OpData* data) {
  bool has_bias = node->inputs->size == 3;
  // Check number of inputs/outputs
  TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2);
  TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);

  int unused_output_height, unused_output_width;
  data->padding = ComputePaddingHeightWidth(
      params->stride_height, params->stride_width, 1, 1, height, width,
      filter_height, filter_width, params->padding, &unused_output_height,
      &unused_output_width);

  // Note that quantized inference requires that all tensors have their
  // parameters set. This is usually done during quantized training.
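  // Here "parameters" means each tensor's quantization scale and zero point,
  // which the converter records in the model.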
  if (data_type != kTfLiteFloat32) {
    const TfLiteTensor* input = GetInput(context, node, kInputTensor);
    const TfLiteTensor* filter = GetInput(context, node, kFilterTensor);
    const TfLiteTensor* bias =
        GetOptionalInputTensor(context, node, kBiasTensor);
    TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
    int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension];

    TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams(
        context, input, filter, bias, output, params->activation,
        &data->output_multiplier, &data->output_shift,
        &data->output_activation_min, &data->output_activation_max,
        data->per_channel_output_multiplier,
        reinterpret_cast<int*>(data->per_channel_output_shift), num_channels));
  }
  return kTfLiteOk;
}

}  // namespace

void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  return nullptr;
}

void Free(TfLiteContext* context, void* buffer) {}

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;
}

void EvalFloat(TfLiteContext* context, TfLiteNode* node,
               TfLiteDepthwiseConvParams* params, OpData* data,
               const TfLiteTensor* input, const TfLiteTensor* filter,
               const TfLiteTensor* bias, TfLiteTensor* output) {
  float output_activation_min, output_activation_max;
  CalculateActivationRange(params->activation, &output_activation_min,
                           &output_activation_max);

  tflite::DepthwiseParams op_params;
  // Padding type is ignored, but still set.
  op_params.padding_type = PaddingType::kSame;
  op_params.padding_values.width = data->padding.width;
  op_params.padding_values.height = data->padding.height;
  op_params.stride_width = params->stride_width;
  op_params.stride_height = params->stride_height;
  op_params.dilation_width_factor = params->dilation_width_factor;
  op_params.dilation_height_factor = params->dilation_height_factor;
  op_params.depth_multiplier = params->depth_multiplier;
  op_params.float_activation_min = output_activation_min;
  op_params.float_activation_max = output_activation_max;

  tflite::reference_ops::DepthwiseConv(
      op_params, GetTensorShape(input), GetTensorData<float>(input),
      GetTensorShape(filter), GetTensorData<float>(filter),
      GetTensorShape(bias), GetTensorData<float>(bias), GetTensorShape(output),
      GetTensorData<float>(output));
}

void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
                             TfLiteDepthwiseConvParams* params, OpData* data,
                             const TfLiteTensor* input,
                             const TfLiteTensor* filter,
                             const TfLiteTensor* bias, TfLiteTensor* output) {
  DepthwiseParams op_params;
  op_params.padding_type = PaddingType::kSame;
  op_params.padding_values.width = data->padding.width;
  op_params.padding_values.height = data->padding.height;
  op_params.stride_width = params->stride_width;
  op_params.stride_height = params->stride_height;
  op_params.dilation_width_factor = params->dilation_width_factor;
  op_params.dilation_height_factor = params->dilation_height_factor;
  op_params.depth_multiplier = params->depth_multiplier;
  op_params.input_offset = -input->params.zero_point;
  op_params.weights_offset = 0;
  op_params.output_offset = output->params.zero_point;
  // TODO(b/130439627): Use calculated value for clamping.
  op_params.quantized_activation_min = std::numeric_limits<int8_t>::min();
  op_params.quantized_activation_max = std::numeric_limits<int8_t>::max();

  reference_integer_ops::DepthwiseConvPerChannel(
      op_params, data->per_channel_output_multiplier,
      data->per_channel_output_shift, GetTensorShape(input),
      GetTensorData<int8>(input), GetTensorShape(filter),
      GetTensorData<int8>(filter), GetTensorShape(bias),
      GetTensorData<int32>(bias), GetTensorShape(output),
      GetTensorData<int8>(output));
}

void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
                   TfLiteDepthwiseConvParams* params, OpData* data,
                   const TfLiteTensor* input, const TfLiteTensor* filter,
                   const TfLiteTensor* bias, TfLiteTensor* output) {
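  // Quantized values map to reals as real = scale * (q - zero_point), so the
  // input and filter zero points are negated here (the kernel adds the offsets
  // to the raw values), while the output zero point is added back unchanged.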
  const int32_t input_offset = -input->params.zero_point;
  const int32_t filter_offset = -filter->params.zero_point;
  const int32_t output_offset = output->params.zero_point;

  tflite::DepthwiseParams op_params;
  // Padding type is ignored, but still set.
  op_params.padding_type = PaddingType::kSame;
  op_params.padding_values.width = data->padding.width;
  op_params.padding_values.height = data->padding.height;
  op_params.stride_width = params->stride_width;
  op_params.stride_height = params->stride_height;
  op_params.dilation_width_factor = params->dilation_width_factor;
  op_params.dilation_height_factor = params->dilation_height_factor;
  op_params.depth_multiplier = params->depth_multiplier;
  op_params.quantized_activation_min = data->output_activation_min;
  op_params.quantized_activation_max = data->output_activation_max;
  op_params.input_offset = input_offset;
  op_params.weights_offset = filter_offset;
  op_params.output_offset = output_offset;
  op_params.output_multiplier = data->output_multiplier;
  // Legacy ops used mixed left and right shifts. Now all are
  // positive-means-left-shift, hence the negation here.
  op_params.output_shift = -data->output_shift;

  tflite::reference_ops::DepthwiseConv(
      op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
      GetTensorShape(filter), GetTensorData<uint8_t>(filter),
      GetTensorShape(bias), GetTensorData<int32_t>(bias),
      GetTensorShape(output), GetTensorData<uint8_t>(output));
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  auto* params =
      reinterpret_cast<TfLiteDepthwiseConvParams*>(node->builtin_data);

  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  const TfLiteTensor* filter = GetInput(context, node, kFilterTensor);
  const TfLiteTensor* bias =
      (NumInputs(node) == 3) ? GetInput(context, node, kBiasTensor) : nullptr;

  const TfLiteType data_type = input->type;
  int width = SizeOfDimension(input, 2);
  int height = SizeOfDimension(input, 1);
  int filter_width = SizeOfDimension(filter, 2);
  int filter_height = SizeOfDimension(filter, 1);

  OpData data;

  // All per-channel quantized tensors need valid zero point and scale arrays.
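  // Per the quantization spec, int8 filters are quantized per output channel
  // (symmetric, zero point 0), so the scale array must hold either a single
  // entry or one entry per channel along dimension 3.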
  if (input->type == kTfLiteInt8) {
    TF_LITE_ENSURE_EQ(context, filter->quantization.type,
                      kTfLiteAffineQuantization);

    const auto* affine_quantization =
        reinterpret_cast<TfLiteAffineQuantization*>(
            filter->quantization.params);
    TF_LITE_ENSURE(context, affine_quantization);
    TF_LITE_ENSURE(context, affine_quantization->scale);
    TF_LITE_ENSURE(context, affine_quantization->zero_point);
    TF_LITE_ENSURE(
        context, affine_quantization->scale->size == 1 ||
                     affine_quantization->scale->size ==
                         filter->dims->data[kDepthwiseConvQuantizedDimension]);
    TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size,
                      affine_quantization->zero_point->size);
  }

  TF_LITE_ENSURE_STATUS(CalculateOpData(context, node, params, width, height,
                                        filter_width, filter_height, data_type,
                                        &data));

  // TODO(aselle): Consider whether float conv and quantized conv should be
  // separate ops to avoid dispatch overhead here.
  switch (input->type) {  // Already know in/out types are same.
    case kTfLiteFloat32:
      EvalFloat(context, node, params, &data, input, filter, bias, output);
      break;
    case kTfLiteInt8:
      EvalQuantizedPerChannel(context, node, params, &data, input, filter,
                              bias, output);
      break;
    case kTfLiteUInt8:
      EvalQuantized(context, node, params, &data, input, filter, bias, output);
      break;
    default:
      TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.",
                         TfLiteTypeGetName(input->type), input->type);
      return kTfLiteError;
  }
  return kTfLiteOk;
}

}  // namespace depthwise_conv

TfLiteRegistration* Register_DEPTHWISE_CONV_2D() {
  static TfLiteRegistration r = {/*init=*/depthwise_conv::Init,
                                 /*free=*/depthwise_conv::Free,
                                 /*prepare=*/depthwise_conv::Prepare,
                                 /*invoke=*/depthwise_conv::Eval,
                                 /*profiling_string=*/nullptr,
                                 /*builtin_code=*/0,
                                 /*custom_name=*/nullptr,
                                 /*version=*/0};
  return &r;
}

}  // namespace micro
}  // namespace ops
}  // namespace tflite