Switch TFLM Conv2D kernels to flat namespace.

This is incremental progress towards a flat namespace for TFLM. See
https://abseil.io/tips/130 for more context.

Best effort change to the arc_mli implementation. All the others (reference,
cmsis-nn and xtensa_hifimini) build.

PiperOrigin-RevId: 335942662
Change-Id: I97271a681f46337a7c49f3fdf89ae73567b2ed7d
This commit is contained in:
Advait Jain 2020-10-07 13:37:00 -07:00 committed by TensorFlower Gardener
parent 37c2bf5016
commit b13e49836a
7 changed files with 66 additions and 91 deletions

View File

@ -30,9 +30,7 @@ limitations under the License.
#include "tensorflow/lite/micro/kernels/arc_mli/scratch_buffers.h"
namespace tflite {
namespace ops {
namespace micro {
namespace conv {
namespace {
constexpr int kInputTensor = 0;
constexpr int kFilterTensor = 1;
@ -498,19 +496,17 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
} // namespace conv
} // namespace
TfLiteRegistration Register_CONV_2D() {
return {/*init=*/conv::Init,
return {/*init=*/Init,
/*free=*/nullptr,
/*prepare=*/conv::Prepare,
/*invoke=*/conv::Eval,
/*prepare=*/Prepare,
/*invoke=*/Eval,
/*profiling_string=*/nullptr,
/*builtin_code=*/0,
/*custom_name=*/nullptr,
/*version=*/0};
}
} // namespace micro
} // namespace ops
} // namespace tflite

View File

@ -28,9 +28,7 @@ limitations under the License.
#include "tensorflow/lite/micro/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace micro {
namespace conv {
namespace {
constexpr int kInputTensor = 0;
constexpr int kFilterTensor = 1;
@ -442,19 +440,17 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
} // namespace conv
} // namespace
TfLiteRegistration Register_CONV_2D() {
return {/*init=*/conv::Init,
return {/*init=*/Init,
/*free=*/nullptr,
/*prepare=*/conv::Prepare,
/*invoke=*/conv::Eval,
/*prepare=*/Prepare,
/*invoke=*/Eval,
/*profiling_string=*/nullptr,
/*builtin_code=*/0,
/*custom_name=*/nullptr,
/*version=*/0};
}
} // namespace micro
} // namespace ops
} // namespace tflite

View File

@ -26,9 +26,7 @@ limitations under the License.
#include "tensorflow/lite/micro/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace micro {
namespace conv {
namespace {
constexpr int kInputTensor = 0;
constexpr int kFilterTensor = 1;
@ -322,19 +320,17 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
} // namespace conv
} // namespace
TfLiteRegistration Register_CONV_2D() {
return {/*init=*/conv::Init,
return {/*init=*/Init,
/*free=*/nullptr,
/*prepare=*/conv::Prepare,
/*invoke=*/conv::Eval,
/*prepare=*/Prepare,
/*invoke=*/Eval,
/*profiling_string=*/nullptr,
/*builtin_code=*/0,
/*custom_name=*/nullptr,
/*version=*/0};
}
} // namespace micro
} // namespace ops
} // namespace tflite

View File

@ -59,8 +59,7 @@ TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size,
int outputs_array_data[] = {1, 3};
TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
const TfLiteRegistration registration =
tflite::ops::micro::Register_CONV_2D();
const TfLiteRegistration registration = Register_CONV_2D();
micro::KernelRunner runner(
registration, tensors, tensors_size, inputs_array, outputs_array,
reinterpret_cast<void*>(conv_params), micro_test::reporter);

View File

@ -31,6 +31,7 @@ namespace tflite {
// (https://abseil.io/tips/130). Any new ops (or cleanup of existing ops) should
// have their Register function declarations in the tflite namespace.
TfLiteRegistration Register_CONV_2D();
TfLiteRegistration Register_SHAPE();
namespace ops {
@ -44,7 +45,6 @@ TfLiteRegistration Register_AVERAGE_POOL_2D();
TfLiteRegistration Register_CEIL();
// TODO(b/160234179): Change custom OPs to also return by value.
TfLiteRegistration* Register_CIRCULAR_BUFFER();
TfLiteRegistration Register_CONV_2D();
TfLiteRegistration Register_CONCATENATION();
TfLiteRegistration Register_COS();
TfLiteRegistration Register_DEPTHWISE_CONV_2D();

View File

@ -28,11 +28,37 @@ limitations under the License.
#include "tensorflow/lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h"
namespace tflite {
namespace ops {
namespace micro {
namespace conv {
namespace xtensa {
namespace hifimini {
namespace {
constexpr int kInputTensor = 0;
constexpr int kFilterTensor = 1;
constexpr int kBiasTensor = 2;
constexpr int kOutputTensor = 0;
// Conv is quantized along dimension 0:
// https://www.tensorflow.org/lite/performance/quantization_spec
constexpr int kConvQuantizedDimension = 0;
struct OpData {
TfLitePaddingValues padding;
// The scaling factor from input to output (aka the 'real multiplier') can
// be represented as a fixed point multiplier plus a left shift.
int32_t output_multiplier;
int output_shift;
// Cached tensor zero point values for quantized operations.
int32_t input_zero_point;
int32_t output_zero_point;
// Per channel output multiplier and shift.
int32_t* per_channel_output_multiplier;
int32_t* per_channel_output_shift;
// The range of the fused activation layer. For example for kNone and
// uint8_t these would be 0 and 255.
int32_t output_activation_min;
int32_t output_activation_max;
};
void ConvPerChannel(const ConvParams& params, const int32_t* output_multiplier,
const int32_t* output_shift,
@ -146,7 +172,7 @@ void ConvPerChannel(const ConvParams& params, const int32_t* output_multiplier,
// Apply quantized multiplier and accumulate result at 48bit
// alignment:
acc_56 = micro::xtensa::hifimini::MultiplyByQuantizedMultiplier(
acc_56 = ops::micro::xtensa::hifimini::MultiplyByQuantizedMultiplier(
acc_24x2, output_multiplier[out_channel],
output_shift[out_channel]);
@ -223,7 +249,7 @@ inline void Conv1x32Input32x32Filter(
// Apply quantized multiplier and accumulate result at 48bit alignment.
// Convert the (unsigned) 32-bit multiplier down to a 24-bit multiplier.
acc_56 = micro::xtensa::hifimini::MultiplyByQuantizedMultiplier(
acc_56 = ops::micro::xtensa::hifimini::MultiplyByQuantizedMultiplier(
acc_24x2, output_multiplier[ch] >> 8, output_shift[ch]);
// Add output offset, cap activation, and assign to the output:
@ -235,39 +261,6 @@ inline void Conv1x32Input32x32Filter(
}
}
} // namespace hifimini
} // namespace xtensa
constexpr int kInputTensor = 0;
constexpr int kFilterTensor = 1;
constexpr int kBiasTensor = 2;
constexpr int kOutputTensor = 0;
// Conv is quantized along dimension 0:
// https://www.tensorflow.org/lite/performance/quantization_spec
constexpr int kConvQuantizedDimension = 0;
struct OpData {
TfLitePaddingValues padding;
// The scaling factor from input to output (aka the 'real multiplier') can
// be represented as a fixed point multiplier plus a left shift.
int32_t output_multiplier;
int output_shift;
// Cached tensor zero point values for quantized operations.
int32_t input_zero_point;
int32_t output_zero_point;
// Per channel output multiplier and shift.
int32_t* per_channel_output_multiplier;
int32_t* per_channel_output_shift;
// The range of the fused activation layer. For example for kNone and
// uint8_t these would be 0 and 255.
int32_t output_activation_min;
int32_t output_activation_max;
};
TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
TfLiteConvParams* params, int width, int height,
int filter_width, int filter_height, int out_width,
@ -386,16 +379,16 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
op_params.quantized_activation_min = data->output_activation_min;
op_params.quantized_activation_max = data->output_activation_max;
xtensa::hifimini::ConvPerChannel(
op_params, data->per_channel_output_multiplier,
data->per_channel_output_shift, tflite::micro::GetTensorShape(input),
tflite::micro::GetTensorData<int8_t>(input),
tflite::micro::GetTensorShape(filter),
tflite::micro::GetTensorData<int8_t>(filter),
tflite::micro::GetTensorShape(bias),
tflite::micro::GetTensorData<int32_t>(bias),
tflite::micro::GetTensorShape(output),
tflite::micro::GetTensorData<int8_t>(output));
ConvPerChannel(op_params, data->per_channel_output_multiplier,
data->per_channel_output_shift,
tflite::micro::GetTensorShape(input),
tflite::micro::GetTensorData<int8_t>(input),
tflite::micro::GetTensorShape(filter),
tflite::micro::GetTensorData<int8_t>(filter),
tflite::micro::GetTensorShape(bias),
tflite::micro::GetTensorData<int32_t>(bias),
tflite::micro::GetTensorShape(output),
tflite::micro::GetTensorData<int8_t>(output));
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
@ -420,7 +413,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
if (input_dims[0] == 1 && input_dims[1] == 1 && input_dims[2] == 1 &&
input_dims[3] == 32 && filter_dims[0] == 32 && filter_dims[1] == 1 &&
filter_dims[2] == 1 && filter_dims[3] == 32) {
xtensa::hifimini::Conv1x32Input32x32Filter(
Conv1x32Input32x32Filter(
-op_data->input_zero_point, op_data->output_zero_point,
op_data->output_activation_min, op_data->output_activation_max,
op_data->per_channel_output_multiplier,
@ -447,20 +440,17 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
}
return kTfLiteOk;
}
} // namespace conv
} // namespace
TfLiteRegistration Register_CONV_2D() {
return {/*init=*/conv::Init,
return {/*init=*/Init,
/*free=*/nullptr,
/*prepare=*/conv::Prepare,
/*invoke=*/conv::Eval,
/*prepare=*/Prepare,
/*invoke=*/Eval,
/*profiling_string=*/nullptr,
/*builtin_code=*/0,
/*custom_name=*/nullptr,
/*version=*/0};
}
} // namespace micro
} // namespace ops
} // namespace tflite

View File

@ -150,8 +150,7 @@ class MicroMutableOpResolver : public MicroOpResolver {
}
TfLiteStatus AddConv2D() {
return AddBuiltin(BuiltinOperator_CONV_2D,
tflite::ops::micro::Register_CONV_2D(), ParseConv2D);
return AddBuiltin(BuiltinOperator_CONV_2D, Register_CONV_2D(), ParseConv2D);
}
TfLiteStatus AddCos() {
@ -465,7 +464,6 @@ class MicroMutableOpResolver : public MicroOpResolver {
unsigned int num_buitin_ops_ = 0;
ErrorReporter* error_reporter_;
};
}; // namespace tflite