Switch TFLM quantize kernels to flat namespace.

This is incremental progress towards a flat namespace for TFLM. See
https://abseil.io/tips/130 for more context.

PiperOrigin-RevId: 335946049
Change-Id: Ie28a33927967798470825c2dfffd01dd926d0f40
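For call sites, the flat namespace moves the registration symbol up two levels. A minimal before/after sketch of application-side code (illustrative only, not part of this commit's diff):

// Before this change: registration lived in the nested kernel namespace.
TfLiteRegistration reg_before = tflite::ops::micro::Register_QUANTIZE();

// After this change: the declaration sits directly under tflite::.
TfLiteRegistration reg_after = tflite::Register_QUANTIZE();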
Author: Advait Jain
Date: 2020-10-07 13:53:13 -07:00
Committed by: TensorFlower Gardener
Parent: c0da1d4092
Commit: 36bc549146
5 changed files with 26 additions and 48 deletions

File: tensorflow/lite/micro/kernels/micro_ops.h

@@ -33,6 +33,7 @@ namespace tflite {
 TfLiteRegistration Register_CONV_2D();
 TfLiteRegistration Register_DEPTHWISE_CONV_2D();
+TfLiteRegistration Register_QUANTIZE();
 TfLiteRegistration Register_SHAPE();
 
 namespace ops {
@@ -73,7 +74,6 @@ TfLiteRegistration Register_PACK();
 TfLiteRegistration Register_PAD();
 TfLiteRegistration Register_PADV2();
 TfLiteRegistration Register_PRELU();
-TfLiteRegistration Register_QUANTIZE();
 TfLiteRegistration Register_REDUCE_MAX();
 TfLiteRegistration Register_RELU();
 TfLiteRegistration Register_RELU6();

File: tensorflow/lite/micro/kernels/quantize.cc

@@ -23,9 +23,7 @@ limitations under the License.
 #include "tensorflow/lite/micro/micro_utils.h"
 
 namespace tflite {
-namespace ops {
-namespace micro {
-namespace quantize {
+namespace {
 
 struct OpData {
   tflite::QuantizationParams quantization_params;
@@ -175,22 +173,17 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   return kTfLiteOk;
 }
 
-}  // namespace quantize
+}  // namespace
 
-// This Op (QUANTIZE) quantizes the input and produces quantized output.
-// AffineQuantize takes scale and zero point and quantizes the float value to
-// quantized output, in int8_t or uint8_t format.
 TfLiteRegistration Register_QUANTIZE() {
-  return {/*init=*/quantize::Init,
+  return {/*init=*/Init,
           /*free=*/nullptr,
-          /*prepare=*/quantize::Prepare,
-          /*invoke=*/quantize::Eval,
+          /*prepare=*/Prepare,
+          /*invoke=*/Eval,
           /*profiling_string=*/nullptr,
           /*builtin_code=*/0,
           /*custom_name=*/nullptr,
          /*version=*/0};
 }
 
-}  // namespace micro
-}  // namespace ops
 }  // namespace tflite
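The reference kernel now follows the shape that migrated TFLM kernels share: implementation details in an anonymous namespace (internal linkage), with only the registration function exported directly from tflite::. A compilable sketch of that pattern, using a hypothetical Register_FOO (Init/Prepare/Eval mirror the diff; the op itself is invented for illustration):

#include "tensorflow/lite/c/common.h"

namespace tflite {
namespace {  // internal linkage: Init/Prepare/Eval are invisible outside this TU

void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  return nullptr;  // this hypothetical op keeps no per-instance state
}

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;  // shape/type checks would go here
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;  // kernel computation would go here
}

}  // namespace

// Only the registration symbol is exported, directly under tflite::.
TfLiteRegistration Register_FOO() {
  return {/*init=*/Init,
          /*free=*/nullptr,
          /*prepare=*/Prepare,
          /*invoke=*/Eval,
          /*profiling_string=*/nullptr,
          /*builtin_code=*/0,
          /*custom_name=*/nullptr,
          /*version=*/0};
}

}  // namespace tflite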

File: tensorflow/lite/micro/kernels/quantize_test.cc

@@ -35,8 +35,7 @@ void ValidateQuantizeGoldens(TfLiteTensor* tensors, int tensors_size,
   TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
 
   // Version 1 of quantize supports int8_t and uint8_t quantization.
-  const TfLiteRegistration registration =
-      tflite::ops::micro::Register_QUANTIZE();
+  const TfLiteRegistration registration = Register_QUANTIZE();
   micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
                              outputs_array,
                              /*builtin_data=*/nullptr, micro_test::reporter);

File: tensorflow/lite/micro/kernels/xtensa_hifimini/quantize.cc

@@ -25,11 +25,12 @@ limitations under the License.
 #include "tensorflow/lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h"
 
 namespace tflite {
-namespace ops {
-namespace micro {
-namespace xtensa {
-namespace hifimini {
+namespace {
+
+struct OpData {
+  int32_t zero_point = 0;
+  int scale_multiplier = 0;
+};
 
 void AffineQuantize(int scale_multiplier, const int32_t zero_point,
                     const RuntimeShape& input_shape, const int16_t* input_data,
@@ -98,16 +99,6 @@ void AffineQuantize(int scale_multiplier, const int32_t zero_point,
   }
 }
 
-}  // namespace hifimini
-}  // namespace xtensa
-
-namespace quantize {
-
-struct OpData {
-  int32_t zero_point = 0;
-  int scale_multiplier = 0;
-};
-
 void* Init(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(OpData));
@@ -121,7 +112,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteTensor* input = GetInput(context, node, 0);
 
   // TODO(b/155682734): Fix dangerous input/output scale ratio assumptions.
-  op_data->scale_multiplier = xtensa::hifimini::CreateQConstantForInt24(
-      0, input->params.scale / output->params.scale);
+  op_data->scale_multiplier =
+      ops::micro::xtensa::hifimini::CreateQConstantForInt24(
+          0, input->params.scale / output->params.scale);
 
   op_data->zero_point = output->params.zero_point;
@@ -146,8 +138,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
     return kTfLiteError;
   }
 
-  xtensa::hifimini::AffineQuantize(
-      op_data->scale_multiplier, op_data->zero_point,
-      tflite::micro::GetTensorShape(input),
-      tflite::micro::GetTensorData<int16_t>(input),
-      tflite::micro::GetTensorShape(output),
+  AffineQuantize(op_data->scale_multiplier, op_data->zero_point,
+                 tflite::micro::GetTensorShape(input),
+                 tflite::micro::GetTensorData<int16_t>(input),
+                 tflite::micro::GetTensorShape(output),
@@ -155,22 +146,17 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   return kTfLiteOk;
 }
 
-}  // namespace quantize
+}  // namespace
 
-// This Op (QUANTIZE) quantizes the input and produces quantized output.
-// AffineQuantize takes scale and zero point and quantizes the float value to
-// quantized output, in int8_t or uint8_t format.
 TfLiteRegistration Register_QUANTIZE() {
-  return {/*init=*/quantize::Init,
+  return {/*init=*/Init,
           /*free=*/nullptr,
-          /*prepare=*/quantize::Prepare,
-          /*invoke=*/quantize::Eval,
+          /*prepare=*/Prepare,
+          /*invoke=*/Eval,
          /*profiling_string=*/nullptr,
          /*builtin_code=*/0,
          /*custom_name=*/nullptr,
          /*version=*/0};
 }
 
-}  // namespace micro
-}  // namespace ops
 }  // namespace tflite
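One wrinkle in the xtensa port: fixedpoint_utils.h has not been migrated yet, so CreateQConstantForInt24 still lives in ops::micro::xtensa::hifimini. Called from the kernel's new anonymous namespace directly under tflite::, the helper must now be spelled with the full ops::micro:: prefix, because unqualified lookup from tflite:: no longer passes through the nested namespaces. A small standalone illustration (CreateQConstant is a stand-in name, not the real helper):

namespace tflite {
namespace ops {
namespace micro {
namespace xtensa {
namespace hifimini {
// Stand-in for the not-yet-migrated helper in fixedpoint_utils.h.
inline int CreateQConstant() { return 0; }
}  // namespace hifimini
}  // namespace xtensa
}  // namespace micro
}  // namespace ops

namespace {
int Caller() {
  // xtensa::hifimini::CreateQConstant() would not resolve from here;
  // the path must start at ops::, which is visible from tflite::.
  return ops::micro::xtensa::hifimini::CreateQConstant();
}
}  // namespace
}  // namespace tflite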

File: tensorflow/lite/micro/micro_mutable_op_resolver.h

@@ -303,8 +303,8 @@ class MicroMutableOpResolver : public MicroOpResolver {
   }
 
   TfLiteStatus AddQuantize() {
-    return AddBuiltin(BuiltinOperator_QUANTIZE,
-                      tflite::ops::micro::Register_QUANTIZE(), ParseQuantize);
+    return AddBuiltin(BuiltinOperator_QUANTIZE, Register_QUANTIZE(),
+                      ParseQuantize);
   }
 
   TfLiteStatus AddReduceMax() {
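Since AddQuantize() now binds BuiltinOperator_QUANTIZE to the flat tflite::Register_QUANTIZE(), resolver-based applications need no source changes. A minimal usage sketch (the op-count template parameter follows the MicroMutableOpResolver API of this period):

#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"

void RegisterModelOps() {
  // Capacity for a single op; each Add*() call consumes one slot.
  static tflite::MicroMutableOpResolver<1> resolver;
  // Internally: AddBuiltin(BuiltinOperator_QUANTIZE, Register_QUANTIZE(), ParseQuantize).
  resolver.AddQuantize();
}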