Switch TFLM quantize kernels to flat namespace.

This is incremental progress towards a flat namespace for TFLM. See
https://abseil.io/tips/130 for more context.

PiperOrigin-RevId: 335946049
Change-Id: Ie28a33927967798470825c2dfffd01dd926d0f40
Author: Advait Jain 2020-10-07 13:53:13 -07:00 (committed by TensorFlower Gardener)
parent c0da1d4092
commit 36bc549146
5 changed files with 26 additions and 48 deletions
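
For readers skimming the hunks below, the whole change reduces to where
Register_QUANTIZE() is declared. A before/after sketch, condensed from the
diffs that follow:

// Before: the kernel registration lives three namespaces deep.
namespace tflite {
namespace ops {
namespace micro {
TfLiteRegistration Register_QUANTIZE();
}  // namespace micro
}  // namespace ops
}  // namespace tflite

// After: one flat namespace, per the guidance in abseil.io/tips/130.
namespace tflite {
TfLiteRegistration Register_QUANTIZE();
}  // namespace tflite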

File: tensorflow/lite/micro/kernels/micro_ops.h

@@ -33,6 +33,7 @@ namespace tflite {
 TfLiteRegistration Register_CONV_2D();
 TfLiteRegistration Register_DEPTHWISE_CONV_2D();
+TfLiteRegistration Register_QUANTIZE();
 TfLiteRegistration Register_SHAPE();
 
 namespace ops {
@@ -73,7 +74,6 @@ TfLiteRegistration Register_PACK();
 TfLiteRegistration Register_PAD();
 TfLiteRegistration Register_PADV2();
 TfLiteRegistration Register_PRELU();
-TfLiteRegistration Register_QUANTIZE();
 TfLiteRegistration Register_REDUCE_MAX();
 TfLiteRegistration Register_RELU();
 TfLiteRegistration Register_RELU6();

File: tensorflow/lite/micro/kernels/quantize.cc

@@ -23,9 +23,7 @@ limitations under the License.
 #include "tensorflow/lite/micro/micro_utils.h"
 
 namespace tflite {
-namespace ops {
-namespace micro {
-namespace quantize {
+namespace {
 
 struct OpData {
   tflite::QuantizationParams quantization_params;
@@ -175,22 +173,17 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   return kTfLiteOk;
 }
 
-}  // namespace quantize
+}  // namespace
 
-// This Op (QUANTIZE) quantizes the input and produces quantized output.
-// AffineQuantize takes scale and zero point and quantizes the float value to
-// quantized output, in int8_t or uint8_t format.
 TfLiteRegistration Register_QUANTIZE() {
-  return {/*init=*/quantize::Init,
+  return {/*init=*/Init,
           /*free=*/nullptr,
-          /*prepare=*/quantize::Prepare,
-          /*invoke=*/quantize::Eval,
+          /*prepare=*/Prepare,
+          /*invoke=*/Eval,
           /*profiling_string=*/nullptr,
           /*builtin_code=*/0,
           /*custom_name=*/nullptr,
           /*version=*/0};
 }
 
-}  // namespace micro
-}  // namespace ops
 }  // namespace tflite
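
The pattern in this file generalizes to the other flattened kernels:
implementation details (OpData, Init, Prepare, Eval) move into an unnamed
namespace for internal linkage, and only the registration hook stays visible.
A minimal skeleton of the resulting layout (signatures taken from the diff,
bodies elided):

namespace tflite {
namespace {

// Internal linkage: nothing outside this translation unit can name these,
// so the short identifiers cannot collide across kernels.
void* Init(TfLiteContext* context, const char* buffer, size_t length);
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node);
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node);

}  // namespace

// The only externally visible symbol; the unqualified names resolve to the
// unnamed namespace above.
TfLiteRegistration Register_QUANTIZE() {
  return {/*init=*/Init, /*free=*/nullptr, /*prepare=*/Prepare,
          /*invoke=*/Eval, /*profiling_string=*/nullptr, /*builtin_code=*/0,
          /*custom_name=*/nullptr, /*version=*/0};
}

}  // namespace tflite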

File: tensorflow/lite/micro/kernels/quantize_test.cc

@@ -35,8 +35,7 @@ void ValidateQuantizeGoldens(TfLiteTensor* tensors, int tensors_size,
   TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
 
   // Version 1 of quantize supports int8_t and uint8_t quantization.
-  const TfLiteRegistration registration =
-      tflite::ops::micro::Register_QUANTIZE();
+  const TfLiteRegistration registration = Register_QUANTIZE();
   micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
                              outputs_array,
                              /*builtin_data=*/nullptr, micro_test::reporter);
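
Because the test helper already sits inside the tflite namespace, the
unqualified call now resolves to the flat declaration. For context, this is
roughly how the harness drives the kernel once it is registered (a sketch;
tensor setup is omitted, and the InitAndPrepare/Invoke calls are assumed from
the KernelRunner API this test uses):

const TfLiteRegistration registration = Register_QUANTIZE();
micro::KernelRunner runner(registration, tensors, tensors_size, inputs_array,
                           outputs_array,
                           /*builtin_data=*/nullptr, micro_test::reporter);
// Run the kernel's Init and Prepare stages, then a single Invoke.
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.InitAndPrepare());
TF_LITE_MICRO_EXPECT_EQ(kTfLiteOk, runner.Invoke());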

File: tensorflow/lite/micro/kernels/xtensa_hifimini/quantize.cc

@@ -25,11 +25,12 @@ limitations under the License.
 #include "tensorflow/lite/micro/kernels/xtensa_hifimini/fixedpoint_utils.h"
 
 namespace tflite {
-namespace ops {
-namespace micro {
+namespace {
 
-namespace xtensa {
-namespace hifimini {
+struct OpData {
+  int32_t zero_point = 0;
+  int scale_multiplier = 0;
+};
 
 void AffineQuantize(int scale_multiplier, const int32_t zero_point,
                     const RuntimeShape& input_shape, const int16_t* input_data,
@@ -98,16 +99,6 @@ void AffineQuantize(int scale_multiplier, const int32_t zero_point,
   }
 }
 
-}  // namespace hifimini
-}  // namespace xtensa
-
-namespace quantize {
-
-struct OpData {
-  int32_t zero_point = 0;
-  int scale_multiplier = 0;
-};
-
 void* Init(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(OpData));
@@ -121,8 +112,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteTensor* input = GetInput(context, node, 0);
 
   // TODO(b/155682734): Fix dangerous input/output scale ratio assumptions.
-  op_data->scale_multiplier = xtensa::hifimini::CreateQConstantForInt24(
-      0, input->params.scale / output->params.scale);
+  op_data->scale_multiplier =
+      ops::micro::xtensa::hifimini::CreateQConstantForInt24(
+          0, input->params.scale / output->params.scale);
 
   op_data->zero_point = output->params.zero_point;
@@ -146,31 +138,25 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
     return kTfLiteError;
   }
 
-  xtensa::hifimini::AffineQuantize(
-      op_data->scale_multiplier, op_data->zero_point,
-      tflite::micro::GetTensorShape(input),
-      tflite::micro::GetTensorData<int16_t>(input),
-      tflite::micro::GetTensorShape(output),
-      tflite::micro::GetTensorData<int8_t>(output));
+  AffineQuantize(op_data->scale_multiplier, op_data->zero_point,
+                 tflite::micro::GetTensorShape(input),
+                 tflite::micro::GetTensorData<int16_t>(input),
+                 tflite::micro::GetTensorShape(output),
+                 tflite::micro::GetTensorData<int8_t>(output));
   return kTfLiteOk;
 }
 
-}  // namespace quantize
+}  // namespace
 
-// This Op (QUANTIZE) quantizes the input and produces quantized output.
-// AffineQuantize takes scale and zero point and quantizes the float value to
-// quantized output, in int8_t or uint8_t format.
 TfLiteRegistration Register_QUANTIZE() {
-  return {/*init=*/quantize::Init,
+  return {/*init=*/Init,
           /*free=*/nullptr,
-          /*prepare=*/quantize::Prepare,
-          /*invoke=*/quantize::Eval,
+          /*prepare=*/Prepare,
+          /*invoke=*/Eval,
           /*profiling_string=*/nullptr,
           /*builtin_code=*/0,
           /*custom_name=*/nullptr,
           /*version=*/0};
 }
 
-}  // namespace micro
-}  // namespace ops
 }  // namespace tflite
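
One wrinkle in this file: fixedpoint_utils.h has not been flattened yet, so
CreateQConstantForInt24 still lives under ops::micro::xtensa::hifimini and the
kernel, now in plain tflite, must spell out the full path. A condensed sketch
of the cross-namespace call (the helper's signature and the MakeScaleMultiplier
wrapper are illustrative assumptions, inferred from its use in the diff):

namespace tflite {

namespace ops {
namespace micro {
namespace xtensa {
namespace hifimini {
// Assumed shape of the not-yet-flattened helper from fixedpoint_utils.h.
int CreateQConstantForInt24(int integer_bits, float value);
}  // namespace hifimini
}  // namespace xtensa
}  // namespace micro
}  // namespace ops

namespace {
// Hypothetical wrapper showing the qualification the flattened kernel needs.
int MakeScaleMultiplier(float input_scale, float output_scale) {
  return ops::micro::xtensa::hifimini::CreateQConstantForInt24(
      0, input_scale / output_scale);
}
}  // namespace

}  // namespace tflite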

File: tensorflow/lite/micro/micro_mutable_op_resolver.h

@@ -303,8 +303,8 @@ class MicroMutableOpResolver : public MicroOpResolver {
   }
 
   TfLiteStatus AddQuantize() {
-    return AddBuiltin(BuiltinOperator_QUANTIZE,
-                      tflite::ops::micro::Register_QUANTIZE(), ParseQuantize);
+    return AddBuiltin(BuiltinOperator_QUANTIZE, Register_QUANTIZE(),
+                      ParseQuantize);
   }
 
   TfLiteStatus AddReduceMax() {
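
Net effect for applications: nothing changes at the call site, because
AddQuantize() hides the registration lookup. A hypothetical usage sketch (the
template argument is the number of registrations the resolver can hold):

#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"

TfLiteStatus SetupResolver(tflite::MicroMutableOpResolver<1>& op_resolver) {
  // Register only the ops the model actually needs; here, just QUANTIZE.
  // Returns kTfLiteOk unless the resolver's capacity is exceeded.
  return op_resolver.AddQuantize();
}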