Prefer the standard integral types over custom type-aliases.

PiperOrigin-RevId: 322937618
Change-Id: I0c0560a13856ee1df1ff187d30244a99cce04f86
Author: Advait Jain (2020-07-23 22:24:35 -07:00), committed by TensorFlower Gardener
Parent: 37deabbb75
Commit: 29d635bccc
59 changed files with 351 additions and 344 deletions
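A minimal sketch of the pattern this commit applies across the files below: project-specific width aliases (int8, int32, uint8, ...) are replaced with the standard fixed-width types from <cstdint>. The struct name here is illustrative, not taken from the commit.

#include <cstdint>

// Illustrative only: the before/after shape of the change.
struct QuantizedOpParams {
  // int32 output_multiplier;   // before: project-specific alias
  int32_t output_multiplier;    // after: standard fixed-width type
  int output_shift;
  // int8 zero_point;           // before
  int8_t zero_point;            // after
};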

View File

@ -151,7 +151,7 @@ int main() {
// Output scale of 50 is needed to accommodate a float range of [-6400, 6350]
float output_scale = 50.0f;
// Create per-tensor quantized int8 input tensor.
// Create per-tensor quantized int8_t input tensor.
int8_t input_quantized[32];
TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
input_values, input_quantized, input_dims, input_scale, input_zero_point);
@ -163,7 +163,7 @@ int main() {
tflite::testing::IntArrayFromInts(input_zero_points)};
input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant};
// Create per-tensor quantized int8 filter tensor.
// Create per-tensor quantized int8_t filter tensor.
int8_t filter_quantized[32 * 32];
TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
filter_values, filter_quantized, filter_dims, filter_scale,
@ -176,7 +176,7 @@ int main() {
tflite::testing::IntArrayFromInts(filter_zero_points)};
filter_tensor.quantization = {kTfLiteAffineQuantization, &filter_quant};
// Create per-tensor quantized int32 bias tensor.
// Create per-tensor quantized int32_t bias tensor.
int32_t bias_quantized[32];
tflite::SymmetricQuantize(bias_values, bias_quantized, 32,
input_scale * output_scale);
@ -192,7 +192,7 @@ int main() {
tflite::testing::IntArrayFromInts(bias_zero_points)};
bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quant};
// Create per-tensor quantized int8 output tensor.
// Create per-tensor quantized int8_t output tensor.
int8_t output_quantized[32];
TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
output_quantized, output_dims, output_scale, output_zero_point);
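A minimal sketch of the arithmetic behind the "output scale of 50" comment above, assuming a zero point of 0: an int8_t tensor with scale 50 can represent real values in [50 * -128, 50 * 127] = [-6400, 6350].

#include <cstdint>
#include <cstdio>

int main() {
  const float scale = 50.0f;
  const int32_t zero_point = 0;
  // The real value represented by a quantized value q is scale * (q - zero_point).
  const float lo = scale * (INT8_MIN - zero_point);  // 50 * -128 = -6400
  const float hi = scale * (INT8_MAX - zero_point);  // 50 *  127 =  6350
  std::printf("representable range: [%.1f, %.1f]\n", lo, hi);
  return 0;
}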

View File

@ -157,7 +157,7 @@ int main() {
TfLiteIntArray* bias_dims = tflite::testing::IntArrayFromInts(bias_shape);
TfLiteIntArray* output_dims = tflite::testing::IntArrayFromInts(output_shape);
// Create per-tensor quantized int8 input tensor.
// Create per-tensor quantized int8_t input tensor.
int8_t input_quantized[input_elements];
TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
input_values, input_quantized, input_dims, input_scale, input_zero_point);
@ -170,7 +170,7 @@ int main() {
tflite::testing::IntArrayFromInts(input_zero_points)};
input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant};
// Create per-tensor quantized int8 filter tensor.
// Create per-tensor quantized int8_t filter tensor.
int8_t filter_quantized[filter_elements];
TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
filter_values, filter_quantized, filter_dims, filter_scale, 0);
@ -183,7 +183,7 @@ int main() {
tflite::testing::IntArrayFromInts(filter_zero_points)};
filter_tensor.quantization = {kTfLiteAffineQuantization, &filter_quant};
// Create per-tensor quantized int32 bias tensor.
// Create per-tensor quantized int32_t bias tensor.
int32_t bias_quantized[bias_elements];
// See https://www.tensorflow.org/lite/performance/quantization_spec for a
// detailed explanation of why bias scale is input_scale * filter_scale.
@ -200,7 +200,7 @@ int main() {
tflite::testing::IntArrayFromInts(bias_zero_points)};
bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quant};
// Create per-tensor quantized int8 output tensor.
// Create per-tensor quantized int8_t output tensor.
int8_t output_quantized[output_elements];
TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
output_quantized, output_dims, output_scale, output_zero_point);
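A minimal sketch of the bias quantization rule referenced by the quantization_spec link above: bias values are stored as int32_t with scale = input_scale * filter_scale and zero point 0. The helper name is illustrative; it is not the tflite::SymmetricQuantize signature.

#include <cmath>
#include <cstdint>

// Illustrative only: quantize float biases to int32_t at input_scale * filter_scale.
void QuantizeBiasSketch(const float* bias, int count, float input_scale,
                        float filter_scale, int32_t* bias_quantized) {
  const float bias_scale = input_scale * filter_scale;
  for (int i = 0; i < count; ++i) {
    bias_quantized[i] = static_cast<int32_t>(std::lround(bias[i] / bias_scale));
  }
}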

View File

@ -50,7 +50,7 @@ TfLiteStatus RecognizeCommands::ProcessLatestResults(
if (latest_results->type != kTfLiteInt8) {
TF_LITE_REPORT_ERROR(
error_reporter_,
"The results for recognition should be int8 elements, but are %d",
"The results for recognition should be int8_t elements, but are %d",
latest_results->type);
return kTfLiteError;
}

View File

@ -32,7 +32,7 @@ const tflite::Model* model = nullptr;
tflite::MicroInterpreter* interpreter = nullptr;
TfLiteTensor* input = nullptr;
// In order to use optimized tensorflow lite kernels, a signed int8 quantized
// In order to use optimized tensorflow lite kernels, a signed int8_t quantized
// model is preferred over the legacy unsigned model format. This means that
// throughout this project, input images must be converted from unsigned to
// signed format. The easiest and quickest way to convert from unsigned to
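A minimal sketch of the unsigned-to-signed conversion the (truncated) comment above refers to, assuming the usual offset of 128 between the uint8_t and int8_t quantized layouts; the helper name is illustrative.

#include <cstddef>
#include <cstdint>

// Illustrative only: shift legacy uint8_t image data into the int8_t range.
void ConvertImageToInt8(const uint8_t* src, size_t len, int8_t* dst) {
  for (size_t i = 0; i < len; ++i) {
    dst[i] = static_cast<int8_t>(static_cast<int32_t>(src[i]) - 128);
  }
}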

View File

@ -53,7 +53,7 @@ inline void ReluQuantized(const ReluOpData& data,
T* output_data) {
const int flat_size = MatchingFlatSize(input_shape, output_shape);
for (int i = 0; i < flat_size; ++i) {
const int32 val = static_cast<int32_t>(input_data[i]);
const int32_t val = static_cast<int32_t>(input_data[i]);
int32_t clamped =
data.params.output_offset +
MultiplyByQuantizedMultiplier(val - data.params.input_offset,
@ -79,17 +79,17 @@ inline void CalculateReluOpData(const TfLiteTensor* input, TfLiteTensor* output,
QuantizeMultiplier(real_multiplier, &data->params.output_multiplier,
&data->params.output_shift);
data->params.quantized_activation_min =
std::max(static_cast<int32_t>(std::numeric_limits<T>::min()),
output->params.zero_point +
static_cast<int32>(roundf(act_min / output->params.scale)));
data->params.quantized_activation_min = std::max(
static_cast<int32_t>(std::numeric_limits<T>::min()),
output->params.zero_point +
static_cast<int32_t>(roundf(act_min / output->params.scale)));
data->params.quantized_activation_max =
act_max == std::numeric_limits<float>::infinity()
? static_cast<int32_t>(std::numeric_limits<T>::max())
: std::min(
static_cast<int32_t>(std::numeric_limits<T>::max()),
output->params.zero_point +
static_cast<int32>(roundf(act_max / output->params.scale)));
: std::min(static_cast<int32_t>(std::numeric_limits<T>::max()),
output->params.zero_point +
static_cast<int32_t>(
roundf(act_max / output->params.scale)));
data->params.input_offset = input->params.zero_point;
data->params.output_offset = output->params.zero_point;
}

View File

@ -42,18 +42,18 @@ struct OpData {
// and the special 16-bit -> 16bit quantized path
int input1_shift;
int input2_shift;
int32 output_activation_min;
int32 output_activation_max;
int32_t output_activation_min;
int32_t output_activation_max;
// These fields are used only in the general 8-bit -> 8bit quantized path
int32 input1_multiplier;
int32 input2_multiplier;
int32 output_multiplier;
int32_t input1_multiplier;
int32_t input2_multiplier;
int32_t output_multiplier;
int output_shift;
int left_shift;
int32 input1_offset;
int32 input2_offset;
int32 output_offset;
int32_t input1_offset;
int32_t input2_offset;
int32_t output_offset;
// Used only for float evals:
float output_activation_min_f32;

View File

@ -78,8 +78,8 @@ bool IsMliApplicable(TfLiteContext* context, const TfLiteTensor* input,
const TfLiteConvParams* params) {
const auto* affine_quantization =
reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
// MLI optimized version only supports int8 datatype, dilation factor of 1 and
// per-axis quantization of weights (no broadcasting/per-tensor)
// MLI optimized version only supports int8_t datatype, dilation factor of 1
// and per-axis quantization of weights (no broadcasting/per-tensor)
bool ret_val = (filter->type == kTfLiteInt8) &&
(input->type == kTfLiteInt8) && (bias->type == kTfLiteInt32) &&
(params->dilation_width_factor == 1) &&
@ -176,7 +176,7 @@ TfLiteStatus EvalMliQuantizedPerChannel(
OpData* data, const TfLiteTensor* input, const TfLiteTensor* filter,
const TfLiteTensor* bias, TfLiteTensor* output) {
// Run Conv MLI kernel
// MLI optimized version only supports int8 datatype and dilation factor of 1
// MLI optimized version only supports int8_t datatype and dilation factor of 1
if ((input->type == kTfLiteInt8) && (params->dilation_width_factor == 1) &&
(params->dilation_height_factor == 1)) {
mli_tensor mli_in = {0};
@ -353,10 +353,10 @@ TfLiteStatus EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
reference_integer_ops::ConvPerChannel(
op_params, data->per_channel_output_multiplier,
data->per_channel_output_shift, GetTensorShape(input),
GetTensorData<int8>(input), GetTensorShape(filter),
GetTensorData<int8>(filter), GetTensorShape(bias),
GetTensorData<int32>(bias), GetTensorShape(output),
GetTensorData<int8>(output));
GetTensorData<int8_t>(input), GetTensorShape(filter),
GetTensorData<int8_t>(filter), GetTensorShape(bias),
GetTensorData<int32_t>(bias), GetTensorShape(output),
GetTensorData<int8_t>(output));
return kTfLiteOk;
#else
TF_LITE_KERNEL_LOG(context,

View File

@ -71,10 +71,10 @@ bool IsMliApplicable(TfLiteContext* context, const TfLiteTensor* input,
const int in_ch = SizeOfDimension(input, 3);
const int filters_num = SizeOfDimension(filter, 3);
// MLI optimized version only supports int8 datatype, dilation factor of 1 and
// per-axis quantization of weights (no broadcasting/per-tensor)
// (in_ch == filters_num) || (in_ch == 1)) is a forbidding of
// channel multiplier logic for multichannel input.
// MLI optimized version only supports int8_t datatype, dilation factor of 1
// and per-axis quantization of weights (no broadcasting/per-tensor) (in_ch ==
// filters_num) || (in_ch == 1)) is a forbidding of channel multiplier logic
// for multichannel input.
bool ret_val = (filter->type == kTfLiteInt8) &&
(input->type == kTfLiteInt8) && (bias->type == kTfLiteInt32) &&
(params->dilation_width_factor == 1) &&
@ -373,10 +373,10 @@ TfLiteStatus EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
reference_integer_ops::DepthwiseConvPerChannel(
op_params, data->per_channel_output_multiplier,
data->per_channel_output_shift, GetTensorShape(input),
GetTensorData<int8>(input), GetTensorShape(filter),
GetTensorData<int8>(filter), GetTensorShape(bias),
GetTensorData<int32>(bias), GetTensorShape(output),
GetTensorData<int8>(output));
GetTensorData<int8_t>(input), GetTensorShape(filter),
GetTensorData<int8_t>(filter), GetTensorShape(bias),
GetTensorData<int32_t>(bias), GetTensorShape(output),
GetTensorData<int8_t>(output));
return kTfLiteOk;
#else
TF_LITE_KERNEL_LOG(context,

View File

@ -55,7 +55,7 @@ constexpr int kOutputTensor = 0;
bool IsMliApplicable(TfLiteContext* context, const TfLiteTensor* input,
const TfLiteTensor* filter, const TfLiteTensor* bias,
const TfLiteFullyConnectedParams* params) {
// MLI optimized version only supports int8 datatype and no fused Relu and
// MLI optimized version only supports int8_t datatype and no fused Relu and
// symmetric per-tensor quantization of weights (not per-axis)
bool ret_val = (filter->type == kTfLiteInt8) &&
(input->type == kTfLiteInt8) && (bias->type == kTfLiteInt32) &&

View File

@ -34,7 +34,7 @@ static void ConvertToMliTensorData(const TfLiteTensor* tfT, mli_tensor* mliT) {
} else if (tfT->type == kTfLiteInt32) {
mliT->el_type = MLI_EL_ASYM_I32;
} else {
TF_LITE_FATAL("Wrong data type. Expected int8 or int32.");
TF_LITE_FATAL("Wrong data type. Expected int8_t or int32_t.");
}
mliT->capacity = tfT->bytes;

View File

@ -43,7 +43,7 @@ enum MliPoolingType { AveragePooling = 0, MaxPooling = 1 };
bool IsMliApplicable(TfLiteContext* context, const TfLiteTensor* input,
const TfLitePoolParams* params) {
// MLI optimized version only supports int8 datatype and no fused Relu
// MLI optimized version only supports int8_t datatype and no fused Relu
return (input->type == kTfLiteInt8 && params->activation == kTfLiteActNone);
}

View File

@ -41,7 +41,7 @@ void TestAveragePoolingQuantized(
const T* expected_output_data, const int* output_dims_data,
float output_min, float output_max, TfLitePadding padding,
TfLiteFusedActivation activation, T* output_data) {
static_assert(sizeof(T) == 1, "Only int8/uint8 data types allowed.");
static_assert(sizeof(T) == 1, "Only int8_t/uint8_t data types allowed.");
TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);
@ -112,7 +112,7 @@ void TestMaxPoolQuantized(const int* input_dims_data, const T* input_data,
float output_min, float output_max,
const int* output_dims_data, TfLitePadding padding,
TfLiteFusedActivation activation, T* output_data) {
static_assert(sizeof(T) == 1, "Only int8/uint8 data types allowed.");
static_assert(sizeof(T) == 1, "Only int8_t/uint8_t data types allowed.");
TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
TfLiteIntArray* output_dims = IntArrayFromInts(output_dims_data);

View File

@ -74,18 +74,19 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node, bool is_arg_max) {
break;
default:
TF_LITE_KERNEL_LOG(context,
"Only float32, uint8 and int8 are "
"Only float32, uint8_t and int8_t are "
"supported currently, got %s.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
} else {
TF_LITE_KERNEL_LOG(context, "Only int32 are supported currently, got %s.",
TF_LITE_KERNEL_LOG(context,
"Only int32_t are supported currently, got %s.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
} else {
TF_LITE_KERNEL_LOG(context, "Only int32 are supported currently, got %s.",
TF_LITE_KERNEL_LOG(context, "Only int32_t are supported currently, got %s.",
TfLiteTypeGetName(axis->type));
return kTfLiteError;
}

View File

@ -92,7 +92,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
// The circular buffer custom operator currently only supports int8.
// The circular buffer custom operator currently only supports int8_t.
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8);
// TODO(b/132070898): Use statically slotted OpData structures until a

View File

@ -41,18 +41,18 @@ struct OpData {
// and the special 16-bit -> 16bit quantized path
int input1_shift;
int input2_shift;
int32 output_activation_min;
int32 output_activation_max;
int32_t output_activation_min;
int32_t output_activation_max;
// These fields are used only in the general 8-bit -> 8bit quantized path
int32 input1_multiplier;
int32 input2_multiplier;
int32 output_multiplier;
int32_t input1_multiplier;
int32_t input2_multiplier;
int32_t output_multiplier;
int output_shift;
int left_shift;
int32 input1_offset;
int32 input2_offset;
int32 output_offset;
int32_t input1_offset;
int32_t input2_offset;
int32_t output_offset;
};
TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteAddParams* params,

View File

@ -304,7 +304,7 @@ TfLiteStatus EvalQuantizedPerChannel(
arm_status status = arm_convolve_wrapper_s8(
&ctx, &conv_params, &quant_params, &input_dims,
GetTensorData<int8_t>(input), &filter_dims, GetTensorData<int8_t>(filter),
&bias_dims, GetTensorData<int32>(bias), &output_dims,
&bias_dims, GetTensorData<int32_t>(bias), &output_dims,
GetTensorData<int8_t>(output));
if (status == ARM_MATH_SUCCESS) {
@ -332,10 +332,10 @@ TfLiteStatus EvalQuantizedPerChannel(
reference_integer_ops::ConvPerChannel(
op_params, data->per_channel_output_multiplier,
data->per_channel_output_shift, GetTensorShape(input),
GetTensorData<int8>(input), GetTensorShape(filter),
GetTensorData<int8>(filter), GetTensorShape(bias),
GetTensorData<int32>(bias), GetTensorShape(output),
GetTensorData<int8>(output));
GetTensorData<int8_t>(input), GetTensorShape(filter),
GetTensorData<int8_t>(filter), GetTensorShape(bias),
GetTensorData<int32_t>(bias), GetTensorShape(output),
GetTensorData<int8_t>(output));
#endif
return kTfLiteOk;

View File

@ -304,7 +304,7 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
&ctx, &dw_conv_params, &quant_params, &input_dims,
GetTensorData<int8_t>(input), &filter_dims,
GetTensorData<int8_t>(filter), &bias_dims,
GetTensorData<int32>(bias), &output_dims,
GetTensorData<int32_t>(bias), &output_dims,
GetTensorData<int8_t>(output)),
ARM_MATH_SUCCESS);
} else {
@ -327,10 +327,10 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
reference_integer_ops::DepthwiseConvPerChannel(
op_params, data->per_channel_output_multiplier,
data->per_channel_output_shift, GetTensorShape(input),
GetTensorData<int8>(input), GetTensorShape(filter),
GetTensorData<int8>(filter), GetTensorShape(bias),
GetTensorData<int32>(bias), GetTensorShape(output),
GetTensorData<int8>(output));
GetTensorData<int8_t>(input), GetTensorShape(filter),
GetTensorData<int8_t>(filter), GetTensorShape(bias),
GetTensorData<int32_t>(bias), GetTensorShape(output),
GetTensorData<int8_t>(output));
}
}

View File

@ -99,7 +99,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
input->type, input, filter, bias,
output, data));
if (input->type == kTfLiteInt8 && nullptr != GetTensorData<int32>(bias)) {
if (input->type == kTfLiteInt8 && nullptr != GetTensorData<int32_t>(bias)) {
RuntimeShape filter_shape = GetTensorShape(filter);
RuntimeShape output_shape = GetTensorShape(output);
@ -130,7 +130,7 @@ TfLiteStatus EvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* bias, TfLiteTensor* output) {
// The 'if' condition can be removed when null handling of bias is added to
// arm_fully_connected_s8
if (nullptr != GetTensorData<int32>(bias)) {
if (nullptr != GetTensorData<int32_t>(bias)) {
RuntimeShape output_shape = GetTensorShape(output);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2);
const int batches = output_shape.Dims(0);
@ -189,7 +189,7 @@ TfLiteStatus EvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node,
arm_fully_connected_s8(&ctx, &fc_params, &quant_params, &input_dims,
GetTensorData<int8_t>(input), &filter_dims,
GetTensorData<int8_t>(filter), &bias_dims,
GetTensorData<int32>(bias), &output_dims,
GetTensorData<int32_t>(bias), &output_dims,
GetTensorData<int8_t>(output)),
ARM_MATH_SUCCESS);
} else {

View File

@ -38,7 +38,8 @@ TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context,
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8);
if (output->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point, -32768);
// NOTE: Current int16 softmax output does not require symmetric scaling
// NOTE: Current int16_t softmax output does not require symmetric
// scaling
// - so no need to verify scale here.
} else {
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8);

View File

@ -626,12 +626,12 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto input2_offset = -input2->params.zero_point;
const int kLeftShift = 8;
int32 input1_multiplier;
int32_t input1_multiplier;
int input1_shift;
QuantizeMultiplierSmallerThanOneExp(
static_cast<double>(input1->params.scale), &input1_multiplier,
&input1_shift);
int32 input2_multiplier;
int32_t input2_multiplier;
int input2_shift;
QuantizeMultiplierSmallerThanOneExp(
static_cast<double>(input2->params.scale), &input2_multiplier,

View File

@ -122,7 +122,7 @@ void EvalQuantizedUInt8(TfLiteContext* context, TfLiteNode* node) {
reference_ops::ConcatenationWithScaling(
data->params, inputs_shape_ptr, inputs_data,
tflite::micro::GetTensorShape(output),
tflite::micro::GetTensorData<uint8>(output));
tflite::micro::GetTensorData<uint8_t>(output));
}
void* Init(TfLiteContext* context, const char* buffer, size_t length) {

View File

@ -237,13 +237,13 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
reference_integer_ops::ConvPerChannel(
op_params, data.per_channel_output_multiplier,
data.per_channel_output_shift, tflite::micro::GetTensorShape(input),
tflite::micro::GetTensorData<int8>(input),
tflite::micro::GetTensorData<int8_t>(input),
tflite::micro::GetTensorShape(filter),
tflite::micro::GetTensorData<int8>(filter),
tflite::micro::GetTensorData<int8_t>(filter),
tflite::micro::GetTensorShape(bias),
tflite::micro::GetTensorData<int32>(bias),
tflite::micro::GetTensorData<int32_t>(bias),
tflite::micro::GetTensorShape(output),
tflite::micro::GetTensorData<int8>(output));
tflite::micro::GetTensorData<int8_t>(output));
}
void EvalFloat(TfLiteContext* context, TfLiteNode* node,

View File

@ -601,7 +601,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
TfLiteIntArray* output_dims =
tflite::testing::IntArrayFromInts(tflite::testing::kOutputShape);
// Create per-layer quantized int8 input tensor.
// Create per-layer quantized int8_t input tensor.
TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
tflite::testing::kInputData, input_quantized, input_dims, input_scale, 0);
int input_zero_points[2] = {1, 0};
@ -611,7 +611,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
tflite::testing::IntArrayFromInts(input_zero_points), 0};
input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant};
// Create per-layer quantized int8 filter tensor.
// Create per-layer quantized int8_t filter tensor.
TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
tflite::testing::kFilterData, filter_quantized, filter_dims, filter_scale,
0);
@ -622,7 +622,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
tflite::testing::IntArrayFromInts(filter_zero_points), 0};
filter_tensor.quantization = {kTfLiteAffineQuantization, &filter_quant};
// Create per-layer quantized int32 bias tensor.
// Create per-layer quantized int32_t bias tensor.
tflite::SymmetricQuantize(tflite::testing::kBiasData, bias_quantized,
tflite::testing::kBiasElements,
input_scale * output_scale);
@ -636,7 +636,7 @@ TF_LITE_MICRO_TEST(BroadcastPerLayerQuantizationToPerChannelShouldMatchGolden) {
tflite::testing::IntArrayFromInts(bias_zero_points), 0};
bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quant};
// Create per-layer quantized int8 output tensor.
// Create per-layer quantized int8_t output tensor.
TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
output_data, output_dims, output_scale, 0 /* quantized dimension */);
int output_zero_points[2] = {1, 0};
@ -723,7 +723,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
// Output scale of 50 is needed to accommodate a float range of [-6400, 6350]
float output_scale = 50.0f;
// Create per-tensor quantized int8 input tensor.
// Create per-tensor quantized int8_t input tensor.
int8_t input_quantized[kSampleSize];
TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
input_values, input_quantized, input_dims, input_scale, input_zero_point);
@ -735,7 +735,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
tflite::testing::IntArrayFromInts(input_zero_points), 0};
input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant};
// Create per-tensor quantized int8 filter tensor.
// Create per-tensor quantized int8_t filter tensor.
int8_t filter_quantized[kNumFilters * kSampleSize];
TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
filter_values, filter_quantized, filter_dims, filter_scale,
@ -748,7 +748,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
tflite::testing::IntArrayFromInts(filter_zero_points), 0};
filter_tensor.quantization = {kTfLiteAffineQuantization, &filter_quant};
// Create per-tensor quantized int32 bias tensor.
// Create per-tensor quantized int32_t bias tensor.
int32_t bias_quantized[kSampleSize];
tflite::SymmetricQuantize(bias_values, bias_quantized, kSampleSize,
input_scale * output_scale);
@ -764,7 +764,7 @@ TF_LITE_MICRO_TEST(Int8Input32x1Filter32x32ShouldMatchGolden) {
tflite::testing::IntArrayFromInts(bias_zero_points), 0};
bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quant};
// Create per-tensor quantized int8 output tensor.
// Create per-tensor quantized int8_t output tensor.
int8_t output_quantized[kSampleSize];
TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
output_quantized, output_dims, output_scale, output_zero_point);

View File

@ -123,7 +123,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
int filter_width = SizeOfDimension(filter, 2);
int filter_height = SizeOfDimension(filter, 1);
// Per channel quantization is only needed for int8 inference. For other
// Per channel quantization is only needed for int8_t inference. For other
// quantized types, only a single scale and zero point is needed.
const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension];
// Dynamically allocate per-channel quantization parameters.
@ -221,13 +221,13 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
reference_integer_ops::DepthwiseConvPerChannel(
op_params, data.per_channel_output_multiplier,
data.per_channel_output_shift, tflite::micro::GetTensorShape(input),
tflite::micro::GetTensorData<int8>(input),
tflite::micro::GetTensorData<int8_t>(input),
tflite::micro::GetTensorShape(filter),
tflite::micro::GetTensorData<int8>(filter),
tflite::micro::GetTensorData<int8_t>(filter),
tflite::micro::GetTensorShape(bias),
tflite::micro::GetTensorData<int32>(bias),
tflite::micro::GetTensorData<int32_t>(bias),
tflite::micro::GetTensorShape(output),
tflite::micro::GetTensorData<int8>(output));
tflite::micro::GetTensorData<int8_t>(output));
}
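A minimal sketch of the per-channel quantization mentioned in the comment above, assuming the usual TensorFlow Lite convention that each output channel is rescaled by input_scale * filter_scale[c] / output_scale; the function name is illustrative.

#include <cstddef>
#include <vector>

// Illustrative only: one effective rescale factor per output channel.
std::vector<double> PerChannelEffectiveScales(double input_scale,
                                              const std::vector<double>& filter_scales,
                                              double output_scale) {
  std::vector<double> effective(filter_scales.size());
  for (std::size_t c = 0; c < filter_scales.size(); ++c) {
    effective[c] = input_scale * filter_scales[c] / output_scale;
  }
  return effective;
}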
void EvalQuantized(TfLiteContext* context, TfLiteNode* node,

View File

@ -787,7 +787,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
TfLiteIntArray* bias_dims = tflite::testing::IntArrayFromInts(bias_shape);
TfLiteIntArray* output_dims = tflite::testing::IntArrayFromInts(output_shape);
// Create per-layer quantized int8 input tensor.
// Create per-layer quantized int8_t input tensor.
TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
input_values, input_quantized, input_dims, input_scale, 0);
int input_zero_points[2] = {1, 0};
@ -797,7 +797,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
tflite::testing::IntArrayFromInts(input_zero_points), 0};
input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant};
// Create per-layer quantized int8 filter tensor.
// Create per-layer quantized int8_t filter tensor.
TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
filter_values, filter_quantized, filter_dims, filter_scale, 0);
int filter_zero_points[2] = {1, 0};
@ -807,7 +807,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
tflite::testing::IntArrayFromInts(filter_zero_points), 0};
filter_tensor.quantization = {kTfLiteAffineQuantization, &filter_quant};
// Create per-layer quantized int32 bias tensor.
// Create per-layer quantized int32_t bias tensor.
tflite::SymmetricQuantize(bias_values, bias_quantized, bias_elements,
input_scale * output_scale);
TfLiteTensor bias_tensor =
@ -820,7 +820,7 @@ TF_LITE_MICRO_TEST(PerChannelBroadcastQuantizationParams) {
tflite::testing::IntArrayFromInts(bias_zero_points), 0};
bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quant};
// Create per-layer quantized int8 output tensor.
// Create per-layer quantized int8_t output tensor.
TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
output_data, output_dims, output_scale, 0);
int output_zero_points[2] = {1, 0};
@ -922,7 +922,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
TfLiteIntArray* bias_dims = tflite::testing::IntArrayFromInts(bias_shape);
TfLiteIntArray* output_dims = tflite::testing::IntArrayFromInts(output_shape);
// Create per-tensor quantized int8 input tensor.
// Create per-tensor quantized int8_t input tensor.
int8_t input_quantized[input_elements];
TfLiteTensor input_tensor = tflite::testing::CreateQuantizedTensor(
input_values, input_quantized, input_dims, input_scale, input_zero_point);
@ -935,7 +935,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
tflite::testing::IntArrayFromInts(input_zero_points), 0};
input_tensor.quantization = {kTfLiteAffineQuantization, &input_quant};
// Create per-tensor quantized int8 filter tensor.
// Create per-tensor quantized int8_t filter tensor.
int8_t filter_quantized[filter_elements];
TfLiteTensor filter_tensor = tflite::testing::CreateQuantizedTensor(
filter_values, filter_quantized, filter_dims, filter_scale, 0);
@ -948,7 +948,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
tflite::testing::IntArrayFromInts(filter_zero_points), 0};
filter_tensor.quantization = {kTfLiteAffineQuantization, &filter_quant};
// Create per-tensor quantized int32 bias tensor.
// Create per-tensor quantized int32_t bias tensor.
int32_t bias_quantized[bias_elements];
// See https://www.tensorflow.org/lite/performance/quantization_spec for a
// detailed explanation of why bias scale is input_scale * filter_scale.
@ -965,7 +965,7 @@ TF_LITE_MICRO_TEST(Int8Input32x4Filter32x4ShouldMatchGolden) {
tflite::testing::IntArrayFromInts(bias_zero_points), 0};
bias_tensor.quantization = {kTfLiteAffineQuantization, &bias_quant};
// Create per-tensor quantized int8 output tensor.
// Create per-tensor quantized int8_t output tensor.
int8_t output_quantized[output_elements];
TfLiteTensor output_tensor = tflite::testing::CreateQuantizedTensor(
output_quantized, output_dims, output_scale, output_zero_point);

View File

@ -104,7 +104,8 @@ TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) {
} break;
default: {
TF_LITE_KERNEL_LOG(
context, "Only float32/int8/uint8 are supported currently, got %s",
context,
"Only float32/int8_t/uint8_t are supported currently, got %s",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}

View File

@ -97,12 +97,12 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_L2NORM(reference_ops);
#undef TF_LITE_L2NORM
} else if (output->type == kTfLiteUInt8) {
#define TF_LITE_L2NORM(type) \
tflite::L2NormalizationParams op_params; \
op_params.input_zero_point = input->params.zero_point; \
type::L2Normalization(op_params, GetTensorShape(input), \
GetTensorData<uint8>(input), GetTensorShape(output), \
GetTensorData<uint8>(output))
#define TF_LITE_L2NORM(type) \
tflite::L2NormalizationParams op_params; \
op_params.input_zero_point = input->params.zero_point; \
type::L2Normalization(op_params, GetTensorShape(input), \
GetTensorData<uint8_t>(input), GetTensorShape(output), \
GetTensorData<uint8_t>(output))
TF_LITE_L2NORM(reference_ops);
#undef TF_LITE_L2NORM
@ -115,8 +115,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const int outer_size =
MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
reference_integer_ops::L2Normalization(input->params.zero_point, outer_size,
depth, GetTensorData<int8>(input),
GetTensorData<int8>(output));
depth, GetTensorData<int8_t>(input),
GetTensorData<int8_t>(output));
} else {
TF_LITE_KERNEL_LOG(context, "Output type is %s, requires float.",
TfLiteTypeGetName(output->type));

View File

@ -23,7 +23,7 @@ namespace tflite {
namespace testing {
namespace {
// used to set the quantization parameters for the int8 and uint8 tests
// used to set the quantization parameters for the int8_t and uint8_t tests
constexpr float kInputMin = -2.0;
constexpr float kInputMax = 2.0;
constexpr float kOutputMin = -1.0;
@ -50,7 +50,7 @@ TfLiteTensor CreateL2NormTensor(const float* data, TfLiteIntArray* dims,
return CreateFloatTensor(data, dims);
}
TfLiteTensor CreateL2NormTensor(const uint8* data, TfLiteIntArray* dims,
TfLiteTensor CreateL2NormTensor(const uint8_t* data, TfLiteIntArray* dims,
bool is_input) {
TfLiteTensor tensor;
@ -64,7 +64,7 @@ TfLiteTensor CreateL2NormTensor(const uint8* data, TfLiteIntArray* dims,
return tensor;
}
TfLiteTensor CreateL2NormTensor(const int8* data, TfLiteIntArray* dims,
TfLiteTensor CreateL2NormTensor(const int8_t* data, TfLiteIntArray* dims,
bool is_input) {
TfLiteTensor tensor;

View File

@ -50,7 +50,7 @@ struct PadContext {
resizing_category = ResizingCategory::kGenericResize;
const int paddings_total = GetTensorShape(paddings).FlatSize();
const int32* paddings_data = GetTensorData<int32>(paddings);
const int32_t* paddings_data = GetTensorData<int32_t>(paddings);
// Paddings will be a n,2 array, and we need to detect 4D arrays with the
// pattern { {0,0}, {a, b}, {c, d}, {0,0} }.
if (IsConstantTensor(paddings) && paddings_total == 8 &&
@ -83,7 +83,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
op_context.output->dims->size * 2);
// On Micro, outputs must be properly sized by the converter.
const int32* paddings_data = GetTensorData<int32>(op_context.paddings);
const int32_t* paddings_data = GetTensorData<int32_t>(op_context.paddings);
for (int i = 0; i < op_context.output->dims->size; i++) {
int output_dim = op_context.output->dims->data[i];
int expected_dim = op_context.input->dims->data[i] + paddings_data[i * 2] +
@ -107,7 +107,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
}
// Create before and after padding arrays that are accepted by the kernel.
const int32* paddings_data = GetTensorData<int32>(op_context.paddings);
const int32_t* paddings_data = GetTensorData<int32_t>(op_context.paddings);
tflite::PadParams op_params;
memset(&op_params, 0, sizeof(PadParams));
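A minimal sketch of the (n, 2) padding-pattern check described in the comment above, detecting { {0,0}, {a,b}, {c,d}, {0,0} } in the flattened paddings data; the helper is illustrative, not the kernel's actual code.

#include <cstdint>

// Illustrative only: a 4-D "image style" pad leaves batch and channel dims alone.
bool IsImageStylePadding(const int32_t* paddings, int paddings_total) {
  return paddings_total == 8 &&
         paddings[0] == 0 && paddings[1] == 0 &&   // batch {0,0}
         paddings[6] == 0 && paddings[7] == 0;     // channel {0,0}
}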

View File

@ -105,7 +105,7 @@ void TestAveragePoolingQuantized(
std::initializer_list<int> output_dims_data, float output_min,
float output_max, TfLitePadding padding, TfLiteFusedActivation activation,
T* output_data) {
static_assert(sizeof(T) == 1, "Only int8/uint8 data types allowed.");
static_assert(sizeof(T) == 1, "Only int8_t/uint8_t data types allowed.");
TfLiteIntArray* input_dims = IntArrayFromInitializer(input_dims_data);
TfLiteIntArray* output_dims = IntArrayFromInitializer(output_dims_data);
@ -246,7 +246,7 @@ void TestMaxPoolQuantized(std::initializer_list<int> input_dims_data,
std::initializer_list<int> output_dims_data,
TfLitePadding padding,
TfLiteFusedActivation activation, T* output_data) {
static_assert(sizeof(T) == 1, "Only int8/uint8 data types allowed.");
static_assert(sizeof(T) == 1, "Only int8_t/uint8_t data types allowed.");
TfLiteIntArray* input_dims = IntArrayFromInitializer(input_dims_data);
TfLiteIntArray* output_dims = IntArrayFromInitializer(output_dims_data);

View File

@ -120,7 +120,7 @@ TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) {
} break;
default:
TF_LITE_KERNEL_LOG(
context, "Only float32 and uint8 are supported currently, got %d.",
context, "Only float32 and uint8_t are supported currently, got %d.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}

View File

@ -203,7 +203,7 @@ TF_LITE_MICRO_TEST(QuantizationUtilTest_SafeCast) {
// 128 | 10.0
TF_LITE_MICRO_TEST(QuantizationUtilTest_ChooseQuantizationParams) {
tflite::QuantizationParams qp =
tflite::ChooseQuantizationParams<uint8>(-10.0, 30.0);
tflite::ChooseQuantizationParams<uint8_t>(-10.0, 30.0);
TF_LITE_MICRO_EXPECT_NEAR(qp.scale, 0.156863, 1e-5);
TF_LITE_MICRO_EXPECT_EQ(qp.zero_point, 64);
}
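A minimal sketch of the arithmetic behind the expectations above, assuming the standard asymmetric uint8_t parameter choice: scale = (max - min) / 255 and zero_point = round(-min / scale), clamped to [0, 255]. For the range [-10, 30] that gives scale of about 0.156863 and zero_point 64.

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  const double rmin = -10.0, rmax = 30.0;
  const double scale = (rmax - rmin) / 255.0;  // ~0.156863
  const double zp = std::min(255.0, std::max(0.0, std::round(-rmin / scale)));
  std::printf("scale=%f zero_point=%d\n", scale, static_cast<int>(zp));  // 64
  return 0;
}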
@ -211,7 +211,7 @@ TF_LITE_MICRO_TEST(QuantizationUtilTest_ChooseQuantizationParams) {
TF_LITE_MICRO_TEST(
QuantizationUtilTest_ChooseQuantizationParamsZeroPointOnMinBoundary) {
tflite::QuantizationParams qp =
tflite::ChooseQuantizationParams<uint8>(0.0, 30.0);
tflite::ChooseQuantizationParams<uint8_t>(0.0, 30.0);
TF_LITE_MICRO_EXPECT_NEAR(qp.scale, 0.117647, 1e-5);
TF_LITE_MICRO_EXPECT_EQ(qp.zero_point, 0);
}
@ -219,7 +219,7 @@ TF_LITE_MICRO_TEST(
TF_LITE_MICRO_TEST(
QuantizationUtilTest_ChooseQuantizationParamsEmptyRangeZero) {
tflite::QuantizationParams qp =
tflite::ChooseQuantizationParams<uint8>(0.0, 0.0);
tflite::ChooseQuantizationParams<uint8_t>(0.0, 0.0);
TF_LITE_MICRO_EXPECT_NEAR(qp.scale, 0.0, 1e-5);
TF_LITE_MICRO_EXPECT_EQ(qp.zero_point, 0);
}
@ -227,7 +227,7 @@ TF_LITE_MICRO_TEST(
TF_LITE_MICRO_TEST(
QuantizationUtilTest_ChooseQuantizationParamsZeroPointOnMaxBoundary) {
tflite::QuantizationParams qp =
tflite::ChooseQuantizationParams<uint8>(-10.0, 0.0);
tflite::ChooseQuantizationParams<uint8_t>(-10.0, 0.0);
TF_LITE_MICRO_EXPECT_NEAR(qp.scale, 0.039216, 1e-5);
TF_LITE_MICRO_EXPECT_EQ(qp.zero_point, 255);
}
@ -418,11 +418,11 @@ TF_LITE_MICRO_TEST(QuantizationUtilTest_QuantizeMultiplierArray) {
0.125, 0.25, 0.5, 1, 2, 4};
const int size = 13;
int32 effective_scale_significand[size];
int32_t effective_scale_significand[size];
int effective_scale_shift[size];
tflite::QuantizeMultiplierArray(weights, size, effective_scale_significand,
effective_scale_shift);
const int32 expected_effective_scale_significand[] = {
const int32_t expected_effective_scale_significand[] = {
-1073741824, // float scale = -4
-1073741824, // float scale = -2
-1073741824, // float scale = -1
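A minimal sketch of the fixed-point representation used for the expected values above: a real multiplier m is stored as a Q31 significand q (magnitude in [2^30, 2^31)) plus a shift, with m roughly equal to (q / 2^31) * 2^shift. For m = -4, q = -1073741824 (-0.5 in Q31) and shift = 3. The function name is illustrative and skips edge cases that the real QuantizeMultiplier handles.

#include <cmath>
#include <cstdint>
#include <cstdio>

// Illustrative only: decompose m into a Q31 significand and a power-of-two shift.
void QuantizeMultiplierSketch(double m, int32_t* q, int* shift) {
  if (m == 0.0) {
    *q = 0;
    *shift = 0;
    return;
  }
  const double significand = std::frexp(m, shift);  // |significand| in [0.5, 1)
  *q = static_cast<int32_t>(std::lround(significand * (1LL << 31)));
}

int main() {
  int32_t q = 0;
  int shift = 0;
  QuantizeMultiplierSketch(-4.0, &q, &shift);
  std::printf("q=%ld shift=%d\n", static_cast<long>(q), shift);  // q=-1073741824 shift=3
  return 0;
}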

View File

@ -152,7 +152,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
// This Op (QUANTIZE) quantizes the input and produces quantized output.
// AffineQuantize takes scale and zero point and quantizes the float value to
// quantized output, in int8 or uint8 format.
// quantized output, in int8_t or uint8_t format.
TfLiteRegistration Register_QUANTIZE() {
return {/*init=*/quantize::Init,
/*free=*/nullptr,
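A minimal sketch of the affine quantization the comment above describes, assuming the usual mapping q = zero_point + round(x / scale) clamped to the output type's range; shown here for int8_t, with an illustrative function name.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Illustrative only: quantize one float to int8_t with the given scale and zero point.
int8_t AffineQuantizeToInt8(float x, float scale, int32_t zero_point) {
  const int32_t q = zero_point + static_cast<int32_t>(std::lround(x / scale));
  return static_cast<int8_t>(std::min<int32_t>(127, std::max<int32_t>(-128, q)));
}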

View File

@ -32,7 +32,7 @@ void ValidateQuantizeGoldens(TfLiteTensor* tensors, int tensors_size,
TfLiteContext context;
PopulateContext(tensors, tensors_size, micro_test::reporter, &context);
// Version 1 of quantize supports int8 and uint8 quantization.
// Version 1 of quantize supports int8_t and uint8_t quantization.
::tflite::AllOpsResolver resolver;
const TfLiteRegistration* registration =
resolver.FindOp(tflite::BuiltinOperator_QUANTIZE);

View File

@ -50,7 +50,7 @@ TfLiteStatus PrepareSimple(TfLiteContext* context, TfLiteNode* node) {
TfLiteStatus PrepareMeanOrSum(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_OK(context, PrepareSimple(context, node));
// TODO(b/144955155): Support uint8(b/144955155) and int8(b/144955018)
// TODO(b/144955155): Support uint8_t(b/144955155) and int8_t(b/144955018)
return kTfLiteOk;
}
@ -58,7 +58,7 @@ void ResolveAxis(const int* axis_data, int axis_count,
tflite::MeanParams* op_params) {
int i = 0;
for (; i < axis_count; ++i) {
op_params->axis[i] = static_cast<int16>(axis_data[i]);
op_params->axis[i] = static_cast<int16_t>(axis_data[i]);
}
for (; i < 4; ++i) {
op_params->axis[i] = 1;
@ -110,7 +110,7 @@ TfLiteStatus EvalMean(TfLiteContext* context, TfLiteNode* node) {
}
} break;
default:
// TODO(b/144955155): Support uint8(b/144955155) and int8(b/144955018)
// TODO(b/144955155): Support uint8_t(b/144955155) and int8_t(b/144955018)
TF_LITE_ENSURE_MSG(context, false,
"Currently, only float32 input type "
"is supported.");

View File

@ -71,22 +71,22 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
if (output->type == kTfLiteFloat32) {
reference_ops::ResizeNearestNeighbor(
op_params, GetTensorShape(input), GetTensorData<int32>(input),
GetTensorShape(size), GetTensorData<int32>(size),
GetTensorShape(output), GetTensorData<int32>(output));
op_params, GetTensorShape(input), GetTensorData<int32_t>(input),
GetTensorShape(size), GetTensorData<int32_t>(size),
GetTensorShape(output), GetTensorData<int32_t>(output));
} else if (output->type == kTfLiteUInt8) {
reference_ops::ResizeNearestNeighbor(
op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
GetTensorShape(size), GetTensorData<int32>(size),
GetTensorShape(size), GetTensorData<int32_t>(size),
GetTensorShape(output), GetTensorData<uint8_t>(output));
} else if (output->type == kTfLiteInt8) {
reference_ops::ResizeNearestNeighbor(
op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
GetTensorShape(size), GetTensorData<int32>(size),
GetTensorShape(size), GetTensorData<int32_t>(size),
GetTensorShape(output), GetTensorData<int8_t>(output));
} else {
TF_LITE_KERNEL_LOG(context,
"Output type is %d, requires float, uint8 or int8.",
"Output type is %d, requires float, uint8_t or int8_t.",
output->type);
return kTfLiteError;
}

View File

@ -22,18 +22,18 @@ namespace tflite {
namespace testing {
namespace {
using uint8 = std::uint8_t;
using int32 = std::int32_t;
using uint8_t = std::uint8_t;
using int32_t = std::int32_t;
TfLiteTensor TestCreateTensor(const float* data, TfLiteIntArray* dims) {
return CreateFloatTensor(data, dims);
}
TfLiteTensor TestCreateTensor(const uint8* data, TfLiteIntArray* dims) {
TfLiteTensor TestCreateTensor(const uint8_t* data, TfLiteIntArray* dims) {
return CreateQuantizedTensor(data, dims, 0, 255);
}
TfLiteTensor TestCreateTensor(const int8* data, TfLiteIntArray* dims) {
TfLiteTensor TestCreateTensor(const int8_t* data, TfLiteIntArray* dims) {
return CreateQuantizedTensor(data, dims, -128, 127);
}
@ -42,7 +42,7 @@ TfLiteTensor TestCreateTensor(const int8* data, TfLiteIntArray* dims) {
// Expected sizes should be a 1-D tensor with 2 elements: new_height & new_width
template <typename T>
void TestResizeNearestNeighbor(const int* input_dims_data, const T* input_data,
const int32* expected_size_data,
const int32_t* expected_size_data,
const T* expected_output_data,
const int* output_dims_data, T* output_data) {
TfLiteIntArray* input_dims = IntArrayFromInts(input_dims_data);
@ -101,7 +101,7 @@ TF_LITE_MICRO_TESTS_BEGIN
TF_LITE_MICRO_TEST(HorizontalResize) {
const int input_dims[] = {4, 1, 1, 2, 1};
const float input_data[] = {3, 6};
const int32 expected_size_data[] = {1, 3};
const int32_t expected_size_data[] = {1, 3};
const float expected_output_data[] = {3, 3, 6};
const int output_dims[] = {4, 1, 1, 3, 1};
float output_data[3];
@ -112,32 +112,32 @@ TF_LITE_MICRO_TEST(HorizontalResize) {
}
TF_LITE_MICRO_TEST(HorizontalResizeUInt8) {
const int input_dims[] = {4, 1, 1, 2, 1};
const uint8 input_data[] = {3, 6};
const int32 expected_size_data[] = {1, 3};
const uint8 expected_output_data[] = {3, 3, 6};
const uint8_t input_data[] = {3, 6};
const int32_t expected_size_data[] = {1, 3};
const uint8_t expected_output_data[] = {3, 3, 6};
const int output_dims[] = {4, 1, 1, 3, 1};
uint8 output_data[3];
uint8_t output_data[3];
tflite::testing::TestResizeNearestNeighbor<uint8>(
tflite::testing::TestResizeNearestNeighbor<uint8_t>(
input_dims, input_data, expected_size_data, expected_output_data,
output_dims, output_data);
}
TF_LITE_MICRO_TEST(HorizontalResizeInt8) {
const int input_dims[] = {4, 1, 1, 2, 1};
const int8 input_data[] = {-3, 6};
const int32 expected_size_data[] = {1, 3};
const int8 expected_output_data[] = {-3, -3, 6};
const int8_t input_data[] = {-3, 6};
const int32_t expected_size_data[] = {1, 3};
const int8_t expected_output_data[] = {-3, -3, 6};
const int output_dims[] = {4, 1, 1, 3, 1};
int8 output_data[3];
int8_t output_data[3];
tflite::testing::TestResizeNearestNeighbor<int8>(
tflite::testing::TestResizeNearestNeighbor<int8_t>(
input_dims, input_data, expected_size_data, expected_output_data,
output_dims, output_data);
}
TF_LITE_MICRO_TEST(VerticalResize) {
const int input_dims[] = {4, 1, 2, 1, 1};
const float input_data[] = {3, 9};
const int32 expected_size_data[] = {3, 1};
const int32_t expected_size_data[] = {3, 1};
const float expected_output_data[] = {3, 3, 9};
const int output_dims[] = {4, 1, 3, 1, 1};
float output_data[3];
@ -148,25 +148,25 @@ TF_LITE_MICRO_TEST(VerticalResize) {
}
TF_LITE_MICRO_TEST(VerticalResizeUInt8) {
const int input_dims[] = {4, 1, 2, 1, 1};
const uint8 input_data[] = {3, 9};
const int32 expected_size_data[] = {3, 1};
const uint8 expected_output_data[] = {3, 3, 9};
const uint8_t input_data[] = {3, 9};
const int32_t expected_size_data[] = {3, 1};
const uint8_t expected_output_data[] = {3, 3, 9};
const int output_dims[] = {4, 1, 3, 1, 1};
uint8 output_data[3];
uint8_t output_data[3];
tflite::testing::TestResizeNearestNeighbor<uint8>(
tflite::testing::TestResizeNearestNeighbor<uint8_t>(
input_dims, input_data, expected_size_data, expected_output_data,
output_dims, output_data);
}
TF_LITE_MICRO_TEST(VerticalResizeInt8) {
const int input_dims[] = {4, 1, 2, 1, 1};
const int8 input_data[] = {3, -9};
const int32 expected_size_data[] = {3, 1};
const int8 expected_output_data[] = {3, 3, -9};
const int8_t input_data[] = {3, -9};
const int32_t expected_size_data[] = {3, 1};
const int8_t expected_output_data[] = {3, 3, -9};
const int output_dims[] = {4, 1, 3, 1, 1};
int8 output_data[3];
int8_t output_data[3];
tflite::testing::TestResizeNearestNeighbor<int8>(
tflite::testing::TestResizeNearestNeighbor<int8_t>(
input_dims, input_data, expected_size_data, expected_output_data,
output_dims, output_data);
}
@ -176,7 +176,7 @@ TF_LITE_MICRO_TEST(TwoDimensionalResize) {
3, 6, //
9, 12, //
};
const int32 expected_size_data[] = {3, 3};
const int32_t expected_size_data[] = {3, 3};
const float expected_output_data[] = {
3, 3, 6, //
3, 3, 6, //
@ -192,39 +192,39 @@ TF_LITE_MICRO_TEST(TwoDimensionalResize) {
}
TF_LITE_MICRO_TEST(TwoDimensionalResizeUInt8) {
const int input_dims[] = {4, 1, 2, 2, 1};
const uint8 input_data[] = {
const uint8_t input_data[] = {
3, 6, //
9, 12 //
};
const int32 expected_size_data[] = {3, 3};
const uint8 expected_output_data[] = {
const int32_t expected_size_data[] = {3, 3};
const uint8_t expected_output_data[] = {
3, 3, 6, //
3, 3, 6, //
9, 9, 12 //
};
const int output_dims[] = {4, 1, 3, 3, 1};
uint8 output_data[9];
uint8_t output_data[9];
tflite::testing::TestResizeNearestNeighbor<uint8>(
tflite::testing::TestResizeNearestNeighbor<uint8_t>(
input_dims, input_data, expected_size_data, expected_output_data,
output_dims, output_data);
}
TF_LITE_MICRO_TEST(TwoDimensionalResizeInt8) {
const int input_dims[] = {4, 1, 2, 2, 1};
const int8 input_data[] = {
const int8_t input_data[] = {
3, -6, //
9, 12, //
};
const int32 expected_size_data[] = {3, 3};
const int8 expected_output_data[] = {
const int32_t expected_size_data[] = {3, 3};
const int8_t expected_output_data[] = {
3, 3, -6, //
3, 3, -6, //
9, 9, 12, //
};
const int output_dims[] = {4, 1, 3, 3, 1};
int8 output_data[9];
int8_t output_data[9];
tflite::testing::TestResizeNearestNeighbor<int8>(
tflite::testing::TestResizeNearestNeighbor<int8_t>(
input_dims, input_data, expected_size_data, expected_output_data,
output_dims, output_data);
}
@ -236,7 +236,7 @@ TF_LITE_MICRO_TEST(TwoDimensionalResizeWithTwoBatches) {
4, 10, //
10, 16 //
};
const int32 expected_size_data[] = {3, 3};
const int32_t expected_size_data[] = {3, 3};
const float expected_output_data[] = {
3, 3, 6, //
3, 3, 6, //
@ -254,14 +254,14 @@ TF_LITE_MICRO_TEST(TwoDimensionalResizeWithTwoBatches) {
}
TF_LITE_MICRO_TEST(TwoDimensionalResizeWithTwoBatchesUInt8) {
const int input_dims[] = {4, 2, 2, 2, 1};
const uint8 input_data[] = {
const uint8_t input_data[] = {
3, 6, //
9, 12, //
4, 10, //
10, 16 //
};
const int32 expected_size_data[] = {3, 3};
const uint8 expected_output_data[] = {
const int32_t expected_size_data[] = {3, 3};
const uint8_t expected_output_data[] = {
3, 3, 6, //
3, 3, 6, //
9, 9, 12, //
@ -270,22 +270,22 @@ TF_LITE_MICRO_TEST(TwoDimensionalResizeWithTwoBatchesUInt8) {
10, 10, 16, //
};
const int output_dims[] = {4, 2, 3, 3, 1};
uint8 output_data[18];
uint8_t output_data[18];
tflite::testing::TestResizeNearestNeighbor<uint8>(
tflite::testing::TestResizeNearestNeighbor<uint8_t>(
input_dims, input_data, expected_size_data, expected_output_data,
output_dims, output_data);
}
TF_LITE_MICRO_TEST(TwoDimensionalResizeWithTwoBatchesInt8) {
const int input_dims[] = {4, 2, 2, 2, 1};
const int8 input_data[] = {
const int8_t input_data[] = {
3, 6, //
9, -12, //
-4, 10, //
10, 16 //
};
const int32 expected_size_data[] = {3, 3};
const int8 expected_output_data[] = {
const int32_t expected_size_data[] = {3, 3};
const int8_t expected_output_data[] = {
3, 3, 6, //
3, 3, 6, //
9, 9, -12, //
@ -294,9 +294,9 @@ TF_LITE_MICRO_TEST(TwoDimensionalResizeWithTwoBatchesInt8) {
10, 10, 16, //
};
const int output_dims[] = {4, 2, 3, 3, 1};
int8 output_data[18];
int8_t output_data[18];
tflite::testing::TestResizeNearestNeighbor<int8>(
tflite::testing::TestResizeNearestNeighbor<int8_t>(
input_dims, input_data, expected_size_data, expected_output_data,
output_dims, output_data);
}
@ -306,7 +306,7 @@ TF_LITE_MICRO_TEST(ThreeDimensionalResize) {
3, 4, 6, 10, //
9, 10, 12, 16, //
};
const int32 expected_size_data[] = {3, 3};
const int32_t expected_size_data[] = {3, 3};
const float expected_output_data[] = {
3, 4, 3, 4, 6, 10, //
3, 4, 3, 4, 6, 10, //
@ -321,39 +321,39 @@ TF_LITE_MICRO_TEST(ThreeDimensionalResize) {
}
TF_LITE_MICRO_TEST(ThreeDimensionalResizeUInt8) {
const int input_dims[] = {4, 1, 2, 2, 2};
const uint8 input_data[] = {
const uint8_t input_data[] = {
3, 4, 6, 10, //
10, 12, 14, 16, //
};
const int32 expected_size_data[] = {3, 3};
const uint8 expected_output_data[] = {
const int32_t expected_size_data[] = {3, 3};
const uint8_t expected_output_data[] = {
3, 4, 3, 4, 6, 10, //
3, 4, 3, 4, 6, 10, //
10, 12, 10, 12, 14, 16, //
};
const int output_dims[] = {4, 1, 3, 3, 2};
uint8 output_data[18];
uint8_t output_data[18];
tflite::testing::TestResizeNearestNeighbor<uint8>(
tflite::testing::TestResizeNearestNeighbor<uint8_t>(
input_dims, input_data, expected_size_data, expected_output_data,
output_dims, output_data);
}
TF_LITE_MICRO_TEST(ThreeDimensionalResizeInt8) {
const int input_dims[] = {4, 1, 2, 2, 2};
const int8 input_data[] = {
const int8_t input_data[] = {
3, 4, -6, 10, //
10, 12, -14, 16, //
};
const int32 expected_size_data[] = {3, 3};
const int8 expected_output_data[] = {
const int32_t expected_size_data[] = {3, 3};
const int8_t expected_output_data[] = {
3, 4, 3, 4, -6, 10, //
3, 4, 3, 4, -6, 10, //
10, 12, 10, 12, -14, 16, //
};
const int output_dims[] = {4, 1, 3, 3, 2};
int8 output_data[18];
int8_t output_data[18];
tflite::testing::TestResizeNearestNeighbor<int8>(
tflite::testing::TestResizeNearestNeighbor<int8_t>(
input_dims, input_data, expected_size_data, expected_output_data,
output_dims, output_data);
}

View File

@ -42,7 +42,8 @@ TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context,
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8);
if (output->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point, -32768);
// NOTE: Current int16 softmax output does not require symmetric scaling
// NOTE: Current int16_t softmax output does not require symmetric
// scaling
// - so no need to verify scale here.
} else {
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8);

View File

@ -40,18 +40,18 @@ struct OpData {
// and the special 16-bit -> 16bit quantized path
int input1_shift;
int input2_shift;
int32 output_activation_min;
int32 output_activation_max;
int32_t output_activation_min;
int32_t output_activation_max;
// These fields are used only in the general 8-bit -> 8bit quantized path
int32 input1_multiplier;
int32 input2_multiplier;
int32 output_multiplier;
int32_t input1_multiplier;
int32_t input2_multiplier;
int32_t output_multiplier;
int output_shift;
int left_shift;
int32 input1_offset;
int32 input2_offset;
int32 output_offset;
int32_t input1_offset;
int32_t input2_offset;
int32_t output_offset;
};
TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteSubParams* params,

View File

@ -32,8 +32,8 @@ namespace svdf {
namespace {
struct OpData {
int32 effective_scale_1_a;
int32 effective_scale_2_a;
int32_t effective_scale_1_a;
int32_t effective_scale_2_a;
// b versions of each scale are kept at int since the numbers are just the
// shift value - typically between [-32, 32].
int effective_scale_1_b;
@ -377,7 +377,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2);
// Validate Tensor Output:
// [0] = float/int8, {2, batch_size, num_units}
// [0] = float/int8_t, {2, batch_size, num_units}
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_EQ(context, NumDimensions(output), 2);

View File

@ -42,18 +42,18 @@ struct OpData {
// and the special 16-bit -> 16bit quantized path
int input1_shift;
int input2_shift;
int32 output_activation_min;
int32 output_activation_max;
int32_t output_activation_min;
int32_t output_activation_max;
// These fields are used only in the general 8-bit -> 8bit quantized path
int32 input1_multiplier;
int32 input2_multiplier;
int32 output_multiplier;
int32_t input1_multiplier;
int32_t input2_multiplier;
int32_t output_multiplier;
int output_shift;
int left_shift;
int32 input1_offset;
int32 input2_offset;
int32 output_offset;
int32_t input1_offset;
int32_t input2_offset;
int32_t output_offset;
};
TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteAddParams* params,

View File

@ -219,9 +219,9 @@ TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
const int stride_height = params->stride_height;
const int pad_width = data.padding.width;
const int pad_height = data.padding.height;
const int32 output_activation_min = data.output_activation_min;
const int32 output_activation_max = data.output_activation_max;
const int32 output_multiplier = data.output_multiplier;
const int32_t output_activation_min = data.output_activation_min;
const int32_t output_activation_max = data.output_activation_max;
const int32_t output_multiplier = data.output_multiplier;
const int output_shift = -data.output_shift;
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
@ -362,10 +362,10 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
reference_integer_ops::ConvPerChannel(
op_params, data.per_channel_output_multiplier,
data.per_channel_output_shift, GetTensorShape(input),
GetTensorData<int8>(input), GetTensorShape(filter),
GetTensorData<int8>(filter), GetTensorShape(bias),
GetTensorData<int32>(bias), GetTensorShape(output),
GetTensorData<int8>(output));
GetTensorData<int8_t>(input), GetTensorShape(filter),
GetTensorData<int8_t>(filter), GetTensorShape(bias),
GetTensorData<int32_t>(bias), GetTensorShape(output),
GetTensorData<int8_t>(output));
}
TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node,

View File

@ -142,7 +142,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
int filter_width = SizeOfDimension(filter, 2);
int filter_height = SizeOfDimension(filter, 1);
// Per channel quantization is only needed for int8 inference. For other
// Per channel quantization is only needed for int8_t inference. For other
// quantized types, only a single scale and zero point is needed.
const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension];
// Dynamically allocate per-channel quantization parameters.
@ -335,10 +335,10 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
reference_integer_ops::DepthwiseConvPerChannel(
op_params, data->per_channel_output_multiplier,
data->per_channel_output_shift, GetTensorShape(input),
GetTensorData<int8>(input), GetTensorShape(filter),
GetTensorData<int8>(filter), GetTensorShape(bias),
GetTensorData<int32>(bias), GetTensorShape(output),
GetTensorData<int8>(output));
GetTensorData<int8_t>(input), GetTensorShape(filter),
GetTensorData<int8_t>(filter), GetTensorShape(bias),
GetTensorData<int32_t>(bias), GetTensorShape(output),
GetTensorData<int8_t>(output));
}
TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
@ -370,9 +370,9 @@ TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
const int pad_width = data->padding.width;
const int pad_height = data->padding.height;
const int depth_multiplier = params->depth_multiplier;
const int32 output_activation_min = data->output_activation_min;
const int32 output_activation_max = data->output_activation_max;
const int32 output_multiplier = data->output_multiplier;
const int32_t output_activation_min = data->output_activation_min;
const int32_t output_activation_max = data->output_activation_max;
const int32_t output_multiplier = data->output_multiplier;
// Legacy ops used mixed left and right shifts. Now all are +ve-means-left.
const int output_shift = -data->output_shift;
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);

View File

@ -148,7 +148,7 @@ TfLiteStatus AverageEvalFloat(TfLiteContext* context, const TfLiteNode* node,
}
out_length = batches * output_height * output_width * depth;
uint32 p_unalign_val = (uint32)out_data_ptr, p_align_val;
uint32_t p_unalign_val = (uint32_t)out_data_ptr, p_align_val;
p_align_val = (p_unalign_val + 7) & (~7);
// pre loop for activation_min_max
@ -215,8 +215,8 @@ TfLiteStatus AverageEvalQuantized(TfLiteContext* context,
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
const uint8* inp_data_ptr;
uint8* out_data_ptr;
const uint8_t* inp_data_ptr;
uint8_t* out_data_ptr;
int inp_data_format = 0, out_data_format = 0, out_length;
int inp_precision = PREC_ASYM8, out_precision = PREC_ASYM8;
void* p_scratch;
@ -262,7 +262,7 @@ TfLiteStatus AverageEvalQuantized(TfLiteContext* context,
}
out_length = batches * output_height * output_width * depth;
uint32 p_unalign_val = (uint32)out_data_ptr, p_align_val;
uint32_t p_unalign_val = (uint32_t)out_data_ptr, p_align_val;
p_align_val = (p_unalign_val + 7) & (~7);
// pre loop for activation_min_max
@ -372,7 +372,7 @@ TfLiteStatus MaxEvalFloat(TfLiteContext* context, TfLiteNode* node,
}
out_length = batches * output_height * output_width * depth;
uint32 p_unalign_val = (uint32)out_data_ptr, p_align_val;
uint32_t p_unalign_val = (uint32_t)out_data_ptr, p_align_val;
p_align_val = (p_unalign_val + 7) & (~7);
// pre loop for activation_min_max
@ -438,8 +438,8 @@ TfLiteStatus MaxEvalQuantized(TfLiteContext* context, TfLiteNode* node,
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
const uint8* inp_data_ptr;
uint8* out_data_ptr;
const uint8_t* inp_data_ptr;
uint8_t* out_data_ptr;
int inp_data_format = 0, out_data_format = 0, out_length;
int inp_precision = PREC_ASYM8, out_precision = PREC_ASYM8;
void* p_scratch;
@ -482,7 +482,7 @@ TfLiteStatus MaxEvalQuantized(TfLiteContext* context, TfLiteNode* node,
}
out_length = batches * output_height * output_width * depth;
uint32 p_unalign_val = (uint32)out_data_ptr, p_align_val;
uint32_t p_unalign_val = (uint32_t)out_data_ptr, p_align_val;
p_align_val = (p_unalign_val + 7) & (~7);
// pre loop for activation_min_max
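The recurring (p_unalign_val + 7) & (~7) pattern in these pooling kernels is the standard round-up-to-8-bytes alignment trick; a small standalone illustration (helper name is mine):
#include <cstdint>
#include <cstdio>
// Round an address up to the next multiple of 8: add (alignment - 1), then
// clear the low three bits. The kernels cast the pointer to uint32_t, which
// assumes a 32-bit target; std::uintptr_t is the portable equivalent.
std::uintptr_t AlignUp8(std::uintptr_t addr) {
  return (addr + 7) & ~static_cast<std::uintptr_t>(7);
}
int main() {
  for (std::uintptr_t a = 0; a <= 9; ++a) {
    std::printf("%llu -> %llu\n", static_cast<unsigned long long>(a),
                static_cast<unsigned long long>(AlignUp8(a)));
  }
  return 0;  // prints 0->0, 1 through 8 -> 8, 9 -> 16
}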

View File

@ -63,7 +63,8 @@ TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context,
TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8);
if (output->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point, -32768);
// NOTE: Current int16 softmax output does not require symmetric scaling
// NOTE: Current int16_t softmax output does not require symmetric
// scaling
// - so no need to verify scale here.
} else {
TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8);

View File

@ -53,8 +53,8 @@ namespace svdf {
namespace {
struct OpData {
int32 effective_scale_1_a;
int32 effective_scale_2_a;
int32_t effective_scale_1_a;
int32_t effective_scale_2_a;
// b versions of each scale are kept at int since the numbers are just the
// shift value - typically between [-32, 32].
int effective_scale_1_b;
@ -461,7 +461,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2);
// Validate Tensor Output:
// [0] = float/int8, {2, batch_size, num_units}
// [0] = float/int8_t, {2, batch_size, num_units}
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_EQ(context, NumDimensions(output), 2);

View File

@ -33,22 +33,22 @@ namespace conv {
namespace xtensa {
namespace hifimini {
void ConvPerChannel(const ConvParams& params, const int32* output_multiplier,
const int32* output_shift, const RuntimeShape& input_shape,
const int8* input_data, const RuntimeShape& filter_shape,
const int8* filter_data, const RuntimeShape& bias_shape,
const int32* bias_data, const RuntimeShape& output_shape,
int8* output_data) {
void ConvPerChannel(const ConvParams& params, const int32_t* output_multiplier,
const int32_t* output_shift,
const RuntimeShape& input_shape, const int8_t* input_data,
const RuntimeShape& filter_shape, const int8_t* filter_data,
const RuntimeShape& bias_shape, const int32_t* bias_data,
const RuntimeShape& output_shape, int8_t* output_data) {
const int stride_width = params.stride_width;
const int stride_height = params.stride_height;
const int dilation_width_factor = params.dilation_width_factor;
const int dilation_height_factor = params.dilation_height_factor;
const int pad_width = params.padding_values.width;
const int pad_height = params.padding_values.height;
const int32 input_offset = params.input_offset;
const int32 output_offset = params.output_offset;
const int32 output_activation_min = params.quantized_activation_min;
const int32 output_activation_max = params.quantized_activation_max;
const int32_t input_offset = params.input_offset;
const int32_t output_offset = params.output_offset;
const int32_t output_activation_min = params.quantized_activation_min;
const int32_t output_activation_max = params.quantized_activation_max;
const int batches = input_shape.Dims(0);
@ -169,11 +169,11 @@ void ConvPerChannel(const ConvParams& params, const int32* output_multiplier,
inline void Conv1x32Input32x32Filter(
const int input_offset, const int output_offset,
const int quantized_activation_min, const int quantized_activation_max,
const int32* output_multiplier, const int32* output_shift,
const RuntimeShape& input_shape, const int8* input_data,
const RuntimeShape& filter_shape, const int8* filter_data,
const RuntimeShape& bias_shape, const int32* bias_data,
const RuntimeShape& output_shape, int8* output_data) {
const int32_t* output_multiplier, const int32_t* output_shift,
const RuntimeShape& input_shape, const int8_t* input_data,
const RuntimeShape& filter_shape, const int8_t* filter_data,
const RuntimeShape& bias_shape, const int32_t* bias_data,
const RuntimeShape& output_shape, int8_t* output_data) {
ae_p24x2s input_offset_24x2 = AE_MOVPA24(input_offset);
ae_q56s output_offset_56 = AE_CVTQ48A32S(output_offset);
ae_q56s output_activation_max_56 = AE_CVTQ48A32S(quantized_activation_max);
@ -324,7 +324,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
int output_width = output->dims->data[2];
int output_height = output->dims->data[1];
// Per channel quantization is only needed for int8 inference. For other
// Per channel quantization is only needed for int8_t inference. For other
// quantized types, only a single scale and zero point is needed.
const int num_channels = filter->dims->data[kConvQuantizedDimension];
// Dynamically allocate per-channel quantization parameters.
@ -382,10 +382,10 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
xtensa::hifimini::ConvPerChannel(
op_params, data->per_channel_output_multiplier,
data->per_channel_output_shift, GetTensorShape(input),
GetTensorData<int8>(input), GetTensorShape(filter),
GetTensorData<int8>(filter), GetTensorShape(bias),
GetTensorData<int32>(bias), GetTensorShape(output),
GetTensorData<int8>(output));
GetTensorData<int8_t>(input), GetTensorShape(filter),
GetTensorData<int8_t>(filter), GetTensorShape(bias),
GetTensorData<int32_t>(bias), GetTensorShape(output),
GetTensorData<int8_t>(output));
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
@ -409,10 +409,10 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
op_data->output_activation_min, op_data->output_activation_max,
op_data->per_channel_output_multiplier,
op_data->per_channel_output_shift, GetTensorShape(input),
GetTensorData<int8>(input), GetTensorShape(filter),
GetTensorData<int8>(filter), GetTensorShape(bias),
GetTensorData<int32>(bias), GetTensorShape(output),
GetTensorData<int8>(output));
GetTensorData<int8_t>(input), GetTensorShape(filter),
GetTensorData<int8_t>(filter), GetTensorShape(bias),
GetTensorData<int32_t>(bias), GetTensorShape(output),
GetTensorData<int8_t>(output));
return kTfLiteOk;
}

View File

@ -34,12 +34,12 @@ namespace xtensa {
namespace hifimini {
inline void DepthwiseConvPerChannel(
const DepthwiseParams& params, const int32* output_multiplier,
const int32* output_shift, const RuntimeShape& input_shape,
const int8* input_data, const RuntimeShape& filter_shape,
const int8* filter_data, const RuntimeShape& bias_shape,
const int32* bias_data, const RuntimeShape& output_shape,
int8* output_data) {
const DepthwiseParams& params, const int32_t* output_multiplier,
const int32_t* output_shift, const RuntimeShape& input_shape,
const int8_t* input_data, const RuntimeShape& filter_shape,
const int8_t* filter_data, const RuntimeShape& bias_shape,
const int32_t* bias_data, const RuntimeShape& output_shape,
int8_t* output_data) {
// TODO(b/154032858): Investigate removing extra copies.
const int stride_width = params.stride_width;
const int stride_height = params.stride_height;
@ -48,10 +48,10 @@ inline void DepthwiseConvPerChannel(
const int pad_width = params.padding_values.width;
const int pad_height = params.padding_values.height;
const int depth_multiplier = params.depth_multiplier;
const int32 input_offset = params.input_offset;
const int32 output_offset = params.output_offset;
const int32 output_activation_min = params.quantized_activation_min;
const int32 output_activation_max = params.quantized_activation_max;
const int32_t input_offset = params.input_offset;
const int32_t output_offset = params.output_offset;
const int32_t output_activation_min = params.quantized_activation_min;
const int32_t output_activation_max = params.quantized_activation_max;
const int batches = input_shape.Dims(0);
@ -99,16 +99,16 @@ inline void DepthwiseConvPerChannel(
((batch * input_height + in_y) * input_width + in_x) *
input_depth +
(in_channel);
int32 input_val = input_data[input_idx];
int32_t input_val = input_data[input_idx];
// Find current filter index, minus 2 for Xtensa load
// alignments:
int filter_idx =
((filter_y)*filter_width + filter_x) * filter_depth +
(output_channel);
int32 filter_val = filter_data[filter_idx];
int32_t filter_val = filter_data[filter_idx];
// Load 8bit value as int32 into a 24x24 register and right
// Load 8bit value as int32_t into a 24x24 register and right
// shift into 24bit space. Note: value is duplicated in the HH
// and LL register - but all calculations are done on the HH
// side.
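The input_idx arithmetic above is plain flattened NHWC indexing; a tiny sketch of the same formula (ignoring the Xtensa "minus 2" load-alignment adjustment, helper name is mine):
// Flattened NHWC offset for input[batch][y][x][channel]; this is the same
// ((batch * height + y) * width + x) * depth + channel arithmetic used for
// input_idx in the loop above.
inline int FlatIndexNhwc(int batch, int y, int x, int channel,
                         int height, int width, int depth) {
  return ((batch * height + y) * width + x) * depth + channel;
}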
@ -171,11 +171,11 @@ constexpr int kConvolutionalKernelDepth = 32;
inline void DepthwiseConv4x32MatchingInputAndFilter(
const int input_offset, const int output_offset,
const int quantized_activation_min, const int quantized_activation_max,
const int32* output_multiplier, const int32* output_shift,
const RuntimeShape& input_shape, const int8* input_data,
const RuntimeShape& filter_shape, const int8* filter_data,
const RuntimeShape& bias_shape, const int32* bias_data,
const RuntimeShape& output_shape, int8* output_data) {
const int32_t* output_multiplier, const int32_t* output_shift,
const RuntimeShape& input_shape, const int8_t* input_data,
const RuntimeShape& filter_shape, const int8_t* filter_data,
const RuntimeShape& bias_shape, const int32_t* bias_data,
const RuntimeShape& output_shape, int8_t* output_data) {
// Convert the (unsigned) 32-bit multiplier down to a 24-bit multiplier.
const int32_t mult = output_multiplier[0] >> 8;
const int32_t shift = output_shift[0];
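The >> 8 above narrows the 32-bit multiplier to 24 bits, presumably so it fits the 24-bit multiply path this kernel uses; a quick arithmetic check under that fixed-point reading (my interpretation):
#include <cstdint>
#include <cstdio>
int main() {
  // A 32-bit fixed-point multiplier with 31 fractional bits representing 0.5.
  int32_t q31 = int32_t{1} << 30;
  // Dropping the low 8 bits leaves 23 fractional bits with the same value.
  int32_t q23 = q31 >> 8;
  std::printf("%f %f\n", q31 / 2147483648.0, q23 / 8388608.0);  // 0.5 0.5
  return 0;
}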
@ -189,16 +189,16 @@ inline void DepthwiseConv4x32MatchingInputAndFilter(
const int stride_elements =
(kConvolutionalKernelDepth / kConvolutionalKernelWidth);
const int8* input_0_ptr = (const int8*)(input_data - 2);
const int8* weight_0_ptr = (const int8*)(filter_data - 2);
const int8_t* input_0_ptr = (const int8_t*)(input_data - 2);
const int8_t* weight_0_ptr = (const int8_t*)(filter_data - 2);
// Apply the kernels in blocks of 4 for all the channels.
const int8* input_1_ptr = input_0_ptr + stride_elements * 4;
const int8* input_2_ptr = input_1_ptr + stride_elements * 4;
const int8* input_3_ptr = input_2_ptr + stride_elements * 4;
const int8_t* input_1_ptr = input_0_ptr + stride_elements * 4;
const int8_t* input_2_ptr = input_1_ptr + stride_elements * 4;
const int8_t* input_3_ptr = input_2_ptr + stride_elements * 4;
const int8* weight_1_ptr = weight_0_ptr + stride_elements * 4;
const int8* weight_2_ptr = weight_1_ptr + stride_elements * 4;
const int8* weight_3_ptr = weight_2_ptr + stride_elements * 4;
const int8_t* weight_1_ptr = weight_0_ptr + stride_elements * 4;
const int8_t* weight_2_ptr = weight_1_ptr + stride_elements * 4;
const int8_t* weight_3_ptr = weight_2_ptr + stride_elements * 4;
for (int i = 0; i < num_blocks; ++i) {
ae_q56s block_0_acc = AE_ZEROQ56();
@ -372,7 +372,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
int filter_width = SizeOfDimension(filter, 2);
int filter_height = SizeOfDimension(filter, 1);
// Per channel quantization is only needed for int8 inference. For other
// Per channel quantization is only needed for int8_t inference. For other
// quantized types, only a single scale and zero point is needed.
const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension];
// Dynamically allocate per-channel quantization parameters.
@ -430,10 +430,10 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
xtensa::hifimini::DepthwiseConvPerChannel(
op_params, data->per_channel_output_multiplier,
data->per_channel_output_shift, GetTensorShape(input),
GetTensorData<int8>(input), GetTensorShape(filter),
GetTensorData<int8>(filter), GetTensorShape(bias),
GetTensorData<int32>(bias), GetTensorShape(output),
GetTensorData<int8>(output));
GetTensorData<int8_t>(input), GetTensorShape(filter),
GetTensorData<int8_t>(filter), GetTensorShape(bias),
GetTensorData<int32_t>(bias), GetTensorShape(output),
GetTensorData<int8_t>(output));
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
@ -460,10 +460,10 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
std::numeric_limits<int8_t>::min(), std::numeric_limits<int8_t>::max(),
op_data->per_channel_output_multiplier,
op_data->per_channel_output_shift, GetTensorShape(input),
GetTensorData<int8>(input), GetTensorShape(filter),
GetTensorData<int8>(filter), GetTensorShape(bias),
GetTensorData<int32>(bias), GetTensorShape(output),
GetTensorData<int8>(output));
GetTensorData<int8_t>(input), GetTensorShape(filter),
GetTensorData<int8_t>(filter), GetTensorShape(bias),
GetTensorData<int32_t>(bias), GetTensorShape(output),
GetTensorData<int8_t>(output));
return kTfLiteOk;
}
switch (input->type) { // Already know in/out types are same.

View File

@ -36,16 +36,16 @@ namespace hifimini {
void FullyConnected(const FullyConnectedParams& params,
const RuntimeShape& input_shape, const int8_t* input_data,
const RuntimeShape& filter_shape, const int8_t* filter_data,
const RuntimeShape& bias_shape, const int32* bias_data,
const RuntimeShape& bias_shape, const int32_t* bias_data,
const RuntimeShape& output_shape, int8_t* output_data) {
// TODO(b/154032858): Investigate removing extra copies.
const int32 input_offset = params.input_offset;
const int32 filter_offset = params.weights_offset;
const int32 output_offset = params.output_offset;
const int32 output_multiplier = params.output_multiplier;
const int32_t input_offset = params.input_offset;
const int32_t filter_offset = params.weights_offset;
const int32_t output_offset = params.output_offset;
const int32_t output_multiplier = params.output_multiplier;
const int output_shift = params.output_shift;
const int32 output_activation_min = params.quantized_activation_min;
const int32 output_activation_max = params.quantized_activation_max;
const int32_t output_activation_min = params.quantized_activation_min;
const int32_t output_activation_max = params.quantized_activation_max;
const int filter_dim_count = filter_shape.DimensionsCount();
const int batches = output_shape.Dims(0);

View File

@ -156,7 +156,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
// This Op (QUANTIZE) quantizes the input and produces quantized output.
// AffineQuantize takes scale and zero point and quantizes the float value to
// quantized output, in int8 or uint8 format.
// quantized output, in int8_t or uint8_t format.
TfLiteRegistration Register_QUANTIZE() {
return {/*init=*/quantize::Init,
/*free=*/nullptr,

View File

@ -33,12 +33,12 @@ struct OpData {
uint16_t* exp_lut;
};
// Number of unique int8 and int16 values. Used in exponent lookup table
// Number of unique int8_t and int16_t values. Used in exponent lookup table
// computation.
constexpr int kInt8Range =
std::numeric_limits<int8_t>::max() - std::numeric_limits<int8>::min() + 1;
constexpr int kInt16Range =
std::numeric_limits<int16_t>::max() - std::numeric_limits<int16>::min() + 1;
std::numeric_limits<int8_t>::max() - std::numeric_limits<int8_t>::min() + 1;
constexpr int kInt16Range = std::numeric_limits<int16_t>::max() -
std::numeric_limits<int16_t>::min() + 1;
// Each 16-bit precalculated exponent is expressed as a Q0.16 fixedpoint
// value. We special-case e^0 since 1.0 requires 1 integer bit to
// express.
@ -47,7 +47,7 @@ constexpr int kExpFractionalBits = 16;
// specially.
constexpr int kMaxExponentValue = (1 << kExpFractionalBits);
// Quantized softmax with int8 input and int16 output.
// Quantized softmax with int8_t input and int16_t output.
// Passing OpData by value does not have much savings in this op, but following
// that as a best practice, at least for the xtensa kernels. See b/155656675 for
// more details.
@ -97,7 +97,7 @@ TfLiteStatus Softmax(OpData op_data, const RuntimeShape& input_shape,
}
output_data[i * depth + c] = static_cast<int16_t>(std::max(
std::min(full_range_output,
static_cast<int32>(std::numeric_limits<int16_t>::max())),
static_cast<int32_t>(std::numeric_limits<int16_t>::max())),
static_cast<int32_t>(std::numeric_limits<int16_t>::min())));
}
}
@ -118,7 +118,8 @@ TfLiteStatus CalculateSoftmaxOpData(TfLiteContext* context,
if (output->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point,
std::numeric_limits<int16_t>::min());
// NOTE: Current int16 softmax output does not require symmetric scaling
// NOTE: Current int16_t softmax output does not require symmetric
// scaling
// - so no need to verify scale here.
} else {
TF_LITE_ENSURE_EQ(context, output->params.zero_point,
@ -127,10 +128,10 @@ TfLiteStatus CalculateSoftmaxOpData(TfLiteContext* context,
}
}
// Precompute e^(-x * input_scale * beta) for every possible int8 input.
// Precompute e^(-x * input_scale * beta) for every possible int8_t input.
// This computation is used for every iteration of Softmax. We must compute
// using pre-scaled inputs to avoid introducing additional error, while
// restricting our input range to the int8 range. This is valid since beta
// restricting our input range to the int8_t range. This is valid since beta
// and input scale are constant for a given op in the graph. Skip index 0
// since that is a special case which requires 1 integer bit instead of 0.
for (int i = 1; i <= kInt8Range; i++) {
@ -163,7 +164,7 @@ TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
TFLITE_DCHECK(node->user_data != nullptr);
OpData* op_data = static_cast<OpData*>(node->user_data);
// Allocate an array to precompute exponents over all int8 inputs, applying
// Allocate an array to precompute exponents over all int8_t inputs, applying
// the scale and beta before calculating exp. It is mandatory to apply beta
// and scale here, since each softmax op may have different beta and scale
// values. Beta and scale will remain constant for a given softmax op.
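A rough sketch of the exponent-table precompute described in these comments, under my assumption that entries are stored as Q0.16 (value * 2^16) in a table of kInt8Range + 1 entries; the real kernel's scaling helpers and its e^0 handling differ in detail:
#include <cmath>
#include <cstdint>
#include <limits>
// Hypothetical illustration: fill a table with e^(-i * input_scale * beta)
// for i in [1, kInt8Range], stored as Q0.16. Index 0 (e^0 == 1.0) is the
// special case noted above, since 1.0 needs an integer bit that Q0.16 lacks.
void PrecomputeExpLutSketch(float input_scale, float beta, uint16_t* exp_lut) {
  constexpr int kInt8Range = std::numeric_limits<int8_t>::max() -
                             std::numeric_limits<int8_t>::min() + 1;  // 256
  constexpr int kExpFractionalBits = 16;
  const float kOne = static_cast<float>(1 << kExpFractionalBits);
  exp_lut[0] = std::numeric_limits<uint16_t>::max();  // ~1.0, handled specially
  for (int i = 1; i <= kInt8Range; ++i) {
    const float e = std::exp(-static_cast<float>(i) * input_scale * beta);
    exp_lut[i] = static_cast<uint16_t>(e * kOne);
  }
}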

View File

@ -33,8 +33,8 @@ namespace svdf {
namespace {
struct OpData {
int32 effective_scale_1_a;
int32 effective_scale_2_a;
int32_t effective_scale_1_a;
int32_t effective_scale_2_a;
// b versions of each scale are kept at int since the numbers are just the
// shift value - typically between [-32, 32].
int effective_scale_1_b;
@ -153,7 +153,7 @@ void EvalIntegerSVDF(TfLiteContext* context, TfLiteNode* node,
dot_prod_24x2, data.effective_scale_1_a,
data.effective_scale_1_b);
// Cap min/max and convert to int32:
// Cap min/max and convert to int32_t:
dot_prod_56 = AE_MAXQ56S(dot_prod_56, output_int16_min_56);
dot_prod_56 = AE_MINQ56S(dot_prod_56, output_int16_max_56);
// Truncate immediately since the QR register is already 32 bit aligned:
@ -246,7 +246,7 @@ void EvalIntegerSVDF(TfLiteContext* context, TfLiteNode* node,
data.effective_scale_2_b);
// Add output adjustment:
x_56 = AE_ADDQ56(x_56, output_zp_56);
// Cap min/max and convert to int32 (already aligned to 32bit):
// Cap min/max and convert to int32_t (already aligned to 32bit):
x_56 = AE_MAXQ56S(x_56, output_int8_min_56);
x_56 = AE_MINQ56S(x_56, output_int8_max_56);
GetTensorData<int8_t>(output_tensor)[i] =
@ -308,7 +308,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2);
// Validate Tensor Output:
// [0] = float/int8, {2, batch_size, num_units}
// [0] = float/int8_t, {2, batch_size, num_units}
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_EQ(context, NumDimensions(output), 2);

View File

@ -34,7 +34,7 @@ void AffineQuantize(int scale_multiplier,
const tflite::QuantizationParams& op_params,
const RuntimeShape& input_shape, const int16_t* input_data,
const RuntimeShape& output_shape, int8_t* output_data) {
const int32 zero_point = op_params.zero_point;
const int32_t zero_point = op_params.zero_point;
const int flat_size = MatchingFlatSize(input_shape, output_shape);
ae_q56s min_val_56 = AE_CVTQ48A32S(INT16_MIN);
ae_q56s max_val_56 = AE_CVTQ48A32S(INT16_MAX);
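For reference, the affine mapping this op implements is q = clamp(round(x / scale) + zero_point); a scalar sketch for the int8_t case (the kernel above is the Xtensa int16_t-input flavor, which works from a precomputed scale multiplier):
#include <algorithm>
#include <cmath>
#include <cstdint>
// Scalar affine quantization: float -> int8_t with the given scale/zero point.
inline int8_t AffineQuantizeScalar(float x, float scale, int32_t zero_point) {
  int32_t q = static_cast<int32_t>(std::round(x / scale)) + zero_point;
  return static_cast<int8_t>(
      std::min<int32_t>(127, std::max<int32_t>(-128, q)));
}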
@ -155,7 +155,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
// This Op (QUANTIZE) quantizes the input and produces quantized output.
// AffineQuantize takes scale and zero point and quantizes the float value to
// quantized output, in int8 or uint8 format.
// quantized output, in int8_t or uint8_t format.
TfLiteRegistration Register_QUANTIZE() {
return {/*init=*/quantize::Init,
/*free=*/nullptr,

View File

@ -72,7 +72,8 @@ TfLiteStatus CalculateSoftmaxOpData(TfLiteContext* context,
if (output->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, output->params.zero_point,
std::numeric_limits<int16_t>::min());
// NOTE: Current int16 softmax output does not require symmetric scaling
// NOTE: Current int16_t softmax output does not require symmetric
// scaling
// - so no need to verify scale here.
} else {
TF_LITE_ENSURE_EQ(context, output->params.zero_point,
@ -124,7 +125,7 @@ TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
const TfLiteStatus scratch_status = context->RequestScratchBufferInArena(
context, scratch_size, &(op_data->scratch_tensor_index));
TF_LITE_ENSURE_OK(context, scratch_status);
// Allocate an array to precompute exponents over all int8 inputs, applying
// Allocate an array to precompute exponents over all int8_t inputs, applying
// the scale and beta before calculating exp. It is mandatory to apply beta
// and scale here, since each softmax op may have different beta and scale
// values. Beta and scale will remain constant for a given softmax op.
@ -145,7 +146,7 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
const RuntimeShape& input_shape = GetTensorShape(input);
const int8_t* input_data = GetTensorData<int8_t>(input);
const RuntimeShape& output_shape = GetTensorShape(output);
int16* output_data = GetTensorData<int16>(output);
int16_t* output_data = GetTensorData<int16_t>(output);
const int trailing_dim = input_shape.DimensionsCount() - 1;
const int outer_size =
MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);

View File

@ -55,8 +55,8 @@ namespace svdf {
namespace {
struct OpData {
int32 effective_scale_1_a;
int32 effective_scale_2_a;
int32_t effective_scale_1_a;
int32_t effective_scale_2_a;
// b versions of each scale are kept at int since the numbers are just the
// shift value - typically between [-32, 32].
int effective_scale_1_b;
@ -239,7 +239,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2);
// Validate Tensor Output:
// [0] = float/int8, {2, batch_size, num_units}
// [0] = float/int8_t, {2, batch_size, num_units}
TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE_EQ(context, NumDimensions(output), 2);

View File

@ -48,10 +48,10 @@ int32_t FloatToSymmetricQuantizedInt32(const float value, const float scale);
//
// There are several key flavors of quantization in TfLite:
// asymmetric symmetric per channel
// int8 | X | X | X |
// uint8 | X | X | |
// int16 | X | | |
// int32 | | X | X |
// int8_t | X | X | X |
// uint8_t | X | X | |
// int16_t | X | | |
// int32_t | | X | X |
//
// The per-op quantization spec can be found here:
// https://www.tensorflow.org/lite/performance/quantization_spec
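To make the table concrete: asymmetric quantization carries both a scale and a (possibly nonzero) zero point, while symmetric quantization pins the zero point at 0; a small sketch (helper names are mine):
#include <cstdint>
// Asymmetric: real = scale * (q - zero_point); the zero point may be nonzero,
// which is why int8_t/uint8_t activations get an "asymmetric" X in the table.
inline float DequantizeAsymmetric(int8_t q, float scale, int32_t zero_point) {
  return scale * static_cast<float>(static_cast<int32_t>(q) - zero_point);
}
// Symmetric: zero point fixed at 0, as used for int32_t biases and
// per-channel int8_t weights.
inline float DequantizeSymmetric(int32_t q, float scale) {
  return scale * static_cast<float>(q);
}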

View File

@ -584,7 +584,7 @@ TfLiteStatus SimpleStatefulOp::Prepare(TfLiteContext* context,
TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
// Make sure that the input is in uint8 with at least 1 data entry.
// Make sure that the input is in uint8_t with at least 1 data entry.
const TfLiteTensor* input = tflite::GetInput(context, node, kInputTensor);
if (input->type != kTfLiteUInt8) return kTfLiteError;
if (NumElements(input->dims) == 0) return kTfLiteError;
@ -925,8 +925,8 @@ TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
TfLiteTensor result = CreateTensor(dims, is_variable);
result.type = kTfLiteInt32;
result.data.i32 = const_cast<int32_t*>(quantized);
// Quantized int32 tensors always have a zero point of 0, since the range of
// int32 values is large, and because zero point costs extra cycles during
// Quantized int32_t tensors always have a zero point of 0, since the range of
// int32_t values is large, and because zero point costs extra cycles during
// processing.
result.params = {bias_scale, 0};
result.quantization = {kTfLiteAffineQuantization, nullptr};
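A minimal sketch of the convention described above, assuming the usual bias scale of input_scale * weights_scale; the helper name is mine and int32_t-range clamping is omitted:
#include <cmath>
#include <cstdint>
// Symmetric int32_t bias quantization: zero point is always 0 and the scale is
// conventionally input_scale * weights_scale, so the quantized bias adds
// directly onto the int32_t accumulator of the quantized conv/matmul.
inline int32_t QuantizeBiasSketch(float bias, float input_scale,
                                  float weights_scale) {
  return static_cast<int32_t>(
      std::round(bias / (input_scale * weights_scale)));
}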
@ -934,7 +934,7 @@ TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
return result;
}
// Quantizes int32 bias tensor with per-channel weights determined by input
// Quantizes int32_t bias tensor with per-channel weights determined by input
// scale multiplied by weight scale for each channel.
TfLiteTensor CreatePerChannelQuantizedBiasTensor(
const float* input, int32_t* quantized, TfLiteIntArray* dims,

View File

@ -164,7 +164,7 @@ TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
float weights_scale,
bool is_variable = false);
// Quantizes int32 bias tensor with per-channel weights determined by input
// Quantizes int32_t bias tensor with per-channel weights determined by input
// scale multiplied by weight scale for each channel.
TfLiteTensor CreatePerChannelQuantizedBiasTensor(
const float* input, int32_t* quantized, TfLiteIntArray* dims,

View File

@ -248,8 +248,8 @@ TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims,
result.type = kTfLiteInt32;
result.data.i32 = const_cast<int32_t*>(data);
result.dims = dims;
// Quantized int32 tensors always have a zero point of 0, since the range of
// int32 values is large, and because zero point costs extra cycles during
// Quantized int32_t tensors always have a zero point of 0, since the range of
// int32_t values is large, and because zero point costs extra cycles during
// processing.
result.params = {scale, 0};
result.allocation_type = kTfLiteMemNone;