Add int16x8 support for RESIZE_BILINEAR operator
parent 2e6397ec8b
commit 04b5fc6876
@@ -1662,14 +1662,14 @@ inline void ComputeInterpolationValues(const int32 value, const int32 scale_10,
   *upper_bound = std::min(*scaled_value / (1 << 10) + 1, input_size - 1);
 }
 
-// Same as above but takes int8 as input and output.
-inline void ResizeBilinear(const tflite::ResizeBilinearParams& op_params,
-                           const RuntimeShape& unextended_input_shape,
-                           const int8_t* input_data,
-                           const RuntimeShape& unextended_output_size_shape,
-                           const int32* output_size_data,
-                           const RuntimeShape& unextended_output_shape,
-                           int8_t* output_data) {
+// Same as above but doesn't use any floating-point for the resize
+template <typename T>
+inline void ResizeBilinearInteger(
+    const tflite::ResizeBilinearParams& op_params,
+    const RuntimeShape& unextended_input_shape, const T* input_data,
+    const RuntimeShape& unextended_output_size_shape,
+    const int32* output_size_data, const RuntimeShape& unextended_output_shape,
+    T* output_data) {
   // If half_pixel_centers is True, align_corners must be False.
   TFLITE_DCHECK(!op_params.half_pixel_centers || !op_params.align_corners);
   TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
@@ -1743,8 +1743,8 @@ inline void ResizeBilinear(const tflite::ResizeBilinearParams& op_params,
               (input_y - (1 << 10) * y0) * (input_x - (1 << 10) * x0);
           const int64_t output_20 =
               output_20_ll + output_20_lu + output_20_rl + output_20_ru;
-          const int8_t interpolation =
-              static_cast<int8_t>((output_20 + (1 << 19)) / (1 << 20));
+          const T interpolation =
+              static_cast<T>((output_20 + (1 << 19)) / (1 << 20));
           output_data[Offset(output_shape, b, y, x, c)] = interpolation;
         }
       }
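Note on the arithmetic being templated here: the kernel works in Q10 fixed point (coordinates scaled by 1 << 10), so each weighted corner term is a Q20 value, and `(output_20 + (1 << 19)) / (1 << 20)` is a round-to-nearest back to the integer domain. Below is a minimal, self-contained 1-D sketch of that rounding scheme; the helper name and the 1-D simplification are illustrative, not part of the kernel.

```cpp
#include <cstdint>
#include <iostream>

// Blend two samples in Q10 fixed point and round back to an integer, the same
// way the templated ResizeBilinearInteger rounds its 2x2 Q20 accumulation.
int64_t BlendFixedPoint(int64_t a, int64_t b, int32_t frac_10) {
  // frac_10 is the fractional position scaled by 1 << 10 (0..1024).
  // Each value * weight product carries two 10-bit factors (a Q20 number),
  // so adding 1 << 19 before dividing by 1 << 20 rounds to nearest.
  const int64_t blended_20 =
      a * ((1 << 10) - frac_10) * (1 << 10) + b * frac_10 * (1 << 10);
  return (blended_20 + (1 << 19)) / (1 << 20);
}

int main() {
  // Halfway between 3 and 6 rounds to 5, matching the int8/int16 test values.
  std::cout << BlendFixedPoint(3, 6, 512) << "\n";  // prints 5
}
```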
@@ -121,7 +121,7 @@ BuiltinOpResolver::BuiltinOpResolver() {
   AddBuiltin(BuiltinOperator_RESHAPE, Register_RESHAPE());
   AddBuiltin(BuiltinOperator_RESIZE_BILINEAR, Register_RESIZE_BILINEAR(),
              /* min_version = */ 1,
-             /* max_version = */ 3);
+             /* max_version = */ 4);
   AddBuiltin(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
              Register_RESIZE_NEAREST_NEIGHBOR(),
              /* min_version = */ 1,
@@ -279,7 +279,7 @@ BuiltinRefOpResolver::BuiltinRefOpResolver() {
   AddBuiltin(BuiltinOperator_RESHAPE, Register_RESHAPE());
   AddBuiltin(BuiltinOperator_RESIZE_BILINEAR, Register_RESIZE_BILINEAR_REF(),
              /* min_version = */ 1,
-             /* max_version = */ 3);
+             /* max_version = */ 4);
   AddBuiltin(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
              Register_RESIZE_NEAREST_NEIGHBOR_REF(),
              /* min_version = */ 1,
@@ -114,30 +114,32 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   }
 
   if (output->type == kTfLiteFloat32) {
-#define TF_LITE_RESIZE_BILINEAR(type, datatype)                              \
-  tflite::ResizeBilinearParams op_params;                                    \
-  op_params.align_corners = params->align_corners;                           \
-  op_params.half_pixel_centers = params->half_pixel_centers;                 \
-  type::ResizeBilinear(op_params, GetTensorShape(input),                     \
-                       GetTensorData<datatype>(input), GetTensorShape(size), \
-                       GetTensorData<int32>(size), GetTensorShape(output),   \
-                       GetTensorData<datatype>(output))
+#define TF_LITE_RESIZE_BILINEAR(type, opname, datatype)              \
+  tflite::ResizeBilinearParams op_params;                            \
+  op_params.align_corners = params->align_corners;                   \
+  op_params.half_pixel_centers = params->half_pixel_centers;         \
+  type::opname(op_params, GetTensorShape(input),                     \
+               GetTensorData<datatype>(input), GetTensorShape(size), \
+               GetTensorData<int32>(size), GetTensorShape(output),   \
+               GetTensorData<datatype>(output))
 
     if (kernel_type == kReference) {
-      TF_LITE_RESIZE_BILINEAR(reference_ops, float);
+      TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinear, float);
     }
     if (kernel_type == kGenericOptimized || kernel_type == kNeonOptimized) {
-      TF_LITE_RESIZE_BILINEAR(optimized_ops, float);
+      TF_LITE_RESIZE_BILINEAR(optimized_ops, ResizeBilinear, float);
     }
   } else if (output->type == kTfLiteUInt8) {
     if (kernel_type == kReference) {
-      TF_LITE_RESIZE_BILINEAR(reference_ops, uint8_t);
+      TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinear, uint8_t);
     }
     if (kernel_type == kGenericOptimized || kernel_type == kNeonOptimized) {
-      TF_LITE_RESIZE_BILINEAR(optimized_ops, uint8_t);
+      TF_LITE_RESIZE_BILINEAR(optimized_ops, ResizeBilinear, uint8_t);
     }
   } else if (output->type == kTfLiteInt8) {
-    TF_LITE_RESIZE_BILINEAR(reference_ops, int8_t);
+    TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinearInteger, int8_t);
+  } else if (output->type == kTfLiteInt16) {
+    TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinearInteger, int16_t);
 #undef TF_LITE_RESIZE_BILINEAR
   } else {
     context->ReportError(context, "Output type is %d, requires float.",
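For clarity, the new `kTfLiteInt16` branch expands the three-argument macro roughly as follows, i.e. it instantiates the templated reference kernel with `T = int16_t`. This is an approximate expansion inside `Eval`, reusing the surrounding local names (`params`, `input`, `size`, `output`), not a standalone snippet.

```cpp
// Approximate expansion of
//   TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinearInteger, int16_t);
tflite::ResizeBilinearParams op_params;
op_params.align_corners = params->align_corners;
op_params.half_pixel_centers = params->half_pixel_centers;
reference_ops::ResizeBilinearInteger(
    op_params, GetTensorShape(input), GetTensorData<int16_t>(input),
    GetTensorShape(size), GetTensorData<int32>(size), GetTensorShape(output),
    GetTensorData<int16_t>(output));
```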
@@ -104,6 +104,14 @@ TEST_P(ResizeBilinearOpTest, HorizontalResizeInt8) {
               ElementsAreArray(ArrayFloatNear({3, 5, 6})));
 }
 
+TEST_P(ResizeBilinearOpTest, HorizontalResizeInt16) {
+  ResizeBilinearOpModel m({TensorType_INT16, {1, 1, 2, 1}}, {1, 3}, GetParam());
+  m.SetInput<int16_t>({3, 6});
+  m.Invoke();
+  EXPECT_THAT(m.GetOutput<int16_t>(),
+              ElementsAreArray(ArrayFloatNear({3, 5, 6})));
+}
+
 TEST_P(ResizeBilinearOpTest, VerticalResize) {
   ResizeBilinearOpModel m({TensorType_FLOAT32, {1, 2, 1, 1}}, {3, 1},
                           GetParam());
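A quick plausibility check on the expected `{3, 5, 6}` output above, assuming the default coordinate mapping (neither `align_corners` nor `half_pixel_centers`), sketched in plain float arithmetic; the int16 kernel should land on the same values after fixed-point rounding.

```cpp
#include <algorithm>
#include <cstdio>

// Float reference for the 1x1x2x1 -> 1x1x3x1 horizontal resize above.
int main() {
  const float in[2] = {3.f, 6.f};
  const float scale = 2.f / 3.f;  // input_width / output_width
  for (int x = 0; x < 3; ++x) {
    const float ix = x * scale;
    const int x0 = static_cast<int>(ix);
    const int x1 = std::min(x0 + 1, 1);  // clamp to the last input column
    std::printf("%g ", in[x0] + (in[x1] - in[x0]) * (ix - x0));
  }
  std::printf("\n");  // prints: 3 5 6
}
```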
@@ -129,6 +137,14 @@ TEST_P(ResizeBilinearOpTest, VerticalResizeInt8) {
               ElementsAreArray(ArrayFloatNear({3, 7, 9})));
 }
 
+TEST_P(ResizeBilinearOpTest, VerticalResizeInt16) {
+  ResizeBilinearOpModel m({TensorType_INT16, {1, 2, 1, 1}}, {3, 1}, GetParam());
+  m.SetInput<int16_t>({3, 9});
+  m.Invoke();
+  EXPECT_THAT(m.GetOutput<int16_t>(),
+              ElementsAreArray(ArrayFloatNear({3, 7, 9})));
+}
+
 TEST_P(ResizeBilinearOpTest, TwoDimensionalResize) {
   ResizeBilinearOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}}, {3, 3},
                           GetParam());
@@ -172,6 +188,20 @@ TEST_P(ResizeBilinearOpTest, TwoDimensionalResizeInt8) {
                           })));
 }
 
+TEST_P(ResizeBilinearOpTest, TwoDimensionalResizeInt16) {
+  ResizeBilinearOpModel m({TensorType_INT16, {1, 2, 2, 1}}, {3, 3}, GetParam());
+  m.SetInput<int16_t>({
+      3, 6,  //
+      9, 12  //
+  });
+  m.Invoke();
+  EXPECT_THAT(m.GetOutput<int16_t>(), ElementsAreArray(ArrayFloatNear({
+                                          3, 5, 6,    //
+                                          7, 9, 10,   //
+                                          9, 11, 12,  //
+                                      })));
+}
+
 TEST_P(ResizeBilinearOpTest, TwoDimensionalResizeWithTwoBatches) {
   ResizeBilinearOpModel m({TensorType_FLOAT32, {2, 2, 2, 1}}, {3, 3},
                           GetParam());
@@ -295,6 +325,27 @@ TEST_P(ResizeBilinearOpTest, TwoDimensionalResizeWithTwoBatchesInt8) {
                           /*max_abs_error=*/1)));
 }
 
+TEST_P(ResizeBilinearOpTest, TwoDimensionalResizeWithTwoBatchesInt16) {
+  ResizeBilinearOpModel m({TensorType_INT16, {2, 2, 2, 1}}, {3, 3}, GetParam());
+  m.SetInput<int16_t>({
+      3, 6,   //
+      9, 12,  //
+      4, 10,  //
+      12, 16  //
+  });
+  m.Invoke();
+  EXPECT_THAT(m.GetOutput<int16_t>(), ElementsAreArray(ArrayFloatNear(
+                                          {
+                                              3, 5, 6,     //
+                                              7, 9, 10,    //
+                                              9, 11, 12,   //
+                                              4, 8, 10,    //
+                                              9, 12, 13,   //
+                                              12, 14, 16,  //
+                                          },
+                                          /*max_abs_error=*/1)));
+}
+
 TEST_P(ResizeBilinearOpTest, ThreeDimensionalResizeUInt8) {
   ResizeBilinearOpModel m({TensorType_UINT8, {1, 2, 2, 2}}, {3, 3}, GetParam());
   m.SetInput<uint8>({
@@ -327,6 +378,46 @@ TEST_P(ResizeBilinearOpTest, ThreeDimensionalResizeInt8) {
                           /*max_abs_error=*/1)));
 }
 
+TEST_P(ResizeBilinearOpTest, ThreeDimensionalResizeInt16) {
+  ResizeBilinearOpModel m({TensorType_INT16, {1, 2, 2, 2}}, {3, 3}, GetParam());
+  m.SetInput<int16_t>({
+      3, 4, 6, 10,     //
+      10, 12, 14, 16,  //
+  });
+  m.Invoke();
+  EXPECT_THAT(m.GetOutput<int16_t>(), ElementsAreArray(ArrayFloatNear(
+                                          {
+                                              3, 4, 5, 8, 6, 10,       //
+                                              7, 9, 10, 12, 11, 13,    //
+                                              10, 12, 12, 14, 14, 16,  //
+                                          },
+                                          /*max_abs_error=*/1)));
+}
+
+TEST_P(ResizeBilinearOpTest, HorizontalResizeExtremeValuesUInt8) {
+  ResizeBilinearOpModel m({TensorType_UINT8, {1, 1, 2, 1}}, {1, 3}, GetParam());
+  m.SetInput<uint8_t>({253, 255});
+  m.Invoke();
+  EXPECT_THAT(m.GetOutput<uint8>(),
+              ElementsAreArray(ArrayFloatNear({253, 254, 255})));
+}
+
+TEST_P(ResizeBilinearOpTest, HorizontalResizeExtremeValuesInt8) {
+  ResizeBilinearOpModel m({TensorType_INT8, {1, 1, 2, 1}}, {1, 3}, GetParam());
+  m.SetInput<int8_t>({125, 127});
+  m.Invoke();
+  EXPECT_THAT(m.GetOutput<int8_t>(),
+              ElementsAreArray(ArrayFloatNear({125, 126, 127})));
+}
+
+TEST_P(ResizeBilinearOpTest, HorizontalResizeExtremeValuesInt16) {
+  ResizeBilinearOpModel m({TensorType_INT16, {1, 1, 2, 1}}, {1, 3}, GetParam());
+  m.SetInput<int16_t>({32765, 32767});
+  m.Invoke();
+  EXPECT_THAT(m.GetOutput<int16_t>(),
+              ElementsAreArray(ArrayFloatNear({32765, 32766, 32767})));
+}
+
 INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpTest, ResizeBilinearOpTest,
                          testing::Values(TestType::kConst, TestType::kDynamic));
@@ -873,12 +873,6 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
       property.version = 1;
       break;
     case BuiltinOperator_RESIZE_BILINEAR:
-      property.inputs = {{0, {}}};
-      property.outputs = {{0, {}}};
-      property.restrict_same_input_output_scale = true;
-      property.version = 2;
-      property.quantizable_int16 = false;
-      break;
     case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
      property.inputs = {{0, {}}};
      property.outputs = {{0, {}}};
@@ -399,7 +399,9 @@ int GetBuiltinOperatorVersion(const OpSignature& op_sig) {
       }
       return 1;
     case BuiltinOperator_RESIZE_BILINEAR:
-      if (op_sig.options.resize.half_pixel_centers) {
+      if (op_sig.input_types.at(0) == TensorType_INT16) {
+        return 4;
+      } else if (op_sig.options.resize.half_pixel_centers) {
         return 3;
       } else if (op_sig.input_types.at(0) == TensorType_INT8) {
         return 2;
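Because the `TensorType_INT16` check comes before the `half_pixel_centers` check, an int16 model always reports version 4, regardless of the half-pixel-centers flag. A minimal standalone sketch of that branch ordering; the enum and function name below are hypothetical stand-ins, not the real `OpSignature` API.

```cpp
#include <cassert>

enum class TensorType { kInt8, kInt16, kFloat32 };

// Hypothetical stand-in mirroring the branch order of the real switch case.
int GetResizeBilinearVersion(TensorType input_type, bool half_pixel_centers) {
  if (input_type == TensorType::kInt16) {
    return 4;  // checked first: int16 wins even with half-pixel centers
  } else if (half_pixel_centers) {
    return 3;  // only reached for non-int16 inputs
  } else if (input_type == TensorType::kInt8) {
    return 2;
  }
  return 1;
}

int main() {
  assert(GetResizeBilinearVersion(TensorType::kInt16, true) == 4);
  assert(GetResizeBilinearVersion(TensorType::kFloat32, true) == 3);
  assert(GetResizeBilinearVersion(TensorType::kInt8, false) == 2);
  assert(GetResizeBilinearVersion(TensorType::kFloat32, false) == 1);
}
```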
@@ -691,6 +691,14 @@ TEST(OpVersionTest, VersioningResizeBilinearTest) {
   fake_op_sig.options.resize.half_pixel_centers = true;
   EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
 
+  // int16 input is version 4.
+  fake_op_sig = {
+      .op = BuiltinOperator_RESIZE_BILINEAR,
+      .input_types = std::vector<TensorType>{TensorType_INT16,
+                                             TensorType_INT32},
+      .output_types = std::vector<TensorType>{TensorType_INT16},
+  };
+  EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
 }
 
 TEST(OpVersionTest, VersioningResizeNearestNeighborTest) {
   // Default.
@@ -189,6 +189,7 @@ std::string FindMinimumRuntimeVersionForOp(tflite::BuiltinOperator op_code,
               {{BuiltinOperator_RESIZE_BILINEAR, 1}, "1.7.0"},
               {{BuiltinOperator_RESIZE_BILINEAR, 2}, "1.14.0"},
               {{BuiltinOperator_RESIZE_BILINEAR, 3}, "2.2.0"},
+              {{BuiltinOperator_RESIZE_BILINEAR, 4}, kPendingReleaseVersion},
               {{BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, 1}, "1.13.1"},
               {{BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, 2}, "1.14.0"},
               {{BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, 3}, "2.3.0"},
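The new entry records that RESIZE_BILINEAR version 4 has no released runtime yet (`kPendingReleaseVersion`); it gets a concrete version string once a release is cut. A small sketch of how such a {builtin op, op version} → minimum-runtime lookup behaves; the map contents and names below are illustrative, not the real table.

```cpp
#include <iostream>
#include <map>
#include <string>
#include <utility>

int main() {
  // Illustrative subset of the table touched by this hunk.
  const std::map<std::pair<std::string, int>, std::string> min_runtime = {
      {{"RESIZE_BILINEAR", 1}, "1.7.0"},
      {{"RESIZE_BILINEAR", 2}, "1.14.0"},
      {{"RESIZE_BILINEAR", 3}, "2.2.0"},
      {{"RESIZE_BILINEAR", 4}, "pending"},  // stands in for kPendingReleaseVersion
  };
  // A model using the int16 kernel (op version 4) cannot claim compatibility
  // with any released runtime until the placeholder is replaced.
  std::cout << min_runtime.at({"RESIZE_BILINEAR", 4}) << "\n";
}
```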