Merge pull request #43957 from Tessil:toupstream/16x8_resize_bilinear

PiperOrigin-RevId: 343969608
Change-Id: Id23a01fead16266d216464ab399eabe6d5162501
TensorFlower Gardener 2020-11-23 18:58:08 -08:00
commit 5bc72656b7
10 changed files with 169 additions and 44 deletions

View File

@@ -1659,14 +1659,14 @@ inline void ComputeInterpolationValues(const int32 value, const int32 scale_10,
std::min((*scaled_value + (1 << 10) - 1) / (1 << 10), input_size - 1);
}
// Same as above but takes int8 as input and output.
inline void ResizeBilinear(const tflite::ResizeBilinearParams& op_params,
const RuntimeShape& unextended_input_shape,
const int8_t* input_data,
const RuntimeShape& unextended_output_size_shape,
const int32* output_size_data,
const RuntimeShape& unextended_output_shape,
int8_t* output_data) {
// Same as above but doesn't use any floating-point for the resize
template <typename T>
inline void ResizeBilinearInteger(
const tflite::ResizeBilinearParams& op_params,
const RuntimeShape& unextended_input_shape, const T* input_data,
const RuntimeShape& unextended_output_size_shape,
const int32* output_size_data, const RuntimeShape& unextended_output_shape,
T* output_data) {
// If half_pixel_centers is True, align_corners must be False.
TFLITE_DCHECK(!op_params.half_pixel_centers || !op_params.align_corners);
TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
@@ -1741,8 +1741,8 @@ inline void ResizeBilinear(const tflite::ResizeBilinearParams& op_params,
const int64_t output_20 =
output_20_ll + output_20_lu + output_20_rl + output_20_ru;
const int64_t round = (output_20 > 0) ? (1 << 19) : -(1 << 19);
const int8_t interpolation =
static_cast<int8_t>((output_20 + round) / (1 << 20));
const T interpolation =
static_cast<T>((output_20 + round) / (1 << 20));
output_data[Offset(output_shape, b, y, x, c)] = interpolation;
}
}
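The arithmetic above never leaves fixed point: interpolation coordinates are carried in Q10 (the (1 << 10) terms in ComputeInterpolationValues), the four weighted taps are accumulated in Q20, and a sign-aware (1 << 19) offset rounds the result before the final shift back to the output type. A minimal standalone sketch of that rounding scheme, using hypothetical names and a 1-D two-tap case rather than the kernel's four-tap 2-D form:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical 1-D illustration of the Q10/Q20 scheme used by
    // ResizeBilinearInteger: two taps weighted by a Q10 fraction,
    // accumulated with 64-bit headroom, then rounded away from zero.
    template <typename T>
    T InterpolateTwoTapsQ10(T left, T right, int32_t frac_10) {
      const int64_t acc_10 =
          static_cast<int64_t>(left) * ((1 << 10) - frac_10) +
          static_cast<int64_t>(right) * frac_10;
      // Promote to Q20 to mirror the kernel, which multiplies two Q10
      // weights per tap, then apply the same sign-aware rounding.
      const int64_t acc_20 = acc_10 * (1 << 10);
      const int64_t round = (acc_20 > 0) ? (1 << 19) : -(1 << 19);
      return static_cast<T>((acc_20 + round) / (1 << 20));
    }

    int main() {
      // Halfway (frac = 512/1024) between 3 and 6 rounds to 5, consistent
      // with the {3, 6} -> {3, 5, 6} expectations in the kernel tests below.
      std::printf("%d\n", InterpolateTwoTapsQ10<int16_t>(3, 6, 512));
      return 0;
    }

The int64_t accumulator gives enough headroom for the same template to serve both int8_t and int16_t outputs without overflow.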

View File

@@ -289,12 +289,12 @@ TEST(ResizeBilinear, TestResizeBilinearHalfPixelCenters_2x2to4x4) {
}
}
TEST(ResizeBilinear, TestResizeBilinearHalfPixelCenters_2x2to4x6_Int8) {
template <typename T>
void TestResizeBilinearHalfPixelCenters_2x2to4x6() {
// Input: 2x2
RuntimeShape input_dims_inference({1, 2, 2, 1});
// clang-format off
std::vector<int8> input_data = {127, -128,
64, 0};
std::vector<T> input_data = {127, -128, 64, 0};
// clang-format on
// Output: 4x6
@@ -302,7 +302,7 @@ TEST(ResizeBilinear, TestResizeBilinearHalfPixelCenters_2x2to4x6_Int8) {
// Initialize the output data with something other than zero, so we can catch
// issues with kernels failing to initialize the output.
const int output_buffer_size = output_dims_inference.FlatSize();
std::vector<int8> output_data(output_buffer_size, 3);
std::vector<T> output_data(output_buffer_size, 3);
RuntimeShape output_size_dims({1, 1, 1, 2});
std::vector<int32> output_size_data = {4, 6};
@@ -312,11 +312,11 @@ TEST(ResizeBilinear, TestResizeBilinearHalfPixelCenters_2x2to4x6_Int8) {
op_params.half_pixel_centers = false;
// Test with half_pixel_centers = false.
reference_ops::ResizeBilinear(
reference_ops::ResizeBilinearInteger(
op_params, input_dims_inference, input_data.data(), output_size_dims,
output_size_data.data(), output_dims_inference, output_data.data());
// clang-format off
std::vector<int8> reference_half_pixel_centers_false =
std::vector<T> reference_half_pixel_centers_false =
{ 127, 42, -43, -128, -128, -128,
96, 42, -11, -64, -64, -64,
64, 43, 21, 0, 0, 0,
@@ -329,17 +329,17 @@ TEST(ResizeBilinear, TestResizeBilinearHalfPixelCenters_2x2to4x6_Int8) {
// clang-format on
for (int i = 0; i < output_buffer_size; i++) {
EXPECT_EQ(static_cast<int8>(output_data[i]),
static_cast<int8>(reference_half_pixel_centers_false[i]));
EXPECT_EQ(static_cast<T>(output_data[i]),
static_cast<T>(reference_half_pixel_centers_false[i]));
}
// Test with half_pixel_centers = true.
op_params.half_pixel_centers = true;
reference_ops::ResizeBilinear(
reference_ops::ResizeBilinearInteger(
op_params, input_dims_inference, input_data.data(), output_size_dims,
output_size_data.data(), output_dims_inference, output_data.data());
// clang-format off
std::vector<int8> reference_half_pixel_centers_true =
std::vector<T> reference_half_pixel_centers_true =
{ 127, 127, 42, -43, -128, -128,
111, 111, 42, -27, -96, -96,
80, 80, 43, 5, -32, -32,
@@ -352,10 +352,18 @@ TEST(ResizeBilinear, TestResizeBilinearHalfPixelCenters_2x2to4x6_Int8) {
// clang-format on
for (int i = 0; i < output_buffer_size; i++) {
EXPECT_EQ(static_cast<int8>(output_data[i]),
static_cast<int8>(reference_half_pixel_centers_true[i]));
EXPECT_EQ(static_cast<T>(output_data[i]),
static_cast<T>(reference_half_pixel_centers_true[i]));
}
}
TEST(ResizeBilinear, TestResizeBilinearHalfPixelCenters_2x2to4x6_Int8) {
TestResizeBilinearHalfPixelCenters_2x2to4x6<int8_t>();
}
TEST(ResizeBilinear, TestResizeBilinearHalfPixelCenters_2x2to4x6_Int16) {
TestResizeBilinearHalfPixelCenters_2x2to4x6<int16_t>();
}
} // namespace
} // namespace tflite

View File

@@ -121,7 +121,7 @@ BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_RESHAPE, Register_RESHAPE());
AddBuiltin(BuiltinOperator_RESIZE_BILINEAR, Register_RESIZE_BILINEAR(),
/* min_version = */ 1,
/* max_version = */ 3);
/* max_version = */ 4);
AddBuiltin(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
Register_RESIZE_NEAREST_NEIGHBOR(),
/* min_version = */ 1,

View File

@@ -286,7 +286,7 @@ BuiltinRefOpResolver::BuiltinRefOpResolver() {
AddBuiltin(BuiltinOperator_RESHAPE, Register_RESHAPE());
AddBuiltin(BuiltinOperator_RESIZE_BILINEAR, Register_RESIZE_BILINEAR_REF(),
/* min_version = */ 1,
/* max_version = */ 3);
/* max_version = */ 4);
AddBuiltin(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
Register_RESIZE_NEAREST_NEIGHBOR_REF(),
/* min_version = */ 1,

View File

@@ -114,30 +114,32 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
}
if (output->type == kTfLiteFloat32) {
#define TF_LITE_RESIZE_BILINEAR(type, datatype) \
tflite::ResizeBilinearParams op_params; \
op_params.align_corners = params->align_corners; \
op_params.half_pixel_centers = params->half_pixel_centers; \
type::ResizeBilinear(op_params, GetTensorShape(input), \
GetTensorData<datatype>(input), GetTensorShape(size), \
GetTensorData<int32>(size), GetTensorShape(output), \
GetTensorData<datatype>(output))
#define TF_LITE_RESIZE_BILINEAR(type, opname, datatype) \
tflite::ResizeBilinearParams op_params; \
op_params.align_corners = params->align_corners; \
op_params.half_pixel_centers = params->half_pixel_centers; \
type::opname(op_params, GetTensorShape(input), \
GetTensorData<datatype>(input), GetTensorShape(size), \
GetTensorData<int32>(size), GetTensorShape(output), \
GetTensorData<datatype>(output))
if (kernel_type == kReference) {
TF_LITE_RESIZE_BILINEAR(reference_ops, float);
TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinear, float);
}
if (kernel_type == kGenericOptimized || kernel_type == kNeonOptimized) {
TF_LITE_RESIZE_BILINEAR(optimized_ops, float);
TF_LITE_RESIZE_BILINEAR(optimized_ops, ResizeBilinear, float);
}
} else if (output->type == kTfLiteUInt8) {
if (kernel_type == kReference) {
TF_LITE_RESIZE_BILINEAR(reference_ops, uint8_t);
TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinear, uint8_t);
}
if (kernel_type == kGenericOptimized || kernel_type == kNeonOptimized) {
TF_LITE_RESIZE_BILINEAR(optimized_ops, uint8_t);
TF_LITE_RESIZE_BILINEAR(optimized_ops, ResizeBilinear, uint8_t);
}
} else if (output->type == kTfLiteInt8) {
TF_LITE_RESIZE_BILINEAR(reference_ops, int8_t);
TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinearInteger, int8_t);
} else if (output->type == kTfLiteInt16) {
TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinearInteger, int16_t);
#undef TF_LITE_RESIZE_BILINEAR
} else {
context->ReportError(context, "Output type is %d, requires float.",
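With the routine name now a macro argument, the same dispatch block covers the float and uint8 ResizeBilinear paths as well as the new integer-only ResizeBilinearInteger paths for int8 and int16. For orientation, a rough hand expansion of the int16 branch, relying on the Eval locals params, input, size, and output rather than being literal preprocessor output:

    // Approximate expansion of
    // TF_LITE_RESIZE_BILINEAR(reference_ops, ResizeBilinearInteger, int16_t);
    // only meaningful inside Eval, where params, input, size and output live.
    tflite::ResizeBilinearParams op_params;
    op_params.align_corners = params->align_corners;
    op_params.half_pixel_centers = params->half_pixel_centers;
    reference_ops::ResizeBilinearInteger(
        op_params, GetTensorShape(input), GetTensorData<int16_t>(input),
        GetTensorShape(size), GetTensorData<int32>(size),
        GetTensorShape(output), GetTensorData<int16_t>(output));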

View File

@@ -104,6 +104,17 @@ TEST_P(ResizeBilinearOpTest, HorizontalResizeInt8) {
ElementsAreArray(ArrayFloatNear({3, 5, 6})));
}
TEST_P(ResizeBilinearOpTest, HorizontalResizeInt16) {
if (SingleOpModel::GetForceUseNnapi()) {
return;
}
ResizeBilinearOpModel m({TensorType_INT16, {1, 1, 2, 1}}, {1, 3}, GetParam());
m.SetInput<int16_t>({3, 6});
m.Invoke();
EXPECT_THAT(m.GetOutput<int16_t>(),
ElementsAreArray(ArrayFloatNear({3, 5, 6})));
}
TEST_P(ResizeBilinearOpTest, VerticalResize) {
ResizeBilinearOpModel m({TensorType_FLOAT32, {1, 2, 1, 1}}, {3, 1},
GetParam());
@@ -129,6 +140,17 @@ TEST_P(ResizeBilinearOpTest, VerticalResizeInt8) {
ElementsAreArray(ArrayFloatNear({3, 7, 9})));
}
TEST_P(ResizeBilinearOpTest, VerticalResizeInt16) {
if (SingleOpModel::GetForceUseNnapi()) {
return;
}
ResizeBilinearOpModel m({TensorType_INT16, {1, 2, 1, 1}}, {3, 1}, GetParam());
m.SetInput<int16_t>({3, 9});
m.Invoke();
EXPECT_THAT(m.GetOutput<int16_t>(),
ElementsAreArray(ArrayFloatNear({3, 7, 9})));
}
TEST_P(ResizeBilinearOpTest, TwoDimensionalResize) {
ResizeBilinearOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}}, {3, 3},
GetParam());
@@ -172,6 +194,23 @@ TEST_P(ResizeBilinearOpTest, TwoDimensionalResizeInt8) {
})));
}
TEST_P(ResizeBilinearOpTest, TwoDimensionalResizeInt16) {
if (SingleOpModel::GetForceUseNnapi()) {
return;
}
ResizeBilinearOpModel m({TensorType_INT16, {1, 2, 2, 1}}, {3, 3}, GetParam());
m.SetInput<int16_t>({
3, 6, //
9, 12 //
});
m.Invoke();
EXPECT_THAT(m.GetOutput<int16_t>(), ElementsAreArray(ArrayFloatNear({
3, 5, 6, //
7, 9, 10, //
9, 11, 12, //
})));
}
TEST_P(ResizeBilinearOpTest, TwoDimensionalResizeWithTwoBatches) {
ResizeBilinearOpModel m({TensorType_FLOAT32, {2, 2, 2, 1}}, {3, 3},
GetParam());
@@ -295,6 +334,30 @@ TEST_P(ResizeBilinearOpTest, TwoDimensionalResizeWithTwoBatchesInt8) {
/*max_abs_error=*/1)));
}
TEST_P(ResizeBilinearOpTest, TwoDimensionalResizeWithTwoBatchesInt16) {
if (SingleOpModel::GetForceUseNnapi()) {
return;
}
ResizeBilinearOpModel m({TensorType_INT16, {2, 2, 2, 1}}, {3, 3}, GetParam());
m.SetInput<int16_t>({
3, 6, //
9, 12, //
4, 10, //
12, 16 //
});
m.Invoke();
EXPECT_THAT(m.GetOutput<int16_t>(), ElementsAreArray(ArrayFloatNear(
{
3, 5, 6, //
7, 9, 10, //
9, 11, 12, //
4, 8, 10, //
9, 12, 13, //
12, 14, 16, //
},
/*max_abs_error=*/1)));
}
TEST_P(ResizeBilinearOpTest, ThreeDimensionalResizeUInt8) {
ResizeBilinearOpModel m({TensorType_UINT8, {1, 2, 2, 2}}, {3, 3}, GetParam());
m.SetInput<uint8>({
@@ -327,6 +390,52 @@ TEST_P(ResizeBilinearOpTest, ThreeDimensionalResizeInt8) {
/*max_abs_error=*/1)));
}
TEST_P(ResizeBilinearOpTest, ThreeDimensionalResizeInt16) {
if (SingleOpModel::GetForceUseNnapi()) {
return;
}
ResizeBilinearOpModel m({TensorType_INT16, {1, 2, 2, 2}}, {3, 3}, GetParam());
m.SetInput<int16_t>({
3, 4, 6, 10, //
10, 12, 14, 16, //
});
m.Invoke();
EXPECT_THAT(m.GetOutput<int16_t>(), ElementsAreArray(ArrayFloatNear(
{
3, 4, 5, 8, 6, 10, //
7, 9, 10, 12, 11, 13, //
10, 12, 12, 14, 14, 16, //
},
/*max_abs_error=*/1)));
}
TEST_P(ResizeBilinearOpTest, HorizontalResizeExtremeValuesUInt8) {
ResizeBilinearOpModel m({TensorType_UINT8, {1, 1, 2, 1}}, {1, 3}, GetParam());
m.SetInput<uint8_t>({253, 255});
m.Invoke();
EXPECT_THAT(m.GetOutput<uint8>(),
ElementsAreArray(ArrayFloatNear({253, 254, 255})));
}
TEST_P(ResizeBilinearOpTest, HorizontalResizeExtremeValuesInt8) {
ResizeBilinearOpModel m({TensorType_INT8, {1, 1, 2, 1}}, {1, 3}, GetParam());
m.SetInput<int8_t>({125, 127});
m.Invoke();
EXPECT_THAT(m.GetOutput<int8_t>(),
ElementsAreArray(ArrayFloatNear({125, 126, 127})));
}
TEST_P(ResizeBilinearOpTest, HorizontalResizeExtremeValuesInt16) {
if (SingleOpModel::GetForceUseNnapi()) {
return;
}
ResizeBilinearOpModel m({TensorType_INT16, {1, 1, 2, 1}}, {1, 3}, GetParam());
m.SetInput<int16_t>({32765, 32767});
m.Invoke();
EXPECT_THAT(m.GetOutput<int16_t>(),
ElementsAreArray(ArrayFloatNear({32765, 32766, 32767})));
}
INSTANTIATE_TEST_SUITE_P(ResizeBilinearOpTest, ResizeBilinearOpTest,
testing::Values(TestType::kConst, TestType::kDynamic));

View File

@@ -879,12 +879,6 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
property.version = 1;
break;
case BuiltinOperator_RESIZE_BILINEAR:
property.inputs = {{0, {}}};
property.outputs = {{0, {}}};
property.restrict_same_input_output_scale = true;
property.version = 2;
property.quantizable_int16 = false;
break;
case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
property.inputs = {{0, {}}};
property.outputs = {{0, {}}};

View File

@@ -407,7 +407,9 @@ int GetBuiltinOperatorVersion(const OpSignature& op_sig) {
}
return 1;
case BuiltinOperator_RESIZE_BILINEAR:
if (op_sig.options.resize.half_pixel_centers) {
if (op_sig.input_types.at(0) == TensorType_INT16) {
return 4;
} else if (op_sig.options.resize.half_pixel_centers) {
return 3;
} else if (op_sig.input_types.at(0) == TensorType_INT8) {
return 2;

View File

@@ -705,6 +705,15 @@ TEST(OpVersionTest, VersioningResizeBilinearTest) {
fake_op_sig.options.resize.half_pixel_centers = true;
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
// int16 input is version 4.
fake_op_sig = {
.op = BuiltinOperator_RESIZE_BILINEAR,
.input_types =
std::vector<TensorType>{TensorType_INT16, TensorType_INT32},
.output_types = std::vector<TensorType>{TensorType_INT16},
};
EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 4);
}
TEST(OpVersionTest, VersioningResizeNearestNeighborTest) {
// Default.

View File

@@ -198,6 +198,7 @@ std::string FindMinimumRuntimeVersionForOp(tflite::BuiltinOperator op_code,
{{BuiltinOperator_RESIZE_BILINEAR, 1}, "1.7.0"},
{{BuiltinOperator_RESIZE_BILINEAR, 2}, "1.14.0"},
{{BuiltinOperator_RESIZE_BILINEAR, 3}, "2.2.0"},
{{BuiltinOperator_RESIZE_BILINEAR, 4}, kPendingReleaseVersion},
{{BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, 1}, "1.13.1"},
{{BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, 2}, "1.14.0"},
{{BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, 3}, "2.3.0"},
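The runtime-version table gains a matching row, so a minimum-runtime lookup for the new version resolves to the pending-release placeholder until the next TensorFlow Lite release is tagged. A usage sketch, assuming the truncated second parameter of FindMinimumRuntimeVersionForOp above is the op version:

    // Assumed call shape based on the truncated signature above; with the new
    // table entry, {RESIZE_BILINEAR, 4} maps to kPendingReleaseVersion.
    const std::string min_runtime = tflite::FindMinimumRuntimeVersionForOp(
        tflite::BuiltinOperator_RESIZE_BILINEAR, /*op_version=*/4);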