Merge pull request #39988 from wwwind:16x8_mean

PiperOrigin-RevId: 322209002
TensorFlower Gardener 2020-07-20 13:12:50 -07:00
commit ecaf86d96c
11 changed files with 201 additions and 133 deletions


@@ -306,7 +306,9 @@ QuantizeOpTest/INT8,30
 -ConstInt8MeanOpTest.QuantizedDifferentScale
 ConstUint8(Max|Min)OpTest/.+,29
 ConstUint8(Mean)OpTest/.+
-ConstInt8(Mean|Max|Min)OpTest/.+,29
+ConstInt8(Max|Min)OpTest/.+,29
+-ConstMeanOpTest.*/.+
+-MeanOpTestQuantized.*/.+
 ConstFloat(Sum|Prod|Max|Min)OpTest/NotKeepDims,29
 ConstFloat(Sum|Prod|Max|Min)OpTest/KeepDims,29
 ConstFloat(Mean|Any)OpTest/NotKeepDims


@@ -20,11 +20,12 @@ limitations under the License.
 namespace tflite {
 namespace reference_integer_ops {
 
+template <typename integer_type>
 inline void Mean(const tflite::MeanParams& op_params, int32_t multiplier,
                  int32_t shift, const RuntimeShape& unextended_input_shape,
-                 const int8_t* input_data, int32 input_zero_point,
+                 const integer_type* input_data, int32 input_zero_point,
                  const RuntimeShape& unextended_output_shape,
-                 int8_t* output_data, int32 output_zero_point) {
+                 integer_type* output_data, int32 output_zero_point) {
   // Current implementation only supports dimension equals 4 and simultaneous
   // reduction over width and height.
   TFLITE_CHECK_EQ(unextended_input_shape.DimensionsCount(), 4);
@@ -47,8 +48,8 @@ inline void Mean(const tflite::MeanParams& op_params, int32_t multiplier,
   TFLITE_CHECK_EQ(output_height, 1);
   TFLITE_CHECK_EQ(output_width, 1);
 
-  static constexpr int32_t kMinInt8 = std::numeric_limits<int8_t>::min();
-  static constexpr int32_t kMaxInt8 = std::numeric_limits<int8_t>::max();
+  static constexpr int32_t kMinInt = std::numeric_limits<integer_type>::min();
+  static constexpr int32_t kMaxInt = std::numeric_limits<integer_type>::max();
 
   for (int out_b = 0; out_b < output_batch; ++out_b) {
     for (int out_d = 0; out_d < output_depth; ++out_d) {
@@ -63,9 +64,9 @@ inline void Mean(const tflite::MeanParams& op_params, int32_t multiplier,
       acc = acc > 0 ? (acc + num_elements_in_axis / 2) / num_elements_in_axis
                     : (acc - num_elements_in_axis / 2) / num_elements_in_axis;
       acc += output_zero_point;
-      acc = std::min(std::max(acc, kMinInt8), kMaxInt8);
+      acc = std::min(std::max(acc, kMinInt), kMaxInt);
       output_data[Offset(output_shape, out_b, 0, 0, out_d)] =
-          static_cast<int8_t>(acc);
+          static_cast<integer_type>(acc);
     }
   }
 }
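The per-element arithmetic the template now shares between int8 and int16 is easy to check in isolation. Below is a minimal, self-contained sketch of the round-half-away-from-zero division and clamp shown above (illustrative names, not the TFLite kernel; the multiplier/shift rescaling the real kernel applies to the accumulator before this step is omitted):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <limits>

// Sum over the reduced axis arrives in acc; divide with round-half-away-from-
// zero, re-center on the output zero point, then clamp to the output type.
template <typename integer_type>
integer_type MeanOfAxisSum(int32_t acc, int32_t num_elements_in_axis,
                           int32_t output_zero_point) {
  static constexpr int32_t kMin = std::numeric_limits<integer_type>::min();
  static constexpr int32_t kMax = std::numeric_limits<integer_type>::max();
  acc = acc > 0 ? (acc + num_elements_in_axis / 2) / num_elements_in_axis
                : (acc - num_elements_in_axis / 2) / num_elements_in_axis;
  acc += output_zero_point;
  return static_cast<integer_type>(std::min(std::max(acc, kMin), kMax));
}

int main() {
  // 7 / 2 rounds away from zero to 4; 4 + 125 = 129 saturates to 127 in int8
  // but fits in int16, which is exactly what the templated limits buy.
  std::printf("%d\n", MeanOfAxisSum<int8_t>(7, 2, 125));   // 127 (clamped)
  std::printf("%d\n", MeanOfAxisSum<int16_t>(7, 2, 125));  // 129
  std::printf("%d\n", MeanOfAxisSum<int8_t>(-7, 2, 0));    // -4 (away from 0)
  return 0;
}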


@@ -321,9 +321,12 @@ inline bool QuantizedMeanOrSum(const T* input_data, int32 input_zero_point,
                                const int num_axis_dimensions, bool keep_dims,
                                int* temp_index, int* resolved_axis, U* temp_sum,
                                bool compute_sum) {
-  const bool uint8_case = std::is_same<T, int8_t>::value;
+  const bool uint8_case = std::is_same<T, uint8_t>::value;
+  const bool int16_case = std::is_same<T, int16_t>::value;
   if (uint8_case) {
     ruy::profiler::ScopeLabel label(compute_sum ? "Sum/Uint8" : "Mean/Uint8");
+  } else if (int16_case) {
+    ruy::profiler::ScopeLabel label(compute_sum ? "Sum/Int16" : "Mean/Int16");
   } else {
     ruy::profiler::ScopeLabel label(compute_sum ? "Sum/Int8" : "Mean/Int8");
   }
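Note the first changed line: uint8_case previously tested std::is_same<T, int8_t>, so the Uint8 label could never be selected; the hunk corrects it to uint8_t and adds an Int16 label. A hedged sketch of the same type-to-label mapping factored into a helper (illustrative only, not part of the patch):

#include <cstdint>
#include <type_traits>

// Map the element type T to the profiler label the branch above would pick.
template <typename T>
const char* MeanProfilerLabel(bool compute_sum) {
  if (std::is_same<T, uint8_t>::value) {
    return compute_sum ? "Sum/Uint8" : "Mean/Uint8";
  } else if (std::is_same<T, int16_t>::value) {
    return compute_sum ? "Sum/Int16" : "Mean/Int16";
  }
  return compute_sum ? "Sum/Int8" : "Mean/Int8";
}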


@@ -196,9 +196,8 @@ TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node,
       temp_sum->type = kTfLiteInt64;
       break;
     case kTfLiteUInt8:
-      temp_sum->type = kTfLiteInt32;
-      break;
     case kTfLiteInt8:
+    case kTfLiteInt16:
       temp_sum->type = kTfLiteInt32;
       break;
     case kTfLiteBool:
@@ -245,7 +244,9 @@ TfLiteStatus PrepareMeanOrSum(TfLiteContext* context, TfLiteNode* node) {
 
   // reduce_mean requires a buffer to store intermediate sum result.
   OpContext op_context(context, node);
-  if (op_context.input->type == kTfLiteInt8) {
+  if (op_context.input->type == kTfLiteInt8 ||
+      op_context.input->type == kTfLiteUInt8 ||
+      op_context.input->type == kTfLiteInt16) {
     const double real_multiplier =
         static_cast<double>(op_context.input->params.scale) /
         static_cast<double>(op_context.output->params.scale);
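The widened check routes uint8 and int16 inputs through the same rescale path: the input/output scale ratio computed here is folded into the integer multiplier and shift (data->multiplier and data->shift) that reference_integer_ops::Mean consumes below. A sketch of that folding into a Q31 multiplier plus power-of-two exponent, assuming the conventional representation (TFLite ships its own QuantizeMultiplier helper; this is only the idea):

#include <cmath>
#include <cstdint>

// Sketch: express real_multiplier as q * 2^shift with q a Q31 fixed-point
// value in [0.5, 1.0), i.e. an int32 in [2^30, 2^31).
void QuantizeMultiplierSketch(double real_multiplier, int32_t* multiplier,
                              int* shift) {
  if (real_multiplier == 0.0) {
    *multiplier = 0;
    *shift = 0;
    return;
  }
  const double q = std::frexp(real_multiplier, shift);  // q in [0.5, 1)
  int64_t q_fixed = static_cast<int64_t>(std::round(q * (1LL << 31)));
  if (q_fixed == (1LL << 31)) {  // q rounded up to 1.0; renormalize.
    q_fixed /= 2;
    ++*shift;
  }
  *multiplier = static_cast<int32_t>(q_fixed);
}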
@@ -273,6 +274,69 @@ void ResolveAxis(const int* axis_data, int axis_count,
   }
 }
 
+template <typename integer_type>
+TfLiteStatus EvalMeanReferenceOps(TfLiteContext* context,
+                                  const OpContext& op_context, int num_axis,
+                                  OpData* data, TfLiteTensor* temp_index,
+                                  TfLiteTensor* resolved_axis,
+                                  TfLiteTensor* temp_sum) {
+  tflite::MeanParams op_params;
+  op_params.axis_count = num_axis;
+  ResolveAxis(GetTensorData<int>(op_context.axis), num_axis, &op_params);
+  const TfLiteTensor* input = op_context.input;
+  // TODO(b/139102329): Handle all the cases in the combined reference
+  // method.
+  if (op_context.params->keep_dims && NumDimensions(input) == 4 &&
+      op_params.axis_count == 2 &&
+      ((op_params.axis[0] == 1 && op_params.axis[1] == 2) ||
+       (op_params.axis[0] == 2 && op_params.axis[1] == 1))) {
+    if (std::is_same<integer_type, uint8_t>::value) {
+      reference_ops::Mean(op_params, GetTensorShape(op_context.input),
+                          GetTensorData<uint8_t>(op_context.input),
+                          op_context.input->params.zero_point,
+                          op_context.input->params.scale,
+                          GetTensorShape(op_context.output),
+                          GetTensorData<uint8_t>(op_context.output),
+                          op_context.output->params.zero_point,
+                          op_context.output->params.scale);
+    } else {
+      reference_integer_ops::Mean(
+          op_params, data->multiplier, data->shift, GetTensorShape(input),
+          GetTensorData<integer_type>(input),
+          op_context.input->params.zero_point,
+          GetTensorShape(op_context.output),
+          GetTensorData<integer_type>(op_context.output),
+          op_context.output->params.zero_point);
+    }
+  } else if (input->params.zero_point == op_context.output->params.zero_point &&
+             input->params.scale == op_context.output->params.scale) {
+    TF_LITE_ENSURE(
+        context,
+        reference_ops::Mean(
+            GetTensorData<integer_type>(input), input->dims->data,
+            input->dims->size, GetTensorData<integer_type>(op_context.output),
+            op_context.output->dims->data, op_context.output->dims->size,
+            GetTensorData<int>(op_context.axis), num_axis,
+            op_context.params->keep_dims, GetTensorData<int>(temp_index),
+            GetTensorData<int>(resolved_axis), GetTensorData<int>(temp_sum)));
+  } else {
+    TF_LITE_ENSURE(
+        context,
+        reference_ops::QuantizedMeanOrSum<>(
+            GetTensorData<integer_type>(input), input->params.zero_point,
+            input->params.scale, input->dims->data, input->dims->size,
+            GetTensorData<integer_type>(op_context.output),
+            op_context.output->params.zero_point,
+            op_context.output->params.scale, op_context.output->dims->data,
+            op_context.output->dims->size, GetTensorData<int>(op_context.axis),
+            num_axis, op_context.params->keep_dims,
+            GetTensorData<int>(temp_index), GetTensorData<int>(resolved_axis),
+            GetTensorData<int>(temp_sum),
+            /*compute_sum=*/false));
+  }
+  return kTfLiteOk;
+}
+
 template <KernelType kernel_type>
 TfLiteStatus EvalMean(TfLiteContext* context, TfLiteNode* node) {
   OpContext op_context(context, node);
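One detail of the new helper: the uint8 special case is picked with a runtime std::is_same test, so both branches are compiled for every instantiation (they all type-check, since GetTensorData<uint8_t> is callable regardless of integer_type). Under C++17 the dead branch could be discarded at compile time; a hedged sketch, assuming the build allowed it:

#include <cstdint>
#include <type_traits>

template <typename integer_type>
void EvalDispatchSketch() {
  if constexpr (std::is_same<integer_type, uint8_t>::value) {
    // uint8 path: float scales passed straight through (reference_ops::Mean).
  } else {
    // int8/int16 path: pre-quantized multiplier and shift
    // (reference_integer_ops::Mean).
  }
}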
@@ -400,101 +464,19 @@ TfLiteStatus EvalMean(TfLiteContext* context, TfLiteNode* node) {
               GetTensorData<int64_t>(temp_sum)));
       break;
     case kTfLiteInt8: {
-      tflite::MeanParams op_params;
-      op_params.axis_count = num_axis;
-      ResolveAxis(GetTensorData<int>(op_context.axis), num_axis, &op_params);
-      const TfLiteTensor* input = op_context.input;
-      // TODO(b/139102329): Handle all the cases in the combined reference
-      // method.
-      if (op_context.params->keep_dims && NumDimensions(input) == 4 &&
-          op_params.axis_count == 2 &&
-          ((op_params.axis[0] == 1 && op_params.axis[1] == 2) ||
-           (op_params.axis[0] == 2 && op_params.axis[1] == 1))) {
-        reference_integer_ops::Mean(
-            op_params, data->multiplier, data->shift, GetTensorShape(input),
-            GetTensorData<int8_t>(input), op_context.input->params.zero_point,
-            GetTensorShape(op_context.output),
-            GetTensorData<int8_t>(op_context.output),
-            op_context.output->params.zero_point);
-      } else if (input->params.zero_point ==
-                     op_context.output->params.zero_point &&
-                 input->params.scale == op_context.output->params.scale) {
-        TF_LITE_ENSURE(
-            context,
-            reference_ops::Mean(
-                GetTensorData<int8_t>(input), input->dims->data,
-                input->dims->size, GetTensorData<int8_t>(op_context.output),
-                op_context.output->dims->data, op_context.output->dims->size,
-                GetTensorData<int>(op_context.axis), num_axis,
-                op_context.params->keep_dims, GetTensorData<int>(temp_index),
-                GetTensorData<int>(resolved_axis),
-                GetTensorData<int>(temp_sum)));
-      } else {
-        TF_LITE_ENSURE(
-            context,
-            reference_ops::QuantizedMeanOrSum<>(
-                GetTensorData<int8_t>(input), input->params.zero_point,
-                input->params.scale, input->dims->data, input->dims->size,
-                GetTensorData<int8_t>(op_context.output),
-                op_context.output->params.zero_point,
-                op_context.output->params.scale, op_context.output->dims->data,
-                op_context.output->dims->size,
-                GetTensorData<int>(op_context.axis), num_axis,
-                op_context.params->keep_dims, GetTensorData<int>(temp_index),
-                GetTensorData<int>(resolved_axis), GetTensorData<int>(temp_sum),
-                /*compute_sum=*/false));
-      }
+      TF_LITE_ENSURE_OK(context, EvalMeanReferenceOps<int8_t>(
+                                     context, op_context, num_axis, data,
+                                     temp_index, resolved_axis, temp_sum));
     } break;
+    case kTfLiteInt16: {
+      TF_LITE_ENSURE_OK(context, EvalMeanReferenceOps<int16_t>(
+                                     context, op_context, num_axis, data,
+                                     temp_index, resolved_axis, temp_sum));
+    } break;
     case kTfLiteUInt8: {
-      // TODO(b/139102329): Handle all the cases in the combined reference
-      // method.
-      tflite::MeanParams op_params;
-      op_params.axis_count = num_axis;
-      ResolveAxis(GetTensorData<int>(op_context.axis), num_axis, &op_params);
-      if (op_context.params->keep_dims &&
-          NumDimensions(op_context.input) == 4 && op_params.axis_count == 2 &&
-          ((op_params.axis[0] == 1 && op_params.axis[1] == 2) ||
-           (op_params.axis[0] == 2 && op_params.axis[1] == 1))) {
-        reference_ops::Mean(op_params, GetTensorShape(op_context.input),
-                            GetTensorData<uint8_t>(op_context.input),
-                            op_context.input->params.zero_point,
-                            op_context.input->params.scale,
-                            GetTensorShape(op_context.output),
-                            GetTensorData<uint8_t>(op_context.output),
-                            op_context.output->params.zero_point,
-                            op_context.output->params.scale);
-      } else if (op_context.input->params.zero_point ==
-                     op_context.output->params.zero_point &&
-                 op_context.input->params.scale ==
-                     op_context.output->params.scale) {
-        TF_LITE_ENSURE(
-            context,
-            reference_ops::Mean(
-                GetTensorData<uint8_t>(op_context.input),
-                op_context.input->dims->data, op_context.input->dims->size,
-                GetTensorData<uint8_t>(op_context.output),
-                op_context.output->dims->data, op_context.output->dims->size,
-                GetTensorData<int>(op_context.axis), num_axis,
-                op_context.params->keep_dims, GetTensorData<int>(temp_index),
-                GetTensorData<int>(resolved_axis),
-                GetTensorData<int>(temp_sum)));
-      } else {
-        TF_LITE_ENSURE(
-            context,
-            reference_ops::QuantizedMeanOrSum<>(
-                GetTensorData<uint8_t>(op_context.input),
-                op_context.input->params.zero_point,
-                op_context.input->params.scale, op_context.input->dims->data,
-                op_context.input->dims->size,
-                GetTensorData<uint8_t>(op_context.output),
-                op_context.output->params.zero_point,
-                op_context.output->params.scale, op_context.output->dims->data,
-                op_context.output->dims->size,
-                GetTensorData<int>(op_context.axis), num_axis,
-                op_context.params->keep_dims, GetTensorData<int>(temp_index),
-                GetTensorData<int>(resolved_axis), GetTensorData<int>(temp_sum),
-                /*compute_sum=*/false));
-      }
+      TF_LITE_ENSURE_OK(context, EvalMeanReferenceOps<uint8_t>(
+                                     context, op_context, num_axis, data,
+                                     temp_index, resolved_axis, temp_sum));
     } break;
     default:
      return kTfLiteError;


@@ -233,7 +233,14 @@ class AnyOpDynamicModel : public BaseOpModel {
 };
 
 // for quantized Add, the error shouldn't exceed step
-float GetTolerance(int min, int max) { return (max - min) / 255.0; }
+template <typename integer_type = int8_t>
+float GetTolerance(int min, int max) {
+  if (std::is_same<int16_t, integer_type>::value) {
+    return (max - min) / 65536.0;
+  } else {
+    return (max - min) / 255.0;
+  }
+}
 
 // Tests for reduce_mean
 TEST(ConstFloatMeanOpTest, NotKeepDims) {
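Worked numbers for the templated tolerance above, using the (-5.0, 5.0) range the quantized tests pass in: one quantization step is 10/255 ≈ 0.0392 for the 8-bit types and 10/65536 ≈ 0.000153 for int16. A trivial check of the arithmetic (illustrative, not test code):

#include <cstdio>

int main() {
  // (max - min) divided by the number of representable levels.
  std::printf("int8 step:  %f\n", (5.0 - (-5.0)) / 255.0);    // ~0.039216
  std::printf("int16 step: %f\n", (5.0 - (-5.0)) / 65536.0);  // ~0.000153
  return 0;
}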
@@ -430,65 +437,125 @@ TEST(ConstUint8MeanOpTest, KeepDims) {
               ElementsAreArray(ArrayFloatNear({0.3, 0.35, 0.55}, kQuantizedTolerance)));
 }
 
-TEST(ConstInt8MeanOpTest, NonSpecialAxisSameScale) {
-  float kQuantizedTolerance = GetTolerance(-5.0, 5.0);
+template <typename integer_type, TensorType tensor_dtype>
+void MeanOpConstModelTest() {
+  float kQuantizedTolerance = GetTolerance<integer_type>(-5.0, 5.0);
   std::vector<float> data = {105.0, 71.0, 233.0, 92.0, 227.0, 11.0, 14.0, 43.0};
-  MeanOpConstModel m({TensorType_INT8, {1, 1, 2, 4}, 0.0, 255.0},
-                     {TensorType_INT8, {1, 2, 4}, 0.0, 255.0}, {1}, {1}, false);
-  m.QuantizeAndPopulate<int8_t>(m.Input(), data);
+  float scale = tensor_dtype == TensorType_INT16 ? 255 / 32767.0f : 0.0f;
+  MeanOpConstModel m({tensor_dtype, {1, 1, 2, 4}, 0.0, 255.0, scale, 0},
+                     {tensor_dtype, {1, 2, 4}, 0.0, 255.0, scale, 0}, {1}, {1},
+                     false);
+  m.QuantizeAndPopulate<integer_type>(m.Input(), data);
   m.Invoke();
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2, 4}));
-  EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
+  EXPECT_THAT(m.GetDequantizedOutput<integer_type>(),
               ElementsAreArray(ArrayFloatNear(data, kQuantizedTolerance)));
 }
 
-TEST(ConstInt8MeanOpTest, NonSpecialAxisNonSameScale) {
-  float kQuantizedTolerance = GetTolerance(-5.0, 5.0);
+class ConstMeanOpTestSameScale : public ::testing::Test {};
+
+TEST_F(ConstMeanOpTestSameScale, NonSpecialAxisSameScaleInt8) {
+  MeanOpConstModelTest<int8_t, TensorType_INT8>();
+}
+
+TEST_F(ConstMeanOpTestSameScale, NonSpecialAxisSameScaleInt16) {
+  MeanOpConstModelTest<int16_t, TensorType_INT16>();
+}
+
+template <typename integer_type, TensorType tensor_dtype>
+void ConstMeanOpTestNonSameScale() {
+  float kQuantizedTolerance = GetTolerance<integer_type>(-5.0, 5.0);
   std::vector<float> data = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8};
-  MeanOpConstModel m({TensorType_INT8, {1, 1, 2, 4}, -1.0, 1.0},
-                     {TensorType_INT8, {1, 2}, -5.0, 5.0}, {2}, {1, 3}, false);
-  m.QuantizeAndPopulate<int8_t>(m.Input(), data);
+  float scale = tensor_dtype == TensorType_INT16 ? 1 / 32767.f : 0.0f;
+  MeanOpConstModel m({tensor_dtype, {1, 1, 2, 4}, -1.0, 1.0, scale, 0},
+                     {tensor_dtype, {1, 2}, -5.0, 5.0, scale, 0}, {2}, {1, 3},
+                     false);
+  m.QuantizeAndPopulate<integer_type>(m.Input(), data);
   m.Invoke();
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2}));
   EXPECT_THAT(
-      m.GetDequantizedOutput<int8_t>(),
+      m.GetDequantizedOutput<integer_type>(),
       ElementsAreArray(ArrayFloatNear({0.25, 0.65}, kQuantizedTolerance)));
 }
 
-TEST(ConstInt8MeanOpTest, QuantizedSameScale) {
-  float kQuantizedTolerance = GetTolerance(-5.0, 5.0);
+class ConstMeanOpTestNonSameScale : public ::testing::Test {};
+
+TEST_F(ConstMeanOpTestNonSameScale, NonSpecialAxisNonSameScaleInt8) {
+  MeanOpConstModelTest<int8_t, TensorType_INT8>();
+}
+
+TEST_F(ConstMeanOpTestNonSameScale, NonSpecialAxisNonSameScaleInt16) {
+  MeanOpConstModelTest<int16_t, TensorType_INT16>();
+}
+
+template <typename integer_type, TensorType tensor_dtype>
+void MeanOpTestQuantizedSameScale() {
+  float kQuantizedTolerance = GetTolerance<integer_type>(-5.0, 5.0);
+  float scale = tensor_dtype == TensorType_INT16 ? 1 / 32767.f : 0.0f;
   std::vector<float> data = {0.1, 0.2, 0.3, 0.4, 0.2, 0.3, 0.4, 0.5, 0.1,
                              0.1, 0.1, 0.1, 0.4, 0.2, 0.2, 0.2, 0.9, 0.9,
                              0.9, 0.9, 0.2, 0.3, 0.7, 0.7, 0.1, 0.1, 0.3,
                              0.3, 0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4};
-  MeanOpConstModel m({TensorType_INT8, {1, 2, 2, 9}, -1.0, 1.0},
-                     {TensorType_INT8, {2}, -1.0, 1.0}, {2}, {1, 2}, true);
-  m.QuantizeAndPopulate<int8_t>(m.Input(), data);
+  MeanOpConstModel m({tensor_dtype, {1, 2, 2, 9}, -1.0, 1.0, scale, 0},
+                     {tensor_dtype, {2}, -1.0, 1.0, scale, 0}, {2}, {1, 2},
+                     true);
+  m.QuantizeAndPopulate<integer_type>(m.Input(), data);
   m.Invoke();
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 1, 9}));
-  EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
+  EXPECT_THAT(m.GetDequantizedOutput<integer_type>(),
               ElementsAreArray(ArrayFloatNear(
                   {0.35, 0.325, 0.2, 0.35, 0.375, 0.325, 0.225, 0.45, 0.425},
                   kQuantizedTolerance)));
 }
 
-TEST(ConstInt8MeanOpTest, QuantizedDifferentScale) {
-  float kQuantizedTolerance = GetTolerance(-5.0, 5.0);
+class MeanOpTestQuantizedSameScale : public ::testing::Test {};
+
+TEST_F(MeanOpTestQuantizedSameScale, QuantizedSameScaleInt8) {
+  MeanOpConstModelTest<int8_t, TensorType_INT8>();
+}
+
+TEST_F(MeanOpTestQuantizedSameScale, QuantizedSameScaleInt16) {
+  MeanOpConstModelTest<int16_t, TensorType_INT16>();
+}
+
+template <typename integer_type, TensorType tensor_dtype>
+void MeanOpTestQuantizedDifferentScale() {
+  float kQuantizedTolerance = GetTolerance<integer_type>(-5.0, 5.0);
+  float scale = tensor_dtype == TensorType_INT16 ? 1 / 32767.f : 0.0f;
   std::vector<float> data = {0.1, 0.2, 0.3, 0.4, 0.2, 0.3, 0.4, 0.5, 0.1,
                              0.1, 0.1, 0.1, 0.4, 0.2, 0.2, 0.2, 0.9, 0.9,
                              0.9, 0.9, 0.2, 0.3, 0.7, 0.7, 0.1, 0.1, 0.3,
                              0.3, 0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4};
-  MeanOpConstModel m({TensorType_INT8, {1, 2, 2, 9}, -1.0, 1.0},
-                     {TensorType_INT8, {2}, -4.0, 4.0}, {2}, {1, 2}, true);
-  m.QuantizeAndPopulate<int8_t>(m.Input(), data);
+  MeanOpConstModel m({tensor_dtype, {1, 2, 2, 9}, -1.0, 1.0, scale, 0},
+                     {tensor_dtype, {2}, -4.0, 4.0, scale, 0}, {2}, {1, 2},
+                     true);
+  m.QuantizeAndPopulate<integer_type>(m.Input(), data);
   m.Invoke();
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 1, 9}));
-  EXPECT_THAT(m.GetDequantizedOutput<int8_t>(),
+  EXPECT_THAT(m.GetDequantizedOutput<integer_type>(),
               ElementsAreArray(ArrayFloatNear(
                   {0.35, 0.325, 0.2, 0.35, 0.375, 0.325, 0.225, 0.45, 0.425},
                   kQuantizedTolerance)));
 }
 
+class MeanOpTestQuantizedDifferentScale : public ::testing::Test {};
+
+TEST_F(MeanOpTestQuantizedDifferentScale, QuantizedDifferentScaleInt8) {
+  MeanOpConstModelTest<int8_t, TensorType_INT8>();
+}
+
+TEST_F(MeanOpTestQuantizedDifferentScale, QuantizedDifferentScaleInt16) {
+  MeanOpConstModelTest<int16_t, TensorType_INT16>();
+}
+
 TEST(ConstFloatMeanOpTest, KeepDims4DMeanLargeDepthInt8) {
   float kQuantizedTolerance = GetTolerance(-5.0, 5.0);
   std::vector<float> data = {


@@ -137,7 +137,7 @@ BuiltinOpResolver::BuiltinOpResolver() {
              /* max_version = */ 4);
   AddBuiltin(BuiltinOperator_MEAN, Register_MEAN(),
              /* min_version = */ 1,
-             /* max_version = */ 2);
+             /* max_version = */ 3);
   AddBuiltin(BuiltinOperator_DIV, Register_DIV(),
              /* min_version */ 1,
              /* max_version */ 2);


@@ -129,6 +129,7 @@ std::string GetMinimumRuntimeVersionForModel(const Model& model) {
       {{OperatorType::kBidirectionalSequenceRnn, 1}, "1.14.0"},
       {{OperatorType::kMean, 1}, "1.6.0"},
       {{OperatorType::kMean, 2}, "1.14.0"},
+      {{OperatorType::kMean, 3}, kPendingReleaseOpVersion},
       {{OperatorType::kSum, 1}, "1.10.0"},
       {{OperatorType::kSum, 2}, "1.15.0"},
       {{OperatorType::kReduceMax, 1}, "1.11.0"},


@@ -802,7 +802,6 @@ OperatorProperty GetOperatorProperty(const ModelT* model, int subgraph_index,
       property.inputs = {{0, {}}};
      property.outputs = {{0, {}}};
       property.version = 2;
-      property.quantizable_int16 = false;
       break;
     case BuiltinOperator_MINIMUM:
       property.arbitrary_inputs = true;


@@ -514,6 +514,7 @@ int GetBuiltinOperatorVersion(const OpSignature& op_sig) {
     case BuiltinOperator_CONCATENATION:
     case BuiltinOperator_SOFTMAX:
+    case BuiltinOperator_MEAN:
     case BuiltinOperator_PAD:
     case BuiltinOperator_PADV2:
       // In case of int16 inputs, the version is 3.
@@ -541,7 +542,6 @@ int GetBuiltinOperatorVersion(const OpSignature& op_sig) {
     case BuiltinOperator_ADD:
     case BuiltinOperator_SPACE_TO_DEPTH:
     case BuiltinOperator_SPLIT_V:
-    case BuiltinOperator_MEAN:
     case BuiltinOperator_SUM:
     case BuiltinOperator_REDUCE_MAX:
     case BuiltinOperator_REDUCE_MIN:
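Net effect of these two hunks: MEAN leaves the always-version-2 group and joins the group whose version is keyed off the input type, so int16 inputs report version 3, int8 version 2, and everything else version 1. A hedged sketch of that rule with an illustrative enum (the real code switches on the flatbuffer TensorType in op_sig.input_types):

// Illustrative only; not the real op_version API surface.
enum class TensorTypeSketch { kUInt8, kInt8, kInt16, kFloat32 };

int MeanVersionSketch(TensorTypeSketch input_type) {
  if (input_type == TensorTypeSketch::kInt16) return 3;  // new in this PR
  if (input_type == TensorTypeSketch::kInt8) return 2;   // existing behavior
  return 1;
}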


@@ -67,6 +67,18 @@ void SimpleVersioningTest(BuiltinOperator op) {
   EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 1);
 }
 
+// Similar to SimpleVersioningTest function, but
+// op has 3 versions and the input type includes TensorType_INT16.
+void SimpleVersioningTestExtended(BuiltinOperator op) {
+  OpSignature fake_op_sig = {
+      .op = op,
+      .input_types = std::vector<TensorType>{TensorType_INT16},
+  };
+  EXPECT_EQ(GetBuiltinOperatorVersion(fake_op_sig), 3);
+
+  SimpleVersioningTest(op);
+}
+
 // Test version for a simple Op with 2 versions and the output type controls the
 void SimpleOutputVersioningTest(BuiltinOperator op) {
   OpSignature fake_op_sig = {
@@ -281,7 +293,7 @@ TEST(OpVersionTest, VersioningMinTest) {
 }
 
 TEST(OpVersionTest, VersioningMeanTest) {
-  SimpleVersioningTest(BuiltinOperator_MEAN);
+  SimpleVersioningTestExtended(BuiltinOperator_MEAN);
 }
 
 TEST(OpVersionTest, VersioningSumTest) {


@@ -170,6 +170,7 @@ std::string FindMinimumRuntimeVersionForOp(tflite::BuiltinOperator op_code,
           {{BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN, 3}, "2.3.0"},
           {{BuiltinOperator_MEAN, 1}, "1.6.0"},
           {{BuiltinOperator_MEAN, 2}, "1.14.0"},
+          {{BuiltinOperator_MEAN, 3}, kPendingReleaseVersion},
          {{BuiltinOperator_SUM, 1}, "1.10.0"},
          {{BuiltinOperator_SUM, 2}, "1.15.0"},
          {{BuiltinOperator_REDUCE_MAX, 1}, "1.11.0"},