Support reduce_min

PiperOrigin-RevId: 209634537
A. Unique TensorFlower 2018-08-21 11:54:42 -07:00 committed by TensorFlower Gardener
parent 9158b1b83a
commit 81d90de884
13 changed files with 332 additions and 54 deletions

View File

@@ -267,6 +267,7 @@ def generated_test_models():
"prelu",
"pow",
"reduce_max",
"reduce_min",
"reduce_prod",
"relu",
"relu1",

View File

@@ -114,6 +114,7 @@ typedef enum {
kTfLiteBuiltinLogicalAnd = 86,
kTfLiteBuiltinLogicalNot = 87,
kTfLiteBuiltinUnpack = 88,
kTfLiteBuiltinReduceMin = 89,
} TfLiteBuiltinOperator;
#ifdef __cplusplus

View File

@@ -3896,39 +3896,16 @@ inline bool InitTensorDataForReduce(const int* dims, const int num_dims,
return true;
}
// Computes the sum of elements across dimensions given in axis.
// Computes a generic reduction (i.e., sum/max/min/prod) of elements across
// dimensions given in axis. The caller must pass in init_value and a reducer.
template <typename T>
inline bool Sum(const T* input_data, const int* input_dims,
const int input_num_dims, T* output_data,
const int* output_dims, const int output_num_dims,
const int* axis, const int num_axis_dimensions, bool keep_dims,
int* temp_index, int* resolved_axis) {
// Reset output data.
if (!InitTensorDataForReduce(output_dims, output_num_dims, static_cast<T>(0),
output_data)) {
return false;
}
// Resolve axis.
int num_resolved_axis = 0;
if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
&num_resolved_axis)) {
return false;
}
return ReduceSumImpl<T, T>(input_data, input_dims, output_dims,
input_num_dims, output_num_dims, resolved_axis,
num_resolved_axis, temp_index, output_data);
}
// Computes the max of elements across dimensions given in axis.
template <typename T>
inline bool ReduceMax(const T* input_data, const int* input_dims,
inline bool ReduceGeneric(const T* input_data, const int* input_dims,
const int input_num_dims, T* output_data,
const int* output_dims, const int output_num_dims,
const int* axis, const int64_t num_axis_dimensions,
bool keep_dims, int* temp_index, int* resolved_axis) {
T init_value = std::numeric_limits<T>::lowest();
bool keep_dims, int* temp_index, int* resolved_axis,
T init_value,
T reducer(const T current, const T in)) {
// Reset output data.
if (!InitTensorDataForReduce(output_dims, output_num_dims, init_value,
output_data)) {
@@ -3942,14 +3919,63 @@ inline bool ReduceMax(const T* input_data, const int* input_dims,
return false;
}
auto reducer = [](const T current, const T in) -> T {
return (in > current) ? in : current;
};
return Reduce<T, T>(input_data, input_dims, output_dims, input_num_dims,
output_num_dims, resolved_axis, num_resolved_axis,
temp_index, reducer, output_data);
}
// Computes the sum of elements across dimensions given in axis.
template <typename T>
inline bool Sum(const T* input_data, const int* input_dims,
const int input_num_dims, T* output_data,
const int* output_dims, const int output_num_dims,
const int* axis, const int num_axis_dimensions, bool keep_dims,
int* temp_index, int* resolved_axis) {
T init_value = static_cast<T>(0);
auto reducer = [](const T current, const T in) -> T { return current + in; };
return ReduceGeneric<T>(input_data, input_dims, input_num_dims, output_data,
output_dims, output_num_dims, axis,
num_axis_dimensions, keep_dims, temp_index,
resolved_axis, init_value, reducer);
}
// Computes the max of elements across dimensions given in axis.
template <typename T>
inline bool ReduceMax(const T* input_data, const int* input_dims,
const int input_num_dims, T* output_data,
const int* output_dims, const int output_num_dims,
const int* axis, const int64_t num_axis_dimensions,
bool keep_dims, int* temp_index, int* resolved_axis) {
T init_value = std::numeric_limits<T>::lowest();
auto reducer = [](const T current, const T in) -> T {
return (in > current) ? in : current;
};
return ReduceGeneric<T>(input_data, input_dims, input_num_dims, output_data,
output_dims, output_num_dims, axis,
num_axis_dimensions, keep_dims, temp_index,
resolved_axis, init_value, reducer);
}
// Computes the min of elements across dimensions given in axis.
template <typename T>
inline bool ReduceMin(const T* input_data, const int* input_dims,
const int input_num_dims, T* output_data,
const int* output_dims, const int output_num_dims,
const int* axis, const int64_t num_axis_dimensions,
bool keep_dims, int* temp_index, int* resolved_axis) {
T init_value = std::numeric_limits<T>::max();
auto reducer = [](const T current, const T in) -> T {
return (in < current) ? in : current;
};
return ReduceGeneric<T>(input_data, input_dims, input_num_dims, output_data,
output_dims, output_num_dims, axis,
num_axis_dimensions, keep_dims, temp_index,
resolved_axis, init_value, reducer);
}
// Computes the prod of elements across dimensions given in axis.
template <typename T>
inline bool ReduceProd(const T* input_data, const int* input_dims,
@@ -3957,23 +3983,13 @@ inline bool ReduceProd(const T* input_data, const int* input_dims,
const int* output_dims, const int output_num_dims,
const int* axis, const int64_t num_axis_dimensions,
bool keep_dims, int* temp_index, int* resolved_axis) {
// Reset output data.
if (!InitTensorDataForReduce(output_dims, output_num_dims, static_cast<T>(1),
output_data)) {
return false;
}
// Resolve axis.
int num_resolved_axis = 0;
if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
&num_resolved_axis)) {
return false;
}
T init_value = static_cast<T>(1);
auto reducer = [](const T current, const T in) -> T { return in * current; };
return Reduce<T, T>(input_data, input_dims, output_dims, input_num_dims,
output_num_dims, resolved_axis, num_resolved_axis,
temp_index, reducer, output_data);
return ReduceGeneric<T>(input_data, input_dims, input_num_dims, output_data,
output_dims, output_num_dims, axis,
num_axis_dimensions, keep_dims, temp_index,
resolved_axis, init_value, reducer);
}
// Computes the mean of elements across dimensions given in axis.
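The hunk above folds Sum, ReduceMax, the new ReduceMin, and ReduceProd into one ReduceGeneric helper that takes an init value and a binary reducer. As a rough illustration of the new entry point, here is a minimal, hypothetical sketch of calling ReduceMin directly on a small buffer (the wrapper function, buffer names, and sizes are illustrative, not part of the commit):

#include <cstdio>
// Assumes the reference_ops.h shown above has been included, so that
// tflite::reference_ops::ReduceMin is visible.

void ReduceMinSketch() {
  // 2x3 input reduced over axis 1 -> per-row minimum.
  float input[] = {3.f, 1.f, 2.f, 6.f, 5.f, 4.f};
  int input_dims[] = {2, 3};
  float output[2];
  int output_dims[] = {2};
  int axis[] = {1};
  int temp_index[2];     // scratch: one slot per input dimension
  int resolved_axis[1];  // scratch: one slot per reduction axis
  bool ok = tflite::reference_ops::ReduceMin<float>(
      input, input_dims, /*input_num_dims=*/2, output, output_dims,
      /*output_num_dims=*/1, axis, /*num_axis_dimensions=*/1,
      /*keep_dims=*/false, temp_index, resolved_axis);
  if (ok) std::printf("%f %f\n", output[0], output[1]);  // 1.000000 4.000000
}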

View File

@@ -412,6 +412,54 @@ TfLiteStatus EvalMax(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
template <KernelType kernel_type>
TfLiteStatus EvalMin(TfLiteContext* context, TfLiteNode* node) {
OpContext op_context(context, node);
int64_t num_axis = NumElements(op_context.axis);
TfLiteTensor* temp_index = GetTemporary(context, node, /*index=*/0);
TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
// Resize the output tensor if the output tensor is dynamic.
if (IsDynamicTensor(op_context.output)) {
TF_LITE_ENSURE_OK(context,
ResizeTempAxis(context, &op_context, resolved_axis));
TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
}
#define TF_LITE_MIN(kernel_type, data_type) \
kernel_type::ReduceMin<>( \
GetTensorData<data_type>(op_context.input), \
op_context.input->dims->data, op_context.input->dims->size, \
GetTensorData<data_type>(op_context.output), \
op_context.output->dims->data, op_context.output->dims->size, \
GetTensorData<int>(op_context.axis), num_axis, \
op_context.params->keep_dims, GetTensorData<int>(temp_index), \
GetTensorData<int>(resolved_axis))
if (kernel_type == kReference) {
switch (op_context.input->type) {
case kTfLiteFloat32:
TF_LITE_ENSURE(context, TF_LITE_MIN(reference_ops, float));
break;
case kTfLiteInt32:
TF_LITE_ENSURE(context, TF_LITE_MIN(reference_ops, int));
break;
case kTfLiteInt64:
TF_LITE_ENSURE(context, TF_LITE_MIN(reference_ops, int64_t));
break;
case kTfLiteUInt8:
TF_LITE_ENSURE_EQ(context, op_context.input->params.scale,
op_context.output->params.scale);
TF_LITE_ENSURE_EQ(context, op_context.input->params.zero_point,
op_context.output->params.zero_point);
TF_LITE_ENSURE(context, TF_LITE_MIN(reference_ops, uint8_t));
break;
default:
return kTfLiteError;
}
}
#undef TF_LITE_MIN
return kTfLiteOk;
}
} // namespace reduce
TfLiteRegistration* Register_MEAN_REF() {
@@ -442,6 +490,13 @@ TfLiteRegistration* Register_REDUCE_MAX_REF() {
return &r;
}
TfLiteRegistration* Register_REDUCE_MIN_REF() {
static TfLiteRegistration r = {reduce::Init, reduce::Free,
reduce::PrepareSimple,
reduce::EvalMin<reduce::kReference>};
return &r;
}
// TODO(kanlig): add optimized implementation of Mean.
TfLiteRegistration* Register_MEAN() { return Register_MEAN_REF(); }
TfLiteRegistration* Register_SUM() { return Register_SUM_REF(); }
@@ -449,6 +504,7 @@ TfLiteRegistration* Register_REDUCE_PROD() {
return Register_REDUCE_PROD_REF();
}
TfLiteRegistration* Register_REDUCE_MAX() { return Register_REDUCE_MAX_REF(); }
TfLiteRegistration* Register_REDUCE_MIN() { return Register_REDUCE_MIN_REF(); }
} // namespace builtin
} // namespace ops
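A note on the uint8 branch of EvalMin above: the reduction runs directly on the raw uint8 codes, which yields the correct real-valued minimum only because dequantization,

    real = scale * (q - zero_point),

is monotonically increasing in q. That equivalence holds only when input and output share the same scale and zero_point, which is exactly what the two TF_LITE_ENSURE_EQ checks enforce (mirroring the existing EvalMax handler).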

View File

@@ -169,6 +169,35 @@ class MaxOpDynamicModel : public BaseOpModel {
}
};
// Model for the tests case where axis is a const tensor.
class MinOpConstModel : public BaseOpModel {
public:
MinOpConstModel(const TensorData& input, const TensorData& output,
std::initializer_list<int> axis_shape,
std::initializer_list<int> axis, bool keep_dims) {
input_ = AddInput(input);
axis_ = AddConstInput(TensorType_INT32, axis, axis_shape);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_REDUCE_MIN, BuiltinOptions_ReducerOptions,
CreateReducerOptions(builder_, keep_dims).Union());
BuildInterpreter({GetShape(input_)});
}
};
// Model for the tests case where axis is a dynamic tensor.
class MinOpDynamicModel : public BaseOpModel {
public:
MinOpDynamicModel(const TensorData& input, const TensorData& output,
const TensorData& axis, bool keep_dims) {
input_ = AddInput(input);
axis_ = AddInput(axis);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_REDUCE_MIN, BuiltinOptions_ReducerOptions,
CreateReducerOptions(builder_, keep_dims).Union());
BuildInterpreter({GetShape(input_)});
}
};
// for quantized Add, the error shouldn't exceed step
float GetTolerance(int min, int max) { return (max - min) / 255.0; }
@@ -665,6 +694,147 @@ TEST(DynamicUint8MaxOpTest, Scalar) {
ElementsAreArray(ArrayFloatNear({11.1294}, kQuantizedTolerance)));
}
// Tests for reduce_min
TEST(ConstFloatMinOpTest, NotKeepDims) {
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
MinOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {2}},
{4}, {1, 0, -3, -3}, false);
m.SetInput(data);
m.Invoke();
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({1, 2})));
}
TEST(ConstFloatMinOpTest, KeepDims) {
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
MinOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {3}},
{2}, {0, 2}, true);
m.SetInput(data);
m.Invoke();
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({1, 3, 5})));
}
TEST(DynamicFloatMinOpTest, NotKeepDims) {
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
MinOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
{TensorType_FLOAT32, {2}}, {TensorType_INT32, {4}},
false);
std::vector<int> axis = {1, 0, -3, -3};
m.SetAxis(axis);
m.SetInput(data);
m.Invoke();
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({1, 2})));
}
TEST(DynamicFloatMinOpTest, KeepDims) {
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
MinOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
{TensorType_FLOAT32, {3}}, {TensorType_INT32, {2}}, true);
std::vector<int> axis = {0, 2};
m.SetAxis(axis);
m.SetInput(data);
m.Invoke();
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({1, 3, 5})));
}
TEST(DynamicFloatMinOpTest, Scale) {
std::vector<float> data = {9.527};
MinOpDynamicModel m({TensorType_FLOAT32, {1}}, {TensorType_FLOAT32, {1}},
{TensorType_INT32, {1}}, true);
std::vector<int> axis = {0};
m.SetAxis(axis);
m.SetInput(data);
m.Invoke();
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1}));
EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({9.527})));
}
TEST(ConstUint8MinOpTest, NotKeepDims) {
float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
MinOpConstModel m({TensorType_UINT8, {1, 3, 2}, -1.0, 1.0},
{TensorType_UINT8, {2}, -1.0, 1.0}, {1}, {1}, false);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
m.Invoke();
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2}));
EXPECT_THAT(
m.GetDequantizedOutput(),
ElementsAreArray(ArrayFloatNear({0.294117, 0.2}, kQuantizedTolerance)));
}
TEST(ConstUint8MinOpTest, KeepDims) {
float kQuantizedTolerance = GetTolerance(-1.0, 1.0);
std::vector<float> data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6};
MinOpConstModel m({TensorType_UINT8, {3, 2}, -1.0, 1.0},
{TensorType_UINT8, {3}, -1.0, 1.0}, {1}, {1}, true);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
m.Invoke();
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 1}));
EXPECT_THAT(
m.GetDequantizedOutput(),
ElementsAreArray(ArrayFloatNear({0.2, 0.3, 0.5}, kQuantizedTolerance)));
}
TEST(DynamicUint8MinOpTest, NotKeepDims) {
float kQuantizedTolerance = GetTolerance(-5.0, 2.0);
std::vector<float> data = {1.3, -4.8, -3.6, 0.24};
MinOpDynamicModel m({TensorType_UINT8, {2, 2}, -5.0, 2.0},
{TensorType_UINT8, {2}, -5.0, 2.0},
{TensorType_INT32, {1}}, false);
std::vector<int> axis = {1};
m.SetAxis(axis);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
m.Invoke();
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
EXPECT_THAT(
m.GetDequantizedOutput(),
ElementsAreArray(ArrayFloatNear({-4.807843, -3.6}, kQuantizedTolerance)));
}
TEST(DynamicUint8MinOpTest, KeepDims) {
float kQuantizedTolerance = GetTolerance(-10.0, 12.0);
std::vector<float> data = {11.14, -0.14, 7.423, 0.879};
MinOpDynamicModel m({TensorType_UINT8, {2, 2}, -10.0, 12.0},
{TensorType_UINT8, {2}, -10.0, 12.0},
{TensorType_INT32, {1}}, true);
std::vector<int> axis = {0};
m.SetAxis(axis);
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
m.Invoke();
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2}));
EXPECT_THAT(m.GetDequantizedOutput(),
ElementsAreArray(
ArrayFloatNear({7.427451, -0.164706}, kQuantizedTolerance)));
}
TEST(DynamicUint8MinOpTest, Scalar) {
float kQuantizedTolerance = GetTolerance(-10.0, 12.0);
std::vector<float> data = {11.14};
MinOpDynamicModel m({TensorType_UINT8, {}, -10.0, 12.0},
{TensorType_UINT8, {}, -10.0, 12.0},
{TensorType_INT32, {1}}, true);
std::vector<int> axis = {0};
m.QuantizeAndPopulate<uint8_t>(m.Input(), data);
m.Invoke();
EXPECT_THAT(m.GetOutputShape(), IsEmpty());
EXPECT_THAT(m.GetDequantizedOutput(),
ElementsAreArray(ArrayFloatNear({11.1294}, kQuantizedTolerance)));
}
} // namespace
} // namespace tflite
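For reading the quantized expectations in the new uint8 Min tests: GetTolerance(min, max) is one uint8 quantization step for the range [min, max], and ArrayFloatNear only requires the dequantized output to match within that step. A rough, self-contained check for ConstUint8MinOpTest.NotKeepDims (the zero point of 128 is an assumption about the test helper's rounding, not something stated in this commit):

#include <cmath>
#include <cstdio>

int main() {
  const float fmin = -1.0f, fmax = 1.0f;
  const float scale = (fmax - fmin) / 255.0f;  // == GetTolerance(-1.0, 1.0) ~= 0.00784
  const int zero_point = 128;                  // assumed nudged midpoint for [-1, 1]
  // Quantize, then dequantize, the true minimum 0.3 of the first output column.
  const int q = static_cast<int>(std::lround(0.3f / scale)) + zero_point;
  const float dq = (q - zero_point) * scale;   // ~0.298039
  std::printf("scale=%.6f q=%d dequantized=%.6f\n", scale, q, dq);
  // The test expects 0.294117; the gap (~0.0039) is under one step, which is
  // exactly the tolerance ArrayFloatNear is given.
  return 0;
}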

View File

@@ -94,6 +94,7 @@ TfLiteRegistration* Register_NEG();
TfLiteRegistration* Register_SUM();
TfLiteRegistration* Register_REDUCE_PROD();
TfLiteRegistration* Register_REDUCE_MAX();
TfLiteRegistration* Register_REDUCE_MIN();
TfLiteRegistration* Register_SELECT();
TfLiteRegistration* Register_SLICE();
TfLiteRegistration* Register_SIN();
@@ -219,6 +220,7 @@ BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_SUM, Register_SUM());
AddBuiltin(BuiltinOperator_REDUCE_PROD, Register_REDUCE_PROD());
AddBuiltin(BuiltinOperator_REDUCE_MAX, Register_REDUCE_MAX());
AddBuiltin(BuiltinOperator_REDUCE_MIN, Register_REDUCE_MIN());
AddBuiltin(BuiltinOperator_EXPAND_DIMS, Register_EXPAND_DIMS());
AddBuiltin(BuiltinOperator_SPARSE_TO_DENSE, Register_SPARSE_TO_DENSE());
AddBuiltin(BuiltinOperator_EQUAL, Register_EQUAL());
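With REDUCE_MIN wired into BuiltinOpResolver above, any model carrying the op resolves through the standard builder path. A hypothetical usage sketch (standard TF Lite C++ API; the contrib/lite include paths are an assumption based on where this commit lives in the tree):

#include <memory>

#include "tensorflow/contrib/lite/interpreter.h"
#include "tensorflow/contrib/lite/kernels/register.h"
#include "tensorflow/contrib/lite/model.h"

std::unique_ptr<tflite::Interpreter> MakeInterpreter(
    const tflite::FlatBufferModel& model) {
  // The resolver now maps BuiltinOperator_REDUCE_MIN to Register_REDUCE_MIN().
  tflite::ops::builtin::BuiltinOpResolver resolver;
  std::unique_ptr<tflite::Interpreter> interpreter;
  tflite::InterpreterBuilder(model, resolver)(&interpreter);
  return interpreter;
}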

View File

@@ -622,6 +622,7 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
}
case BuiltinOperator_MEAN:
case BuiltinOperator_REDUCE_MAX:
case BuiltinOperator_REDUCE_MIN:
case BuiltinOperator_REDUCE_PROD:
case BuiltinOperator_SUM: {
auto* params = MallocPOD<TfLiteReducerParams>();

View File

@@ -636,6 +636,7 @@ TfLiteStatus AddOpsAndParams(
case tflite::BuiltinOperator_NOT_EQUAL:
case tflite::BuiltinOperator_SUM:
case tflite::BuiltinOperator_REDUCE_MAX:
case tflite::BuiltinOperator_REDUCE_MIN:
case tflite::BuiltinOperator_REDUCE_PROD:
case tflite::BuiltinOperator_SQRT:
case tflite::BuiltinOperator_RSQRT:

View File

@@ -170,6 +170,7 @@ enum BuiltinOperator : byte {
LOGICAL_AND = 86,
LOGICAL_NOT = 87,
UNPACK = 88,
REDUCE_MIN = 89,
}
// Options for the builtin operators.

View File

@@ -377,11 +377,12 @@ enum BuiltinOperator {
BuiltinOperator_LOGICAL_AND = 86,
BuiltinOperator_LOGICAL_NOT = 87,
BuiltinOperator_UNPACK = 88,
BuiltinOperator_REDUCE_MIN = 89,
BuiltinOperator_MIN = BuiltinOperator_ADD,
BuiltinOperator_MAX = BuiltinOperator_UNPACK
BuiltinOperator_MAX = BuiltinOperator_REDUCE_MIN
};
inline BuiltinOperator (&EnumValuesBuiltinOperator())[88] {
inline BuiltinOperator (&EnumValuesBuiltinOperator())[89] {
static BuiltinOperator values[] = {
BuiltinOperator_ADD,
BuiltinOperator_AVERAGE_POOL_2D,
@@ -470,7 +471,8 @@ inline BuiltinOperator (&EnumValuesBuiltinOperator())[88] {
BuiltinOperator_ONE_HOT,
BuiltinOperator_LOGICAL_AND,
BuiltinOperator_LOGICAL_NOT,
BuiltinOperator_UNPACK
BuiltinOperator_UNPACK,
BuiltinOperator_REDUCE_MIN
};
return values;
}
@@ -566,6 +568,7 @@ inline const char **EnumNamesBuiltinOperator() {
"LOGICAL_AND",
"LOGICAL_NOT",
"UNPACK",
"REDUCE_MIN",
nullptr
};
return names;

View File

@@ -926,6 +926,11 @@ def make_reduce_max_tests(zip_path):
return make_reduce_tests(tf.reduce_max)(zip_path)
def make_reduce_min_tests(zip_path):
"""Make a set of tests to do min."""
return make_reduce_tests(tf.reduce_min)(zip_path)
def make_exp_tests(zip_path):
"""Make a set of tests to do exp."""

View File

@@ -2118,7 +2118,7 @@ void ConvertOperator(const Model& model, const Operator& src_op,
tensorflow_graph, "Prod");
} else if (src_op.type == OperatorType::kReduceMin) {
ConvertReduceOperator(model,
static_cast<const TensorFlowMaxOperator&>(src_op),
static_cast<const TensorFlowMinOperator&>(src_op),
tensorflow_graph, "Min");
} else if (src_op.type == OperatorType::kReduceMax) {
ConvertReduceOperator(model,

View File

@@ -787,6 +787,25 @@ class ReduceMax
int GetVersion(const Operator& op) const override { return 1; }
};
class ReduceMin
: public BuiltinOperator<TensorFlowSumOperator, ::tflite::ReducerOptions,
::tflite::BuiltinOptions_ReducerOptions> {
public:
using BuiltinOperator::BuiltinOperator;
flatbuffers::Offset<TfLiteOptions> WriteOptions(
const TocoOperator& op,
flatbuffers::FlatBufferBuilder* builder) const override {
return ::tflite::CreateReducerOptions(*builder, op.keep_dims);
}
void ReadOptions(const TfLiteOptions& options,
TocoOperator* op) const override {
op->keep_dims = options.keep_dims();
}
int GetVersion(const Operator& op) const override { return 1; }
};
class ReduceProd
: public BuiltinOperator<TensorFlowSumOperator, ::tflite::ReducerOptions,
::tflite::BuiltinOptions_ReducerOptions> {
@@ -1297,6 +1316,8 @@ std::vector<std::unique_ptr<BaseOperator>> BuildOperatorList() {
OperatorType::kReduceProd));
ops.push_back(MakeUnique<ReduceMax>(::tflite::BuiltinOperator_REDUCE_MAX,
OperatorType::kReduceMax));
ops.push_back(MakeUnique<ReduceMin>(::tflite::BuiltinOperator_REDUCE_MIN,
OperatorType::kReduceMin));
ops.push_back(
MakeUnique<ResizeBilinear>(::tflite::BuiltinOperator_RESIZE_BILINEAR,
OperatorType::kResizeBilinear));