From 81d90de884ad6005e57e0d7d333e8476659d00c2 Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Tue, 21 Aug 2018 11:54:42 -0700 Subject: [PATCH] Support reduce_min PiperOrigin-RevId: 209634537 --- tensorflow/contrib/lite/build_def.bzl | 1 + tensorflow/contrib/lite/builtin_ops.h | 1 + .../internal/reference/reference_ops.h | 116 ++++++------ tensorflow/contrib/lite/kernels/reduce.cc | 56 ++++++ .../contrib/lite/kernels/reduce_test.cc | 170 ++++++++++++++++++ tensorflow/contrib/lite/kernels/register.cc | 2 + tensorflow/contrib/lite/model.cc | 1 + tensorflow/contrib/lite/nnapi_delegate.cc | 1 + tensorflow/contrib/lite/schema/schema.fbs | 1 + .../contrib/lite/schema/schema_generated.h | 9 +- .../contrib/lite/testing/generate_examples.py | 5 + .../contrib/lite/toco/export_tensorflow.cc | 2 +- .../contrib/lite/toco/tflite/operator.cc | 21 +++ 13 files changed, 332 insertions(+), 54 deletions(-) diff --git a/tensorflow/contrib/lite/build_def.bzl b/tensorflow/contrib/lite/build_def.bzl index 05d0b453ab1..45a0ded7eb6 100644 --- a/tensorflow/contrib/lite/build_def.bzl +++ b/tensorflow/contrib/lite/build_def.bzl @@ -267,6 +267,7 @@ def generated_test_models(): "prelu", "pow", "reduce_max", + "reduce_min", "reduce_prod", "relu", "relu1", diff --git a/tensorflow/contrib/lite/builtin_ops.h b/tensorflow/contrib/lite/builtin_ops.h index e0e411e7a15..706f64a84ac 100644 --- a/tensorflow/contrib/lite/builtin_ops.h +++ b/tensorflow/contrib/lite/builtin_ops.h @@ -114,6 +114,7 @@ typedef enum { kTfLiteBuiltinLogicalAnd = 86, kTfLiteBuiltinLogicalNot = 87, kTfLiteBuiltinUnpack = 88, + kTfLiteBuiltinReduceMin = 89, } TfLiteBuiltinOperator; #ifdef __cplusplus diff --git a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h index 556049d8a69..2ebc6084be0 100644 --- a/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h +++ b/tensorflow/contrib/lite/kernels/internal/reference/reference_ops.h @@ -3896,39 +3896,16 @@ inline bool InitTensorDataForReduce(const int* dims, const int num_dims, return true; } -// Computes the sum of elements across dimensions given in axis. +// Computes the generic value (i.e., sum/max/min/prod) of elements across +// dimensions given in axis. It needs to pass in init_value and reducer. template -inline bool Sum(const T* input_data, const int* input_dims, - const int input_num_dims, T* output_data, - const int* output_dims, const int output_num_dims, - const int* axis, const int num_axis_dimensions, bool keep_dims, - int* temp_index, int* resolved_axis) { - // Reset output data. - if (!InitTensorDataForReduce(output_dims, output_num_dims, static_cast(0), - output_data)) { - return false; - } - - // Resolve axis. - int num_resolved_axis = 0; - if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis, - &num_resolved_axis)) { - return false; - } - - return ReduceSumImpl(input_data, input_dims, output_dims, - input_num_dims, output_num_dims, resolved_axis, - num_resolved_axis, temp_index, output_data); -} - -// Computes the max of elements across dimensions given in axis. 
-template <typename T>
-inline bool ReduceMax(const T* input_data, const int* input_dims,
-                      const int input_num_dims, T* output_data,
-                      const int* output_dims, const int output_num_dims,
-                      const int* axis, const int64_t num_axis_dimensions,
-                      bool keep_dims, int* temp_index, int* resolved_axis) {
-  T init_value = std::numeric_limits<T>::lowest();
+inline bool ReduceGeneric(const T* input_data, const int* input_dims,
+                          const int input_num_dims, T* output_data,
+                          const int* output_dims, const int output_num_dims,
+                          const int* axis, const int64_t num_axis_dimensions,
+                          bool keep_dims, int* temp_index, int* resolved_axis,
+                          T init_value,
+                          T reducer(const T current, const T in)) {
   // Reset output data.
   if (!InitTensorDataForReduce(output_dims, output_num_dims, init_value,
                                output_data)) {
     return false;
@@ -3942,14 +3919,63 @@ inline bool ReduceMax(const T* input_data, const int* input_dims,
     return false;
   }
 
-  auto reducer = [](const T current, const T in) -> T {
-    return (in > current) ? in : current;
-  };
   return Reduce(input_data, input_dims, output_dims, input_num_dims,
                 output_num_dims, resolved_axis, num_resolved_axis, temp_index,
                 reducer, output_data);
 }
 
+// Computes the sum of elements across dimensions given in axis.
+template <typename T>
+inline bool Sum(const T* input_data, const int* input_dims,
+                const int input_num_dims, T* output_data,
+                const int* output_dims, const int output_num_dims,
+                const int* axis, const int num_axis_dimensions, bool keep_dims,
+                int* temp_index, int* resolved_axis) {
+  T init_value = static_cast<T>(0);
+
+  auto reducer = [](const T current, const T in) -> T { return current + in; };
+  return ReduceGeneric(input_data, input_dims, input_num_dims, output_data,
+                       output_dims, output_num_dims, axis,
+                       num_axis_dimensions, keep_dims, temp_index,
+                       resolved_axis, init_value, reducer);
+}
+
+// Computes the max of elements across dimensions given in axis.
+template <typename T>
+inline bool ReduceMax(const T* input_data, const int* input_dims,
+                      const int input_num_dims, T* output_data,
+                      const int* output_dims, const int output_num_dims,
+                      const int* axis, const int64_t num_axis_dimensions,
+                      bool keep_dims, int* temp_index, int* resolved_axis) {
+  T init_value = std::numeric_limits<T>::lowest();
+
+  auto reducer = [](const T current, const T in) -> T {
+    return (in > current) ? in : current;
+  };
+  return ReduceGeneric(input_data, input_dims, input_num_dims, output_data,
+                       output_dims, output_num_dims, axis,
+                       num_axis_dimensions, keep_dims, temp_index,
+                       resolved_axis, init_value, reducer);
+}
+
+// Computes the min of elements across dimensions given in axis.
+template <typename T>
+inline bool ReduceMin(const T* input_data, const int* input_dims,
+                      const int input_num_dims, T* output_data,
+                      const int* output_dims, const int output_num_dims,
+                      const int* axis, const int64_t num_axis_dimensions,
+                      bool keep_dims, int* temp_index, int* resolved_axis) {
+  T init_value = std::numeric_limits<T>::max();
+
+  auto reducer = [](const T current, const T in) -> T {
+    return (in < current) ? in : current;
+  };
+  return ReduceGeneric(input_data, input_dims, input_num_dims, output_data,
+                       output_dims, output_num_dims, axis,
+                       num_axis_dimensions, keep_dims, temp_index,
+                       resolved_axis, init_value, reducer);
+}
+
 // Computes the prod of elements across dimensions given in axis.
 template <typename T>
 inline bool ReduceProd(const T* input_data, const int* input_dims,
@@ -3957,23 +3983,13 @@ inline bool ReduceProd(const T* input_data, const int* input_dims,
                        const int* output_dims, const int output_num_dims,
                        const int* axis, const int64_t num_axis_dimensions,
                        bool keep_dims, int* temp_index, int* resolved_axis) {
-  // Reset output data.
-  if (!InitTensorDataForReduce(output_dims, output_num_dims, static_cast<T>(1),
-                               output_data)) {
-    return false;
-  }
-
-  // Resolve axis.
-  int num_resolved_axis = 0;
-  if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
-                   &num_resolved_axis)) {
-    return false;
-  }
+  T init_value = static_cast<T>(1);
 
   auto reducer = [](const T current, const T in) -> T { return in * current; };
-  return Reduce(input_data, input_dims, output_dims, input_num_dims,
-                output_num_dims, resolved_axis, num_resolved_axis,
-                temp_index, reducer, output_data);
+  return ReduceGeneric(input_data, input_dims, input_num_dims, output_data,
+                       output_dims, output_num_dims, axis,
+                       num_axis_dimensions, keep_dims, temp_index,
+                       resolved_axis, init_value, reducer);
 }
 
 // Computes the mean of elements across dimensions given in axis.
diff --git a/tensorflow/contrib/lite/kernels/reduce.cc b/tensorflow/contrib/lite/kernels/reduce.cc
index e99f67c7258..29374a0c27b 100644
--- a/tensorflow/contrib/lite/kernels/reduce.cc
+++ b/tensorflow/contrib/lite/kernels/reduce.cc
@@ -412,6 +412,54 @@ TfLiteStatus EvalMax(TfLiteContext* context, TfLiteNode* node) {
   return kTfLiteOk;
 }
 
+template <KernelType kernel_type>
+TfLiteStatus EvalMin(TfLiteContext* context, TfLiteNode* node) {
+  OpContext op_context(context, node);
+  int64_t num_axis = NumElements(op_context.axis);
+  TfLiteTensor* temp_index = GetTemporary(context, node, /*index=*/0);
+  TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
+  // Resize the output tensor if the output tensor is dynamic.
+  if (IsDynamicTensor(op_context.output)) {
+    TF_LITE_ENSURE_OK(context,
+                      ResizeTempAxis(context, &op_context, resolved_axis));
+    TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
+  }
+
+#define TF_LITE_MIN(kernel_type, data_type)                          \
+  kernel_type::ReduceMin<>(                                          \
+      GetTensorData<data_type>(op_context.input),                    \
+      op_context.input->dims->data, op_context.input->dims->size,    \
+      GetTensorData<data_type>(op_context.output),                   \
+      op_context.output->dims->data, op_context.output->dims->size,  \
+      GetTensorData<int>(op_context.axis), num_axis,                 \
+      op_context.params->keep_dims, GetTensorData<int>(temp_index),  \
+      GetTensorData<int>(resolved_axis))
+
+  if (kernel_type == kReference) {
+    switch (op_context.input->type) {
+      case kTfLiteFloat32:
+        TF_LITE_ENSURE(context, TF_LITE_MIN(reference_ops, float));
+        break;
+      case kTfLiteInt32:
+        TF_LITE_ENSURE(context, TF_LITE_MIN(reference_ops, int));
+        break;
+      case kTfLiteInt64:
+        TF_LITE_ENSURE(context, TF_LITE_MIN(reference_ops, int64_t));
+        break;
+      case kTfLiteUInt8:
+        TF_LITE_ENSURE_EQ(context, op_context.input->params.scale,
+                          op_context.output->params.scale);
+        TF_LITE_ENSURE_EQ(context, op_context.input->params.zero_point,
+                          op_context.output->params.zero_point);
+        TF_LITE_ENSURE(context, TF_LITE_MIN(reference_ops, uint8_t));
+        break;
+      default:
+        return kTfLiteError;
+    }
+  }
+#undef TF_LITE_MIN
+  return kTfLiteOk;
+}
 }  // namespace reduce
 
 TfLiteRegistration* Register_MEAN_REF() {
@@ -442,6 +490,13 @@ TfLiteRegistration* Register_REDUCE_MAX_REF() {
   return &r;
 }
 
+TfLiteRegistration* Register_REDUCE_MIN_REF() {
+  static TfLiteRegistration r = {reduce::Init, reduce::Free,
+                                 reduce::PrepareSimple,
+                                 reduce::EvalMin<reduce::kReference>};
+  return &r;
+}
+
 // TODO(kanlig): add optimized implementation of Mean.
 TfLiteRegistration* Register_MEAN() { return Register_MEAN_REF(); }
 TfLiteRegistration* Register_SUM() { return Register_SUM_REF(); }
@@ -449,6 +504,7 @@ TfLiteRegistration* Register_REDUCE_PROD() {
   return Register_REDUCE_PROD_REF();
 }
 TfLiteRegistration* Register_REDUCE_MAX() { return Register_REDUCE_MAX_REF(); }
+TfLiteRegistration* Register_REDUCE_MIN() { return Register_REDUCE_MIN_REF(); }
 
 }  // namespace builtin
 }  // namespace ops
diff --git a/tensorflow/contrib/lite/kernels/reduce_test.cc b/tensorflow/contrib/lite/kernels/reduce_test.cc
index 5d432d34ef5..d9aca64356b 100644
--- a/tensorflow/contrib/lite/kernels/reduce_test.cc
+++ b/tensorflow/contrib/lite/kernels/reduce_test.cc
@@ -169,6 +169,35 @@ class MaxOpDynamicModel : public BaseOpModel {
   }
 };
 
+// Model for the test case where axis is a const tensor.
+class MinOpConstModel : public BaseOpModel {
+ public:
+  MinOpConstModel(const TensorData& input, const TensorData& output,
+                  std::initializer_list<int> axis_shape,
+                  std::initializer_list<int> axis, bool keep_dims) {
+    input_ = AddInput(input);
+    axis_ = AddConstInput(TensorType_INT32, axis, axis_shape);
+    output_ = AddOutput(output);
+    SetBuiltinOp(BuiltinOperator_REDUCE_MIN, BuiltinOptions_ReducerOptions,
+                 CreateReducerOptions(builder_, keep_dims).Union());
+    BuildInterpreter({GetShape(input_)});
+  }
+};
+
+// Model for the test case where axis is a dynamic tensor.
+class MinOpDynamicModel : public BaseOpModel { + public: + MinOpDynamicModel(const TensorData& input, const TensorData& output, + const TensorData& axis, bool keep_dims) { + input_ = AddInput(input); + axis_ = AddInput(axis); + output_ = AddOutput(output); + SetBuiltinOp(BuiltinOperator_REDUCE_MIN, BuiltinOptions_ReducerOptions, + CreateReducerOptions(builder_, keep_dims).Union()); + BuildInterpreter({GetShape(input_)}); + } +}; + // for quantized Add, the error shouldn't exceed step float GetTolerance(int min, int max) { return (max - min) / 255.0; } @@ -665,6 +694,147 @@ TEST(DynamicUint8MaxOpTest, Scalar) { ElementsAreArray(ArrayFloatNear({11.1294}, kQuantizedTolerance))); } +// Tests for reduce_min + +TEST(ConstFloatMinOpTest, NotKeepDims) { + std::vector data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, + 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0}; + MinOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {2}}, + {4}, {1, 0, -3, -3}, false); + m.SetInput(data); + m.Invoke(); + EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2})); + EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({1, 2}))); +} + +TEST(ConstFloatMinOpTest, KeepDims) { + std::vector data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, + 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0}; + MinOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {3}}, + {2}, {0, 2}, true); + m.SetInput(data); + m.Invoke(); + EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1})); + EXPECT_THAT(m.GetOutput(), + ElementsAreArray(ArrayFloatNear({1, 3, 5}))); +} + +TEST(DynamicFloatMinOpTest, NotKeepDims) { + std::vector data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, + 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0}; + MinOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}}, + {TensorType_FLOAT32, {2}}, {TensorType_INT32, {4}}, + false); + std::vector axis = {1, 0, -3, -3}; + m.SetAxis(axis); + m.SetInput(data); + m.Invoke(); + EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2})); + EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({1, 2}))); +} + +TEST(DynamicFloatMinOpTest, KeepDims) { + std::vector data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, + 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, + 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0}; + MinOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}}, + {TensorType_FLOAT32, {3}}, {TensorType_INT32, {2}}, true); + std::vector axis = {0, 2}; + m.SetAxis(axis); + m.SetInput(data); + m.Invoke(); + EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1})); + EXPECT_THAT(m.GetOutput(), + ElementsAreArray(ArrayFloatNear({1, 3, 5}))); +} + +TEST(DynamicFloatMinOpTest, Scale) { + std::vector data = {9.527}; + MinOpDynamicModel m({TensorType_FLOAT32, {1}}, {TensorType_FLOAT32, {1}}, + {TensorType_INT32, {1}}, true); + std::vector axis = {0}; + m.SetAxis(axis); + m.SetInput(data); + m.Invoke(); + EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1})); + EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({9.527}))); +} + +TEST(ConstUint8MinOpTest, NotKeepDims) { + float kQuantizedTolerance = GetTolerance(-1.0, 1.0); + std::vector data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6}; + MinOpConstModel m({TensorType_UINT8, {1, 3, 2}, -1.0, 1.0}, + {TensorType_UINT8, {2}, -1.0, 1.0}, {1}, {1}, false); + m.QuantizeAndPopulate(m.Input(), data); + m.Invoke(); + EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 
2})); + EXPECT_THAT( + m.GetDequantizedOutput(), + ElementsAreArray(ArrayFloatNear({0.294117, 0.2}, kQuantizedTolerance))); +} + +TEST(ConstUint8MinOpTest, KeepDims) { + float kQuantizedTolerance = GetTolerance(-1.0, 1.0); + std::vector data = {0.4, 0.2, 0.3, 0.4, 0.5, 0.6}; + MinOpConstModel m({TensorType_UINT8, {3, 2}, -1.0, 1.0}, + {TensorType_UINT8, {3}, -1.0, 1.0}, {1}, {1}, true); + m.QuantizeAndPopulate(m.Input(), data); + m.Invoke(); + EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 1})); + EXPECT_THAT( + m.GetDequantizedOutput(), + ElementsAreArray(ArrayFloatNear({0.2, 0.3, 0.5}, kQuantizedTolerance))); +} + +TEST(DynamicUint8MinOpTest, NotKeepDims) { + float kQuantizedTolerance = GetTolerance(-5.0, 2.0); + std::vector data = {1.3, -4.8, -3.6, 0.24}; + MinOpDynamicModel m({TensorType_UINT8, {2, 2}, -5.0, 2.0}, + {TensorType_UINT8, {2}, -5.0, 2.0}, + {TensorType_INT32, {1}}, false); + std::vector axis = {1}; + m.SetAxis(axis); + m.QuantizeAndPopulate(m.Input(), data); + m.Invoke(); + EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2})); + EXPECT_THAT( + m.GetDequantizedOutput(), + ElementsAreArray(ArrayFloatNear({-4.807843, -3.6}, kQuantizedTolerance))); +} + +TEST(DynamicUint8MinOpTest, KeepDims) { + float kQuantizedTolerance = GetTolerance(-10.0, 12.0); + std::vector data = {11.14, -0.14, 7.423, 0.879}; + MinOpDynamicModel m({TensorType_UINT8, {2, 2}, -10.0, 12.0}, + {TensorType_UINT8, {2}, -10.0, 12.0}, + {TensorType_INT32, {1}}, true); + std::vector axis = {0}; + m.SetAxis(axis); + m.QuantizeAndPopulate(m.Input(), data); + m.Invoke(); + EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2})); + EXPECT_THAT(m.GetDequantizedOutput(), + ElementsAreArray( + ArrayFloatNear({7.427451, -0.164706}, kQuantizedTolerance))); +} + +TEST(DynamicUint8MinOpTest, Scalar) { + float kQuantizedTolerance = GetTolerance(-10.0, 12.0); + std::vector data = {11.14}; + MinOpDynamicModel m({TensorType_UINT8, {}, -10.0, 12.0}, + {TensorType_UINT8, {}, -10.0, 12.0}, + {TensorType_INT32, {1}}, true); + std::vector axis = {0}; + m.QuantizeAndPopulate(m.Input(), data); + m.Invoke(); + EXPECT_THAT(m.GetOutputShape(), IsEmpty()); + EXPECT_THAT(m.GetDequantizedOutput(), + ElementsAreArray(ArrayFloatNear({11.1294}, kQuantizedTolerance))); +} + } // namespace } // namespace tflite diff --git a/tensorflow/contrib/lite/kernels/register.cc b/tensorflow/contrib/lite/kernels/register.cc index 9681b900b7e..10d1fcc5bc3 100644 --- a/tensorflow/contrib/lite/kernels/register.cc +++ b/tensorflow/contrib/lite/kernels/register.cc @@ -94,6 +94,7 @@ TfLiteRegistration* Register_NEG(); TfLiteRegistration* Register_SUM(); TfLiteRegistration* Register_REDUCE_PROD(); TfLiteRegistration* Register_REDUCE_MAX(); +TfLiteRegistration* Register_REDUCE_MIN(); TfLiteRegistration* Register_SELECT(); TfLiteRegistration* Register_SLICE(); TfLiteRegistration* Register_SIN(); @@ -219,6 +220,7 @@ BuiltinOpResolver::BuiltinOpResolver() { AddBuiltin(BuiltinOperator_SUM, Register_SUM()); AddBuiltin(BuiltinOperator_REDUCE_PROD, Register_REDUCE_PROD()); AddBuiltin(BuiltinOperator_REDUCE_MAX, Register_REDUCE_MAX()); + AddBuiltin(BuiltinOperator_REDUCE_MIN, Register_REDUCE_MIN()); AddBuiltin(BuiltinOperator_EXPAND_DIMS, Register_EXPAND_DIMS()); AddBuiltin(BuiltinOperator_SPARSE_TO_DENSE, Register_SPARSE_TO_DENSE()); AddBuiltin(BuiltinOperator_EQUAL, Register_EQUAL()); diff --git a/tensorflow/contrib/lite/model.cc b/tensorflow/contrib/lite/model.cc index 5988b7a3a72..5f8d5c318a3 100644 --- a/tensorflow/contrib/lite/model.cc +++ 
b/tensorflow/contrib/lite/model.cc @@ -622,6 +622,7 @@ TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type, } case BuiltinOperator_MEAN: case BuiltinOperator_REDUCE_MAX: + case BuiltinOperator_REDUCE_MIN: case BuiltinOperator_REDUCE_PROD: case BuiltinOperator_SUM: { auto* params = MallocPOD(); diff --git a/tensorflow/contrib/lite/nnapi_delegate.cc b/tensorflow/contrib/lite/nnapi_delegate.cc index 5d8e7a50e2c..d287aa635cc 100644 --- a/tensorflow/contrib/lite/nnapi_delegate.cc +++ b/tensorflow/contrib/lite/nnapi_delegate.cc @@ -636,6 +636,7 @@ TfLiteStatus AddOpsAndParams( case tflite::BuiltinOperator_NOT_EQUAL: case tflite::BuiltinOperator_SUM: case tflite::BuiltinOperator_REDUCE_MAX: + case tflite::BuiltinOperator_REDUCE_MIN: case tflite::BuiltinOperator_REDUCE_PROD: case tflite::BuiltinOperator_SQRT: case tflite::BuiltinOperator_RSQRT: diff --git a/tensorflow/contrib/lite/schema/schema.fbs b/tensorflow/contrib/lite/schema/schema.fbs index e2c537fa4d8..58a94ff4a58 100644 --- a/tensorflow/contrib/lite/schema/schema.fbs +++ b/tensorflow/contrib/lite/schema/schema.fbs @@ -170,6 +170,7 @@ enum BuiltinOperator : byte { LOGICAL_AND = 86, LOGICAL_NOT = 87, UNPACK = 88, + REDUCE_MIN = 89, } // Options for the builtin operators. diff --git a/tensorflow/contrib/lite/schema/schema_generated.h b/tensorflow/contrib/lite/schema/schema_generated.h index d367d9a93a4..a2ea43f3701 100755 --- a/tensorflow/contrib/lite/schema/schema_generated.h +++ b/tensorflow/contrib/lite/schema/schema_generated.h @@ -377,11 +377,12 @@ enum BuiltinOperator { BuiltinOperator_LOGICAL_AND = 86, BuiltinOperator_LOGICAL_NOT = 87, BuiltinOperator_UNPACK = 88, + BuiltinOperator_REDUCE_MIN = 89, BuiltinOperator_MIN = BuiltinOperator_ADD, - BuiltinOperator_MAX = BuiltinOperator_UNPACK + BuiltinOperator_MAX = BuiltinOperator_REDUCE_MIN }; -inline BuiltinOperator (&EnumValuesBuiltinOperator())[88] { +inline BuiltinOperator (&EnumValuesBuiltinOperator())[89] { static BuiltinOperator values[] = { BuiltinOperator_ADD, BuiltinOperator_AVERAGE_POOL_2D, @@ -470,7 +471,8 @@ inline BuiltinOperator (&EnumValuesBuiltinOperator())[88] { BuiltinOperator_ONE_HOT, BuiltinOperator_LOGICAL_AND, BuiltinOperator_LOGICAL_NOT, - BuiltinOperator_UNPACK + BuiltinOperator_UNPACK, + BuiltinOperator_REDUCE_MIN }; return values; } @@ -566,6 +568,7 @@ inline const char **EnumNamesBuiltinOperator() { "LOGICAL_AND", "LOGICAL_NOT", "UNPACK", + "REDUCE_MIN", nullptr }; return names; diff --git a/tensorflow/contrib/lite/testing/generate_examples.py b/tensorflow/contrib/lite/testing/generate_examples.py index 597ee8fb1e8..599c82940e1 100644 --- a/tensorflow/contrib/lite/testing/generate_examples.py +++ b/tensorflow/contrib/lite/testing/generate_examples.py @@ -926,6 +926,11 @@ def make_reduce_max_tests(zip_path): return make_reduce_tests(tf.reduce_max)(zip_path) +def make_reduce_min_tests(zip_path): + """Make a set of tests to do min.""" + return make_reduce_tests(tf.reduce_min)(zip_path) + + def make_exp_tests(zip_path): """Make a set of tests to do exp.""" diff --git a/tensorflow/contrib/lite/toco/export_tensorflow.cc b/tensorflow/contrib/lite/toco/export_tensorflow.cc index 02671f0408f..f489c5ac653 100644 --- a/tensorflow/contrib/lite/toco/export_tensorflow.cc +++ b/tensorflow/contrib/lite/toco/export_tensorflow.cc @@ -2118,7 +2118,7 @@ void ConvertOperator(const Model& model, const Operator& src_op, tensorflow_graph, "Prod"); } else if (src_op.type == OperatorType::kReduceMin) { ConvertReduceOperator(model, - static_cast(src_op), + 
static_cast(src_op), tensorflow_graph, "Min"); } else if (src_op.type == OperatorType::kReduceMax) { ConvertReduceOperator(model, diff --git a/tensorflow/contrib/lite/toco/tflite/operator.cc b/tensorflow/contrib/lite/toco/tflite/operator.cc index 75808f2b690..dcb5fff39fe 100644 --- a/tensorflow/contrib/lite/toco/tflite/operator.cc +++ b/tensorflow/contrib/lite/toco/tflite/operator.cc @@ -787,6 +787,25 @@ class ReduceMax int GetVersion(const Operator& op) const override { return 1; } }; +class ReduceMin + : public BuiltinOperator { + public: + using BuiltinOperator::BuiltinOperator; + flatbuffers::Offset WriteOptions( + const TocoOperator& op, + flatbuffers::FlatBufferBuilder* builder) const override { + return ::tflite::CreateReducerOptions(*builder, op.keep_dims); + } + + void ReadOptions(const TfLiteOptions& options, + TocoOperator* op) const override { + op->keep_dims = options.keep_dims(); + } + + int GetVersion(const Operator& op) const override { return 1; } +}; + class ReduceProd : public BuiltinOperator { @@ -1297,6 +1316,8 @@ std::vector> BuildOperatorList() { OperatorType::kReduceProd)); ops.push_back(MakeUnique(::tflite::BuiltinOperator_REDUCE_MAX, OperatorType::kReduceMax)); + ops.push_back(MakeUnique(::tflite::BuiltinOperator_REDUCE_MIN, + OperatorType::kReduceMin)); ops.push_back( MakeUnique(::tflite::BuiltinOperator_RESIZE_BILINEAR, OperatorType::kResizeBilinear));
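
Reviewer note on the refactor above: every reduction touched by this patch (sum, prod, max, and the new min) differs only in its identity value and its binary combine step, which is exactly what ReduceGeneric factors out. The snippet below is a minimal standalone sketch of that pattern for readers who want to try it outside the TF Lite tree; it is not TF Lite code, and the name MinimalReduce and the example driver are invented for illustration only.

#include <cstdio>
#include <limits>

// Folds data[0..n) into one value: start from `init`, then apply `reducer`
// once per element. This mirrors the init_value/reducer contract that
// ReduceGeneric uses in the patch (minus the multi-axis bookkeeping).
template <typename T>
T MinimalReduce(const T* data, int n, T init, T reducer(const T, const T)) {
  T acc = init;
  for (int i = 0; i < n; ++i) acc = reducer(acc, data[i]);
  return acc;
}

int main() {
  const float values[] = {3.0f, -1.5f, 7.25f, 0.5f};
  // reduce_min: identity is numeric_limits::max(), combine keeps the smaller.
  float min_v = MinimalReduce<float>(
      values, 4, std::numeric_limits<float>::max(),
      [](const float current, const float in) { return in < current ? in : current; });
  // reduce_max: identity is numeric_limits::lowest(), combine keeps the larger.
  float max_v = MinimalReduce<float>(
      values, 4, std::numeric_limits<float>::lowest(),
      [](const float current, const float in) { return in > current ? in : current; });
  std::printf("min=%f max=%f\n", min_v, max_v);  // min=-1.500000 max=7.250000
  return 0;
}

This is also why the patch can delete the hand-written bodies of Sum, ReduceMax, and ReduceProd and add ReduceMin as a short wrapper: only the init value and the reducer lambda change between them.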