Add built-in operator AddN. It only supports float and int32 for now.
PiperOrigin-RevId: 232365698
parent 7f026bd2cd
commit df211323b8
@@ -4,7 +4,7 @@ package(

 licenses(["notice"])  # Apache 2.0

-load("//tensorflow:tensorflow.bzl", "tf_cc_test", "if_not_windows")
+load("//tensorflow:tensorflow.bzl", "if_not_windows", "tf_cc_test")
 load("//tensorflow/lite:build_def.bzl", "tflite_copts")
 load("//tensorflow/lite:special_rules.bzl", "tflite_portable_test_suite")
@@ -190,7 +190,7 @@ cc_library(
         ":string",
         ":util",
         "//tensorflow/lite/c:c_api_internal",
-        "//tensorflow/lite/core/api:api",
+        "//tensorflow/lite/core/api",
         "//tensorflow/lite/nnapi:nnapi_implementation",
         "//tensorflow/lite/profiling:profiler",
         "//tensorflow/lite/schema:schema_fbs",
@@ -226,6 +226,7 @@ def generated_test_models():
     return [
         "abs",
         "add",
+        "add_n",
         "arg_min_max",
         "avg_pool",
         "batch_to_space_nd",
@@ -165,6 +165,17 @@ Options {
 }
 ```

+**ADD_N**
+
+```
+Inputs {
+  0-N: any number of tensors (must have same size and shape)
+}
+Outputs {
+  0: elementwise sum of the input tensors
+}
+```
+
 **ARG_MAX**

 ```
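The ADD_N entry above boils down to an elementwise sum over N same-shaped inputs. As a minimal standalone sketch of that contract (a hypothetical `AddN` helper in plain C++, not the TFLite kernel itself), using the integer values from the test added later in this commit:

```
#include <cstdint>
#include <iostream>
#include <vector>

// Elementwise sum of N equally sized buffers, per the ADD_N spec above.
template <typename T>
std::vector<T> AddN(const std::vector<std::vector<T>>& inputs) {
  std::vector<T> output(inputs[0].size(), T(0));
  for (const auto& input : inputs)
    for (size_t i = 0; i < output.size(); ++i) output[i] += input[i];
  return output;
}

int main() {
  const auto out =
      AddN<int32_t>({{-20, 2, 7, 8}, {1, 2, 3, 5}, {10, -5, 1, -2}});
  for (int32_t v : out) std::cout << v << ' ';  // prints: -9 -1 11 11
  std::cout << '\n';
}
```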
@@ -152,6 +152,7 @@ cc_library(
     srcs = [
         "activations.cc",
         "add.cc",
+        "add_n.cc",
         "arg_min_max.cc",
         "audio_spectrogram.cc",
         "basic_rnn.cc",
@@ -355,6 +356,18 @@ tf_cc_test(
     ],
 )

+tf_cc_test(
+    name = "add_n_test",
+    size = "small",
+    srcs = ["add_n_test.cc"],
+    deps = [
+        ":builtin_ops",
+        ":test_util",
+        "//tensorflow/lite:framework",
+        "@com_google_googletest//:gtest",
+    ],
+)
+
 tf_cc_test(
     name = "arg_min_max_test",
     size = "small",
88  tensorflow/lite/kernels/add_n.cc  Normal file
@@ -0,0 +1,88 @@
+/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/c/c_api_internal.h"
+#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
+#include "tensorflow/lite/kernels/internal/tensor.h"
+#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "tensorflow/lite/kernels/kernel_util.h"
+
+namespace tflite {
+namespace ops {
+namespace builtin {
+namespace add_n {
+
+constexpr int kInputTensor1 = 0;
+constexpr int kOutputTensor = 0;
+
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+  int num_inputs = NumInputs(node);
+  TF_LITE_ENSURE(context, num_inputs >= 2);
+  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+
+  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
+  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
+  output->type = input1->type;
+
+  // Check that all input tensors have the same shape and type.
+  for (int i = kInputTensor1 + 1; i < num_inputs; ++i) {
+    const TfLiteTensor* input = GetInput(context, node, i);
+    TF_LITE_ENSURE(context, HaveSameShapes(input1, input));
+    TF_LITE_ENSURE_EQ(context, input1->type, input->type);
+  }
+
+  // Use the first input node's dimension to be the dimension of the output
+  // node.
+  TfLiteIntArray* input1_dims = input1->dims;
+  TfLiteIntArray* output_dims = TfLiteIntArrayCopy(input1_dims);
+  return context->ResizeTensor(context, output, output_dims);
+}
+
+template <typename T>
+void EvalAddN(TfLiteContext* context, TfLiteNode* node) {
+  // TODO(haoliang): Initialize all_inputs only once during init.
+  VectorOfTensors<T> all_inputs(*context, *node->inputs);
+  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
+  int num_inputs = NumInputs(node);
+  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
+  reference_ops::AddN<T>(GetTensorShape(input1), num_inputs, all_inputs.data(),
+                         GetTensorData<T>(output));
+}
+
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+  const TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
+  if (output->type == kTfLiteFloat32) {
+    EvalAddN<float>(context, node);
+  } else if (output->type == kTfLiteInt32) {
+    EvalAddN<int32_t>(context, node);
+  } else {
+    context->ReportError(context,
+                         "AddN only supports FLOAT32|INT32 now, got %s.",
+                         TfLiteTypeGetName(output->type));
+    return kTfLiteError;
+  }
+  return kTfLiteOk;
+}
+
+}  // namespace add_n
+
+TfLiteRegistration* Register_ADD_N() {
+  static TfLiteRegistration r = {/*init*/ nullptr, /*free*/ nullptr,
+                                 add_n::Prepare, add_n::Eval};
+  return &r;
+}
+
+}  // namespace builtin
+}  // namespace ops
+}  // namespace tflite
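`Register_ADD_N()` above is the hook that `BuiltinOpResolver` picks up further down in this commit. For context, a rough sketch of how a model containing ADD_N would then be loaded and run, assuming the TFLite C++ API of this era (`FlatBufferModel`, `InterpreterBuilder`); the `MakeInterpreter` helper is illustrative, not part of the commit:

```
#include <memory>

#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"

std::unique_ptr<tflite::Interpreter> MakeInterpreter(
    const tflite::FlatBufferModel& model) {
  // BuiltinOpResolver now maps BuiltinOperator_ADD_N to Register_ADD_N().
  tflite::ops::builtin::BuiltinOpResolver resolver;
  std::unique_ptr<tflite::Interpreter> interpreter;
  tflite::InterpreterBuilder(model, resolver)(&interpreter);
  return interpreter;
}
```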
98  tensorflow/lite/kernels/add_n_test.cc  Normal file
@@ -0,0 +1,98 @@
+/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include <vector>
+
+#include <gtest/gtest.h>
+#include "tensorflow/lite/interpreter.h"
+#include "tensorflow/lite/kernels/register.h"
+#include "tensorflow/lite/kernels/test_util.h"
+#include "tensorflow/lite/model.h"
+
+namespace tflite {
+namespace {
+
+using ::testing::ElementsAreArray;
+
+class BaseAddNOpModel : public SingleOpModel {
+ public:
+  BaseAddNOpModel(const std::vector<TensorData>& inputs,
+                  const TensorData& output) {
+    int num_inputs = inputs.size();
+    std::vector<std::vector<int>> input_shapes;
+
+    for (int i = 0; i < num_inputs; ++i) {
+      inputs_.push_back(AddInput(inputs[i]));
+      input_shapes.push_back(GetShape(inputs_[i]));
+    }
+
+    output_ = AddOutput(output);
+    SetBuiltinOp(BuiltinOperator_ADD_N, BuiltinOptions_AddNOptions,
+                 CreateAddNOptions(builder_).Union());
+    BuildInterpreter(input_shapes);
+  }
+
+  int input(int i) { return inputs_[i]; }
+
+ protected:
+  std::vector<int> inputs_;
+  int output_;
+};
+
+class FloatAddNOpModel : public BaseAddNOpModel {
+ public:
+  using BaseAddNOpModel::BaseAddNOpModel;
+
+  std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
+};
+
+class IntegerAddNOpModel : public BaseAddNOpModel {
+ public:
+  using BaseAddNOpModel::BaseAddNOpModel;
+
+  std::vector<int32_t> GetOutput() { return ExtractVector<int32_t>(output_); }
+};
+
+TEST(FloatAddNOpModel, AddMultipleTensors) {
+  FloatAddNOpModel m({{TensorType_FLOAT32, {1, 2, 2, 1}},
+                      {TensorType_FLOAT32, {1, 2, 2, 1}},
+                      {TensorType_FLOAT32, {1, 2, 2, 1}}},
+                     {TensorType_FLOAT32, {}});
+  m.PopulateTensor<float>(m.input(0), {-2.0, 0.2, 0.7, 0.8});
+  m.PopulateTensor<float>(m.input(1), {0.1, 0.2, 0.3, 0.5});
+  m.PopulateTensor<float>(m.input(2), {0.5, 0.1, 0.1, 0.2});
+  m.Invoke();
+  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.4, 0.5, 1.1, 1.5}));
+}
+
+TEST(IntegerAddNOpModel, AddMultipleTensors) {
+  IntegerAddNOpModel m({{TensorType_INT32, {1, 2, 2, 1}},
+                        {TensorType_INT32, {1, 2, 2, 1}},
+                        {TensorType_INT32, {1, 2, 2, 1}}},
+                       {TensorType_INT32, {}});
+  m.PopulateTensor<int32_t>(m.input(0), {-20, 2, 7, 8});
+  m.PopulateTensor<int32_t>(m.input(1), {1, 2, 3, 5});
+  m.PopulateTensor<int32_t>(m.input(2), {10, -5, 1, -2});
+  m.Invoke();
+  EXPECT_THAT(m.GetOutput(), ElementsAreArray({-9, -1, 11, 11}));
+}
+
+}  // namespace
+}  // namespace tflite
+
+int main(int argc, char** argv) {
+  ::tflite::LogToStderr();
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
@@ -702,6 +702,22 @@ inline void Add(const ArithmeticParams& params,
   }
 }

+// T is expected to be either float or int.
+template <typename T>
+inline void AddN(const RuntimeShape& input_shape, const size_t num_inputs,
+                 T* const* input_data, T* output_data) {
+  // All inputs and the output should have the same shape; this is checked
+  // during the Prepare stage.
+  const size_t size = input_shape.FlatSize();
+  for (size_t i = 0; i < size; ++i) {
+    T x = 0;
+    for (size_t j = 0; j < num_inputs; ++j) {
+      x += input_data[j][i];
+    }
+    output_data[i] = x;
+  }
+}
+
 // Element-wise add that can often be used for inner loop of broadcast add as
 // well as the non-broadcast add.
 inline void AddElementwise(int size, const ArithmeticParams& params,
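The reference kernel above is what `add_n.cc` dispatches to via `reference_ops::AddN`. A hedged sketch of exercising it directly with raw buffers, assuming the TFLite internal headers are on the include path; `AddNExample` is a made-up name for illustration:

```
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/types.h"

void AddNExample() {
  float a[4] = {1.f, 2.f, 3.f, 4.f};
  float b[4] = {10.f, 20.f, 30.f, 40.f};
  float* inputs[] = {a, b};
  float out[4];
  // All buffers share one shape; FlatSize() == 4 drives the flat loop.
  const tflite::RuntimeShape shape({1, 2, 2, 1});
  tflite::reference_ops::AddN<float>(shape, /*num_inputs=*/2, inputs, out);
  // out is now {11, 22, 33, 44}.
}
```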
@@ -131,6 +131,7 @@ TfLiteRegistration* Register_FILL();
 TfLiteRegistration* Register_MIRROR_PAD();
 TfLiteRegistration* Register_UNIQUE();
 TfLiteRegistration* Register_REVERSE_V2();
+TfLiteRegistration* Register_ADD_N();

 TfLiteStatus UnsupportedTensorFlowOp(TfLiteContext* context, TfLiteNode* node) {
   context->ReportError(
@@ -295,6 +296,7 @@ BuiltinOpResolver::BuiltinOpResolver() {
   AddBuiltin(BuiltinOperator_MIRROR_PAD, Register_MIRROR_PAD());
   AddBuiltin(BuiltinOperator_UNIQUE, Register_UNIQUE());
   AddBuiltin(BuiltinOperator_REVERSE_V2, Register_REVERSE_V2());
+  AddBuiltin(BuiltinOperator_ADD_N, Register_ADD_N());

   // TODO(andrewharp, ahentz): Move these somewhere more appropriate so that
   // custom ops aren't always included by default.
@@ -1184,6 +1184,51 @@ def make_add_tests(zip_path):
   make_binary_op_tests(zip_path, tf.add)


+def make_add_n_tests(zip_path):
+  """Make a set of tests for AddN op."""
+
+  test_parameters = [
+      {
+          "dtype": [tf.float32, tf.int32],
+          "input_shape": [[2, 5, 3, 1]],
+          "num_inputs": [2, 3, 4, 5],
+      },
+      {
+          "dtype": [tf.float32, tf.int32],
+          "input_shape": [[5]],
+          "num_inputs": [2, 3, 4, 5],
+      },
+      {
+          "dtype": [tf.float32, tf.int32],
+          "input_shape": [[]],
+          "num_inputs": [2, 3, 4, 5],
+      },
+  ]
+
+  def build_graph(parameters):
+    """Builds the graph given the current parameters."""
+    input_tensors = []
+    for i in range(parameters["num_inputs"]):
+      input_tensors.append(
+          tf.placeholder(
+              dtype=parameters["dtype"],
+              name="input_{}".format(i),
+              shape=parameters["input_shape"]))
+    out = tf.add_n(input_tensors)
+    return input_tensors, [out]
+
+  def build_inputs(parameters, sess, inputs, outputs):
+    """Builds operand inputs for op."""
+    input_data = []
+    for i in range(parameters["num_inputs"]):
+      input_data.append(
+          create_tensor_data(parameters["dtype"], parameters["input_shape"]))
+    return input_data, sess.run(
+        outputs, feed_dict={i: d for i, d in zip(inputs, input_data)})
+
+  make_zip_of_tests(zip_path, test_parameters, build_graph, build_inputs)
+
+
 def make_div_tests(zip_path):
   make_binary_op_tests(zip_path, tf.div)
@@ -2375,7 +2375,7 @@ ConverterMapType GetTensorFlowNodeConverterMap() {
   return std::unordered_map<std::string, ConverterType>({
       {"Abs", ConvertSimpleOperator<AbsOperator, kAnyNumInputs, 1>},
       {"Add", ConvertSimpleOperator<AddOperator, 2, 1>},
-      {"AddN", ConvertSimpleOperatorFlexOk<AddNOperator, kAnyNumInputs, 1>},
+      {"AddN", ConvertSimpleOperator<AddNOperator, kAnyNumInputs, 1>},
       {"All", ConvertSimpleOperator<TensorFlowAllOperator, kAnyNumInputs, 1>},
       {"Any", ConvertReduceOperator<TensorFlowAnyOperator>},
       {"ArgMax", ConvertArgMaxOperator},
@@ -203,6 +203,25 @@ class Add : public BuiltinOperator<AddOperator, ::tflite::AddOptions,
   }
 };

+class AddN : public BuiltinOperator<AddNOperator, ::tflite::AddNOptions,
+                                    ::tflite::BuiltinOptions_AddNOptions> {
+ public:
+  using BuiltinOperator::BuiltinOperator;
+
+  flatbuffers::Offset<TfLiteOptions> WriteOptions(
+      const TocoOperator& op,
+      flatbuffers::FlatBufferBuilder* builder) const override {
+    return ::tflite::CreateAddNOptions(*builder);
+  }
+
+  void ReadOptions(const TfLiteOptions& options,
+                   TocoOperator* op) const override {}
+
+  int GetVersion(const OperatorSignature& op_signature) const override {
+    return 1;
+  }
+};
+
 class SpaceToBatchND
     : public BuiltinOperator<SpaceToBatchNDOperator,
                              ::tflite::SpaceToBatchNDOptions,
@@ -1903,6 +1922,8 @@ std::vector<std::unique_ptr<BaseOperator>> BuildOperatorList(
   // Builtin Operators.
   ops.push_back(
       MakeUnique<Add>(::tflite::BuiltinOperator_ADD, OperatorType::kAdd));
+  ops.push_back(
+      MakeUnique<AddN>(::tflite::BuiltinOperator_ADD_N, OperatorType::kAddN));
   ops.push_back(
       MakeUnique<Div>(::tflite::BuiltinOperator_DIV, OperatorType::kDiv));
   ops.push_back(
@@ -164,6 +164,13 @@ TEST_F(OperatorTest, BuiltinAdd) {
             output_toco_op->fused_activation_function);
 }

+TEST_F(OperatorTest, BuiltinAddN) {
+  AddNOperator op;
+  auto output_toco_op =
+      SerializeAndDeserialize(GetOperator("ADD_N", OperatorType::kAddN), op);
+  ASSERT_NE(output_toco_op.get(), nullptr);
+}
+
 TEST_F(OperatorTest, BuiltinReducerOps) {
   CheckReducerOperator<MeanOperator>("MEAN", OperatorType::kMean);
   CheckReducerOperator<TensorFlowSumOperator>("SUM", OperatorType::kSum);