Promote cumsum as a builtin op.

PiperOrigin-RevId: 337978855
Change-Id: Ia909e529df40ae64776fbff7e3ff22ab9b33bf67
This commit is contained in:
Renjie Liu 2020-10-19 18:57:38 -07:00 committed by TensorFlower Gardener
parent 80999cc508
commit 957aacafa5
13 changed files with 194 additions and 55 deletions

View File

@ -155,6 +155,7 @@ typedef enum {
kTfLiteBuiltinSegmentSum = 125, kTfLiteBuiltinSegmentSum = 125,
kTfLiteBuiltinBatchMatmul = 126, kTfLiteBuiltinBatchMatmul = 126,
kTfLiteBuiltinPlaceholderForGreaterOpCodes = 127, kTfLiteBuiltinPlaceholderForGreaterOpCodes = 127,
kTfLiteBuiltinCumsum = 128,
} TfLiteBuiltinOperator; } TfLiteBuiltinOperator;
#ifdef __cplusplus #ifdef __cplusplus

View File

@ -465,6 +465,11 @@ typedef struct {
int body_subgraph_index; int body_subgraph_index;
} TfLiteWhileParams; } TfLiteWhileParams;
// Builtin options for the CUMSUM operator (kTfLiteBuiltinCumsum). Populated
// by the flatbuffer parser from the model's CumsumOptions table.
typedef struct {
  bool exclusive;  // Exclusive cumulative sum — presumably tf.cumsum semantics; confirm against the kernel.
  bool reverse;    // Accumulate in the reverse direction — confirm against the kernel.
} TfLiteCumsumParams;
#ifdef __cplusplus #ifdef __cplusplus
} // extern "C" } // extern "C"
#endif // __cplusplus #endif // __cplusplus

View File

@ -761,6 +761,16 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release(); *builtin_data = params.release();
return kTfLiteOk; return kTfLiteOk;
} }
case BuiltinOperator_CUMSUM: {
auto params = safe_allocator.Allocate<TfLiteCumsumParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* cumsum_params = op->builtin_options_as_CumsumOptions()) {
params->exclusive = cumsum_params->exclusive();
params->reverse = cumsum_params->reverse();
}
*builtin_data = params.release();
return kTfLiteOk;
}
// Below are the ops with no builtin_data structure. // Below are the ops with no builtin_data structure.
case BuiltinOperator_BATCH_TO_SPACE_ND: case BuiltinOperator_BATCH_TO_SPACE_ND:
// TODO(aselle): Implement call in BuiltinOptions, but nullptrs are // TODO(aselle): Implement call in BuiltinOptions, but nullptrs are

View File

@ -80,6 +80,7 @@ static const char* param_structs[] = {"TfLiteAddParams",
"TfLiteUnpackParams", "TfLiteUnpackParams",
"TfLiteReverseSequenceParams", "TfLiteReverseSequenceParams",
"TfLiteWhileParams", "TfLiteWhileParams",
"TfLiteCumsumParams",
nullptr}; nullptr};
} // namespace } // namespace

View File

@ -549,6 +549,7 @@ BUILTIN_KERNEL_SRCS = [
"comparisons.cc", "comparisons.cc",
"concatenation.cc", "concatenation.cc",
"conv.cc", "conv.cc",
"cumsum.cc",
"densify.cc", "densify.cc",
"depth_to_space.cc", "depth_to_space.cc",
"depthwise_conv.cc", "depthwise_conv.cc",
@ -717,7 +718,6 @@ cc_library(
name = "custom_ops", name = "custom_ops",
srcs = [ srcs = [
"complex_support.cc", "complex_support.cc",
"cumsum.cc",
"multinomial.cc", "multinomial.cc",
"random_standard_normal.cc", "random_standard_normal.cc",
"rfft2d.cc", "rfft2d.cc",
@ -2341,16 +2341,15 @@ cc_test(
cc_test( cc_test(
name = "cumsum_test", name = "cumsum_test",
size = "small",
srcs = ["cumsum_test.cc"], srcs = ["cumsum_test.cc"],
deps = [ deps = [
":custom_ops",
":test_main", ":test_main",
":test_util", ":test_util",
"//tensorflow/lite:framework", "//tensorflow/lite:framework",
"//tensorflow/lite/schema:schema_fbs", "//tensorflow/lite/schema:schema_fbs",
"//tensorflow/lite/testing:util", "//tensorflow/lite/testing:util",
"@com_google_googletest//:gtest", "@com_google_googletest//:gtest",
"@flatbuffers",
], ],
) )

View File

@ -44,6 +44,7 @@ TfLiteRegistration* Register_CEIL();
TfLiteRegistration* Register_CONCATENATION(); TfLiteRegistration* Register_CONCATENATION();
TfLiteRegistration* Register_CONV_2D(); TfLiteRegistration* Register_CONV_2D();
TfLiteRegistration* Register_COS(); TfLiteRegistration* Register_COS();
TfLiteRegistration* Register_CUMSUM();
TfLiteRegistration* Register_DENSIFY(); TfLiteRegistration* Register_DENSIFY();
TfLiteRegistration* Register_DEPTH_TO_SPACE(); TfLiteRegistration* Register_DEPTH_TO_SPACE();
TfLiteRegistration* Register_DEPTHWISE_CONV_2D(); TfLiteRegistration* Register_DEPTHWISE_CONV_2D();

View File

@ -13,44 +13,23 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
==============================================================================*/ ==============================================================================*/
#include "flatbuffers/flexbuffers.h" // from @flatbuffers #include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h" #include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/kernel_util.h"
// TODO(b/161933288): Promote this op to builtin-op when we can add new builtin
// ops.
namespace tflite { namespace tflite {
namespace ops { namespace ops {
namespace custom { namespace builtin {
namespace cumsum { namespace cumsum {
// Kernel-local parameter struct for the custom Cumsum op. Allocated and
// populated in Init from the flexbuffer custom options, stored in
// node->user_data, and released in Free.
typedef struct {
  bool exclusive;  // "exclusive" key of the flexbuffer options map.
  bool reverse;    // "reverse" key of the flexbuffer options map.
} TfLiteCumsumParams;
static const int kInputTensor = 0; static const int kInputTensor = 0;
static const int kAxisTensor = 1; static const int kAxisTensor = 1;
static const int kOutputTensor = 0; static const int kOutputTensor = 0;
// Allocates the op's parameter struct and fills it from the flexbuffer map
// serialized in `buffer`. The returned pointer is kept in node->user_data
// and released by Free().
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  auto* params = new TfLiteCumsumParams;
  const auto* raw_buffer = reinterpret_cast<const uint8_t*>(buffer);
  const flexbuffers::Map& options =
      flexbuffers::GetRoot(raw_buffer, length).AsMap();
  params->exclusive = options["exclusive"].AsBool();
  params->reverse = options["reverse"].AsBool();
  return params;
}
// Releases the TfLiteCumsumParams that Init allocated into node->user_data.
void Free(TfLiteContext* context, void* buffer) {
  auto* params = reinterpret_cast<TfLiteCumsumParams*>(buffer);
  delete params;
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
@ -79,7 +58,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
auto* params = reinterpret_cast<TfLiteCumsumParams*>(node->user_data); auto* params = reinterpret_cast<TfLiteCumsumParams*>(node->builtin_data);
int axis = *GetTensorData<int>(axis_tensor); int axis = *GetTensorData<int>(axis_tensor);
if (axis < 0) axis += NumDimensions(input); if (axis < 0) axis += NumDimensions(input);
@ -122,11 +101,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
} // namespace cumsum } // namespace cumsum
TfLiteRegistration* Register_CUMSUM() { TfLiteRegistration* Register_CUMSUM() {
static TfLiteRegistration r = {cumsum::Init, cumsum::Free, cumsum::Prepare, static TfLiteRegistration r = {nullptr, nullptr, cumsum::Prepare,
cumsum::Eval}; cumsum::Eval};
return &r; return &r;
} }
} // namespace custom } // namespace builtin
} // namespace ops } // namespace ops
} // namespace tflite } // namespace tflite

View File

@ -17,18 +17,14 @@ limitations under the License.
#include <gmock/gmock.h> #include <gmock/gmock.h>
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h" // from @flatbuffers
#include "tensorflow/lite/interpreter.h" #include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/custom_ops_register.h"
#include "tensorflow/lite/kernels/test_util.h" #include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h" #include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/testing/util.h" #include "tensorflow/lite/testing/util.h"
namespace tflite { namespace tflite {
namespace ops { namespace ops {
namespace custom { namespace builtin {
TfLiteRegistration* Register_CUMSUM();
namespace { namespace {
@ -42,13 +38,8 @@ class CumsumOpModel : public SingleOpModel {
output_ = AddOutput(output); output_ = AddOutput(output);
flexbuffers::Builder fbb; SetBuiltinOp(BuiltinOperator_CUMSUM, BuiltinOptions_CumsumOptions,
fbb.Map([&]() { CreateCumsumOptions(builder_, exclusive, reverse).Union());
fbb.Bool("exclusive", exclusive);
fbb.Bool("reverse", reverse);
});
fbb.Finish();
SetCustomOp("Cumsum", fbb.GetBuffer(), Register_CUMSUM);
BuildInterpreter({GetShape(input_), GetShape(axis_)}); BuildInterpreter({GetShape(input_), GetShape(axis_)});
} }
@ -160,6 +151,6 @@ TEST(CumsumOpTest, SimpleFloatTest) {
} }
} // namespace } // namespace
} // namespace custom } // namespace builtin
} // namespace ops } // namespace ops
} // namespace tflite } // namespace tflite

View File

@ -21,7 +21,6 @@ namespace tflite {
namespace ops { namespace ops {
namespace custom { namespace custom {
TfLiteRegistration* Register_CUMSUM();
TfLiteRegistration* Register_HASHTABLE(); TfLiteRegistration* Register_HASHTABLE();
TfLiteRegistration* Register_HASHTABLE_FIND(); TfLiteRegistration* Register_HASHTABLE_FIND();
TfLiteRegistration* Register_HASHTABLE_IMPORT(); TfLiteRegistration* Register_HASHTABLE_IMPORT();

View File

@ -294,6 +294,7 @@ BuiltinOpResolver::BuiltinOpResolver() {
AddBuiltin(BuiltinOperator_BATCH_MATMUL, Register_BATCH_MATMUL(), AddBuiltin(BuiltinOperator_BATCH_MATMUL, Register_BATCH_MATMUL(),
/* min_version = */ 1, /* min_version = */ 1,
/* max_version = */ 3); /* max_version = */ 3);
AddBuiltin(BuiltinOperator_CUMSUM, Register_CUMSUM());
AddCustom("NumericVerify", tflite::ops::custom::Register_NUMERIC_VERIFY()); AddCustom("NumericVerify", tflite::ops::custom::Register_NUMERIC_VERIFY());
// TODO(andrewharp, ahentz): Move these somewhere more appropriate so that // TODO(andrewharp, ahentz): Move these somewhere more appropriate so that
// custom ops aren't always included by default. // custom ops aren't always included by default.

View File

@ -351,7 +351,8 @@ enum BuiltinOperator : int32 {
DENSIFY = 124, DENSIFY = 124,
SEGMENT_SUM = 125, SEGMENT_SUM = 125,
BATCH_MATMUL = 126, BATCH_MATMUL = 126,
PLACEHOLDER_FOR_GREATER_OP_CODES = 127 PLACEHOLDER_FOR_GREATER_OP_CODES = 127,
CUMSUM = 128
} }
@ -457,7 +458,8 @@ union BuiltinOptions {
SelectV2Options, SelectV2Options,
DensifyOptions, DensifyOptions,
SegmentSumOptions, SegmentSumOptions,
BatchMatMulOptions BatchMatMulOptions,
CumsumOptions,
} }
enum Padding : byte { SAME, VALID } enum Padding : byte { SAME, VALID }
@ -981,6 +983,11 @@ table BatchMatMulOptions {
adj_y:bool; adj_y:bool;
} }
// Options for the builtin CUMSUM operator.
table CumsumOptions {
  exclusive:bool;  // If true, exclusive cumulative sum — presumably tf.cumsum semantics; confirm against the kernel.
  reverse:bool;    // If true, accumulate in the reverse direction — confirm against the kernel.
}
// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a // An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
// builtin, or a string if the operator is custom. // builtin, or a string if the operator is custom.
table OperatorCode { table OperatorCode {

View File

@ -349,6 +349,9 @@ struct SegmentSumOptionsT;
struct BatchMatMulOptions; struct BatchMatMulOptions;
struct BatchMatMulOptionsT; struct BatchMatMulOptionsT;
struct CumsumOptions;
struct CumsumOptionsT;
struct OperatorCode; struct OperatorCode;
struct OperatorCodeT; struct OperatorCodeT;
@ -788,11 +791,12 @@ enum BuiltinOperator {
BuiltinOperator_SEGMENT_SUM = 125, BuiltinOperator_SEGMENT_SUM = 125,
BuiltinOperator_BATCH_MATMUL = 126, BuiltinOperator_BATCH_MATMUL = 126,
BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES = 127, BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES = 127,
BuiltinOperator_CUMSUM = 128,
BuiltinOperator_MIN = BuiltinOperator_ADD, BuiltinOperator_MIN = BuiltinOperator_ADD,
BuiltinOperator_MAX = BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES BuiltinOperator_MAX = BuiltinOperator_CUMSUM
}; };
inline const BuiltinOperator (&EnumValuesBuiltinOperator())[128] { inline const BuiltinOperator (&EnumValuesBuiltinOperator())[129] {
static const BuiltinOperator values[] = { static const BuiltinOperator values[] = {
BuiltinOperator_ADD, BuiltinOperator_ADD,
BuiltinOperator_AVERAGE_POOL_2D, BuiltinOperator_AVERAGE_POOL_2D,
@ -921,13 +925,14 @@ inline const BuiltinOperator (&EnumValuesBuiltinOperator())[128] {
BuiltinOperator_DENSIFY, BuiltinOperator_DENSIFY,
BuiltinOperator_SEGMENT_SUM, BuiltinOperator_SEGMENT_SUM,
BuiltinOperator_BATCH_MATMUL, BuiltinOperator_BATCH_MATMUL,
BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES,
BuiltinOperator_CUMSUM
}; };
return values; return values;
} }
inline const char * const *EnumNamesBuiltinOperator() { inline const char * const *EnumNamesBuiltinOperator() {
static const char * const names[129] = { static const char * const names[130] = {
"ADD", "ADD",
"AVERAGE_POOL_2D", "AVERAGE_POOL_2D",
"CONCATENATION", "CONCATENATION",
@ -1056,13 +1061,14 @@ inline const char * const *EnumNamesBuiltinOperator() {
"SEGMENT_SUM", "SEGMENT_SUM",
"BATCH_MATMUL", "BATCH_MATMUL",
"PLACEHOLDER_FOR_GREATER_OP_CODES", "PLACEHOLDER_FOR_GREATER_OP_CODES",
"CUMSUM",
nullptr nullptr
}; };
return names; return names;
} }
inline const char *EnumNameBuiltinOperator(BuiltinOperator e) { inline const char *EnumNameBuiltinOperator(BuiltinOperator e) {
if (flatbuffers::IsOutRange(e, BuiltinOperator_ADD, BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES)) return ""; if (flatbuffers::IsOutRange(e, BuiltinOperator_ADD, BuiltinOperator_CUMSUM)) return "";
const size_t index = static_cast<size_t>(e); const size_t index = static_cast<size_t>(e);
return EnumNamesBuiltinOperator()[index]; return EnumNamesBuiltinOperator()[index];
} }
@ -1170,11 +1176,12 @@ enum BuiltinOptions {
BuiltinOptions_DensifyOptions = 99, BuiltinOptions_DensifyOptions = 99,
BuiltinOptions_SegmentSumOptions = 100, BuiltinOptions_SegmentSumOptions = 100,
BuiltinOptions_BatchMatMulOptions = 101, BuiltinOptions_BatchMatMulOptions = 101,
BuiltinOptions_CumsumOptions = 102,
BuiltinOptions_MIN = BuiltinOptions_NONE, BuiltinOptions_MIN = BuiltinOptions_NONE,
BuiltinOptions_MAX = BuiltinOptions_BatchMatMulOptions BuiltinOptions_MAX = BuiltinOptions_CumsumOptions
}; };
inline const BuiltinOptions (&EnumValuesBuiltinOptions())[102] { inline const BuiltinOptions (&EnumValuesBuiltinOptions())[103] {
static const BuiltinOptions values[] = { static const BuiltinOptions values[] = {
BuiltinOptions_NONE, BuiltinOptions_NONE,
BuiltinOptions_Conv2DOptions, BuiltinOptions_Conv2DOptions,
@ -1277,13 +1284,14 @@ inline const BuiltinOptions (&EnumValuesBuiltinOptions())[102] {
BuiltinOptions_SelectV2Options, BuiltinOptions_SelectV2Options,
BuiltinOptions_DensifyOptions, BuiltinOptions_DensifyOptions,
BuiltinOptions_SegmentSumOptions, BuiltinOptions_SegmentSumOptions,
BuiltinOptions_BatchMatMulOptions BuiltinOptions_BatchMatMulOptions,
BuiltinOptions_CumsumOptions
}; };
return values; return values;
} }
inline const char * const *EnumNamesBuiltinOptions() { inline const char * const *EnumNamesBuiltinOptions() {
static const char * const names[103] = { static const char * const names[104] = {
"NONE", "NONE",
"Conv2DOptions", "Conv2DOptions",
"DepthwiseConv2DOptions", "DepthwiseConv2DOptions",
@ -1386,13 +1394,14 @@ inline const char * const *EnumNamesBuiltinOptions() {
"DensifyOptions", "DensifyOptions",
"SegmentSumOptions", "SegmentSumOptions",
"BatchMatMulOptions", "BatchMatMulOptions",
"CumsumOptions",
nullptr nullptr
}; };
return names; return names;
} }
inline const char *EnumNameBuiltinOptions(BuiltinOptions e) { inline const char *EnumNameBuiltinOptions(BuiltinOptions e) {
if (flatbuffers::IsOutRange(e, BuiltinOptions_NONE, BuiltinOptions_BatchMatMulOptions)) return ""; if (flatbuffers::IsOutRange(e, BuiltinOptions_NONE, BuiltinOptions_CumsumOptions)) return "";
const size_t index = static_cast<size_t>(e); const size_t index = static_cast<size_t>(e);
return EnumNamesBuiltinOptions()[index]; return EnumNamesBuiltinOptions()[index];
} }
@ -1805,6 +1814,10 @@ template<> struct BuiltinOptionsTraits<tflite::BatchMatMulOptions> {
static const BuiltinOptions enum_value = BuiltinOptions_BatchMatMulOptions; static const BuiltinOptions enum_value = BuiltinOptions_BatchMatMulOptions;
}; };
template<> struct BuiltinOptionsTraits<tflite::CumsumOptions> {
static const BuiltinOptions enum_value = BuiltinOptions_CumsumOptions;
};
struct BuiltinOptionsUnion { struct BuiltinOptionsUnion {
BuiltinOptions type; BuiltinOptions type;
void *value; void *value;
@ -2645,6 +2658,14 @@ struct BuiltinOptionsUnion {
return type == BuiltinOptions_BatchMatMulOptions ? return type == BuiltinOptions_BatchMatMulOptions ?
reinterpret_cast<const tflite::BatchMatMulOptionsT *>(value) : nullptr; reinterpret_cast<const tflite::BatchMatMulOptionsT *>(value) : nullptr;
} }
  // Mutable accessor: returns the union payload as CumsumOptionsT when the
  // union tag is BuiltinOptions_CumsumOptions, otherwise nullptr.
  tflite::CumsumOptionsT *AsCumsumOptions() {
    return type == BuiltinOptions_CumsumOptions ?
        reinterpret_cast<tflite::CumsumOptionsT *>(value) : nullptr;
  }
  // Const counterpart of the mutable accessor; same tag check.
  const tflite::CumsumOptionsT *AsCumsumOptions() const {
    return type == BuiltinOptions_CumsumOptions ?
        reinterpret_cast<const tflite::CumsumOptionsT *>(value) : nullptr;
  }
}; };
bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type); bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type);
@ -9343,6 +9364,72 @@ inline flatbuffers::Offset<BatchMatMulOptions> CreateBatchMatMulOptions(
flatbuffers::Offset<BatchMatMulOptions> CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); flatbuffers::Offset<BatchMatMulOptions> CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Native (object API) mirror of the CumsumOptions table. flatc-generated;
// do not hand-edit — regenerate from the schema instead.
struct CumsumOptionsT : public flatbuffers::NativeTable {
  typedef CumsumOptions TableType;
  bool exclusive;
  bool reverse;
  // Defaults match the schema defaults (both fields false).
  CumsumOptionsT()
      : exclusive(false),
        reverse(false) {
  }
};
// In-place flatbuffer accessor for a CumsumOptions table. flatc-generated;
// do not hand-edit — regenerate from the schema instead.
struct CumsumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef CumsumOptionsT NativeTableType;
  // VTable byte offsets of the two fields.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_EXCLUSIVE = 4,
    VT_REVERSE = 6
  };
  // Both accessors return false when the field is absent from the buffer.
  bool exclusive() const {
    return GetField<uint8_t>(VT_EXCLUSIVE, 0) != 0;
  }
  bool reverse() const {
    return GetField<uint8_t>(VT_REVERSE, 0) != 0;
  }
  // Structural validation hook used by VerifyBuiltinOptions.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<uint8_t>(verifier, VT_EXCLUSIVE) &&
           VerifyField<uint8_t>(verifier, VT_REVERSE) &&
           verifier.EndTable();
  }
  // Object-API conversions; definitions appear later in this generated file.
  CumsumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(CumsumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<CumsumOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for a CumsumOptions table. flatc-generated; do not
// hand-edit — regenerate from the schema instead.
struct CumsumOptionsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // Offset returned by StartTable().
  // Fields are only written when they differ from the default (0/false).
  void add_exclusive(bool exclusive) {
    fbb_.AddElement<uint8_t>(CumsumOptions::VT_EXCLUSIVE, static_cast<uint8_t>(exclusive), 0);
  }
  void add_reverse(bool reverse) {
    fbb_.AddElement<uint8_t>(CumsumOptions::VT_REVERSE, static_cast<uint8_t>(reverse), 0);
  }
  explicit CumsumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  CumsumOptionsBuilder &operator=(const CumsumOptionsBuilder &);
  // Closes the table and returns its offset within the buffer.
  flatbuffers::Offset<CumsumOptions> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<CumsumOptions>(end);
    return o;
  }
};
// Convenience helper that builds a complete CumsumOptions table in _fbb.
// flatc-generated; fields are added in descending field order, matching the
// generator's layout convention.
inline flatbuffers::Offset<CumsumOptions> CreateCumsumOptions(
    flatbuffers::FlatBufferBuilder &_fbb,
    bool exclusive = false,
    bool reverse = false) {
  CumsumOptionsBuilder builder_(_fbb);
  builder_.add_reverse(reverse);
  builder_.add_exclusive(exclusive);
  return builder_.Finish();
}
flatbuffers::Offset<CumsumOptions> CreateCumsumOptions(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
struct OperatorCodeT : public flatbuffers::NativeTable { struct OperatorCodeT : public flatbuffers::NativeTable {
typedef OperatorCode TableType; typedef OperatorCode TableType;
int8_t deprecated_builtin_code; int8_t deprecated_builtin_code;
@ -9796,6 +9883,9 @@ struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const tflite::BatchMatMulOptions *builtin_options_as_BatchMatMulOptions() const { const tflite::BatchMatMulOptions *builtin_options_as_BatchMatMulOptions() const {
return builtin_options_type() == tflite::BuiltinOptions_BatchMatMulOptions ? static_cast<const tflite::BatchMatMulOptions *>(builtin_options()) : nullptr; return builtin_options_type() == tflite::BuiltinOptions_BatchMatMulOptions ? static_cast<const tflite::BatchMatMulOptions *>(builtin_options()) : nullptr;
} }
  // Typed view of builtin_options() when the union tag is
  // BuiltinOptions_CumsumOptions; nullptr for any other tag.
  const tflite::CumsumOptions *builtin_options_as_CumsumOptions() const {
    return builtin_options_type() == tflite::BuiltinOptions_CumsumOptions ? static_cast<const tflite::CumsumOptions *>(builtin_options()) : nullptr;
  }
const flatbuffers::Vector<uint8_t> *custom_options() const { const flatbuffers::Vector<uint8_t> *custom_options() const {
return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM_OPTIONS); return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM_OPTIONS);
} }
@ -10236,6 +10326,10 @@ template<> inline const tflite::BatchMatMulOptions *Operator::builtin_options_as
return builtin_options_as_BatchMatMulOptions(); return builtin_options_as_BatchMatMulOptions();
} }
// Template dispatch so Operator::builtin_options_as<tflite::CumsumOptions>()
// forwards to the named accessor.
template<> inline const tflite::CumsumOptions *Operator::builtin_options_as<tflite::CumsumOptions>() const {
  return builtin_options_as_CumsumOptions();
}
struct OperatorBuilder { struct OperatorBuilder {
flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_; flatbuffers::uoffset_t start_;
@ -13912,6 +14006,35 @@ inline flatbuffers::Offset<BatchMatMulOptions> CreateBatchMatMulOptions(flatbuff
_adj_y); _adj_y);
} }
// Deep-copies this table into a newly allocated CumsumOptionsT; the caller
// owns the returned object. flatc-generated.
inline CumsumOptionsT *CumsumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new CumsumOptionsT();
  UnPackTo(_o, _resolver);
  return _o;
}
// Copies both scalar fields into an existing native object. _resolver is
// unused here — this table holds no object references to remap.
inline void CumsumOptions::UnPackTo(CumsumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = exclusive(); _o->exclusive = _e; }
  { auto _e = reverse(); _o->reverse = _e; }
}
// Serializes a native CumsumOptionsT back into _fbb by delegating to the
// object-API CreateCumsumOptions overload.
inline flatbuffers::Offset<CumsumOptions> CumsumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateCumsumOptions(_fbb, _o, _rehasher);
}
// Object-API serializer: writes a CumsumOptions table from the native _o.
// _rehasher and the _VectorArgs shim are part of the generic flatc codegen
// pattern and are unused for this all-scalar table.
inline flatbuffers::Offset<CumsumOptions> CreateCumsumOptions(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CumsumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _exclusive = _o->exclusive;
  auto _reverse = _o->reverse;
  return tflite::CreateCumsumOptions(
      _fbb,
      _exclusive,
      _reverse);
}
inline OperatorCodeT *OperatorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const { inline OperatorCodeT *OperatorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
auto _o = new OperatorCodeT(); auto _o = new OperatorCodeT();
UnPackTo(_o, _resolver); UnPackTo(_o, _resolver);
@ -14791,6 +14914,10 @@ inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *ob
auto ptr = reinterpret_cast<const tflite::BatchMatMulOptions *>(obj); auto ptr = reinterpret_cast<const tflite::BatchMatMulOptions *>(obj);
return verifier.VerifyTable(ptr); return verifier.VerifyTable(ptr);
} }
case BuiltinOptions_CumsumOptions: {
auto ptr = reinterpret_cast<const tflite::CumsumOptions *>(obj);
return verifier.VerifyTable(ptr);
}
default: return true; default: return true;
} }
} }
@ -15213,6 +15340,10 @@ inline void *BuiltinOptionsUnion::UnPack(const void *obj, BuiltinOptions type, c
auto ptr = reinterpret_cast<const tflite::BatchMatMulOptions *>(obj); auto ptr = reinterpret_cast<const tflite::BatchMatMulOptions *>(obj);
return ptr->UnPack(resolver); return ptr->UnPack(resolver);
} }
case BuiltinOptions_CumsumOptions: {
auto ptr = reinterpret_cast<const tflite::CumsumOptions *>(obj);
return ptr->UnPack(resolver);
}
default: return nullptr; default: return nullptr;
} }
} }
@ -15623,6 +15754,10 @@ inline flatbuffers::Offset<void> BuiltinOptionsUnion::Pack(flatbuffers::FlatBuff
auto ptr = reinterpret_cast<const tflite::BatchMatMulOptionsT *>(value); auto ptr = reinterpret_cast<const tflite::BatchMatMulOptionsT *>(value);
return CreateBatchMatMulOptions(_fbb, ptr, _rehasher).Union(); return CreateBatchMatMulOptions(_fbb, ptr, _rehasher).Union();
} }
case BuiltinOptions_CumsumOptions: {
auto ptr = reinterpret_cast<const tflite::CumsumOptionsT *>(value);
return CreateCumsumOptions(_fbb, ptr, _rehasher).Union();
}
default: return 0; default: return 0;
} }
} }
@ -16033,6 +16168,10 @@ inline BuiltinOptionsUnion::BuiltinOptionsUnion(const BuiltinOptionsUnion &u) FL
value = new tflite::BatchMatMulOptionsT(*reinterpret_cast<tflite::BatchMatMulOptionsT *>(u.value)); value = new tflite::BatchMatMulOptionsT(*reinterpret_cast<tflite::BatchMatMulOptionsT *>(u.value));
break; break;
} }
case BuiltinOptions_CumsumOptions: {
value = new tflite::CumsumOptionsT(*reinterpret_cast<tflite::CumsumOptionsT *>(u.value));
break;
}
default: default:
break; break;
} }
@ -16545,6 +16684,11 @@ inline void BuiltinOptionsUnion::Reset() {
delete ptr; delete ptr;
break; break;
} }
case BuiltinOptions_CumsumOptions: {
auto ptr = reinterpret_cast<tflite::CumsumOptionsT *>(value);
delete ptr;
break;
}
default: break; default: break;
} }
value = nullptr; value = nullptr;

View File

@ -318,6 +318,7 @@ std::string FindMinimumRuntimeVersionForOp(tflite::BuiltinOperator op_code,
{{BuiltinOperator_REVERSE_V2, 2}, "2.2.0"}, {{BuiltinOperator_REVERSE_V2, 2}, "2.2.0"},
{{BuiltinOperator_RANK, 1}, "1.14.0"}, {{BuiltinOperator_RANK, 1}, "1.14.0"},
{{BuiltinOperator_WHILE, 1}, "1.15.0"}, {{BuiltinOperator_WHILE, 1}, "1.15.0"},
{{BuiltinOperator_CUMSUM, 1}, kPendingReleaseVersion},
}); });
std::pair<BuiltinOperator, int> version_key = {op_code, op_version}; std::pair<BuiltinOperator, int> version_key = {op_code, op_version};