Support session initializer via op addition in TFLite (Part 1)

This change is for adding CallOnce op in TFLite builtin op set.

The CallOnce operator is a control flow op that invokes another subgraph in the
model in order to carry out the given graph's initialization tasks, for example,
hash table initialization and variable initialization.

This new operator will invoke the subgraph for initialization in the first run
and become no-op after the first run in an interpreter's life cycle.

PiperOrigin-RevId: 339763662
Change-Id: I8c2ae7213e749b76b9294175562389ebe79b542e
This commit is contained in:
Jaesung Chung 2020-10-29 15:53:22 -07:00 committed by TensorFlower Gardener
parent 04f62ae3b1
commit 2d03c32d62
14 changed files with 489 additions and 11 deletions

View File

@ -156,6 +156,7 @@ typedef enum {
kTfLiteBuiltinBatchMatmul = 126,
kTfLiteBuiltinPlaceholderForGreaterOpCodes = 127,
kTfLiteBuiltinCumsum = 128,
kTfLiteBuiltinCallOnce = 129,
} TfLiteBuiltinOperator;
#ifdef __cplusplus

View File

@ -470,6 +470,10 @@ typedef struct {
bool reverse;
} TfLiteCumsumParams;
typedef struct {
int init_subgraph_index;
} TfLiteCallOnceParams;
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus

View File

@ -761,6 +761,16 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_CALL_ONCE: {
auto params = safe_allocator.Allocate<TfLiteCallOnceParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* call_once_params =
op->builtin_options_as_CallOnceOptions()) {
params->init_subgraph_index = call_once_params->init_subgraph_index();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_CUMSUM: {
auto params = safe_allocator.Allocate<TfLiteCumsumParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);

View File

@ -81,6 +81,7 @@ static const char* param_structs[] = {"TfLiteAddParams",
"TfLiteReverseSequenceParams",
"TfLiteWhileParams",
"TfLiteCumsumParams",
"TfLiteCallOnceParams",
nullptr};
} // namespace

View File

@ -544,6 +544,7 @@ BUILTIN_KERNEL_SRCS = [
"batch_to_space_nd.cc",
"bidirectional_sequence_lstm.cc",
"bidirectional_sequence_rnn.cc",
"call_once.cc",
"cast.cc",
"ceil.cc",
"comparisons.cc",
@ -2100,6 +2101,21 @@ cc_test(
],
)
cc_test(
name = "call_once_test",
size = "small",
srcs = ["call_once_test.cc"],
tags = ["tflite_not_portable_ios"],
deps = [
":kernel_util",
":subgraph_test_util",
":test_main",
":variable_op_kernels",
"//tensorflow/lite:framework",
"@com_google_googletest//:gtest",
],
)
cc_test(
name = "if_test",
size = "small",
@ -2225,6 +2241,7 @@ cc_library(
":builtin_ops",
":kernel_util",
":test_util",
":variable_op_kernels",
"//tensorflow/lite:builtin_op_data",
"//tensorflow/lite:framework",
"//tensorflow/lite/c:common",

View File

@ -39,6 +39,7 @@ TfLiteRegistration* Register_BATCH_TO_SPACE_ND();
TfLiteRegistration* Register_BATCH_MATMUL();
TfLiteRegistration* Register_BIDIRECTIONAL_SEQUENCE_LSTM();
TfLiteRegistration* Register_BIDIRECTIONAL_SEQUENCE_RNN();
TfLiteRegistration* Register_CALL_ONCE();
TfLiteRegistration* Register_CAST();
TfLiteRegistration* Register_CEIL();
TfLiteRegistration* Register_CONCATENATION();

View File

@ -0,0 +1,109 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <stddef.h>
#include <cstring>
#include <memory>
#include <vector>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace call_once_kernel {
// The CallOnce operator is a control flow op that invokes another subgraph in
// the model in order to carry out the given graph's initialization tasks, for
// example, hash table initialization and variable initialization.
//
// This operator invokes the initialization subgraph on the first run and
// becomes a no-op for the rest of the interpreter's life cycle.
// Per-node state for a CallOnce op, allocated in Init and released in Free.
struct OpData {
  // Index of the subgraph to be invoked once in a life cycle by this CallOnce
  // op; copied from TfLiteCallOnceParams in Init.
  int init_subgraph_index;
  // True once the subgraph for initialization has been invoked successfully;
  // Prepare and Eval become no-ops afterwards.
  bool init_subgraph_invoked;
};
// Allocates the per-node OpData. `buffer` carries the TfLiteCallOnceParams
// parsed from the model's flatbuffer options.
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  const auto* params = reinterpret_cast<const TfLiteCallOnceParams*>(buffer);
  auto* data = new OpData;
  data->init_subgraph_index = params->init_subgraph_index;
  data->init_subgraph_invoked = false;
  return data;
}
// Releases the OpData allocated in Init. static_cast is the idiomatic (and
// sufficient) cast from void* back to the originally-allocated type;
// reinterpret_cast is unnecessary here.
void Free(TfLiteContext* context, void* buffer) {
  delete static_cast<OpData*>(buffer);
}
// Validates the CallOnce node and its initialization subgraph.
// Requirements checked: the node has no input/output tensors, the subgraph
// index is in range, and the initialization subgraph itself has no
// inputs/outputs.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  const OpData* op_data = reinterpret_cast<OpData*>(node->user_data);

  // Return early if the initialization graph is already invoked.
  if (op_data->init_subgraph_invoked) return kTfLiteOk;

  // CallOnce consumes and produces no tensors.
  TF_LITE_ENSURE_EQ(context, node->inputs->size, 0);
  TF_LITE_ENSURE_EQ(context, node->outputs->size, 0);

  Subgraph* this_subgraph = reinterpret_cast<Subgraph*>(context->impl_);
  auto* subgraphs = this_subgraph->GetSubgraphs();
  // Reject negative indices explicitly, and cast before comparing against the
  // unsigned size() to avoid a signed/unsigned comparison.
  TF_LITE_ENSURE(context, op_data->init_subgraph_index >= 0);
  TF_LITE_ENSURE(context, static_cast<size_t>(op_data->init_subgraph_index) <
                              subgraphs->size());

  // Ensures that there are no input and output tensors in the subgraph.
  Subgraph* init_subgraph = (*subgraphs)[op_data->init_subgraph_index].get();
  TF_LITE_ENSURE_EQ(context, init_subgraph->inputs().size(), 0);
  TF_LITE_ENSURE_EQ(context, init_subgraph->outputs().size(), 0);
  return kTfLiteOk;
}
// Runs the initialization subgraph exactly once per interpreter life cycle;
// every subsequent call is a no-op.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
  // Fast path: already initialized, nothing left to do.
  if (op_data->init_subgraph_invoked) return kTfLiteOk;

  auto* this_subgraph = reinterpret_cast<Subgraph*>(context->impl_);
  auto* subgraphs = this_subgraph->GetSubgraphs();
  Subgraph& initializer = *(*subgraphs)[op_data->init_subgraph_index];

  // Allocate, run, then release the initializer's non-persistent memory,
  // since it will never be invoked again.
  TF_LITE_ENSURE_OK(context, initializer.AllocateTensors());
  TF_LITE_ENSURE_OK(context, initializer.Invoke());
  TF_LITE_ENSURE_OK(context, initializer.ReleaseNonPersistentMemory());

  // Mark the invocation completed.
  op_data->init_subgraph_invoked = true;
  return kTfLiteOk;
}
} // namespace call_once_kernel
// Returns the registration for the CALL_ONCE builtin. The leading fields of
// TfLiteRegistration are {init, free, prepare, invoke}; remaining fields are
// zero-initialized by the aggregate initializer.
TfLiteRegistration* Register_CALL_ONCE() {
  static TfLiteRegistration r = {call_once_kernel::Init, call_once_kernel::Free,
                                 call_once_kernel::Prepare,
                                 call_once_kernel::Eval};
  return &r;
}
} // namespace builtin
} // namespace ops
} // namespace tflite

View File

@ -0,0 +1,87 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <stdint.h>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
namespace tflite {
using subgraph_test_util::ControlFlowOpTest;
namespace {
// Test fixture: the primary subgraph holds a CallOnce op (pointing at subgraph
// 1) followed by a ReadVariable op; subgraph 1 assigns a random value to the
// variable when invoked.
class CallOnceTest : public ControlFlowOpTest {
 protected:
  void SetUp() override {
    interpreter_->AddSubgraphs(1);
    builder_->BuildCallOnceAndReadVariableSubgraph(
        &interpreter_->primary_subgraph());
    builder_->BuildAssignRandomValueToVariableSubgraph(
        interpreter_->subgraph(1));
    ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  }
};
// A single Invoke must trigger the initialization subgraph: the variable read
// back through the primary subgraph carries the random (positive) value the
// initializer assigned.
TEST_F(CallOnceTest, TestSimple) {
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);

  TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
  ASSERT_EQ(output->dims->size, 1);
  ASSERT_EQ(output->dims->data[0], 1);
  ASSERT_EQ(output->type, kTfLiteInt32);
  ASSERT_EQ(NumElements(output), 1);

  // The value of the variable must be non-zero, which will be assigned by the
  // initialization subgraph.
  EXPECT_GT(output->data.i32[0], 0);
}
// Repeated Invokes must NOT re-run the initialization subgraph: the variable's
// value is captured after the first run and must stay identical on later runs
// (a re-run would assign a fresh random value and change it).
TEST_F(CallOnceTest, TestInvokeMultipleTimes) {
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);

  TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
  ASSERT_EQ(output->dims->size, 1);
  ASSERT_EQ(output->dims->data[0], 1);
  ASSERT_EQ(output->type, kTfLiteInt32);
  ASSERT_EQ(NumElements(output), 1);

  // The value of the variable must be non-zero, which will be assigned by the
  // initialization subgraph.
  int value = output->data.i32[0];
  EXPECT_GT(value, 0);

  for (int i = 0; i < 3; ++i) {
    // Make sure that no more random value assignment in the initialization
    // subgraph.
    ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);

    TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
    ASSERT_EQ(output->dims->size, 1);
    ASSERT_EQ(output->dims->data[0], 1);
    ASSERT_EQ(output->type, kTfLiteInt32);
    ASSERT_EQ(NumElements(output), 1);
    ASSERT_EQ(output->data.i32[0], value);
  }
}
} // namespace
} // namespace tflite

View File

@ -295,6 +295,8 @@ BuiltinOpResolver::BuiltinOpResolver() {
/* min_version = */ 1,
/* max_version = */ 3);
AddBuiltin(BuiltinOperator_CUMSUM, Register_CUMSUM());
AddBuiltin(BuiltinOperator_CALL_ONCE,
tflite::ops::builtin::Register_CALL_ONCE());
AddCustom("NumericVerify", tflite::ops::custom::Register_NUMERIC_VERIFY());
// TODO(andrewharp, ahentz): Move these somewhere more appropriate so that
// custom ops aren't always included by default.

View File

@ -19,6 +19,7 @@ limitations under the License.
#include <stdint.h>
#include <stdlib.h>
#include <random>
#include <vector>
#include <gtest/gtest.h>
@ -29,6 +30,48 @@ limitations under the License.
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
// Forward declaration for op kernels.
namespace ops {
namespace custom {
TfLiteRegistration* Register_ASSIGN_VARIABLE();
TfLiteRegistration* Register_READ_VARIABLE();
namespace random_int {
// Prepares the test-only RANDOM_INT op: no inputs, one int32 output resized to
// a single-element 1-D tensor.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 0);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  // TODO(jaesung): Make output size be changeable depending on user's input to
  // make it generic.
  TfLiteIntArray* output_size = TfLiteIntArrayCreate(1);
  output_size->data[0] = 1;

  TfLiteTensor* output = GetOutput(context, node, 0);
  return context->ResizeTensor(context, output, output_size);
}
// Writes one non-deterministic value in [1, 32768] into the output tensor.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  std::random_device rd;
  std::uniform_int_distribution<int> dist(1, 32768);
  TfLiteTensor& output = context->tensors[node->outputs->data[0]];
  output.data.i32[0] = dist(rd);
  return kTfLiteOk;
}
} // namespace random_int
// Registration for the test-only RANDOM_INT custom op. init/free are unused
// (nullptr); only prepare and invoke are provided.
TfLiteRegistration* Register_RANDOM_INT() {
  static TfLiteRegistration r = {nullptr, nullptr, random_int::Prepare,
                                 random_int::Eval};
  return &r;
}
} // namespace custom
} // namespace ops
namespace subgraph_test_util {
namespace {
@ -328,6 +371,65 @@ void SubgraphBuilder::BuildWhileSubgraph(Subgraph* subgraph) {
&node_index);
}
// Builds a subgraph that assigns a random value to a resource variable.
// The subgraph has no input/output tensors; it only mutates the variable
// identified by the constant resource-id tensor.
void SubgraphBuilder::BuildAssignRandomValueToVariableSubgraph(
    Subgraph* subgraph) {
  const int kConstResourceId = 0;
  const int kRandomValue = 1;
  const int kTensorCount = 3;

  // Construct a graph like this:
  //   %1 = random_int()
  //   variable_assign(%0, %1)

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(subgraph->SetInputs({}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({}), kTfLiteOk);

  SetupTensor(subgraph, kRandomValue, kTfLiteInt32);
  // Resource id 1024 identifies the variable shared with the reader subgraph.
  CreateConstantInt32Tensor(subgraph, kConstResourceId, {1}, {1024});

  int node_index;
  subgraph->AddNodeWithParameters({}, {kRandomValue}, {}, nullptr, 0, nullptr,
                                  ::tflite::ops::custom::Register_RANDOM_INT(),
                                  &node_index);
  subgraph->AddNodeWithParameters(
      {kConstResourceId, kRandomValue}, {}, {}, nullptr, 0, nullptr,
      ::tflite::ops::custom::Register_ASSIGN_VARIABLE(), &node_index);
}
// Builds a subgraph with a CallOnce op (targeting subgraph 1 for
// initialization) followed by a ReadVariable op. No inputs, one int32 output.
void SubgraphBuilder::BuildCallOnceAndReadVariableSubgraph(Subgraph* subgraph) {
  const int kConstResourceId = 0;
  const int kOutput = 1;
  const int kTensorCount = 2;

  // Construct a graph like this:
  //   Output: %1
  //   call_once(initialization subgraph)
  //   %1 = read_variable(%0)

  int first_new_tensor_index;
  ASSERT_EQ(subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(subgraph->SetInputs({}), kTfLiteOk);
  ASSERT_EQ(subgraph->SetOutputs({kOutput}), kTfLiteOk);

  SetupTensor(subgraph, kOutput, kTfLiteInt32);
  // Resource id 1024 identifies the variable shared with the assigner subgraph.
  CreateConstantInt32Tensor(subgraph, kConstResourceId, {1}, {1024});

  // NOTE(review): `params` is malloc'ed and handed to AddNodeWithParameters as
  // builtin data — ownership is assumed to transfer to the subgraph, which
  // should release it with free(); confirm against AddNodeWithParameters.
  TfLiteCallOnceParams* params = reinterpret_cast<TfLiteCallOnceParams*>(
      malloc(sizeof(TfLiteCallOnceParams)));
  // The initialization subgraph is expected to live at index 1.
  params->init_subgraph_index = 1;

  int node_index;
  subgraph->AddNodeWithParameters({}, {}, {}, nullptr, 0, params,
                                  ::tflite::ops::builtin::Register_CALL_ONCE(),
                                  &node_index);
  subgraph->AddNodeWithParameters(
      {kConstResourceId}, {kOutput}, {}, nullptr, 0, nullptr,
      ::tflite::ops::custom::Register_READ_VARIABLE(), &node_index);
}
void SubgraphBuilder::CreateConstantInt32Tensor(Subgraph* subgraph,
int tensor_index,
const std::vector<int>& shape,

View File

@ -85,6 +85,14 @@ class SubgraphBuilder {
// 2 inputs, 2 outputs.
void BuildWhileSubgraph(Subgraph* subgraph);
// Build a subgraph that assigns a random value to a variable.
// No input/output.
void BuildAssignRandomValueToVariableSubgraph(Subgraph* graph);
// Build a subgraph with CallOnce op and ReadVariable op.
// No input and 1 output.
void BuildCallOnceAndReadVariableSubgraph(Subgraph* graph);
private:
void CreateConstantInt32Tensor(Subgraph* subgraph, int tensor_index,
const std::vector<int>& shape,

View File

@ -352,7 +352,8 @@ enum BuiltinOperator : int32 {
SEGMENT_SUM = 125,
BATCH_MATMUL = 126,
PLACEHOLDER_FOR_GREATER_OP_CODES = 127,
CUMSUM = 128
CUMSUM = 128,
CALL_ONCE = 129
}
@ -460,6 +461,7 @@ union BuiltinOptions {
SegmentSumOptions,
BatchMatMulOptions,
CumsumOptions,
CallOnceOptions
}
enum Padding : byte { SAME, VALID }
@ -955,6 +957,10 @@ table IfOptions {
else_subgraph_index:int;
}
table CallOnceOptions {
  // The subgraph to invoke once, in order to initialize the model (e.g. hash
  // tables and variables), before other subgraphs run.
  init_subgraph_index:int;
}
table WhileOptions {
cond_subgraph_index:int;
body_subgraph_index:int;

View File

@ -325,6 +325,9 @@ struct MatrixSetDiagOptionsT;
struct IfOptions;
struct IfOptionsT;
struct CallOnceOptions;
struct CallOnceOptionsT;
struct WhileOptions;
struct WhileOptionsT;
@ -792,11 +795,12 @@ enum BuiltinOperator {
BuiltinOperator_BATCH_MATMUL = 126,
BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES = 127,
BuiltinOperator_CUMSUM = 128,
BuiltinOperator_CALL_ONCE = 129,
BuiltinOperator_MIN = BuiltinOperator_ADD,
BuiltinOperator_MAX = BuiltinOperator_CUMSUM
BuiltinOperator_MAX = BuiltinOperator_CALL_ONCE
};
inline const BuiltinOperator (&EnumValuesBuiltinOperator())[129] {
inline const BuiltinOperator (&EnumValuesBuiltinOperator())[130] {
static const BuiltinOperator values[] = {
BuiltinOperator_ADD,
BuiltinOperator_AVERAGE_POOL_2D,
@ -926,13 +930,14 @@ inline const BuiltinOperator (&EnumValuesBuiltinOperator())[129] {
BuiltinOperator_SEGMENT_SUM,
BuiltinOperator_BATCH_MATMUL,
BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES,
BuiltinOperator_CUMSUM
BuiltinOperator_CUMSUM,
BuiltinOperator_CALL_ONCE
};
return values;
}
inline const char * const *EnumNamesBuiltinOperator() {
static const char * const names[130] = {
static const char * const names[131] = {
"ADD",
"AVERAGE_POOL_2D",
"CONCATENATION",
@ -1062,13 +1067,14 @@ inline const char * const *EnumNamesBuiltinOperator() {
"BATCH_MATMUL",
"PLACEHOLDER_FOR_GREATER_OP_CODES",
"CUMSUM",
"CALL_ONCE",
nullptr
};
return names;
}
inline const char *EnumNameBuiltinOperator(BuiltinOperator e) {
if (flatbuffers::IsOutRange(e, BuiltinOperator_ADD, BuiltinOperator_CUMSUM)) return "";
if (flatbuffers::IsOutRange(e, BuiltinOperator_ADD, BuiltinOperator_CALL_ONCE)) return "";
const size_t index = static_cast<size_t>(e);
return EnumNamesBuiltinOperator()[index];
}
@ -1177,11 +1183,12 @@ enum BuiltinOptions {
BuiltinOptions_SegmentSumOptions = 100,
BuiltinOptions_BatchMatMulOptions = 101,
BuiltinOptions_CumsumOptions = 102,
BuiltinOptions_CallOnceOptions = 103,
BuiltinOptions_MIN = BuiltinOptions_NONE,
BuiltinOptions_MAX = BuiltinOptions_CumsumOptions
BuiltinOptions_MAX = BuiltinOptions_CallOnceOptions
};
inline const BuiltinOptions (&EnumValuesBuiltinOptions())[103] {
inline const BuiltinOptions (&EnumValuesBuiltinOptions())[104] {
static const BuiltinOptions values[] = {
BuiltinOptions_NONE,
BuiltinOptions_Conv2DOptions,
@ -1285,13 +1292,14 @@ inline const BuiltinOptions (&EnumValuesBuiltinOptions())[103] {
BuiltinOptions_DensifyOptions,
BuiltinOptions_SegmentSumOptions,
BuiltinOptions_BatchMatMulOptions,
BuiltinOptions_CumsumOptions
BuiltinOptions_CumsumOptions,
BuiltinOptions_CallOnceOptions
};
return values;
}
inline const char * const *EnumNamesBuiltinOptions() {
static const char * const names[104] = {
static const char * const names[105] = {
"NONE",
"Conv2DOptions",
"DepthwiseConv2DOptions",
@ -1395,13 +1403,14 @@ inline const char * const *EnumNamesBuiltinOptions() {
"SegmentSumOptions",
"BatchMatMulOptions",
"CumsumOptions",
"CallOnceOptions",
nullptr
};
return names;
}
inline const char *EnumNameBuiltinOptions(BuiltinOptions e) {
if (flatbuffers::IsOutRange(e, BuiltinOptions_NONE, BuiltinOptions_CumsumOptions)) return "";
if (flatbuffers::IsOutRange(e, BuiltinOptions_NONE, BuiltinOptions_CallOnceOptions)) return "";
const size_t index = static_cast<size_t>(e);
return EnumNamesBuiltinOptions()[index];
}
@ -1818,6 +1827,10 @@ template<> struct BuiltinOptionsTraits<tflite::CumsumOptions> {
static const BuiltinOptions enum_value = BuiltinOptions_CumsumOptions;
};
template<> struct BuiltinOptionsTraits<tflite::CallOnceOptions> {
static const BuiltinOptions enum_value = BuiltinOptions_CallOnceOptions;
};
struct BuiltinOptionsUnion {
BuiltinOptions type;
void *value;
@ -2666,6 +2679,14 @@ struct BuiltinOptionsUnion {
return type == BuiltinOptions_CumsumOptions ?
reinterpret_cast<const tflite::CumsumOptionsT *>(value) : nullptr;
}
tflite::CallOnceOptionsT *AsCallOnceOptions() {
return type == BuiltinOptions_CallOnceOptions ?
reinterpret_cast<tflite::CallOnceOptionsT *>(value) : nullptr;
}
const tflite::CallOnceOptionsT *AsCallOnceOptions() const {
return type == BuiltinOptions_CallOnceOptions ?
reinterpret_cast<const tflite::CallOnceOptionsT *>(value) : nullptr;
}
};
bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type);
@ -8992,6 +9013,60 @@ inline flatbuffers::Offset<IfOptions> CreateIfOptions(
flatbuffers::Offset<IfOptions> CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Auto-generated FlatBuffers object-API ("native table") form of
// CallOnceOptions. Do not edit by hand; regenerate from the schema.
struct CallOnceOptionsT : public flatbuffers::NativeTable {
  typedef CallOnceOptions TableType;
  // Index of the subgraph to run once for initialization; defaults to 0.
  int32_t init_subgraph_index;
  CallOnceOptionsT()
      : init_subgraph_index(0) {
  }
};
// Auto-generated FlatBuffers table accessor for CallOnceOptions. Do not edit
// by hand; regenerate from the schema.
struct CallOnceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef CallOnceOptionsT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_INIT_SUBGRAPH_INDEX = 4
  };
  // Reads the field directly from the buffer; returns 0 when absent.
  int32_t init_subgraph_index() const {
    return GetField<int32_t>(VT_INIT_SUBGRAPH_INDEX, 0);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_INIT_SUBGRAPH_INDEX) &&
           verifier.EndTable();
  }
  CallOnceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(CallOnceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<CallOnceOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Auto-generated builder for serializing a CallOnceOptions table. Do not edit
// by hand; regenerate from the schema.
struct CallOnceOptionsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  // Writes the field only when it differs from the default (0).
  void add_init_subgraph_index(int32_t init_subgraph_index) {
    fbb_.AddElement<int32_t>(CallOnceOptions::VT_INIT_SUBGRAPH_INDEX, init_subgraph_index, 0);
  }
  explicit CallOnceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  CallOnceOptionsBuilder &operator=(const CallOnceOptionsBuilder &);
  flatbuffers::Offset<CallOnceOptions> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<CallOnceOptions>(end);
    return o;
  }
};
// Auto-generated convenience factory for a CallOnceOptions table.
inline flatbuffers::Offset<CallOnceOptions> CreateCallOnceOptions(
    flatbuffers::FlatBufferBuilder &_fbb,
    int32_t init_subgraph_index = 0) {
  CallOnceOptionsBuilder builder_(_fbb);
  builder_.add_init_subgraph_index(init_subgraph_index);
  return builder_.Finish();
}
flatbuffers::Offset<CallOnceOptions> CreateCallOnceOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
struct WhileOptionsT : public flatbuffers::NativeTable {
typedef WhileOptions TableType;
int32_t cond_subgraph_index;
@ -9886,6 +9961,9 @@ struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
const tflite::CumsumOptions *builtin_options_as_CumsumOptions() const {
return builtin_options_type() == tflite::BuiltinOptions_CumsumOptions ? static_cast<const tflite::CumsumOptions *>(builtin_options()) : nullptr;
}
const tflite::CallOnceOptions *builtin_options_as_CallOnceOptions() const {
return builtin_options_type() == tflite::BuiltinOptions_CallOnceOptions ? static_cast<const tflite::CallOnceOptions *>(builtin_options()) : nullptr;
}
const flatbuffers::Vector<uint8_t> *custom_options() const {
return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM_OPTIONS);
}
@ -10330,6 +10408,10 @@ template<> inline const tflite::CumsumOptions *Operator::builtin_options_as<tfli
return builtin_options_as_CumsumOptions();
}
template<> inline const tflite::CallOnceOptions *Operator::builtin_options_as<tflite::CallOnceOptions>() const {
return builtin_options_as_CallOnceOptions();
}
struct OperatorBuilder {
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
@ -13810,6 +13892,32 @@ inline flatbuffers::Offset<IfOptions> CreateIfOptions(flatbuffers::FlatBufferBui
_else_subgraph_index);
}
// Auto-generated object-API conversions for CallOnceOptions. Do not edit by
// hand; regenerate from the schema.

// Allocates and fills a native CallOnceOptionsT from this table.
inline CallOnceOptionsT *CallOnceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new CallOnceOptionsT();
  UnPackTo(_o, _resolver);
  return _o;
}

// Copies this table's fields into an existing native object.
inline void CallOnceOptions::UnPackTo(CallOnceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = init_subgraph_index(); _o->init_subgraph_index = _e; }
}

// Serializes a native object back into a CallOnceOptions table.
inline flatbuffers::Offset<CallOnceOptions> CallOnceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateCallOnceOptions(_fbb, _o, _rehasher);
}

inline flatbuffers::Offset<CallOnceOptions> CreateCallOnceOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CallOnceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _init_subgraph_index = _o->init_subgraph_index;
  return tflite::CreateCallOnceOptions(
      _fbb,
      _init_subgraph_index);
}
inline WhileOptionsT *WhileOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
auto _o = new WhileOptionsT();
UnPackTo(_o, _resolver);
@ -14918,6 +15026,10 @@ inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *ob
auto ptr = reinterpret_cast<const tflite::CumsumOptions *>(obj);
return verifier.VerifyTable(ptr);
}
case BuiltinOptions_CallOnceOptions: {
auto ptr = reinterpret_cast<const tflite::CallOnceOptions *>(obj);
return verifier.VerifyTable(ptr);
}
default: return true;
}
}
@ -15344,6 +15456,10 @@ inline void *BuiltinOptionsUnion::UnPack(const void *obj, BuiltinOptions type, c
auto ptr = reinterpret_cast<const tflite::CumsumOptions *>(obj);
return ptr->UnPack(resolver);
}
case BuiltinOptions_CallOnceOptions: {
auto ptr = reinterpret_cast<const tflite::CallOnceOptions *>(obj);
return ptr->UnPack(resolver);
}
default: return nullptr;
}
}
@ -15758,6 +15874,10 @@ inline flatbuffers::Offset<void> BuiltinOptionsUnion::Pack(flatbuffers::FlatBuff
auto ptr = reinterpret_cast<const tflite::CumsumOptionsT *>(value);
return CreateCumsumOptions(_fbb, ptr, _rehasher).Union();
}
case BuiltinOptions_CallOnceOptions: {
auto ptr = reinterpret_cast<const tflite::CallOnceOptionsT *>(value);
return CreateCallOnceOptions(_fbb, ptr, _rehasher).Union();
}
default: return 0;
}
}
@ -16172,6 +16292,10 @@ inline BuiltinOptionsUnion::BuiltinOptionsUnion(const BuiltinOptionsUnion &u) FL
value = new tflite::CumsumOptionsT(*reinterpret_cast<tflite::CumsumOptionsT *>(u.value));
break;
}
case BuiltinOptions_CallOnceOptions: {
value = new tflite::CallOnceOptionsT(*reinterpret_cast<tflite::CallOnceOptionsT *>(u.value));
break;
}
default:
break;
}
@ -16689,6 +16813,11 @@ inline void BuiltinOptionsUnion::Reset() {
delete ptr;
break;
}
case BuiltinOptions_CallOnceOptions: {
auto ptr = reinterpret_cast<tflite::CallOnceOptionsT *>(value);
delete ptr;
break;
}
default: break;
}
value = nullptr;

View File

@ -320,6 +320,7 @@ std::string FindMinimumRuntimeVersionForOp(tflite::BuiltinOperator op_code,
{{BuiltinOperator_RANK, 1}, "1.14.0"},
{{BuiltinOperator_WHILE, 1}, "1.15.0"},
{{BuiltinOperator_CUMSUM, 1}, kPendingReleaseVersion},
{{BuiltinOperator_CALL_ONCE, 1}, kPendingReleaseVersion},
});
std::pair<BuiltinOperator, int> version_key = {op_code, op_version};