TFLite OpenCL: CMake build fix

Added serialization_generated.h and serialization_base_generated.h

These files are generated with the following commands.
$ flatc -c --scoped-enums -I ./ tensorflow/lite/delegates/gpu/cl/serialization.fbs
$ flatc -c --scoped-enums -I ./ tensorflow/lite/delegates/gpu/common/task/serialization_base.fbs
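For reference, below is a minimal usage sketch (illustrative only, not part of this change) of the root-type helpers that the generated serialization_generated.h provides for writing, verifying, and reading back an InferenceContext buffer; the field values are placeholder assumptions.

// Minimal sketch: build, verify, and read an InferenceContext FlatBuffer
// using the generated API shown below. Field values are placeholders.
#include <cstdint>
#include <vector>

#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/delegates/gpu/cl/serialization_generated.h"

int main() {
  namespace data = tflite::gpu::cl::data;

  flatbuffers::FlatBufferBuilder builder;

  // Build a mostly-default InferenceContext table with a few scalar fields
  // and id vectors set.
  std::vector<int32_t> input_ids = {0};
  std::vector<int32_t> output_ids = {1};
  auto root = data::CreateInferenceContextDirect(
      builder,
      /*need_flush=*/true,
      /*flush_periodically=*/false,
      /*flush_period=*/0,
      /*need_manual_release=*/false,
      data::CalculationsPrecision::F16,
      tflite::gpu::data::TensorStorageType::UNKNOWN,
      /*nodes=*/nullptr,
      /*tensors=*/nullptr,
      &input_ids,
      /*variable_ids_and_refs=*/nullptr,
      &output_ids);
  data::FinishInferenceContextBuffer(builder, root);

  // Verify the finished buffer, then read it back through the generated
  // accessors.
  flatbuffers::Verifier verifier(builder.GetBufferPointer(), builder.GetSize());
  if (!data::VerifyInferenceContextBuffer(verifier)) return 1;
  const data::InferenceContext* ctx =
      data::GetInferenceContext(builder.GetBufferPointer());
  return ctx->precision() == data::CalculationsPrecision::F16 ? 0 : 1;
}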

PiperOrigin-RevId: 341594596
Change-Id: I2e80a1d01d3b48560c502279443107cce608f369
Authored by Terry Heo on 2020-11-10 04:40:18 -08:00; committed by TensorFlower Gardener
parent 2c05a4a796
commit a6241f0271
5 changed files with 2830 additions and 2 deletions
tensorflow/lite/delegates/gpu/cl/serialization_generated.h

@@ -0,0 +1,985 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// automatically generated by the FlatBuffers compiler, do not modify
#ifndef FLATBUFFERS_GENERATED_SERIALIZATION_TFLITE_GPU_CL_DATA_H_
#define FLATBUFFERS_GENERATED_SERIALIZATION_TFLITE_GPU_CL_DATA_H_
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/delegates/gpu/common/task/serialization_base_generated.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace data {
struct OperationDef;
struct OperationDefBuilder;
struct CompilerOption;
struct CompilerOptionBuilder;
struct GPUOperation;
struct GPUOperationBuilder;
struct TensorDescWithId;
struct TensorDescWithIdBuilder;
struct CLNode;
struct CLNodeBuilder;
struct PairOfValueIds;
struct PairOfValueIdsBuilder;
struct InferenceContext;
struct InferenceContextBuilder;
enum class CalculationsPrecision : int8_t {
F32 = 0,
F32_F16 = 1,
F16 = 2,
MIN = F32,
MAX = F16
};
inline const CalculationsPrecision (&EnumValuesCalculationsPrecision())[3] {
static const CalculationsPrecision values[] = {
CalculationsPrecision::F32,
CalculationsPrecision::F32_F16,
CalculationsPrecision::F16
};
return values;
}
inline const char * const *EnumNamesCalculationsPrecision() {
static const char * const names[4] = {
"F32",
"F32_F16",
"F16",
nullptr
};
return names;
}
inline const char *EnumNameCalculationsPrecision(CalculationsPrecision e) {
if (flatbuffers::IsOutRange(e, CalculationsPrecision::F32, CalculationsPrecision::F16)) return "";
const size_t index = static_cast<size_t>(e);
return EnumNamesCalculationsPrecision()[index];
}
enum class TensorToGrid : int8_t {
CUSTOM = 0,
WB_TO_X_HD_TO_Y_S_TO_Z = 1,
WB_TO_X_HD_TO_Y_Z_IS_1 = 2,
WB_TO_X_H_TO_Y_D_TO_Z = 3,
B_TO_X_Y_IS_1_Z_IS_1 = 4,
MIN = CUSTOM,
MAX = B_TO_X_Y_IS_1_Z_IS_1
};
inline const TensorToGrid (&EnumValuesTensorToGrid())[5] {
static const TensorToGrid values[] = {
TensorToGrid::CUSTOM,
TensorToGrid::WB_TO_X_HD_TO_Y_S_TO_Z,
TensorToGrid::WB_TO_X_HD_TO_Y_Z_IS_1,
TensorToGrid::WB_TO_X_H_TO_Y_D_TO_Z,
TensorToGrid::B_TO_X_Y_IS_1_Z_IS_1
};
return values;
}
inline const char * const *EnumNamesTensorToGrid() {
static const char * const names[6] = {
"CUSTOM",
"WB_TO_X_HD_TO_Y_S_TO_Z",
"WB_TO_X_HD_TO_Y_Z_IS_1",
"WB_TO_X_H_TO_Y_D_TO_Z",
"B_TO_X_Y_IS_1_Z_IS_1",
nullptr
};
return names;
}
inline const char *EnumNameTensorToGrid(TensorToGrid e) {
if (flatbuffers::IsOutRange(e, TensorToGrid::CUSTOM, TensorToGrid::B_TO_X_Y_IS_1_Z_IS_1)) return "";
const size_t index = static_cast<size_t>(e);
return EnumNamesTensorToGrid()[index];
}
enum class CompilerOptions : int8_t {
ADRENO_FULL_SIMD_LINE = 0,
ADRENO_MORE_WAVES = 1,
POWERVR_FP16 = 2,
CL_OPT_DISABLE = 3,
CL_2_0 = 4,
CL_3_0 = 5,
MIN = ADRENO_FULL_SIMD_LINE,
MAX = CL_3_0
};
inline const CompilerOptions (&EnumValuesCompilerOptions())[6] {
static const CompilerOptions values[] = {
CompilerOptions::ADRENO_FULL_SIMD_LINE,
CompilerOptions::ADRENO_MORE_WAVES,
CompilerOptions::POWERVR_FP16,
CompilerOptions::CL_OPT_DISABLE,
CompilerOptions::CL_2_0,
CompilerOptions::CL_3_0
};
return values;
}
inline const char * const *EnumNamesCompilerOptions() {
static const char * const names[7] = {
"ADRENO_FULL_SIMD_LINE",
"ADRENO_MORE_WAVES",
"POWERVR_FP16",
"CL_OPT_DISABLE",
"CL_2_0",
"CL_3_0",
nullptr
};
return names;
}
inline const char *EnumNameCompilerOptions(CompilerOptions e) {
if (flatbuffers::IsOutRange(e, CompilerOptions::ADRENO_FULL_SIMD_LINE, CompilerOptions::CL_3_0)) return "";
const size_t index = static_cast<size_t>(e);
return EnumNamesCompilerOptions()[index];
}
struct OperationDef FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef OperationDefBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_PRECISION = 4,
VT_SRC_TENSORS = 6,
VT_DST_TENSORS = 8
};
tflite::gpu::cl::data::CalculationsPrecision precision() const {
return static_cast<tflite::gpu::cl::data::CalculationsPrecision>(GetField<int8_t>(VT_PRECISION, 0));
}
const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::data::TensorDescriptor>> *src_tensors() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::data::TensorDescriptor>> *>(VT_SRC_TENSORS);
}
const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::data::TensorDescriptor>> *dst_tensors() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::data::TensorDescriptor>> *>(VT_DST_TENSORS);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<int8_t>(verifier, VT_PRECISION) &&
VerifyOffset(verifier, VT_SRC_TENSORS) &&
verifier.VerifyVector(src_tensors()) &&
verifier.VerifyVectorOfTables(src_tensors()) &&
VerifyOffset(verifier, VT_DST_TENSORS) &&
verifier.VerifyVector(dst_tensors()) &&
verifier.VerifyVectorOfTables(dst_tensors()) &&
verifier.EndTable();
}
};
struct OperationDefBuilder {
typedef OperationDef Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_precision(tflite::gpu::cl::data::CalculationsPrecision precision) {
fbb_.AddElement<int8_t>(OperationDef::VT_PRECISION, static_cast<int8_t>(precision), 0);
}
void add_src_tensors(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::data::TensorDescriptor>>> src_tensors) {
fbb_.AddOffset(OperationDef::VT_SRC_TENSORS, src_tensors);
}
void add_dst_tensors(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::data::TensorDescriptor>>> dst_tensors) {
fbb_.AddOffset(OperationDef::VT_DST_TENSORS, dst_tensors);
}
explicit OperationDefBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<OperationDef> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<OperationDef>(end);
return o;
}
};
inline flatbuffers::Offset<OperationDef> CreateOperationDef(
flatbuffers::FlatBufferBuilder &_fbb,
tflite::gpu::cl::data::CalculationsPrecision precision = tflite::gpu::cl::data::CalculationsPrecision::F32,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::data::TensorDescriptor>>> src_tensors = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::data::TensorDescriptor>>> dst_tensors = 0) {
OperationDefBuilder builder_(_fbb);
builder_.add_dst_tensors(dst_tensors);
builder_.add_src_tensors(src_tensors);
builder_.add_precision(precision);
return builder_.Finish();
}
inline flatbuffers::Offset<OperationDef> CreateOperationDefDirect(
flatbuffers::FlatBufferBuilder &_fbb,
tflite::gpu::cl::data::CalculationsPrecision precision = tflite::gpu::cl::data::CalculationsPrecision::F32,
const std::vector<flatbuffers::Offset<tflite::gpu::data::TensorDescriptor>> *src_tensors = nullptr,
const std::vector<flatbuffers::Offset<tflite::gpu::data::TensorDescriptor>> *dst_tensors = nullptr) {
auto src_tensors__ = src_tensors ? _fbb.CreateVector<flatbuffers::Offset<tflite::gpu::data::TensorDescriptor>>(*src_tensors) : 0;
auto dst_tensors__ = dst_tensors ? _fbb.CreateVector<flatbuffers::Offset<tflite::gpu::data::TensorDescriptor>>(*dst_tensors) : 0;
return tflite::gpu::cl::data::CreateOperationDef(
_fbb,
precision,
src_tensors__,
dst_tensors__);
}
struct CompilerOption FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef CompilerOptionBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_OPTION = 4
};
tflite::gpu::cl::data::CompilerOptions option() const {
return static_cast<tflite::gpu::cl::data::CompilerOptions>(GetField<int8_t>(VT_OPTION, 0));
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<int8_t>(verifier, VT_OPTION) &&
verifier.EndTable();
}
};
struct CompilerOptionBuilder {
typedef CompilerOption Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_option(tflite::gpu::cl::data::CompilerOptions option) {
fbb_.AddElement<int8_t>(CompilerOption::VT_OPTION, static_cast<int8_t>(option), 0);
}
explicit CompilerOptionBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<CompilerOption> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<CompilerOption>(end);
return o;
}
};
inline flatbuffers::Offset<CompilerOption> CreateCompilerOption(
flatbuffers::FlatBufferBuilder &_fbb,
tflite::gpu::cl::data::CompilerOptions option = tflite::gpu::cl::data::CompilerOptions::ADRENO_FULL_SIMD_LINE) {
CompilerOptionBuilder builder_(_fbb);
builder_.add_option(option);
return builder_.Finish();
}
struct GPUOperation FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef GPUOperationBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_ARGUMENTS = 4,
VT_CODE = 6,
VT_WORK_GROUP_SIZE = 8,
VT_COMPILER_OPTIONS = 10,
VT_TENSOR_TO_GRID = 12,
VT_ELEMENTWISE = 14,
VT_LINKABLE = 16,
VT_CHECK_SRC_CHANNELS_SIZE = 18,
VT_DEFINITION = 20,
VT_GRID_DIMENSION = 22,
VT_WORK_GROUP_LAUNCH_ORDER = 24,
VT_GRID_SIZE = 26,
VT_SRC_TENSORS_NAMES = 28,
VT_DST_TENSORS_NAMES = 30,
VT_WORK_GROUPS_COUNT = 32,
VT_LINKABLE_COUNT = 34,
VT_ELEMENTWISE_CODE = 36
};
const tflite::gpu::data::Arguments *arguments() const {
return GetPointer<const tflite::gpu::data::Arguments *>(VT_ARGUMENTS);
}
const flatbuffers::String *code() const {
return GetPointer<const flatbuffers::String *>(VT_CODE);
}
const tflite::gpu::data::Int3 *work_group_size() const {
return GetPointer<const tflite::gpu::data::Int3 *>(VT_WORK_GROUP_SIZE);
}
const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::CompilerOption>> *compiler_options() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::CompilerOption>> *>(VT_COMPILER_OPTIONS);
}
tflite::gpu::cl::data::TensorToGrid tensor_to_grid() const {
return static_cast<tflite::gpu::cl::data::TensorToGrid>(GetField<int8_t>(VT_TENSOR_TO_GRID, 0));
}
bool elementwise() const {
return GetField<uint8_t>(VT_ELEMENTWISE, 0) != 0;
}
bool linkable() const {
return GetField<uint8_t>(VT_LINKABLE, 0) != 0;
}
bool check_src_channels_size() const {
return GetField<uint8_t>(VT_CHECK_SRC_CHANNELS_SIZE, 0) != 0;
}
const tflite::gpu::cl::data::OperationDef *definition() const {
return GetPointer<const tflite::gpu::cl::data::OperationDef *>(VT_DEFINITION);
}
int32_t grid_dimension() const {
return GetField<int32_t>(VT_GRID_DIMENSION, 0);
}
const tflite::gpu::data::Int3 *work_group_launch_order() const {
return GetPointer<const tflite::gpu::data::Int3 *>(VT_WORK_GROUP_LAUNCH_ORDER);
}
const tflite::gpu::data::Int3 *grid_size() const {
return GetPointer<const tflite::gpu::data::Int3 *>(VT_GRID_SIZE);
}
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *src_tensors_names() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_SRC_TENSORS_NAMES);
}
const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *dst_tensors_names() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_DST_TENSORS_NAMES);
}
const tflite::gpu::data::Int3 *work_groups_count() const {
return GetPointer<const tflite::gpu::data::Int3 *>(VT_WORK_GROUPS_COUNT);
}
int32_t linkable_count() const {
return GetField<int32_t>(VT_LINKABLE_COUNT, 0);
}
const flatbuffers::String *elementwise_code() const {
return GetPointer<const flatbuffers::String *>(VT_ELEMENTWISE_CODE);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_ARGUMENTS) &&
verifier.VerifyTable(arguments()) &&
VerifyOffset(verifier, VT_CODE) &&
verifier.VerifyString(code()) &&
VerifyOffset(verifier, VT_WORK_GROUP_SIZE) &&
verifier.VerifyTable(work_group_size()) &&
VerifyOffset(verifier, VT_COMPILER_OPTIONS) &&
verifier.VerifyVector(compiler_options()) &&
verifier.VerifyVectorOfTables(compiler_options()) &&
VerifyField<int8_t>(verifier, VT_TENSOR_TO_GRID) &&
VerifyField<uint8_t>(verifier, VT_ELEMENTWISE) &&
VerifyField<uint8_t>(verifier, VT_LINKABLE) &&
VerifyField<uint8_t>(verifier, VT_CHECK_SRC_CHANNELS_SIZE) &&
VerifyOffset(verifier, VT_DEFINITION) &&
verifier.VerifyTable(definition()) &&
VerifyField<int32_t>(verifier, VT_GRID_DIMENSION) &&
VerifyOffset(verifier, VT_WORK_GROUP_LAUNCH_ORDER) &&
verifier.VerifyTable(work_group_launch_order()) &&
VerifyOffset(verifier, VT_GRID_SIZE) &&
verifier.VerifyTable(grid_size()) &&
VerifyOffset(verifier, VT_SRC_TENSORS_NAMES) &&
verifier.VerifyVector(src_tensors_names()) &&
verifier.VerifyVectorOfStrings(src_tensors_names()) &&
VerifyOffset(verifier, VT_DST_TENSORS_NAMES) &&
verifier.VerifyVector(dst_tensors_names()) &&
verifier.VerifyVectorOfStrings(dst_tensors_names()) &&
VerifyOffset(verifier, VT_WORK_GROUPS_COUNT) &&
verifier.VerifyTable(work_groups_count()) &&
VerifyField<int32_t>(verifier, VT_LINKABLE_COUNT) &&
VerifyOffset(verifier, VT_ELEMENTWISE_CODE) &&
verifier.VerifyString(elementwise_code()) &&
verifier.EndTable();
}
};
struct GPUOperationBuilder {
typedef GPUOperation Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_arguments(flatbuffers::Offset<tflite::gpu::data::Arguments> arguments) {
fbb_.AddOffset(GPUOperation::VT_ARGUMENTS, arguments);
}
void add_code(flatbuffers::Offset<flatbuffers::String> code) {
fbb_.AddOffset(GPUOperation::VT_CODE, code);
}
void add_work_group_size(flatbuffers::Offset<tflite::gpu::data::Int3> work_group_size) {
fbb_.AddOffset(GPUOperation::VT_WORK_GROUP_SIZE, work_group_size);
}
void add_compiler_options(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::CompilerOption>>> compiler_options) {
fbb_.AddOffset(GPUOperation::VT_COMPILER_OPTIONS, compiler_options);
}
void add_tensor_to_grid(tflite::gpu::cl::data::TensorToGrid tensor_to_grid) {
fbb_.AddElement<int8_t>(GPUOperation::VT_TENSOR_TO_GRID, static_cast<int8_t>(tensor_to_grid), 0);
}
void add_elementwise(bool elementwise) {
fbb_.AddElement<uint8_t>(GPUOperation::VT_ELEMENTWISE, static_cast<uint8_t>(elementwise), 0);
}
void add_linkable(bool linkable) {
fbb_.AddElement<uint8_t>(GPUOperation::VT_LINKABLE, static_cast<uint8_t>(linkable), 0);
}
void add_check_src_channels_size(bool check_src_channels_size) {
fbb_.AddElement<uint8_t>(GPUOperation::VT_CHECK_SRC_CHANNELS_SIZE, static_cast<uint8_t>(check_src_channels_size), 0);
}
void add_definition(flatbuffers::Offset<tflite::gpu::cl::data::OperationDef> definition) {
fbb_.AddOffset(GPUOperation::VT_DEFINITION, definition);
}
void add_grid_dimension(int32_t grid_dimension) {
fbb_.AddElement<int32_t>(GPUOperation::VT_GRID_DIMENSION, grid_dimension, 0);
}
void add_work_group_launch_order(flatbuffers::Offset<tflite::gpu::data::Int3> work_group_launch_order) {
fbb_.AddOffset(GPUOperation::VT_WORK_GROUP_LAUNCH_ORDER, work_group_launch_order);
}
void add_grid_size(flatbuffers::Offset<tflite::gpu::data::Int3> grid_size) {
fbb_.AddOffset(GPUOperation::VT_GRID_SIZE, grid_size);
}
void add_src_tensors_names(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> src_tensors_names) {
fbb_.AddOffset(GPUOperation::VT_SRC_TENSORS_NAMES, src_tensors_names);
}
void add_dst_tensors_names(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> dst_tensors_names) {
fbb_.AddOffset(GPUOperation::VT_DST_TENSORS_NAMES, dst_tensors_names);
}
void add_work_groups_count(flatbuffers::Offset<tflite::gpu::data::Int3> work_groups_count) {
fbb_.AddOffset(GPUOperation::VT_WORK_GROUPS_COUNT, work_groups_count);
}
void add_linkable_count(int32_t linkable_count) {
fbb_.AddElement<int32_t>(GPUOperation::VT_LINKABLE_COUNT, linkable_count, 0);
}
void add_elementwise_code(flatbuffers::Offset<flatbuffers::String> elementwise_code) {
fbb_.AddOffset(GPUOperation::VT_ELEMENTWISE_CODE, elementwise_code);
}
explicit GPUOperationBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<GPUOperation> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<GPUOperation>(end);
return o;
}
};
inline flatbuffers::Offset<GPUOperation> CreateGPUOperation(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<tflite::gpu::data::Arguments> arguments = 0,
flatbuffers::Offset<flatbuffers::String> code = 0,
flatbuffers::Offset<tflite::gpu::data::Int3> work_group_size = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::CompilerOption>>> compiler_options = 0,
tflite::gpu::cl::data::TensorToGrid tensor_to_grid = tflite::gpu::cl::data::TensorToGrid::CUSTOM,
bool elementwise = false,
bool linkable = false,
bool check_src_channels_size = false,
flatbuffers::Offset<tflite::gpu::cl::data::OperationDef> definition = 0,
int32_t grid_dimension = 0,
flatbuffers::Offset<tflite::gpu::data::Int3> work_group_launch_order = 0,
flatbuffers::Offset<tflite::gpu::data::Int3> grid_size = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> src_tensors_names = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> dst_tensors_names = 0,
flatbuffers::Offset<tflite::gpu::data::Int3> work_groups_count = 0,
int32_t linkable_count = 0,
flatbuffers::Offset<flatbuffers::String> elementwise_code = 0) {
GPUOperationBuilder builder_(_fbb);
builder_.add_elementwise_code(elementwise_code);
builder_.add_linkable_count(linkable_count);
builder_.add_work_groups_count(work_groups_count);
builder_.add_dst_tensors_names(dst_tensors_names);
builder_.add_src_tensors_names(src_tensors_names);
builder_.add_grid_size(grid_size);
builder_.add_work_group_launch_order(work_group_launch_order);
builder_.add_grid_dimension(grid_dimension);
builder_.add_definition(definition);
builder_.add_compiler_options(compiler_options);
builder_.add_work_group_size(work_group_size);
builder_.add_code(code);
builder_.add_arguments(arguments);
builder_.add_check_src_channels_size(check_src_channels_size);
builder_.add_linkable(linkable);
builder_.add_elementwise(elementwise);
builder_.add_tensor_to_grid(tensor_to_grid);
return builder_.Finish();
}
inline flatbuffers::Offset<GPUOperation> CreateGPUOperationDirect(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<tflite::gpu::data::Arguments> arguments = 0,
const char *code = nullptr,
flatbuffers::Offset<tflite::gpu::data::Int3> work_group_size = 0,
const std::vector<flatbuffers::Offset<tflite::gpu::cl::data::CompilerOption>> *compiler_options = nullptr,
tflite::gpu::cl::data::TensorToGrid tensor_to_grid = tflite::gpu::cl::data::TensorToGrid::CUSTOM,
bool elementwise = false,
bool linkable = false,
bool check_src_channels_size = false,
flatbuffers::Offset<tflite::gpu::cl::data::OperationDef> definition = 0,
int32_t grid_dimension = 0,
flatbuffers::Offset<tflite::gpu::data::Int3> work_group_launch_order = 0,
flatbuffers::Offset<tflite::gpu::data::Int3> grid_size = 0,
const std::vector<flatbuffers::Offset<flatbuffers::String>> *src_tensors_names = nullptr,
const std::vector<flatbuffers::Offset<flatbuffers::String>> *dst_tensors_names = nullptr,
flatbuffers::Offset<tflite::gpu::data::Int3> work_groups_count = 0,
int32_t linkable_count = 0,
const char *elementwise_code = nullptr) {
auto code__ = code ? _fbb.CreateString(code) : 0;
auto compiler_options__ = compiler_options ? _fbb.CreateVector<flatbuffers::Offset<tflite::gpu::cl::data::CompilerOption>>(*compiler_options) : 0;
auto src_tensors_names__ = src_tensors_names ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*src_tensors_names) : 0;
auto dst_tensors_names__ = dst_tensors_names ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*dst_tensors_names) : 0;
auto elementwise_code__ = elementwise_code ? _fbb.CreateString(elementwise_code) : 0;
return tflite::gpu::cl::data::CreateGPUOperation(
_fbb,
arguments,
code__,
work_group_size,
compiler_options__,
tensor_to_grid,
elementwise,
linkable,
check_src_channels_size,
definition,
grid_dimension,
work_group_launch_order,
grid_size,
src_tensors_names__,
dst_tensors_names__,
work_groups_count,
linkable_count,
elementwise_code__);
}
struct TensorDescWithId FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef TensorDescWithIdBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_DESC = 4,
VT_ID = 6
};
const tflite::gpu::data::TensorDescriptor *desc() const {
return GetPointer<const tflite::gpu::data::TensorDescriptor *>(VT_DESC);
}
int32_t id() const {
return GetField<int32_t>(VT_ID, 0);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_DESC) &&
verifier.VerifyTable(desc()) &&
VerifyField<int32_t>(verifier, VT_ID) &&
verifier.EndTable();
}
};
struct TensorDescWithIdBuilder {
typedef TensorDescWithId Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_desc(flatbuffers::Offset<tflite::gpu::data::TensorDescriptor> desc) {
fbb_.AddOffset(TensorDescWithId::VT_DESC, desc);
}
void add_id(int32_t id) {
fbb_.AddElement<int32_t>(TensorDescWithId::VT_ID, id, 0);
}
explicit TensorDescWithIdBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<TensorDescWithId> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<TensorDescWithId>(end);
return o;
}
};
inline flatbuffers::Offset<TensorDescWithId> CreateTensorDescWithId(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<tflite::gpu::data::TensorDescriptor> desc = 0,
int32_t id = 0) {
TensorDescWithIdBuilder builder_(_fbb);
builder_.add_id(id);
builder_.add_desc(desc);
return builder_.Finish();
}
struct CLNode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef CLNodeBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_GPU_OP = 4,
VT_INPUT_IDS = 6,
VT_OUTPUT_IDS = 8,
VT_NAME = 10
};
const tflite::gpu::cl::data::GPUOperation *gpu_op() const {
return GetPointer<const tflite::gpu::cl::data::GPUOperation *>(VT_GPU_OP);
}
const flatbuffers::Vector<int32_t> *input_ids() const {
return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUT_IDS);
}
const flatbuffers::Vector<int32_t> *output_ids() const {
return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUT_IDS);
}
const flatbuffers::String *name() const {
return GetPointer<const flatbuffers::String *>(VT_NAME);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyOffset(verifier, VT_GPU_OP) &&
verifier.VerifyTable(gpu_op()) &&
VerifyOffset(verifier, VT_INPUT_IDS) &&
verifier.VerifyVector(input_ids()) &&
VerifyOffset(verifier, VT_OUTPUT_IDS) &&
verifier.VerifyVector(output_ids()) &&
VerifyOffset(verifier, VT_NAME) &&
verifier.VerifyString(name()) &&
verifier.EndTable();
}
};
struct CLNodeBuilder {
typedef CLNode Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_gpu_op(flatbuffers::Offset<tflite::gpu::cl::data::GPUOperation> gpu_op) {
fbb_.AddOffset(CLNode::VT_GPU_OP, gpu_op);
}
void add_input_ids(flatbuffers::Offset<flatbuffers::Vector<int32_t>> input_ids) {
fbb_.AddOffset(CLNode::VT_INPUT_IDS, input_ids);
}
void add_output_ids(flatbuffers::Offset<flatbuffers::Vector<int32_t>> output_ids) {
fbb_.AddOffset(CLNode::VT_OUTPUT_IDS, output_ids);
}
void add_name(flatbuffers::Offset<flatbuffers::String> name) {
fbb_.AddOffset(CLNode::VT_NAME, name);
}
explicit CLNodeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<CLNode> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<CLNode>(end);
return o;
}
};
inline flatbuffers::Offset<CLNode> CreateCLNode(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<tflite::gpu::cl::data::GPUOperation> gpu_op = 0,
flatbuffers::Offset<flatbuffers::Vector<int32_t>> input_ids = 0,
flatbuffers::Offset<flatbuffers::Vector<int32_t>> output_ids = 0,
flatbuffers::Offset<flatbuffers::String> name = 0) {
CLNodeBuilder builder_(_fbb);
builder_.add_name(name);
builder_.add_output_ids(output_ids);
builder_.add_input_ids(input_ids);
builder_.add_gpu_op(gpu_op);
return builder_.Finish();
}
inline flatbuffers::Offset<CLNode> CreateCLNodeDirect(
flatbuffers::FlatBufferBuilder &_fbb,
flatbuffers::Offset<tflite::gpu::cl::data::GPUOperation> gpu_op = 0,
const std::vector<int32_t> *input_ids = nullptr,
const std::vector<int32_t> *output_ids = nullptr,
const char *name = nullptr) {
auto input_ids__ = input_ids ? _fbb.CreateVector<int32_t>(*input_ids) : 0;
auto output_ids__ = output_ids ? _fbb.CreateVector<int32_t>(*output_ids) : 0;
auto name__ = name ? _fbb.CreateString(name) : 0;
return tflite::gpu::cl::data::CreateCLNode(
_fbb,
gpu_op,
input_ids__,
output_ids__,
name__);
}
struct PairOfValueIds FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef PairOfValueIdsBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_FIRST = 4,
VT_SECOND = 6
};
int32_t first() const {
return GetField<int32_t>(VT_FIRST, 0);
}
int32_t second() const {
return GetField<int32_t>(VT_SECOND, 0);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<int32_t>(verifier, VT_FIRST) &&
VerifyField<int32_t>(verifier, VT_SECOND) &&
verifier.EndTable();
}
};
struct PairOfValueIdsBuilder {
typedef PairOfValueIds Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_first(int32_t first) {
fbb_.AddElement<int32_t>(PairOfValueIds::VT_FIRST, first, 0);
}
void add_second(int32_t second) {
fbb_.AddElement<int32_t>(PairOfValueIds::VT_SECOND, second, 0);
}
explicit PairOfValueIdsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<PairOfValueIds> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<PairOfValueIds>(end);
return o;
}
};
inline flatbuffers::Offset<PairOfValueIds> CreatePairOfValueIds(
flatbuffers::FlatBufferBuilder &_fbb,
int32_t first = 0,
int32_t second = 0) {
PairOfValueIdsBuilder builder_(_fbb);
builder_.add_second(second);
builder_.add_first(first);
return builder_.Finish();
}
struct InferenceContext FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
typedef InferenceContextBuilder Builder;
enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
VT_NEED_FLUSH = 4,
VT_FLUSH_PERIODICALLY = 6,
VT_FLUSH_PERIOD = 8,
VT_NEED_MANUAL_RELEASE = 10,
VT_PRECISION = 12,
VT_STORAGE_TYPE = 14,
VT_NODES = 16,
VT_TENSORS = 18,
VT_INPUT_IDS = 20,
VT_VARIABLE_IDS_AND_REFS = 22,
VT_OUTPUT_IDS = 24,
VT_INPUT_REFS = 26,
VT_OUTPUT_REFS = 28
};
bool need_flush() const {
return GetField<uint8_t>(VT_NEED_FLUSH, 0) != 0;
}
bool flush_periodically() const {
return GetField<uint8_t>(VT_FLUSH_PERIODICALLY, 0) != 0;
}
int32_t flush_period() const {
return GetField<int32_t>(VT_FLUSH_PERIOD, 0);
}
bool need_manual_release() const {
return GetField<uint8_t>(VT_NEED_MANUAL_RELEASE, 0) != 0;
}
tflite::gpu::cl::data::CalculationsPrecision precision() const {
return static_cast<tflite::gpu::cl::data::CalculationsPrecision>(GetField<int8_t>(VT_PRECISION, 0));
}
tflite::gpu::data::TensorStorageType storage_type() const {
return static_cast<tflite::gpu::data::TensorStorageType>(GetField<int8_t>(VT_STORAGE_TYPE, 0));
}
const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::CLNode>> *nodes() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::CLNode>> *>(VT_NODES);
}
const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>> *tensors() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>> *>(VT_TENSORS);
}
const flatbuffers::Vector<int32_t> *input_ids() const {
return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_INPUT_IDS);
}
const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::PairOfValueIds>> *variable_ids_and_refs() const {
return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::PairOfValueIds>> *>(VT_VARIABLE_IDS_AND_REFS);
}
const flatbuffers::Vector<int32_t> *output_ids() const {
return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_OUTPUT_IDS);
}
const flatbuffers::Vector<int64_t> *input_refs() const {
return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_INPUT_REFS);
}
const flatbuffers::Vector<int64_t> *output_refs() const {
return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_OUTPUT_REFS);
}
bool Verify(flatbuffers::Verifier &verifier) const {
return VerifyTableStart(verifier) &&
VerifyField<uint8_t>(verifier, VT_NEED_FLUSH) &&
VerifyField<uint8_t>(verifier, VT_FLUSH_PERIODICALLY) &&
VerifyField<int32_t>(verifier, VT_FLUSH_PERIOD) &&
VerifyField<uint8_t>(verifier, VT_NEED_MANUAL_RELEASE) &&
VerifyField<int8_t>(verifier, VT_PRECISION) &&
VerifyField<int8_t>(verifier, VT_STORAGE_TYPE) &&
VerifyOffset(verifier, VT_NODES) &&
verifier.VerifyVector(nodes()) &&
verifier.VerifyVectorOfTables(nodes()) &&
VerifyOffset(verifier, VT_TENSORS) &&
verifier.VerifyVector(tensors()) &&
verifier.VerifyVectorOfTables(tensors()) &&
VerifyOffset(verifier, VT_INPUT_IDS) &&
verifier.VerifyVector(input_ids()) &&
VerifyOffset(verifier, VT_VARIABLE_IDS_AND_REFS) &&
verifier.VerifyVector(variable_ids_and_refs()) &&
verifier.VerifyVectorOfTables(variable_ids_and_refs()) &&
VerifyOffset(verifier, VT_OUTPUT_IDS) &&
verifier.VerifyVector(output_ids()) &&
VerifyOffset(verifier, VT_INPUT_REFS) &&
verifier.VerifyVector(input_refs()) &&
VerifyOffset(verifier, VT_OUTPUT_REFS) &&
verifier.VerifyVector(output_refs()) &&
verifier.EndTable();
}
};
struct InferenceContextBuilder {
typedef InferenceContext Table;
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_need_flush(bool need_flush) {
fbb_.AddElement<uint8_t>(InferenceContext::VT_NEED_FLUSH, static_cast<uint8_t>(need_flush), 0);
}
void add_flush_periodically(bool flush_periodically) {
fbb_.AddElement<uint8_t>(InferenceContext::VT_FLUSH_PERIODICALLY, static_cast<uint8_t>(flush_periodically), 0);
}
void add_flush_period(int32_t flush_period) {
fbb_.AddElement<int32_t>(InferenceContext::VT_FLUSH_PERIOD, flush_period, 0);
}
void add_need_manual_release(bool need_manual_release) {
fbb_.AddElement<uint8_t>(InferenceContext::VT_NEED_MANUAL_RELEASE, static_cast<uint8_t>(need_manual_release), 0);
}
void add_precision(tflite::gpu::cl::data::CalculationsPrecision precision) {
fbb_.AddElement<int8_t>(InferenceContext::VT_PRECISION, static_cast<int8_t>(precision), 0);
}
void add_storage_type(tflite::gpu::data::TensorStorageType storage_type) {
fbb_.AddElement<int8_t>(InferenceContext::VT_STORAGE_TYPE, static_cast<int8_t>(storage_type), 0);
}
void add_nodes(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::CLNode>>> nodes) {
fbb_.AddOffset(InferenceContext::VT_NODES, nodes);
}
void add_tensors(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>>> tensors) {
fbb_.AddOffset(InferenceContext::VT_TENSORS, tensors);
}
void add_input_ids(flatbuffers::Offset<flatbuffers::Vector<int32_t>> input_ids) {
fbb_.AddOffset(InferenceContext::VT_INPUT_IDS, input_ids);
}
void add_variable_ids_and_refs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::PairOfValueIds>>> variable_ids_and_refs) {
fbb_.AddOffset(InferenceContext::VT_VARIABLE_IDS_AND_REFS, variable_ids_and_refs);
}
void add_output_ids(flatbuffers::Offset<flatbuffers::Vector<int32_t>> output_ids) {
fbb_.AddOffset(InferenceContext::VT_OUTPUT_IDS, output_ids);
}
void add_input_refs(flatbuffers::Offset<flatbuffers::Vector<int64_t>> input_refs) {
fbb_.AddOffset(InferenceContext::VT_INPUT_REFS, input_refs);
}
void add_output_refs(flatbuffers::Offset<flatbuffers::Vector<int64_t>> output_refs) {
fbb_.AddOffset(InferenceContext::VT_OUTPUT_REFS, output_refs);
}
explicit InferenceContextBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
flatbuffers::Offset<InferenceContext> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<InferenceContext>(end);
return o;
}
};
inline flatbuffers::Offset<InferenceContext> CreateInferenceContext(
flatbuffers::FlatBufferBuilder &_fbb,
bool need_flush = false,
bool flush_periodically = false,
int32_t flush_period = 0,
bool need_manual_release = false,
tflite::gpu::cl::data::CalculationsPrecision precision = tflite::gpu::cl::data::CalculationsPrecision::F32,
tflite::gpu::data::TensorStorageType storage_type = tflite::gpu::data::TensorStorageType::UNKNOWN,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::CLNode>>> nodes = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>>> tensors = 0,
flatbuffers::Offset<flatbuffers::Vector<int32_t>> input_ids = 0,
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::gpu::cl::data::PairOfValueIds>>> variable_ids_and_refs = 0,
flatbuffers::Offset<flatbuffers::Vector<int32_t>> output_ids = 0,
flatbuffers::Offset<flatbuffers::Vector<int64_t>> input_refs = 0,
flatbuffers::Offset<flatbuffers::Vector<int64_t>> output_refs = 0) {
InferenceContextBuilder builder_(_fbb);
builder_.add_output_refs(output_refs);
builder_.add_input_refs(input_refs);
builder_.add_output_ids(output_ids);
builder_.add_variable_ids_and_refs(variable_ids_and_refs);
builder_.add_input_ids(input_ids);
builder_.add_tensors(tensors);
builder_.add_nodes(nodes);
builder_.add_flush_period(flush_period);
builder_.add_storage_type(storage_type);
builder_.add_precision(precision);
builder_.add_need_manual_release(need_manual_release);
builder_.add_flush_periodically(flush_periodically);
builder_.add_need_flush(need_flush);
return builder_.Finish();
}
inline flatbuffers::Offset<InferenceContext> CreateInferenceContextDirect(
flatbuffers::FlatBufferBuilder &_fbb,
bool need_flush = false,
bool flush_periodically = false,
int32_t flush_period = 0,
bool need_manual_release = false,
tflite::gpu::cl::data::CalculationsPrecision precision = tflite::gpu::cl::data::CalculationsPrecision::F32,
tflite::gpu::data::TensorStorageType storage_type = tflite::gpu::data::TensorStorageType::UNKNOWN,
const std::vector<flatbuffers::Offset<tflite::gpu::cl::data::CLNode>> *nodes = nullptr,
const std::vector<flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>> *tensors = nullptr,
const std::vector<int32_t> *input_ids = nullptr,
const std::vector<flatbuffers::Offset<tflite::gpu::cl::data::PairOfValueIds>> *variable_ids_and_refs = nullptr,
const std::vector<int32_t> *output_ids = nullptr,
const std::vector<int64_t> *input_refs = nullptr,
const std::vector<int64_t> *output_refs = nullptr) {
auto nodes__ = nodes ? _fbb.CreateVector<flatbuffers::Offset<tflite::gpu::cl::data::CLNode>>(*nodes) : 0;
auto tensors__ = tensors ? _fbb.CreateVector<flatbuffers::Offset<tflite::gpu::cl::data::TensorDescWithId>>(*tensors) : 0;
auto input_ids__ = input_ids ? _fbb.CreateVector<int32_t>(*input_ids) : 0;
auto variable_ids_and_refs__ = variable_ids_and_refs ? _fbb.CreateVector<flatbuffers::Offset<tflite::gpu::cl::data::PairOfValueIds>>(*variable_ids_and_refs) : 0;
auto output_ids__ = output_ids ? _fbb.CreateVector<int32_t>(*output_ids) : 0;
auto input_refs__ = input_refs ? _fbb.CreateVector<int64_t>(*input_refs) : 0;
auto output_refs__ = output_refs ? _fbb.CreateVector<int64_t>(*output_refs) : 0;
return tflite::gpu::cl::data::CreateInferenceContext(
_fbb,
need_flush,
flush_periodically,
flush_period,
need_manual_release,
precision,
storage_type,
nodes__,
tensors__,
input_ids__,
variable_ids_and_refs__,
output_ids__,
input_refs__,
output_refs__);
}
inline const tflite::gpu::cl::data::InferenceContext *GetInferenceContext(const void *buf) {
return flatbuffers::GetRoot<tflite::gpu::cl::data::InferenceContext>(buf);
}
inline const tflite::gpu::cl::data::InferenceContext *GetSizePrefixedInferenceContext(const void *buf) {
return flatbuffers::GetSizePrefixedRoot<tflite::gpu::cl::data::InferenceContext>(buf);
}
inline bool VerifyInferenceContextBuffer(
flatbuffers::Verifier &verifier) {
return verifier.VerifyBuffer<tflite::gpu::cl::data::InferenceContext>(nullptr);
}
inline bool VerifySizePrefixedInferenceContextBuffer(
flatbuffers::Verifier &verifier) {
return verifier.VerifySizePrefixedBuffer<tflite::gpu::cl::data::InferenceContext>(nullptr);
}
inline void FinishInferenceContextBuffer(
flatbuffers::FlatBufferBuilder &fbb,
flatbuffers::Offset<tflite::gpu::cl::data::InferenceContext> root) {
fbb.Finish(root);
}
inline void FinishSizePrefixedInferenceContextBuffer(
flatbuffers::FlatBufferBuilder &fbb,
flatbuffers::Offset<tflite::gpu::cl::data::InferenceContext> root) {
fbb.FinishSizePrefixed(root);
}
} // namespace data
} // namespace cl
} // namespace gpu
} // namespace tflite
#endif // FLATBUFFERS_GENERATED_SERIALIZATION_TFLITE_GPU_CL_DATA_H_

tensorflow/lite/delegates/gpu/common/task/serialization_base_generated.h
File diff suppressed because it is too large.


@@ -485,7 +485,7 @@ microlite: $(MICROLITE_LIB_PATH)
# Hack for generating schema file bypassing flatbuffer parsing
tensorflow/lite/schema/schema_generated.h:
-@cp -u tensorflow/lite/schema/schema_generated.h.OPENSOURCE tensorflow/lite/schema/schema_generated.h
+@cp -u tensorflow/lite/schema/schema_generated.h.oss tensorflow/lite/schema/schema_generated.h
# Gathers together all the objects we've compiled into a single '.a' archive.
$(MICROLITE_LIB_PATH): tensorflow/lite/schema/schema_generated.h $(MICROLITE_LIB_OBJS)


@@ -346,7 +346,7 @@ micro: $(LIB_PATH)
# Hack for generating schema file bypassing flatbuffer parsing
tensorflow/lite/schema/schema_generated.h:
-@cp -u tensorflow/lite/schema/schema_generated.h.OPENSOURCE tensorflow/lite/schema/schema_generated.h
+@cp -u tensorflow/lite/schema/schema_generated.h.oss tensorflow/lite/schema/schema_generated.h
# Gathers together all the objects we've compiled into a single '.a' archive.
$(LIB_PATH): tensorflow/lite/schema/schema_generated.h $(LIB_OBJS)


@@ -5,7 +5,11 @@ tensorflow/compat_template.__init__.py
tensorflow/compat_template_v1.__init__.py
tensorflow/compiler/mlir/glob_lit_test.bzl
tensorflow/go/op/wrappers.go
tensorflow/lite/delegates/gpu/cl/compiled_program_cache_generated.h
tensorflow/lite/delegates/gpu/cl/serialization_generated.h
tensorflow/lite/delegates/gpu/common/task/serialization_base_generated.h
tensorflow/lite/micro/build_def.bzl
tensorflow/lite/schema/schema_generated.h
tensorflow/python/autograph/core/config.py
tensorflow/python/eager/benchmarks_test_base.py
tensorflow/python/framework/tfrt_utils.py