STT-tensorflow/tensorflow/lite/schema/schema_v1.fbs

// Copyright 2017 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Revision History
// Version 0: Initial version.
// Version 1: Add subgraphs to schema.
namespace tflite;
// The type of data stored in a tensor.
enum TensorType : byte {
FLOAT32 = 0,
FLOAT16 = 1,
INT32 = 2,
UINT8 = 3,
INT64 = 4,
STRING = 5,
}
// Parameters for converting a quantized tensor back to float. Given a
// quantized value q, the corresponding float value f should be:
// f = scale * (q - zero_point)
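// For example, with scale = 0.5 and zero_point = 10, the quantized value
// q = 14 corresponds to f = 0.5 * (14 - 10) = 2.0.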
table QuantizationParameters {
min:[float]; // For importing back into tensorflow.
max:[float]; // For importing back into tensorflow.
scale:[float];
zero_point:[long];
}
table Tensor {
// The tensor shape. The meaning of each entry is operator-specific but
// builtin ops use: [batch size, height, width, number of channels] (that is,
// TensorFlow's NHWC layout).
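// For example, a batch of 8 RGB images of 128x128 pixels would have
// shape [8, 128, 128, 3].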
shape:[int];
type:TensorType;
// The data_buffer is an opaque container, with the assumption that the
// target device is little-endian. In addition, all builtin operators assume
// the memory is ordered such that if `shape` is [4, 3, 2], then index
// [i, j, k] maps to data_buffer[i*3*2 + j*2 + k].
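// For example, with `shape` [4, 3, 2] the element at index [2, 1, 0] sits at
// flattened element position 2*3*2 + 1*2 + 0 = 14.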
data_buffer:[ubyte];
name:string; // For debugging and importing back into tensorflow.
quantization:QuantizationParameters; // Optional.
}
// A list of builtin operators. Builtin operators are slightly faster than custom
// ones, but not by much. Moreover, while custom operators accept an opaque
// object containing configuration parameters, builtins have a predetermined
// set of acceptable options.
enum BuiltinOperator : byte {
CUSTOM = 0,
CONVOLUTION = 1,
DEPTHWISE_CONVOLUTION = 2,
CONCAT_EMBEDDINGS = 3,
LSH_PROJECTION = 4,
TANH = 5,
RELU = 6,
AVERAGE_POOL = 7,
MAX_POOL = 8,
L2_POOL = 9,
SIGMOID = 10,
SVDF = 11,
BasicRNN = 12,
RELU6 = 13,
EMBEDDING_LOOKUP = 14,
FULLY_CONNECTED = 15,
HASHTABLE_LOOKUP = 16,
SOFTMAX = 17,
CONCATENATION = 18,
LSTM = 19,
ADD = 20,
L2NORM = 21,
LOCAL_RESPONSE_NORM = 22,
RESIZE_BILINEAR = 23,
CALL = 24,
RESHAPE = 25,
SKIP_GRAM = 26,
SPACE_TO_DEPTH = 27,
}
// Options for the builtin operators.
union BuiltinOptions {
ConvolutionOptions,
DepthwiseConvolutionOptions,
ConcatEmbeddingsOptions,
LSHProjectionOptions,
PoolOptions,
SVDFOptions,
BasicRNNOptions,
FullyConnectedOptions,
SoftmaxOptions,
ConcatenationOptions,
AddOptions,
L2NormOptions,
LocalResponseNormOptions,
LSTMOptions,
ResizeBilinearOptions,
CallOptions,
ReshapeOptions,
SkipGramOptions,
SpaceToDepthOptions,
}
enum Padding : byte { SAME, VALID }
enum ActivationFunctionType : byte {
NONE = 0,
RELU = 1,
RELU1 = 2,
RELU6 = 3,
TANH = 4,
SIGN_BIT = 5,
}
table ConvolutionOptions {
padding:Padding;
stride_w:int;
stride_h:int;
fused_activation_function:ActivationFunctionType;
}
table PoolOptions {
padding:Padding;
stride_w:int;
stride_h:int;
filter_width:int;
filter_height:int;
fused_activation_function:ActivationFunctionType;
}
table DepthwiseConvolutionOptions {
padding:Padding;
stride_w:int;
stride_h:int;
depth_multiplier:int;
fused_activation_function:ActivationFunctionType;
}
table ConcatEmbeddingsOptions {
num_channels:int;
num_columns_per_channel:[int];
embedding_dim_per_channel:[int]; // This could be inferred from parameters.
}
enum LSHProjectionType: byte {
UNKNOWN = 0,
SPARSE = 1,
DENSE = 2,
}
table LSHProjectionOptions {
type: LSHProjectionType;
}
table SVDFOptions {
rank:int;
fused_activation_function:ActivationFunctionType;
}
// An implementation of TensorFlow BasicRNNCell.
table BasicRNNOptions {
fused_activation_function:ActivationFunctionType;
}
// An implementation of TensorFlow fully_connected (a.k.a. Dense) layer.
table FullyConnectedOptions {
fused_activation_function:ActivationFunctionType;
}
table SoftmaxOptions {
beta: float;
}
// An implementation of TensorFlow concat.
table ConcatenationOptions {
axis:int;
fused_activation_function:ActivationFunctionType;
}
table AddOptions {
fused_activation_function:ActivationFunctionType;
}
table L2NormOptions {
fused_activation_function:ActivationFunctionType;
}
table LocalResponseNormOptions {
radius:int;
bias:float;
alpha:float;
beta:float;
}
// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell
table LSTMOptions {
fused_activation_function:ActivationFunctionType;
cell_clip: float; // Optional, 0.0 means no clipping
proj_clip: float; // Optional, 0.0 means no clipping
}
table ResizeBilinearOptions {
new_height:int;
new_width:int;
}
// Options for a call operation.
table CallOptions {
// The subgraph index that needs to be called.
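// For example, subgraph = 1 refers to Model.subgraphs[1].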
subgraph:int;
}
table ReshapeOptions {
new_shape:[int];
}
table SkipGramOptions {
ngram_size: int;
max_skip_size: int;
include_all_ngrams: bool;
}
table SpaceToDepthOptions {
block_size: int;
}
// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
// builtin, or a string if the operator is custom.
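// For example, a builtin convolution is identified by
// builtin_code = CONVOLUTION, while a custom operator sets
// builtin_code = CUSTOM and carries its identifying string in custom_code.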
table OperatorCode {
builtin_code:BuiltinOperator;
custom_code:string;
}
// An operator takes tensors as inputs and outputs. The type of operation being
// performed is determined by an index into the list of valid OperatorCodes,
// while the specifics of each operation are configured using builtin_options
// or custom_options.
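// For example, if operator_codes holds [CONVOLUTION, CUSTOM], an operator with
// opcode_index = 0 is a convolution configured through a ConvolutionOptions
// value in builtin_options, while opcode_index = 1 selects the custom operator
// and is configured through its opaque custom_options bytes.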
table Operator {
// Index into the operator_codes array. Using an integer here avoids
// complicated map lookups.
opcode_index:int;
inputs:[int];
outputs:[int];
builtin_options:BuiltinOptions;
custom_options:[ubyte];
}
// A subgraph, which typically represents an entire model or a callable piece
// of one.
table SubGraph {
// A list of all tensors used in this subgraph.
tensors:[Tensor];
// Indices of the input tensors.
inputs:[int];
// Indices of the output tensors.
outputs:[int];
// All operators, in execution order.
operators:[Operator];
// Name of subgraph (used for debugging).
name:string;
}
// The root type, defining a model.
table Model {
// Version of the schema.
version:int;
// A list of all operator codes used in this model. This is
// kept in order because operators carry an index into this
// vector.
operator_codes:[OperatorCode];
// All the subgraphs of the model. The 0th is assumed to be the main
// model.
subgraphs:[SubGraph];
// A description of the model.
description:string;
}
root_type Model;