From 54151bec863ab6159b3c288f8a3dc6b4d281bb27 Mon Sep 17 00:00:00 2001 From: Jaesung Chung Date: Thu, 8 Oct 2020 07:47:03 -0700 Subject: [PATCH] Add new builtin code field to operator code This is for addressing the builtin code shortage problem. PiperOrigin-RevId: 336083510 Change-Id: I0ca615ed0893e948ad6299221117e2c59f81cdba --- .../end2end/fake_quant_per_channel.pbtxt | 8 +- .../tests/mlir2flatbuffer/basic_lstm.mlir | 3 +- .../custom_op_with_tflite_op.mlir | 13 +- .../mlir2flatbuffer/depthwise_conv2d.mlir | 6 +- .../mlir2flatbuffer/depthwise_conv2d_v2.mlir | 10 +- .../disable_flex_enable_builtin.mlir | 10 +- .../tests/mlir2flatbuffer/fake_quant.mlir | 5 +- .../mlir2flatbuffer/flex_exclusively.mlir | 3 +- .../flex_op_with_complex128.mlir | 5 +- .../mlir2flatbuffer/flex_op_with_f64.mlir | 5 +- .../flex_op_with_tflite_op.mlir | 11 +- .../mlir2flatbuffer/fully_connected.mlir | 5 +- .../mlir2flatbuffer/fully_connected_v2.mlir | 5 +- .../mlir2flatbuffer/hashtable_resource.mlir | 5 +- .../lite/tests/mlir2flatbuffer/if_op.mlir | 15 +- .../lite/tests/mlir2flatbuffer/logical.mlir | 10 +- .../mlir/lite/tests/mlir2flatbuffer/lstm.mlir | 5 +- .../tests/mlir2flatbuffer/lstm_quantized.mlir | 5 +- .../mlir/lite/tests/mlir2flatbuffer/math.mlir | 25 +- .../lite/tests/mlir2flatbuffer/mul_v2.mlir | 5 +- .../lite/tests/mlir2flatbuffer/mul_v3.mlir | 5 +- .../mlir/lite/tests/mlir2flatbuffer/nn.mlir | 5 +- .../tests/mlir2flatbuffer/numeric_verify.mlir | 5 +- .../tests/mlir2flatbuffer/quantization.mlir | 25 +- .../lite/tests/mlir2flatbuffer/reshape.mlir | 5 +- .../lite/tests/mlir2flatbuffer/simple.mlir | 5 +- .../mlir/lite/tests/mlir2flatbuffer/svdf.mlir | 5 +- .../lite/tests/mlir2flatbuffer/svdf_v2.mlir | 5 +- .../tests/mlir2flatbuffer/tfl_while_op.mlir | 15 +- .../transpose_conv_optional.mlir | 5 +- .../lite/tests/mlir2flatbuffer/type_attr.mlir | 5 +- .../unidirectional_sequence_lstm.mlir | 5 +- .../unidirectional_sequence_rnn.mlir | 5 +- .../lite/tests/mlir2flatbuffer/while_op.mlir | 15 +- tensorflow/lite/builtin_ops.h | 4 +- .../lite/core/api/flatbuffer_conversions.cc | 2 + .../writer/option_writer_generator.cc | 3 + tensorflow/lite/schema/BUILD | 3 +- .../schema/builtin_ops_header/generator.cc | 3 +- .../schema/flatbuffer_compatibility_test.cc | 3 +- tensorflow/lite/schema/schema.fbs | 19 +- tensorflow/lite/schema/schema_generated.h | 72 +- tensorflow/lite/schema/schema_utils.cc | 77 +- tensorflow/lite/schema/schema_utils.h | 16 + tensorflow/lite/schema/schema_v3a.fbs | 1109 +++++++++++++++++ .../lite/tools/optimize/quantize_weights.cc | 2 + 46 files changed, 1437 insertions(+), 145 deletions(-) create mode 100644 tensorflow/lite/schema/schema_v3a.fbs diff --git a/tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_per_channel.pbtxt b/tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_per_channel.pbtxt index adfcd93b4bc..117edd02beb 100644 --- a/tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_per_channel.pbtxt +++ b/tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_per_channel.pbtxt @@ -459,11 +459,13 @@ node { # CHECK-LABEL: { # CHECK: version: 3, # CHECK: operator_codes: [ { -# CHECK: builtin_code: CONV_2D, -# CHECK: version: 3 +# CHECK: deprecated_builtin_code: 3, +# CHECK: version: 3, +# CHECK: builtin_code: CONV_2D # CHECK: }, { -# CHECK: builtin_code: RESHAPE, +# CHECK: deprecated_builtin_code: 22, # CHECK: version: 1 +# CHECK: builtin_code: RESHAPE # CHECK: } ], # CHECK: subgraphs: [ { # CHECK: tensors: [ { diff --git 
a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/basic_lstm.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/basic_lstm.mlir index 8389045fc57..f5e5087d420 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/basic_lstm.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/basic_lstm.mlir @@ -4,8 +4,9 @@ func @main(tensor<1x384xf32>, tensor<1x96xf32>, tensor<384x480xf32>, tensor<384x // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: LSTM, +// CHECK-NEXT: deprecated_builtin_code: 16, // CHECK-NEXT: version: 2 +// CHECK-NEXT: builtin_code: LSTM // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/custom_op_with_tflite_op.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/custom_op_with_tflite_op.mlir index 2d906d6901e..02767ddceba 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/custom_op_with_tflite_op.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/custom_op_with_tflite_op.mlir @@ -6,14 +6,17 @@ func @main(tensor<4xf32>) -> tensor<4xf32> { // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: MUL, +// CHECK-NEXT: deprecated_builtin_code: 18, // CHECK-NEXT: version: 1 +// CHECK-NEXT: builtin_code: MUL // CHECK-NEXT: }, { -// CHECK-NEXT: builtin_code: CUSTOM, -// CHECK-NEXT: custom_code: "MyCustomOp" +// CHECK-NEXT: deprecated_builtin_code: 32, +// CHECK-NEXT: custom_code: "MyCustomOp", +// CHECK-NEXT: builtin_code: CUSTOM // CHECK-NEXT: }, { -// CHECK-NEXT: builtin_code: EXP, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 47, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: EXP // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/depthwise_conv2d.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/depthwise_conv2d.mlir index 98c3eb154e1..a576c84e207 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/depthwise_conv2d.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/depthwise_conv2d.mlir @@ -5,11 +5,13 @@ func @main(tensor<1x224x224x3xf32>) -> tensor<1x112x112x32xf32> { // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { - // CHECK-NEXT: builtin_code: DEQUANTIZE, + // CHECK-NEXT: deprecated_builtin_code: 6, // CHECK-NEXT: version: 1 + // CHECK-NEXT: builtin_code: DEQUANTIZE // CHECK-NEXT: }, { - // CHECK-NEXT: builtin_code: DEPTHWISE_CONV_2D, + // CHECK-NEXT: deprecated_builtin_code: 4, // CHECK-NEXT: version: 1 + // CHECK-NEXT: builtin_code: DEPTHWISE_CONV_2D // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/depthwise_conv2d_v2.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/depthwise_conv2d_v2.mlir index 86f27936946..7d2d84d242a 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/depthwise_conv2d_v2.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/depthwise_conv2d_v2.mlir @@ -5,11 +5,13 @@ func @main(tensor<1x224x224x3xf32>) -> tensor<1x112x112x32xf32> { // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { - // CHECK-NEXT: builtin_code: DEQUANTIZE, - // CHECK-NEXT: version: 1 + // CHECK-NEXT: deprecated_builtin_code: 6, + // CHECK-NEXT: version: 1, + // CHECK-NEXT: builtin_code: DEQUANTIZE // CHECK-NEXT: 
}, { - // CHECK-NEXT: builtin_code: DEPTHWISE_CONV_2D, - // CHECK-NEXT: version: 2 + // CHECK-NEXT: deprecated_builtin_code: 4, + // CHECK-NEXT: version: 2, + // CHECK-NEXT: builtin_code: DEPTHWISE_CONV_2D // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/disable_flex_enable_builtin.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/disable_flex_enable_builtin.mlir index c034fa7e462..66ec5ed8a04 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/disable_flex_enable_builtin.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/disable_flex_enable_builtin.mlir @@ -5,11 +5,13 @@ func @main(tensor<4xf32>) -> tensor<4xf32> { // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: MUL, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 18, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: MUL // CHECK-NEXT: }, { -// CHECK-NEXT: builtin_code: EXP, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 47, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: EXP // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/fake_quant.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/fake_quant.mlir index 6d8c54b783a..dc8590b4a20 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/fake_quant.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/fake_quant.mlir @@ -6,8 +6,9 @@ func @main(tensor<4xf32>) -> tensor<4xf32> { // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: FAKE_QUANT, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 80, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: FAKE_QUANT // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/flex_exclusively.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/flex_exclusively.mlir index 018d99fc74d..245260d994d 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/flex_exclusively.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/flex_exclusively.mlir @@ -4,8 +4,9 @@ func @main(%arg0: tensor<3x2xf32>) -> tensor<3x2xf32> { // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: CUSTOM, +// CHECK-NEXT: deprecated_builtin_code: 32, // CHECK-NEXT: custom_code: "FlexAddV2" +// CHECK-NEXT: builtin_code: CUSTOM // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/flex_op_with_complex128.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/flex_op_with_complex128.mlir index a5e6d4aabb5..82ac52d2a64 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/flex_op_with_complex128.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/flex_op_with_complex128.mlir @@ -5,8 +5,9 @@ func @main(tensor<4xcomplex<f64>>, tensor<4xcomplex<f64>>) -> tensor<4xcomplex<f64>> { // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: CUSTOM, -// CHECK-NEXT: custom_code: "FlexAdd" +// CHECK-NEXT: deprecated_builtin_code: 32, +// CHECK-NEXT: custom_code: "FlexAdd", +// CHECK-NEXT: builtin_code: CUSTOM // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/flex_op_with_f64.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/flex_op_with_f64.mlir --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/flex_op_with_f64.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/flex_op_with_f64.mlir @@ -5,8 +5,9 @@ func @main(tensor<4xf64>, tensor<4xf64>) -> tensor<4xf64> { // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: CUSTOM, -// CHECK-NEXT: custom_code: "FlexAdd" +// CHECK-NEXT: deprecated_builtin_code: 32, +// CHECK-NEXT: custom_code: "FlexAdd", +// CHECK-NEXT: builtin_code: CUSTOM // CHECK-NEXT: } ], //
CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/flex_op_with_tflite_op.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/flex_op_with_tflite_op.mlir index 8a9175b5c59..e15c7f23585 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/flex_op_with_tflite_op.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/flex_op_with_tflite_op.mlir @@ -5,14 +5,17 @@ func @main(tensor<4xf32>) -> tensor<4xf32> { // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { +// CHECK-NEXT: deprecated_builtin_code: 18, +// CHECK-NEXT: version: 1, // CHECK-NEXT: builtin_code: MUL -// CHECK-NEXT: version: 1 // CHECK-NEXT: }, { -// CHECK-NEXT: builtin_code: CUSTOM, -// CHECK-NEXT: custom_code: "FlexDiv" +// CHECK-NEXT: deprecated_builtin_code: 32, +// CHECK-NEXT: custom_code: "FlexDiv", +// CHECK-NEXT: builtin_code: CUSTOM // CHECK-NEXT: }, { +// CHECK-NEXT: deprecated_builtin_code: 47, +// CHECK-NEXT: version: 1, // CHECK-NEXT: builtin_code: EXP -// CHECK-NEXT: version: 1 // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/fully_connected.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/fully_connected.mlir index bbe4fdb8337..58f693c1b0a 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/fully_connected.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/fully_connected.mlir @@ -5,8 +5,9 @@ func @main(tensor<40x37xf32>, tensor<40x37xf32>) -> tensor<40x40xf32> { // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { - // CHECK-NEXT: builtin_code: FULLY_CONNECTED, - // CHECK-NEXT: version: 1 + // CHECK-NEXT: deprecated_builtin_code: 9, + // CHECK-NEXT: version: 1, + // CHECK-NEXT: builtin_code: FULLY_CONNECTED // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/fully_connected_v2.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/fully_connected_v2.mlir index 0abe720ccba..913a69c4d46 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/fully_connected_v2.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/fully_connected_v2.mlir @@ -5,8 +5,9 @@ func @main(tensor<40x37xf32>, tensor<40x37xf32>) -> tensor<40x40xf32> { // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { - // CHECK-NEXT: builtin_code: FULLY_CONNECTED, - // CHECK-NEXT: version: 2 + // CHECK-NEXT: deprecated_builtin_code: 9, + // CHECK-NEXT: version: 2, + // CHECK-NEXT: builtin_code: FULLY_CONNECTED // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/hashtable_resource.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/hashtable_resource.mlir index 3adee1dec77..2d5852dd83d 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/hashtable_resource.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/hashtable_resource.mlir @@ -3,8 +3,9 @@ // CHECK: { // CHECK: version: 3, // CHECK: operator_codes: [ { -// CHECK: builtin_code: CUSTOM, -// CHECK: custom_code: "HashTableV2" +// CHECK: deprecated_builtin_code: 32, +// CHECK: custom_code: "HashTableV2", +// CHECK: builtin_code: CUSTOM // CHECK: } ], // CHECK: subgraphs: [ { // CHECK: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/if_op.mlir 
b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/if_op.mlir index 7290209cc4a..86794afdf4c 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/if_op.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/if_op.mlir @@ -4,16 +4,19 @@ // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: LESS, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 58, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: LESS // CHECK-NEXT: }, { -// CHECK-NEXT: builtin_code: IF, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 118, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: IF // CHECK-NEXT: }, { // CHECK-NEXT: version: 1 // CHECK-NEXT: }, { -// CHECK-NEXT: builtin_code: MUL, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 18, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: MUL // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/logical.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/logical.mlir index 84cbf48c099..d24fa33fa13 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/logical.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/logical.mlir @@ -5,11 +5,13 @@ func @main(tensor<4xi1>) -> tensor<4xi1> { // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { - // CHECK-NEXT: builtin_code: LOGICAL_OR, - // CHECK-NEXT: version: 1 + // CHECK-NEXT: deprecated_builtin_code: 84, + // CHECK-NEXT: version: 1, + // CHECK-NEXT: builtin_code: LOGICAL_OR // CHECK-NEXT: }, { - // CHECK-NEXT: builtin_code: LOGICAL_AND, - // CHECK-NEXT: version: 1 + // CHECK-NEXT: deprecated_builtin_code: 86, + // CHECK-NEXT: version: 1, + // CHECK-NEXT: builtin_code: LOGICAL_AND // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/lstm.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/lstm.mlir index 707bc926870..19bfc661425 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/lstm.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/lstm.mlir @@ -4,8 +4,9 @@ func @main(tensor<1x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, t // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: LSTM, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 16, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: LSTM // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/lstm_quantized.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/lstm_quantized.mlir index 5985ffaa446..a48e3c82e9d 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/lstm_quantized.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/lstm_quantized.mlir @@ -7,8 +7,9 @@ func @main(%arg0: tensor<1x528x!quant.uniform> // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: LSTM, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 16, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: LSTM // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/math.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/math.mlir 
index 297a8b8cb59..c4cb910f17b 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/math.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/math.mlir @@ -5,20 +5,25 @@ func @main(tensor<4xf32>) -> tensor<4xf32> { // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { - // CHECK-NEXT: builtin_code: SQUARED_DIFFERENCE, - // CHECK-NEXT: version: 1 + // CHECK-NEXT: deprecated_builtin_code: 99, + // CHECK-NEXT: version: 1, + // CHECK-NEXT: builtin_code: SQUARED_DIFFERENCE // CHECK-NEXT: }, { - // CHECK-NEXT: builtin_code: MUL, - // CHECK-NEXT: version: 1 + // CHECK-NEXT: deprecated_builtin_code: 18, + // CHECK-NEXT: version: 1, + // CHECK-NEXT: builtin_code: MUL // CHECK-NEXT: }, { - // CHECK-NEXT: builtin_code: DIV, - // CHECK-NEXT: version: 1 + // CHECK-NEXT: deprecated_builtin_code: 42, + // CHECK-NEXT: version: 1, + // CHECK-NEXT: builtin_code: DIV // CHECK-NEXT: }, { - // CHECK-NEXT: builtin_code: EXP, - // CHECK-NEXT: version: 1 + // CHECK-NEXT: deprecated_builtin_code: 47, + // CHECK-NEXT: version: 1, + // CHECK-NEXT: builtin_code: EXP // CHECK-NEXT: }, { - // CHECK-NEXT: builtin_code: NEG, - // CHECK-NEXT: version: 1 + // CHECK-NEXT: deprecated_builtin_code: 59, + // CHECK-NEXT: version: 1, + // CHECK-NEXT: builtin_code: NEG // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/mul_v2.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/mul_v2.mlir index 15fce806a70..04ceb3855f2 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/mul_v2.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/mul_v2.mlir @@ -5,8 +5,9 @@ func @main(tensor<3x!quant.uniform>) -> tensor<3x!quant.uniform> { // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: MUL, -// CHECK-NEXT: version: 2 +// CHECK-NEXT: deprecated_builtin_code: 18, +// CHECK-NEXT: version: 2, +// CHECK-NEXT: builtin_code: MUL // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/mul_v3.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/mul_v3.mlir --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/mul_v3.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/mul_v3.mlir @@ -5,8 +5,9 @@ func @main(tensor<3x!quant.uniform>) -> tensor<3x!quant.uniform> { // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: MUL, -// CHECK-NEXT: version: 3 +// CHECK-NEXT: deprecated_builtin_code: 18, +// CHECK-NEXT: version: 3, +// CHECK-NEXT: builtin_code: MUL // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/nn.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/nn.mlir --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/nn.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/nn.mlir @@ -5,8 +5,9 @@ func @main(tensor<1x6x6x16xf32>) -> tensor<1x1x1x16xf32> { // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { - // CHECK-NEXT: builtin_code: AVERAGE_POOL_2D, - // CHECK-NEXT: version: 1 + // CHECK-NEXT: deprecated_builtin_code: 1, + // CHECK-NEXT: version: 1, + // CHECK-NEXT: builtin_code: AVERAGE_POOL_2D // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/numeric_verify.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/numeric_verify.mlir index 4f28ad327df..e39ded18b86 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/numeric_verify.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/numeric_verify.mlir @@ -3,8 +3,9 @@ // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: CUSTOM, -// CHECK-NEXT: custom_code: "NumericVerify" +// CHECK-NEXT: deprecated_builtin_code: 32, +// CHECK-NEXT: custom_code: "NumericVerify", +// CHECK-NEXT: builtin_code: CUSTOM // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/quantization.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/quantization.mlir index dbe10a3f90c..81065798271 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/quantization.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/quantization.mlir @@ -4,20 +4,25 @@ func @main(%arg0: tensor<1x224x224x3xf32>) -> tensor<1x1001xf32> { // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: QUANTIZE, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 114, +// CHECK-NEXT: version: 1, 
+// CHECK-NEXT: builtin_code: QUANTIZE // CHECK-NEXT: }, { -// CHECK-NEXT: builtin_code: CONV_2D, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 3, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: CONV_2D // CHECK-NEXT: }, { -// CHECK-NEXT: builtin_code: RESHAPE, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 22, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: RESHAPE // CHECK-NEXT: }, { -// CHECK-NEXT: builtin_code: SOFTMAX, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 25, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: SOFTMAX // CHECK-NEXT: }, { -// CHECK-NEXT: builtin_code: DEQUANTIZE, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 6, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: DEQUANTIZE // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/reshape.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/reshape.mlir index 15defbc3957..129e037a2ee 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/reshape.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/reshape.mlir @@ -5,8 +5,9 @@ func @main(tensor<3x2xi32>) -> tensor<6xi32> { // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: RESHAPE, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 22, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: RESHAPE // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/simple.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/simple.mlir index 2182db1d39e..ea380f8f47d 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/simple.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/simple.mlir @@ -7,8 +7,9 @@ func @main(tensor<3x2xi32>) -> tensor<3x2xi32> // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: SUB, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 41, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: SUB // CHECK-NEXT: }, { // CHECK-NEXT: version: 1 // CHECK-NEXT: } ], diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/svdf.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/svdf.mlir index 3d29823c93c..e2ad4c73baa 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/svdf.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/svdf.mlir @@ -4,8 +4,9 @@ func @main(tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>) - // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: SVDF, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 27, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: SVDF // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/svdf_v2.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/svdf_v2.mlir index 8dfa68798b8..c1d30a9b4d4 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/svdf_v2.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/svdf_v2.mlir @@ -4,8 +4,9 @@ func @main(tensor<4 x f32>, tensor<4 x i8>, tensor<4 x f32>, tensor<4 x f32>) -> // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// 
CHECK-NEXT: builtin_code: SVDF, -// CHECK-NEXT: version: 2 +// CHECK-NEXT: deprecated_builtin_code: 27, +// CHECK-NEXT: version: 2, +// CHECK-NEXT: builtin_code: SVDF // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/tfl_while_op.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/tfl_while_op.mlir index 996543cc9c7..87e3ccf4688 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/tfl_while_op.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/tfl_while_op.mlir @@ -3,14 +3,17 @@ // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: WHILE, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 119, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: WHILE // CHECK-NEXT: }, { -// CHECK-NEXT: builtin_code: GREATER, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 61, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: GREATER // CHECK-NEXT: }, { -// CHECK-NEXT: builtin_code: SUB, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 41, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: SUB // CHECK-NEXT: }, { // CHECK-NEXT: version: 1 // CHECK-NEXT: } ], diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/transpose_conv_optional.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/transpose_conv_optional.mlir index ca335ebd000..5252dc21a59 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/transpose_conv_optional.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/transpose_conv_optional.mlir @@ -4,8 +4,9 @@ func @main(%arg0: tensor<4xi32>, %arg1: tensor<32x4x4x128xf32>, %arg2: tensor<1x // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: TRANSPOSE_CONV, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 67, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: TRANSPOSE_CONV // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/type_attr.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/type_attr.mlir index 01410d370d4..690331dec84 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/type_attr.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/type_attr.mlir @@ -3,8 +3,9 @@ // CHECK: { // CHECK: version: 3, // CHECK: operator_codes: [ { -// CHECK: builtin_code: CUSTOM, -// CHECK: custom_code: "SomeOperation" +// CHECK: deprecated_builtin_code: 32, +// CHECK: custom_code: "SomeOperation", +// CHECK: builtin_code: CUSTOM // CHECK: } ], // CHECK: subgraphs: [ { // CHECK: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/unidirectional_sequence_lstm.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/unidirectional_sequence_lstm.mlir index 9b0315e1e20..38c1ed40b35 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/unidirectional_sequence_lstm.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/unidirectional_sequence_lstm.mlir @@ -4,8 +4,9 @@ func @main(tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, t // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: UNIDIRECTIONAL_SEQUENCE_LSTM, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 44, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: 
builtin_code: UNIDIRECTIONAL_SEQUENCE_LSTM // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/unidirectional_sequence_rnn.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/unidirectional_sequence_rnn.mlir index 67349b857f7..575499c8d66 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/unidirectional_sequence_rnn.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/unidirectional_sequence_rnn.mlir @@ -4,8 +4,9 @@ func @main(tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>, tensor<4 x f32>) - // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: UNIDIRECTIONAL_SEQUENCE_RNN, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 35, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: UNIDIRECTIONAL_SEQUENCE_RNN // CHECK-NEXT: } ], // CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { diff --git a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/while_op.mlir b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/while_op.mlir index d69e8f40311..e58c54219ab 100644 --- a/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/while_op.mlir +++ b/tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/while_op.mlir @@ -3,14 +3,17 @@ // CHECK: { // CHECK-NEXT: version: 3, // CHECK-NEXT: operator_codes: [ { -// CHECK-NEXT: builtin_code: WHILE, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 119, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: WHILE // CHECK-NEXT: }, { -// CHECK-NEXT: builtin_code: GREATER, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 61, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: GREATER // CHECK-NEXT: }, { -// CHECK-NEXT: builtin_code: SUB, -// CHECK-NEXT: version: 1 +// CHECK-NEXT: deprecated_builtin_code: 41, +// CHECK-NEXT: version: 1, +// CHECK-NEXT: builtin_code: SUB // CHECK-NEXT: }, { // CHECK-NEXT: version: 1 // CHECK-NEXT: } ], diff --git a/tensorflow/lite/builtin_ops.h b/tensorflow/lite/builtin_ops.h index 85140289ac1..a37607f6260 100644 --- a/tensorflow/lite/builtin_ops.h +++ b/tensorflow/lite/builtin_ops.h @@ -24,7 +24,8 @@ extern "C" { #endif // __cplusplus // The enum for builtin operators. -// Note: CUSTOM and DELEGATE are 2 special ops which are not real built-in ops. +// Note: CUSTOM, DELEGATE, and PLACEHOLDER_FOR_GREATER_OP_CODES are 3 special +// ops which are not real built-in ops. 
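+// For example (illustrative, not part of the enum): once real builtin +// operator codes grow past 127, a model keeps 127 (this placeholder) in the +// legacy byte-sized operator code field and records the real operator in the +// new int32 builtin_code field of the OperatorCode table.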
typedef enum { kTfLiteBuiltinAdd = 0, kTfLiteBuiltinAveragePool2d = 1, @@ -153,6 +154,7 @@ typedef enum { kTfLiteBuiltinDensify = 124, kTfLiteBuiltinSegmentSum = 125, kTfLiteBuiltinBatchMatmul = 126, + kTfLiteBuiltinPlaceholderForGreaterOpCodes = 127, } TfLiteBuiltinOperator; #ifdef __cplusplus diff --git a/tensorflow/lite/core/api/flatbuffer_conversions.cc b/tensorflow/lite/core/api/flatbuffer_conversions.cc index 4049aea485c..77621c3f2fd 100644 --- a/tensorflow/lite/core/api/flatbuffer_conversions.cc +++ b/tensorflow/lite/core/api/flatbuffer_conversions.cc @@ -803,6 +803,8 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, case BuiltinOperator_DENSIFY: case BuiltinOperator_SEGMENT_SUM: return kTfLiteOk; + case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES: + return kTfLiteError; } return kTfLiteError; } // NOLINT[readability/fn_size] diff --git a/tensorflow/lite/experimental/writer/option_writer_generator.cc b/tensorflow/lite/experimental/writer/option_writer_generator.cc index 898f4a95ef6..47550be2a21 100644 --- a/tensorflow/lite/experimental/writer/option_writer_generator.cc +++ b/tensorflow/lite/experimental/writer/option_writer_generator.cc @@ -163,8 +163,11 @@ class OpOptionData { op_to_option_["UNIDIRECTIONAL_SEQUENCE_RNN"] = "SequenceRNNOptions"; op_to_option_["MAXIMUM"] = "MaximumMinimumOptions"; op_to_option_["MINIMUM"] = "MaximumMinimumOptions"; + + // These operators are not real ones. op_to_option_["CUSTOM"] = ""; // TODO(aselle): maybe something else. op_to_option_["DELEGATE"] = ""; // TODO(aselle): maybe something else. + op_to_option_["PLACEHOLDER_FOR_GREATER_OP_CODES"] = ""; // Manually specified mappings between ops to "none" options -- these are // ops without a corresponding Options message in schema as yet. If these diff --git a/tensorflow/lite/schema/BUILD b/tensorflow/lite/schema/BUILD index d67be7a937a..13a996cf56e 100644 --- a/tensorflow/lite/schema/BUILD +++ b/tensorflow/lite/schema/BUILD @@ -73,6 +73,7 @@ exports_files([ "schema_v1.fbs", "schema_v2.fbs", "schema_v3.fbs", + "schema_v3a.fbs", ]) flatbuffer_cc_library( @@ -115,7 +116,7 @@ cc_test( srcs = ["flatbuffer_compatibility_test.cc"], data = [ "schema.fbs", - "schema_v3.fbs", + "schema_v3a.fbs", ], tags = [ "no_oss", diff --git a/tensorflow/lite/schema/builtin_ops_header/generator.cc b/tensorflow/lite/schema/builtin_ops_header/generator.cc index e2967aee0ff..6a004223bf2 100644 --- a/tensorflow/lite/schema/builtin_ops_header/generator.cc +++ b/tensorflow/lite/schema/builtin_ops_header/generator.cc @@ -46,7 +46,8 @@ extern "C" { #endif // __cplusplus // The enum for builtin operators. -// Note: CUSTOM and DELEGATE are 2 special ops which are not real built-in ops. +// Note: CUSTOM, DELEGATE, and PLACEHOLDER_FOR_GREATER_OP_CODES are 3 special +// ops which are not real built-in ops. typedef enum { )"; diff --git a/tensorflow/lite/schema/flatbuffer_compatibility_test.cc b/tensorflow/lite/schema/flatbuffer_compatibility_test.cc index 8f88b6204a1..85002b2911e 100644 --- a/tensorflow/lite/schema/flatbuffer_compatibility_test.cc +++ b/tensorflow/lite/schema/flatbuffer_compatibility_test.cc @@ -61,8 +61,7 @@ TEST(SchemaTest, TestCompatibility) { // Read file contents of schemas into strings // TODO(aselle): Need a reliable way to load files. 
std::string base_contents, current_contents; - const char *base_filename = - TFLITE_TF_PREFIX "lite/schema/schema_v3.fbs"; + const char *base_filename = TFLITE_TF_PREFIX "lite/schema/schema_v3a.fbs"; const char *current_filename = TFLITE_TF_PREFIX "lite/schema/schema.fbs"; diff --git a/tensorflow/lite/schema/schema.fbs b/tensorflow/lite/schema/schema.fbs index baeb49f7b7a..0df9c3d8441 100644 --- a/tensorflow/lite/schema/schema.fbs +++ b/tensorflow/lite/schema/schema.fbs @@ -17,6 +17,8 @@ // Version 1: Add subgraphs to schema. // Version 2: Rename operators to conform to NN API. // Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers. +// Version 3a: Add new builtin op code field. Has backward compatibility with +// version 3. namespace tflite; @@ -215,7 +217,7 @@ table Tensor { // object containing configuration parameters, builtins have a predetermined // set of acceptable options. -enum BuiltinOperator : byte { +enum BuiltinOperator : int32 { ADD = 0, AVERAGE_POOL_2D = 1, CONCATENATION = 2, @@ -248,7 +250,6 @@ enum BuiltinOperator : byte { SPACE_TO_DEPTH = 26, SVDF = 27, TANH = 28, - // TODO(aselle): Consider rename to CONCATENATE_EMBEDDINGS CONCAT_EMBEDDINGS = 29, SKIP_GRAM = 30, CALL = 31, @@ -349,7 +350,8 @@ enum BuiltinOperator : byte { SELECT_V2 = 123, DENSIFY = 124, SEGMENT_SUM = 125, - BATCH_MATMUL = 126 + BATCH_MATMUL = 126, + PLACEHOLDER_FOR_GREATER_OP_CODES = 127 } @@ -982,12 +984,21 @@ table BatchMatMulOptions { // An OperatorCode can be an enum value (BuiltinOperator) if the operator is a // builtin, or a string if the operator is custom. table OperatorCode { - builtin_code:BuiltinOperator; + // This field is for backward compatibility. This field will be used when + // the value of the extended builtin_code field is less than + // BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES. + deprecated_builtin_code:byte; custom_code:string; // The version of the operator. The version need to be bumped whenever new // parameters are introduced into an op. version:int = 1; + + // This field is introduced to resolve the op builtin code shortage problem + // (the original BuiltinOperator enum field was represented as a byte). + // This field will be used when the value of the extended builtin_code field + // is greater than BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
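+ // For example (illustrative values only): CONV_2D (3) is stored with + // deprecated_builtin_code = 3 and builtin_code = CONV_2D, while a + // hypothetical future op with code 130 would be stored with + // deprecated_builtin_code = 127 (the placeholder) and builtin_code = 130.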
+ builtin_code:BuiltinOperator; } enum CustomOptionsFormat : byte { diff --git a/tensorflow/lite/schema/schema_generated.h b/tensorflow/lite/schema/schema_generated.h index c5013edb179..bbc000cc5dc 100755 --- a/tensorflow/lite/schema/schema_generated.h +++ b/tensorflow/lite/schema/schema_generated.h @@ -781,11 +781,12 @@ enum BuiltinOperator { BuiltinOperator_DENSIFY = 124, BuiltinOperator_SEGMENT_SUM = 125, BuiltinOperator_BATCH_MATMUL = 126, + BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES = 127, BuiltinOperator_MIN = BuiltinOperator_ADD, - BuiltinOperator_MAX = BuiltinOperator_BATCH_MATMUL + BuiltinOperator_MAX = BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES }; -inline const BuiltinOperator (&EnumValuesBuiltinOperator())[127] { +inline const BuiltinOperator (&EnumValuesBuiltinOperator())[128] { static const BuiltinOperator values[] = { BuiltinOperator_ADD, BuiltinOperator_AVERAGE_POOL_2D, @@ -913,13 +914,14 @@ inline const BuiltinOperator (&EnumValuesBuiltinOperator())[127] { BuiltinOperator_SELECT_V2, BuiltinOperator_DENSIFY, BuiltinOperator_SEGMENT_SUM, - BuiltinOperator_BATCH_MATMUL + BuiltinOperator_BATCH_MATMUL, + BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES }; return values; } inline const char * const *EnumNamesBuiltinOperator() { - static const char * const names[128] = { + static const char * const names[129] = { "ADD", "AVERAGE_POOL_2D", "CONCATENATION", @@ -1047,13 +1049,14 @@ inline const char * const *EnumNamesBuiltinOperator() { "DENSIFY", "SEGMENT_SUM", "BATCH_MATMUL", + "PLACEHOLDER_FOR_GREATER_OP_CODES", nullptr }; return names; } inline const char *EnumNameBuiltinOperator(BuiltinOperator e) { - if (flatbuffers::IsOutRange(e, BuiltinOperator_ADD, BuiltinOperator_BATCH_MATMUL)) return ""; + if (flatbuffers::IsOutRange(e, BuiltinOperator_ADD, BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES)) return ""; const size_t index = static_cast<size_t>(e); return EnumNamesBuiltinOperator()[index]; } @@ -9336,24 +9339,27 @@ flatbuffers::Offset<BatchMatMulOptions> CreateBatchMatMulOptions(flatbuffers::Fl struct OperatorCodeT : public flatbuffers::NativeTable { typedef OperatorCode TableType; - tflite::BuiltinOperator builtin_code; + int8_t deprecated_builtin_code; std::string custom_code; int32_t version; + tflite::BuiltinOperator builtin_code; OperatorCodeT() - : builtin_code(tflite::BuiltinOperator_ADD), - version(1) { + : deprecated_builtin_code(0), + version(1), + builtin_code(tflite::BuiltinOperator_ADD) { } }; struct OperatorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef OperatorCodeT NativeTableType; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BUILTIN_CODE = 4, + VT_DEPRECATED_BUILTIN_CODE = 4, VT_CUSTOM_CODE = 6, - VT_VERSION = 8 + VT_VERSION = 8, + VT_BUILTIN_CODE = 10 }; - tflite::BuiltinOperator builtin_code() const { - return static_cast<tflite::BuiltinOperator>(GetField<int8_t>(VT_BUILTIN_CODE, 0)); + int8_t deprecated_builtin_code() const { + return GetField<int8_t>(VT_DEPRECATED_BUILTIN_CODE, 0); } const flatbuffers::String *custom_code() const { return GetPointer<const flatbuffers::String *>(VT_CUSTOM_CODE); @@ -9361,12 +9367,16 @@ struct OperatorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { int32_t version() const { return GetField<int32_t>(VT_VERSION, 1); } + tflite::BuiltinOperator builtin_code() const { + return static_cast<tflite::BuiltinOperator>(GetField<int32_t>(VT_BUILTIN_CODE, 0)); + } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && - VerifyField<int8_t>(verifier, VT_BUILTIN_CODE) && + VerifyField<int8_t>(verifier, VT_DEPRECATED_BUILTIN_CODE) && VerifyOffset(verifier, VT_CUSTOM_CODE) &&
verifier.VerifyString(custom_code()) && VerifyField<int32_t>(verifier, VT_VERSION) && + VerifyField<int32_t>(verifier, VT_BUILTIN_CODE) && verifier.EndTable(); } OperatorCodeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; @@ -9377,8 +9387,8 @@ struct OperatorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { struct OperatorCodeBuilder { flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; - void add_builtin_code(tflite::BuiltinOperator builtin_code) { - fbb_.AddElement<int8_t>(OperatorCode::VT_BUILTIN_CODE, static_cast<int8_t>(builtin_code), 0); + void add_deprecated_builtin_code(int8_t deprecated_builtin_code) { + fbb_.AddElement<int8_t>(OperatorCode::VT_DEPRECATED_BUILTIN_CODE, deprecated_builtin_code, 0); } void add_custom_code(flatbuffers::Offset<flatbuffers::String> custom_code) { fbb_.AddOffset(OperatorCode::VT_CUSTOM_CODE, custom_code); @@ -9386,6 +9396,9 @@ struct OperatorCodeBuilder { void add_version(int32_t version) { fbb_.AddElement<int32_t>(OperatorCode::VT_VERSION, version, 1); } + void add_builtin_code(tflite::BuiltinOperator builtin_code) { + fbb_.AddElement<int32_t>(OperatorCode::VT_BUILTIN_CODE, static_cast<int32_t>(builtin_code), 0); + } explicit OperatorCodeBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); @@ -9400,27 +9413,31 @@ struct OperatorCodeBuilder { inline flatbuffers::Offset<OperatorCode> CreateOperatorCode( flatbuffers::FlatBufferBuilder &_fbb, - tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD, + int8_t deprecated_builtin_code = 0, flatbuffers::Offset<flatbuffers::String> custom_code = 0, - int32_t version = 1) { + int32_t version = 1, + tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD) { OperatorCodeBuilder builder_(_fbb); + builder_.add_builtin_code(builtin_code); builder_.add_version(version); builder_.add_custom_code(custom_code); - builder_.add_builtin_code(builtin_code); + builder_.add_deprecated_builtin_code(deprecated_builtin_code); return builder_.Finish(); } inline flatbuffers::Offset<OperatorCode> CreateOperatorCodeDirect( flatbuffers::FlatBufferBuilder &_fbb, - tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD, + int8_t deprecated_builtin_code = 0, const char *custom_code = nullptr, - int32_t version = 1) { + int32_t version = 1, + tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD) { auto custom_code__ = custom_code ? 
_fbb.CreateString(custom_code) : 0; return tflite::CreateOperatorCode( _fbb, - builtin_code, + deprecated_builtin_code, custom_code__, - version); + version, + builtin_code); } flatbuffers::Offset<OperatorCode> CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); @@ -13695,9 +13712,10 @@ inline OperatorCodeT *OperatorCode::UnPack(const flatbuffers::resolver_function_ inline void OperatorCode::UnPackTo(OperatorCodeT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; - { auto _e = builtin_code(); _o->builtin_code = _e; } + { auto _e = deprecated_builtin_code(); _o->deprecated_builtin_code = _e; } { auto _e = custom_code(); if (_e) _o->custom_code = _e->str(); } { auto _e = version(); _o->version = _e; } + { auto _e = builtin_code(); _o->builtin_code = _e; } } inline flatbuffers::Offset<OperatorCode> OperatorCode::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { @@ -13708,14 +13726,16 @@ inline flatbuffers::Offset<OperatorCode> CreateOperatorCode(flatbuffers::FlatBuf (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OperatorCodeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _builtin_code = _o->builtin_code; + auto _deprecated_builtin_code = _o->deprecated_builtin_code; auto _custom_code = _o->custom_code.empty() ? 0 : _fbb.CreateString(_o->custom_code); auto _version = _o->version; + auto _builtin_code = _o->builtin_code; return tflite::CreateOperatorCode( _fbb, - _builtin_code, + _deprecated_builtin_code, _custom_code, - _version); + _version, + _builtin_code); } inline OperatorT *Operator::UnPack(const flatbuffers::resolver_function_t *_resolver) const { diff --git a/tensorflow/lite/schema/schema_utils.cc b/tensorflow/lite/schema/schema_utils.cc index ff5754bdbc9..ad110ebda4f 100644 --- a/tensorflow/lite/schema/schema_utils.cc +++ b/tensorflow/lite/schema/schema_utils.cc @@ -19,21 +19,90 @@ limitations under the License. namespace tflite { // The following GetBuiltinCode methods are the utility methods for reading -// builtin operatore code. Later, theses method will be used for upcoming -// builtin code compatibility changes. +// builtin operator code, handling compatibility between the v3 and v3a +// schemas. The maximum value of the two fields will always be the correct +// value, as follows: +// +// - Supporting schema version v3 models +// +// The `builtin_code` field is not available in the v3 models. The flatbuffer +// library will feed it a zero value, which is the default value in the v3a +// schema. The actual builtin operator code value will exist in the +// `deprecated_builtin_code` field. At the same time, it implies that +// `deprecated_builtin_code` >= `builtin_code`, and the maximum value of the +// two fields will be the same as `deprecated_builtin_code`. +// +// - Supporting builtin operator codes beyond 127 +// +// New builtin operators, whose operator code is larger than 127, cannot be +// assigned to the `deprecated_builtin_code` field. In such cases, the +// value of the `builtin_code` field should be used for the builtin operator +// code. In that case, the maximum value of the two fields will be the value +// of `builtin_code`. BuiltinOperator GetBuiltinCode(const OperatorCode *op_code) { // Caller should guarantee that the given argument value is not a nullptr.
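+// For example (illustrative values): a v3 model carrying CONV_2D reads back +// as deprecated_builtin_code == 3 and builtin_code == 0, so the deprecated +// field wins; a v3a model carrying an operator beyond 127 reads back as +// deprecated_builtin_code == 127 and builtin_code == the real operator, so +// the extended field wins.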
TFLITE_DCHECK(op_code != nullptr); - return op_code->builtin_code(); + return (op_code->builtin_code() ? op_code->builtin_code() + : static_cast<BuiltinOperator>( + op_code->deprecated_builtin_code())); } BuiltinOperator GetBuiltinCode(const OperatorCodeT *op_code) { // Caller should guarantee that the given argument value is not a nullptr. TFLITE_DCHECK(op_code != nullptr); - return op_code->builtin_code; + return (op_code->builtin_code + ? op_code->builtin_code + : static_cast<BuiltinOperator>(op_code->deprecated_builtin_code)); +} + +int8_t ConvertBuiltinCodeToDeprecatedBuiltinCode( + const BuiltinOperator builtin_code) { + return (builtin_code < BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES) + ? static_cast<int8_t>(builtin_code) + : static_cast<int8_t>( + BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES); +} + +// The following are `OperatorCode` table object creation methods for backward +// compatibility. These are manually copied from the flatbuffer generated code +// from schema v3. They serve as overloads for the v3a's CreateOperatorCode +// functions in schema_generated.h and enable code that still assumes +// flatbuffer schema v3 to remain unchanged with the inclusion of the +// schema_utils header. +// TODO(b/162392898): remove once all callers are updated to use schema v3a +// functions. + +flatbuffers::Offset<OperatorCode> CreateOperatorCode( + flatbuffers::FlatBufferBuilder &_fbb, BuiltinOperator builtin_code, + flatbuffers::Offset<flatbuffers::String> custom_code, int32_t version) { + OperatorCodeBuilder builder_(_fbb); + builder_.add_version(version); + + int8_t deprecated_builtin_code = + static_cast<int8_t>(BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES); + if (builtin_code < BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES) { + deprecated_builtin_code = static_cast<int8_t>(builtin_code); + } + builder_.add_deprecated_builtin_code(deprecated_builtin_code); + builder_.add_custom_code(custom_code); + builder_.add_builtin_code(builtin_code); + return builder_.Finish(); +} + +flatbuffers::Offset<OperatorCode> CreateOperatorCodeDirect( + flatbuffers::FlatBufferBuilder &_fbb, BuiltinOperator builtin_code, + const char *custom_code, int32_t version) { + auto custom_code__ = custom_code ? _fbb.CreateString(custom_code) : 0; + int8_t deprecated_builtin_code = + static_cast<int8_t>(BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES); + if (builtin_code < BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES) { + deprecated_builtin_code = static_cast<int8_t>(builtin_code); + } + return CreateOperatorCode(_fbb, deprecated_builtin_code, custom_code__, + version, builtin_code); } } // namespace tflite diff --git a/tensorflow/lite/schema/schema_utils.h b/tensorflow/lite/schema/schema_utils.h index 453276b97f0..315a8d0daf4 100644 --- a/tensorflow/lite/schema/schema_utils.h +++ b/tensorflow/lite/schema/schema_utils.h @@ -28,6 +28,22 @@ BuiltinOperator GetBuiltinCode(const OperatorCode *op_code); BuiltinOperator GetBuiltinCode(const OperatorCodeT *op_code); +int8_t ConvertBuiltinCodeToDeprecatedBuiltinCode( + const BuiltinOperator builtin_code); + +// The following methods are for backward compatibility with the early +// version 3 schema, which does not have the extended builtin code field.
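+// For example (illustrative), a legacy v3-style call such as +// CreateOperatorCode(fbb, BuiltinOperator_CONV_2D, 0, /*version=*/3) resolves +// to these overloads and fills in both deprecated_builtin_code (3) and +// builtin_code (CONV_2D) in the emitted OperatorCode table.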
+flatbuffers::Offset<OperatorCode> CreateOperatorCode( + flatbuffers::FlatBufferBuilder &_fbb, + BuiltinOperator builtin_code = BuiltinOperator_ADD, + flatbuffers::Offset<flatbuffers::String> custom_code = 0, + int32_t version = 1); + +flatbuffers::Offset<OperatorCode> CreateOperatorCodeDirect( + flatbuffers::FlatBufferBuilder &_fbb, + BuiltinOperator builtin_code = BuiltinOperator_ADD, + const char *custom_code = nullptr, int32_t version = 1); + } // namespace tflite #endif // TENSORFLOW_LITE_SCHEMA_SCHEMA_UTILS_H_ diff --git a/tensorflow/lite/schema/schema_v3a.fbs b/tensorflow/lite/schema/schema_v3a.fbs new file mode 100644 index 00000000000..cae5a63c615 --- /dev/null +++ b/tensorflow/lite/schema/schema_v3a.fbs @@ -0,0 +1,1109 @@ +// Copyright 2017 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Revision History +// Version 0: Initial version. +// Version 1: Add subgraphs to schema. +// Version 2: Rename operators to conform to NN API. +// Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers. +// Version 3a: Add new builtin op code field. Has backward compatibility with +// version 3. + +namespace tflite; + +// This corresponds to the version. +file_identifier "TFL3"; +// File extension of any written files. +file_extension "tflite"; + +// IMPORTANT: All new members of tables, enums and unions must be added at the +// end to ensure backwards compatibility. + +// The type of data stored in a tensor. +enum TensorType : byte { + FLOAT32 = 0, + FLOAT16 = 1, + INT32 = 2, + UINT8 = 3, + INT64 = 4, + STRING = 5, + BOOL = 6, + INT16 = 7, + COMPLEX64 = 8, + INT8 = 9, + FLOAT64 = 10, + COMPLEX128 = 11, +} + +// Custom quantization parameters for experimenting with new quantization +// techniques. +table CustomQuantization { + custom:[ubyte] (force_align: 16); +} + +// Represents a specific quantization technique's parameters. +union QuantizationDetails { + CustomQuantization, +} + +// Parameters for converting a quantized tensor back to float. +table QuantizationParameters { + // These four parameters are the asymmetric linear quantization parameters. + // Given a quantized value q, the corresponding float value f should be: + // f = scale * (q - zero_point) + // For other quantization types, the QuantizationDetails below is used. + min:[float]; // For importing back into tensorflow. + max:[float]; // For importing back into tensorflow. + scale:[float]; // For dequantizing the tensor's values. + zero_point:[long]; + + // If this is not none, the other quantization parameters (i.e. min, max, + // scale, zero_point fields above) are ignored and the value of the + // QuantizationDetails union should be used. + details:QuantizationDetails; + + // Specifies the dimension of the Tensor's shape that the scales and + // zero_points correspond to. For example, a tensor t, with dims=[4, 3, 2, 1] + // with quantization params: + // scale=[1.0, 2.0, 3.0], zero_point=[1, 2, 3], quantization_dimension=1 + // will be quantized across the second dimension of t.
+ // t[:, 0, :, :] will have scale[0]=1.0, zero_point[0]=1 + // t[:, 1, :, :] will have scale[1]=2.0, zero_point[1]=2 + // t[:, 2, :, :] will have scale[2]=3.0, zero_point[2]=3 + quantized_dimension:int; +} + +// Sparse tensors. +// We use a modification of the TACO format. +// Reference: http://tensor-compiler.org/kjolstad-oopsla17-tensor-compiler.pdf +// +// To encode a conceptual n-dimensional dense tensor with dims (d0, ..., dn-1), +// potentially with a k-dimensional block (0 <= k <= n) with dims +// (dn, ..., dn+k-1), the format needs to specify: +// 1. In what order to traverse these dimensions. For example, to store a 2-D +// matrix in row major order, the traversal order would be (d0, d1), +// whereas to store it in column major order, the traversal order would be +// (d1, d0). If the 2-D matrix has a 2-D inner block, the traversal order +// could be (d0, d1, d2, d3). +// 2. How each block dimension in (dn, ..., dn+k-1) maps to the original +// tensor dimension in (d0, ..., dn-1). +// 3. In the traversal order defined above, the format (dense vs. sparse) and +// index metadata for each dimension. For a dense dimension, this is just +// the size of that dimension. For a sparse dimension, it's the same as +// the compressed index defined in the Compressed Sparse Row (CSR) format. +// (http://scipy-lectures.org/advanced/scipy_sparse/csr_matrix.html) + +// The storage type for a dimension. Currently we support: +// 1. DENSE: each coordinate in this dimension is stored implicitly. +// 2. SPARSE_CSR: only the coordinates with non-zero elements are stored. The +// compression technique is the same as what CSR uses. +// More types like a sparse dimension with a different compression technique +// could be added to the list in the future. +enum DimensionType : byte { + DENSE = 0, + SPARSE_CSR = 1, +} + +table Int32Vector { + values:[int]; +} + +table Uint16Vector { + values:[ushort] (force_align: 4); +} + +table Uint8Vector { + values:[ubyte] (force_align: 4); +} + +// Variable-typed buffer to store the index metadata for a sparse dimension. +// The widest type is Int32 instead of UInt32 because a tensor's shape is an +// int32 vector. We don't want the per-dimensional index to overflow that +// range. +union SparseIndexVector { + Int32Vector, + Uint16Vector, + Uint8Vector +} + +table DimensionMetadata { + // Whether a dimension is dense or sparse. + format:DimensionType; + // Index metadata used for a dimension. + // - If format is DimensionType.DENSE then we use the dense_size field to + // store the size of that dimension. Each index in that dimension is + // stored implicitly. + // - If format is DimensionType.SPARSE_CSR then we use array_segments and + // array_indices to encode that dimension. array_segments represents how + // to segment the indices array, each segment corresponds to one element + // in the previous dimension. array_indices represents the index of the + // non-zero elements within this dimension (as those in the CSR matrix + // format, where the first array is row pointers and the second array is + // column indices). + dense_size:int; + array_segments:SparseIndexVector; + array_indices:SparseIndexVector; +} + +// Parameters to encode a sparse TfLite tensor. +table SparsityParameters { + // The traversal order of the dimensions defined in the `shape` field of the + // conceptual dense tensor. For an n-dimensional tensor with dims (d0, d1, + // ..., dn-1), + // - if not block sparse, the traversal_order is just a permutation of (d0, + // ..., dn-1).
For example, a 2-D matrix stored in row-major order would + // have traversal_order = (d0, d1). + // - if block sparse with a k-dimensional block (0 <= k <= n), the + // traversal_order has n + k elements. The first n elements are still a + // permutation of (d0, ..., dn-1). The last k elements are a permutation + // of (dn, ..., dn+k-1), defining how to traverse a block internally. For + // example, a 2-D matrix with 2-D blocks, both stored in row-major order + // would have traversal_order = (d0, d1, d2, d3). + traversal_order:[int]; + // For an n-dimensional tensor with a k-dimensional block (0 <= k <= n), + // stores how a block dimension in (dn, ..., dn+k-1) maps to the original + // tensor dimension in (d0, ..., dn-1). + // It's stored in the order of (dn, ..., dn+k-1). + // If not block-sparse, this field is NULL. + block_map:[int]; + // In the traversal order defined above, the metadata needed for + // each dimension to locate the non-zero values in the original dense tensor. + // The size of the dim_metadata array = the size of the traversal_order array + // = n + k. + dim_metadata:[DimensionMetadata]; +} + +table Tensor { + // The tensor shape. The meaning of each entry is operator-specific but + // builtin ops use: [batch size, height, width, number of channels] (That's + // Tensorflow's NHWC). + shape:[int]; + type:TensorType; + // An index that refers to the buffers table at the root of the model. Or, + // if there is no data buffer associated (i.e. intermediate results), then + // this is 0 (which refers to an always existent empty buffer). + // + // The data_buffer itself is an opaque container, with the assumption that the + // target device is little-endian. In addition, all builtin operators assume + // the memory is ordered such that if `shape` is [4, 3, 2], then index + // [i, j, k] maps to data_buffer[i*3*2 + j*2 + k]. + buffer:uint; + name:string; // For debugging and importing back into tensorflow. + quantization:QuantizationParameters; // Optional. + + is_variable:bool = false; + + // Parameters to encode a sparse tensor. See the example in + // tensorflow/lite/testdata/sparse_tensor.json. + sparsity:SparsityParameters; // Optional. + + // Encodes `shape` with unknown dimensions. Unknown dimensions are + // represented with -1. + shape_signature:[int]; // Optional. +} + +// A list of builtin operators. Builtin operators are slightly faster than custom +// ones, but not by much. Moreover, while custom operators accept an opaque +// object containing configuration parameters, builtins have a predetermined +// set of acceptable options. + +enum BuiltinOperator : int32 { + ADD = 0, + AVERAGE_POOL_2D = 1, + CONCATENATION = 2, + CONV_2D = 3, + DEPTHWISE_CONV_2D = 4, + DEPTH_TO_SPACE = 5, + DEQUANTIZE = 6, + EMBEDDING_LOOKUP = 7, + FLOOR = 8, + FULLY_CONNECTED = 9, + HASHTABLE_LOOKUP = 10, + L2_NORMALIZATION = 11, + L2_POOL_2D = 12, + LOCAL_RESPONSE_NORMALIZATION = 13, + LOGISTIC = 14, + LSH_PROJECTION = 15, + LSTM = 16, + MAX_POOL_2D = 17, + MUL = 18, + RELU = 19, + // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed + // since different model developers use RELU1 in different ways. Never + // create another op called RELU1. 
+// A list of builtin operators. Builtin operators are slightly faster than custom
+// ones, but not by much. Moreover, while custom operators accept an opaque
+// object containing configuration parameters, builtins have a predetermined
+// set of acceptable options.
+
+enum BuiltinOperator : int32 {
+  ADD = 0,
+  AVERAGE_POOL_2D = 1,
+  CONCATENATION = 2,
+  CONV_2D = 3,
+  DEPTHWISE_CONV_2D = 4,
+  DEPTH_TO_SPACE = 5,
+  DEQUANTIZE = 6,
+  EMBEDDING_LOOKUP = 7,
+  FLOOR = 8,
+  FULLY_CONNECTED = 9,
+  HASHTABLE_LOOKUP = 10,
+  L2_NORMALIZATION = 11,
+  L2_POOL_2D = 12,
+  LOCAL_RESPONSE_NORMALIZATION = 13,
+  LOGISTIC = 14,
+  LSH_PROJECTION = 15,
+  LSTM = 16,
+  MAX_POOL_2D = 17,
+  MUL = 18,
+  RELU = 19,
+  // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
+  // since different model developers use RELU1 in different ways. Never
+  // create another op called RELU1.
+  RELU_N1_TO_1 = 20,
+  RELU6 = 21,
+  RESHAPE = 22,
+  RESIZE_BILINEAR = 23,
+  RNN = 24,
+  SOFTMAX = 25,
+  SPACE_TO_DEPTH = 26,
+  SVDF = 27,
+  TANH = 28,
+  CONCAT_EMBEDDINGS = 29,
+  SKIP_GRAM = 30,
+  CALL = 31,
+  CUSTOM = 32,
+  EMBEDDING_LOOKUP_SPARSE = 33,
+  PAD = 34,
+  UNIDIRECTIONAL_SEQUENCE_RNN = 35,
+  GATHER = 36,
+  BATCH_TO_SPACE_ND = 37,
+  SPACE_TO_BATCH_ND = 38,
+  TRANSPOSE = 39,
+  MEAN = 40,
+  SUB = 41,
+  DIV = 42,
+  SQUEEZE = 43,
+  UNIDIRECTIONAL_SEQUENCE_LSTM = 44,
+  STRIDED_SLICE = 45,
+  BIDIRECTIONAL_SEQUENCE_RNN = 46,
+  EXP = 47,
+  TOPK_V2 = 48,
+  SPLIT = 49,
+  LOG_SOFTMAX = 50,
+  // DELEGATE is a special op type for the operations which are delegated to
+  // other backends.
+  // WARNING: Experimental interface, subject to change
+  DELEGATE = 51,
+  BIDIRECTIONAL_SEQUENCE_LSTM = 52,
+  CAST = 53,
+  PRELU = 54,
+  MAXIMUM = 55,
+  ARG_MAX = 56,
+  MINIMUM = 57,
+  LESS = 58,
+  NEG = 59,
+  PADV2 = 60,
+  GREATER = 61,
+  GREATER_EQUAL = 62,
+  LESS_EQUAL = 63,
+  SELECT = 64,
+  SLICE = 65,
+  SIN = 66,
+  TRANSPOSE_CONV = 67,
+  SPARSE_TO_DENSE = 68,
+  TILE = 69,
+  EXPAND_DIMS = 70,
+  EQUAL = 71,
+  NOT_EQUAL = 72,
+  LOG = 73,
+  SUM = 74,
+  SQRT = 75,
+  RSQRT = 76,
+  SHAPE = 77,
+  POW = 78,
+  ARG_MIN = 79,
+  FAKE_QUANT = 80,
+  REDUCE_PROD = 81,
+  REDUCE_MAX = 82,
+  PACK = 83,
+  LOGICAL_OR = 84,
+  ONE_HOT = 85,
+  LOGICAL_AND = 86,
+  LOGICAL_NOT = 87,
+  UNPACK = 88,
+  REDUCE_MIN = 89,
+  FLOOR_DIV = 90,
+  REDUCE_ANY = 91,
+  SQUARE = 92,
+  ZEROS_LIKE = 93,
+  FILL = 94,
+  FLOOR_MOD = 95,
+  RANGE = 96,
+  RESIZE_NEAREST_NEIGHBOR = 97,
+  LEAKY_RELU = 98,
+  SQUARED_DIFFERENCE = 99,
+  MIRROR_PAD = 100,
+  ABS = 101,
+  SPLIT_V = 102,
+  UNIQUE = 103,
+  CEIL = 104,
+  REVERSE_V2 = 105,
+  ADD_N = 106,
+  GATHER_ND = 107,
+  COS = 108,
+  WHERE = 109,
+  RANK = 110,
+  ELU = 111,
+  REVERSE_SEQUENCE = 112,
+  MATRIX_DIAG = 113,
+  QUANTIZE = 114,
+  MATRIX_SET_DIAG = 115,
+  ROUND = 116,
+  HARD_SWISH = 117,
+  IF = 118,
+  WHILE = 119,
+  NON_MAX_SUPPRESSION_V4 = 120,
+  NON_MAX_SUPPRESSION_V5 = 121,
+  SCATTER_ND = 122,
+  SELECT_V2 = 123,
+  DENSIFY = 124,
+  SEGMENT_SUM = 125,
+  BATCH_MATMUL = 126,
+  PLACEHOLDER_FOR_GREATER_OP_CODES = 127
+}
+
+
+// Options for the builtin operators.
+union BuiltinOptions {
+  Conv2DOptions,
+  DepthwiseConv2DOptions,
+  ConcatEmbeddingsOptions,
+  LSHProjectionOptions,
+  Pool2DOptions,
+  SVDFOptions,
+  RNNOptions,
+  FullyConnectedOptions,
+  SoftmaxOptions,
+  ConcatenationOptions,
+  AddOptions,
+  L2NormOptions,
+  LocalResponseNormalizationOptions,
+  LSTMOptions,
+  ResizeBilinearOptions,
+  CallOptions,
+  ReshapeOptions,
+  SkipGramOptions,
+  SpaceToDepthOptions,
+  EmbeddingLookupSparseOptions,
+  MulOptions,
+  PadOptions,
+  GatherOptions,
+  BatchToSpaceNDOptions,
+  SpaceToBatchNDOptions,
+  TransposeOptions,
+  ReducerOptions,
+  SubOptions,
+  DivOptions,
+  SqueezeOptions,
+  SequenceRNNOptions,
+  StridedSliceOptions,
+  ExpOptions,
+  TopKV2Options,
+  SplitOptions,
+  LogSoftmaxOptions,
+  CastOptions,
+  DequantizeOptions,
+  MaximumMinimumOptions,
+  ArgMaxOptions,
+  LessOptions,
+  NegOptions,
+  PadV2Options,
+  GreaterOptions,
+  GreaterEqualOptions,
+  LessEqualOptions,
+  SelectOptions,
+  SliceOptions,
+  TransposeConvOptions,
+  SparseToDenseOptions,
+  TileOptions,
+  ExpandDimsOptions,
+  EqualOptions,
+  NotEqualOptions,
+  ShapeOptions,
+  PowOptions,
+  ArgMinOptions,
+  FakeQuantOptions,
+  PackOptions,
+  LogicalOrOptions,
+  OneHotOptions,
+  LogicalAndOptions,
+  LogicalNotOptions,
+  UnpackOptions,
+  FloorDivOptions,
+  SquareOptions,
+  ZerosLikeOptions,
+  FillOptions,
+  BidirectionalSequenceLSTMOptions,
+  BidirectionalSequenceRNNOptions,
+  UnidirectionalSequenceLSTMOptions,
+  FloorModOptions,
+  RangeOptions,
+  ResizeNearestNeighborOptions,
+  LeakyReluOptions,
+  SquaredDifferenceOptions,
+  MirrorPadOptions,
+  AbsOptions,
+  SplitVOptions,
+  UniqueOptions,
+  ReverseV2Options,
+  AddNOptions,
+  GatherNdOptions,
+  CosOptions,
+  WhereOptions,
+  RankOptions,
+  ReverseSequenceOptions,
+  MatrixDiagOptions,
+  QuantizeOptions,
+  MatrixSetDiagOptions,
+  HardSwishOptions,
+  IfOptions,
+  WhileOptions,
+  DepthToSpaceOptions,
+  NonMaxSuppressionV4Options,
+  NonMaxSuppressionV5Options,
+  ScatterNdOptions,
+  SelectV2Options,
+  DensifyOptions,
+  SegmentSumOptions,
+  BatchMatMulOptions
+}
+
+enum Padding : byte { SAME, VALID }
+
+enum ActivationFunctionType : byte {
+  NONE = 0,
+  RELU = 1,
+  RELU_N1_TO_1 = 2,
+  RELU6 = 3,
+  TANH = 4,
+  SIGN_BIT = 5,
+}
+
+table Conv2DOptions {
+  padding:Padding;
+  stride_w:int;
+  stride_h:int;
+  fused_activation_function:ActivationFunctionType;
+  dilation_w_factor:int = 1;
+  dilation_h_factor:int = 1;
+}
+
+table Pool2DOptions {
+  padding:Padding;
+  stride_w:int;
+  stride_h:int;
+  filter_width:int;
+  filter_height:int;
+  fused_activation_function:ActivationFunctionType;
+}
+
+table DepthwiseConv2DOptions {
+  // Parameters for DepthwiseConv version 1 or above.
+  padding:Padding;
+  stride_w:int;
+  stride_h:int;
+  // `depth_multiplier` is redundant. It's used by CPU kernels in
+  // TensorFlow 2.0 or below, but ignored in versions above.
+  // See comments in lite/c/builtin_op_data.h for more details.
+  depth_multiplier:int;
+  fused_activation_function:ActivationFunctionType;
+  // Parameters for DepthwiseConv version 2 or above.
+  dilation_w_factor:int = 1;
+  dilation_h_factor:int = 1;
+}
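As a reference for the `Padding` enum and the stride/dilation fields above, here is a sketch of the output-size arithmetic that TensorFlow-style SAME/VALID padding implies (illustrative only, assuming positive sizes and strides; not part of the patch):

```cpp
// Illustrative: output spatial size for one dimension under the usual
// TensorFlow convention. SAME pads so that out = ceil(in / stride);
// VALID uses only fully-covered positions.
int OutputSize(int input, int filter, int stride, int dilation, bool same) {
  const int effective_filter = (filter - 1) * dilation + 1;
  if (same) {
    return (input + stride - 1) / stride;  // ceil(input / stride)
  }
  // VALID: floor((input - effective_filter) / stride) + 1
  return (input - effective_filter + stride) / stride;
}
```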
+table ConcatEmbeddingsOptions {
+  num_channels:int;
+  num_columns_per_channel:[int];
+  embedding_dim_per_channel:[int]; // This could be inferred from parameters.
+}
+
+enum LSHProjectionType: byte {
+  UNKNOWN = 0,
+  SPARSE = 1,
+  DENSE = 2,
+}
+
+table LSHProjectionOptions {
+  type: LSHProjectionType;
+}
+
+table SVDFOptions {
+  rank:int;
+  fused_activation_function:ActivationFunctionType;
+  // For weights-only quantization, use asymmetric quantization for
+  // non-constant inputs at evaluation time.
+  asymmetric_quantize_inputs:bool;
+}
+
+// An implementation of TensorFlow RNNCell.
+table RNNOptions {
+  fused_activation_function:ActivationFunctionType;
+  asymmetric_quantize_inputs:bool;
+}
+
+// An implementation of TensorFlow dynamic_rnn with RNNCell.
+table SequenceRNNOptions {
+  time_major:bool;
+  fused_activation_function:ActivationFunctionType;
+  asymmetric_quantize_inputs:bool;
+}
+
+// An implementation of TensorFlow bidirectional_dynamic_rnn with RNNCell.
+table BidirectionalSequenceRNNOptions {
+  time_major:bool;
+  fused_activation_function:ActivationFunctionType;
+  merge_outputs: bool;
+  asymmetric_quantize_inputs:bool;
+}
+
+enum FullyConnectedOptionsWeightsFormat: byte {
+  DEFAULT = 0,
+  SHUFFLED4x16INT8 = 1,
+}
+
+// An implementation of TensorFlow fully_connected (a.k.a. Dense) layer.
+table FullyConnectedOptions {
+  // Parameters for FullyConnected version 1 or above.
+  fused_activation_function:ActivationFunctionType;
+
+  // Parameters for FullyConnected version 2 or above.
+  weights_format:FullyConnectedOptionsWeightsFormat = DEFAULT;
+
+  // Parameters for FullyConnected version 5 or above.
+  // If set to true, then the number of dimensions is preserved. Furthermore,
+  // all but the last dimension of the input and output shapes will be equal.
+  keep_num_dims: bool;
+
+  // Parameters for FullyConnected version 7 or above.
+  // If set to true, then a weights-only op will use asymmetric quantization
+  // for inputs.
+  asymmetric_quantize_inputs: bool;
+}
+
+table SoftmaxOptions {
+  beta: float;
+}
+
+// An implementation of TensorFlow concat.
+table ConcatenationOptions {
+  axis:int;
+  fused_activation_function:ActivationFunctionType;
+}
+
+table AddOptions {
+  fused_activation_function:ActivationFunctionType;
+  // Parameters supported by version 4.
+  pot_scale_int16:bool = true;
+}
+
+table MulOptions {
+  fused_activation_function:ActivationFunctionType;
+}
+
+table L2NormOptions {
+  fused_activation_function:ActivationFunctionType;
+}
+
+table LocalResponseNormalizationOptions {
+  radius:int;
+  bias:float;
+  alpha:float;
+  beta:float;
+}
+
+enum LSTMKernelType : byte {
+  // Full LSTM kernel which supports peephole and projection.
+  FULL = 0,
+  // Basic LSTM kernels. Equivalent to TensorFlow BasicLSTMCell.
+  BASIC = 1,
+}
+
+// An implementation of TensorFlow LSTMCell and CoupledInputForgetGateLSTMCell.
+table LSTMOptions {
+  // Parameters for LSTM version 1 or above.
+  fused_activation_function:ActivationFunctionType;
+  cell_clip: float; // Optional, 0.0 means no clipping
+  proj_clip: float; // Optional, 0.0 means no clipping
+
+  // Parameters for LSTM version 2 or above.
+  // Basic kernel is only supported in version 2 or above.
+  kernel_type: LSTMKernelType = FULL;
+
+  // Parameters for LSTM version 4 or above.
+  asymmetric_quantize_inputs: bool;
+}
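The `cell_clip`/`proj_clip` fields above use 0.0 as a sentinel for "no clipping". A one-line illustrative helper makes that convention explicit (not part of the patch):

```cpp
#include <algorithm>

// Illustrative: apply an LSTM-style clip where a threshold of 0.0 disables
// clipping, matching the cell_clip/proj_clip comments above.
float Clip(float v, float clip) {
  return clip > 0.0f ? std::min(std::max(v, -clip), clip) : v;
}
```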
+// An implementation of TensorFlow dynamic_rnn with LSTMCell.
+table UnidirectionalSequenceLSTMOptions {
+  fused_activation_function:ActivationFunctionType;
+  cell_clip: float; // Optional, 0.0 means no clipping
+  proj_clip: float; // Optional, 0.0 means no clipping
+
+  // If true then first dimension is sequence, otherwise batch.
+  time_major:bool;
+
+  // Parameter for Unidirectional Sequence LSTM version 4.
+  asymmetric_quantize_inputs:bool;
+}
+
+table BidirectionalSequenceLSTMOptions {
+  // Parameters supported by version 1:
+  fused_activation_function:ActivationFunctionType;
+  cell_clip: float; // Optional, 0.0 means no clipping
+  proj_clip: float; // Optional, 0.0 means no clipping
+
+  // If true, store the outputs of both directions into the first output.
+  merge_outputs: bool;
+
+  // Parameters supported by version 2:
+  // If true then first dimension is sequence, otherwise batch.
+  // Version 1 implementations assumed time_major to be true, so this default
+  // value should never change.
+  time_major: bool = true;
+
+  // Parameters for version 3 or above.
+  asymmetric_quantize_inputs:bool;
+}
+
+table ResizeBilinearOptions {
+  new_height: int (deprecated);
+  new_width: int (deprecated);
+  align_corners: bool;
+  half_pixel_centers: bool;
+}
+
+table ResizeNearestNeighborOptions {
+  align_corners: bool;
+  half_pixel_centers: bool;
+}
+
+// Options for a call operation.
+table CallOptions {
+  // The subgraph index that needs to be called.
+  subgraph:uint;
+}
+
+table PadOptions {
+}
+
+table PadV2Options {
+}
+
+table ReshapeOptions {
+  new_shape:[int];
+}
+
+table SpaceToBatchNDOptions {
+}
+
+table BatchToSpaceNDOptions {
+}
+
+table SkipGramOptions {
+  ngram_size: int;
+  max_skip_size: int;
+  include_all_ngrams: bool;
+}
+
+table SpaceToDepthOptions {
+  block_size: int;
+}
+
+table DepthToSpaceOptions {
+  block_size: int;
+}
+
+table SubOptions {
+  fused_activation_function:ActivationFunctionType;
+  // Parameters supported by version 5.
+  pot_scale_int16:bool = true;
+}
+
+table DivOptions {
+  fused_activation_function:ActivationFunctionType;
+}
+
+table TopKV2Options {
+}
+
+enum CombinerType : byte {
+  SUM = 0,
+  MEAN = 1,
+  SQRTN = 2,
+}
+
+table EmbeddingLookupSparseOptions {
+  combiner:CombinerType;
+}
+
+table GatherOptions {
+  axis: int;
+}
+
+table TransposeOptions {
+}
+
+table ExpOptions {
+}
+
+table CosOptions {
+}
+
+table ReducerOptions {
+  keep_dims: bool;
+}
+
+table SqueezeOptions {
+  squeeze_dims:[int];
+}
+
+table SplitOptions {
+  num_splits: int;
+}
+
+table SplitVOptions {
+  num_splits: int;
+}
+
+table StridedSliceOptions {
+  begin_mask: int;
+  end_mask: int;
+  ellipsis_mask: int;
+  new_axis_mask: int;
+  shrink_axis_mask: int;
+}
+
+table LogSoftmaxOptions {
+}
+
+table CastOptions {
+  in_data_type: TensorType;
+  out_data_type: TensorType;
+}
+
+table DequantizeOptions {
+}
+
+table MaximumMinimumOptions {
+}
+
+table TileOptions {
+}
+
+table ArgMaxOptions {
+  output_type : TensorType;
+}
+
+table ArgMinOptions {
+  output_type : TensorType;
+}
+
+table GreaterOptions {
+}
+
+table GreaterEqualOptions {
+}
+
+table LessOptions {
+}
+
+table LessEqualOptions {
+}
+
+table NegOptions {
+}
+
+table SelectOptions {
+}
+
+table SliceOptions {
+}
+
+table TransposeConvOptions {
+  padding:Padding;
+  stride_w:int;
+  stride_h:int;
+}
+
+table ExpandDimsOptions {
+}
+
+table SparseToDenseOptions {
+  validate_indices:bool;
+}
+
+table EqualOptions {
+}
+
+table NotEqualOptions {
+}
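One note on `StridedSliceOptions` in the group of tables above: its five fields are per-axis bitmasks following TensorFlow's strided_slice convention, where bit i of `begin_mask` set means "ignore begin[i] and start from the beginning of axis i", and the other masks are read the same way. An illustrative decoder (not part of the patch):

```cpp
#include <cstdint>

// Illustrative: test whether the strided_slice mask bit for `axis` is set.
bool MaskBitSet(int32_t mask, int axis) { return (mask >> axis) & 1; }
```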
+table ShapeOptions {
+  // Optional output type of the operation (int32 or int64). Defaults to int32.
+  out_type : TensorType;
+}
+
+table RankOptions {
+}
+
+table PowOptions {
+}
+
+table FakeQuantOptions {
+  // Parameters supported by version 1:
+  min:float;
+  max:float;
+  num_bits:int;
+
+  // Parameters supported by version 2:
+  narrow_range:bool;
+}
+
+table PackOptions {
+  values_count:int;
+  axis:int;
+}
+
+table LogicalOrOptions {
+}
+
+table OneHotOptions {
+  axis:int;
+}
+
+table AbsOptions {
+}
+
+table HardSwishOptions {
+}
+
+table LogicalAndOptions {
+}
+
+table LogicalNotOptions {
+}
+
+table UnpackOptions {
+  num:int;
+  axis:int;
+}
+
+table FloorDivOptions {
+}
+
+table SquareOptions {
+}
+
+table ZerosLikeOptions {
+}
+
+table FillOptions {
+}
+
+table FloorModOptions {
+}
+
+table RangeOptions {
+}
+
+table LeakyReluOptions {
+  alpha:float;
+}
+
+table SquaredDifferenceOptions {
+}
+
+enum MirrorPadMode : byte {
+  // Doesn't include borders.
+  REFLECT = 0,
+  // Includes borders.
+  SYMMETRIC = 1,
+}
+
+table MirrorPadOptions {
+  mode:MirrorPadMode;
+}
+
+table UniqueOptions {
+  idx_out_type:TensorType = INT32;
+}
+
+table ReverseV2Options {
+}
+
+table AddNOptions {
+}
+
+table GatherNdOptions {
+}
+
+table WhereOptions {
+}
+
+table ReverseSequenceOptions {
+  seq_dim:int;
+  batch_dim:int = 0;
+}
+
+table MatrixDiagOptions {
+}
+
+table QuantizeOptions {
+}
+
+table MatrixSetDiagOptions {
+}
+
+table IfOptions {
+  then_subgraph_index:int;
+  else_subgraph_index:int;
+}
+
+table WhileOptions {
+  cond_subgraph_index:int;
+  body_subgraph_index:int;
+}
+
+table NonMaxSuppressionV4Options {
+}
+
+table NonMaxSuppressionV5Options {
+}
+
+table ScatterNdOptions {
+}
+
+table SelectV2Options {
+}
+
+table DensifyOptions {
+}
+
+table SegmentSumOptions {
+}
+
+table BatchMatMulOptions {
+  adj_x:bool;
+  adj_y:bool;
+}
+
+// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
+// builtin, or a string if the operator is custom.
+table OperatorCode {
+  // This field is for backward compatibility. This field will be used when
+  // the value of the extended builtin_code field is less than
+  // BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
+  deprecated_builtin_code:byte;
+  custom_code:string;
+
+  // The version of the operator. The version needs to be bumped whenever new
+  // parameters are introduced into an op.
+  version:int = 1;
+
+  // This field is introduced for resolving the op builtin code shortage
+  // problem. This field will be used when the value of the extended
+  // builtin_code field is greater than
+  // BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES.
+  deprecated: see below for how readers are expected to reconcile the two
+  builtin_code:BuiltinOperator;
+}
+
+enum CustomOptionsFormat : byte {
+  FLEXBUFFERS = 0,
+}
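Readers now see two fields that can carry the op code: the deprecated byte field (capped at PLACEHOLDER_FOR_GREATER_OP_CODES = 127) and the new int32 field. The patch's schema_utils.h/schema_utils.cc are meant to centralize the reconciliation; their bodies are not shown in this excerpt, so the helper name below is illustrative, but the rule follows directly from the comments above:

```cpp
#include <algorithm>

#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {

// Sketch: resolve the effective builtin code, preferring the new int32
// builtin_code field and falling back to the deprecated byte field for
// models written by older converters. Old models leave builtin_code at its
// default (0), and new models never store an op above 127 in the deprecated
// field, so taking the max picks whichever field was actually populated.
inline BuiltinOperator ResolveBuiltinCode(const OperatorCode* op_code) {
  return std::max(
      op_code->builtin_code(),
      static_cast<BuiltinOperator>(op_code->deprecated_builtin_code()));
}

}  // namespace tflite
```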
+// An operator takes tensors as inputs and outputs. The type of operation being
+// performed is determined by an index into the list of valid OperatorCodes,
+// while the specifics of each operation are configured using builtin_options
+// or custom_options.
+table Operator {
+  // Index into the operator_codes array. Using an integer here avoids
+  // complicated map lookups.
+  opcode_index:uint;
+
+  // Optional inputs are indicated by -1.
+  inputs:[int];
+  outputs:[int];
+
+  builtin_options:BuiltinOptions;
+  custom_options:[ubyte];
+  custom_options_format:CustomOptionsFormat;
+
+  // A list of booleans indicating the input tensors which are being mutated
+  // by this operator (e.g. used by RNN and LSTM).
+  // For example, if the "inputs" array refers to 5 tensors and the second and
+  // fifth are mutable variables, then this list will contain
+  // [false, true, false, false, true].
+  //
+  // If the list is empty, no variable is mutated in this operator.
+  // The list either has the same length as `inputs`, or is empty.
+  mutating_variable_inputs:[bool];
+
+  // A list of indices to the subgraph's "tensors" that are internal to an Op.
+  // Internal tensors are those that do not flow in or out of the operation,
+  // but instead are part of internal computation. As such, the operation's
+  // implementation may manage its memory more efficiently. They are needed,
+  // however (i.e. not just an implementation detail), since they are part of
+  // the computation, which may require relevant metadata such as quantization
+  // parameters.
+  intermediates:[int];
+}
+
+// A subgraph, which typically represents an entire model.
+table SubGraph {
+  // A list of all tensors used in this subgraph.
+  tensors:[Tensor];
+
+  // Indices of the tensors that are inputs into this subgraph. Note this is
+  // the list of non-static tensors that feed into the subgraph for inference.
+  inputs:[int];
+
+  // Indices of the tensors that are outputs out of this subgraph. Note this is
+  // the list of output tensors that are considered the product of the
+  // subgraph's inference.
+  outputs:[int];
+
+  // All operators, in execution order.
+  operators:[Operator];
+
+  // Name of this subgraph (used for debugging).
+  name:string;
+}
+
+// Table of raw data buffers (used for constant tensors). Referenced by tensors
+// by index. The generous alignment accommodates mmap-friendly data structures.
+table Buffer {
+  data:[ubyte] (force_align: 16);
+}
+
+table Metadata {
+  // A human readable string to uniquely identify a Metadata.
+  name:string;
+  // An index to the buffers table.
+  buffer:uint;
+}
+
+table Model {
+  // Version of the schema.
+  version:uint;
+
+  // A list of all operator codes used in this model. This is
+  // kept in order because operators carry an index into this
+  // vector.
+  operator_codes:[OperatorCode];
+
+  // All the subgraphs of the model. The 0th is assumed to be the main
+  // model.
+  subgraphs:[SubGraph];
+
+  // A description of the model.
+  description:string;
+
+  // Buffers of the model.
+  // Note the 0th entry of this array must be an empty buffer (sentinel).
+  // This is a convention so that tensors without a buffer can provide 0 as
+  // their buffer.
+  buffers:[Buffer];
+
+  // Metadata about the model. Indirects into the existing buffers list.
+  // Deprecated, prefer to use metadata field.
+  metadata_buffer:[int];
+
+  // Metadata about the model.
+  metadata:[Metadata];
+}
+
+root_type Model;
diff --git a/tensorflow/lite/tools/optimize/quantize_weights.cc b/tensorflow/lite/tools/optimize/quantize_weights.cc
index d46063fcaad..1b22cb56117 100644
--- a/tensorflow/lite/tools/optimize/quantize_weights.cc
+++ b/tensorflow/lite/tools/optimize/quantize_weights.cc
@@ -297,6 +297,8 @@ int32_t GetOrInsertDequantizeOpCodeIndex(ModelT* model) {
     model->operator_codes.push_back(absl::make_unique<OperatorCodeT>());
     int op_code_idx = model->operator_codes.size() - 1;
     model->operator_codes[op_code_idx]->builtin_code = BuiltinOperator_DEQUANTIZE;
+    model->operator_codes[op_code_idx]->deprecated_builtin_code =
+        static_cast<int8_t>(BuiltinOperator_DEQUANTIZE);
     // Version 2 and onwards supports INT8 inputs.
     model->operator_codes[op_code_idx]->version = 2;
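The quantize_weights change above shows the pattern every producer now needs: populate both the new and the deprecated field together. A hypothetical helper could centralize this; `SetOpCode` and its placement are illustrative, not part of this patch:

```cpp
#include <algorithm>
#include <cstdint>

#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {

// Hypothetical helper: keep both fields consistent when emitting an
// OperatorCodeT. Ops whose value fits in the deprecated byte range mirror it
// exactly; anything above is clamped to the placeholder, so old readers see
// PLACEHOLDER_FOR_GREATER_OP_CODES rather than a misread code.
inline void SetOpCode(OperatorCodeT* op_code, BuiltinOperator code) {
  op_code->builtin_code = code;
  op_code->deprecated_builtin_code = static_cast<int8_t>(
      std::min(code, BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES));
}

}  // namespace tflite
```

With such a helper, the quantize_weights hunk above would reduce to a single `SetOpCode(model->operator_codes[op_code_idx].get(), BuiltinOperator_DEQUANTIZE);` call.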