Ensure that TransposeConv models with no fused bias are exported with only three inputs. This guarantees that such models still run with older builds of the TFLite runtime.

PiperOrigin-RevId: 308691394
Change-Id: I10290ba89e3469b5eb5c46b0ab2d4ef7cad0fb59
This commit is contained in:
Suharsh Sivakumar 2020-04-27 13:52:34 -07:00 committed by TensorFlower Gardener
parent 2d72bcdebe
commit f5e6c392b7
3 changed files with 92 additions and 13 deletions

View File

@@ -435,7 +435,7 @@ class Translator {
// Builds operator for the given operation with specified operand and result
// tensor indices. Emits an error and returns llvm::None on failure.
Optional<BufferOffset<tflite::Operator>> BuildOperator(
Operation* inst, const std::vector<int32_t>& operands,
Operation* inst, std::vector<int32_t> operands,
const std::vector<int32_t>& results,
const std::vector<int32_t>& intermediates);
@@ -927,7 +927,7 @@ uint32_t Translator::GetOpcodeIndex(const std::string& op_name,
}
Optional<BufferOffset<tflite::Operator>> Translator::BuildOperator(
Operation* inst, const std::vector<int32_t>& operands,
Operation* inst, std::vector<int32_t> operands,
const std::vector<int32_t>& results,
const std::vector<int32_t>& intermediates) {
const auto* dialect = inst->getDialect();
@@ -981,6 +981,15 @@ Optional<BufferOffset<tflite::Operator>> Translator::BuildOperator(
std::string op_name = inst->getName().getStringRef().str();
uint32_t opcode_index = GetOpcodeIndex(op_name, *builtin_code);
// If this is TransposeConv we need to do a special case of ignoring the
// optional tensor, to allow newly created models to run on old runtimes.
if (*builtin_code == tflite::BuiltinOperator_TRANSPOSE_CONV) {
if (operands.size() == 4 && operands.at(3) == -1) {
operands.pop_back();
}
}
auto offset = CreateFlatBufferOperator(inst, opcode_index, operands,
results, intermediates, &builder_);
if (!offset) {

View File

@@ -0,0 +1,77 @@
// RUN: flatbuffer_translate -mlir-to-tflite-flatbuffer %s -o - | flatbuffer_to_string - | FileCheck --dump-input-on-failure %s

// Test that a TransposeConv whose optional bias operand is the 'none' value
// is serialized with only three inputs: the operator-level CHECK below pins
// "inputs: [ 0, 1, 2 ]" (no trailing -1 for the omitted bias), so the
// exported model stays runnable on older TFLite runtime builds.
func @main(%arg0: tensor<4xi32>, %arg1: tensor<32x4x4x128xf32>, %arg2: tensor<1x32x42x128xf32>) -> tensor<1x64x84x32xf32> {
// CHECK: {
// CHECK-NEXT: version: 3,
// CHECK-NEXT: operator_codes: [ {
// CHECK-NEXT: builtin_code: TRANSPOSE_CONV,
// CHECK-NEXT: version: 1
// CHECK-NEXT: } ],
// CHECK-NEXT: subgraphs: [ {
// CHECK-NEXT: tensors: [ {
// CHECK-NEXT: shape: [ 4 ],
// CHECK-NEXT: type: INT32,
// CHECK-NEXT: buffer: 1,
// CHECK-NEXT: name: "arg0",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 32, 4, 4, 128 ],
// CHECK-NEXT: buffer: 2,
// CHECK-NEXT: name: "arg1",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 1, 32, 42, 128 ],
// CHECK-NEXT: buffer: 3,
// CHECK-NEXT: name: "arg2",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: }, {
// CHECK-NEXT: shape: [ 1, 64, 84, 32 ],
// CHECK-NEXT: buffer: 4,
// CHECK-NEXT: name: "tfl.transpose_conv",
// CHECK-NEXT: quantization: {
// CHECK-EMPTY:
// CHECK-NEXT: }
// CHECK-NEXT: } ],
// CHECK-NEXT: inputs: [ 0, 1, 2 ],
// CHECK-NEXT: outputs: [ 3 ],
// CHECK-NEXT: operators: [ {
// CHECK-NEXT: inputs: [ 0, 1, 2 ],
// CHECK-NEXT: outputs: [ 3 ],
// CHECK-NEXT: builtin_options_type: TransposeConvOptions,
// CHECK-NEXT: builtin_options: {
// CHECK-NEXT: stride_w: 2,
// CHECK-NEXT: stride_h: 2
// CHECK-NEXT: }
// CHECK-NEXT: } ],
// CHECK-NEXT: name: "main"
// CHECK-NEXT: } ],
// CHECK-NEXT: description: "MLIR Converted.",
// CHECK-NEXT: buffers: [ {
// CHECK-EMPTY:
// CHECK-NEXT: }, {
// CHECK-EMPTY:
// CHECK-NEXT: }, {
// CHECK-EMPTY:
// CHECK-NEXT: }, {
// CHECK-EMPTY:
// CHECK-NEXT: }, {
// CHECK-EMPTY:
// CHECK-NEXT: }, {
// CHECK-NEXT: data: [ 49, 46, 57, 46, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
// CHECK-NEXT: } ],
// CHECK-NEXT: metadata: [ {
// CHECK-NEXT: name: "min_runtime_version",
// CHECK-NEXT: buffer: 5
// CHECK-NEXT: } ]
// CHECK-NEXT:}
  // 'constant unit' is the MLIR 'none' value, passed as the optional bias
  // operand below; the exporter is expected to drop it from the input list.
  %cst = constant unit
  %0 = "tfl.transpose_conv"(%arg0, %arg1, %arg2, %cst) {padding = "SAME", stride_h = 2 : i32, stride_w = 2 : i32} : (tensor<4xi32>, tensor<32x4x4x128xf32>, tensor<1x32x42x128xf32>, none) -> tensor<1x64x84x32xf32>
  return %0 : tensor<1x64x84x32xf32>
}

View File

@@ -865,18 +865,10 @@ TfLiteStatus QuantizeBiases(ModelT* model,
continue;
}
for (const int bias_idx : property.biases) {
if (op->inputs[bias_idx] == kTfLiteOptionalTensor) {
if (bias_idx >= op->inputs.size() ||
op->inputs[bias_idx] == kTfLiteOptionalTensor) {
continue;
}
if (bias_idx >= op->inputs.size()) {
TF_LITE_REPORT_ERROR(
error_reporter,
"Required input index %d is larger than the input length of "
"op %s at index %d in subgraph %d",
bias_idx, op->inputs.size(), EnumNameBuiltinOperator(op_code),
op_idx, subgraph_idx);
return kTfLiteError;
}
// Quantize if it is not quantized already as the
// output of another op or input of another op.
TensorT* bias_tensor = subgraph->tensors[op->inputs[bias_idx]].get();
@@ -1046,7 +1038,8 @@ TfLiteStatus EnsureBiasScaleCompatibility(
// Loop over all bias tensors.
for (const int bias_idx : property.biases) {
if (op->inputs[bias_idx] == kTfLiteOptionalTensor) {
if (bias_idx >= op->inputs.size() ||
op->inputs[bias_idx] == kTfLiteOptionalTensor) {
continue;
}
TensorT* bias_tensor = subgraph->tensors[op->inputs[bias_idx]].get();