Add quant_StatisticsOp when importing variable tensor with calibrated data
PiperOrigin-RevId: 345007220
Change-Id: I928bcf66a8caea8096174d8d31ab200645359b08
parent 73e9dc4e1e
commit 77b3ba3502
@@ -510,6 +510,12 @@ Operation* BuildVariableOp(const tflite::TensorT& tensor,
     return op.getOperation();
   }
   auto op = builder.create<tfl::ConstOp>(loc, value);
+  if (!tensor.quantization->min.empty()) {
+    if (auto stats_op =
+            ConvertMinMaxToStatsOp(tensor, builder, op.getResult())) {
+      return stats_op;
+    }
+  }
   return op.getOperation();
 }
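For context, here is a minimal sketch of a helper with the signature implied by the call site above (ConvertMinMaxToStatsOp(tensor, builder, op.getResult())). It is illustrative only and not the actual TensorFlow implementation: the include list, the restriction to a single per-tensor min/max pair, and the empty axis attributes passed to the builder are assumptions made purely to show the idea of wrapping the constant's result in a "quant.stats" op whose layerStats attribute carries the calibrated [min, max] pair.

// Hypothetical sketch only -- NOT the implementation in the TensorFlow tree.
// The signature is inferred from the call site above; the axis-statistics
// builder arguments are assumptions.
// Assumed includes: "mlir/Dialect/Quant/QuantOps.h", "mlir/IR/Builders.h",
// and the generated TFLite flatbuffer schema header (tflite::TensorT).
static mlir::Operation* ConvertMinMaxToStatsOpSketch(
    const tflite::TensorT& tensor, mlir::OpBuilder b, mlir::Value result) {
  // Only per-tensor calibration (a single min and a single max) is handled
  // in this sketch.
  if (tensor.quantization == nullptr ||
      tensor.quantization->min.size() != 1 ||
      tensor.quantization->max.size() != 1) {
    return nullptr;
  }
  // layerStats is a two-element f32 tensor attribute holding [min, max],
  // e.g. dense<[-8.000000e-01, 1.600000e+00]> for input_activation_state18.
  auto layer_stats = mlir::DenseFPElementsAttr::get(
      mlir::RankedTensorType::get({2}, b.getF32Type()),
      {tensor.quantization->min[0], tensor.quantization->max[0]});
  // Wrap the constant's result in a "quant.stats" op; no per-axis statistics
  // are attached here.
  return b.create<mlir::quant::StatisticsOp>(
      b.getUnknownLoc(), result, layer_stats,
      /*axisStats=*/mlir::ElementsAttr(), /*axis=*/mlir::IntegerAttr());
}

Note that the layerStats values checked by the CHECK-DAG lines in the test below, [-8.000000e-01, 1.600000e+00] and [-2.000000e+00, 4.000000e+00], are exactly the min/max pairs set in the JSON hunks further down.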
@@ -1,10 +1,14 @@
 // RUN: json_to_flatbuffer %p/test_schema.fbs %s | flatbuffer_translate --tflite-flatbuffer-to-mlir -o - | FileCheck %s

-// CHECK: effective_hidden_scale_intermediate = tensor<!quant.calibrated<f32<-5.000000e-01:5.000000e-01>>>
-// CHECK: input_to_cell_intermediate = tensor<!quant.calibrated<f32<-4.000000e+00:4.000000e+00>>>
-// CHECK: input_to_forget_intermediate = tensor<!quant.calibrated<f32<-1.600000e+01:1.600000e+01>>>
-// CHECK: input_to_input_intermediate = tensor<!quant.calibrated<f32<-3.200000e+01:3.200000e+01>>>
-// CHECK: input_to_output_intermediate = tensor<!quant.calibrated<f32<-1.000000e+00:1.000000e+00>>>
+// CHECK-DAG: %[[input_18:.*]] = "quant.stats"({{.*}}) {layerStats = dense<[-8.000000e-01, 1.600000e+00]> : tensor<2xf32>} : (tensor<1x4xf32>) -> tensor<1x4xf32>
+// CHECK-DAG: %[[input_19:.*]] = "quant.stats"({{.*}}) {layerStats = dense<[-2.000000e+00, 4.000000e+00]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
+
+// CHECK: "tfl.unidirectional_sequence_lstm"({{.*}}, %[[input_18]], %[[input_19]], %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}})
+// CHECK-SAME: effective_hidden_scale_intermediate = tensor<!quant.calibrated<f32<-5.000000e-01:5.000000e-01>>>
+// CHECK-SAME: input_to_cell_intermediate = tensor<!quant.calibrated<f32<-4.000000e+00:4.000000e+00>>>
+// CHECK-SAME: input_to_forget_intermediate = tensor<!quant.calibrated<f32<-1.600000e+01:1.600000e+01>>>
+// CHECK-SAME: input_to_input_intermediate = tensor<!quant.calibrated<f32<-3.200000e+01:3.200000e+01>>>
+// CHECK-SAME: input_to_output_intermediate = tensor<!quant.calibrated<f32<-1.000000e+00:1.000000e+00>>>

 {
   "version": 3,
@@ -110,8 +114,8 @@
       "name": "input_activation_state18",
       "is_variable": true,
       "quantization": {
-        "min": [-0.9],
-        "max": [0.9]
+        "min": [-0.8],
+        "max": [1.6]
       }
     },
     {
@@ -119,8 +123,8 @@
       "name": "input_cell_state19",
       "is_variable": true,
       "quantization": {
-        "min": [-0.8],
-        "max": [0.8]
+        "min": [-2.0],
+        "max": [4.0]
       }
     },
     {