@@ -67,7 +67,7 @@ func @QuantizeConv2DPerChannel(%arg0: tensor<1x224x224x3x!quant.uniform<u8:f32,
   return %conv : tensor<1x112x112x32xf32>

 // CHECK-NEXT: %[[cst:.*]] = constant dense<1.000000e+00> : tensor<32xf32>
-// CHECK-NEXT: %[[qbias:.*]] = "tfl.quantize"(%[[cst]]) {qtype = tensor<32x!quant.uniform<i32:f32:0, {1.500000e+00,3.000000e+00,4.500000e+00}>>}
+// CHECK-NEXT: %[[qbias:.*]] = "tfl.quantize"(%[[cst]]) {qtype = tensor<32x!quant.uniform<i32:f32:0, {1.500000e+00,3.000000e+00,4.500000e+00}>>, volatile}
 // CHECK-NEXT: %[[bias:.*]] = "tfl.dequantize"(%[[qbias]])
 // CHECK-NEXT: %[[in:.*]] = "tfl.dequantize"(%arg0)
 // CHECK-NEXT: %[[w:.*]] = "tfl.dequantize"(%arg1)
@@ -87,7 +87,7 @@ func @QuantizeConv2DPerChannels(%arg0: tensor<1x224x224x3x!quant.uniform<u8:f32:
   return %conv : tensor<1x112x112x32xf32>

 // CHECK-NEXT: %[[cst:.*]] = constant dense<1.000000e+00> : tensor<32xf32>
-// CHECK-NEXT: %[[qbias:.*]] = "tfl.quantize"(%[[cst]]) {qtype = tensor<32x!quant.uniform<i32:f32:0, {1.000000e+00,4.000000e+00,9.000000e+00}>>}
+// CHECK-NEXT: %[[qbias:.*]] = "tfl.quantize"(%[[cst]]) {qtype = tensor<32x!quant.uniform<i32:f32:0, {1.000000e+00,4.000000e+00,9.000000e+00}>>, volatile}
 // CHECK-NEXT: %[[bias:.*]] = "tfl.dequantize"(%[[qbias]])
 // CHECK-NEXT: %[[in:.*]] = "tfl.dequantize"(%arg0)
 // CHECK-NEXT: %[[w:.*]] = "tfl.dequantize"(%arg1)
@@ -107,7 +107,7 @@ func @QuantizeConv2D(tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>
   return %6 : tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>

 // CHECK: %cst = constant dense<-1.23697901> : tensor<32xf32>
-// CHECK: %0 = "tfl.quantize"(%cst) {qtype = tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>}
+// CHECK: %0 = "tfl.quantize"(%cst) {qtype = tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>, volatile}
 // CHECK: %1 = "tfl.dequantize"(%0) : (tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>)
 // CHECK: %2 = "tfl.dequantize"(%arg0)
 // CHECK: %3 = "tfl.pseudo_qconst"()
@@ -129,7 +129,7 @@ func @QuantizeFullyConnected(tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e
   return %6 : tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>

 // CHECK: %cst = constant dense<-1.23697901> : tensor<32xf32>
-// CHECK: %0 = "tfl.quantize"(%cst) {qtype = tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>}
+// CHECK: %0 = "tfl.quantize"(%cst) {qtype = tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>, volatile}
 // CHECK: %1 = "tfl.dequantize"(%0) : (tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>)
 // CHECK: %2 = "tfl.dequantize"(%arg0)
 // CHECK: %3 = "tfl.pseudo_qconst"()
@@ -151,7 +151,7 @@ func @QuantizeDepthwiseConv2D(tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500
   return %6 : tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>

 // CHECK: %cst = constant dense<-1.23697901> : tensor<32xf32>
-// CHECK: %0 = "tfl.quantize"(%cst) {qtype = tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>}
+// CHECK: %0 = "tfl.quantize"(%cst) {qtype = tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>, volatile}
 // CHECK: %1 = "tfl.dequantize"(%0) : (tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>)
 // CHECK: %2 = "tfl.dequantize"(%arg0)
 // CHECK: %3 = "tfl.pseudo_qconst"()
@@ -232,7 +232,7 @@ func @QuantizeStridedSlice(tensor<12x2x2x5x!quant.uniform<u8:f32, 0.1>>, tensor<

 // CHECK: %0 = "tfl.dequantize"(%arg0)
 // CHECK: %1 = "tfl.strided_slice"(%0, %arg1, %arg2, %arg3)
-// CHECK: %2 = "tfl.quantize"(%1) {qtype = tensor<1x2x2x5x!quant.uniform<u8:f32, 1.000000e-01>>}
+// CHECK: %2 = "tfl.quantize"(%1) {qtype = tensor<1x2x2x5x!quant.uniform<u8:f32, 1.000000e-01>>, volatile}
 // CHECK: %3 = "tfl.dequantize"(%2)
 // CHECK: return %3 : tensor<1x2x2x5xf32>
 }
@@ -277,7 +277,7 @@ func @QuantizeReshape2D(tensor<1x6x6x16x!quant.uniform<u8:f32, 7.812500e-03:128>

 // CHECK: %0 = "tfl.dequantize"(%arg0) : (tensor<1x6x6x16x!quant.uniform<u8:f32, 7.812500e-03:128>>)
 // CHECK: %1 = "tfl.reshape"(%0, %{{.*}}) : (tensor<1x6x6x16xf32>, tensor<3xi32>) -> tensor<1x36x16xf32>
-// CHECK: %2 = "tfl.quantize"(%1) {qtype = tensor<1x36x16x!quant.uniform<u8:f32, 7.812500e-03:128>>}
+// CHECK: %2 = "tfl.quantize"(%1) {qtype = tensor<1x36x16x!quant.uniform<u8:f32, 7.812500e-03:128>>, volatile}
 // CHECK: %3 = "tfl.dequantize"(%2) : (tensor<1x36x16x!quant.uniform<u8:f32, 7.812500e-03:128>>)
 // CHECK: return %3 : tensor<1x36x16xf32>
 }
@@ -291,7 +291,7 @@ func @QuantizeSoftmax(tensor<1x6x6x16x!quant.uniform<u8:f32, 7.812500e-03:128>>)

 // CHECK: %0 = "tfl.dequantize"(%arg0)
 // CHECK: %1 = "tfl.softmax"(%0) {beta = 1.000000e+00 : f32} : (tensor<1x6x6x16xf32>) -> tensor<1x6x6x16xf32>
-// CHECK: %2 = "tfl.quantize"(%1) {qtype = tensor<1x6x6x16x!quant.uniform<u8:f32, 3.906250e-03>>}
+// CHECK: %2 = "tfl.quantize"(%1) {qtype = tensor<1x6x6x16x!quant.uniform<u8:f32, 3.906250e-03>>, volatile}
 // CHECK: %3 = "tfl.dequantize"(%2)
 // CHECK: return %3 : tensor<1x6x6x16xf32>
 }
@@ -305,7 +305,7 @@ func @QuantizeLogistic(tensor<1x6x6x16x!quant.uniform<u8:f32, 7.812500e-03:128>>

 // CHECK: %0 = "tfl.dequantize"(%arg0)
 // CHECK: %1 = "tfl.logistic"(%0) : (tensor<1x6x6x16xf32>) -> tensor<1x6x6x16xf32>
-// CHECK: %2 = "tfl.quantize"(%1) {qtype = tensor<1x6x6x16x!quant.uniform<u8:f32, 3.906250e-03>>}
+// CHECK: %2 = "tfl.quantize"(%1) {qtype = tensor<1x6x6x16x!quant.uniform<u8:f32, 3.906250e-03>>, volatile}
 // CHECK: %3 = "tfl.dequantize"(%2) : (tensor<1x6x6x16x!quant.uniform<u8:f32, 3.906250e-03>>) -> tensor<1x6x6x16xf32>
 // CHECK: return %3 : tensor<1x6x6x16xf32>
 }
@@ -327,7 +327,7 @@ func @QuantizeL2Norm(%arg0: tensor<1x6x6x16x!quant.uniform<u8:f32, 1.0>>) -> ten

 // CHECK: %[[in:.*]] = "tfl.dequantize"(%arg0)
 // CHECK: %[[l2:.*]] = "tfl.l2_normalization"(%[[in]])
-// CHECK: %[[q:.*]] = "tfl.quantize"(%[[l2]]) {qtype = tensor<1x6x6x16x!quant.uniform<u8:f32, 7.812500e-03:128>>}
+// CHECK: %[[q:.*]] = "tfl.quantize"(%[[l2]]) {qtype = tensor<1x6x6x16x!quant.uniform<u8:f32, 7.812500e-03:128>>, volatile}
 // CHECK: %[[dq:.*]] = "tfl.dequantize"(%[[q]])
 // CHECK: return %[[dq]] : tensor<1x6x6x16xf32>
 }
@@ -350,13 +350,13 @@ func @QuantizeConcatOperand0ToAll(tensor<1x2x!quant.uniform<u8:f32, 0.1:128>>, t
   %1 = "tfl.concatenation"(%0, %arg1) {axis = 0 : i32, fused_activation_function = "NONE"} : (tensor<1x2xf32>, tensor<1x2xf32>) -> tensor<2x2xf32>
   return %1 : tensor<2x2xf32>

-// CHECK: %0 = "tfl.quantize"(%arg1) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>} : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>
+// CHECK: %0 = "tfl.quantize"(%arg1) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>, volatile}
 // CHECK: %1 = "tfl.dequantize"(%0) : (tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>) -> tensor<1x2xf32>
 // CHECK: %2 = "tfl.dequantize"(%arg0) : (tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>) -> tensor<1x2xf32>
 // CHECK: %3 = "tfl.concatenation"(%2, %1) {axis = 0 : i32, fused_activation_function = "NONE"} : (tensor<1x2xf32>, tensor<1x2xf32>) -> tensor<2x2xf32>
-// CHECK: %4 = "tfl.quantize"(%3) {qtype = tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>} : (tensor<2x2xf32>) -> tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>
+// CHECK: %4 = "tfl.quantize"(%3) {qtype = tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>, volatile}
 // CHECK: %5 = "tfl.dequantize"(%4) : (tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>) -> tensor<2x2xf32>
-// CHeCK: return %5 : tensor<2x2xf32>
+// CHECK: return %5 : tensor<2x2xf32>
 }

 // CHECK-LABEL: QuantizeConcatOperand1ToAll
@@ -366,11 +366,11 @@ func @QuantizeConcatOperand1ToAll(tensor<1x2xf32>, tensor<1x2x!quant.uniform<u8:
   %1 = "tfl.concatenation"(%arg0, %0) {axis = 0 : i32, fused_activation_function = "NONE"} : (tensor<1x2xf32>, tensor<1x2xf32>) -> tensor<2x2xf32>
   return %1 : tensor<2x2xf32>

-// CHECK: %0 = "tfl.quantize"(%arg0) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>} : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>
+// CHECK: %0 = "tfl.quantize"(%arg0) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>, volatile}
 // CHECK: %1 = "tfl.dequantize"(%0) : (tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>) -> tensor<1x2xf32>
 // CHECK: %2 = "tfl.dequantize"(%arg1) : (tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>) -> tensor<1x2xf32>
 // CHECK: %3 = "tfl.concatenation"(%1, %2) {axis = 0 : i32, fused_activation_function = "NONE"} : (tensor<1x2xf32>, tensor<1x2xf32>) -> tensor<2x2xf32>
-// CHECK: %4 = "tfl.quantize"(%3) {qtype = tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>} : (tensor<2x2xf32>) -> tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>
+// CHECK: %4 = "tfl.quantize"(%3) {qtype = tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>, volatile}
 // CHECK: %5 = "tfl.dequantize"(%4) : (tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>) -> tensor<2x2xf32>
 // CHECK: return %5 : tensor<2x2xf32>
 }
@@ -382,9 +382,9 @@ func @QuantizeConcatResToAll(tensor<1x2xf32>, tensor<1x2xf32>) -> tensor<2x2x!qu
   %1 = "tfl.quantize"(%0) {qtype = tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>} : (tensor<2x2xf32>) -> tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>
   return %1 : tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>

-// CHECK: %0 = "tfl.quantize"(%arg1) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>} : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>
+// CHECK: %0 = "tfl.quantize"(%arg1) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>, volatile}
 // CHECK: %1 = "tfl.dequantize"(%0) : (tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>) -> tensor<1x2xf32>
-// CHECK: %2 = "tfl.quantize"(%arg0) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>} : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>
+// CHECK: %2 = "tfl.quantize"(%arg0) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>, volatile}
 // CHECK: %3 = "tfl.dequantize"(%2) : (tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>) -> tensor<1x2xf32>
 // CHECK: %4 = "tfl.concatenation"(%3, %1) {axis = 0 : i32, fused_activation_function = "NONE"} : (tensor<1x2xf32>, tensor<1x2xf32>) -> tensor<2x2xf32>
 // CHECK: %5 = "tfl.quantize"(%4) {qtype = tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>} : (tensor<2x2xf32>) -> tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>
@@ -399,7 +399,7 @@ func @QuantizeConcatResToAllNoRequantize(tensor<1x2x!quant.uniform<u8:f32, 0.1:1
   %2 = "tfl.quantize"(%1) {qtype = tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>} : (tensor<2x2xf32>) -> tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>
   return %2 : tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>

-// CHECK: %0 = "tfl.quantize"(%arg1) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>} : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>
+// CHECK: %0 = "tfl.quantize"(%arg1) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>, volatile}
 // CHECK: %1 = "tfl.dequantize"(%0) : (tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>) -> tensor<1x2xf32>
 // CHECK: %2 = "tfl.dequantize"(%arg0) : (tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>) -> tensor<1x2xf32>
 // CHECK: %3 = "tfl.concatenation"(%2, %1) {axis = 0 : i32, fused_activation_function = "NONE"} : (tensor<1x2xf32>, tensor<1x2xf32>) -> tensor<2x2xf32>
@@ -416,7 +416,7 @@ func @QuantizeConcatResToAllRequantize(tensor<1x2xf32>, tensor<1x2xf32>) -> tens
   %3 = "tfl.quantize"(%2) {qtype = tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>} : (tensor<2x2xf32>) -> tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>
   return %3 : tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>

-// CHECK: %[[Q1:.*]] = "tfl.quantize"(%arg1) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>} : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>
+// CHECK: %[[Q1:.*]] = "tfl.quantize"(%arg1) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>, volatile}
 // CHECK: %[[DQ1:.*]] = "tfl.dequantize"(%[[Q1]]) : (tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>) -> tensor<1x2xf32>
 // CHECK: %[[Q0:.*]] = "tfl.quantize"(%arg0) {qtype = tensor<1x2x!quant.uniform<u8:f32, 2.000000e+00:128>>} : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<u8:f32, 2.000000e+00:128>>
 // CHECK: %[[RQ0:.*]] = "tfl.quantize"(%[[Q0]]) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>} : (tensor<1x2x!quant.uniform<u8:f32, 2.000000e+00:128>>) -> tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>
@@ -434,7 +434,7 @@ func @QuantizeConcatResToAllRequantizeArg(tensor<1x2x!quant.uniform<u8:f32, 2.0:
   %3 = "tfl.quantize"(%2) {qtype = tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>} : (tensor<2x2xf32>) -> tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>
   return %3 : tensor<2x2x!quant.uniform<u8:f32, 1.000000e-01:128>>

-// CHECK: %[[Q1:.*]] = "tfl.quantize"(%arg1) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>} : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>
+// CHECK: %[[Q1:.*]] = "tfl.quantize"(%arg1) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>, volatile}
 // CHECK: %[[DQ1:.*]] = "tfl.dequantize"(%[[Q1]]) : (tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>) -> tensor<1x2xf32>
 // CHECK: %[[RQ0:.*]] = "tfl.quantize"(%arg0) {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>} : (tensor<1x2x!quant.uniform<u8:f32, 2.000000e+00:128>>) -> tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>
 // CHECK: %[[DQ0:.*]] = "tfl.dequantize"(%[[RQ0]]) : (tensor<1x2x!quant.uniform<u8:f32, 1.000000e-01:128>>) -> tensor<1x2xf32>
@@ -475,22 +475,22 @@ func @QuantizeChain(tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>
   return %10 : tensor<1x36x16xf32>

 // CHECK: %cst = constant dense<-1.23697901> : tensor<32xf32>
-// CHECK: %0 = "tfl.quantize"(%cst) {qtype = tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>}
+// CHECK: %0 = "tfl.quantize"(%cst) {qtype = tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>, volatile}
 // CHECK: %1 = "tfl.dequantize"(%0) : (tensor<32x!quant.uniform<i32:f32, 1.7052092479439231E-4>>)
 // CHECK: %2 = "tfl.dequantize"(%arg0) : (tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>)
 // CHECK: %3 = "tfl.pseudo_qconst"()
 // CHECK: %4 = "tfl.dequantize"(%3) : (tensor<32x3x3x3x!quant.uniform<u8<1:255>:f32, 0.021826678373682216:151>>)
 // CHECK: %5 = "tfl.average_pool_2d"(%2)
-// CHECK: %6 = "tfl.quantize"(%5) {qtype = tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>}
+// CHECK: %6 = "tfl.quantize"(%5) {qtype = tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>, volatile}
 // CHECK: %7 = "tfl.dequantize"(%6) : (tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>)
 // CHECK: %8 = "tfl.conv_2d"(%7, %4, %1)
 // CHECK: %9 = "tfl.quantize"(%8) {qtype = tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>}
 // CHECK: %10 = "tfl.dequantize"(%9) : (tensor<1x112x112x32x!quant.uniform<u8:f32, 0.023528476789885875>>)
 // CHECK: %11 = "tfl.reshape"(%10, %{{.*}})
-// CHECK: %12 = "tfl.quantize"(%11) {qtype = tensor<1x36x16x!quant.uniform<u8:f32, 0.023528476789885875>>}
+// CHECK: %12 = "tfl.quantize"(%11) {qtype = tensor<1x36x16x!quant.uniform<u8:f32, 0.023528476789885875>>, volatile}
 // CHECK: %13 = "tfl.dequantize"(%12) : (tensor<1x36x16x!quant.uniform<u8:f32, 0.023528476789885875>>)
 // CHECK: %14 = "tfl.softmax"(%13)
-// CHECK: %15 = "tfl.quantize"(%14) {qtype = tensor<1x36x16x!quant.uniform<u8:f32, 3.906250e-03>>}
+// CHECK: %15 = "tfl.quantize"(%14) {qtype = tensor<1x36x16x!quant.uniform<u8:f32, 3.906250e-03>>, volatile}
 // CHECK: %16 = "tfl.dequantize"(%15) : (tensor<1x36x16x!quant.uniform<u8:f32, 3.906250e-03>>)
 // CHECK: return %16 : tensor<1x36x16xf32>
 }
@@ -501,7 +501,7 @@ func @QuantizeConstant() -> tensor<2x3xf32> {
   return %cst : tensor<2x3xf32>

 // CHECK: %cst = constant dense{{.*}}tensor<2x3xf32>
-// CHECK: %0 = "tfl.quantize"(%cst) {qtype = tensor<2x3x!quant.uniform<u8:f32, 0.023529411764705882:128>>}
+// CHECK: %0 = "tfl.quantize"(%cst) {qtype = tensor<2x3x!quant.uniform<u8:f32, 0.023529411764705882:128>>, volatile}
 // CHECK: %1 = "tfl.dequantize"(%0)
 // CHECK: return %1 : tensor<2x3xf32>
 }
@@ -521,7 +521,7 @@ func @QuantizeZeroSplat() -> tensor<2x3xf32> {
   return %cst : tensor<2x3xf32>

 // CHECK-NEXT: %[[cst:.*]] = constant dense<0.000000e+00> : tensor<2x3xf32>
-// CHECK-NEXT: "tfl.quantize"(%[[cst]]) {qtype = tensor<2x3x!quant.uniform<u8:f32, 1.000000e+00>>}
+// CHECK-NEXT: "tfl.quantize"(%[[cst]]) {qtype = tensor<2x3x!quant.uniform<u8:f32, 1.000000e+00>>, volatile}
 }

 // CHECK-LABEL: QuantizeZeroScalar
@@ -530,7 +530,7 @@ func @QuantizeZeroScalar() -> tensor<f32> {
   return %cst : tensor<f32>

 // CHECK-NEXT: %[[cst:.*]] = constant dense<0.000000e+00> : tensor<f32>
-// CHECK-NEXT: "tfl.quantize"(%[[cst]]) {qtype = tensor<!quant.uniform<u8:f32, 1.000000e+00>>}
+// CHECK-NEXT: "tfl.quantize"(%[[cst]]) {qtype = tensor<!quant.uniform<u8:f32, 1.000000e+00>>, volatile}
 }

 // CHECK-LABEL: QuantizePositiveSplat
@@ -539,7 +539,7 @@ func @QuantizePositiveSplat() -> tensor<2x3xf32> {
   return %cst : tensor<2x3xf32>

 // CHECK-NEXT: %[[cst:.*]] = constant dense<2.540000e+01> : tensor<2x3xf32>
-// CHECK-NEXT: "tfl.quantize"(%[[cst]]) {qtype = tensor<2x3x!quant.uniform<u8:f32, 0.099607841641295186>>}
+// CHECK-NEXT: "tfl.quantize"(%[[cst]]) {qtype = tensor<2x3x!quant.uniform<u8:f32, 0.099607841641295186>>, volatile}
 }

 // CHECK-LABEL: QuantizePositiveScalar
@@ -548,7 +548,7 @@ func @QuantizePositiveScalar() -> tensor<f32> {
   return %cst : tensor<f32>

 // CHECK-NEXT: %[[cst:.*]] = constant dense<2.540000e+00> : tensor<f32>
-// CHECK-NEXT: "tfl.quantize"(%[[cst]]) {qtype = tensor<!quant.uniform<u8:f32, 0.0099607841641295193>>}
+// CHECK-NEXT: "tfl.quantize"(%[[cst]]) {qtype = tensor<!quant.uniform<u8:f32, 0.0099607841641295193>>, volatile}
 }

 // CHECK-LABEL: QuantizeNegativeSplat
@@ -557,7 +557,7 @@ func @QuantizeNegativeSplat() -> tensor<2x3xf32> {
   return %cst : tensor<2x3xf32>

 // CHECK-NEXT: %[[cst:.*]] = constant dense<-2.540000e+00> : tensor<2x3xf32>
-// CHECK-NEXT: "tfl.quantize"(%[[cst]]) {qtype = tensor<2x3x!quant.uniform<u8:f32, 0.0099607841641295193:255>>}
+// CHECK-NEXT: "tfl.quantize"(%[[cst]]) {qtype = tensor<2x3x!quant.uniform<u8:f32, 0.0099607841641295193:255>>, volatile}
 }

 // CHECK-LABEL: QuantizeNegativeScalar
@@ -566,7 +566,7 @@ func @QuantizeNegativeScalar() -> tensor<f32> {
   return %cst : tensor<f32>

 // CHECK-NEXT: %[[cst:.*]] = constant dense<-2.540000e+01> : tensor<f32>
-// CHECK-NEXT: "tfl.quantize"(%[[cst]]) {qtype = tensor<!quant.uniform<u8:f32, 0.099607841641295186:255>>}
+// CHECK-NEXT: "tfl.quantize"(%[[cst]]) {qtype = tensor<!quant.uniform<u8:f32, 0.099607841641295186:255>>, volatile}
 }

 // CHECK-LABEL: QuantizeSharedBiases
@@ -617,7 +617,7 @@ func @QuantizeSharedBiases2(
 // CHECK: %[[q:.*]] = "tfl.quantize"(%[[cst]])
 // CHECK: %[[dq:.*]] = "tfl.dequantize"(%[[q]])
 // CHECK: %[[cst_0:.*]] = constant dense<0.000000e+00> : tensor<32xf32>
-// CHECK: %[[q_0:.*]] = "tfl.quantize"(%[[cst_0]]) {qtype = tensor<32x!quant.uniform<u8:f32, 1.000000e+00>>}
+// CHECK: %[[q_0:.*]] = "tfl.quantize"(%[[cst_0]]) {qtype = tensor<32x!quant.uniform<u8:f32, 1.000000e+00>>, volatile}
 // CHECK: %[[dq_0:.*]] = "tfl.dequantize"(%[[q_0]])
 // CHECK: %{{.*}} = tfl.add %{{.*}}, %[[dq_0]]
 // CHECK: %{{.*}} = "tfl.conv_2d"(%{{.*}}, %{{.*}}, %[[dq]])