Relax TFL_BatchMatMulOp input constraints for hybrid quantization.
PiperOrigin-RevId: 360471859
Change-Id: I23c5b452869f772ad61a166253d1f6d4b4a508cc
commit b6f149d84c
parent 29d60a3f38
@@ -1003,9 +1003,7 @@ def TFL_BatchMatMulOp : TFL_Op<"batch_matmul", [
     TFL_OperandHasAtleastRank<0, 2>,
     TFL_OperandHasAtleastRank<1, 2>,
     PredOpTrait<"x and output must have same element type",
-                TFL_TCresVTEtIsSameAsOp<0, 0>>,
-    PredOpTrait<"y and output must have same element type",
-                TFL_TCresVTEtIsSameAsOp<0, 1>>]> {
+                TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
 
   let summary = "Batch Matrix Multiply Operator";
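Editorial note (not part of the original commit message): the removed PredOpTrait required operand y to have the same element type as the output, which rejected the hybrid-quantized form where the weights (y) are per-tensor int8 while x and the output stay float32. A minimal sketch of an op that the verifier accepts after this change, mirroring the test added below; the function name @hybrid_example is illustrative only:

// Hybrid batch_matmul: float32 activations (x), int8-quantized weights (y),
// float32 output. With adj_y = true, y is transposed on its last two dims,
// so [.., 384, 32] x [.., 32, 384] -> [.., 384, 384].
func @hybrid_example(%x: tensor<1x4x384x32xf32>,
                     %y: tensor<1x4x384x32x!quant.uniform<i8:f32, 0.11:-16>>)
    -> tensor<1x4x384x384xf32> {
  %0 = "tfl.batch_matmul"(%x, %y) {adj_x = false, adj_y = true}
      : (tensor<1x4x384x32xf32>, tensor<1x4x384x32x!quant.uniform<i8:f32, 0.11:-16>>)
      -> tensor<1x4x384x384xf32>
  return %0 : tensor<1x4x384x384xf32>
}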
@@ -1387,6 +1387,15 @@ func @testBatchMatmulQuant(%arg0 : tensor<1x4x384x32x!quant.uniform<i8:f32, 0.06
   %0 = "tfl.batch_matmul"(%arg0, %arg1) {adj_x = false, adj_y = true} : (tensor<1x4x384x32x!quant.uniform<i8:f32, 0.06:-2>>, tensor<1x4x384x32x!quant.uniform<i8:f32, 0.11:-16>>) -> tensor<1x4x384x384x!quant.uniform<i8:f32, 1.02:-73>>
   return %0 : tensor<1x4x384x384x!quant.uniform<i8:f32, 1.02:-73>>
 }
 
 // -----
 
+func @testBatchMatmulHybridQuant(%arg0 : tensor<1x4x384x32xf32>, %arg1 : tensor<1x4x384x32x!quant.uniform<i8:f32, 0.11:-16>>) -> tensor<1x4x384x384xf32> {
+  // CHECK: "tfl.batch_matmul"(%arg0, %arg1)
+  %0 = "tfl.batch_matmul"(%arg0, %arg1) {adj_x = false, adj_y = true} : (tensor<1x4x384x32xf32>, tensor<1x4x384x32x!quant.uniform<i8:f32, 0.11:-16>>) -> tensor<1x4x384x384xf32>
+  return %0 : tensor<1x4x384x384xf32>
+}
+
+// -----
+
 func @testConcat(%arg0: tensor<1x2xi32>, %arg1: tensor<1x2xi32>) -> tensor<2x2xi32> {