Add legalization for floorMod and Exp ops in MLIR converter
PiperOrigin-RevId: 262426012
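
For orientation, the new legalization maps each TF dialect op one-to-one onto its TFL counterpart, forwarding operands and types unchanged; a minimal before/after sketch in MLIR, mirroring the FloorMod test case added further down (the function name and shapes are taken from that test, the sketch itself is illustrative only):

```mlir
// Before legalization (TF dialect):
func @floor_mod(%arg0: tensor<5xf32>, %arg1: tensor<5xf32>) -> tensor<5xf32> {
  %0 = "tf.FloorMod"(%arg0, %arg1) : (tensor<5xf32>, tensor<5xf32>) -> tensor<5xf32>
  return %0 : tensor<5xf32>
}

// After legalization (TFL dialect):
func @floor_mod(%arg0: tensor<5xf32>, %arg1: tensor<5xf32>) -> tensor<5xf32> {
  %0 = "tfl.floor_mod"(%arg0, %arg1) : (tensor<5xf32>, tensor<5xf32>) -> tensor<5xf32>
  return %0 : tensor<5xf32>
}
```
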
@@ -855,9 +855,9 @@ def TFL_ExpOp: TFL_Op<"exp", [NoSideEffect, SameOperandsAndResultType]> {
     Performs element-wise natural exponentiation operation on input.
   }];

-  let arguments = (ins AnyTensor:$x);
+  let arguments = (ins TFL_FpTensor:$x);

-  let results = (outs AnyTensor:$y);
+  let results = (outs TFL_FpTensor:$y);

   let hasOptions = 0b1;
 }
@@ -999,14 +999,12 @@ def TFL_FloorModOp : TFL_Op<"floor_mod", [Broadcastable, NoSideEffect]> {
   }];

   let arguments = (
-      ins AnyTensor:$lhs,
-      AnyTensor:$rhs);
+      ins TensorOf<[I32, I64, F32]>:$lhs,
+      TensorOf<[I32, I64, F32]>:$rhs);

-  let results = (outs AnyTensor:$output);
+  let results = (outs TensorOf<[I32, I64, F32]>:$output);

-  let parser = [{ return mlir::impl::parseBinaryOp(parser, result); }];
-  let printer = [{ return mlir::impl::printBinaryOp(getOperation(), p); }];
   let builders = [TFL_BroadcastableBinaryBuilder];
 }

 def TFL_GreaterOp : TFL_Op<"greater", [NoSideEffect, TFL_NoQuantizableResult]> {
@@ -1052,3 +1052,17 @@ func @where(%arg0: tensor<3x5xi1>) -> tensor<?x2xi64> {
 // CHECK-LABEL: where
 // CHECK: "tfl.where"(%arg0) : (tensor<3x5xi1>) -> tensor<?x2xi64>
 }
+
+func @floor_mod(%arg0: tensor<5xf32>, %arg1: tensor<5xf32>) -> tensor<5xf32> {
+  %0 = "tf.FloorMod"(%arg0, %arg1) : (tensor<5xf32>, tensor<5xf32>) -> tensor<5xf32>
+  return %0 : tensor<5xf32>
+// CHECK-LABEL: floor_mod
+// CHECK: "tfl.floor_mod"(%arg0, %arg1) : (tensor<5xf32>, tensor<5xf32>) -> tensor<5xf32>
+}
+
+func @exp(%arg0: tensor<5xf32>) -> tensor<5xf32> {
+  %0 = "tf.Exp"(%arg0) : (tensor<5xf32>) -> tensor<5xf32>
+  return %0 : tensor<5xf32>
+// CHECK-LABEL: exp
+// CHECK: "tfl.exp"(%arg0) : (tensor<5xf32>) -> tensor<5xf32>
+}
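
The functions above are FileCheck tests: the body feeds a TF op through the legalization pass and the CHECK lines assert the rewritten TFL form. The file's RUN line is not part of this hunk; it is assumed to have the usual shape for this test file, roughly as below (both the tool name and the pass flag are assumptions, not taken from this diff):

```mlir
// RUN: tf-opt %s -tfl-legalize-tf | FileCheck %s
```
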
@@ -307,11 +307,9 @@ func @testFloorDivF32(%arg0: tensor<2 x f32>, %arg1: tensor<2 x i32>) -> tensor<
 // -----

 // CHECK-LABEL: testFloorMod
-func @testFloorMod(tensor<? x i32>, tensor<? x i32>) -> tensor<? x i32> {
-^bb0(%arg0: tensor<? x i32>, %arg1: tensor<? x i32>):
-  // CHECK: tfl.floor_mod %arg0, %arg1
-  %0 = tfl.floor_mod %arg0, %arg1 : tensor<? x i32>
-  return %0#0 : tensor<? x i32>
+func @testFloorMod(%arg0: tensor<? x i32>, %arg1: tensor<? x i32>) -> tensor<? x i32> {
+  %0 = "tfl.floor_mod"(%arg0, %arg1) : (tensor<? x i32>, tensor<? x i32>) -> tensor<? x i32>
+  return %0 : tensor<? x i32>
 }

 // CHECK-LABEL: testPow
@@ -299,4 +299,7 @@ def : Pat<

 def : Pat<(TF_UniqueOp $arg0),(TFL_UniqueOp $arg0)>;

+def : Pat<(TF_FloorModOp $arg0, $arg1), (TFL_FloorModOp $arg0, $arg1)>;
+def : Pat<(TF_ExpOp $arg0), (TFL_ExpOp $arg0)>;
+
 def : Pat<(TF_LRNOp $arg0, $radius, F32Attr:$bias, F32Attr:$alpha, F32Attr:$beta), (TFL_LocalResponseNormalizationOp $arg0, (convertIntAttrTo32Bit $radius), $bias, $alpha, $beta)>;
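
Both new rules use the simplest form of MLIR's table-driven declarative rewrite (DRR) syntax: the source op's operands are captured by name and forwarded unchanged to the target op, with no extra constraints or attribute transforms (contrast the LRN pattern just above, which rewrites the radius attribute via `convertIntAttrTo32Bit`). A commented restatement of that shape, for readers new to DRR:

```tablegen
// Pat<source pattern, result pattern>:
//   $arg0 and $arg1 bind the matched operands of the TF op and are
//   reused verbatim as the operands of the generated TFL op.
def : Pat<(TF_FloorModOp $arg0, $arg1), (TFL_FloorModOp $arg0, $arg1)>;
def : Pat<(TF_ExpOp $arg0), (TFL_ExpOp $arg0)>;
```
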
@@ -727,6 +727,51 @@ tf.math.equal(x, y) ==> array([True, True])
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
 }

+def TF_ExpOp : TF_Op<"Exp", [NoSideEffect, SameOperandsAndResultType]> {
+  let summary = [{
+Computes exponential of x element-wise. \\(y = e^x\\).
+  }];
+
+  let description = [{
+This function computes the exponential of every element in the input tensor.
+i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor.
+`e` denotes Euler's number and is approximately equal to 2.718281.
+Output is positive for any real input.
+
+```python
+x = tf.constant(2.0)
+tf.math.exp(x) ==> 7.389056
+
+x = tf.constant([2.0, 8.0])
+tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32)
+```
+
+For complex numbers, the exponential value is calculated as follows:
+
+```
+e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y)
+```
+
+Let's consider complex number 1+1j as an example.
+e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j)
+
+```python
+x = tf.constant(1 + 1j)
+tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j
+```
+  }];
+
+  let arguments = (ins
+    TF_FpOrComplexTensor:$x
+  );
+
+  let results = (outs
+    TF_FpOrComplexTensor:$y
+  );
+
+  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
+}
+
 def TF_ExpandDimsOp : TF_Op<"ExpandDims", [NoSideEffect]> {
   let summary = "Inserts a dimension of 1 into a tensor's shape.";

@@ -939,6 +984,32 @@ def TF_FloorDivOp : TF_Op<"FloorDiv", [Broadcastable, NoSideEffect]>,
   TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
 }

+def TF_FloorModOp : TF_Op<"FloorMod", [Broadcastable, NoSideEffect]>,
+                    WithBroadcastableBinOpBuilder {
+  let summary = [{
+Returns element-wise remainder of division. When `x < 0` xor `y < 0` is
+  }];
+
+  let description = [{
+true, this follows Python semantics in that the result here is consistent
+with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.
+
+*NOTE*: `FloorMod` supports broadcasting. More about broadcasting
+[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
+  }];
+
+  let arguments = (ins
+    TF_FpOrI32OrI64Tensor:$x,
+    TF_FpOrI32OrI64Tensor:$y
+  );
+
+  let results = (outs
+    TF_FpOrI32OrI64Tensor:$z
+  );
+
+  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
+}
+
 def TF_FusedBatchNormOp : TF_Op<"FusedBatchNorm", [NoSideEffect]> {
   let summary = "Batch normalization.";

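
The `TF_FloorModOp` description above leans on the flooring-divide identity; here is a small worked check in plain Python, whose `%` operator already follows the same floor semantics (illustrative only, it does not exercise TensorFlow):

```python
import math

x, y = -7, 3
q = math.floor(x / y)    # flooring divide: floor(-7 / 3) = -3
r = x - q * y            # remainder consistent with it: -7 - (-3 * 3) = 2
assert r == x % y == 2   # Python's % agrees; C-style truncation would give -1
assert q * y + r == x    # floor(x / y) * y + mod(x, y) == x
```
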