diff --git a/tensorflow/compiler/mlir/lite/ir/tfl_ops.td b/tensorflow/compiler/mlir/lite/ir/tfl_ops.td
index fdf1501dbef..8a949a45e2d 100644
--- a/tensorflow/compiler/mlir/lite/ir/tfl_ops.td
+++ b/tensorflow/compiler/mlir/lite/ir/tfl_ops.td
@@ -1561,10 +1561,12 @@ def TFL_GreaterOp : TFL_Op<"greater", [
   let printer = [{ return mlir::impl::printOneResultOp(getOperation(), p); }];
 }
 
-def TFL_HardSwishOp: TFL_Op<"hard_swish", [NoSideEffect,
-                                           SameOperandsAndResultShape,
-                                           SameOperandsAndResultType,
-                                           TFL_GpuTargetOp]> {
+def TFL_HardSwishOp: TFL_Op<"hard_swish", [
+    NoSideEffect,
+    SameOperandsAndResultShape,
+    PredOpTrait<"input and output must have same element type",
+      TFL_TCresVTEtIsSameAsOp<0, 0>>,
+    TFL_GpuTargetOp]> {
   let summary = "Hardswish activation function.";
   let description = [{
     Computes hard-swish activation function
@@ -1574,7 +1576,7 @@ def TFL_HardSwishOp: TFL_Op<"hard_swish", [NoSideEffect,
 
   let arguments = (ins TFL_TensorOf<[F32, QUI8, QI8]>:$input);
 
-  let results = (outs TFL_TensorOf<[F32, QUI8, QI8]>:$out);
+  let results = (outs TFL_TensorOf<[F32, QUI8, QI8]>:$output);
 
   let hasOptions = 0;
 }
@@ -1606,7 +1608,8 @@ def TFL_L2NormalizationOp : TFL_Op<"l2_normalization", [NoSideEffect,
 def TFL_LeakyReluOp: TFL_Op<"leaky_relu", [
     SameOperandsAndResultShape,
     NoSideEffect,
-    SameOperandsAndResultType]> {
+    PredOpTrait<"input and output must have same element type",
+      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
   let summary = "Leaky Relu operator";
 
   let description = [{
@@ -1740,7 +1743,8 @@ def TFL_LogOp: TFL_Op<"log", [
 def TFL_LogSoftmaxOp : TFL_Op<"log_softmax", [
     NoSideEffect,
     SameOperandsAndResultShape,
-    SameOperandsAndResultType,
+    PredOpTrait<"x and y must have same element type",
+      TFL_TCresVTEtIsSameAsOp<0, 0>>,
     // zero_point = max_value
     // scale = -log_softmax_output_min / (max_value + 1)
     FixedResultScale<Int8UniformQuantizedType<127, 625, -4>>,
@@ -1896,11 +1900,11 @@ Rounds the values of a tensor to the nearest integer, element-wise.
   }];
 
   let arguments = (ins
-    TFL_TensorOf<[F32]>:$x
+    TFL_FpTensor:$x
   );
 
   let results = (outs
-    TFL_TensorOf<[F32]>:$y
+    TFL_FpTensor:$y
   );
 }
 
@@ -2443,9 +2447,9 @@ def TFL_RsqrtOp: TFL_Op<"rsqrt", [NoSideEffect,
     Computes element-wise reverse square root of input
   }];
 
-  let arguments = (ins AnyTensor:$x);
+  let arguments = (ins TFL_FpTensor:$x);
 
-  let results = (outs AnyTensor:$y);
+  let results = (outs TFL_FpTensor:$y);
 
   let hasFolder = 1;
 }
@@ -3361,9 +3365,11 @@ def TFL_QuantizeOp: TFL_Op<"quantize", [
   let results = (outs AnyTensor:$output);
 }
 
-def TFL_DensifyOp: TFL_Op<"densify", [NoSideEffect,
-                                      SameOperandsAndResultType,
-                                      NoQuantizableResult]> {
+def TFL_DensifyOp: TFL_Op<"densify", [
+    NoSideEffect,
+    PredOpTrait<"input and output must have same element type",
+      TFL_TCresVTEtIsSameAsOp<0, 0>>,
+    NoQuantizableResult]> {
   let summary = "Densify operator";
 
   let description = [{
diff --git a/tensorflow/compiler/mlir/lite/transforms/dense_to_sparse.cc b/tensorflow/compiler/mlir/lite/transforms/dense_to_sparse.cc
index 201a0bb2481..9b526f40277 100644
--- a/tensorflow/compiler/mlir/lite/transforms/dense_to_sparse.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/dense_to_sparse.cc
@@ -321,7 +321,8 @@ void DenseToSparse::runOnFunction() {
 
     if (result.needs_densify) {
      const auto value = op->getOperand(operand);
-      auto densify = builder.create<DensifyOp>(op->getLoc(), value);
+      auto densify =
+          builder.create<DensifyOp>(op->getLoc(), value.getType(), value);
      value.replaceAllUsesWith(densify);
      densify.setOperand(value);
    }
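
Illustrative sketch (not part of the patch): once SameOperandsAndResultType is replaced by the element-type PredOpTrait, an op such as tfl.hard_swish can carry different quantization parameters on its input and output as long as the storage element type matches. The IR below is a hypothetical example (function name, shapes, and scale/zero-point values are made up) of what the relaxed verifier would accept:

    // Same i8 storage type on both sides, but different scale/zero-point:
    // accepted by TFL_TCresVTEtIsSameAsOp<0, 0>, rejected by the old
    // SameOperandsAndResultType trait.
    func @hard_swish_requant(%arg0: tensor<1x8x!quant.uniform<i8:f32, 0.1:-128>>)
        -> tensor<1x8x!quant.uniform<i8:f32, 0.05:-128>> {
      %0 = "tfl.hard_swish"(%arg0)
          : (tensor<1x8x!quant.uniform<i8:f32, 0.1:-128>>)
          -> tensor<1x8x!quant.uniform<i8:f32, 0.05:-128>>
      return %0 : tensor<1x8x!quant.uniform<i8:f32, 0.05:-128>>
    }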