Replace SameOperandsAndResultType with TFL_TCresVTEtIsSameAsOp to cover quantized element types

Also fixes a Mobilenet-v3-quant conversion failure.
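(Illustrative note, not part of the original change description.) SameOperandsAndResultType requires operand and result types to match exactly, including quantization parameters. In a quantized graph, an op such as tfl.hard_swish legitimately carries different scales and zero points on its input and output, so exact type equality fails verification; TFL_TCresVTEtIsSameAsOp compares only element types, treating quantized element types through their storage type. A hypothetical IR snippet of the kind that must verify after this change (shapes and quantization parameters are invented):

    // Operand and result both use i8 storage, but carry different scale
    // and zero point, so they are not the *same* type.
    %0 = "tfl.hard_swish"(%arg0)
        : (tensor<1x112x112x16x!quant.uniform<i8:f32, 0.023:-12>>)
        -> tensor<1x112x112x16x!quant.uniform<i8:f32, 0.017:-9>>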

PiperOrigin-RevId: 311473695
Change-Id: I08f836a2b829772f7a8d6b39766ab67ccd2c9a10
Author: Jaesung Chung, 2020-05-13 22:48:33 -07:00 (committed by TensorFlower Gardener)
parent d5a5959dd3
commit 4afee5f519
2 changed files with 22 additions and 15 deletions

tensorflow/compiler/mlir/lite/ir/tfl_ops.td

@@ -1561,10 +1561,12 @@ def TFL_GreaterOp : TFL_Op<"greater", [
   let printer = [{ return mlir::impl::printOneResultOp(getOperation(), p); }];
 }
 
-def TFL_HardSwishOp: TFL_Op<"hard_swish", [NoSideEffect,
-                                           SameOperandsAndResultShape,
-                                           SameOperandsAndResultType,
-                                           TFL_GpuTargetOp]> {
+def TFL_HardSwishOp: TFL_Op<"hard_swish", [
+    NoSideEffect,
+    SameOperandsAndResultShape,
+    PredOpTrait<"input and output must have same element type",
+      TFL_TCresVTEtIsSameAsOp<0, 0>>,
+    TFL_GpuTargetOp]> {
   let summary = "Hardswish activation function.";
   let description = [{
     Computes hard-swish activation function
@@ -1574,7 +1576,7 @@ def TFL_HardSwishOp: TFL_Op<"hard_swish", [NoSideEffect,
   let arguments = (ins TFL_TensorOf<[F32, QUI8, QI8]>:$input);
 
-  let results = (outs TFL_TensorOf<[F32, QUI8, QI8]>:$out);
+  let results = (outs TFL_TensorOf<[F32, QUI8, QI8]>:$output);
 
   let hasOptions = 0;
 }
 
@@ -1606,7 +1608,8 @@ def TFL_L2NormalizationOp : TFL_Op<"l2_normalization", [NoSideEffect,
 def TFL_LeakyReluOp: TFL_Op<"leaky_relu", [
     SameOperandsAndResultShape,
     NoSideEffect,
-    SameOperandsAndResultType]> {
+    PredOpTrait<"input and output must have same element type",
+      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
   let summary = "Leaky Relu operator";
 
   let description = [{
@@ -1740,7 +1743,8 @@ def TFL_LogOp: TFL_Op<"log", [
 def TFL_LogSoftmaxOp : TFL_Op<"log_softmax", [
     NoSideEffect,
     SameOperandsAndResultShape,
-    SameOperandsAndResultType,
+    PredOpTrait<"x and y must have same element type",
+      TFL_TCresVTEtIsSameAsOp<0, 0>>,
     // zero_point = max_value
     // scale = -log_softmax_output_min / (max_value + 1)
     FixedResultScale<Int8UniformQuantizedType<127, 625, -4>>,
@@ -1896,11 +1900,11 @@ Rounds the values of a tensor to the nearest integer, element-wise.
   }];
 
   let arguments = (ins
-    TFL_TensorOf<[F32]>:$x
+    TFL_FpTensor:$x
   );
 
   let results = (outs
-    TFL_TensorOf<[F32]>:$y
+    TFL_FpTensor:$y
   );
 }
 
@@ -2443,9 +2447,9 @@ def TFL_RsqrtOp: TFL_Op<"rsqrt", [NoSideEffect,
     Computes element-wise reverse square root of input
   }];
 
-  let arguments = (ins AnyTensor:$x);
+  let arguments = (ins TFL_FpTensor:$x);
 
-  let results = (outs AnyTensor:$y);
+  let results = (outs TFL_FpTensor:$y);
 
   let hasFolder = 1;
 }
@@ -3361,9 +3365,11 @@ def TFL_QuantizeOp: TFL_Op<"quantize", [
   let results = (outs AnyTensor:$output);
 }
 
-def TFL_DensifyOp: TFL_Op<"densify", [NoSideEffect,
-                                      SameOperandsAndResultType,
-                                      NoQuantizableResult]> {
+def TFL_DensifyOp: TFL_Op<"densify", [
+    NoSideEffect,
+    PredOpTrait<"input and output must have same element type",
+      TFL_TCresVTEtIsSameAsOp<0, 0>>,
+    NoQuantizableResult]> {
   let summary = "Densify operator";
 
   let description = [{

tensorflow/compiler/mlir/lite/transforms/dense_to_sparse.cc

@@ -321,7 +321,8 @@ void DenseToSparse::runOnFunction() {
     if (result.needs_densify) {
       const auto value = op->getOperand(operand);
-      auto densify = builder.create<DensifyOp>(op->getLoc(), value);
+      auto densify =
+          builder.create<DensifyOp>(op->getLoc(), value.getType(), value);
       value.replaceAllUsesWith(densify);
       densify.setOperand(value);
     }
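
(Illustrative note, not part of the commit.) For an op tagged SameOperandsAndResultType, ODS generates a convenience builder that infers the result type from the operand, which is what the old builder.create<DensifyOp>(op->getLoc(), value) call relied on. With the trait replaced by a predicate, that inferring builder is no longer emitted, so the call site must supply the result type explicitly. A sketch of the two call shapes (assumed, not copied from the generated code):

    // Before: result type inferred from the operand by the builder that
    // SameOperandsAndResultType made available.
    //   auto densify = builder.create<DensifyOp>(op->getLoc(), value);
    // After: the result type is passed explicitly; densify preserves the
    // operand's element type, so its type is simply reused.
    auto densify =
        builder.create<DensifyOp>(op->getLoc(), value.getType(), value);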