From 2d2500117e467f2a56eaba14b142c5d7d9f44f90 Mon Sep 17 00:00:00 2001
From: Lucy Fox
Date: Thu, 4 Jun 2020 23:07:45 -0700
Subject: [PATCH] Add tf.BiasAddV1 op and canonicalize to tf.BiasAdd.

The difference between the two ops is that BiasAdd has a data format
attribute. We canonicalize from v1 by using the default data format for
this attribute.

PiperOrigin-RevId: 314872733
Change-Id: I947a0a40a4aeca3262319a2b8b9dbf8b8b11328a
---
 .../mlir/tensorflow/ir/tf_generated_ops.td    | 24 +++++++++++++++++++
 .../compiler/mlir/tensorflow/ir/tf_ops.cc     |  9 +++++++
 .../mlir/tensorflow/tests/canonicalize.mlir   |  7 ++++++
 .../tensorflow/transforms/canonicalize.td     |  6 +++++
 4 files changed, 46 insertions(+)

diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td b/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
index 024bc6c21dd..1c9297485b4 100644
--- a/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
+++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
@@ -917,6 +917,30 @@ the feature dimension is the third-to-last.
   }];
 }
 
+def TF_BiasAddV1Op : TF_Op<"BiasAddV1", [NoSideEffect]> {
+  let summary = "Adds `bias` to `value`.";
+
+  let description = [{
+This is a deprecated version of BiasAdd and will be soon removed.
+
+This is a special case of `tf.add` where `bias` is restricted to be 1-D.
+Broadcasting is supported, so `value` may have any number of dimensions.
+  }];
+
+  let arguments = (ins
+    TensorOf<[BF16, F16, F32, F64, I16, I32, I64, I8, TF_Complex128, TF_Complex64, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$value,
+    TensorOf<[BF16, F16, F32, F64, I16, I32, I64, I8, TF_Complex128, TF_Complex64, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$bias
+  );
+
+  let results = (outs
+    TensorOf<[BF16, F16, F32, F64, I16, I32, I64, I8, TF_Complex128, TF_Complex64, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
+  );
+
+  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
+
+  let hasCanonicalizer = 1;
+}
+
 def TF_BitcastOp : TF_Op<"Bitcast", [NoSideEffect]> {
   let summary = [{
 Bitcasts a tensor from one type to another without copying data.
diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_ops.cc b/tensorflow/compiler/mlir/tensorflow/ir/tf_ops.cc
index 8154e501f60..7b3e1508efb 100644
--- a/tensorflow/compiler/mlir/tensorflow/ir/tf_ops.cc
+++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_ops.cc
@@ -755,6 +755,15 @@ static LogicalResult Verify(BiasAddGradOp op) {
   return success();
 }
 
+//===----------------------------------------------------------------------===//
+// BiasAddV1Op
+//===----------------------------------------------------------------------===//
+
+void BiasAddV1Op::getCanonicalizationPatterns(OwningRewritePatternList &results,
+                                              MLIRContext *context) {
+  results.insert<BiasAddV1ToBiasAdd>(context);
+}
+
 //===----------------------------------------------------------------------===//
 // BitcastOp
 //===----------------------------------------------------------------------===//
diff --git a/tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir b/tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir
index 958a2a0182e..3b499f404bf 100644
--- a/tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir
+++ b/tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir
@@ -34,6 +34,13 @@ func @testBatchMatMulV2ToMatMul(%arg0: tensor<4x3xf32>, %arg1: tensor<4x5xf32>)
   // CHECK: return %0
 }
 
+// CHECK-LABEL: testBiasAddV1ToBiasAdd
+func @testBiasAddV1ToBiasAdd(%arg0: tensor<*xf32>, %arg1: tensor<128xf32>) -> tensor<*xf32> {
+  // CHECK: "tf.BiasAdd"(%arg0, %arg1) {data_format = "NHWC"} : (tensor<*xf32>, tensor<128xf32>) -> tensor<*xf32>
+  %0 = "tf.BiasAddV1"(%arg0, %arg1) : (tensor<*xf32>, tensor<128xf32>) -> tensor<*xf32>
+  return %0: tensor<*xf32>
+}
+
 // CHECK-LABEL: func @testLeakyRelu
 func @testLeakyRelu(%arg0 : tensor<16xf32>) -> (tensor<16xf32>) {
   %2 = "tf.LeakyRelu"(%arg0) {alpha = 1.0 : f32} : (tensor<16xf32>) -> tensor<16xf32>
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/canonicalize.td b/tensorflow/compiler/mlir/tensorflow/transforms/canonicalize.td
index cf09f8d64fb..43661902138 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/canonicalize.td
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/canonicalize.td
@@ -65,6 +65,12 @@ def BatchMatMulV2ToMatMul : Pat<(TF_BatchMatMulV2Op $x, $y, $adj_x, $adj_y),
                                 (TF_MatMulOp $x, $y, $adj_x, $adj_y),
                                 [(IsRank2Tensor $x), (IsRank2Tensor $y)]>;
 
+//===----------------------------------------------------------------------===//
+// BiasAddV1 op patterns.
+//===----------------------------------------------------------------------===//
+
+def BiasAddV1ToBiasAdd : Pat<(TF_BiasAddV1Op $arg0, $arg1),
+                             (TF_BiasAddOp $arg0, $arg1, ConstantAttr<StrAttr, "\"NHWC\"">)>;
 //===----------------------------------------------------------------------===//
 // Bitcast op patterns.
 //===----------------------------------------------------------------------===//