PiperOrigin-RevId: 311323011
Change-Id: I0d60709d46dffa171e299a7e8bdfc9a1ae43fc06
A. Unique TensorFlower authored on 2020-05-13 07:18:43 -07:00; committed by TensorFlower Gardener
parent e4702e19bb
commit f8429e72fc
5 changed files with 13 additions and 37 deletions

@@ -6,8 +6,8 @@
// CHECK-SAME: %[[ARG0:.+]]: tensor<?xf32>,
// CHECK-SAME: %[[ARG1:.+]]: tensor<?xf32>
func @broadcast_add(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<1xindex> {
-// CHECK-DAG: %[[ARG0_S:.+]] = "shape.shape_of"(%[[ARG0]])
-// CHECK-DAG: %[[ARG1_S:.+]] = "shape.shape_of"(%[[ARG1]])
+// CHECK-DAG: %[[ARG0_S:.+]] = shape.shape_of %[[ARG0]]
+// CHECK-DAG: %[[ARG1_S:.+]] = shape.shape_of %[[ARG1]]
// CHECK-DAG: %[[BCAST_S:.+]] = "shape.broadcast"(%[[ARG0_S]], %[[ARG1_S]])
// CHECK: %[[EXTENTS:.+]] = "shape.to_extent_tensor"(%[[BCAST_S]])
// CHECK: return %[[EXTENTS]]
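Note: each test update in this commit tracks the same MLIR change: the shape dialect's shape.shape_of op now prints in its custom assembly form rather than the generic quoted form, so the CHECK patterns are rewritten to match. A minimal before/after sketch (the %shape name and the tensor<?xf32> operand are illustrative, taken from the patterns above rather than from the actual test files):

  %shape = "shape.shape_of"(%arg0) : (tensor<?xf32>) -> !shape.shape   // generic form (old CHECK pattern)
  %shape = shape.shape_of %arg0 : tensor<?xf32>                        // custom form (new CHECK pattern)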

@@ -14,8 +14,8 @@ func @addWithoutBroadcast(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<
// CHECK-SAME: %[[ARG0:.+]]: tensor<?xf32>
// CHECK-SAME: %[[ARG1:.+]]: tensor<?x?xf32>
func @dynamicBroadcast(%arg0: tensor<?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
-// CHECK-DAG: %[[ARG0_S:.+]] = "shape.shape_of"(%[[ARG0]])
-// CHECK-DAG: %[[ARG1_S:.+]] = "shape.shape_of"(%[[ARG1]])
+// CHECK-DAG: %[[ARG0_S:.+]] = shape.shape_of %[[ARG0]]
+// CHECK-DAG: %[[ARG1_S:.+]] = shape.shape_of %[[ARG1]]
// CHECK-DAG: %[[RESULT_S:.+]] = "shape.broadcast"(%[[ARG0_S]], %[[ARG1_S]])
// CHECK: %[[RESULT_EXTENTS:.+]] = "shape.to_extent_tensor"(%[[RESULT_S]])
// CHECK-DAG: %[[ARG0_B:.+]] = "xla_hlo.dynamic_broadcast_in_dim"(%[[ARG0]], %[[RESULT_EXTENTS]]) {broadcast_dimensions = dense<1> : tensor<1xi64>}
@@ -31,8 +31,8 @@ func @dynamicBroadcast(%arg0: tensor<?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?
// CHECK-SAME: %[[ARG0:.+]]: tensor<?xf32>
// CHECK-SAME: %[[ARG1:.+]]: tensor<?x?xf32>
func @dynamicBroadcastComplex(%arg0: tensor<?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xcomplex<f32>> {
-// CHECK-DAG: %[[ARG0_S:.+]] = "shape.shape_of"(%[[ARG0]])
-// CHECK-DAG: %[[ARG1_S:.+]] = "shape.shape_of"(%[[ARG1]])
+// CHECK-DAG: %[[ARG0_S:.+]] = shape.shape_of %[[ARG0]]
+// CHECK-DAG: %[[ARG1_S:.+]] = shape.shape_of %[[ARG1]]
// CHECK-DAG: %[[RESULT_S:.+]] = "shape.broadcast"(%[[ARG0_S]], %[[ARG1_S]])
// CHECK: %[[RESULT_EXTENTS:.+]] = "shape.to_extent_tensor"(%[[RESULT_S]])
// CHECK-DAG: %[[ARG0_B:.+]] = "xla_hlo.dynamic_broadcast_in_dim"(%[[ARG0]], %[[RESULT_EXTENTS]]) {broadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<?xf32>, tensor<2xindex>) -> tensor<?x?xf32>
@@ -48,8 +48,8 @@ func @dynamicBroadcastComplex(%arg0: tensor<?xf32>, %arg1: tensor<?x?xf32>) -> t
// CHECK-SAME: %[[ARG0:.+]]: tensor<?xf32>
// CHECK-SAME: %[[ARG1:.+]]: tensor<?x?xf32>
func @dynamicBroadcastCompare(%arg0: tensor<?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xi1> {
-// CHECK-DAG: %[[ARG0_S:.+]] = "shape.shape_of"(%[[ARG0]])
-// CHECK-DAG: %[[ARG1_S:.+]] = "shape.shape_of"(%[[ARG1]])
+// CHECK-DAG: %[[ARG0_S:.+]] = shape.shape_of %[[ARG0]]
+// CHECK-DAG: %[[ARG1_S:.+]] = shape.shape_of %[[ARG1]]
// CHECK-DAG: %[[RESULT_S:.+]] = "shape.broadcast"(%[[ARG0_S]], %[[ARG1_S]])
// CHECK: %[[RESULT_EXTENTS:.+]] = "shape.to_extent_tensor"(%[[RESULT_S]])
// CHECK-DAG: %[[ARG0_B:.+]] = "xla_hlo.dynamic_broadcast_in_dim"(%[[ARG0]], %[[RESULT_EXTENTS]]) {broadcast_dimensions = dense<1> : tensor<1xi64>} : (tensor<?xf32>, tensor<2xindex>) -> tensor<?x?xf32>

@@ -7,8 +7,8 @@
func @batchmatmulv2_basic(%arg0: tensor<1x4x2xf32>, %arg1: tensor<3x2x4xf32>) -> tensor<3x4x4xf32> {
// CHECK-LABEL: func @batchmatmulv2_basic
// CHECK-SAME: ([[LHS:%.*]]: tensor<1x4x2xf32>, [[RHS:%.*]]: tensor<3x2x4xf32>) -> tensor<3x4x4xf32>
-// CHECK: [[LHSSHAPE:%.*]] = "shape.shape_of"([[LHS]]) : (tensor<1x4x2xf32>) -> !shape.shape
-// CHECK: [[RHSSHAPE:%.*]] = "shape.shape_of"([[RHS]]) : (tensor<3x2x4xf32>) -> !shape.shape
+// CHECK: [[LHSSHAPE:%.*]] = shape.shape_of [[LHS]] : tensor<1x4x2xf32>
+// CHECK: [[RHSSHAPE:%.*]] = shape.shape_of [[RHS]] : tensor<3x2x4xf32>
// CHECK: [[CM2:%.*]] = constant -2 : i32
// CHECK: [[LHSHEAD:%.*]], [[LHSTAIL:%.*]] = "shape.split_at"([[LHSSHAPE]], [[CM2]]) : (!shape.shape, i32) -> (!shape.shape, !shape.shape)
// CHECK: [[RHSHEAD:%.*]], [[RHSTAIL:%.*]] = "shape.split_at"([[RHSSHAPE]], [[CM2]]) : (!shape.shape, i32) -> (!shape.shape, !shape.shape)
@@ -86,8 +86,8 @@ func @batchmatmulv2_adj_complex(%arg0: tensor<5x2xcomplex<f32>>, %arg1: tensor<2
// CHECK: [[RHSIM:%.*]] = "xla_hlo.imag"([[RHS]])
// CHECK: [[RHSIMNEG:%.*]] = "xla_hlo.negate"([[RHSIM]])
// CHECK: [[RHSCONJ:%.*]] = "xla_hlo.complex"([[RHSRE]], [[RHSIMNEG]])
// CHECK: "shape.shape_of"([[LHSCONJ]])
// CHECK: "shape.shape_of"([[RHSCONJ]])
// CHECK: shape.shape_of [[LHSCONJ]]
// CHECK: shape.shape_of [[RHSCONJ]]
%0 = "tf.BatchMatMulV2"(%arg0, %arg1) {adj_x = true, adj_y = true, device = ""} : (tensor<5x2xcomplex<f32>>, tensor<2x4xcomplex<f32>>) -> tensor<5x4xcomplex<f32>>
return %0 : tensor<5x4xcomplex<f32>>
}

@@ -1801,28 +1801,6 @@ cc_library(
    ],
)
-cc_library(
-    name = "StandardToStandard",
-    srcs = glob([
-        "lib/Conversion/StandardToStandard/*.cpp",
-        "lib/Conversion/StandardToStandard/*.h",
-    ]),
-    hdrs = glob([
-        "include/mlir/Conversion/StandardToStandard/*.h",
-    ]),
-    includes = [
-        "include",
-        "lib/Conversion/StandardToStandard",
-    ],
-    deps = [
-        ":ConversionPassIncGen",
-        ":IR",
-        ":Pass",
-        ":StandardOps",
-        ":Transforms",
-    ],
-)
cc_library(
    name = "SPIRVSerialization",
    srcs = glob(
@@ -2485,7 +2463,6 @@ cc_library(
        ":SCFTransforms",
        ":StandardOpsTransforms",
        ":StandardToSPIRVConversions",
-        ":StandardToStandard",
        ":Support",
        ":Transforms",
        ":VectorToLLVM",
@@ -2584,7 +2561,6 @@ cc_library(
        ":StandardOpsTransforms",
        ":StandardOpsTransformsPassIncGen",
        ":StandardToSPIRVConversions",
-        ":StandardToStandard",
        ":Transforms",
        ":TransformsPassIncGen",
        ":VectorOps",

@@ -106,7 +106,7 @@ cc_library(
        "@llvm-project//mlir:Pass",
        "@llvm-project//mlir:SideEffects",
        "@llvm-project//mlir:StandardOps",
-        "@llvm-project//mlir:StandardToStandard",
+        "@llvm-project//mlir:StandardOpsTransforms",
        "@llvm-project//mlir:TransformUtils",
        "@llvm-project//mlir:Transforms",
    ],