Registered XLA dialect and fixed corresponding tests.
PiperOrigin-RevId: 269903871
parent a6d8ab779d
commit 3ef0d0d074

@@ -269,6 +269,7 @@ cc_library(
     deps = [
         ":hlo",
         ":type_to_shape",
+        ":xla_dialect_registration",
        "//tensorflow/compiler/mlir/tensorflow:error_util",
        "//tensorflow/compiler/xla:comparison_util",
        "//tensorflow/compiler/xla:literal_util",

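For context: the new `:xla_dialect_registration` dependency links in a translation unit whose only job is to register the xla_hlo dialect statically. A minimal sketch of such a file, assuming the global-registry API MLIR used at the time; the header path, namespace, and class name below are illustrative, not taken from this commit:

    // dialect_registration.cc (hypothetical). Static registration hooks the
    // dialect's custom parser and printer into every MLIRContext created
    // after load, which is what lets xla_hlo ops print in their short form.
    #include "mlir/IR/Dialect.h"
    #include "tensorflow/compiler/mlir/xla/ir/xla_ops.h"  // assumed header

    static mlir::DialectRegistration<mlir::XLA::XLADialect> xla_hlo_ops;
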
@@ -213,7 +213,7 @@ def HLO_ReduceOp: HLO_Op<"reduce", [
     I64ElementsAttr:$dimensions
   );

-  let results = (outs Variadic<HLO_Tensor>);
+  let results = (outs Variadic<HLO_TensorOrTuple>);

  // TODO(hinsu): Verify that the attached body arguments and results are
  // compatible with reduce op's operands.

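This widening appears to be what the variadic reduce test below exercises: an HLO reduce over multiple operand/init pairs yields a tuple, and once the dialect is registered its ops are actually verified, so a `(f32[], f32[])` reduce could no longer type-check against `Variadic<HLO_Tensor>` results.
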
@@ -5,7 +5,7 @@

 HloModule main.5

-// CHECK-LABEL: func @main(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>, %arg2: tensor<f32>, %arg3: tensor<f32>) -> tensor<4xf32> {
+// CHECK-LABEL: func @main
 ENTRY %foo.5 (Arg_0.1: f32[4], Arg_1.2: f32[4], Arg_2.3: f32[], Arg_3.4: f32[]) -> f32[4] {
   %Arg_0.1 = f32[4] parameter(0)
   %Arg_1.2 = f32[4] parameter(1)
@@ -13,16 +13,15 @@ ENTRY %foo.5 (Arg_0.1: f32[4], Arg_1.2: f32[4], Arg_2.3: f32[], Arg_3.4: f32[])
   %Arg_3.4 = f32[] parameter(3)

   // Add two tensors
-  // CHECK-NEXT: %0 = "xla_hlo.add"(%arg0, %arg1) {name = "add.3"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
+  // CHECK-NEXT: xla_hlo.add %arg0, %arg1
   %add.3 = f32[4] add(f32[4] %Arg_0.1, f32[4] %Arg_1.2)

   // Add two scalars
-  // CHECK-NEXT: %1 = "xla_hlo.add"(%arg2, %arg3) {name = "add.4"} : (tensor<f32>, tensor<f32>) -> tensor<f32>
+  // CHECK-NEXT: xla_hlo.add %arg2, %arg3
   %add.4 = f32[] add(f32[] %Arg_2.3, f32[] %Arg_3.4)

   // Add a tensor and scalar
-  // CHECK-NEXT: %2 = "xla_hlo.add"(%0, %1) {name = "add.5"} : (tensor<4xf32>, tensor<f32>) -> tensor<4xf32>
-  // CHECK-NEXT: return %2 : tensor<4xf32>
+  // CHECK-NEXT: "xla_hlo.add"(%0, %1)
   ROOT %add.5 = f32[4] add(f32[4] %add.3, f32[] %add.4)
 }

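The pattern above repeats through the rest of the tests: the generic `"xla_hlo.add"(...)` syntax is what MLIR prints for ops whose dialect is not registered, while a registered dialect supplies a custom assembly form such as `xla_hlo.add %arg0, %arg1 : tensor<4xf32>`. A sketch of the kind of shared printer behind that short form, written against present-day MLIR signatures rather than the exact ones from this era:

    #include "mlir/IR/OpImplementation.h"
    #include "mlir/IR/Operation.h"

    // Hypothetical helper: renders a binary op as
    //   xla_hlo.add %lhs, %rhs {name = "add.3"} : tensor<4xf32>
    // instead of the generic quoted form the old CHECK lines matched.
    static void printBinaryOp(mlir::Operation *op, mlir::OpAsmPrinter &p) {
      p << op->getName().getStringRef() << ' ';
      p.printOperands(op->getOperands());
      p.printOptionalAttrDict(op->getAttrs());
      p << " : " << op->getResult(0).getType();
    }
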
@@ -2,13 +2,12 @@

 HloModule main.5

-// CHECK-LABEL: func @main(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32> {
+// CHECK-LABEL: func @main
 ENTRY %foo.5 (Arg_0.1: f32[4], Arg_1.2: f32[4]) -> f32[4] {
   %Arg_0.1 = f32[4] parameter(0)
   %Arg_1.2 = f32[4] parameter(1)

-  // CHECK-NEXT: %0 = "xla_hlo.and"(%arg0, %arg1) {name = "and.3"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
-  // CHECK-NEXT: return %0 : tensor<4xf32>
+  // CHECK-NEXT: xla_hlo.and %arg0, %arg1
   ROOT %and.3 = f32[4] and(f32[4] %Arg_0.1, f32[4] %Arg_1.2)
 }

@@ -2,19 +2,14 @@

 HloModule main

-// CHECK-LABEL: func @main(%arg0: tensor<1x2xf32>) -> tensor<3x1x2xf32> {
+// CHECK-LABEL: func @main
 ENTRY %main {
   %Arg_0.1 = f32[1, 2] parameter(0)

-  // CHECK-NEXT: %0 = "xla_hlo.broadcast_in_dim"(%arg0) {broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>, name = "broadcast.2"} : (tensor<1x2xf32>) -> tensor<1x2x3xf32>
+  // CHECK-NEXT: "xla_hlo.broadcast_in_dim"(%arg0) {broadcast_dimensions = dense<[0, 1]> : tensor<2xi64>, name = "broadcast.2"} : (tensor<1x2xf32>) -> tensor<1x2x3xf32>
   %broadcast.2 = f32[1,2,3] broadcast(%Arg_0.1), dimensions={0,1}
-
-  // Degenerate broadcast
-  // CHECK-NEXT: %1 = "xla_hlo.broadcast_in_dim"(%arg0) {name = "broadcast.3"} : (tensor<1x2xf32>) -> tensor<3x2xf32>
   broadcast.3 = f32[3,2] broadcast(%Arg_0.1), dimensions={}
-
-  // CHECK-NEXT: %2 = "xla_hlo.broadcast_in_dim"(%arg0) {broadcast_dimensions = dense<[1, 2]> : tensor<2xi64>, name = "broadcast.4"} : (tensor<1x2xf32>) -> tensor<3x1x2xf32>
-  // CHECK-NEXT: return %2 : tensor<3x1x2xf32>
+  // CHECK-NEXT: "xla_hlo.broadcast_in_dim"(%arg0) {broadcast_dimensions = dense<[1, 2]> : tensor<2xi64>, name = "broadcast.4"} : (tensor<1x2xf32>) -> tensor<3x1x2xf32>
   ROOT broadcast.4 = f32[3,1,2] broadcast(%Arg_0.1), dimensions={1, 2}
 }

@@ -5,15 +5,12 @@ HloModule foo
 // CHECK-LABEL: func @call(%arg0: tensor<i64>) -> tensor<i64> {
 %call (arg_1: s64[]) -> s64[] {
   %arg_1 = s64[] parameter(0), metadata={op_name="HLO_Args"}
-  // CHECK-NEXT: %0 = "xla_hlo.add"(%arg0, %arg0) {name = "compare.2"} : (tensor<i64>, tensor<i64>) -> tensor<i64>
-  // CHECK-NEXT: return %0 : tensor<i64>
   ROOT %compare.2 = s64[] add(%arg_1, %arg_1), metadata={op_type="Less" op_name="Less"}
 }

-// CHECK-LABEL: func @main(%arg0: tensor<i64>) -> tensor<i64> {
+// CHECK-LABEL: func @main
 ENTRY %foo (arg0.1: s64[]) -> s64[] {
   %arg0.1 = s64[] parameter(0), metadata={op_name="HLO_Args"}
-  // CHECK-NEXT: %0 = call @call(%arg0) : (tensor<i64>) -> tensor<i64>
-  // CHECK-NEXT: return %0 : tensor<i64>
+  // CHECK-NEXT: call @call(%arg0) : (tensor<i64>) -> tensor<i64>
   ROOT %call.2 = s64[] call(%arg0.1), to_apply=%call
 }

@@ -3,14 +3,12 @@
 HloModule main.5

 // CHECK-LABEL: func @main(
-// CHECK-SAME: [[A0:%.+]]: tensor<f32>, [[A1:%.+]]: tensor<4xf32>, [[A2:%.+]]: tensor<f32>) -> tensor<4xf32> {
 ENTRY %foo.5 (Arg_0.1: f32[], Arg_1.2: f32[4], Arg_1.3: f32[]) -> f32[4] {
   %Arg_0.1 = f32[] parameter(0)
   %Arg_1.2 = f32[4] parameter(1)
   %Arg_2.3 = f32[] parameter(2)

-  // CHECK-NEXT: [[R0:%.+]] = "xla_hlo.clamp"([[A0]], [[A1]], [[A2]]) {name = "clamp.3"} : (tensor<f32>, tensor<4xf32>, tensor<f32>) -> tensor<4xf32>
-  // CHECK-NEXT: return [[R0]] : tensor<4xf32>
+  // CHECK-NEXT: "xla_hlo.clamp"(%arg0, %arg1, %arg2) {name = "clamp.3"} : (tensor<f32>, tensor<4xf32>, tensor<f32>) -> tensor<4xf32>
   ROOT %clamp.3 = f32[4] clamp(f32[] %Arg_0.1, f32[4] %Arg_1.2, f32[] %Arg_2.3)
 }

@@ -2,7 +2,7 @@

 HloModule tfcompile.7

-// CHECK-LABEL: func @main() -> tensor<2x2x1x1xf32> {
+// CHECK-LABEL: func @main
 ENTRY %tfcompile.7 {

   // Scalar/0D tensor constant

@@ -7,7 +7,7 @@ ENTRY %foo.5 (Arg_0.1: f32[4], Arg_1.2: f32[4]) -> f32[4] {
   %Arg_0.1 = f32[4] parameter(0)
   %Arg_1.2 = f32[4] parameter(1)

-  // CHECK-NEXT: %0 = "xla_hlo.div"(%arg0, %arg1) {name = "divide.3"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
+  // CHECK-NEXT: xla_hlo.div %arg0, %arg1 {name = "divide.3"} : tensor<4xf32>
   // CHECK-NEXT: return %0 : tensor<4xf32>
   ROOT %divide.3 = f32[4] divide(f32[4] %Arg_0.1, f32[4] %Arg_1.2)
 }

@@ -30,7 +30,7 @@ ENTRY %tfcompile.48 {
   // CHECK-NEXT: %5 = "xla_hlo.broadcast_in_dim"(%cst) {name = "broadcast.9"} : (tensor<f32>) -> tensor<300x1x5xf32>
   %broadcast.9 = f32[300,1,5] broadcast(%constant.8), dimensions={}

-  // CHECK-NEXT: %6 = "xla_hlo.mul"(%4, %5) {name = "multiply.31"} : (tensor<300x1x5xf32>, tensor<300x1x5xf32>) -> tensor<300x1x5xf32>
+  // CHECK-NEXT: %6 = xla_hlo.mul %4, %5 {name = "multiply.31"} : tensor<300x1x5xf32>
   %multiply.31 = f32[300,1,5] multiply(%broadcast.30, %broadcast.9)

   // CHECK-NEXT: %cst_0 = constant {name = "constant.32"} dense<0.000000e+00> : tensor<f32>
@@ -82,10 +82,10 @@ ENTRY %tfcompile.48 {
   // CHECK-NEXT: %17 = "xla_hlo.broadcast_in_dim"(%cst_4) {broadcast_dimensions = dense<1> : tensor<1xi64>, name = "broadcast.38"} : (tensor<5xf32>) -> tensor<300x5xf32>
   %broadcast.38 = f32[300,5] broadcast(%constant.37), dimensions={1}

-  // CHECK-NEXT: %18 = "xla_hlo.add"(%16, %17) {name = "add.39"} : (tensor<300x5xf32>, tensor<300x5xf32>) -> tensor<300x5xf32>
+  // CHECK-NEXT: %18 = xla_hlo.add %16, %17 {name = "add.39"} : tensor<300x5xf32>
   %add.39 = f32[300,5] add(%dot.36, %broadcast.38)

-  // CHECK-NEXT: %19 = "xla_hlo.max"(%10, %18) {name = "maximum.42"} : (tensor<300x5xf32>, tensor<300x5xf32>) -> tensor<300x5xf32>
+  // CHECK-NEXT: %19 = xla_hlo.max %10, %18 {name = "maximum.42"} : tensor<300x5xf32>
   %maximum.42 = f32[300,5] maximum(%broadcast.41, %add.39)

   // CHECK-NEXT: %20 = "xla_hlo.reshape"(%19) {name = "reshape.44"} : (tensor<300x5xf32>) -> tensor<300x1x5xf32>

@@ -7,7 +7,7 @@ ENTRY %foo.5 (Arg_0.1: f32[4], Arg_1.2: f32[4]) -> f32[4] {
   %Arg_0.1 = f32[4] parameter(0)
   %Arg_1.2 = f32[4] parameter(1)

-  // CHECK-NEXT: %0 = "xla_hlo.max"(%arg0, %arg1) {name = "maximum.3"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
+  // CHECK-NEXT: %0 = xla_hlo.max %arg0, %arg1 {name = "maximum.3"} : tensor<4xf32>
   // CHECK-NEXT: return %0 : tensor<4xf32>
   ROOT %maximum.3 = f32[4] maximum(f32[4] %Arg_0.1, f32[4] %Arg_1.2)
 }

@@ -7,7 +7,7 @@ ENTRY %foo.5 (Arg_0.1: f32[4], Arg_1.2: f32[4]) -> f32[4] {
   %Arg_0.1 = f32[4] parameter(0)
   %Arg_1.2 = f32[4] parameter(1)

-  // CHECK-NEXT: %0 = "xla_hlo.min"(%arg0, %arg1) {name = "minimum.3"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
+  // CHECK-NEXT: %0 = xla_hlo.min %arg0, %arg1 {name = "minimum.3"} : tensor<4xf32>
   // CHECK-NEXT: return %0 : tensor<4xf32>
   ROOT %minimum.3 = f32[4] minimum(f32[4] %Arg_0.1, f32[4] %Arg_1.2)
 }

@@ -7,8 +7,7 @@ ENTRY %foo.5 (Arg_0.1: f32[4], Arg_1.2: f32[4]) -> f32[4] {
   %Arg_0.1 = f32[4] parameter(0)
   %Arg_1.2 = f32[4] parameter(1)

-  // CHECK-NEXT: %0 = "xla_hlo.mul"(%arg0, %arg1) {name = "multiply.3"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
-  // CHECK-NEXT: return %0 : tensor<4xf32>
+  // CHECK-NEXT: %0 = xla_hlo.mul %arg0, %arg1 {name = "multiply.3"} : tensor<4xf32>
   ROOT %multiply.3 = f32[4] multiply(f32[4] %Arg_0.1, f32[4] %Arg_1.2)
 }

@@ -35,27 +35,27 @@ ENTRY %foo.5 (Arg_0.1: f32[4, 4], Arg_1.2: f32[4], Arg_2.3: f32[]) -> ((f32[], f
   %Arg_2.3 = f32[] parameter(2)

   // CHECK: "xla_hlo.reduce"([[ARG0]], [[ARG0]], [[ARG2]], [[ARG2]])
-  // CHECK: xla_hlo.add{{.*}}(tensor<f32>, tensor<f32>) -> tensor<f32>
-  // CHECK: xla_hlo.add{{.*}}(tensor<f32>, tensor<f32>) -> tensor<f32>
+  // CHECK: xla_hlo.add{{.*}} : tensor<f32>
+  // CHECK: xla_hlo.add{{.*}} : tensor<f32>
   // CHECK: {dimensions = dense<[0, 1]> : tensor<2xi64>} : (tensor<4x4xf32>, tensor<4x4xf32>, tensor<f32>, tensor<f32>) -> tuple<tensor<f32>, tensor<f32>>
   %reduce.1 = (f32[], f32[]) reduce(%Arg_0.1, %Arg_0.1, %Arg_2.3, %Arg_2.3), dimensions={0, 1}, to_apply=%reduce_helper.1

   // CHECK: [[VAL2:%.*]] = "xla_hlo.reduce"([[ARG0]], [[ARG2]])
-  // CHECK: xla_hlo.add{{.*}}(tensor<f32>, tensor<f32>) -> tensor<f32>
+  // CHECK: xla_hlo.add{{.*}} : tensor<f32>
   // CHECK: {dimensions = dense<[0, 1]> : tensor<2xi64>} : (tensor<4x4xf32>, tensor<f32>) -> tensor<f32>
   %reduce.3 = f32[] reduce(%Arg_0.1, %Arg_2.3), dimensions={0, 1}, to_apply=%reduce_helper.3

   // CHECK: [[VAL3:%.*]] = "xla_hlo.reduce"([[ARG0]], [[ARG1]])
-  // CHECK: xla_hlo.add{{.*}}(tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
+  // CHECK: xla_hlo.add{{.*}} : tensor<4xf32>
   // CHECK: {dimensions = dense<0> : tensor<1xi64>} : (tensor<4x4xf32>, tensor<4xf32>) -> tensor<4xf32>
   %reduce.2 = f32[4] reduce(%Arg_0.1, %Arg_1.2), dimensions={0}, to_apply=%reduce_helper.2

   // CHECK: [[VAL4:%.*]] = "xla_hlo.reduce"([[VAL3]], [[ARG2]])
-  // CHECK: xla_hlo.add{{.*}}(tensor<f32>, tensor<f32>) -> tensor<f32>
+  // CHECK: xla_hlo.add{{.*}} : tensor<f32>
   // CHECK: {dimensions = dense<0> : tensor<1xi64>} : (tensor<4xf32>, tensor<f32>) -> tensor<f32>
   %reduce.4 = f32[] reduce(%reduce.2, %Arg_2.3), dimensions={0}, to_apply=%reduce_helper.3

-  // CHECK: %4 = "xla_hlo.sub"([[VAL2]], [[VAL4]]) {name = "sub.5"} : (tensor<f32>, tensor<f32>) -> tensor<f32>
+  // CHECK: %4 = xla_hlo.sub [[VAL2]], [[VAL4]] {name = "sub.5"} : tensor<f32>
   %sub.5 = f32[] subtract(%reduce.3, %reduce.4)

   ROOT %tuple.6 = ((f32[], f32[]), f32[]) tuple(%reduce.1, %sub.5)

@@ -139,7 +139,7 @@ dynamic_parameter_binding {
 }

 # CHECK-LABEL: func @main(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<f32> {
-# CHECK-NEXT: %0 = "xla_hlo.add"(%arg0, %arg1) {name = "add.3"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
+# CHECK-NEXT: %0 = xla_hlo.add %arg0, %arg1 {name = "add.3"} : tensor<4xf32>
 # TODO(b/129709049) consider making this default precision config inferred.
 # CHECK-NEXT: %1 = "xla_hlo.dot"(%0, %arg1) {name = "dot.4", precision_config = ["DEFAULT", "DEFAULT"]} : (tensor<4xf32>, tensor<4xf32>) -> tensor<f32>
 # CHECK-NEXT: return %1 : tensor<f32>

@@ -7,7 +7,7 @@ ENTRY %main.5 (Arg_0.1: f32[4], Arg_1.2: f32[4]) -> f32[] {
   %Arg_0.1 = f32[4]{0} parameter(0)
   %Arg_1.2 = f32[4]{0} parameter(1)

-  // CHECK-NEXT: %0 = "xla_hlo.add"(%arg0, %arg1) {name = "add.3"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
+  // CHECK-NEXT: xla_hlo.add %arg0, %arg1 {name = "add.3"} : tensor<4xf32>
   %add.3 = f32[4]{0} add(f32[4]{0} %Arg_0.1, f32[4]{0} %Arg_1.2)

   // TODO(b/129709049) consider making this default precision config inferred.

@@ -2,13 +2,12 @@

 HloModule main.5

-// CHECK-LABEL: func @main(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32> {
+// CHECK-LABEL: func @main
 ENTRY %foo.5 (Arg_0.1: f32[4], Arg_1.2: f32[4]) -> f32[4] {
   %Arg_0.1 = f32[4] parameter(0)
   %Arg_1.2 = f32[4] parameter(1)

-  // CHECK-NEXT: %0 = "xla_hlo.sub"(%arg0, %arg1) {name = "subtract.3"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
-  // CHECK-NEXT: return %0 : tensor<4xf32>
+  // CHECK-NEXT: xla_hlo.sub %arg0, %arg1 {name = "subtract.3"} : tensor<4xf32>
   ROOT %subtract.3 = f32[4] subtract(f32[4] %Arg_0.1, f32[4] %Arg_1.2)
 }

@@ -1,11 +0,0 @@
-// RUN: tf-mlir-translate -hlo-text-to-mlir-hlo %s -o - | FileCheck %s
-
-HloModule main
-
-// CHECK-LABEL: func @main(%arg0: tensor<1xf32>) -> tensor<1xf32> {
-ENTRY %main (Arg_0.1: f32[1, 4], Arg_1.2: f32[4, 1]) -> f32[1] {
-  %Arg_0.1 = f32[1] parameter(0)
-
-  // CHECK-NEXT: %0 = "xla_hlo.unknown"(%arg0, %arg0) {name = "add-dependency.2"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
-  ROOT add-dependency.2 = f32[1] add-dependency(Arg_0.1, Arg_0.1)
-}

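The deleted test above worked by importing the unsupported `add-dependency` instruction as a placeholder `"xla_hlo.unknown"` op. With the dialect registered, an op name that is not actually defined in the dialect no longer round-trips through parsing and verification, which is presumably why the test was removed rather than updated.
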
@@ -2,19 +2,15 @@

 HloModule foo

-// CHECK-LABEL: func @cond(%arg0: tensor<i64>) -> tensor<i1> {
+// CHECK-LABEL: func @cond
 %cond (arg_1: s64[]) -> pred[] {
   %arg_1 = s64[] parameter(0), metadata={op_name="HLO_Args"}
-  // CHECK-NEXT: %0 = "xla_hlo.compare"(%arg0, %arg0) {comparison_direction = "LT", name = "compare.2"} : (tensor<i64>, tensor<i64>) -> tensor<i1>
-  // CHECK-NEXT: return %0 : tensor<i1>
   ROOT %compare.2 = pred[] compare(%arg_1, %arg_1), direction=LT, metadata={op_type="Less" op_name="Less"}
 }

-// CHECK-LABEL: func @loop(%arg0: tensor<i64>) -> tensor<i64> {
+// CHECK-LABEL: func @loop
 %loop (arg_1: s64[]) -> s64[] {
   %arg_1 = s64[] parameter(0), metadata={op_name="HLO_Args"}
-  // CHECK-NEXT: %0 = "xla_hlo.add"(%arg0, %arg0) {name = "compare.0"} : (tensor<i64>, tensor<i64>) -> tensor<i64>
-  // CHECK-NEXT: return %0 : tensor<i64>
   ROOT %compare.2 = s64[] add(%arg_1, %arg_1), metadata={op_type="Less" op_name="Less"}
 }