Be more selective when disabling mlir_gpu_lhlo_gen_test.
PiperOrigin-RevId: 294653870
Change-Id: I6e0dcf432f630550e49b776390a738c013a1e891
parent dc5443ca68
commit 675799d105
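
The change is mechanical: instead of commenting out the whole tf_cc_test target in the BUILD file, only the individual test cases that still hit the fusion bug are skipped, using GoogleTest's DISABLED_ name prefix. A minimal sketch of that pattern follows; the suite and test names are placeholders for illustration, not from this commit:

#include "gtest/gtest.h"

// A test whose name starts with DISABLED_ still compiles and links, but the
// runner skips it by default and reports it as disabled.
TEST(ExampleSuite, DISABLED_KnownBroken) {
  FAIL() << "Skipped unless disabled tests are explicitly requested.";
}

// Other tests in the same binary keep running normally.
TEST(ExampleSuite, StillRuns) {
  EXPECT_EQ(2 + 2, 4);
}

Disabled tests can still be run on demand with --gtest_also_run_disabled_tests, which is why prefixing individual tests is preferable to deleting or fully commenting out the target.
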
@@ -18,14 +18,14 @@ package_group(
     ],
 )
 
-# tf_cc_test(
-#     name = "mlir_gpu_lhlo_gen_test",
-#     srcs = ["mlir_gpu_lhlo_gen_test.cc"],
-#     tags = tf_cuda_tests_tags() + ["no_rocm"],
-#     deps = [
-#         "//tensorflow/compiler/xla/service:gpu_plugin_mlir",
-#         "//tensorflow/compiler/xla/service/mlir_gpu:mlir_irgen_test_base",
-#         "//tensorflow/core:test_main",
-#         "//tensorflow/stream_executor/lib",
-#     ],
-# )
+tf_cc_test(
+    name = "mlir_gpu_lhlo_gen_test",
+    srcs = ["mlir_gpu_lhlo_gen_test.cc"],
+    tags = tf_cuda_tests_tags() + ["no_rocm"],
+    deps = [
+        "//tensorflow/compiler/xla/service:gpu_plugin_mlir",
+        "//tensorflow/compiler/xla/service/mlir_gpu:mlir_irgen_test_base",
+        "//tensorflow/core:test_main",
+        "//tensorflow/stream_executor/lib",
+    ],
+)
@@ -240,7 +240,8 @@ ENTRY %Add (x: f32[2,2], y: f32[2,2]) -> f32[2,2] {
                      LoweringStage::KERNEL);
 }
 
-TEST_F(LhloGenTest, AddMultiply) {
+// TODO(b/149302060) Reenable once fusion is fixed.
+TEST_F(LhloGenTest, DISABLED_AddMultiply) {
   CompileAndVerifyIr(R"(
 HloModule AddMultiply
 
@@ -265,7 +266,8 @@ ENTRY %AddMultiply (x: f32[2,2], y: f32[2,2], z: f32[2,2]) -> f32[2,2] {
 )");
 }
 
-TEST_F(LhloGenTest, IotaAddMultiply) {
+// TODO(b/149302060) Reenable once fusion is fixed.
+TEST_F(LhloGenTest, DISABLED_IotaAddMultiply) {
   CompileAndVerifyIr(R"(
 HloModule AddMultiply
 
@@ -315,44 +317,44 @@ ENTRY %AddMultiply (x: f32[2,2], y: f32[2,2], z: f32[2,2]) -> f32[2,2] {
 }
 
 // TODO(b/137624192): Reenable once we can fuse reductions.
-// TEST_F(LhloGenTest, FusedReduce) {
-//   CompileAndVerifyIr(R"(
-// HloModule FusedReduce
-//
-// %add (x: f32[], y: f32[]) -> f32[] {
-//   %x = f32[] parameter(0)
-//   %y = f32[] parameter(1)
-//   ROOT %add = f32[] add(f32[] %x, f32[] %y)
-// }
-//
-// %fused_computation (param: f32[100,10]) -> f32[10] {
-//   %param = f32[100,10] parameter(0)
-//   %constant = f32[] constant(0)
-//   ROOT %reduce = f32[10]{0} reduce(f32[100,10]{1,0} %param, f32[] %constant),
-//       dimensions={0}, to_apply=%add
-// }
-//
-// ENTRY %FusedReduce (x: f32[100,10]) -> f32[10] {
-//   %x = f32[100,10] parameter(0)
-//   ROOT %fusion = f32[10]{0} fusion(f32[100,10]{1,0} %x), kind=kInput,
-//       calls=%fused_computation
-// }
-// )",
-// R"(
-// ;CHECK: func @fusion(%[[ARG0:.*]]: [[TYPE:.*]], %[[RESULT:.*]]: [[RTYPE:.*]])
-// ;CHECK: "xla_lhlo.fusion"() ( {
-// ;CHECK: %[[REF0:.*]] = tensor_load %arg0 : [[TYPE]]
-// ;CHECK: %[[CT0:.*]] = xla_hlo.constant dense<0.000000e+00>
-// ;CHECK: %[[RED:.*]] = "xla_hlo.reduce"(%0, %1) ( {
-// ;CHECK: ^bb0(%[[BARG0:.*]]: [[ETYPE:.*]], %[[BARG1:.*]]: [[ETYPE]])
-// ;CHECK: %[[ADD:.*]] = xla_hlo.add %[[BARG0]], %[[BARG1]] : [[ETYPE]]
-// ;CHECK: "xla_hlo.return"(%[[ADD]])
-// ;CHECK: })
-// ;CHECK: tensor_store %[[RED]], %[[RESULT]] : [[RTYPE]]
-// ;CHECK: "xla_lhlo.terminator"()
-// ;CHECK-NEXT: })
-// )");
-// }
+TEST_F(LhloGenTest, DISABLED_FusedReduce) {
+  CompileAndVerifyIr(R"(
+HloModule FusedReduce
+
+%add (x: f32[], y: f32[]) -> f32[] {
+  %x = f32[] parameter(0)
+  %y = f32[] parameter(1)
+  ROOT %add = f32[] add(f32[] %x, f32[] %y)
+}
+
+%fused_computation (param: f32[100,10]) -> f32[10] {
+  %param = f32[100,10] parameter(0)
+  %constant = f32[] constant(0)
+  ROOT %reduce = f32[10]{0} reduce(f32[100,10]{1,0} %param, f32[] %constant),
+      dimensions={0}, to_apply=%add
+}
+
+ENTRY %FusedReduce (x: f32[100,10]) -> f32[10] {
+  %x = f32[100,10] parameter(0)
+  ROOT %fusion = f32[10]{0} fusion(f32[100,10]{1,0} %x), kind=kInput,
+      calls=%fused_computation
+}
+)",
+                     R"(
+;CHECK: func @fusion(%[[ARG0:.*]]: [[TYPE:.*]], %[[RESULT:.*]]: [[RTYPE:.*]])
+;CHECK: "xla_lhlo.fusion"() ( {
+;CHECK: %[[REF0:.*]] = tensor_load %arg0 : [[TYPE]]
+;CHECK: %[[CT0:.*]] = xla_hlo.constant dense<0.000000e+00>
+;CHECK: %[[RED:.*]] = "xla_hlo.reduce"(%0, %1) ( {
+;CHECK: ^bb0(%[[BARG0:.*]]: [[ETYPE:.*]], %[[BARG1:.*]]: [[ETYPE]])
+;CHECK: %[[ADD:.*]] = xla_hlo.add %[[BARG0]], %[[BARG1]] : [[ETYPE]]
+;CHECK: "xla_hlo.return"(%[[ADD]])
+;CHECK: })
+;CHECK: tensor_store %[[RED]], %[[RESULT]] : [[RTYPE]]
+;CHECK: "xla_lhlo.terminator"()
+;CHECK-NEXT: })
+)");
+}
 
 TEST_F(LhloGenTest, Broadcast) {
   CompileAndVerifyIr(R"(