[ROCm] Adding no_rocm tag to XLA tests that fail on the ROCm platform

This commit is contained in:
Deven Desai 2020-01-02 21:42:54 +00:00
parent 88a1e3b399
commit 11b85f7473
4 changed files with 51 additions and 11 deletions

View File

@ -240,7 +240,10 @@ tf_xla_py_test(
size = "medium",
srcs = ["cholesky_op_test.py"],
python_version = "PY3",
-tags = ["optonly"],
+tags = [
+    "no_rocm",
+    "optonly",
+],
deps = [
":xla_test",
"//tensorflow/python:array_ops",
@ -297,7 +300,10 @@ tf_xla_py_test(
"cpu_ondemand",
],
python_version = "PY3",
-tags = ["optonly"],
+tags = [
+    "no_rocm",
+    "optonly",
+],
deps = [
":xla_test",
"//tensorflow/python:array_ops",
@ -382,7 +388,10 @@ tf_xla_py_test(
size = "medium",
srcs = ["concat_ops_test.py"],
python_version = "PY3",
-tags = ["many_xla_args"],
+tags = [
+    "many_xla_args",
+    "no_rocm",
+],
deps = [
":xla_test",
"//tensorflow/python:array_ops",
@ -568,7 +577,10 @@ tf_xla_py_test(
srcs = ["fft_test.py"],
python_version = "PY3",
shard_count = 6,
-tags = ["optonly"],
+tags = [
+    "no_rocm",
+    "optonly",
+],
deps = [
":xla_test",
"//tensorflow/python:array_ops",
@ -845,7 +857,10 @@ tf_xla_py_test(
srcs = ["unstack_test.py"],
python_version = "PY3",
shard_count = 5,
-tags = ["optonly"],
+tags = [
+    "no_rocm",
+    "optonly",
+],
deps = [
":xla_test",
"//tensorflow/python:array_ops",
@ -1292,6 +1307,7 @@ cuda_py_test(
size = "medium",
srcs = ["jit_test.py"],
shard_count = 5,
+tags = ["no_rocm"],
xla_enable_strict_auto_jit = False,
deps = [
":test_utils",
@ -1312,6 +1328,7 @@ cuda_py_test(
name = "dense_layer_test",
size = "medium",
srcs = ["dense_layer_test.py"],
+tags = ["no_rocm"],
xla_enable_strict_auto_jit = False,
deps = [
":test_utils",
@ -1396,6 +1413,7 @@ py_library(
cuda_py_test(
name = "lstm_test",
srcs = ["lstm_test.py"],
+tags = ["no_rocm"],
xla_enable_strict_auto_jit = False,
deps = [
":lstm",
@ -1498,6 +1516,7 @@ tf_xla_py_test(
srcs = ["conv_node_name_test.py"],
python_version = "PY3",
shard_count = 5,
+tags = ["no_rocm"],
deps = [
":xla_test",
"//tensorflow/python:array_ops",

View File

@ -1,6 +1,7 @@
"""Build rules for Tensorflow/XLA testing."""
load("@local_config_cuda//cuda:build_defs.bzl", "cuda_is_configured")
+load("@local_config_rocm//rocm:build_defs.bzl", "rocm_is_configured")
load("//tensorflow/compiler/tests:plugin.bzl", "plugins")
load(
"//tensorflow/core/platform:build_config_root.bzl",
@ -10,7 +11,7 @@ load(
def all_backends():
b = ["cpu"] + plugins.keys()
-if cuda_is_configured():
+if cuda_is_configured() or rocm_is_configured():
return b + ["gpu"]
else:
return b

View File

@ -21,7 +21,7 @@ package_group(
tf_cc_test(
name = "mlir_gpu_lhlo_gen_test",
srcs = ["mlir_gpu_lhlo_gen_test.cc"],
-tags = tf_cuda_tests_tags(),
+tags = tf_cuda_tests_tags() + ["no_rocm"],
deps = [
"//tensorflow/compiler/xla/service:mlir_gpu_plugin",
"//tensorflow/compiler/xla/service/mlir_gpu:mlir_irgen_test_base",

View File

@ -587,6 +587,7 @@ xla_test(
name = "conditional_test",
srcs = ["conditional_test.cc"],
shard_count = 2,
+tags = ["no_rocm"],
deps = [
":test_macros_header",
"//tensorflow/compiler/xla:xla_data_proto_cc",
@ -625,6 +626,7 @@ xla_test(
name = "scalar_computations_test",
srcs = ["scalar_computations_test.cc"],
shard_count = 32,
+tags = ["no_rocm"],
deps = [
":test_macros_header",
"//tensorflow/compiler/xla:literal",
@ -924,6 +926,7 @@ xla_test(
srcs = ["dot_operation_test.cc"],
shard_count = 20,
tags = [
+    "no_rocm",
"optonly",
],
deps = [
@ -957,6 +960,7 @@ xla_test(
backends = ["gpu"],
shard_count = 20,
tags = [
+    "no_rocm",
"optonly",
],
deps = [
@ -1019,7 +1023,10 @@ xla_test(
],
},
shard_count = 20,
-tags = ["optonly"],
+tags = [
+    "no_rocm",
+    "optonly",
+],
deps = [
":test_macros_header",
"//tensorflow/compiler/xla:array2d",
@ -1113,7 +1120,10 @@ xla_test(
timeout = "long",
srcs = ["convolution_test.cc"],
shard_count = 40,
-tags = ["optonly"],
+tags = [
+    "no_rocm",
+    "optonly",
+],
deps = CONVOLUTION_TEST_DEPS + [
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
@ -1130,7 +1140,10 @@ xla_test(
args = ["--xla_gpu_disable_autotune"],
backends = ["gpu"],
shard_count = 40,
-tags = ["optonly"],
+tags = [
+    "no_rocm",
+    "optonly",
+],
deps = CONVOLUTION_TEST_DEPS + [
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
@ -1144,6 +1157,7 @@ xla_test(
backend_args = {"gpu": ["--xla_backend_extra_options=xla_gpu_experimental_conv_disable_layout_heuristic"]},
backends = ["gpu"],
shard_count = 25,
+tags = ["no_rocm"],
deps = CONVOLUTION_TEST_DEPS + [
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
@ -1213,6 +1227,7 @@ xla_test(
"interpreter",
],
shard_count = 40,
+tags = ["no_rocm"],
deps = [
":client_library_test_base",
":hlo_test_base",
@ -1418,6 +1433,7 @@ xla_test(
srcs = ["reduce_test.cc"],
shard_count = 31,
tags = [
+    "no_rocm",
"optonly",
],
deps = [
@ -1497,6 +1513,7 @@ xla_test(
timeout = "long",
srcs = ["select_and_scatter_test.cc"],
tags = [
+    "no_rocm",
"optonly",
],
deps = [
@ -2543,7 +2560,10 @@ xla_test(
xla_test(
name = "cholesky_test",
srcs = ["cholesky_test.cc"],
-tags = ["optonly"],
+tags = [
+    "no_rocm",
+    "optonly",
+],
deps = [
":test_macros_header",
"//tensorflow/compiler/xla:array2d",