Remove cuda_is_configured() non-configurable macro.

Change uses to if_cuda_is_configured(), which returns a select() statement. if_gpu_is_configured() currently returns the concatenation of two different select() statements, because select() + select() [cannot](497ef110c3) have any common values in the two dicts. I will fix the affected uses in a separate change.
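For illustration, a sketch of the concatenation in question (the label `//foo:dep` is hypothetical):

```starlark
# if_gpu_is_configured(x) now expands to
#   if_cuda_is_configured(x) + if_rocm_is_configured(x)
# which concatenates two selects, roughly:
deps = (select({"//conditions:default": ["//foo:dep"]}) +
        select({"//conditions:default": ["//foo:dep"]}))
# Bazel rejects the concatenation when the two dicts contribute a common
# value, hence the follow-up change to affected call sites.
```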

PiperOrigin-RevId: 353049964
Change-Id: I222b7d49263ea9b06569208e00a180d6ff54d0a0
Christian Sigg 2021-01-21 10:33:10 -08:00 committed by TensorFlower Gardener
parent ab61e1eccc
commit 4379cc4cf6
7 changed files with 71 additions and 74 deletions

tensorflow/compiler/tests/build_defs.bzl

@@ -1,21 +1,14 @@
"""Build rules for Tensorflow/XLA testing."""
load("@local_config_cuda//cuda:build_defs.bzl", "cuda_is_configured")
load("@local_config_rocm//rocm:build_defs.bzl", "rocm_is_configured")
load("//tensorflow:tensorflow.bzl", "py_test")
load("//tensorflow/compiler/tests:plugin.bzl", "plugins")
load(
    "//tensorflow/core/platform:build_config_root.bzl",
    "tf_cuda_tests_tags",
    "tf_exec_properties",
)
load("//tensorflow:tensorflow.bzl", "py_test")
def all_backends():
    b = ["cpu"] + plugins.keys()
    if cuda_is_configured() or rocm_is_configured():
        return b + ["gpu"]
    else:
        return b
all_backends = ["cpu", "gpu"] + plugins.keys()
def tf_xla_py_test(
        name,
@@ -32,7 +25,7 @@ def tf_xla_py_test(
    """Generates py_test targets, one per XLA backend.
    This rule generates py_test() targets named name_backend, for each backend
    in all_backends(). The rule also generates a test suite named `name` that
    in all_backends. The rule also generates a test suite named `name` that
    tests all backends for the test.
    For example, the following rule generates test cases foo_test_cpu,
@@ -62,7 +55,7 @@ def tf_xla_py_test(
      **kwargs: keyword arguments passed onto the generated py_test() rules.
    """
    if enabled_backends == None:
        enabled_backends = all_backends()
        enabled_backends = all_backends
    if disabled_backends == None:
        disabled_backends = []
    if type(disabled_backends) != "list":
@@ -140,6 +133,6 @@ def tf_xla_py_test(
def generate_backend_suites(backends = []):
    """Generates per-backend test_suites that run all tests for a backend."""
    if not backends:
        backends = all_backends()
        backends = all_backends
    for backend in backends:
        native.test_suite(name = "%s_tests" % backend, tags = ["tf_xla_%s" % backend])
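For context, a minimal hypothetical invocation of the macro; with all_backends now a plain list, the gpu variant is always generated and presumably relies on tf_cuda_tests_tags() to keep it from running on machines without a GPU:

```starlark
# Hypothetical BUILD usage: generates foo_test_cpu and foo_test_gpu,
# plus a foo_test suite spanning both backends.
tf_xla_py_test(
    name = "foo_test",
    srcs = ["foo_test.py"],
)
```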

tensorflow/compiler/xla/tests/build_defs.bzl

@@ -1,33 +1,18 @@
"""Build rules for XLA testing."""
load("@local_config_cuda//cuda:build_defs.bzl", "cuda_is_configured")
load("@local_config_rocm//rocm:build_defs.bzl", "rocm_is_configured")
load("//tensorflow/compiler/xla/tests:plugin.bzl", "plugins")
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
load("//tensorflow/compiler/xla/tests:plugin.bzl", "plugins")
load(
    "//tensorflow/stream_executor:build_defs.bzl",
    "if_gpu_is_configured",
)
load(
    "//tensorflow/core/platform:build_config_root.bzl",
    "tf_cuda_tests_tags",
    "tf_gpu_tests_tags",
)
all_backends = ["cpu", "gpu"] + plugins.keys()
def filter_backends(backends):
    """Removes "gpu" from a backend list if CUDA or ROCm is not enabled.
    This allows us to simply hardcode lists including "gpu" here and in the
    BUILD file, without causing failures when CUDA or ROCm isn't enabled.
    Args:
      backends: A list of backends to filter.
    Returns:
      The filtered list of backends.
    """
    if cuda_is_configured() or rocm_is_configured():
        return backends
    else:
        return [backend for backend in backends if backend != "gpu"]
def xla_test(
        name,
        srcs,
@@ -132,7 +117,7 @@ def xla_test(
        deps = deps,
    )
    for backend in filter_backends(backends):
    for backend in backends:
        test_name = "%s_%s" % (name, backend)
        this_backend_tags = ["xla_%s" % backend]
        this_backend_copts = []
@@ -142,9 +127,9 @@ def xla_test(
            backend_deps = ["//tensorflow/compiler/xla/service:cpu_plugin"]
            backend_deps += ["//tensorflow/compiler/xla/tests:test_macros_cpu"]
        elif backend == "gpu":
            backend_deps = ["//tensorflow/compiler/xla/service:gpu_plugin"]
            backend_deps += ["//tensorflow/compiler/xla/tests:test_macros_gpu"]
            this_backend_tags += tf_cuda_tests_tags()
            backend_deps = if_gpu_is_configured(["//tensorflow/compiler/xla/service:gpu_plugin"])
            backend_deps += if_gpu_is_configured(["//tensorflow/compiler/xla/tests:test_macros_gpu"])
            this_backend_tags += tf_gpu_tests_tags()
        elif backend in plugins:
            backend_deps = []
            backend_deps += plugins[backend]["deps"]
@@ -219,7 +204,7 @@ def xla_test_library(
    if not backends:
        backends = all_backends
    for backend in filter_backends(backends):
    for backend in backends:
        this_backend_copts = []
        if backend in ["cpu", "gpu"]:
            backend_deps = ["//tensorflow/compiler/xla/tests:test_macros_%s" % backend]
@@ -242,7 +227,7 @@ def xla_test_library(
def generate_backend_suites(backends = []):
    if not backends:
        backends = all_backends
    for backend in filter_backends(backends):
    for backend in backends:
        native.test_suite(
            name = "%s_tests" % backend,
            tags = ["xla_%s" % backend, "-broken", "manual"],
@@ -251,7 +236,7 @@ def generate_backend_suites(backends = []):
def generate_backend_test_macros(backends = []):
    if not backends:
        backends = all_backends
    for backend in filter_backends(backends):
    for backend in backends:
        manifest = ""
        if backend in plugins:
            manifest = plugins[backend]["disabled_manifest"]
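With filter_backends() removed, the gpu variants always exist; their GPU-only deps are instead guarded by if_gpu_is_configured(). A hedged sketch of what the attribute resolves to when neither CUDA nor ROCm is configured:

```starlark
# Sketch (assumed semantics): both selects resolve to empty lists, so the
# gpu test target still builds, and tf_gpu_tests_tags() keeps it from
# running where no GPU is available.
backend_deps = if_gpu_is_configured(["//tensorflow/compiler/xla/service:gpu_plugin"])
# == select({"//conditions:default": []}) + select({"//conditions:default": []})
```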

tensorflow/core/kernels/mlir_generated/build_defs.bzl

@@ -4,7 +4,6 @@ load("@local_config_cuda//cuda:build_defs.bzl", "cuda_gpu_architectures")
load(
    "@local_config_rocm//rocm:build_defs.bzl",
    "rocm_gpu_architectures",
    "rocm_is_configured",
)
load("//tensorflow:tensorflow.bzl", "get_compatible_with_cloud")
load(
@@ -177,7 +176,7 @@ def gen_kernel_library(name, types, tile_size, tags = [], unroll_factors = None,
        name = "{name}_{type}_kernel_generator".format(name = name, type = type),
        mlir_op = "{name}_{type}.mlir".format(name = name, type = type),
        data_type = type,
        gpu_archs = rocm_gpu_architectures() if rocm_is_configured() else cuda_gpu_architectures(),
        gpu_archs = rocm_gpu_architectures() + cuda_gpu_architectures(),
        tile_size = tile_size,
        unroll_factors = unroll_factors,
        extra_args = extra_args,
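The conditional can become a concatenation because each *_gpu_architectures() macro expands to a configure-time literal, and at most one GPU backend is configured at a time (the assumption this relies on), so one operand is always the empty list:

```starlark
# Sketch of the configure-time expansion (architecture values hypothetical):
# CUDA build:  [] + ["sm_60", "sm_70"]  ->  ["sm_60", "sm_70"]
# ROCm build:  ["gfx906"] + []          ->  ["gfx906"]
gpu_archs = rocm_gpu_architectures() + cuda_gpu_architectures()
```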

tensorflow/stream_executor/build_defs.bzl

@@ -1,5 +1,5 @@
load("@local_config_cuda//cuda:build_defs.bzl", "cuda_is_configured")
load("@local_config_rocm//rocm:build_defs.bzl", "rocm_is_configured")
load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda_is_configured")
load("@local_config_rocm//rocm:build_defs.bzl", "if_rocm_is_configured")
def stream_executor_friends():
    return ["//tensorflow/..."]
@@ -18,9 +18,7 @@ def tf_additional_cudnn_plugin_deps():
# Returns whether any GPU backend is configured.
def if_gpu_is_configured(x):
    if cuda_is_configured() or rocm_is_configured():
        return x
    return []
    return if_cuda_is_configured(x) + if_rocm_is_configured(x)
def if_cuda_or_rocm(x):
    return if_gpu_is_configured(x)
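For context, a typical call site (the target and its GPU-only dependency are hypothetical); the returned selects concatenate onto a plain deps list:

```starlark
cc_library(
    name = "gpu_helpers",  # hypothetical target
    srcs = ["gpu_helpers.cc"],
    deps = ["//tensorflow/core:lib"] + if_gpu_is_configured([
        ":gpu_only_dep",  # hypothetical GPU-only dependency
    ]),
)
```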

tensorflow/stream_executor/rocm/BUILD

@@ -158,12 +158,20 @@ cc_library(
    deps = ["//tensorflow/stream_executor:platform"],
)
cc_library(
    name = "rocblas_if_static",
    deps = if_static([
        "@local_config_rocm//rocm:rocblas",
    ]),
)
cc_library(
    name = "rocblas_plugin",
    srcs = if_rocm_is_configured(["rocm_blas.cc"]),
    hdrs = if_rocm_is_configured(["rocm_blas.h"]),
    visibility = ["//visibility:public"],
    deps = if_rocm_is_configured([
        ":rocblas_if_static",
        ":rocm_gpu_executor",
        ":rocm_platform_id",
        "//third_party/eigen3",
@@ -184,18 +192,24 @@ cc_library(
        "//tensorflow/stream_executor/platform:dso_loader",
        "@com_google_absl//absl/strings",
        "@local_config_rocm//rocm:rocm_headers",
    ] + if_static([
        "@local_config_rocm//rocm:rocblas",
    ])),
    ]),
    alwayslink = True,
)
cc_library(
    name = "rocfft_if_static",
    deps = if_static([
        "@local_config_rocm//rocm:rocfft",
    ]),
)
cc_library(
    name = "rocfft_plugin",
    srcs = if_rocm_is_configured(["rocm_fft.cc"]),
    hdrs = if_rocm_is_configured(["rocm_fft.h"]),
    visibility = ["//visibility:public"],
    deps = if_rocm_is_configured([
        ":rocfft_if_static",
        ":rocm_platform_id",
        "//tensorflow/stream_executor:event",
        "//tensorflow/stream_executor:fft",
@@ -210,12 +224,17 @@ cc_library(
        "//tensorflow/stream_executor/platform",
        "//tensorflow/stream_executor/platform:dso_loader",
        "@local_config_rocm//rocm:rocm_headers",
    ] + if_static([
        "@local_config_rocm//rocm:rocfft",
    ])),
    ]),
    alwayslink = True,
)
cc_library(
    name = "miopen_if_static",
    deps = if_static([
        "@local_config_rocm//rocm:miopen",
    ]),
)
cc_library(
    name = "miopen_plugin",
    srcs = if_rocm_is_configured(["rocm_dnn.cc"]),
@@ -227,6 +246,7 @@ cc_library(
    ],
    visibility = ["//visibility:public"],
    deps = if_rocm_is_configured([
        ":miopen_if_static",
        ":rocm_diagnostics",
        ":rocm_driver",
        ":rocm_gpu_executor",
@@ -248,17 +268,23 @@ cc_library(
        "//tensorflow/stream_executor/platform:dso_loader",
        "@com_google_absl//absl/strings",
        "@local_config_rocm//rocm:rocm_headers",
    ] + if_static([
        "@local_config_rocm//rocm:miopen",
    ])),
    ]),
    alwayslink = True,
)
cc_library(
    name = "hiprand_if_static",
    deps = if_static([
        "@local_config_rocm//rocm:hiprand",
    ]),
)
cc_library(
    name = "rocrand_plugin",
    srcs = if_rocm_is_configured(["rocm_rng.cc"]),
    hdrs = if_rocm_is_configured([]),
    deps = if_rocm_is_configured([
        ":hiprand_if_static",
        ":rocm_gpu_executor",
        ":rocm_platform_id",
        "@local_config_rocm//rocm:rocm_headers",
@@ -273,26 +299,30 @@ cc_library(
        "//tensorflow/stream_executor/lib",
        "//tensorflow/stream_executor/platform",
        "//tensorflow/stream_executor/platform:dso_loader",
    ] + if_static([
        "@local_config_rocm//rocm:hiprand",
    ])),
    ]),
    alwayslink = True,
)
cc_library(
    name = "hipsparse_if_static",
    deps = if_static([
        "@local_config_rocm//rocm:hipsparse",
    ]),
)
cc_library(
    name = "hipsparse_wrapper",
    srcs = if_rocm_is_configured(["hipsparse_wrapper.h"]),
    hdrs = if_rocm_is_configured(["hipsparse_wrapper.h"]),
    deps = if_rocm_is_configured([
        ":hipsparse_if_static",
        ":rocm_gpu_executor",
        ":rocm_platform_id",
        "@local_config_rocm//rocm:rocm_headers",
        "//tensorflow/stream_executor/lib",
        "//tensorflow/stream_executor/platform",
        "//tensorflow/stream_executor/platform:dso_loader",
    ] + if_static([
        "@local_config_rocm//rocm:hiprand",
    ])),
    ]),
    alwayslink = True,
)
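The recurring edit above hoists each `+ if_static([...])` tail out of the if_rocm_is_configured() argument and into a dedicated *_if_static library. This is presumably required because if_rocm_is_configured() now returns a select(), and a select() branch must be a plain list; it cannot itself contain another select() such as the one if_static() produces. The pattern, sketched for a hypothetical library:

```starlark
# Before (no longer valid once if_rocm_is_configured() returns a select()):
#   deps = if_rocm_is_configured([...] + if_static(["@local_config_rocm//rocm:roclib"]))
# After: the if_static() select lives in its own target, referenced by label.
cc_library(
    name = "roclib_if_static",  # hypothetical name
    deps = if_static(["@local_config_rocm//rocm:roclib"]),
)
```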

third_party/gpus/cuda/build_defs.bzl.tpl

@@ -50,10 +50,6 @@ def cuda_default_copts():
        ["-O3"]
    )
def cuda_is_configured():
    """Returns true if CUDA was enabled during the configure process."""
    return %{cuda_is_configured}
def cuda_gpu_architectures():
    """Returns a list of supported GPU architectures."""
    return %{cuda_gpu_architectures}
@@ -64,7 +60,7 @@ def if_cuda_is_configured(x):
    Unlike if_cuda(), this does not require that we are building with
    --config=cuda. Used to allow non-CUDA code to depend on CUDA libraries.
    """
    if cuda_is_configured():
    if %{cuda_is_configured}:
        return select({"//conditions:default": x})
    return select({"//conditions:default": []})
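This is a template file: %{cuda_is_configured} is substituted with True or False at configure time, so the generated @local_config_cuda//cuda:build_defs.bzl contains a constant condition. A sketch of the generated function on a CUDA-enabled machine (assumed expansion):

```starlark
# Generated build_defs.bzl (sketch, CUDA configured):
def if_cuda_is_configured(x):
    if True:  # %{cuda_is_configured} after substitution
        return select({"//conditions:default": x})
    return select({"//conditions:default": []})
```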

third_party/gpus/rocm/build_defs.bzl.tpl

@@ -30,10 +30,6 @@ def rocm_copts(opts = []):
        ]),
    }) + if_rocm_is_configured(opts)
def rocm_is_configured():
    """Returns true if ROCm was enabled during the configure process."""
    return %{rocm_is_configured}
def rocm_gpu_architectures():
    """Returns a list of supported GPU architectures."""
    return %{rocm_gpu_architectures}
@@ -44,9 +40,9 @@ def if_rocm_is_configured(x):
    Unlike if_rocm(), this does not require that we are building with
    --config=rocm. Used to allow non-ROCm code to depend on ROCm libraries.
    """
    if rocm_is_configured():
        return x
    return []
    if %{rocm_is_configured}:
        return select({"//conditions:default": x})
    return select({"//conditions:default": []})
def rocm_library(copts = [], **kwargs):
    """Wrapper over cc_library which adds default ROCm options."""