Remove the non-configurable cuda_is_configured() macro.

Change uses to if_cuda_is_configured(), which returns a select() statement. It currently returns two different select statements, because select() + select() cannot (see 497ef110c3) have any common values in the two dicts. I will fix uses of this in a separate change.
PiperOrigin-RevId: 353049964
Change-Id: I222b7d49263ea9b06569208e00a180d6ff54d0a0
commit 4379cc4cf6 (parent ab61e1eccc)

Changed paths: tensorflow/compiler, tensorflow/core/kernels/mlir_generated, tensorflow/stream_executor, third_party/gpus
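
For context, a minimal sketch of the two forms, written in the template (.tpl) style used by the files below; the %{...} placeholder is substituted with True or False by ./configure, and the condition keys are simplified:

# Before: a plain macro, evaluated at loading time; not configurable.
def cuda_is_configured():
    return %{cuda_is_configured}  # becomes True or False at configure time

# After: callers get a select(), never a bool or a bare list.
def if_cuda_is_configured(x):
    if %{cuda_is_configured}:
        return select({"//conditions:default": x})
    return select({"//conditions:default": []})

Because the result is a select(), it can only be combined with other select()s or plain lists via +. That is why if_gpu_is_configured() below becomes if_cuda_is_configured(x) + if_rocm_is_configured(x): two select()s concatenated, rather than one merged dict.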
tensorflow/compiler/tests/build_defs.bzl

@@ -1,21 +1,14 @@
 """Build rules for Tensorflow/XLA testing."""
 
-load("@local_config_cuda//cuda:build_defs.bzl", "cuda_is_configured")
-load("@local_config_rocm//rocm:build_defs.bzl", "rocm_is_configured")
+load("//tensorflow:tensorflow.bzl", "py_test")
 load("//tensorflow/compiler/tests:plugin.bzl", "plugins")
 load(
     "//tensorflow/core/platform:build_config_root.bzl",
     "tf_cuda_tests_tags",
     "tf_exec_properties",
 )
-load("//tensorflow:tensorflow.bzl", "py_test")
 
-def all_backends():
-    b = ["cpu"] + plugins.keys()
-    if cuda_is_configured() or rocm_is_configured():
-        return b + ["gpu"]
-    else:
-        return b
+all_backends = ["cpu", "gpu"] + plugins.keys()
 
 def tf_xla_py_test(
     name,
@@ -32,7 +25,7 @@ def tf_xla_py_test(
     """Generates py_test targets, one per XLA backend.
 
     This rule generates py_test() targets named name_backend, for each backend
-    in all_backends(). The rule also generates a test suite with named `name` that
+    in all_backends. The rule also generates a test suite with named `name` that
     tests all backends for the test.
 
     For example, the following rule generates test cases foo_test_cpu,
@@ -62,7 +55,7 @@ def tf_xla_py_test(
       **kwargs: keyword arguments passed onto the generated py_test() rules.
     """
     if enabled_backends == None:
-        enabled_backends = all_backends()
+        enabled_backends = all_backends
     if disabled_backends == None:
         disabled_backends = []
     if type(disabled_backends) != "list":
@@ -140,6 +133,6 @@ def tf_xla_py_test(
 def generate_backend_suites(backends = []):
     """Generates per-backend test_suites that run all tests for a backend."""
     if not backends:
-        backends = all_backends()
+        backends = all_backends
     for backend in backends:
         native.test_suite(name = "%s_tests" % backend, tags = ["tf_xla_%s" % backend])
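
With all_backends now a plain list, "gpu" targets are always declared; whether they can actually build is decided by the select()s inside their deps rather than at loading time. Call sites should look unchanged; a hypothetical BUILD usage (names and load path illustrative), matching the docstring above:

load("//tensorflow/compiler/tests:build_defs.bzl", "tf_xla_py_test")

# Generates foo_test_cpu, foo_test_gpu (and one target per plugin backend),
# plus a foo_test suite that covers all of them.
tf_xla_py_test(
    name = "foo_test",
    srcs = ["foo_test.py"],
)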
tensorflow/compiler/xla/tests/build_defs.bzl

@@ -1,33 +1,18 @@
 """Build rules for XLA testing."""
 
-load("@local_config_cuda//cuda:build_defs.bzl", "cuda_is_configured")
-load("@local_config_rocm//rocm:build_defs.bzl", "rocm_is_configured")
-load("//tensorflow/compiler/xla/tests:plugin.bzl", "plugins")
 load("//tensorflow:tensorflow.bzl", "tf_cc_test")
+load("//tensorflow/compiler/xla/tests:plugin.bzl", "plugins")
+load(
+    "//tensorflow/stream_executor:build_defs.bzl",
+    "if_gpu_is_configured",
+)
 load(
     "//tensorflow/core/platform:build_config_root.bzl",
-    "tf_cuda_tests_tags",
+    "tf_gpu_tests_tags",
 )
 
 all_backends = ["cpu", "gpu"] + plugins.keys()
 
-def filter_backends(backends):
-    """Removes "gpu" from a backend list if CUDA or ROCm is not enabled.
-
-    This allows us to simply hardcode lists including "gpu" here and in the
-    BUILD file, without causing failures when CUDA or ROCm isn't enabled.'
-
-    Args:
-      backends: A list of backends to filter.
-
-    Returns:
-      The filtered list of backends.
-    """
-    if cuda_is_configured() or rocm_is_configured():
-        return backends
-    else:
-        return [backend for backend in backends if backend != "gpu"]
-
 def xla_test(
     name,
     srcs,
@@ -132,7 +117,7 @@ def xla_test(
         deps = deps,
     )
 
-    for backend in filter_backends(backends):
+    for backend in backends:
         test_name = "%s_%s" % (name, backend)
         this_backend_tags = ["xla_%s" % backend]
         this_backend_copts = []
@@ -142,9 +127,9 @@ def xla_test(
             backend_deps = ["//tensorflow/compiler/xla/service:cpu_plugin"]
            backend_deps += ["//tensorflow/compiler/xla/tests:test_macros_cpu"]
         elif backend == "gpu":
-            backend_deps = ["//tensorflow/compiler/xla/service:gpu_plugin"]
-            backend_deps += ["//tensorflow/compiler/xla/tests:test_macros_gpu"]
-            this_backend_tags += tf_cuda_tests_tags()
+            backend_deps = if_gpu_is_configured(["//tensorflow/compiler/xla/service:gpu_plugin"])
+            backend_deps += if_gpu_is_configured(["//tensorflow/compiler/xla/tests:test_macros_gpu"])
+            this_backend_tags += tf_gpu_tests_tags()
         elif backend in plugins:
             backend_deps = []
             backend_deps += plugins[backend]["deps"]
@@ -219,7 +204,7 @@ def xla_test_library(
     if not backends:
         backends = all_backends
 
-    for backend in filter_backends(backends):
+    for backend in backends:
         this_backend_copts = []
         if backend in ["cpu", "gpu"]:
             backend_deps = ["//tensorflow/compiler/xla/tests:test_macros_%s" % backend]
@@ -242,7 +227,7 @@ def xla_test_library(
 def generate_backend_suites(backends = []):
     if not backends:
         backends = all_backends
-    for backend in filter_backends(backends):
+    for backend in backends:
         native.test_suite(
             name = "%s_tests" % backend,
             tags = ["xla_%s" % backend, "-broken", "manual"],
@@ -251,7 +236,7 @@ def generate_backend_suites(backends = []):
 def generate_backend_test_macros(backends = []):
     if not backends:
         backends = all_backends
-    for backend in filter_backends(backends):
+    for backend in backends:
         manifest = ""
         if backend in plugins:
             manifest = plugins[backend]["disabled_manifest"]
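
Routing the gpu backend's deps through if_gpu_is_configured() means that on a host with neither CUDA nor ROCm configured the select()s simply resolve to empty lists, instead of "gpu" being filtered out of `backends` up front. Roughly, as an illustrative expansion rather than literal Bazel output:

# With no GPU toolchain configured, the gpu case becomes (sketch):
#   backend_deps = select({"//conditions:default": []})   # CUDA half
#                + select({"//conditions:default": []})   # ROCm half
# The gpu test target still exists; it just carries no GPU plugin deps.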
tensorflow/core/kernels/mlir_generated/build_defs.bzl

@@ -4,7 +4,6 @@ load("@local_config_cuda//cuda:build_defs.bzl", "cuda_gpu_architectures")
 load(
     "@local_config_rocm//rocm:build_defs.bzl",
     "rocm_gpu_architectures",
-    "rocm_is_configured",
 )
 load("//tensorflow:tensorflow.bzl", "get_compatible_with_cloud")
 load(
@@ -177,7 +176,7 @@ def gen_kernel_library(name, types, tile_size, tags = [], unroll_factors = None,
         name = "{name}_{type}_kernel_generator".format(name = name, type = type),
         mlir_op = "{name}_{type}.mlir".format(name = name, type = type),
         data_type = type,
-        gpu_archs = rocm_gpu_architectures() if rocm_is_configured() else cuda_gpu_architectures(),
+        gpu_archs = rocm_gpu_architectures() + cuda_gpu_architectures(),
         tile_size = tile_size,
         unroll_factors = unroll_factors,
         extra_args = extra_args,
tensorflow/stream_executor/build_defs.bzl

@@ -1,5 +1,5 @@
-load("@local_config_cuda//cuda:build_defs.bzl", "cuda_is_configured")
-load("@local_config_rocm//rocm:build_defs.bzl", "rocm_is_configured")
+load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda_is_configured")
+load("@local_config_rocm//rocm:build_defs.bzl", "if_rocm_is_configured")
 
 def stream_executor_friends():
     return ["//tensorflow/..."]
@@ -18,9 +18,7 @@ def tf_additional_cudnn_plugin_deps():
 
 # Returns whether any GPU backend is configuered.
 def if_gpu_is_configured(x):
-    if cuda_is_configured() or rocm_is_configured():
-        return x
-    return []
+    return if_cuda_is_configured(x) + if_rocm_is_configured(x)
 
 def if_cuda_or_rocm(x):
     return if_gpu_is_configured(x)
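
This is the concatenation the commit message refers to: if_gpu_is_configured(x) now yields two select()s added together, one per backend. What a CUDA-only configuration would evaluate to, as a sketch:

# if_gpu_is_configured(["dep"]) on a CUDA-only machine (illustrative):
#     select({"//conditions:default": ["dep"]})   # from if_cuda_is_configured
#   + select({"//conditions:default": []})        # from if_rocm_is_configured
# Bazel concatenates the two select()s; their dicts stay separate rather
# than being merged, hence "two different select statements".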
tensorflow/stream_executor/rocm/BUILD

@@ -158,12 +158,20 @@ cc_library(
     deps = ["//tensorflow/stream_executor:platform"],
 )
 
+cc_library(
+    name = "rocblas_if_static",
+    deps = if_static([
+        "@local_config_rocm//rocm:rocblas",
+    ]),
+)
+
 cc_library(
     name = "rocblas_plugin",
     srcs = if_rocm_is_configured(["rocm_blas.cc"]),
     hdrs = if_rocm_is_configured(["rocm_blas.h"]),
     visibility = ["//visibility:public"],
     deps = if_rocm_is_configured([
+        ":rocblas_if_static",
         ":rocm_gpu_executor",
         ":rocm_platform_id",
         "//third_party/eigen3",
@@ -184,18 +192,24 @@ cc_library(
         "//tensorflow/stream_executor/platform:dso_loader",
         "@com_google_absl//absl/strings",
         "@local_config_rocm//rocm:rocm_headers",
-    ] + if_static([
-        "@local_config_rocm//rocm:rocblas",
-    ])),
+    ]),
     alwayslink = True,
 )
 
+cc_library(
+    name = "rocfft_if_static",
+    deps = if_static([
+        "@local_config_rocm//rocm:rocfft",
+    ]),
+)
+
 cc_library(
     name = "rocfft_plugin",
     srcs = if_rocm_is_configured(["rocm_fft.cc"]),
     hdrs = if_rocm_is_configured(["rocm_fft.h"]),
     visibility = ["//visibility:public"],
     deps = if_rocm_is_configured([
+        ":rocfft_if_static",
         ":rocm_platform_id",
         "//tensorflow/stream_executor:event",
         "//tensorflow/stream_executor:fft",
@@ -210,12 +224,17 @@ cc_library(
         "//tensorflow/stream_executor/platform",
         "//tensorflow/stream_executor/platform:dso_loader",
         "@local_config_rocm//rocm:rocm_headers",
-    ] + if_static([
-        "@local_config_rocm//rocm:rocfft",
-    ])),
+    ]),
     alwayslink = True,
 )
 
+cc_library(
+    name = "miopen_if_static",
+    deps = if_static([
+        "@local_config_rocm//rocm:miopen",
+    ]),
+)
+
 cc_library(
     name = "miopen_plugin",
     srcs = if_rocm_is_configured(["rocm_dnn.cc"]),
@@ -227,6 +246,7 @@ cc_library(
     ],
     visibility = ["//visibility:public"],
     deps = if_rocm_is_configured([
+        ":miopen_if_static",
         ":rocm_diagnostics",
         ":rocm_driver",
         ":rocm_gpu_executor",
@@ -248,17 +268,23 @@ cc_library(
         "//tensorflow/stream_executor/platform:dso_loader",
         "@com_google_absl//absl/strings",
         "@local_config_rocm//rocm:rocm_headers",
-    ] + if_static([
-        "@local_config_rocm//rocm:miopen",
-    ])),
+    ]),
     alwayslink = True,
 )
 
+cc_library(
+    name = "hiprand_if_static",
+    deps = if_static([
+        "@local_config_rocm//rocm:hiprand",
+    ]),
+)
+
 cc_library(
     name = "rocrand_plugin",
     srcs = if_rocm_is_configured(["rocm_rng.cc"]),
     hdrs = if_rocm_is_configured([]),
     deps = if_rocm_is_configured([
+        ":hiprand_if_static",
         ":rocm_gpu_executor",
         ":rocm_platform_id",
         "@local_config_rocm//rocm:rocm_headers",
@@ -273,26 +299,30 @@ cc_library(
         "//tensorflow/stream_executor/lib",
         "//tensorflow/stream_executor/platform",
         "//tensorflow/stream_executor/platform:dso_loader",
-    ] + if_static([
-        "@local_config_rocm//rocm:hiprand",
-    ])),
+    ]),
    alwayslink = True,
 )
 
+cc_library(
+    name = "hipsparse_if_static",
+    deps = if_static([
+        "@local_config_rocm//rocm:hipsparse",
+    ]),
+)
+
 cc_library(
     name = "hipsparse_wrapper",
     srcs = if_rocm_is_configured(["hipsparse_wrapper.h"]),
     hdrs = if_rocm_is_configured(["hipsparse_wrapper.h"]),
     deps = if_rocm_is_configured([
+        ":hipsparse_if_static",
         ":rocm_gpu_executor",
         ":rocm_platform_id",
         "@local_config_rocm//rocm:rocm_headers",
         "//tensorflow/stream_executor/lib",
         "//tensorflow/stream_executor/platform",
         "//tensorflow/stream_executor/platform:dso_loader",
-    ] + if_static([
-        "@local_config_rocm//rocm:hiprand",
-    ])),
+    ]),
     alwayslink = True,
 )
 
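
The recurring edit in this BUILD file replaces `deps = if_rocm_is_configured([...] + if_static([...]))` with a small intermediate library per static-only dependency. Presumably this is forced by the new return type: if_rocm_is_configured() now wraps its argument in a select(), and a select() dict value must be a plain list, so the if_static() select() can no longer be summed inside it. The pattern, abstracted with hypothetical names:

# Hypothetical sketch of the pattern; "somelib" is not a real target.
cc_library(
    name = "somelib_if_static",  # empty unless this is a static build
    deps = if_static(["@local_config_rocm//rocm:somelib"]),
)

cc_library(
    name = "somelib_plugin",
    srcs = if_rocm_is_configured(["somelib_plugin.cc"]),
    deps = if_rocm_is_configured([
        ":somelib_if_static",  # plain label; the select() lives in the wrapper
    ]),
)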
third_party/gpus/cuda/build_defs.bzl.tpl (vendored, 6 lines changed)

@@ -50,10 +50,6 @@ def cuda_default_copts():
         ["-O3"]
     )
 
-def cuda_is_configured():
-    """Returns true if CUDA was enabled during the configure process."""
-    return %{cuda_is_configured}
-
 def cuda_gpu_architectures():
     """Returns a list of supported GPU architectures."""
     return %{cuda_gpu_architectures}
@@ -64,7 +60,7 @@ def if_cuda_is_configured(x):
     Unlike if_cuda(), this does not require that we are building with
     --config=cuda. Used to allow non-CUDA code to depend on CUDA libraries.
     """
-    if cuda_is_configured():
+    if %{cuda_is_configured}:
         return select({"//conditions:default": x})
     return select({"//conditions:default": []})
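
The .tpl files are templates: ./configure substitutes %{cuda_is_configured} with True or False before the file is ever loaded, so the branch is fixed at configure time. On a CUDA-enabled machine the generated function would read roughly (illustrative expansion, docstring omitted):

def if_cuda_is_configured(x):
    # %{cuda_is_configured} was substituted with True at configure time.
    if True:
        return select({"//conditions:default": x})
    return select({"//conditions:default": []})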
third_party/gpus/rocm/build_defs.bzl.tpl (vendored, 10 lines changed)

@@ -30,10 +30,6 @@ def rocm_copts(opts = []):
         ]),
     }) + if_rocm_is_configured(opts)
 
-def rocm_is_configured():
-    """Returns true if ROCm was enabled during the configure process."""
-    return %{rocm_is_configured}
-
 def rocm_gpu_architectures():
     """Returns a list of supported GPU architectures."""
     return %{rocm_gpu_architectures}
@@ -44,9 +40,9 @@ def if_rocm_is_configured(x):
     Unlike if_rocm(), this does not require that we are building with
     --config=rocm. Used to allow non-ROCm code to depend on ROCm libraries.
     """
-    if rocm_is_configured():
-        return x
-    return []
+    if %{rocm_is_configured}:
+        return select({"//conditions:default": x})
+    return select({"//conditions:default": []})
 
 def rocm_library(copts = [], **kwargs):
     """Wrapper over cc_library which adds default ROCm options."""
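
Note the behavioural change on the ROCm side: if_rocm_is_configured() previously returned a plain list (x or []), and now returns a select() like its CUDA counterpart, so the two compose uniformly inside if_gpu_is_configured(). A quick before/after of a call site from the rocm/BUILD file above (illustrative expansion):

# Before: a plain list, freely usable in ordinary list expressions.
#   if_rocm_is_configured(["rocm_blas.cc"])  ->  ["rocm_blas.cc"]  or  []
# After: a configurable select(), composable only via + with lists/selects.
#   if_rocm_is_configured(["rocm_blas.cc"])
#     ->  select({"//conditions:default": ["rocm_blas.cc"]})
#     or  select({"//conditions:default": []})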