Merge pull request #29809 from ROCmSoftwarePlatform:google_upstream_is_built_with_rocm
PiperOrigin-RevId: 256505080
commit d0814e19f8
@@ -1368,6 +1368,21 @@ def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
     min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
       CUDA compute capability required, or None if no requirement.
 
+  Note that the keyword arg name "cuda_only" is misleading (since this routine
+  will return true when a GPU device is available irrespective of whether TF
+  was built with CUDA support or ROCm support). However, no changes are made
+  here because:
+
+  ++ Changing the name "cuda_only" to something more generic would break
+     backward compatibility.
+
+  ++ Adding an equivalent "rocm_only" would require the implementation to check
+     the build type. This in turn would require doing the same for CUDA, and
+     thus potentially break backward compatibility.
+
+  ++ Adding a new "cuda_or_rocm_only" would not break backward compatibility,
+     but would require most (if not all) callers to update the call to use
+     "cuda_or_rocm_only" instead of "cuda_only".
 
   Returns:
     True if a GPU device of the requested kind is available.
   """
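The behavior described above can be illustrated with a minimal sketch (not part of the diff; it combines the existing availability check with the build-type predicate this commit introduces):

    import tensorflow as tf

    # With the default cuda_only=False, this returns True whenever a GPU
    # device is registered, on both CUDA and ROCm builds.
    gpu_available = tf.test.is_gpu_available(cuda_only=False)

    # A ROCm-specific signal therefore has to combine device availability
    # with the build-type predicate added in this change.
    rocm_gpu_available = gpu_available and tf.test.is_built_with_rocm()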
@@ -25,6 +25,7 @@ from tensorflow.python.platform import test
 class BuildInfoTest(test.TestCase):
 
   def testBuildInfo(self):
+    self.assertEqual(build_info.is_rocm_build, test.is_built_with_rocm())
     self.assertEqual(build_info.is_cuda_build, test.is_built_with_cuda())
 
 
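Outside the test harness, the generated module can be queried directly; a minimal sketch (illustrative only; the module is produced by the genrule further below):

    from tensorflow.python.platform import build_info

    # Plain module-level booleans written by gen_build_info.
    print("ROCm build:", build_info.is_rocm_build)
    print("CUDA build:", build_info.is_cuda_build)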
@@ -92,3 +92,15 @@ def test_src_dir_path(relative_path):
 def is_built_with_cuda():
   """Returns whether TensorFlow was built with CUDA (GPU) support."""
   return _test_util.IsGoogleCudaEnabled()
+
+
+@tf_export('test.is_built_with_rocm')
+def is_built_with_rocm():
+  """Returns whether TensorFlow was built with ROCm (GPU) support."""
+  return _test_util.IsBuiltWithROCm()
+
+
+@tf_export('test.is_built_with_gpu_support')
+def is_built_with_gpu_support():
+  """Returns whether TensorFlow was built with GPU (i.e. CUDA or ROCm) support."""
+  return is_built_with_cuda() or is_built_with_rocm()
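A sketch of how a test might use the new predicates to guard a backend-specific code path (test class and method names hypothetical):

    import tensorflow as tf

    class RocmOnlyTest(tf.test.TestCase):

      def testRocmSpecificKernel(self):
        if not tf.test.is_built_with_rocm():
          self.skipTest('requires a ROCm build of TensorFlow')
        # ... exercise the ROCm-specific code path here ...

    if __name__ == '__main__':
      tf.test.main()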
@@ -2314,8 +2314,9 @@ def tf_py_build_info_genrule():
         name = "py_build_info_gen",
         outs = ["platform/build_info.py"],
         cmd =
-            "$(location //tensorflow/tools/build_info:gen_build_info) --raw_generate \"$@\" --build_config " +
-            if_cuda("cuda", "cpu") +
+            "$(location //tensorflow/tools/build_info:gen_build_info) --raw_generate \"$@\" " +
+            " --is_config_cuda " + if_cuda("True", "False") +
+            " --is_config_rocm " + if_rocm("True", "False") +
             " --key_value " +
             if_cuda(" cuda_version_number=$${TF_CUDA_VERSION:-} cudnn_version_number=$${TF_CUDNN_VERSION:-} ", "") +
             if_windows(" msvcp_dll_name=msvcp140.dll ", "") +
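On a ROCm build, the genrule therefore invokes the generator roughly as follows; a hedged standalone equivalent (interpreter and output path assumed, Bazel's $(location) expansion elided):

    import subprocess

    # Mirrors the cmd string above for the if_rocm branch.
    subprocess.check_call([
        "python", "tensorflow/tools/build_info/gen_build_info.py",
        "--raw_generate", "/tmp/build_info.py",
        "--is_config_cuda", "False",
        "--is_config_rocm", "True",
    ])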
@@ -48,6 +48,14 @@ tf_module {
     name: "is_built_with_cuda"
     argspec: "args=[], varargs=None, keywords=None, defaults=None"
   }
+  member_method {
+    name: "is_built_with_gpu_support"
+    argspec: "args=[], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "is_built_with_rocm"
+    argspec: "args=[], varargs=None, keywords=None, defaults=None"
+  }
   member_method {
     name: "is_gpu_available"
     argspec: "args=[\'cuda_only\', \'min_cuda_compute_capability\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
@@ -32,6 +32,14 @@ tf_module {
     name: "is_built_with_cuda"
     argspec: "args=[], varargs=None, keywords=None, defaults=None"
   }
+  member_method {
+    name: "is_built_with_gpu_support"
+    argspec: "args=[], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "is_built_with_rocm"
+    argspec: "args=[], varargs=None, keywords=None, defaults=None"
+  }
   member_method {
     name: "is_gpu_available"
     argspec: "args=[\'cuda_only\', \'min_cuda_compute_capability\'], varargs=None, keywords=None, defaults=[\'False\', \'None\'], "
@@ -19,13 +19,13 @@ from __future__ import print_function
 import argparse
 
 
-def write_build_info(filename, build_config, key_value_list):
+def write_build_info(filename, is_config_cuda, is_config_rocm, key_value_list):
   """Writes a Python module that describes the build.
 
   Args:
     filename: filename to write to.
-    build_config: A string that represents the config used in this build (e.g.
-      "cuda").
+    is_config_cuda: Whether this build is using CUDA.
+    is_config_rocm: Whether this build is using ROCm.
     key_value_list: A list of "key=value" strings that will be added to the
       module as additional fields.
@@ -35,10 +35,14 @@ def write_build_info(filename, build_config, key_value_list):
   """
   module_docstring = "\"\"\"Generates a Python module containing information "
   module_docstring += "about the build.\"\"\""
-  if build_config == "cuda":
-    build_config_bool = "True"
-  else:
-    build_config_bool = "False"
+
+  build_config_rocm_bool = "False"
+  build_config_cuda_bool = "False"
+
+  if is_config_rocm == "True":
+    build_config_rocm_bool = "True"
+  elif is_config_cuda == "True":
+    build_config_cuda_bool = "True"
 
   key_value_pair_stmts = []
   if key_value_list:
@@ -47,6 +51,9 @@ def write_build_info(filename, build_config, key_value_list):
       if key == "is_cuda_build":
         raise ValueError("The key \"is_cuda_build\" cannot be passed as one of "
                          "the --key_value arguments.")
+      if key == "is_rocm_build":
+        raise ValueError("The key \"is_rocm_build\" cannot be passed as one of "
+                         "the --key_value arguments.")
       key_value_pair_stmts.append("%s = %r" % (key, value))
   key_value_pair_content = "\n".join(key_value_pair_stmts)
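Both keys are reserved because the generator emits them itself. A sketch of the failure mode, assuming write_build_info has been brought into scope (the real script parses flags at import time, so a direct import is not clean):

    try:
      write_build_info("/tmp/build_info.py", is_config_cuda="False",
                       is_config_rocm="True",
                       key_value_list=["is_rocm_build=True"])
    except ValueError as e:
      # The key "is_rocm_build" cannot be passed as one of the
      # --key_value arguments.
      print(e)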
@@ -70,10 +77,12 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
+is_rocm_build = %s
 is_cuda_build = %s
 
 %s
-""" % (module_docstring, build_config_bool, key_value_pair_content)
+""" % (module_docstring, build_config_rocm_bool, build_config_cuda_bool,
+       key_value_pair_content)
   open(filename, "w").write(contents)
 
 
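Filled in, the template produces a module along these lines on a ROCm build (values illustrative):

    """Generates a Python module containing information about the build."""
    from __future__ import absolute_import
    from __future__ import division
    from __future__ import print_function

    is_rocm_build = True
    is_cuda_build = False

    # Any --key_value pairs are emitted here as additional "key = 'value'" lines.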
@@ -81,18 +90,26 @@ parser = argparse.ArgumentParser(
     description="""Build info injection into the PIP package.""")
 
 parser.add_argument(
-    "--build_config",
+    "--is_config_cuda",
     type=str,
-    help="Either 'cuda' for GPU builds or 'cpu' for CPU builds.")
+    help="'True' for CUDA GPU builds, 'False' otherwise.")
+
+parser.add_argument(
+    "--is_config_rocm",
+    type=str,
+    help="'True' for ROCm GPU builds, 'False' otherwise.")
 
 parser.add_argument("--raw_generate", type=str, help="Generate build_info.py")
 
-parser.add_argument("--key_value", type=str, nargs="*",
-                    help="List of key=value pairs.")
+parser.add_argument(
+    "--key_value", type=str, nargs="*", help="List of key=value pairs.")
 
 args = parser.parse_args()
 
-if args.raw_generate is not None and args.build_config is not None:
-  write_build_info(args.raw_generate, args.build_config, args.key_value)
+if (args.raw_generate is not None) and (args.is_config_cuda is not None) and (
+    args.is_config_rocm is not None):
+  write_build_info(args.raw_generate, args.is_config_cuda, args.is_config_rocm,
+                   args.key_value)
 else:
-  raise RuntimeError("--raw_generate and --build_config must be used")
+  raise RuntimeError(
+      "--raw_generate, --is_config_cuda and --is_config_rocm must be used")
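Running the generator without the new flags now fails fast; a hedged check of that behavior (script path and interpreter assumed):

    import subprocess

    # Omitting --is_config_cuda/--is_config_rocm should trip the RuntimeError.
    result = subprocess.run(
        ["python", "tensorflow/tools/build_info/gen_build_info.py",
         "--raw_generate", "/tmp/build_info.py"],
        capture_output=True, text=True)
    assert result.returncode != 0
    assert "--is_config_rocm" in result.stderr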
@@ -19,9 +19,10 @@ set -e
 set -x
 
 N_JOBS=$(grep -c ^processor /proc/cpuinfo)
+N_GPUS=$(lspci|grep 'VGA'|grep 'AMD/ATI'|wc -l)
 
 echo ""
-echo "Bazel will use ${N_JOBS} concurrent job(s)."
+echo "Bazel will use ${N_JOBS} concurrent build job(s) and ${N_GPUS} concurrent test job(s)."
 echo ""
 
 # Run configure.
@@ -29,11 +30,15 @@ export PYTHON_BIN_PATH=`which python3`
 export CC_OPT_FLAGS='-mavx'
 
 export TF_NEED_ROCM=1
+export TF_GPU_COUNT=${N_GPUS}
 
 yes "" | $PYTHON_BIN_PATH configure.py
 
 # Run bazel test command. Double test timeouts to avoid flakes.
-bazel test --config=rocm --test_tag_filters=-no_oss,-oss_serial,-no_gpu,-benchmark-test -k \
+bazel test --config=rocm --test_tag_filters=-no_oss,-oss_serial,-no_gpu,-no_rocm,-benchmark-test -k \
     --test_lang_filters=cc --jobs=${N_JOBS} --test_timeout 300,450,1200,3600 \
-    --build_tests_only --test_output=errors --local_test_jobs=1 --config=opt \
+    --build_tests_only --test_output=errors --local_test_jobs=${TF_GPU_COUNT} --config=opt \
+    --test_sharding_strategy=disabled \
+    --test_size_filters=small,medium \
+    --run_under=//tensorflow/tools/ci_build/gpu_build:parallel_gpu_execute -- \
     //tensorflow/... -//tensorflow/compiler/... -//tensorflow/contrib/...