Add a CentOS platform configuration featuring a devtoolset-7 setup.

One of the main differences is that devtoolset-7 is a more traditional
cross-compiler-style setup in which the compiler and its tools live under their
own prefix; this change adds support for using such a prefixed toolchain in our
CUDA toolchain.
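As a rough sketch of the mechanism (the helper name below is made up; the actual logic is inlined in _create_local_cuda_repository in cuda_configure.bzl), the CUDA repository rule now reads an optional GCC_HOST_COMPILER_PREFIX environment variable, falls back to /usr/bin, and substitutes the result for %{host_compiler_prefix} in the generated toolchain so that ar, ld, nm, objcopy, etc. resolve under the prefix:

  # Sketch only -- illustrative helper, not the actual code layout.
  def _host_compiler_prefix(repository_ctx):
      prefix = "/usr/bin"  # default: the system binutils
      if "GCC_HOST_COMPILER_PREFIX" in repository_ctx.os.environ:
          prefix = repository_ctx.os.environ["GCC_HOST_COMPILER_PREFIX"].strip()
      return prefix

For devtoolset-7 the prefix is /opt/rh/devtoolset-7/root/usr/bin, so the generated tool_path entries point at the devtoolset binutils instead of /usr/bin.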

Other, smaller differences are that libcudnn and TensorRT are installed into
different paths than on Ubuntu.
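Concretely (the dict name below is illustrative; the values are taken from the new centos7 config), cuDNN is picked up from the CUDA toolkit directory and TensorRT from /usr/lib64, while the Ubuntu configs keep the existing /usr/lib/x86_64-linux-gnu defaults:

  # Illustrative only; these values flow into CUDNN_INSTALL_PATH / TENSORRT_INSTALL_PATH.
  centos7_install_paths = {
      "CUDNN_INSTALL_PATH": "/usr/local/cuda-10.0",  # ubuntu default: /usr/lib/x86_64-linux-gnu
      "TENSORRT_INSTALL_PATH": "/usr/lib64",  # ubuntu default: /usr/lib/x86_64-linux-gnu
  }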

Additionally, this change adds new -centos7 and -centos7-gpu platforms to enable
running the centos7 setup remotely.

Cleanup: delete the now-unused CUDA 9 based platforms.
PiperOrigin-RevId: 240188659
A. Unique TensorFlower 2019-03-25 11:57:11 -07:00 committed by TensorFlower Gardener
parent f665d09225
commit 24ac875e96
7 changed files with 85 additions and 35 deletions

View File

@@ -259,16 +259,16 @@ toolchain {
   tool_path { name: "gcc" path: "%{host_compiler_path}" }
   # Use the default system toolchain for everything else.
-  tool_path { name: "ar" path: "/usr/bin/ar" }
-  tool_path { name: "compat-ld" path: "/usr/bin/ld" }
-  tool_path { name: "cpp" path: "/usr/bin/cpp" }
-  tool_path { name: "dwp" path: "/usr/bin/dwp" }
-  tool_path { name: "gcov" path: "/usr/bin/gcov" }
-  tool_path { name: "ld" path: "/usr/bin/ld" }
-  tool_path { name: "nm" path: "/usr/bin/nm" }
-  tool_path { name: "objcopy" path: "/usr/bin/objcopy" }
-  tool_path { name: "objdump" path: "/usr/bin/objdump" }
-  tool_path { name: "strip" path: "/usr/bin/strip" }
+  tool_path { name: "ar" path: "%{host_compiler_prefix}/ar" }
+  tool_path { name: "compat-ld" path: "%{host_compiler_prefix}/ld" }
+  tool_path { name: "cpp" path: "%{host_compiler_prefix}/cpp" }
+  tool_path { name: "dwp" path: "%{host_compiler_prefix}/dwp" }
+  tool_path { name: "gcov" path: "%{host_compiler_prefix}/gcov" }
+  tool_path { name: "ld" path: "%{host_compiler_prefix}/ld" }
+  tool_path { name: "nm" path: "%{host_compiler_prefix}/nm" }
+  tool_path { name: "objcopy" path: "%{host_compiler_prefix}/objcopy" }
+  tool_path { name: "objdump" path: "%{host_compiler_prefix}/objdump" }
+  tool_path { name: "strip" path: "%{host_compiler_prefix}/strip" }
   # Enabled dynamic linking.
   linking_mode_flags { mode: DYNAMIC }

View File

@@ -24,6 +24,7 @@
 """
 _GCC_HOST_COMPILER_PATH = "GCC_HOST_COMPILER_PATH"
+_GCC_HOST_COMPILER_PREFIX = "GCC_HOST_COMPILER_PREFIX"
 _CLANG_CUDA_COMPILER_PATH = "CLANG_CUDA_COMPILER_PATH"
 _CUDA_TOOLKIT_PATH = "CUDA_TOOLKIT_PATH"
 _TF_CUDA_VERSION = "TF_CUDA_VERSION"
@@ -1362,6 +1363,11 @@ def _create_local_cuda_repository(repository_ctx):
     host_compiler_includes = _host_compiler_includes(repository_ctx, cc_fullpath)
     cuda_defines = {}

+    host_compiler_prefix = "/usr/bin"
+    if _GCC_HOST_COMPILER_PREFIX in repository_ctx.os.environ:
+        host_compiler_prefix = repository_ctx.os.environ[_GCC_HOST_COMPILER_PREFIX].strip()
+    cuda_defines["%{host_compiler_prefix}"] = host_compiler_prefix
+
     # Bazel sets '-B/usr/bin' flag to workaround build errors on RHEL (see
     # https://github.com/bazelbuild/bazel/issues/760).
     # However, this stops our custom clang toolchain from picking the provided
@@ -1373,7 +1379,7 @@ def _create_local_cuda_repository(repository_ctx):
     if should_download_clang:
         cuda_defines["%{linker_bin_path_flag}"] = ""
     else:
-        cuda_defines["%{linker_bin_path_flag}"] = 'flag: "-B/usr/bin"'
+        cuda_defines["%{linker_bin_path_flag}"] = 'flag: "-B%s"' % host_compiler_prefix

     if is_cuda_clang:
         cuda_defines["%{host_compiler_path}"] = str(cc)
@@ -1526,6 +1532,7 @@ cuda_configure = repository_rule(
     implementation = _cuda_autoconf_impl,
     environ = [
         _GCC_HOST_COMPILER_PATH,
+        _GCC_HOST_COMPILER_PREFIX,
         _CLANG_CUDA_COMPILER_PATH,
         "TF_NEED_CUDA",
         "TF_CUDA_CLANG",

View File

@@ -81,3 +81,46 @@ platform(
         }
     """ % container_digests["cuda10.0-cudnn7-ubuntu14.04"],
 )
+
+# Built with //tensorflow/tools/ci_build/Dockerfile.rbe.cuda10.0-cudnn7-centos7.
+platform(
+    name = "rbe_cuda10.0-cudnn7-centos7",
+    constraint_values = [
+        "@bazel_tools//platforms:x86_64",
+        "@bazel_tools//platforms:linux",
+    ],
+    remote_execution_properties = """
+        properties: {
+            name: "container-image"
+            value: "docker://gcr.io/tensorflow-testing/nosla-cuda10.0-cudnn7-centos7@%s"
+        }
+        properties: {
+            name: "Pool"
+            value: "default"
+        }
+    """ % container_digests["cuda10.0-cudnn7-centos7"],
+)
+
+# The above platform with GPU support.
+platform(
+    name = "rbe_cuda10.0-cudnn7-centos7-gpu",
+    constraint_values = [
+        "@bazel_tools//platforms:x86_64",
+        "@bazel_tools//platforms:linux",
+        ":gpu_test",
+    ],
+    remote_execution_properties = """
+        properties: {
+            name: "container-image"
+            value: "docker://gcr.io/tensorflow-testing/nosla-cuda10.0-cudnn7-centos7@%s"
+        }
+        properties: {
+            name: "dockerRuntime"
+            value: "nvidia"
+        }
+        properties: {
+            name: "Pool"
+            value: "gpu-pool"
+        }
+    """ % container_digests["cuda10.0-cudnn7-centos7"],
+)

View File

@@ -5,24 +5,20 @@ load(":generate.bzl", "tensorflow_rbe_config")
 tensorflow_rbe_config(
     name = "ubuntu16.04-py3-clang",
     compiler = "clang",
+    os = "ubuntu16.04",
     python_version = "3",
 )

 tensorflow_rbe_config(
-    name = "ubuntu14.04-py3-gcc-cuda9.0-cudnn7-tensorrt5",
+    name = "centos7-py3-gcc7-cuda10.0-cudnn7-tensorrt5",
     compiler = "gcc",
-    cuda_version = "9.0",
-    cudnn_version = "7",
-    python_version = "3",
-    tensorrt_version = "5",
-)
-
-tensorflow_rbe_config(
-    name = "ubuntu14.04-py3-clang-cuda9.0-cudnn7-tensorrt5",
-    compiler = "clang",
-    cuda_version = "9.0",
+    compiler_prefix = "/opt/rh/devtoolset-7/root/usr/bin",
+    cuda_version = "10.0",
+    cudnn_install_path = "/usr/local/cuda-10.0",
     cudnn_version = "7",
+    os = "centos7",
     python_version = "3",
+    tensorrt_install_path = "/usr/lib64",
     tensorrt_version = "5",
 )
@@ -31,6 +27,7 @@ tensorflow_rbe_config(
     compiler = "gcc-7",
     cuda_version = "10.0",
     cudnn_version = "7",
+    os = "ubuntu14.04",
     python_version = "3",
     tensorrt_version = "5",
 )
@@ -40,6 +37,7 @@ tensorflow_rbe_config(
     compiler = "gcc",
     cuda_version = "10.0",
     cudnn_version = "7",
+    os = "ubuntu14.04",
     python_version = "3",
     tensorrt_version = "5",
 )
@@ -49,6 +47,7 @@ tensorflow_rbe_config(
     compiler = "clang",
     cuda_version = "10.0",
     cudnn_version = "7",
+    os = "ubuntu14.04",
     python_version = "3",
     tensorrt_version = "5",
 )

View File

@@ -1,5 +1,5 @@
 container_digests = {
     "ubuntu16.04": "sha256:d0d98c53111c3ec071aa81632a2b0d6f210e5c2411c5172e31f99002125ec4de",
-    "cuda9.0-cudnn7-ubuntu14.04": "sha256:006a76ee1838122ff7f21ebac85f24c1ef350d4dd79b3ceff0e4fe649ed90d33",
     "cuda10.0-cudnn7-ubuntu14.04": "sha256:d433e1221f802dac393bc8652fabcc63aa46896cd920bb888ae0e2002fe6b756",
+    "cuda10.0-cudnn7-centos7": "sha256:a453b7147a60928a8345689eae48916a746b3578b5e831bfa151f0529d469c88",
 }

View File

@@ -3,8 +3,8 @@ load(
     "docker_toolchain_autoconfig",
 )

-def _tensorflow_rbe_config(name, compiler, python_version, cuda_version = None, cudnn_version = None, tensorrt_version = None):
-    base = "@ubuntu16.04//image"
+def _tensorflow_rbe_config(name, compiler, python_version, os, cuda_version = None, cudnn_version = None, tensorrt_version = None, tensorrt_install_path = None, cudnn_install_path = None, compiler_prefix = None):
+    base = "@%s//image" % os
     config_repos = [
         "local_config_python",
         "local_config_cc",
@@ -26,7 +26,7 @@ def _tensorflow_rbe_config(name, compiler, python_version, cuda_version = None,
     }

     if cuda_version != None:
-        base = "@cuda%s-cudnn%s-ubuntu14.04//image" % (cuda_version, cudnn_version)
+        base = "@cuda%s-cudnn%s-%s//image" % (cuda_version, cudnn_version, os)

         # The cuda toolchain currently contains its own C++ toolchain definition,
         # so we do not fetch local_config_cc.
@@ -42,11 +42,12 @@ def _tensorflow_rbe_config(name, compiler, python_version, cuda_version = None,
             "TF_ENABLE_XLA": "1",
             "TF_CUDNN_VERSION": cudnn_version,
             "TF_CUDA_VERSION": cuda_version,
-            "CUDNN_INSTALL_PATH": "/usr/lib/x86_64-linux-gnu",
+            "CUDNN_INSTALL_PATH": cudnn_install_path if cudnn_install_path != None else "/usr/lib/x86_64-linux-gnu",
             "TF_NEED_TENSORRT": "1",
             "TF_TENSORRT_VERSION": tensorrt_version,
-            "TENSORRT_INSTALL_PATH": "/usr/lib/x86_64-linux-gnu",
+            "TENSORRT_INSTALL_PATH": tensorrt_install_path if tensorrt_install_path != None else "/usr/lib/x86_64-linux-gnu",
             "GCC_HOST_COMPILER_PATH": compiler if compiler != "clang" else "",
+            "GCC_HOST_COMPILER_PREFIX": compiler_prefix if compiler_prefix != None else "/usr/bin",
         })

     docker_toolchain_autoconfig(

View File

@@ -18,13 +18,6 @@ def _remote_config_workspace():
         digest = container_digests["ubuntu16.04"],
     )

-    container_pull(
-        name = "cuda9.0-cudnn7-ubuntu14.04",
-        registry = "gcr.io",
-        repository = "tensorflow-testing/nosla-cuda9.0-cudnn7-ubuntu14.04",
-        digest = container_digests["cuda9.0-cudnn7-ubuntu14.04"],
-    )
-
     container_pull(
         name = "cuda10.0-cudnn7-ubuntu14.04",
         registry = "gcr.io",
@@ -32,4 +25,11 @@ def _remote_config_workspace():
         digest = container_digests["cuda10.0-cudnn7-ubuntu14.04"],
     )

+    container_pull(
+        name = "cuda10.0-cudnn7-centos7",
+        registry = "gcr.io",
+        repository = "tensorflow-testing/nosla-cuda10.0-cudnn7-centos7",
+        digest = container_digests["cuda10.0-cudnn7-centos7"],
+    )
+
 remote_config_workspace = _remote_config_workspace