diff --git a/configure b/configure
index bcef37bd26b..f4b772c55ef 100755
--- a/configure
+++ b/configure
@@ -98,7 +98,7 @@ while true; do
     fi
   fi
   if [ -e "$GCC_HOST_COMPILER_PATH" ]; then
-    export CC=$GCC_HOST_COMPILER_PATH
+    export GCC_HOST_COMPILER_PATH
     break
   fi
   echo "Invalid gcc path. ${GCC_HOST_COMPILER_PATH} cannot be found" 1>&2
@@ -142,7 +142,7 @@ while true; do

   if [ -e "${CUDA_TOOLKIT_PATH}/${CUDA_RT_LIB_PATH}" ]; then
     export CUDA_TOOLKIT_PATH
-    export CUDA_VERSION=$TF_CUDA_VERSION
+    export TF_CUDA_VERSION
     break
   fi
   echo "Invalid path to CUDA $TF_CUDA_VERSION toolkit. ${CUDA_TOOLKIT_PATH}/${CUDA_RT_LIB_PATH} cannot be found"
@@ -203,7 +203,7 @@ while true; do
   fi

   if [ -e "$CUDNN_INSTALL_PATH/${CUDA_DNN_LIB_ALT_PATH}" -o -e "$CUDNN_INSTALL_PATH/${CUDA_DNN_LIB_PATH}" ]; then
-    export CUDNN_VERSION=$TF_CUDNN_VERSION
+    export TF_CUDNN_VERSION
     export CUDNN_INSTALL_PATH
     break
   fi
@@ -211,7 +211,7 @@ while true; do
   if [ "$OSNAME" == "Linux" ]; then
     CUDNN_PATH_FROM_LDCONFIG="$(ldconfig -p | sed -n 's/.*libcudnn.so .* => \(.*\)/\1/p')"
     if [ -e "${CUDNN_PATH_FROM_LDCONFIG}${TF_CUDNN_EXT}" ]; then
-      export CUDNN_VERSION=$TF_CUDNN_VERSION
+      export TF_CUDNN_VERSION
       export CUDNN_INSTALL_PATH="$(dirname ${CUDNN_PATH_FROM_LDCONFIG})"
       break
     fi
@@ -263,7 +263,7 @@ EOF
       exit 1
     fi
   else
-    export CUDA_COMPUTE_CAPABILITIES=$TF_CUDA_COMPUTE_CAPABILITIES
+    export TF_CUDA_COMPUTE_CAPABILITIES
     break
   fi
   TF_CUDA_COMPUTE_CAPABILITIES=""
diff --git a/gif.BUILD b/gif.BUILD
index 8dbea9cc413..cbdcc75f126 100644
--- a/gif.BUILD
+++ b/gif.BUILD
@@ -9,12 +9,18 @@ SOURCES = [
     "quantize.c",
 ]

+HEADERS = [
+    "gif_hash.h",
+    "gif_lib.h",
+    "gif_lib_private.h",
+]
+
 prefix_dir = "giflib-5.1.4/lib"

 cc_library(
     name = "gif",
     srcs = [prefix_dir + "/" + source for source in SOURCES],
-    hdrs = [prefix_dir + "/gif_lib.h"],
+    hdrs = [prefix_dir + "/" + hdrs for hdrs in HEADERS],
     includes = [prefix_dir],
     defines = [
         "HAVE_CONFIG_H",
diff --git a/tensorflow/tools/ci_build/Dockerfile.gpu b/tensorflow/tools/ci_build/Dockerfile.gpu
index 7f169165e5e..da92184ebef 100644
--- a/tensorflow/tools/ci_build/Dockerfile.gpu
+++ b/tensorflow/tools/ci_build/Dockerfile.gpu
@@ -24,4 +24,4 @@ ENV LD_LIBRARY_PATH /usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64
 ENV CUDA_TOOLKIT_PATH /usr/local/cuda
 ENV CUDNN_INSTALL_PATH /usr/lib/x86_64-linux-gnu
 ENV TF_NEED_CUDA 1
-ENV CUDA_COMPUTE_CAPABILITIES 3.0,5.2
+ENV TF_CUDA_COMPUTE_CAPABILITIES 3.0,5.2
diff --git a/third_party/gpus/cuda_configure.bzl b/third_party/gpus/cuda_configure.bzl
index 3682cb305de..1e47bfac788 100644
--- a/third_party/gpus/cuda_configure.bzl
+++ b/third_party/gpus/cuda_configure.bzl
@@ -3,19 +3,25 @@

 `cuda_configure` depends on the following environment variables:

-  * `ENABLE_CUDA`: Whether to enable building with CUDA.
-  * `CC`: The GCC host compiler path
+  * `TF_NEED_CUDA`: Whether to enable building with CUDA.
+  * `GCC_HOST_COMPILER_PATH`: The GCC host compiler path
   * `CUDA_TOOLKIT_PATH`: The path to the CUDA toolkit. Default is
     `/usr/local/cuda`.
-  * `CUDA_VERSION`: The version of the CUDA toolkit. If this is blank, then
+  * `TF_CUDA_VERSION`: The version of the CUDA toolkit. If this is blank, then
     use the system default.
-  * `CUDNN_VERSION`: The version of the cuDNN library.
+  * `TF_CUDNN_VERSION`: The version of the cuDNN library.
   * `CUDNN_INSTALL_PATH`: The path to the cuDNN library. Default is
     `/usr/local/cuda`.
-  * `CUDA_COMPUTE_CAPABILITIES`: The CUDA compute capabilities. Default is
+  * `TF_CUDA_COMPUTE_CAPABILITIES`: The CUDA compute capabilities. Default is
     `3.5,5.2`.
 """

+_GCC_HOST_COMPILER_PATH = "GCC_HOST_COMPILER_PATH"
+_CUDA_TOOLKIT_PATH = "CUDA_TOOLKIT_PATH"
+_TF_CUDA_VERSION = "TF_CUDA_VERSION"
+_TF_CUDNN_VERSION = "TF_CUDNN_VERSION"
+_CUDNN_INSTALL_PATH = "CUDNN_INSTALL_PATH"
+_TF_CUDA_COMPUTE_CAPABILITIES = "TF_CUDA_COMPUTE_CAPABILITIES"

 _DEFAULT_CUDA_VERSION = ""
 _DEFAULT_CUDNN_VERSION = ""
@@ -30,8 +36,8 @@ _DEFAULT_CUDA_COMPUTE_CAPABILITIES = ["3.5", "5.2"]
 def find_cc(repository_ctx):
   """Find the C++ compiler."""
   cc_name = "gcc"
-  if "CC" in repository_ctx.os.environ:
-    cc_name = repository_ctx.os.environ["CC"].strip()
+  if _GCC_HOST_COMPILER_PATH in repository_ctx.os.environ:
+    cc_name = repository_ctx.os.environ[_GCC_HOST_COMPILER_PATH].strip()
   if not cc_name:
     cc_name = "gcc"
   if cc_name.startswith("/"):
@@ -93,8 +99,8 @@ def _enable_cuda(repository_ctx):
 def _cuda_toolkit_path(repository_ctx):
   """Finds the cuda toolkit directory."""
   cuda_toolkit_path = _DEFAULT_CUDA_TOOLKIT_PATH
-  if "CUDA_TOOLKIT_PATH" in repository_ctx.os.environ:
-    cuda_toolkit_path = repository_ctx.os.environ["CUDA_TOOLKIT_PATH"].strip()
+  if _CUDA_TOOLKIT_PATH in repository_ctx.os.environ:
+    cuda_toolkit_path = repository_ctx.os.environ[_CUDA_TOOLKIT_PATH].strip()
   if not repository_ctx.path(cuda_toolkit_path).exists:
     fail("Cannot find cuda toolkit path.")
   return cuda_toolkit_path
@@ -103,8 +109,8 @@ def _cuda_toolkit_path(repository_ctx):
 def _cudnn_install_basedir(repository_ctx):
   """Finds the cudnn install directory."""
   cudnn_install_path = _DEFAULT_CUDNN_INSTALL_PATH
-  if "CUDNN_INSTALL_PATH" in repository_ctx.os.environ:
-    cudnn_install_path = repository_ctx.os.environ["CUDNN_INSTALL_PATH"].strip()
+  if _CUDNN_INSTALL_PATH in repository_ctx.os.environ:
+    cudnn_install_path = repository_ctx.os.environ[_CUDNN_INSTALL_PATH].strip()
   if not repository_ctx.path(cudnn_install_path).exists:
     fail("Cannot find cudnn install path.")
   return cudnn_install_path
@@ -112,25 +118,25 @@ def _cudnn_install_basedir(repository_ctx):

 def _cuda_version(repository_ctx):
   """Detects the cuda version."""
-  if "CUDA_VERSION" in repository_ctx.os.environ:
-    return repository_ctx.os.environ["CUDA_VERSION"].strip()
+  if _TF_CUDA_VERSION in repository_ctx.os.environ:
+    return repository_ctx.os.environ[_TF_CUDA_VERSION].strip()
   else:
     return ""


 def _cudnn_version(repository_ctx):
   """Detects the cudnn version."""
-  if "CUDNN_VERSION" in repository_ctx.os.environ:
-    return repository_ctx.os.environ["CUDNN_VERSION"].strip()
+  if _TF_CUDNN_VERSION in repository_ctx.os.environ:
+    return repository_ctx.os.environ[_TF_CUDNN_VERSION].strip()
   else:
     return ""


 def _compute_capabilities(repository_ctx):
   """Returns a list of strings representing cuda compute capabilities."""
-  if "CUDA_COMPUTE_CAPABILITIES" not in repository_ctx.os.environ:
-    return _DEFAULT_CUDA_COMPUTE_CAPABILITIES
-  capabilities_str = repository_ctx.os.environ["CUDA_COMPUTE_CAPABILITIES"]
+  if _TF_CUDA_COMPUTE_CAPABILITIES not in repository_ctx.os.environ:
+    return _DEFAULT_CUDA_COMPUTE_CAPABILITIES
+  capabilities_str = repository_ctx.os.environ[_TF_CUDA_COMPUTE_CAPABILITIES]
   capabilities = capabilities_str.split(",")
   for capability in capabilities:
     # Workaround for Skylark's lack of support for regex. This check should
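
Usage note: as a minimal sketch of pre-seeding ./configure with the renamed variables (the same idea the Dockerfile.gpu change above uses via ENV), the snippet below sets them before running the script so the prompt loops shown in the configure hunks accept the pre-set values. The gcc path, version numbers, and compute capabilities here are illustrative placeholders, not values mandated by this patch.

# Hypothetical values for illustration only; adjust to the local install.
export TF_NEED_CUDA=1
export GCC_HOST_COMPILER_PATH=/usr/bin/gcc
export CUDA_TOOLKIT_PATH=/usr/local/cuda
export TF_CUDA_VERSION=7.5
export CUDNN_INSTALL_PATH=/usr/local/cuda
export TF_CUDNN_VERSION=4
export TF_CUDA_COMPUTE_CAPABILITIES=3.0,5.2
./configure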