CudaRoot() returns the configured CUDA toolkit path.

Fixes the problem of XLA being unable to find libdevice files when it is not
executed through bazel. For example, see
http://stackoverflow.com/questions/41729019/notfounderror-running-tensorflow-xla-example-libdevice-compute-35-10-bc/41800414#41800414
Change: 146329331
This commit is contained in:
Jingyue Wu 2017-02-01 22:17:39 -08:00 committed by gunan
parent b9deaac643
commit 7eb34751d7
7 changed files with 20 additions and 18 deletions

View File

@ -129,6 +129,12 @@ file(GLOB tf_core_platform_srcs
"${tensorflow_source_dir}/tensorflow/core/platform/*.cc"
"${tensorflow_source_dir}/tensorflow/core/platform/default/*.h"
"${tensorflow_source_dir}/tensorflow/core/platform/default/*.cc")
# CPU-only builds: collect the CUDA libdevice-path sources and strip them
# from tf_core_platform_srcs so they are not compiled without a CUDA
# toolchain. (NOTE(review): leading indentation appears to have been
# stripped by the diff rendering.)
if (NOT tensorflow_ENABLE_GPU)
# Matches both the platform-level header and the default implementation
# (cuda_libdevice_path.h / cuda_libdevice_path.cc).
file(GLOB tf_core_platform_gpu_srcs
"${tensorflow_source_dir}/tensorflow/core/platform/cuda_libdevice_path.*"
"${tensorflow_source_dir}/tensorflow/core/platform/default/cuda_libdevice_path.*")
list(REMOVE_ITEM tf_core_platform_srcs ${tf_core_platform_gpu_srcs})
endif()
list(APPEND tf_core_lib_srcs ${tf_core_platform_srcs})
if(UNIX)

View File

@ -458,9 +458,11 @@ $(wildcard tensorflow/core/lib/jpeg/*) \
$(wildcard tensorflow/core/lib/png/*) \
$(wildcard tensorflow/core/util/events_writer.*) \
$(wildcard tensorflow/core/util/reporter.*) \
$(wildcard tensorflow/core/platform/default/cuda_libdevice_path.*) \
$(wildcard tensorflow/core/platform/default/stream_executor.*) \
$(wildcard tensorflow/core/platform/default/test_benchmark.*) \
$(wildcard tensorflow/core/platform/cuda.h) \
$(wildcard tensorflow/core/platform/cuda_libdevice_path.*) \
$(wildcard tensorflow/core/platform/cloud/*) \
$(wildcard tensorflow/core/platform/google/*) \
$(wildcard tensorflow/core/platform/google/*/*) \

View File

@ -781,6 +781,7 @@ filegroup(
"lib/gif/**/*",
"util/events_writer.*",
"util/reporter.*",
"platform/**/cuda_libdevice_path.*",
"platform/default/stream_executor.*",
"platform/default/test_benchmark.*",
"platform/cuda.h",

View File

@ -152,10 +152,10 @@ def tf_additional_cupti_wrapper_deps():
return ["//tensorflow/core/platform/default/gpu:cupti_wrapper"]
# Data dependencies needed to locate libdevice files at runtime.
# NOTE(review): two consecutive `return` statements — this is almost
# certainly a rendered diff with the +/- markers stripped (first line
# removed by the change, second line added). As written the second
# return is unreachable; the post-change behavior is `return []`.
def tf_additional_libdevice_data():
return ["@local_config_cuda//cuda:libdevice_root"]
return []
# Build dependencies of the libdevice-path sources.
# NOTE(review): same stripped-diff artifact as above — the first return
# is the removed (old) line, the second the added (new) one. The
# post-change behavior returns the cuda_headers target, which provides
# cuda/cuda_config.h for TF_CUDA_TOOLKIT_PATH.
def tf_additional_libdevice_deps():
return []
return ["@local_config_cuda//cuda:cuda_headers"]
def tf_additional_libdevice_srcs():
    # Open-source implementation of CudaRoot(); it reports the CUDA
    # toolkit path baked in at configure time.
    srcs = ["platform/default/cuda_libdevice_path.cc"]
    return srcs

View File

@ -17,27 +17,16 @@ limitations under the License.
#include <stdlib.h>
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#if !defined(PLATFORM_GOOGLE)
#include "cuda/cuda_config.h"
#endif
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
// Returns the path used as the CUDA toolkit root.
// NOTE(review): this block looks like a diff with the +/- markers
// stripped — it contains both the old runfiles-based lookup (the
// TEST_SRCDIR / GetExecutablePath logic being removed) and the new
// compile-time TF_CUDA_TOOLKIT_PATH return being added. As written, the
// VLOG and final return are unreachable because of the unconditional
// return above them; the post-change body is just those last two
// statements. (Indentation also appears stripped by the diff view.)
string CudaRoot() {
// 'bazel test' sets TEST_SRCDIR.
const string kRelativeCudaRoot = io::JoinPath("local_config_cuda", "cuda");
const char* test_srcdir = getenv("TEST_SRCDIR");
if (test_srcdir && test_srcdir[0] != '\0') {
return io::JoinPath(test_srcdir, kRelativeCudaRoot);
}
// Old fallback path: resolve the CUDA root relative to the executable's
// bazel runfiles directory.
LOG(INFO) << "TEST_SRCDIR environment variable not set: using "
<< kRelativeCudaRoot
<< " under this executable's runfiles directory as the CUDA root.";
return io::JoinPath(
strings::StrCat(Env::Default()->GetExecutablePath(), ".runfiles"),
kRelativeCudaRoot);
// New behavior: TF_CUDA_TOOLKIT_PATH comes from cuda/cuda_config.h,
// generated by cuda_configure.bzl (see the %{cuda_toolkit_path}
// substitution elsewhere in this commit).
VLOG(3) << "CUDA root = " << TF_CUDA_TOOLKIT_PATH;
return TF_CUDA_TOOLKIT_PATH;
}
} // namespace tensorflow

View File

@ -21,4 +21,6 @@ limitations under the License.
// Placeholders of the form %{name} are template parameters substituted by
// cuda_configure.bzl when the @local_config_cuda repository is generated.
#define TF_CUDA_VERSION "%{cuda_version}"
#define TF_CUDNN_VERSION "%{cudnn_version}"
// Install path of the configured CUDA toolkit; read by CudaRoot() so
// libdevice can be located without bazel runfiles.
#define TF_CUDA_TOOLKIT_PATH "%{cuda_toolkit_path}"
#endif // CUDA_CUDA_CONFIG_H_

View File

@ -700,6 +700,7 @@ def _create_dummy_repository(repository_ctx):
"%{cuda_compute_capabilities}": ",".join([
"CudaVersion(\"%s\")" % c
for c in _DEFAULT_CUDA_COMPUTE_CAPABILITIES]),
"%{cuda_toolkit_path}": _DEFAULT_CUDA_TOOLKIT_PATH,
})
# If cuda_configure is not configured to build with GPU support, and the user
@ -802,6 +803,7 @@ def _create_cuda_repository(repository_ctx):
"%{cuda_compute_capabilities}": ",".join(
["CudaVersion(\"%s\")" % c
for c in cuda_config.compute_capabilities]),
"%{cuda_toolkit_path}": cuda_config.cuda_toolkit_path,
})