The original PR got overridden by bc456e361d.

This makes the fetch phase reproducible, so a refetch does not happen unless people re-run ./configure (the environment variables are stored in the .bazelrc file).

Fixes #8619.
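For example, a CUDA-enabled run of ./configure now leaves lines like the following in .bazelrc (values shown here are only illustrative defaults):

    build --action_env CUDA_TOOLKIT_PATH=/usr/local/cuda
    build --action_env TF_CUDA_VERSION=8.0
    build --action_env TF_CUDA_COMPUTE_CAPABILITIES=3.5,5.2

Because these values come from the file rather than the shell environment, subsequent builds see the same settings until ./configure is run again.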
#!/usr/bin/env bash

set -e
set -o pipefail

# Find out the absolute path to where ./configure resides
pushd `dirname $0` > /dev/null
SOURCE_BASE_DIR=`pwd -P`
popd > /dev/null

# This file contains customized config settings.
touch .bazelrc

PLATFORM="$(uname -s | tr 'A-Z' 'a-z')"

function is_linux() {
  if [[ "${PLATFORM}" == "linux" ]]; then
    true
  else
    false
  fi
}

function is_macos() {
  if [[ "${PLATFORM}" == "darwin" ]]; then
    true
  else
    false
  fi
}

function is_windows() {
  # On Windows, the shell script actually runs under MSYS
  if [[ "${PLATFORM}" =~ msys_nt*|mingw*|cygwin*|uwin* ]]; then
    true
  else
    false
  fi
}

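# Portability note: BSD sed on macOS requires an explicit (possibly empty)
# backup-suffix argument after -i, while GNU sed does not take a separate
# argument, hence the wrapper below.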
function sed_hyphen_i() {
  if is_macos; then
    sed -i '' "$@"
  else
    sed -i "$@"
  fi
}

# Delete any leftover BUILD files from the Makefile build, which would interfere
# with Bazel parsing.
MAKEFILE_DOWNLOAD_DIR=tensorflow/contrib/makefile/downloads
if [ -d "${MAKEFILE_DOWNLOAD_DIR}" ]; then
  find ${MAKEFILE_DOWNLOAD_DIR} -type f -name '*BUILD' -delete
fi

## Set up python-related environment settings
while true; do
  fromuser=""
  if [ -z "$PYTHON_BIN_PATH" ]; then
    default_python_bin_path=$(which python || which python3 || true)
    read -p "Please specify the location of python. [Default is $default_python_bin_path]: " PYTHON_BIN_PATH
    fromuser="1"
    if [ -z "$PYTHON_BIN_PATH" ]; then
      PYTHON_BIN_PATH=$default_python_bin_path
    fi
  fi
  if [ -e "$PYTHON_BIN_PATH" ]; then
    break
  fi
  echo "Invalid python path. ${PYTHON_BIN_PATH} cannot be found" 1>&2
  if [ -z "$fromuser" ]; then
    exit 1
  fi
  PYTHON_BIN_PATH=""
  # Retry
done

## Set up MKL related environment settings
if false; then # Disable building with MKL for now
  while [ "$TF_NEED_MKL" == "" ]; do
    fromuser=""
    read -p "Do you wish to build TensorFlow with MKL support? [y/N] " INPUT
    fromuser="1"
    case $INPUT in
      [Yy]* ) echo "MKL support will be enabled for TensorFlow"; TF_NEED_MKL=1;;
      [Nn]* ) echo "No MKL support will be enabled for TensorFlow"; TF_NEED_MKL=0;;
      "" ) echo "No MKL support will be enabled for TensorFlow"; TF_NEED_MKL=0;;
      * ) echo "Invalid selection: " $INPUT;;
    esac
  done

  OSNAME=`uname -s`

  if [ "$TF_NEED_MKL" == "1" ]; then # TF_NEED_MKL
    DST=`dirname $0`
    ARCHIVE_BASENAME=mklml_lnx_2017.0.2.20170209.tgz
    GITHUB_RELEASE_TAG=v0.5
    MKLURL="https://github.com/01org/mkl-dnn/releases/download/$GITHUB_RELEASE_TAG/$ARCHIVE_BASENAME"
    if ! [ -e "$DST/third_party/mkl/$ARCHIVE_BASENAME" ]; then
      wget --no-check-certificate -P $DST/third_party/mkl/ $MKLURL
    fi
    tar -xzf $DST/third_party/mkl/$ARCHIVE_BASENAME -C $DST/third_party/mkl/
    extracted_dir_name="${ARCHIVE_BASENAME%.*}"
    MKL_INSTALL_PATH=$DST/third_party/mkl/$extracted_dir_name
    MKL_INSTALL_PATH=`${PYTHON_BIN_PATH} -c "import os; print(os.path.realpath(os.path.expanduser('${MKL_INSTALL_PATH}')))"`

    if [ "$OSNAME" == "Linux" ]; then
      # Full MKL configuration
      MKL_RT_LIB_PATH="lib/intel64/libmkl_rt.so" #${TF_MKL_EXT}#TODO version?
      MKL_RT_OMP_LIB_PATH="../compiler/lib/intel64/libiomp5.so" #TODO VERSION?

      # MKL-ML configuration
      MKL_ML_LIB_PATH="lib/libmklml_intel.so" #${TF_MKL_EXT}#TODO version?
      MKL_ML_OMP_LIB_PATH="lib/libiomp5.so" #TODO VERSION?
    elif [ "$OSNAME" == "Darwin" ]; then
      echo "Darwin is not supported yet";
      exit 1
    fi

    if [ -e "$MKL_INSTALL_PATH/${MKL_ML_LIB_PATH}" ]; then
      ln -sf $MKL_INSTALL_PATH/${MKL_ML_LIB_PATH} third_party/mkl/
      ln -sf $MKL_INSTALL_PATH/${MKL_ML_OMP_LIB_PATH} third_party/mkl/
      ln -sf $MKL_INSTALL_PATH/include third_party/mkl/
      ln -sf $MKL_INSTALL_PATH/include third_party/eigen3/mkl_include
    else
      echo "ERROR: $MKL_INSTALL_PATH/${MKL_ML_LIB_PATH} does not exist";
      exit 1
    fi

    if [ -z "$fromuser" ]; then
      exit 1
    fi

    cat > third_party/mkl/mkl.config <<EOF
# MKL_INSTALL_PATH refers to the location of MKL root folder. The MKL header and library
# files can be either in this directory, or under include/ and lib64/
MKL_INSTALL_PATH=$MKL_INSTALL_PATH
EOF

  fi # TF_NEED_MKL
################## MKL
fi # Disable building with MKL for now

## Set up architecture-dependent optimization flags.
if [ -z "$CC_OPT_FLAGS" ]; then
  default_cc_opt_flags="-march=native"
  read -p "Please specify optimization flags to use during compilation when bazel option "\
"\"--config=opt\" is specified [Default is $default_cc_opt_flags]: " CC_OPT_FLAGS
  if [ -z "$CC_OPT_FLAGS" ]; then
    CC_OPT_FLAGS=$default_cc_opt_flags
  fi
fi

if is_windows; then
  TF_NEED_GCP=0
  TF_NEED_HDFS=0
  TF_NEED_JEMALLOC=0
  TF_NEED_OPENCL=0
fi

if is_linux; then
  while [ "$TF_NEED_JEMALLOC" == "" ]; do
    read -p "Do you wish to use jemalloc as the malloc implementation? [Y/n] "\
INPUT
    case $INPUT in
      [Yy]* ) echo "jemalloc enabled"; TF_NEED_JEMALLOC=1;;
      [Nn]* ) echo "jemalloc disabled"; TF_NEED_JEMALLOC=0;;
      "" ) echo "jemalloc enabled"; TF_NEED_JEMALLOC=1;;
      * ) echo "Invalid selection: " $INPUT;;
    esac
  done
else
  TF_NEED_JEMALLOC=0
fi

sed_hyphen_i -e "/with_jemalloc/d" .bazelrc
if [[ "$TF_NEED_JEMALLOC" == "1" ]]; then
  echo 'build --define with_jemalloc=true' >>.bazelrc
fi

while [[ "$TF_NEED_GCP" == "" ]]; do
  read -p "Do you wish to build TensorFlow with "\
"Google Cloud Platform support? [y/N] " INPUT
  case $INPUT in
    [Yy]* ) echo "Google Cloud Platform support will be enabled for "\
"TensorFlow"; TF_NEED_GCP=1;;
    [Nn]* ) echo "No Google Cloud Platform support will be enabled for "\
"TensorFlow"; TF_NEED_GCP=0;;
    "" ) echo "No Google Cloud Platform support will be enabled for "\
"TensorFlow"; TF_NEED_GCP=0;;
    * ) echo "Invalid selection: " $INPUT;;
  esac
done

sed_hyphen_i -e "/with_gcp_support/d" .bazelrc
if [[ "$TF_NEED_GCP" == "1" ]]; then
  echo 'build --define with_gcp_support=true' >>.bazelrc
fi

while [[ "$TF_NEED_HDFS" == "" ]]; do
  read -p "Do you wish to build TensorFlow with "\
"Hadoop File System support? [y/N] " INPUT
  case $INPUT in
    [Yy]* ) echo "Hadoop File System support will be enabled for "\
"TensorFlow"; TF_NEED_HDFS=1;;
    [Nn]* ) echo "No Hadoop File System support will be enabled for "\
"TensorFlow"; TF_NEED_HDFS=0;;
    "" ) echo "No Hadoop File System support will be enabled for "\
"TensorFlow"; TF_NEED_HDFS=0;;
    * ) echo "Invalid selection: " $INPUT;;
  esac
done

sed_hyphen_i -e "/with_hdfs_support/d" .bazelrc
if [[ "$TF_NEED_HDFS" == "1" ]]; then
  echo 'build --define with_hdfs_support=true' >>.bazelrc
fi

## Enable XLA.
while [[ "$TF_ENABLE_XLA" == "" ]]; do
  read -p "Do you wish to build TensorFlow with the XLA just-in-time compiler (experimental)? [y/N] " INPUT
  case $INPUT in
    [Yy]* ) echo "XLA JIT support will be enabled for TensorFlow"; TF_ENABLE_XLA=1;;
    [Nn]* ) echo "No XLA JIT support will be enabled for TensorFlow"; TF_ENABLE_XLA=0;;
    "" ) echo "No XLA support will be enabled for TensorFlow"; TF_ENABLE_XLA=0;;
    * ) echo "Invalid selection: " $INPUT;;
  esac
done

sed_hyphen_i -e "/with_xla_support/d" .bazelrc
if [[ "$TF_ENABLE_XLA" == "1" ]]; then
  echo 'build --define with_xla_support=true' >>.bazelrc
fi


# Invoke python_config and set up symlinks to python includes
./util/python/python_config.sh --setup "$PYTHON_BIN_PATH"

# Append CC optimization flags to bazel.rc
echo >> tools/bazel.rc
for opt in $CC_OPT_FLAGS; do
  echo "build:opt --cxxopt=$opt --copt=$opt" >> tools/bazel.rc
done
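# These build:opt lines take effect when a build is invoked with --config=opt,
# e.g. (target name illustrative):
#   bazel build --config=opt //tensorflow/tools/pip_package:build_pip_package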

# Run the gen_git_source to create links where bazel can track dependencies for
# git hash propagation
GEN_GIT_SOURCE=tensorflow/tools/git/gen_git_source.py
chmod a+x ${GEN_GIT_SOURCE}
"${PYTHON_BIN_PATH}" ${GEN_GIT_SOURCE} --configure "${SOURCE_BASE_DIR}"

## Set up SYCL-related environment settings
while [ "$TF_NEED_OPENCL" == "" ]; do
  read -p "Do you wish to build TensorFlow with OpenCL support? [y/N] " INPUT
  case $INPUT in
    [Yy]* ) echo "OpenCL support will be enabled for TensorFlow"; TF_NEED_OPENCL=1;;
    [Nn]* ) echo "No OpenCL support will be enabled for TensorFlow"; TF_NEED_OPENCL=0;;
    "" ) echo "No OpenCL support will be enabled for TensorFlow"; TF_NEED_OPENCL=0;;
    * ) echo "Invalid selection: " $INPUT;;
  esac
done

## Set up Cuda-related environment settings

while [ "$TF_NEED_CUDA" == "" ]; do
  read -p "Do you wish to build TensorFlow with CUDA support? [y/N] " INPUT
  case $INPUT in
    [Yy]* ) echo "CUDA support will be enabled for TensorFlow"; TF_NEED_CUDA=1;;
    [Nn]* ) echo "No CUDA support will be enabled for TensorFlow"; TF_NEED_CUDA=0;;
    "" ) echo "No CUDA support will be enabled for TensorFlow"; TF_NEED_CUDA=0;;
    * ) echo "Invalid selection: " $INPUT;;
  esac
done

export TF_NEED_CUDA
export TF_NEED_OPENCL

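# Every value accepted below is also written to .bazelrc as a
# "build --action_env ..." line, so later bazel invocations reuse the same
# settings without re-running ./configure.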
if [ "$TF_NEED_CUDA" == "1" ]; then
|
|
# Set up which gcc nvcc should use as the host compiler
|
|
# No need to set this on Windows
|
|
while ! is_windows && true; do
|
|
fromuser=""
|
|
if [ -z "$GCC_HOST_COMPILER_PATH" ]; then
|
|
default_gcc_host_compiler_path=$(which gcc || true)
|
|
read -p "Please specify which gcc should be used by nvcc as the host compiler. [Default is $default_gcc_host_compiler_path]: " GCC_HOST_COMPILER_PATH
|
|
fromuser="1"
|
|
if [ -z "$GCC_HOST_COMPILER_PATH" ]; then
|
|
GCC_HOST_COMPILER_PATH="$default_gcc_host_compiler_path"
|
|
fi
|
|
fi
|
|
if [ -e "$GCC_HOST_COMPILER_PATH" ]; then
|
|
export GCC_HOST_COMPILER_PATH
|
|
echo "build --action_env GCC_HOST_COMPILER_PATH=$GCC_HOST_COMPILER_PATH" >>.bazelrc
|
|
break
|
|
fi
|
|
echo "Invalid gcc path. ${GCC_HOST_COMPILER_PATH} cannot be found" 1>&2
|
|
if [ -z "$fromuser" ]; then
|
|
exit 1
|
|
fi
|
|
GCC_HOST_COMPILER_PATH=""
|
|
# Retry
|
|
done
|
|
|
|
# Find out where the CUDA toolkit is installed
|
|
while true; do
|
|
# Configure the Cuda SDK version to use.
|
|
if [ -z "$TF_CUDA_VERSION" ]; then
|
|
read -p "Please specify the CUDA SDK version you want to use, e.g. 7.0. [Leave empty to use system default]: " TF_CUDA_VERSION
|
|
fi
|
|
|
|
fromuser=""
|
|
if [ -z "$CUDA_TOOLKIT_PATH" ]; then
|
|
default_cuda_path=/usr/local/cuda
|
|
if is_windows; then
|
|
if [ -z "$CUDA_PATH" ]; then
|
|
default_cuda_path="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v8.0"
|
|
else
|
|
default_cuda_path="$(cygpath -m "$CUDA_PATH")"
|
|
fi
|
|
fi
|
|
read -p "Please specify the location where CUDA $TF_CUDA_VERSION toolkit is installed. Refer to README.md for more details. [Default is $default_cuda_path]: " CUDA_TOOLKIT_PATH
|
|
fromuser="1"
|
|
if [ -z "$CUDA_TOOLKIT_PATH" ]; then
|
|
CUDA_TOOLKIT_PATH="$default_cuda_path"
|
|
fi
|
|
fi
|
|
|
|
if [[ -z "$TF_CUDA_VERSION" ]]; then
|
|
TF_CUDA_EXT=""
|
|
else
|
|
TF_CUDA_EXT=".$TF_CUDA_VERSION"
|
|
fi
|
|
|
|
if is_windows; then
|
|
CUDA_RT_LIB_PATH="lib/x64/cudart.lib"
|
|
elif is_linux; then
|
|
CUDA_RT_LIB_PATH="lib64/libcudart.so${TF_CUDA_EXT}"
|
|
elif is_macos; then
|
|
CUDA_RT_LIB_PATH="lib/libcudart${TF_CUDA_EXT}.dylib"
|
|
fi
|
|
|
|
if [ -e "${CUDA_TOOLKIT_PATH}/${CUDA_RT_LIB_PATH}" ]; then
|
|
export CUDA_TOOLKIT_PATH
|
|
echo "build --action_env CUDA_TOOLKIT_PATH=$CUDA_TOOLKIT_PATH" >>.bazelrc
|
|
export TF_CUDA_VERSION
|
|
echo "build --action_env TF_CUDA_VERSION=$TF_CUDA_VERSION" >>.bazelrc
|
|
break
|
|
fi
|
|
echo "Invalid path to CUDA $TF_CUDA_VERSION toolkit. ${CUDA_TOOLKIT_PATH}/${CUDA_RT_LIB_PATH} cannot be found"
|
|
|
|
if [ -z "$fromuser" ]; then
|
|
exit 1
|
|
fi
|
|
# Retry
|
|
TF_CUDA_VERSION=""
|
|
CUDA_TOOLKIT_PATH=""
|
|
done
|
|
|
|
# Find out where the cuDNN library is installed
while true; do
  # Configure the cuDNN version to use.
  if [ -z "$TF_CUDNN_VERSION" ]; then
    read -p "Please specify the cuDNN version you want to use. [Leave empty to use system default]: " TF_CUDNN_VERSION
  fi

  fromuser=""
  if [ -z "$CUDNN_INSTALL_PATH" ]; then
    default_cudnn_path=${CUDA_TOOLKIT_PATH}
    read -p "Please specify the location where cuDNN $TF_CUDNN_VERSION library is installed. Refer to README.md for more details. [Default is $default_cudnn_path]: " CUDNN_INSTALL_PATH
    fromuser="1"
    if [ -z "$CUDNN_INSTALL_PATH" ]; then
      CUDNN_INSTALL_PATH=$default_cudnn_path
    fi
    # The result returned from "read" is used unexpanded, which makes "~" unusable.
    # Go through one more level of expansion to handle that.
    CUDNN_INSTALL_PATH=`"${PYTHON_BIN_PATH}" -c "import os; print(os.path.realpath(os.path.expanduser('${CUDNN_INSTALL_PATH}')))"`
  fi

  if [[ -z "$TF_CUDNN_VERSION" ]]; then
    TF_CUDNN_EXT=""
  else
    TF_CUDNN_EXT=".$TF_CUDNN_VERSION"
  fi

  if is_windows; then
    CUDA_DNN_LIB_PATH="lib/x64/cudnn.lib"
    CUDA_DNN_LIB_ALT_PATH="lib/x64/cudnn.lib"
  elif is_linux; then
    CUDA_DNN_LIB_PATH="lib64/libcudnn.so${TF_CUDNN_EXT}"
    CUDA_DNN_LIB_ALT_PATH="libcudnn.so${TF_CUDNN_EXT}"
  elif is_macos; then
    CUDA_DNN_LIB_PATH="lib/libcudnn${TF_CUDNN_EXT}.dylib"
    CUDA_DNN_LIB_ALT_PATH="libcudnn${TF_CUDNN_EXT}.dylib"
  fi

  if [ -e "$CUDNN_INSTALL_PATH/${CUDA_DNN_LIB_ALT_PATH}" -o -e "$CUDNN_INSTALL_PATH/${CUDA_DNN_LIB_PATH}" ]; then
    export TF_CUDNN_VERSION
    echo "build --action_env TF_CUDNN_VERSION=$TF_CUDNN_VERSION" >>.bazelrc
    export CUDNN_INSTALL_PATH
    echo "build --action_env CUDNN_INSTALL_PATH=$CUDNN_INSTALL_PATH" >>.bazelrc
    break
  fi

  if is_linux; then
    if ! type ldconfig > /dev/null 2>&1; then
      LDCONFIG_BIN=/sbin/ldconfig
    else
      LDCONFIG_BIN=ldconfig
    fi
    CUDNN_PATH_FROM_LDCONFIG="$($LDCONFIG_BIN -p | sed -n 's/.*libcudnn.so .* => \(.*\)/\1/p')"
    if [ -e "${CUDNN_PATH_FROM_LDCONFIG}${TF_CUDNN_EXT}" ]; then
      export TF_CUDNN_VERSION
      echo "build --action_env TF_CUDNN_VERSION=$TF_CUDNN_VERSION" >>.bazelrc
      export CUDNN_INSTALL_PATH="$(dirname ${CUDNN_PATH_FROM_LDCONFIG})"
      echo "build --action_env CUDNN_INSTALL_PATH=$CUDNN_INSTALL_PATH" >>.bazelrc
      break
    fi
  fi
  echo "Invalid path to cuDNN ${TF_CUDNN_VERSION} toolkit. Neither of the following two files can be found:"
  echo "${CUDNN_INSTALL_PATH}/${CUDA_DNN_LIB_PATH}"
  echo "${CUDNN_INSTALL_PATH}/${CUDA_DNN_LIB_ALT_PATH}"
  if is_linux; then
    echo "${CUDNN_PATH_FROM_LDCONFIG}${TF_CUDNN_EXT}"
  fi

  if [ -z "$fromuser" ]; then
    exit 1
  fi
  # Retry
  TF_CUDNN_VERSION=""
  CUDNN_INSTALL_PATH=""
done

# Configure the compute capabilities that TensorFlow builds for.
# Since the CUDA toolkit is not backward compatible, this is not guaranteed to work.
while true; do
  fromuser=""
  default_cuda_compute_capabilities="3.5,5.2"
  if [ -z "$TF_CUDA_COMPUTE_CAPABILITIES" ]; then
    cat << EOF
Please specify a list of comma-separated Cuda compute capabilities you want to build with.
You can find the compute capability of your device at: https://developer.nvidia.com/cuda-gpus.
Please note that each additional compute capability significantly increases your build time and binary size.
EOF
    read -p "[Default is: \"3.5,5.2\"]: " TF_CUDA_COMPUTE_CAPABILITIES
    fromuser=1
  fi
  if [ -z "$TF_CUDA_COMPUTE_CAPABILITIES" ]; then
    TF_CUDA_COMPUTE_CAPABILITIES=$default_cuda_compute_capabilities
  fi
  # Check whether all capabilities from the input are valid
  COMPUTE_CAPABILITIES=${TF_CUDA_COMPUTE_CAPABILITIES//,/ }
  ALL_VALID=1
  for CAPABILITY in $COMPUTE_CAPABILITIES; do
    if [[ ! "$CAPABILITY" =~ [0-9]+\.[0-9]+ ]]; then
      echo "Invalid compute capability: " $CAPABILITY
      ALL_VALID=0
      break
    fi
  done
  if [ "$ALL_VALID" == "0" ]; then
    if [ -z "$fromuser" ]; then
      exit 1
    fi
  else
    export TF_CUDA_COMPUTE_CAPABILITIES
    echo "build --action_env TF_CUDA_COMPUTE_CAPABILITIES=$TF_CUDA_COMPUTE_CAPABILITIES" >>.bazelrc
    break
  fi
  TF_CUDA_COMPUTE_CAPABILITIES=""
done

if is_windows; then
  # The following three variables are needed for MSVC toolchain configuration in Bazel
  export CUDA_PATH="$CUDA_TOOLKIT_PATH"
  export CUDA_COMPUTE_CAPABILITIES="$TF_CUDA_COMPUTE_CAPABILITIES"
  export NO_WHOLE_ARCHIVE_OPTION=1

  # Set GCC_HOST_COMPILER_PATH to keep cuda_configure.bzl happy
  export GCC_HOST_COMPILER_PATH="/usr/bin/dummy_compiler"
fi

# end of if "$TF_NEED_CUDA" == "1"
fi

# OpenCL configuration

if [ "$TF_NEED_OPENCL" == "1" ]; then

# Determine which C++ compiler should be used as the host compiler
while true; do
  fromuser=""
  if [ -z "$HOST_CXX_COMPILER" ]; then
    default_cxx_host_compiler=$(which clang++-3.6 || true)
    read -p "Please specify which C++ compiler should be used as the host C++ compiler. [Default is $default_cxx_host_compiler]: " HOST_CXX_COMPILER
    fromuser="1"
    if [ -z "$HOST_CXX_COMPILER" ]; then
      HOST_CXX_COMPILER=$default_cxx_host_compiler
    fi
  fi
  if [ -e "$HOST_CXX_COMPILER" ]; then
    export HOST_CXX_COMPILER
    break
  fi
  echo "Invalid C++ compiler path. ${HOST_CXX_COMPILER} cannot be found" 1>&2
  if [ -z "$fromuser" ]; then
    exit 1
  fi
  HOST_CXX_COMPILER=""
  # Retry
done

# Determine which C compiler should be used as the host compiler
while true; do
  fromuser=""
  if [ -z "$HOST_C_COMPILER" ]; then
    default_c_host_compiler=$(which clang-3.6 || true)
    read -p "Please specify which C compiler should be used as the host C compiler. [Default is $default_c_host_compiler]: " HOST_C_COMPILER
    fromuser="1"
    if [ -z "$HOST_C_COMPILER" ]; then
      HOST_C_COMPILER=$default_c_host_compiler
    fi
  fi
  if [ -e "$HOST_C_COMPILER" ]; then
    export HOST_C_COMPILER
    break
  fi
  echo "Invalid C compiler path. ${HOST_C_COMPILER} cannot be found" 1>&2
  if [ -z "$fromuser" ]; then
    exit 1
  fi
  HOST_C_COMPILER=""
  # Retry
done

while true; do
  # Configure the OPENCL version to use.
  TF_OPENCL_VERSION="1.2"

  # Point to ComputeCpp root
  if [ -z "$COMPUTECPP_TOOLKIT_PATH" ]; then
    default_computecpp_toolkit_path=/usr/local/computecpp
    read -p "Please specify the location where ComputeCpp for SYCL $TF_OPENCL_VERSION is installed. [Default is $default_computecpp_toolkit_path]: " COMPUTECPP_TOOLKIT_PATH
    fromuser="1"
    if [ -z "$COMPUTECPP_TOOLKIT_PATH" ]; then
      COMPUTECPP_TOOLKIT_PATH=$default_computecpp_toolkit_path
    fi
  fi

  if is_linux; then
    SYCL_RT_LIB_PATH="lib/libComputeCpp.so"
  fi

  if [ -e "${COMPUTECPP_TOOLKIT_PATH}/${SYCL_RT_LIB_PATH}" ]; then
    export COMPUTECPP_TOOLKIT_PATH
    break
  fi
  echo "Invalid SYCL $TF_OPENCL_VERSION library path. ${COMPUTECPP_TOOLKIT_PATH}/${SYCL_RT_LIB_PATH} cannot be found"

  if [ -z "$fromuser" ]; then
    exit 1
  fi
  # Retry
  TF_OPENCL_VERSION=""
  COMPUTECPP_TOOLKIT_PATH=""
done

# end of if "$TF_NEED_OPENCL" == "1"
fi

echo "Configuration finished"
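# At this point .bazelrc contains the chosen --define flags and --action_env
# settings; they apply to every subsequent bazel invocation until ./configure
# is run again.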