Upgrade tensorflow to cuda 8.0
Change: 136158550
commit 60bb54e311
parent 185557db24
@@ -1,4 +1,4 @@
-FROM nvidia/cuda:7.5-cudnn5-devel
+FROM nvidia/cuda:8.0-cudnn5-devel
 
 MAINTAINER Jan Prach <jendap@google.com>
 
@@ -15,13 +15,8 @@ RUN /install/install_bazel.sh
 # Set up bazelrc.
 COPY install/.bazelrc /root/.bazelrc
 ENV BAZELRC /root/.bazelrc
-
-# Set up CUDA variables
-ENV CUDA_PATH /usr/local/cuda
-ENV LD_LIBRARY_PATH /usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64
+ENV LD_LIBRARY_PATH /usr/local/cuda/extras/CUPTI/lib64:$LD_LIBRARY_PATH
 
 # Configure the build for our CUDA configuration.
-ENV CUDA_TOOLKIT_PATH /usr/local/cuda
-ENV CUDNN_INSTALL_PATH /usr/lib/x86_64-linux-gnu
 ENV TF_NEED_CUDA 1
-ENV TF_CUDA_COMPUTE_CAPABILITIES 3.0,5.2
+ENV TF_CUDA_COMPUTE_CAPABILITIES 3.0
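
Note (not part of the diff): the remaining ENV lines are the knobs TensorFlow's CUDA configure step keys off inside this CI image. A minimal sketch of exercising the updated image, assuming the Dockerfile above is saved as Dockerfile.gpu (file name and image tag are illustrative, not from the commit):

  # Build the CI image and confirm it now carries the CUDA 8.0 toolkit.
  docker build -t tf-ci-gpu -f Dockerfile.gpu .
  nvidia-docker run --rm tf-ci-gpu nvcc --version
  # The configure-related variables baked in via ENV should be visible:
  nvidia-docker run --rm tf-ci-gpu env | grep -E 'TF_NEED_CUDA|TF_CUDA_COMPUTE_CAPABILITIES'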
@@ -83,6 +83,13 @@ if [[ "${CONTAINER_TYPE}" == "cmake" ]]; then
   CI_COMMAND_PREFIX=""
 fi
 
+# Use nvidia-docker if the container is GPU.
+if [[ "${CONTAINER_TYPE}" == "gpu" ]]; then
+  DOCKER_BINARY="nvidia-docker"
+else
+  DOCKER_BINARY="docker"
+fi
+
 # Helper function to traverse directories up until given file is found.
 function upsearch () {
   test / == "$PWD" && return || \
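
The new DOCKER_BINARY switch is what lets the rest of the script run GPU containers through nvidia-docker, which injects the GPU devices and driver libraries on its own. A hedged sketch of the same idea in isolation (the image tag and the nvidia-smi check are illustrative, not taken from this script):

  CONTAINER_TYPE="gpu"                      # normally passed in by the CI job
  if [[ "${CONTAINER_TYPE}" == "gpu" ]]; then
    DOCKER_BINARY="nvidia-docker"           # wrapper adds /dev/nvidia* and libcuda for us
  else
    DOCKER_BINARY="docker"
  fi
  "${DOCKER_BINARY}" run --rm nvidia/cuda:8.0-cudnn5-devel nvidia-smi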
@@ -95,15 +102,9 @@ function upsearch () {
 WORKSPACE="${WORKSPACE:-$(upsearch WORKSPACE)}"
 BUILD_TAG="${BUILD_TAG:-tf_ci}"
 
-
 # Add extra params for cuda devices and libraries for GPU container.
 if [ "${CONTAINER_TYPE}" == "gpu" ]; then
-  devices=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}')
-  libs=$(\ls /usr/lib/x86_64-linux-gnu/libcuda.* | xargs -I{} echo '-v {}:{}')
-  GPU_EXTRA_PARAMS="${devices} ${libs}"
-
-  # GPU pip tests-on-install should avoid using concurrent jobs due to GPU
-  # resource contention
+  # GPU pip tests-on-install concurrency is limited to the number of GPUs.
   GPU_EXTRA_PARAMS="${GPU_EXTRA_PARAMS} -e TF_BUILD_SERIAL_INSTALL_TESTS=1"
 else
   GPU_EXTRA_PARAMS=""
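
The manual --device and -v plumbing deleted here is exactly what nvidia-docker now supplies, so the only GPU-specific extra left is an environment flag. A sketch of a hypothetical consumer of that flag inside the container (not part of this commit):

  # Serialize GPU tests when "-e TF_BUILD_SERIAL_INSTALL_TESTS=1" was passed to docker run.
  if [[ "${TF_BUILD_SERIAL_INSTALL_TESTS:-0}" == "1" ]]; then
    TEST_JOBS=1
  else
    TEST_JOBS=$(nproc)
  fi
  echo "Running install tests with ${TEST_JOBS} parallel job(s)"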
@@ -146,7 +147,7 @@ mkdir -p ${WORKSPACE}/bazel-ci_build-cache
 # By default we cleanup - remove the container once it finish running (--rm)
 # and share the PID namespace (--pid=host) so the process inside does not have
 # pid 1 and SIGKILL is propagated to the process inside (jenkins can kill it).
-docker run --rm --pid=host \
+${DOCKER_BINARY} run --rm --pid=host \
   -v ${WORKSPACE}/bazel-ci_build-cache:${WORKSPACE}/bazel-ci_build-cache \
   -e "CI_BUILD_HOME=${WORKSPACE}/bazel-ci_build-cache" \
   -e "CI_BUILD_USER=$(id -u --name)" \
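
With ${DOCKER_BINARY} substituted, the one invocation serves both CPU and GPU builds. Roughly what the GPU path now executes, with an illustrative WORKSPACE, image name, and trailing command standing in for the script's real arguments:

  WORKSPACE=/home/jenkins/workspace/tensorflow   # illustrative path
  nvidia-docker run --rm --pid=host \
    -v "${WORKSPACE}/bazel-ci_build-cache:${WORKSPACE}/bazel-ci_build-cache" \
    -e "CI_BUILD_HOME=${WORKSPACE}/bazel-ci_build-cache" \
    -e "CI_BUILD_USER=$(id -u --name)" \
    tf-ci-gpu bash -c 'echo "inside the CI container as ${CI_BUILD_USER}"'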
@@ -1,4 +1,4 @@
-FROM nvidia/cuda:7.5-cudnn5-devel
+FROM nvidia/cuda:8.0-cudnn5-devel
 
 MAINTAINER Craig Citro <craigcitro@google.com>
 
@@ -85,10 +85,7 @@ RUN git clone -b r0.11 --recursive --recurse-submodules https://github.com/tenso
 WORKDIR /tensorflow
 
 # Configure the build for our CUDA configuration.
-ENV CUDA_PATH /usr/local/cuda
-ENV CUDA_TOOLKIT_PATH /usr/local/cuda
-ENV CUDNN_INSTALL_PATH /usr/lib/x86_64-linux-gnu
-ENV LD_LIBRARY_PATH /usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64
+ENV LD_LIBRARY_PATH /usr/local/cuda/extras/CUPTI/lib64:$LD_LIBRARY_PATH
 ENV TF_NEED_CUDA 1
 ENV TF_CUDA_COMPUTE_CAPABILITIES=3.0,3.5,5.2
 
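
TF_CUDA_COMPUTE_CAPABILITIES=3.0,3.5,5.2 keeps Kepler (3.0, 3.5) and Maxwell (5.2) targets in this image. As a rough illustration (not taken from the commit), each listed capability ultimately becomes an nvcc code-generation target along these lines:

  # Map compute capabilities to nvcc -gencode flags (illustration only).
  for cap in 3.0 3.5 5.2; do
    sm="${cap/./}"                    # "3.5" -> "35"
    echo "-gencode arch=compute_${sm},code=sm_${sm}"
  done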
@@ -1,4 +1,4 @@
-FROM nvidia/cuda:7.5-cudnn5-devel
+FROM nvidia/cuda:8.0-cudnn5-devel
 
 MAINTAINER Craig Citro <craigcitro@google.com>
 