Merge pull request #26113 from angersson:angerson-remove-old-dockerfiles
PiperOrigin-RevId: 245969020
commit 4891a7a33b
tensorflow/tools/ci_build/update_version.py
@@ -36,15 +36,7 @@ VERSION_H = "%s/core/public/version.h" % TF_SRC_DIR
 SETUP_PY = "%s/tools/pip_package/setup.py" % TF_SRC_DIR
 README_MD = "./README.md"
 TENSORFLOW_BZL = "%s/tensorflow.bzl" % TF_SRC_DIR
-DEVEL_DOCKERFILE = "%s/tools/docker/Dockerfile.devel" % TF_SRC_DIR
-GPU_DEVEL_DOCKERFILE = "%s/tools/docker/Dockerfile.devel-gpu" % TF_SRC_DIR
-CPU_MKL_DEVEL_DOCKERFILE = "%s/tools/docker/Dockerfile.devel-mkl" % TF_SRC_DIR
-RELEVANT_FILES = [TF_SRC_DIR,
-                  VERSION_H,
-                  SETUP_PY,
-                  README_MD,
-                  DEVEL_DOCKERFILE,
-                  GPU_DEVEL_DOCKERFILE]
+RELEVANT_FILES = [TF_SRC_DIR, VERSION_H, SETUP_PY, README_MD]
 
 # Version type parameters.
 NIGHTLY_VERSION = 1
@@ -238,24 +230,6 @@ def major_minor_change(old_version, new_version):
   return False
 
 
-def update_dockerfiles(old_version, new_version):
-  """Update dockerfiles if there was a major change."""
-  if major_minor_change(old_version, new_version):
-    old_r_major_minor = "r%s.%s" % (old_version.major, old_version.minor)
-    r_major_minor = "r%s.%s" % (new_version.major, new_version.minor)
-
-    print("Detected Major.Minor change.")
-    print("Updating pattern %s to %s in additional files"
-          % (old_r_major_minor, r_major_minor))
-
-    # Update dockerfiles
-    replace_string_in_line(old_r_major_minor, r_major_minor, DEVEL_DOCKERFILE)
-    replace_string_in_line(old_r_major_minor, r_major_minor,
-                           GPU_DEVEL_DOCKERFILE)
-    replace_string_in_line(old_r_major_minor, r_major_minor,
-                           CPU_MKL_DEVEL_DOCKERFILE)
-
-
 def check_for_lingering_string(lingering_string):
   """Check for given lingering strings."""
   formatted_string = lingering_string.replace(".", r"\.")
@@ -333,7 +307,6 @@ def main():
   update_version_h(old_version, new_version)
   update_setup_dot_py(old_version, new_version)
   update_readme(old_version, new_version)
-  update_dockerfiles(old_version, new_version)
   update_tensorflow_bzl(old_version, new_version)
 
   # Print transition details.
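For context, the removed `update_dockerfiles` helper ran only on a Major.Minor version change and rewrote the `rX.Y` release-branch tag embedded in the three devel Dockerfiles via `replace_string_in_line`. A rough shell equivalent of that substitution, assuming a hypothetical bump from r1.13 to r1.14 (the file list comes from the constants deleted in the first hunk):

```bash
# Rewrite the release-branch tag in the devel Dockerfiles, mirroring what
# update_dockerfiles() did through replace_string_in_line().
OLD="r1.13"  # old Major.Minor branch tag (hypothetical example)
NEW="r1.14"  # new Major.Minor branch tag (hypothetical example)
for f in tensorflow/tools/docker/Dockerfile.devel \
         tensorflow/tools/docker/Dockerfile.devel-gpu \
         tensorflow/tools/docker/Dockerfile.devel-mkl; do
  sed -i "s/${OLD}/${NEW}/g" "$f"
done
```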
tensorflow/tools/docker/BUILD (deleted)
@@ -1,15 +0,0 @@
# Description:
#   Various tools and rules related to the TensorFlow docker container.

package(default_visibility = ["//visibility:private"])

licenses(["notice"])  # Apache 2.0

exports_files(["LICENSE"])

py_binary(
    name = "simple_console",
    srcs = ["simple_console.py"],
    srcs_version = "PY2AND3",
    deps = ["//tensorflow:tensorflow_py"],
)
tensorflow/tools/docker/Dockerfile (deleted)
@@ -1,73 +0,0 @@
FROM ubuntu:18.04

LABEL maintainer="Craig Citro <craigcitro@google.com>"

# Pick up some TF dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        curl \
        libfreetype6-dev \
        libhdf5-serial-dev \
        libpng-dev \
        libzmq3-dev \
        pkg-config \
        python \
        python-dev \
        rsync \
        software-properties-common \
        unzip \
        && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

RUN curl -O https://bootstrap.pypa.io/get-pip.py && \
    python get-pip.py && \
    rm get-pip.py

RUN pip --no-cache-dir install \
        Pillow \
        h5py \
        ipykernel \
        jupyter \
        keras_applications \
        keras_preprocessing \
        matplotlib \
        numpy \
        pandas \
        scipy \
        sklearn \
        && \
    python -m ipykernel.kernelspec

# --- DO NOT EDIT OR DELETE BETWEEN THE LINES --- #
# These lines will be edited automatically by parameterized_docker_build.sh. #
# COPY _PIP_FILE_ /
# RUN pip --no-cache-dir install /_PIP_FILE_
# RUN rm -f /_PIP_FILE_

# Install TensorFlow CPU version from central repo
RUN pip --no-cache-dir install \
    http://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.0.0-cp27-none-linux_x86_64.whl
# --- ~ DO NOT EDIT OR DELETE BETWEEN THE LINES --- #

# RUN ln -s -f /usr/bin/python3 /usr/bin/python#

# Set up our notebook config.
COPY jupyter_notebook_config.py /root/.jupyter/

# Copy sample notebooks.
COPY notebooks /notebooks

# Jupyter has issues with being run directly:
# https://github.com/ipython/ipython/issues/7062
# We just add a little wrapper script.
COPY run_jupyter.sh /

# TensorBoard
EXPOSE 6006
# IPython
EXPOSE 8888

WORKDIR "/notebooks"

CMD ["/run_jupyter.sh", "--allow-root"]
tensorflow/tools/docker/Dockerfile.devel (deleted)
@@ -1,108 +0,0 @@
FROM ubuntu:18.04

LABEL maintainer="Craig Citro <craigcitro@google.com>"

RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        curl \
        git \
        libcurl3-dev \
        libfreetype6-dev \
        libhdf5-serial-dev \
        libpng-dev \
        libzmq3-dev \
        pkg-config \
        python-dev \
        rsync \
        software-properties-common \
        unzip \
        zip \
        zlib1g-dev \
        openjdk-8-jdk \
        openjdk-8-jre-headless \
        && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

RUN curl -fSsL -O https://bootstrap.pypa.io/get-pip.py && \
    python get-pip.py && \
    rm get-pip.py

RUN pip --no-cache-dir install \
        Pillow \
        h5py \
        ipykernel \
        jupyter \
        keras_applications \
        keras_preprocessing \
        matplotlib \
        mock \
        numpy \
        scipy \
        sklearn \
        pandas \
        && \
    python -m ipykernel.kernelspec

# RUN ln -s -f /usr/bin/python3 /usr/bin/python#

# Set up our notebook config.
COPY jupyter_notebook_config.py /root/.jupyter/

# Jupyter has issues with being run directly:
# https://github.com/ipython/ipython/issues/7062
# We just add a little wrapper script.
COPY run_jupyter.sh /

# Set up Bazel.

# Running bazel inside a `docker build` command causes trouble, cf:
# https://github.com/bazelbuild/bazel/issues/134
# The easiest solution is to set up a bazelrc file forcing --batch.
RUN echo "startup --batch" >>/etc/bazel.bazelrc
# Similarly, we need to workaround sandboxing issues:
# https://github.com/bazelbuild/bazel/issues/418
RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
    >>/etc/bazel.bazelrc
# Install the most recent bazel release.
ENV BAZEL_VERSION 0.20.0
WORKDIR /
RUN mkdir /bazel && \
    cd /bazel && \
    curl -H "User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36" -fSsL -O https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
    curl -H "User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36" -fSsL -o /bazel/LICENSE.txt https://raw.githubusercontent.com/bazelbuild/bazel/master/LICENSE && \
    chmod +x bazel-*.sh && \
    ./bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
    cd / && \
    rm -f /bazel/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh

# Download and build TensorFlow.
WORKDIR /tensorflow
RUN git clone --branch=r1.13 --depth=1 https://github.com/tensorflow/tensorflow.git .

# TODO(craigcitro): Don't install the pip package, since it makes it
# more difficult to experiment with local changes. Instead, just add
# the built directory to the path.

ENV CI_BUILD_PYTHON python

RUN tensorflow/tools/ci_build/builds/configured CPU \
    bazel build -c opt --copt=-mavx --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" \
        # For optimized builds appropriate for the hardware platform of your choosing, uncomment below...
        # For ivy-bridge or sandy-bridge
        # --copt=-march="ivybridge" \
        # for haswell, broadwell, or skylake
        # --copt=-march="haswell" \
        tensorflow/tools/pip_package:build_pip_package && \
    bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/pip && \
    pip --no-cache-dir install --upgrade /tmp/pip/tensorflow-*.whl && \
    rm -rf /tmp/pip && \
    rm -rf /root/.cache
# Clean up pip wheel and Bazel cache when done.

# TensorBoard
EXPOSE 6006
# IPython
EXPOSE 8888

WORKDIR /root
tensorflow/tools/docker/Dockerfile.devel-gpu (deleted)
@@ -1,125 +0,0 @@
FROM nvidia/cuda:9.0-base-ubuntu16.04

LABEL maintainer="Craig Citro <craigcitro@google.com>"

RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        cuda-command-line-tools-9-0 \
        cuda-cublas-dev-9-0 \
        cuda-cudart-dev-9-0 \
        cuda-cufft-dev-9-0 \
        cuda-curand-dev-9-0 \
        cuda-cusolver-dev-9-0 \
        cuda-cusparse-dev-9-0 \
        curl \
        git \
        libcudnn7=7.2.1.38-1+cuda9.0 \
        libcudnn7-dev=7.2.1.38-1+cuda9.0 \
        libcurl3-dev \
        libfreetype6-dev \
        libhdf5-serial-dev \
        libpng12-dev \
        libzmq3-dev \
        pkg-config \
        python-dev \
        rsync \
        software-properties-common \
        unzip \
        zip \
        zlib1g-dev \
        wget \
        && \
    rm -rf /var/lib/apt/lists/* && \
    find /usr/local/cuda-9.0/lib64/ -type f -name 'lib*_static.a' -not -name 'libcudart_static.a' -delete && \
    rm /usr/lib/x86_64-linux-gnu/libcudnn_static_v7.a

RUN apt-get update && \
        apt-get install nvinfer-runtime-trt-repo-ubuntu1604-4.0.1-ga-cuda9.0 && \
        apt-get update && \
        apt-get install libnvinfer4=4.1.2-1+cuda9.0 && \
        apt-get install libnvinfer-dev=4.1.2-1+cuda9.0

RUN curl -fSsL -O https://bootstrap.pypa.io/get-pip.py && \
    python get-pip.py && \
    rm get-pip.py

RUN pip --no-cache-dir install \
        Pillow \
        h5py \
        ipykernel \
        jupyter \
        keras_applications \
        keras_preprocessing \
        matplotlib \
        mock \
        numpy \
        scipy \
        sklearn \
        pandas \
        && \
    python -m ipykernel.kernelspec

# RUN ln -s -f /usr/bin/python3 /usr/bin/python#

# Set up our notebook config.
COPY jupyter_notebook_config.py /root/.jupyter/

# Jupyter has issues with being run directly:
# https://github.com/ipython/ipython/issues/7062
# We just add a little wrapper script.
COPY run_jupyter.sh /

# Set up Bazel.

# Running bazel inside a `docker build` command causes trouble, cf:
# https://github.com/bazelbuild/bazel/issues/134
# The easiest solution is to set up a bazelrc file forcing --batch.
RUN echo "startup --batch" >>/etc/bazel.bazelrc
# Similarly, we need to workaround sandboxing issues:
# https://github.com/bazelbuild/bazel/issues/418
RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
    >>/etc/bazel.bazelrc
# Install the most recent bazel release.
ENV BAZEL_VERSION 0.20.0
WORKDIR /
RUN mkdir /bazel && \
    cd /bazel && \
    curl -H "User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36" -fSsL -O https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
    curl -H "User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36" -fSsL -o /bazel/LICENSE.txt https://raw.githubusercontent.com/bazelbuild/bazel/master/LICENSE && \
    chmod +x bazel-*.sh && \
    ./bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
    cd / && \
    rm -f /bazel/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh

# Download and build TensorFlow.
WORKDIR /tensorflow
RUN git clone --branch=r1.13 --depth=1 https://github.com/tensorflow/tensorflow.git .

# Configure the build for our CUDA configuration.
ENV CI_BUILD_PYTHON python
ENV LD_LIBRARY_PATH /usr/local/cuda/extras/CUPTI/lib64:$LD_LIBRARY_PATH
ENV TF_NEED_CUDA 1
ENV TF_NEED_TENSORRT 1
ENV TF_CUDA_COMPUTE_CAPABILITIES=3.5,5.2,6.0,6.1,7.0
ENV TF_CUDA_VERSION=9.0
ENV TF_CUDNN_VERSION=7

RUN ln -s /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/cuda/lib64/stubs/libcuda.so.1 && \
    LD_LIBRARY_PATH=/usr/local/cuda/lib64/stubs:${LD_LIBRARY_PATH} \
    tensorflow/tools/ci_build/builds/configured GPU \
    bazel build -c opt --copt=-mavx --config=cuda \
        --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" \
        tensorflow/tools/pip_package:build_pip_package && \
    rm /usr/local/cuda/lib64/stubs/libcuda.so.1 && \
    bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/pip && \
    pip --no-cache-dir install --upgrade /tmp/pip/tensorflow-*.whl && \
    rm -rf /tmp/pip && \
    rm -rf /root/.cache
# Clean up pip wheel and Bazel cache when done.

WORKDIR /root

# TensorBoard
EXPOSE 6006
# IPython
EXPOSE 8888
tensorflow/tools/docker/Dockerfile.devel-mkl (deleted)
@@ -1,130 +0,0 @@
FROM ubuntu:18.04

LABEL maintainer="Clayne Robison <clayne.b.robison@intel.com>"

# These parameters can be overridden by parameterized_docker_build.sh
ARG TF_BUILD_VERSION=r1.13
ARG PYTHON="python"
ARG PYTHON3_DEV=""
ARG WHL_DIR="/tmp/pip"
ARG PIP="pip"

RUN apt-get update && apt-get install -y --no-install-recommends --fix-missing \
        ${PYTHON} \
        ${PYTHON}-dev \
        ${PYTHON}-pip \
        ${PYTHON}-setuptools \
        ${PYTHON}-wheel \
        build-essential \
        curl \
        git \
        libcurl3-dev \
        libfreetype6-dev \
        libhdf5-serial-dev \
        libpng-dev \
        libssl-dev \
        libzmq3-dev \
        openjdk-8-jdk \
        openjdk-8-jre-headless \
        pkg-config \
        rsync \
        software-properties-common \
        unzip \
        zip \
        zlib1g-dev \
        && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*


RUN ${PIP} --no-cache-dir install \
        Pillow \
        h5py \
        ipykernel \
        jupyter \
        keras_applications \
        keras_preprocessing \
        matplotlib \
        mock \
        numpy \
        pandas \
        scipy \
        sklearn \
        && \
    ${PYTHON} -m ipykernel.kernelspec


# Set up our notebook config.
COPY jupyter_notebook_config.py /root/.jupyter/

# Jupyter has issues with being run directly:
# https://github.com/ipython/ipython/issues/7062
# We just add a little wrapper script.
COPY run_jupyter.sh /

# Set up Bazel.

# Running bazel inside a `docker build` command causes trouble, cf:
# https://github.com/bazelbuild/bazel/issues/134
# The easiest solution is to set up a bazelrc file forcing --batch.
RUN echo "startup --batch" >>/etc/bazel.bazelrc
# Similarly, we need to workaround sandboxing issues:
# https://github.com/bazelbuild/bazel/issues/418
RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
    >>/etc/bazel.bazelrc
# Install the most recent bazel release.
ENV BAZEL_VERSION 0.20.0
WORKDIR /
RUN mkdir /bazel && \
    cd /bazel && \
    curl -H "User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36" -fSsL -O https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
    curl -H "User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36" -fSsL -o /bazel/LICENSE.txt https://raw.githubusercontent.com/bazelbuild/bazel/master/LICENSE && \
    chmod +x bazel-*.sh && \
    ./bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
    cd / && \
    rm -f /bazel/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh

# Download and build TensorFlow.
WORKDIR /tensorflow

# Download and build TensorFlow.
# Enable checking out both tags and branches
RUN export TAG_PREFIX="v" && \
    echo ${TF_BUILD_VERSION} | grep -q ^${TAG_PREFIX}; \
    if [ $? -eq 0 ]; then \
        git clone --depth=1 https://github.com/tensorflow/tensorflow.git . && \
        git fetch --tags && \
        git checkout ${TF_BUILD_VERSION}; \
    else \
        git clone --depth=1 --branch=${TF_BUILD_VERSION} https://github.com/tensorflow/tensorflow.git . ; \
    fi

RUN yes "" | ${PYTHON} configure.py
RUN cp .bazelrc /root/.bazelrc

ENV CI_BUILD_PYTHON ${PYTHON}

# Set bazel build parameters in .bazelrc in parameterized_docker_build.sh
# Use --copt=-march values to get optimized builds appropriate for the hardware
# platform of your choice.
# For ivy-bridge or sandy-bridge
# --copt=-march="avx" \
# For haswell, broadwell, or skylake
# --copt=-march="avx2" \
COPY .bazelrc /root/.mkl.bazelrc
RUN echo "import /root/.mkl.bazelrc" >>/root/.bazelrc

RUN tensorflow/tools/ci_build/builds/configured CPU \
    bazel --bazelrc=/root/.bazelrc build -c opt \
        tensorflow/tools/pip_package:build_pip_package && \
    bazel-bin/tensorflow/tools/pip_package/build_pip_package "${WHL_DIR}" && \
    ${PIP} --no-cache-dir install --upgrade "${WHL_DIR}"/tensorflow-*.whl && \
    rm -rf /root/.cache
# Clean up Bazel cache when done.

# TensorBoard
EXPOSE 6006
# IPython
EXPOSE 8888

WORKDIR /root
tensorflow/tools/docker/Dockerfile.devel-mkl-horovod (deleted)
@@ -1,168 +0,0 @@
FROM ubuntu:18.04

LABEL maintainer="Cong Xu <cong.xu@intel.com>"

# These parameters can be overridden by parameterized_docker_build.sh
ARG TF_BUILD_VERSION=r1.13
ARG PYTHON="python"
ARG PYTHON3_DEV=""
ARG WHL_DIR="/tmp/pip"
ARG PIP="pip"


RUN apt-get update && apt-get install -y --no-install-recommends --fix-missing \
        ${PYTHON} \
        ${PYTHON}-dev \
        ${PYTHON}-pip \
        ${PYTHON}-setuptools \
        ${PYTHON}-wheel \
        build-essential \
        curl \
        git \
        libcurl3-dev \
        libfreetype6-dev \
        libhdf5-serial-dev \
        libnuma-dev \
        libpng-dev \
        libzmq3-dev \
        openjdk-8-jdk \
        openjdk-8-jre-headless \
        openssh-client \
        openssh-server \
        pkg-config \
        rsync \
        software-properties-common \
        unzip \
        wget \
        zip \
        zlib1g-dev \
        && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*


RUN ${PIP} --no-cache-dir install \
        Pillow \
        h5py \
        ipykernel \
        jupyter \
        keras_applications \
        keras_preprocessing \
        matplotlib \
        mock \
        numpy \
        scipy \
        sklearn \
        pandas \
        && \
    ${PYTHON} -m ipykernel.kernelspec


# Set up our notebook config.
COPY jupyter_notebook_config.py /root/.jupyter/

# Jupyter has issues with being run directly:
# https://github.com/ipython/ipython/issues/7062
# We just add a little wrapper script.
COPY run_jupyter.sh /

# Set up Bazel.

# Running bazel inside a `docker build` command causes trouble, cf:
# https://github.com/bazelbuild/bazel/issues/134
# The easiest solution is to set up a bazelrc file forcing --batch.
RUN echo "startup --batch" >>/etc/bazel.bazelrc
# Similarly, we need to workaround sandboxing issues:
# https://github.com/bazelbuild/bazel/issues/418
RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
    >>/etc/bazel.bazelrc
# Install the most recent bazel release.
ENV BAZEL_VERSION 0.20.0
WORKDIR /
RUN mkdir /bazel && \
    cd /bazel && \
    curl -H "User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36" -fSsL -O https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
    curl -H "User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36" -fSsL -o /bazel/LICENSE.txt https://raw.githubusercontent.com/bazelbuild/bazel/master/LICENSE && \
    chmod +x bazel-*.sh && \
    ./bazel-$BAZEL_VERSION-installer-linux-x86_64.sh && \
    cd / && \
    rm -f /bazel/bazel-$BAZEL_VERSION-installer-linux-x86_64.sh

# Download and build TensorFlow.
WORKDIR /tensorflow

# Download and build TensorFlow.
# Enable checking out both tags and branches
RUN export TAG_PREFIX="v" && \
    echo ${TF_BUILD_VERSION} | grep -q ^${TAG_PREFIX}; \
    if [ $? -eq 0 ]; then \
        git clone --depth=1 https://github.com/tensorflow/tensorflow.git . && \
        git fetch --tags && \
        git checkout ${TF_BUILD_VERSION}; \
    else \
        git clone --depth=1 --branch=${TF_BUILD_VERSION} https://github.com/tensorflow/tensorflow.git . ; \
    fi

RUN yes "" | ${PYTHON} configure.py
RUN cp .bazelrc /root/.bazelrc

ENV CI_BUILD_PYTHON ${PYTHON}

# Set bazel build parameters in .bazelrc in parameterized_docker_build.sh
# Use --copt=-march values to get optimized builds appropriate for the hardware
# platform of your choice.
# For ivy-bridge or sandy-bridge
# --copt=-march="avx" \
# For haswell, broadwell, or skylake
# --copt=-march="avx2" \
COPY .bazelrc /root/.mkl.bazelrc
RUN echo "import /root/.mkl.bazelrc" >>/root/.bazelrc

RUN tensorflow/tools/ci_build/builds/configured CPU \
    bazel --bazelrc=/root/.bazelrc build -c opt \
        tensorflow/tools/pip_package:build_pip_package && \
    bazel-bin/tensorflow/tools/pip_package/build_pip_package "${WHL_DIR}" && \
    ${PIP} --no-cache-dir install --upgrade "${WHL_DIR}"/tensorflow-*.whl && \
    rm -rf /root/.cache
# Clean up Bazel cache when done.

WORKDIR /root

# Install Open MPI
RUN mkdir /tmp/openmpi && \
    cd /tmp/openmpi && \
    wget https://www.open-mpi.org/software/ompi/v3.0/downloads/openmpi-3.0.0.tar.gz && \
    tar zxf openmpi-3.0.0.tar.gz && \
    cd openmpi-3.0.0 && \
    ./configure --enable-orterun-prefix-by-default && \
    make -j $(nproc) all && \
    make install && \
    ldconfig && \
    rm -rf /tmp/openmpi

# Create a wrapper for OpenMPI to allow running as root by default
RUN mv /usr/local/bin/mpirun /usr/local/bin/mpirun.real && \
    echo '#!/bin/bash' > /usr/local/bin/mpirun && \
    echo 'mpirun.real --allow-run-as-root "$@"' >> /usr/local/bin/mpirun && \
    chmod a+x /usr/local/bin/mpirun

# Configure OpenMPI to run good defaults:
RUN echo "btl_tcp_if_exclude = lo,docker0" >> /usr/local/etc/openmpi-mca-params.conf

# Install Horovod
RUN ${PIP} install --no-cache-dir horovod

# Install OpenSSH for MPI to communicate between containers
RUN mkdir -p /var/run/sshd

# Allow OpenSSH to talk to containers without asking for confirmation
RUN cat /etc/ssh/ssh_config | grep -v StrictHostKeyChecking > /etc/ssh/ssh_config.new && \
    echo "    StrictHostKeyChecking no" >> /etc/ssh/ssh_config.new && \
    mv /etc/ssh/ssh_config.new /etc/ssh/ssh_config

# TensorBoard
EXPOSE 6006
# IPython
EXPOSE 8888

WORKDIR /root
tensorflow/tools/docker/Dockerfile.gpu (deleted)
@@ -1,89 +0,0 @@
FROM nvidia/cuda:9.0-base-ubuntu16.04

LABEL maintainer="Craig Citro <craigcitro@google.com>"

# Pick up some TF dependencies
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        cuda-command-line-tools-9-0 \
        cuda-cublas-9-0 \
        cuda-cufft-9-0 \
        cuda-curand-9-0 \
        cuda-cusolver-9-0 \
        cuda-cusparse-9-0 \
        curl \
        libcudnn7=7.2.1.38-1+cuda9.0 \
        libnccl2=2.2.13-1+cuda9.0 \
        libfreetype6-dev \
        libhdf5-serial-dev \
        libpng12-dev \
        libzmq3-dev \
        pkg-config \
        python \
        python-dev \
        rsync \
        software-properties-common \
        unzip \
        && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

RUN apt-get update && \
        apt-get install nvinfer-runtime-trt-repo-ubuntu1604-4.0.1-ga-cuda9.0 && \
        apt-get update && \
        apt-get install libnvinfer4=4.1.2-1+cuda9.0

RUN curl -O https://bootstrap.pypa.io/get-pip.py && \
    python get-pip.py && \
    rm get-pip.py

RUN pip --no-cache-dir install \
        Pillow \
        h5py \
        ipykernel \
        jupyter \
        keras_applications \
        keras_preprocessing \
        matplotlib \
        numpy \
        pandas \
        scipy \
        sklearn \
        && \
    python -m ipykernel.kernelspec

# --- DO NOT EDIT OR DELETE BETWEEN THE LINES --- #
# These lines will be edited automatically by parameterized_docker_build.sh. #
# COPY _PIP_FILE_ /
# RUN pip --no-cache-dir install /_PIP_FILE_
# RUN rm -f /_PIP_FILE_

# Install TensorFlow GPU version.
RUN pip --no-cache-dir install \
    http://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-0.0.0-cp27-none-linux_x86_64.whl
# --- ~ DO NOT EDIT OR DELETE BETWEEN THE LINES --- #

# RUN ln -s -f /usr/bin/python3 /usr/bin/python#

# Set up our notebook config.
COPY jupyter_notebook_config.py /root/.jupyter/

# Copy sample notebooks.
COPY notebooks /notebooks

# Jupyter has issues with being run directly:
# https://github.com/ipython/ipython/issues/7062
# We just add a little wrapper script.
COPY run_jupyter.sh /

# For CUDA profiling, TensorFlow requires CUPTI.
ENV LD_LIBRARY_PATH /usr/local/cuda/extras/CUPTI/lib64:$LD_LIBRARY_PATH

# TensorBoard
EXPOSE 6006
# IPython
EXPOSE 8888

WORKDIR "/notebooks"

CMD ["/run_jupyter.sh", "--allow-root"]
tensorflow/tools/docker/Dockerfile.mkl (deleted)
@@ -1,75 +0,0 @@
FROM ubuntu:18.04

LABEL maintainer="Clayne Robison <clayne.b.robison@intel.com>"

# This parameter MUST be set by parameterized_docker_build.sh
ARG TF_WHL_URL

# Optional parameters
ARG TF_BUILD_VERSION=r1.13
ARG PYTHON="python"
ARG PYTHON_DEV="python-dev"
ARG PIP="pip"

# Pick up some TF dependencies
RUN apt-get update && apt-get install -y --no-install-recommends --fix-missing \
        ${PYTHON} \
        ${PYTHON}-dev \
        ${PYTHON}-pip \
        ${PYTHON}-setuptools \
        ${PYTHON}-wheel \
        build-essential \
        curl \
        libfreetype6-dev \
        libhdf5-serial-dev \
        libpng-dev \
        libzmq3-dev \
        pkg-config \
        rsync \
        software-properties-common \
        unzip \
        && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*


RUN ${PIP} --no-cache-dir install \
        Pillow \
        h5py \
        ipykernel \
        jupyter \
        keras_applications \
        keras_preprocessing \
        matplotlib \
        numpy \
        pandas \
        scipy \
        sklearn \
        && \
    ${PYTHON} -m ipykernel.kernelspec


COPY ${TF_WHL_URL} /
RUN ${PIP} install --no-cache-dir --force-reinstall /${TF_WHL_URL} && \
    rm -rf /${TF_WHL_URL}


# Set up our notebook config.
COPY jupyter_notebook_config.py /root/.jupyter/

# Copy sample notebooks.
COPY notebooks /notebooks

# Jupyter has issues with being run directly:
# https://github.com/ipython/ipython/issues/7062
# We just add a little wrapper script.
COPY run_jupyter.sh /

# TensorBoard
EXPOSE 6006
# IPython
EXPOSE 8888

WORKDIR "/notebooks"

CMD ["/run_jupyter.sh", "--allow-root"]
tensorflow/tools/docker/Dockerfile.mkl-horovod (deleted)
@@ -1,113 +0,0 @@
FROM ubuntu:18.04

LABEL maintainer="Cong Xu <cong.xu@intel.com>"

# This parameter MUST be set by parameterized_docker_build.sh
ARG TF_WHL_URL

# Optional parameters
ARG TF_BUILD_VERSION=r1.13
ARG PYTHON="python"
ARG PYTHON_DEV="python-dev"
ARG PIP="pip"

# Pick up some TF dependencies
RUN apt-get update && apt-get install -y --no-install-recommends --fix-missing \
        ${PYTHON} \
        ${PYTHON}-dev \
        ${PYTHON}-pip \
        ${PYTHON}-setuptools \
        ${PYTHON}-wheel \
        build-essential \
        curl \
        libfreetype6-dev \
        libhdf5-serial-dev \
        libnuma-dev \
        libpng-dev \
        libzmq3-dev \
        openssh-client \
        openssh-server \
        pkg-config \
        rsync \
        software-properties-common \
        unzip \
        wget \
        && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*


RUN ${PIP} --no-cache-dir install \
        Pillow \
        h5py \
        ipykernel \
        jupyter \
        keras_applications \
        keras_preprocessing \
        matplotlib \
        numpy \
        pandas \
        scipy \
        sklearn \
        && \
    ${PYTHON} -m ipykernel.kernelspec


COPY ${TF_WHL_URL} /
RUN ${PIP} install --no-cache-dir --force-reinstall /${TF_WHL_URL} && \
    rm -rf /${TF_WHL_URL}


# Set up our notebook config.
COPY jupyter_notebook_config.py /root/.jupyter/

# Copy sample notebooks.
COPY notebooks /notebooks

# Jupyter has issues with being run directly:
# https://github.com/ipython/ipython/issues/7062
# We just add a little wrapper script.
COPY run_jupyter.sh /

WORKDIR /root

# Install Open MPI
RUN mkdir /tmp/openmpi && \
    cd /tmp/openmpi && \
    wget https://www.open-mpi.org/software/ompi/v3.0/downloads/openmpi-3.0.0.tar.gz && \
    tar zxf openmpi-3.0.0.tar.gz && \
    cd openmpi-3.0.0 && \
    ./configure --enable-orterun-prefix-by-default && \
    make -j $(nproc) all && \
    make install && \
    ldconfig && \
    rm -rf /tmp/openmpi

# Create a wrapper for OpenMPI to allow running as root by default
RUN mv /usr/local/bin/mpirun /usr/local/bin/mpirun.real && \
    echo '#!/bin/bash' > /usr/local/bin/mpirun && \
    echo 'mpirun.real --allow-run-as-root "$@"' >> /usr/local/bin/mpirun && \
    chmod a+x /usr/local/bin/mpirun

# Configure OpenMPI to run good defaults:
RUN echo "btl_tcp_if_exclude = lo,docker0" >> /usr/local/etc/openmpi-mca-params.conf

# Install Horovod
RUN ${PIP} install --no-cache-dir horovod

# Install OpenSSH for MPI to communicate between containers
RUN mkdir -p /var/run/sshd

# Allow OpenSSH to talk to containers without asking for confirmation
RUN cat /etc/ssh/ssh_config | grep -v StrictHostKeyChecking > /etc/ssh/ssh_config.new && \
    echo "    StrictHostKeyChecking no" >> /etc/ssh/ssh_config.new && \
    mv /etc/ssh/ssh_config.new /etc/ssh/ssh_config

# TensorBoard
EXPOSE 6006
# IPython
EXPOSE 8888

WORKDIR "/notebooks"

CMD ["/run_jupyter.sh", "--allow-root"]
tensorflow/tools/docker/LICENSE (deleted)
@@ -1,13 +0,0 @@
Copyright 2018 The TensorFlow Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
tensorflow/tools/docker/README.md (deleted)
@@ -1,110 +0,0 @@
# WARNING: THESE IMAGES ARE DEPRECATED.

TensorFlow's Dockerfiles are now located in
[`tensorflow/tools/dockerfiles/`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/dockerfiles).
However, these Dockerfiles are still used to build
[TensorFlow's official Docker images](https://hub.docker.com/r/tensorflow/tensorflow)
while the internal infrastructure for the newer Dockerfiles is being developed.

This directory will eventually be removed.

# Using TensorFlow via Docker

This directory contains `Dockerfile`s to make it easy to get up and running with
TensorFlow via [Docker](http://www.docker.com/).

## Installing Docker

General installation instructions are
[on the Docker site](https://docs.docker.com/installation/), but we give some
quick links here:

* [OSX](https://www.docker.com/products/docker#/mac)
* [Ubuntu](https://docs.docker.com/engine/installation/linux/ubuntulinux/)

## Which containers exist?

We currently maintain two Docker container images:

* `tensorflow/tensorflow` - TensorFlow with all dependencies - CPU only!

* `tensorflow/tensorflow:latest-gpu` - TensorFlow with all dependencies
  and support for NVidia CUDA

Note: We store all our containers on
[Docker Hub](https://hub.docker.com/r/tensorflow/tensorflow/tags/).

## Running the container

Run a non-GPU container using

    $ docker run -it -p 8888:8888 tensorflow/tensorflow

For GPU support, install NVidia drivers (ideally the latest) and
[nvidia-docker](https://github.com/NVIDIA/nvidia-docker). Run using

    $ nvidia-docker run -it -p 8888:8888 tensorflow/tensorflow:latest-gpu

Note: If you have trouble running nvidia-docker, you may try the old method
we used before it, but that method is not recommended. If you find a bug in
nvidia-docker, please report it there and use nvidia-docker as described above.

    $ # The old, not recommended way to run docker with gpu support:
    $ export CUDA_SO=$(\ls /usr/lib/x86_64-linux-gnu/libcuda.* | xargs -I{} echo '-v {}:{}')
    $ export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}')
    $ docker run -it -p 8888:8888 $CUDA_SO $DEVICES tensorflow/tensorflow:latest-gpu

## More containers

See all available [tags](https://hub.docker.com/r/tensorflow/tensorflow/tags/)
for additional containers, such as release candidates or nightly builds.

## Rebuilding the containers

Building TensorFlow Docker containers should be done through the
[parameterized_docker_build.sh](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/docker/parameterized_docker_build.sh)
script. The raw Dockerfiles should not be used directly as they contain strings
to be replaced by the script during the build.

Attempting to run [parameterized_docker_build.sh](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/docker/parameterized_docker_build.sh)
from a binary docker image such as `tensorflow/tensorflow:latest` will
not work. The script must be executed from a developer docker image, which,
unlike a binary image, contains not only the compiled binaries but also the
TensorFlow source code. Select an appropriate developer image of tensorflow
from the available
[tags](https://hub.docker.com/r/tensorflow/tensorflow/tags/).

The smallest command line to start such a developer container is then:

    docker run -it tensorflow/tensorflow:"right_tag"

If you would like to start a Jupyter notebook in your docker container, make
sure to map port 8888 of the container by adding `-p 8888:8888` to the above
command.
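Putting those two notes together, a developer container ready for notebook work could be started as follows; the `latest-devel` tag is only an example stand-in for whichever developer tag you selected:

```bash
# Start a developer container and publish Jupyter's port 8888 on the host.
# "latest-devel" is an example developer tag; substitute the tag you chose.
docker run -it -p 8888:8888 tensorflow/tensorflow:latest-devel
```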
To use the script, specify the container type (`CPU` vs. `GPU`), the desired
Python version (`PYTHON2` vs. `PYTHON3`) and whether the developer Docker image
is to be built (`NO` vs. `YES`). In addition, you need to specify the central
location from where the pip package of TensorFlow will be downloaded.

For example, to build a CPU-only non-developer Docker image for Python 2, using
TensorFlow's nightly pip package:

``` bash
export TF_DOCKER_BUILD_IS_DEVEL=NO
export TF_DOCKER_BUILD_TYPE=CPU
export TF_DOCKER_BUILD_PYTHON_VERSION=PYTHON2

pip download --no-deps tf-nightly

export TF_DOCKER_BUILD_CENTRAL_PIP=$(ls tf_nightly*.whl)
export TF_DOCKER_BUILD_CENTRAL_PIP_IS_LOCAL=1

tensorflow/tools/docker/parameterized_docker_build.sh
```

If successful, the image will be tagged as `${USER}/tensorflow:latest` by default.

Rebuilding GPU images requires [nvidia-docker](https://github.com/NVIDIA/nvidia-docker).
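For comparison, a sketch of the variables for a GPU developer image with Python 3, derived from the options listed above; whether this combination needs additional variables (for example a central pip location) is not shown here and should be checked against parameterized_docker_build.sh itself:

```bash
# Hypothetical GPU developer-image build; values follow the documented
# options (CPU/GPU, PYTHON2/PYTHON3, NO/YES). Verify any extra required
# variables against parameterized_docker_build.sh before relying on this.
export TF_DOCKER_BUILD_IS_DEVEL=YES
export TF_DOCKER_BUILD_TYPE=GPU
export TF_DOCKER_BUILD_PYTHON_VERSION=PYTHON3

tensorflow/tools/docker/parameterized_docker_build.sh
```

As noted above, nvidia-docker must be installed for GPU builds.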
tensorflow/tools/docker/jupyter_notebook_config.py (deleted)
@@ -1,31 +0,0 @@
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from IPython.lib import passwd

c = c  # pylint:disable=undefined-variable
c.NotebookApp.ip = '0.0.0.0'  # https://github.com/jupyter/notebook/issues/3946
c.NotebookApp.port = int(os.getenv('PORT', 8888))
c.NotebookApp.open_browser = False

# sets a password if PASSWORD is set in the environment
if 'PASSWORD' in os.environ:
  password = os.environ['PASSWORD']
  if password:
    c.NotebookApp.password = passwd(password)
  else:
    c.NotebookApp.password = ''
    c.NotebookApp.token = ''
  del os.environ['PASSWORD']
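The config above reads an optional `PASSWORD` environment variable at startup, so a Jupyter password can be preset when a container is launched; a minimal sketch using the CPU image from the README:

```bash
# Preset a Jupyter password via the PASSWORD variable consumed by
# jupyter_notebook_config.py above. An empty PASSWORD would disable both
# the password and the token, which is not recommended.
docker run -it -p 8888:8888 -e PASSWORD=change-me tensorflow/tensorflow
```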
@ -1,683 +0,0 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"colab_type": "text",
|
||||
"id": "a3bskVXPvchm"
|
||||
},
|
||||
"source": [
|
||||
"# Hello, TensorFlow\n",
|
||||
"## A beginner-level, getting started, basic introduction to TensorFlow"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"colab_type": "text",
|
||||
"id": "Rb5rSpcZvYbX"
|
||||
},
|
||||
"source": [
|
||||
"TensorFlow is a general-purpose system for graph-based computation. A typical use is machine learning. In this notebook, we'll introduce the basic concepts of TensorFlow using some simple examples.\n",
|
||||
"\n",
|
||||
"TensorFlow gets its name from [tensors](https://en.wikipedia.org/wiki/Tensor), which are arrays of arbitrary dimensionality. A vector is a 1-d array and is known as a 1st-order tensor. A matrix is a 2-d array and a 2nd-order tensor. The \"flow\" part of the name refers to computation flowing through a graph. Training and inference in a neural network, for example, involves the propagation of matrix computations through many nodes in a computational graph.\n",
|
||||
"\n",
|
||||
"When you think of doing things in TensorFlow, you might want to think of creating tensors (like matrices), adding operations (that output other tensors), and then executing the computation (running the computational graph). In particular, it's important to realize that when you add an operation on tensors, it doesn't execute immediately. Rather, TensorFlow waits for you to define all the operations you want to perform. Then, TensorFlow optimizes the computation graph, deciding how to execute the computation, before generating the data. Because of this, a tensor in TensorFlow isn't so much holding the data as a placeholder for holding the data, waiting for the data to arrive when a computation is executed."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"colab_type": "text",
|
||||
"id": "E8FhiMivhcYB"
|
||||
},
|
||||
"source": [
|
||||
"## Adding two vectors in TensorFlow\n",
|
||||
"\n",
|
||||
"Let's start with something that should be simple. Let's add two length four vectors (two 1st-order tensors):\n",
|
||||
"\n",
|
||||
"$\\begin{bmatrix} 1. \u0026 1. \u0026 1. \u0026 1.\\end{bmatrix} + \\begin{bmatrix} 2. \u0026 2. \u0026 2. \u0026 2.\\end{bmatrix} = \\begin{bmatrix} 3. \u0026 3. \u0026 3. \u0026 3.\\end{bmatrix}$"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 0,
|
||||
"metadata": {
|
||||
"cellView": "both",
|
||||
"colab": {
|
||||
"autoexec": {
|
||||
"startup": false,
|
||||
"wait_interval": 0
|
||||
},
|
||||
"output_extras": [
|
||||
{
|
||||
"item_id": 1
|
||||
}
|
||||
]
|
||||
},
|
||||
"colab_type": "code",
|
||||
"executionInfo": {
|
||||
"elapsed": 2922,
|
||||
"status": "ok",
|
||||
"timestamp": 1474675631337,
|
||||
"user": {
|
||||
"displayName": "",
|
||||
"photoUrl": "",
|
||||
"userId": ""
|
||||
},
|
||||
"user_tz": 420
|
||||
},
|
||||
"id": "2iv3XQ6k3eF1",
|
||||
"outputId": "7dbded62-91bc-4e38-9f25-53375c4c8dd8"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"result: [ 3. 3. 3. 3.]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"from __future__ import print_function\n",
|
||||
"\n",
|
||||
"import tensorflow as tf\n",
|
||||
"\n",
|
||||
"with tf.Session():\n",
|
||||
" input1 = tf.constant([1.0, 1.0, 1.0, 1.0])\n",
|
||||
" input2 = tf.constant([2.0, 2.0, 2.0, 2.0])\n",
|
||||
" output = tf.add(input1, input2)\n",
|
||||
" result = output.eval()\n",
|
||||
" print(\"result: \", result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"colab_type": "text",
|
||||
"id": "dqLV5GXT3wLy"
|
||||
},
|
||||
"source": [
|
||||
"What we're doing is creating two vectors, [1.0, 1.0, 1.0, 1.0] and [2.0, 2.0, 2.0, 2.0], and then adding them. Here's equivalent code in raw Python and using numpy:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 0,
|
||||
"metadata": {
|
||||
"cellView": "both",
|
||||
"colab": {
|
||||
"autoexec": {
|
||||
"startup": false,
|
||||
"wait_interval": 0
|
||||
},
|
||||
"output_extras": [
|
||||
{
|
||||
"item_id": 1
|
||||
}
|
||||
]
|
||||
},
|
||||
"colab_type": "code",
|
||||
"executionInfo": {
|
||||
"elapsed": 214,
|
||||
"status": "ok",
|
||||
"timestamp": 1474675631563,
|
||||
"user": {
|
||||
"displayName": "",
|
||||
"photoUrl": "",
|
||||
"userId": ""
|
||||
},
|
||||
"user_tz": 420
|
||||
},
|
||||
"id": "7DzDJ7sW79ao",
|
||||
"outputId": "588b573b-95d2-4587-849e-af6f3ec1303e"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[3.0, 3.0, 3.0, 3.0]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print([x + y for x, y in zip([1.0] * 4, [2.0] * 4)])"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 0,
|
||||
"metadata": {
|
||||
"cellView": "both",
|
||||
"colab": {
|
||||
"autoexec": {
|
||||
"startup": false,
|
||||
"wait_interval": 0
|
||||
},
|
||||
"output_extras": [
|
||||
{
|
||||
"item_id": 1
|
||||
}
|
||||
]
|
||||
},
|
||||
"colab_type": "code",
|
||||
"executionInfo": {
|
||||
"elapsed": 340,
|
||||
"status": "ok",
|
||||
"timestamp": 1474675631948,
|
||||
"user": {
|
||||
"displayName": "",
|
||||
"photoUrl": "",
|
||||
"userId": ""
|
||||
},
|
||||
"user_tz": 420
|
||||
},
|
||||
"id": "MDWJf0lHAF4E",
|
||||
"outputId": "bee09475-24dd-4331-fc46-692a07dae101"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[ 1. 1. 1. 1.] + [ 2. 2. 2. 2.] = [ 3. 3. 3. 3.]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"x, y = np.full(4, 1.0), np.full(4, 2.0)\n",
|
||||
"print(\"{} + {} = {}\".format(x, y, x + y))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"colab_type": "text",
|
||||
"id": "I52jQOyO8vAn"
|
||||
},
|
||||
"source": [
|
||||
"## Details of adding two vectors in TensorFlow\n",
|
||||
"\n",
|
||||
"The example above of adding two vectors involves a lot more than it seems, so let's look at it in more depth.\n",
|
||||
"\n",
|
||||
"\u003e`import tensorflow as tf`\n",
|
||||
"\n",
|
||||
"This import brings TensorFlow's public API into our IPython runtime environment.\n",
|
||||
"\n",
|
||||
"\u003e`with tf.Session():`\n",
|
||||
"\n",
|
||||
"When you run an operation in TensorFlow, you need to do it in the context of a `Session`. A session holds the computation graph, which contains the tensors and the operations. When you create tensors and operations, they are not executed immediately, but wait for other operations and tensors to be added to the graph, only executing when finally requested to produce the results of the session. Deferring the execution like this provides additional opportunities for parallelism and optimization, as TensorFlow can decide how to combine operations and where to run them after TensorFlow knows about all the operations. \n",
|
||||
"\n",
|
||||
"\u003e\u003e`input1 = tf.constant([1.0, 1.0, 1.0, 1.0])`\n",
|
||||
"\n",
|
||||
"\u003e\u003e`input2 = tf.constant([2.0, 2.0, 2.0, 2.0])`\n",
|
||||
"\n",
|
||||
"The next two lines create tensors using a convenience function called `constant`, which is similar to numpy's `array` and numpy's `full`. If you look at the code for `constant`, you can see the details of what it is doing to create the tensor. In summary, it creates a tensor of the necessary shape and applies the constant operator to it to fill it with the provided values. The values to `constant` can be Python or numpy arrays. `constant` can take an optional shape parameter, which works similarly to numpy's `fill` if provided, and an optional name parameter, which can be used to put a more human-readable label on the operation in the TensorFlow operation graph.\n",
|
||||
"\n",
|
||||
"\u003e\u003e`output = tf.add(input1, input2)`\n",
|
||||
"\n",
|
||||
"You might think `add` just adds the two vectors now, but it doesn't quite do that. What it does is put the `add` operation into the computational graph. The results of the addition aren't available yet. They've been put in the computation graph, but the computation graph hasn't been executed yet.\n",
|
||||
"\n",
|
||||
"\u003e\u003e`result = output.eval()`\n",
|
||||
"\n",
|
||||
"\u003e\u003e`print result`\n",
|
||||
"\n",
|
||||
"`eval()` is also slightly more complicated than it looks. Yes, it does get the value of the vector (tensor) that results from the addition. It returns this as a numpy array, which can then be printed. But, it's important to realize it also runs the computation graph at this point, because we demanded the output from the operation node of the graph; to produce that, it had to run the computation graph. So, this is the point where the addition is actually performed, not when `add` was called, as `add` just put the addition operation into the TensorFlow computation graph."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"colab_type": "text",
|
||||
"id": "H_5_2YY3ySr2"
|
||||
},
|
||||
"source": [
|
||||
"## Multiple operations\n",
|
||||
"\n",
|
||||
"To use TensorFlow, you add operations on tensors that produce tensors to the computation graph, then execute that graph to run all those operations and calculate the values of all the tensors in the graph.\n",
|
||||
"\n",
|
||||
"Here's a simple example with two operations:"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 0,
|
||||
"metadata": {
|
||||
"cellView": "both",
|
||||
"colab": {
|
||||
"autoexec": {
|
||||
"startup": false,
|
||||
"wait_interval": 0
|
||||
},
|
||||
"output_extras": [
|
||||
{
|
||||
"item_id": 1
|
||||
}
|
||||
]
|
||||
},
|
||||
"colab_type": "code",
|
||||
"executionInfo": {
|
||||
"elapsed": 1203,
|
||||
"status": "ok",
|
||||
"timestamp": 1474675633108,
|
||||
"user": {
|
||||
"displayName": "",
|
||||
"photoUrl": "",
|
||||
"userId": ""
|
||||
},
|
||||
"user_tz": 420
|
||||
},
|
||||
"id": "-kQmn3U_yXX8",
|
||||
"outputId": "8ba14a4d-b0cd-4b90-8b95-790e77d35e70"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[ 6. 6. 6. 6.]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import tensorflow as tf\n",
|
||||
"\n",
|
||||
"with tf.Session():\n",
|
||||
" input1 = tf.constant(1.0, shape=[4])\n",
|
||||
" input2 = tf.constant(2.0, shape=[4])\n",
|
||||
" input3 = tf.constant(3.0, shape=[4])\n",
|
||||
" output = tf.add(tf.add(input1, input2), input3)\n",
|
||||
" result = output.eval()\n",
|
||||
" print(result)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"colab_type": "text",
|
||||
"id": "Hod0zvsly8YT"
|
||||
},
|
||||
"source": [
|
||||
"This version uses `constant` in a way similar to numpy's `fill`, specifying the optional shape and having the values copied out across it.\n",
|
||||
"\n",
|
||||
"The `add` operator supports operator overloading, so you could try writing it inline as `input1 + input2` instead as well as experimenting with other operators."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 0,
|
||||
"metadata": {
|
||||
"cellView": "both",
|
||||
"colab": {
|
||||
"autoexec": {
|
||||
"startup": false,
|
||||
"wait_interval": 0
|
||||
},
|
||||
"output_extras": [
|
||||
{
|
||||
"item_id": 1
|
||||
}
|
||||
]
|
||||
},
|
||||
"colab_type": "code",
|
||||
"executionInfo": {
|
||||
"elapsed": 350,
|
||||
"status": "ok",
|
||||
"timestamp": 1474675633468,
|
||||
"user": {
|
||||
"displayName": "",
|
||||
"photoUrl": "",
|
||||
"userId": ""
|
||||
},
|
||||
"user_tz": 420
|
||||
},
|
||||
"id": "yS2WElRfxz53",
|
||||
"outputId": "2e3efae6-3990-447c-e05d-56a9d9701e87"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[ 3. 3. 3. 3.]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"with tf.Session():\n",
|
||||
" input1 = tf.constant(1.0, shape=[4])\n",
|
||||
" input2 = tf.constant(2.0, shape=[4])\n",
|
||||
" output = input1 + input2\n",
|
||||
" print(output.eval())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"colab_type": "text",
|
||||
"id": "zszjoYUjkUNU"
|
||||
},
|
||||
"source": [
|
||||
"## Adding two matrices"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"colab_type": "text",
|
||||
"id": "EWNYBCB6kbri"
|
||||
},
|
||||
"source": [
|
||||
"Next, let's do something very similar, adding two matrices:\n",
|
||||
"\n",
|
||||
"$\\begin{bmatrix}\n",
|
||||
" 1. \u0026 1. \u0026 1. \\\\\n",
|
||||
" 1. \u0026 1. \u0026 1. \\\\\n",
|
||||
"\\end{bmatrix} + \n",
|
||||
"\\begin{bmatrix}\n",
|
||||
" 1. \u0026 2. \u0026 3. \\\\\n",
|
||||
" 4. \u0026 5. \u0026 6. \\\\\n",
|
||||
"\\end{bmatrix} = \n",
|
||||
"\\begin{bmatrix}\n",
|
||||
" 2. \u0026 3. \u0026 4. \\\\\n",
|
||||
" 5. \u0026 6. \u0026 7. \\\\\n",
|
||||
"\\end{bmatrix}$"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 0,
|
||||
"metadata": {
|
||||
"cellView": "both",
|
||||
"colab": {
|
||||
"autoexec": {
|
||||
"startup": false,
|
||||
"wait_interval": 0
|
||||
},
|
||||
"output_extras": [
|
||||
{
|
||||
"item_id": 1
|
||||
}
|
||||
]
|
||||
},
|
||||
"colab_type": "code",
|
||||
"executionInfo": {
|
||||
"elapsed": 1327,
|
||||
"status": "ok",
|
||||
"timestamp": 1474675634683,
|
||||
"user": {
|
||||
"displayName": "",
|
||||
"photoUrl": "",
|
||||
"userId": ""
|
||||
},
|
||||
"user_tz": 420
|
||||
},
|
||||
"id": "tmWcCxSilYkg",
|
||||
"outputId": "8a135ccf-e706-457c-f4bc-2187039ffd92"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[[ 2. 3. 4.]\n",
|
||||
" [ 5. 6. 7.]]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import tensorflow as tf\n",
|
||||
"import numpy as np\n",
|
||||
"\n",
|
||||
"with tf.Session():\n",
|
||||
" input1 = tf.constant(1.0, shape=[2, 3])\n",
|
||||
" input2 = tf.constant(np.reshape(np.arange(1.0, 7.0, dtype=np.float32), (2, 3)))\n",
|
||||
" output = tf.add(input1, input2)\n",
|
||||
" print(output.eval())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"colab_type": "text",
|
||||
"id": "JuU3Bmglq1vd"
|
||||
},
|
||||
"source": [
|
||||
"Recall that you can pass numpy or Python arrays into `constant`.\n",
|
||||
"\n",
|
||||
"In this example, the matrix with values from 1 to 6 is created in numpy and passed into `constant`, but TensorFlow also has `range`, `reshape`, and `tofloat` operators. Doing this entirely within TensorFlow could be more efficient if this was a very large matrix.\n",
|
||||
"\n",
|
||||
"Try experimenting with this code a bit -- maybe modifying some of the values, using the numpy version, doing this using, adding another operation, or doing this using TensorFlow's `range` function."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"colab_type": "text",
|
||||
"id": "gnXnpnuLrflb"
|
||||
},
|
||||
"source": [
|
||||
"## Multiplying matrices"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"metadata": {
|
||||
"colab_type": "text",
|
||||
"id": "Ho-QNSOorj0y"
|
||||
},
|
||||
"source": [
|
||||
"Let's move on to matrix multiplication. This time, let's use a bit vector and some random values, which is a good step toward some of what we'll need to do for regression and neural networks."
|
||||
]
|
||||
},
|
||||
    {
      "cell_type": "code",
      "execution_count": 0,
      "metadata": {
        "cellView": "both",
        "colab": {
          "autoexec": {
            "startup": false,
            "wait_interval": 0
          },
          "output_extras": [
            {
              "item_id": 1
            }
          ]
        },
        "colab_type": "code",
        "executionInfo": {
          "elapsed": 2353,
          "status": "ok",
          "timestamp": 1474675637053,
          "user": {
            "displayName": "",
            "photoUrl": "",
            "userId": ""
          },
          "user_tz": 420
        },
        "id": "uNqMaFR8sIY5",
        "outputId": "b630554e-68b3-4904-c07d-f28a0a41bbd2"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "Input:\n",
            "[[ 1.  0.  0.  1.]]\n",
            "Weights:\n",
            "[[ 0.3949919  -0.83823347]\n",
            " [ 0.25941893 -1.58861065]\n",
            " [-1.11733329 -0.60435963]\n",
            " [ 1.04782867  0.18336453]]\n",
            "Output:\n",
            "[[ 1.44282055 -0.65486896]]\n"
          ]
        }
      ],
      "source": [
        "#@test {\"output\": \"ignore\"}\n",
        "import tensorflow as tf\n",
        "import numpy as np\n",
        "\n",
        "with tf.Session():\n",
        "  input_features = tf.constant(np.reshape([1, 0, 0, 1], (1, 4)).astype(np.float32))\n",
        "  weights = tf.constant(np.random.randn(4, 2).astype(np.float32))\n",
        "  output = tf.matmul(input_features, weights)\n",
        "  print(\"Input:\")\n",
        "  print(input_features.eval())\n",
        "  print(\"Weights:\")\n",
        "  print(weights.eval())\n",
        "  print(\"Output:\")\n",
        "  print(output.eval())"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "JDAVTPhb22AP"
      },
      "source": [
        "Above, we're taking a 1 x 4 vector [1 0 0 1] and multiplying it by a 4 x 2 matrix full of random values from a normal distribution (mean 0, stdev 1). The output is a 1 x 2 matrix.\n",
        "\n",
        "You might try modifying this example. Running the cell multiple times will generate new random weights and a new output. Or, change the input, e.g., to \\[0 0 0 1\\], and run the cell again. Or, try initializing the weights using a TensorFlow op, e.g., `random_normal`, instead of using numpy to generate the random weights.\n",
        "\n",
        "What we have here is the basics of a simple neural network already. If we read in input features along with some expected output, and adjust the weights based on the error between the actual and expected output each time, that's a neural network."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "XhnBjAUILuy8"
      },
      "source": [
        "## Use of variables\n",
        "\n",
        "Let's look at adding two small matrices in a loop, not by creating new tensors every time, but by updating the existing values and then re-running the computation graph on the new data. This happens a lot with machine learning models, where we change some parameters each time (for example, by gradient descent on some weights) and then perform the same computations over and over again."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 0,
      "metadata": {
        "cellView": "both",
        "colab": {
          "autoexec": {
            "startup": false,
            "wait_interval": 0
          },
          "output_extras": [
            {
              "item_id": 1
            }
          ]
        },
        "colab_type": "code",
        "executionInfo": {
          "elapsed": 2561,
          "status": "ok",
          "timestamp": 1474675639610,
          "user": {
            "displayName": "",
            "photoUrl": "",
            "userId": ""
          },
          "user_tz": 420
        },
        "id": "vJ_AgZ8lLtRv",
        "outputId": "b8f19c28-a9b4-4fb3-9e90-6e432bf300a7"
      },
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "[[ -7.29560852e-05   8.01583767e-01]] [[ -7.29560852e-05   8.01583767e-01]]\n",
            "[[ 0.64477301 -0.03944111]] [[ 0.64470005  0.76214266]]\n",
            "[[-0.07470274 -0.76814342]] [[ 0.56999731 -0.00600076]]\n",
            "[[-0.34230471 -0.42372179]] [[ 0.2276926  -0.42972255]]\n",
            "[[ 0.67873812  0.65932178]] [[ 0.90643072  0.22959924]]\n"
          ]
        }
      ],
      "source": [
        "#@test {\"output\": \"ignore\"}\n",
        "import tensorflow as tf\n",
        "import numpy as np\n",
        "\n",
        "with tf.Session() as sess:\n",
        "  # Set up two variables, total and weights, that we'll change repeatedly.\n",
        "  total = tf.Variable(tf.zeros([1, 2]))\n",
        "  weights = tf.Variable(tf.random_uniform([1, 2]))\n",
        "\n",
        "  # Initialize the variables we defined above.\n",
        "  tf.global_variables_initializer().run()\n",
        "\n",
        "  # This only adds the operators to the graph right now. The assignment\n",
        "  # and addition operations are not performed yet.\n",
        "  update_weights = tf.assign(weights, tf.random_uniform([1, 2], -1.0, 1.0))\n",
        "  update_total = tf.assign(total, tf.add(total, weights))\n",
        "\n",
        "  for _ in range(5):\n",
        "    # Actually run the operation graph, so randomly generate weights and then\n",
        "    # add them into the total. Order does matter here. We need to update\n",
        "    # the weights before updating the total.\n",
        "    sess.run(update_weights)\n",
        "    sess.run(update_total)\n",
        "\n",
        "    print(weights.eval(), total.eval())"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "kSYJr89aM_n0"
      },
      "source": [
        "This is more complicated. At a high level, we create two variables and add operations over them; then, in a loop, we repeatedly execute those operations. Let's walk through it step by step.\n",
        "\n",
        "Starting off, the code creates two variables, `total` and `weights`. `total` is initialized to \\[0, 0\\] and `weights` is initialized to random values between 0 and 1.\n",
        "\n",
        "Next, two assignment operators are added to the graph: one that updates `weights` with random values from [-1, 1], the other that updates `total` with the new `weights`. Again, the operators are not executed here. In fact, this isn't even inside the loop. We won't execute these operations until the `run` calls inside the loop.\n",
        "\n",
        "Finally, in the for loop, we run each of the operators. In each iteration of the loop, this executes the operators we added earlier, first putting random values into the weights, then updating the total with the new weights. These calls use `run` on the session; the code could also have called `eval` on the operators (e.g., `update_weights.eval()`).\n",
        "\n",
        "It can be a little hard to wrap your head around exactly what computation is done when. The important thing to remember is that computation is only performed on demand.\n",
        "\n",
        "Variables can be useful in cases where you have a large amount of computation and data that you want to use over and over again with just a minor change to the input each time. That happens quite a bit with neural networks, for example, where you just want to update the weights each time you go through the batches of input data, then run the same operations over again."
      ]
    },
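    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text"
      },
      "source": [
        "As an editorial aside (this cell is not part of the original notebook): the loop above could equivalently call `eval` on the assignment outputs, as the text mentions. A minimal sketch, assuming the same TF 1.x `Session` semantics:"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 0,
      "metadata": {
        "colab_type": "code"
      },
      "outputs": [],
      "source": [
        "import tensorflow as tf\n",
        "\n",
        "with tf.Session():\n",
        "  total = tf.Variable(tf.zeros([1, 2]))\n",
        "  weights = tf.Variable(tf.random_uniform([1, 2]))\n",
        "  tf.global_variables_initializer().run()\n",
        "  update_weights = tf.assign(weights, tf.random_uniform([1, 2], -1.0, 1.0))\n",
        "  update_total = tf.assign(total, tf.add(total, weights))\n",
        "  for _ in range(5):\n",
        "    # eval() on the assignment outputs runs them in the default session.\n",
        "    update_weights.eval()\n",
        "    update_total.eval()\n",
        "    print(weights.eval(), total.eval())"
      ]
    },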
    {
      "cell_type": "markdown",
      "metadata": {
        "colab_type": "text",
        "id": "fL3WfAbKzqr5"
      },
      "source": [
        "## What's next?\n",
        "\n",
        "This has been a gentle introduction to TensorFlow, focused on what TensorFlow is and the very basics of doing anything in TensorFlow. If you'd like more, the next tutorial in the series is Getting Started with TensorFlow, also available in the [notebooks directory](../notebooks)."
      ]
    }
  ],
  "metadata": {
    "colab": {
      "default_view": {},
      "name": "Untitled",
      "provenance": [],
      "version": "0.3.2",
      "views": {}
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@ -1,5 +0,0 @@
package(default_visibility = ["//visibility:private"])

licenses(["notice"])  # Apache 2.0

exports_files(["LICENSE"])
@ -1,13 +0,0 @@
Copyright 2018 The TensorFlow Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@ -1,548 +0,0 @@
#!/usr/bin/env bash
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Parameterized build and test for TensorFlow Docker images.
#
# Usage:
#   parameterized_docker_build.sh
#
# The script obeys the following environment variables:
#   TF_DOCKER_BUILD_TYPE: (CPU | GPU | MKL | MKL-HOROVOD)
#     CPU, GPU, MKL or MKL-HOROVOD image
#
#   TF_DOCKER_BUILD_IS_DEVEL: (NO | YES)
#     Whether this is a development image
#
#   TF_DOCKER_BUILD_DEVEL_BRANCH
#     (Required if TF_DOCKER_BUILD_IS_DEVEL is YES)
#     Specifies the branch to check out for devel docker images
#
#   TF_DOCKER_BUILD_CENTRAL_PIP
#     (Optional)
#     If set to a non-empty string, will use it as the URL from which the
#     pip wheel file will be downloaded (instead of building the pip locally).
#
#   TF_DOCKER_BUILD_CENTRAL_PIP_IS_LOCAL
#     (Optional)
#     If set to a non-empty string, we will treat TF_DOCKER_BUILD_CENTRAL_PIP
#     as a path rather than a URL.
#
#   TF_DOCKER_BUILD_IMAGE_NAME:
#     (Optional)
#     If set to any non-empty value, will use it as the name of the
#     newly-built image. If not set, the image name prefix
#     tensorflow/tensorflow will be used.
#
#   TF_DOCKER_BUILD_VERSION:
#     (Optional)
#     If set to any non-empty value, will use the version (e.g., 0.8.0) as the
#     tag prefix of the image. Additional strings, e.g., "-devel-gpu", will be
#     appended to the tag. If not set, the default tag prefix "latest" will be
#     used.
#
#   TF_DOCKER_BUILD_PORT
#     (Optional)
#     If set to any non-empty and valid port number, will use that port number
#     during basic checks on the newly-built docker image.
#
#   TF_DOCKER_BUILD_PUSH_CMD
#     (Optional)
#     If set to a valid binary/script path, will call the script with the final
#     tagged image name as an argument, to push the image to a central repo
#     such as gcr.io or Docker Hub.
#
#   TF_DOCKER_BUILD_PUSH_WITH_CREDENTIALS
#     (Optional)
#     Do not set this along with TF_DOCKER_BUILD_PUSH_CMD. The image will be
#     pushed with direct docker commands rather than through a script.
#
#   TF_DOCKER_USERNAME
#     (Optional)
#     Docker Hub username for pushing a package.
#
#   TF_DOCKER_EMAIL
#     (Optional)
#     Docker Hub email for pushing a package.
#
#   TF_DOCKER_PASSWORD
#     (Optional)
#     Docker Hub password for pushing a package.
#
#   TF_DOCKER_BUILD_PYTHON_VERSION
#     (Optional)
#     Specifies the desired Python version. Defaults to PYTHON2.
#
#   TF_DOCKER_BUILD_OPTIONS
#     (Optional)
#     Specifies the desired build options. Defaults to OPT.
#
#   TF_DOCKER_BUILD_ARGS
#     (Optional)
#     A list (array) of docker build args. Will be passed to the docker build
#     command as a list of --build-arg parameters.
#
#   TF_BAZEL_BUILD_OPTIONS
#     (Optional)
#     Bazel compiler flags to be passed to the bazelrc file

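# For reference, a hypothetical invocation might look like the following
# (all values are illustrative assumptions; the release branch and the
# script's path in the source tree are not prescribed by this script):
#
#   export TF_DOCKER_BUILD_TYPE="GPU"
#   export TF_DOCKER_BUILD_IS_DEVEL="YES"
#   export TF_DOCKER_BUILD_DEVEL_BRANCH="r1.13"
#   export TF_DOCKER_BUILD_PYTHON_VERSION="PYTHON3"
#   tensorflow/tools/docker/parameterized_docker_build.sh
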
# Script directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${SCRIPT_DIR}/../ci_build/builds/builds_common.sh"

# Helper functions
CHECK_FAILED=0
mark_check_failed() {
  # Usage: mark_check_failed <FAILURE_MESSAGE>
  echo "$1"
  CHECK_FAILED=1
}

TF_DOCKER_BUILD_TYPE=$(to_lower ${TF_DOCKER_BUILD_TYPE})
TF_DOCKER_BUILD_IS_DEVEL=$(to_lower ${TF_DOCKER_BUILD_IS_DEVEL})
TF_DOCKER_BUILD_PYTHON_VERSION=$(to_lower ${TF_DOCKER_BUILD_PYTHON_VERSION:-PYTHON2})
TF_DOCKER_BUILD_OPTIONS=$(to_lower ${TF_DOCKER_BUILD_OPTIONS:-OPT})

echo "Required build parameters:"
echo "  TF_DOCKER_BUILD_TYPE=${TF_DOCKER_BUILD_TYPE}"
echo "  TF_DOCKER_BUILD_IS_DEVEL=${TF_DOCKER_BUILD_IS_DEVEL}"
echo "  TF_DOCKER_BUILD_DEVEL_BRANCH=${TF_DOCKER_BUILD_DEVEL_BRANCH}"
echo ""
echo "Optional build parameters:"
echo "  TF_DOCKER_BUILD_CENTRAL_PIP=${TF_DOCKER_BUILD_CENTRAL_PIP}"
echo "  TF_DOCKER_BUILD_IMAGE_NAME=${TF_DOCKER_BUILD_IMAGE_NAME}"
echo "  TF_DOCKER_BUILD_VERSION=${TF_DOCKER_BUILD_VERSION}"
echo "  TF_DOCKER_BUILD_PORT=${TF_DOCKER_BUILD_PORT}"
echo "  TF_DOCKER_BUILD_PUSH_CMD=${TF_DOCKER_BUILD_PUSH_CMD}"
echo "  TF_DOCKER_BUILD_ARGS=${TF_DOCKER_BUILD_ARGS[@]:-()}"
echo "  TF_BAZEL_BUILD_OPTIONS=${TF_BAZEL_BUILD_OPTIONS}"


CONTAINER_PORT=${TF_DOCKER_BUILD_PORT:-8888}

# Make sure that docker is available on the path
if [[ -z $(which docker) ]]; then
  die "ERROR: docker is not available on the path"
fi

# Validate the environment-variable options and construct the final image name
# Final image name with tag
FINAL_IMAGE_NAME=${TF_DOCKER_BUILD_IMAGE_NAME:-tensorflow/tensorflow}
FINAL_TAG=${TF_DOCKER_BUILD_VERSION:-latest}

# Original (unmodified) Dockerfile
ORIG_DOCKERFILE="Dockerfile"

if [[ ${TF_DOCKER_BUILD_IS_DEVEL} == "yes" ]]; then
  FINAL_TAG="${FINAL_TAG}-devel"
  ORIG_DOCKERFILE="${ORIG_DOCKERFILE}.devel"

  if [[ -z "${TF_DOCKER_BUILD_DEVEL_BRANCH}" ]]; then
    die "ERROR: TF_DOCKER_BUILD_DEVEL_BRANCH is missing for devel docker build"
  fi
elif [[ ${TF_DOCKER_BUILD_IS_DEVEL} == "no" ]]; then
  :
else
  die "ERROR: Unrecognized value in TF_DOCKER_BUILD_IS_DEVEL: "\
"${TF_DOCKER_BUILD_IS_DEVEL}"
fi

if [[ ${TF_DOCKER_BUILD_TYPE} == "cpu" ]]; then
  DOCKER_BINARY="docker"
elif [[ ${TF_DOCKER_BUILD_TYPE} == "mkl" ]]; then
  DOCKER_BINARY="docker"
  FINAL_TAG="${FINAL_TAG}-mkl"
  if [[ ${ORIG_DOCKERFILE} == *"."* ]]; then
    # There is already a dot in the Dockerfile name, use "-"
    ORIG_DOCKERFILE="${ORIG_DOCKERFILE}-mkl"
  else
    ORIG_DOCKERFILE="${ORIG_DOCKERFILE}.mkl"
  fi
elif [[ ${TF_DOCKER_BUILD_TYPE} == "mkl-horovod" ]]; then
  DOCKER_BINARY="docker"
  FINAL_TAG="${FINAL_TAG}-mkl-horovod"
  if [[ ${ORIG_DOCKERFILE} == *"."* ]]; then
    # There is already a dot in the Dockerfile name, use "-"
    ORIG_DOCKERFILE="${ORIG_DOCKERFILE}-mkl-horovod"
  else
    ORIG_DOCKERFILE="${ORIG_DOCKERFILE}.mkl-horovod"
  fi
elif [[ ${TF_DOCKER_BUILD_TYPE} == "gpu" ]]; then
  DOCKER_BINARY="nvidia-docker"

  FINAL_TAG="${FINAL_TAG}-gpu"
  if [[ ${ORIG_DOCKERFILE} == *"."* ]]; then
    # There is already a dot in the Dockerfile name, use "-"
    ORIG_DOCKERFILE="${ORIG_DOCKERFILE}-gpu"
  else
    ORIG_DOCKERFILE="${ORIG_DOCKERFILE}.gpu"
  fi
else
  die "ERROR: Unrecognized value in TF_DOCKER_BUILD_TYPE: "\
"${TF_DOCKER_BUILD_TYPE}"
fi

if [[ "${TF_DOCKER_BUILD_PYTHON_VERSION}" == "python2" ]]; then
  :
elif [[ "${TF_DOCKER_BUILD_PYTHON_VERSION}" == "python3" ]]; then
  FINAL_TAG="${FINAL_TAG}-py3"
elif [[ "${TF_DOCKER_BUILD_PYTHON_VERSION}" == "python3.6" ]]; then
  FINAL_TAG="${FINAL_TAG}-py3.6"
else
  die "Unrecognized value in TF_DOCKER_BUILD_PYTHON_VERSION: "\
"${TF_DOCKER_BUILD_PYTHON_VERSION}"
fi

# Verify that the original Dockerfile exists
ORIG_DOCKERFILE="${SCRIPT_DIR}/${ORIG_DOCKERFILE}"
if [[ ! -f "${ORIG_DOCKERFILE}" ]]; then
  die "ERROR: Cannot find Dockerfile at: ${ORIG_DOCKERFILE}"
fi

echo ""
echo "FINAL_IMAGE_NAME: ${FINAL_IMAGE_NAME}"
echo "FINAL_TAG: ${FINAL_TAG}"
echo "Original Dockerfile: ${ORIG_DOCKERFILE}"
echo ""

# Create tmp directory for Docker build
TMP_DIR=$(mktemp -d)
echo ""
echo "Docker build will occur in temporary directory: ${TMP_DIR}"

# Copy all files to tmp directory for Docker build
cp -r ${SCRIPT_DIR}/* "${TMP_DIR}/"

if [[ "${TF_DOCKER_BUILD_IS_DEVEL}" == "no" ]]; then
  DOCKERFILE="${TMP_DIR}/Dockerfile"

  if [[ -z "${TF_DOCKER_BUILD_CENTRAL_PIP}" ]]; then
    # Perform local build of the required PIP whl file
    export TF_BUILD_CONTAINER_TYPE=${TF_DOCKER_BUILD_TYPE}
    export TF_BUILD_PYTHON_VERSION=${TF_DOCKER_BUILD_PYTHON_VERSION}
    export TF_BUILD_OPTIONS=${TF_DOCKER_BUILD_OPTIONS}
    export TF_BUILD_IS_PIP="PIP"

    if [[ "${TF_DOCKER_BUILD_TYPE}" == "mkl" ]]; then
      die "FAIL: Non-development MKL builds require a pre-built pip whl."
    fi

    if [[ "${TF_DOCKER_BUILD_TYPE}" == "mkl-horovod" ]]; then
      die "FAIL: Non-development MKL-HOROVOD builds require a pre-built pip whl."
    fi

    if [[ "${TF_DOCKER_BUILD_TYPE}" == "gpu" ]]; then
      export TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS=\
"${TF_BUILD_APPEND_CI_DOCKER_EXTRA_PARAMS} -e TF_CUDA_COMPUTE_CAPABILITIES=3.0,3.5,5.2,6.0"
    fi

    pushd "${SCRIPT_DIR}/../../../"
    rm -rf pip_test/whl &&
    tensorflow/tools/ci_build/ci_parameterized_build.sh
    PIP_BUILD_EXIT_CODE=$?
    popd

    # Was the pip build successful?
    if [[ ${PIP_BUILD_EXIT_CODE} != "0" ]]; then
      die "FAIL: Failed to build pip file locally"
    fi

    PIP_WHL=$(ls pip_test/whl/*.whl | head -1)
    if [[ -z "${PIP_WHL}" ]]; then
      die "ERROR: Cannot locate the locally-built pip whl file"
    fi
    echo "Locally-built PIP whl file is at: ${PIP_WHL}"

    # Copy the pip file to tmp directory
    cp "${PIP_WHL}" "${TMP_DIR}/" || \
      die "ERROR: Failed to copy wheel file: ${PIP_WHL}"

    # Use string replacement to put the correct file name into the Dockerfile
    PIP_WHL=$(basename "${PIP_WHL}")

    # Modify the non-devel Dockerfile to point to the correct pip whl file
    # location
    sed -e "/# --- DO NOT EDIT OR DELETE BETWEEN THE LINES --- #/,"\
"/# --- ~ DO NOT EDIT OR DELETE BETWEEN THE LINES --- #/c"\
"COPY ${PIP_WHL} /\n"\
"RUN pip --no-cache-dir install /${PIP_WHL}" "${ORIG_DOCKERFILE}" \
    > "${DOCKERFILE}"

  # Build from a local whl file path rather than a URL
  elif [[ ! -z "${TF_DOCKER_BUILD_CENTRAL_PIP_IS_LOCAL}" ]]; then
    PIP_WHL="${TF_DOCKER_BUILD_CENTRAL_PIP}"
    if [[ -z "${PIP_WHL}" ]]; then
      die "ERROR: Cannot locate the specified pip whl file"
    fi
    echo "Specified PIP whl file is at: ${PIP_WHL}"

    # Copy the pip file to tmp directory
    cp "${PIP_WHL}" "${TMP_DIR}/" || \
      die "ERROR: Failed to copy wheel file: ${PIP_WHL}"

    # Use string replacement to put the correct file name into the Dockerfile
    PIP_WHL=$(basename "${PIP_WHL}")

    if [[ ${TF_DOCKER_BUILD_TYPE} == "mkl" ]] || \
       [[ ${TF_DOCKER_BUILD_TYPE} == "mkl-horovod" ]]; then
      TF_DOCKER_BUILD_ARGS+=("--build-arg TF_WHL_URL=${PIP_WHL}")
      cp "${ORIG_DOCKERFILE}" "${DOCKERFILE}"
    else
      # Modify the non-devel Dockerfile to point to the correct pip whl file
      # location
      sed -e "/# --- DO NOT EDIT OR DELETE BETWEEN THE LINES --- #/,"\
"/# --- ~ DO NOT EDIT OR DELETE BETWEEN THE LINES --- #/c"\
"COPY ${PIP_WHL} /\n"\
"RUN pip --no-cache-dir install /${PIP_WHL}" "${ORIG_DOCKERFILE}" \
      > "${DOCKERFILE}"
    fi
    echo "Using local pip wheel from: ${TF_DOCKER_BUILD_CENTRAL_PIP}"
    echo
  else
    echo "Downloading pip wheel from: ${TF_DOCKER_BUILD_CENTRAL_PIP}"
    if [[ ${TF_DOCKER_BUILD_TYPE} == "mkl" ]] || \
       [[ ${TF_DOCKER_BUILD_TYPE} == "mkl-horovod" ]]; then
      pushd "${TMP_DIR}/"
      curl -O ${TF_DOCKER_BUILD_CENTRAL_PIP}
      popd
      PIP_WHL_PATH=$(find ${TMP_DIR} -name "*.whl")
      PIP_WHL=$(basename "${PIP_WHL_PATH}")
      echo "PIP_WHL=${PIP_WHL}"
      echo
      TF_DOCKER_BUILD_ARGS+=("--build-arg TF_WHL_URL=${PIP_WHL}")
      cp "${ORIG_DOCKERFILE}" "${DOCKERFILE}"
    else
      # Modify the non-devel Dockerfile to point to the correct pip whl URL.
      sed -e "/# --- DO NOT EDIT OR DELETE BETWEEN THE LINES --- #/,"\
"/# --- ~ DO NOT EDIT OR DELETE BETWEEN THE LINES --- #/c"\
"RUN pip --no-cache-dir install ${TF_DOCKER_BUILD_CENTRAL_PIP}" "${ORIG_DOCKERFILE}" \
      > "${DOCKERFILE}"
    fi
  fi

  echo "Modified Dockerfile at: ${DOCKERFILE}"
  echo

  # Modify python/pip version if necessary.
  if [[ "${TF_DOCKER_BUILD_PYTHON_VERSION}" == "python3" ]]; then
    if [[ ${TF_DOCKER_BUILD_TYPE} == "mkl" ]] || \
       [[ ${TF_DOCKER_BUILD_TYPE} == "mkl-horovod" ]]; then
      TF_DOCKER_BUILD_ARGS+=("--build-arg PYTHON=${TF_DOCKER_BUILD_PYTHON_VERSION}")
      TF_DOCKER_BUILD_ARGS+=("--build-arg PYTHON_DEV=python3-dev")
      TF_DOCKER_BUILD_ARGS+=("--build-arg PIP=pip3")
      cp "${ORIG_DOCKERFILE}" "${DOCKERFILE}"
    else
      if sed -i -e 's/python /python3 /g' "${DOCKERFILE}" && \
         sed -i -e 's/python-dev/python3-dev/g' "${DOCKERFILE}" && \
         sed -i -e 's/pip /pip3 /g' "${DOCKERFILE}" && \
         sed -i -e 's^# RUN ln -s -f /usr/bin/python3 /usr/bin/python#^RUN ln -s -f /usr/bin/python3 /usr/bin/python^' "${DOCKERFILE}"
      then
        echo "Modified Dockerfile for python version "\
"${TF_DOCKER_BUILD_PYTHON_VERSION} at: ${DOCKERFILE}"
      else
        die "FAILED to modify ${DOCKERFILE} for python3"
      fi
    fi
  fi
else  # TF_DOCKER_BUILD_IS_DEVEL == 'yes'
  DOCKERFILE="${TMP_DIR}/Dockerfile"

  # Set up Dockerfile ARGS for mkl and mkl-horovod build
  if [[ ${TF_DOCKER_BUILD_TYPE} == "mkl" ]] || \
     [[ ${TF_DOCKER_BUILD_TYPE} == "mkl-horovod" ]]; then
    if [[ -z "${TF_BAZEL_BUILD_OPTIONS// }" ]]; then
      TF_BAZEL_BUILD_OPTIONS=("--config=mkl --copt=-mavx --cxxopt=-D_GLIBCXX_USE_CXX11_ABI=0")
    else
      TF_BAZEL_BUILD_OPTIONS="${TF_BAZEL_BUILD_OPTIONS}"
    fi
    TF_DOCKER_BUILD_ARGS+=("--build-arg TF_BUILD_VERSION=${TF_DOCKER_BUILD_DEVEL_BRANCH}")
    echo "TF_DOCKER_BUILD_ARGS=${TF_DOCKER_BUILD_ARGS[@]}"

    # Pass the build options to bazel using the user-specific .bazelrc file
    echo "build ${TF_BAZEL_BUILD_OPTIONS}" >> ${TMP_DIR}/.bazelrc
    cp "${ORIG_DOCKERFILE}" "${DOCKERFILE}"
  else
    # Modify the devel Dockerfile to specify the git branch
    sed "s/^RUN git clone --branch=.* --depth=1/RUN git clone --branch=${TF_DOCKER_BUILD_DEVEL_BRANCH} --depth=1/" \
      "${ORIG_DOCKERFILE}" > "${DOCKERFILE}"
  fi

  # Modify python/pip version if necessary.
  if [[ "${TF_DOCKER_BUILD_PYTHON_VERSION}" == "python3" ]] || [[ "${TF_DOCKER_BUILD_PYTHON_VERSION}" == "python3.6" ]]; then
    if [[ ${TF_DOCKER_BUILD_TYPE} == "mkl" ]] || [[ ${TF_DOCKER_BUILD_TYPE} == "mkl-horovod" ]]; then
      TF_DOCKER_BUILD_ARGS+=("--build-arg PYTHON=${TF_DOCKER_BUILD_PYTHON_VERSION}")
      TF_DOCKER_BUILD_ARGS+=("--build-arg PYTHON3_DEV=python3-dev")
      TF_DOCKER_BUILD_ARGS+=("--build-arg WHL_DIR=/tmp/pip3")
      TF_DOCKER_BUILD_ARGS+=("--build-arg PIP=pip3")
      cp "${ORIG_DOCKERFILE}" "${DOCKERFILE}"
    else
      if [[ "${TF_DOCKER_BUILD_PYTHON_VERSION}" == "python3.6" ]] && [[ "${TF_DOCKER_BUILD_TYPE}" != "mkl" ]]; then
        die "Python 3.6 build only supported for MKL builds."
      fi
      if sed -i -e 's/python-dev/python-dev python3-dev/g' "${DOCKERFILE}" && \
         sed -i -e 's/python /python3 /g' "${DOCKERFILE}" && \
         sed -i -e 's^/tmp/pip^/tmp/pip3^g' "${DOCKERFILE}" && \
         sed -i -e 's/pip /pip3 /g' "${DOCKERFILE}" && \
         sed -i -e 's/ENV CI_BUILD_PYTHON python/ENV CI_BUILD_PYTHON python3/g' "${DOCKERFILE}" && \
         sed -i -e 's^# RUN ln -s -f /usr/bin/python3 /usr/bin/python#^RUN ln -s -f /usr/bin/python3 /usr/bin/python^' "${DOCKERFILE}"
      then
        echo "Modified Dockerfile further for python version ${TF_DOCKER_BUILD_PYTHON_VERSION} at: ${DOCKERFILE}"
      else
        die "FAILED to modify ${DOCKERFILE} for python3"
      fi
    fi
  fi
fi

# Perform docker build
# Intermediate image name with tag
IMG="${USER}/tensorflow:${FINAL_TAG}"
echo "Building docker image with image name and tag: ${IMG}"
echo "TF_DOCKER_BUILD_ARGS=${TF_DOCKER_BUILD_ARGS[@]}"
CMD="${DOCKER_BINARY} build ${TF_DOCKER_BUILD_ARGS[@]} --no-cache --pull -t ${IMG} -f ${DOCKERFILE} ${TMP_DIR}"
echo "CMD=${CMD}"
${CMD}

if [[ $? == "0" ]]; then
  echo "${DOCKER_BINARY} build of ${IMG} succeeded"
else
  die "FAIL: ${DOCKER_BINARY} build of ${IMG} with Dockerfile ${DOCKERFILE} "\
"failed"
fi


# Make sure that there are no other containers of the same image running
# TODO(cais): Move to an earlier place.
if "${DOCKER_BINARY}" ps | grep -q "${IMG}"; then
  die "ERROR: It appears that there are docker containers of the image "\
"${IMG} running. Please stop them before proceeding"
fi

# Start a docker container from the newly-built docker image
DOCKER_RUN_LOG="${TMP_DIR}/docker_run.log"
echo ""
echo "Running docker container from image ${IMG}..."
echo "  Log file is at: ${DOCKER_RUN_LOG}"
echo ""

if [[ "${TF_DOCKER_BUILD_IS_DEVEL}" == "no" ]]; then
  "${DOCKER_BINARY}" run --rm -p ${CONTAINER_PORT}:${CONTAINER_PORT} \
    -v ${TMP_DIR}/notebooks:/root/notebooks "${IMG}" \
    > "${DOCKER_RUN_LOG}" 2>&1 &

  # Get the container ID
  CONTAINER_ID=""
  while [[ -z ${CONTAINER_ID} ]]; do
    sleep 1
    echo "Polling for container ID..."
    CONTAINER_ID=$("${DOCKER_BINARY}" ps | grep "${IMG}" | awk '{print $1}')
  done

  echo "ID of the running docker container: ${CONTAINER_ID}"
  echo ""

  if [[ ${TF_DOCKER_BUILD_IS_DEVEL} == "no" ]]; then
    # Non-devel docker build: Do some basic sanity checks on jupyter notebook
    # on the running docker container
    echo ""
    echo "Performing basic sanity checks on the running container..."
    if wget -qO- "http://127.0.0.1:${CONTAINER_PORT}/tree" &> /dev/null
    then
      echo "  PASS: wget tree"
    else
      mark_check_failed "  FAIL: wget tree"
    fi

    for NB in ${TMP_DIR}/notebooks/*.ipynb; do
      NB_BASENAME=$(basename "${NB}")
      NB_URL="http://127.0.0.1:${CONTAINER_PORT}/notebooks/${NB_BASENAME}"
      if wget -qO- "${NB_URL}" -o "${TMP_DIR}/${NB_BASENAME}" &> /dev/null
      then
        echo "  PASS: wget ${NB_URL}"
      else
        mark_check_failed "  FAIL: wget ${NB_URL}"
      fi
    done
  fi

  # Stop the running docker container
  sleep 1
  "${DOCKER_BINARY}" stop --time=0 ${CONTAINER_ID}
fi


# Clean up
echo "Cleaning up temporary directory: ${TMP_DIR} ..."
rm -rf "${TMP_DIR}" || echo "ERROR: Failed to remove directory ${TMP_DIR}"


# Summarize result
echo ""
if [[ ${CHECK_FAILED} == "0" ]]; then
  echo "PASS: basic checks on newly-built image \"${IMG}\" succeeded"
else
  die "FAIL: basic checks on newly-built image \"${IMG}\" failed"
fi


# Apply the final image name and tag
FINAL_IMG="${FINAL_IMAGE_NAME}:${FINAL_TAG}"

DOCKER_VER=$("${DOCKER_BINARY}" version | grep Version | head -1 | awk '{print $NF}')
if [[ -z "${DOCKER_VER}" ]]; then
  die "ERROR: Failed to determine ${DOCKER_BINARY} version"
fi
DOCKER_MAJOR_VER=$(echo "${DOCKER_VER}" | cut -d. -f 1)
DOCKER_MINOR_VER=$(echo "${DOCKER_VER}" | cut -d. -f 2)

FORCE_TAG=""
if [[ "${DOCKER_MAJOR_VER}" -le 1 ]] && \
   [[ "${DOCKER_MINOR_VER}" -le 9 ]]; then
  FORCE_TAG="--force"
fi

"${DOCKER_BINARY}" tag ${FORCE_TAG} "${IMG}" "${FINAL_IMG}" || \
  die "Failed to tag intermediate docker image ${IMG} as ${FINAL_IMG}"

echo ""
echo "Successfully tagged docker image: ${FINAL_IMG}"

# Optional: call command specified by TF_DOCKER_BUILD_PUSH_CMD to push image
if [[ ! -z "${TF_DOCKER_BUILD_PUSH_CMD}" ]]; then
  ${TF_DOCKER_BUILD_PUSH_CMD} ${FINAL_IMG}
  if [[ $? == "0" ]]; then
    echo "Successfully pushed Docker image ${FINAL_IMG}"
  else
    die "FAIL: Failed to push Docker image ${FINAL_IMG}"
  fi
fi

# Optional: set TF_DOCKER_BUILD_PUSH_WITH_CREDENTIALS to push image
if [[ ! -z "${TF_DOCKER_BUILD_PUSH_WITH_CREDENTIALS}" ]]; then

  docker login -u "${TF_DOCKER_USERNAME}" \
    -p "${TF_DOCKER_PASSWORD}"

  if [[ $? != "0" ]]; then
    die "FAIL: Unable to login. Invalid credentials."
  fi
  docker push "${FINAL_IMG}"
  if [[ $? == "0" ]]; then
    docker logout
    echo "Successfully pushed Docker image ${FINAL_IMG}"
  else
    docker logout
    die "FAIL: Failed to push Docker image ${FINAL_IMG}"
  fi
fi
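
# A quick smoke test of the tagged image could look like the following
# (illustrative only; the python one-liner is an assumption, not one of
# this script's checks):
#
#   docker run --rm "${FINAL_IMG}" python -c "import tensorflow as tf; print(tf.__version__)"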
@ -1,18 +0,0 @@
#!/usr/bin/env bash
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================


jupyter notebook "$@"
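The script above simply forwards its arguments to `jupyter notebook`, so a typical invocation inside a container might look like the following (the flags are standard Jupyter options, shown only for illustration):

    ./run_jupyter.sh --allow-root --ip=0.0.0.0 --port=8888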
@ -1,33 +0,0 @@
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Start a simple interactive console with TensorFlow available."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import code
import sys


def main(_):
  """Run an interactive console."""
  code.interact()
  return 0


if __name__ == '__main__':
  sys.exit(main(sys.argv))
@ -2,16 +2,13 @@

This directory houses TensorFlow's Dockerfiles and the infrastructure used to
create and deploy them to
[Docker Hub](https://hub.docker.com/r/tensorflow/tensorflow).
[TensorFlow's Docker Hub](https://hub.docker.com/r/tensorflow/tensorflow).

**DO NOT EDIT THE DOCKERFILES/ DIRECTORY MANUALLY!** The files within are
maintained by `assembler.py`, which builds Dockerfiles from the files in
`partials/` and the rules in `spec.yml`. See
[the Contributing section](#contributing) for more information.

These Dockerfiles are planned to replace the Dockerfiles used to generate
[TensorFlow's official Docker images](https://hub.docker.com/r/tensorflow/tensorflow).

## Building

The Dockerfiles in the `dockerfiles` directory must have their build context set
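The hunk ends mid-sentence, but as a purely illustrative sketch of such a build-context invocation (the Dockerfile name `cpu.Dockerfile` and the tag are assumptions, not taken from this hunk):

    docker build -f ./dockerfiles/cpu.Dockerfile -t tf .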