Merge pull request #1957 from mozilla/update-1.13

Update to TensorFlow v1.13
Reuben Morais 2019-03-19 23:06:03 +00:00 committed by GitHub
commit eadc9ecb9e
24 changed files with 44 additions and 52 deletions


@@ -27,12 +27,6 @@ from util.logging import log_info, log_error, log_debug, log_warn
from util.preprocess import preprocess
from util.text import Alphabet
#TODO: remove once fully switched to 1.13
try:
import tensorflow.lite as lite # 1.13
except ImportError:
import tensorflow.contrib.lite as lite # 1.12
# Graph Creation
# ==============
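Note on the hunk above: TensorFlow 1.13 promotes TFLite out of contrib, so the 1.12-era fallback import of `tensorflow.contrib.lite` is removed and the code relies on `tf.lite` directly. A minimal sketch of that assumption:

```python
# Minimal sketch (assumes a TensorFlow 1.13 install): the TFLite API now lives
# in the core tf.lite namespace, so no contrib fallback import is needed.
import tensorflow as tf

assert hasattr(tf, 'lite'), 'expected TensorFlow >= 1.13 with tf.lite'
print(tf.lite.TFLiteConverter)  # resolves without touching tensorflow.contrib
```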
@@ -91,21 +85,21 @@ def BiRNN(batch_x, seq_length, dropout, reuse=False, batch_size=None, n_steps=-1
b1 = variable_on_worker_level('b1', [Config.n_hidden_1], tf.zeros_initializer())
h1 = variable_on_worker_level('h1', [Config.n_input + 2*Config.n_input*Config.n_context, Config.n_hidden_1], tf.contrib.layers.xavier_initializer())
layer_1 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(batch_x, h1), b1)), FLAGS.relu_clip)
layer_1 = tf.nn.dropout(layer_1, (1.0 - dropout[0]))
layer_1 = tf.nn.dropout(layer_1, rate=dropout[0])
layers['layer_1'] = layer_1
# 2nd layer
b2 = variable_on_worker_level('b2', [Config.n_hidden_2], tf.zeros_initializer())
h2 = variable_on_worker_level('h2', [Config.n_hidden_1, Config.n_hidden_2], tf.contrib.layers.xavier_initializer())
layer_2 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(layer_1, h2), b2)), FLAGS.relu_clip)
layer_2 = tf.nn.dropout(layer_2, (1.0 - dropout[1]))
layer_2 = tf.nn.dropout(layer_2, rate=dropout[1])
layers['layer_2'] = layer_2
# 3rd layer
b3 = variable_on_worker_level('b3', [Config.n_hidden_3], tf.zeros_initializer())
h3 = variable_on_worker_level('h3', [Config.n_hidden_2, Config.n_hidden_3], tf.contrib.layers.xavier_initializer())
layer_3 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(layer_2, h3), b3)), FLAGS.relu_clip)
layer_3 = tf.nn.dropout(layer_3, (1.0 - dropout[2]))
layer_3 = tf.nn.dropout(layer_3, rate=dropout[2])
layers['layer_3'] = layer_3
# Now we create the forward and backward LSTM units.
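The dropout changes above track an API shift in 1.13: `tf.nn.dropout` now takes `rate` (the probability of dropping a unit) and deprecates `keep_prob` (the probability of keeping one), so `(1.0 - dropout[i])` becomes `rate=dropout[i]`. A short illustration of the equivalence, assuming a TF 1.13 session:

```python
# Illustration only (not part of the diff): keep_prob=1-d and rate=d request
# the same dropout behaviour; kept units are scaled by 1/(1-d) in both cases.
import tensorflow as tf

x = tf.ones([4, 4])
d = 0.4  # dropout rate, as stored in the `dropout` list above
old_style = tf.nn.dropout(x, keep_prob=1.0 - d)  # pre-1.13 spelling, now deprecated
new_style = tf.nn.dropout(x, rate=d)             # 1.13 spelling used in this commit
with tf.Session() as sess:
    print(sess.run([old_style, new_style]))      # masks differ randomly, scaling matches
```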
@@ -149,7 +143,7 @@ def BiRNN(batch_x, seq_length, dropout, reuse=False, batch_size=None, n_steps=-1
b5 = variable_on_worker_level('b5', [Config.n_hidden_5], tf.zeros_initializer())
h5 = variable_on_worker_level('h5', [Config.n_cell_dim, Config.n_hidden_5], tf.contrib.layers.xavier_initializer())
layer_5 = tf.minimum(tf.nn.relu(tf.add(tf.matmul(output, h5), b5)), FLAGS.relu_clip)
layer_5 = tf.nn.dropout(layer_5, (1.0 - dropout[5]))
layer_5 = tf.nn.dropout(layer_5, rate=dropout[5])
layers['layer_5'] = layer_5
# Now we apply the weight matrix `h6` and bias `b6` to the output of `layer_5`
@@ -821,7 +815,7 @@ def export():
frozen_graph = do_graph_freeze(output_node_names=output_names, variables_blacklist='')
output_tflite_path = os.path.join(FLAGS.export_dir, output_filename.replace('.pb', '.tflite'))
converter = lite.TFLiteConverter(frozen_graph, input_tensors=inputs.values(), output_tensors=outputs.values())
converter = tf.lite.TFLiteConverter(frozen_graph, input_tensors=inputs.values(), output_tensors=outputs.values())
converter.post_training_quantize = True
tflite_model = converter.convert()
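For the TFLite export path the converter also moves into the core namespace. The hunk builds `tf.lite.TFLiteConverter` directly from the in-memory frozen graph; a hedged sketch of the equivalent flow with the 1.13 `from_frozen_graph` helper, using placeholder file and tensor names, would be:

```python
# Sketch only: convert a frozen GraphDef on disk with the TF 1.13 TFLite API.
# Paths and tensor names below are placeholders, not the actual DeepSpeech export.
import tensorflow as tf

converter = tf.lite.TFLiteConverter.from_frozen_graph(
    graph_def_file='output_graph.pb',    # assumed path to the frozen graph
    input_arrays=['input_node'],         # hypothetical input tensor name
    output_arrays=['logits'])            # hypothetical output tensor name
converter.post_training_quantize = True  # weight quantization, as in the diff
with open('output_graph.tflite', 'wb') as f:
    f.write(converter.convert())
```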


@@ -1,6 +1,6 @@
# Need devel version because we need /usr/include/cudnn.h
# for compiling libctc_decoder_with_kenlm.so
FROM nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04
FROM nvidia/cuda:10.0-cudnn7-devel-ubuntu18.04
# >> START Install base software
@@ -31,21 +31,21 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
liblzma-dev \
locales \
pkg-config \
libsox-dev
libsox-dev \
openjdk-8-jdk \
bash-completion \
g++ \
unzip
# Install NCCL 2.2
RUN apt-get install -qq -y --allow-downgrades --allow-change-held-packages libnccl2=2.2.13-1+cuda9.0 libnccl-dev=2.2.13-1+cuda9.0
RUN apt-get install -qq -y --allow-downgrades --allow-change-held-packages libnccl2=2.3.7-1+cuda10.0 libnccl-dev=2.3.7-1+cuda10.0
# Install Bazel
RUN apt-get install -y openjdk-8-jdk
# Use bazel 0.11.1 because newer bazel fails to compile TensorFlow (https://github.com/tensorflow/tensorflow/issues/18450#issuecomment-381380000)
RUN apt-get install -y --no-install-recommends bash-completion g++ zlib1g-dev
RUN curl -LO "https://github.com/bazelbuild/bazel/releases/download/0.15.2/bazel_0.15.2-linux-x86_64.deb"
RUN curl -LO "https://github.com/bazelbuild/bazel/releases/download/0.19.2/bazel_0.19.2-linux-x86_64.deb"
RUN dpkg -i bazel_*.deb
# Install CUDA CLI Tools
RUN apt-get install -qq -y cuda-command-line-tools-9-0
RUN apt-get install -qq -y cuda-command-line-tools-10-0
# Install pip
RUN wget https://bootstrap.pypa.io/get-pip.py && \
@@ -62,19 +62,17 @@ RUN wget https://bootstrap.pypa.io/get-pip.py && \
# Clone TensorFlow from Mozilla repo
RUN git clone https://github.com/mozilla/tensorflow/
WORKDIR /tensorflow
RUN git checkout r1.12
RUN git checkout r1.13
# GPU Environment Setup
ENV TF_NEED_CUDA 1
ENV CUDA_TOOLKIT_PATH /usr/local/cuda
ENV CUDA_PKG_VERSION 9-0=9.0.176-1
ENV CUDA_VERSION 9.0.176
ENV TF_CUDA_VERSION 9.0
ENV TF_CUDNN_VERSION 7.3.0
ENV TF_CUDA_VERSION 10.0
ENV TF_CUDNN_VERSION 7
ENV CUDNN_INSTALL_PATH /usr/lib/x86_64-linux-gnu/
ENV TF_CUDA_COMPUTE_CAPABILITIES 6.0
ENV TF_NCCL_VERSION 2.2.13
ENV TF_NCCL_VERSION 2.3
# ENV NCCL_INSTALL_PATH /usr/lib/x86_64-linux-gnu/
# Common Environment Setup
@@ -186,7 +184,7 @@ RUN cp /tensorflow/bazel-bin/native_client/generate_trie /DeepSpeech/native_clie
# Install TensorFlow
WORKDIR /DeepSpeech/
RUN pip install tensorflow-gpu==1.12.0
RUN pip install tensorflow-gpu==1.13.1
# Make DeepSpeech and install Python bindings
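With the base image on CUDA 10.0 and the wheel bumped to 1.13.1, a quick sanity check (an assumption, not part of the Dockerfile) can confirm the installed build matches the toolkit in the container:

```python
# Optional sanity check (not part of the Docker build): confirm the
# tensorflow-gpu 1.13.1 wheel is CUDA-enabled and can see a GPU.
import tensorflow as tf

print(tf.VERSION)                                # expect '1.13.1'
print(tf.test.is_built_with_cuda())              # True for the -gpu wheel
print(tf.test.is_gpu_available(cuda_only=True))  # True only if a CUDA device is visible
```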
@@ -212,7 +210,7 @@ ENV PYTHONIOENCODING UTF-8
# Build KenLM in /DeepSpeech/native_client/kenlm folder
WORKDIR /DeepSpeech/native_client
RUN rm -rf kenlm \
&& git clone https://github.com/kpu/kenlm && cd kenlm \
&& git clone --depth 1 https://github.com/kpu/kenlm && cd kenlm \
&& mkdir -p build \
&& cd build \
&& cmake .. \


@@ -244,7 +244,7 @@ If you have a capable (NVIDIA, at least 8GB of VRAM) GPU, it is highly recommend
```bash
pip3 uninstall tensorflow
pip3 install 'tensorflow-gpu==1.12.0'
pip3 install 'tensorflow-gpu==1.13.1'
```
Please ensure you have the required [CUDA dependency](#cuda-dependency).


@@ -3,7 +3,7 @@
load("@org_tensorflow//tensorflow:tensorflow.bzl",
"tf_cc_shared_object", "if_cuda")
load("@org_tensorflow//tensorflow/contrib/lite:build_def.bzl",
load("@org_tensorflow//tensorflow/lite:build_def.bzl",
"tflite_copts", "tflite_linkopts")
config_setting(
@@ -92,7 +92,7 @@ tf_cc_shared_object(
}) + tflite_linkopts(),
deps = select({
"//native_client:tflite": [
"//tensorflow/contrib/lite/kernels:builtin_ops",
"//tensorflow/lite/kernels:builtin_ops",
],
"//conditions:default": [
"//tensorflow/core:core_cpu",


@@ -61,7 +61,7 @@ Check the [main README](../README.md) for more details.
If you'd like to build the binaries yourself, you'll need the following pre-requisites downloaded and installed:
* [TensorFlow requirements](https://www.tensorflow.org/install/install_sources)
* [TensorFlow `r1.12` sources](https://github.com/mozilla/tensorflow/tree/r1.12)
* [TensorFlow `r1.13` sources](https://github.com/mozilla/tensorflow/tree/r1.13)
* [libsox](https://sourceforge.net/projects/sox/)
It is required to use our fork of TensorFlow since it includes fixes for common problems encountered when building the native client files.


@@ -19,8 +19,8 @@
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/util/memmapped_file_system.h"
#else // USE_TFLITE
#include "tensorflow/contrib/lite/model.h"
#include "tensorflow/contrib/lite/kernels/register.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/kernels/register.h"
#endif // USE_TFLITE
#include "c_speech_features.h"


@@ -1,7 +1,7 @@
pandas
progressbar2
python-utils
tensorflow == 1.12.0
tensorflow == 1.13.1
numpy == 1.15.4
matplotlib
scipy
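Since requirements.txt pins exact versions, a quick check that an environment actually satisfies the new pins might look like the following (illustrative, not part of the repo):

```python
# Illustrative check (not in the repo): verify the updated pins resolve in the
# current environment; pkg_resources raises if a pinned version is not installed.
import pkg_resources

for requirement in ('tensorflow==1.13.1', 'numpy==1.15.4'):
    pkg_resources.require(requirement)
print('pinned requirements satisfied')
```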


@@ -23,4 +23,4 @@ build:
tests_cmdline: ''
convert_graphdef: ''
benchmark_model_bin: ''
tensorflow_git_desc: 'TensorFlow: v1.12.0-21-ge763555'
tensorflow_git_desc: 'TensorFlow: v1.13.1-8-g17f8188'
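The tensorflow_git_desc string follows `git describe` output (release tag, commits since the tag, abbreviated commit). A small parsing sketch, assuming that format holds:

```python
# Sketch: split a `git describe` string such as 'v1.13.1-8-g17f8188' into its
# release tag, commits-since-tag count, and abbreviated commit hash.
import re

desc = 'v1.13.1-8-g17f8188'
tag, ahead, sha = re.match(r'(v[\d.]+)-(\d+)-g([0-9a-f]+)$', desc).groups()
print(tag, ahead, sha)  # v1.13.1 8 17f8188
```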


@@ -12,7 +12,7 @@ build:
system_config:
>
${swig.patch_nodejs.linux}
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.e76355516a0c417cfd3fa8a122405477fcd1af0d.android-arm64/artifacts/public/home.tar.xz"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.13.17f818896d063427d38ba2ec6662902ea4ae79eb.android-arm64/artifacts/public/home.tar.xz"
scripts:
build: "taskcluster/android-build.sh arm64-v8a"
package: "taskcluster/android-package.sh arm64-v8a"


@@ -12,7 +12,7 @@ build:
system_config:
>
${swig.patch_nodejs.linux}
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.e76355516a0c417cfd3fa8a122405477fcd1af0d.android-armv7/artifacts/public/home.tar.xz"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.13.17f818896d063427d38ba2ec6662902ea4ae79eb.android-armv7/artifacts/public/home.tar.xz"
scripts:
build: "taskcluster/android-build.sh armeabi-v7a"
package: "taskcluster/android-package.sh armeabi-v7a"


@@ -13,7 +13,7 @@ build:
system_setup:
>
apt-get -qq -y install curl && ${swig.packages.install_script}
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.e76355516a0c417cfd3fa8a122405477fcd1af0d.android-armv7/artifacts/public/home.tar.xz"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.13.17f818896d063427d38ba2ec6662902ea4ae79eb.android-armv7/artifacts/public/home.tar.xz"
scripts:
build: "taskcluster/android-apk-build.sh"
package: "taskcluster/android-apk-package.sh"


@@ -12,7 +12,7 @@ build:
system_config:
>
${swig.patch_nodejs.linux}
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.e76355516a0c417cfd3fa8a122405477fcd1af0d.android-arm64/artifacts/public/home.tar.xz"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.13.17f818896d063427d38ba2ec6662902ea4ae79eb.android-arm64/artifacts/public/home.tar.xz"
scripts:
build: "taskcluster/android-build.sh x86_64"
package: "taskcluster/android-package.sh x86_64"


@@ -6,7 +6,7 @@ build:
- "index.project.deepspeech.deepspeech.native_client.osx.${event.head.sha}"
- "notify.irc-channel.${notifications.irc}.on-exception"
- "notify.irc-channel.${notifications.irc}.on-failed"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.e76355516a0c417cfd3fa8a122405477fcd1af0d.osx/artifacts/public/home.tar.xz"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.13.17f818896d063427d38ba2ec6662902ea4ae79eb.osx/artifacts/public/home.tar.xz"
scripts:
build: "taskcluster/host-build.sh"
package: "taskcluster/package.sh"


@@ -6,7 +6,7 @@ build:
- "index.project.deepspeech.deepspeech.native_client.osx-ctc.${event.head.sha}"
- "notify.irc-channel.${notifications.irc}.on-exception"
- "notify.irc-channel.${notifications.irc}.on-failed"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.e76355516a0c417cfd3fa8a122405477fcd1af0d.osx/artifacts/public/home.tar.xz"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.13.17f818896d063427d38ba2ec6662902ea4ae79eb.osx/artifacts/public/home.tar.xz"
maxRunTime: 14400
scripts:
build: 'taskcluster/decoder-build.sh'


@@ -14,7 +14,7 @@ build:
system_config:
>
${swig.patch_nodejs.linux}
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.e76355516a0c417cfd3fa8a122405477fcd1af0d.cpu/artifacts/public/home.tar.xz"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.13.17f818896d063427d38ba2ec6662902ea4ae79eb.cpu/artifacts/public/home.tar.xz"
scripts:
build: "taskcluster/host-build.sh"
package: "taskcluster/package.sh"


@@ -14,7 +14,7 @@ build:
system_config:
>
${swig.patch_nodejs.linux}
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.e76355516a0c417cfd3fa8a122405477fcd1af0d.cpu/artifacts/public/home.tar.xz"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.13.17f818896d063427d38ba2ec6662902ea4ae79eb.cpu/artifacts/public/home.tar.xz"
scripts:
build: 'taskcluster/decoder-build.sh'
package: 'taskcluster/decoder-package.sh'


@@ -12,7 +12,7 @@ build:
system_config:
>
${swig.patch_nodejs.linux}
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.e76355516a0c417cfd3fa8a122405477fcd1af0d.gpu/artifacts/public/home.tar.xz"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.13.17f818896d063427d38ba2ec6662902ea4ae79eb.gpu/artifacts/public/home.tar.xz"
maxRunTime: 14400
scripts:
build: "taskcluster/cuda-build.sh"


@@ -4,7 +4,7 @@ build:
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.arm64"
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.arm64"
- "index.project.deepspeech.deepspeech.native_client.arm64.${event.head.sha}"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.e76355516a0c417cfd3fa8a122405477fcd1af0d.arm64/artifacts/public/home.tar.xz"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.13.17f818896d063427d38ba2ec6662902ea4ae79eb.arm64/artifacts/public/home.tar.xz"
## multistrap 2.2.0-ubuntu1 is broken in 14.04: https://bugs.launchpad.net/ubuntu/+source/multistrap/+bug/1313787
system_setup:
>


@@ -4,7 +4,7 @@ build:
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.arm"
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.arm"
- "index.project.deepspeech.deepspeech.native_client.arm.${event.head.sha}"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.e76355516a0c417cfd3fa8a122405477fcd1af0d.arm/artifacts/public/home.tar.xz"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.13.17f818896d063427d38ba2ec6662902ea4ae79eb.arm/artifacts/public/home.tar.xz"
## multistrap 2.2.0-ubuntu1 is broken in 14.04: https://bugs.launchpad.net/ubuntu/+source/multistrap/+bug/1313787
system_setup:
>


@@ -16,7 +16,7 @@ build:
system_config:
>
${swig.patch_nodejs.linux}
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.e76355516a0c417cfd3fa8a122405477fcd1af0d.cpu/artifacts/public/home.tar.xz"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.13.17f818896d063427d38ba2ec6662902ea4ae79eb.cpu/artifacts/public/home.tar.xz"
scripts:
build: "taskcluster/node-build.sh"
package: "taskcluster/node-package.sh"


@@ -4,7 +4,7 @@ build:
- "test-training_upstream-linux-amd64-py27mu-opt"
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-lite_benchmark_model-ds-tests.sh"
benchmark_model_bin: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.e76355516a0c417cfd3fa8a122405477fcd1af0d.cpu/artifacts/public/lite_benchmark_model"
benchmark_model_bin: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.13.17f818896d063427d38ba2ec6662902ea4ae79eb.cpu/artifacts/public/lite_benchmark_model"
metadata:
name: "DeepSpeech Linux AMD64 CPU TF Lite benchmark_model"
description: "Testing DeepSpeech TF Lite benchmark_model for Linux/AMD64, CPU only, optimized version"


@@ -7,7 +7,7 @@
apt-get -qq -y install ${python.packages_trusty.apt}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-train-tests.sh 2.7.14:mu"
convert_graphdef: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.e76355516a0c417cfd3fa8a122405477fcd1af0d.cpu/artifacts/public/convert_graphdef_memmapped_format"
convert_graphdef: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.13.17f818896d063427d38ba2ec6662902ea4ae79eb.cpu/artifacts/public/convert_graphdef_memmapped_format"
metadata:
name: "DeepSpeech Linux AMD64 CPU upstream training Py2.7 mu"
description: "Training a DeepSpeech LDC93S1 model for Linux/AMD64 using upstream TensorFlow Python 2.7 mu, CPU only, optimized version"


@@ -6,7 +6,7 @@ build:
- "index.project.deepspeech.deepspeech.native_client.win.${event.head.sha}"
- "notify.irc-channel.${notifications.irc}.on-exception"
- "notify.irc-channel.${notifications.irc}.on-failed"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.e76355516a0c417cfd3fa8a122405477fcd1af0d.win/artifacts/public/home.tar.xz"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.13.17f818896d063427d38ba2ec6662902ea4ae79eb.win/artifacts/public/home.tar.xz"
scripts:
build: "taskcluster/win-build.sh"
package: "taskcluster/win-package.sh"


@@ -6,7 +6,7 @@ build:
- "index.project.deepspeech.deepspeech.native_client.win-cuda.${event.head.sha}"
- "notify.irc-channel.${notifications.irc}.on-exception"
- "notify.irc-channel.${notifications.irc}.on-failed"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.e76355516a0c417cfd3fa8a122405477fcd1af0d.win-cuda/artifacts/public/home.tar.xz"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.13.17f818896d063427d38ba2ec6662902ea4ae79eb.win-cuda/artifacts/public/home.tar.xz"
scripts:
build: "taskcluster/win-build.sh --cuda"
package: "taskcluster/win-package.sh"