diff --git a/.install b/.install
index bb47ce32..d7801ca0 100755
--- a/.install
+++ b/.install
@@ -3,7 +3,7 @@
 virtualenv -p python3 ../tmp/venv
 source ../tmp/venv/bin/activate
 pip install -r <(grep -v tensorflow requirements.txt)
-pip install tensorflow-gpu==1.11.0
+pip install tensorflow-gpu==1.12.0rc2
 
 python3 util/taskcluster.py --arch gpu --target ../tmp/native_client
 
diff --git a/DeepSpeech.py b/DeepSpeech.py
index 0f021be8..ea9503da 100755
--- a/DeepSpeech.py
+++ b/DeepSpeech.py
@@ -18,10 +18,12 @@ import time
 import traceback
 import inspect
 import progressbar
+import tempfile
 
 from functools import partial
 from six.moves import zip, range, filter, urllib, BaseHTTPServer
 from tensorflow.python.tools import freeze_graph
+from tensorflow.contrib.lite.python import tflite_convert
 from threading import Thread, Lock
 from util.audio import audiofile_to_input_vector
 from util.feeding import DataSet, ModelFeeder
@@ -1831,9 +1833,8 @@ def create_inference_graph(batch_size=1, n_steps=16, use_new_decoder=False, tfli
     return (
         {
             'input': input_tensor,
-            'input_lengths': seq_length,
-            'new_state_c': new_state_c,
-            'new_state_h': new_state_h,
+            'previous_state_c': previous_state_c,
+            'previous_state_h': previous_state_h,
         },
         {
             'outputs': logits,
@@ -1849,11 +1850,17 @@ def export():
     '''
     log_info('Exporting the model...')
     with tf.device('/cpu:0'):
+        from tensorflow.python.framework.ops import Tensor, Operation
 
         tf.reset_default_graph()
         session = tf.Session(config=session_config)
 
         inputs, outputs = create_inference_graph(batch_size=1, n_steps=FLAGS.n_steps, tflite=FLAGS.export_tflite)
+        input_names = ",".join(tensor.op.name for tensor in inputs.values())
+        output_names_tensors = [ tensor.op.name for tensor in outputs.values() if isinstance(tensor, Tensor) ]
+        output_names_ops = [ tensor.name for tensor in outputs.values() if isinstance(tensor, Operation) ]
+        output_names = ",".join(output_names_tensors + output_names_ops)
+        input_shapes = ":".join(",".join(map(str, tensor.shape)) for tensor in inputs.values())
 
         if not FLAGS.export_tflite:
             mapping = {v.op.name: v for v in tf.global_variables() if not v.op.name.startswith('previous_state_')}
@@ -1872,11 +1879,7 @@ def export():
         checkpoint = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
         checkpoint_path = checkpoint.model_checkpoint_path
 
-        if not FLAGS.export_tflite:
-            output_filename = 'output_graph.pb'
-        else:
-            output_filename = 'output_graph.fb'
-
+        output_filename = 'output_graph.pb'
         if FLAGS.remove_export:
             if os.path.isdir(FLAGS.export_dir):
                 log_info('Removing old export')
@@ -1887,31 +1890,61 @@ def export():
             if not os.path.isdir(FLAGS.export_dir):
                 os.makedirs(FLAGS.export_dir)
 
-            if not FLAGS.export_tflite:
-                output_node_names = 'logits,initialize_state'
-                variables_blacklist = 'previous_state_c,previous_state_h'
-            else:
-                output_node_names = 'logits,new_state_c,new_state_h'
-                variables_blacklist = ''
+            def do_graph_freeze(output_file=None, output_node_names=None, variables_blacklist=None):
+                freeze_graph.freeze_graph_with_def_protos(
+                    input_graph_def=session.graph_def,
+                    input_saver_def=saver.as_saver_def(),
+                    input_checkpoint=checkpoint_path,
+                    output_node_names=output_node_names,
+                    restore_op_name=None,
+                    filename_tensor_name=None,
+                    output_graph=output_file,
+                    clear_devices=False,
+                    variable_names_blacklist=variables_blacklist,
+                    initializer_nodes='')
 
-            # Freeze graph
-            freeze_graph.freeze_graph_with_def_protos(
-                input_graph_def=session.graph_def,
-                input_saver_def=saver.as_saver_def(),
-                input_checkpoint=checkpoint_path,
-                output_node_names=output_node_names,
-                restore_op_name=None,
-                filename_tensor_name=None,
-                output_graph=output_graph_path,
-                clear_devices=False,
-                variable_names_blacklist=variables_blacklist,
-                initializer_nodes='')
+            if not FLAGS.export_tflite:
+                do_graph_freeze(output_file=output_graph_path, output_node_names=output_names, variables_blacklist='previous_state_c,previous_state_h')
+            else:
+                temp_fd, temp_freeze = tempfile.mkstemp(dir=FLAGS.export_dir)
+                os.close(temp_fd)
+                do_graph_freeze(output_file=temp_freeze, output_node_names=output_names, variables_blacklist='')
+                output_tflite_path = os.path.join(FLAGS.export_dir, output_filename.replace('.pb', '.tflite'))
+                class TFLiteFlags():
+                    def __init__(self):
+                        self.graph_def_file = temp_freeze
+                        self.inference_type = 'FLOAT'
+                        self.input_arrays = input_names
+                        self.input_shapes = input_shapes
+                        self.output_arrays = output_names
+                        self.output_file = output_tflite_path
+                        self.output_format = 'TFLITE'
+
+                        default_empty = [
+                            'inference_input_type',
+                            'mean_values',
+                            'default_ranges_min', 'default_ranges_max',
+                            'drop_control_dependency',
+                            'reorder_across_fake_quant',
+                            'change_concat_input_ranges',
+                            'allow_custom_ops',
+                            'converter_mode',
+                            'post_training_quantize',
+                            'dump_graphviz_dir',
+                            'dump_graphviz_video'
+                        ]
+                        for e in default_empty:
+                            self.__dict__[e] = None
+
+                flags = TFLiteFlags()
+                tflite_convert._convert_model(flags)
+                os.unlink(temp_freeze)
+                log_info('Exported model for TF Lite engine as {}'.format(os.path.basename(output_tflite_path)))
 
             log_info('Models exported at %s' % (FLAGS.export_dir))
         except RuntimeError as e:
             log_error(str(e))
 
-
 def do_single_file_inference(input_file_path):
     with tf.Session(config=session_config) as session:
         inputs, outputs = create_inference_graph(batch_size=1, use_new_decoder=True)
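Note (editorial sketch, not part of the patch): the TFLite branch above freezes the graph to a temporary file and then drives `tflite_convert._convert_model` through the hand-built `TFLiteFlags` object. The snippet below shows the same frozen-graph-to-flatbuffer step through the TF 1.12 converter API, which can be handy for experimenting outside `DeepSpeech.py`; the file path and node names are placeholders, not the values the patch derives from `create_inference_graph()`.

```python
# Illustrative sketch only -- equivalent of the TFLiteFlags/_convert_model call
# above, using the TF 1.12 converter API. Paths and node names are placeholders.
import tensorflow as tf

graph_def_file = '/tmp/frozen_graph.pb'   # frozen graph, e.g. written by do_graph_freeze
input_arrays = ['input_node']             # placeholder input node name
output_arrays = ['logits']                # placeholder output node name

converter = tf.contrib.lite.TFLiteConverter.from_frozen_graph(
    graph_def_file, input_arrays, output_arrays)
tflite_model = converter.convert()        # returns the serialized flatbuffer

with open('/tmp/output_graph.tflite', 'wb') as f:
    f.write(tflite_model)
```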
diff --git a/Dockerfile b/Dockerfile
index 564ae57e..25425b10 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -62,7 +62,7 @@ RUN wget https://bootstrap.pypa.io/get-pip.py && \
 
 # Clone TensoFlow from Mozilla repo
 RUN git clone https://github.com/mozilla/tensorflow/
 WORKDIR /tensorflow
-RUN git checkout r1.11
+RUN git checkout r1.12
 
 # GPU Environment Setup
@@ -190,7 +190,7 @@ RUN cp /tensorflow/bazel-bin/native_client/libctc_decoder_with_kenlm.so /DeepSpe
 
 # Install TensorFlow
 WORKDIR /DeepSpeech/
-RUN pip install tensorflow-gpu==1.11.0
+RUN pip install tensorflow-gpu==1.12.0rc2
 
 
 # Make DeepSpeech and install Python bindings
diff --git a/README.md b/README.md
index 10f41e99..9ed85feb 100644
--- a/README.md
+++ b/README.md
@@ -45,6 +45,7 @@ See the output of `deepspeech -h` for more information on the use of `deepspeech
 - [Training a model](#training-a-model)
 - [Checkpointing](#checkpointing)
 - [Exporting a model for inference](#exporting-a-model-for-inference)
+- [Exporting a model for TFLite](#exporting-a-model-for-tflite)
 - [Distributed computing across more than one machine](#distributed-training-across-more-than-one-machine)
 - [Continuing training from a release model](#continuing-training-from-a-release-model)
 - [Code documentation](#code-documentation)
@@ -226,7 +227,7 @@ If you have a capable (Nvidia, at least 8GB of VRAM) GPU, it is highly recommend
 
 ```bash
 pip3 uninstall tensorflow
-pip3 install 'tensorflow-gpu==1.11.0'
+pip3 install 'tensorflow-gpu==1.12.0rc2'
 ```
 
 ### Common Voice training data
@@ -317,6 +318,10 @@ Be aware however that checkpoints are only valid for the same model geometry the
 If the `--export_dir` parameter is provided, a model will have been exported to this directory during training.
 Refer to the corresponding [README.md](native_client/README.md) for information on building and running a client that can use the exported model.
 
+### Exporting a model for TFLite
+
+If you want to experiment with the TF Lite engine, you need to export a model that is compatible with it, then use the `--export_tflite` flag. If you already have a trained model, you can re-export it for TFLite by running `DeepSpeech.py` again and specifying the same `checkpoint_dir` that you used for training, as well as passing `--notrain --notest --export_tflite --export_dir /model/export/destination`.
+
 ### Making a mmap-able model for inference
 
 The `output_graph.pb` model file generated in the above step will be loaded in memory to be dealt with when running inference.
diff --git a/native_client/README.md b/native_client/README.md
index ba43d328..8ec39ea4 100644
--- a/native_client/README.md
+++ b/native_client/README.md
@@ -52,7 +52,7 @@ Check the [main README](../README.md) for more details.
 If you'd like to build the binaries yourself, you'll need the following pre-requisites downloaded/installed:
 
 * [TensorFlow requirements](https://www.tensorflow.org/install/install_sources)
-* [TensorFlow `r1.11` sources](https://github.com/mozilla/tensorflow/tree/r1.11)
+* [TensorFlow `r1.12` sources](https://github.com/mozilla/tensorflow/tree/r1.12)
 * [libsox](https://sourceforge.net/projects/sox/)
 
 It is required to use our fork of TensorFlow since it includes fixes for common problems encountered when building the native client files.
diff --git a/requirements.txt b/requirements.txt
index 8cc67773..03cc814d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
 pandas
 progressbar2
 python-utils
-tensorflow == 1.11.0
+tensorflow == 1.12.0rc2
 numpy
 matplotlib
 scipy
diff --git a/taskcluster/darwin-amd64-cpu-opt.yml b/taskcluster/darwin-amd64-cpu-opt.yml
index 2426c7c9..62902d1a 100644
--- a/taskcluster/darwin-amd64-cpu-opt.yml
+++ b/taskcluster/darwin-amd64-cpu-opt.yml
@@ -6,8 +6,7 @@ build:
     - "index.project.deepspeech.deepspeech.native_client.osx.${event.head.sha}"
     - "notify.irc-channel.${notifications.irc}.on-exception"
     - "notify.irc-channel.${notifications.irc}.on-failed"
-  tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.bee825492fcf830bd65a024bf859cbfc218e1473.osx/artifacts/public/home.tar.xz"
-  summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.bee825492fcf830bd65a024bf859cbfc218e1473.osx/artifacts/public/summarize_graph"
+  tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.1c93ca24c99d7011ad639eea4cd96e4fe45e1a95.osx/artifacts/public/home.tar.xz"
   scripts:
     build: "taskcluster/host-build.sh"
     package: "taskcluster/package.sh"
diff --git a/taskcluster/darwin-opt-base.tyml b/taskcluster/darwin-opt-base.tyml
index adc9b134..7510a4a5 100644
--- a/taskcluster/darwin-opt-base.tyml
+++ b/taskcluster/darwin-opt-base.tyml
@@ -39,7 +39,6 @@ payload:
       training: { $eval: as_slugid("test-training_upstream-linux-amd64-py27mu-opt") }
     in:
       TENSORFLOW_BUILD_ARTIFACT: ${build.tensorflow}
-      SUMMARIZE_GRAPH_BINARY: ${build.summarize_graph}
       DEEPSPEECH_TEST_MODEL: https://queue.taskcluster.net/v1/task/${training}/artifacts/public/output_graph.pb
 
 # There is no VM yet running tasks on OSX
diff --git a/taskcluster/linux-amd64-cpu-opt.yml b/taskcluster/linux-amd64-cpu-opt.yml
index 54f38aed..5e854ceb 100644
--- a/taskcluster/linux-amd64-cpu-opt.yml
+++ b/taskcluster/linux-amd64-cpu-opt.yml
@@ -14,8 +14,7 @@ build:
   system_config:
     >
      ${swig.patch_nodejs.linux}
-  tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.bee825492fcf830bd65a024bf859cbfc218e1473.cpu/artifacts/public/home.tar.xz"
-  summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.bee825492fcf830bd65a024bf859cbfc218e1473.cpu/artifacts/public/summarize_graph"
+  tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.1c93ca24c99d7011ad639eea4cd96e4fe45e1a95.cpu/artifacts/public/home.tar.xz"
   scripts:
     build: "taskcluster/host-build.sh"
     package: "taskcluster/package.sh"
diff --git a/taskcluster/linux-amd64-ctc-opt.yml b/taskcluster/linux-amd64-ctc-opt.yml
index 535fcfbd..ff6f5593 100644
--- a/taskcluster/linux-amd64-ctc-opt.yml
+++ b/taskcluster/linux-amd64-ctc-opt.yml
@@ -4,8 +4,7 @@ build:
     - "pull_request.synchronize"
     - "pull_request.reopened"
   template_file: linux-opt-base.tyml
-  tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.bee825492fcf830bd65a024bf859cbfc218e1473.cpu/artifacts/public/home.tar.xz"
-  summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.bee825492fcf830bd65a024bf859cbfc218e1473.cpu/artifacts/public/summarize_graph"
+  tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.1c93ca24c99d7011ad639eea4cd96e4fe45e1a95.cpu/artifacts/public/home.tar.xz"
   scripts:
     build: 'taskcluster/decoder-build.sh'
     package: 'taskcluster/decoder-package.sh'
diff --git a/taskcluster/linux-amd64-gpu-opt.yml b/taskcluster/linux-amd64-gpu-opt.yml
index 2f7d79e0..22497b90 100644
--- a/taskcluster/linux-amd64-gpu-opt.yml
+++ b/taskcluster/linux-amd64-gpu-opt.yml
@@ -12,8 +12,7 @@ build:
   system_config:
     >
      ${swig.patch_nodejs.linux}
-  tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.bee825492fcf830bd65a024bf859cbfc218e1473.gpu/artifacts/public/home.tar.xz"
-  summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.bee825492fcf830bd65a024bf859cbfc218e1473.gpu/artifacts/public/summarize_graph"
+  tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.1c93ca24c99d7011ad639eea4cd96e4fe45e1a95.gpu/artifacts/public/home.tar.xz"
   maxRunTime: 14400
   scripts:
     build: "taskcluster/cuda-build.sh"
diff --git a/taskcluster/linux-arm64-cpu-opt.yml b/taskcluster/linux-arm64-cpu-opt.yml
index ba5e0ac4..136efe7b 100644
--- a/taskcluster/linux-arm64-cpu-opt.yml
+++ b/taskcluster/linux-arm64-cpu-opt.yml
@@ -4,8 +4,7 @@ build:
     - "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.arm64"
     - "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.arm64"
     - "index.project.deepspeech.deepspeech.native_client.arm64.${event.head.sha}"
-  tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.bee825492fcf830bd65a024bf859cbfc218e1473.arm64/artifacts/public/home.tar.xz"
-  summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.bee825492fcf830bd65a024bf859cbfc218e1473.cpu/artifacts/public/summarize_graph"
+  tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.1c93ca24c99d7011ad639eea4cd96e4fe45e1a95.arm64/artifacts/public/home.tar.xz"
   ## multistrap 2.2.0-ubuntu1 is broken in 14.04: https://bugs.launchpad.net/ubuntu/+source/multistrap/+bug/1313787
   system_setup:
     >
diff --git a/taskcluster/linux-opt-base.tyml b/taskcluster/linux-opt-base.tyml
index 3af60d35..c6fa9baa 100644
--- a/taskcluster/linux-opt-base.tyml
+++ b/taskcluster/linux-opt-base.tyml
@@ -36,7 +36,6 @@ then:
       training: { $eval: as_slugid("test-training_upstream-linux-amd64-py27mu-opt") }
     in:
       TENSORFLOW_BUILD_ARTIFACT: ${build.tensorflow}
-      SUMMARIZE_GRAPH_BINARY: ${build.summarize_graph}
       DEEPSPEECH_TEST_MODEL: https://queue.taskcluster.net/v1/task/${training}/artifacts/public/output_graph.pb
 
     command:
diff --git a/taskcluster/linux-rpi3-cpu-opt.yml b/taskcluster/linux-rpi3-cpu-opt.yml
index 76b867b3..d58cc6fb 100644
--- a/taskcluster/linux-rpi3-cpu-opt.yml
+++ b/taskcluster/linux-rpi3-cpu-opt.yml
@@ -4,8 +4,7 @@ build:
     - "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.arm"
     - "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.arm"
     - "index.project.deepspeech.deepspeech.native_client.arm.${event.head.sha}"
-  tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.bee825492fcf830bd65a024bf859cbfc218e1473.arm/artifacts/public/home.tar.xz"
-  summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.bee825492fcf830bd65a024bf859cbfc218e1473.cpu/artifacts/public/summarize_graph"
+  tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.1c93ca24c99d7011ad639eea4cd96e4fe45e1a95.arm/artifacts/public/home.tar.xz"
   ## multistrap 2.2.0-ubuntu1 is broken in 14.04: https://bugs.launchpad.net/ubuntu/+source/multistrap/+bug/1313787
   system_setup:
     >
diff --git a/taskcluster/node-package.yml b/taskcluster/node-package.yml
index d937bd41..85e1a0b9 100644
--- a/taskcluster/node-package.yml
+++ b/taskcluster/node-package.yml
@@ -16,8 +16,7 @@ build:
   system_config:
     >
      ${swig.patch_nodejs.linux}
-  tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.bee825492fcf830bd65a024bf859cbfc218e1473.cpu/artifacts/public/home.tar.xz"
-  summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.bee825492fcf830bd65a024bf859cbfc218e1473.cpu/artifacts/public/summarize_graph"
+  tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.1c93ca24c99d7011ad639eea4cd96e4fe45e1a95.cpu/artifacts/public/home.tar.xz"
   scripts:
     build: "taskcluster/node-build.sh"
     package: "taskcluster/node-package.sh"
diff --git a/taskcluster/test-armbian-opt-base.tyml b/taskcluster/test-armbian-opt-base.tyml
index 561072ff..4c314d9a 100644
--- a/taskcluster/test-armbian-opt-base.tyml
+++ b/taskcluster/test-armbian-opt-base.tyml
@@ -35,7 +35,6 @@ then:
       linux_arm64_build: { $eval: as_slugid("linux-arm64-cpu-opt") }
       node_package: { $eval: as_slugid("node-package") }
     in:
-      CONVERT_GRAPHDEF_MEMMAPPED: ${build.convert_graphdef}
       DEEPSPEECH_ARTIFACTS_ROOT: https://queue.taskcluster.net/v1/task/${linux_arm64_build}/artifacts/public
       DEEPSPEECH_NODEJS: https://queue.taskcluster.net/v1/task/${node_package}/artifacts/public
       DEEPSPEECH_TEST_MODEL: https://queue.taskcluster.net/v1/task/${training}/artifacts/public/output_graph.pb
@@ -44,7 +43,7 @@ then:
       PIP_DEFAULT_TIMEOUT: "60"
       PIP_EXTRA_INDEX_URL: "https://lissyx.github.io/deepspeech-python-wheels/"
       EXTRA_PYTHON_CONFIGURE_OPTS: "--with-fpectl" # Required by Debian Stretch
-      EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.11.0-11-gbee8254"
+      EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.12.0-rc2-5-g1c93ca2"
 
     command:
       - "/bin/bash"
diff --git a/taskcluster/test-darwin-opt-base.tyml b/taskcluster/test-darwin-opt-base.tyml
index df787a89..0338f93e 100644
--- a/taskcluster/test-darwin-opt-base.tyml
+++ b/taskcluster/test-darwin-opt-base.tyml
@@ -41,7 +41,7 @@ then:
       DEEPSPEECH_TEST_MODEL: https://queue.taskcluster.net/v1/task/${training}/artifacts/public/output_graph.pb
       DEEPSPEECH_PROD_MODEL: https://github.com/reuben/DeepSpeech/releases/download/v0.2.0-prod-ctcdecode/output_graph.pb
       DEEPSPEECH_PROD_MODEL_MMAP: https://github.com/reuben/DeepSpeech/releases/download/v0.2.0-prod-ctcdecode/output_graph.pbmm
-      EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.11.0-11-gbee8254"
+      EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.12.0-rc2-5-g1c93ca2"
 
     command:
       - - "/bin/bash"
diff --git a/taskcluster/test-linux-opt-base.tyml b/taskcluster/test-linux-opt-base.tyml
index 5fe0d1f0..b99d8fc4 100644
--- a/taskcluster/test-linux-opt-base.tyml
+++ b/taskcluster/test-linux-opt-base.tyml
@@ -44,7 +44,7 @@ then:
      DEEPSPEECH_PROD_MODEL: https://github.com/reuben/DeepSpeech/releases/download/v0.2.0-prod-ctcdecode/output_graph.pb
      DEEPSPEECH_PROD_MODEL_MMAP: https://github.com/reuben/DeepSpeech/releases/download/v0.2.0-prod-ctcdecode/output_graph.pbmm
      PIP_DEFAULT_TIMEOUT: "60"
-     EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.11.0-11-gbee8254"
+     EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.12.0-rc2-5-g1c93ca2"
 
    command:
      - "/bin/bash"
diff --git a/taskcluster/test-raspbian-opt-base.tyml b/taskcluster/test-raspbian-opt-base.tyml
index f9fd9d60..dbb4e012 100644
--- a/taskcluster/test-raspbian-opt-base.tyml
+++ b/taskcluster/test-raspbian-opt-base.tyml
@@ -35,7 +35,6 @@ then:
       linux_rpi3_build: { $eval: as_slugid("linux-rpi3-cpu-opt") }
       node_package: { $eval: as_slugid("node-package") }
     in:
-      CONVERT_GRAPHDEF_MEMMAPPED: ${build.convert_graphdef}
       DEEPSPEECH_ARTIFACTS_ROOT: https://queue.taskcluster.net/v1/task/${linux_rpi3_build}/artifacts/public
       DEEPSPEECH_NODEJS: https://queue.taskcluster.net/v1/task/${node_package}/artifacts/public
       DEEPSPEECH_TEST_MODEL: https://queue.taskcluster.net/v1/task/${training}/artifacts/public/output_graph.pb
@@ -44,7 +43,7 @@ then:
       PIP_DEFAULT_TIMEOUT: "60"
       PIP_EXTRA_INDEX_URL: "https://www.piwheels.org/simple"
       EXTRA_PYTHON_CONFIGURE_OPTS: "--with-fpectl" # Required by Raspbian Stretch / PiWheels
-      EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.11.0-11-gbee8254"
+      EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.12.0-rc2-5-g1c93ca2"
 
     command:
       - "/bin/bash"
diff --git a/taskcluster/test-training_upstream-linux-amd64-py27mu-opt.yml b/taskcluster/test-training_upstream-linux-amd64-py27mu-opt.yml
index ef98a1e4..632f8316 100644
--- a/taskcluster/test-training_upstream-linux-amd64-py27mu-opt.yml
+++ b/taskcluster/test-training_upstream-linux-amd64-py27mu-opt.yml
@@ -7,7 +7,7 @@ build:
       apt-get -qq -y install ${python.packages_trusty.apt}
   args:
     tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-train-tests.sh 2.7.14:mu"
-    convert_graphdef: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.bee825492fcf830bd65a024bf859cbfc218e1473.cpu/artifacts/public/convert_graphdef_memmapped_format"
+    convert_graphdef: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.12.1c93ca24c99d7011ad639eea4cd96e4fe45e1a95.cpu/artifacts/public/convert_graphdef_memmapped_format"
 metadata:
   name: "DeepSpeech Linux AMD64 CPU upstream training Py2.7 mu"
   description: "Training a DeepSpeech LDC93S1 model for Linux/AMD64 using upstream TensorFlow Python 2.7 mu, CPU only, optimized version"
diff --git a/tc-train-tests.sh b/tc-train-tests.sh
index fb65e891..5452fee2 100755
--- a/tc-train-tests.sh
+++ b/tc-train-tests.sh
@@ -66,7 +66,7 @@ pushd ${HOME}/DeepSpeech/ds/
 popd
 
 cp /tmp/train/output_graph.pb ${TASKCLUSTER_ARTIFACTS}
-cp /tmp/train/output_graph.fb ${TASKCLUSTER_ARTIFACTS}
+cp /tmp/train/output_graph.tflite ${TASKCLUSTER_ARTIFACTS}
 
 if [ ! -z "${CONVERT_GRAPHDEF_MEMMAPPED}" ]; then
   convert_graphdef=$(basename "${CONVERT_GRAPHDEF_MEMMAPPED}")
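For a quick sanity check of the exported artifact, the `.tflite` file copied above can be loaded with the TFLite interpreter shipped in TF 1.12. This is a minimal sketch, not part of the patch; the model path mirrors the one used in `tc-train-tests.sh` and is an assumption for local use, and the reported tensor details depend on the exported graph.

```python
# Minimal sanity check that an exported .tflite model loads and exposes its
# input/output tensors. The path below is an assumption, not from the patch.
import tensorflow as tf

interpreter = tf.contrib.lite.Interpreter(model_path='/tmp/train/output_graph.tflite')
interpreter.allocate_tensors()

# Print what the exported graph expects and produces
for detail in interpreter.get_input_details():
    print('input :', detail['name'], detail['shape'], detail['dtype'])
for detail in interpreter.get_output_details():
    print('output:', detail['name'], detail['shape'], detail['dtype'])
```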