commit 86c88c2310

.gitignore (vendored, 2 changes)

@@ -32,3 +32,5 @@
/doc/.build/
/doc/xml-c/
/doc/xml-java/
Dockerfile.build
Dockerfile.train
@@ -45,7 +45,7 @@ tasks:
- "--login"
- "-cxe"
- >
  echo "deb http://archive.ubuntu.com/ubuntu/ trusty-updates main" > /etc/apt/sources.list.d/trusty-updates.list &&
  echo "deb http://archive.ubuntu.com/ubuntu/ xenial-updates main" > /etc/apt/sources.list.d/xenial-updates.list &&
  apt-get -qq update && apt-get -qq -y install git python3-pip curl sudo &&
  adduser --system --home /home/build-user build-user &&
  cd /home/build-user/ &&
@@ -1,83 +1,73 @@
# Need devel version cause we need /usr/include/cudnn.h
# for compiling libctc_decoder_with_kenlm.so
FROM nvidia/cuda:10.0-cudnn7-devel-ubuntu18.04
# Please refer to the USING documentation, "Dockerfile for building from source"

# Need devel version cause we need /usr/include/cudnn.h
FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04

ENV DEEPSPEECH_REPO=#DEEPSPEECH_REPO#
ENV DEEPSPEECH_SHA=#DEEPSPEECH_SHA#

# >> START Install base software

# Get basic packages
RUN apt-get update && apt-get install -y --no-install-recommends \
        apt-utils \
        bash-completion \
        build-essential \
        ca-certificates \
        cmake \
        curl \
        wget \
        g++ \
        gcc \
        git \
        git-lfs \
        libbz2-dev \
        libboost-all-dev \
        libgsm1-dev \
        libltdl-dev \
        liblzma-dev \
        libmagic-dev \
        libpng-dev \
        libsox-fmt-mp3 \
        libsox-dev \
        locales \
        openjdk-8-jdk \
        pkg-config \
        python3 \
        python3-dev \
        python3-pip \
        python3-wheel \
        python3-numpy \
        libcurl3-dev \
        ca-certificates \
        gcc \
        sox \
        libsox-fmt-mp3 \
        htop \
        nano \
        cmake \
        libboost-all-dev \
        zlib1g-dev \
        libbz2-dev \
        liblzma-dev \
        locales \
        pkg-config \
        libpng-dev \
        libsox-dev \
        libmagic-dev \
        libgsm1-dev \
        libltdl-dev \
        openjdk-8-jdk \
        bash-completion \
        g++ \
        unzip
        unzip \
        wget \
        zlib1g-dev

RUN ln -s -f /usr/bin/python3 /usr/bin/python

# Install NCCL 2.2
RUN apt-get --no-install-recommends install -qq -y --allow-downgrades --allow-change-held-packages libnccl2=2.3.7-1+cuda10.0 libnccl-dev=2.3.7-1+cuda10.0
RUN update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1
RUN update-alternatives --install /usr/bin/python python /usr/bin/python3 1

# Install Bazel
RUN curl -LO "https://github.com/bazelbuild/bazel/releases/download/0.24.1/bazel_0.24.1-linux-x86_64.deb"
RUN curl -LO "https://github.com/bazelbuild/bazel/releases/download/2.0.0/bazel_2.0.0-linux-x86_64.deb"
RUN dpkg -i bazel_*.deb

# Install CUDA CLI Tools
RUN apt-get --no-install-recommends install -qq -y cuda-command-line-tools-10-0

# Install pip
RUN wget https://bootstrap.pypa.io/get-pip.py && \
    python3 get-pip.py && \
    rm get-pip.py

# << END Install base software



# >> START Configure Tensorflow Build

# Clone TensorFlow from Mozilla repo
RUN git clone https://github.com/mozilla/tensorflow/
WORKDIR /tensorflow
RUN git checkout r1.15

RUN git checkout r2.2

# GPU Environment Setup
ENV TF_NEED_ROCM 0
ENV TF_NEED_OPENCL_SYCL 0
ENV TF_NEED_OPENCL 0
ENV TF_NEED_CUDA 1
ENV TF_CUDA_PATHS "/usr/local/cuda,/usr/lib/x86_64-linux-gnu/"
ENV TF_CUDA_VERSION 10.0
ENV TF_CUDNN_VERSION 7
ENV TF_CUDA_PATHS "/usr,/usr/local/cuda-10.1,/usr/lib/x86_64-linux-gnu/"
ENV TF_CUDA_VERSION 10.1
ENV TF_CUDNN_VERSION 7.6
ENV TF_CUDA_COMPUTE_CAPABILITIES 6.0
ENV TF_NCCL_VERSION 2.3
ENV TF_NCCL_VERSION 2.4

# Common Environment Setup
ENV TF_BUILD_CONTAINER_TYPE GPU
@@ -105,14 +95,12 @@ ENV TF_NEED_TENSORRT 0
ENV TF_NEED_GDR 0
ENV TF_NEED_VERBS 0
ENV TF_NEED_OPENCL_SYCL 0

ENV PYTHON_BIN_PATH /usr/bin/python3.6
ENV PYTHON_LIB_PATH /usr/lib/python3.6/dist-packages
ENV PYTHON_LIB_PATH /usr/local/lib/python3.6/dist-packages

# << END Configure Tensorflow Build



# >> START Configure Bazel

# Running bazel inside a `docker build` command causes trouble, cf:
@@ -124,39 +112,17 @@ RUN echo "startup --batch" >>/etc/bazel.bazelrc
RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \
    >>/etc/bazel.bazelrc

# Put cuda libraries to where they are expected to be
RUN mkdir /usr/local/cuda/lib && \
    ln -s /usr/lib/x86_64-linux-gnu/libnccl.so.2 /usr/local/cuda/lib/libnccl.so.2 && \
    ln -s /usr/include/nccl.h /usr/local/cuda/include/nccl.h && \
    ln -s /usr/local/cuda/lib64/stubs/libcuda.so /usr/local/cuda/lib64/stubs/libcuda.so.1 && \
    ln -s /usr/include/cudnn.h /usr/local/cuda/include/cudnn.h


# Set library paths
ENV LD_LIBRARY_PATH $LD_LIBRARY_PATH:/usr/local/cuda/extras/CUPTI/lib64:/usr/local/cuda/lib64:/usr/lib/x86_64-linux-gnu/:/usr/local/cuda/lib64/stubs/

# << END Configure Bazel

WORKDIR /

# Copy DeepSpeech repo contents to container's /DeepSpeech
COPY . /DeepSpeech/

# Alternative clone from GitHub
# RUN apt-get update && apt-get install -y git-lfs
# WORKDIR /
# RUN git lfs install
# RUN git clone https://github.com/mozilla/DeepSpeech.git

RUN git clone $DEEPSPEECH_REPO
WORKDIR /DeepSpeech

RUN DS_NODECODER=1 pip3 --no-cache-dir install .
RUN git checkout $DEEPSPEECH_SHA

# Link DeepSpeech native_client libs to tf folder
RUN ln -s /DeepSpeech/native_client /tensorflow



# >> START Build and bind

WORKDIR /tensorflow
@@ -170,59 +136,60 @@ RUN ./configure

# passing LD_LIBRARY_PATH is required cause Bazel doesn't pickup it from environment


# Build DeepSpeech
RUN bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic --config=cuda -c opt --copt=-O3 --copt="-D_GLIBCXX_USE_CXX11_ABI=0" --copt=-mtune=generic --copt=-march=x86-64 --copt=-msse --copt=-msse2 --copt=-msse3 --copt=-msse4.1 --copt=-msse4.2 --copt=-mavx --copt=-fvisibility=hidden //native_client:libdeepspeech.so --verbose_failures --action_env=LD_LIBRARY_PATH=${LD_LIBRARY_PATH}

###
### Using TensorFlow upstream should work
###
# # Build TF pip package
# RUN bazel build --config=opt --config=cuda --copt="-D_GLIBCXX_USE_CXX11_ABI=0" --copt=-mtune=generic --copt=-march=x86-64 --copt=-msse --copt=-msse2 --copt=-msse3 --copt=-msse4.1 --copt=-msse4.2 --copt=-mavx //tensorflow/tools/pip_package:build_pip_package --verbose_failures --action_env=LD_LIBRARY_PATH=${LD_LIBRARY_PATH}
#
# # Build wheel
# RUN bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg
#
# # Install tensorflow from our custom wheel
# RUN pip3 install /tmp/tensorflow_pkg/*.whl
RUN bazel build \
    --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" \
    --config=monolithic \
    --config=cuda \
    -c opt \
    --copt=-O3 \
    --copt="-D_GLIBCXX_USE_CXX11_ABI=0" \
    --copt=-mtune=generic \
    --copt=-march=x86-64 \
    --copt=-msse \
    --copt=-msse2 \
    --copt=-msse3 \
    --copt=-msse4.1 \
    --copt=-msse4.2 \
    --copt=-mavx \
    --copt=-fvisibility=hidden \
    //native_client:libdeepspeech.so \
    --verbose_failures \
    --action_env=LD_LIBRARY_PATH=${LD_LIBRARY_PATH}

# Copy built libs to /DeepSpeech/native_client
RUN cp /tensorflow/bazel-bin/native_client/libdeepspeech.so /DeepSpeech/native_client/

# Install TensorFlow
WORKDIR /DeepSpeech/
RUN pip3 install tensorflow-gpu==1.15.0


# Build client.cc and install Python client and decoder bindings
ENV TFDIR /tensorflow

RUN nproc

WORKDIR /DeepSpeech/native_client
RUN make deepspeech
RUN make NUM_PROCESSES=$(nproc) deepspeech

WORKDIR /DeepSpeech
RUN cd native_client/python && make bindings
RUN cd native_client/python && make NUM_PROCESSES=$(nproc) bindings
RUN pip3 install --upgrade native_client/python/dist/*.whl

RUN cd native_client/ctcdecode && make bindings
RUN cd native_client/ctcdecode && make NUM_PROCESSES=$(nproc) bindings
RUN pip3 install --upgrade native_client/ctcdecode/dist/*.whl


# << END Build and bind



# Allow Python printing utf-8
ENV PYTHONIOENCODING UTF-8

# Build KenLM in /DeepSpeech/native_client/kenlm folder
WORKDIR /DeepSpeech/native_client
RUN rm -rf kenlm \
    && git clone --depth 1 https://github.com/kpu/kenlm && cd kenlm \
    && mkdir -p build \
    && cd build \
    && cmake .. \
    && make -j 4
RUN rm -rf kenlm && \
    git clone https://github.com/kpu/kenlm && \
    cd kenlm && \
    git checkout 87e85e66c99ceff1fab2500a7c60c01da7315eec && \
    mkdir -p build && \
    cd build && \
    cmake .. && \
    make -j $(nproc)

# Done
WORKDIR /DeepSpeech
Dockerfile.train.tmpl (new file, 53 lines)

@@ -0,0 +1,53 @@
# Please refer to the TRAINING documentation, "Basic Dockerfile for training"

FROM tensorflow/tensorflow:1.15.2-gpu-py3
ENV DEBIAN_FRONTEND=noninteractive

ENV DEEPSPEECH_REPO=#DEEPSPEECH_REPO#
ENV DEEPSPEECH_SHA=#DEEPSPEECH_SHA#

RUN apt-get update && apt-get install -y --no-install-recommends \
        apt-utils \
        bash-completion \
        build-essential \
        curl \
        git \
        git-lfs \
        libbz2-dev \
        locales \
        python3-venv \
        unzip \
        wget

# We need to remove it because it's breaking deepspeech install later with
# weird errors about setuptools
RUN apt-get purge -y python3-xdg

# Install dependencies for audio augmentation
RUN apt-get install -y --no-install-recommends libopus0 libsndfile1

WORKDIR /
RUN git lfs install
RUN git clone $DEEPSPEECH_REPO

WORKDIR /DeepSpeech
RUN git checkout $DEEPSPEECH_SHA

# Build CTC decoder first, to avoid clashes on incompatible versions upgrades
RUN cd native_client/ctcdecode && make NUM_PROCESSES=$(nproc) bindings
RUN pip3 install --upgrade native_client/ctcdecode/dist/*.whl

# Prepare deps
RUN pip3 install --upgrade pip==20.0.2 wheel==0.34.2 setuptools==46.1.3

# Install DeepSpeech
# - No need for the decoder since we did it earlier
# - There is already correct TensorFlow GPU installed on the base image,
#   we don't want to break that
RUN DS_NODECODER=y DS_NOTENSORFLOW=y pip3 install --upgrade -e .

# Tool to convert output graph for inference
RUN python3 util/taskcluster.py --source tensorflow --branch r1.15 \
    --artifact convert_graphdef_memmapped_format --target .

RUN ./bin/run-ldc93s1.sh
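One plausible way to exercise this template (a sketch only, not part of the commit; the image tag is a placeholder) is to generate the concrete Dockerfile with the Makefile added below and then build and run it with Docker:

    # Generate Dockerfile.train from the template, then build and start a training container
    make Dockerfile.train
    docker build -f Dockerfile.train -t deepspeech-train .
    docker run --gpus all -it deepspeech-train bash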
Makefile (new file, 8 lines)

@@ -0,0 +1,8 @@
DEEPSPEECH_REPO ?= https://github.com/mozilla/DeepSpeech.git
DEEPSPEECH_SHA ?= origin/master

Dockerfile%: Dockerfile%.tmpl
	sed \
		-e "s|#DEEPSPEECH_REPO#|$(DEEPSPEECH_REPO)|g" \
		-e "s|#DEEPSPEECH_SHA#|$(DEEPSPEECH_SHA)|g" \
		< $< > $@
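For reference, this pattern rule is what the ``make Dockerfile.train`` / ``make Dockerfile.build`` commands in the documentation further below invoke; a minimal usage sketch (the fork URL and branch are placeholders taken from those docs) is:

    # Generate Dockerfile.train from Dockerfile.train.tmpl with the default repo and SHA
    make Dockerfile.train

    # Generate Dockerfile.build against a specific fork and branch
    make Dockerfile.build DEEPSPEECH_REPO=git://your/fork DEEPSPEECH_SHA=origin/your-branch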
@@ -14,7 +14,7 @@ Project DeepSpeech

DeepSpeech is an open source Speech-To-Text engine, using a model trained by machine learning techniques based on `Baidu's Deep Speech research paper <https://arxiv.org/abs/1412.5567>`_. Project DeepSpeech uses Google's `TensorFlow <https://www.tensorflow.org/>`_ to make the implementation easier.

Documentation for installation, usage, and training models is available on `deepspeech.readthedocs.io <http://deepspeech.readthedocs.io/?badge=latest>`_.
Documentation for installation, usage, and training models are available on `deepspeech.readthedocs.io <http://deepspeech.readthedocs.io/?badge=latest>`_.

For the latest release, including pre-trained models and checkpoints, `see the latest release on GitHub <https://github.com/mozilla/DeepSpeech/releases/latest>`_.
@@ -93,6 +93,7 @@ def one_sample(sample):
    else:
        # This one is good - keep it for the target CSV
        rows.append((wav_filename, file_size, label))
        counter["imported_time"] += frames
    counter["all"] += 1
    counter["total_time"] += frames
    return (counter, rows)

@@ -78,6 +78,7 @@ def one_sample(args):
    else:
        # This one is good - keep it for the target CSV
        rows.append((os.path.split(wav_filename)[-1], file_size, label, sample[2]))
        counter["imported_time"] += frames
    counter["all"] += 1
    counter["total_time"] += frames

@@ -91,6 +91,7 @@ def one_sample(sample):
    else:
        # This one is good - keep it for the target CSV
        rows.append((wav_filename, file_size, label))
        counter["imported_time"] += frames
    counter["all"] += 1
    counter["total_time"] += frames

@@ -91,6 +91,7 @@ def one_sample(sample):
    else:
        # This one is good - keep it for the target CSV
        rows.append((wav_filename, file_size, label))
        counter["imported_time"] += frames
    counter["all"] += 1
    counter["total_time"] += frames
    return (counter, rows)

@@ -86,6 +86,7 @@ def one_sample(sample):
    else:
        # This one is good - keep it for the target CSV
        rows.append((wav_filename, file_size, label))
        counter["imported_time"] += frames
    counter["all"] += 1
    counter["total_time"] += frames

@@ -93,6 +93,7 @@ def one_sample(sample):
    else:
        # This one is good - keep it for the target CSV
        rows.append((wav_filename, file_size, label))
        counter["imported_time"] += frames
    counter["all"] += 1
    counter["total_time"] += frames
bin/play.py (16 changes)

@@ -10,7 +10,8 @@ import random
import argparse

from deepspeech_training.util.audio import LOADABLE_AUDIO_EXTENSIONS, AUDIO_TYPE_PCM, AUDIO_TYPE_WAV
from deepspeech_training.util.sample_collections import SampleList, LabeledSample, samples_from_source, augment_samples
from deepspeech_training.util.sample_collections import SampleList, LabeledSample, samples_from_source
from deepspeech_training.util.augmentations import parse_augmentations, apply_sample_augmentations, SampleAugmentation


def get_samples_in_play_order():

@@ -38,12 +39,15 @@ def get_samples_in_play_order():


def play_collection():
    augmentations = parse_augmentations(CLI_ARGS.augment)
    if any(not isinstance(a, SampleAugmentation) for a in augmentations):
        print("Warning: Some of the augmentations cannot be simulated by this command.")
    samples = get_samples_in_play_order()
    samples = augment_samples(samples,
                              audio_type=AUDIO_TYPE_PCM,
                              augmentation_specs=CLI_ARGS.augment,
                              process_ahead=0,
                              fixed_clock=CLI_ARGS.clock)
    samples = apply_sample_augmentations(samples,
                                         audio_type=AUDIO_TYPE_PCM,
                                         augmentations=augmentations,
                                         process_ahead=0,
                                         clock=CLI_ARGS.clock)
    for sample in samples:
        if not CLI_ARGS.quiet:
            print('Sample "{}"'.format(sample.sample_id), file=sys.stderr)
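As a quick sketch of the updated flow (mirroring the example shown later in doc/TRAINING.rst; ``test.wav`` is a placeholder file), the tool now parses ``--augment`` specs itself and only simulates sample-domain augmentations:

    # Simulate the codec augmentation at the beginning (clock 0.0) and at the end (clock 1.0) of training
    bin/play.py --augment codec[p=0.1,bitrate=48000:16000] --clock 0.0 test.wav
    bin/play.py --augment codec[p=0.1,bitrate=48000:16000] --clock 1.0 test.wav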
bin/run-tc-graph_augmentations.sh (new executable file, 28 lines)

@@ -0,0 +1,28 @@
#!/bin/sh

set -xe

ldc93s1_dir="./data/smoke_test"
ldc93s1_csv="${ldc93s1_dir}/ldc93s1.csv"

if [ ! -f "${ldc93s1_dir}/ldc93s1.csv" ]; then
    echo "Downloading and preprocessing LDC93S1 example data, saving in ${ldc93s1_dir}."
    python -u bin/import_ldc93s1.py ${ldc93s1_dir}
fi;

# Force only one visible device because we have a single-sample dataset
# and when trying to run on multiple devices (like GPUs), this will break
export CUDA_VISIBLE_DEVICES=0

python -u DeepSpeech.py --noshow_progressbar --noearly_stop \
    --train_files ${ldc93s1_csv} --train_batch_size 1 \
    --scorer "" \
    --augment dropout \
    --augment pitch \
    --augment tempo \
    --augment time_mask \
    --augment frequency_mask \
    --augment add \
    --augment multiply \
    --n_hidden 100 \
    --epochs 1
bin/run-tc-ldc93s1_new_metrics.sh (new executable file, 29 lines)

@@ -0,0 +1,29 @@
#!/bin/sh

set -xe

ldc93s1_dir="./data/smoke_test"
ldc93s1_csv="${ldc93s1_dir}/ldc93s1.csv"

epoch_count=$1
audio_sample_rate=$2

if [ ! -f "${ldc93s1_dir}/ldc93s1.csv" ]; then
    echo "Downloading and preprocessing LDC93S1 example data, saving in ${ldc93s1_dir}."
    python -u bin/import_ldc93s1.py ${ldc93s1_dir}
fi;

# Force only one visible device because we have a single-sample dataset
# and when trying to run on multiple devices (like GPUs), this will break
export CUDA_VISIBLE_DEVICES=0

python -u DeepSpeech.py --noshow_progressbar --noearly_stop \
    --train_files ${ldc93s1_csv} --train_batch_size 1 \
    --dev_files ${ldc93s1_csv} --dev_batch_size 1 \
    --test_files ${ldc93s1_csv} --test_batch_size 1 \
    --metrics_files ${ldc93s1_csv} \
    --n_hidden 100 --epochs $epoch_count \
    --max_to_keep 1 --checkpoint_dir '/tmp/ckpt_metrics' \
    --learning_rate 0.001 --dropout_rate 0.05 --export_dir '/tmp/train_metrics' \
    --scorer_path 'data/smoke_test/pruned_lm.scorer' \
    --audio_sample_rate ${audio_sample_rate}
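Since the script reads its epoch count and audio sample rate from positional parameters, a hypothetical invocation (the two values are placeholders, not taken from the commit) would look like:

    # $1 = number of epochs, $2 = audio sample rate in Hz
    ./bin/run-tc-ldc93s1_new_metrics.sh 1 16000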
@@ -41,12 +41,6 @@ if ! $compare --if-differ "${ldc93s1_wav}" /tmp/reverb-test.wav; then
    exit 1
fi

$play ${ldc93s1_wav} --augment gaps[n=10,size=100.0] --pipe >/tmp/gaps-test.wav
if ! $compare --if-differ "${ldc93s1_wav}" /tmp/gaps-test.wav; then
    echo "Gaps augmentation had no effect or changed basic sample properties"
    exit 1
fi

$play ${ldc93s1_wav} --augment resample[rate=4000] --pipe >/tmp/resample-test.wav
if ! $compare --if-differ "${ldc93s1_wav}" /tmp/resample-test.wav; then
    echo "Resample augmentation had no effect or changed basic sample properties"
@@ -61,8 +61,12 @@ def create_bundle(
        sys.exit(1)
    scorer.fill_dictionary(list(words))
    shutil.copy(lm_path, package_path)
    scorer.save_dictionary(package_path, True)  # append, not overwrite
    print("Package created in {}".format(package_path))
    # append, not overwrite
    if scorer.save_dictionary(package_path, True):
        print("Package created in {}".format(package_path))
    else:
        print("Error when creating {}".format(package_path))
        sys.exit(1)


class Tristate(object):
doc/SUPPORTED_PLATFORMS.rst (new file, 69 lines)

@@ -0,0 +1,69 @@
.. _supported-platforms-inference:

Supported platforms for inference
=================================

Here we maintain the list of supported platforms for running inference.

Linux / AMD64 without GPU
^^^^^^^^^^^^^^^^^^^^^^^^^
* x86-64 CPU with AVX/FMA (one can rebuild without AVX/FMA, but it might slow down inference)
* Ubuntu 14.04+ (glibc >= 2.19, libstdc++6 >= 4.8)
* Full TensorFlow runtime (``deepspeech`` packages)
* TensorFlow Lite runtime (``deepspeech-tflite`` packages)

Linux / AMD64 with GPU
^^^^^^^^^^^^^^^^^^^^^^
* x86-64 CPU with AVX/FMA (one can rebuild without AVX/FMA, but it might slow down inference)
* Ubuntu 14.04+ (glibc >= 2.19, libstdc++6 >= 4.8)
* CUDA 10.0 (and capable GPU)
* Full TensorFlow runtime (``deepspeech`` packages)
* TensorFlow Lite runtime (``deepspeech-tflite`` packages)

Linux / ARMv7
^^^^^^^^^^^^^
* Cortex-A53 compatible ARMv7 SoC with Neon support
* Raspbian Buster-compatible distribution
* TensorFlow Lite runtime (``deepspeech-tflite`` packages)

Linux / Aarch64
^^^^^^^^^^^^^^^
* Cortex-A72 compatible Aarch64 SoC
* ARMbian Buster-compatible distribution
* TensorFlow Lite runtime (``deepspeech-tflite`` packages)

Android / ARMv7
^^^^^^^^^^^^^^^
* ARMv7 SoC with Neon support
* Android 7.0-10.0
* NDK API level >= 21
* TensorFlow Lite runtime (``deepspeech-tflite`` packages)

Android / Aarch64
^^^^^^^^^^^^^^^^^
* Aarch64 SoC
* Android 7.0-10.0
* NDK API level >= 21
* TensorFlow Lite runtime (``deepspeech-tflite`` packages)

macOS / AMD64
^^^^^^^^^^^^^
* x86-64 CPU with AVX/FMA (one can rebuild without AVX/FMA, but it might slow down inference)
* macOS >= 10.10
* Full TensorFlow runtime (``deepspeech`` packages)
* TensorFlow Lite runtime (``deepspeech-tflite`` packages)

Windows / AMD64 without GPU
^^^^^^^^^^^^^^^^^^^^^^^^^^^
* x86-64 CPU with AVX/FMA (one can rebuild without AVX/FMA, but it might slow down inference)
* Windows Server >= 2012 R2 ; Windows >= 8.1
* Full TensorFlow runtime (``deepspeech`` packages)
* TensorFlow Lite runtime (``deepspeech-tflite`` packages)

Windows / AMD64 with GPU
^^^^^^^^^^^^^^^^^^^^^^^^
* x86-64 CPU with AVX/FMA (one can rebuild without AVX/FMA, but it might slow down inference)
* Windows Server >= 2012 R2 ; Windows >= 8.1
* CUDA 10.0 (and capable GPU)
* Full TensorFlow runtime (``deepspeech`` packages)
* TensorFlow Lite runtime (``deepspeech-tflite`` packages)
@@ -24,7 +24,7 @@ Then use the ``generate_lm.py`` script to generate ``lm.binary`` and ``vocab-500

As input you can use a plain text (e.g. ``file.txt``) or gzipped (e.g. ``file.txt.gz``) text file with one sentence in each line.

If you are using a container created from the Dockerfile, you can use ``--kenlm_bins /DeepSpeech/native_client/kenlm/build/bin/``.
If you are using a container created from ``Dockerfile.build``, you can use ``--kenlm_bins /DeepSpeech/native_client/kenlm/build/bin/``.
Else you have to build `KenLM <https://github.com/kpu/kenlm>`_ first and then pass the build directory to the script.

.. code-block:: bash

@@ -54,4 +54,4 @@ The LibriSpeech LM training text used by our scorer is around 4GB uncompressed,

With a text corpus in hand, you can then re-use the ``generate_lm.py`` and ``generate_package.py`` scripts to create your own scorer that is compatible with DeepSpeech clients and language bindings. Before building the language model, you must first familiarize yourself with the `KenLM toolkit <https://kheafield.com/code/kenlm/>`_. Most of the options exposed by the ``generate_lm.py`` script are simply forwarded to KenLM options of the same name, so you must read the KenLM documentation in order to fully understand their behavior.

After using ``generate_lm.py`` to create a KenLM language model binary file, you can use ``generate_package.py`` to create a scorer package as described in the previous section. Note that we have a :github:`lm_optimizer.py script <lm_optimizer.py>` which can be used to find good default values for alpha and beta. To use it, you must first
generate a package with any value set for default alpha and beta flags. For this step, it doesn't matter what values you use, as they'll be overridden by ``lm_optimizer.py``. Then, use ``lm_optimizer.py`` with this scorer file to find good alpha and beta values. Finally, use ``generate_package.py`` again, this time with the new values.
generate a package with any value set for default alpha and beta flags. For this step, it doesn't matter what values you use, as they'll be overridden by ``lm_optimizer.py``. Then, use ``lm_optimizer.py`` with this scorer file to find good alpha and beta values. Finally, use ``generate_package.py`` again, this time with the new values.
doc/TRAINING.rst (185 changes)

@@ -47,7 +47,9 @@ Install the required dependencies using ``pip3``\ :

   cd DeepSpeech
   pip3 install --upgrade pip==20.0.2 wheel==0.34.2 setuptools==46.1.3
   pip3 install --upgrade --force-reinstall -e .
   pip3 install --upgrade -e .

Remember to re-run the last ``pip3 install`` command above when you update the training code (for example by pulling new changes), in order to update any dependencies.

The ``webrtcvad`` Python package might require you to ensure you have proper tooling to build Python modules:

@@ -76,6 +78,22 @@ It has been reported for some people failure at training:

Setting the ``TF_FORCE_GPU_ALLOW_GROWTH`` environment variable to ``true`` seems to help in such cases. This could also be due to an incorrect version of libcudnn. Double check your versions with the :ref:`TensorFlow 1.15 documentation <cuda-deps>`.

Basic Dockerfile for training
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

We provide ``Dockerfile.train`` to automatically set up a basic training environment in Docker. You need to generate the Dockerfile from the template using:
This should ensure that you'll re-use the upstream Python 3 TensorFlow GPU-enabled Docker image.

.. code-block:: bash

   make Dockerfile.train

If you want to specify a different DeepSpeech repository / branch, you can pass ``DEEPSPEECH_REPO`` or ``DEEPSPEECH_SHA`` parameters:

.. code-block:: bash

   make Dockerfile.train DEEPSPEECH_REPO=git://your/fork DEEPSPEECH_SHA=origin/your-branch

Common Voice training data
^^^^^^^^^^^^^^^^^^^^^^^^^^

@@ -270,12 +288,6 @@ Augmentation

Augmentation is a useful technique for better generalization of machine learning models. Thus, a pre-processing pipeline with various augmentation techniques on raw pcm and spectrogram has been implemented and can be used while training the model. Following are the available augmentation techniques that can be enabled at training time by using the corresponding flags in the command line.


Audio Augmentation
------------------

Augmentations that are applied before potential feature caching can be specified through the ``--augment`` flag. Being a multi-flag, it can be specified multiple times (see below for an example).

Each sample of the training data will get treated by every specified augmentation in their given order. However: whether an augmentation will actually get applied to a sample is decided by chance on base of the augmentation's probability value. For example a value of ``p=0.1`` would apply the according augmentation to just 10% of all samples. This also means that augmentations are not mutually exclusive on a per-sample basis.

The ``--augment`` flag uses a common syntax for all augmentation types:

@@ -297,14 +309,31 @@ In the documentation below, whenever a value is specified as ``<float-range>`` o

* ``<value>~<r>``: A center value with a randomization radius around it. E.g. ``1.2~0.4`` will result in picking of a uniformly random value between 0.8 and 1.6 on each sample augmentation.

* ``<start>:<end>``: The value will range from `<start>` at the beginning of an epoch to `<end>` at the end of an epoch. E.g. ``-0.2:1.2`` (float) or ``2000:4000`` (int)
* ``<start>:<end>``: The value will range from `<start>` at the beginning of the training to `<end>` at the end of the training. E.g. ``-0.2:1.2`` (float) or ``2000:4000`` (int)

* ``<start>:<end>~<r>``: Combination of the two previous cases with a ranging center value. E.g. ``4-6~2`` would at the beginning of an epoch pick values between 2 and 6 and at the end of an epoch between 4 and 8.
* ``<start>:<end>~<r>``: Combination of the two previous cases with a ranging center value. E.g. ``4-6~2`` would at the beginning of the training pick values between 2 and 6 and at the end of the training between 4 and 8.

Ranges specified with integer limits will only assume integer (rounded) values.
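As a concrete illustration of the three range forms, using values that also appear in the full example command further below (an illustrative reading of the syntax, not an exhaustive specification):

    # <value>~<r>: center 1 with radius 0.2 -> a pitch factor drawn uniformly from [0.8, 1.2]
    --augment pitch[p=0.1,pitch=1~0.2]

    # <start>:<end>: the target volume moves from -10 dBFS at the start of training to -40 dBFS at the end
    --augment volume[p=0.1,dbfs=-10:-40]

    # <start>:<end>~<r>: the SNR center moves from 50 to 20 over training, with a radius of 10 around it
    --augment overlay[p=0.5,source=noise.sdb,layers=1,snr=50:20~10]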
If feature caching is enabled, these augmentations will only be performed on the first epoch and the result will be reused for subsequent epochs. The flag ``--augmentations_per_epoch N`` (by default `N` is 1) could be used to get more than one epoch worth of augmentations into the cache. During training, each epoch will do ``N`` passes over the training set, each time performing augmentation independently of previous passes. Be aware: this will also multiply the required size of the feature cache if it's enabled.
.. warning::
    When feature caching is enabled, by default the cache has no expiration limit and will be used for the entire training run. This will cause these augmentations to only be performed once during the first epoch and the result will be reused for subsequent epochs. This would not only hinder value ranges from reaching their intended final values, but could also lead to unintended over-fitting. In this case flag ``--cache_for_epochs N`` (with N > 1) should be used to periodically invalidate the cache after every N epochs and thus allow samples to be re-augmented in new ways and with current range-values.

Every augmentation targets a certain representation of the sample - in this documentation these representations are referred to as *domains*.
Augmentations are applied in the following order:

1. **sample** domain: The sample just got loaded and its waveform is represented as a NumPy array. For implementation reasons these augmentations are the only ones that can be "simulated" through ``bin/play.py``.

2. **signal** domain: The sample waveform is represented as a tensor.

3. **spectrogram** domain: The sample spectrogram is represented as a tensor.

4. **features** domain: The sample's mel spectrogram features are represented as a tensor.

Within a single domain, augmentations are applied in the same order as they appear in the command-line.


Sample domain augmentations
---------------------------

**Overlay augmentation** ``--augment overlay[p=<float>,source=<str>,snr=<float-range>,layers=<int-range>]``
    Layers another audio source (multiple times) onto augmented samples.
@@ -328,16 +357,6 @@ If feature caching is enabled, these augmentations will only be performed on the
    * **decay**: sound decay in dB per reflection - higher values will result in a less reflective perceived "room"


**Gaps augmentation** ``--augment gaps[p=<float>,n=<int-range>,size=<float-range>]``
    Sets time-intervals within the augmented samples to zero (silence) at random positions.

    * **p**: probability value between 0.0 (never) and 1.0 (always) if a given sample gets augmented by this method

    * **n**: number of intervals to set to zero

    * **size**: duration of intervals in ms


**Resample augmentation** ``--augment resample[p=<float>,rate=<int-range>]``
    Resamples augmented samples to another sample rate and then resamples back to the original sample rate.

@@ -361,6 +380,78 @@ If feature caching is enabled, these augmentations will only be performed on the

    * **dbfs** : target volume in dBFS (default value of 3.0103 will normalize min and max amplitudes to -1.0/1.0)

Spectrogram domain augmentations
--------------------------------

**Pitch augmentation** ``--augment pitch[p=<float>,pitch=<float-range>]``
    Scales spectrogram on frequency axis and thus changes pitch.

    * **p**: probability value between 0.0 (never) and 1.0 (always) if a given sample gets augmented by this method

    * **pitch**: pitch factor by which the frequency axis is scaled (e.g. a value of 2.0 will raise audio frequency by one octave)


**Tempo augmentation** ``--augment tempo[p=<float>,factor=<float-range>]``
    Scales spectrogram on time axis and thus changes playback tempo.

    * **p**: probability value between 0.0 (never) and 1.0 (always) if a given sample gets augmented by this method

    * **factor**: speed factor by which the time axis is stretched or shrunken (e.g. a value of 2.0 will double playback tempo)


**Frequency mask augmentation** ``--augment frequency_mask[p=<float>,n=<int-range>,size=<int-range>]``
    Sets frequency-intervals within the augmented samples to zero (silence) at random frequencies. See the SpecAugment paper for more details - https://arxiv.org/abs/1904.08779

    * **p**: probability value between 0.0 (never) and 1.0 (always) if a given sample gets augmented by this method

    * **n**: number of intervals to mask

    * **size**: number of frequency bands to mask per interval

Multi domain augmentations
--------------------------

**Time mask augmentation** ``--augment time_mask[p=<float>,n=<int-range>,size=<float-range>,domain=<domain>]``
    Sets time-intervals within the augmented samples to zero (silence) at random positions.

    * **p**: probability value between 0.0 (never) and 1.0 (always) if a given sample gets augmented by this method

    * **n**: number of intervals to set to zero

    * **size**: duration of intervals in ms

    * **domain**: data representation to apply augmentation to - "signal", "features" or "spectrogram" (default)


**Dropout augmentation** ``--augment dropout[p=<float>,rate=<float-range>,domain=<domain>]``
    Zeros random data points of the targeted data representation.

    * **p**: probability value between 0.0 (never) and 1.0 (always) if a given sample gets augmented by this method

    * **rate**: dropout rate ranging from 0.0 for no dropout to 1.0 for 100% dropout

    * **domain**: data representation to apply augmentation to - "signal", "features" or "spectrogram" (default)


**Add augmentation** ``--augment add[p=<float>,stddev=<float-range>,domain=<domain>]``
    Adds random values picked from a normal distribution (with a mean of 0.0) to all data points of the targeted data representation.

    * **p**: probability value between 0.0 (never) and 1.0 (always) if a given sample gets augmented by this method

    * **stddev**: standard deviation of the normal distribution to pick values from

    * **domain**: data representation to apply augmentation to - "signal", "features" (default) or "spectrogram"


**Multiply augmentation** ``--augment multiply[p=<float>,stddev=<float-range>,domain=<domain>]``
    Multiplies all data points of the targeted data representation with random values picked from a normal distribution (with a mean of 1.0).

    * **p**: probability value between 0.0 (never) and 1.0 (always) if a given sample gets augmented by this method

    * **stddev**: standard deviation of the normal distribution to pick values from

    * **domain**: data representation to apply augmentation to - "signal", "features" (default) or "spectrogram"


Example training with all augmentations:
@@ -368,18 +459,25 @@ Example training with all augmentations:

   python -u DeepSpeech.py \
       --train_files "train.sdb" \
       --augmentations_per_epoch 10 \
       --feature_cache ./feature.cache \
       --cache_for_epochs 10 \
       --epochs 100 \
       --augment overlay[p=0.5,source=noise.sdb,layers=1,snr=50:20~10] \
       --augment overlay[p=0.2,source=voices.sdb,layers=10:6,snr=50:20~10] \
       --augment reverb[p=0.1,delay=50.0~30.0,decay=10.0:2.0~1.0] \
       --augment gaps[p=0.05,n=1:3~2,size=10:100] \
       --augment resample[p=0.1,rate=12000:8000~4000] \
       --augment codec[p=0.1,bitrate=48000:16000] \
       --augment volume[p=0.1,dbfs=-10:-40] \
       --augment pitch[p=0.1,pitch=1~0.2] \
       --augment tempo[p=0.1,factor=1~0.5] \
       --augment frequency_mask[p=0.1,n=1:3,size=1:5] \
       --augment time_mask[p=0.1,domain=signal,n=3:10~2,size=50:100~40] \
       --augment dropout[p=0.1,rate=0.05] \
       --augment add[p=0.1,domain=signal,stddev=0~0.5] \
       --augment multiply[p=0.1,domain=features,stddev=0~0.5] \
       [...]


The ``bin/play.py`` tool also supports ``--augment`` parameters and can be used for experimenting with different configurations.
The ``bin/play.py`` tool also supports ``--augment`` parameters (for sample domain augmentations) and can be used for experimenting with different configurations.

Example of playing all samples with reverberation and maximized volume:
@@ -393,42 +491,3 @@ Example simulation of the codec augmentation of a wav-file first at the beginnin

   bin/play.py --augment codec[p=0.1,bitrate=48000:16000] --clock 0.0 test.wav
   bin/play.py --augment codec[p=0.1,bitrate=48000:16000] --clock 1.0 test.wav


The following augmentations are applied after feature caching, hence the way they are applied will not repeat epoch-wise.
Working on spectrogram and feature level, `bin/play.py` offers no ability to simulate them.

#. **Standard deviation for Gaussian additive noise:** ``--data_aug_features_additive``
#. **Standard deviation for Normal distribution around 1 for multiplicative noise:** ``--data_aug_features_multiplicative``
#. **Standard deviation for speeding-up tempo. If Standard deviation is 0, this augmentation is not performed:** ``--augmentation_speed_up_std``

Spectrogram Augmentation
------------------------

Inspired by Google Paper on `SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition <https://arxiv.org/abs/1904.08779>`_


#.
   **Keep rate of dropout augmentation on a spectrogram (if 1, no dropout will be performed on the spectrogram)**\ :

   * Keep Rate : ``--augmentation_spec_dropout_keeprate value between range [0 - 1]``

#.
   **Whether to use frequency and time masking augmentation:**

   * Enable / Disable : ``--augmentation_freq_and_time_masking / --noaugmentation_freq_and_time_masking``
   * Max range of masks in the frequency domain when performing freqtime-mask augmentation: ``--augmentation_freq_and_time_masking_freq_mask_range eg: 5``
   * Number of masks in the frequency domain when performing freqtime-mask augmentation: ``--augmentation_freq_and_time_masking_number_freq_masks eg: 3``
   * Max range of masks in the time domain when performing freqtime-mask augmentation: ``--augmentation_freq_and_time_masking_time_mask_range eg: 2``
   * Number of masks in the time domain when performing freqtime-mask augmentation: ``--augmentation_freq_and_time_masking_number_time_masks eg: 3``

#.
   **Whether to use spectrogram speed and tempo scaling:**

   * Enable / Disable : ``--augmentation_pitch_and_tempo_scaling / --noaugmentation_pitch_and_tempo_scaling``
   * Min value of pitch scaling: ``--augmentation_pitch_and_tempo_scaling_min_pitch eg:0.95``
   * Max value of pitch scaling: ``--augmentation_pitch_and_tempo_scaling_max_pitch eg:1.2``
   * Max value of tempo scaling: ``--augmentation_pitch_and_tempo_scaling_max_tempo eg:1.2``
@@ -28,7 +28,7 @@ Please refer to your system's documentation on how to install these dependencies
CUDA dependency
^^^^^^^^^^^^^^^

The GPU capable builds (Python, NodeJS, C++, etc) depend on the same CUDA runtime as upstream TensorFlow. Currently with TensorFlow 1.15 it depends on CUDA 10.0 and CuDNN v7.6. `See the TensorFlow documentation <https://www.tensorflow.org/install/gpu>`_.
The GPU capable builds (Python, NodeJS, C++, etc) depend on the same CUDA runtime as upstream TensorFlow. Currently with TensorFlow 2.2 it depends on CUDA 10.1 and CuDNN v7.6. `See the TensorFlow documentation <https://www.tensorflow.org/install/gpu>`_.

Getting the pre-trained model
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

@@ -37,8 +37,8 @@ If you want to use the pre-trained English model for performing speech-to-text,

.. code-block:: bash

   wget https://github.com/mozilla/DeepSpeech/releases/download/v0.7.3/deepspeech-0.7.3-models.pbmm
   wget https://github.com/mozilla/DeepSpeech/releases/download/v0.7.3/deepspeech-0.7.3-models.scorer
   wget https://github.com/mozilla/DeepSpeech/releases/download/v0.7.4/deepspeech-0.7.4-models.pbmm
   wget https://github.com/mozilla/DeepSpeech/releases/download/v0.7.4/deepspeech-0.7.4-models.scorer

Model compatibility
^^^^^^^^^^^^^^^^^^^

@@ -113,7 +113,7 @@ Note: the following command assumes you `downloaded the pre-trained model <#gett

.. code-block:: bash

   deepspeech --model deepspeech-0.7.3-models.pbmm --scorer deepspeech-0.7.3-models.scorer --audio my_audio_file.wav
   deepspeech --model deepspeech-0.7.4-models.pbmm --scorer deepspeech-0.7.4-models.scorer --audio my_audio_file.wav

The ``--scorer`` argument is optional, and represents an external language model to be used when transcribing the audio.

@@ -177,7 +177,7 @@ Note: the following command assumes you `downloaded the pre-trained model <#gett

.. code-block:: bash

   ./deepspeech --model deepspeech-0.7.3-models.pbmm --scorer deepspeech-0.7.3-models.scorer --audio audio_input.wav
   ./deepspeech --model deepspeech-0.7.4-models.pbmm --scorer deepspeech-0.7.4-models.scorer --audio audio_input.wav

See the help output with ``./deepspeech -h`` for more details.

@@ -186,6 +186,22 @@ Installing bindings from source

If pre-built binaries aren't available for your system, you'll need to install them from scratch. Follow the :github:`native client build and installation instructions <native_client/README.rst>`.

Dockerfile for building from source
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

We provide ``Dockerfile.build`` to automatically build ``libdeepspeech.so``, the C++ native client, Python bindings, and KenLM.
You need to generate the Dockerfile from the template using:

.. code-block:: bash

   make Dockerfile.build

If you want to specify a different DeepSpeech repository / branch, you can pass ``DEEPSPEECH_REPO`` or ``DEEPSPEECH_SHA`` parameters:

.. code-block:: bash

   make Dockerfile.build DEEPSPEECH_REPO=git://your/fork DEEPSPEECH_SHA=origin/your-branch

Third party bindings
^^^^^^^^^^^^^^^^^^^^

@@ -197,4 +213,4 @@ In addition to the bindings above, third party developers have started to provid

* `stes <https://github.com/stes>`_ provides preliminary `PKGBUILDs <https://wiki.archlinux.org/index.php/PKGBUILD>`_ to install the client and python bindings on `Arch Linux <https://www.archlinux.org/>`_ in the `arch-deepspeech <https://github.com/stes/arch-deepspeech>`_ repo.
* `gst-deepspeech <https://github.com/Elleo/gst-deepspeech>`_ provides a `GStreamer <https://gstreamer.freedesktop.org/>`_ plugin which can be used from any language with GStreamer bindings.
* `thecodrr <https://github.com/thecodrr>`_ provides `Vlang <https://vlang.io>`_ bindings. The installation and use of which is described in their `vspeech <https://github.com/thecodrr/vspeech>`_ repo.

* `eagledot <https://gitlab.com/eagledot>`_ provides `NIM-lang <https://nim-lang.org/>`_ bindings. The installation and use of which is described in their `nim-deepspeech <https://gitlab.com/eagledot/nim-deepspeech>`_ repo.
@@ -20,15 +20,15 @@ To install and use DeepSpeech all you have to do is:

   pip3 install deepspeech

   # Download pre-trained English model files
   curl -LO https://github.com/mozilla/DeepSpeech/releases/download/v0.7.3/deepspeech-0.7.3-models.pbmm
   curl -LO https://github.com/mozilla/DeepSpeech/releases/download/v0.7.3/deepspeech-0.7.3-models.scorer
   curl -LO https://github.com/mozilla/DeepSpeech/releases/download/v0.7.4/deepspeech-0.7.4-models.pbmm
   curl -LO https://github.com/mozilla/DeepSpeech/releases/download/v0.7.4/deepspeech-0.7.4-models.scorer

   # Download example audio files
   curl -LO https://github.com/mozilla/DeepSpeech/releases/download/v0.7.3/audio-0.7.3.tar.gz
   tar xvf audio-0.7.3.tar.gz
   curl -LO https://github.com/mozilla/DeepSpeech/releases/download/v0.7.4/audio-0.7.4.tar.gz
   tar xvf audio-0.7.4.tar.gz

   # Transcribe an audio file
   deepspeech --model deepspeech-0.7.3-models.pbmm --scorer deepspeech-0.7.3-models.scorer --audio audio/2830-3980-0043.wav
   deepspeech --model deepspeech-0.7.4-models.pbmm --scorer deepspeech-0.7.4-models.scorer --audio audio/2830-3980-0043.wav

A pre-trained English model is available for use and can be downloaded following the instructions in :ref:`the usage docs <usage-docs>`. For the latest release, including pre-trained models and checkpoints, `see the GitHub releases page <https://github.com/mozilla/DeepSpeech/releases/latest>`_.

@@ -44,7 +44,7 @@ Quicker inference can be performed using a supported NVIDIA GPU on Linux. See th

   pip3 install deepspeech-gpu

   # Transcribe an audio file.
   deepspeech --model deepspeech-0.7.3-models.pbmm --scorer deepspeech-0.7.3-models.scorer --audio audio/2830-3980-0043.wav
   deepspeech --model deepspeech-0.7.4-models.pbmm --scorer deepspeech-0.7.4-models.scorer --audio audio/2830-3980-0043.wav

Please ensure you have the required :ref:`CUDA dependencies <cuda-deps>`.

@@ -58,6 +58,8 @@ See the output of ``deepspeech -h`` for more information on the use of ``deepspe

   TRAINING

   SUPPORTED_PLATFORMS

.. toctree::
   :maxdepth: 2
   :caption: Decoder and scorer
@@ -1,10 +1,8 @@
# Description: Deepspeech native client library.

load(
    "@org_tensorflow//tensorflow:tensorflow.bzl",
    "if_cuda",
    "tf_cc_shared_object",
)
load("@org_tensorflow//tensorflow:tensorflow.bzl", "tf_cc_shared_object")
load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda")

load(
    "@org_tensorflow//tensorflow/lite:build_def.bzl",
    "tflite_copts",

@@ -124,8 +122,8 @@ tf_cc_shared_object(
    linkopts = select({
        "//tensorflow:macos": [],
        "//tensorflow:linux_x86_64": LINUX_LINKOPTS,
        "//tensorflow:rpi3": LINUX_LINKOPTS + ["-l:libstdc++.a"],
        "//tensorflow:rpi3-armv8": LINUX_LINKOPTS + ["-l:libstdc++.a"],
        "//tensorflow:rpi3": LINUX_LINKOPTS,
        "//tensorflow:rpi3-armv8": LINUX_LINKOPTS,
        "//tensorflow:windows": [],
        "//conditions:default": [],
    }) + tflite_linkopts(),

@@ -143,7 +141,6 @@ tf_cc_shared_object(
        ### CPU only build, libdeepspeech.so file size reduced by ~50%
        "//tensorflow/core/kernels:spectrogram_op",  # AudioSpectrogram
        "//tensorflow/core/kernels:bias_op",  # BiasAdd
        "//tensorflow/contrib/rnn:lstm_ops_kernels",  # BlockLSTM
        "//tensorflow/core/kernels:cast_op",  # Cast
        "//tensorflow/core/kernels:concat_op",  # ConcatV2
        "//tensorflow/core/kernels:constant_op",  # Const, Placeholder

@@ -163,9 +160,10 @@ tf_cc_shared_object(
        "//tensorflow/core/kernels:softmax_op",  # Softmax
        "//tensorflow/core/kernels:tile_ops",  # Tile
        "//tensorflow/core/kernels:transpose_op",  # Transpose
        "//tensorflow/core/kernels:rnn_ops",  # BlockLSTM
        # And we also need the op libs for these ops used in the model:
        "//tensorflow/core:audio_ops_op_lib",  # AudioSpectrogram, Mfcc
        "//tensorflow/contrib/rnn:lstm_ops_op_lib",  # BlockLSTM
        "//tensorflow/core:rnn_ops_op_lib",  # BlockLSTM
        "//tensorflow/core:math_ops_op_lib",  # Cast, Less, Max, MatMul, Minimum, Range
        "//tensorflow/core:array_ops_op_lib",  # ConcatV2, Const, ExpandDims, Fill, GatherNd, Identity, Pack, Placeholder, Reshape, Tile, Transpose
        "//tensorflow/core:no_op_op_lib",  # NoOp
@@ -5,8 +5,8 @@ Building DeepSpeech Binaries

If you'd like to build the DeepSpeech binaries yourself, you'll need the following pre-requisites downloaded and installed:

* `Mozilla's TensorFlow r1.15 branch <https://github.com/mozilla/tensorflow/tree/r1.15>`_
* `Bazel 0.24.1 <https://github.com/bazelbuild/bazel/releases/tag/0.24.1>`_
* `Mozilla's TensorFlow r2.2 branch <https://github.com/mozilla/tensorflow/tree/r2.2>`_
* `Bazel 2.0.0 <https://github.com/bazelbuild/bazel/releases/tag/2.0.0>`_
* `General TensorFlow requirements <https://www.tensorflow.org/install/install_sources>`_
* `libsox <https://sourceforge.net/projects/sox/>`_

@@ -36,12 +36,12 @@ Clone our fork of TensorFlow and checkout the correct version:

.. code-block::

   git clone https://github.com/mozilla/tensorflow.git
   git checkout origin/r1.15
   git checkout origin/r2.2

Bazel: Download & Install
^^^^^^^^^^^^^^^^^^^^^^^^^

First, install Bazel 0.24.1 following the `Bazel installation documentation <https://docs.bazel.build/versions/0.24.0/install.html>`_.
First, install Bazel 2.0.0 following the `Bazel installation documentation <https://docs.bazel.build/versions/2.0.0/install.html>`_.

TensorFlow: Configure with Bazel
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -45,15 +45,15 @@ workspace_status.cc:
# variables over several runs
bindings: clean-keep-third-party workspace_status.cc ds-swig
	pip install --quiet $(PYTHON_PACKAGES) wheel==0.33.6 setuptools==39.1.0
	PATH=$(DS_SWIG_BIN_PATH):$(TOOLCHAIN):$$PATH SWIG_LIB="$(SWIG_LIB)" AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) LIBEXE=$(LIBEXE) CFLAGS="$(CFLAGS) $(CXXFLAGS)" LDFLAGS="$(LDFLAGS_NEEDED)" $(PYTHON_PATH) $(NUMPY_INCLUDE) python ./setup.py build_ext --num_processes $(NUM_PROCESSES) $(PYTHON_PLATFORM_NAME) $(SETUP_FLAGS)
	DISTUTILS_USE_SDK=1 PATH=$(DS_SWIG_BIN_PATH):$(TOOLCHAIN):$$PATH SWIG_LIB="$(SWIG_LIB)" AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) LIBEXE=$(LIBEXE) CFLAGS="$(CFLAGS) $(CXXFLAGS)" LDFLAGS="$(LDFLAGS_NEEDED)" $(PYTHON_PATH) $(NUMPY_INCLUDE) python ./setup.py build_ext --num_processes $(NUM_PROCESSES) $(PYTHON_PLATFORM_NAME) $(SETUP_FLAGS)
	find temp_build -type f -name "*.o" -delete
	AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) LIBEXE=$(LIBEXE) CFLAGS="$(CFLAGS) $(CXXFLAGS)" LDFLAGS="$(LDFLAGS_NEEDED)" $(PYTHON_PATH) $(NUMPY_INCLUDE) python ./setup.py bdist_wheel $(PYTHON_PLATFORM_NAME) $(SETUP_FLAGS)
	DISTUTILS_USE_SDK=1 AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) LIBEXE=$(LIBEXE) CFLAGS="$(CFLAGS) $(CXXFLAGS)" LDFLAGS="$(LDFLAGS_NEEDED)" $(PYTHON_PATH) $(NUMPY_INCLUDE) python ./setup.py bdist_wheel $(PYTHON_PLATFORM_NAME) $(SETUP_FLAGS)
	rm -rf temp_build

bindings-debug: clean-keep-third-party workspace_status.cc ds-swig
	pip install --quiet $(PYTHON_PACKAGES) wheel==0.33.6 setuptools==39.1.0
	PATH=$(DS_SWIG_BIN_PATH):$(TOOLCHAIN):$$PATH SWIG_LIB="$(SWIG_LIB)" AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) LIBEXE=$(LIBEXE) CFLAGS="$(CFLAGS) $(CXXFLAGS) -DDEBUG" LDFLAGS="$(LDFLAGS_NEEDED)" $(PYTHON_PATH) $(NUMPY_INCLUDE) python ./setup.py build_ext --debug --num_processes $(NUM_PROCESSES) $(PYTHON_PLATFORM_NAME) $(SETUP_FLAGS)
	DISTUTILS_USE_SDK=1 PATH=$(DS_SWIG_BIN_PATH):$(TOOLCHAIN):$$PATH SWIG_LIB="$(SWIG_LIB)" AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) LIBEXE=$(LIBEXE) CFLAGS="$(CFLAGS) $(CXXFLAGS) -DDEBUG" LDFLAGS="$(LDFLAGS_NEEDED)" $(PYTHON_PATH) $(NUMPY_INCLUDE) python ./setup.py build_ext --debug --num_processes $(NUM_PROCESSES) $(PYTHON_PLATFORM_NAME) $(SETUP_FLAGS)
	$(GENERATE_DEBUG_SYMS)
	find temp_build -type f -name "*.o" -delete
	AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) LIBEXE=$(LIBEXE) CFLAGS="$(CFLAGS) $(CXXFLAGS) -DDEBUG" LDFLAGS="$(LDFLAGS_NEEDED)" $(PYTHON_PATH) $(NUMPY_INCLUDE) python ./setup.py bdist_wheel $(PYTHON_PLATFORM_NAME) $(SETUP_FLAGS)
	DISTUTILS_USE_SDK=1 AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) LIBEXE=$(LIBEXE) CFLAGS="$(CFLAGS) $(CXXFLAGS) -DDEBUG" LDFLAGS="$(LDFLAGS_NEEDED)" $(PYTHON_PATH) $(NUMPY_INCLUDE) python ./setup.py bdist_wheel $(PYTHON_PLATFORM_NAME) $(SETUP_FLAGS)
	rm -rf temp_build
@@ -47,7 +47,7 @@ class Scorer(swigwrapper.Scorer):
        return super(Scorer, self).load_lm(lm_path.encode('utf-8'))

    def save_dictionary(self, save_path, *args, **kwargs):
        super(Scorer, self).save_dictionary(save_path.encode('utf-8'), *args, **kwargs)
        return super(Scorer, self).save_dictionary(save_path.encode('utf-8'), *args, **kwargs)


def ctc_beam_search_decoder(probs_seq,
@ -146,7 +146,7 @@ int Scorer::load_trie(std::ifstream& fin, const std::string& file_path)
  return DS_ERR_OK;
}

void Scorer::save_dictionary(const std::string& path, bool append_instead_of_overwrite)
bool Scorer::save_dictionary(const std::string& path, bool append_instead_of_overwrite)
{
  std::ios::openmode om;
  if (append_instead_of_overwrite) {
@ -155,15 +155,39 @@ void Scorer::save_dictionary(const std::string& path, bool append_instead_of_ove
    om = std::ios::out|std::ios::binary;
  }
  std::fstream fout(path, om);
  if (!fout || fout.bad()) {
    std::cerr << "Error opening '" << path << "'" << std::endl;
    return false;
  }
  fout.write(reinterpret_cast<const char*>(&MAGIC), sizeof(MAGIC));
  if (fout.bad()) {
    std::cerr << "Error writing MAGIC '" << path << "'" << std::endl;
    return false;
  }
  fout.write(reinterpret_cast<const char*>(&FILE_VERSION), sizeof(FILE_VERSION));
  if (fout.bad()) {
    std::cerr << "Error writing FILE_VERSION '" << path << "'" << std::endl;
    return false;
  }
  fout.write(reinterpret_cast<const char*>(&is_utf8_mode_), sizeof(is_utf8_mode_));
  if (fout.bad()) {
    std::cerr << "Error writing is_utf8_mode '" << path << "'" << std::endl;
    return false;
  }
  fout.write(reinterpret_cast<const char*>(&alpha), sizeof(alpha));
  if (fout.bad()) {
    std::cerr << "Error writing alpha '" << path << "'" << std::endl;
    return false;
  }
  fout.write(reinterpret_cast<const char*>(&beta), sizeof(beta));
  if (fout.bad()) {
    std::cerr << "Error writing beta '" << path << "'" << std::endl;
    return false;
  }
  fst::FstWriteOptions opt;
  opt.align = true;
  opt.source = path;
  dictionary->Write(fout, opt);
  return dictionary->Write(fout, opt);
}

bool Scorer::is_scoring_boundary(PathTrie* prefix, size_t new_label)
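The write sequence above fixes the header layout of a saved scorer: MAGIC, FILE_VERSION, the UTF-8 mode flag, alpha, beta, then the FST. A rough read-back sketch of that header follows; the field widths and byte order used here are assumptions for illustration only and are not taken from the diff:

```python
import struct

def read_scorer_header(path):
    """Read the fields written by Scorer::save_dictionary, in the same order.

    Assumes a 4-byte magic, a 4-byte file version, a 1-byte UTF-8 flag and
    little-endian 8-byte doubles for alpha/beta; the real widths come from
    the C++ member types and may differ.
    """
    with open(path, 'rb') as fin:
        magic, version = struct.unpack('<ii', fin.read(8))
        (is_utf8_mode,) = struct.unpack('<?', fin.read(1))
        alpha, beta = struct.unpack('<dd', fin.read(16))
    return magic, version, is_utf8_mode, alpha, beta
```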
@ -77,7 +77,7 @@ public:
  void set_alphabet(const Alphabet& alphabet);

  // save dictionary in file
  void save_dictionary(const std::string &path, bool append_instead_of_overwrite=false);
  bool save_dictionary(const std::string &path, bool append_instead_of_overwrite=false);

  // return whether this step represents a boundary where beam scoring should happen
  bool is_scoring_boundary(PathTrie* prefix, size_t new_label);
@ -48,7 +48,7 @@ endif
endif

ifeq ($(TARGET),host-win)
TOOLCHAIN := '$(VCINSTALLDIR)\bin\amd64\'
TOOLCHAIN := '$(VCToolsInstallDir)\bin\Hostx64\x64\'
TOOL_CC := cl.exe
TOOL_CXX := cl.exe
TOOL_LD := link.exe
@ -65,7 +65,7 @@ ifeq ($(TARGET),rpi3)
TOOLCHAIN ?= ${TFDIR}/bazel-$(shell basename "${TFDIR}")/external/LinaroArmGcc72/bin/arm-linux-gnueabihf-
RASPBIAN ?= $(abspath $(NC_DIR)/../multistrap-raspbian-buster)
CFLAGS := -march=armv7-a -mtune=cortex-a53 -mfpu=neon-fp-armv8 -mfloat-abi=hard -D_GLIBCXX_USE_CXX11_ABI=0 --sysroot $(RASPBIAN)
CXXFLAGS := $(CXXFLAGS)
CXXFLAGS := $(CFLAGS)
LDFLAGS := -Wl,-rpath-link,$(RASPBIAN)/lib/arm-linux-gnueabihf/ -Wl,-rpath-link,$(RASPBIAN)/usr/lib/arm-linux-gnueabihf/

SOX_CFLAGS := -I$(RASPBIAN)/usr/include
@ -30,11 +30,11 @@ Prerequisites

* Windows 10
* `Windows 10 SDK <https://developer.microsoft.com/en-us/windows/downloads/windows-10-sdk>`_
* `Visual Studio 2017 Community <https://visualstudio.microsoft.com/vs/community/>`_
* `Visual Studio 2019 Community <https://visualstudio.microsoft.com/vs/community/>`_
* `Git Large File Storage <https://git-lfs.github.com/>`_
* `TensorFlow Windows pre-requisites <https://www.tensorflow.org/install/source_windows>`_

Inside the Visual Studio Installer enable ``MS Build Tools`` and ``VC++ 2015.3 v14.00 (v140) toolset for desktop``.
Inside the Visual Studio Installer enable ``MS Build Tools`` and ``VC++ 2019 v16.00 (v160) toolset for desktop``.

If you want to enable CUDA support you need to follow the steps in `the TensorFlow docs for building on Windows with CUDA <https://www.tensorflow.org/install/gpu#windows_setup>`_.

@ -51,7 +51,7 @@ We need to clone ``mozilla/DeepSpeech`` and ``mozilla/tensorflow``.

.. code-block:: bash

   git clone --branch r1.15 https://github.com/mozilla/tensorflow
   git clone --branch r2.2 https://github.com/mozilla/tensorflow

Configuring the paths
---------------------
@ -113,7 +113,7 @@ If you run CUDA enabled ``native_client`` we need to add the following to the ``

.. code-block::

   C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.0\bin
   C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.1\bin

Building the native_client
^^^^^^^^^^^^^^^^^^^^^^^^^^
@ -41,10 +41,6 @@ package.json: package.json.in
package.json.in > package.json && cat package.json

npm-dev: package.json
ifeq ($(findstring _NT,$(OS)),_NT)
# node-gyp@5.x behaves erratically with VS2015 and MSBuild.exe detection
$(NPM_TOOL) install node-gyp@4.x
endif
$(NPM_TOOL) install --prefix=$(NPM_ROOT)/../ --ignore-scripts --force --verbose --production=false .

configure: deepspeech_wrap.cxx package.json npm-dev
@ -35,12 +35,12 @@
    "node-pre-gyp": "0.15.x",
    "argparse": "1.0.x",
    "sox-stream": "2.0.x",
    "memory-stream": "0.0.3",
    "memory-stream": "1.0.x",
    "node-wav": "0.0.2"
  },
  "devDependencies": {
    "electron": "^1.7.9",
    "node-gyp": "4.x - 5.x",
    "node-gyp": "5.x",
    "typescript": "3.8.x",
    "typedoc": "0.17.x",
    "@types/argparse": "1.0.x",
@ -10,7 +10,7 @@ bindings-clean:
# variables over several runs
bindings-build: ds-swig
pip install --quiet $(PYTHON_PACKAGES) wheel==0.33.6 setuptools==39.1.0
PATH=$(TOOLCHAIN):$(DS_SWIG_BIN_PATH):$$PATH SWIG_LIB="$(SWIG_LIB)" AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS_NEEDED) $(RPATH_PYTHON)" MODEL_LDFLAGS="$(LDFLAGS_DIRS)" MODEL_LIBS="$(LIBS)" $(PYTHON_PATH) $(PYTHON_SYSCONFIGDATA) $(NUMPY_INCLUDE) python ./setup.py build_ext $(PYTHON_PLATFORM_NAME)
DISTUTILS_USE_SDK=1 PATH=$(TOOLCHAIN):$(DS_SWIG_BIN_PATH):$$PATH SWIG_LIB="$(SWIG_LIB)" AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS_NEEDED) $(RPATH_PYTHON)" MODEL_LDFLAGS="$(LDFLAGS_DIRS)" MODEL_LIBS="$(LIBS)" $(PYTHON_PATH) $(PYTHON_SYSCONFIGDATA) $(NUMPY_INCLUDE) python ./setup.py build_ext $(PYTHON_PLATFORM_NAME)

MANIFEST.in: bindings-build
> $@
@ -21,6 +21,6 @@ MANIFEST.in: bindings-build
bindings-package: MANIFEST.in
cat MANIFEST.in
rm -f temp_build/*_wrap.o temp_build/Release/*_wrap.obj
AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS_NEEDED) $(RPATH_PYTHON)" MODEL_LDFLAGS="$(LDFLAGS_DIRS)" MODEL_LIBS="$(LIBS)" $(PYTHON_PATH) $(PYTHON_SYSCONFIGDATA) $(NUMPY_INCLUDE) python ./setup.py bdist_wheel $(PYTHON_PLATFORM_NAME) $(SETUP_FLAGS)
DISTUTILS_USE_SDK=1 AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS_NEEDED) $(RPATH_PYTHON)" MODEL_LDFLAGS="$(LDFLAGS_DIRS)" MODEL_LIBS="$(LIBS)" $(PYTHON_PATH) $(PYTHON_SYSCONFIGDATA) $(NUMPY_INCLUDE) python ./setup.py bdist_wheel $(PYTHON_PLATFORM_NAME) $(SETUP_FLAGS)

bindings: bindings-build bindings-package
@ -118,7 +118,7 @@ TFModelState::init(const char* model_path)
  int beam_width = metadata_outputs[3].scalar<int>()();
  beam_width_ = (unsigned int)(beam_width);

  string serialized_alphabet = metadata_outputs[4].scalar<string>()();
  string serialized_alphabet = metadata_outputs[4].scalar<tensorflow::tstring>()();
  err = alphabet_.deserialize(serialized_alphabet.data(), serialized_alphabet.size());
  if (err != 0) {
    return DS_ERR_INVALID_ALPHABET;
setup.py
@ -50,7 +50,6 @@ def main():
        version = fin.read().strip()

    install_requires_base = [
        'tensorflow == 1.15.2',
        'numpy',
        'progressbar2',
        'six',
@ -74,6 +73,10 @@ def main():
        'ds_ctcdecoder == {}'.format(version)
    ]

    tensorflow_pypi_dep = [
        'tensorflow == 1.15.2'
    ]

    # Due to pip craziness environment variables are the only consistent way to
    # get options into this script when doing `pip install`.
    tc_decoder_artifacts_root = os.environ.get('DECODER_ARTIFACTS_ROOT', '')
@ -87,6 +90,11 @@ def main():
    else:
        install_requires = install_requires_base + decoder_pypi_dep

    if os.environ.get('DS_NOTENSORFLOW', ''):
        install_requires = install_requires
    else:
        install_requires = install_requires + tensorflow_pypi_dep

    setup(
        name='deepspeech_training',
        version=version,
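The new branch makes the TensorFlow requirement opt-out: it is appended unless `DS_NOTENSORFLOW` is set, and the `install_requires = install_requires` arm is a deliberate no-op. A condensed sketch of the same selection logic, with illustrative variable names:

```python
import os

def resolve_requirements(base, decoder_dep, tensorflow_dep):
    # decoder_dep is chosen earlier from DECODER_ARTIFACTS_ROOT (not shown
    # here); tensorflow_dep is skipped only when DS_NOTENSORFLOW is non-empty.
    reqs = list(base) + list(decoder_dep)
    if not os.environ.get('DS_NOTENSORFLOW', ''):
        reqs += list(tensorflow_dep)
    return reqs

# e.g. `DS_NOTENSORFLOW=1 pip install .` leaves 'tensorflow == 1.15.2' out.
```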
@ -9,7 +9,7 @@ build:
  dependencies: []
  routes: []
  maxRunTime: 3600
  docker_image: "ubuntu:14.04"
  docker_image: "ubuntu:16.04"
  system_setup:
    >
      true
@ -22,7 +22,7 @@ build:
  nc_asset_name: 'native_client.tar.xz'
  args:
    tests_cmdline: ''
  tensorflow_git_desc: 'TensorFlow: v1.15.0-24-gceb46aa'
  tensorflow_git_desc: 'TensorFlow: v2.2.0-12-gc29895f'
  test_model_task: ''
  homebrew:
    url: ''
@ -36,3 +36,7 @@ build:
  gradle_cache:
    url: ''
    namespace: ''
  build_or_cache:
    repo: "${event.head.repo.url}"
    sha: "${event.head.sha}"
    dir: "DeepSpeech/ds"
@ -1,27 +1,27 @@
|
||||
python:
|
||||
packages_trusty:
|
||||
packages_xenial:
|
||||
apt: 'make build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev libsqlite3-dev liblzma-dev curl llvm libncurses5-dev libncursesw5-dev xz-utils tk-dev libpng-dev libsox-dev libmagic-dev libgsm1-dev libltdl-dev'
|
||||
packages_buster:
|
||||
apt: 'python3-virtualenv python3-setuptools python3-pip python3-wheel python3-pkg-resources'
|
||||
packages_docs_bionic:
|
||||
apt: 'python3 python3-pip zip doxygen'
|
||||
training:
|
||||
packages_trusty:
|
||||
packages_xenial:
|
||||
apt: 'libopus0'
|
||||
tensorflow:
|
||||
packages_trusty:
|
||||
packages_xenial:
|
||||
apt: 'make build-essential gfortran git libblas-dev liblapack-dev libsox-dev libmagic-dev libgsm1-dev libltdl-dev libpng-dev python zlib1g-dev'
|
||||
java:
|
||||
packages_trusty:
|
||||
packages_xenial:
|
||||
apt: 'apt-get -qq -y install curl software-properties-common wget unzip && add-apt-repository --yes ppa:openjdk-r/ppa && apt-get -qq update && DEBIAN_FRONTEND=noninteractive apt-get -qq -y --force-yes install openjdk-8-jdk && java -version && update-ca-certificates -f'
|
||||
electronjs:
|
||||
packages_xenial:
|
||||
apt: 'libatk1.0-0 libatk-bridge2.0-0 libcairo2 libcups2 libdbus-1-3 libgdk-pixbuf2.0-0 libgtk-3-0 libnspr4 libnss3 libpango-1.0-0 libpangocairo-1.0-0 libx11-xcb1 libxcomposite1 libxcursor1 libxdamage1 libxfixes3 libxi6 libxrandr2 libxrender1 libxss1 libxtst6 xvfb'
|
||||
nodejs:
|
||||
packages_trusty:
|
||||
packages_xenial:
|
||||
apt: 'nodejs sox'
|
||||
apt_pinning: '(echo "Package: nodejs" && echo "Pin: origin deb.nodesource.com" && echo "Pin-Priority: 999") > /etc/apt/preferences'
|
||||
prep_12: 'echo "deb http://deb.nodesource.com/node_12.x trusty main" > /etc/apt/sources.list.d/nodesource.list && wget -qO- https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -'
|
||||
prep_12: 'echo "deb http://deb.nodesource.com/node_12.x xenial main" > /etc/apt/sources.list.d/nodesource.list && wget -qO- https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add -'
|
||||
packages_xenial:
|
||||
apt: 'nodejs sox'
|
||||
apt_pinning: '(echo "Package: nodejs" && echo "Pin: origin deb.nodesource.com" && echo "Pin-Priority: 999") > /etc/apt/preferences'
|
||||
@ -57,72 +57,91 @@ nodejs:
|
||||
prep_14: '/usr/bin/wget.exe https://nodejs.org/dist/v14.3.0/node-v14.3.0-win-x64.zip && ""C:\Program Files\7-zip\7z.exe"" x -o$TASKCLUSTER_NODE_DIR -tzip -aoa node-v14.3.0-win-x64.zip && rm node-*.zip && export PATH=$TASKCLUSTER_TASK_DIR/bin/node-v14.3.0-win-x64/:$PATH'
|
||||
system:
|
||||
node_gyp_cache:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.node-gyp-cache.4/artifacts/public/node-gyp-cache.tar.gz'
|
||||
namespace: 'project.deepspeech.node-gyp-cache.4'
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.node-gyp-cache.6/artifacts/public/node-gyp-cache.tar.gz'
|
||||
namespace: 'project.deepspeech.node-gyp-cache.6'
|
||||
homebrew_builds:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.homebrew_builds.5/artifacts/public/homebrew_builds.tar.gz'
|
||||
namespace: 'project.deepspeech.homebrew_builds.5'
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.homebrew_builds.7/artifacts/public/homebrew_builds.tar.gz'
|
||||
namespace: 'project.deepspeech.homebrew_builds.7'
|
||||
homebrew_tests:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.homebrew_tests.6/artifacts/public/homebrew_tests.tar.gz'
|
||||
namespace: 'project.deepspeech.homebrew_tests.6'
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.homebrew_tests.8/artifacts/public/homebrew_tests.tar.gz'
|
||||
namespace: 'project.deepspeech.homebrew_tests.8'
|
||||
android_cache:
|
||||
arm64_v8a:
|
||||
android_24:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.arm64-v8a.android-24.4/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.arm64-v8a.android-24.4'
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.arm64-v8a.android-24.6/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.arm64-v8a.android-24.6'
|
||||
android_25:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.arm64-v8a.android-25.4/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.arm64-v8a.android-25.4'
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.arm64-v8a.android-25.6/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.arm64-v8a.android-25.6'
|
||||
armeabi_v7a:
|
||||
android_24:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.armeabi-v7a.android-24.4/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.armeabi-v7a.android-24.4'
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.armeabi-v7a.android-24.6/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.armeabi-v7a.android-24.6'
|
||||
android_25:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.armeabi-v7a.android-25.4/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.armeabi-v7a.android-25.4'
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.armeabi-v7a.android-25.6/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.armeabi-v7a.android-25.6'
|
||||
x86_64:
|
||||
android_24:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.x86_64.android-24.4/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.x86_64.android-24.4'
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.x86_64.android-24.6/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.x86_64.android-24.6'
|
||||
android_25:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.x86_64.android-25.4/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.x86_64.android-25.4'
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.x86_64.android-25.6/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.x86_64.android-25.6'
|
||||
android_26:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.x86_64.android-26.0/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.x86_64.android-26.0'
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.x86_64.android-26.2/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.x86_64.android-26.2'
|
||||
android_27:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.x86_64.android-27.0/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.x86_64.android-27.0'
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.x86_64.android-27.2/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.x86_64.android-27.2'
|
||||
android_28:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.x86_64.android-28.0/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.x86_64.android-28.0'
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.x86_64.android-28.2/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.x86_64.android-28.2'
|
||||
android_29:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.x86_64.android-29.0/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.x86_64.android-29.0'
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.x86_64.android-29.2/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.x86_64.android-29.2'
|
||||
android_30:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.x86_64.android-30.2/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.x86_64.android-30.2'
|
||||
sdk:
|
||||
android_27:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.sdk.android-27.4/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.sdk.android-27.4'
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.android_cache.sdk.android-27.6/artifacts/public/android_cache.tar.gz'
|
||||
namespace: 'project.deepspeech.android_cache.sdk.android-27.6'
|
||||
gradle_cache:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.gradle.4/artifacts/public/gradle.tar.gz'
|
||||
namespace: 'project.deepspeech.gradle.4'
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.gradle.6/artifacts/public/gradle.tar.gz'
|
||||
namespace: 'project.deepspeech.gradle.6'
|
||||
pyenv:
|
||||
linux:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.pyenv.linux.5/artifacts/public/pyenv.tar.gz'
|
||||
namespace: 'project.deepspeech.pyenv.linux.5'
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.pyenv.linux.8/artifacts/public/pyenv.tar.gz'
|
||||
namespace: 'project.deepspeech.pyenv.linux.8'
|
||||
osx:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.pyenv.osx.5/artifacts/public/pyenv.tar.gz'
|
||||
namespace: 'project.deepspeech.pyenv.osx.5'
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.pyenv.osx.8/artifacts/public/pyenv.tar.gz'
|
||||
namespace: 'project.deepspeech.pyenv.osx.8'
|
||||
win:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.pyenv.win.5/artifacts/public/pyenv.tar.gz'
|
||||
namespace: 'project.deepspeech.pyenv.win.5'
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.pyenv.win.8/artifacts/public/pyenv.tar.gz'
|
||||
namespace: 'project.deepspeech.pyenv.win.8'
|
||||
swig:
|
||||
repo: "https://github.com/lissyx/swig"
|
||||
sha1: "b5fea54d39832d1d132d7dd921b69c0c2c9d5118"
|
||||
swig_build:
|
||||
linux:
|
||||
url: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.swig.linux.amd64.b5fea54d39832d1d132d7dd921b69c0c2c9d5118/artifacts/public/ds-swig.tar.gz"
|
||||
namespace: "project.deepspeech.swig.linux.amd64.b5fea54d39832d1d132d7dd921b69c0c2c9d5118"
|
||||
osx:
|
||||
url: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.swig.darwin.amd64.b5fea54d39832d1d132d7dd921b69c0c2c9d5118/artifacts/public/ds-swig.tar.gz"
|
||||
namespace: "project.deepspeech.swig.darwin.amd64.b5fea54d39832d1d132d7dd921b69c0c2c9d5118"
|
||||
win:
|
||||
url: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.swig.win.amd64.b5fea54d39832d1d132d7dd921b69c0c2c9d5118/artifacts/public/ds-swig.tar.gz"
|
||||
namespace: "project.deepspeech.swig.win.amd64.b5fea54d39832d1d132d7dd921b69c0c2c9d5118"
|
||||
username: 'build-user'
|
||||
homedir:
|
||||
linux: '/home/build-user'
|
||||
osx: '/Users/build-user'
|
||||
win: '/c/builds/tc-workdir'
|
||||
sox_win: '/usr/bin/wget.exe https://sourceforge.net/projects/sox/files/sox/14.4.2/sox-14.4.2-win32.zip/download -O sox-14.4.2-win32.zip && ""C:\Program Files\7-zip\7z.exe"" x -o$TASKCLUSTER_TASK_DIR/bin/ -tzip -aoa sox-14.4.2-win32.zip && rm sox-*zip && export PATH=$TASKCLUSTER_TASK_DIR/bin/sox-14.4.2/:$PATH'
|
||||
aptEc2Mirrors: 'echo "deb http://archive.ubuntu.com/ubuntu/ trusty-updates main" > /etc/apt/sources.list.d/trusty-updates.list && apt-get -qq update && apt-get -qq -y upgrade'
|
||||
msys2:
|
||||
url: 'https://github.com/msys2/msys2-installer/releases/download/2020-06-02/msys2-base-x86_64-20200602.tar.xz'
|
||||
sha: '598ceeaa3e2ccf86a25a2e3c449d00a9fd35300e36011bee610036dfa59d670a'
|
||||
msys2_filesystem_pkg:
|
||||
url: 'http://repo.msys2.org/msys/x86_64/filesystem-2020.02-3-x86_64.pkg.tar.xz'
|
||||
sha: '927b020a67a05139ee1b2c45bff491c1d42335e64350cc7758ee20d7c3099477'
|
||||
install: 'pacman -Udd --noconfirm $USERPROFILE/filesystem-2020.02-3-x86_64.pkg.tar.xz'
|
||||
|
@ -8,7 +8,7 @@ build:
|
||||
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.android-arm64"
|
||||
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.android-arm64"
|
||||
- "index.project.deepspeech.deepspeech.native_client.android-arm64.${event.head.sha}"
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r1.15.ceb46aae5836a0f648a2c3da5942af2b7d1b98bf.android-arm64/artifacts/public/home.tar.xz"
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r2.2.c29895fba1b9f9f48e2e54eefb024c69aa333473.android-arm64/artifacts/public/home.tar.xz"
|
||||
scripts:
|
||||
build: "taskcluster/android-build.sh arm64-v8a"
|
||||
package: "taskcluster/android-package.sh arm64-v8a"
|
||||
|
@ -8,7 +8,7 @@ build:
|
||||
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.android-armv7"
|
||||
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.android-armv7"
|
||||
- "index.project.deepspeech.deepspeech.native_client.android-armv7.${event.head.sha}"
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r1.15.ceb46aae5836a0f648a2c3da5942af2b7d1b98bf.android-armv7/artifacts/public/home.tar.xz"
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r2.2.c29895fba1b9f9f48e2e54eefb024c69aa333473.android-armv7/artifacts/public/home.tar.xz"
|
||||
scripts:
|
||||
build: "taskcluster/android-build.sh armeabi-v7a"
|
||||
package: "taskcluster/android-package.sh armeabi-v7a"
|
||||
|
@ -1,11 +1,11 @@
|
||||
build:
|
||||
template_file: android_cache-opt-base.tyml
|
||||
template_file: generic_tc_caching-linux-opt-base.tyml
|
||||
system_setup:
|
||||
>
|
||||
${java.packages_trusty.apt}
|
||||
${java.packages_xenial.apt}
|
||||
cache:
|
||||
url: ${system.android_cache.arm64_v8a.android_24.url}
|
||||
namespace: ${system.android_cache.arm64_v8a.android_24.namespace}
|
||||
artifact_url: ${system.android_cache.arm64_v8a.android_24.url}
|
||||
artifact_namespace: ${system.android_cache.arm64_v8a.android_24.namespace}
|
||||
scripts:
|
||||
build: "taskcluster/android_cache-build.sh arm64-v8a android-24"
|
||||
package: "taskcluster/android_cache-package.sh"
|
||||
|
@ -1,11 +1,11 @@
|
||||
build:
|
||||
template_file: android_cache-opt-base.tyml
|
||||
template_file: generic_tc_caching-linux-opt-base.tyml
|
||||
system_setup:
|
||||
>
|
||||
${java.packages_trusty.apt}
|
||||
${java.packages_xenial.apt}
|
||||
cache:
|
||||
url: ${system.android_cache.arm64_v8a.android_25.url}
|
||||
namespace: ${system.android_cache.arm64_v8a.android_25.namespace}
|
||||
artifact_url: ${system.android_cache.arm64_v8a.android_25.url}
|
||||
artifact_namespace: ${system.android_cache.arm64_v8a.android_25.namespace}
|
||||
scripts:
|
||||
build: "taskcluster/android_cache-build.sh arm64-v8a android-25"
|
||||
package: "taskcluster/android_cache-package.sh"
|
||||
|
@ -1,13 +1,13 @@
|
||||
build:
|
||||
template_file: android_cache-opt-base.tyml
|
||||
template_file: generic_tc_caching-linux-opt-base.tyml
|
||||
system_setup:
|
||||
>
|
||||
${java.packages_trusty.apt}
|
||||
${java.packages_xenial.apt}
|
||||
cache:
|
||||
url: ${system.android_cache.armeabi_v7a.android_24.url}
|
||||
namespace: ${system.android_cache.armeabi_v7a.android_24.namespace}
|
||||
artifact_url: ${system.android_cache.armeabi_v7a.android_24.url}
|
||||
artifact_namespace: ${system.android_cache.armeabi_v7a.android_24.namespace}
|
||||
scripts:
|
||||
build: "taskcluster/android_cache-build.sh armeabi-v7a android-24"
|
||||
build: "taskcluster/android_cache-build.sh armeabi-v7a android-24 default"
|
||||
package: "taskcluster/android_cache-package.sh"
|
||||
metadata:
|
||||
name: "Builds Android cache armeabi-v7a / android-24"
|
||||
|
@ -1,11 +1,11 @@
|
||||
build:
|
||||
template_file: android_cache-opt-base.tyml
|
||||
template_file: generic_tc_caching-linux-opt-base.tyml
|
||||
system_setup:
|
||||
>
|
||||
${java.packages_trusty.apt}
|
||||
${java.packages_xenial.apt}
|
||||
cache:
|
||||
url: ${system.android_cache.armeabi_v7a.android_25.url}
|
||||
namespace: ${system.android_cache.armeabi_v7a.android_25.namespace}
|
||||
artifact_url: ${system.android_cache.armeabi_v7a.android_25.url}
|
||||
artifact_namespace: ${system.android_cache.armeabi_v7a.android_25.namespace}
|
||||
scripts:
|
||||
build: "taskcluster/android_cache-build.sh armeabi-v7a android-25"
|
||||
package: "taskcluster/android_cache-package.sh"
|
||||
|
@ -1,11 +1,11 @@
|
||||
build:
|
||||
template_file: android_cache-opt-base.tyml
|
||||
template_file: generic_tc_caching-linux-opt-base.tyml
|
||||
system_setup:
|
||||
>
|
||||
${java.packages_trusty.apt}
|
||||
${java.packages_xenial.apt}
|
||||
cache:
|
||||
url: ${system.android_cache.sdk.android_27.url}
|
||||
namespace: ${system.android_cache.sdk.android_27.namespace}
|
||||
artifact_url: ${system.android_cache.sdk.android_27.url}
|
||||
artifact_namespace: ${system.android_cache.sdk.android_27.namespace}
|
||||
scripts:
|
||||
build: "taskcluster/android_cache-build.sh sdk android-27"
|
||||
package: "taskcluster/android_cache-package.sh"
|
||||
|
@ -1,11 +1,11 @@
|
||||
build:
|
||||
template_file: android_cache-opt-base.tyml
|
||||
template_file: generic_tc_caching-linux-opt-base.tyml
|
||||
system_setup:
|
||||
>
|
||||
${java.packages_trusty.apt}
|
||||
${java.packages_xenial.apt}
|
||||
cache:
|
||||
url: ${system.android_cache.x86_64.android_24.url}
|
||||
namespace: ${system.android_cache.x86_64.android_24.namespace}
|
||||
artifact_url: ${system.android_cache.x86_64.android_24.url}
|
||||
artifact_namespace: ${system.android_cache.x86_64.android_24.namespace}
|
||||
scripts:
|
||||
build: "taskcluster/android_cache-build.sh x86_64 android-24"
|
||||
package: "taskcluster/android_cache-package.sh"
|
||||
|
@ -1,11 +1,11 @@
|
||||
build:
|
||||
template_file: android_cache-opt-base.tyml
|
||||
template_file: generic_tc_caching-linux-opt-base.tyml
|
||||
system_setup:
|
||||
>
|
||||
${java.packages_trusty.apt}
|
||||
${java.packages_xenial.apt}
|
||||
cache:
|
||||
url: ${system.android_cache.x86_64.android_25.url}
|
||||
namespace: ${system.android_cache.x86_64.android_25.namespace}
|
||||
artifact_url: ${system.android_cache.x86_64.android_25.url}
|
||||
artifact_namespace: ${system.android_cache.x86_64.android_25.namespace}
|
||||
scripts:
|
||||
build: "taskcluster/android_cache-build.sh x86_64 android-25"
|
||||
package: "taskcluster/android_cache-package.sh"
|
||||
|
@ -1,11 +1,11 @@
|
||||
build:
|
||||
template_file: android_cache-opt-base.tyml
|
||||
template_file: generic_tc_caching-linux-opt-base.tyml
|
||||
system_setup:
|
||||
>
|
||||
${java.packages_trusty.apt}
|
||||
${java.packages_xenial.apt}
|
||||
cache:
|
||||
url: ${system.android_cache.x86_64.android_26.url}
|
||||
namespace: ${system.android_cache.x86_64.android_26.namespace}
|
||||
artifact_url: ${system.android_cache.x86_64.android_26.url}
|
||||
artifact_namespace: ${system.android_cache.x86_64.android_26.namespace}
|
||||
scripts:
|
||||
build: "taskcluster/android_cache-build.sh x86_64 android-26"
|
||||
package: "taskcluster/android_cache-package.sh"
|
||||
|
@ -1,11 +1,11 @@
|
||||
build:
|
||||
template_file: android_cache-opt-base.tyml
|
||||
template_file: generic_tc_caching-linux-opt-base.tyml
|
||||
system_setup:
|
||||
>
|
||||
${java.packages_trusty.apt}
|
||||
${java.packages_xenial.apt}
|
||||
cache:
|
||||
url: ${system.android_cache.x86_64.android_28.url}
|
||||
namespace: ${system.android_cache.x86_64.android_28.namespace}
|
||||
artifact_url: ${system.android_cache.x86_64.android_28.url}
|
||||
artifact_namespace: ${system.android_cache.x86_64.android_28.namespace}
|
||||
scripts:
|
||||
build: "taskcluster/android_cache-build.sh x86_64 android-28"
|
||||
package: "taskcluster/android_cache-package.sh"
|
||||
|
@ -1,11 +1,11 @@
|
||||
build:
|
||||
template_file: android_cache-opt-base.tyml
|
||||
template_file: generic_tc_caching-linux-opt-base.tyml
|
||||
system_setup:
|
||||
>
|
||||
${java.packages_trusty.apt}
|
||||
${java.packages_xenial.apt}
|
||||
cache:
|
||||
url: ${system.android_cache.x86_64.android_29.url}
|
||||
namespace: ${system.android_cache.x86_64.android_29.namespace}
|
||||
artifact_url: ${system.android_cache.x86_64.android_29.url}
|
||||
artifact_namespace: ${system.android_cache.x86_64.android_29.namespace}
|
||||
scripts:
|
||||
build: "taskcluster/android_cache-build.sh x86_64 android-29"
|
||||
package: "taskcluster/android_cache-package.sh"
|
||||
|
14
taskcluster/android-cache-x86_64-android-30.yml
Normal file
14
taskcluster/android-cache-x86_64-android-30.yml
Normal file
@ -0,0 +1,14 @@
|
||||
build:
|
||||
template_file: generic_tc_caching-linux-opt-base.tyml
|
||||
system_setup:
|
||||
>
|
||||
${java.packages_xenial.apt}
|
||||
cache:
|
||||
artifact_url: ${system.android_cache.x86_64.android_30.url}
|
||||
artifact_namespace: ${system.android_cache.x86_64.android_30.namespace}
|
||||
scripts:
|
||||
build: "taskcluster/android_cache-build.sh x86_64 android-30"
|
||||
package: "taskcluster/android_cache-package.sh"
|
||||
metadata:
|
||||
name: "Builds Android cache x86_64 / android-30"
|
||||
description: "Setup an Android SDK / emulator cache for Android / x86_64 android-30"
|
@ -13,8 +13,8 @@ build:
|
||||
- "index.project.deepspeech.deepspeech.native_client.android-apk.${event.head.sha}"
|
||||
system_setup:
|
||||
>
|
||||
${java.packages_trusty.apt}
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r1.15.ceb46aae5836a0f648a2c3da5942af2b7d1b98bf.android-armv7/artifacts/public/home.tar.xz"
|
||||
${java.packages_xenial.apt}
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r2.2.c29895fba1b9f9f48e2e54eefb024c69aa333473.android-armv7/artifacts/public/home.tar.xz"
|
||||
gradle_cache:
|
||||
url: ${system.gradle_cache.url}
|
||||
namespace: ${system.gradle_cache.namespace}
|
||||
|
@ -8,7 +8,7 @@ build:
|
||||
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.android-x86_64"
|
||||
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.android-x86_64"
|
||||
- "index.project.deepspeech.deepspeech.native_client.android-x86_64.${event.head.sha}"
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r1.15.ceb46aae5836a0f648a2c3da5942af2b7d1b98bf.android-arm64/artifacts/public/home.tar.xz"
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r2.2.c29895fba1b9f9f48e2e54eefb024c69aa333473.android-arm64/artifacts/public/home.tar.xz"
|
||||
scripts:
|
||||
build: "taskcluster/android-build.sh x86_64"
|
||||
package: "taskcluster/android-package.sh x86_64"
|
||||
|
@ -6,6 +6,7 @@ source $(dirname "$0")/tc-tests-utils.sh
|
||||
|
||||
arm_flavor=$1
|
||||
api_level=$2
|
||||
api_kind=$3
|
||||
|
||||
export ANDROID_HOME=${ANDROID_SDK_HOME}
|
||||
|
||||
@ -17,5 +18,5 @@ android_install_sdk
|
||||
android_install_sdk_platform "android-27"
|
||||
|
||||
if [ "${arm_flavor}" != "sdk" ]; then
|
||||
android_setup_emulator "${arm_flavor}" "${api_level}"
|
||||
android_setup_emulator "${arm_flavor}" "${api_level}" "${api_kind}"
|
||||
fi;
|
||||
|
@ -1,52 +0,0 @@
|
||||
$if: 'event.event in build.allowed'
|
||||
then:
|
||||
taskId: ${taskcluster.taskId}
|
||||
provisionerId: ${taskcluster.docker.provisionerId}
|
||||
workerType: ${taskcluster.docker.workerType}
|
||||
taskGroupId: ${taskcluster.taskGroupId}
|
||||
schedulerId: ${taskcluster.schedulerId}
|
||||
created: { $fromNow: '0 sec' }
|
||||
deadline: { $fromNow: '1 day' }
|
||||
expires: { $fromNow: '6 months' }
|
||||
scopes:
|
||||
- "index:insert-task:project.deepspeech.*"
|
||||
|
||||
payload:
|
||||
maxRunTime: { $eval: to_int(build.maxRunTime) }
|
||||
image: ${build.docker_image}
|
||||
|
||||
features:
|
||||
taskclusterProxy: true
|
||||
|
||||
command:
|
||||
- "/bin/bash"
|
||||
- "--login"
|
||||
- "-cxe"
|
||||
- $let:
|
||||
extraSystemSetup: { $eval: strip(str(build.system_setup)) }
|
||||
taskIndexExpire: { $fromNow: '6 months' }
|
||||
in: >
|
||||
(apt-get -qq -y remove --purge ubuntu-advantage-tools || true) &&
|
||||
apt-get -qq update && apt-get -qq -y install curl git && ${extraSystemSetup};
|
||||
cache_file=`curl -sSIL -o /dev/null -w "%{http_code}" ${build.cache.url}` &&
|
||||
if [ "$cache_file" != "200" ]; then
|
||||
${extraSystemSetup} &&
|
||||
adduser --system --home ${system.homedir.linux} ${system.username} && cd ${system.homedir.linux}/ &&
|
||||
mkdir -p /tmp/artifacts/ && chmod 777 /tmp/artifacts &&
|
||||
echo -e "#!/bin/bash\nset -xe\n env && id && git clone --quiet ${event.head.repo.url} ~/DeepSpeech/ds/ && cd ~/DeepSpeech/ds && git checkout --quiet ${event.head.sha}" > /tmp/clone.sh && chmod +x /tmp/clone.sh &&
|
||||
sudo -H -u ${system.username} /bin/bash /tmp/clone.sh &&
|
||||
sudo -H -u ${system.username} --preserve-env /bin/bash ${system.homedir.linux}/DeepSpeech/ds/${build.scripts.build} &&
|
||||
sudo -H -u ${system.username} --preserve-env /bin/bash ${system.homedir.linux}/DeepSpeech/ds/${build.scripts.package} ${taskIndexExpire} taskcluster ${build.cache.namespace}
|
||||
fi;
|
||||
|
||||
artifacts:
|
||||
"public":
|
||||
type: "directory"
|
||||
path: "/tmp/artifacts/"
|
||||
expires: { $fromNow: '6 months' }
|
||||
|
||||
metadata:
|
||||
name: ${build.metadata.name}
|
||||
description: ${build.metadata.description}
|
||||
owner: ${event.head.user.email}
|
||||
source: ${event.head.repo.url}
|
@ -2,17 +2,6 @@

set -xe

TC_EXPIRE=$1
TC_INSTANCE=$2
TC_INDEX=$3

source $(dirname "$0")/tc-tests-utils.sh

cd $HOME/ && tar -czf $TASKCLUSTER_ARTIFACTS/android_cache.tar.gz DeepSpeech/Android/

if [ ! -z "${TC_EXPIRE}" -a ! -z "${TC_INSTANCE}" -a ! -z "${TC_INDEX}" ]; then
  curl -sSL --fail -X PUT \
    -H "Content-Type: application/json" \
    -d "{\"taskId\":\"$TASK_ID\",\"rank\":0,\"expires\":\"${TC_EXPIRE}\",\"data\":{}}" \
    "http://${TC_INSTANCE}/index/v1/task/${TC_INDEX}"
fi;
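The index registration removed here (the `curl -X PUT` above) is the step the caching templates now invoke separately via `tc-update-index.sh`. A hedged Python equivalent of that call, reusing the URL shape and JSON body visible in the removed lines (function name and parameters are illustrative):

```python
import json
import os
import urllib.request

def update_task_index(tc_instance, tc_index, tc_expire, task_id=None):
    """PUT an index entry pointing the given namespace at the current task."""
    task_id = task_id or os.environ['TASK_ID']
    body = json.dumps({'taskId': task_id, 'rank': 0,
                       'expires': tc_expire, 'data': {}}).encode('utf-8')
    req = urllib.request.Request(
        'http://%s/index/v1/task/%s' % (tc_instance, tc_index),
        data=body, method='PUT',
        headers={'Content-Type': 'application/json'})
    with urllib.request.urlopen(req) as resp:
        return resp.status
```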
@ -9,7 +9,7 @@ build:
|
||||
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.osx"
|
||||
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.osx"
|
||||
- "index.project.deepspeech.deepspeech.native_client.osx.${event.head.sha}"
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r1.15.ceb46aae5836a0f648a2c3da5942af2b7d1b98bf.osx/artifacts/public/home.tar.xz"
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r2.2.c29895fba1b9f9f48e2e54eefb024c69aa333473.osx/artifacts/public/home.tar.xz"
|
||||
scripts:
|
||||
build: "taskcluster/host-build.sh"
|
||||
package: "taskcluster/package.sh"
|
||||
|
@ -9,7 +9,7 @@ build:
|
||||
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.osx-ctc"
|
||||
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.osx-ctc"
|
||||
- "index.project.deepspeech.deepspeech.native_client.osx-ctc.${event.head.sha}"
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r1.15.ceb46aae5836a0f648a2c3da5942af2b7d1b98bf.osx/artifacts/public/home.tar.xz"
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r2.2.c29895fba1b9f9f48e2e54eefb024c69aa333473.osx/artifacts/public/home.tar.xz"
|
||||
maxRunTime: 14400
|
||||
scripts:
|
||||
build: 'taskcluster/decoder-build.sh'
|
||||
|
@ -9,7 +9,7 @@ build:
|
||||
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.osx-tflite"
|
||||
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.osx-tflite"
|
||||
- "index.project.deepspeech.deepspeech.native_client.osx-tflite.${event.head.sha}"
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r1.15.ceb46aae5836a0f648a2c3da5942af2b7d1b98bf.osx/artifacts/public/home.tar.xz"
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r2.2.c29895fba1b9f9f48e2e54eefb024c69aa333473.osx/artifacts/public/home.tar.xz"
|
||||
scripts:
|
||||
build: "taskcluster/host-build.sh tflite"
|
||||
package: "taskcluster/package.sh"
|
||||
|
@ -30,12 +30,12 @@ then:
|
||||
dockerfile: { $eval: strip(str(build.dockerfile)) }
|
||||
in: >
|
||||
apt-get -qq -y remove --purge ubuntu-advantage-tools &&
|
||||
${aptEc2Mirrors} &&
|
||||
apt-get -qq update && apt-get -qq -y install git wget pkg-config apt-transport-https ca-certificates curl software-properties-common &&
|
||||
apt-get -qq update && apt-get -qq -y install git wget pkg-config apt-transport-https ca-certificates curl software-properties-common make &&
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - &&
|
||||
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" &&
|
||||
apt-get -qq update && apt-get -qq -y install docker-ce && mkdir -p /opt/deepspeech &&
|
||||
git clone --quiet ${event.head.repo.url} /opt/deepspeech && cd /opt/deepspeech && git checkout --quiet ${event.head.sha} &&
|
||||
make ${dockerfile} DEEPSPEECH_REPO=${event.head.repo.url} DEEPSPEECH_SHA=${event.head.sha} &&
|
||||
docker build --file ${dockerfile} .
|
||||
|
||||
artifacts:
|
||||
|
@ -1,6 +1,6 @@
build:
  template_file: docker-build-base.tyml
  dockerfile: "Dockerfile"
  dockerfile: "Dockerfile.build"
  metadata:
    name: "DeepSpeech Docker build"
    description: "Testing |docker build| of DeepSpeech"
    description: "Testing |docker build| of DeepSpeech build image"

taskcluster/docker-image-train.yml (new file)
@ -0,0 +1,6 @@
build:
  template_file: docker-build-base.tyml
  dockerfile: "Dockerfile.train"
  metadata:
    name: "DeepSpeech Docker train"
    description: "Testing |docker build| of DeepSpeech train image"
@ -35,12 +35,13 @@ payload:
|
||||
export SDKROOT=/Library/Developer/CommandLineTools/SDKs/MacOSX10.14.sdk/ &&
|
||||
env &&
|
||||
mkdir -p $TASKCLUSTER_ARTIFACTS/ &&
|
||||
swig_bin=`curl -sSIL -o /dev/null -w "%{http_code}" ${system.pyenv.osx.url}` &&
|
||||
if [ "$swig_bin" != "200" ]; then
|
||||
git clone --quiet ${event.head.repo.url} $TASKCLUSTER_TASK_DIR/DeepSpeech/ds/ &&
|
||||
cd $TASKCLUSTER_TASK_DIR/DeepSpeech/ds && git checkout --quiet ${event.head.sha} &&
|
||||
$TASKCLUSTER_TASK_DIR/DeepSpeech/ds/${build.scripts.build} &&
|
||||
$TASKCLUSTER_TASK_DIR/DeepSpeech/ds/${build.scripts.package} ${taskIndexExpire} 127.0.0.1:8080 ${system.pyenv.osx.namespace}
|
||||
cache_artifact=`curl -sSIL -o /dev/null -w "%{http_code}" ${build.cache.artifact_url}` &&
|
||||
if [ "$cache_artifact" != "200" ]; then
|
||||
git clone --quiet ${build.build_or_cache.repo} $TASKCLUSTER_TASK_DIR/${build.build_or_cache.dir} &&
|
||||
cd $TASKCLUSTER_TASK_DIR/${build.build_or_cache.dir} && git checkout --quiet ${build.build_or_cache.sha} &&
|
||||
$TASKCLUSTER_TASK_DIR/${build.build_or_cache.dir}/${build.scripts.build} &&
|
||||
$TASKCLUSTER_TASK_DIR/${build.build_or_cache.dir}/${build.scripts.package} &&
|
||||
$TASKCLUSTER_TASK_DIR/${build.build_or_cache.dir}/taskcluster/tc-update-index.sh ${taskIndexExpire} 127.0.0.1:8080 ${build.cache.artifact_namespace}
|
||||
fi;
|
||||
|
||||
artifacts:
|
taskcluster/generic_tc_caching-linux-opt-base.tyml (new file)
@ -0,0 +1,50 @@
taskId: ${taskcluster.taskId}
provisionerId: ${taskcluster.docker.provisionerId}
workerType: ${taskcluster.docker.workerType}
taskGroupId: ${taskcluster.taskGroupId}
schedulerId: ${taskcluster.schedulerId}
created: { $fromNow: '0 sec' }
deadline: { $fromNow: '1 day' }
expires: { $fromNow: '6 months' }
scopes:
  - "index:insert-task:project.deepspeech.*"

payload:
  maxRunTime: { $eval: to_int(build.maxRunTime) }
  image: ${build.docker_image}

  features:
    taskclusterProxy: true

  command:
    - "/bin/bash"
    - "--login"
    - "-cxe"
    - $let:
        extraSystemSetup: { $eval: strip(str(build.system_setup)) }
        taskIndexExpire: { $fromNow: '6 months' }
      in: >
        (apt-get -qq -y remove --purge ubuntu-advantage-tools || true) &&
        apt-get -qq update && apt-get -qq -y install curl git sudo && ${extraSystemSetup};
        cache_artifact=`curl -sSIL -o /dev/null -w "%{http_code}" ${build.cache.artifact_url}` &&
        if [ "$cache_artifact" != "200" ]; then
        adduser --system --home ${system.homedir.linux} ${system.username} && cd ${system.homedir.linux}/ &&
        mkdir -p /tmp/artifacts/ && chmod 777 /tmp/artifacts &&
        echo -e "#!/bin/bash\nset -xe\n env && id && (git clone --quiet ${build.build_or_cache.repo} ~/${build.build_or_cache.dir}/ && cd ~/${build.build_or_cache.dir}/ && git checkout --quiet ${build.build_or_cache.sha})" > /tmp/clone.sh && chmod +x /tmp/clone.sh &&
        sudo -H -u ${system.username} /bin/bash /tmp/clone.sh &&
        sudo -H -u ${system.username} --preserve-env /bin/bash ${system.homedir.linux}/${build.build_or_cache.dir}/${build.scripts.build} &&
        sudo -H -u ${system.username} /bin/bash ${system.homedir.linux}/${build.build_or_cache.dir}/${build.scripts.package} &&
        sudo -H -u ${system.username} --preserve-env /bin/bash ${system.homedir.linux}/${build.build_or_cache.dir}/taskcluster/tc-update-index.sh ${taskIndexExpire} taskcluster ${build.cache.artifact_namespace}
        fi;

  artifacts:
    "public":
      type: "directory"
      path: "/tmp/artifacts/"
      expires: { $fromNow: '6 months' }

metadata:
  name: ${build.metadata.name}
  description: ${build.metadata.description}
  owner: ${event.head.user.email}
  source: ${event.head.repo.url}
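The core of this template is the cache probe: it checks the expected artifact URL and only runs the clone/build/package/index steps when the response is anything other than HTTP 200. The same gate, sketched in Python for clarity (URL handling only; the build steps themselves are the shell commands above):

```python
import urllib.error
import urllib.request

def cache_artifact_exists(artifact_url):
    """Mirror of the `curl -sSIL -o /dev/null -w "%{http_code}"` probe above."""
    req = urllib.request.Request(artifact_url, method='HEAD')
    try:
        with urllib.request.urlopen(req) as resp:
            return resp.status == 200
    except urllib.error.HTTPError:
        return False

# The template only clones and rebuilds the cache when this returns False.
```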
61
taskcluster/generic_tc_caching-win-opt-base.tyml
Normal file
61
taskcluster/generic_tc_caching-win-opt-base.tyml
Normal file
@ -0,0 +1,61 @@
|
||||
taskId: ${taskcluster.taskId}
|
||||
provisionerId: ${taskcluster.docker.provisionerId}
|
||||
workerType: ${taskcluster.docker.workerTypeWin}
|
||||
taskGroupId: ${taskcluster.taskGroupId}
|
||||
schedulerId: ${taskcluster.schedulerId}
|
||||
created: { $fromNow: '0 sec' }
|
||||
deadline: { $fromNow: '1 day' }
|
||||
expires: { $fromNow: '6 months' }
|
||||
scopes:
|
||||
- "index:insert-task:project.deepspeech.*"
|
||||
|
||||
payload:
|
||||
maxRunTime: { $eval: to_int(build.maxRunTime) }
|
||||
|
||||
features:
|
||||
taskclusterProxy: true
|
||||
|
||||
mounts:
|
||||
- file: msys2-base-x86_64.tar.xz
|
||||
content:
|
||||
sha256: ${system.msys2.sha}
|
||||
url: ${system.msys2.url}
|
||||
- file: filesystem-2020.02-3-x86_64.pkg.tar.xz
|
||||
content:
|
||||
sha256: ${system.msys2_filesystem_pkg.sha}
|
||||
url: ${system.msys2_filesystem_pkg.url}
|
||||
|
||||
env:
|
||||
TC_MSYS_VERSION: 'MSYS_NT-6.3-9600'
|
||||
MSYS: 'winsymlinks:nativestrict'
|
||||
|
||||
command:
|
||||
- >-
|
||||
"C:\Program Files\7-zip\7z.exe" x -txz -so msys2-base-x86_64.tar.xz |
|
||||
"C:\Program Files\7-zip\7z.exe" x -o%USERPROFILE% -ttar -aoa -si
|
||||
- .\msys64\usr\bin\bash.exe --login -cx "export THIS_BASH_PID=$$; ps -ef | grep '[?]' | awk '{print $2}' | grep -v $THIS_BASH_PID | xargs -r kill; exit 0"
|
||||
- .\msys64\usr\bin\bash.exe --login -cx "${system.msys2_filesystem_pkg.install}"
|
||||
- .\msys64\usr\bin\bash.exe --login -cx "pacman -Syu --noconfirm"
|
||||
- .\msys64\usr\bin\bash.exe --login -cx "pacman -Syu --noconfirm"
|
||||
- $let:
|
||||
taskIndexExpire: { $fromNow: '6 months' }
|
||||
in: >
|
||||
echo .\msys64\usr\bin\bash.exe --login -cxe "export LC_ALL=C &&
|
||||
export PATH=\"$USERPROFILE/msys64/usr/bin:/c/Python36:/c/Program Files/Git/bin:/c/Program Files/7-Zip/:$PATH\" &&
|
||||
export TASKCLUSTER_ARTIFACTS=\"$(cygpath -u $USERPROFILE/public)\" &&
|
||||
export TASKCLUSTER_TASK_DIR=\"/c/builds/tc-workdir/\" &&
|
||||
echo \"export TASKCLUSTER_TASK_EXIT_CODE=0\" > $USERPROFILE/tc-exit.sh &&
|
||||
env && pacman --noconfirm -S tar && mkdir -p $TASKCLUSTER_ARTIFACTS/ && if [ \"`curl -sSIL -o /dev/null -w %%{http_code} ${build.cache.artifact_url}`\" != \"200\" ]; then git clone --quiet ${build.build_or_cache.repo} $TASKCLUSTER_TASK_DIR/${build.build_or_cache.dir}/ && cd $TASKCLUSTER_TASK_DIR/${build.build_or_cache.dir} && git checkout --quiet ${build.build_or_cache.sha} && $TASKCLUSTER_TASK_DIR/${build.build_or_cache.dir}/${build.scripts.build} && $TASKCLUSTER_TASK_DIR/${build.build_or_cache.dir}/${build.scripts.package} && $TASKCLUSTER_TASK_DIR/${build.build_or_cache.dir}/taskcluster/tc-update-index.sh ${taskIndexExpire} taskcluster ${build.cache.artifact_namespace}; fi; echo \"export TASKCLUSTER_TASK_EXIT_CODE=$?\" > $USERPROFILE/tc-exit.sh" | cmd /k
|
||||
|
||||
- .\msys64\usr\bin\bash.exe --login -cxe "source $USERPROFILE/tc-exit.sh && exit $TASKCLUSTER_TASK_EXIT_CODE"
|
||||
|
||||
artifacts:
|
||||
- type: "directory"
|
||||
path: "public/"
|
||||
expires: { $fromNow: '6 months' }
|
||||
|
||||
metadata:
|
||||
name: ${build.metadata.name}
|
||||
description: ${build.metadata.description}
|
||||
owner: ${event.head.user.email}
|
||||
source: ${event.head.repo.url}
|
@ -1,11 +1,11 @@
|
||||
build:
|
||||
template_file: android_cache-opt-base.tyml
|
||||
template_file: generic_tc_caching-linux-opt-base.tyml
|
||||
cache:
|
||||
artifact_url: ${system.gradle_cache.url}
|
||||
artifact_namespace: ${system.gradle_cache.namespace}
|
||||
system_setup:
|
||||
>
|
||||
${java.packages_trusty.apt}
|
||||
cache:
|
||||
url: ${system.gradle_cache.url}
|
||||
namespace: ${system.gradle_cache.namespace}
|
||||
${java.packages_xenial.apt}
|
||||
scripts:
|
||||
build: "taskcluster/gradle-build.sh"
|
||||
package: "taskcluster/gradle-package.sh"
|
||||
|
@ -2,17 +2,6 @@
|
||||
|
||||
set -xe
|
||||
|
||||
TC_EXPIRE=$1
|
||||
TC_INSTANCE=$2
|
||||
TC_INDEX=$3
|
||||
|
||||
source $(dirname "$0")/tc-tests-utils.sh
|
||||
|
||||
cd ${GRADLE_USER_HOME}/../ && tar -czf $TASKCLUSTER_ARTIFACTS/gradle.tar.gz gradle-cache/
|
||||
|
||||
if [ ! -z "${TC_EXPIRE}" -a ! -z "${TC_INSTANCE}" -a ! -z "${TC_INDEX}" ]; then
|
||||
curl -sSL --fail -X PUT \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"taskId\":\"$TASK_ID\",\"rank\":0,\"expires\":\"${TC_EXPIRE}\",\"data\":{}}" \
|
||||
"http://${TC_INSTANCE}/index/v1/task/${TC_INDEX}"
|
||||
fi;
|
||||
|
@ -1,59 +0,0 @@
|
||||
taskId: ${taskcluster.taskId}
|
||||
provisionerId: ${taskcluster.generic.provisionerId}
|
||||
workerType: ${taskcluster.generic.workerType}
|
||||
taskGroupId: ${taskcluster.taskGroupId}
|
||||
schedulerId: ${taskcluster.schedulerId}
|
||||
dependencies:
|
||||
$map: { $eval: build.dependencies }
|
||||
each(b):
|
||||
$eval: as_slugid(b)
|
||||
created: { $fromNow: '0 sec' }
|
||||
deadline: { $fromNow: '1 day' }
|
||||
expires: { $fromNow: '6 months' }
|
||||
scopes:
|
||||
- "index:insert-task:project.deepspeech.*"
|
||||
|
||||
payload:
|
||||
maxRunTime: { $eval: to_int(build.maxRunTime) }
|
||||
|
||||
features:
|
||||
taskclusterProxy: true
|
||||
|
||||
command:
|
||||
- - "/bin/bash"
|
||||
- "--login"
|
||||
- "-cxe"
|
||||
- $let:
|
||||
taskIndexExpire: { $fromNow: '6 months' }
|
||||
in: >
|
||||
export TASKCLUSTER_ARTIFACTS="$(pwd)/public/" &&
|
||||
export TASKCLUSTER_ORIG_TASKDIR="$(pwd)" &&
|
||||
(mkdir ../tc-workdir/ || rm -fr ../tc-workdir/*) && cd ../tc-workdir/ &&
|
||||
export TASKCLUSTER_TASK_DIR="$(pwd)" &&
|
||||
export LC_ALL=C &&
|
||||
export MACOSX_DEPLOYMENT_TARGET=10.10 &&
|
||||
export SDKROOT=/Library/Developer/CommandLineTools/SDKs/MacOSX10.14.sdk/ &&
|
||||
env &&
|
||||
mkdir -p $TASKCLUSTER_ARTIFACTS/ &&
|
||||
swig_bin=`curl -sSIL -o /dev/null -w "%{http_code}" ${build.homebrew.url}` &&
|
||||
if [ "$swig_bin" != "200" ]; then
|
||||
git clone --quiet ${event.head.repo.url} $TASKCLUSTER_TASK_DIR/DeepSpeech/ds/ &&
|
||||
cd $TASKCLUSTER_TASK_DIR/DeepSpeech/ds && git checkout --quiet ${event.head.sha} &&
|
||||
$TASKCLUSTER_TASK_DIR/DeepSpeech/ds/${build.scripts.build} &&
|
||||
$TASKCLUSTER_TASK_DIR/DeepSpeech/ds/${build.scripts.package} &&
|
||||
curl -sSL --fail -X PUT \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"taskId\":\"$TASK_ID\",\"rank\":0,\"expires\":\"${taskIndexExpire}\",\"data\":{}}" \
|
||||
"http://127.0.0.1:8080/index/v1/task/${build.homebrew.namespace}"
|
||||
fi;
|
||||
|
||||
artifacts:
|
||||
- type: "directory"
|
||||
path: "public/"
|
||||
expires: { $fromNow: '6 months' }
|
||||
|
||||
metadata:
|
||||
name: ${build.metadata.name}
|
||||
description: ${build.metadata.description}
|
||||
owner: ${event.head.user.email}
|
||||
source: ${event.head.repo.url}
|
@ -1,8 +1,8 @@
|
||||
build:
|
||||
template_file: homebrew-darwin-opt-base.tyml
|
||||
homebrew:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.homebrew_builds.5/artifacts/public/homebrew_builds.tar.gz'
|
||||
namespace: 'project.deepspeech.homebrew_builds.5'
|
||||
template_file: generic_tc_caching-darwin-opt-base.tyml
|
||||
cache:
|
||||
artifact_url: ${system.homebrew_builds.url}
|
||||
artifact_namespace: ${system.homebrew_builds.namespace}
|
||||
scripts:
|
||||
build: "taskcluster/homebrew-build.sh --builds"
|
||||
package: "taskcluster/homebrew-package.sh --builds"
|
||||
|
@ -1,8 +1,8 @@
|
||||
build:
|
||||
template_file: homebrew-darwin-opt-base.tyml
|
||||
homebrew:
|
||||
url: 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.homebrew_tests.6/artifacts/public/homebrew_tests.tar.gz'
|
||||
namespace: 'project.deepspeech.homebrew_tests.6'
|
||||
template_file: generic_tc_caching-darwin-opt-base.tyml
|
||||
cache:
|
||||
artifact_url: ${system.homebrew_tests.url}
|
||||
artifact_namespace: ${system.homebrew_tests.namespace}
|
||||
scripts:
|
||||
build: "taskcluster/homebrew-build.sh --tests"
|
||||
package: "taskcluster/homebrew-package.sh --tests"
|
||||
|
@ -10,9 +10,9 @@ build:
|
||||
- "index.project.deepspeech.deepspeech.native_client.cpu.${event.head.sha}"
|
||||
system_setup:
|
||||
>
|
||||
${nodejs.packages_trusty.prep_12} && ${nodejs.packages_trusty.apt_pinning}
|
||||
${nodejs.packages_xenial.prep_12} && ${nodejs.packages_xenial.apt_pinning}
|
||||
&& apt-get -qq update && apt-get -qq -y install nodejs python-yaml
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r1.15.ceb46aae5836a0f648a2c3da5942af2b7d1b98bf.cpu/artifacts/public/home.tar.xz"
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r2.2.c29895fba1b9f9f48e2e54eefb024c69aa333473.cpu/artifacts/public/home.tar.xz"
|
||||
scripts:
|
||||
build: "taskcluster/host-build.sh"
|
||||
package: "taskcluster/package.sh"
|
||||
|
@ -10,9 +10,9 @@ build:
|
||||
- "index.project.deepspeech.deepspeech.native_client.cpu-ctc.${event.head.sha}"
|
||||
system_setup:
|
||||
>
|
||||
${nodejs.packages_trusty.prep_12} && ${nodejs.packages_trusty.apt_pinning}
|
||||
${nodejs.packages_xenial.prep_12} && ${nodejs.packages_xenial.apt_pinning}
|
||||
&& apt-get -qq update && apt-get -qq -y install nodejs python-yaml
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r1.15.ceb46aae5836a0f648a2c3da5942af2b7d1b98bf.cpu/artifacts/public/home.tar.xz"
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r2.2.c29895fba1b9f9f48e2e54eefb024c69aa333473.cpu/artifacts/public/home.tar.xz"
|
||||
scripts:
|
||||
build: 'taskcluster/decoder-build.sh'
|
||||
package: 'taskcluster/decoder-package.sh'
|
||||
|
@ -10,9 +10,9 @@ build:
|
||||
- "index.project.deepspeech.deepspeech.native_client.gpu.${event.head.sha}"
|
||||
system_setup:
|
||||
>
|
||||
${nodejs.packages_trusty.prep_12} && ${nodejs.packages_trusty.apt_pinning}
|
||||
${nodejs.packages_xenial.prep_12} && ${nodejs.packages_xenial.apt_pinning}
|
||||
&& apt-get -qq update && apt-get -qq -y install nodejs python-yaml
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r1.15.ceb46aae5836a0f648a2c3da5942af2b7d1b98bf.gpu/artifacts/public/home.tar.xz"
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r2.2.c29895fba1b9f9f48e2e54eefb024c69aa333473.gpu/artifacts/public/home.tar.xz"
|
||||
maxRunTime: 14400
|
||||
scripts:
|
||||
build: "taskcluster/cuda-build.sh"
|
||||
|
@ -10,9 +10,9 @@ build:
|
||||
- "index.project.deepspeech.deepspeech.native_client.tflite.${event.head.sha}"
|
||||
system_setup:
|
||||
>
|
||||
${nodejs.packages_trusty.prep_12} && ${nodejs.packages_trusty.apt_pinning}
|
||||
${nodejs.packages_xenial.prep_12} && ${nodejs.packages_xenial.apt_pinning}
|
||||
&& apt-get -qq update && apt-get -qq -y install nodejs python-yaml
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r1.15.ceb46aae5836a0f648a2c3da5942af2b7d1b98bf.cpu/artifacts/public/home.tar.xz"
|
||||
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r2.2.c29895fba1b9f9f48e2e54eefb024c69aa333473.cpu/artifacts/public/home.tar.xz"
|
||||
scripts:
|
||||
build: "taskcluster/host-build.sh tflite"
|
||||
package: "taskcluster/package.sh"
|
||||
|
@ -14,12 +14,12 @@ build:
apt-get -qq -y install gdebi git pixz &&
wget http://mirrors.kernel.org/ubuntu/pool/universe/m/multistrap/multistrap_2.2.0ubuntu2_all.deb -O /tmp/multistrap_2.2.0ubuntu2_all.deb &&
echo "y" | gdebi /tmp/multistrap_2.2.0ubuntu2_all.deb &&
${nodejs.packages_trusty.prep_12} && ${nodejs.packages_trusty.apt_pinning}
${nodejs.packages_xenial.prep_12} && ${nodejs.packages_xenial.apt_pinning}
&& apt-get -qq update && apt-get -qq -y install nodejs python-yaml
system_config:
>
multistrap -d /tmp/multistrap-armbian64-buster/ -f ${system.homedir.linux}/DeepSpeech/ds/native_client/multistrap_armbian64_buster.conf
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r1.15.ceb46aae5836a0f648a2c3da5942af2b7d1b98bf.arm64/artifacts/public/home.tar.xz"
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r2.2.c29895fba1b9f9f48e2e54eefb024c69aa333473.arm64/artifacts/public/home.tar.xz"
scripts:
build: "taskcluster/arm64-build.sh"
package: "taskcluster/package.sh"

@ -30,7 +30,7 @@ then:

payload:
maxRunTime: { $eval: to_int(build.maxRunTime) }
image: "ubuntu:14.04"
image: "ubuntu:16.04"

env:
TENSORFLOW_BUILD_ARTIFACT: ${build.tensorflow}
@ -43,10 +43,8 @@ then:
extraSystemSetup: { $eval: strip(str(build.system_setup)) }
extraSystemConfig: { $eval: strip(str(build.system_config)) }
in: >
apt-get -qq -y remove --purge ubuntu-advantage-tools &&
${aptEc2Mirrors} &&
adduser --system --home ${system.homedir.linux} ${system.username} &&
apt-get -qq update && apt-get -qq -y install ${tensorflow.packages_trusty.apt} pixz pkg-config realpath unzip wget zip && ${extraSystemSetup} &&
apt-get -qq update && apt-get -qq -y install ${tensorflow.packages_xenial.apt} pixz pkg-config realpath sudo unzip wget zip && ${extraSystemSetup} &&
cd ${system.homedir.linux}/ &&
echo -e "#!/bin/bash\nset -xe\n env && id && (wget -O - $TENSORFLOW_BUILD_ARTIFACT | pixz -d | tar -C ${system.homedir.linux}/ -xf - ) && git clone --quiet ${event.head.repo.url} ~/DeepSpeech/ds/ && cd ~/DeepSpeech/ds && git checkout --quiet ${event.head.sha} && ln -s ~/DeepSpeech/ds/native_client/ ~/DeepSpeech/tf/native_client && mkdir -p ${system.homedir.linux}/.cache/node-gyp/ && wget -O - ${system.node_gyp_cache.url} | tar -C ${system.homedir.linux}/.cache/node-gyp/ -xzf - && mkdir -p ${system.homedir.linux}/pyenv-root/ && wget -O - ${system.pyenv.linux.url} | tar -C ${system.homedir.linux}/pyenv-root/ -xzf - && if [ ! -z "${build.gradle_cache.url}" ]; then wget -O - ${build.gradle_cache.url} | tar -C ${system.homedir.linux}/ -xzf - ; fi && if [ ! -z "${build.android_cache.url}" ]; then wget -O - ${build.android_cache.url} | tar -C ${system.homedir.linux}/ -xzf - ; fi;" > /tmp/clone.sh && chmod +x /tmp/clone.sh &&
sudo -H -u ${system.username} /bin/bash /tmp/clone.sh && ${extraSystemConfig} &&

@ -14,12 +14,12 @@ build:
apt-get -qq -y install gdebi git pixz &&
wget http://mirrors.kernel.org/ubuntu/pool/universe/m/multistrap/multistrap_2.2.0ubuntu2_all.deb -O /tmp/multistrap_2.2.0ubuntu2_all.deb &&
echo "y" | gdebi /tmp/multistrap_2.2.0ubuntu2_all.deb &&
${nodejs.packages_trusty.prep_12} && ${nodejs.packages_trusty.apt_pinning}
${nodejs.packages_xenial.prep_12} && ${nodejs.packages_xenial.apt_pinning}
&& apt-get -qq update && apt-get -qq -y install nodejs python-yaml
system_config:
>
multistrap -d /tmp/multistrap-raspbian-buster/ -f ${system.homedir.linux}/DeepSpeech/ds/native_client/multistrap_raspbian_buster.conf
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r1.15.ceb46aae5836a0f648a2c3da5942af2b7d1b98bf.arm/artifacts/public/home.tar.xz"
tensorflow: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r2.2.c29895fba1b9f9f48e2e54eefb024c69aa333473.arm/artifacts/public/home.tar.xz"
scripts:
build: "taskcluster/rpi3-build.sh"
package: "taskcluster/package.sh"

@ -1,57 +0,0 @@
$if: 'event.event in build.allowed'
then:
taskId: ${taskcluster.taskId}
provisionerId: ${taskcluster.docker.provisionerId}
workerType: ${taskcluster.docker.workerType}
taskGroupId: ${taskcluster.taskGroupId}
schedulerId: ${taskcluster.schedulerId}
created: { $fromNow: '0 sec' }
deadline: { $fromNow: '1 day' }
expires: { $fromNow: '6 months' }
scopes:
- "index:insert-task:project.deepspeech.*"

payload:
maxRunTime: { $eval: to_int(build.maxRunTime) }
image: "node:12"

features:
taskclusterProxy: true

# This task will inspect system.node_gyp_cache taskcluster index existence:
# - if the artifact does not exists, it will build it
# - if the artifact exists, it will re-mirror it (if we don't do that, new
# index gets published with no artifact and erases existing one)
command:
- "/bin/bash"
- "--login"
- "-cxe"
- $let:
extraSystemSetup: { $eval: strip(str(build.system_setup)) }
taskIndexExpire: { $fromNow: '6 months' }
in: >
apt-get -qq update && apt-get -qq -y install curl git && ${extraSystemSetup};
cache_file=`curl -sSIL -o /dev/null -w "%{http_code}" ${system.node_gyp_cache.url}` &&
if [ "$cache_file" != "200" ]; then
mkdir -p ~/DeepSpeech/ds/ &&
git clone --quiet ${event.head.repo.url} ~/DeepSpeech/ds/ &&
cd ~/DeepSpeech/ds && git checkout --quiet ${event.head.sha} &&
~/DeepSpeech/ds/${build.scripts.build} &&
~/DeepSpeech/ds/${build.scripts.package} &&
curl -sSL --fail -X PUT \
-H "Content-Type: application/json" \
-d "{\"taskId\":\"$TASK_ID\",\"rank\":0,\"expires\":\"${taskIndexExpire}\",\"data\":{}}" \
"http://taskcluster/index/v1/task/${system.node_gyp_cache.namespace}"
fi;

artifacts:
"public":
type: "directory"
path: "/tmp/artifacts/"
expires: { $fromNow: '6 months' }

metadata:
name: ${build.metadata.name}
description: ${build.metadata.description}
owner: ${event.head.user.email}
source: ${event.head.repo.url}
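
The deleted node-gyp-cache-base.tyml above is one instance of the cache-or-rebuild pattern that this change consolidates into the generic_tc_caching templates: probe the cache artifact's index URL, and only on a miss clone, build, package, and register the task back into the TaskCluster index so the namespace keeps pointing at a live artifact. A minimal shell sketch of that flow, not taken from the repository — CACHE_URL, CACHE_NAMESPACE, BUILD_SCRIPT, PACKAGE_SCRIPT, and EXPIRES are illustrative placeholders:

# Probe the cached artifact; any status other than HTTP 200 is treated as a cache miss.
status=$(curl -sSIL -o /dev/null -w "%{http_code}" "${CACHE_URL}")
if [ "${status}" != "200" ]; then
  # Rebuild and package the artifact, then insert this task under the cache namespace
  # (the PUT goes through the taskclusterProxy feature enabled in the template).
  "${BUILD_SCRIPT}" && "${PACKAGE_SCRIPT}" &&
  curl -sSL --fail -X PUT \
    -H "Content-Type: application/json" \
    -d "{\"taskId\":\"${TASK_ID}\",\"rank\":0,\"expires\":\"${EXPIRES}\",\"data\":{}}" \
    "http://taskcluster/index/v1/task/${CACHE_NAMESPACE}"
fi
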
@ -1,5 +1,12 @@
build:
template_file: node-gyp-cache-base.tyml
template_file: generic_tc_caching-linux-opt-base.tyml
docker_image: "node:12"
cache:
artifact_url: "${system.node_gyp_cache.url}"
artifact_namespace: "${system.node_gyp_cache.namespace}"
system_setup:
>
(apt-get -qq -y install sudo || true)
scripts:
build: "taskcluster/node-gyp-populate.sh"
package: "taskcluster/node-gyp-package.sh"

@ -8,7 +8,9 @@ node --version

npm --version

npm install -g node-gyp@6.x
npm install node-gyp@6.x

export PATH=$HOME/node_modules/.bin/:$PATH

devDir=$DS_ROOT_TASK/node-gyp-cache/

@ -8,7 +8,7 @@ build:
- "win-amd64-cpu-opt"
system_setup:
>
${nodejs.packages_trusty.prep_12} && ${nodejs.packages_trusty.apt_pinning}
${nodejs.packages_xenial.prep_12} && ${nodejs.packages_xenial.apt_pinning}
&& apt-get -qq update && apt-get -qq -y install nodejs python-yaml
scripts:
build: "taskcluster/node-build.sh"

@ -5,7 +5,7 @@ build:
- "win-amd64-gpu-opt"
system_setup:
>
${nodejs.packages_trusty.prep_12} && ${nodejs.packages_trusty.apt_pinning}
${nodejs.packages_xenial.prep_12} && ${nodejs.packages_xenial.apt_pinning}
&& apt-get -qq update && apt-get -qq -y install nodejs python-yaml
scripts:
build: "taskcluster/node-build.sh --cuda"

@ -30,7 +30,7 @@ then:

payload:
maxRunTime: { $eval: to_int(build.maxRunTime) }
image: "ubuntu:14.04"
image: "ubuntu:16.04"

command:
- "/bin/bash"
@ -40,10 +40,8 @@ then:
extraSystemSetup: { $eval: strip(str(build.system_setup)) }
extraSystemConfig: { $eval: strip(str(build.system_config)) }
in: >
apt-get -qq -y remove --purge ubuntu-advantage-tools &&
${aptEc2Mirrors} &&
adduser --system --home ${system.homedir.linux} ${system.username} &&
apt-get -qq update && apt-get -qq -y install realpath git wget curl make && ${extraSystemSetup} &&
apt-get -qq update && apt-get -qq -y install realpath git wget curl make sudo && ${extraSystemSetup} &&
cd ${system.homedir.linux}/ &&
echo -e "#!/bin/bash\nset -xe\n env && id && git clone --quiet ${event.head.repo.url} ~/DeepSpeech/ds/ && cd ~/DeepSpeech/ds && git checkout --quiet ${event.head.sha} && mkdir -p ~/DeepSpeech/tf/ && touch ~/DeepSpeech/tf/tc-vars.sh && chmod +x ~/DeepSpeech/tf/tc-vars.sh && mkdir -p ${system.homedir.linux}/.cache/node-gyp/ && wget -O - ${system.node_gyp_cache.url} | tar -C ${system.homedir.linux}/.cache/node-gyp/ -xzf -" > /tmp/clone.sh && chmod +x /tmp/clone.sh &&
sudo -H -u ${system.username} /bin/bash /tmp/clone.sh && ${extraSystemConfig} &&

@ -6,7 +6,7 @@ build:
- "win-amd64-tflite-opt"
system_setup:
>
${nodejs.packages_trusty.prep_12} && ${nodejs.packages_trusty.apt_pinning}
${nodejs.packages_xenial.prep_12} && ${nodejs.packages_xenial.apt_pinning}
&& apt-get -qq update && apt-get -qq -y install nodejs python-yaml
scripts:
build: "taskcluster/node-build.sh --tflite"

@ -18,11 +18,7 @@ for pyver_conf in ${SUPPORTED_PYTHON_VERSIONS}; do

pyalias="${pyver}_${pyconf}"

maybe_ssl102_py37 ${pyver}

LD_LIBRARY_PATH=${PY37_LDPATH}:$LD_LIBRARY_PATH \
PYTHON_CONFIGURE_OPTS="--enable-unicode=${pyconf} ${PY37_OPENSSL}" \
pyenv_install ${pyver} ${pyalias}
PYTHON_CONFIGURE_OPTS="--enable-unicode=${pyconf}" pyenv_install ${pyver} ${pyalias}

setup_pyenv_virtualenv "${pyalias}" "deepspeech"

@ -1,5 +1,8 @@
build:
template_file: pyenv-darwin-opt-base.tyml
template_file: generic_tc_caching-darwin-opt-base.tyml
cache:
artifact_url: ${system.pyenv.osx.url}
artifact_namespace: ${system.pyenv.osx.namespace}
scripts:
build: "taskcluster/pyenv-build.sh"
package: "taskcluster/pyenv-package.sh"

@ -1,8 +1,11 @@
build:
template_file: pyenv-linux-opt-base.tyml
template_file: generic_tc_caching-linux-opt-base.tyml
cache:
artifact_url: "${system.pyenv.linux.url}"
artifact_namespace: "${system.pyenv.linux.namespace}"
system_setup:
>
apt-get -qq update && apt-get -qq -y install python-yaml ${python.packages_trusty.apt} wget
apt-get -qq update && apt-get -qq -y install python-yaml ${python.packages_xenial.apt} wget
scripts:
build: "taskcluster/pyenv-build.sh"
package: "taskcluster/pyenv-package.sh"

@ -1,52 +0,0 @@
$if: 'event.event in build.allowed'
then:
taskId: ${taskcluster.taskId}
provisionerId: ${taskcluster.docker.provisionerId}
workerType: ${taskcluster.docker.workerType}
taskGroupId: ${taskcluster.taskGroupId}
schedulerId: ${taskcluster.schedulerId}
created: { $fromNow: '0 sec' }
deadline: { $fromNow: '1 day' }
expires: { $fromNow: '6 months' }
scopes:
- "index:insert-task:project.deepspeech.*"

payload:
maxRunTime: { $eval: to_int(build.maxRunTime) }
image: ${build.docker_image}

features:
taskclusterProxy: true

command:
- "/bin/bash"
- "--login"
- "-cxe"
- $let:
extraSystemSetup: { $eval: strip(str(build.system_setup)) }
taskIndexExpire: { $fromNow: '6 months' }
in: >
(apt-get -qq -y remove --purge ubuntu-advantage-tools || true) &&
apt-get -qq update && apt-get -qq -y install curl git;
swig_bin=`curl -sSIL -o /dev/null -w "%{http_code}" ${system.pyenv.linux.url}` &&
if [ "$swig_bin" != "200" ]; then
${extraSystemSetup} &&
adduser --system --home ${system.homedir.linux} ${system.username} && cd ${system.homedir.linux}/ &&
mkdir -p /tmp/artifacts/ && chmod 777 /tmp/artifacts &&
echo -e "#!/bin/bash\nset -xe\n env && id && git clone --quiet ${event.head.repo.url} ~/DeepSpeech/ds/ && cd ~/DeepSpeech/ds && git checkout --quiet ${event.head.sha}" > /tmp/clone.sh && chmod +x /tmp/clone.sh &&
sudo -H -u ${system.username} /bin/bash /tmp/clone.sh &&
sudo -H -u ${system.username} --preserve-env /bin/bash ${system.homedir.linux}/DeepSpeech/ds/${build.scripts.build} &&
sudo -H -u ${system.username} --preserve-env /bin/bash ${system.homedir.linux}/DeepSpeech/ds/${build.scripts.package} ${taskIndexExpire} taskcluster ${system.pyenv.linux.namespace}
fi;

artifacts:
"public":
type: "directory"
path: "/tmp/artifacts/"
expires: { $fromNow: '6 months' }

metadata:
name: ${build.metadata.name}
description: ${build.metadata.description}
owner: ${event.head.user.email}
source: ${event.head.repo.url}

@ -2,17 +2,6 @@

set -xe

TC_EXPIRE=$1
TC_INSTANCE=$2
TC_INDEX=$3

source $(dirname "$0")/tc-tests-utils.sh

cd ${PYENV_ROOT}/ && $TAR -czf $TASKCLUSTER_ARTIFACTS/pyenv.tar.gz .

if [ ! -z "${TC_EXPIRE}" -a ! -z "${TC_INSTANCE}" -a ! -z "${TC_INDEX}" ]; then
curl -sSL --fail -X PUT \
-H "Content-Type: application/json" \
-d "{\"taskId\":\"$TASK_ID\",\"rank\":0,\"expires\":\"${TC_EXPIRE}\",\"data\":{}}" \
"http://${TC_INSTANCE}/index/v1/task/${TC_INDEX}"
fi;

@ -1,5 +1,8 @@
build:
template_file: pyenv-win-opt-base.tyml
template_file: generic_tc_caching-win-opt-base.tyml
cache:
artifact_url: "${system.pyenv.win.url}"
artifact_namespace: "${system.pyenv.win.namespace}"
scripts:
build: "taskcluster/pyenv-build.sh"
package: "taskcluster/pyenv-package.sh"

@ -1,59 +0,0 @@
$if: 'event.event in build.allowed'
then:
taskId: ${taskcluster.taskId}
provisionerId: ${taskcluster.docker.provisionerId}
workerType: ${taskcluster.docker.workerTypeWin}
taskGroupId: ${taskcluster.taskGroupId}
schedulerId: ${taskcluster.schedulerId}
created: { $fromNow: '0 sec' }
deadline: { $fromNow: '1 day' }
expires: { $fromNow: '6 months' }
scopes:
- "index:insert-task:project.deepspeech.*"

payload:
maxRunTime: { $eval: to_int(build.maxRunTime) }

features:
taskclusterProxy: true

mounts:
- file: msys2-base-x86_64.tar.xz
content:
sha256: c4443113497acb2d2e285d40b929fc55f33f8f669902595ecdf66a655b63dc60
url: >-
https://github.com/msys2/msys2-installer/releases/download/2020-05-17/msys2-base-x86_64-20200517.tar.xz

env:
TC_MSYS_VERSION: 'MSYS_NT-6.3-9600'
MSYS: 'winsymlinks:nativestrict'

command:
- >-
"C:\Program Files\7-zip\7z.exe" x -txz -so msys2-base-x86_64.tar.xz |
"C:\Program Files\7-zip\7z.exe" x -o%USERPROFILE% -ttar -aoa -si
- .\msys64\usr\bin\bash.exe --login -cx "export THIS_BASH_PID=$$; ps -ef | grep '[?]' | awk '{print $2}' | grep -v $THIS_BASH_PID | xargs -r kill; exit 0"
- .\msys64\usr\bin\bash.exe --login -cx "pacman -Syu --noconfirm"
- .\msys64\usr\bin\bash.exe --login -cx "pacman -Syu --noconfirm"
- $let:
taskIndexExpire: { $fromNow: '6 months' }
in: >
echo .\msys64\usr\bin\bash.exe --login -cxe "export LC_ALL=C &&
export PATH=\"$USERPROFILE/msys64/usr/bin:/c/Python36:/c/Program Files/Git/bin:/c/Program Files/7-Zip/:$PATH\" &&
export TASKCLUSTER_ARTIFACTS=\"$(cygpath -u $USERPROFILE/public)\" &&
export TASKCLUSTER_TASK_DIR=\"/c/builds/tc-workdir/\" &&
echo \"export TASKCLUSTER_TASK_EXIT_CODE=0\" > $USERPROFILE/tc-exit.sh &&
env && pacman --noconfirm -R bsdtar && pacman --noconfirm -S tar && mkdir -p $TASKCLUSTER_ARTIFACTS/ && if [ \"`curl -sSIL -o /dev/null -w %%{http_code} ${system.pyenv.win.url}`\" != \"200\" ]; then git clone --quiet ${event.head.repo.url} $TASKCLUSTER_TASK_DIR/DeepSpeech/ds/ && cd $TASKCLUSTER_TASK_DIR/DeepSpeech/ds && git checkout --quiet ${event.head.sha} && $TASKCLUSTER_TASK_DIR/DeepSpeech/ds/${build.scripts.build} && $TASKCLUSTER_TASK_DIR/DeepSpeech/ds/${build.scripts.package} ${taskIndexExpire} taskcluster ${system.pyenv.win.namespace}; fi; echo \"export TASKCLUSTER_TASK_EXIT_CODE=$?\" > $USERPROFILE/tc-exit.sh" | cmd /k

- .\msys64\usr\bin\bash.exe --login -cxe "source $USERPROFILE/tc-exit.sh && exit $TASKCLUSTER_TASK_EXIT_CODE"

artifacts:
- type: "directory"
path: "public/"
expires: { $fromNow: '6 months' }

metadata:
name: ${build.metadata.name}
description: ${build.metadata.description}
owner: ${event.head.user.email}
source: ${event.head.repo.url}

@ -1,7 +1,12 @@
build:
template_file: swig-darwin-opt-base.tyml
swig_system: "darwin"
swig_arch: "amd64"
template_file: generic_tc_caching-darwin-opt-base.tyml
build_or_cache:
repo: "${system.swig.repo}"
sha: "${system.swig.sha1}"
dir: "swig"
cache:
artifact_url: "${system.swig_build.osx.url}"
artifact_namespace: "${system.swig_build.osx.namespace}"
scripts:
build: "taskcluster/build.sh"
package: "taskcluster/package.sh"

@ -1,56 +0,0 @@
taskId: ${taskcluster.taskId}
provisionerId: ${taskcluster.generic.provisionerId}
workerType: ${taskcluster.generic.workerType}
taskGroupId: ${taskcluster.taskGroupId}
schedulerId: ${taskcluster.schedulerId}
dependencies:
$map: { $eval: build.dependencies }
each(b):
$eval: as_slugid(b)
created: { $fromNow: '0 sec' }
deadline: { $fromNow: '1 day' }
expires: { $fromNow: '6 months' }
scopes:
- "index:insert-task:project.deepspeech.*"

payload:
maxRunTime: { $eval: to_int(build.maxRunTime) }

command:
- - "/bin/bash"
- "--login"
- "-cxe"
- $let:
taskIndexExpire: { $fromNow: '6 months' }
in: >
export TASKCLUSTER_ARTIFACTS="$(pwd)/public/" &&
export TASKCLUSTER_TASKDIR="$(pwd)" &&
export TASKCLUSTER_ORIG_TASKDIR="$(pwd)" &&
export LC_ALL=C &&
export MACOSX_DEPLOYMENT_TARGET=10.10 &&
export SDKROOT=/Library/Developer/CommandLineTools/SDKs/MacOSX10.14.sdk/ &&
export HOMEBREW_NO_AUTO_UPDATE=1 &&
env &&
mkdir -p $TASKCLUSTER_ARTIFACTS/ &&
swig_bin=`curl -sSIL -o /dev/null -w "%{http_code}" https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.swig.${build.swig_system}.${build.swig_arch}.${system.swig.sha1}/artifacts/public/ds-swig.tar.gz` &&
if [ "$swig_bin" != "200" ]; then
git clone --quiet ${system.swig.repo} $TASKCLUSTER_TASKDIR/swig/ &&
cd $TASKCLUSTER_TASKDIR/swig/ && git checkout --quiet ${system.swig.sha1} &&
$TASKCLUSTER_TASKDIR/swig/${build.scripts.build} &&
$TASKCLUSTER_TASKDIR/swig/${build.scripts.package} &&
curl -sSL --fail -X PUT \
-H "Content-Type: application/json" \
-d "{\"taskId\":\"$TASK_ID\",\"rank\":0,\"expires\":\"${taskIndexExpire}\",\"data\":{}}" \
"http://taskcluster/index/v1/task/project.deepspeech.swig.${build.swig_system}.${build.swig_arch}.${system.swig.sha1}"
fi;

artifacts:
- type: "directory"
path: "public/"
expires: { $fromNow: '6 months' }

metadata:
name: ${build.metadata.name}
description: ${build.metadata.description}
owner: ${event.head.user.email}
source: ${event.head.repo.url}

@ -1,8 +1,13 @@
build:
template_file: swig-linux-opt-base.tyml
docker_image: "ubuntu:14.04"
swig_system: "linux"
swig_arch: "amd64"
template_file: generic_tc_caching-linux-opt-base.tyml
docker_image: "ubuntu:16.04"
build_or_cache:
repo: "${system.swig.repo}"
sha: "${system.swig.sha1}"
dir: "swig"
cache:
artifact_url: "${system.swig_build.linux.url}"
artifact_namespace: "${system.swig_build.linux.namespace}"
system_setup:
>
apt-get -qq -y install autoconf automake bison build-essential

@ -1,54 +0,0 @@
$if: 'event.event in build.allowed'
then:
taskId: ${taskcluster.taskId}
provisionerId: ${taskcluster.docker.provisionerId}
workerType: ${taskcluster.docker.workerType}
taskGroupId: ${taskcluster.taskGroupId}
schedulerId: ${taskcluster.schedulerId}
created: { $fromNow: '0 sec' }
deadline: { $fromNow: '1 day' }
expires: { $fromNow: '6 months' }
scopes:
- "index:insert-task:project.deepspeech.*"

payload:
maxRunTime: { $eval: to_int(build.maxRunTime) }
image: ${build.docker_image}

features:
taskclusterProxy: true

command:
- "/bin/bash"
- "--login"
- "-cxe"
- $let:
extraSystemSetup: { $eval: strip(str(build.system_setup)) }
taskIndexExpire: { $fromNow: '6 months' }
in: >
(apt-get -qq -y remove --purge ubuntu-advantage-tools || true) &&
apt-get -qq update && apt-get -qq -y install curl git && ${extraSystemSetup};
swig_bin=`curl -sSIL -o /dev/null -w "%{http_code}" https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.swig.${build.swig_system}.${build.swig_arch}.${system.swig.sha1}/artifacts/public/ds-swig.tar.gz` &&
if [ "$swig_bin" != "200" ]; then
adduser --system --home ${system.homedir.linux} ${system.username} && cd ${system.homedir.linux}/ &&
echo -e "#!/bin/bash\nset -xe\n env && id && (git clone --quiet ${system.swig.repo} ~/swig/ && cd ~/swig/ && git checkout --quiet ${system.swig.sha1})" > /tmp/clone.sh && chmod +x /tmp/clone.sh &&
sudo -H -u ${system.username} /bin/bash /tmp/clone.sh &&
sudo -H -u ${system.username} --preserve-env /bin/bash ${system.homedir.linux}/swig/${build.scripts.build} &&
sudo -H -u ${system.username} /bin/bash ${system.homedir.linux}/swig/${build.scripts.package} &&
curl -sSL --fail -X PUT \
-H "Content-Type: application/json" \
-d "{\"taskId\":\"$TASK_ID\",\"rank\":0,\"expires\":\"${taskIndexExpire}\",\"data\":{}}" \
"http://taskcluster/index/v1/task/project.deepspeech.swig.${build.swig_system}.${build.swig_arch}.${system.swig.sha1}"
fi;

artifacts:
"public":
type: "directory"
path: "/tmp/artifacts/"
expires: { $fromNow: '6 months' }

metadata:
name: ${build.metadata.name}
description: ${build.metadata.description}
owner: ${event.head.user.email}
source: ${event.head.repo.url}

@ -1,8 +1,13 @@
build:
template_file: swig-linux-opt-base.tyml
template_file: generic_tc_caching-linux-opt-base.tyml
docker_image: "ubuntu:18.04"
swig_system: "win"
swig_arch: "amd64"
build_or_cache:
repo: "${system.swig.repo}"
sha: "${system.swig.sha1}"
dir: "swig"
cache:
artifact_url: "${system.swig_build.win.url}"
artifact_namespace: "${system.swig_build.win.namespace}"
system_setup:
>
apt-get -qq -y install autoconf automake bison build-essential mingw-w64 &&

@ -42,10 +42,6 @@ if [ "${OS}" = "Darwin" ]; then
|
||||
fi;
|
||||
fi;
|
||||
|
||||
PY37_OPENSSL_DIR="${PYENV_ROOT}/ssl-xenial"
|
||||
export PY37_LDPATH="${PY37_OPENSSL_DIR}/usr/lib/"
|
||||
export LD_LIBRARY_PATH=${PY37_LDPATH}:$LD_LIBRARY_PATH
|
||||
|
||||
export TASKCLUSTER_ARTIFACTS=${TASKCLUSTER_ARTIFACTS:-/tmp/artifacts}
|
||||
export TASKCLUSTER_TMP_DIR=${TASKCLUSTER_TMP_DIR:-/tmp}
|
||||
|
||||
|
@ -112,6 +112,11 @@ android_setup_emulator()

local _flavor=$1
local _api_level=${2:-android-25}
local _api_kind=${3:-google_apis}

if [ -z "${_api_kind}" ]; then
_api_kind="google_apis"
fi

export PATH=${ANDROID_SDK_HOME}/tools/bin/:${ANDROID_SDK_HOME}/platform-tools/:$PATH
export DS_BINARY_PREFIX="adb shell LD_LIBRARY_PATH=${ANDROID_TMP_DIR}/ds/ ${ANDROID_TMP_DIR}/ds/"
@ -123,11 +128,11 @@ android_setup_emulator()
android_install_sdk_platform "${_api_level}"

# Same, yes in case of license
yes | sdkmanager --install "system-images;${_api_level};google_apis;${_flavor}"
yes | sdkmanager --install "system-images;${_api_level};${_api_kind};${_flavor}"

android_sdk_accept_licenses

avdmanager create avd --name "${_flavor}-ds-pixel-${_api_level}" --device 17 --package "system-images;${_api_level};google_apis;${_flavor}"
avdmanager create avd --name "${_flavor}-ds-pixel-${_api_level}" --device 17 --package "system-images;${_api_level};${_api_kind};${_flavor}"
}

android_start_emulator()

@ -22,7 +22,8 @@ popd
set +o pipefail

pushd ${HOME}/DeepSpeech/ds/
time ./bin/run-tc-signal_augmentations.sh
time ./bin/run-tc-sample_augmentations.sh
time ./bin/run-tc-graph_augmentations.sh
popd

virtualenv_deactivate "${pyalias}" "deepspeech"

@ -231,7 +231,7 @@ do_deepspeech_netframework_build()
# Setup dependencies
nuget install DeepSpeechConsole/packages.config -OutputDirectory packages/

MSBUILD="$(cygpath 'C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\MSBuild\15.0\Bin\MSBuild.exe')"
MSBUILD="$(cygpath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\MSBuild\Current\Bin\MSBuild.exe')"

# We need MSYS2_ARG_CONV_EXCL='/' otherwise the '/' of CLI parameters gets mangled and disappears
# We build the .NET Client for .NET Framework v4.5,v4.6,v4.7
@ -270,7 +270,7 @@ do_deepspeech_netframework_wpf_example_build()
# Setup dependencies
nuget install DeepSpeechWPF/packages.config -OutputDirectory DeepSpeechWPF/packages/

MSBUILD="$(cygpath 'C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\MSBuild\15.0\Bin\MSBuild.exe')"
MSBUILD="$(cygpath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\MSBuild\Current\Bin\MSBuild.exe')"

# We need MSYS2_ARG_CONV_EXCL='/' otherwise the '/' of CLI parameters gets mangled and disappears
# Build WPF example

@ -191,47 +191,6 @@ pyenv_install()
fi
}

# Hack to extract Ubuntu's 16.04 libssl 1.0.2 packages and use them during the
# local build of Python.
#
# Avoid (risky) upgrade of base system, allowing to keep one task build that
# builds all the python packages
maybe_ssl102_py37()
{
pyver=$1

unset PY37_OPENSSL

ARCH=$(uname -m)
case "${pyver}" in
3.7*|3.8*)
if [ "${OS}" = "Linux" -a "${ARCH}" = "x86_64" ]; then
if [ -d "${PY37_OPENSSL_DIR}" ]; then
rm -rf "${PY37_OPENSSL_DIR}"
fi

mkdir -p ${PY37_OPENSSL_DIR}
${WGET} -P ${TASKCLUSTER_TMP_DIR} \
http://archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl-dev_1.0.2g-1ubuntu4.15_amd64.deb \
http://archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.0.0_1.0.2g-1ubuntu4.15_amd64.deb

for deb in ${TASKCLUSTER_TMP_DIR}/libssl*.deb; do
dpkg -x ${deb} ${PY37_OPENSSL_DIR}
done;

# Python configure expects things to be under lib/
mv ${PY37_OPENSSL_DIR}/usr/include/x86_64-linux-gnu/openssl/opensslconf.h ${PY37_OPENSSL_DIR}/usr/include/openssl/
mv ${PY37_OPENSSL_DIR}/lib/x86_64-linux-gnu/lib* ${PY37_OPENSSL_DIR}/usr/lib/
mv ${PY37_OPENSSL_DIR}/usr/lib/x86_64-linux-gnu/* ${PY37_OPENSSL_DIR}/usr/lib/
ln -sfn libcrypto.so.1.0.0 ${PY37_OPENSSL_DIR}/usr/lib/libcrypto.so
ln -sfn libssl.so.1.0.0 ${PY37_OPENSSL_DIR}/usr/lib/libssl.so

export PY37_OPENSSL="--with-openssl=${PY37_OPENSSL_DIR}/usr"
fi;
;;
esac
}

maybe_numpy_min_version()
{
local pyver=$1

Some files were not shown because too many files have changed in this diff.