commit 09b04a8f83

@@ -23,3 +23,8 @@ runs:
        which python3
        python3 --version
        python3 -c "import sysconfig; print(sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET'))"
+    - shell: bash
+      name: Set up venv with upstream Python
+      run: |
+        python3 -m venv /tmp/venv
+        echo "/tmp/venv/bin" >> $GITHUB_PATH
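The added step relies on GITHUB_PATH semantics: entries written there are prepended to PATH for subsequent steps only. As a minimal sketch (hypothetical, not part of this commit), a later step in the same action would then pick up the venv interpreter automatically:

    # Hypothetical follow-up step: by the time it runs, /tmp/venv/bin is
    # already at the front of PATH, so python3 resolves to the venv.
    - shell: bash
      name: Check that the venv Python is used
      run: |
        which python3          # expected to print /tmp/venv/bin/python3
        python3 -m pip --version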
.github/actions/numpy_vers/action.yml
@@ -16,6 +16,7 @@ runs:
  steps:
    - id: numpy
      run: |
        set -ex
        NUMPY_BUILD_VERSION="==1.7.0"
        NUMPY_DEP_VERSION=">=1.7.0"

@@ -62,7 +63,7 @@ runs:
            ;;

          # TODO: 'Windows*' might not be good
-          Windows*:x86_64)
+          MSYS_NT-10.0-17763:x86_64)
            case "${{ inputs.pyver }}" in
              3.5*)
                NUMPY_BUILD_VERSION="==1.11.0"
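The build_version/dep_version strings computed here are consumed through step outputs. For reference, the Windows Python-build job further down in this diff wires the action up like this (abridged from that job):

      - id: get_numpy
        uses: ./.github/actions/numpy_vers
        with:
          pyver: ${{ matrix.python-version }}
      - uses: ./.github/actions/python-build
        with:
          build_flavor: ${{ matrix.build-flavor }}
          numpy_build: "${{ steps.get_numpy.outputs.build_version }}"
          numpy_dep: "${{ steps.get_numpy.outputs.dep_version }}"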
.github/actions/python-build/action.yml
@@ -33,9 +33,9 @@ runs:
        mkdir -p wheels
      shell: bash
    - run: |
-        PROJECT_NAME="deepspeech"
+        PROJECT_NAME="stt"
        if [ "${{ inputs.build_flavor }}" = "tflite" ]; then
-          PROJECT_NAME="deepspeech-tflite"
+          PROJECT_NAME="stt-tflite"
        fi

        NUMPY_BUILD_VERSION="${{ inputs.numpy_build }}" \
.github/actions/win-install-sox/action.yml (new file)
@@ -0,0 +1,12 @@
+name: "Install SoX and add to PATH"
+description: "Install SoX and add to PATH"
+runs:
+  using: "composite"
+  steps:
+    - run: |
+        set -ex
+        wget https://sourceforge.net/projects/sox/files/sox/14.4.2/sox-14.4.2-win32.zip/download -O sox-14.4.2-win32.zip
+        "C:/Program Files/7-Zip/7z.exe" x -o`pwd`/bin/ -tzip -aoa sox-14.4.2-win32.zip
+        rm sox-*zip
+        echo "`pwd`/bin/sox-14.4.2/" >> $GITHUB_PATH
+      shell: bash
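This composite action assumes 7-Zip is already present at C:/Program Files/7-Zip/7z.exe on the windows-2019 runner and that the step runs under a bash/MSYS2 shell (wget and backtick substitution). The Windows test jobs later in this diff pull SoX in with a single step, for example:

    steps:
      # SoX ends up on PATH for every later step in the job.
      - uses: ./.github/actions/win-install-sox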
.github/workflows/macOS-amd64.yml
@@ -135,8 +135,13 @@ jobs:

          ./bin/run-ci-ldc93s1_new.sh 249 ${bits}
          ./bin/run-ci-ldc93s1_tflite.sh ${bits}
-      - run: |
-          curl -vsSL https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/macOS.amd64.convert_graphdef_memmapped_format.xz | xz -d > /tmp/convert_graphdef_memmapped_format
+      - name: Download convert_graphdef_memmapped_format tool
+        run: |
+          set -ex
+          wget -O temp.zip https://github.com/coqui-ai/STT/releases/download/v0.9.3/convert_graphdef_memmapped_format.macOS.amd64.zip
+          unzip temp.zip
+          rm temp.zip
+          mv convert_graphdef_memmapped_format /tmp
          chmod +x /tmp/convert_graphdef_memmapped_format
          /tmp/convert_graphdef_memmapped_format --in_graph=/tmp/train/output_graph.pb --out_graph=/tmp/train/output_graph.pbmm
      - run: |
@@ -208,7 +213,7 @@ jobs:
          path: ${{ github.workspace }}/artifacts/home.tar.xz
        if: needs.tensorflow_opt-macOS.outputs.status == 'missing'
  build-lib_macOS:
-    name: "Build libdeepspeech.so and deepspeech C++ binary"
+    name: "Build libstt+client"
    runs-on: macos-10.15
    needs: [ build-tensorflow-macOS, tensorflow_opt-macOS ]
    strategy:
@@ -247,8 +252,8 @@ jobs:
          path: ${{ github.workspace }}/artifacts/native_client.tar.xz
      - uses: actions/upload-artifact@v2
        with:
-          name: "libdeepspeech.${{ matrix.build-flavor }}.zip"
-          path: ${{ github.workspace }}/artifacts/libdeepspeech.zip
+          name: "libstt.${{ matrix.build-flavor }}.zip"
+          path: ${{ github.workspace }}/artifacts/libstt.zip
  build-python-macOS:
    name: "Build python bindings for macOS"
    runs-on: macos-10.15
@@ -296,7 +301,7 @@ jobs:
          numpy_dep: "${{ steps.get_numpy.outputs.dep_version }}"
      - uses: actions/upload-artifact@v2
        with:
-          name: "deepspeech-${{ matrix.build-flavor }}-${{ matrix.python-version }}.whl"
+          name: "stt-${{ matrix.build-flavor }}-${{ matrix.python-version }}.whl"
          path: ${{ github.workspace }}/wheels/*.whl
  build-nodejs-macOS:
    name: "Build NodeJS and ElectronJS for macOS"
@@ -339,8 +344,8 @@ jobs:
          path: ${{ github.workspace }}/native_client/javascript/wrapper.tar.gz
      - uses: actions/upload-artifact@v2
        with:
-          name: "deepspeech-${{ matrix.build-flavor }}.tgz"
-          path: ${{ github.workspace }}/native_client/javascript/deepspeech-*.tgz
+          name: "stt-${{ matrix.build-flavor }}.tgz"
+          path: ${{ github.workspace }}/native_client/javascript/stt-*.tgz
  test-cpp-macOS:
    name: "Test C++ binary on macOS"
    runs-on: macos-10.15
@@ -353,9 +358,9 @@ jobs:
        bitrate: ["8k", "16k"]
    env:
      TASKCLUSTER_TMP_DIR: ${{ github.workspace }}/tmp/
-      DEEPSPEECH_PROD_MODEL: https://github.com/reuben/DeepSpeech/releases/download/v0.7.0-alpha.3/output_graph.pb
-      DEEPSPEECH_PROD_MODEL_MMAP: https://github.com/reuben/DeepSpeech/releases/download/v0.7.0-alpha.3/output_graph.pbmm
-      DEEPSPEECH_TEST_MODEL: ${{ github.workspace }}/tmp/output_graph.pb
+      STT_PROD_MODEL: https://github.com/reuben/STT/releases/download/v0.7.0-alpha.3/output_graph.pb
+      STT_PROD_MODEL_MMAP: https://github.com/reuben/STT/releases/download/v0.7.0-alpha.3/output_graph.pbmm
+      STT_TEST_MODEL: ${{ github.workspace }}/tmp/output_graph.pb
      EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v2.3.0-6-g23ad988"
    steps:
      - uses: actions/checkout@v2
@@ -395,9 +400,9 @@ jobs:
        bitrate: ["8k", "16k"]
    env:
      TASKCLUSTER_TMP_DIR: ${{ github.workspace }}/tmp/
-      DEEPSPEECH_PROD_MODEL: https://github.com/reuben/DeepSpeech/releases/download/v0.7.0-alpha.3/output_graph.pb
-      DEEPSPEECH_PROD_MODEL_MMAP: https://github.com/reuben/DeepSpeech/releases/download/v0.7.0-alpha.3/output_graph.pbmm
-      DEEPSPEECH_TEST_MODEL: ${{ github.workspace }}/tmp/output_graph.pb
+      STT_PROD_MODEL: https://github.com/reuben/STT/releases/download/v0.7.0-alpha.3/output_graph.pb
+      STT_PROD_MODEL_MMAP: https://github.com/reuben/STT/releases/download/v0.7.0-alpha.3/output_graph.pbmm
+      STT_TEST_MODEL: ${{ github.workspace }}/tmp/output_graph.pb
      EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v2.3.0-6-g23ad988"
    steps:
      - uses: actions/checkout@v2
@@ -408,7 +413,7 @@ jobs:
          python-version: ${{ matrix.python-version }}
      - uses: actions/download-artifact@v2
        with:
-          name: "deepspeech-${{ matrix.build-flavor }}-${{ matrix.python-version }}.whl"
+          name: "stt-${{ matrix.build-flavor }}-${{ matrix.python-version }}.whl"
          path: ${{ env.TASKCLUSTER_TMP_DIR }}
      - uses: actions/download-artifact@v2
        with:
@@ -420,7 +425,7 @@ jobs:
        if: matrix.models == 'test'
      - run: |
          ls -hal ${{ env.TASKCLUSTER_TMP_DIR }}/
-          pip3 install --only-binary :all: --upgrade ${{ env.TASKCLUSTER_TMP_DIR }}/deepspeech*.whl
+          pip3 install --only-binary :all: --upgrade ${{ env.TASKCLUSTER_TMP_DIR }}/stt*.whl
      - uses: ./.github/actions/run-tests
        with:
          runtime: "python"
@@ -441,9 +446,9 @@ jobs:
        bitrate: ["16k"]
    env:
      TASKCLUSTER_TMP_DIR: ${{ github.workspace }}/tmp/
-      DEEPSPEECH_PROD_MODEL: https://github.com/reuben/DeepSpeech/releases/download/v0.7.0-alpha.3/output_graph.pb
-      DEEPSPEECH_PROD_MODEL_MMAP: https://github.com/reuben/DeepSpeech/releases/download/v0.7.0-alpha.3/output_graph.pbmm
-      DEEPSPEECH_TEST_MODEL: ${{ github.workspace }}/tmp/output_graph.pb
+      STT_PROD_MODEL: https://github.com/reuben/STT/releases/download/v0.7.0-alpha.3/output_graph.pb
+      STT_PROD_MODEL_MMAP: https://github.com/reuben/STT/releases/download/v0.7.0-alpha.3/output_graph.pbmm
+      STT_TEST_MODEL: ${{ github.workspace }}/tmp/output_graph.pb
      EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v2.3.0-6-g23ad988"
    steps:
      - uses: actions/checkout@v2
@@ -454,7 +459,7 @@ jobs:
          node-version: ${{ matrix.nodejs-version }}
      - uses: actions/download-artifact@v2
        with:
-          name: "deepspeech-${{ matrix.build-flavor }}.tgz"
+          name: "stt-${{ matrix.build-flavor }}.tgz"
          path: ${{ env.TASKCLUSTER_TMP_DIR }}
      - uses: actions/download-artifact@v2
        with:
@@ -466,7 +471,7 @@ jobs:
        if: matrix.models == 'test'
      - run: |
          ls -hal ${{ env.TASKCLUSTER_TMP_DIR }}/
-          npm install ${{ env.TASKCLUSTER_TMP_DIR }}/deepspeech*.tgz
+          npm install ${{ env.TASKCLUSTER_TMP_DIR }}/stt*.tgz
      - uses: ./.github/actions/run-tests
        with:
          runtime: "node"
@@ -486,9 +491,9 @@ jobs:
        bitrate: ["16k"]
    env:
      TASKCLUSTER_TMP_DIR: ${{ github.workspace }}/tmp/
-      DEEPSPEECH_PROD_MODEL: https://github.com/reuben/DeepSpeech/releases/download/v0.7.0-alpha.3/output_graph.pb
-      DEEPSPEECH_PROD_MODEL_MMAP: https://github.com/reuben/DeepSpeech/releases/download/v0.7.0-alpha.3/output_graph.pbmm
-      DEEPSPEECH_TEST_MODEL: ${{ github.workspace }}/tmp/output_graph.pb
+      STT_PROD_MODEL: https://github.com/reuben/STT/releases/download/v0.7.0-alpha.3/output_graph.pb
+      STT_PROD_MODEL_MMAP: https://github.com/reuben/STT/releases/download/v0.7.0-alpha.3/output_graph.pbmm
+      STT_TEST_MODEL: ${{ github.workspace }}/tmp/output_graph.pb
      EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v2.3.0-6-g23ad988"
    steps:
      - uses: actions/checkout@v2
@@ -499,7 +504,7 @@ jobs:
          node-version: 12
      - uses: actions/download-artifact@v2
        with:
-          name: "deepspeech-${{ matrix.build-flavor }}.tgz"
+          name: "stt-${{ matrix.build-flavor }}.tgz"
          path: ${{ env.TASKCLUSTER_TMP_DIR }}
      - uses: actions/download-artifact@v2
        with:
@@ -511,7 +516,7 @@ jobs:
        if: matrix.models == 'test'
      - run: |
          ls -hal ${{ env.TASKCLUSTER_TMP_DIR }}/
-          npm install ${{ env.TASKCLUSTER_TMP_DIR }}/deepspeech*.tgz
+          npm install ${{ env.TASKCLUSTER_TMP_DIR }}/stt*.tgz
      - run: |
          npm install electron@${{ matrix.electronjs-version }}
      - uses: ./.github/actions/run-tests
.github/workflows/windows-amd64.yml (new file)
@@ -0,0 +1,660 @@
name: "Windows amd64"
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
env:
|
||||
TASKCLUSTER_TASK_DIR: ${{ github.workspace }}
|
||||
TASKCLUSTER_ARTIFACTS: ${{ github.workspace }}/artifacts
|
||||
TC_MSYS_VERSION: MSYS_NT-10.0-17763
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
jobs:
|
||||
swig_Windows:
|
||||
name: "Build SWIG for Windows"
|
||||
runs-on: windows-2019
|
||||
steps:
|
||||
- name: Switch git-bash shell to MSYS2 shell by adding MSYS2 path to PATH front
|
||||
run: echo "D:\a\_temp\msys\msys64\usr\bin" >> $GITHUB_PATH
|
||||
- uses: msys2/setup-msys2@v2
|
||||
with:
|
||||
msystem: MSYS
|
||||
update: true
|
||||
install: >-
|
||||
autoconf
|
||||
automake
|
||||
bison
|
||||
gcc
|
||||
git
|
||||
make
|
||||
pcre-devel
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
repository: "swig/swig"
|
||||
ref: "90cdbee6a69d13b39d734083b9f91069533b0d7b"
|
||||
- run: |
|
||||
mkdir -p build-static/
|
||||
- run: |
|
||||
sh autogen.sh
|
||||
./configure \
|
||||
--prefix=`pwd`/build-static/ \
|
||||
--program-prefix=ds-
|
||||
- run: |
|
||||
make -j
|
||||
- run: |
|
||||
make install
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ github.job }}
|
||||
path: ${{ github.workspace }}/build-static/
|
||||
swig_Linux:
|
||||
name: "Build SWIG for Linux"
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
repository: "swig/swig"
|
||||
ref: "90cdbee6a69d13b39d734083b9f91069533b0d7b"
|
||||
- run: |
|
||||
sudo apt-get install -y --no-install-recommends autoconf automake build-essential bison libpcre2-dev
|
||||
- run: |
|
||||
mkdir -p build-static/
|
||||
- run: |
|
||||
sh autogen.sh
|
||||
./configure \
|
||||
--prefix=${{ github.workspace }}/build-static/ \
|
||||
--program-prefix=ds-
|
||||
- run: |
|
||||
make -j
|
||||
- run: |
|
||||
make install
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ github.job }}
|
||||
path: ${{ github.workspace }}/build-static/
|
||||
build-ctc-decoder-windows:
|
||||
name: "Build CTC decoder Windows Python package for testing"
|
||||
needs: [swig_Windows]
|
||||
runs-on: windows-2019
|
||||
steps:
|
||||
- name: Switch git-bash shell to MSYS2 shell by adding MSYS2 path to PATH front
|
||||
run: echo "D:\a\_temp\msys\msys64\usr\bin" >> $GITHUB_PATH
|
||||
- uses: msys2/setup-msys2@v2
|
||||
with:
|
||||
msystem: MSYS
|
||||
path-type: inherit
|
||||
update: true
|
||||
install: >-
|
||||
git
|
||||
make
|
||||
- uses: ilammy/msvc-dev-cmd@v1
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.7.9
|
||||
- run: |
|
||||
python --version
|
||||
python -m pip --version
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "swig_Windows"
|
||||
path: ${{ github.workspace }}/native_client/ds-swig/
|
||||
- name: Link ds-swig into swig
|
||||
run: |
|
||||
set -ex
|
||||
ls -hal native_client/ds-swig/bin
|
||||
ln -s ds-swig.exe native_client/ds-swig/bin/swig.exe
|
||||
chmod +x native_client/ds-swig/bin/ds-swig.exe native_client/ds-swig/bin/swig.exe
|
||||
- name: Remove /usr/bin/link conflicting with MSVC link.exe
|
||||
run: |
|
||||
rm /usr/bin/link
|
||||
- run: |
|
||||
make -C native_client/ctcdecode/ \
|
||||
NUM_PROCESSES=$(nproc) \
|
||||
bindings
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: "ds_ctcdecoder-windows-test.whl"
|
||||
path: ${{ github.workspace }}/native_client/ctcdecode/dist/*.whl
|
||||
- run: |
|
||||
make -C native_client/ctcdecode clean-keep-third-party
|
||||
build-ctc-decoder-linux:
|
||||
name: "Build CTC decoder Linux Python package for testing"
|
||||
needs: [swig_Linux]
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.6
|
||||
- run: |
|
||||
python --version
|
||||
pip --version
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "swig_Linux"
|
||||
path: ${{ github.workspace }}/native_client/ds-swig/
|
||||
- run: |
|
||||
ls -hal ${{ github.workspace }}/native_client/ds-swig/bin
|
||||
ln -s ds-swig ${{ github.workspace }}/native_client/ds-swig/bin/swig
|
||||
chmod +x ${{ github.workspace }}/native_client/ds-swig/bin/ds-swig ${{ github.workspace }}/native_client/ds-swig/bin/swig
|
||||
- run: |
|
||||
make -C native_client/ctcdecode/ \
|
||||
NUM_PROCESSES=$(nproc) \
|
||||
bindings
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: "ds_ctcdecoder-linux-test.whl"
|
||||
path: ${{ github.workspace }}/native_client/ctcdecode/dist/*.whl
|
||||
- run: |
|
||||
make -C native_client/ctcdecode clean-keep-third-party
|
||||
train-test-model:
|
||||
name: "Train a test model"
|
||||
needs: ["build-ctc-decoder-linux"]
|
||||
runs-on: ubuntu-20.04
|
||||
strategy:
|
||||
matrix:
|
||||
build-flavor: ["tf", "tflite"]
|
||||
bitrate: ["8k", "16k"]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.6
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "ds_ctcdecoder-linux-test.whl"
|
||||
- run: |
|
||||
python --version
|
||||
pip --version
|
||||
- run: |
|
||||
pip install --upgrade pip==19.3.1 setuptools==45.0.0 wheel==0.33.6
|
||||
- run: |
|
||||
pip install ds_ctcdecoder-*-cp36-cp36m-manylinux1_x86_64.whl
|
||||
DS_NODECODER=y pip install --upgrade .
|
||||
- name: Run training script
|
||||
run: |
|
||||
set -ex
|
||||
bits=""
|
||||
if [ "${{ matrix.bitrate }}" = "8k" ]; then
|
||||
bits=8000
|
||||
fi
|
||||
if [ "${{ matrix.bitrate }}" = "16k" ]; then
|
||||
bits=16000
|
||||
fi
|
||||
|
||||
# Easier to rename so that we can exercise the LDC93S1 importer code to
# generate the CSV file.
|
||||
echo "Moving ${bits} to LDC93S1.wav"
|
||||
mv data/smoke_test/LDC93S1_pcms16le_1_${bits}.wav data/smoke_test/LDC93S1.wav
|
||||
|
||||
./bin/run-ci-ldc93s1_new.sh 249 ${bits}
|
||||
if [ "${{ matrix.build-flavor }}" = "tflite" ]; then
|
||||
./bin/run-ci-ldc93s1_tflite.sh ${bits}
|
||||
fi
|
||||
- name: Download convert_graphdef_memmapped_format tool
|
||||
run: |
|
||||
set -ex
|
||||
wget -O temp.zip https://github.com/coqui-ai/STT/releases/download/v0.9.3/convert_graphdef_memmapped_format.linux.amd64.zip
|
||||
unzip temp.zip
|
||||
rm temp.zip
|
||||
mv convert_graphdef_memmapped_format /tmp
|
||||
chmod +x /tmp/convert_graphdef_memmapped_format
|
||||
/tmp/convert_graphdef_memmapped_format --in_graph=/tmp/train/output_graph.pb --out_graph=/tmp/train/output_graph.pbmm
|
||||
if: matrix.build-flavor == 'tf'
|
||||
- run: |
|
||||
cp /tmp/train*/output_graph.* /tmp/
|
||||
- run: |
|
||||
tar -cf - \
|
||||
-C /tmp/ckpt/ . \
|
||||
| xz -9 -T0 > /tmp/checkpoint.tar.xz
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: "test-model.${{ matrix.build-flavor }}-${{ matrix.bitrate }}.zip"
|
||||
path: /tmp/output_graph.*
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: "test-checkpoint.${{ matrix.build-flavor }}-${{ matrix.bitrate }}.zip"
|
||||
path: /tmp/checkpoint.tar.xz
|
||||
tensorflow_opt-Windows:
|
||||
name: "Check cache for TensorFlow"
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
status: ${{ steps.check_artifact_exists.outputs.status }}
|
||||
cache_key: ${{ steps.get_cache_key.outputs.key }}
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
- id: get_cache_key
|
||||
uses: ./.github/actions/get_cache_key
|
||||
with:
|
||||
extras: "7"
|
||||
- id: check_artifact_exists
|
||||
uses: ./.github/actions/check_artifact_exists
|
||||
with:
|
||||
name: ${{ steps.get_cache_key.outputs.key }}
|
||||
build-tensorflow-Windows:
|
||||
name: "Build TensorFlow (opt) for Windows"
|
||||
needs: tensorflow_opt-Windows
|
||||
runs-on: windows-2019
|
||||
steps:
|
||||
- run: true
|
||||
if: needs.tensorflow_opt-Windows.outputs.status == 'found'
|
||||
- uses: msys2/setup-msys2@v2
|
||||
with:
|
||||
msystem: MSYS
|
||||
path-type: inherit
|
||||
update: true
|
||||
install: >-
|
||||
git
|
||||
patch
|
||||
tar
|
||||
unzip
|
||||
zip
|
||||
if: needs.tensorflow_opt-Windows.outputs.status == 'missing'
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.7.9
|
||||
if: needs.tensorflow_opt-Windows.outputs.status == 'missing'
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
submodules: 'recursive'
|
||||
if: needs.tensorflow_opt-Windows.outputs.status == 'missing'
|
||||
# It's important that this PATH change only happens *after* the checkout
# above, because otherwise the checkout fails when persisting the
# credentials for submodules due to using MSYS2 Git
|
||||
- name: Switch git-bash shell to MSYS2 shell by adding MSYS2 path to PATH front
|
||||
run: echo "D:\a\_temp\msys\msys64\usr\bin" >> $GITHUB_PATH
|
||||
if: needs.tensorflow_opt-Windows.outputs.status == 'missing'
|
||||
- run: ./ci_scripts/tf-setup.sh
|
||||
if: needs.tensorflow_opt-Windows.outputs.status == 'missing'
|
||||
- run: ./ci_scripts/tf-build.sh "--windows-cpu"
|
||||
if: needs.tensorflow_opt-Windows.outputs.status == 'missing'
|
||||
- run: ./ci_scripts/tf-package.sh
|
||||
if: needs.tensorflow_opt-Windows.outputs.status == 'missing'
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: ${{ needs.tensorflow_opt-Windows.outputs.cache_key }}
|
||||
path: ${{ github.workspace }}/artifacts/home.tar.xz
|
||||
if: needs.tensorflow_opt-Windows.outputs.status == 'missing'
|
||||
build-lib_Windows:
|
||||
name: "Build libstt+client"
|
||||
runs-on: windows-2019
|
||||
needs: [build-tensorflow-Windows, tensorflow_opt-Windows]
|
||||
strategy:
|
||||
matrix:
|
||||
build-flavor: ["tf", "tflite"]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: ilammy/msvc-dev-cmd@v1
|
||||
- name: Switch git-bash shell to MSYS2 shell by adding MSYS2 path to PATH front
|
||||
run: echo "D:\a\_temp\msys\msys64\usr\bin" >> $GITHUB_PATH
|
||||
- uses: msys2/setup-msys2@v2
|
||||
with:
|
||||
msystem: MSYS
|
||||
update: true
|
||||
install: >-
|
||||
git
|
||||
make
|
||||
patch
|
||||
pkg-config
|
||||
tar
|
||||
unzip
|
||||
zip
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: ${{ needs.tensorflow_opt-Windows.outputs.cache_key }}
|
||||
path: ${{ github.workspace }}/
|
||||
if: needs.tensorflow_opt-Windows.outputs.status == 'missing'
|
||||
- uses: ./.github/actions/check_artifact_exists
|
||||
with:
|
||||
name: ${{ needs.tensorflow_opt-Windows.outputs.cache_key }}
|
||||
path: ${{ github.workspace }}/
|
||||
download: true
|
||||
if: needs.tensorflow_opt-Windows.outputs.status == 'found'
|
||||
- run: |
|
||||
"C:/Program Files/7-Zip/7z.exe" x home.tar.xz -so | "C:/Program Files/7-Zip/7z.exe" x -aos -si -ttar -o`pwd`
|
||||
rm home.tar.xz
|
||||
- run: |
|
||||
git status
|
||||
- run: ./ci_scripts/host-build.sh ${{ matrix.build-flavor }}
|
||||
- run: ./ci_scripts/package.sh
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: "native_client.${{ matrix.build-flavor }}.tar.xz"
|
||||
path: ${{ github.workspace }}/artifacts/native_client.tar.xz
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: "libstt.${{ matrix.build-flavor }}.zip"
|
||||
path: ${{ github.workspace }}/artifacts/libstt.zip
|
||||
build-python-Windows:
|
||||
name: "Build Python bindings for Windows"
|
||||
runs-on: windows-2019
|
||||
needs: [build-lib_Windows, swig_Windows]
|
||||
strategy:
|
||||
matrix:
|
||||
build-flavor: ["tf", "tflite"]
|
||||
python-version: [3.6.8, 3.7.9, 3.8.8, 3.9.2]
|
||||
steps:
|
||||
- name: Switch git-bash shell to MSYS2 shell by adding MSYS2 path to PATH front
|
||||
run: echo "D:\a\_temp\msys\msys64\usr\bin" >> $GITHUB_PATH
|
||||
- uses: msys2/setup-msys2@v2
|
||||
with:
|
||||
msystem: MSYS
|
||||
path-type: inherit
|
||||
update: true
|
||||
install: >-
|
||||
make
|
||||
- uses: ilammy/msvc-dev-cmd@v1
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "native_client.${{ matrix.build-flavor }}.tar.xz"
|
||||
path: ${{ github.workspace }}/tensorflow/bazel-bin/native_client/
|
||||
- run: |
|
||||
pushd tensorflow/bazel-bin/native_client/
|
||||
"C:/Program Files/7-Zip/7z.exe" x native_client.tar.xz -so | "C:/Program Files/7-Zip/7z.exe" x -aoa -si -ttar -o`pwd`
|
||||
ls -hal
|
||||
popd
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "swig_Windows"
|
||||
path: ${{ github.workspace }}/native_client/ds-swig/
|
||||
- name: Link ds-swig into swig
|
||||
run: |
|
||||
set -ex
|
||||
ls -hal native_client/ds-swig/bin
|
||||
ln -s ds-swig.exe native_client/ds-swig/bin/swig.exe
|
||||
chmod +x native_client/ds-swig/bin/ds-swig.exe native_client/ds-swig/bin/swig.exe
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Remove /usr/bin/link conflicting with MSVC link.exe
|
||||
run: |
|
||||
rm /usr/bin/link
|
||||
- id: get_numpy
|
||||
uses: ./.github/actions/numpy_vers
|
||||
with:
|
||||
pyver: ${{ matrix.python-version }}
|
||||
- uses: ./.github/actions/python-build
|
||||
with:
|
||||
build_flavor: ${{ matrix.build-flavor }}
|
||||
numpy_build: "${{ steps.get_numpy.outputs.build_version }}"
|
||||
numpy_dep: "${{ steps.get_numpy.outputs.dep_version }}"
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: "stt-${{ matrix.build-flavor }}-${{ matrix.python-version }}.whl"
|
||||
path: ${{ github.workspace }}/wheels/*.whl
|
||||
build-nodejs-Windows:
|
||||
name: "Build NodeJS/ElectronJS for Windows"
|
||||
runs-on: windows-2019
|
||||
needs: [build-lib_Windows, swig_Windows]
|
||||
strategy:
|
||||
matrix:
|
||||
build-flavor: ["tf", "tflite"]
|
||||
steps:
|
||||
- name: Switch git-bash shell to MSYS2 shell by adding MSYS2 path to PATH front
|
||||
run: echo "D:\a\_temp\msys\msys64\usr\bin" >> $GITHUB_PATH
|
||||
- uses: msys2/setup-msys2@v2
|
||||
with:
|
||||
msystem: MSYS
|
||||
path-type: inherit
|
||||
update: true
|
||||
install: >-
|
||||
make
|
||||
tar
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "native_client.${{ matrix.build-flavor }}.tar.xz"
|
||||
path: ${{ github.workspace }}/tensorflow/bazel-bin/native_client/
|
||||
- run: |
|
||||
pushd tensorflow/bazel-bin/native_client/
|
||||
"C:/Program Files/7-Zip/7z.exe" x native_client.tar.xz -so | "C:/Program Files/7-Zip/7z.exe" x -aoa -si -ttar -o`pwd`
|
||||
ls -hal
|
||||
popd
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "swig_Windows"
|
||||
path: ${{ github.workspace }}/native_client/ds-swig/
|
||||
- name: Link ds-swig into swig
|
||||
run: |
|
||||
set -ex
|
||||
ls -hal native_client/ds-swig/bin
|
||||
ln -s ds-swig.exe native_client/ds-swig/bin/swig.exe
|
||||
chmod +x native_client/ds-swig/bin/ds-swig.exe native_client/ds-swig/bin/swig.exe
|
||||
- uses: actions/setup-node@v2
|
||||
with:
|
||||
node-version: 12
|
||||
- uses: ./.github/actions/node-build
|
||||
with:
|
||||
nodejs_versions: "10.0.0 11.0.0 12.7.0 13.0.0 14.0.0 15.0.0"
|
||||
electronjs_versions: "5.0.13 6.0.12 6.1.7 7.0.1 7.1.8 8.0.1 9.0.1 9.1.0 9.2.0 10.0.0 10.1.0 11.0.0 12.0.0"
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: "nodewrapper-${{ matrix.build-flavor }}.tar.gz"
|
||||
path: ${{ github.workspace }}/native_client/javascript/wrapper.tar.gz
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: "stt-${{ matrix.build-flavor }}.tgz"
|
||||
path: ${{ github.workspace }}/native_client/javascript/stt-*.tgz
|
||||
test-cpp-Windows:
|
||||
name: "Test C++ binary on Windows"
|
||||
runs-on: windows-2019
|
||||
needs: [build-lib_Windows, train-test-model]
|
||||
strategy:
|
||||
matrix:
|
||||
build-flavor: ["tf", "tflite"]
|
||||
env:
|
||||
TASKCLUSTER_TMP_DIR: tmp/
|
||||
STT_TEST_MODEL: tmp/output_graph.pb
|
||||
EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v2.3.0-6-g23ad988"
|
||||
steps:
|
||||
- name: Switch git-bash shell to MSYS2 shell by adding MSYS2 path to PATH front
|
||||
run: echo "D:\a\_temp\msys\msys64\usr\bin" >> $GITHUB_PATH
|
||||
- uses: msys2/setup-msys2@v2
|
||||
with:
|
||||
msystem: MSYS
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
- name: Download native_client.tar.xz
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "native_client.${{ matrix.build-flavor }}.tar.xz"
|
||||
path: ${{ env.TASKCLUSTER_TMP_DIR }}
|
||||
- name: Extract native_client.tar.xz
|
||||
run: |
|
||||
mkdir -p ${{ env.TASKCLUSTER_TMP_DIR }}/ds
|
||||
pushd ${{ env.TASKCLUSTER_TMP_DIR }}/ds
|
||||
"C:/Program Files/7-Zip/7z.exe" x ../native_client.tar.xz -so | "C:/Program Files/7-Zip/7z.exe" x -aoa -si -ttar -o`pwd`
|
||||
popd
|
||||
- name: Download trained test model
|
||||
uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "test-model.${{ matrix.build-flavor }}-16k.zip"
|
||||
path: ${{ env.TASKCLUSTER_TMP_DIR }}
|
||||
- run: |
|
||||
ls -hal ${{ env.TASKCLUSTER_TMP_DIR }}/
|
||||
- uses: ./.github/actions/run-tests
|
||||
with:
|
||||
runtime: "cppwin"
|
||||
build-flavor: ${{ matrix.build-flavor }}
|
||||
bitrate: "16k"
|
||||
model-kind: ""
|
||||
test-py-Windows:
|
||||
name: "Test Python bindings on Windows"
|
||||
runs-on: windows-2019
|
||||
needs: [ build-python-Windows, train-test-model ]
|
||||
if: ${{ github.event_name == 'pull_request' }}
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: [3.6.8, 3.7.9, 3.8.8, 3.9.2]
|
||||
build-flavor: ["tf", "tflite"]
|
||||
models: ["test", "prod"]
|
||||
bitrate: ["8k", "16k"]
|
||||
env:
|
||||
TASKCLUSTER_TMP_DIR: tmp/
|
||||
STT_PROD_MODEL: https://github.com/reuben/STT/releases/download/v0.7.0-alpha.3/output_graph.pb
|
||||
STT_PROD_MODEL_MMAP: https://github.com/reuben/STT/releases/download/v0.7.0-alpha.3/output_graph.pbmm
|
||||
STT_TEST_MODEL: tmp/output_graph.pb
|
||||
EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v2.3.0-6-g23ad988"
|
||||
steps:
|
||||
- name: Switch git-bash shell to MSYS2 shell by adding MSYS2 path to PATH front
|
||||
run: echo "D:\a\_temp\msys\msys64\usr\bin" >> $GITHUB_PATH
|
||||
- uses: msys2/setup-msys2@v2
|
||||
with:
|
||||
msystem: MSYS
|
||||
path-type: inherit
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
- uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- uses: ./.github/actions/win-install-sox
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "stt-${{ matrix.build-flavor }}-${{ matrix.python-version }}.whl"
|
||||
path: ${{ env.TASKCLUSTER_TMP_DIR }}
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "test-model.${{ matrix.build-flavor }}-${{ matrix.bitrate }}.zip"
|
||||
path: ${{ env.TASKCLUSTER_TMP_DIR }}
|
||||
if: matrix.models == 'test'
|
||||
- run: |
|
||||
ls -hal ${{ env.TASKCLUSTER_TMP_DIR }}/
|
||||
if: matrix.models == 'test'
|
||||
- run: |
|
||||
ls -hal ${{ env.TASKCLUSTER_TMP_DIR }}/
|
||||
python -m pip install --only-binary :all: --upgrade ${{ env.TASKCLUSTER_TMP_DIR }}/stt*.whl
|
||||
- uses: ./.github/actions/run-tests
|
||||
with:
|
||||
runtime: "python"
|
||||
build-flavor: ${{ matrix.build-flavor }}
|
||||
bitrate: ${{ matrix.bitrate }}
|
||||
model-kind: ${{ matrix.models }}
|
||||
test-nodejs-Windows:
|
||||
name: "Test NodeJS bindings on Windows"
|
||||
runs-on: windows-2019
|
||||
needs: [ build-nodejs-Windows, train-test-model ]
|
||||
if: ${{ github.event_name == 'pull_request' }}
|
||||
strategy:
|
||||
matrix:
|
||||
nodejs-version: [10, 12, 14, 15]
|
||||
build-flavor: ["tf", "tflite"]
|
||||
models: ["test"]
|
||||
bitrate: ["16k"]
|
||||
env:
|
||||
TASKCLUSTER_TMP_DIR: tmp/
|
||||
STT_PROD_MODEL: https://github.com/reuben/STT/releases/download/v0.7.0-alpha.3/output_graph.pb
|
||||
STT_PROD_MODEL_MMAP: https://github.com/reuben/STT/releases/download/v0.7.0-alpha.3/output_graph.pbmm
|
||||
STT_TEST_MODEL: tmp/output_graph.pb
|
||||
EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v2.3.0-6-g23ad988"
|
||||
steps:
|
||||
- name: Switch git-bash shell to MSYS2 shell by adding MSYS2 path to PATH front
|
||||
run: echo "D:\a\_temp\msys\msys64\usr\bin" >> $GITHUB_PATH
|
||||
- uses: msys2/setup-msys2@v2
|
||||
with:
|
||||
msystem: MSYS
|
||||
path-type: inherit
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
- uses: actions/setup-node@v2
|
||||
with:
|
||||
node-version: ${{ matrix.nodejs-version }}
|
||||
- uses: ./.github/actions/win-install-sox
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "stt-${{ matrix.build-flavor }}.tgz"
|
||||
path: ${{ env.TASKCLUSTER_TMP_DIR }}
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "test-model.${{ matrix.build-flavor }}-${{ matrix.bitrate }}.zip"
|
||||
path: ${{ env.TASKCLUSTER_TMP_DIR }}
|
||||
if: matrix.models == 'test'
|
||||
- run: |
|
||||
ls -hal ${{ env.TASKCLUSTER_TMP_DIR }}/
|
||||
if: matrix.models == 'test'
|
||||
- run: |
|
||||
ls -hal ${{ env.TASKCLUSTER_TMP_DIR }}/
|
||||
npm install ${{ env.TASKCLUSTER_TMP_DIR }}/stt*.tgz
|
||||
- uses: ./.github/actions/run-tests
|
||||
with:
|
||||
runtime: "node"
|
||||
build-flavor: ${{ matrix.build-flavor }}
|
||||
bitrate: ${{ matrix.bitrate }}
|
||||
model-kind: ${{ matrix.models }}
|
||||
test-electronjs-Windows:
|
||||
name: "Test ElectronJS bindings on Windows"
|
||||
runs-on: windows-2019
|
||||
needs: [ build-nodejs-Windows, train-test-model ]
|
||||
if: ${{ github.event_name == 'pull_request' }}
|
||||
strategy:
|
||||
matrix:
|
||||
electronjs-version: [5.0.13, 6.1.7, 7.1.8, 8.0.1, 9.2.0, 10.1.0, 11.0.0, 12.0.0]
|
||||
build-flavor: ["tf", "tflite"]
|
||||
models: ["test"]
|
||||
bitrate: ["16k"]
|
||||
env:
|
||||
TASKCLUSTER_TMP_DIR: tmp/
|
||||
STT_PROD_MODEL: https://github.com/reuben/STT/releases/download/v0.7.0-alpha.3/output_graph.pb
|
||||
STT_PROD_MODEL_MMAP: https://github.com/reuben/STT/releases/download/v0.7.0-alpha.3/output_graph.pbmm
|
||||
STT_TEST_MODEL: tmp/output_graph.pb
|
||||
EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v2.3.0-6-g23ad988"
|
||||
steps:
|
||||
- name: Switch git-bash shell to MSYS2 shell by adding MSYS2 path to PATH front
|
||||
run: echo "D:\a\_temp\msys\msys64\usr\bin" >> $GITHUB_PATH
|
||||
- uses: msys2/setup-msys2@v2
|
||||
with:
|
||||
msystem: MSYS
|
||||
path-type: inherit
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
- uses: actions/setup-node@v2
|
||||
with:
|
||||
node-version: 12
|
||||
- uses: ./.github/actions/win-install-sox
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "stt-${{ matrix.build-flavor }}.tgz"
|
||||
path: ${{ env.TASKCLUSTER_TMP_DIR }}
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "test-model.${{ matrix.build-flavor }}-${{ matrix.bitrate }}.zip"
|
||||
path: ${{ env.TASKCLUSTER_TMP_DIR }}
|
||||
if: matrix.models == 'test'
|
||||
- run: |
|
||||
ls -hal ${{ env.TASKCLUSTER_TMP_DIR }}/
|
||||
if: matrix.models == 'test'
|
||||
- name: Install STT NodeJS package
|
||||
run: |
|
||||
ls -hal ${{ env.TASKCLUSTER_TMP_DIR }}/
|
||||
npm install ${{ env.TASKCLUSTER_TMP_DIR }}/stt*.tgz
|
||||
- run: |
|
||||
npm install electron@${{ matrix.electronjs-version }}
|
||||
- uses: ./.github/actions/run-tests
|
||||
with:
|
||||
runtime: "electronjs"
|
||||
build-flavor: ${{ matrix.build-flavor }}
|
||||
bitrate: ${{ matrix.bitrate }}
|
||||
model-kind: ${{ matrix.models }}
|
||||
timeout-minutes: 5
|
@@ -85,7 +85,7 @@ verify_bazel_rebuild()
|
||||
|
||||
cp ${DS_DSDIR}/tensorflow/bazel*.log ${TASKCLUSTER_ARTIFACTS}/
|
||||
|
||||
spurious_rebuilds=$(grep 'Executing action' "${bazel_explain_file}" | grep 'Compiling' | grep -v -E 'no entry in the cache|[for host]|unconditional execution is requested|Executing genrule //native_client:workspace_status|Compiling native_client/workspace_status.cc|Linking native_client/libdeepspeech.so' | wc -l)
|
||||
spurious_rebuilds=$(grep 'Executing action' "${bazel_explain_file}" | grep 'Compiling' | grep -v -E 'no entry in the cache|[for host]|unconditional execution is requested|Executing genrule //native_client:workspace_status|Compiling native_client/workspace_status.cc|Linking native_client/libstt.so' | wc -l)
|
||||
if [ "${spurious_rebuilds}" -ne 0 ]; then
|
||||
echo "Bazel rebuilds some file it should not, please check."
|
||||
|
||||
@@ -109,7 +109,14 @@ verify_bazel_rebuild()
|
||||
|
||||
symlink_electron()
|
||||
{
|
||||
ln -s Electron.app/Contents/MacOS/Electron node_modules/electron/dist/node
|
||||
if [ "${OS}" = "Darwin" ]; then
|
||||
ln -s Electron.app/Contents/MacOS/Electron node_modules/electron/dist/node
|
||||
else
|
||||
ln -s electron "node_modules/electron/dist/node"
|
||||
if [ -f "node_modules/electron/dist/chrome-sandbox" ]; then
|
||||
export ELECTRON_DISABLE_SANDBOX=1
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
export_node_bin_path()
|
||||
|
@@ -10,6 +10,7 @@ if [ "${OS}" = "Linux" ]; then
|
||||
fi;
|
||||
|
||||
if [ "${OS}" = "${TC_MSYS_VERSION}" ]; then
|
||||
export TASKCLUSTER_TASK_DIR="$(cygpath ${TASKCLUSTER_TASK_DIR})"
|
||||
export DS_ROOT_TASK=${TASKCLUSTER_TASK_DIR}
|
||||
export PYENV_ROOT="${TASKCLUSTER_TASK_DIR}/pyenv-root"
|
||||
export PLATFORM_EXE_SUFFIX=.exe
|
||||
@@ -53,11 +54,11 @@ export DS_TFDIR=${DS_ROOT_TASK}/tensorflow
|
||||
export DS_DSDIR=${DS_ROOT_TASK}/
|
||||
export DS_EXAMPLEDIR=${DS_ROOT_TASK}/examples
|
||||
|
||||
export DS_VERSION="$(cat ${DS_DSDIR}/training/deepspeech_training/VERSION)"
|
||||
export DS_VERSION="$(cat ${DS_DSDIR}/training/coqui_stt_training/VERSION)"
|
||||
|
||||
export GRADLE_USER_HOME=${DS_ROOT_TASK}/gradle-cache
|
||||
export ANDROID_SDK_HOME=${DS_ROOT_TASK}/DeepSpeech/Android/SDK/
|
||||
export ANDROID_NDK_HOME=${DS_ROOT_TASK}/DeepSpeech/Android/android-ndk-r18b/
|
||||
export ANDROID_SDK_HOME=${DS_ROOT_TASK}/STT/Android/SDK/
|
||||
export ANDROID_NDK_HOME=${DS_ROOT_TASK}/STT/Android/android-ndk-r18b/
|
||||
|
||||
WGET=${WGET:-"wget"}
|
||||
TAR=${TAR:-"tar"}
|
||||
@@ -77,7 +78,7 @@ if [ "${OS}" = "${TC_MSYS_VERSION}" ]; then
|
||||
UNXZ="xz -9 -T0 -d"
|
||||
fi
|
||||
|
||||
model_source="${DEEPSPEECH_TEST_MODEL}"
|
||||
model_source="${STT_TEST_MODEL}"
|
||||
model_name="$(basename "${model_source}")"
|
||||
model_name_mmap="$(basename -s ".pb" "${model_source}").pbmm"
|
||||
model_source_mmap="$(dirname "${model_source}")/${model_name_mmap}"
|
||||
|
@@ -251,9 +251,9 @@ assert_tensorflow_version()
|
||||
assert_shows_something "$1" "${EXPECTED_TENSORFLOW_VERSION}"
|
||||
}
|
||||
|
||||
assert_deepspeech_version()
|
||||
assert_stt_version()
|
||||
{
|
||||
assert_not_present "$1" "DeepSpeech: unknown"
|
||||
assert_not_present "$1" "Coqui STT: unknown"
|
||||
}
|
||||
|
||||
# We need to ensure that running on inference really leverages GPU because
|
||||
@@ -261,7 +261,7 @@ assert_deepspeech_version()
|
||||
ensure_cuda_usage()
|
||||
{
|
||||
local _maybe_cuda=$1
|
||||
DS_BINARY_FILE=${DS_BINARY_FILE:-"deepspeech"}
|
||||
DS_BINARY_FILE=${DS_BINARY_FILE:-"stt"}
|
||||
|
||||
if [ "${_maybe_cuda}" = "cuda" ]; then
|
||||
set +e
|
||||
@@ -278,19 +278,19 @@ ensure_cuda_usage()
|
||||
check_versions()
|
||||
{
|
||||
set +e
|
||||
ds_help=$(${DS_BINARY_PREFIX}deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>&1 1>/dev/null)
|
||||
ds_help=$(${DS_BINARY_PREFIX}stt --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>&1 1>/dev/null)
|
||||
set -e
|
||||
|
||||
assert_tensorflow_version "${ds_help}"
|
||||
assert_deepspeech_version "${ds_help}"
|
||||
assert_stt_version "${ds_help}"
|
||||
}
|
||||
|
||||
assert_deepspeech_runtime()
|
||||
assert_stt_runtime()
|
||||
{
|
||||
local expected_runtime=$1
|
||||
|
||||
set +e
|
||||
local ds_version=$(${DS_BINARY_PREFIX}deepspeech --version 2>&1)
|
||||
local ds_version=$(${DS_BINARY_PREFIX}stt --version 2>&1)
|
||||
set -e
|
||||
|
||||
assert_shows_something "${ds_version}" "${expected_runtime}"
|
||||
@@ -298,23 +298,23 @@ assert_deepspeech_runtime()
|
||||
|
||||
check_runtime_nodejs()
|
||||
{
|
||||
assert_deepspeech_runtime "Runtime: Node"
|
||||
assert_stt_runtime "Runtime: Node"
|
||||
}
|
||||
|
||||
check_runtime_electronjs()
|
||||
{
|
||||
assert_deepspeech_runtime "Runtime: Electron"
|
||||
assert_stt_runtime "Runtime: Electron"
|
||||
}
|
||||
|
||||
run_tflite_basic_inference_tests()
|
||||
{
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(${DS_BINARY_PREFIX}deepspeech --model ${DATA_TMP_DIR}/${model_name} --audio ${DATA_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_nolm=$(${DS_BINARY_PREFIX}stt --model ${DATA_TMP_DIR}/${model_name} --audio ${DATA_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
set -e
|
||||
assert_correct_ldc93s1 "${phrase_pbmodel_nolm}" "$?"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(${DS_BINARY_PREFIX}deepspeech --model ${DATA_TMP_DIR}/${model_name} --audio ${DATA_TMP_DIR}/${ldc93s1_sample_filename} --extended 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_nolm=$(${DS_BINARY_PREFIX}stt --model ${DATA_TMP_DIR}/${model_name} --audio ${DATA_TMP_DIR}/${ldc93s1_sample_filename} --extended 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
set -e
|
||||
assert_correct_ldc93s1 "${phrase_pbmodel_nolm}" "$?"
|
||||
}
|
||||
@@ -322,22 +322,22 @@ run_tflite_basic_inference_tests()
|
||||
run_netframework_inference_tests()
|
||||
{
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(DeepSpeechConsole.exe --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_nolm=$(STTConsole.exe --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
set -e
|
||||
assert_working_ldc93s1 "${phrase_pbmodel_nolm}" "$?"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(DeepSpeechConsole.exe --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --extended yes 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_nolm=$(STTConsole.exe --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --extended yes 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
set -e
|
||||
assert_working_ldc93s1 "${phrase_pbmodel_nolm}" "$?"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(DeepSpeechConsole.exe --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_nolm=$(STTConsole.exe --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
set -e
|
||||
assert_working_ldc93s1 "${phrase_pbmodel_nolm}" "$?"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm=$(DeepSpeechConsole.exe --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_withlm=$(STTConsole.exe --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
set -e
|
||||
assert_working_ldc93s1_lm "${phrase_pbmodel_withlm}" "$?"
|
||||
}
|
||||
@@ -345,22 +345,22 @@ run_netframework_inference_tests()
|
||||
run_electronjs_inference_tests()
|
||||
{
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_nolm=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
set -e
|
||||
assert_working_ldc93s1 "${phrase_pbmodel_nolm}" "$?"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --extended 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_nolm=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --extended 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
set -e
|
||||
assert_working_ldc93s1 "${phrase_pbmodel_nolm}" "$?"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_nolm=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
set -e
|
||||
assert_working_ldc93s1 "${phrase_pbmodel_nolm}" "$?"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_withlm=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
set -e
|
||||
assert_working_ldc93s1_lm "${phrase_pbmodel_withlm}" "$?"
|
||||
}
|
||||
@@ -368,30 +368,30 @@ run_electronjs_inference_tests()
|
||||
run_basic_inference_tests()
|
||||
{
|
||||
set +e
|
||||
deepspeech --model "" --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr
|
||||
stt --model "" --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr
|
||||
set -e
|
||||
grep "Missing model information" ${TASKCLUSTER_TMP_DIR}/stderr
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_nolm=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1 "${phrase_pbmodel_nolm}" "$status"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --extended 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_nolm=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --extended 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1 "${phrase_pbmodel_nolm}" "$status"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_nolm=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1 "${phrase_pbmodel_nolm}" "$status"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_withlm=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_lm "${phrase_pbmodel_withlm}" "$status"
|
||||
@@ -402,13 +402,13 @@ run_all_inference_tests()
|
||||
run_basic_inference_tests
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_nolm_stereo_44k=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_2_44100.wav 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_nolm_stereo_44k=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_2_44100.wav 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1 "${phrase_pbmodel_nolm_stereo_44k}" "$status"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm_stereo_44k=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_2_44100.wav 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_withlm_stereo_44k=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_2_44100.wav 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_lm "${phrase_pbmodel_withlm_stereo_44k}" "$status"
|
||||
@@ -416,12 +416,12 @@ run_all_inference_tests()
|
||||
# Run down-sampling warning test only when we actually perform downsampling
|
||||
if [ "${ldc93s1_sample_filename}" != "LDC93S1_pcms16le_1_8000.wav" ]; then
|
||||
set +e
|
||||
phrase_pbmodel_nolm_mono_8k=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_1_8000.wav 2>&1 1>/dev/null)
|
||||
phrase_pbmodel_nolm_mono_8k=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_1_8000.wav 2>&1 1>/dev/null)
|
||||
set -e
|
||||
assert_correct_warning_upsampling "${phrase_pbmodel_nolm_mono_8k}"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm_mono_8k=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_1_8000.wav 2>&1 1>/dev/null)
|
||||
phrase_pbmodel_withlm_mono_8k=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_1_8000.wav 2>&1 1>/dev/null)
|
||||
set -e
|
||||
assert_correct_warning_upsampling "${phrase_pbmodel_withlm_mono_8k}"
|
||||
fi;
|
||||
@@ -452,19 +452,19 @@ run_prod_inference_tests()
|
||||
local _bitrate=$1
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_withlm=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_prodmodel "${phrase_pbmodel_withlm}" "$status" "${_bitrate}"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_withlm=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_prodmodel "${phrase_pbmodel_withlm}" "$status" "${_bitrate}"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm_stereo_44k=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_2_44100.wav 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_withlm_stereo_44k=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_2_44100.wav 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_prodmodel_stereo_44k "${phrase_pbmodel_withlm_stereo_44k}" "$status"
|
||||
@@ -472,7 +472,7 @@ run_prod_inference_tests()
|
||||
# Run down-sampling warning test only when we actually perform downsampling
|
||||
if [ "${ldc93s1_sample_filename}" != "LDC93S1_pcms16le_1_8000.wav" ]; then
|
||||
set +e
|
||||
phrase_pbmodel_withlm_mono_8k=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_1_8000.wav 2>&1 1>/dev/null)
|
||||
phrase_pbmodel_withlm_mono_8k=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_1_8000.wav 2>&1 1>/dev/null)
|
||||
set -e
|
||||
assert_correct_warning_upsampling "${phrase_pbmodel_withlm_mono_8k}"
|
||||
fi;
|
||||
@@ -483,19 +483,19 @@ run_prodtflite_inference_tests()
|
||||
local _bitrate=$1
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_withlm=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_prodtflitemodel "${phrase_pbmodel_withlm}" "$status" "${_bitrate}"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_withlm=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_prodtflitemodel "${phrase_pbmodel_withlm}" "$status" "${_bitrate}"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm_stereo_44k=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_2_44100.wav 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
phrase_pbmodel_withlm_stereo_44k=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_2_44100.wav 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_prodtflitemodel_stereo_44k "${phrase_pbmodel_withlm_stereo_44k}" "$status"
|
||||
@@ -503,7 +503,7 @@ run_prodtflite_inference_tests()
|
||||
# Run down-sampling warning test only when we actually perform downsampling
|
||||
if [ "${ldc93s1_sample_filename}" != "LDC93S1_pcms16le_1_8000.wav" ]; then
|
||||
set +e
|
||||
phrase_pbmodel_withlm_mono_8k=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_1_8000.wav 2>&1 1>/dev/null)
|
||||
phrase_pbmodel_withlm_mono_8k=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_1_8000.wav 2>&1 1>/dev/null)
|
||||
set -e
|
||||
assert_correct_warning_upsampling "${phrase_pbmodel_withlm_mono_8k}"
|
||||
fi;
|
||||
@@ -512,13 +512,13 @@ run_multi_inference_tests()
|
||||
run_multi_inference_tests()
|
||||
{
|
||||
set +e -o pipefail
|
||||
multi_phrase_pbmodel_nolm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/ 2>${TASKCLUSTER_TMP_DIR}/stderr | tr '\n' '%')
|
||||
multi_phrase_pbmodel_nolm=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/ 2>${TASKCLUSTER_TMP_DIR}/stderr | tr '\n' '%')
|
||||
status=$?
|
||||
set -e +o pipefail
|
||||
assert_correct_multi_ldc93s1 "${multi_phrase_pbmodel_nolm}" "$status"
|
||||
|
||||
set +e -o pipefail
|
||||
multi_phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/ 2>${TASKCLUSTER_TMP_DIR}/stderr | tr '\n' '%')
|
||||
multi_phrase_pbmodel_withlm=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/ 2>${TASKCLUSTER_TMP_DIR}/stderr | tr '\n' '%')
|
||||
status=$?
|
||||
set -e +o pipefail
|
||||
assert_correct_multi_ldc93s1 "${multi_phrase_pbmodel_withlm}" "$status"
|
||||
@@ -526,7 +526,7 @@ run_multi_inference_tests()
|
||||
|
||||
run_hotword_tests()
|
||||
{
|
||||
DS_BINARY_FILE=${DS_BINARY_FILE:-"deepspeech"}
|
||||
DS_BINARY_FILE=${DS_BINARY_FILE:-"stt"}
|
||||
set +e
|
||||
hotwords_decode=$(${DS_BINARY_PREFIX}${DS_BINARY_FILE} --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --hot_words "foo:0.0,bar:-0.1" 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
@@ -537,7 +537,7 @@ run_hotword_tests()
|
||||
run_android_hotword_tests()
|
||||
{
|
||||
set +e
|
||||
hotwords_decode=$(${DS_BINARY_PREFIX}deepspeech --model ${DATA_TMP_DIR}/${model_name} --scorer ${DATA_TMP_DIR}/kenlm.scorer --audio ${DATA_TMP_DIR}/${ldc93s1_sample_filename} --hot_words "foo:0.0,bar:-0.1" 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
hotwords_decode=$(${DS_BINARY_PREFIX}stt --model ${DATA_TMP_DIR}/${model_name} --scorer ${DATA_TMP_DIR}/kenlm.scorer --audio ${DATA_TMP_DIR}/${ldc93s1_sample_filename} --hot_words "foo:0.0,bar:-0.1" 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_lm "${hotwords_decode}" "$status"
|
||||
@@ -546,7 +546,7 @@ run_android_hotword_tests()
|
||||
run_cpp_only_inference_tests()
|
||||
{
|
||||
set +e
|
||||
phrase_pbmodel_withlm_intermediate_decode=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --stream 1280 2>${TASKCLUSTER_TMP_DIR}/stderr | tail -n 1)
|
||||
phrase_pbmodel_withlm_intermediate_decode=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --stream 1280 2>${TASKCLUSTER_TMP_DIR}/stderr | tail -n 1)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_lm "${phrase_pbmodel_withlm_intermediate_decode}" "$status"
|
||||
@ -555,13 +555,13 @@ run_cpp_only_inference_tests()
|
||||
run_js_streaming_inference_tests()
|
||||
{
|
||||
set +e
|
||||
phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --stream 2>${TASKCLUSTER_TMP_DIR}/stderr | tail -n 1)
|
||||
phrase_pbmodel_withlm=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --stream 2>${TASKCLUSTER_TMP_DIR}/stderr | tail -n 1)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_lm "${phrase_pbmodel_withlm}" "$status"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --stream --extended 2>${TASKCLUSTER_TMP_DIR}/stderr | tail -n 1)
|
||||
phrase_pbmodel_withlm=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --stream --extended 2>${TASKCLUSTER_TMP_DIR}/stderr | tail -n 1)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_lm "${phrase_pbmodel_withlm}" "$status"
|
||||
@ -571,14 +571,14 @@ run_js_streaming_prod_inference_tests()
|
||||
{
|
||||
local _bitrate=$1
|
||||
set +e
|
||||
phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --stream 2>${TASKCLUSTER_TMP_DIR}/stderr | tail -n 1)
|
||||
phrase_pbmodel_withlm=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --stream 2>${TASKCLUSTER_TMP_DIR}/stderr | tail -n 1)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_prodmodel "${phrase_pbmodel_withlm}" "$status" "${_bitrate}"
|
||||
|
||||
local _bitrate=$1
|
||||
set +e
|
||||
phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --stream --extended 2>${TASKCLUSTER_TMP_DIR}/stderr | tail -n 1)
|
||||
phrase_pbmodel_withlm=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --stream --extended 2>${TASKCLUSTER_TMP_DIR}/stderr | tail -n 1)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_prodmodel "${phrase_pbmodel_withlm}" "$status" "${_bitrate}"
|
||||
@ -588,14 +588,14 @@ run_js_streaming_prodtflite_inference_tests()
|
||||
{
|
||||
local _bitrate=$1
|
||||
set +e
|
||||
phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --stream 2>${TASKCLUSTER_TMP_DIR}/stderr | tail -n 1)
|
||||
phrase_pbmodel_withlm=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --stream 2>${TASKCLUSTER_TMP_DIR}/stderr | tail -n 1)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_prodtflitemodel "${phrase_pbmodel_withlm}" "$status" "${_bitrate}"
|
||||
|
||||
local _bitrate=$1
|
||||
set +e
|
||||
phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --stream --extended 2>${TASKCLUSTER_TMP_DIR}/stderr | tail -n 1)
|
||||
phrase_pbmodel_withlm=$(stt --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --stream --extended 2>${TASKCLUSTER_TMP_DIR}/stderr | tail -n 1)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_prodtflitemodel "${phrase_pbmodel_withlm}" "$status" "${_bitrate}"
|
||||
|
@ -32,7 +32,7 @@ shutdown_bazel()
|
||||
bazel ${BAZEL_OUTPUT_USER_ROOT} shutdown
|
||||
}
|
||||
|
||||
do_deepspeech_binary_build()
|
||||
do_stt_binary_build()
|
||||
{
|
||||
cd ${DS_DSDIR}
|
||||
make -C native_client/ \
|
||||
@ -42,5 +42,5 @@ do_deepspeech_binary_build()
|
||||
EXTRA_CFLAGS="${EXTRA_LOCAL_CFLAGS}" \
|
||||
EXTRA_LDFLAGS="${EXTRA_LOCAL_LDFLAGS}" \
|
||||
EXTRA_LIBS="${EXTRA_LOCAL_LIBS}" \
|
||||
deepspeech${PLATFORM_EXE_SUFFIX}
|
||||
stt${PLATFORM_EXE_SUFFIX}
|
||||
}
|
||||
|
@ -9,10 +9,10 @@ source $(dirname "$0")/asserts.sh
bitrate=$1
set_ldc_sample_filename "${bitrate}"

model_source=${DEEPSPEECH_PROD_MODEL}
model_source=${STT_PROD_MODEL}
model_name=$(basename "${model_source}")

model_source_mmap=${DEEPSPEECH_PROD_MODEL_MMAP}
model_source_mmap=${STT_PROD_MODEL_MMAP}
model_name_mmap=$(basename "${model_source_mmap}")

download_model_prod

@ -9,10 +9,10 @@ source $(dirname "$0")/asserts.sh
bitrate=$1
set_ldc_sample_filename "${bitrate}"

model_source=${DEEPSPEECH_PROD_MODEL//.pb/.tflite}
model_source=${STT_PROD_MODEL//.pb/.tflite}
model_name=$(basename "${model_source}")
model_name_mmap=$(basename "${model_source}")
model_source_mmap=${DEEPSPEECH_PROD_MODEL_MMAP//.pbmm/.tflite}
model_source_mmap=${STT_PROD_MODEL_MMAP//.pbmm/.tflite}
export DATA_TMP_DIR=${TASKCLUSTER_TMP_DIR}

download_model_prod

@ -9,7 +9,7 @@ source $(dirname "$0")/asserts.sh
bitrate=$1
set_ldc_sample_filename "${bitrate}"

model_source=${DEEPSPEECH_TEST_MODEL//.pb/.tflite}
model_source=${STT_TEST_MODEL//.pb/.tflite}
model_name=$(basename "${model_source}")
model_name_mmap=$(basename "${model_source}")
export DATA_TMP_DIR=${TASKCLUSTER_TMP_DIR}

@ -9,7 +9,7 @@ source $(dirname "$0")/asserts.sh
bitrate=$1
set_ldc_sample_filename "${bitrate}"

model_source=${DEEPSPEECH_TEST_MODEL//.pb/.tflite}
model_source=${STT_TEST_MODEL//.pb/.tflite}
model_name=$(basename "${model_source}")
export DATA_TMP_DIR=${TASKCLUSTER_TMP_DIR}
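Note (not part of the diff): the renamed variables lean on two standard bash expansions, pattern substitution and default values. A runnable sketch with made-up values:

STT_PROD_MODEL="https://example.com/models/output_graph.pb"   # hypothetical URL
model_source=${STT_PROD_MODEL//.pb/.tflite}    # pattern substitution: .pb -> .tflite
echo "${model_source}"                         # .../output_graph.tflite

DS_BINARY_FILE=${DS_BINARY_FILE:-"stt"}        # keep the caller's value, default to "stt"
echo "${DS_BINARY_FILE}"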
23 ci_scripts/cppwin_tflite-tests.sh (Executable file)

@ -0,0 +1,23 @@
#!/bin/bash

set -xe

source $(dirname "$0")/all-vars.sh
source $(dirname "$0")/all-utils.sh
source $(dirname "$0")/asserts.sh

bitrate=$1
set_ldc_sample_filename "${bitrate}"

model_source=${STT_TEST_MODEL//.pb/.tflite}
model_name=$(basename "${model_source}")
model_name_mmap=$(basename "${model_source}")
export DATA_TMP_DIR=${TASKCLUSTER_TMP_DIR}

download_material "${TASKCLUSTER_TMP_DIR}/ds"

export PATH=${TASKCLUSTER_TMP_DIR}/ds/:$PATH

check_versions

run_basic_inference_tests
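Note (not part of the diff): like the other ci_scripts drivers, the new cppwin_tflite-tests.sh takes a single bitrate argument that set_ldc_sample_filename uses to pick the LDC93S1 sample. A hypothetical invocation; the "16k" value is an assumption based on the sample naming seen in these tests, not something this diff spells out:

./ci_scripts/cppwin_tflite-tests.sh 16k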
@ -23,7 +23,7 @@ which node

node --version

deepspeech --version
stt --version

check_runtime_electronjs

@ -9,7 +9,7 @@ source $(dirname "$0")/asserts.sh
bitrate=$1
set_ldc_sample_filename "${bitrate}"

model_source=${DEEPSPEECH_TEST_MODEL//.pb/.tflite}
model_source=${STT_TEST_MODEL//.pb/.tflite}
model_name=$(basename "${model_source}")
model_name_mmap=$(basename "${model_source}")

@ -27,7 +27,7 @@ which node

node --version

deepspeech --version
stt --version

check_runtime_electronjs

@ -11,7 +11,7 @@ source $(dirname "$0")/build-utils.sh
source $(dirname "$0")/tf-vars.sh

BAZEL_TARGETS="
//native_client:libdeepspeech.so
//native_client:libstt.so
//native_client:generate_scorer_package
"

@ -25,4 +25,4 @@ SYSTEM_TARGET=host

do_bazel_build

do_deepspeech_binary_build
do_stt_binary_build

@ -9,10 +9,10 @@ source $(dirname "$0")/asserts.sh
bitrate=$1
set_ldc_sample_filename "${bitrate}"

model_source=${DEEPSPEECH_PROD_MODEL}
model_source=${STT_PROD_MODEL}
model_name=$(basename "${model_source}")

model_source_mmap=${DEEPSPEECH_PROD_MODEL_MMAP}
model_source_mmap=${STT_PROD_MODEL_MMAP}
model_name_mmap=$(basename "${model_source_mmap}")

download_model_prod

@ -9,10 +9,10 @@ source $(dirname "$0")/asserts.sh
bitrate=$1
set_ldc_sample_filename "${bitrate}"

model_source=${DEEPSPEECH_PROD_MODEL//.pb/.tflite}
model_source=${STT_PROD_MODEL//.pb/.tflite}
model_name=$(basename "${model_source}")
model_name_mmap=$(basename "${model_source}")
model_source_mmap=${DEEPSPEECH_PROD_MODEL_MMAP//.pbmm/.tflite}
model_source_mmap=${STT_PROD_MODEL_MMAP//.pbmm/.tflite}

download_model_prod

@ -9,7 +9,7 @@ source $(dirname "$0")/asserts.sh
bitrate=$1
set_ldc_sample_filename "${bitrate}"

model_source=${DEEPSPEECH_TEST_MODEL//.pb/.tflite}
model_source=${STT_TEST_MODEL//.pb/.tflite}
model_name=$(basename "${model_source}")
model_name_mmap=$(basename "${model_source}")
@ -5,14 +5,14 @@ set -xe
package_native_client()
{
tensorflow_dir=${DS_TFDIR}
deepspeech_dir=${DS_DSDIR}
stt_dir=${DS_DSDIR}
artifacts_dir=${TASKCLUSTER_ARTIFACTS}
artifact_name=$1

if [ ! -d ${tensorflow_dir} -o ! -d ${deepspeech_dir} -o ! -d ${artifacts_dir} ]; then
if [ ! -d ${tensorflow_dir} -o ! -d ${stt_dir} -o ! -d ${artifacts_dir} ]; then
echo "Missing directory. Please check:"
echo "tensorflow_dir=${tensorflow_dir}"
echo "deepspeech_dir=${deepspeech_dir}"
echo "stt_dir=${stt_dir}"
echo "artifacts_dir=${artifacts_dir}"
exit 1
fi;

@ -22,32 +22,32 @@ package_native_client()
fi;

win_lib=""
if [ -f "${tensorflow_dir}/bazel-bin/native_client/libdeepspeech.so.if.lib" ]; then
win_lib="-C ${tensorflow_dir}/bazel-bin/native_client/ libdeepspeech.so.if.lib"
if [ -f "${tensorflow_dir}/bazel-bin/native_client/libstt.so.if.lib" ]; then
win_lib="-C ${tensorflow_dir}/bazel-bin/native_client/ libstt.so.if.lib"
fi;

${TAR} -cf - \
-C ${tensorflow_dir}/bazel-bin/native_client/ libdeepspeech.so \
-C ${tensorflow_dir}/bazel-bin/native_client/ libstt.so \
${win_lib} \
-C ${tensorflow_dir}/bazel-bin/native_client/ generate_scorer_package \
-C ${deepspeech_dir}/ LICENSE \
-C ${deepspeech_dir}/native_client/ deepspeech${PLATFORM_EXE_SUFFIX} \
-C ${deepspeech_dir}/native_client/ deepspeech.h \
-C ${deepspeech_dir}/native_client/kenlm/ README.mozilla \
-C ${stt_dir}/ LICENSE \
-C ${stt_dir}/native_client/ stt${PLATFORM_EXE_SUFFIX} \
-C ${stt_dir}/native_client/ coqui-stt.h \
-C ${stt_dir}/native_client/kenlm/ README.coqui \
| ${XZ} > "${artifacts_dir}/${artifact_name}"
}

package_native_client_ndk()
{
deepspeech_dir=${DS_DSDIR}
stt_dir=${DS_DSDIR}
tensorflow_dir=${DS_TFDIR}
artifacts_dir=${TASKCLUSTER_ARTIFACTS}
artifact_name=$1
arch_abi=$2

if [ ! -d ${deepspeech_dir} -o ! -d ${artifacts_dir} ]; then
if [ ! -d ${stt_dir} -o ! -d ${artifacts_dir} ]; then
echo "Missing directory. Please check:"
echo "deepspeech_dir=${deepspeech_dir}"
echo "stt_dir=${stt_dir}"
echo "artifacts_dir=${artifacts_dir}"
exit 1
fi;

@ -61,17 +61,17 @@ package_native_client_ndk()
fi;

${TAR} -cf - \
-C ${deepspeech_dir}/native_client/libs/${arch_abi}/ deepspeech \
-C ${deepspeech_dir}/native_client/libs/${arch_abi}/ libdeepspeech.so \
-C ${stt_dir}/native_client/libs/${arch_abi}/ stt \
-C ${stt_dir}/native_client/libs/${arch_abi}/ libstt.so \
-C ${tensorflow_dir}/bazel-bin/native_client/ generate_scorer_package \
-C ${deepspeech_dir}/native_client/libs/${arch_abi}/ libc++_shared.so \
-C ${deepspeech_dir}/native_client/ deepspeech.h \
-C ${deepspeech_dir}/ LICENSE \
-C ${deepspeech_dir}/native_client/kenlm/ README.mozilla \
-C ${stt_dir}/native_client/libs/${arch_abi}/ libc++_shared.so \
-C ${stt_dir}/native_client/ coqui-stt.h \
-C ${stt_dir}/ LICENSE \
-C ${stt_dir}/native_client/kenlm/ README.coqui \
| ${XZ} > "${artifacts_dir}/${artifact_name}"
}

package_libdeepspeech_as_zip()
package_libstt_as_zip()
{
tensorflow_dir=${DS_TFDIR}
artifacts_dir=${TASKCLUSTER_ARTIFACTS}

@ -88,5 +88,5 @@ package_libdeepspeech_as_zip()
echo "Please specify artifact name."
fi;

${ZIP} -r9 --junk-paths "${artifacts_dir}/${artifact_name}" ${tensorflow_dir}/bazel-bin/native_client/libdeepspeech.so
${ZIP} -r9 --junk-paths "${artifacts_dir}/${artifact_name}" ${tensorflow_dir}/bazel-bin/native_client/libstt.so
}
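Note (not part of the diff): package_native_client builds one artifact out of files scattered across the TensorFlow and STT trees by repeating -C <dir> <file> and piping the tar stream straight into xz. A self-contained sketch of that pattern with throwaway paths:

mkdir -p /tmp/pkg-demo/a /tmp/pkg-demo/b /tmp/pkg-demo/out
echo "lib" > /tmp/pkg-demo/a/libstt.so
echo "cli" > /tmp/pkg-demo/b/stt
# Each "-C <dir> <file>" re-roots the next member, so files from different build
# trees land flat in a single archive, compressed on the fly.
tar -cf - \
    -C /tmp/pkg-demo/a libstt.so \
    -C /tmp/pkg-demo/b stt \
    | xz > /tmp/pkg-demo/out/native_client.tar.xz
tar -tJf /tmp/pkg-demo/out/native_client.tar.xz   # lists libstt.so and stt, no directory prefixes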
@ -11,11 +11,11 @@ cp ${DS_DSDIR}/tensorflow/bazel*.log ${TASKCLUSTER_ARTIFACTS}/

package_native_client "native_client.tar.xz"

package_libdeepspeech_as_zip "libdeepspeech.zip"
package_libstt_as_zip "libstt.zip"

if [ -d ${DS_DSDIR}/wheels ]; then
cp ${DS_DSDIR}/wheels/* ${TASKCLUSTER_ARTIFACTS}/
cp ${DS_DSDIR}/native_client/javascript/deepspeech-*.tgz ${TASKCLUSTER_ARTIFACTS}/
cp ${DS_DSDIR}/native_client/javascript/stt-*.tgz ${TASKCLUSTER_ARTIFACTS}/
fi;

if [ -f ${DS_DSDIR}/native_client/javascript/wrapper.tar.gz ]; then

@ -9,18 +9,18 @@ source $(dirname "$0")/asserts.sh
bitrate=$1
set_ldc_sample_filename "${bitrate}"

model_source=${DEEPSPEECH_PROD_MODEL}
model_source=${STT_PROD_MODEL}
model_name=$(basename "${model_source}")

model_source_mmap=${DEEPSPEECH_PROD_MODEL_MMAP}
model_source_mmap=${STT_PROD_MODEL_MMAP}
model_name_mmap=$(basename "${model_source_mmap}")

download_model_prod

download_material

which deepspeech
deepspeech --version
which stt
stt --version

run_prod_inference_tests "${bitrate}"

@ -11,8 +11,8 @@ set_ldc_sample_filename "${bitrate}"

download_data

which deepspeech
deepspeech --version
which stt
stt --version

run_all_inference_tests

@ -9,16 +9,16 @@ source $(dirname "$0")/asserts.sh
bitrate=$1
set_ldc_sample_filename "${bitrate}"

model_source=${DEEPSPEECH_PROD_MODEL//.pb/.tflite}
model_source=${STT_PROD_MODEL//.pb/.tflite}
model_name=$(basename "${model_source}")
model_name_mmap=$(basename "${model_source}")
model_source_mmap=${DEEPSPEECH_PROD_MODEL_MMAP//.pbmm/.tflite}
model_source_mmap=${STT_PROD_MODEL_MMAP//.pbmm/.tflite}

download_model_prod

download_material

which deepspeech
deepspeech --version
which stt
stt --version

run_prodtflite_inference_tests "${bitrate}"

@ -9,14 +9,14 @@ source $(dirname "$0")/asserts.sh
bitrate=$1
set_ldc_sample_filename "${bitrate}"

model_source=${DEEPSPEECH_TEST_MODEL//.pb/.tflite}
model_source=${STT_TEST_MODEL//.pb/.tflite}
model_name=$(basename "${model_source}")
model_name_mmap=$(basename "${model_source}")

download_data

which deepspeech
deepspeech --version
which stt
stt --version

run_all_inference_tests

@ -1,6 +1,7 @@
#!/bin/bash

set -ex
set -o pipefail

source $(dirname $0)/tf-vars.sh

@ -21,7 +22,10 @@ pushd ${DS_ROOT_TASK}/tensorflow/
fi;

case "$1" in
"--linux-cpu"|"--darwin-cpu"|"--windows-cpu")
"--windows-cpu")
echo "" | TF_NEED_CUDA=0 ./configure && ${BAZEL_BUILD} ${OPT_OR_DBG} ${BAZEL_OPT_FLAGS} ${BAZEL_EXTRA_FLAGS} ${BUILD_TARGET_LIBSTT} ${BUILD_TARGET_LITE_LIB} --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh"
;;
"--linux-cpu"|"--darwin-cpu")
echo "" | TF_NEED_CUDA=0 ./configure && ${BAZEL_BUILD} ${OPT_OR_DBG} ${BAZEL_OPT_FLAGS} ${BAZEL_EXTRA_FLAGS} ${BUILD_TARGET_LIB_CPP_API} ${BUILD_TARGET_LITE_LIB}
;;
"--linux-cuda"|"--windows-cuda")

@ -28,9 +28,9 @@ if [ -f "${OUTPUT_ROOT}/tensorflow/lite/tools/benchmark/benchmark_model" ]; then
fi

# It seems that bsdtar and gnutar are behaving a bit differently on the way
# they deal with --exclude="./public/*" ; this caused ./DeepSpeech/tensorflow/core/public/
# they deal with --exclude="./public/*" ; this caused ./STT/tensorflow/core/public/
# to be ditched when we just wanted to get rid of ./public/ on OSX.
# Switching to gnutar (already needed for the --transform on DeepSpeech tasks)
# Switching to gnutar (already needed for the --transform on STT tasks)
# does the trick.
TAR_EXCLUDE="--exclude=./dls/*"
if [ "${OS}" = "Darwin" ]; then
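Note (not part of the diff): the comment block above explains why these scripts insist on GNU tar: bsdtar and GNU tar treat --exclude="./public/*" differently (on macOS the nested tensorflow/core/public/ was dropped too), and --transform is a GNU-tar feature the tasks already rely on. A small sketch of both options, assuming gtar is GNU tar (the usual Homebrew name on macOS); paths are made up:

mkdir -p /tmp/tar-demo/public /tmp/tar-demo/core/public
touch /tmp/tar-demo/public/skip.txt /tmp/tar-demo/core/public/keep.txt
gtar -cf /tmp/tar-demo.tar \
     --exclude='./public/*' \
     --transform='s,^\./,STT/,' \
     -C /tmp/tar-demo .
# Per the comment above, GNU tar keeps core/public/keep.txt while dropping the
# top-level ./public/ contents; --transform re-roots the members under STT/.
gtar -tf /tmp/tar-demo.tar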
@ -65,22 +65,22 @@ bazel shutdown

if [ ! -z "${install_cuda}" ]; then
# Install CUDA and CuDNN
mkdir -p ${DS_ROOT_TASK}/DeepSpeech/CUDA/ || true
mkdir -p ${DS_ROOT_TASK}/STT/CUDA/ || true
pushd ${DS_ROOT_TASK}
CUDA_FILE=`basename ${CUDA_URL}`
PERL5LIB=. sh ${DS_ROOT_TASK}/dls/${CUDA_FILE} --silent --override --toolkit --toolkitpath=${DS_ROOT_TASK}/DeepSpeech/CUDA/ --defaultroot=${DS_ROOT_TASK}/DeepSpeech/CUDA/
PERL5LIB=. sh ${DS_ROOT_TASK}/dls/${CUDA_FILE} --silent --override --toolkit --toolkitpath=${DS_ROOT_TASK}/STT/CUDA/ --defaultroot=${DS_ROOT_TASK}/STT/CUDA/

CUDNN_FILE=`basename ${CUDNN_URL}`
tar xvf ${DS_ROOT_TASK}/dls/${CUDNN_FILE} --strip-components=1 -C ${DS_ROOT_TASK}/DeepSpeech/CUDA/
tar xvf ${DS_ROOT_TASK}/dls/${CUDNN_FILE} --strip-components=1 -C ${DS_ROOT_TASK}/STT/CUDA/
popd

LD_LIBRARY_PATH=${DS_ROOT_TASK}/DeepSpeech/CUDA/lib64/:${DS_ROOT_TASK}/DeepSpeech/CUDA/lib64/stubs/:$LD_LIBRARY_PATH
LD_LIBRARY_PATH=${DS_ROOT_TASK}/STT/CUDA/lib64/:${DS_ROOT_TASK}/STT/CUDA/lib64/stubs/:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH

# We might lack libcuda.so.1 symlink, let's fix as upstream does:
# https://github.com/tensorflow/tensorflow/pull/13811/files?diff=split#diff-2352449eb75e66016e97a591d3f0f43dR96
if [ ! -h "${DS_ROOT_TASK}/DeepSpeech/CUDA/lib64/stubs/libcuda.so.1" ]; then
ln -s "${DS_ROOT_TASK}/DeepSpeech/CUDA/lib64/stubs/libcuda.so" "${DS_ROOT_TASK}/DeepSpeech/CUDA/lib64/stubs/libcuda.so.1"
if [ ! -h "${DS_ROOT_TASK}/STT/CUDA/lib64/stubs/libcuda.so.1" ]; then
ln -s "${DS_ROOT_TASK}/STT/CUDA/lib64/stubs/libcuda.so" "${DS_ROOT_TASK}/STT/CUDA/lib64/stubs/libcuda.so.1"
fi;

else

@ -88,15 +88,15 @@ else
fi

if [ ! -z "${install_android}" ]; then
mkdir -p ${DS_ROOT_TASK}/DeepSpeech/Android/SDK || true
mkdir -p ${DS_ROOT_TASK}/STT/Android/SDK || true
ANDROID_NDK_FILE=`basename ${ANDROID_NDK_URL}`
ANDROID_SDK_FILE=`basename ${ANDROID_SDK_URL}`

pushd ${DS_ROOT_TASK}/DeepSpeech/Android
pushd ${DS_ROOT_TASK}/STT/Android
unzip ${DS_ROOT_TASK}/dls/${ANDROID_NDK_FILE}
popd

pushd ${DS_ROOT_TASK}/DeepSpeech/Android/SDK
pushd ${DS_ROOT_TASK}/STT/Android/SDK
unzip ${DS_ROOT_TASK}/dls/${ANDROID_SDK_FILE}
yes | ./tools/bin/sdkmanager --licenses
./tools/bin/sdkmanager --update
@ -109,4 +109,4 @@ mkdir -p ${TASKCLUSTER_ARTIFACTS} || true

# Taken from https://www.tensorflow.org/install/source
# Only future is needed for our builds, as we don't build the Python package
pip install -U --user future==0.17.1 || true
python -m pip install -U --user future==0.17.1 || true

@ -40,9 +40,6 @@ elif [ "${OS}" = "${TC_MSYS_VERSION}" ]; then
export TASKCLUSTER_ARTIFACTS="$(cygpath ${TASKCLUSTER_ARTIFACTS})"

export DS_ROOT_TASK=${TASKCLUSTER_TASK_DIR}
export BAZEL_VC='C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC'
export BAZEL_SH='C:\builds\tc-workdir\msys64\usr\bin\bash'
export TC_WIN_BUILD_PATH='C:\builds\tc-workdir\msys64\usr\bin;C:\Python36'
export MSYS2_ARG_CONV_EXCL='//'

mkdir -p ${TASKCLUSTER_TASK_DIR}/tmp/

@ -90,9 +87,9 @@ fi;
export PATH

if [ "${OS}" = "Linux" ]; then
export LD_LIBRARY_PATH=${DS_ROOT_TASK}/DeepSpeech/CUDA/lib64/:${DS_ROOT_TASK}/DeepSpeech/CUDA/lib64/stubs/:$LD_LIBRARY_PATH
export ANDROID_SDK_HOME=${DS_ROOT_TASK}/DeepSpeech/Android/SDK/
export ANDROID_NDK_HOME=${DS_ROOT_TASK}/DeepSpeech/Android/android-ndk-r18b/
export LD_LIBRARY_PATH=${DS_ROOT_TASK}/STT/CUDA/lib64/:${DS_ROOT_TASK}/STT/CUDA/lib64/stubs/:$LD_LIBRARY_PATH
export ANDROID_SDK_HOME=${DS_ROOT_TASK}/STT/Android/SDK/
export ANDROID_NDK_HOME=${DS_ROOT_TASK}/STT/Android/android-ndk-r18b/
fi;

export TF_ENABLE_XLA=0

@ -119,19 +116,15 @@ export TF_NEED_ROCM=0
# This should be gcc-5, hopefully. CUDA and TensorFlow might not be happy, otherwise.
export GCC_HOST_COMPILER_PATH=/usr/bin/gcc

if [ "${OS}" = "${TC_MSYS_VERSION}" ]; then
export PYTHON_BIN_PATH=C:/Python36/python.exe
else
if [ "${OS}" = "Linux" ]; then
source /etc/os-release
if [ "${ID}" = "ubuntu" -a "${VERSION_ID}" = "20.04" ]; then
export PYTHON_BIN_PATH=/usr/bin/python3
else
export PYTHON_BIN_PATH=/usr/bin/python2.7
fi
if [ "${OS}" = "Linux" ]; then
source /etc/os-release
if [ "${ID}" = "ubuntu" -a "${VERSION_ID}" = "20.04" ]; then
export PYTHON_BIN_PATH=/usr/bin/python3
else
export PYTHON_BIN_PATH=/usr/bin/python2.7
export PYTHON_BIN_PATH=/usr/bin/python2.7
fi
elif [ "${OS}" != "${TC_MSYS_VERSION}" ]; then
export PYTHON_BIN_PATH=/usr/bin/python2.7
fi

## Below, define or export some build variables

@ -171,7 +164,7 @@ NVCC_COMPUTE="3.5"
if [ "${OS}" = "${TC_MSYS_VERSION}" ]; then
TF_CUDA_FLAGS="TF_CUDA_CLANG=0 TF_CUDA_VERSION=10.1 TF_CUDNN_VERSION=7.6.0 CUDNN_INSTALL_PATH=\"${CUDA_INSTALL_DIRECTORY}\" TF_CUDA_PATHS=\"${CUDA_INSTALL_DIRECTORY}\" TF_CUDA_COMPUTE_CAPABILITIES=\"${NVCC_COMPUTE}\""
else
TF_CUDA_FLAGS="TF_CUDA_CLANG=0 TF_CUDA_VERSION=10.1 TF_CUDNN_VERSION=7.6.0 CUDNN_INSTALL_PATH=\"${DS_ROOT_TASK}/DeepSpeech/CUDA\" TF_CUDA_PATHS=\"${DS_ROOT_TASK}/DeepSpeech/CUDA\" TF_CUDA_COMPUTE_CAPABILITIES=\"${NVCC_COMPUTE}\""
TF_CUDA_FLAGS="TF_CUDA_CLANG=0 TF_CUDA_VERSION=10.1 TF_CUDNN_VERSION=7.6.0 CUDNN_INSTALL_PATH=\"${DS_ROOT_TASK}/STT/CUDA\" TF_CUDA_PATHS=\"${DS_ROOT_TASK}/STT/CUDA\" TF_CUDA_COMPUTE_CAPABILITIES=\"${NVCC_COMPUTE}\""
fi
BAZEL_ARM_FLAGS="--config=rpi3 --config=rpi3_opt --copt=-DTFLITE_WITH_RUY_GEMV"
BAZEL_ARM64_FLAGS="--config=rpi3-armv8 --config=rpi3-armv8_opt --copt=-DTFLITE_WITH_RUY_GEMV"

@ -186,15 +179,7 @@ fi
BAZEL_IOS_ARM64_FLAGS="--config=ios_arm64 --define=runtime=tflite --copt=-DTFLITE_WITH_RUY_GEMV"
BAZEL_IOS_X86_64_FLAGS="--config=ios_x86_64 --define=runtime=tflite --copt=-DTFLITE_WITH_RUY_GEMV"

if [ "${OS}" = "${TC_MSYS_VERSION}" ]; then
# Somehow, even with Python being in the PATH, Bazel on windows struggles
# with '/usr/bin/env python' ...
#
# We also force TMP/TEMP otherwise Bazel will pick default Windows one
# under %USERPROFILE%\AppData\Local\Temp and with 8.3 file format convention
# it messes with cxx_builtin_include_directory
BAZEL_EXTRA_FLAGS="--action_env=PATH=${TC_WIN_BUILD_PATH} --action_env=TEMP=${TEMP} --action_env=TMP=${TMP}"
else
if [ "${OS}" != "${TC_MSYS_VERSION}" ]; then
BAZEL_EXTRA_FLAGS="--config=noaws --config=nogcp --config=nohdfs --config=nonccl --copt=-fvisibility=hidden"
fi
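Note (not part of the diff): the removed Windows branch documents why PATH, TEMP and TMP were forwarded explicitly: Bazel runs build actions with a trimmed environment, so anything an action needs must be passed through --action_env, and the default %USERPROFILE% temp path in 8.3 form confused cxx_builtin_include_directory. A hedged sketch of forwarding such values; the target and paths are illustrative only and assume an existing Bazel workspace:

TC_WIN_BUILD_PATH='C:\builds\tc-workdir\msys64\usr\bin;C:\Python36'
BAZEL_EXTRA_FLAGS="--action_env=PATH=${TC_WIN_BUILD_PATH} --action_env=TEMP=${TEMP} --action_env=TMP=${TMP}"
bazel build ${BAZEL_EXTRA_FLAGS} //native_client:libstt.so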
@ -211,3 +196,4 @@ BUILD_TARGET_GRAPH_BENCHMARK="//tensorflow/tools/benchmark:benchmark_model"
BUILD_TARGET_TOCO="//tensorflow/lite/toco:toco"
BUILD_TARGET_LITE_BENCHMARK="//tensorflow/lite/tools/benchmark:benchmark_model"
BUILD_TARGET_LITE_LIB="//tensorflow/lite/c:libtensorflowlite_c.so"
BUILD_TARGET_LIBSTT="//native_client:libstt.so"

@ -45,16 +45,16 @@ workspace_status.cc:
# Enforce PATH here because swig calls from build_ext looses track of some
# variables over several runs
bindings: clean-keep-third-party workspace_status.cc $(DS_SWIG_DEP)
pip3 install --quiet $(PYTHON_PACKAGES) wheel==0.33.6 setuptools==45.0.0
DISTUTILS_USE_SDK=1 PATH=$(DS_SWIG_BIN_PATH):$(TOOLCHAIN):$$PATH SWIG_LIB="$(SWIG_LIB)" AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) LIBEXE=$(LIBEXE) CFLAGS="$(CFLAGS) $(CXXFLAGS)" LDFLAGS="$(LDFLAGS_NEEDED)" $(PYTHON_PATH) $(NUMPY_INCLUDE) python3 ./setup.py build_ext --num_processes $(NUM_PROCESSES) $(PYTHON_PLATFORM_NAME) $(SETUP_FLAGS)
python -m pip install --quiet $(PYTHON_PACKAGES) wheel==0.33.6 setuptools==45.0.0
DISTUTILS_USE_SDK=1 PATH=$(DS_SWIG_BIN_PATH):$(TOOLCHAIN):$$PATH SWIG_LIB="$(SWIG_LIB)" AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) LIBEXE=$(LIBEXE) CFLAGS="$(CFLAGS) $(CXXFLAGS)" LDFLAGS="$(LDFLAGS_NEEDED)" $(PYTHON_PATH) $(NUMPY_INCLUDE) python ./setup.py build_ext --num_processes $(NUM_PROCESSES) $(PYTHON_PLATFORM_NAME) $(SETUP_FLAGS)
find temp_build -type f -name "*.o" -delete
DISTUTILS_USE_SDK=1 AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) LIBEXE=$(LIBEXE) CFLAGS="$(CFLAGS) $(CXXFLAGS)" LDFLAGS="$(LDFLAGS_NEEDED)" $(PYTHON_PATH) $(NUMPY_INCLUDE) python3 ./setup.py bdist_wheel $(PYTHON_PLATFORM_NAME) $(SETUP_FLAGS)
DISTUTILS_USE_SDK=1 AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) LIBEXE=$(LIBEXE) CFLAGS="$(CFLAGS) $(CXXFLAGS)" LDFLAGS="$(LDFLAGS_NEEDED)" $(PYTHON_PATH) $(NUMPY_INCLUDE) python ./setup.py bdist_wheel $(PYTHON_PLATFORM_NAME) $(SETUP_FLAGS)
rm -rf temp_build

bindings-debug: clean-keep-third-party workspace_status.cc $(DS_SWIG_DEP)
pip3 install --quiet $(PYTHON_PACKAGES) wheel==0.33.6 setuptools==45.0.0
DISTUTILS_USE_SDK=1 PATH=$(DS_SWIG_BIN_PATH):$(TOOLCHAIN):$$PATH SWIG_LIB="$(SWIG_LIB)" AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) LIBEXE=$(LIBEXE) CFLAGS="$(CFLAGS) $(CXXFLAGS) -DDEBUG" LDFLAGS="$(LDFLAGS_NEEDED)" $(PYTHON_PATH) $(NUMPY_INCLUDE) python3 ./setup.py build_ext --debug --num_processes $(NUM_PROCESSES) $(PYTHON_PLATFORM_NAME) $(SETUP_FLAGS)
python -m pip install --quiet $(PYTHON_PACKAGES) wheel==0.33.6 setuptools==45.0.0
DISTUTILS_USE_SDK=1 PATH=$(DS_SWIG_BIN_PATH):$(TOOLCHAIN):$$PATH SWIG_LIB="$(SWIG_LIB)" AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) LIBEXE=$(LIBEXE) CFLAGS="$(CFLAGS) $(CXXFLAGS) -DDEBUG" LDFLAGS="$(LDFLAGS_NEEDED)" $(PYTHON_PATH) $(NUMPY_INCLUDE) python ./setup.py build_ext --debug --num_processes $(NUM_PROCESSES) $(PYTHON_PLATFORM_NAME) $(SETUP_FLAGS)
$(GENERATE_DEBUG_SYMS)
find temp_build -type f -name "*.o" -delete
DISTUTILS_USE_SDK=1 AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) LIBEXE=$(LIBEXE) CFLAGS="$(CFLAGS) $(CXXFLAGS) -DDEBUG" LDFLAGS="$(LDFLAGS_NEEDED)" $(PYTHON_PATH) $(NUMPY_INCLUDE) python3 ./setup.py bdist_wheel $(PYTHON_PLATFORM_NAME) $(SETUP_FLAGS)
DISTUTILS_USE_SDK=1 AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) LIBEXE=$(LIBEXE) CFLAGS="$(CFLAGS) $(CXXFLAGS) -DDEBUG" LDFLAGS="$(LDFLAGS_NEEDED)" $(PYTHON_PATH) $(NUMPY_INCLUDE) python ./setup.py bdist_wheel $(PYTHON_PLATFORM_NAME) $(SETUP_FLAGS)
rm -rf temp_build
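Note (not part of the diff): the Makefile comment above is about per-recipe environment. Each recipe line runs in its own shell, and per the comment the swig calls made from build_ext lose track of some variables over several runs, so PATH is set inline on the command itself ($$PATH is make's way of passing a literal $PATH to the shell). In plain bash the same one-command override looks like this, with a hypothetical swig location:

DS_SWIG_BIN_PATH=/tmp/ds-swig/bin                                          # hypothetical location
PATH="${DS_SWIG_BIN_PATH}:${PATH}" bash -c 'echo "inside:  ${PATH%%:*}"'   # prints /tmp/ds-swig/bin
echo "outside: ${PATH%%:*}"                                                # caller's PATH is unchanged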
@ -55,7 +55,7 @@ PYTHON_PLATFORM_NAME ?= --plat-name manylinux1_x86_64
endif
endif

ifeq ($(TARGET),host-win)
ifeq ($(findstring _NT,$(OS)),_NT)
TOOLCHAIN := '$(VCToolsInstallDir)\bin\Hostx64\x64\'
TOOL_CC := cl.exe
TOOL_CXX := cl.exe