Merge pull request #2821 from lissyx/win-cuda-tests
Add Windows CUDA CI
commit 5e46d702af
@@ -252,6 +252,25 @@ assert_deepspeech_version()
  assert_not_present "$1" "DeepSpeech: unknown"
}

# We need to ensure that running on inference really leverages GPU because
# it might default back to CPU
ensure_cuda_usage()
{
  local _maybe_cuda=$1
  DS_BINARY_FILE=${DS_BINARY_FILE:-"deepspeech"}

  if [ "${_maybe_cuda}" = "cuda" ]; then
    set +e
    export TF_CPP_MIN_VLOG_LEVEL=1
    ds_cuda=$(${DS_BINARY_PREFIX}${DS_BINARY_FILE} --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>&1 1>/dev/null)
    export TF_CPP_MIN_VLOG_LEVEL=
    set -e

    assert_shows_something "${ds_cuda}" "Successfully opened dynamic library nvcuda.dll"
    assert_not_present "${ds_cuda}" "Skipping registering GPU devices"
  fi;
}

check_versions()
{
  set +e
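Note: the test scripts changed below opt into this check by forwarding one of their positional arguments; when that argument is not "cuda" the function does nothing. A minimal caller-side sketch, mirroring the .Net Framework test script later in this diff (the DS_BINARY_FILE override is only needed when the binary is not the default "deepspeech"):

    # caller-side sketch, as in the .Net Framework test hunk below
    DS_BINARY_FILE="DeepSpeechConsole.exe"   # override the default "deepspeech" binary name
    ensure_cuda_usage "$2"                   # "$2" carries "cuda" when the CUDA package is under test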
@@ -13,4 +13,6 @@ export PATH=${TASKCLUSTER_TMP_DIR}/ds/:$PATH

check_versions

ensure_cuda_usage "$2"

run_basic_inference_tests
@@ -59,6 +59,8 @@ node --version

check_runtime_electronjs

ensure_cuda_usage "$4"

run_electronjs_inference_tests

if [ "${OS}" = "Linux" ]; then
@@ -10,7 +10,7 @@ source $(dirname "$0")/tc-tests-utils.sh
bitrate=$1
set_ldc_sample_filename "${bitrate}"

-if [ "${package_option}" = "--cuda" ]; then
+if [ "${package_option}" = "cuda" ]; then
  PROJECT_NAME="DeepSpeech-GPU"
elif [ "${package_option}" = "--tflite" ]; then
  PROJECT_NAME="DeepSpeech-TFLite"
@@ -25,4 +25,7 @@ download_data

install_nuget "${PROJECT_NAME}"

DS_BINARY_FILE="DeepSpeechConsole.exe"
ensure_cuda_usage "$2"

run_netframework_inference_tests
@@ -29,4 +29,6 @@ npm install --prefix ${NODE_ROOT} --cache ${NODE_CACHE} ${deepspeech_npm_url}

check_runtime_nodejs

ensure_cuda_usage "$3"

run_all_inference_tests
@@ -7,8 +7,8 @@ get_dep_npm_pkg_url()
{
  local all_deps="$(curl -s https://community-tc.services.mozilla.com/api/queue/v1/task/${TASK_ID} | python -c 'import json; import sys; print(" ".join(json.loads(sys.stdin.read())["dependencies"]));')"

-  # We try "deepspeech-tflite" first and if we don't find it we try "deepspeech"
-  for pkg_basename in "deepspeech-tflite" "deepspeech"; do
+  # We try "deepspeech-tflite" and "deepspeech-gpu" first and if we don't find it we try "deepspeech"
+  for pkg_basename in "deepspeech-tflite" "deepspeech-gpu" "deepspeech"; do
    local deepspeech_pkg="${pkg_basename}-${DS_VERSION}.tgz"
    for dep in ${all_deps}; do
      local has_artifact=$(curl -s https://community-tc.services.mozilla.com/api/queue/v1/task/${dep}/artifacts | python -c 'import json; import sys; has_artifact = True in [ e["name"].find("'${deepspeech_pkg}'") > 0 for e in json.loads(sys.stdin.read())["artifacts"] ]; print(has_artifact)')
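For readers skimming the embedded Python: each dependency task's artifact listing is fetched and scanned for the candidate tarball name, and the new "deepspeech-gpu" basename simply adds one more candidate before the plain package. A standalone sketch of that per-dependency check (it assumes ${DS_VERSION} and ${dep} are set as in the function above):

    # sketch of the has_artifact check, for the new GPU basename
    deepspeech_pkg="deepspeech-gpu-${DS_VERSION}.tgz"
    curl -s https://community-tc.services.mozilla.com/api/queue/v1/task/${dep}/artifacts \
      | python -c 'import json, sys; print(any(e["name"].find("'${deepspeech_pkg}'") > 0 for e in json.load(sys.stdin)["artifacts"]))'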
@@ -13,12 +13,19 @@ download_data

virtualenv_activate "${pyalias}" "deepspeech"

-deepspeech_pkg_url=$(get_python_pkg_url ${pyver_pkg} ${py_unicode_type})
+if [ "$3" = "cuda" ]; then
+    deepspeech_pkg_url=$(get_python_pkg_url "${pyver_pkg}" "${py_unicode_type}" "deepspeech_gpu")
+else
+    deepspeech_pkg_url=$(get_python_pkg_url "${pyver_pkg}" "${py_unicode_type}")
+fi;

LD_LIBRARY_PATH=${PY37_LDPATH}:$LD_LIBRARY_PATH pip install --verbose --only-binary :all: --upgrade ${deepspeech_pkg_url} | cat

which deepspeech
deepspeech --version

ensure_cuda_usage "$3"

run_all_inference_tests

virtualenv_deactivate "${pyalias}" "deepspeech"
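The third positional argument that selects the GPU wheel comes from the new Windows CUDA task definitions; for example, the Python 3.7 task below invokes this script as:

    ${system.homedir.win}/DeepSpeech/ds/taskcluster/tc-python-tests.sh 3.7.6:m 16k cuda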
@@ -0,0 +1,11 @@
build:
  template_file: test-win-cuda-opt-base.tyml
  dependencies:
    - "win-amd64-gpu-opt"
    - "test-training_16k-linux-amd64-py36m-opt"
  test_model_task: "test-training_16k-linux-amd64-py36m-opt"
  args:
    tests_cmdline: "$TASKCLUSTER_TASK_DIR/DeepSpeech/ds/taskcluster/tc-cppwin-ds-tests.sh 16k cuda"
  metadata:
    name: "DeepSpeech Windows AMD64 CUDA C++ tests (16kHz)"
    description: "Testing DeepSpeech C++ for Windows/AMD64, CUDA, optimized version (16kHz)"
@@ -0,0 +1,14 @@
build:
  template_file: test-win-opt-base.tyml
  dependencies:
    - "node-package-cpu"
    - "test-training_16k-linux-amd64-py36m-opt"
  test_model_task: "test-training_16k-linux-amd64-py36m-opt"
  system_setup:
    >
      ${system.sox_win} && ${nodejs.win.prep_12}
  args:
    tests_cmdline: "${system.homedir.win}/DeepSpeech/ds/taskcluster/tc-electron-tests.sh 12.x 8.0.1 16k"
  metadata:
    name: "DeepSpeech Windows AMD64 CPU ElectronJS MultiArch Package v8.0 tests"
    description: "Testing DeepSpeech for Windows/AMD64 on ElectronJS MultiArch Package v8.0, CPU only, optimized version"
@@ -0,0 +1,14 @@
build:
  template_file: test-win-cuda-opt-base.tyml
  dependencies:
    - "node-package-gpu"
    - "test-training_16k-linux-amd64-py36m-opt"
  test_model_task: "test-training_16k-linux-amd64-py36m-opt"
  system_setup:
    >
      ${system.sox_win} && ${nodejs.win.prep_12}
  args:
    tests_cmdline: "${system.homedir.win}/DeepSpeech/ds/taskcluster/tc-electron-tests.sh 12.x 8.0.1 16k cuda"
  metadata:
    name: "DeepSpeech Windows AMD64 CUDA ElectronJS MultiArch Package v8.0 tests"
    description: "Testing DeepSpeech for Windows/AMD64 on ElectronJS MultiArch Package v8.0, CUDA, optimized version"
@@ -0,0 +1,14 @@
build:
  template_file: test-win-opt-base.tyml
  dependencies:
    - "node-package-tflite"
    - "test-training_16k-linux-amd64-py36m-opt"
  test_model_task: "test-training_16k-linux-amd64-py36m-opt"
  system_setup:
    >
      ${system.sox_win} && ${nodejs.win.prep_12}
  args:
    tests_cmdline: "${system.homedir.win}/DeepSpeech/ds/taskcluster/tc-electron-tests.sh 12.x 8.0.1 16k"
  metadata:
    name: "DeepSpeech Windows AMD64 TFLite ElectronJS MultiArch Package v8.0 tests"
    description: "Testing DeepSpeech for Windows/AMD64 on ElectronJS MultiArch Package v8.0, TFLite only, optimized version"
@@ -0,0 +1,11 @@
build:
  template_file: test-win-cuda-opt-base.tyml
  dependencies:
    - "win-amd64-gpu-opt"
    - "test-training_16k-linux-amd64-py36m-opt"
  test_model_task: "test-training_16k-linux-amd64-py36m-opt"
  args:
    tests_cmdline: "$TASKCLUSTER_TASK_DIR/DeepSpeech/ds/taskcluster/tc-netframework-ds-tests.sh 16k cuda"
  metadata:
    name: "DeepSpeech Windows AMD64 CUDA .Net Framework tests"
    description: "Testing DeepSpeech .Net Framework for Windows/AMD64, CUDA, optimized version"
@@ -0,0 +1,14 @@
build:
  template_file: test-win-cuda-opt-base.tyml
  dependencies:
    - "node-package-gpu"
    - "test-training_16k-linux-amd64-py36m-opt"
  test_model_task: "test-training_16k-linux-amd64-py36m-opt"
  system_setup:
    >
      ${system.sox_win} && ${nodejs.win.prep_13}
  args:
    tests_cmdline: "${system.homedir.win}/DeepSpeech/ds/taskcluster/tc-node-tests.sh 13.x 16k cuda"
  metadata:
    name: "DeepSpeech Windows AMD64 CUDA NodeJS MultiArch Package 13.x tests"
    description: "Testing DeepSpeech for Windows/AMD64 on NodeJS MultiArch Package v13.x, CUDA, optimized version"
@@ -0,0 +1,14 @@
build:
  template_file: test-win-cuda-opt-base.tyml
  dependencies:
    - "win-amd64-gpu-opt"
    - "test-training_16k-linux-amd64-py36m-opt"
  test_model_task: "test-training_16k-linux-amd64-py36m-opt"
  system_setup:
    >
      ${system.sox_win}
  args:
    tests_cmdline: "${system.homedir.win}/DeepSpeech/ds/taskcluster/tc-python-tests.sh 3.5.4:m 16k cuda"
  metadata:
    name: "DeepSpeech Windows AMD64 CUDA Python v3.5 tests"
    description: "Testing DeepSpeech for Windows/AMD64 on Python v3.5, CUDA, optimized version"
@@ -0,0 +1,14 @@
build:
  template_file: test-win-cuda-opt-base.tyml
  dependencies:
    - "win-amd64-gpu-opt"
    - "test-training_16k-linux-amd64-py36m-opt"
  test_model_task: "test-training_16k-linux-amd64-py36m-opt"
  system_setup:
    >
      ${system.sox_win}
  args:
    tests_cmdline: "${system.homedir.win}/DeepSpeech/ds/taskcluster/tc-python-tests.sh 3.6.8:m 16k cuda"
  metadata:
    name: "DeepSpeech Windows AMD64 CUDA Python v3.6 tests"
    description: "Testing DeepSpeech for Windows/AMD64 on Python v3.6, CUDA, optimized version"
@@ -0,0 +1,14 @@
build:
  template_file: test-win-cuda-opt-base.tyml
  dependencies:
    - "win-amd64-gpu-opt"
    - "test-training_16k-linux-amd64-py36m-opt"
  test_model_task: "test-training_16k-linux-amd64-py36m-opt"
  system_setup:
    >
      ${system.sox_win}
  args:
    tests_cmdline: "${system.homedir.win}/DeepSpeech/ds/taskcluster/tc-python-tests.sh 3.7.6:m 16k cuda"
  metadata:
    name: "DeepSpeech Windows AMD64 CUDA Python v3.7 tests"
    description: "Testing DeepSpeech for Windows/AMD64 on Python v3.7, CUDA, optimized version"
@@ -0,0 +1,14 @@
build:
  template_file: test-win-cuda-opt-base.tyml
  dependencies:
    - "win-amd64-gpu-opt"
    - "test-training_16k-linux-amd64-py36m-opt"
  test_model_task: "test-training_16k-linux-amd64-py36m-opt"
  system_setup:
    >
      ${system.sox_win}
  args:
    tests_cmdline: "${system.homedir.win}/DeepSpeech/ds/taskcluster/tc-python-tests.sh 3.8.1: 16k cuda"
  metadata:
    name: "DeepSpeech Windows AMD64 CUDA Python v3.8 tests"
    description: "Testing DeepSpeech for Windows/AMD64 on Python v3.8, CUDA, optimized version"
@@ -0,0 +1,80 @@
$if: '(event.event != "push") && (event.event != "tag")'
then:
  taskId: ${taskcluster.taskId}
  provisionerId: ${taskcluster.docker.provisionerId}
  workerType: ${taskcluster.docker.workerTypeCuda}
  taskGroupId: ${taskcluster.taskGroupId}
  schedulerId: ${taskcluster.schedulerId}
  dependencies:
    $map: { $eval: build.dependencies }
    each(b):
      $eval: as_slugid(b)
  created: { $fromNow: '0 sec' }
  deadline: { $fromNow: '1 day' }
  expires: { $fromNow: '7 days' }

  extra:
    github:
      { $eval: taskcluster.github_events.pull_request }

  payload:
    maxRunTime: { $eval: to_int(build.maxRunTime) }

    env:
      $let:
        training: { $eval: as_slugid(build.test_model_task) }
        win_amd64_build: { $eval: as_slugid("win-amd64-gpu-opt") }
      in:
        DEEPSPEECH_ARTIFACTS_ROOT: https://community-tc.services.mozilla.com/api/queue/v1/task/${win_amd64_build}/artifacts/public
        DEEPSPEECH_TEST_MODEL: https://community-tc.services.mozilla.com/api/queue/v1/task/${training}/artifacts/public/output_graph.pb
        DEEPSPEECH_PROD_MODEL: https://github.com/reuben/DeepSpeech/releases/download/v0.6.0-alpha.15/output_graph.pb
        DEEPSPEECH_PROD_MODEL_MMAP: https://github.com/reuben/DeepSpeech/releases/download/v0.6.0-alpha.15/output_graph.pbmm
        EXPECTED_TENSORFLOW_VERSION: "${build.tensorflow_git_desc}"
        TC_MSYS_VERSION: 'MSYS_NT-6.3'
        MSYS: 'winsymlinks:nativestrict'

    command:
      - >-
        "C:\Program Files\7-zip\7z.exe" x -txz -so msys2-base-x86_64.tar.xz |
        "C:\Program Files\7-zip\7z.exe" x -o%USERPROFILE% -ttar -aoa -si
      - .\msys64\usr\bin\bash.exe --login -cx "exit"
      - .\msys64\usr\bin\bash.exe --login -cx "pacman --noconfirm -Syu"
      - $let:
          extraSystemSetup: { $eval: strip(str(build.system_setup)) }
        in: >
          .\msys64\usr\bin\bash.exe --login -cxe "export LC_ALL=C &&
          export PATH=\"/c/builds/tc-workdir/msys64/usr/bin:/c/Python36:/c/Program Files/Git/bin:/c/Program Files/7-Zip/:/c/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0/bin/:$PATH\" &&
          export TASKCLUSTER_ARTIFACTS=\"$USERPROFILE/public\" &&
          export TASKCLUSTER_TASK_DIR=\"/c/builds/tc-workdir/\" &&
          export TASKCLUSTER_NODE_DIR=\"$(cygpath -w $TASKCLUSTER_TASK_DIR/bin)\" &&
          export TASKCLUSTER_TMP_DIR="$TASKCLUSTER_TASK_DIR/tmp" &&
          export PIP_DEFAULT_TIMEOUT=60 &&
          (mkdir $TASKCLUSTER_TASK_DIR || rm -fr $TASKCLUSTER_TASK_DIR/*) && cd $TASKCLUSTER_TASK_DIR &&
          env &&
          ln -s $USERPROFILE/msys64 $TASKCLUSTER_TASK_DIR/msys64 &&
          git clone --quiet ${event.head.repo.url} $TASKCLUSTER_TASK_DIR/DeepSpeech/ds/ &&
          cd $TASKCLUSTER_TASK_DIR/DeepSpeech/ds && git checkout --quiet ${event.head.sha} &&
          cd $TASKCLUSTER_TASK_DIR &&
          (mkdir pyenv-root/ && 7z x -so $USERPROFILE/pyenv.tar.gz | 7z x -opyenv-root/ -aoa -ttar -si ) &&
          pacman --noconfirm -R bsdtar &&
          pacman --noconfirm -S tar vim &&
          ${extraSystemSetup} &&
          /bin/bash ${build.args.tests_cmdline} ;
          export TASKCLUSTER_TASK_EXIT_CODE=$? &&
          cd $TASKCLUSTER_TASK_DIR/../ && rm -fr tc-workdir/ && exit $TASKCLUSTER_TASK_EXIT_CODE"

    mounts:
      - file: msys2-base-x86_64.tar.xz
        content:
          sha256: 4e799b5c3efcf9efcb84923656b7bcff16f75a666911abd6620ea8e5e1e9870c
          url: >-
            https://sourceforge.net/projects/msys2/files/Base/x86_64/msys2-base-x86_64-20180531.tar.xz/download
      - file: pyenv.tar.gz
        content:
          url: ${system.pyenv.win.url}

  metadata:
    name: ${build.metadata.name}
    description: ${build.metadata.description}
    owner: ${event.head.user.email}
    source: ${event.head.repo.url}
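One detail in the command block worth calling out: the test command is chained with ";" rather than "&&", and its status is captured in TASKCLUSTER_TASK_EXIT_CODE so the working directory can be removed even after a failure while the task still exits with the tests' result. A standalone sketch of that pattern (variable names stand in for the json-e substitutions used above):

    #!/bin/bash
    # run the tests, always clean up, then report the tests' own exit status
    /bin/bash "$TESTS_CMDLINE" ;               # ';' so cleanup runs even if the tests fail
    export TASKCLUSTER_TASK_EXIT_CODE=$? &&    # remember the tests' status
    cd "$TASKCLUSTER_TASK_DIR/../" && rm -fr tc-workdir/ &&
    exit $TASKCLUSTER_TASK_EXIT_CODE           # propagate it to the worker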
@@ -5,6 +5,7 @@ taskcluster:
    workerType: ci
    workerTypeKvm: kvm
    workerTypeWin: win-b
    workerTypeCuda: win-gpu
  dockerrpi3:
    provisionerId: proj-deepspeech
    workerType: ds-rpi3