Merge pull request #2749 from lissyx/tc-scripts
Explode tc-tests-utils into several smaller chunks
commit aa6f84ac56
@@ -21,7 +21,5 @@ build:
  nc_asset_name: 'native_client.tar.xz'
  args:
    tests_cmdline: ''
    convert_graphdef: ''
    benchmark_model_bin: ''
    tensorflow_git_desc: 'TensorFlow: v1.15.0-24-gceb46aa'
    test_model_task: ''
146  taskcluster/tc-all-utils.sh  Executable file
@@ -0,0 +1,146 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -xe
|
||||
|
||||
generic_download_tarxz()
|
||||
{
|
||||
target_dir=$1
|
||||
url=$2
|
||||
|
||||
if [ -z "${target_dir}" -o -z "${url}" ]; then
|
||||
echo "Empty name for target directory or URL:"
|
||||
echo " target_dir=${target_dir}"
|
||||
echo " url=${url}"
|
||||
exit 1
|
||||
fi;
|
||||
|
||||
mkdir -p ${target_dir} || true
|
||||
|
||||
${WGET} ${url} -O - | ${UNXZ} | ${TAR} -C ${target_dir} -xf -
|
||||
}
|
||||
|
||||
download_native_client_files()
|
||||
{
|
||||
generic_download_tarxz "$1" "${DEEPSPEECH_ARTIFACTS_ROOT}/native_client.tar.xz"
|
||||
}
|
||||
|
||||
set_ldc_sample_filename()
|
||||
{
|
||||
local _bitrate=$1
|
||||
|
||||
if [ -z "${_bitrate}" ]; then
|
||||
echo "Bitrate should not be empty"
|
||||
exit 1
|
||||
fi;
|
||||
|
||||
case "${_bitrate}" in
|
||||
8k)
|
||||
ldc93s1_sample_filename='LDC93S1_pcms16le_1_8000.wav'
|
||||
;;
|
||||
16k)
|
||||
ldc93s1_sample_filename='LDC93S1_pcms16le_1_16000.wav'
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
download_data()
|
||||
{
|
||||
${WGET} -P "${TASKCLUSTER_TMP_DIR}" "${model_source}"
|
||||
${WGET} -P "${TASKCLUSTER_TMP_DIR}" "${model_source_mmap}"
|
||||
cp ${DS_ROOT_TASK}/DeepSpeech/ds/data/smoke_test/*.wav ${TASKCLUSTER_TMP_DIR}/
|
||||
cp ${DS_ROOT_TASK}/DeepSpeech/ds/data/smoke_test/pruned_lm.scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer
|
||||
cp -R ${DS_ROOT_TASK}/DeepSpeech/ds/native_client/test ${TASKCLUSTER_TMP_DIR}/test_sources
|
||||
}
|
||||
|
||||
download_material()
|
||||
{
|
||||
target_dir=$1
|
||||
|
||||
download_native_client_files "${target_dir}"
|
||||
download_data
|
||||
|
||||
ls -hal ${TASKCLUSTER_TMP_DIR}/${model_name} ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} ${TASKCLUSTER_TMP_DIR}/LDC93S1*.wav
|
||||
}
|
||||
|
||||
maybe_install_xldd()
|
||||
{
|
||||
# -s required to avoid the noisy output like "Entering / Leaving directories"
|
||||
toolchain=$(make -s -C ${DS_DSDIR}/native_client/ TARGET=${SYSTEM_TARGET} TFDIR=${DS_TFDIR} print-toolchain)
|
||||
if [ ! -x "${toolchain}ldd" ]; then
|
||||
cp "${DS_DSDIR}/native_client/xldd" "${toolchain}ldd" && chmod +x "${toolchain}ldd"
|
||||
fi
|
||||
}
|
||||
|
||||
# Checks whether we run a patched version of bazel.
|
||||
# Patching is required to dump computeKey() parameters to .ckd files
|
||||
# See bazel.patch
|
||||
# Return 0 (success exit code) on patched version, 1 on release version
|
||||
is_patched_bazel()
|
||||
{
|
||||
bazel_version=$(bazel version | grep 'Build label:' | cut -d':' -f2)
|
||||
|
||||
bazel shutdown
|
||||
|
||||
if [ -z "${bazel_version}" ]; then
|
||||
return 0;
|
||||
else
|
||||
return 1;
|
||||
fi;
|
||||
}
|
||||
|
||||
verify_bazel_rebuild()
|
||||
{
|
||||
bazel_explain_file="$1"
|
||||
|
||||
if [ ! -f "${bazel_explain_file}" ]; then
|
||||
echo "No such explain file: ${bazel_explain_file}"
|
||||
exit 1
|
||||
fi;
|
||||
|
||||
mkdir -p ${TASKCLUSTER_ARTIFACTS} || true
|
||||
|
||||
cp ${DS_ROOT_TASK}/DeepSpeech/tf/bazel*.log ${TASKCLUSTER_ARTIFACTS}/
|
||||
|
||||
spurious_rebuilds=$(grep 'Executing action' "${bazel_explain_file}" | grep 'Compiling' | grep -v -E 'no entry in the cache|unconditional execution is requested|Executing genrule //native_client:workspace_status|Compiling native_client/workspace_status.cc|Linking native_client/libdeepspeech.so' | wc -l)
|
||||
if [ "${spurious_rebuilds}" -ne 0 ]; then
|
||||
echo "Bazel rebuilds some file it should not, please check."
|
||||
|
||||
if is_patched_bazel; then
|
||||
mkdir -p ${DS_ROOT_TASK}/DeepSpeech/ckd/ds ${DS_ROOT_TASK}/DeepSpeech/ckd/tf
|
||||
tar xf ${DS_ROOT_TASK}/DeepSpeech/bazel-ckd-tf.tar --strip-components=4 -C ${DS_ROOT_TASK}/DeepSpeech/ckd/ds/
|
||||
tar xf ${DS_ROOT_TASK}/DeepSpeech/bazel-ckd-ds.tar --strip-components=4 -C ${DS_ROOT_TASK}/DeepSpeech/ckd/tf/
|
||||
|
||||
echo "Making a diff between CKD files"
|
||||
mkdir -p ${TASKCLUSTER_ARTIFACTS}
|
||||
diff -urNw ${DS_ROOT_TASK}/DeepSpeech/ckd/tf/ ${DS_ROOT_TASK}/DeepSpeech/ckd/ds/ | tee ${TASKCLUSTER_ARTIFACTS}/ckd.diff
|
||||
|
||||
rm -fr ${DS_ROOT_TASK}/DeepSpeech/ckd/tf/ ${DS_ROOT_TASK}/DeepSpeech/ckd/ds/
|
||||
else
|
||||
echo "Cannot get CKD information from release, please use patched Bazel"
|
||||
fi;
|
||||
|
||||
exit 1
|
||||
fi;
|
||||
}
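For context, this is how the explain log that verify_bazel_rebuild() inspects gets produced. A condensed sketch only (flags abbreviated; the full invocation lives in do_bazel_build in tc-build-utils.sh below):

    bazel build -s --explain bazel_monolithic.log --verbose_explanations ${BAZEL_BUILD_FLAGS} ${BAZEL_TARGETS}
    verify_bazel_rebuild "${DS_ROOT_TASK}/DeepSpeech/tf/bazel_monolithic.log"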
|
||||
|
||||
# Should be called from context where Python virtualenv is set
|
||||
verify_ctcdecoder_url()
|
||||
{
|
||||
default_url=$(python util/taskcluster.py --decoder)
|
||||
echo "${default_url}" | grep -F "deepspeech.native_client.v${DS_VERSION}"
|
||||
rc_default_url=$?
|
||||
|
||||
tag_url=$(python util/taskcluster.py --decoder --branch 'v1.2.3')
|
||||
echo "${tag_url}" | grep -F "deepspeech.native_client.v1.2.3"
|
||||
rc_tag_url=$?
|
||||
|
||||
master_url=$(python util/taskcluster.py --decoder --branch 'master')
|
||||
echo "${master_url}" | grep -F "deepspeech.native_client.master"
|
||||
rc_master_url=$?
|
||||
|
||||
if [ ${rc_default_url} -eq 0 -a ${rc_tag_url} -eq 0 -a ${rc_master_url} -eq 0 ]; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi;
|
||||
}
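To show how the helpers above are meant to be chained, here is a minimal sketch of an assumed per-platform test script (not part of this diff; tc-tests-utils.sh remains the umbrella entry point that sources these split files):

    # assumed caller: a per-platform test script
    source $(dirname "$0")/tc-tests-utils.sh

    bitrate=$1
    set_ldc_sample_filename "${bitrate}"
    download_material "${TASKCLUSTER_TMP_DIR}/ds"
    export PATH=${TASKCLUSTER_TMP_DIR}/ds/:$PATH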
82  taskcluster/tc-all-vars.sh  Executable file
@@ -0,0 +1,82 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -xe
|
||||
|
||||
export OS=$(uname)
|
||||
if [ "${OS}" = "Linux" ]; then
|
||||
export DS_ROOT_TASK=${HOME}
|
||||
export SWIG_ROOT="${HOME}/ds-swig"
|
||||
fi;
|
||||
|
||||
if [ "${OS}" = "${TC_MSYS_VERSION}" ]; then
|
||||
export DS_ROOT_TASK=${TASKCLUSTER_TASK_DIR}
|
||||
export SWIG_ROOT="$(cygpath ${USERPROFILE})/ds-swig"
|
||||
export PLATFORM_EXE_SUFFIX=.exe
|
||||
fi;
|
||||
|
||||
if [ "${OS}" = "Darwin" ]; then
|
||||
export SWIG_ROOT="${TASKCLUSTER_ORIG_TASKDIR}/ds-swig"
|
||||
export DS_ROOT_TASK=${TASKCLUSTER_TASK_DIR}
|
||||
|
||||
# It seems chaining |export DYLD_LIBRARY_PATH=...| does not work, maybe
|
||||
# because of SIP? Who knows ...
|
||||
if [ ! -z "${EXTRA_ENV}" ]; then
|
||||
eval "export ${EXTRA_ENV}"
|
||||
fi;
|
||||
fi;
|
||||
|
||||
SWIG_BIN=swig${PLATFORM_EXE_SUFFIX}
|
||||
DS_SWIG_BIN=ds-swig${PLATFORM_EXE_SUFFIX}
|
||||
if [ -f "${SWIG_ROOT}/bin/${DS_SWIG_BIN}" ]; then
|
||||
export PATH=${SWIG_ROOT}/bin/:$PATH
|
||||
export SWIG_LIB="$(find ${SWIG_ROOT}/share/swig/ -type f -name "swig.swg" | xargs dirname)"
|
||||
# Symlink ds-swig as swig so plain swig invocations keep working
|
||||
if [ ! -L "${SWIG_ROOT}/bin/${SWIG_BIN}" ]; then
|
||||
ln -s ${DS_SWIG_BIN} ${SWIG_ROOT}/bin/${SWIG_BIN}
|
||||
fi;
|
||||
swig -version
|
||||
swig -swiglib
|
||||
fi;
|
||||
|
||||
export TASKCLUSTER_ARTIFACTS=${TASKCLUSTER_ARTIFACTS:-/tmp/artifacts}
|
||||
export TASKCLUSTER_TMP_DIR=${TASKCLUSTER_TMP_DIR:-/tmp}
|
||||
|
||||
export ANDROID_TMP_DIR=/data/local/tmp
|
||||
|
||||
mkdir -p ${TASKCLUSTER_TMP_DIR} || true
|
||||
|
||||
export DS_TFDIR=${DS_ROOT_TASK}/DeepSpeech/tf
|
||||
export DS_DSDIR=${DS_ROOT_TASK}/DeepSpeech/ds
|
||||
export DS_EXAMPLEDIR=${DS_ROOT_TASK}/DeepSpeech/examples
|
||||
|
||||
export DS_VERSION="$(cat ${DS_DSDIR}/VERSION)"
|
||||
|
||||
export ANDROID_SDK_HOME=${DS_ROOT_TASK}/DeepSpeech/Android/SDK/
|
||||
export ANDROID_NDK_HOME=${DS_ROOT_TASK}/DeepSpeech/Android/android-ndk-r18b/
|
||||
|
||||
WGET=${WGET:-"wget"}
|
||||
TAR=${TAR:-"tar"}
|
||||
XZ=${XZ:-"pixz -9"}
|
||||
UNXZ=${UNXZ:-"pixz -d"}
|
||||
|
||||
if [ "${OS}" = "${TC_MSYS_VERSION}" ]; then
|
||||
WGET=/usr/bin/wget.exe
|
||||
TAR=/usr/bin/tar.exe
|
||||
XZ="xz -9 -T0 -c -"
|
||||
UNXZ="xz -9 -T0 -d"
|
||||
fi
|
||||
|
||||
model_source="${DEEPSPEECH_TEST_MODEL}"
|
||||
model_name="$(basename "${model_source}")"
|
||||
model_name_mmap="$(basename -s ".pb" "${model_source}").pbmm"
|
||||
model_source_mmap="$(dirname "${model_source}")/${model_name_mmap}"
|
||||
|
||||
ldc93s1_sample_filename=''
|
||||
|
||||
SUPPORTED_PYTHON_VERSIONS=${SUPPORTED_PYTHON_VERSIONS:-3.5.8:ucs4 3.6.10:ucs4 3.7.6:ucs4 3.8.1:ucs4}
|
||||
|
||||
# When updating NodeJS / ElectronJS supported versions, do not forget to increment
|
||||
# deepspeech.node-gyp-cache.<X> in both `system.node_gyp_cache` (taskcluster/.shared.yml)
|
||||
# and route index (taskcluster/node-gyp-cache.yml) to ensure the cache is updated
|
||||
SUPPORTED_NODEJS_VERSIONS=${SUPPORTED_NODEJS_VERSIONS:-10.18.1 11.15.0 12.8.1 13.1.0}
|
||||
SUPPORTED_ELECTRONJS_VERSIONS=${SUPPORTED_ELECTRONJS_VERSIONS:-5.0.13 6.0.12 6.1.7 7.0.1 7.1.8}
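Since these matrices use ${VAR:-default} expansion, a task can narrow them by exporting the variable before the file is sourced. A hedged sketch, not part of this diff:

    # restrict a build to a single Python version
    export SUPPORTED_PYTHON_VERSIONS="3.7.6:ucs4"
    source $(dirname "$0")/tc-tests-utils.sh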
222  taskcluster/tc-android-utils.sh  Executable file
@@ -0,0 +1,222 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -xe
|
||||
|
||||
force_java_apk_x86_64()
|
||||
{
|
||||
cd ${DS_DSDIR}/native_client/java/
|
||||
cat <<EOF > libdeepspeech/gradle.properties
|
||||
ABI_FILTERS = x86_64
|
||||
EOF
|
||||
}
|
||||
|
||||
do_deepspeech_java_apk_build()
|
||||
{
|
||||
cd ${DS_DSDIR}
|
||||
|
||||
export ANDROID_HOME=${ANDROID_SDK_HOME}
|
||||
|
||||
all_tasks="$(curl -s https://community-tc.services.mozilla.com/api/queue/v1/task/${TASK_ID} | python -c 'import json; import sys; print(" ".join(json.loads(sys.stdin.read())["dependencies"]));')"
|
||||
|
||||
for dep in ${all_tasks}; do
|
||||
nc_arch="$(curl -s https://community-tc.services.mozilla.com/api/queue/v1/task/${dep} | python -c 'import json; import sys; print(json.loads(sys.stdin.read())["extra"]["nc_asset_name"])' | cut -d'.' -f2)"
|
||||
nc_dir=""
|
||||
|
||||
# if a dep is included that has no "nc_asset_name" then it will be empty, just skip
|
||||
# this is required for running test-apk-android-x86_64-opt because of the training dep
|
||||
if [ ! -z "${nc_arch}" ]; then
|
||||
if [ "${nc_arch}" = "arm64" ]; then
|
||||
nc_dir="arm64-v8a"
|
||||
fi;
|
||||
|
||||
if [ "${nc_arch}" = "armv7" ]; then
|
||||
nc_dir="armeabi-v7a"
|
||||
fi;
|
||||
|
||||
if [ "${nc_arch}" = "x86_64" ]; then
|
||||
nc_dir="x86_64"
|
||||
fi;
|
||||
|
||||
mkdir native_client/java/libdeepspeech/libs/${nc_dir}
|
||||
|
||||
curl -L https://community-tc.services.mozilla.com/api/queue/v1/task/${dep}/artifacts/public/native_client.tar.xz | tar -C native_client/java/libdeepspeech/libs/${nc_dir}/ -Jxvf - libdeepspeech.so
|
||||
fi;
|
||||
done;
|
||||
|
||||
make -C native_client/java/
|
||||
|
||||
make -C native_client/java/ maven-bundle
|
||||
}
|
||||
|
||||
android_run_tests()
|
||||
{
|
||||
cd ${DS_DSDIR}/native_client/java/
|
||||
|
||||
adb shell service list
|
||||
|
||||
adb shell ls -hal /data/local/tmp/test/
|
||||
|
||||
./gradlew --console=plain libdeepspeech:connectedAndroidTest
|
||||
}
|
||||
|
||||
android_sdk_accept_licenses()
|
||||
{
|
||||
pushd "${ANDROID_SDK_HOME}"
|
||||
yes | ./tools/bin/sdkmanager --licenses
|
||||
popd
|
||||
}
|
||||
|
||||
android_install_sdk()
|
||||
{
|
||||
if [ -z "${ANDROID_SDK_HOME}" ]; then
|
||||
echo "No Android SDK home available, aborting."
|
||||
exit 1
|
||||
fi;
|
||||
|
||||
mkdir -p "${ANDROID_SDK_HOME}" || true
|
||||
${WGET} -P "${TASKCLUSTER_TMP_DIR}" https://dl.google.com/android/repository/sdk-tools-linux-4333796.zip
|
||||
|
||||
pushd "${ANDROID_SDK_HOME}"
|
||||
unzip -qq "${TASKCLUSTER_TMP_DIR}/sdk-tools-linux-4333796.zip"
|
||||
popd
|
||||
|
||||
android_sdk_accept_licenses
|
||||
}
|
||||
|
||||
android_install_ndk()
|
||||
{
|
||||
if [ -z "${ANDROID_NDK_HOME}" ]; then
|
||||
echo "No Android NDK home available, aborting."
|
||||
exit 1
|
||||
fi;
|
||||
|
||||
${WGET} -P "${TASKCLUSTER_TMP_DIR}" https://dl.google.com/android/repository/android-ndk-r18b-linux-x86_64.zip
|
||||
|
||||
mkdir -p ${DS_ROOT_TASK}/DeepSpeech/Android/
|
||||
pushd ${DS_ROOT_TASK}/DeepSpeech/Android/
|
||||
unzip -qq "${TASKCLUSTER_TMP_DIR}/android-ndk-r18b-linux-x86_64.zip"
|
||||
popd
|
||||
}
|
||||
|
||||
android_setup_emulator()
|
||||
{
|
||||
android_install_sdk
|
||||
|
||||
if [ -z "${ANDROID_SDK_HOME}" ]; then
|
||||
echo "No Android SDK home available, aborting."
|
||||
exit 1
|
||||
fi;
|
||||
|
||||
if [ -z "$1" ]; then
|
||||
echo "No ARM flavor, please give one."
|
||||
exit 1
|
||||
fi;
|
||||
|
||||
flavor=$1
|
||||
api_level=${2:-android-25}
|
||||
|
||||
export PATH=${ANDROID_SDK_HOME}/tools/bin/:${ANDROID_SDK_HOME}/platform-tools/:$PATH
|
||||
export DS_BINARY_PREFIX="adb shell LD_LIBRARY_PATH=${ANDROID_TMP_DIR}/ds/ ${ANDROID_TMP_DIR}/ds/"
|
||||
|
||||
# minutes (2 minutes by default)
|
||||
export ADB_INSTALL_TIMEOUT=8
|
||||
|
||||
# Pipe yes in case of license being shown
|
||||
yes | sdkmanager --update
|
||||
yes | sdkmanager --install "emulator"
|
||||
|
||||
android_install_sdk_platform "${api_level}"
|
||||
|
||||
# Same, yes in case of license
|
||||
yes | sdkmanager --install "system-images;${api_level};google_apis;${flavor}"
|
||||
|
||||
android_sdk_accept_licenses
|
||||
|
||||
avdmanager create avd --name "ds-pixel" --device 17 --package "system-images;${api_level};google_apis;${flavor}"
|
||||
|
||||
# Use xvfb because:
|
||||
# > emulator: INFO: QtLogger.cpp:68: Warning: could not connect to display ((null):0, (null))
|
||||
|
||||
# -accel on would be needed otherwise it is too slow, but it requires KVM support to be exposed
|
||||
pushd ${ANDROID_SDK_HOME}
|
||||
xvfb-run ./tools/emulator -verbose -avd ds-pixel -no-skin -no-audio -no-window -no-boot-anim -accel off &
|
||||
emulator_rc=$?
|
||||
export ANDROID_DEVICE_EMULATOR=$!
|
||||
popd
|
||||
|
||||
if [ "${emulator_rc}" -ne 0 ]; then
|
||||
echo "Error starting Android emulator, aborting."
|
||||
exit 1
|
||||
fi;
|
||||
|
||||
adb wait-for-device
|
||||
|
||||
adb shell id
|
||||
adb shell cat /proc/cpuinfo
|
||||
|
||||
adb shell service list
|
||||
}
|
||||
|
||||
android_install_sdk_platform()
|
||||
{
|
||||
api_level=${1:-android-27}
|
||||
|
||||
if [ -z "${ANDROID_SDK_HOME}" ]; then
|
||||
echo "No Android SDK home available, aborting."
|
||||
exit 1
|
||||
fi;
|
||||
|
||||
export PATH=${ANDROID_SDK_HOME}/tools/bin/:${ANDROID_SDK_HOME}/platform-tools/:$PATH
|
||||
|
||||
# Pipe yes in case of license being shown
|
||||
yes | sdkmanager --update
|
||||
yes | sdkmanager --install "platform-tools"
|
||||
yes | sdkmanager --install "platforms;${api_level}"
|
||||
|
||||
android_sdk_accept_licenses
|
||||
}
|
||||
|
||||
android_wait_for_emulator()
|
||||
{
|
||||
while [ "${boot_completed}" != "1" ]; do
|
||||
sleep 15
|
||||
boot_completed=$(adb shell getprop sys.boot_completed | tr -d '\r')
|
||||
done
|
||||
}
|
||||
|
||||
android_setup_ndk_data()
|
||||
{
|
||||
adb shell mkdir ${ANDROID_TMP_DIR}/ds/
|
||||
adb push ${TASKCLUSTER_TMP_DIR}/ds/* ${ANDROID_TMP_DIR}/ds/
|
||||
|
||||
adb push \
|
||||
${TASKCLUSTER_TMP_DIR}/${model_name} \
|
||||
${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} \
|
||||
${ANDROID_TMP_DIR}/ds/
|
||||
}
|
||||
|
||||
android_setup_apk_data()
|
||||
{
|
||||
adb shell mkdir ${ANDROID_TMP_DIR}/test/
|
||||
|
||||
adb push \
|
||||
${TASKCLUSTER_TMP_DIR}/${model_name} \
|
||||
${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} \
|
||||
${TASKCLUSTER_TMP_DIR}/kenlm.scorer \
|
||||
${ANDROID_TMP_DIR}/test/
|
||||
}
|
||||
|
||||
android_stop_emulator()
|
||||
{
|
||||
if [ -z "${ANDROID_DEVICE_EMULATOR}" ]; then
|
||||
echo "No ANDROID_DEVICE_EMULATOR"
|
||||
exit 1
|
||||
fi;
|
||||
|
||||
# Gracefully stop
|
||||
adb shell reboot -p &
|
||||
|
||||
# Just in case, give it 30 seconds before force-killing
|
||||
sleep 30
|
||||
kill -9 ${ANDROID_DEVICE_EMULATOR} || true
|
||||
}
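Putting the helpers above together, an x86_64 APK test task would roughly run this sequence (a sketch of the assumed orchestration, not part of this diff):

    android_setup_emulator "x86_64" "android-25"
    android_wait_for_emulator
    android_setup_apk_data
    android_run_tests
    android_stop_emulator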
506  taskcluster/tc-asserts.sh  Executable file
@@ -0,0 +1,506 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -xe
|
||||
|
||||
strip() {
|
||||
echo "$(echo $1 | sed -e 's/^[[:space:]]+//' -e 's/[[:space:]]+$//')"
|
||||
}
|
||||
|
||||
# This verifies the exact inference result
|
||||
assert_correct_inference()
|
||||
{
|
||||
phrase=$(strip "$1")
|
||||
expected=$(strip "$2")
|
||||
status=$3
|
||||
|
||||
if [ "$status" -ne "0" ]; then
|
||||
case "$(cat ${TASKCLUSTER_TMP_DIR}/stderr)" in
|
||||
*"incompatible with minimum version"*)
|
||||
echo "Prod model too old for client, skipping test."
|
||||
return 0
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Client failed to run:"
|
||||
cat ${TASKCLUSTER_TMP_DIR}/stderr
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
if [ -z "${phrase}" -o -z "${expected}" ]; then
|
||||
echo "One or more empty strings:"
|
||||
echo "phrase: <${phrase}>"
|
||||
echo "expected: <${expected}>"
|
||||
return 1
|
||||
fi;
|
||||
|
||||
if [ "${phrase}" = "${expected}" ]; then
|
||||
echo "Proper output has been produced:"
|
||||
echo "${phrase}"
|
||||
return 0
|
||||
else
|
||||
echo "!! Non matching output !!"
|
||||
echo "got: <${phrase}>"
|
||||
echo "xxd:"; echo "${phrase}" | xxd
|
||||
echo "-------------------"
|
||||
echo "expected: <${expected}>"
|
||||
echo "xxd:"; echo "${expected}" | xxd
|
||||
return 1
|
||||
fi;
|
||||
}
|
||||
|
||||
# This verifies that ${expected} is contained within ${phrase}
|
||||
assert_working_inference()
|
||||
{
|
||||
phrase=$1
|
||||
expected=$2
|
||||
status=$3
|
||||
|
||||
if [ -z "${phrase}" -o -z "${expected}" ]; then
|
||||
echo "One or more empty strings:"
|
||||
echo "phrase: <${phrase}>"
|
||||
echo "expected: <${expected}>"
|
||||
return 1
|
||||
fi;
|
||||
|
||||
if [ "$status" -ne "0" ]; then
|
||||
case "$(cat ${TASKCLUSTER_TMP_DIR}/stderr)" in
|
||||
*"incompatible with minimum version"*)
|
||||
echo "Prod model too old for client, skipping test."
|
||||
return 0
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Client failed to run:"
|
||||
cat ${TASKCLUSTER_TMP_DIR}/stderr
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
case "${phrase}" in
|
||||
*${expected}*)
|
||||
echo "Proper output has been produced:"
|
||||
echo "${phrase}"
|
||||
return 0
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "!! Non matching output !!"
|
||||
echo "got: <${phrase}>"
|
||||
echo "xxd:"; echo "${phrase}" | xxd
|
||||
echo "-------------------"
|
||||
echo "expected: <${expected}>"
|
||||
echo "xxd:"; echo "${expected}" | xxd
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
assert_shows_something()
|
||||
{
|
||||
stderr=$1
|
||||
expected=$2
|
||||
|
||||
if [ -z "${stderr}" -o -z "${expected}" ]; then
|
||||
echo "One or more empty strings:"
|
||||
echo "stderr: <${stderr}>"
|
||||
echo "expected: <${expected}>"
|
||||
return 1
|
||||
fi;
|
||||
|
||||
case "${stderr}" in
|
||||
*"incompatible with minimum version"*)
|
||||
echo "Prod model too old for client, skipping test."
|
||||
return 0
|
||||
;;
|
||||
|
||||
*${expected}*)
|
||||
echo "Proper output has been produced:"
|
||||
echo "${stderr}"
|
||||
return 0
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "!! Non matching output !!"
|
||||
echo "got: <${stderr}>"
|
||||
echo "xxd:"; echo "${stderr}" | xxd
|
||||
echo "-------------------"
|
||||
echo "expected: <${expected}>"
|
||||
echo "xxd:"; echo "${expected}" | xxd
|
||||
return 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
assert_not_present()
|
||||
{
|
||||
stderr=$1
|
||||
not_expected=$2
|
||||
|
||||
if [ -z "${stderr}" -o -z "${not_expected}" ]; then
|
||||
echo "One or more empty strings:"
|
||||
echo "stderr: <${stderr}>"
|
||||
echo "not_expected: <${not_expected}>"
|
||||
return 1
|
||||
fi;
|
||||
|
||||
case "${stderr}" in
|
||||
*${not_expected}*)
|
||||
echo "!! Not expected was present !!"
|
||||
echo "got: <${stderr}>"
|
||||
echo "xxd:"; echo "${stderr}" | xxd
|
||||
echo "-------------------"
|
||||
echo "not_expected: <${not_expected}>"
|
||||
echo "xxd:"; echo "${not_expected}" | xxd
|
||||
return 1
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Proper not expected output has not been produced:"
|
||||
echo "${stderr}"
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
assert_correct_ldc93s1()
|
||||
{
|
||||
assert_correct_inference "$1" "she had your dark suit in greasy wash water all year" "$2"
|
||||
}
|
||||
|
||||
assert_working_ldc93s1()
|
||||
{
|
||||
assert_working_inference "$1" "she had your dark suit in greasy wash water all year" "$2"
|
||||
}
|
||||
|
||||
assert_correct_ldc93s1_lm()
|
||||
{
|
||||
assert_correct_inference "$1" "she had your dark suit in greasy wash water all year" "$2"
|
||||
}
|
||||
|
||||
assert_working_ldc93s1_lm()
|
||||
{
|
||||
assert_working_inference "$1" "she had your dark suit in greasy wash water all year" "$2"
|
||||
}
|
||||
|
||||
assert_correct_multi_ldc93s1()
|
||||
{
|
||||
assert_shows_something "$1" "/${ldc93s1_sample_filename}%she had your dark suit in greasy wash water all year%" "$?"
|
||||
assert_shows_something "$1" "/LDC93S1_pcms16le_2_44100.wav%she had your dark suit in greasy wash water all year%" "$?"
|
||||
## 8k will output garbage anyway ...
|
||||
# assert_shows_something "$1" "/LDC93S1_pcms16le_1_8000.wav%she hayorasryrtl lyreasy asr watal w water all year%"
|
||||
}
|
||||
|
||||
assert_correct_ldc93s1_prodmodel()
|
||||
{
|
||||
if [ -z "$3" -o "$3" = "16k" ]; then
|
||||
assert_correct_inference "$1" "she had reduce and greasy wash water all year" "$2"
|
||||
fi;
|
||||
|
||||
if [ "$3" = "8k" ]; then
|
||||
assert_correct_inference "$1" "she had conduct suit in greasy wash water all year" "$2"
|
||||
fi;
|
||||
}
|
||||
|
||||
assert_correct_ldc93s1_prodtflitemodel()
|
||||
{
|
||||
if [ -z "$3" -o "$3" = "16k" ]; then
|
||||
assert_correct_inference "$1" "she had i do utterly was or all year" "$2"
|
||||
fi;
|
||||
|
||||
if [ "$3" = "8k" ]; then
|
||||
assert_correct_inference "$1" "she had up a out and we wash or a" "$2"
|
||||
fi;
|
||||
}
|
||||
|
||||
assert_correct_ldc93s1_prodmodel_stereo_44k()
|
||||
{
|
||||
if [ -z "$3" -o "$3" = "16k" ]; then
|
||||
assert_correct_inference "$1" "she had reduce and greasy wash water all year" "$2"
|
||||
fi;
|
||||
|
||||
if [ "$3" = "8k" ]; then
|
||||
assert_correct_inference "$1" "she had reduce and greasy wash water all year" "$2"
|
||||
fi;
|
||||
}
|
||||
|
||||
assert_correct_ldc93s1_prodtflitemodel_stereo_44k()
|
||||
{
|
||||
if [ -z "$3" -o "$3" = "16k" ]; then
|
||||
assert_correct_inference "$1" "she headed grey was or all year" "$2"
|
||||
fi;
|
||||
|
||||
if [ "$3" = "8k" ]; then
|
||||
assert_correct_inference "$1" "she headed grey was or all year" "$2"
|
||||
fi;
|
||||
}
|
||||
|
||||
assert_correct_warning_upsampling()
|
||||
{
|
||||
assert_shows_something "$1" "erratic speech recognition"
|
||||
}
|
||||
|
||||
assert_tensorflow_version()
|
||||
{
|
||||
assert_shows_something "$1" "${EXPECTED_TENSORFLOW_VERSION}"
|
||||
}
|
||||
|
||||
assert_deepspeech_version()
|
||||
{
|
||||
assert_not_present "$1" "DeepSpeech: unknown"
|
||||
}
|
||||
|
||||
check_tensorflow_version()
|
||||
{
|
||||
set +e
|
||||
ds_help=$(${DS_BINARY_PREFIX}deepspeech 2>&1 1>/dev/null)
|
||||
set -e
|
||||
|
||||
assert_tensorflow_version "${ds_help}"
|
||||
assert_deepspeech_version "${ds_help}"
|
||||
}
|
||||
|
||||
assert_deepspeech_runtime()
|
||||
{
|
||||
local expected_runtime=$1
|
||||
|
||||
set +e
|
||||
local ds_version=$(${DS_BINARY_PREFIX}deepspeech --version 2>&1)
|
||||
set -e
|
||||
|
||||
assert_shows_something "${ds_version}" "${expected_runtime}"
|
||||
}
|
||||
|
||||
check_runtime_nodejs()
|
||||
{
|
||||
assert_deepspeech_runtime "Runtime: Node"
|
||||
}
|
||||
|
||||
check_runtime_electronjs()
|
||||
{
|
||||
assert_deepspeech_runtime "Runtime: Electron"
|
||||
}
|
||||
|
||||
run_tflite_basic_inference_tests()
|
||||
{
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(${DS_BINARY_PREFIX}deepspeech --model ${DATA_TMP_DIR}/${model_name} --audio ${DATA_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
set -e
|
||||
assert_correct_ldc93s1 "${phrase_pbmodel_nolm}" "$?"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(${DS_BINARY_PREFIX}deepspeech --model ${DATA_TMP_DIR}/${model_name} --audio ${DATA_TMP_DIR}/${ldc93s1_sample_filename} --extended 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
set -e
|
||||
assert_correct_ldc93s1 "${phrase_pbmodel_nolm}" "$?"
|
||||
}
|
||||
|
||||
run_netframework_inference_tests()
|
||||
{
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(DeepSpeechConsole.exe --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
set -e
|
||||
assert_working_ldc93s1 "${phrase_pbmodel_nolm}" "$?"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(DeepSpeechConsole.exe --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --extended yes 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
set -e
|
||||
assert_working_ldc93s1 "${phrase_pbmodel_nolm}" "$?"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(DeepSpeechConsole.exe --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
set -e
|
||||
assert_working_ldc93s1 "${phrase_pbmodel_nolm}" "$?"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm=$(DeepSpeechConsole.exe --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
set -e
|
||||
assert_working_ldc93s1_lm "${phrase_pbmodel_withlm}" "$?"
|
||||
}
|
||||
|
||||
run_electronjs_inference_tests()
|
||||
{
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
set -e
|
||||
assert_working_ldc93s1 "${phrase_pbmodel_nolm}" "$?"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --extended 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
set -e
|
||||
assert_working_ldc93s1 "${phrase_pbmodel_nolm}" "$?"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
set -e
|
||||
assert_working_ldc93s1 "${phrase_pbmodel_nolm}" "$?"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
set -e
|
||||
assert_working_ldc93s1_lm "${phrase_pbmodel_withlm}" "$?"
|
||||
}
|
||||
|
||||
run_basic_inference_tests()
|
||||
{
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1 "${phrase_pbmodel_nolm}" "$status"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --extended 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1 "${phrase_pbmodel_nolm}" "$status"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_nolm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1 "${phrase_pbmodel_nolm}" "$status"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_lm "${phrase_pbmodel_withlm}" "$status"
|
||||
}
|
||||
|
||||
run_all_inference_tests()
|
||||
{
|
||||
run_basic_inference_tests
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_nolm_stereo_44k=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_2_44100.wav 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1 "${phrase_pbmodel_nolm_stereo_44k}" "$status"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm_stereo_44k=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_2_44100.wav 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_lm "${phrase_pbmodel_withlm_stereo_44k}" "$status"
|
||||
|
||||
# Run down-sampling warning test only when we actually perform downsampling
|
||||
if [ "${ldc93s1_sample_filename}" != "LDC93S1_pcms16le_1_8000.wav" ]; then
|
||||
set +e
|
||||
phrase_pbmodel_nolm_mono_8k=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_1_8000.wav 2>&1 1>/dev/null)
|
||||
set -e
|
||||
assert_correct_warning_upsampling "${phrase_pbmodel_nolm_mono_8k}"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm_mono_8k=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_1_8000.wav 2>&1 1>/dev/null)
|
||||
set -e
|
||||
assert_correct_warning_upsampling "${phrase_pbmodel_withlm_mono_8k}"
|
||||
fi;
|
||||
}
|
||||
|
||||
run_prod_concurrent_stream_tests()
|
||||
{
|
||||
local _bitrate=$1
|
||||
|
||||
set +e
|
||||
output=$(python ${TASKCLUSTER_TMP_DIR}/test_sources/concurrent_streams.py \
|
||||
--model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} \
|
||||
--scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer \
|
||||
--audio1 ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_1_16000.wav \
|
||||
--audio2 ${TASKCLUSTER_TMP_DIR}/new-home-in-the-stars-16k.wav 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
|
||||
output1=$(echo "${output}" | head -n 1)
|
||||
output2=$(echo "${output}" | tail -n 1)
|
||||
|
||||
assert_correct_ldc93s1_prodmodel "${output1}" "${status}" "16k"
|
||||
assert_correct_inference "${output2}" "we must find a new home in the stars" "${status}"
|
||||
}
|
||||
|
||||
run_prod_inference_tests()
|
||||
{
|
||||
local _bitrate=$1
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_prodmodel "${phrase_pbmodel_withlm}" "$status" "${_bitrate}"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_prodmodel "${phrase_pbmodel_withlm}" "$status" "${_bitrate}"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm_stereo_44k=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_2_44100.wav 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_prodmodel_stereo_44k "${phrase_pbmodel_withlm_stereo_44k}" "$status" "${_bitrate}"
|
||||
|
||||
# Run down-sampling warning test only when we actually perform downsampling
|
||||
if [ "${ldc93s1_sample_filename}" != "LDC93S1_pcms16le_1_8000.wav" ]; then
|
||||
set +e
|
||||
phrase_pbmodel_withlm_mono_8k=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_1_8000.wav 2>&1 1>/dev/null)
|
||||
set -e
|
||||
assert_correct_warning_upsampling "${phrase_pbmodel_withlm_mono_8k}"
|
||||
fi;
|
||||
}
|
||||
|
||||
run_prodtflite_inference_tests()
|
||||
{
|
||||
local _bitrate=$1
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_prodtflitemodel "${phrase_pbmodel_withlm}" "$status" "${_bitrate}"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_prodtflitemodel "${phrase_pbmodel_withlm}" "$status" "${_bitrate}"
|
||||
|
||||
set +e
|
||||
phrase_pbmodel_withlm_stereo_44k=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_2_44100.wav 2>${TASKCLUSTER_TMP_DIR}/stderr)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_prodtflitemodel_stereo_44k "${phrase_pbmodel_withlm_stereo_44k}" "$status" "${_bitrate}"
|
||||
|
||||
# Run down-sampling warning test only when we actually perform downsampling
|
||||
if [ "${ldc93s1_sample_filename}" != "LDC93S1_pcms16le_1_8000.wav" ]; then
|
||||
set +e
|
||||
phrase_pbmodel_withlm_mono_8k=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/LDC93S1_pcms16le_1_8000.wav 2>&1 1>/dev/null)
|
||||
set -e
|
||||
assert_correct_warning_upsampling "${phrase_pbmodel_withlm_mono_8k}"
|
||||
fi;
|
||||
}
|
||||
|
||||
run_multi_inference_tests()
|
||||
{
|
||||
set +e -o pipefail
|
||||
multi_phrase_pbmodel_nolm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name} --audio ${TASKCLUSTER_TMP_DIR}/ 2>${TASKCLUSTER_TMP_DIR}/stderr | tr '\n' '%')
|
||||
status=$?
|
||||
set -e +o pipefail
|
||||
assert_correct_multi_ldc93s1 "${multi_phrase_pbmodel_nolm}" "$status"
|
||||
|
||||
set +e -o pipefail
|
||||
multi_phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/ 2>${TASKCLUSTER_TMP_DIR}/stderr | tr '\n' '%')
|
||||
status=$?
|
||||
set -e +o pipefail
|
||||
assert_correct_multi_ldc93s1 "${multi_phrase_pbmodel_withlm}" "$status"
|
||||
}
|
||||
|
||||
run_cpp_only_inference_tests()
|
||||
{
|
||||
set +e
|
||||
phrase_pbmodel_withlm_intermediate_decode=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --stream 1280 2>${TASKCLUSTER_TMP_DIR}/stderr | tail -n 1)
|
||||
status=$?
|
||||
set -e
|
||||
assert_correct_ldc93s1_lm "${phrase_pbmodel_withlm_intermediate_decode}" "$status"
|
||||
}
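These assertions and runners are consumed by the per-platform test scripts; a minimal sketch of the assumed pattern (bitrate passed on the command line, set_ldc_sample_filename and download_data come from tc-all-utils.sh):

    bitrate=$1
    set_ldc_sample_filename "${bitrate}"
    download_data
    run_prod_inference_tests "${bitrate}"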
374  taskcluster/tc-build-utils.sh  Executable file
@@ -0,0 +1,374 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -xe
|
||||
|
||||
do_deepspeech_python_build()
|
||||
{
|
||||
cd ${DS_DSDIR}
|
||||
|
||||
package_option=$1
|
||||
|
||||
unset PYTHON_BIN_PATH
|
||||
unset PYTHONPATH
|
||||
|
||||
if [ -d "${DS_ROOT_TASK}/pyenv.cache/" ]; then
|
||||
export PYENV_ROOT="${DS_ROOT_TASK}/pyenv.cache/DeepSpeech/.pyenv"
|
||||
else
|
||||
export PYENV_ROOT="${DS_ROOT_TASK}/DeepSpeech/.pyenv"
|
||||
fi;
|
||||
|
||||
export PATH_WITHOUT_PYENV=${PATH}
|
||||
export PATH="${PYENV_ROOT}/bin:$PATH"
|
||||
|
||||
install_pyenv "${PYENV_ROOT}"
|
||||
install_pyenv_virtualenv "$(pyenv root)/plugins/pyenv-virtualenv"
|
||||
|
||||
mkdir -p wheels
|
||||
|
||||
SETUP_FLAGS=""
|
||||
if [ "${package_option}" = "--cuda" ]; then
|
||||
SETUP_FLAGS="--project_name deepspeech-gpu"
|
||||
elif [ "${package_option}" = "--tflite" ]; then
|
||||
SETUP_FLAGS="--project_name deepspeech-tflite"
|
||||
fi
|
||||
|
||||
for pyver_conf in ${SUPPORTED_PYTHON_VERSIONS}; do
|
||||
pyver=$(echo "${pyver_conf}" | cut -d':' -f1)
|
||||
pyconf=$(echo "${pyver_conf}" | cut -d':' -f2)
|
||||
|
||||
pyalias="${pyver}_${pyconf}"
|
||||
|
||||
export NUMPY_BUILD_VERSION="==1.7.0"
|
||||
export NUMPY_DEP_VERSION=">=1.7.0"
|
||||
|
||||
maybe_ssl102_py37 ${pyver}
|
||||
|
||||
maybe_numpy_min_version_winamd64 ${pyver}
|
||||
|
||||
LD_LIBRARY_PATH=${PY37_LDPATH}:$LD_LIBRARY_PATH \
|
||||
PYTHON_CONFIGURE_OPTS="--enable-unicode=${pyconf} ${PY37_OPENSSL}" \
|
||||
pyenv_install ${pyver} ${pyalias}
|
||||
|
||||
setup_pyenv_virtualenv "${pyalias}" "deepspeech"
|
||||
virtualenv_activate "${pyalias}" "deepspeech"
|
||||
|
||||
# Set LD path because python ssl might require it
|
||||
LD_LIBRARY_PATH=${PY37_LDPATH}:$LD_LIBRARY_PATH \
|
||||
EXTRA_CFLAGS="${EXTRA_LOCAL_CFLAGS}" \
|
||||
EXTRA_LDFLAGS="${EXTRA_LOCAL_LDFLAGS}" \
|
||||
EXTRA_LIBS="${EXTRA_LOCAL_LIBS}" \
|
||||
make -C native_client/python/ \
|
||||
TARGET=${SYSTEM_TARGET} \
|
||||
RASPBIAN=${SYSTEM_RASPBIAN} \
|
||||
TFDIR=${DS_TFDIR} \
|
||||
SETUP_FLAGS="${SETUP_FLAGS}" \
|
||||
bindings-clean bindings
|
||||
|
||||
cp native_client/python/dist/*.whl wheels
|
||||
|
||||
make -C native_client/python/ bindings-clean
|
||||
|
||||
unset NUMPY_BUILD_VERSION
|
||||
unset NUMPY_DEP_VERSION
|
||||
|
||||
virtualenv_deactivate "${pyalias}" "deepspeech"
|
||||
done;
|
||||
|
||||
# Reset PATH: if virtualenv_deactivate does not call "pyenv uninstall ${version}",
# we would get a stale python2 in PATH that blocks NodeJS builds
|
||||
export PATH=${PATH_WITHOUT_PYENV}
|
||||
}
|
||||
|
||||
do_deepspeech_decoder_build()
|
||||
{
|
||||
cd ${DS_DSDIR}
|
||||
|
||||
unset PYTHON_BIN_PATH
|
||||
unset PYTHONPATH
|
||||
|
||||
if [ -d "${DS_ROOT_TASK}/pyenv.cache/" ]; then
|
||||
export PYENV_ROOT="${DS_ROOT_TASK}/pyenv.cache/DeepSpeech/.pyenv"
|
||||
else
|
||||
export PYENV_ROOT="${DS_ROOT_TASK}/DeepSpeech/.pyenv"
|
||||
fi;
|
||||
|
||||
export PATH="${PYENV_ROOT}/bin:$PATH"
|
||||
|
||||
install_pyenv "${PYENV_ROOT}"
|
||||
install_pyenv_virtualenv "$(pyenv root)/plugins/pyenv-virtualenv"
|
||||
|
||||
mkdir -p wheels
|
||||
|
||||
for pyver_conf in ${SUPPORTED_PYTHON_VERSIONS}; do
|
||||
pyver=$(echo "${pyver_conf}" | cut -d':' -f1)
|
||||
pyconf=$(echo "${pyver_conf}" | cut -d':' -f2)
|
||||
|
||||
pyalias="${pyver}_${pyconf}"
|
||||
|
||||
export NUMPY_BUILD_VERSION="==1.7.0"
|
||||
export NUMPY_DEP_VERSION=">=1.7.0"
|
||||
|
||||
maybe_ssl102_py37 ${pyver}
|
||||
|
||||
LD_LIBRARY_PATH=${PY37_LDPATH}:$LD_LIBRARY_PATH \
|
||||
PYTHON_CONFIGURE_OPTS="--enable-unicode=${pyconf} ${PY37_OPENSSL}" \
|
||||
pyenv_install ${pyver} "${pyalias}"
|
||||
|
||||
setup_pyenv_virtualenv "${pyalias}" "deepspeech"
|
||||
virtualenv_activate "${pyalias}" "deepspeech"
|
||||
|
||||
# Set LD path because python ssl might require it
|
||||
LD_LIBRARY_PATH=${PY37_LDPATH}:$LD_LIBRARY_PATH \
|
||||
EXTRA_CFLAGS="${EXTRA_LOCAL_CFLAGS}" \
|
||||
EXTRA_LDFLAGS="${EXTRA_LOCAL_LDFLAGS}" \
|
||||
EXTRA_LIBS="${EXTRA_LOCAL_LIBS}" \
|
||||
make -C native_client/ctcdecode/ \
|
||||
TARGET=${SYSTEM_TARGET} \
|
||||
RASPBIAN=${SYSTEM_RASPBIAN} \
|
||||
TFDIR=${DS_TFDIR} \
|
||||
bindings
|
||||
|
||||
cp native_client/ctcdecode/dist/*.whl wheels
|
||||
|
||||
make -C native_client/ctcdecode clean-keep-common
|
||||
|
||||
unset NUMPY_BUILD_VERSION
|
||||
unset NUMPY_DEP_VERSION
|
||||
|
||||
virtualenv_deactivate "${pyalias}" "deepspeech"
|
||||
done;
|
||||
|
||||
# Reset PATH: if virtualenv_deactivate does not call "pyenv uninstall ${version}",
# we would get a stale python2 in PATH that blocks NodeJS builds
|
||||
export PATH=${PATH_WITHOUT_PYENV}
|
||||
}
|
||||
|
||||
do_deepspeech_nodejs_build()
|
||||
{
|
||||
rename_to_gpu=$1
|
||||
|
||||
npm update
|
||||
|
||||
# Python 2.7 is required for node-pre-gyp; we only need to force it into PATH
# on Windows
|
||||
if [ "${OS}" = "${TC_MSYS_VERSION}" ]; then
|
||||
NPM_ROOT=$(cygpath -u "$(npm root)")
|
||||
PYTHON27=":/c/Python27"
|
||||
# node-gyp@5.x behaves erratically with VS2015 and MSBuild.exe detection
|
||||
npm install node-gyp@4.x node-pre-gyp
|
||||
else
|
||||
NPM_ROOT="$(npm root)"
|
||||
npm install node-gyp@5.x node-pre-gyp
|
||||
fi
|
||||
|
||||
export PATH="$NPM_ROOT/.bin/${PYTHON27}:$PATH"
|
||||
|
||||
for node in ${SUPPORTED_NODEJS_VERSIONS}; do
|
||||
EXTRA_CFLAGS="${EXTRA_LOCAL_CFLAGS}" EXTRA_LDFLAGS="${EXTRA_LOCAL_LDFLAGS}" EXTRA_LIBS="${EXTRA_LOCAL_LIBS}" make -C native_client/javascript \
|
||||
TARGET=${SYSTEM_TARGET} \
|
||||
RASPBIAN=${SYSTEM_RASPBIAN} \
|
||||
TFDIR=${DS_TFDIR} \
|
||||
NODE_ABI_TARGET=--target=$node \
|
||||
clean node-wrapper
|
||||
done;
|
||||
|
||||
for electron in ${SUPPORTED_ELECTRONJS_VERSIONS}; do
|
||||
EXTRA_CFLAGS="${EXTRA_LOCAL_CFLAGS}" EXTRA_LDFLAGS="${EXTRA_LOCAL_LDFLAGS}" EXTRA_LIBS="${EXTRA_LOCAL_LIBS}" make -C native_client/javascript \
|
||||
TARGET=${SYSTEM_TARGET} \
|
||||
RASPBIAN=${SYSTEM_RASPBIAN} \
|
||||
TFDIR=${DS_TFDIR} \
|
||||
NODE_ABI_TARGET=--target=$electron \
|
||||
NODE_DIST_URL=--disturl=https://electronjs.org/headers \
|
||||
NODE_RUNTIME=--runtime=electron \
|
||||
clean node-wrapper
|
||||
done;
|
||||
|
||||
if [ "${rename_to_gpu}" = "--cuda" ]; then
|
||||
make -C native_client/javascript clean npm-pack PROJECT_NAME=deepspeech-gpu
|
||||
else
|
||||
make -C native_client/javascript clean npm-pack
|
||||
fi
|
||||
|
||||
tar -czf native_client/javascript/wrapper.tar.gz \
|
||||
-C native_client/javascript/ lib/
|
||||
}
|
||||
|
||||
do_deepspeech_npm_package()
|
||||
{
|
||||
package_option=$1
|
||||
|
||||
cd ${DS_DSDIR}
|
||||
|
||||
npm update
|
||||
|
||||
# Python 2.7 is required for node-pre-gyp; we only need to force it into PATH
# on Windows
|
||||
if [ "${OS}" = "${TC_MSYS_VERSION}" ]; then
|
||||
NPM_ROOT=$(cygpath -u "$(npm root)")
|
||||
PYTHON27=":/c/Python27"
|
||||
# node-gyp@5.x behaves erratically with VS2015 and MSBuild.exe detection
|
||||
npm install node-gyp@4.x node-pre-gyp
|
||||
else
|
||||
NPM_ROOT="$(npm root)"
|
||||
npm install node-gyp@5.x node-pre-gyp
|
||||
fi
|
||||
|
||||
export PATH="$NPM_ROOT/.bin/$PYTHON27:$PATH"
|
||||
|
||||
all_tasks="$(curl -s https://community-tc.services.mozilla.com/api/queue/v1/task/${TASK_ID} | python -c 'import json; import sys; print(" ".join(json.loads(sys.stdin.read())["dependencies"]));')"
|
||||
|
||||
for dep in ${all_tasks}; do
|
||||
curl -L https://community-tc.services.mozilla.com/api/queue/v1/task/${dep}/artifacts/public/wrapper.tar.gz | tar -C native_client/javascript -xzvf -
|
||||
done;
|
||||
|
||||
if [ "${package_option}" = "--cuda" ]; then
|
||||
make -C native_client/javascript clean npm-pack PROJECT_NAME=deepspeech-gpu
|
||||
elif [ "${package_option}" = "--tflite" ]; then
|
||||
make -C native_client/javascript clean npm-pack PROJECT_NAME=deepspeech-tflite
|
||||
else
|
||||
make -C native_client/javascript clean npm-pack
|
||||
fi
|
||||
}
|
||||
|
||||
do_bazel_build()
|
||||
{
|
||||
cd ${DS_ROOT_TASK}/DeepSpeech/tf
|
||||
eval "export ${BAZEL_ENV_FLAGS}"
|
||||
|
||||
if is_patched_bazel; then
|
||||
find ${DS_ROOT_TASK}/DeepSpeech/tf/bazel-out/ -iname "*.ckd" | tar -cf ${DS_ROOT_TASK}/DeepSpeech/bazel-ckd-tf.tar -T -
|
||||
fi;
|
||||
|
||||
bazel ${BAZEL_OUTPUT_USER_ROOT} build \
|
||||
-s --explain bazel_monolithic.log --verbose_explanations --experimental_strict_action_env --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic -c opt ${BAZEL_BUILD_FLAGS} ${BAZEL_TARGETS}
|
||||
|
||||
if is_patched_bazel; then
|
||||
find ${DS_ROOT_TASK}/DeepSpeech/tf/bazel-out/ -iname "*.ckd" | tar -cf ${DS_ROOT_TASK}/DeepSpeech/bazel-ckd-ds.tar -T -
|
||||
fi;
|
||||
|
||||
verify_bazel_rebuild "${DS_ROOT_TASK}/DeepSpeech/tf/bazel_monolithic.log"
|
||||
}
|
||||
|
||||
shutdown_bazel()
|
||||
{
|
||||
cd ${DS_ROOT_TASK}/DeepSpeech/tf
|
||||
bazel ${BAZEL_OUTPUT_USER_ROOT} shutdown
|
||||
}
|
||||
|
||||
do_deepspeech_binary_build()
|
||||
{
|
||||
cd ${DS_DSDIR}
|
||||
make -C native_client/ \
|
||||
TARGET=${SYSTEM_TARGET} \
|
||||
TFDIR=${DS_TFDIR} \
|
||||
RASPBIAN=${SYSTEM_RASPBIAN} \
|
||||
EXTRA_CFLAGS="${EXTRA_LOCAL_CFLAGS}" \
|
||||
EXTRA_LDFLAGS="${EXTRA_LOCAL_LDFLAGS}" \
|
||||
EXTRA_LIBS="${EXTRA_LOCAL_LIBS}" \
|
||||
deepspeech${PLATFORM_EXE_SUFFIX}
|
||||
}
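A condensed sketch of how a native-client build task is assumed to string these together (package_native_client is defined in tc-package.sh below):

    do_bazel_build
    shutdown_bazel
    do_deepspeech_binary_build
    package_native_client "native_client.tar.xz"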
|
||||
|
||||
do_deepspeech_ndk_build()
|
||||
{
|
||||
arch_abi=$1
|
||||
|
||||
cd ${DS_DSDIR}/native_client/
|
||||
|
||||
${ANDROID_NDK_HOME}/ndk-build \
|
||||
APP_PLATFORM=android-21 \
|
||||
APP_BUILD_SCRIPT=$(pwd)/Android.mk \
|
||||
NDK_PROJECT_PATH=$(pwd) \
|
||||
APP_STL=c++_shared \
|
||||
TFDIR=${DS_TFDIR} \
|
||||
TARGET_ARCH_ABI=${arch_abi}
|
||||
}
|
||||
|
||||
do_deepspeech_netframework_build()
|
||||
{
|
||||
cd ${DS_DSDIR}/native_client/dotnet
|
||||
|
||||
# Setup dependencies
|
||||
nuget install DeepSpeechConsole/packages.config -OutputDirectory packages/
|
||||
|
||||
MSBUILD="$(cygpath 'C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\MSBuild\15.0\Bin\MSBuild.exe')"
|
||||
|
||||
# We need MSYS2_ARG_CONV_EXCL='/' otherwise the '/' of CLI parameters gets mangled and disappears
|
||||
# We build the .NET Client for .NET Framework v4.5,v4.6,v4.7
|
||||
|
||||
MSYS2_ARG_CONV_EXCL='/' "${MSBUILD}" \
|
||||
DeepSpeechClient/DeepSpeechClient.csproj \
|
||||
/p:Configuration=Release \
|
||||
/p:Platform=x64 \
|
||||
/p:TargetFrameworkVersion="v4.5.2" \
|
||||
/p:OutputPath=bin/nuget/x64/v4.5
|
||||
|
||||
MSYS2_ARG_CONV_EXCL='/' "${MSBUILD}" \
|
||||
DeepSpeechClient/DeepSpeechClient.csproj \
|
||||
/p:Configuration=Release \
|
||||
/p:Platform=x64 \
|
||||
/p:TargetFrameworkVersion="v4.6" \
|
||||
/p:OutputPath=bin/nuget/x64/v4.6
|
||||
|
||||
MSYS2_ARG_CONV_EXCL='/' "${MSBUILD}" \
|
||||
DeepSpeechClient/DeepSpeechClient.csproj \
|
||||
/p:Configuration=Release \
|
||||
/p:Platform=x64 \
|
||||
/p:TargetFrameworkVersion="v4.7" \
|
||||
/p:OutputPath=bin/nuget/x64/v4.7
|
||||
|
||||
MSYS2_ARG_CONV_EXCL='/' "${MSBUILD}" \
|
||||
DeepSpeechConsole/DeepSpeechConsole.csproj \
|
||||
/p:Configuration=Release \
|
||||
/p:Platform=x64
|
||||
}
|
||||
|
||||
do_deepspeech_netframework_wpf_example_build()
|
||||
{
|
||||
cd ${DS_EXAMPLEDIR}/net_framework
|
||||
|
||||
# Setup dependencies
|
||||
nuget install DeepSpeechWPF/packages.config -OutputDirectory DeepSpeechWPF/packages/
|
||||
|
||||
MSBUILD="$(cygpath 'C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\MSBuild\15.0\Bin\MSBuild.exe')"
|
||||
|
||||
# We need MSYS2_ARG_CONV_EXCL='/' otherwise the '/' of CLI parameters gets mangled and disappears
|
||||
# Build WPF example
|
||||
MSYS2_ARG_CONV_EXCL='/' "${MSBUILD}" \
|
||||
DeepSpeechWPF/DeepSpeech.WPF.csproj \
|
||||
/p:Configuration=Release \
|
||||
/p:Platform=x64 \
|
||||
/p:OutputPath=bin/x64
|
||||
|
||||
}
|
||||
|
||||
do_nuget_build()
|
||||
{
|
||||
PROJECT_NAME=$1
|
||||
if [ -z "${PROJECT_NAME}" ]; then
|
||||
exit "Please call with a valid PROJECT_NAME"
|
||||
exit 1
|
||||
fi;
|
||||
|
||||
cd ${DS_DSDIR}/native_client/dotnet
|
||||
|
||||
cp ${DS_TFDIR}/bazel-bin/native_client/libdeepspeech.so nupkg/build
|
||||
|
||||
# We copy the generated clients for .NET into the Nuget framework dirs
|
||||
|
||||
mkdir -p nupkg/lib/net45/
|
||||
cp DeepSpeechClient/bin/nuget/x64/v4.5/DeepSpeechClient.dll nupkg/lib/net45/
|
||||
|
||||
mkdir -p nupkg/lib/net46/
|
||||
cp DeepSpeechClient/bin/nuget/x64/v4.6/DeepSpeechClient.dll nupkg/lib/net46/
|
||||
|
||||
mkdir -p nupkg/lib/net47/
|
||||
cp DeepSpeechClient/bin/nuget/x64/v4.7/DeepSpeechClient.dll nupkg/lib/net47/
|
||||
|
||||
PROJECT_VERSION=$(strip "${DS_VERSION}")
|
||||
sed \
|
||||
-e "s/\$NUPKG_ID/${PROJECT_NAME}/" \
|
||||
-e "s/\$NUPKG_VERSION/${PROJECT_VERSION}/" \
|
||||
nupkg/deepspeech.nuspec.in > nupkg/deepspeech.nuspec && cat nupkg/deepspeech.nuspec
|
||||
|
||||
nuget pack nupkg/deepspeech.nuspec
|
||||
}
64  taskcluster/tc-dotnet-utils.sh  Executable file
@@ -0,0 +1,64 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -xe
|
||||
|
||||
install_nuget()
|
||||
{
|
||||
PROJECT_NAME=$1
|
||||
if [ -z "${PROJECT_NAME}" ]; then
|
||||
exit "Please call with a valid PROJECT_NAME"
|
||||
exit 1
|
||||
fi;
|
||||
|
||||
nuget="${PROJECT_NAME}.${DS_VERSION}.nupkg"
|
||||
|
||||
export PATH=$PATH:$(cygpath ${ChocolateyInstall})/bin
|
||||
|
||||
mkdir -p "${TASKCLUSTER_TMP_DIR}/repo/"
|
||||
mkdir -p "${TASKCLUSTER_TMP_DIR}/ds/"
|
||||
|
||||
nuget_pkg_url=$(get_dep_nuget_pkg_url "${nuget}")
|
||||
console_pkg_url=$(get_dep_nuget_pkg_url "DeepSpeechConsole.exe")
|
||||
|
||||
${WGET} -O - "${nuget_pkg_url}" | gunzip > "${TASKCLUSTER_TMP_DIR}/${PROJECT_NAME}.${DS_VERSION}.nupkg"
|
||||
${WGET} -O - "${console_pkg_url}" | gunzip > "${TASKCLUSTER_TMP_DIR}/ds/DeepSpeechConsole.exe"
|
||||
|
||||
nuget sources add -Name repo -Source $(cygpath -w "${TASKCLUSTER_TMP_DIR}/repo/")
|
||||
|
||||
cd "${TASKCLUSTER_TMP_DIR}"
|
||||
nuget add $(cygpath -w "${TASKCLUSTER_TMP_DIR}/${nuget}") -source repo
|
||||
|
||||
cd "${TASKCLUSTER_TMP_DIR}/ds/"
|
||||
nuget list -Source repo -Prerelease
|
||||
nuget install ${PROJECT_NAME} -Source repo -Prerelease
|
||||
|
||||
ls -halR "${PROJECT_NAME}.${DS_VERSION}"
|
||||
|
||||
nuget install NAudio
|
||||
cp NAudio*/lib/net35/NAudio.dll ${TASKCLUSTER_TMP_DIR}/ds/
|
||||
cp ${PROJECT_NAME}.${DS_VERSION}/build/libdeepspeech.so ${TASKCLUSTER_TMP_DIR}/ds/
|
||||
cp ${PROJECT_NAME}.${DS_VERSION}/lib/net46/DeepSpeechClient.dll ${TASKCLUSTER_TMP_DIR}/ds/
|
||||
|
||||
ls -hal ${TASKCLUSTER_TMP_DIR}/ds/
|
||||
|
||||
export PATH=${TASKCLUSTER_TMP_DIR}/ds/:$PATH
|
||||
}
|
||||
|
||||
# Will inspect this task's dependencies for one that provides a matching NuGet package
|
||||
get_dep_nuget_pkg_url()
|
||||
{
|
||||
local deepspeech_pkg=$1
|
||||
local all_deps="$(curl -s https://community-tc.services.mozilla.com/api/queue/v1/task/${TASK_ID} | python -c 'import json; import sys; print(" ".join(json.loads(sys.stdin.read())["dependencies"]));')"
|
||||
|
||||
for dep in ${all_deps}; do
|
||||
local has_artifact=$(curl -s https://community-tc.services.mozilla.com/api/queue/v1/task/${dep}/artifacts | python -c 'import json; import sys; has_artifact = True in [ e["name"].find("'${deepspeech_pkg}'") > 0 for e in json.loads(sys.stdin.read())["artifacts"] ]; print(has_artifact)')
|
||||
if [ "${has_artifact}" = "True" ]; then
|
||||
echo "https://community-tc.services.mozilla.com/api/queue/v1/task/${dep}/artifacts/public/${deepspeech_pkg}"
|
||||
exit 0
|
||||
fi;
|
||||
done;
|
||||
|
||||
echo ""
|
||||
# This should not be reached, otherwise it means we could not find a matching NuGet package
|
||||
exit 1
|
||||
}
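A sketch of the assumed .NET test flow built on these helpers (download_data comes from tc-all-utils.sh, run_netframework_inference_tests from tc-asserts.sh):

    install_nuget "DeepSpeech"
    download_data
    run_netframework_inference_tests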
@@ -1,20 +0,0 @@
#!/bin/bash

set -xe

source $(dirname "$0")/tc-tests-utils.sh

model_source=${DEEPSPEECH_TEST_MODEL//.pb/.tflite}
model_name=$(basename "${model_source}")

download_benchmark_model "${TASKCLUSTER_TMP_DIR}/ds"

export PATH=${TASKCLUSTER_TMP_DIR}/ds/:$PATH

lite_benchmark_model \
  --graph=${TASKCLUSTER_TMP_DIR}/ds/${model_name} \
  --show_flops \
  --input_layer=input_node,previous_state_c,previous_state_h,input_samples \
  --input_layer_type=float,float,float,float \
  --input_layer_shape=1,16,19,26:1,100:1,100:512 \
  --output_layer=logits,new_state_c,new_state_h,mfccs
25  taskcluster/tc-node-utils.sh  Executable file
@@ -0,0 +1,25 @@
#!/bin/bash

set -xe

# Will inspect this task's dependencies for one that provides a matching npm package
get_dep_npm_pkg_url()
{
  local all_deps="$(curl -s https://community-tc.services.mozilla.com/api/queue/v1/task/${TASK_ID} | python -c 'import json; import sys; print(" ".join(json.loads(sys.stdin.read())["dependencies"]));')"

  # We try "deepspeech-tflite" first and if we don't find it we try "deepspeech"
  for pkg_basename in "deepspeech-tflite" "deepspeech"; do
    local deepspeech_pkg="${pkg_basename}-${DS_VERSION}.tgz"
    for dep in ${all_deps}; do
      local has_artifact=$(curl -s https://community-tc.services.mozilla.com/api/queue/v1/task/${dep}/artifacts | python -c 'import json; import sys; has_artifact = True in [ e["name"].find("'${deepspeech_pkg}'") > 0 for e in json.loads(sys.stdin.read())["artifacts"] ]; print(has_artifact)')
      if [ "${has_artifact}" = "True" ]; then
        echo "https://community-tc.services.mozilla.com/api/queue/v1/task/${dep}/artifacts/public/${deepspeech_pkg}"
        exit 0
      fi;
    done;
  done;

  echo ""
  # This should not be reached, otherwise it means we could not find a matching nodejs package
  exit 1
}
84  taskcluster/tc-package.sh  Executable file
@@ -0,0 +1,84 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -xe
|
||||
|
||||
package_native_client()
|
||||
{
|
||||
tensorflow_dir=${DS_TFDIR}
|
||||
deepspeech_dir=${DS_DSDIR}
|
||||
artifacts_dir=${TASKCLUSTER_ARTIFACTS}
|
||||
artifact_name=$1
|
||||
|
||||
if [ ! -d ${tensorflow_dir} -o ! -d ${deepspeech_dir} -o ! -d ${artifacts_dir} ]; then
|
||||
echo "Missing directory. Please check:"
|
||||
echo "tensorflow_dir=${tensorflow_dir}"
|
||||
echo "deepspeech_dir=${deepspeech_dir}"
|
||||
echo "artifacts_dir=${artifacts_dir}"
|
||||
exit 1
|
||||
fi;
|
||||
|
||||
if [ -z "${artifact_name}" ]; then
|
||||
echo "Please specify artifact name."
|
||||
fi;
|
||||
|
||||
${TAR} -cf - \
|
||||
-C ${tensorflow_dir}/bazel-bin/native_client/ libdeepspeech.so \
|
||||
-C ${tensorflow_dir}/bazel-bin/native_client/ libdeepspeech.so.if.lib \
|
||||
-C ${deepspeech_dir}/ LICENSE \
|
||||
-C ${deepspeech_dir}/native_client/ deepspeech${PLATFORM_EXE_SUFFIX} \
|
||||
-C ${deepspeech_dir}/native_client/ deepspeech.h \
|
||||
-C ${deepspeech_dir}/native_client/kenlm/ README.mozilla \
|
||||
| ${XZ} > "${artifacts_dir}/${artifact_name}"
|
||||
}
|
||||
|
||||
package_native_client_ndk()
|
||||
{
|
||||
deepspeech_dir=${DS_DSDIR}
|
||||
artifacts_dir=${TASKCLUSTER_ARTIFACTS}
|
||||
artifact_name=$1
|
||||
arch_abi=$2
|
||||
|
||||
if [ ! -d ${deepspeech_dir} -o ! -d ${artifacts_dir} ]; then
|
||||
echo "Missing directory. Please check:"
|
||||
echo "deepspeech_dir=${deepspeech_dir}"
|
||||
echo "artifacts_dir=${artifacts_dir}"
|
||||
exit 1
|
||||
fi;
|
||||
|
||||
if [ -z "${artifact_name}" ]; then
|
||||
echo "Please specify artifact name."
|
||||
fi;
|
||||
|
||||
if [ -z "${arch_abi}" ]; then
|
||||
echo "Please specify arch abi."
|
||||
fi;
|
||||
|
||||
tar -cf - \
|
||||
-C ${deepspeech_dir}/native_client/libs/${arch_abi}/ deepspeech \
|
||||
-C ${deepspeech_dir}/native_client/libs/${arch_abi}/ libdeepspeech.so \
|
||||
-C ${deepspeech_dir}/native_client/libs/${arch_abi}/ libc++_shared.so \
|
||||
-C ${deepspeech_dir}/native_client/ deepspeech.h \
|
||||
-C ${deepspeech_dir}/ LICENSE \
|
||||
-C ${deepspeech_dir}/native_client/kenlm/ README.mozilla \
|
||||
| pixz -9 > "${artifacts_dir}/${artifact_name}"
|
||||
}
|
||||
|
||||
package_libdeepspeech_as_zip()
|
||||
{
|
||||
tensorflow_dir=${DS_TFDIR}
|
||||
artifacts_dir=${TASKCLUSTER_ARTIFACTS}
|
||||
artifact_name=$1
|
||||
|
||||
if [ ! -d ${tensorflow_dir} -o ! -d ${artifacts_dir} ]; then
|
||||
echo "Missing directory. Please check:"
|
||||
echo "tensorflow_dir=${tensorflow_dir}"
|
||||
echo "artifacts_dir=${artifacts_dir}"
|
||||
exit 1
|
||||
fi;
|
||||
|
||||
if [ -z "${artifact_name}" ]; then
|
||||
echo "Please specify artifact name."
|
||||
fi;
|
||||
|
||||
zip -r9 --junk-paths "${artifacts_dir}/${artifact_name}" ${tensorflow_dir}/bazel-bin/native_client/libdeepspeech.so
|
||||
}
310  taskcluster/tc-py-utils.sh  Executable file
@@ -0,0 +1,310 @@
#!/bin/bash

set -xe

install_pyenv()
{
  if [ -z "${PYENV_ROOT}" ]; then
    echo "No PYENV_ROOT set";
    exit 1;
  fi;

  if [ "${OS}" = "${TC_MSYS_VERSION}" ]; then
    mkdir -p "${PYENV_ROOT}/versions/"
    return;
  fi

  # Allows updating local cache if required
  if [ ! -e "${PYENV_ROOT}/bin/pyenv" ]; then
    git clone --quiet https://github.com/pyenv/pyenv.git ${PYENV_ROOT}
  else
    pushd ${PYENV_ROOT}
    git fetch origin
    popd
  fi

  pushd ${PYENV_ROOT}
  git checkout --quiet 20a1f0cd7a3d2f95800d8e0d5863b4e98f25f4df
  popd

  if [ ! -d "${PYENV_ROOT}/plugins/pyenv-alias" ]; then
    git clone https://github.com/s1341/pyenv-alias.git ${PYENV_ROOT}/plugins/pyenv-alias
    pushd ${PYENV_ROOT}/plugins/pyenv-alias
    git checkout --quiet 8896eebb5b47389249b35d21d8a5e74aa33aff08
    popd
  fi

  eval "$(pyenv init -)"
}
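A minimal bootstrap sketch; the PYENV_ROOT value is illustrative, and on Windows (OS = TC_MSYS_VERSION) the function only creates the versions/ directory:

# Hypothetical setup, assuming DS_ROOT_TASK is the task's work directory:
export PYENV_ROOT="${DS_ROOT_TASK}/pyenv-root"
install_pyenv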

install_pyenv_virtualenv()
{
  local PYENV_VENV=$1

  if [ -z "${PYENV_VENV}" ]; then
    echo "No PYENV_VENV set";
    exit 1;
  fi;

  if [ "${OS}" = "${TC_MSYS_VERSION}" ]; then
    echo "No pyenv virtualenv support ; will install virtualenv locally from pip"
    return
  fi;

  if [ ! -e "${PYENV_VENV}/bin/pyenv-virtualenv" ]; then
    git clone --quiet https://github.com/pyenv/pyenv-virtualenv.git ${PYENV_VENV}
    pushd ${PYENV_VENV}
    git checkout --quiet 5419dc732066b035a28680475acd7b661c7c397d
    popd
  fi;

  eval "$(pyenv virtualenv-init -)"
}
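A hypothetical follow-up call installing the virtualenv plugin into the pyenv tree (the target path is illustrative):

install_pyenv_virtualenv "${PYENV_ROOT}/plugins/pyenv-virtualenv"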

setup_pyenv_virtualenv()
{
  local version=$1
  local name=$2

  if [ -z "${PYENV_ROOT}" ]; then
    echo "No PYENV_ROOT set";
    exit 1;
  fi;

  if [ "${OS}" = "${TC_MSYS_VERSION}" ]; then
    echo "should setup virtualenv ${name} for ${version}"
    mkdir ${PYENV_ROOT}/versions/${version}/envs
    PATH=${PYENV_ROOT}/versions/${version}/tools:${PYENV_ROOT}/versions/${version}/tools/Scripts:$PATH python -m venv ${PYENV_ROOT}/versions/${version}/envs/${name}
  else
    ls -hal "${PYENV_ROOT}/versions/"

    # There could be a symlink when re-using the cache on macOS
    # We don't care, let's just remove it
    if [ -L "${PYENV_ROOT}/versions/${name}" ]; then
      rm "${PYENV_ROOT}/versions/${name}"
    fi

    # Don't force-reinstall existing version
    if [ ! -f "${PYENV_ROOT}/versions/${version}/envs/${name}/bin/activate" ]; then
      pyenv virtualenv ${version} ${name}
    fi
  fi
}
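A hypothetical call creating a named environment on top of an already-installed interpreter; the version and name are illustrative:

setup_pyenv_virtualenv "3.7.6" "deepspeech-test"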

virtualenv_activate()
{
  local version=$1
  local name=$2

  if [ -z "${PYENV_ROOT}" ]; then
    echo "No PYENV_ROOT set";
    exit 1;
  fi;

  if [ "${OS}" = "${TC_MSYS_VERSION}" ]; then
    source ${PYENV_ROOT}/versions/${version}/envs/${name}/Scripts/activate
  else
    source ${PYENV_ROOT}/versions/${version}/envs/${name}/bin/activate
  fi
}

virtualenv_deactivate()
{
  local version=$1
  local name=$2

  if [ -z "${PYENV_ROOT}" ]; then
    echo "No PYENV_ROOT set";
    exit 1;
  fi;

  deactivate

  if [ "${OS}" = "${TC_MSYS_VERSION}" ]; then
    rm -fr ${PYENV_ROOT}/versions/${version}/
  else
    pyenv uninstall --force ${name}
  fi
}
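A hypothetical activate/run/deactivate sequence (note that virtualenv_deactivate also removes the environment); version and name values are illustrative:

virtualenv_activate "3.7.6" "deepspeech-test"
python --version
virtualenv_deactivate "3.7.6" "deepspeech-test"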

pyenv_install()
{
  local version=$1
  local version_alias=$2

  if [ -z "${version_alias}" ]; then
    echo "WARNING, no version_alias specified, please ensure call site is okay"
    version_alias=${version}
  fi;

  if [ -z "${PYENV_ROOT}" ]; then
    echo "No PYENV_ROOT set";
    exit 1;
  fi;

  if [ "${OS}" = "${TC_MSYS_VERSION}" ]; then
    PATH=$(cygpath ${ChocolateyInstall})/bin:$PATH nuget install python -Version ${version} -OutputDirectory ${PYENV_ROOT}/versions/

    mv ${PYENV_ROOT}/versions/python.${version} ${PYENV_ROOT}/versions/${version_alias}

    PY_TOOLS_DIR="$(cygpath -w ${PYENV_ROOT}/versions/${version_alias}/tools/)"
    TEMP=$(cygpath -w ${DS_ROOT_TASK}/tmp/) PATH=${PY_TOOLS_DIR}:$PATH python -m pip uninstall pip -y
    PATH=${PY_TOOLS_DIR}:$PATH python -m ensurepip

    pushd ${PYENV_ROOT}/versions/${version_alias}/tools/Scripts/
    ln -s pip3.exe pip.exe
    popd
  else
    # If there's already a matching directory, we should re-use it
    # otherwise, pyenv install will force-rebuild
    ls -hal "${PYENV_ROOT}/versions/${version_alias}/" || true
    if [ ! -d "${PYENV_ROOT}/versions/${version_alias}/" ]; then
      VERSION_ALIAS=${version_alias} pyenv install ${version}
    fi;
  fi
}
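A hypothetical install of a CPython build under an aliased directory name, matching the alias scheme produced by extract_python_versions further down:

pyenv_install "3.7.6" "3.7.6_ucs2"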

# Hack to extract Ubuntu 16.04's libssl 1.0.2 packages and use them during the
# local build of Python.
#
# This avoids a (risky) upgrade of the base system and lets a single task
# build all of the Python packages.
maybe_ssl102_py37()
{
  pyver=$1

  unset PY37_OPENSSL
  unset PY37_LDPATH
  unset PY37_SOURCE_PACKAGE

  ARCH=$(uname -m)

  case "${pyver}" in
    3.7*|3.8*)
      if [ "${OS}" = "Linux" -a "${ARCH}" = "x86_64" ]; then
        PY37_OPENSSL_DIR=${DS_ROOT_TASK}/ssl-xenial

        if [ -d "${PY37_OPENSSL_DIR}" ]; then
          rm -rf "${PY37_OPENSSL_DIR}"
        fi

        mkdir -p ${PY37_OPENSSL_DIR}
        ${WGET} -P ${TASKCLUSTER_TMP_DIR} \
          http://archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl-dev_1.0.2g-1ubuntu4.15_amd64.deb \
          http://archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.0.0_1.0.2g-1ubuntu4.15_amd64.deb

        for deb in ${TASKCLUSTER_TMP_DIR}/libssl*.deb; do
          dpkg -x ${deb} ${PY37_OPENSSL_DIR}
        done;

        # Python configure expects things to be under lib/
        mv ${PY37_OPENSSL_DIR}/usr/include/x86_64-linux-gnu/openssl/opensslconf.h ${PY37_OPENSSL_DIR}/usr/include/openssl/
        mv ${PY37_OPENSSL_DIR}/lib/x86_64-linux-gnu/lib* ${PY37_OPENSSL_DIR}/usr/lib/
        mv ${PY37_OPENSSL_DIR}/usr/lib/x86_64-linux-gnu/* ${PY37_OPENSSL_DIR}/usr/lib/
        ln -sfn libcrypto.so.1.0.0 ${PY37_OPENSSL_DIR}/usr/lib/libcrypto.so
        ln -sfn libssl.so.1.0.0 ${PY37_OPENSSL_DIR}/usr/lib/libssl.so

        export PY37_OPENSSL="--with-openssl=${PY37_OPENSSL_DIR}/usr"
        export PY37_LDPATH="${PY37_OPENSSL_DIR}/usr/lib/"
      fi;

      case "${pyver}" in
        3.7*)
          export NUMPY_BUILD_VERSION="==1.14.5"
          export NUMPY_DEP_VERSION=">=1.14.5"
          ;;
        3.8*)
          export NUMPY_BUILD_VERSION="==1.17.3"
          export NUMPY_DEP_VERSION=">=1.17.3"
          ;;
      esac
      ;;
  esac
}
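A hypothetical call before building CPython; on Linux/x86_64 it exports PY37_OPENSSL and PY37_LDPATH so the interpreter links against the extracted libssl 1.0.2 (the version string is illustrative):

maybe_ssl102_py37 "3.7.6"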

maybe_numpy_min_version_winamd64()
{
  local pyver=$1

  if [ "${OS}" != "${TC_MSYS_VERSION}" ]; then
    return;
  fi

  # We set >= and < to make sure we have no numpy incompatibilities
  # otherwise, `from deepspeech.impl` throws with "illegal instruction"
  case "${pyver}" in
    3.5*)
      export NUMPY_BUILD_VERSION="==1.11.0"
      export NUMPY_DEP_VERSION=">=1.11.0,<1.12.0"
      ;;
    3.6*)
      export NUMPY_BUILD_VERSION="==1.12.0"
      export NUMPY_DEP_VERSION=">=1.12.0,<1.14.5"
      ;;
    3.7*)
      export NUMPY_BUILD_VERSION="==1.14.5"
      export NUMPY_DEP_VERSION=">=1.14.5,<=1.17.0"
      ;;
    3.8*)
      export NUMPY_BUILD_VERSION="==1.17.3"
      export NUMPY_DEP_VERSION=">=1.17.3,<=1.17.3"
      ;;
  esac
}
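A hypothetical call on the Windows workers; it exports NUMPY_BUILD_VERSION and NUMPY_DEP_VERSION for the wheel build and is a no-op elsewhere (the version string is illustrative):

maybe_numpy_min_version_winamd64 "3.6.8"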

get_python_pkg_url()
{
  local pyver_pkg=$1
  local py_unicode_type=$2

  local pkgname=$3
  if [ -z "${pkgname}" ]; then
    pkgname="deepspeech"
  fi

  local root=$4
  if [ -z "${root}" ]; then
    root="${DEEPSPEECH_ARTIFACTS_ROOT}"
  fi

  local platform=$(python -c 'import sys; import platform; plat = platform.system().lower(); arch = platform.machine().lower(); plat = "manylinux1" if plat == "linux" and arch == "x86_64" else plat; plat = "macosx_10_10" if plat == "darwin" else plat; plat = "win" if plat == "windows" else plat; sys.stdout.write("%s_%s" % (plat, platform.machine().lower()));')
  local whl_ds_version="$(python -c 'from pkg_resources import parse_version; print(parse_version("'${DS_VERSION}'"))')"
  local deepspeech_pkg="${pkgname}-${whl_ds_version}-cp${pyver_pkg}-cp${pyver_pkg}${py_unicode_type}-${platform}.whl"

  echo "${root}/${deepspeech_pkg}"
}
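A hypothetical lookup of the wheel URL for a CPython 3.7 build with the "m" ABI tag, using the default "deepspeech" package name and DEEPSPEECH_ARTIFACTS_ROOT:

deepspeech_pkg_url=$(get_python_pkg_url "37" "m")
echo "${deepspeech_pkg_url}"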

extract_python_versions()
{
  # call extract_python_versions ${pyver_full} pyver pyver_pkg py_unicode_type pyconf pyalias
  local _pyver_full=$1

  if [ -z "${_pyver_full}" ]; then
    echo "No python version given, aborting."
    exit 1
  fi;

  local _pyver=$(echo "${_pyver_full}" | cut -d':' -f1)

  # 3.8.x => 38
  local _pyver_pkg=$(echo "${_pyver}" | cut -d'.' -f1,2 | tr -d '.')

  # UCS2 => narrow unicode
  # UCS4 => wide unicode
  local _py_unicode_type=$(echo "${_pyver_full}" | cut -d':' -f2)
  if [ "${_py_unicode_type}" = "m" ]; then
    local _pyconf="ucs2"
  elif [ "${_py_unicode_type}" = "mu" ]; then
    local _pyconf="ucs4"
  elif [ "${_py_unicode_type}" = "" ]; then # valid for Python 3.8
    local _pyconf="ucs4"
  fi;

  local _pyalias="${_pyver}_${_pyconf}"

  eval "${2}=${_pyver}"
  eval "${3}=${_pyver_pkg}"
  eval "${4}=${_py_unicode_type}"
  eval "${5}=${_pyconf}"
  eval "${6}=${_pyalias}"
}
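A hypothetical call splitting a version spec into its components; the trailing comment shows the values the parsing above would produce:

extract_python_versions "3.7.6:m" pyver pyver_pkg py_unicode_type pyconf pyalias
# pyver=3.7.6  pyver_pkg=37  py_unicode_type=m  pyconf=ucs2  pyalias=3.7.6_ucs2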

File diff suppressed because it is too large

@ -86,17 +86,15 @@ popd
cp /tmp/train/output_graph.pb ${TASKCLUSTER_ARTIFACTS}
cp /tmp/train_tflite/output_graph.tflite ${TASKCLUSTER_ARTIFACTS}

if [ ! -z "${CONVERT_GRAPHDEF_MEMMAPPED}" ]; then
convert_graphdef=$(basename "${CONVERT_GRAPHDEF_MEMMAPPED}")
wget -P "/tmp/" "${CONVERT_GRAPHDEF_MEMMAPPED}" && chmod +x "/tmp/${convert_graphdef}"
pushd ${HOME}/DeepSpeech/ds/
python util/taskcluster.py --source tensorflow --artifact convert_graphdef_memmapped_format --branch r1.15 --target /tmp/
popd

/tmp/${convert_graphdef} --in_graph=/tmp/train/output_graph.pb --out_graph=/tmp/train/output_graph.pbmm
cp /tmp/train/output_graph.pbmm ${TASKCLUSTER_ARTIFACTS}
fi;
/tmp/convert_graphdef_memmapped_format --in_graph=/tmp/train/output_graph.pb --out_graph=/tmp/train/output_graph.pbmm
cp /tmp/train/output_graph.pbmm ${TASKCLUSTER_ARTIFACTS}

pushd ${HOME}/DeepSpeech/ds/
time ./bin/run-tc-ldc93s1_checkpoint.sh
popd

deactivate
pyenv uninstall --force ${PYENV_NAME}

@ -36,8 +36,6 @@ then:
linux_amd64_tflite: { $eval: as_slugid("linux-amd64-tflite-opt") }
linux_amd64_ctc: { $eval: as_slugid("linux-amd64-ctc-opt") }
in:
CONVERT_GRAPHDEF_MEMMAPPED: ${build.convert_graphdef}
BENCHMARK_MODEL_BIN: ${build.benchmark_model_bin}
DEEPSPEECH_ARTIFACTS_ROOT: https://community-tc.services.mozilla.com/api/queue/v1/task/${linux_amd64_build}/artifacts/public
DEEPSPEECH_ARTIFACTS_TFLITE_ROOT: https://community-tc.services.mozilla.com/api/queue/v1/task/${linux_amd64_tflite}/artifacts/public
DEEPSPEECH_TEST_MODEL: https://community-tc.services.mozilla.com/api/queue/v1/task/${training}/artifacts/public/output_graph.pb

@ -1,11 +0,0 @@
build:
template_file: test-linux-opt-base.tyml
dependencies:
- "test-training_16k-linux-amd64-py36m-opt"
test_model_task: "test-training_16k-linux-amd64-py36m-opt"
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-lite_benchmark_model-ds-tests.sh"
benchmark_model_bin: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r1.15.ceb46aae5836a0f648a2c3da5942af2b7d1b98bf.cpu/artifacts/public/lite_benchmark_model"
metadata:
name: "DeepSpeech Linux AMD64 CPU TF Lite benchmark_model"
description: "Testing DeepSpeech TF Lite benchmark_model for Linux/AMD64, CPU only, optimized version"

@ -7,7 +7,6 @@ build:
apt-get -qq -y install ${python.packages_trusty.apt}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-train-tests.sh 3.5.5:m 16k"
convert_graphdef: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r1.15.ceb46aae5836a0f648a2c3da5942af2b7d1b98bf.cpu/artifacts/public/convert_graphdef_memmapped_format"
metadata:
name: "DeepSpeech Linux AMD64 CPU 16kHz training Py3.5"
description: "Training a DeepSpeech LDC93S1 model for Linux/AMD64 16kHz Python 3.5, CPU only, optimized version"

@ -7,7 +7,6 @@ build:
apt-get -qq -y install ${python.packages_trusty.apt}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-train-tests.sh 3.6.4:m 16k"
convert_graphdef: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r1.15.ceb46aae5836a0f648a2c3da5942af2b7d1b98bf.cpu/artifacts/public/convert_graphdef_memmapped_format"
metadata:
name: "DeepSpeech Linux AMD64 CPU 16kHz training Py3.6"
description: "Training a DeepSpeech LDC93S1 model for Linux/AMD64 16kHz Python 3.6, CPU only, optimized version"

@ -7,7 +7,6 @@ build:
apt-get -qq -y install ${python.packages_trusty.apt}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-train-tests.sh 3.6.4:m 8k"
convert_graphdef: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r1.15.ceb46aae5836a0f648a2c3da5942af2b7d1b98bf.cpu/artifacts/public/convert_graphdef_memmapped_format"
metadata:
name: "DeepSpeech Linux AMD64 CPU 8kHz training Py3.6"
description: "Training a DeepSpeech LDC93S1 model for Linux/AMD64 8kHz Python 3.6, CPU only, optimized version"