Merge pull request #3356 from lissyx/linux-valgrind

Linux valgrind
lissyx 2020-10-01 18:49:19 +02:00 committed by GitHub
commit dd4122a04a
48 changed files with 12185 additions and 26 deletions

7
ds_generic.supp Normal file

@ -0,0 +1,7 @@
{
libgomp_malloc
Memcheck:Leak
match-leak-kinds: reachable
fun:malloc
obj:/usr/lib/*/libgomp.so.1.0.0
}
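Suppression files like this one are handed to valgrind via --suppressions. A minimal illustration of such a run (binary, model and audio paths are placeholders, not the CI invocation):

  valgrind --leak-check=full --show-reachable=yes --error-limit=no \
    --suppressions=ds_generic.supp \
    ./deepspeech --model output_graph.pbmm --audio audio.wav -t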

10
ds_lib.supp Normal file

@ -0,0 +1,10 @@
{
deepspeech_tflite_error_reporter
Memcheck:Leak
match-leak-kinds: reachable
fun:_Znwm
fun:_ZN6tflite20DefaultErrorReporterEv
fun:_ZN16TFLiteModelState4initEPKc
fun:DS_CreateModel
fun:main
}
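The fun: frames above are mangled C++ symbols; they can be sanity-checked against a stack trace with c++filt, for example:

  echo '_ZN16TFLiteModelState4initEPKc' | c++filt
  # prints: TFLiteModelState::init(char const*)
  echo '_Znwm' | c++filt
  # prints: operator new(unsigned long)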

1356
ds_openfst.supp Normal file

File diff suppressed because it is too large

42
ds_sox.supp Normal file

@ -0,0 +1,42 @@
{
sox_effect_gain
Memcheck:Leak
match-leak-kinds: reachable
fun:malloc
fun:realloc
fun:lsx_realloc
fun:lsx_usage_lines
fun:lsx_gain_effect_fn
fun:sox_find_effect
fun:_Z14GetAudioBufferPKci
fun:_Z11ProcessFileP10ModelStatePKcb
fun:main
}
{
sox_effect_rate
Memcheck:Leak
match-leak-kinds: reachable
fun:malloc
fun:realloc
fun:lsx_realloc
fun:lsx_usage_lines
fun:lsx_rate_effect_fn
fun:sox_find_effect
fun:_Z14GetAudioBufferPKci
fun:_Z11ProcessFileP10ModelStatePKcb
fun:main
}
{
sox_effect_flanger
Memcheck:Leak
match-leak-kinds: reachable
fun:malloc
fun:realloc
fun:lsx_realloc
fun:lsx_usage_lines
fun:lsx_flanger_effect_fn
fun:sox_find_effect
fun:_Z14GetAudioBufferPKci
fun:_Z11ProcessFileP10ModelStatePKcb
fun:main
}


@ -38,6 +38,8 @@ int json_candidate_transcripts = 3;
int stream_size = 0;
int extended_stream_size = 0;
char* hot_words = NULL;
void PrintHelp(const char* bin)
@ -58,6 +60,7 @@ void PrintHelp(const char* bin)
"\t--json\t\t\t\tExtended output, shows word timings as JSON\n"
"\t--candidate_transcripts NUMBER\tNumber of candidate transcripts to include in JSON output\n"
"\t--stream size\t\t\tRun in stream mode, output intermediate results\n"
"\t--extended_stream size\t\t\tRun in stream mode using metadata output, output intermediate results\n"
"\t--hot_words\t\t\tHot-words and their boosts. Word:Boost pairs are comma-separated\n"
"\t--help\t\t\t\tShow help\n"
"\t--version\t\t\tPrint version and exits\n";
@ -82,6 +85,7 @@ bool ProcessArgs(int argc, char** argv)
{"json", no_argument, nullptr, 'j'},
{"candidate_transcripts", required_argument, nullptr, 150},
{"stream", required_argument, nullptr, 's'},
{"extended_stream", required_argument, nullptr, 'S'},
{"hot_words", required_argument, nullptr, 'w'},
{"version", no_argument, nullptr, 'v'},
{"help", no_argument, nullptr, 'h'},
@ -144,6 +148,10 @@ bool ProcessArgs(int argc, char** argv)
stream_size = atoi(optarg);
break;
case 'S':
extended_stream_size = atoi(optarg);
break;
case 'v':
has_versions = true;
break;
@ -172,7 +180,7 @@ bool ProcessArgs(int argc, char** argv)
return false;
}
if (stream_size < 0 || stream_size % 160 != 0) {
if ((stream_size < 0 || stream_size % 160 != 0) || (extended_stream_size < 0 || extended_stream_size % 160 != 0)) {
std::cout <<
"Stream buffer size must be multiples of 160\n";
return false;


@ -205,6 +205,38 @@ LocalDsSTT(ModelState* aCtx, const short* aBuffer, size_t aBufferSize,
DS_FreeString((char *) last);
}
res.string = DS_FinishStream(ctx);
} else if (extended_stream_size > 0) {
StreamingState* ctx;
int status = DS_CreateStream(aCtx, &ctx);
if (status != DS_ERR_OK) {
res.string = strdup("");
return res;
}
size_t off = 0;
const char *last = nullptr;
const char *prev = nullptr;
while (off < aBufferSize) {
size_t cur = aBufferSize - off > extended_stream_size ? extended_stream_size : aBufferSize - off;
DS_FeedAudioContent(ctx, aBuffer + off, cur);
off += cur;
prev = last;
const Metadata* result = DS_IntermediateDecodeWithMetadata(ctx, 1);
const char* partial = CandidateTranscriptToString(&result->transcripts[0]);
if (last == nullptr || strcmp(last, partial)) {
printf("%s\n", partial);
last = partial;
} else {
free((char *) partial);
}
if (prev != nullptr && prev != last) {
free((char *) prev);
}
DS_FreeMetadata((Metadata *)result);
}
const Metadata* result = DS_FinishStreamWithMetadata(ctx, 1);
res.string = CandidateTranscriptToString(&result->transcripts[0]);
DS_FreeMetadata((Metadata *)result);
free((char *) last);
} else {
res.string = DS_SpeechToText(aCtx, aBuffer, aBufferSize);
}
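Together with the argument handling above, this makes the extended-stream mode usable from the command line just like the existing --stream mode; a sketch of an invocation, with placeholder paths and a buffer size that must be a non-negative multiple of 160 samples:

  ./deepspeech --model output_graph.pbmm --scorer kenlm.scorer \
    --audio audio/sample.wav --extended_stream 320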

57
parse_valgrind_suppressions.sh Executable file

@ -0,0 +1,57 @@
#! /usr/bin/awk -f
# A script to extract the actual suppression info from the output of (for example) valgrind --leak-check=full --show-reachable=yes --error-limit=no --gen-suppressions=all ./minimal
# The desired bits are between ^{ and ^} (including the braces themselves).
# The combined output should either be appended to /usr/lib/valgrind/default.supp, or placed in a .supp of its own
# If the latter, either tell valgrind about it each time with --suppressions=<filename>, or add that line to ~/.valgrindrc
# NB This script uses the |& operator, which I believe is gawk-specific. In case of failure, check that you're using gawk rather than some other awk
# The script looks for suppressions. When it finds one it stores it temporarily in an array,
# and also feeds it line by line to the external app 'md5sum' which generates a unique checksum for it.
# The checksum is used as an index in a different array. If an item with that index already exists the suppression must be a duplicate and is discarded.
BEGIN { suppression=0; md5sum = "md5sum" }
# If the line begins with '{', it's the start of a suppression, so set the var and initialise things
/^{/ {
suppression=1; i=0; next
}
# If the line begins with '}', it's the end of a suppression
/^}/ {
if (suppression)
{ suppression=0;
close(md5sum, "to") # We've finished sending data to md5sum, so close that part of the pipe
ProcessInput() # Do the slightly-complicated stuff in functions
delete supparray # We don't want subsequent suppressions to append to it!
}
}
# Otherwise, it's a normal line. If we're inside a suppression, store it, and pipe it to md5sum. Otherwise it's cruft, so ignore it
{ if (suppression)
{
supparray[++i] = $0
print |& md5sum
}
}
function ProcessInput()
{
# Pipe the result from md5sum, then close it
md5sum |& getline result
close(md5sum)
# gawk can't cope with enormous ints like $result would be, so stringify it first by prefixing a definite string
resultstring = "prefix"result
if (! (resultstring in chksum_array) )
{ chksum_array[resultstring] = 0; # This checksum hasn't been seen before, so add it to the array
OutputSuppression() # and output the contents of the suppression
}
}
function OutputSuppression()
{
# A suppression is surrounded by '{' and '}'. Its data was stored line by line in the array
print "{"
for (n=1; n <= i; ++n)
{ print supparray[n] }
print "}"
}
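As the header comment warns, the script relies on gawk's |& coprocess operator, so it is safest to invoke gawk explicitly on systems where awk is mawk or busybox awk; the file names below are illustrative:

  valgrind --leak-check=full --show-reachable=yes --error-limit=no \
    --gen-suppressions=all --log-file=minimalraw.log ./minimal
  gawk -f parse_valgrind_suppressions.sh minimalraw.log > minimal.supp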


@ -11,6 +11,8 @@ training:
deepspeech:
packages_xenial:
apt: 'make build-essential gfortran git libblas-dev liblapack-dev libsox-dev libmagic-dev libgsm1-dev libltdl-dev libpng-dev python python-dev zlib1g-dev'
packages_bionic:
apt: 'apt-get -qq update && apt-get -qq -y install make build-essential gfortran git libblas-dev liblapack-dev libsox-dev libmagic-dev libgsm1-dev libltdl-dev libpng-dev python3 python3-dev python-is-python3 zlib1g-dev libbz2-dev liblzma-dev'
kenlm:
packages_xenial:
apt: 'apt-get -qq update && apt-get -qq -y install cmake realpath build-essential libboost-dev wget software-properties-common zlib1g-dev libbz2-dev liblzma-dev libboost-program-options-dev libboost-system-dev libboost-thread-dev libboost-test-dev'
@ -21,12 +23,19 @@ kenlm:
tensorflow:
packages_xenial:
apt: 'apt-get -qq update && apt-get -qq -y install realpath build-essential python-virtualenv python-dev python-pip libblas-dev liblapack-dev gfortran wget software-properties-common pixz zip zlib1g-dev unzip'
packages_bionic:
apt: 'apt-get -qq update && apt-get -qq -y install coreutils build-essential python3-virtualenv python3-dev python3-pip python-is-python3 libblas-dev liblapack-dev gfortran wget software-properties-common pixz zip zlib1g-dev unzip'
packages_win:
pacman: 'pacman --noconfirm -S patch unzip tar'
msys64: 'ln -s $USERPROFILE/msys64 $TASKCLUSTER_TASK_DIR/msys64'
valgrind:
packages_bionic:
apt: 'apt-get -qq update && apt-get -qq -y install python3 python3-simplejson python-is-python3 valgrind'
java:
packages_xenial:
apt: 'apt-get -qq -y install curl software-properties-common wget unzip && add-apt-repository --yes ppa:openjdk-r/ppa && apt-get -qq update && DEBIAN_FRONTEND=noninteractive apt-get -qq -y --force-yes install openjdk-8-jdk && java -version && update-ca-certificates -f'
packages_bionic:
apt: 'apt-get -qq -y install curl software-properties-common wget unzip && add-apt-repository --yes ppa:openjdk-r/ppa && apt-get -qq update && DEBIAN_FRONTEND=noninteractive apt-get -qq -y --force-yes install openjdk-8-jdk && java -version && update-ca-certificates -f'
electronjs:
packages_xenial:
apt: 'libatk1.0-0 libatk-bridge2.0-0 libcairo2 libcups2 libdbus-1-3 libgdk-pixbuf2.0-0 libgtk-3-0 libnspr4 libnss3 libpango-1.0-0 libpangocairo-1.0-0 libx11-xcb1 libxcomposite1 libxcursor1 libxdamage1 libxfixes3 libxi6 libxrandr2 libxrender1 libxss1 libxtst6 xvfb'
@ -204,6 +213,47 @@ system:
ios_x86_64:
url: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.ios_x86_64/artifacts/public/home.tar.xz"
namespace: "project.deepspeech.tensorflow.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.ios_x86_64"
tensorflow_gcc9:
linux_amd64_cpu:
url: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.cpu_gcc9/artifacts/public/home.tar.xz"
namespace: "project.deepspeech.tensorflow.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.cpu_gcc9"
linux_amd64_cuda:
url: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.cuda_gcc9/artifacts/public/home.tar.xz"
namespace: "project.deepspeech.tensorflow.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.cuda_gcc9"
tensorflow_dbg:
linux_amd64_cpu:
url: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.cpu/artifacts/public/home.tar.xz"
namespace: "project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.cpu"
linux_amd64_cuda:
url: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.cuda/artifacts/public/home.tar.xz"
namespace: "project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.cuda"
linux_armv7:
url: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.arm/artifacts/public/home.tar.xz"
namespace: "project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.arm"
linux_arm64:
url: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.arm64/artifacts/public/home.tar.xz"
namespace: "project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.arm64"
darwin_amd64:
url: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.osx/artifacts/public/home.tar.xz"
namespace: "project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.osx"
android_arm64:
url: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.android-arm64/artifacts/public/home.tar.xz"
namespace: "project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.android-arm64"
android_armv7:
url: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.android-armv7/artifacts/public/home.tar.xz"
namespace: "project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.android-armv7"
win_amd64_cpu:
url: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.win/artifacts/public/home.tar.xz"
namespace: "project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.win"
win_amd64_cuda:
url: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.win-cuda/artifacts/public/home.tar.xz"
namespace: "project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.win-cuda"
ios_arm64:
url: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.ios_arm64/artifacts/public/home.tar.xz"
namespace: "project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.ios_arm64"
ios_x86_64:
url: "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.ios_x86_64/artifacts/public/home.tar.xz"
namespace: "project.deepspeech.tensorflow_dbg.pip.r2.3.23ad988fcde60fb01f9533e95004bbc4877a9143.0.ios_x86_64"
username: 'build-user'
homedir:
linux: '/home/build-user'


@ -0,0 +1,21 @@
build:
template_file: linux-opt-base.tyml
dependencies:
- "swig-linux-amd64"
- "node-gyp-cache"
- "pyenv-linux-amd64"
- "tf_android-arm64-dbg"
routes:
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.android-arm64-dbg"
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.android-arm64-dbg"
- "index.project.deepspeech.deepspeech.native_client.android-arm64-dbg.${event.head.sha}"
tensorflow: ${system.tensorflow_dbg.android_arm64.url}
scripts:
setup: "taskcluster/tc-true.sh"
build: "taskcluster/android-build-dbg.sh arm64-v8a"
package: "taskcluster/android-package.sh arm64-v8a"
nc_asset_name: "native_client.arm64.cpu.android_dbg.tar.xz"
workerType: "${docker.dsBuild}"
metadata:
name: "DeepSpeech Android ARM64 debug"
description: "Building DeepSpeech for Android ARM64, debug version"


@ -0,0 +1,20 @@
build:
template_file: linux-opt-base.tyml
dependencies:
- "swig-linux-amd64"
- "node-gyp-cache"
- "pyenv-linux-amd64"
- "tf_android-armv7-dbg"
routes:
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.android-armv7-dbg"
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.android-armv7-dbg"
- "index.project.deepspeech.deepspeech.native_client.android-armv7-dbg.${event.head.sha}"
tensorflow: ${system.tensorflow_dbg.android_armv7.url}
scripts:
build: "taskcluster/android-build-dbg.sh armeabi-v7a"
package: "taskcluster/android-package.sh armeabi-v7a"
nc_asset_name: "native_client.armv7.cpu.android_dbg.tar.xz"
workerType: "${docker.dsBuild}"
metadata:
name: "DeepSpeech Android ARMv7 debug"
description: "Building DeepSpeech for Android ARMv7, debug version"


@ -0,0 +1,35 @@
#!/bin/bash
set -xe
arm_flavor=$1
source $(dirname "$0")/tc-tests-utils.sh
source $(dirname "$0")/tf_tc-vars.sh
BAZEL_TARGETS="
//native_client:libdeepspeech.so
"
if [ "${arm_flavor}" = "armeabi-v7a" ]; then
LOCAL_ANDROID_FLAGS="${BAZEL_ANDROID_ARM_FLAGS}"
fi
if [ "${arm_flavor}" = "arm64-v8a" ]; then
LOCAL_ANDROID_FLAGS="${BAZEL_ANDROID_ARM64_FLAGS}"
fi
if [ "${arm_flavor}" = "x86_64" ]; then
LOCAL_ANDROID_FLAGS="--config=android --cpu=x86_64 --action_env ANDROID_NDK_API_LEVEL=21 --cxxopt=-std=c++14 --copt=-D_GLIBCXX_USE_C99"
fi
BAZEL_BUILD_FLAGS="--define=runtime=tflite ${LOCAL_ANDROID_FLAGS} ${BAZEL_EXTRA_FLAGS}"
BAZEL_ENV_FLAGS="TF_NEED_CUDA=0"
SYSTEM_TARGET=
SYSTEM_RASPBIAN=
do_bazel_build "dbg"
export EXTRA_LOCAL_CFLAGS="-ggdb"
do_deepspeech_ndk_build "${arm_flavor}"


@ -0,0 +1,20 @@
build:
template_file: linux-opt-base.tyml
dependencies:
- "swig-linux-amd64"
- "node-gyp-cache"
- "pyenv-linux-amd64"
- "tf_android-arm64-dbg"
routes:
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.android-x86_64-dbg"
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.android-x86_64-dbg"
- "index.project.deepspeech.deepspeech.native_client.android-x86_64-dbg.${event.head.sha}"
tensorflow: ${system.tensorflow_dbg.android_arm64.url}
scripts:
build: "taskcluster/android-build-dbg.sh x86_64"
package: "taskcluster/android-package.sh x86_64"
nc_asset_name: "native_client.x86_64.cpu.android_dbg.tar.xz"
workerType: "${docker.dsBuild}"
metadata:
name: "DeepSpeech Android x86_64 debug"
description: "Building DeepSpeech for Android x86_64, debug version"

23
taskcluster/arm64-build-dbg.sh Executable file

@ -0,0 +1,23 @@
#!/bin/bash
set -xe
source $(dirname "$0")/tc-tests-utils.sh
source $(dirname "$0")/tf_tc-vars.sh
BAZEL_TARGETS="
//native_client:libdeepspeech.so
"
BAZEL_BUILD_FLAGS="${BAZEL_ARM64_FLAGS} ${BAZEL_EXTRA_FLAGS}"
BAZEL_ENV_FLAGS="TF_NEED_CUDA=0"
SYSTEM_TARGET=rpi3-armv8
SYSTEM_RASPBIAN=/tmp/multistrap-armbian64-buster
maybe_install_xldd
do_bazel_build "dbg"
export EXTRA_LOCAL_CFLAGS="-ggdb"
do_deepspeech_binary_build

22
taskcluster/cuda-build-dbg.sh Executable file

@ -0,0 +1,22 @@
#!/bin/bash
set -xe
source $(dirname "$0")/tc-tests-utils.sh
source $(dirname "$0")/tf_tc-vars.sh
BAZEL_TARGETS="
//native_client:libdeepspeech.so
"
BAZEL_ENV_FLAGS="TF_NEED_CUDA=1 ${TF_CUDA_FLAGS}"
BAZEL_BUILD_FLAGS="${BAZEL_CUDA_FLAGS} ${BAZEL_EXTRA_FLAGS} ${BAZEL_OPT_FLAGS}"
SYSTEM_TARGET=host
EXTRA_LOCAL_CFLAGS=""
EXTRA_LOCAL_LDFLAGS="-L${DS_ROOT_TASK}/DeepSpeech/CUDA/lib64/ -L${DS_ROOT_TASK}/DeepSpeech/CUDA/lib64/stubs/ -lcudart -lcuda"
do_bazel_build "dbg"
export EXTRA_LOCAL_CFLAGS="-ggdb"
do_deepspeech_binary_build


@ -16,6 +16,9 @@ payload:
features:
taskclusterProxy: true
env:
DEBIAN_FRONTEND: "noninteractive"
command:
- "/bin/bash"
- "--login"

27
taskcluster/host-build-dbg.sh Executable file

@ -0,0 +1,27 @@
#!/bin/bash
set -xe
runtime=$1
source $(dirname "$0")/tc-tests-utils.sh
source $(dirname "$0")/tf_tc-vars.sh
BAZEL_TARGETS="
//native_client:libdeepspeech.so
"
if [ "${runtime}" = "tflite" ]; then
BAZEL_BUILD_TFLITE="--define=runtime=tflite"
fi;
BAZEL_BUILD_FLAGS="${BAZEL_BUILD_TFLITE} ${BAZEL_OPT_FLAGS} ${BAZEL_EXTRA_FLAGS}"
BAZEL_ENV_FLAGS="TF_NEED_CUDA=0"
SYSTEM_TARGET=host
do_bazel_build "dbg"
export EXTRA_LOCAL_CFLAGS="-ggdb"
do_deepspeech_binary_build
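The optional runtime argument is the only difference between the two debug flavours; these calls mirror the task definitions further down and assume the CI build environment is already set up:

  taskcluster/host-build-dbg.sh           # TensorFlow runtime, built with -c dbg
  taskcluster/host-build-dbg.sh tflite    # TFLite runtime, built with -c dbg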


@ -32,4 +32,3 @@ else
fi
do_deepspeech_nodejs_build


@ -0,0 +1,25 @@
build:
template_file: linux-opt-base.tyml
dependencies:
- "swig-linux-amd64"
- "node-gyp-cache"
- "pyenv-linux-amd64"
- "tf_linux-amd64-cpu_gcc9"
routes:
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.cpu-dbg"
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.cpu-dbg"
- "index.project.deepspeech.deepspeech.native_client.cpu-dbg.${event.head.sha}"
tensorflow: ${system.tensorflow_gcc9.linux_amd64_cpu.url}
docker_image: "ubuntu:20.04"
system_config:
>
${deepspeech.packages_bionic.apt}
scripts:
setup: "taskcluster/tc-true.sh"
build: "taskcluster/host-build-dbg.sh"
package: "taskcluster/package.sh"
nc_asset_name: "native_client.amd64.cpu.linux_dbg.tar.xz"
workerType: "${docker.tfBuild}"
metadata:
name: "DeepSpeech Linux AMD64 CPU Debug"
description: "Building DeepSpeech for Linux/AMD64, CPU only, debug version"


@ -0,0 +1,26 @@
build:
template_file: linux-opt-base.tyml
dependencies:
- "swig-linux-amd64"
- "node-gyp-cache"
- "pyenv-linux-amd64"
- "tf_linux-amd64-gpu_gcc9"
routes:
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.gpu-dbg"
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.gpu-dbg"
- "index.project.deepspeech.deepspeech.native_client.gpu-dbg.${event.head.sha}"
tensorflow: ${system.tensorflow_gcc9.linux_amd64_cuda.url}
docker_image: "ubuntu:20.04"
system_config:
>
${deepspeech.packages_bionic.apt}
maxRunTime: 14400
scripts:
setup: "taskcluster/tc-true.sh"
build: "taskcluster/cuda-build-dbg.sh"
package: "taskcluster/package.sh"
nc_asset_name: "native_client.amd64.cuda.linux_dbg.tar.xz"
workerType: "${docker.tfBuild}"
metadata:
name: "DeepSpeech Linux AMD64 CUDA debug"
description: "Building DeepSpeech for Linux/AMD64, CUDA-enabled, debug version"


@ -0,0 +1,25 @@
build:
template_file: linux-opt-base.tyml
dependencies:
- "swig-linux-amd64"
- "node-gyp-cache"
- "pyenv-linux-amd64"
- "tf_linux-amd64-cpu_gcc9"
routes:
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.tflite-dbg"
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.tflite-dbg"
- "index.project.deepspeech.deepspeech.native_client.tflite-dbg.${event.head.sha}"
tensorflow: ${system.tensorflow_gcc9.linux_amd64_cpu.url}
docker_image: "ubuntu:20.04"
system_config:
>
${deepspeech.packages_bionic.apt}
scripts:
setup: "taskcluster/tc-true.sh"
build: "taskcluster/host-build-dbg.sh tflite"
package: "taskcluster/package.sh"
nc_asset_name: "native_client.amd64.tflite.linux_dbg.tar.xz"
workerType: "${docker.tfBuild}"
metadata:
name: "DeepSpeech Linux AMD64 TFLite debug"
description: "Building DeepSpeech for Linux/AMD64, TFLite, debug version"


@ -0,0 +1,30 @@
build:
template_file: linux-opt-base.tyml
dependencies:
- "swig-linux-amd64"
- "node-gyp-cache"
- "pyenv-linux-amd64"
- "tf_linux-arm64-cpu-dbg"
routes:
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.arm64-dbg"
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.arm64-dbg"
- "index.project.deepspeech.deepspeech.native_client.arm64-dbg.${event.head.sha}"
## multistrap 2.2.0-ubuntu1 is broken in 14.04: https://bugs.launchpad.net/ubuntu/+source/multistrap/+bug/1313787
system_setup:
>
apt-get -qq -y install gdebi git pixz &&
wget http://mirrors.kernel.org/ubuntu/pool/universe/m/multistrap/multistrap_2.2.0ubuntu2_all.deb -O /tmp/multistrap_2.2.0ubuntu2_all.deb &&
echo "y" | gdebi /tmp/multistrap_2.2.0ubuntu2_all.deb
system_config:
>
multistrap -d /tmp/multistrap-armbian64-buster/ -f ${system.homedir.linux}/DeepSpeech/ds/native_client/multistrap_armbian64_buster.conf
tensorflow: ${system.tensorflow_dbg.linux_arm64.url}
scripts:
setup: "taskcluster/tc-true.sh"
build: "taskcluster/arm64-build-dbg.sh"
package: "taskcluster/package.sh"
nc_asset_name: "native_client.arm64.cpu.linux_dbg.tar.xz"
workerType: "${docker.dsBuild}"
metadata:
name: "DeepSpeech Linux ARM64 Cortex-A53 CPU debug"
description: "Building DeepSpeech for Linux ARM64 Cortex-A53, CPU only, debug version"


@ -30,10 +30,11 @@ then:
payload:
maxRunTime: { $eval: to_int(build.maxRunTime) }
image: "ubuntu:16.04"
image: ${build.docker_image}
env:
TENSORFLOW_BUILD_ARTIFACT: ${build.tensorflow}
DEBIAN_FRONTEND: "noninteractive"
command:
- "/bin/bash"
@ -44,7 +45,7 @@ then:
extraSystemConfig: { $eval: strip(str(build.system_config)) }
in: >
adduser --system --home ${system.homedir.linux} ${system.username} &&
apt-get -qq update && apt-get -qq -y install ${deepspeech.packages_xenial.apt} pixz pkg-config realpath sudo unzip wget zip && ${extraSystemSetup} &&
apt-get -qq update && apt-get -qq -y install ${deepspeech.packages_xenial.apt} pixz pkg-config coreutils sudo unzip wget zip && ${extraSystemSetup} &&
cd ${system.homedir.linux}/ &&
echo -e "#!/bin/bash\nset -xe\n env && id && (wget -O - $TENSORFLOW_BUILD_ARTIFACT | pixz -d | tar -C ${system.homedir.linux}/ -xf - ) && cd ~/DeepSpeech/ds && git remote set-url origin ${event.head.repo.url} && git fetch origin && git checkout --quiet ${event.head.sha} && git submodule --quiet sync tensorflow/ && git submodule --quiet update tensorflow/ && mkdir -p ${system.homedir.linux}/.cache/node-gyp/ && wget -O - ${system.node_gyp_cache.url} | tar -C ${system.homedir.linux}/.cache/node-gyp/ -xzf - && mkdir -p ${system.homedir.linux}/pyenv-root/ && wget -O - ${system.pyenv.linux.url} | tar -C ${system.homedir.linux}/pyenv-root/ -xzf - && if [ ! -z "${build.gradle_cache.url}" ]; then wget -O - ${build.gradle_cache.url} | tar -C ${system.homedir.linux}/ -xzf - ; fi && if [ ! -z "${build.android_cache.url}" ]; then wget -O - ${build.android_cache.url} | tar -C ${system.homedir.linux}/ -xzf - ; fi;" > /tmp/clone.sh && chmod +x /tmp/clone.sh &&
sudo -H -u ${system.username} /bin/bash /tmp/clone.sh && ${extraSystemConfig} &&


@ -0,0 +1,30 @@
build:
template_file: linux-opt-base.tyml
dependencies:
- "swig-linux-amd64"
- "node-gyp-cache"
- "pyenv-linux-amd64"
- "tf_linux-rpi3-cpu-dbg"
routes:
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.arm-dbg"
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.arm-dbg"
- "index.project.deepspeech.deepspeech.native_client.arm-dbg.${event.head.sha}"
## multistrap 2.2.0-ubuntu1 is broken in 14.04: https://bugs.launchpad.net/ubuntu/+source/multistrap/+bug/1313787
system_setup:
>
apt-get -qq -y install gdebi git pixz &&
wget http://mirrors.kernel.org/ubuntu/pool/universe/m/multistrap/multistrap_2.2.0ubuntu2_all.deb -O /tmp/multistrap_2.2.0ubuntu2_all.deb &&
echo "y" | gdebi /tmp/multistrap_2.2.0ubuntu2_all.deb
system_config:
>
multistrap -d /tmp/multistrap-raspbian-buster/ -f ${system.homedir.linux}/DeepSpeech/ds/native_client/multistrap_raspbian_buster.conf
tensorflow: ${system.tensorflow.linux_armv7.url}
scripts:
setup: "taskcluster/tc-true.sh"
build: "taskcluster/rpi3-build-dbg.sh"
package: "taskcluster/package.sh"
workerType: "${docker.dsBuild}"
nc_asset_name: "native_client.rpi3.cpu.linux_dbg.tar.xz"
metadata:
name: "DeepSpeech Linux RPi3/ARMv7 CPU debug"
description: "Building DeepSpeech for Linux RPi3 ARMv7, CPU only, debug version"

23
taskcluster/rpi3-build-dbg.sh Executable file

@ -0,0 +1,23 @@
#!/bin/bash
set -xe
source $(dirname "$0")/tc-tests-utils.sh
source $(dirname "$0")/tf_tc-vars.sh
BAZEL_TARGETS="
//native_client:libdeepspeech.so
"
BAZEL_BUILD_FLAGS="${BAZEL_ARM_FLAGS} ${BAZEL_EXTRA_FLAGS}"
BAZEL_ENV_FLAGS="TF_NEED_CUDA=0"
SYSTEM_TARGET=rpi3
SYSTEM_RASPBIAN=/tmp/multistrap-raspbian-buster
maybe_install_xldd
do_bazel_build "dbg"
export EXTRA_LOCAL_CFLAGS="-ggdb"
do_deepspeech_binary_build


@ -3,19 +3,25 @@ build:
dependencies:
# Make sure builds are ready
- "linux-arm64-cpu-opt"
- "linux-arm64-cpu-dbg"
- "darwin-amd64-cpu-opt"
- "darwin-amd64-ctc-opt"
- "darwin-amd64-tflite-opt"
- "linux-amd64-cpu-opt"
- "linux-amd64-cpu-dbg"
- "linux-amd64-ctc-opt"
- "linux-amd64-tflite-opt"
- "linux-amd64-tflite-dbg"
- "linux-amd64-gpu-opt"
- "linux-rpi3-cpu-opt"
- "linux-rpi3-cpu-dbg"
- "node-package-gpu"
- "node-package-cpu"
- "node-package-tflite"
- "android-arm64-cpu-opt"
- "android-arm64-cpu-dbg"
- "android-armv7-cpu-opt"
- "android-armv7-cpu-dbg"
- "android-java-opt"
- "win-amd64-cpu-opt"
- "win-amd64-gpu-opt"
@ -64,6 +70,12 @@ build:
- "win-amd64-tflite-opt"
- "ios-x86_64-tflite-opt"
- "ios-arm64-tflite-opt"
- "linux-arm64-cpu-dbg"
- "linux-amd64-cpu-dbg"
- "linux-amd64-tflite-dbg"
- "linux-rpi3-cpu-dbg"
- "android-arm64-cpu-dbg"
- "android-armv7-cpu-dbg"
java_aar:
- "android-java-opt"
nuget:


@ -181,21 +181,26 @@ do_deepspeech_npm_package()
do_bazel_build()
{
local _opt_or_dbg=${1:-"opt"}
cd ${DS_TFDIR}
eval "export ${BAZEL_ENV_FLAGS}"
if is_patched_bazel; then
find ${DS_ROOT_TASK}/DeepSpeech/ds/tensorflow/bazel-out/ -iname "*.ckd" | tar -cf ${DS_ROOT_TASK}/DeepSpeech/bazel-ckd-tf.tar -T -
if [ "${_opt_or_dbg}" = "opt" ]; then
if is_patched_bazel; then
find ${DS_ROOT_TASK}/DeepSpeech/ds/tensorflow/bazel-out/ -iname "*.ckd" | tar -cf ${DS_ROOT_TASK}/DeepSpeech/bazel-ckd-tf.tar -T -
fi;
fi;
bazel ${BAZEL_OUTPUT_USER_ROOT} build \
-s --explain bazel_monolithic.log --verbose_explanations --experimental_strict_action_env --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic -c opt ${BAZEL_BUILD_FLAGS} ${BAZEL_TARGETS}
-s --explain bazel_monolithic.log --verbose_explanations --experimental_strict_action_env --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic -c ${_opt_or_dbg} ${BAZEL_BUILD_FLAGS} ${BAZEL_TARGETS}
if is_patched_bazel; then
find ${DS_ROOT_TASK}/DeepSpeech/ds/tensorflow/bazel-out/ -iname "*.ckd" | tar -cf ${DS_ROOT_TASK}/DeepSpeech/bazel-ckd-ds.tar -T -
if [ "${_opt_or_dbg}" = "opt" ]; then
if is_patched_bazel; then
find ${DS_ROOT_TASK}/DeepSpeech/ds/tensorflow/bazel-out/ -iname "*.ckd" | tar -cf ${DS_ROOT_TASK}/DeepSpeech/bazel-ckd-ds.tar -T -
fi;
verify_bazel_rebuild "${DS_ROOT_TASK}/DeepSpeech/ds/tensorflow/bazel_monolithic.log"
fi;
verify_bazel_rebuild "${DS_ROOT_TASK}/DeepSpeech/ds/tensorflow/bazel_monolithic.log"
}
shutdown_bazel()
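In short, the new parameter defaults to "opt", and only the opt path keeps the patched-bazel *.ckd capture and rebuild verification; the debug build scripts in this change call it like this:

  do_bazel_build          # unchanged behaviour: -c opt, with verify_bazel_rebuild
  do_bazel_build "dbg"    # -c dbg, skips the *.ckd capture and the rebuild check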


@ -22,6 +22,9 @@ source ${tc_tests_utils}/tc-node-utils.sh
# Scoping of .Net-related tooling
source ${tc_tests_utils}/tc-dotnet-utils.sh
# For checking with valgrind
source ${tc_tests_utils}/tc-valgrind-utils.sh
# Functions that controls directly the build process
source ${tc_tests_utils}/tc-build-utils.sh

25
taskcluster/tc-valgrind-cpp.sh Executable file

@ -0,0 +1,25 @@
#!/bin/bash
set -xe
kind=$1
source $(dirname "$0")/tc-tests-utils.sh
set_ldc_sample_filename "16k"
download_material "${TASKCLUSTER_TMP_DIR}/ds"
mkdir -p ${TASKCLUSTER_ARTIFACTS} || true
export PATH=${TASKCLUSTER_TMP_DIR}/ds/:$PATH
if [ "${kind}" = "--basic" ]; then
run_valgrind_basic
run_valgrind_stream
fi
if [ "${kind}" = "--metadata" ]; then
run_valgrind_extended
run_valgrind_extended_stream
fi


@ -0,0 +1,29 @@
#!/bin/bash
set -xe
kind=$1
source $(dirname "$0")/tc-tests-utils.sh
set_ldc_sample_filename "16k"
model_source=${DEEPSPEECH_TEST_MODEL//.pb/.tflite}
model_name=$(basename "${model_source}")
model_name_mmap=$(basename "${model_source}")
download_material "${TASKCLUSTER_TMP_DIR}/ds"
mkdir -p ${TASKCLUSTER_ARTIFACTS} || true
export PATH=${TASKCLUSTER_TMP_DIR}/ds/:$PATH
if [ "${kind}" = "--basic" ]; then
run_valgrind_basic
run_valgrind_stream
fi
if [ "${kind}" = "--metadata" ]; then
run_valgrind_extended
run_valgrind_extended_stream
fi
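The //.pb/.tflite parameter expansion above simply rewrites the test model path for the TFLite flavour; with a hypothetical value:

  DEEPSPEECH_TEST_MODEL=/tmp/ds/output_graph.pb
  echo "${DEEPSPEECH_TEST_MODEL//.pb/.tflite}"   # /tmp/ds/output_graph.tflite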


@ -0,0 +1,68 @@
#!/bin/bash
set -xe
# How to generate / update valgrind suppression lists:
# https://wiki.wxwidgets.org/Valgrind_Suppression_File_Howto#How_to_make_a_suppression_file
#
# $ valgrind --leak-check=full --show-reachable=yes --error-limit=no --gen-suppressions=all --log-file=minimalraw.log ./minimal
# $ cat ./minimalraw.log | ./parse_valgrind_suppressions.sh > minimal.supp
VALGRIND_CMD=${VALGRIND_CMD:-"valgrind \
--error-exitcode=4242 \
--errors-for-leak-kinds=all \
--leak-check=full \
--leak-resolution=high \
--show-reachable=yes \
--track-origins=yes \
--gen-suppressions=all \
--suppressions=${DS_DSDIR}/ds_generic.supp \
--suppressions=${DS_DSDIR}/ds_lib.supp \
--suppressions=${DS_DSDIR}/ds_sox.supp \
--suppressions=${DS_DSDIR}/ds_openfst.supp \
--suppressions=${DS_DSDIR}/tensorflow_full_runtime.supp \
--suppressions=${DS_DSDIR}/tensorflow_tflite_runtime.supp \
"}
run_valgrind_basic()
{
${VALGRIND_CMD} --log-file=${TASKCLUSTER_ARTIFACTS}/valgrind_basic.log \
deepspeech \
--model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} \
--scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer \
--audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} \
-t
}
run_valgrind_stream()
{
${VALGRIND_CMD} --log-file=${TASKCLUSTER_ARTIFACTS}/valgrind_stream.log \
deepspeech \
--model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} \
--scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer \
--audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} \
--stream 320 \
-t
}
run_valgrind_extended()
{
${VALGRIND_CMD} --log-file=${TASKCLUSTER_ARTIFACTS}/valgrind_extended.log \
deepspeech \
--model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} \
--scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer \
--audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} \
--extended \
-t
}
run_valgrind_extended_stream()
{
${VALGRIND_CMD} --log-file=${TASKCLUSTER_ARTIFACTS}/valgrind_stream_extended.log \
deepspeech \
--model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} \
--scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer \
--audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} \
--extended_stream 320 \
-t
}
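Because VALGRIND_CMD is declared with the ${VAR:-default} form, the whole valgrind invocation can be overridden from the environment when iterating locally; the reduced flag set below is only an illustration, not what CI runs:

  VALGRIND_CMD="valgrind --leak-check=summary --error-exitcode=4242" \
    ./taskcluster/tc-valgrind-cpp.sh --basic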

0
taskcluster/test-cpp_16k_tflite-linux-amd64-opt.yml Executable file → Normal file

0
taskcluster/test-cpp_8k_tflite-linux-amd64-opt.yml Executable file → Normal file


@ -0,0 +1,16 @@
build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-tflite-dbg"
- "test-training_16k-linux-amd64-py36m-opt"
test_model_task: "test-training_16k-linux-amd64-py36m-opt"
docker_image: "ubuntu:20.04"
system_setup:
>
${valgrind.packages_bionic.apt}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-valgrind-cpp_tflite.sh --basic"
workerType: "${docker.dsHighMemTests}"
metadata:
name: "DeepSpeech Linux AMD64 valgrind C++ TFLite basic tests"
description: "Testing basic DeepSpeech valgrind C++ TFLite for Linux/AMD64"


@ -0,0 +1,16 @@
build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-dbg"
- "test-training_16k-linux-amd64-py36m-opt"
test_model_task: "test-training_16k-linux-amd64-py36m-opt"
docker_image: "ubuntu:20.04"
system_setup:
>
${valgrind.packages_bionic.apt}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-valgrind-cpp.sh --basic"
workerType: "${docker.dsHighMemTests}"
metadata:
name: "DeepSpeech Linux AMD64 valgrind C++ basic tests"
description: "Testing basic DeepSpeech valgrind C++ for Linux/AMD64"


@ -0,0 +1,16 @@
build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-tflite-dbg"
- "test-training_16k-linux-amd64-py36m-opt"
test_model_task: "test-training_16k-linux-amd64-py36m-opt"
docker_image: "ubuntu:20.04"
system_setup:
>
${valgrind.packages_bionic.apt}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-valgrind-cpp_tflite.sh --metadata"
workerType: "${docker.dsHighMemTests}"
metadata:
name: "DeepSpeech Linux AMD64 valgrind C++ TFLite metadata tests"
description: "Testing metadata DeepSpeech valgrind C++ TFLite for Linux/AMD64"


@ -0,0 +1,16 @@
build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-dbg"
- "test-training_16k-linux-amd64-py36m-opt"
test_model_task: "test-training_16k-linux-amd64-py36m-opt"
docker_image: "ubuntu:20.04"
system_setup:
>
${valgrind.packages_bionic.apt}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/taskcluster/tc-valgrind-cpp.sh --metadata"
workerType: "${docker.dsHighMemTests}"
metadata:
name: "DeepSpeech Linux AMD64 valgrind C++ metadata tests"
description: "Testing metadata DeepSpeech valgrind C++ for Linux/AMD64"

0
taskcluster/test-cpp_tflite-linux-amd64-prod-opt.yml Executable file → Normal file


@ -34,6 +34,7 @@ then:
DECODER_ARTIFACTS_ROOT: https://community-tc.services.mozilla.com/api/queue/v1/task/${linux_amd64_ctc}/artifacts/public
PIP_DEFAULT_TIMEOUT: "60"
EXPECTED_TENSORFLOW_VERSION: "${build.tensorflow_git_desc}"
DEBIAN_FRONTEND: "noninteractive"
command:
- "/bin/bash"


@ -0,0 +1,17 @@
build:
template_file: generic_tc_caching-linux-opt-base.tyml
cache:
artifact_url: ${system.tensorflow_dbg.android_arm64.url}
artifact_namespace: ${system.tensorflow_dbg.android_arm64.namespace}
system_config:
>
${tensorflow.packages_xenial.apt} && ${java.packages_xenial.apt}
scripts:
setup: "taskcluster/tf_tc-setup.sh --android-arm64"
build: "taskcluster/tf_tc-build.sh --android-arm64 dbg"
package: "taskcluster/tf_tc-package.sh"
maxRunTime: 14400
workerType: "${docker.tfBuild}"
metadata:
name: "TensorFlow Android ARM64 debug"
description: "Building TensorFlow for Android ARM64, debug version"


@ -0,0 +1,17 @@
build:
template_file: generic_tc_caching-linux-opt-base.tyml
cache:
artifact_url: ${system.tensorflow_dbg.android_armv7.url}
artifact_namespace: ${system.tensorflow_dbg.android_armv7.namespace}
system_config:
>
${tensorflow.packages_xenial.apt} && ${java.packages_xenial.apt}
scripts:
setup: "taskcluster/tf_tc-setup.sh --android-armv7"
build: "taskcluster/tf_tc-build.sh --android-armv7 dbg"
package: "taskcluster/tf_tc-package.sh"
maxRunTime: 14400
workerType: "${docker.tfBuild}"
metadata:
name: "TensorFlow Android ARMv7 debug"
description: "Building TensorFlow for Android ARMv7, debug version"


@ -0,0 +1,18 @@
build:
template_file: generic_tc_caching-linux-opt-base.tyml
cache:
artifact_url: ${system.tensorflow_gcc9.linux_amd64_cpu.url}
artifact_namespace: ${system.tensorflow_gcc9.linux_amd64_cpu.namespace}
docker_image: "ubuntu:20.04"
system_config:
>
${tensorflow.packages_bionic.apt} && ${java.packages_bionic.apt}
scripts:
setup: "taskcluster/tf_tc-setup.sh"
build: "taskcluster/tf_tc-build.sh --linux-cpu --py3"
package: "taskcluster/tf_tc-package.sh"
maxRunTime: 14400
workerType: "${docker.tfBuild}"
metadata:
name: "TensorFlow Linux AMD64 CPU opt/gcc9"
description: "Building TensorFlow for Linux/AMD64, CPU only, opt/gcc9"


@ -0,0 +1,18 @@
build:
template_file: generic_tc_caching-linux-opt-base.tyml
cache:
artifact_url: ${system.tensorflow_gcc9.linux_amd64_cuda.url}
artifact_namespace: ${system.tensorflow_gcc9.linux_amd64_cuda.namespace}
docker_image: "ubuntu:20.04"
system_config:
>
${tensorflow.packages_bionic.apt} && ${java.packages_bionic.apt}
scripts:
setup: "taskcluster/tf_tc-setup.sh --linux-cuda"
build: "taskcluster/tf_tc-build.sh --linux-cuda --py3"
package: "taskcluster/tf_tc-package.sh"
maxRunTime: 14400
workerType: "${docker.tfBuild}"
metadata:
name: "TensorFlow Linux AMD64 CUDA opt/gcc9"
description: "Building TensorFlow for Linux/AMD64, CUDA-enabled, opt/gcc9"


@ -0,0 +1,17 @@
build:
template_file: generic_tc_caching-linux-opt-base.tyml
cache:
artifact_url: ${system.tensorflow_dbg.linux_arm64.url}
artifact_namespace: ${system.tensorflow_dbg.linux_arm64.namespace}
system_config:
>
${tensorflow.packages_xenial.apt} && ${java.packages_xenial.apt}
scripts:
setup: "taskcluster/tf_tc-setup.sh"
build: "taskcluster/tf_tc-build.sh --linux-arm64 dbg"
package: "taskcluster/tf_tc-package.sh"
maxRunTime: 14400
workerType: "${docker.tfBuild}"
metadata:
name: "TensorFlow Linux ARM64 Cortex-A53 CPU debug"
description: "Building TensorFlow for Linux ARM64 Cortex-A53, CPU only, debug version"


@ -0,0 +1,17 @@
build:
template_file: generic_tc_caching-linux-opt-base.tyml
cache:
artifact_url: ${system.tensorflow_dbg.linux_armv7.url}
artifact_namespace: ${system.tensorflow_dbg.linux_armv7.namespace}
system_config:
>
${tensorflow.packages_xenial.apt} && ${java.packages_xenial.apt}
scripts:
setup: "taskcluster/tf_tc-setup.sh"
build: "taskcluster/tf_tc-build.sh --linux-arm dbg"
package: "taskcluster/tf_tc-package.sh"
maxRunTime: 14400
workerType: "${docker.tfBuild}"
metadata:
name: "TensorFlow Linux RPi3/ARMv7 CPU debug"
description: "Building TensorFlow for Linux RPi3 ARMv7, CPU only, debug version"


@ -14,32 +14,36 @@ pushd ${DS_ROOT_TASK}/DeepSpeech/ds/tensorflow/
# Force toolchain sync (useful on macOS ?)
bazel ${BAZEL_OUTPUT_USER_ROOT} sync --configure
OPT_OR_DBG=${2:-opt}
MAYBE_DEBUG=$2
OPT_OR_DBG="-c opt"
if [ "${MAYBE_DEBUG}" = "dbg" ]; then
OPT_OR_DBG="-c dbg"
fi;
case "$1" in
"--linux-cpu"|"--darwin-cpu"|"--windows-cpu")
echo "" | TF_NEED_CUDA=0 ./configure && ${BAZEL_BUILD} -c ${OPT_OR_DBG} ${BAZEL_OPT_FLAGS} ${BAZEL_EXTRA_FLAGS} ${BUILD_TARGET_LIB_CPP_API} ${BUILD_TARGET_LITE_LIB}
echo "" | TF_NEED_CUDA=0 ./configure && ${BAZEL_BUILD} ${OPT_OR_DBG} ${BAZEL_OPT_FLAGS} ${BAZEL_EXTRA_FLAGS} ${BUILD_TARGET_LIB_CPP_API} ${BUILD_TARGET_LITE_LIB}
;;
"--linux-cuda"|"--windows-cuda")
eval "export ${TF_CUDA_FLAGS}" && (echo "" | TF_NEED_CUDA=1 ./configure) && ${BAZEL_BUILD} -c ${OPT_OR_DBG} ${BAZEL_CUDA_FLAGS} ${BAZEL_EXTRA_FLAGS} ${BAZEL_OPT_FLAGS} ${BUILD_TARGET_LIB_CPP_API}
eval "export ${TF_CUDA_FLAGS}" && (echo "" | TF_NEED_CUDA=1 ./configure) && ${BAZEL_BUILD} ${OPT_OR_DBG} ${BAZEL_CUDA_FLAGS} ${BAZEL_EXTRA_FLAGS} ${BAZEL_OPT_FLAGS} ${BUILD_TARGET_LIB_CPP_API}
;;
"--linux-arm")
echo "" | TF_NEED_CUDA=0 ./configure && ${BAZEL_BUILD} -c ${OPT_OR_DBG} ${BAZEL_ARM_FLAGS} ${BAZEL_EXTRA_FLAGS} ${BUILD_TARGET_LITE_LIB}
echo "" | TF_NEED_CUDA=0 ./configure && ${BAZEL_BUILD} ${OPT_OR_DBG} ${BAZEL_ARM_FLAGS} ${BAZEL_EXTRA_FLAGS} ${BUILD_TARGET_LITE_LIB}
;;
"--linux-arm64")
echo "" | TF_NEED_CUDA=0 ./configure && ${BAZEL_BUILD} -c ${OPT_OR_DBG} ${BAZEL_ARM64_FLAGS} ${BAZEL_EXTRA_FLAGS} ${BUILD_TARGET_LITE_LIB}
echo "" | TF_NEED_CUDA=0 ./configure && ${BAZEL_BUILD} ${OPT_OR_DBG} ${BAZEL_ARM64_FLAGS} ${BAZEL_EXTRA_FLAGS} ${BUILD_TARGET_LITE_LIB}
;;
"--android-armv7")
echo "" | TF_SET_ANDROID_WORKSPACE=1 ./configure && ${BAZEL_BUILD} -c ${OPT_OR_DBG} ${BAZEL_ANDROID_ARM_FLAGS} ${BAZEL_EXTRA_FLAGS} ${BUILD_TARGET_LITE_LIB}
echo "" | TF_SET_ANDROID_WORKSPACE=1 ./configure && ${BAZEL_BUILD} ${OPT_OR_DBG} ${BAZEL_ANDROID_ARM_FLAGS} ${BAZEL_EXTRA_FLAGS} ${BUILD_TARGET_LITE_LIB}
;;
"--android-arm64")
echo "" | TF_SET_ANDROID_WORKSPACE=1 ./configure && ${BAZEL_BUILD} -c ${OPT_OR_DBG} ${BAZEL_ANDROID_ARM64_FLAGS} ${BAZEL_EXTRA_FLAGS} ${BUILD_TARGET_LITE_LIB}
echo "" | TF_SET_ANDROID_WORKSPACE=1 ./configure && ${BAZEL_BUILD} ${OPT_OR_DBG} ${BAZEL_ANDROID_ARM64_FLAGS} ${BAZEL_EXTRA_FLAGS} ${BUILD_TARGET_LITE_LIB}
;;
"--ios-arm64")
echo "" | TF_NEED_CUDA=0 TF_CONFIGURE_IOS=1 ./configure && ${BAZEL_BUILD} -c ${OPT_OR_DBG} ${BAZEL_IOS_ARM64_FLAGS} ${BUILD_TARGET_LITE_LIB}
echo "" | TF_NEED_CUDA=0 TF_CONFIGURE_IOS=1 ./configure && ${BAZEL_BUILD} ${OPT_OR_DBG} ${BAZEL_IOS_ARM64_FLAGS} ${BUILD_TARGET_LITE_LIB}
;;
"--ios-x86_64")
echo "" | TF_NEED_CUDA=0 TF_CONFIGURE_IOS=1 ./configure && ${BAZEL_BUILD} -c ${OPT_OR_DBG} ${BAZEL_IOS_X86_64_FLAGS} ${BUILD_TARGET_LITE_LIB}
echo "" | TF_NEED_CUDA=0 TF_CONFIGURE_IOS=1 ./configure && ${BAZEL_BUILD} ${OPT_OR_DBG} ${BAZEL_IOS_X86_64_FLAGS} ${BUILD_TARGET_LITE_LIB}
;;
esac
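The second positional argument now selects the bazel compilation mode; these calls match the task definitions elsewhere in this change:

  taskcluster/tf_tc-build.sh --linux-cpu --py3      # second argument is not "dbg": -c opt
  taskcluster/tf_tc-build.sh --android-arm64 dbg    # -c dbg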


@ -122,7 +122,16 @@ export GCC_HOST_COMPILER_PATH=/usr/bin/gcc
if [ "${OS}" = "${TC_MSYS_VERSION}" ]; then
export PYTHON_BIN_PATH=C:/Python36/python.exe
else
export PYTHON_BIN_PATH=/usr/bin/python2.7
if [ "${OS}" = "Linux" ]; then
source /etc/os-release
if [ "${ID}" = "ubuntu" -a "${VERSION_ID}" = "20.04" ]; then
export PYTHON_BIN_PATH=/usr/bin/python3
else
export PYTHON_BIN_PATH=/usr/bin/python2.7
fi
else
export PYTHON_BIN_PATH=/usr/bin/python2.7
fi
fi
## Below, define or export some build variables
@ -137,18 +146,16 @@ fi
# See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for targetting specific CPUs
if [ "${OS}" = "${TC_MSYS_VERSION}" ]; then
CC_OPT_FLAGS="/arch:AVX"
OPT_FLAGS="/arch:AVX"
else
CC_OPT_FLAGS="-mtune=generic -march=x86-64 -msse -msse2 -msse3 -msse4.1 -msse4.2 -mavx"
OPT_FLAGS="-mtune=generic -march=x86-64 -msse -msse2 -msse3 -msse4.1 -msse4.2 -mavx"
fi
BAZEL_OPT_FLAGS=""
for flag in ${CC_OPT_FLAGS};
for flag in ${OPT_FLAGS};
do
BAZEL_OPT_FLAGS="${BAZEL_OPT_FLAGS} --copt=${flag}"
done;
export CC_OPT_FLAGS
BAZEL_OUTPUT_CACHE_DIR="${DS_ROOT_TASK}/.bazel_cache/"
BAZEL_OUTPUT_CACHE_INSTANCE="${BAZEL_OUTPUT_CACHE_DIR}/output/"
mkdir -p ${BAZEL_OUTPUT_CACHE_INSTANCE} || true
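For reference, the renamed OPT_FLAGS loop leaves BAZEL_OPT_FLAGS with one --copt entry per flag, so on Linux it expands to:

  --copt=-mtune=generic --copt=-march=x86-64 --copt=-msse --copt=-msse2 --copt=-msse3 --copt=-msse4.1 --copt=-msse4.2 --copt=-mavx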

9945
tensorflow_full_runtime.supp Normal file

File diff suppressed because it is too large
