Merge pull request #1127 from lissyx/one-libdeepspeech
Build TensorFlow as monolithic into libdeepspeech.so
commit 1aca3a32d9

@@ -0,0 +1,264 @@
diff --git a/src/main/java/com/google/devtools/build/lib/analysis/actions/FileWriteAction.java b/src/main/java/com/google/devtools/build/lib/analysis/actions/FileWriteAction.java
index c7aa4cb63..e084bc27c 100644
--- a/src/main/java/com/google/devtools/build/lib/analysis/actions/FileWriteAction.java
+++ b/src/main/java/com/google/devtools/build/lib/analysis/actions/FileWriteAction.java
@@ -28,6 +28,7 @@ import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
+import java.io.PrintWriter;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

@@ -73,6 +74,8 @@ public final class FileWriteAction extends AbstractFileWriteAction {
*/
private final CharSequence fileContents;

+ private final Artifact output;
+
/** Minimum length (in chars) for content to be eligible for compression. */
private static final int COMPRESS_CHARS_THRESHOLD = 256;

@@ -90,6 +93,7 @@ public final class FileWriteAction extends AbstractFileWriteAction {
fileContents = new CompressedString((String) fileContents);
}
this.fileContents = fileContents;
+ this.output = output;
}

/**
@@ -230,11 +234,32 @@ public final class FileWriteAction extends AbstractFileWriteAction {
*/
@Override
protected String computeKey() {
+ // System.err.println("src/main/java/com/google/devtools/build/lib/analysis/actions/FileWriteAction.java => output: " + output.getExecPath());
+ // ".ckd" Compute Key Debug
+ PrintWriter computeKeyDebugWriter = null;
+ String computeKeyDebugFile = output.getExecPath() + ".FileWriteAction.ckd";
+ try {
+ computeKeyDebugWriter = new PrintWriter(computeKeyDebugFile, "UTF-8");
+ } catch (java.io.FileNotFoundException ex) {
+ System.err.println("Unable to create " + computeKeyDebugFile);
+ } catch (java.io.UnsupportedEncodingException ex) {
+ System.err.println("Unsupported encoding");
+ }
+
Fingerprint f = new Fingerprint();
f.addString(GUID);
+ computeKeyDebugWriter.println("GUID: " + GUID);
+
f.addString(String.valueOf(makeExecutable));
+ computeKeyDebugWriter.println("MAKEEXECUTABLE: " + String.valueOf(makeExecutable));
+
f.addString(getFileContents());
- return f.hexDigestAndReset();
+ computeKeyDebugWriter.println("FILECONTENTS: " + getFileContents());
+
+ String rv = f.hexDigestAndReset();
+ computeKeyDebugWriter.println("KEY: " + rv);
+ computeKeyDebugWriter.close();
+ return rv;
}

/**
diff --git a/src/main/java/com/google/devtools/build/lib/analysis/actions/SpawnAction.java b/src/main/java/com/google/devtools/build/lib/analysis/actions/SpawnAction.java
index 580788160..26883eb92 100644
--- a/src/main/java/com/google/devtools/build/lib/analysis/actions/SpawnAction.java
+++ b/src/main/java/com/google/devtools/build/lib/analysis/actions/SpawnAction.java
@@ -60,6 +60,7 @@ import com.google.devtools.build.lib.util.ShellEscaper;
import com.google.devtools.build.lib.vfs.PathFragment;
import com.google.protobuf.GeneratedMessage.GeneratedExtension;
import java.nio.charset.Charset;
+import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
@@ -91,6 +92,9 @@ public class SpawnAction extends AbstractAction implements ExecutionInfoSpecifie

private final CommandLine argv;

+ private final Iterable<Artifact> inputs;
+ private final Iterable<Artifact> outputs;
+
private final boolean executeUnconditionally;
private final boolean isShellCommand;
private final String progressMessage;
@@ -197,6 +201,9 @@ public class SpawnAction extends AbstractAction implements ExecutionInfoSpecifie
this.mnemonic = mnemonic;
this.executeUnconditionally = executeUnconditionally;
this.extraActionInfoSupplier = extraActionInfoSupplier;
+
+ this.inputs = inputs;
+ this.outputs = outputs;
}

@Override
@@ -312,23 +319,89 @@ public class SpawnAction extends AbstractAction implements ExecutionInfoSpecifie

@Override
protected String computeKey() {
+ boolean genruleSetup = String.valueOf(Iterables.get(inputs, 0).getExecPath()).contains("genrule/genrule-setup.sh");
+ boolean validGenrule = genruleSetup && (Iterables.size(inputs) > 1);
+
+ String genruleScript = null;
+ if (validGenrule) {
+ genruleScript = String.valueOf(Iterables.get(inputs, 1).getExecPath());
+ }
+
+ // ".ckd" Compute Key Debug
+ PrintWriter computeKeyDebugWriter = null;
+ if (validGenrule) {
+ String computeKeyDebugFile = genruleScript + ".SpawnAction.ckd";
+ try {
+ computeKeyDebugWriter = new PrintWriter(computeKeyDebugFile, "UTF-8");
+ } catch (java.io.FileNotFoundException ex) {
+ System.err.println("Unable to create " + computeKeyDebugFile);
+ } catch (java.io.UnsupportedEncodingException ex) {
+ System.err.println("Unsupported encoding");
+ }
+ }
+
+ validGenrule = validGenrule && (computeKeyDebugWriter != null);
+
Fingerprint f = new Fingerprint();
f.addString(GUID);
+ if (validGenrule) { computeKeyDebugWriter.println("GUID: " + GUID); }
+
f.addStrings(argv.arguments());
+ if (validGenrule) {
+ for (String input : argv.arguments()) {
+ computeKeyDebugWriter.println("ARGUMENTS: " + input);
+ }
+ }
+
f.addString(getMnemonic());
+ if (validGenrule) { computeKeyDebugWriter.println("MNEMONIC: " + getMnemonic()); }
+
// We don't need the toolManifests here, because they are a subset of the inputManifests by
// definition and the output of an action shouldn't change whether something is considered a
// tool or not.
f.addPaths(getRunfilesSupplier().getRunfilesDirs());
+ if (validGenrule) {
+ for (PathFragment path : getRunfilesSupplier().getRunfilesDirs()) {
+ computeKeyDebugWriter.println("RUNFILESDIRS: " + path.getPathString());
+ }
+ }
+
ImmutableList<Artifact> runfilesManifests = getRunfilesSupplier().getManifests();
f.addInt(runfilesManifests.size());
+ if (validGenrule) { computeKeyDebugWriter.println("RUNFILESMANIFESTSSIZE: " + runfilesManifests.size()); }
+
for (Artifact runfilesManifest : runfilesManifests) {
f.addPath(runfilesManifest.getExecPath());
+ if (validGenrule) { computeKeyDebugWriter.println("RUNFILESMANIFEST: " + runfilesManifest.getExecPath().getPathString()); }
}
+
f.addStringMap(getEnvironment());
+ if (validGenrule) {
+ for (Map.Entry<String, String> entry : getEnvironment().entrySet()) {
+ computeKeyDebugWriter.println("ENV: " + entry.getKey() + "=" + entry.getValue());
+ }
+ }
+
f.addStrings(getClientEnvironmentVariables());
+ if (validGenrule) {
+ for (String input : argv.arguments()) {
+ computeKeyDebugWriter.println("CLIENTENV: " + input);
+ }
+ }
+
f.addStringMap(getExecutionInfo());
- return f.hexDigestAndReset();
+ if (validGenrule) {
+ for (Map.Entry<String, String> entry : executionInfo.entrySet()) {
+ computeKeyDebugWriter.println("EXECINFO: " + entry.getKey() + "=" + entry.getValue());
+ }
+ }
+
+ String rv = f.hexDigestAndReset();
+ if (validGenrule) {
+ computeKeyDebugWriter.println("KEY: " + rv);
+ computeKeyDebugWriter.close();
+ }
+ return rv;
}

@Override
diff --git a/src/main/java/com/google/devtools/build/lib/rules/cpp/CppCompileAction.java b/src/main/java/com/google/devtools/build/lib/rules/cpp/CppCompileAction.java
index 3559fffde..3ba39617c 100644
--- a/src/main/java/com/google/devtools/build/lib/rules/cpp/CppCompileAction.java
+++ b/src/main/java/com/google/devtools/build/lib/rules/cpp/CppCompileAction.java
@@ -1111,10 +1111,30 @@ public class CppCompileAction extends AbstractAction

@Override
public String computeKey() {
+ // ".ckd" Compute Key Debug
+ PrintWriter computeKeyDebugWriter = null;
+ String computeKeyDebugFile = getInternalOutputFile() + ".CppCompileAction.ckd";
+ try {
+ computeKeyDebugWriter = new PrintWriter(computeKeyDebugFile, "UTF-8");
+ } catch (java.io.FileNotFoundException ex) {
+ System.err.println("Unable to create " + computeKeyDebugFile);
+ } catch (java.io.UnsupportedEncodingException ex) {
+ System.err.println("Unsupported encoding");
+ }
+
Fingerprint f = new Fingerprint();
f.addUUID(actionClassId);
+ computeKeyDebugWriter.println("UUID: " + actionClassId);
+
f.addStringMap(getEnvironment());
+ for (Map.Entry<String, String> entry : getEnvironment().entrySet()) {
+ computeKeyDebugWriter.println("ENV: " + entry.getKey() + "=" + entry.getValue());
+ }
+
f.addStringMap(executionInfo);
+ for (Map.Entry<String, String> entry : executionInfo.entrySet()) {
+ computeKeyDebugWriter.println("EXECINFO: " + entry.getKey() + "=" + entry.getValue());
+ }

// For the argv part of the cache key, ignore all compiler flags that explicitly denote module
// file (.pcm) inputs. Depending on input discovery, some of the unused ones are removed from
@@ -1124,6 +1144,9 @@ public class CppCompileAction extends AbstractAction
// A better long-term solution would be to make the compiler to find them automatically and
// never hand in the .pcm files explicitly on the command line in the first place.
f.addStrings(compileCommandLine.getArgv(getInternalOutputFile(), null));
+ for (String input : compileCommandLine.getArgv(getInternalOutputFile(), null)) {
+ computeKeyDebugWriter.println("COMMAND: " + input);
+ }

/*
* getArgv() above captures all changes which affect the compilation
@@ -1133,19 +1156,31 @@ public class CppCompileAction extends AbstractAction
* have changed, otherwise we might miss some errors.
*/
f.addPaths(context.getDeclaredIncludeDirs());
+ for (PathFragment path : context.getDeclaredIncludeDirs()) {
+ computeKeyDebugWriter.println("DECLAREDINCLUDEDIRS: " + path.getPathString());
+ }
f.addPaths(context.getDeclaredIncludeWarnDirs());
+ for (PathFragment path : context.getDeclaredIncludeWarnDirs()) {
+ computeKeyDebugWriter.println("DECLAREDINCLUDEWARNDIRS: " + path.getPathString());
+ }
for (Artifact declaredIncludeSrc : context.getDeclaredIncludeSrcs()) {
f.addPath(declaredIncludeSrc.getExecPath());
+ computeKeyDebugWriter.println("DECLAREDINCLUDESRCS: " + declaredIncludeSrc.getExecPath().getPathString());
}
f.addInt(0); // mark the boundary between input types
for (Artifact input : getMandatoryInputs()) {
f.addPath(input.getExecPath());
+ computeKeyDebugWriter.println("MANDATORYINPUTS: " + input.getExecPath().getPathString());
}
f.addInt(0);
for (Artifact input : prunableInputs) {
f.addPath(input.getExecPath());
+ computeKeyDebugWriter.println("PRUNABLEINPUTS: " + input.getExecPath().getPathString());
}
- return f.hexDigestAndReset();
+ String rv = f.hexDigestAndReset();
+ computeKeyDebugWriter.println("KEY: " + rv);
+ computeKeyDebugWriter.close();
+ return rv;
}

@Override
@@ -1,9 +1,9 @@
# Description: Deepspeech native client library.

load("//tensorflow:tensorflow.bzl",
"if_linux_x86_64", "tf_cc_shared_object")
load("@org_tensorflow//tensorflow:tensorflow.bzl",
"if_linux_x86_64", "tf_cc_shared_object", "if_cuda")

load("//tensorflow/compiler/aot:tfcompile.bzl",
load("@org_tensorflow//tensorflow/compiler/aot:tfcompile.bzl",
"tf_library")

load(":deepspeech.bzl", "if_native_model")

@@ -16,7 +16,7 @@ config_setting(
)

tf_library(
name = "deepspeech_model",
name = "deepspeech_model_core",
cpp_class = "DeepSpeech::nativeModel",
# We don't need tests or benchmark binaries
gen_test=False, gen_benchmark=False,

@@ -26,7 +26,7 @@ tf_library(
config = "tfcompile.config.pbtxt",
# This depends on //tensorflow:rpi3 condition defined in mozilla/tensorflow
tfcompile_flags = select({
"//tensorflow:rpi3": str('--target_triple="armv6-linux-gnueabihf" --target_cpu="cortex-a53" --target_features="+neon-fp-armv8"'),
"//tensorflow:rpi3": str('--target_cpu="cortex-a53"'),
"//conditions:default": str('')
}),
)

@@ -45,36 +45,66 @@ genrule(
cmd = "cp $(DS_MODEL_FILE) $@"
)

cc_library(
name = "deepspeech",
srcs = ["deepspeech.cc", "alphabet.h", "beam_search.h", "trie_node.h"] +
tf_cc_shared_object(
name = "libdeepspeech.so",
srcs = ["deepspeech.cc", "deepspeech.h", "deepspeech_utils.h", "alphabet.h", "beam_search.h", "trie_node.h"] +
if_native_model(["deepspeech_model_core.h"]) +
glob(["kenlm/lm/*.cc", "kenlm/util/*.cc", "kenlm/util/double-conversion/*.cc",
"kenlm/lm/*.hh", "kenlm/util/*.hh", "kenlm/util/double-conversion/*.h"],
exclude = ["kenlm/*/*test.cc", "kenlm/*/*main.cc"]) +
glob(["boost_locale/**/*.hpp"]),
hdrs = ["deepspeech.h"],
# -Wno-sign-compare to silent a lot of warnings from tensorflow itself,
# which makes it harder to see our own warnings
copts = ["-std=c++11", "-Wno-sign-compare"] + if_native_model([
copts = ["-std=c++11", "-Wno-sign-compare", "-fvisibility=hidden"] + if_native_model([
"-DDS_MODEL_TIMESTEPS=$(DS_MODEL_TIMESTEPS)",
"-DDS_NATIVE_MODEL=1",
]),
linkopts = select({
"//tensorflow:darwin": [
"-Wl,-install_name,@rpath/libdeepspeech.so"
],
"//conditions:default": []
}),
deps = [
"//tensorflow/core:core",
"//tensorflow/core:core_cpu",
"//tensorflow/core:direct_session",
### "//tensorflow/core:all_kernels",
### => Trying to be more fine-grained
### Obtained by trial/error process ...
### CPU only build libdeepspeech.so from 63M to 36M
"//tensorflow/core/kernels:constant_op", # Const
"//tensorflow/core/kernels:identity_op", # Identity
"//tensorflow/core/kernels:transpose_op", # Transpose
"//tensorflow/core/kernels:reshape_op", # Reshape
"//tensorflow/core/kernels:shape_ops", # Shape
"//tensorflow/core/kernels:strided_slice_op", # StridedSlice
"//tensorflow/core/kernels:pack_op", # Pack
"//tensorflow/core/kernels:reverse_op", # ReverseV2
"//tensorflow/core/kernels:concat_op", # ConcatV2
"//tensorflow/core/kernels:split_op", # Split
"//tensorflow/core/kernels:sparse_to_dense_op", # SparseToDense
"//tensorflow/core/kernels:relu_op", # Relu
"//tensorflow/core/kernels:bias_op", # BiasAdd
"//tensorflow/core/kernels:math", # Range, MatMul
"//tensorflow/core/kernels:tensor_array_ops", # Placeholder, TensorArrayV3
"//tensorflow/core/kernels:control_flow_ops", # Enter
"//tensorflow/core/kernels:ctc_ops", # CTCBeamSearchDecoder
### Needed by production model produced without "--use_seq_length False"
"//tensorflow/core/kernels:logging_ops", # Assert
"//tensorflow/core/kernels:reverse_sequence_op", # ReverseSequence
# Classic deps
"//tensorflow/core/util/ctc",
"//third_party/eigen3",
":deepspeech_utils"
] + if_native_model([":deepspeech_model", "//tensorflow/compiler/tf2xla:xla_compiled_cpu_function"]),
] + if_native_model([
"//tensorflow/compiler/tf2xla:xla_compiled_cpu_function",
])
+ if_cuda([
"//tensorflow/core:core",
"//tensorflow/core/kernels:slice_op_gpu", # Slice GPU
]),
includes = ["kenlm", "boost_locale"],
defines = ["KENLM_MAX_ORDER=6"],
)

tf_cc_shared_object(
name = "libdeepspeech_model.so",
deps = [":deepspeech_model_core"]
)

# We have a single rule including c_speech_features and kissfft here as Bazel
# doesn't support static linking in library targets.
@@ -46,15 +46,13 @@ bindings-package: MANIFEST.in
bindings: bindings-build bindings-package

run: deepspeech
${META_LD_LIBRARY_PATH}=${TFDIR}/bazel-bin/tensorflow:${TFDIR}/bazel-bin/native_client:${${META_LD_LIBRARY_PATH}} ./deepspeech ${ARGS}
${META_LD_LIBRARY_PATH}=${TFDIR}/bazel-bin/native_client:${${META_LD_LIBRARY_PATH}} ./deepspeech ${ARGS}

debug: deepspeech
${META_LD_LIBRARY_PATH}=${TFDIR}/bazel-bin/tensorflow:${TFDIR}/bazel-bin/native_client:${${META_LD_LIBRARY_PATH}} gdb --args ./deepspeech ${ARGS}
${META_LD_LIBRARY_PATH}=${TFDIR}/bazel-bin/native_client:${${META_LD_LIBRARY_PATH}} gdb --args ./deepspeech ${ARGS}

install: deepspeech
install -d ${PREFIX}/lib
install -m 0644 ${TFDIR}/bazel-bin/tensorflow/libtensorflow_cc.so ${PREFIX}/lib/
install -m 0644 ${TFDIR}/bazel-bin/tensorflow/libtensorflow_framework.so ${PREFIX}/lib/
install -m 0644 ${TFDIR}/bazel-bin/native_client/libdeepspeech.so ${PREFIX}/lib/
install -m 0644 ${TFDIR}/bazel-bin/native_client/libdeepspeech_utils.so ${PREFIX}/lib/
install -d ${PREFIX}/bin

@@ -65,6 +63,4 @@ uninstall:
rmdir --ignore-fail-on-non-empty ${PREFIX}/bin
rm -f ${PREFIX}/lib/libdeepspeech_utils.so
rm -f ${PREFIX}/lib/libdeepspeech.so
rm -f ${PREFIX}/lib/libtensorflow_cc.so
rm -f ${PREFIX}/lib/libtensorflow_framework.so
rmdir --ignore-fail-on-non-empty ${PREFIX}/lib
@@ -72,7 +72,7 @@ Before building the DeepSpeech client libraries, you will need to prepare your e
Then you can build the Tensorflow and DeepSpeech libraries.

```
bazel build -c opt --copt=-O3 //tensorflow:libtensorflow_cc.so //tensorflow:libtensorflow_framework.so //native_client:deepspeech //native_client:deepspeech_utils //native_client:libctc_decoder_with_kenlm.so //native_client:generate_trie
bazel build --config=monolithic -c opt --copt=-O3 --copt=-fvisibility=hidden //native_client:libdeepspeech.so //native_client:deepspeech_utils //native_client:libctc_decoder_with_kenlm.so //native_client:generate_trie
```

Finally, you can change to the `native_client` directory and use the `Makefile`. By default, the `Makefile` will assume there is a TensorFlow checkout in a directory above the DeepSpeech checkout. If that is not the case, set the environment variable `TFDIR` to point to the right directory.
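As a rough sketch of that step (the `$HOME/tensorflow` path is only a placeholder for wherever your TensorFlow checkout actually lives; `deepspeech` is the binary target the Makefile shown earlier in this diff builds), the invocation might look like:

```
# Sketch only: adjust TFDIR to point at your real TensorFlow checkout.
cd native_client
TFDIR=$HOME/tensorflow make deepspeech
```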
@@ -96,17 +96,16 @@ Bazel defines:

Bazel targets:
* `//native_client:deepspeech_model`: to produce `libdeepspeech_model.so`
* `//tensorflow/compiler/aot:runtime `, `//tensorflow/compiler/xla/service/cpu:runtime_matmul`, `//tensorflow/compiler/xla:executable_run_options`

In the end, the previous example becomes:

```
bazel build -c opt --copt=-O3 --define=DS_NATIVE_MODEL=1 --define=DS_MODEL_TIMESTEPS=64 --define=DS_MODEL_FRAMESIZE=494 --define=DS_MODEL_FILE=/tmp/model.ldc93s1.pb //tensorflow:libtensorflow_cc.so //tensorflow:libtensorflow_framework.so //native_client:deepspeech_model //tensorflow/compiler/aot:runtime //tensorflow/compiler/xla/service/cpu:runtime_matmul //tensorflow/compiler/xla:executable_run_options //native_client:deepspeech //native_client:deepspeech_utils //native_client:libctc_decoder_with_kenlm.so //native_client:generate_trie
bazel build --config=monolithic -c opt --copt=-O3 --copt=-fvisibility=hidden --define=DS_NATIVE_MODEL=1 --define=DS_MODEL_TIMESTEPS=64 --define=DS_MODEL_FRAMESIZE=494 --define=DS_MODEL_FILE=/tmp/model.ldc93s1.pb //native_client:deepspeech_model //native_client:libdeepspeech.so //native_client:deepspeech_utils //native_client:libctc_decoder_with_kenlm.so //native_client:generate_trie
```

Later, when building either `deepspeech` binaries or bindings, you will have to add some extra variables to your `make` command-line (assuming `TFDIR` points to your TensorFlow's git clone):
```
EXTRA_LDFLAGS="-L${TFDIR}/bazel-bin/tensorflow/compiler/xla/ -L${TFDIR}/bazel-bin/tensorflow/compiler/aot/ -L${TFDIR}/bazel-bin/tensorflow/compiler/xla/service/cpu/" EXTRA_LIBS="-ldeepspeech_model -lruntime -lexecutable_run_options -lruntime_matmul"
EXTRA_LIBS="-ldeepspeech_model"
```
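For illustration only (a hypothetical invocation that simply combines the variable quoted above with the `deepspeech` Makefile target shown earlier in this diff; the `TFDIR` value is an assumption about your local layout), that could look like:

```
# Sketch: pass the extra linker inputs on the make command line.
TFDIR=$HOME/tensorflow EXTRA_LIBS="-ldeepspeech_model" make deepspeech
```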

## Installing
@@ -4,7 +4,7 @@

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"

#include "native_client/deepspeech_model.h" // generated
#include "native_client/deepspeech_model_core.h" // generated
#endif

#include <iostream>

@@ -37,6 +37,7 @@ class Private {
bool run_aot;
};

DEEPSPEECH_EXPORT
Model::Model(const char* aModelPath, int aNCep, int aNContext,
const char* aAlphabetConfigPath, int aBeamWidth)
{

@@ -97,6 +98,7 @@ Model::Model(const char* aModelPath, int aNCep, int aNContext,
}
}

DEEPSPEECH_EXPORT
Model::~Model()
{
if (mPriv->session) {

@@ -109,6 +111,7 @@ Model::~Model()
delete mPriv;
}

DEEPSPEECH_EXPORT
void
Model::enableDecoderWithLM(const char* aAlphabetConfigPath, const char* aLMPath,
const char* aTriePath, float aLMWeight,

@@ -118,6 +121,7 @@ Model::enableDecoderWithLM(const char* aAlphabetConfigPath, const char* aLMPath,
aLMWeight, aWordCountWeight, aValidWordCountWeight);
}

DEEPSPEECH_EXPORT
void
Model::getInputVector(const short* aBuffer, unsigned int aBufferSize,
int aSampleRate, float** aMfcc, int* aNFrames,

@@ -204,6 +208,7 @@ Model::decode(int aNFrames, float*** aLogits)
return output;
}

DEEPSPEECH_EXPORT
char*
Model::infer(float* aMfcc, int aNFrames, int aFrameLen)
{

@@ -303,6 +308,7 @@ Model::infer(float* aMfcc, int aNFrames, int aFrameLen)
return decode(aNFrames, input_data_mat);
}

DEEPSPEECH_EXPORT
char*
Model::stt(const short* aBuffer, unsigned int aBufferSize, int aSampleRate)
{
@@ -3,6 +3,8 @@

#include <cstddef>

#define DEEPSPEECH_EXPORT __attribute__ ((visibility("default")))

namespace DeepSpeech
{
@@ -12,6 +12,7 @@

namespace DeepSpeech {

DEEPSPEECH_EXPORT
void
audioToInputVector(const short* aBuffer, unsigned int aBufferSize,
int aSampleRate, int aNCep, int aNContext, float** aMfcc,
@@ -1,9 +1,10 @@

#ifndef __DEEPSPEECH_UTILS_H__
#define __DEEPSPEECH_UTILS_H__

#include <cstddef>

#define DEEPSPEECH_EXPORT __attribute__ ((visibility("default")))

namespace DeepSpeech
{
@@ -48,8 +48,8 @@ LDFLAGS_RPATH := -Wl,-rpath,@executable_path
endif

CFLAGS += $(EXTRA_CFLAGS)
LIBS := -ldeepspeech -ldeepspeech_utils -ltensorflow_cc -ltensorflow_framework $(EXTRA_LIBS)
LDFLAGS_DIRS := -L${TFDIR}/bazel-bin/tensorflow -L${TFDIR}/bazel-bin/native_client $(EXTRA_LDFLAGS)
LIBS := -ldeepspeech -ldeepspeech_utils $(EXTRA_LIBS)
LDFLAGS_DIRS := -L${TFDIR}/bazel-bin/native_client $(EXTRA_LDFLAGS)
LDFLAGS += $(LDFLAGS_NEEDED) $(LDFLAGS_RPATH) $(LDFLAGS_DIRS) $(LIBS)

AS := $(TOOLCHAIN)as

@@ -85,7 +85,7 @@ define copy_missing_libs
missing_libs=""; \
for lib in $$SRC_FILE; do \
if [ "$(OS)" = "Darwin" ]; then \
new_missing="$$( (for f in $$(otool -L $$lib 2>/dev/null | tail -n +2 | awk '{ print $$1 }' | grep -v '$$lib'); do ls -hal $$f; done;) 2>&1 | grep 'No such' | cut -d':' -f2 | xargs basename)"; \
new_missing="$$( (for f in $$(otool -L $$lib 2>/dev/null | tail -n +2 | awk '{ print $$1 }' | grep -v '$$lib'); do ls -hal $$f; done;) 2>&1 | grep 'No such' | cut -d':' -f2 | xargs basename -a)"; \
missing_libs="$$missing_libs $$new_missing"; \
else \
missing_libs="$$missing_libs $$($(LDD) $$lib | grep 'not found' | awk '{ print $$1 }')"; \
@@ -29,5 +29,4 @@ build:
package: ''
args:
tests_cmdline: ''
python_wheel: ''
deepspeech_pkg_name: ''
@@ -7,7 +7,7 @@ source $(dirname "$0")/../tc-tests-utils.sh
source ${DS_ROOT_TASK}/DeepSpeech/tf/tc-vars.sh

BAZEL_TARGETS="
//native_client:deepspeech
//native_client:libdeepspeech.so
//native_client:deepspeech_utils
${BAZEL_AOT_TARGETS}"

@@ -19,7 +19,7 @@ EXTRA_LOCAL_LDFLAGS="${EXTRA_AOT_LDFLAGS}"
EXTRA_LOCAL_LIBS="${EXTRA_AOT_LIBS}"

do_get_model_parameters "${DEEPSPEECH_TEST_MODEL}" AOT_MODEL_PARAMS
BAZEL_BUILD_FLAGS="${BAZEL_OPT_FLAGS} ${BAZEL_AOT_BUILD_FLAGS} ${AOT_MODEL_PARAMS}"
BAZEL_BUILD_FLAGS="${BAZEL_OPT_FLAGS} ${BAZEL_EXTRA_FLAGS} ${BAZEL_AOT_BUILD_FLAGS} ${AOT_MODEL_PARAMS}"

do_bazel_build

@@ -7,14 +7,13 @@ source $(dirname "$0")/../tc-tests-utils.sh
source ${DS_ROOT_TASK}/DeepSpeech/tf/tc-vars.sh

BAZEL_TARGETS="
//native_client:deepspeech
//native_client:libdeepspeech.so
//native_client:deepspeech_utils
//native_client:generate_trie
${BAZEL_CTC_TARGETS}
"

BAZEL_ENV_FLAGS="TF_NEED_CUDA=1 ${TF_CUDA_FLAGS}"
BAZEL_BUILD_FLAGS="${BAZEL_CUDA_FLAGS} ${BAZEL_OPT_FLAGS}"
BAZEL_BUILD_FLAGS="${BAZEL_CUDA_FLAGS} ${BAZEL_EXTRA_FLAGS} ${BAZEL_OPT_FLAGS}"
SYSTEM_TARGET=host
EXTRA_LOCAL_CFLAGS=""
EXTRA_LOCAL_LDFLAGS="-L${DS_ROOT_TASK}/DeepSpeech/CUDA/lib64/ -L${DS_ROOT_TASK}/DeepSpeech/CUDA/lib64/stubs/ -lcudart -lcuda"

@@ -26,3 +25,5 @@ do_deepspeech_binary_build
do_deepspeech_python_build rename_to_gpu

do_deepspeech_nodejs_build rename_to_gpu

$(dirname "$0")/decoder-build.sh
@@ -4,9 +4,8 @@ build:
- "index.project.deepspeech.deepspeech.native_client.${event.head.branch}.osx_aot"
- "index.project.deepspeech.deepspeech.native_client.${event.head.branch}.${event.head.sha}.osx_aot"
- "index.project.deepspeech.deepspeech.native_client.osx_aot.${event.head.sha}"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.osx/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.osx/artifacts/public/summarize_graph"
libtensorflow_framework: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.osx/artifacts/public/libtensorflow_framework.so"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.995194cbb18744f48347ce3c003e999b4d5187f1.osx/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.995194cbb18744f48347ce3c003e999b4d5187f1.osx/artifacts/public/summarize_graph"
scripts:
build: "taskcluster/host-build.sh --aot"
package: "taskcluster/package.sh"

@@ -6,9 +6,8 @@ build:
- "index.project.deepspeech.deepspeech.native_client.osx.${event.head.sha}"
- "notify.irc-channel.${notifications.irc}.on-exception"
- "notify.irc-channel.${notifications.irc}.on-failed"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.osx/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.osx/artifacts/public/summarize_graph"
libtensorflow_framework: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.osx/artifacts/public/libtensorflow_framework.so"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.995194cbb18744f48347ce3c003e999b4d5187f1.osx/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.995194cbb18744f48347ce3c003e999b4d5187f1.osx/artifacts/public/summarize_graph"
scripts:
build: "taskcluster/host-build.sh"
package: "taskcluster/package.sh"

@@ -36,11 +36,10 @@ payload:

env:
$let:
training: { $eval: as_slugid("test-training-linux-amd64-py27-opt") }
training: { $eval: as_slugid("test-training_upstream-linux-amd64-py27-opt") }
in:
TENSORFLOW_BUILD_ARTIFACT: ${build.tensorflow}
SUMMARIZE_GRAPH_BINARY: ${build.summarize_graph}
LIBTENSORFLOW_FRAMEWORK: ${build.libtensorflow_framework}
DEEPSPEECH_TEST_MODEL: https://queue.taskcluster.net/v1/task/${training}/artifacts/public/output_graph.pb
DEEPSPEECH_PROD_MODEL: https://github.com/lissyx/DeepSpeech/releases/download/0.0.2/tc-fake-prod.988_e120.LSTM.ldc93s1.pb
@@ -7,7 +7,7 @@ source $(dirname "$0")/../tc-tests-utils.sh
source ${DS_ROOT_TASK}/DeepSpeech/tf/tc-vars.sh

BAZEL_ENV_FLAGS="TF_NEED_CUDA=0"
BAZEL_BUILD_FLAGS="${BAZEL_OPT_FLAGS}"
BAZEL_BUILD_FLAGS="${BAZEL_OPT_FLAGS} ${BAZEL_EXTRA_FLAGS}"
BAZEL_TARGETS="${BAZEL_CTC_TARGETS}"

do_bazel_build
do_bazel_shared_build

@@ -7,13 +7,12 @@ source $(dirname "$0")/../tc-tests-utils.sh
source ${DS_ROOT_TASK}/DeepSpeech/tf/tc-vars.sh

BAZEL_TARGETS="
//native_client:deepspeech
//native_client:libdeepspeech.so
//native_client:deepspeech_utils
//native_client:generate_trie
${BAZEL_CTC_TARGETS}
"

BAZEL_BUILD_FLAGS="${BAZEL_OPT_FLAGS}"
BAZEL_BUILD_FLAGS="${BAZEL_OPT_FLAGS} ${BAZEL_EXTRA_FLAGS}"
BAZEL_ENV_FLAGS="TF_NEED_CUDA=0"
SYSTEM_TARGET=host

@@ -35,3 +34,5 @@ do_deepspeech_binary_build
do_deepspeech_python_build

do_deepspeech_nodejs_build

$(dirname "$0")/decoder-build.sh
@@ -4,9 +4,8 @@ build:
- "index.project.deepspeech.deepspeech.native_client.${event.head.branch}.cpu_aot"
- "index.project.deepspeech.deepspeech.native_client.${event.head.branch}.${event.head.sha}.cpu_aot"
- "index.project.deepspeech.deepspeech.native_client.cpu_aot.${event.head.sha}"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/summarize_graph"
libtensorflow_framework: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/libtensorflow_framework.so"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.995194cbb18744f48347ce3c003e999b4d5187f1.cpu/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.995194cbb18744f48347ce3c003e999b4d5187f1.cpu/artifacts/public/summarize_graph"
system_setup:
>
${nodejs.packages.prep_6} && apt-get -qq update && apt-get -qq -y install nodejs python-yaml &&

@@ -5,10 +5,9 @@ build:
- "pull_request.reopened"
template_file: linux-opt-base.tyml
dependencies:
- "test-training-linux-amd64-py27-opt"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/summarize_graph"
libtensorflow_framework: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/libtensorflow_framework.so"
- "test-training_upstream-linux-amd64-py27-opt"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.995194cbb18744f48347ce3c003e999b4d5187f1.cpu/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.995194cbb18744f48347ce3c003e999b4d5187f1.cpu/artifacts/public/summarize_graph"
system_setup:
>
${nodejs.packages.prep_6} && apt-get -qq update && apt-get -qq -y install nodejs python-yaml &&

@@ -13,9 +13,8 @@ build:
system_config:
>
${swig.patch_nodejs.linux}
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/summarize_graph"
libtensorflow_framework: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/libtensorflow_framework.so"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.995194cbb18744f48347ce3c003e999b4d5187f1.cpu/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.995194cbb18744f48347ce3c003e999b4d5187f1.cpu/artifacts/public/summarize_graph"
scripts:
build: "taskcluster/host-build.sh"
package: "taskcluster/package.sh"

@@ -4,9 +4,8 @@ build:
- "pull_request.synchronize"
- "pull_request.reopened"
template_file: linux-opt-base.tyml
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/summarize_graph"
libtensorflow_framework: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/libtensorflow_framework.so"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.995194cbb18744f48347ce3c003e999b4d5187f1.cpu/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.995194cbb18744f48347ce3c003e999b4d5187f1.cpu/artifacts/public/summarize_graph"
scripts:
build: 'taskcluster/decoder-build.sh'
package: 'taskcluster/decoder-package.sh'

@@ -11,9 +11,8 @@ build:
system_config:
>
${swig.patch_nodejs.linux}
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.gpu/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.gpu/artifacts/public/summarize_graph"
libtensorflow_framework: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.gpu/artifacts/public/libtensorflow_framework.so"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.995194cbb18744f48347ce3c003e999b4d5187f1.gpu/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.995194cbb18744f48347ce3c003e999b4d5187f1.gpu/artifacts/public/summarize_graph"
maxRunTime: 14400
scripts:
build: "taskcluster/cuda-build.sh"

@@ -33,11 +33,10 @@ then:

env:
$let:
training: { $eval: as_slugid("test-training-linux-amd64-py27-opt") }
training: { $eval: as_slugid("test-training_upstream-linux-amd64-py27-opt") }
in:
TENSORFLOW_BUILD_ARTIFACT: ${build.tensorflow}
SUMMARIZE_GRAPH_BINARY: ${build.summarize_graph}
LIBTENSORFLOW_FRAMEWORK: ${build.libtensorflow_framework}
DEEPSPEECH_TEST_MODEL: https://queue.taskcluster.net/v1/task/${training}/artifacts/public/output_graph.pb
DEEPSPEECH_PROD_MODEL: https://github.com/lissyx/DeepSpeech/releases/download/0.0.2/tc-fake-prod.988_e120.LSTM.ldc93s1.pb
@@ -4,9 +4,8 @@ build:
- "index.project.deepspeech.deepspeech.native_client.${event.head.branch}.arm_aot"
- "index.project.deepspeech.deepspeech.native_client.${event.head.branch}.${event.head.sha}.arm_aot"
- "index.project.deepspeech.deepspeech.native_client.arm_aot.${event.head.sha}"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.arm/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/summarize_graph"
libtensorflow_framework: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/libtensorflow_framework.so"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.995194cbb18744f48347ce3c003e999b4d5187f1.arm/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.995194cbb18744f48347ce3c003e999b4d5187f1.cpu/artifacts/public/summarize_graph"
## multistrap 2.2.0-ubuntu1 is broken in 14.04: https://bugs.launchpad.net/ubuntu/+source/multistrap/+bug/1313787
system_setup:
>

@@ -4,9 +4,8 @@ build:
- "index.project.deepspeech.deepspeech.native_client.${event.head.branch}.arm"
- "index.project.deepspeech.deepspeech.native_client.${event.head.branch}.${event.head.sha}.arm"
- "index.project.deepspeech.deepspeech.native_client.arm.${event.head.sha}"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.arm/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/summarize_graph"
libtensorflow_framework: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/libtensorflow_framework.so"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.995194cbb18744f48347ce3c003e999b4d5187f1.arm/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.995194cbb18744f48347ce3c003e999b4d5187f1.cpu/artifacts/public/summarize_graph"
## multistrap 2.2.0-ubuntu1 is broken in 14.04: https://bugs.launchpad.net/ubuntu/+source/multistrap/+bug/1313787
system_setup:
>

@@ -14,9 +14,8 @@ build:
system_config:
>
${swig.patch_nodejs.linux}
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/summarize_graph"
libtensorflow_framework: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/libtensorflow_framework.so"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.995194cbb18744f48347ce3c003e999b4d5187f1.cpu/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.995194cbb18744f48347ce3c003e999b4d5187f1.cpu/artifacts/public/summarize_graph"
scripts:
build: "taskcluster/node-build.sh"
package: "taskcluster/node-package.sh"

@@ -6,6 +6,8 @@ source $(dirname "$0")/../tc-tests-utils.sh

mkdir -p ${TASKCLUSTER_ARTIFACTS} || true

cp ${DS_ROOT_TASK}/DeepSpeech/tf/bazel*.log ${TASKCLUSTER_ARTIFACTS}/

package_native_client "native_client.tar.xz"

if [ -d ${DS_ROOT_TASK}/DeepSpeech/ds/wheels ]; then

@@ -7,13 +7,12 @@ source $(dirname "$0")/../tc-tests-utils.sh
source ${DS_ROOT_TASK}/DeepSpeech/tf/tc-vars.sh

BAZEL_TARGETS="
//native_client:deepspeech
//native_client:libdeepspeech.so
//native_client:deepspeech_utils
//native_client:generate_trie
${BAZEL_CTC_TARGETS}
"

BAZEL_BUILD_FLAGS="${BAZEL_ARM_FLAGS}"
BAZEL_BUILD_FLAGS="${BAZEL_ARM_FLAGS} ${BAZEL_EXTRA_FLAGS}"
BAZEL_ENV_FLAGS="TF_NEED_CUDA=0"
SYSTEM_TARGET=rpi3
@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-aot_test-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
apt-get -qq -y install ${python.packages.apt} zip

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
apt-get -qq -y install ${python.packages.apt} zip

@@ -2,7 +2,7 @@ build:
template_file: test-darwin-opt-base.tyml
dependencies:
- "darwin-amd64-cpu-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
${cpp.brew.setup} && ${cpp.brew.packages} && ${cpp.brew.env}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-aot_test-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-cpp-ds-tests.sh --aot"
metadata:

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-cpp-ds-tests.sh"
metadata:

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-cpp-ds-tests-prod.sh"
metadata:

@@ -30,11 +30,10 @@ then:

env:
$let:
training: { $eval: as_slugid("test-training-linux-amd64-py27-opt") }
training: { $eval: as_slugid("test-training_upstream-linux-amd64-py27-opt") }
darwin_amd64_build: { $eval: as_slugid("darwin-amd64-cpu-opt") }
node_package: { $eval: as_slugid("node-package") }
in:
TENSORFLOW_WHEEL: ${build.python_wheel}
DEEPSPEECH_ARTIFACTS_ROOT: https://queue.taskcluster.net/v1/task/${darwin_amd64_build}/artifacts/public
DEEPSPEECH_NODEJS: https://queue.taskcluster.net/v1/task/${node_package}/artifacts/public
DEEPSPEECH_TEST_MODEL: https://queue.taskcluster.net/v1/task/${training}/artifacts/public/output_graph.pb

@@ -31,13 +31,12 @@ then:

env:
$let:
training: { $eval: as_slugid("test-training-linux-amd64-py27-opt") }
training: { $eval: as_slugid("test-training_upstream-linux-amd64-py27-opt") }
linux_amd64_aot_test: { $eval: as_slugid("linux-amd64-cpu-aot_test-opt") }
linux_amd64_build: { $eval: as_slugid("linux-amd64-cpu-opt") }
linux_amd64_ctc: { $eval: as_slugid("linux-amd64-ctc-opt") }
node_package: { $eval: as_slugid("node-package") }
in:
TENSORFLOW_WHEEL: ${build.python_wheel}
DEEPSPEECH_ARTIFACTS_ROOT: https://queue.taskcluster.net/v1/task/${linux_amd64_build}/artifacts/public
DEEPSPEECH_PYTHON_PACKAGE: https://queue.taskcluster.net/v1/task/${linux_amd64_build}/artifacts/public/${build.deepspeech_pkg_name}
DEEPSPEECH_NODEJS: https://queue.taskcluster.net/v1/task/${node_package}/artifacts/public
@@ -2,7 +2,7 @@ build:
template_file: test-darwin-opt-base.tyml
dependencies:
- "node-package"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
${nodejs.brew.setup} && ${nodejs.brew.prep_4} && ${nodejs.brew.env}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-aot_test-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
${nodejs.packages.prep_4} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "node-package"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
${nodejs.packages.prep_4} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "node-package"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
${nodejs.packages.prep_4} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-darwin-opt-base.tyml
dependencies:
- "node-package"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
${nodejs.brew.setup} && ${nodejs.brew.prep_5} && ${nodejs.brew.env}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-aot_test-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
${nodejs.packages.prep_5} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "node-package"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
${nodejs.packages.prep_5} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "node-package"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
${nodejs.packages.prep_5} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-darwin-opt-base.tyml
dependencies:
- "node-package"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
${nodejs.brew.setup} && ${nodejs.brew.prep_6} && ${nodejs.brew.env}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-aot_test-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
${nodejs.packages.prep_6} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "node-package"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
${nodejs.packages.prep_6} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "node-package"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
${nodejs.packages.prep_6} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-darwin-opt-base.tyml
dependencies:
- "node-package"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
${nodejs.brew.setup} && ${nodejs.brew.prep_7} && ${nodejs.brew.env}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-aot_test-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
${nodejs.packages.prep_7} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "node-package"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
${nodejs.packages.prep_7} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "node-package"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
${nodejs.packages.prep_7} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-darwin-opt-base.tyml
dependencies:
- "node-package"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
${nodejs.brew.setup} && ${nodejs.brew.prep_8} && ${nodejs.brew.env}
@ -2,7 +2,7 @@ build:
|
|||
template_file: test-linux-opt-base.tyml
|
||||
dependencies:
|
||||
- "linux-amd64-cpu-aot_test-opt"
|
||||
- "test-training-linux-amd64-py27-opt"
|
||||
- "test-training_upstream-linux-amd64-py27-opt"
|
||||
system_setup:
|
||||
>
|
||||
${nodejs.packages.prep_8} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}
|
||||
|
|
|
@ -2,7 +2,7 @@ build:
|
|||
template_file: test-linux-opt-base.tyml
|
||||
dependencies:
|
||||
- "node-package"
|
||||
- "test-training-linux-amd64-py27-opt"
|
||||
- "test-training_upstream-linux-amd64-py27-opt"
|
||||
system_setup:
|
||||
>
|
||||
${nodejs.packages.prep_8} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}
|
||||
|
|
|
@ -2,7 +2,7 @@ build:
|
|||
template_file: test-linux-opt-base.tyml
|
||||
dependencies:
|
||||
- "node-package"
|
||||
- "test-training-linux-amd64-py27-opt"
|
||||
- "test-training_upstream-linux-amd64-py27-opt"
|
||||
system_setup:
|
||||
>
|
||||
${nodejs.packages.prep_8} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}
|
||||
|
|
|
@ -2,7 +2,7 @@ build:
|
|||
template_file: test-darwin-opt-base.tyml
|
||||
dependencies:
|
||||
- "node-package"
|
||||
- "test-training-linux-amd64-py27-opt"
|
||||
- "test-training_upstream-linux-amd64-py27-opt"
|
||||
system_setup:
|
||||
>
|
||||
${nodejs.brew.setup} && ${nodejs.brew.prep_9} && ${nodejs.brew.env}
|
||||
|
|
|
@ -2,7 +2,7 @@ build:
|
|||
template_file: test-linux-opt-base.tyml
|
||||
dependencies:
|
||||
- "linux-amd64-cpu-aot_test-opt"
|
||||
- "test-training-linux-amd64-py27-opt"
|
||||
- "test-training_upstream-linux-amd64-py27-opt"
|
||||
system_setup:
|
||||
>
|
||||
${nodejs.packages.prep_9} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}
|
||||
|
|
|
@ -2,7 +2,7 @@ build:
|
|||
template_file: test-linux-opt-base.tyml
|
||||
dependencies:
|
||||
- "node-package"
|
||||
- "test-training-linux-amd64-py27-opt"
|
||||
- "test-training_upstream-linux-amd64-py27-opt"
|
||||
system_setup:
|
||||
>
|
||||
${nodejs.packages.prep_9} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}
|
||||
|
|
|
@ -2,7 +2,7 @@ build:
|
|||
template_file: test-linux-opt-base.tyml
|
||||
dependencies:
|
||||
- "node-package"
|
||||
- "test-training-linux-amd64-py27-opt"
|
||||
- "test-training_upstream-linux-amd64-py27-opt"
|
||||
system_setup:
|
||||
>
|
||||
${nodejs.packages.prep_9} && apt-get -qq update && apt-get -qq -y install ${nodejs.packages.apt}
|
||||
|
|
|
@ -2,7 +2,7 @@ build:
|
|||
template_file: test-darwin-opt-base.tyml
|
||||
dependencies:
|
||||
- "darwin-amd64-cpu-opt"
|
||||
- "test-training-linux-amd64-py27-opt"
|
||||
- "test-training_upstream-linux-amd64-py27-opt"
|
||||
args:
|
||||
tests_cmdline: "$TASKCLUSTER_TASK_DIR/DeepSpeech/ds/tc-python-tests.sh 2.7.13"
|
||||
metadata:
|
||||
|
|
|
@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-aot_test-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
apt-get -qq -y install ${python.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
apt-get -qq -y install ${python.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
apt-get -qq -y install ${python.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-darwin-opt-base.tyml
dependencies:
- "darwin-amd64-cpu-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
args:
tests_cmdline: "$TASKCLUSTER_TASK_DIR/DeepSpeech/ds/tc-python-tests.sh 3.4.6"
metadata:
@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-aot_test-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
apt-get -qq -y install ${python.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
apt-get -qq -y install ${python.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
apt-get -qq -y install ${python.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-darwin-opt-base.tyml
dependencies:
- "darwin-amd64-cpu-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
args:
tests_cmdline: "$TASKCLUSTER_TASK_DIR/DeepSpeech/ds/tc-python-tests.sh 3.5.3"
metadata:
@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-aot_test-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
apt-get -qq -y install ${python.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
apt-get -qq -y install ${python.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
apt-get -qq -y install ${python.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-darwin-opt-base.tyml
dependencies:
- "darwin-amd64-cpu-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
args:
tests_cmdline: "$TASKCLUSTER_TASK_DIR/DeepSpeech/ds/tc-python-tests.sh 3.6.2"
metadata:
@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-aot_test-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
apt-get -qq -y install ${python.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
apt-get -qq -y install ${python.packages.apt}

@@ -2,7 +2,7 @@ build:
template_file: test-linux-opt-base.tyml
dependencies:
- "linux-amd64-cpu-opt"
- "test-training-linux-amd64-py27-opt"
- "test-training_upstream-linux-amd64-py27-opt"
system_setup:
>
apt-get -qq -y install ${python.packages.apt}
@@ -8,7 +8,6 @@ build:
apt-get -qq -y install ${python.packages.apt}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-train-tests.sh 2.7.13 mozilla deepspeech"
python_wheel: 'https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/tensorflow_warpctc-1.4.0-cp27-cp27mu-linux_x86_64.whl'
deepspeech_pkg_name: 'deepspeech-0.1.1-cp27-cp27mu-manylinux1_x86_64.whl'
metadata:
name: "DeepSpeech Linux AMD64 CPU training Py2.7 (DS)"

@@ -8,7 +8,6 @@ build:
apt-get -qq -y install ${python.packages.apt}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-train-tests.sh 3.6.2 mozilla deepspeech"
python_wheel: 'https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu-py36/artifacts/public/tensorflow_warpctc-1.4.0-cp36-cp36m-linux_x86_64.whl'
deepspeech_pkg_name: 'deepspeech-0.1.1-cp36-cp36m-manylinux1_x86_64.whl'
metadata:
name: "DeepSpeech Linux AMD64 CPU training Py3.6 (DS)"

@@ -7,7 +7,6 @@ build:
apt-get -qq -y install ${python.packages.apt}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-train-tests.sh 2.7.13 mozilla"
python_wheel: 'https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/tensorflow_warpctc-1.4.0-cp27-cp27mu-linux_x86_64.whl'
metadata:
name: "DeepSpeech Linux AMD64 CPU training Py2.7"
description: "Training a DeepSpeech LDC93S1 model for Linux/AMD64 Python 2.7, CPU only, optimized version"

@@ -7,7 +7,6 @@ build:
apt-get -qq -y install ${python.packages.apt}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-train-tests.sh 3.4.6 mozilla"
python_wheel: 'https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu-py34/artifacts/public/tensorflow_warpctc-1.4.0-cp34-cp34m-linux_x86_64.whl'
metadata:
name: "DeepSpeech Linux AMD64 CPU training Py3.4"
description: "Training a DeepSpeech LDC93S1 model for Linux/AMD64 Python 3.4, CPU only, optimized version"

@@ -7,7 +7,6 @@ build:
apt-get -qq -y install ${python.packages.apt}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-train-tests.sh 3.5.3 mozilla"
python_wheel: 'https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu-py35/artifacts/public/tensorflow_warpctc-1.4.0-cp35-cp35m-linux_x86_64.whl'
metadata:
name: "DeepSpeech Linux AMD64 CPU training Py3.5"
description: "Training a DeepSpeech LDC93S1 model for Linux/AMD64 Python 3.5, CPU only, optimized version"

@@ -7,7 +7,6 @@ build:
apt-get -qq -y install ${python.packages.apt}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-train-tests.sh 3.6.2 mozilla"
python_wheel: 'https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu-py36/artifacts/public/tensorflow_warpctc-1.4.0-cp36-cp36m-linux_x86_64.whl'
metadata:
name: "DeepSpeech Linux AMD64 CPU training Py3.6"
description: "Training a DeepSpeech LDC93S1 model for Linux/AMD64 Python 3.6, CPU only, optimized version"
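The `python_wheel` URLs above point at prebuilt `tensorflow_warpctc` wheels on the TaskCluster index; the training task is expected to install the wheel before running the LDC93S1 training test. A hedged sketch of that consumption (the pip invocation itself is an assumption; only the URL comes from the YAML above):

# Sketch: installing the TensorFlow wheel referenced by a task's python_wheel arg.
PYTHON_WHEEL_URL='https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.master.1390dc180e25b5821be80b407ddc5fad73d4ef6a.cpu/artifacts/public/tensorflow_warpctc-1.4.0-cp27-cp27mu-linux_x86_64.whl'
pip install --upgrade "${PYTHON_WHEEL_URL}" | cat   # "| cat" avoids pip's interactive progress output in CI logs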
@@ -29,20 +29,15 @@ export DS_DSDIR=${DS_ROOT_TASK}/DeepSpeech/ds
export BAZEL_CTC_TARGETS="//native_client:libctc_decoder_with_kenlm.so"

export EXTRA_AOT_CFLAGS=""
export EXTRA_AOT_LDFLAGS="-L${DS_TFDIR}/bazel-bin/tensorflow/compiler/xla -L${DS_TFDIR}/bazel-bin/tensorflow/compiler/tf2xla -L${DS_TFDIR}/bazel-bin/tensorflow/compiler/aot -L${DS_TFDIR}/bazel-bin/tensorflow/compiler/xla/service/cpu"
export EXTRA_AOT_LIBS="-ldeepspeech_model -lxla_compiled_cpu_function -lruntime -lruntime_matmul -lruntime_matvec -lexecutable_run_options"
export EXTRA_AOT_LDFLAGS=""
export EXTRA_AOT_LIBS="-ldeepspeech_model"

# FIXME:
# Previously, with r1.3, we could use timesteps of 64
# With r1.4 it seems to eat too much resources at tfcompile step
export BAZEL_AOT_BUILD_FLAGS="--define=DS_NATIVE_MODEL=1 --define=DS_MODEL_TIMESTEPS=16"
export BAZEL_AOT_TARGETS="
//native_client:deepspeech_model
//tensorflow/compiler/aot:runtime
//tensorflow/compiler/xla/service/cpu:runtime_matmul
//tensorflow/compiler/xla/service/cpu:runtime_matvec
//tensorflow/compiler/xla:executable_run_options
//tensorflow/compiler/tf2xla:xla_compiled_cpu_function
//native_client:libdeepspeech_model.so
"

model_source=${DEEPSPEECH_TEST_MODEL}
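This hunk strips the separate XLA/AOT runtime libraries out of `EXTRA_AOT_LDFLAGS` / `EXTRA_AOT_LIBS`, leaving only `-ldeepspeech_model`, presumably because the monolithic build folds those runtime symbols into the produced libraries. Purely as an illustration of where such variables usually land at link time (the `g++` command and object names are hypothetical, not taken from this diff):

# Hypothetical link line showing how EXTRA_AOT_LDFLAGS / EXTRA_AOT_LIBS would be consumed.
EXTRA_AOT_LDFLAGS=""
EXTRA_AOT_LIBS="-ldeepspeech_model"
g++ -o deepspeech client.o \
    -L${DS_TFDIR}/bazel-bin/native_client ${EXTRA_AOT_LDFLAGS} ${EXTRA_AOT_LIBS}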
@@ -221,7 +216,6 @@ do_get_model_parameters()

wget "${model_url}" -O "${model_file}"
wget "${SUMMARIZE_GRAPH_BINARY}" -O "/tmp/summarize_graph"
wget "${LIBTENSORFLOW_FRAMEWORK}" -O "/tmp/libtensorflow_framework.so"

chmod +x /tmp/summarize_graph

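`summarize_graph` is TensorFlow's graph-inspection tool, downloaded here as a prebuilt binary; `do_get_model_parameters` uses it to derive the model geometry that feeds the `--define=DS_MODEL_FRAMESIZE=...` flag returned below. A hedged sketch of a manual run (the `--in_graph` flag follows the upstream tool; placing `libtensorflow_framework.so` on `LD_LIBRARY_PATH` is an assumption based on the wget above):

# Sketch: inspecting the downloaded graph the way do_get_model_parameters presumably does.
chmod +x /tmp/summarize_graph
LD_LIBRARY_PATH=/tmp /tmp/summarize_graph --in_graph="${model_file}"
# The output lists inputs/outputs and their shapes, from which the model frame size can be read.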
@@ -235,12 +229,77 @@ do_get_model_parameters()
eval $__result="'--define=DS_MODEL_FRAMESIZE=${model_width} --define=DS_MODEL_FILE=${model_file}'"
}

# Checks whether we run a patched version of bazel.
# Patching is required to dump computeKey() parameters to .ckd files
# See bazel.patch
# Return 0 (success exit code) on patched version, 1 on release version
is_patched_bazel()
{
bazel_version=$(bazel version | grep 'Build label:' | cut -d':' -f2)

if [ -z "${bazel_version}" ]; then
return 0;
else
return 1;
fi;
}

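`is_patched_bazel` relies on the patched Bazel being a local, from-source build, which reports an empty (or missing) `Build label:` line, whereas a release binary reports a version string. A small sketch of the detection (the sample outputs are illustrative):

# Release bazel:        "Build label: 0.5.4"  -> cut -d':' -f2 yields " 0.5.4" -> non-empty -> return 1
# Locally built/patched: no usable label       -> empty                         -> return 0
if is_patched_bazel; then
  echo "patched bazel: .ckd compute-key dumps will be available"
else
  echo "release bazel: no .ckd dumps"
fi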
verify_bazel_rebuild()
{
bazel_explain_file="$1"

if [ ! -f "${bazel_explain_file}" ]; then
echo "No such explain file: ${bazel_explain_file}"
exit 1
fi;

spurious_rebuilds=$(grep 'Executing action' "${bazel_explain_file}" | grep 'Compiling' | grep -v -E 'no entry in the cache|unconditional execution is requested' | wc -l)
if [ "${spurious_rebuilds}" -ne 0 ]; then
echo "Bazel rebuilds some file it should not, please check."

if is_patched_bazel; then
mkdir -p ${DS_ROOT_TASK}/DeepSpeech/ckd/ds ${DS_ROOT_TASK}/DeepSpeech/ckd/tf
tar xf ${DS_ROOT_TASK}/DeepSpeech/bazel-ckd-tf.tar --strip-components=4 -C ${DS_ROOT_TASK}/DeepSpeech/ckd/ds/
tar xf ${DS_ROOT_TASK}/DeepSpeech/bazel-ckd-ds.tar --strip-components=4 -C ${DS_ROOT_TASK}/DeepSpeech/ckd/tf/

echo "Making a diff between CKD files"
mkdir -p ${TASKCLUSTER_ARTIFACTS}
diff -urNw ${DS_ROOT_TASK}/DeepSpeech/ckd/tf/ ${DS_ROOT_TASK}/DeepSpeech/ckd/ds/ | tee ${TASKCLUSTER_ARTIFACTS}/ckd.diff

rm -fr ${DS_ROOT_TASK}/DeepSpeech/ckd/tf/ ${DS_ROOT_TASK}/DeepSpeech/ckd/ds/
else
echo "Cannot get CKD information from release, please use patched Bazel"
fi;

exit 1
fi;
}

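`verify_bazel_rebuild` counts compile actions in the `--explain` log whose justification is anything other than a cache miss or an explicitly unconditional rerun; any such action means the second (monolithic) pass recompiled something it should have reused. A sketch of the same pipeline against hypothetical log lines:

# Hypothetical --explain/--verbose_explanations lines:
#   Executing action 'Compiling tensorflow/core/foo.cc': action command has changed.
#   Executing action 'Compiling native_client/deepspeech.cc': no entry in the cache.
# Only the first line would count as a spurious rebuild:
grep 'Executing action' bazel_monolithic.log \
  | grep 'Compiling' \
  | grep -v -E 'no entry in the cache|unconditional execution is requested' \
  | wc -l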
do_bazel_build()
{
cd ${DS_ROOT_TASK}/DeepSpeech/tf
eval "export ${BAZEL_ENV_FLAGS}"
PATH=${DS_ROOT_TASK}/bin/:$PATH bazel ${BAZEL_OUTPUT_USER_ROOT} build \
-c opt ${BAZEL_BUILD_FLAGS} ${BAZEL_TARGETS}

if is_patched_bazel; then
find ${DS_ROOT_TASK}/DeepSpeech/tf/bazel-out/ -iname "*.ckd" | tar -cf ${DS_ROOT_TASK}/DeepSpeech/bazel-ckd-tf.tar -T -
fi;

bazel ${BAZEL_OUTPUT_USER_ROOT} build \
-s --explain bazel_monolithic.log --verbose_explanations --experimental_strict_action_env --config=monolithic -c opt ${BAZEL_BUILD_FLAGS} ${BAZEL_TARGETS}

if is_patched_bazel; then
find ${DS_ROOT_TASK}/DeepSpeech/tf/bazel-out/ -iname "*.ckd" | tar -cf ${DS_ROOT_TASK}/DeepSpeech/bazel-ckd-ds.tar -T -
fi;

verify_bazel_rebuild "${DS_ROOT_TASK}/DeepSpeech/tf/bazel_monolithic.log"
}

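`do_bazel_build` builds the targets twice: a plain `-c opt` pass first, then the same targets with `--config=monolithic`, recording an explain log for the second pass and tarring up any `.ckd` compute-key dumps from a patched Bazel so `verify_bazel_rebuild` can diff them. A hedged usage sketch; the environment values below are illustrative assumptions, not taken from this diff:

# Illustrative environment for do_bazel_build; the real values are set elsewhere
# in the CI scripts and task payloads.
export BAZEL_ENV_FLAGS="TF_NEED_CUDA=0"
export BAZEL_OUTPUT_USER_ROOT="--output_user_root=${DS_ROOT_TASK}/.bazel"   # bazel startup option, hence before 'build'
export BAZEL_BUILD_FLAGS="--copt=-O3"
export BAZEL_TARGETS="//native_client:libdeepspeech.so //native_client:libctc_decoder_with_kenlm.so"
do_bazel_build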
do_bazel_shared_build()
{
cd ${DS_ROOT_TASK}/DeepSpeech/tf
eval "export ${BAZEL_ENV_FLAGS}"
bazel ${BAZEL_OUTPUT_USER_ROOT} build \
-s --explain bazel_shared.log --verbose_explanations --experimental_strict_action_env -c opt ${BAZEL_BUILD_FLAGS} ${BAZEL_TARGETS}
}

do_deepspeech_binary_build()
@@ -361,13 +420,6 @@ package_native_client()

if [ -f "${tensorflow_dir}/bazel-bin/native_client/libdeepspeech_model.so" ]; then
tar -cf - \
-C ${tensorflow_dir}/bazel-bin/tensorflow/ libtensorflow_cc.so \
-C ${tensorflow_dir}/bazel-bin/tensorflow/ libtensorflow_framework.so \
-C ${tensorflow_dir}/bazel-bin/tensorflow/compiler/aot/ libruntime.so \
-C ${tensorflow_dir}/bazel-bin/tensorflow/compiler/xla/service/cpu/ libruntime_matmul.so \
-C ${tensorflow_dir}/bazel-bin/tensorflow/compiler/xla/service/cpu/ libruntime_matvec.so \
-C ${tensorflow_dir}/bazel-bin/tensorflow/compiler/xla/ libexecutable_run_options.so \
-C ${tensorflow_dir}/bazel-bin/tensorflow/compiler/tf2xla/ libxla_compiled_cpu_function.so \
-C ${tensorflow_dir}/bazel-bin/native_client/ generate_trie \
-C ${tensorflow_dir}/bazel-bin/native_client/ libctc_decoder_with_kenlm.so \
-C ${tensorflow_dir}/bazel-bin/native_client/ libdeepspeech.so \
@@ -379,8 +431,6 @@ package_native_client()
| pixz -9 > "${artifacts_dir}/${artifact_name}"
else
tar -cf - \
-C ${tensorflow_dir}/bazel-bin/tensorflow/ libtensorflow_cc.so \
-C ${tensorflow_dir}/bazel-bin/tensorflow/ libtensorflow_framework.so \
-C ${tensorflow_dir}/bazel-bin/native_client/ generate_trie \
-C ${tensorflow_dir}/bazel-bin/native_client/ libctc_decoder_with_kenlm.so \
-C ${tensorflow_dir}/bazel-bin/native_client/ libdeepspeech.so \
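`package_native_client` streams the tar through `pixz -9`, which emits a standard `.xz` container (with an index), so the resulting artifact should be readable by ordinary xz-aware tools. A quick sketch; the artifact file name is an assumption:

# List and unpack a native_client artifact produced by package_native_client.
tar -Jtvf native_client.tar.xz            # list contents; -J works because pixz output is xz-compatible
mkdir -p /tmp/native_client
tar -Jxvf native_client.tar.xz -C /tmp/native_client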
@@ -42,9 +42,15 @@ fi;
if [ "${ds}" = "deepspeech" ]; then
pip install "${DEEPSPEECH_PYTHON_PACKAGE}" | cat
python -c "import tensorflow; from deepspeech.utils import audioToInputVector"
fi;

download_ctc_kenlm "/tmp/ds"
# Since this build depends on the completion of the whole deepspeech package
# and we might get into a funny situation with --config=monolithic, let's
# be extra-cautious and leverage our dependency on that build to also
# test with the libctc_decoder_with_kenlm.so that is packaged for release
download_native_client_files "/tmp/ds"
else
download_ctc_kenlm "/tmp/ds"
fi;

pushd ${HOME}/DeepSpeech/ds/
time ./bin/run-tc-ldc93s1.sh
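This final hunk makes the test script also exercise the released `libctc_decoder_with_kenlm.so` when testing the `deepspeech` package, presumably to catch linkage surprises introduced by the `--config=monolithic` libdeepspeech.so. The import one-liner above is the smoke test; a hedged sketch of running it by hand (the wheel name comes from the YAML args earlier and is assumed to be present locally):

# Manual version of the smoke test: import TensorFlow first, then the DeepSpeech
# Python package, so symbol/linkage clashes surface immediately.
pip install "deepspeech-0.1.1-cp27-cp27mu-manylinux1_x86_64.whl" | cat
python -c "import tensorflow; from deepspeech.utils import audioToInputVector"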