diff --git a/native_client/BUILD b/native_client/BUILD
index 693963c8..7bdeb85e 100644
--- a/native_client/BUILD
+++ b/native_client/BUILD
@@ -58,7 +58,10 @@ tf_cc_shared_object(
         "//tensorflow/core/kernels:control_flow_ops", # Enter
         "//tensorflow/core/kernels:tile_ops", # Tile
         "//tensorflow/core/kernels:gather_op", # Gather
-        "//tensorflow/contrib/rnn:lstm_ops_kernels" # BlockLSTM
+        "//tensorflow/contrib/rnn:lstm_ops_kernels", # BlockLSTM
+        "//tensorflow/core/kernels:random_ops", # RandomGammaGrad
+        "//tensorflow/core/kernels:pack_op", # Pack
+        "//tensorflow/core/kernels:gather_nd_op", # GatherNd
         #### Needed by production model produced without "--use_seq_length False"
         #"//tensorflow/core/kernels:logging_ops", # Assert
         #"//tensorflow/core/kernels:reverse_sequence_op", # ReverseSequence
diff --git a/native_client/README.md b/native_client/README.md
index b44b6bf3..939529e9 100644
--- a/native_client/README.md
+++ b/native_client/README.md
@@ -51,10 +51,11 @@ Check the [main README](../README.md) for more details.
 
 If you'd like to build the binaries yourself, you'll need the following pre-requisites downloaded/installed:
 
-* [TensorFlow source and requirements](https://www.tensorflow.org/install/install_sources)
+* [TensorFlow requirements](https://www.tensorflow.org/install/install_sources)
+* [TensorFlow `r1.11` sources](https://github.com/mozilla/tensorflow/tree/r1.11)
 * [libsox](https://sourceforge.net/projects/sox/)
 
-We recommend using our fork of TensorFlow since it includes fixes for common problems encountered when building the native client files, you can [get it here](https://github.com/mozilla/tensorflow/).
+It is required to use our fork of TensorFlow since it includes fixes for common problems encountered when building the native client files.
 
 If you'd like to build the language bindings, you'll also need:
 
@@ -73,7 +74,7 @@ ln -s ../DeepSpeech/native_client ./
 ## Building
 
 Before building the DeepSpeech client libraries, you will need to prepare your environment to configure and build TensorFlow.
-Preferably, checkout the version of tensorflow which is currently supported by DeepSpeech (see requirements.txt), and use bazel version 0.10.0.
+Preferably, checkout the version of tensorflow which is currently supported by DeepSpeech (see requirements.txt), and use the bazel version recommended by TensorFlow for that version.
 Then, follow the [instructions](https://www.tensorflow.org/install/install_sources) on the TensorFlow site for your platform, up to the end of 'Configure the installation'.
 
 After that, you can build the Tensorflow and DeepSpeech libraries using the following commands. Please note that the flags for `libctc_decoder_with_kenlm.so` differs a little bit.
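For orientation, the workflow the updated README describes amounts to roughly the following shell session. This is a minimal sketch, assuming `tensorflow` and `DeepSpeech` are sibling checkouts as in the README; the exact `bazel build` invocations are documented further down in the README and are not part of this hunk:

```sh
# Required fork and branch, per the README change above
git clone --branch r1.11 https://github.com/mozilla/tensorflow.git
git clone https://github.com/mozilla/DeepSpeech.git

# Expose native_client inside the TensorFlow tree (same symlink the README shows)
cd tensorflow
ln -s ../DeepSpeech/native_client ./

# TensorFlow configuration step ("Configure the installation"), using the bazel
# version recommended by TensorFlow for r1.11
./configure
```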
diff --git a/native_client/kenlm/README.mozilla b/native_client/kenlm/README.mozilla
index e063a4c7..5ae7d64c 100644
--- a/native_client/kenlm/README.mozilla
+++ b/native_client/kenlm/README.mozilla
@@ -7,3 +7,6 @@ The following procedure was run to remove unneeded files:
 
 cd kenlm
 rm -rf windows include lm/filter lm/builder util/stream util/getopt.* python
+
+This was done in order to ensure uniqueness of double_conversion:
+git grep 'double_conversion' | cut -d':' -f1 | sort | uniq | xargs sed -ri 's/double_conversion/kenlm_double_conversion/g'
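The rename above touches every file under `util/double-conversion/` (see the hunks that follow). Making the namespace unique matters because another copy of double-conversion, most likely the one TensorFlow itself links, can end up in the same shared object, and two sets of identically named symbols would clash; giving KenLM's bundled copy its own `kenlm_double_conversion` namespace keeps the two apart. A quick way to confirm the rewrite left nothing behind (a sketch, run from `native_client/kenlm`, not part of the patch itself):

```sh
cd native_client/kenlm
# Should print nothing: no file still opens the old namespace
git grep -n 'namespace double_conversion' util/
# Should include util/file_piece.cc and util/float_to_string.cc alongside
# the util/double-conversion/ sources
git grep -l 'kenlm_double_conversion' util/ | sort
```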
diff --git a/native_client/kenlm/util/double-conversion/bignum-dtoa.cc b/native_client/kenlm/util/double-conversion/bignum-dtoa.cc
index 3d217bf0..4825888d 100644
--- a/native_client/kenlm/util/double-conversion/bignum-dtoa.cc
+++ b/native_client/kenlm/util/double-conversion/bignum-dtoa.cc
@@ -32,7 +32,7 @@
 #include "bignum.h"
 #include "ieee.h"
 
-namespace double_conversion {
+namespace kenlm_double_conversion {
 
 static int NormalizedExponent(uint64_t significand, int exponent) {
   ASSERT(significand != 0);
@@ -637,4 +637,4 @@ static void FixupMultiply10(int estimated_power, bool is_even,
   }
 }
 
-} // namespace double_conversion
+} // namespace kenlm_double_conversion
diff --git a/native_client/kenlm/util/double-conversion/bignum-dtoa.h b/native_client/kenlm/util/double-conversion/bignum-dtoa.h
index 34b96199..e6a84b55 100644
--- a/native_client/kenlm/util/double-conversion/bignum-dtoa.h
+++ b/native_client/kenlm/util/double-conversion/bignum-dtoa.h
@@ -30,7 +30,7 @@
 
 #include "utils.h"
 
-namespace double_conversion {
+namespace kenlm_double_conversion {
 
 enum BignumDtoaMode {
   // Return the shortest correct representation.
@@ -79,6 +79,6 @@ enum BignumDtoaMode {
 void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
                 Vector<char> buffer, int* length, int* point);
 
-} // namespace double_conversion
+} // namespace kenlm_double_conversion
 
 #endif  // DOUBLE_CONVERSION_BIGNUM_DTOA_H_
diff --git a/native_client/kenlm/util/double-conversion/bignum.cc b/native_client/kenlm/util/double-conversion/bignum.cc
index 747491a0..3ff99d36 100644
--- a/native_client/kenlm/util/double-conversion/bignum.cc
+++ b/native_client/kenlm/util/double-conversion/bignum.cc
@@ -28,7 +28,7 @@
 #include "bignum.h"
 #include "utils.h"
 
-namespace double_conversion {
+namespace kenlm_double_conversion {
 
 Bignum::Bignum()
     : bigits_(bigits_buffer_, kBigitCapacity), used_digits_(0), exponent_(0) {
@@ -761,4 +761,4 @@ void Bignum::SubtractTimes(const Bignum& other, int factor) {
 }
 
 
-} // namespace double_conversion
+} // namespace kenlm_double_conversion
diff --git a/native_client/kenlm/util/double-conversion/bignum.h b/native_client/kenlm/util/double-conversion/bignum.h
index 5ec3544f..03a20601 100644
--- a/native_client/kenlm/util/double-conversion/bignum.h
+++ b/native_client/kenlm/util/double-conversion/bignum.h
@@ -30,7 +30,7 @@
 
 #include "utils.h"
 
-namespace double_conversion {
+namespace kenlm_double_conversion {
 
 class Bignum {
  public:
@@ -140,6 +140,6 @@ class Bignum {
   DISALLOW_COPY_AND_ASSIGN(Bignum);
 };
 
-} // namespace double_conversion
+} // namespace kenlm_double_conversion
 
 #endif  // DOUBLE_CONVERSION_BIGNUM_H_
diff --git a/native_client/kenlm/util/double-conversion/cached-powers.cc b/native_client/kenlm/util/double-conversion/cached-powers.cc
index 9dcfa367..e61d7f34 100644
--- a/native_client/kenlm/util/double-conversion/cached-powers.cc
+++ b/native_client/kenlm/util/double-conversion/cached-powers.cc
@@ -33,7 +33,7 @@
 
 #include "cached-powers.h"
 
-namespace double_conversion {
+namespace kenlm_double_conversion {
 
 struct CachedPower {
   uint64_t significand;
@@ -172,4 +172,4 @@ void PowersOfTenCache::GetCachedPowerForDecimalExponent(int requested_exponent,
   ASSERT(requested_exponent < *found_exponent + kDecimalExponentDistance);
 }
 
-} // namespace double_conversion
+} // namespace kenlm_double_conversion
diff --git a/native_client/kenlm/util/double-conversion/cached-powers.h b/native_client/kenlm/util/double-conversion/cached-powers.h
index 61a50614..254fed08 100644
--- a/native_client/kenlm/util/double-conversion/cached-powers.h
+++ b/native_client/kenlm/util/double-conversion/cached-powers.h
@@ -30,7 +30,7 @@
 
 #include "diy-fp.h"
 
-namespace double_conversion {
+namespace kenlm_double_conversion {
 
 class PowersOfTenCache {
  public:
@@ -59,6 +59,6 @@ class PowersOfTenCache {
                                               int* found_exponent);
 };
 
-} // namespace double_conversion
+} // namespace kenlm_double_conversion
 
 #endif  // DOUBLE_CONVERSION_CACHED_POWERS_H_
diff --git a/native_client/kenlm/util/double-conversion/diy-fp.cc b/native_client/kenlm/util/double-conversion/diy-fp.cc
index ddd1891b..264c541a 100644
--- a/native_client/kenlm/util/double-conversion/diy-fp.cc
+++ b/native_client/kenlm/util/double-conversion/diy-fp.cc
@@ -29,7 +29,7 @@
 #include "diy-fp.h"
 #include "utils.h"
 
-namespace double_conversion {
+namespace kenlm_double_conversion {
 
 void DiyFp::Multiply(const DiyFp& other) {
   // Simply "emulates" a 128 bit multiplication.
@@ -54,4 +54,4 @@ void DiyFp::Multiply(const DiyFp& other) {
   f_ = result_f;
 }
 
-} // namespace double_conversion
+} // namespace kenlm_double_conversion
diff --git a/native_client/kenlm/util/double-conversion/diy-fp.h b/native_client/kenlm/util/double-conversion/diy-fp.h
index 9dcf8fbd..71552b9b 100644
--- a/native_client/kenlm/util/double-conversion/diy-fp.h
+++ b/native_client/kenlm/util/double-conversion/diy-fp.h
@@ -30,7 +30,7 @@
 
 #include "utils.h"
 
-namespace double_conversion {
+namespace kenlm_double_conversion {
 
 // This "Do It Yourself Floating Point" class implements a floating-point number
 // with a uint64 significand and an int exponent. Normalized DiyFp numbers will
@@ -113,6 +113,6 @@ class DiyFp {
   int e_;
 };
 
-} // namespace double_conversion
+} // namespace kenlm_double_conversion
 
 #endif  // DOUBLE_CONVERSION_DIY_FP_H_
diff --git a/native_client/kenlm/util/double-conversion/double-conversion.cc b/native_client/kenlm/util/double-conversion/double-conversion.cc
index 8a7923c5..115fe16f 100644
--- a/native_client/kenlm/util/double-conversion/double-conversion.cc
+++ b/native_client/kenlm/util/double-conversion/double-conversion.cc
@@ -37,7 +37,7 @@
 #include "strtod.h"
 #include "utils.h"
 
-namespace double_conversion {
+namespace kenlm_double_conversion {
 
 const DoubleToStringConverter& DoubleToStringConverter::EcmaScriptConverter() {
   int flags = UNIQUE_ZERO | EMIT_POSITIVE_EXPONENT_SIGN;
@@ -886,4 +886,4 @@ double StringToDoubleConverter::StringToIeee(
   return sign? -converted: converted;
 }
 
-} // namespace double_conversion
+} // namespace kenlm_double_conversion
diff --git a/native_client/kenlm/util/double-conversion/double-conversion.h b/native_client/kenlm/util/double-conversion/double-conversion.h
index 1c3387d4..d3a57c05 100644
--- a/native_client/kenlm/util/double-conversion/double-conversion.h
+++ b/native_client/kenlm/util/double-conversion/double-conversion.h
@@ -30,7 +30,7 @@
 
 #include "utils.h"
 
-namespace double_conversion {
+namespace kenlm_double_conversion {
 
 class DoubleToStringConverter {
  public:
@@ -531,6 +531,6 @@ class StringToDoubleConverter {
   DISALLOW_IMPLICIT_CONSTRUCTORS(StringToDoubleConverter);
 };
 
-} // namespace double_conversion
+} // namespace kenlm_double_conversion
 
 #endif  // DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_
diff --git a/native_client/kenlm/util/double-conversion/fast-dtoa.cc b/native_client/kenlm/util/double-conversion/fast-dtoa.cc
index 1a0f8235..ff2936d6 100644
--- a/native_client/kenlm/util/double-conversion/fast-dtoa.cc
+++ b/native_client/kenlm/util/double-conversion/fast-dtoa.cc
@@ -31,7 +31,7 @@
 #include "diy-fp.h"
 #include "ieee.h"
 
-namespace double_conversion {
+namespace kenlm_double_conversion {
 
 // The minimal and maximal target exponent define the range of w's binary
 // exponent, where 'w' is the result of multiplying the input by a cached power
@@ -661,4 +661,4 @@ bool FastDtoa(double v,
   return result;
 }
 
-} // namespace double_conversion
+} // namespace kenlm_double_conversion
diff --git a/native_client/kenlm/util/double-conversion/fast-dtoa.h b/native_client/kenlm/util/double-conversion/fast-dtoa.h
index 5f1e8eee..9e576b9a 100644
--- a/native_client/kenlm/util/double-conversion/fast-dtoa.h
+++ b/native_client/kenlm/util/double-conversion/fast-dtoa.h
@@ -30,7 +30,7 @@
 
 #include "utils.h"
 
-namespace double_conversion {
+namespace kenlm_double_conversion {
 
 enum FastDtoaMode {
   // Computes the shortest representation of the given input. The returned
@@ -83,6 +83,6 @@ bool FastDtoa(double d,
               int* length,
               int* decimal_point);
 
-} // namespace double_conversion
+} // namespace kenlm_double_conversion
 
 #endif  // DOUBLE_CONVERSION_FAST_DTOA_H_
diff --git a/native_client/kenlm/util/double-conversion/fixed-dtoa.cc b/native_client/kenlm/util/double-conversion/fixed-dtoa.cc
index 7c1a9527..a1a16a62 100644
--- a/native_client/kenlm/util/double-conversion/fixed-dtoa.cc
+++ b/native_client/kenlm/util/double-conversion/fixed-dtoa.cc
@@ -30,7 +30,7 @@
 #include "fixed-dtoa.h"
 #include "ieee.h"
 
-namespace double_conversion {
+namespace kenlm_double_conversion {
 
 // Represents a 128bit type. This class should be replaced by a native type on
 // platforms that support 128bit integers.
@@ -399,4 +399,4 @@ bool FastFixedDtoa(double v,
   return true;
 }
 
-} // namespace double_conversion
+} // namespace kenlm_double_conversion
diff --git a/native_client/kenlm/util/double-conversion/fixed-dtoa.h b/native_client/kenlm/util/double-conversion/fixed-dtoa.h
index 3bdd08e2..f78f32fe 100644
--- a/native_client/kenlm/util/double-conversion/fixed-dtoa.h
+++ b/native_client/kenlm/util/double-conversion/fixed-dtoa.h
@@ -30,7 +30,7 @@
 
 #include "utils.h"
 
-namespace double_conversion {
+namespace kenlm_double_conversion {
 
 // Produces digits necessary to print a given number with
 // 'fractional_count' digits after the decimal point.
@@ -51,6 +51,6 @@ namespace double_conversion {
 bool FastFixedDtoa(double v, int fractional_count,
                    Vector<char> buffer, int* length, int* decimal_point);
 
-} // namespace double_conversion
+} // namespace kenlm_double_conversion
 
 #endif  // DOUBLE_CONVERSION_FIXED_DTOA_H_
diff --git a/native_client/kenlm/util/double-conversion/ieee.h b/native_client/kenlm/util/double-conversion/ieee.h
index 839dc47d..ee11508f 100644
--- a/native_client/kenlm/util/double-conversion/ieee.h
+++ b/native_client/kenlm/util/double-conversion/ieee.h
@@ -30,7 +30,7 @@
 
 #include "diy-fp.h"
 
-namespace double_conversion {
+namespace kenlm_double_conversion {
 
 // We assume that doubles and uint64_t have the same endianness.
 static uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
@@ -393,6 +393,6 @@ class Single {
   const uint32_t d32_;
 };
 
-} // namespace double_conversion
+} // namespace kenlm_double_conversion
 
 #endif  // DOUBLE_CONVERSION_DOUBLE_H_
diff --git a/native_client/kenlm/util/double-conversion/strtod.cc b/native_client/kenlm/util/double-conversion/strtod.cc
index 55b4daa5..2c66e6e5 100644
--- a/native_client/kenlm/util/double-conversion/strtod.cc
+++ b/native_client/kenlm/util/double-conversion/strtod.cc
@@ -33,7 +33,7 @@
 #include "cached-powers.h"
 #include "ieee.h"
 
-namespace double_conversion {
+namespace kenlm_double_conversion {
 
 // 2^53 = 9007199254740992.
 // Any integer with at most 15 decimal digits will hence fit into a double
@@ -555,4 +555,4 @@ float Strtof(Vector<const char> buffer, int exponent) {
   }
 }
 
-} // namespace double_conversion
+} // namespace kenlm_double_conversion
diff --git a/native_client/kenlm/util/double-conversion/strtod.h b/native_client/kenlm/util/double-conversion/strtod.h
index ed0293b8..1873a858 100644
--- a/native_client/kenlm/util/double-conversion/strtod.h
+++ b/native_client/kenlm/util/double-conversion/strtod.h
@@ -30,7 +30,7 @@
 
 #include "utils.h"
 
-namespace double_conversion {
+namespace kenlm_double_conversion {
 
 // The buffer must only contain digits in the range [0-9]. It must not
 // contain a dot or a sign. It must not start with '0', and must not be empty.
@@ -40,6 +40,6 @@ double Strtod(Vector<const char> buffer, int exponent);
 // The buffer must only contain digits in the range [0-9]. It must not
 // contain a dot or a sign. It must not start with '0', and must not be empty.
 float Strtof(Vector<const char> buffer, int exponent);
-} // namespace double_conversion
+} // namespace kenlm_double_conversion
 
 #endif  // DOUBLE_CONVERSION_STRTOD_H_
diff --git a/native_client/kenlm/util/double-conversion/utils.h b/native_client/kenlm/util/double-conversion/utils.h
index 35a29a78..ae40b116 100644
--- a/native_client/kenlm/util/double-conversion/utils.h
+++ b/native_client/kenlm/util/double-conversion/utils.h
@@ -126,7 +126,7 @@ typedef unsigned __int64 uint64_t;
     DISALLOW_COPY_AND_ASSIGN(TypeName)
 #endif
 
-namespace double_conversion {
+namespace kenlm_double_conversion {
 
 static const int kCharSize = sizeof(char);
 
@@ -315,6 +315,6 @@ inline Dest BitCast(Source* source) {
   return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
 }
 
-} // namespace double_conversion
+} // namespace kenlm_double_conversion
 
 #endif  // DOUBLE_CONVERSION_UTILS_H_
diff --git a/native_client/kenlm/util/file_piece.cc b/native_client/kenlm/util/file_piece.cc
index 182cce9d..4c009e07 100644
--- a/native_client/kenlm/util/file_piece.cc
+++ b/native_client/kenlm/util/file_piece.cc
@@ -162,8 +162,8 @@ void FilePiece::Initialize(const char *name, std::ostream *show_progress, std::s
 
 namespace {
 
-static const double_conversion::StringToDoubleConverter kConverter(
-    double_conversion::StringToDoubleConverter::ALLOW_TRAILING_JUNK | double_conversion::StringToDoubleConverter::ALLOW_LEADING_SPACES,
+static const kenlm_double_conversion::StringToDoubleConverter kConverter(
+    kenlm_double_conversion::StringToDoubleConverter::ALLOW_TRAILING_JUNK | kenlm_double_conversion::StringToDoubleConverter::ALLOW_LEADING_SPACES,
     std::numeric_limits<double>::quiet_NaN(),
     std::numeric_limits<double>::quiet_NaN(),
     "inf",
diff --git a/native_client/kenlm/util/float_to_string.cc b/native_client/kenlm/util/float_to_string.cc
index 1e16d6f9..4c503962 100644
--- a/native_client/kenlm/util/float_to_string.cc
+++ b/native_client/kenlm/util/float_to_string.cc
@@ -5,17 +5,17 @@ namespace util {
 namespace {
 
-const double_conversion::DoubleToStringConverter kConverter(double_conversion::DoubleToStringConverter::NO_FLAGS, "inf", "NaN", 'e', -6, 21, 6, 0);
+const kenlm_double_conversion::DoubleToStringConverter kConverter(kenlm_double_conversion::DoubleToStringConverter::NO_FLAGS, "inf", "NaN", 'e', -6, 21, 6, 0);
 
 } // namespace
 
 char *ToString(double value, char *to) {
-  double_conversion::StringBuilder builder(to, ToStringBuf<double>::kBytes);
+  kenlm_double_conversion::StringBuilder builder(to, ToStringBuf<double>::kBytes);
   kConverter.ToShortest(value, &builder);
   return &to[builder.position()];
 }
 
 char *ToString(float value, char *to) {
-  double_conversion::StringBuilder builder(to, ToStringBuf<float>::kBytes);
+  kenlm_double_conversion::StringBuilder builder(to, ToStringBuf<float>::kBytes);
   kConverter.ToShortestSingle(value, &builder);
   return &to[builder.position()];
 }
diff --git a/requirements.txt b/requirements.txt
index 5c423db4..8e5b57e9 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
 pandas
 progressbar2
 python-utils
-tensorflow == 1.6.0
+tensorflow == 1.11.0rc2
 numpy
 matplotlib
 scipy
diff --git a/taskcluster/darwin-amd64-cpu-opt.yml b/taskcluster/darwin-amd64-cpu-opt.yml
index e3cea581..bbf8fb29 100644
--- a/taskcluster/darwin-amd64-cpu-opt.yml
+++ b/taskcluster/darwin-amd64-cpu-opt.yml
@@ -6,8 +6,8 @@ build:
     - "index.project.deepspeech.deepspeech.native_client.osx.${event.head.sha}"
     - "notify.irc-channel.${notifications.irc}.on-exception"
     - "notify.irc-channel.${notifications.irc}.on-failed"
"https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.osx/artifacts/public/home.tar.xz" - summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.osx/artifacts/public/summarize_graph" + tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.osx/artifacts/public/home.tar.xz" + summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.osx/artifacts/public/summarize_graph" scripts: build: "taskcluster/host-build.sh" package: "taskcluster/package.sh" diff --git a/taskcluster/linux-amd64-cpu-opt.yml b/taskcluster/linux-amd64-cpu-opt.yml index 3e59ff83..8ea84bac 100644 --- a/taskcluster/linux-amd64-cpu-opt.yml +++ b/taskcluster/linux-amd64-cpu-opt.yml @@ -14,8 +14,8 @@ build: system_config: > ${swig.patch_nodejs.linux} - tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.cpu/artifacts/public/home.tar.xz" - summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.cpu/artifacts/public/summarize_graph" + tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.cpu/artifacts/public/home.tar.xz" + summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.cpu/artifacts/public/summarize_graph" scripts: build: "taskcluster/host-build.sh" package: "taskcluster/package.sh" diff --git a/taskcluster/linux-amd64-ctc-opt.yml b/taskcluster/linux-amd64-ctc-opt.yml index ce9bfd72..35268224 100644 --- a/taskcluster/linux-amd64-ctc-opt.yml +++ b/taskcluster/linux-amd64-ctc-opt.yml @@ -4,8 +4,8 @@ build: - "pull_request.synchronize" - "pull_request.reopened" template_file: linux-opt-base.tyml - tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.cpu/artifacts/public/home.tar.xz" - summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.cpu/artifacts/public/summarize_graph" + tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.cpu/artifacts/public/home.tar.xz" + summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.cpu/artifacts/public/summarize_graph" scripts: build: 'taskcluster/decoder-build.sh' package: 'taskcluster/decoder-package.sh' diff --git a/taskcluster/linux-amd64-gpu-opt.yml b/taskcluster/linux-amd64-gpu-opt.yml index abc1c3f8..9fc07ffd 100644 --- a/taskcluster/linux-amd64-gpu-opt.yml +++ b/taskcluster/linux-amd64-gpu-opt.yml @@ -12,8 +12,8 @@ build: system_config: > ${swig.patch_nodejs.linux} - tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.gpu/artifacts/public/home.tar.xz" - summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.gpu/artifacts/public/summarize_graph" + tensorflow: 
"https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.gpu/artifacts/public/home.tar.xz" + summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.gpu/artifacts/public/summarize_graph" maxRunTime: 14400 scripts: build: "taskcluster/cuda-build.sh" diff --git a/taskcluster/linux-arm64-cpu-opt.yml b/taskcluster/linux-arm64-cpu-opt.yml index 225afab4..fd101496 100644 --- a/taskcluster/linux-arm64-cpu-opt.yml +++ b/taskcluster/linux-arm64-cpu-opt.yml @@ -4,8 +4,8 @@ build: - "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.arm64" - "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.arm64" - "index.project.deepspeech.deepspeech.native_client.arm64.${event.head.sha}" - tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.arm64/artifacts/public/home.tar.xz" - summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.cpu/artifacts/public/summarize_graph" + tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.arm64/artifacts/public/home.tar.xz" + summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.cpu/artifacts/public/summarize_graph" ## multistrap 2.2.0-ubuntu1 is broken in 14.04: https://bugs.launchpad.net/ubuntu/+source/multistrap/+bug/1313787 system_setup: > diff --git a/taskcluster/linux-rpi3-cpu-opt.yml b/taskcluster/linux-rpi3-cpu-opt.yml index 2a16e623..f4b67cf3 100644 --- a/taskcluster/linux-rpi3-cpu-opt.yml +++ b/taskcluster/linux-rpi3-cpu-opt.yml @@ -4,8 +4,8 @@ build: - "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.arm" - "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.arm" - "index.project.deepspeech.deepspeech.native_client.arm.${event.head.sha}" - tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.arm/artifacts/public/home.tar.xz" - summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.cpu/artifacts/public/summarize_graph" + tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.arm/artifacts/public/home.tar.xz" + summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.cpu/artifacts/public/summarize_graph" ## multistrap 2.2.0-ubuntu1 is broken in 14.04: https://bugs.launchpad.net/ubuntu/+source/multistrap/+bug/1313787 system_setup: > diff --git a/taskcluster/node-package.yml b/taskcluster/node-package.yml index 7cf59f4c..fad30dc6 100644 --- a/taskcluster/node-package.yml +++ b/taskcluster/node-package.yml @@ -16,8 +16,8 @@ build: system_config: > ${swig.patch_nodejs.linux} - tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.cpu/artifacts/public/home.tar.xz" - summarize_graph: 
"https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.cpu/artifacts/public/summarize_graph" + tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.cpu/artifacts/public/home.tar.xz" + summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.cpu/artifacts/public/summarize_graph" scripts: build: "taskcluster/node-build.sh" package: "taskcluster/node-package.sh" diff --git a/taskcluster/test-armbian-opt-base.tyml b/taskcluster/test-armbian-opt-base.tyml index 79310dbf..4020b4b6 100644 --- a/taskcluster/test-armbian-opt-base.tyml +++ b/taskcluster/test-armbian-opt-base.tyml @@ -44,7 +44,7 @@ then: PIP_DEFAULT_TIMEOUT: 60 PIP_EXTRA_INDEX_URL: "https://lissyx.github.io/deepspeech-python-wheels/" EXTRA_PYTHON_CONFIGURE_OPTS: "--with-fpectl" # Required by Debian Stretch - EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.6.0-18-g5021473" + EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.11.0-rc2-4-g77b7b17" command: - "/bin/bash" diff --git a/taskcluster/test-darwin-opt-base.tyml b/taskcluster/test-darwin-opt-base.tyml index 0c73da7d..cebebcfa 100644 --- a/taskcluster/test-darwin-opt-base.tyml +++ b/taskcluster/test-darwin-opt-base.tyml @@ -39,7 +39,7 @@ then: DEEPSPEECH_TEST_MODEL: https://queue.taskcluster.net/v1/task/${training}/artifacts/public/output_graph.pb DEEPSPEECH_PROD_MODEL: https://github.com/reuben/DeepSpeech/releases/download/v0.2.0-prod/output_graph.pb DEEPSPEECH_PROD_MODEL_MMAP: https://github.com/reuben/DeepSpeech/releases/download/v0.2.0-prod/output_graph.pbmm - EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.6.0-18-g5021473" + EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.11.0-rc2-4-g77b7b17" command: - - "/bin/bash" diff --git a/taskcluster/test-linux-opt-base.tyml b/taskcluster/test-linux-opt-base.tyml index a5c5176d..5c5a2b50 100644 --- a/taskcluster/test-linux-opt-base.tyml +++ b/taskcluster/test-linux-opt-base.tyml @@ -44,7 +44,7 @@ then: DEEPSPEECH_PROD_MODEL: https://github.com/reuben/DeepSpeech/releases/download/v0.2.0-prod/output_graph.pb DEEPSPEECH_PROD_MODEL_MMAP: https://github.com/reuben/DeepSpeech/releases/download/v0.2.0-prod/output_graph.pbmm PIP_DEFAULT_TIMEOUT: 60 - EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.6.0-18-g5021473" + EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.11.0-rc2-4-g77b7b17" command: - "/bin/bash" diff --git a/taskcluster/test-raspbian-opt-base.tyml b/taskcluster/test-raspbian-opt-base.tyml index fdbc6d3e..8f8eca04 100644 --- a/taskcluster/test-raspbian-opt-base.tyml +++ b/taskcluster/test-raspbian-opt-base.tyml @@ -44,7 +44,7 @@ then: PIP_DEFAULT_TIMEOUT: 60 PIP_EXTRA_INDEX_URL: "https://www.piwheels.org/simple" EXTRA_PYTHON_CONFIGURE_OPTS: "--with-fpectl" # Required by Raspbian Stretch / PiWheels - EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.6.0-18-g5021473" + EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.11.0-rc2-4-g77b7b17" command: - "/bin/bash" diff --git a/taskcluster/test-training_upstream-linux-amd64-py27mu-opt.yml b/taskcluster/test-training_upstream-linux-amd64-py27mu-opt.yml index 69dad816..ee287973 100644 --- a/taskcluster/test-training_upstream-linux-amd64-py27mu-opt.yml +++ b/taskcluster/test-training_upstream-linux-amd64-py27mu-opt.yml @@ -7,7 +7,7 @@ build: apt-get -qq -y install ${python.packages_trusty.apt} args: tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-train-tests.sh 2.7.14:mu" - convert_graphdef: 
"https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.cpu/artifacts/public/convert_graphdef_memmapped_format" + convert_graphdef: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.cpu/artifacts/public/convert_graphdef_memmapped_format" metadata: name: "DeepSpeech Linux AMD64 CPU upstream training Py2.7 mu" description: "Training a DeepSpeech LDC93S1 model for Linux/AMD64 using upstream TensorFlow Python 2.7 mu, CPU only, optimized version"