Update to TensorFlow r1.11

Alexandre Lissy 2018-09-24 06:59:02 +02:00
parent 4b66535d2a
commit 1e0945e430
36 changed files with 72 additions and 65 deletions

View File

@@ -58,7 +58,10 @@ tf_cc_shared_object(
"//tensorflow/core/kernels:control_flow_ops", # Enter
"//tensorflow/core/kernels:tile_ops", # Tile
"//tensorflow/core/kernels:gather_op", # Gather
"//tensorflow/contrib/rnn:lstm_ops_kernels" # BlockLSTM
"//tensorflow/contrib/rnn:lstm_ops_kernels", # BlockLSTM
"//tensorflow/core/kernels:random_ops", # RandomGammaGrad
"//tensorflow/core/kernels:pack_op", # Pack
"//tensorflow/core/kernels:gather_nd_op", # GatherNd
#### Needed by production model produced without "--use_seq_length False"
#"//tensorflow/core/kernels:logging_ops", # Assert
#"//tensorflow/core/kernels:reverse_sequence_op", # ReverseSequence

View File

@@ -51,10 +51,11 @@ Check the [main README](../README.md) for more details.
If you'd like to build the binaries yourself, you'll need the following prerequisites downloaded and installed:
* [TensorFlow source and requirements](https://www.tensorflow.org/install/install_sources)
* [TensorFlow requirements](https://www.tensorflow.org/install/install_sources)
* [TensorFlow `r1.11` sources](https://github.com/mozilla/tensorflow/tree/r1.11)
* [libsox](https://sourceforge.net/projects/sox/)
We recommend using our fork of TensorFlow since it includes fixes for common problems encountered when building the native client files, you can [get it here](https://github.com/mozilla/tensorflow/).
It is required to use our fork of TensorFlow since it includes fixes for common problems encountered when building the native client files.
If you'd like to build the language bindings, you'll also need:
@@ -73,7 +74,7 @@ ln -s ../DeepSpeech/native_client ./
## Building
Before building the DeepSpeech client libraries, you will need to prepare your environment to configure and build TensorFlow.
Preferably, checkout the version of tensorflow which is currently supported by DeepSpeech (see requirements.txt), and use bazel version 0.10.0.
Preferably, check out the version of TensorFlow currently supported by DeepSpeech (see requirements.txt), and use the Bazel version recommended by TensorFlow for that release.
Then, follow the [instructions](https://www.tensorflow.org/install/install_sources) on the TensorFlow site for your platform, up to the end of 'Configure the installation'.
After that, you can build the TensorFlow and DeepSpeech libraries using the following commands. Please note that the flags for `libctc_decoder_with_kenlm.so` differ a little bit.
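For orientation, a minimal sketch of that flow, assuming the mozilla/tensorflow `r1.11` checkout sits next to the DeepSpeech checkout; paths and flags here are illustrative, the exact invocations are the README commands that follow:

```bash
# Illustrative only -- adjust paths to your own layout.
cd ../tensorflow                      # mozilla/tensorflow checkout
git checkout r1.11
ln -s ../DeepSpeech/native_client ./  # as set up earlier in this README
./configure                           # TensorFlow's interactive configuration
bazel build -c opt //native_client:libdeepspeech.so
# libctc_decoder_with_kenlm.so is built the same way, with its own slightly different flags.
```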

View File

@@ -7,3 +7,6 @@ The following procedure was run to remove unneeded files:
cd kenlm
rm -rf windows include lm/filter lm/builder util/stream util/getopt.* python
This was done to keep kenlm's double_conversion namespace unique, so it cannot clash with other copies of double_conversion in the build:
git grep 'double_conversion' | cut -d':' -f1 | sort | uniq | xargs sed -ri 's/double_conversion/kenlm_double_conversion/g'
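A quick way to verify the rewrite (a hedged check, run inside the kenlm directory; `git grep -P` requires a git built with PCRE support):

```bash
# Should print nothing: every remaining double_conversion reference carries the kenlm_ prefix.
git grep -nP '(?<!kenlm_)double_conversion' || echo "namespace rename is complete"
```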

View File

@@ -32,7 +32,7 @@
#include "bignum.h"
#include "ieee.h"
namespace double_conversion {
namespace kenlm_double_conversion {
static int NormalizedExponent(uint64_t significand, int exponent) {
ASSERT(significand != 0);
@@ -637,4 +637,4 @@ static void FixupMultiply10(int estimated_power, bool is_even,
}
}
} // namespace double_conversion
} // namespace kenlm_double_conversion

View File

@@ -30,7 +30,7 @@
#include "utils.h"
namespace double_conversion {
namespace kenlm_double_conversion {
enum BignumDtoaMode {
// Return the shortest correct representation.
@@ -79,6 +79,6 @@ enum BignumDtoaMode {
void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
Vector<char> buffer, int* length, int* point);
} // namespace double_conversion
} // namespace kenlm_double_conversion
#endif // DOUBLE_CONVERSION_BIGNUM_DTOA_H_

View File

@@ -28,7 +28,7 @@
#include "bignum.h"
#include "utils.h"
namespace double_conversion {
namespace kenlm_double_conversion {
Bignum::Bignum()
: bigits_(bigits_buffer_, kBigitCapacity), used_digits_(0), exponent_(0) {
@@ -761,4 +761,4 @@ void Bignum::SubtractTimes(const Bignum& other, int factor) {
}
} // namespace double_conversion
} // namespace kenlm_double_conversion

View File

@@ -30,7 +30,7 @@
#include "utils.h"
namespace double_conversion {
namespace kenlm_double_conversion {
class Bignum {
public:
@@ -140,6 +140,6 @@ class Bignum {
DISALLOW_COPY_AND_ASSIGN(Bignum);
};
} // namespace double_conversion
} // namespace kenlm_double_conversion
#endif // DOUBLE_CONVERSION_BIGNUM_H_

View File

@@ -33,7 +33,7 @@
#include "cached-powers.h"
namespace double_conversion {
namespace kenlm_double_conversion {
struct CachedPower {
uint64_t significand;
@@ -172,4 +172,4 @@ void PowersOfTenCache::GetCachedPowerForDecimalExponent(int requested_exponent,
ASSERT(requested_exponent < *found_exponent + kDecimalExponentDistance);
}
} // namespace double_conversion
} // namespace kenlm_double_conversion

View File

@@ -30,7 +30,7 @@
#include "diy-fp.h"
namespace double_conversion {
namespace kenlm_double_conversion {
class PowersOfTenCache {
public:
@@ -59,6 +59,6 @@
int* found_exponent);
};
} // namespace double_conversion
} // namespace kenlm_double_conversion
#endif // DOUBLE_CONVERSION_CACHED_POWERS_H_

View File

@@ -29,7 +29,7 @@
#include "diy-fp.h"
#include "utils.h"
namespace double_conversion {
namespace kenlm_double_conversion {
void DiyFp::Multiply(const DiyFp& other) {
// Simply "emulates" a 128 bit multiplication.
@@ -54,4 +54,4 @@ void DiyFp::Multiply(const DiyFp& other) {
f_ = result_f;
}
} // namespace double_conversion
} // namespace kenlm_double_conversion

View File

@@ -30,7 +30,7 @@
#include "utils.h"
namespace double_conversion {
namespace kenlm_double_conversion {
// This "Do It Yourself Floating Point" class implements a floating-point number
// with a uint64 significand and an int exponent. Normalized DiyFp numbers will
@@ -113,6 +113,6 @@ class DiyFp {
int e_;
};
} // namespace double_conversion
} // namespace kenlm_double_conversion
#endif // DOUBLE_CONVERSION_DIY_FP_H_

View File

@@ -37,7 +37,7 @@
#include "strtod.h"
#include "utils.h"
namespace double_conversion {
namespace kenlm_double_conversion {
const DoubleToStringConverter& DoubleToStringConverter::EcmaScriptConverter() {
int flags = UNIQUE_ZERO | EMIT_POSITIVE_EXPONENT_SIGN;
@@ -886,4 +886,4 @@ double StringToDoubleConverter::StringToIeee(
return sign? -converted: converted;
}
} // namespace double_conversion
} // namespace kenlm_double_conversion

View File

@@ -30,7 +30,7 @@
#include "utils.h"
namespace double_conversion {
namespace kenlm_double_conversion {
class DoubleToStringConverter {
public:
@@ -531,6 +531,6 @@ class StringToDoubleConverter {
DISALLOW_IMPLICIT_CONSTRUCTORS(StringToDoubleConverter);
};
} // namespace double_conversion
} // namespace kenlm_double_conversion
#endif // DOUBLE_CONVERSION_DOUBLE_CONVERSION_H_

View File

@@ -31,7 +31,7 @@
#include "diy-fp.h"
#include "ieee.h"
namespace double_conversion {
namespace kenlm_double_conversion {
// The minimal and maximal target exponent define the range of w's binary
// exponent, where 'w' is the result of multiplying the input by a cached power
@@ -661,4 +661,4 @@ bool FastDtoa(double v,
return result;
}
} // namespace double_conversion
} // namespace kenlm_double_conversion

View File

@@ -30,7 +30,7 @@
#include "utils.h"
namespace double_conversion {
namespace kenlm_double_conversion {
enum FastDtoaMode {
// Computes the shortest representation of the given input. The returned
@@ -83,6 +83,6 @@ bool FastDtoa(double d,
int* length,
int* decimal_point);
} // namespace double_conversion
} // namespace kenlm_double_conversion
#endif // DOUBLE_CONVERSION_FAST_DTOA_H_

View File

@@ -30,7 +30,7 @@
#include "fixed-dtoa.h"
#include "ieee.h"
namespace double_conversion {
namespace kenlm_double_conversion {
// Represents a 128bit type. This class should be replaced by a native type on
// platforms that support 128bit integers.
@@ -399,4 +399,4 @@ bool FastFixedDtoa(double v,
return true;
}
} // namespace double_conversion
} // namespace kenlm_double_conversion

View File

@@ -30,7 +30,7 @@
#include "utils.h"
namespace double_conversion {
namespace kenlm_double_conversion {
// Produces digits necessary to print a given number with
// 'fractional_count' digits after the decimal point.
@@ -51,6 +51,6 @@ namespace double_conversion {
bool FastFixedDtoa(double v, int fractional_count,
Vector<char> buffer, int* length, int* decimal_point);
} // namespace double_conversion
} // namespace kenlm_double_conversion
#endif // DOUBLE_CONVERSION_FIXED_DTOA_H_

View File

@@ -30,7 +30,7 @@
#include "diy-fp.h"
namespace double_conversion {
namespace kenlm_double_conversion {
// We assume that doubles and uint64_t have the same endianness.
static uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
@@ -393,6 +393,6 @@ class Single {
const uint32_t d32_;
};
} // namespace double_conversion
} // namespace kenlm_double_conversion
#endif // DOUBLE_CONVERSION_DOUBLE_H_

View File

@@ -33,7 +33,7 @@
#include "cached-powers.h"
#include "ieee.h"
namespace double_conversion {
namespace kenlm_double_conversion {
// 2^53 = 9007199254740992.
// Any integer with at most 15 decimal digits will hence fit into a double
@@ -555,4 +555,4 @@ float Strtof(Vector<const char> buffer, int exponent) {
}
}
} // namespace double_conversion
} // namespace kenlm_double_conversion

View File

@@ -30,7 +30,7 @@
#include "utils.h"
namespace double_conversion {
namespace kenlm_double_conversion {
// The buffer must only contain digits in the range [0-9]. It must not
// contain a dot or a sign. It must not start with '0', and must not be empty.
@@ -40,6 +40,6 @@ double Strtod(Vector<const char> buffer, int exponent);
// contain a dot or a sign. It must not start with '0', and must not be empty.
float Strtof(Vector<const char> buffer, int exponent);
} // namespace double_conversion
} // namespace kenlm_double_conversion
#endif // DOUBLE_CONVERSION_STRTOD_H_

View File

@@ -126,7 +126,7 @@ typedef unsigned __int64 uint64_t;
DISALLOW_COPY_AND_ASSIGN(TypeName)
#endif
namespace double_conversion {
namespace kenlm_double_conversion {
static const int kCharSize = sizeof(char);
@@ -315,6 +315,6 @@ inline Dest BitCast(Source* source) {
return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
}
} // namespace double_conversion
} // namespace kenlm_double_conversion
#endif // DOUBLE_CONVERSION_UTILS_H_

View File

@@ -162,8 +162,8 @@ void FilePiece::Initialize(const char *name, std::ostream *show_progress, std::s
namespace {
static const double_conversion::StringToDoubleConverter kConverter(
double_conversion::StringToDoubleConverter::ALLOW_TRAILING_JUNK | double_conversion::StringToDoubleConverter::ALLOW_LEADING_SPACES,
static const kenlm_double_conversion::StringToDoubleConverter kConverter(
kenlm_double_conversion::StringToDoubleConverter::ALLOW_TRAILING_JUNK | kenlm_double_conversion::StringToDoubleConverter::ALLOW_LEADING_SPACES,
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::quiet_NaN(),
"inf",

View File

@@ -5,17 +5,17 @@
namespace util {
namespace {
const double_conversion::DoubleToStringConverter kConverter(double_conversion::DoubleToStringConverter::NO_FLAGS, "inf", "NaN", 'e', -6, 21, 6, 0);
const kenlm_double_conversion::DoubleToStringConverter kConverter(kenlm_double_conversion::DoubleToStringConverter::NO_FLAGS, "inf", "NaN", 'e', -6, 21, 6, 0);
} // namespace
char *ToString(double value, char *to) {
double_conversion::StringBuilder builder(to, ToStringBuf<double>::kBytes);
kenlm_double_conversion::StringBuilder builder(to, ToStringBuf<double>::kBytes);
kConverter.ToShortest(value, &builder);
return &to[builder.position()];
}
char *ToString(float value, char *to) {
double_conversion::StringBuilder builder(to, ToStringBuf<float>::kBytes);
kenlm_double_conversion::StringBuilder builder(to, ToStringBuf<float>::kBytes);
kConverter.ToShortestSingle(value, &builder);
return &to[builder.position()];
}

View File

@@ -1,7 +1,7 @@
pandas
progressbar2
python-utils
tensorflow == 1.6.0
tensorflow == 1.11.0rc2
numpy
matplotlib
scipy

View File

@@ -6,8 +6,8 @@ build:
- "index.project.deepspeech.deepspeech.native_client.osx.${event.head.sha}"
- "notify.irc-channel.${notifications.irc}.on-exception"
- "notify.irc-channel.${notifications.irc}.on-failed"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.osx/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.osx/artifacts/public/summarize_graph"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.osx/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.osx/artifacts/public/summarize_graph"
scripts:
build: "taskcluster/host-build.sh"
package: "taskcluster/package.sh"

View File

@@ -14,8 +14,8 @@ build:
system_config:
>
${swig.patch_nodejs.linux}
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.cpu/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.cpu/artifacts/public/summarize_graph"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.cpu/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.cpu/artifacts/public/summarize_graph"
scripts:
build: "taskcluster/host-build.sh"
package: "taskcluster/package.sh"

View File

@@ -4,8 +4,8 @@ build:
- "pull_request.synchronize"
- "pull_request.reopened"
template_file: linux-opt-base.tyml
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.cpu/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.cpu/artifacts/public/summarize_graph"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.cpu/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.cpu/artifacts/public/summarize_graph"
scripts:
build: 'taskcluster/decoder-build.sh'
package: 'taskcluster/decoder-package.sh'

View File

@@ -12,8 +12,8 @@ build:
system_config:
>
${swig.patch_nodejs.linux}
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.gpu/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.gpu/artifacts/public/summarize_graph"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.gpu/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.gpu/artifacts/public/summarize_graph"
maxRunTime: 14400
scripts:
build: "taskcluster/cuda-build.sh"

View File

@@ -4,8 +4,8 @@ build:
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.arm64"
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.arm64"
- "index.project.deepspeech.deepspeech.native_client.arm64.${event.head.sha}"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.arm64/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.cpu/artifacts/public/summarize_graph"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.arm64/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.cpu/artifacts/public/summarize_graph"
## multistrap 2.2.0-ubuntu1 is broken in 14.04: https://bugs.launchpad.net/ubuntu/+source/multistrap/+bug/1313787
system_setup:
>

View File

@@ -4,8 +4,8 @@ build:
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.arm"
- "index.project.deepspeech.deepspeech.native_client.${event.head.branchortag}.${event.head.sha}.arm"
- "index.project.deepspeech.deepspeech.native_client.arm.${event.head.sha}"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.arm/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.cpu/artifacts/public/summarize_graph"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.arm/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.cpu/artifacts/public/summarize_graph"
## multistrap 2.2.0-ubuntu1 is broken in 14.04: https://bugs.launchpad.net/ubuntu/+source/multistrap/+bug/1313787
system_setup:
>

View File

@@ -16,8 +16,8 @@ build:
system_config:
>
${swig.patch_nodejs.linux}
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.cpu/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.cpu/artifacts/public/summarize_graph"
tensorflow: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.cpu/artifacts/public/home.tar.xz"
summarize_graph: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.cpu/artifacts/public/summarize_graph"
scripts:
build: "taskcluster/node-build.sh"
package: "taskcluster/node-package.sh"

View File

@@ -44,7 +44,7 @@ then:
PIP_DEFAULT_TIMEOUT: 60
PIP_EXTRA_INDEX_URL: "https://lissyx.github.io/deepspeech-python-wheels/"
EXTRA_PYTHON_CONFIGURE_OPTS: "--with-fpectl" # Required by Debian Stretch
EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.6.0-18-g5021473"
EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.11.0-rc2-4-g77b7b17"
command:
- "/bin/bash"

View File

@@ -39,7 +39,7 @@ then:
DEEPSPEECH_TEST_MODEL: https://queue.taskcluster.net/v1/task/${training}/artifacts/public/output_graph.pb
DEEPSPEECH_PROD_MODEL: https://github.com/reuben/DeepSpeech/releases/download/v0.2.0-prod/output_graph.pb
DEEPSPEECH_PROD_MODEL_MMAP: https://github.com/reuben/DeepSpeech/releases/download/v0.2.0-prod/output_graph.pbmm
EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.6.0-18-g5021473"
EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.11.0-rc2-4-g77b7b17"
command:
- - "/bin/bash"

View File

@@ -44,7 +44,7 @@ then:
DEEPSPEECH_PROD_MODEL: https://github.com/reuben/DeepSpeech/releases/download/v0.2.0-prod/output_graph.pb
DEEPSPEECH_PROD_MODEL_MMAP: https://github.com/reuben/DeepSpeech/releases/download/v0.2.0-prod/output_graph.pbmm
PIP_DEFAULT_TIMEOUT: 60
EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.6.0-18-g5021473"
EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.11.0-rc2-4-g77b7b17"
command:
- "/bin/bash"

View File

@@ -44,7 +44,7 @@ then:
PIP_DEFAULT_TIMEOUT: 60
PIP_EXTRA_INDEX_URL: "https://www.piwheels.org/simple"
EXTRA_PYTHON_CONFIGURE_OPTS: "--with-fpectl" # Required by Raspbian Stretch / PiWheels
EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.6.0-18-g5021473"
EXPECTED_TENSORFLOW_VERSION: "TensorFlow: v1.11.0-rc2-4-g77b7b17"
command:
- "/bin/bash"

View File

@@ -7,7 +7,7 @@ build:
apt-get -qq -y install ${python.packages_trusty.apt}
args:
tests_cmdline: "${system.homedir.linux}/DeepSpeech/ds/tc-train-tests.sh 2.7.14:mu"
convert_graphdef: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.6.50214731ea43f41ee036ce9af0c0c4a10185fc8f.cpu/artifacts/public/convert_graphdef_memmapped_format"
convert_graphdef: "https://index.taskcluster.net/v1/task/project.deepspeech.tensorflow.pip.r1.11.77b7b1791c33420735aa3a03d7e8cb16ce449b43.cpu/artifacts/public/convert_graphdef_memmapped_format"
metadata:
name: "DeepSpeech Linux AMD64 CPU upstream training Py2.7 mu"
description: "Training a DeepSpeech LDC93S1 model for Linux/AMD64 using upstream TensorFlow Python 2.7 mu, CPU only, optimized version"