Merge pull request #2212 from mozilla/workspace-status-version

Use bazel workspace status to generate versions
This commit is contained in:
Reuben Morais 2019-06-26 10:58:06 +00:00 committed by GitHub
commit 87a9605886
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 73 additions and 105 deletions

View File

@ -169,7 +169,7 @@ RUN ./configure
# Build DeepSpeech
RUN bazel build --config=monolithic --config=cuda -c opt --copt=-O3 --copt="-D_GLIBCXX_USE_CXX11_ABI=0" --copt=-mtune=generic --copt=-march=x86-64 --copt=-msse --copt=-msse2 --copt=-msse3 --copt=-msse4.1 --copt=-msse4.2 --copt=-mavx --copt=-fvisibility=hidden //native_client:libdeepspeech.so //native_client:generate_trie --verbose_failures --action_env=LD_LIBRARY_PATH=${LD_LIBRARY_PATH}
RUN bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic --config=cuda -c opt --copt=-O3 --copt="-D_GLIBCXX_USE_CXX11_ABI=0" --copt=-mtune=generic --copt=-march=x86-64 --copt=-msse --copt=-msse2 --copt=-msse3 --copt=-msse4.1 --copt=-msse4.2 --copt=-mavx --copt=-fvisibility=hidden //native_client:libdeepspeech.so //native_client:generate_trie --verbose_failures --action_env=LD_LIBRARY_PATH=${LD_LIBRARY_PATH}
###
### Using TensorFlow upstream should work

View File

@ -14,19 +14,12 @@ config_setting(
)
genrule(
name = "ds_git_version",
outs = ["ds_version.h"],
cmd = "$(location :ds_git_version.sh) >$@",
tools = [":ds_git_version.sh"],
local = 1,
)
genrule(
name = "ds_graph_version",
outs = ["ds_graph_version.h"],
cmd = "$(location :ds_graph_version.sh) >$@",
tools = [":ds_graph_version.sh"],
name = "workspace_status",
outs = ["workspace_status.cc"],
cmd = "$(location :gen_workspace_status.sh) >$@",
tools = [":gen_workspace_status.sh"],
local = 1,
stamp = 1,
)
KENLM_SOURCES = glob(["kenlm/lm/*.cc", "kenlm/util/*.cc", "kenlm/util/double-conversion/*.cc",
@ -72,8 +65,8 @@ tf_cc_shared_object(
"alphabet.h",
"modelstate.h",
"modelstate.cc",
"ds_version.h",
"ds_graph_version.h"] +
"workspace_status.h",
"workspace_status.cc"] +
DECODER_SOURCES +
select({
"//native_client:tflite": [

View File

@ -56,7 +56,7 @@ ln -s ../DeepSpeech/native_client ./
You can now use Bazel to build the main DeepSpeech library, `libdeepspeech.so`, as well as the `generate_trie` binary. Add `--config=cuda` if you want a CUDA build.
```
bazel build --config=monolithic -c opt --copt=-O3 --copt="-D_GLIBCXX_USE_CXX11_ABI=0" --copt=-fvisibility=hidden //native_client:libdeepspeech.so //native_client:generate_trie
bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic -c opt --copt=-O3 --copt="-D_GLIBCXX_USE_CXX11_ABI=0" --copt=-fvisibility=hidden //native_client:libdeepspeech.so //native_client:generate_trie
```
The generated binaries will be saved to `bazel-bin/native_client/`.
@ -128,13 +128,13 @@ We do support cross-compilation. Please refer to our `mozilla/tensorflow` fork,
So your command line for `RPi3` and `ARMv7` should look like:
```
bazel build --config=monolithic --config=rpi3 --config=rpi3_opt -c opt --copt=-O3 --copt=-fvisibility=hidden //native_client:libdeepspeech.so //native_client:generate_trie
bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic --config=rpi3 --config=rpi3_opt -c opt --copt=-O3 --copt=-fvisibility=hidden //native_client:libdeepspeech.so //native_client:generate_trie
```
And your command line for `LePotato` and `ARM64` should look like:
```
bazel build --config=monolithic --config=rpi3-armv8 --config=rpi3-armv8_opt -c opt --copt=-O3 --copt=-fvisibility=hidden //native_client:libdeepspeech.so //native_client:generate_trie
bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic --config=rpi3-armv8 --config=rpi3-armv8_opt -c opt --copt=-O3 --copt=-fvisibility=hidden //native_client:libdeepspeech.so //native_client:generate_trie
```
While we test only on RPi3 Raspbian Stretch and LePotato ARMBian stretch, anything compatible with `armv7-a cortex-a53` or `armv8-a cortex-a53` should be fine.
@ -156,13 +156,13 @@ Please refer to TensorFlow documentation on how to setup the environment to buil
You can build the `libdeepspeech.so` using (ARMv7):
```
bazel build --config=monolithic --config=android --config=android_arm --define=runtime=tflite --action_env ANDROID_NDK_API_LEVEL=21 --cxxopt=-std=c++11 --copt=-D_GLIBCXX_USE_C99 //native_client:libdeepspeech.so
bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic --config=android --config=android_arm --define=runtime=tflite --action_env ANDROID_NDK_API_LEVEL=21 --cxxopt=-std=c++11 --copt=-D_GLIBCXX_USE_C99 //native_client:libdeepspeech.so
```
Or (ARM64):
```
bazel build --config=monolithic --config=android --config=android_arm64 --define=runtime=tflite --action_env ANDROID_NDK_API_LEVEL=21 --cxxopt=-std=c++11 --copt=-D_GLIBCXX_USE_C99 //native_client:libdeepspeech.so
bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic --config=android --config=android_arm64 --define=runtime=tflite --action_env ANDROID_NDK_API_LEVEL=21 --cxxopt=-std=c++11 --copt=-D_GLIBCXX_USE_C99 //native_client:libdeepspeech.so
```
Building the `deepspeech` binary will happen through `ndk-build` (ARMv7):

View File

@ -0,0 +1,26 @@
#!/bin/bash
set -ex
# This script is run by bazel when the build process starts to
# generate key-value information that represents the status of the
# workspace. The output should be like
#
# KEY1 VALUE1
# KEY2 VALUE2
#
# Keys starting with STABLE_ cause dependent rules to be re-run when their value
# changes.
#
# If the script exits with non-zero code, it's considered as a failure
# and the output will be discarded.

# The code below presents an implementation that works for git repository
tf_git_rev=$(git describe --long --tags)
echo "STABLE_TF_GIT_VERSION ${tf_git_rev}"

# The DeepSpeech checkout lives under native_client/; describe it separately
# from the TensorFlow checkout above. pushd/popd print the directory stack to
# stdout, which bazel would try to parse as status key-value lines, so the
# builtin output is silenced (set -x traces still go to stderr, which is fine).
pushd native_client > /dev/null
ds_git_rev=$(git describe --long --tags)
echo "STABLE_DS_GIT_VERSION ${ds_git_rev}"
ds_graph_version=$(cat ../GRAPH_VERSION)
echo "STABLE_DS_GRAPH_VERSION ${ds_graph_version}"
popd > /dev/null

View File

@ -13,7 +13,7 @@
#include "alphabet.h"
#include "modelstate.h"
#include "native_client/ds_version.h"
#include "workspace_status.h"
#ifndef USE_TFLITE
#include "tfmodelstate.h"

View File

@ -107,14 +107,14 @@ At this point we are ready to start building the `native_client`, go to `tensorf
We will add AVX/AVX2 support in the command, please make sure that your CPU supports these instructions before adding the flags, if not you can remove them.
```bash
bazel build -c opt --copt=/arch:AVX --copt=/arch:AVX2 //native_client:libdeepspeech.so
bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" -c opt --copt=/arch:AVX --copt=/arch:AVX2 //native_client:libdeepspeech.so
```
#### GPU with CUDA
If you enabled CUDA in [configure.py](https://github.com/mozilla/tensorflow/blob/master/configure.py) configuration command now you can add `--config=cuda` to compile with CUDA support.
```bash
bazel build -c opt --config=cuda --copt=/arch:AVX --copt=/arch:AVX2 //native_client:libdeepspeech.so
bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" -c opt --config=cuda --copt=/arch:AVX --copt=/arch:AVX2 //native_client:libdeepspeech.so
```
Be patient, if you enabled AVX/AVX2 and CUDA it will take a long time. Finally you should see it stops and shows the path to the generated `libdeepspeech.so`.

View File

@ -1,51 +0,0 @@
#!/bin/bash
# (Deleted by this PR, replaced by the bazel workspace-status mechanism.)
# Emits on stdout a C++ source file defining ds_git_version() and
# tf_local_git_version(), derived from `git describe --long --tags` on the
# DeepSpeech and TensorFlow checkouts respectively.

# Make GNU coreutils (realpath, etc.) available on the macOS build workers.
if [ `uname` = "Darwin" ]; then
export PATH="/Users/build-user/TaskCluster/Workdir/tasks/tc-workdir/homebrew/opt/coreutils/libexec/gnubin:${PATH}"
fi
# On Windows (MSYS), make sure git is on PATH.
if [ `uname -o` = "Msys" ]; then
export PATH="/c/Program Files/Git/bin/:${PATH}"
fi

# .git of the DeepSpeech checkout: this script lives one level below the repo
# root, so resolve relative to the script's own location.
DS_GIT_DIR="$(realpath "$(dirname "$(realpath "$0")")/../.git")"
# NOTE(review): `return` outside a function is an error in an executed (not
# sourced) bash script; this was presumably meant to be `exit 1` — the early
# bail-out likely never worked as intended. Confirm how the script was invoked.
if [ ! -d "${DS_GIT_DIR}" -a ! -f "${DS_GIT_DIR}" ]; then
return 1
fi;

# .git of the TensorFlow checkout, assumed to be the current working directory.
TF_GIT_DIR="$(realpath $(pwd)/tensorflow/../.git)"
# NOTE(review): same `return` vs `exit` concern as above.
if [ ! -d "${TF_GIT_DIR}" -a ! -f "${TF_GIT_DIR}" ]; then
return 1
fi;

# Handle the case of git submodules, the .git file contains the path to the tree
if [ -f "${DS_GIT_DIR}" ]; then
pushd $(dirname ${DS_GIT_DIR}) > /dev/null
DS_GIT_DIR=$(realpath "$(grep '^gitdir:' .git | cut -d' ' -f2)")
popd > /dev/null
fi;
if [ -f "${TF_GIT_DIR}" ]; then
pushd $(dirname ${TF_GIT_DIR}) > /dev/null
TF_GIT_DIR=$(realpath "$(grep '^gitdir:' .git | cut -d' ' -f2)")
popd > /dev/null
fi;

# Fall back to "unknown" rather than failing when describe cannot run
# (e.g. shallow clone without tags).
DS_GIT_VERSION=$(git --git-dir="${DS_GIT_DIR}" describe --long --tags)
if [ $? -ne 0 ]; then
DS_GIT_VERSION=unknown;
fi

TF_GIT_VERSION=$(git --git-dir="${TF_GIT_DIR}" describe --long --tags)
if [ $? -ne 0 ]; then
TF_GIT_VERSION=unknown;
fi

# Emit the generated C++ translation unit on stdout.
cat <<EOF
#include <string>

const char* ds_git_version() {
  return "${DS_GIT_VERSION}";
}

const char* tf_local_git_version() {
  return "${TF_GIT_VERSION}";
}
EOF

View File

@ -1,19 +0,0 @@
#!/bin/bash
# (Deleted by this PR, replaced by the bazel workspace-status mechanism.)
# Emits on stdout a C header defining DS_GRAPH_VERSION, read from the
# GRAPH_VERSION file at the DeepSpeech repository root.

# Make GNU coreutils (realpath, etc.) available on the macOS build workers.
if [ `uname` = "Darwin" ]; then
export PATH="/Users/build-user/TaskCluster/Workdir/tasks/tc-workdir/homebrew/opt/coreutils/libexec/gnubin:${PATH}"
fi

# Repository root: this script lives one level below it.
DS_DIR="$(realpath "$(dirname "$(realpath "$0")")/../")"
if [ ! -d "${DS_DIR}" ]; then
exit 1
fi;

# Fail (discarding output) if GRAPH_VERSION is missing or unreadable.
DS_GRAPH_VERSION=$(cat "${DS_DIR}/GRAPH_VERSION")
if [ $? -ne 0 ]; then
exit 1
fi

cat <<EOF
#define DS_GRAPH_VERSION ${DS_GRAPH_VERSION}
EOF

View File

@ -0,0 +1,19 @@
#!/bin/bash
set -x

# Reads the stable workspace status file written by bazel (populated by
# bazel_workspace_status_cmd.sh via --workspace_status_command) and emits the
# C++ definitions of the version accessors on stdout; a genrule redirects this
# into workspace_status.cc.

status_file="bazel-out/stable-status.txt"

tf_git_version=$(grep "STABLE_TF_GIT_VERSION" "${status_file}" | cut -d' ' -f2)
ds_git_version=$(grep "STABLE_DS_GIT_VERSION" "${status_file}" | cut -d' ' -f2)
ds_graph_version=$(grep "STABLE_DS_GRAPH_VERSION" "${status_file}" | cut -d' ' -f2)

# An empty graph version would expand to `return ;` below, which does not
# compile; fail the genrule with a clear message instead of emitting broken
# code. (Empty git versions merely yield "" and still compile.)
if [ -z "${ds_graph_version}" ]; then
  echo "Missing or empty STABLE_DS_GRAPH_VERSION in ${status_file}" >&2
  exit 1
fi

cat <<EOF
const char *tf_local_git_version() {
  return "${tf_git_version}";
}

const char *ds_git_version() {
  return "${ds_git_version}";
}

const int ds_graph_version() {
  return ${ds_graph_version};
}
EOF

View File

@ -1,6 +1,6 @@
#include "tfmodelstate.h"
#include "ds_graph_version.h"
#include "workspace_status.h"
using namespace tensorflow;
using std::vector;
@ -81,10 +81,10 @@ TFModelState::init(const char* model_path,
}
int graph_version = graph_def_.version();
if (graph_version < DS_GRAPH_VERSION) {
if (graph_version < ds_graph_version()) {
std::cerr << "Specified model file version (" << graph_version << ") is "
<< "incompatible with minimum version supported by this client ("
<< DS_GRAPH_VERSION << "). See "
<< ds_graph_version() << "). See "
<< "https://github.com/mozilla/DeepSpeech/#model-compatibility "
<< "for more information" << std::endl;
return DS_ERR_MODEL_INCOMPATIBLE;

View File

@ -0,0 +1,8 @@
#ifndef WORKSPACE_STATUS_H
#define WORKSPACE_STATUS_H

// Version accessors whose definitions live in the bazel-generated
// workspace_status.cc (produced by gen_workspace_status.sh from bazel's
// stable workspace status; see --workspace_status_command).
const char *tf_local_git_version();
const char *ds_git_version();
// NOTE(review): the top-level `const` on a by-value return type is ignored by
// the compiler; plain `int` would be equivalent. Kept as-is to match the
// generated definition.
const int ds_graph_version();

#endif // WORKSPACE_STATUS_H

View File

@ -811,7 +811,7 @@ do_bazel_build()
fi;
bazel ${BAZEL_OUTPUT_USER_ROOT} build \
-s --explain bazel_monolithic.log --verbose_explanations --experimental_strict_action_env --config=monolithic -c opt ${BAZEL_BUILD_FLAGS} ${BAZEL_TARGETS}
-s --explain bazel_monolithic.log --verbose_explanations --experimental_strict_action_env --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic -c opt ${BAZEL_BUILD_FLAGS} ${BAZEL_TARGETS}
if is_patched_bazel; then
find ${DS_ROOT_TASK}/DeepSpeech/tf/bazel-out/ -iname "*.ckd" | tar -cf ${DS_ROOT_TASK}/DeepSpeech/bazel-ckd-ds.tar -T -
@ -826,14 +826,6 @@ shutdown_bazel()
bazel ${BAZEL_OUTPUT_USER_ROOT} shutdown
}
do_bazel_shared_build()
{
cd ${DS_ROOT_TASK}/DeepSpeech/tf
eval "export ${BAZEL_ENV_FLAGS}"
bazel ${BAZEL_OUTPUT_USER_ROOT} build \
-s --explain bazel_shared.log --verbose_explanations --experimental_strict_action_env -c opt ${BAZEL_BUILD_FLAGS} ${BAZEL_TARGETS}
}
do_deepspeech_binary_build()
{
cd ${DS_DSDIR}