From d2009582e9ba65ffc9ac40c6038a1f932550a8c6 Mon Sep 17 00:00:00 2001 From: Kelly Davis Date: Fri, 5 Mar 2021 12:48:08 +0100 Subject: [PATCH 1/8] Rebranding WIP --- BIBLIOGRAPHY.md | 3 +- CODE_OWNERS.rst | 16 +-- CONTRIBUTING.rst | 16 +-- Dockerfile.build.tmpl | 34 +++--- Dockerfile.train.tmpl | 18 +-- GRAPH_VERSION | 2 +- Makefile | 8 +- RELEASE.rst | 12 -- SUPPORT.rst | 6 +- VERSION | 2 +- bin/compare_samples.py | 4 +- bin/data_set_tool.py | 8 +- bin/import_aidatatang.py | 2 +- bin/import_aishell.py | 2 +- bin/import_ccpmf.py | 6 +- bin/import_cv.py | 8 +- bin/import_cv2.py | 10 +- bin/import_fisher.py | 2 +- bin/import_freestmandarin.py | 2 +- bin/import_gram_vaani.py | 2 +- bin/import_ldc93s1.py | 2 +- bin/import_librivox.py | 2 +- bin/import_lingua_libre.py | 4 +- bin/import_m-ailabs.py | 4 +- bin/import_magicdata.py | 2 +- bin/import_primewords.py | 2 +- bin/import_slr57.py | 4 +- bin/import_swb.py | 4 +- bin/import_swc.py | 6 +- bin/import_ted.py | 4 +- bin/import_ts.py | 8 +- bin/import_tuda.py | 6 +- bin/import_vctk.py | 6 +- bin/import_voxforge.py | 2 +- bin/play.py | 10 +- bin/run-ldc93s1.sh | 6 +- bin/run-tc-graph_augmentations.sh | 2 +- bin/run-tc-ldc93s1_checkpoint.sh | 2 +- bin/run-tc-ldc93s1_checkpoint_bytes.sh | 2 +- bin/run-tc-ldc93s1_checkpoint_sdb.sh | 2 +- bin/run-tc-ldc93s1_new.sh | 2 +- bin/run-tc-ldc93s1_new_bytes.sh | 2 +- bin/run-tc-ldc93s1_new_bytes_tflite.sh | 2 +- bin/run-tc-ldc93s1_new_metrics.sh | 2 +- bin/run-tc-ldc93s1_new_sdb.sh | 2 +- bin/run-tc-ldc93s1_new_sdb_csv.sh | 2 +- bin/run-tc-ldc93s1_singleshotinference.sh | 4 +- bin/run-tc-ldc93s1_tflite.sh | 4 +- bin/run-tc-transfer.sh | 6 +- data/README.rst | 4 +- data/lm/generate_lm.py | 2 +- doc/{DeepSpeech.rst => Architecture.rst} | 4 +- doc/BUILDING.rst | 110 +++++++++--------- doc/BUILDING_DotNet.rst | 26 ++--- doc/Contributed-Examples.rst | 2 +- doc/Decoder.rst | 12 +- doc/Error-Codes.rst | 2 +- doc/Flags.rst | 4 +- doc/HotWordBoosting-Examples.rst | 8 +- doc/Makefile | 2 +- doc/ParallelOptimization.rst | 2 +- doc/Scorer.rst | 10 +- doc/TRAINING.rst | 72 ++++++------ doc/USING.rst | 103 ++++++++-------- doc/conf.py | 24 ++-- doc/doxygen-c.conf | 2 +- doc/index.rst | 44 +++---- doc/make.bat | 2 +- evaluate.py | 2 +- evaluate_tflite.py | 4 +- examples/README.rst | 4 +- lm_optimizer.py | 12 +- native_client/BUILD | 24 ++-- native_client/CODINGSTYLE.md | 2 +- native_client/Makefile | 24 ++-- native_client/args.h | 8 +- native_client/bazel_workspace_status_cmd.sh | 4 +- native_client/client.cc | 6 +- native_client/{deepspeech.h => coqui-stt.h} | 70 +++++------ native_client/ctcdecode/scorer.cpp | 2 +- native_client/ctcdecode/scorer.h | 2 +- native_client/ctcdecode/setup.py | 2 +- native_client/ctcdecode/swigwrapper.i | 6 +- native_client/deepspeech.cc | 10 +- native_client/deepspeech_errors.cc | 2 +- native_client/definitions.mk | 6 +- native_client/generate_scorer_package.cpp | 2 +- native_client/java/README.md | 2 +- native_client/java/jni/deepspeech.i | 4 +- native_client/javascript/Makefile | 4 +- native_client/javascript/README.md | 4 +- native_client/javascript/client.ts | 4 +- native_client/javascript/deepspeech.i | 4 +- native_client/javascript/index.ts | 10 +- native_client/javascript/package.json.in | 16 +-- .../kenlm/{README.mozilla => README.coqui} | 0 native_client/modelstate.h | 2 +- native_client/python/README.rst | 2 +- native_client/python/__init__.py | 66 +++++------ native_client/python/client.py | 6 +- native_client/python/impl.i | 4 +- native_client/python/setup.py | 26 ++--- 
native_client/swift/deepspeech-ios.podspec | 12 +- .../deepspeech_ios.xcodeproj/project.pbxproj | 8 +- .../swift/deepspeech_ios/DeepSpeech.swift | 2 +- .../deepspeech_ios/deepspeech_ios.modulemap | 2 +- .../deepspeech_ios_test/ContentView.swift | 6 +- .../SpeechRecognitionImpl.swift | 8 +- native_client/test/concurrent_streams.py | 4 +- native_client/tflitemodelstate.cc | 3 +- native_client/tfmodelstate.cc | 3 +- setup.py | 10 +- stats.py | 2 +- taskcluster/tc-all-vars.sh | 2 +- taskcluster/tc-package.sh | 20 ++-- tests/test_importers.py | 2 +- tests/test_value_range.py | 2 +- DeepSpeech.py => train.py | 2 +- .../GRAPH_VERSION | 0 .../VERSION | 0 .../__init__.py | 0 .../evaluate.py | 0 .../train.py | 0 .../util/__init__.py | 0 .../util/audio.py | 0 .../util/augmentations.py | 0 .../util/check_characters.py | 0 .../util/checkpoints.py | 0 .../util/config.py | 4 +- .../util/downloader.py | 0 .../util/evaluate_tools.py | 0 .../util/feeding.py | 0 .../util/flags.py | 14 +-- .../util/gpu.py | 0 .../util/helpers.py | 4 +- .../util/importers.py | 0 .../util/io.py | 0 .../util/logging.py | 0 .../util/sample_collections.py | 2 +- .../util/stm.py | 0 .../util/taskcluster.py | 0 .../util/text.py | 0 transcribe.py | 14 +-- util/taskcluster.py | 2 +- 144 files changed, 584 insertions(+), 594 deletions(-) delete mode 100644 RELEASE.rst rename doc/{DeepSpeech.rst => Architecture.rst} (99%) rename native_client/{deepspeech.h => coqui-stt.h} (93%) rename native_client/kenlm/{README.mozilla => README.coqui} (100%) rename DeepSpeech.py => train.py (83%) rename training/{deepspeech_training => coqui_stt_training}/GRAPH_VERSION (100%) rename training/{deepspeech_training => coqui_stt_training}/VERSION (100%) rename training/{deepspeech_training => coqui_stt_training}/__init__.py (100%) rename training/{deepspeech_training => coqui_stt_training}/evaluate.py (100%) rename training/{deepspeech_training => coqui_stt_training}/train.py (100%) rename training/{deepspeech_training => coqui_stt_training}/util/__init__.py (100%) rename training/{deepspeech_training => coqui_stt_training}/util/audio.py (100%) rename training/{deepspeech_training => coqui_stt_training}/util/augmentations.py (100%) rename training/{deepspeech_training => coqui_stt_training}/util/check_characters.py (100%) rename training/{deepspeech_training => coqui_stt_training}/util/checkpoints.py (100%) rename training/{deepspeech_training => coqui_stt_training}/util/config.py (98%) rename training/{deepspeech_training => coqui_stt_training}/util/downloader.py (100%) rename training/{deepspeech_training => coqui_stt_training}/util/evaluate_tools.py (100%) rename training/{deepspeech_training => coqui_stt_training}/util/feeding.py (100%) rename training/{deepspeech_training => coqui_stt_training}/util/flags.py (94%) rename training/{deepspeech_training => coqui_stt_training}/util/gpu.py (100%) rename training/{deepspeech_training => coqui_stt_training}/util/helpers.py (97%) rename training/{deepspeech_training => coqui_stt_training}/util/importers.py (100%) rename training/{deepspeech_training => coqui_stt_training}/util/io.py (100%) rename training/{deepspeech_training => coqui_stt_training}/util/logging.py (100%) rename training/{deepspeech_training => coqui_stt_training}/util/sample_collections.py (99%) rename training/{deepspeech_training => coqui_stt_training}/util/stm.py (100%) rename training/{deepspeech_training => coqui_stt_training}/util/taskcluster.py (100%) rename training/{deepspeech_training => coqui_stt_training}/util/text.py (100%) diff 
--git a/BIBLIOGRAPHY.md b/BIBLIOGRAPHY.md index 1d392a66..f675f38b 100644 --- a/BIBLIOGRAPHY.md +++ b/BIBLIOGRAPHY.md @@ -1,5 +1,4 @@ -This file contains a list of papers in chronological order that have been published -using DeepSpeech. +This file contains a list of papers in chronological order that have been published using 🐸STT. To appear ========== diff --git a/CODE_OWNERS.rst b/CODE_OWNERS.rst index 0ae6659a..92150211 100644 --- a/CODE_OWNERS.rst +++ b/CODE_OWNERS.rst @@ -1,7 +1,7 @@ -DeepSpeech code owners / governance system -========================================== +Coqui STT code owners / governance system +========================================= -DeepSpeech is run under a governance system inspired (and partially copied from) by the `Mozilla module ownership system `_. The project is roughly divided into modules, and each module has its own owners, which are responsible for reviewing pull requests and deciding on technical direction for their modules. Module ownership authority is given to people who have worked extensively on areas of the project. +🐸STT is run under a governance system inspired by (and partially copied from) the `Mozilla module ownership system `_. The project is roughly divided into modules, and each module has its own owners, who are responsible for reviewing pull requests and deciding on technical direction for their modules. Module ownership authority is given to people who have worked extensively on areas of the project. Module owners also have the authority of naming other module owners or appointing module peers, which are people with authority to review pull requests in that module. They can also sub-divide their module into sub-modules with their own owners. @@ -46,7 +46,7 @@ Testing & CI Native inference client ----------------------- -Everything that goes into libdeepspeech.so and is not specifically covered in another area fits here. +Everything that goes into libstt.so and is not specifically covered in another area fits here. - Alexandre Lissy (@lissyx) - Reuben Morais (@reuben) @@ -110,7 +110,7 @@ Documentation - Alexandre Lissy (@lissyx) - Reuben Morais (@reuben) -Third party bindings --------------------- - -Hosted externally and owned by the individual authors. See the `list of third-party bindings `_ for more info. +.. Third party bindings + -------------------- + + Hosted externally and owned by the individual authors. See the `list of third-party bindings `_ for more info. diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index c7970a34..f12967da 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -1,14 +1,14 @@ Contribution guidelines ======================= -Welcome to the DeepSpeech project! We are excited to see your interest, and appreciate your support! +Welcome to the 🐸STT project! We are excited to see your interest, and appreciate your support! This repository is governed by Mozilla's code of conduct and etiquette guidelines. For more details, please read the `Mozilla Community Participation Guidelines `_. How to Make a Good Pull Request ------------------------------- -Here's some guidelines on how to make a good PR to DeepSpeech. +Here are some guidelines on how to make a good PR to 🐸STT. Bug-fix PR ^^^^^^^^^^ @@ -18,20 +18,20 @@ You've found a bug and you were able to squash it! Great job! Please write a sho Documentation PR ^^^^^^^^^^^^^^^^ -If you're just making updates or changes to the documentation, there's no need to run all of DeepSpeech's tests for Continuous Integration (i.e. Taskcluster tests).
In this case, at the end of your short but clear commit message, you should add **X-DeepSpeech: NOBUILD**. This will trigger the CI tests to skip your PR, saving both time and compute. +If you're just making updates or changes to the documentation, there's no need to run all of 🐸STT's tests for Continuous Integration (i.e. Taskcluster tests). In this case, at the end of your short but clear commit message, you should add **X-DeepSpeech: NOBUILD**. This will trigger the CI tests to skip your PR, saving both time and compute. New Feature PR ^^^^^^^^^^^^^^ -You've made some core changes to DeepSpeech, and you would like to share them back with the community -- great! First things first: if you're planning to add a feature (not just fix a bug or docs) let the DeepSpeech team know ahead of time and get some feedback early. A quick check-in with the team can save time during code-review, and also ensure that your new feature fits into the project. +You've made some core changes to 🐸STT, and you would like to share them back with the community -- great! First things first: if you're planning to add a feature (not just fix a bug or docs), let the 🐸STT team know ahead of time and get some feedback early. A quick check-in with the team can save time during code-review, and also ensure that your new feature fits into the project. -The DeepSpeech codebase is made of many connected parts. There is Python code for training DeepSpeech, core C++ code for running inference on trained models, and multiple language bindings to the C++ core so you can use DeepSpeech in your favorite language. +The 🐸STT codebase is made of many connected parts. There is Python code for training 🐸STT, core C++ code for running inference on trained models, and multiple language bindings to the C++ core so you can use 🐸STT in your favorite language. -Whenever you add a new feature to DeepSpeech and what to contribute that feature back to the project, here are some things to keep in mind: +Whenever you add a new feature to 🐸STT and want to contribute that feature back to the project, here are some things to keep in mind: -1. You've made changes to the core C++ code. Core changes can have downstream effects on all parts of the DeepSpeech project, so keep that in mind. You should minimally also make necessary changes to the C client (i.e. **args.h** and **client.cc**). The bindings for Python, Java, and Javascript are SWIG generated, and in the best-case scenario you won't have to worry about them. However, if you've added a whole new feature, you may need to make custom tweaks to those bindings, because SWIG may not automagically work with your new feature, especially if you've exposed new arguments. The bindings for .NET and Swift are not generated automatically. It would be best if you also made the necessary manual changes to these bindings as well. It is best to communicate with the core DeepSpeech team and come to an understanding of where you will likely need to work with the bindings. They can't predict all the bugs you will run into, but they will have a good idea of how to plan for some obvious challenges. +1. You've made changes to the core C++ code. Core changes can have downstream effects on all parts of the 🐸STT project, so keep that in mind. You should minimally also make necessary changes to the C client (i.e. **args.h** and **client.cc**). The bindings for Python, Java, and JavaScript are SWIG-generated, and in the best-case scenario you won't have to worry about them.
However, if you've added a whole new feature, you may need to make custom tweaks to those bindings, because SWIG may not automagically work with your new feature, especially if you've exposed new arguments. The bindings for .NET and Swift are not generated automatically. It would be best if you also made the necessary manual changes to these bindings. It is best to communicate with the core 🐸STT team and come to an understanding of where you will likely need to work with the bindings. They can't predict all the bugs you will run into, but they will have a good idea of how to plan for some obvious challenges. 2. You've made changes to the Python code. Make sure you run a linter (described below). -3. Make sure your new feature doesn't regress the project. If you've added a significant feature or amount of code, you want to be sure your new feature doesn't create performance issues. For example, if you've made a change to the DeepSpeech decoder, you should know that inference performance doesn't drop in terms of latency, accuracy, or memory usage. Unless you're proposing a new decoding algorithm, you probably don't have to worry about affecting accuracy. However, it's very possible you've affected latency or memory usage. You should run local performance tests to make sure no bugs have crept in. There are lots of tools to check latency and memory usage, and you should use what is most comfortable for you and gets the job done. If you're on Linux, you might find [[perf](https://perf.wiki.kernel.org/index.php/Main_Page)] to be a useful tool. You can use sample WAV files for testing which are provided in the `DeepSpeech/data/` directory. +3. Make sure your new feature doesn't regress the project. If you've added a significant feature or amount of code, you want to be sure your new feature doesn't create performance issues. For example, if you've made a change to the 🐸STT decoder, you should know that inference performance doesn't drop in terms of latency, accuracy, or memory usage. Unless you're proposing a new decoding algorithm, you probably don't have to worry about affecting accuracy. However, it's very possible you've affected latency or memory usage. You should run local performance tests to make sure no bugs have crept in. There are lots of tools to check latency and memory usage, and you should use what is most comfortable for you and gets the job done. If you're on Linux, you might find `perf <https://perf.wiki.kernel.org/index.php/Main_Page>`_ to be a useful tool. You can use sample WAV files for testing, which are provided in the `STT/data/` directory.
Requesting review on your PR ---------------------------- diff --git a/Dockerfile.build.tmpl b/Dockerfile.build.tmpl index e6648102..cb88d80c 100644 --- a/Dockerfile.build.tmpl +++ b/Dockerfile.build.tmpl @@ -3,8 +3,8 @@ # Need devel version cause we need /usr/include/cudnn.h FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04 -ENV DEEPSPEECH_REPO=#DEEPSPEECH_REPO# -ENV DEEPSPEECH_SHA=#DEEPSPEECH_SHA# +ENV STT_REPO=#STT_REPO# +ENV STT_SHA=#STT_SHA# # >> START Install base software @@ -113,15 +113,15 @@ RUN echo "build --spawn_strategy=standalone --genrule_strategy=standalone" \ WORKDIR / -RUN git clone --recursive $DEEPSPEECH_REPO DeepSpeech -WORKDIR /DeepSpeech -RUN git checkout $DEEPSPEECH_SHA +RUN git clone --recursive $STT_REPO STT +WORKDIR /STT +RUN git checkout $STT_SHA RUN git submodule sync tensorflow/ RUN git submodule update --init tensorflow/ # >> START Build and bind -WORKDIR /DeepSpeech/tensorflow +WORKDIR /STT/tensorflow # Fix for not found script https://github.com/tensorflow/tensorflow/issues/471 RUN ./configure @@ -132,7 +132,7 @@ RUN ./configure # passing LD_LIBRARY_PATH is required cause Bazel doesn't pickup it from environment -# Build DeepSpeech +# Build STT RUN bazel build \ --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" \ --config=monolithic \ @@ -149,22 +149,22 @@ RUN bazel build \ --copt=-msse4.2 \ --copt=-mavx \ --copt=-fvisibility=hidden \ - //native_client:libdeepspeech.so \ + //native_client:libstt.so \ --verbose_failures \ --action_env=LD_LIBRARY_PATH=${LD_LIBRARY_PATH} -# Copy built libs to /DeepSpeech/native_client -RUN cp bazel-bin/native_client/libdeepspeech.so /DeepSpeech/native_client/ +# Copy built libs to /STT/native_client +RUN cp bazel-bin/native_client/libstt.so /STT/native_client/ # Build client.cc and install Python client and decoder bindings -ENV TFDIR /DeepSpeech/tensorflow +ENV TFDIR /STT/tensorflow RUN nproc -WORKDIR /DeepSpeech/native_client -RUN make NUM_PROCESSES=$(nproc) deepspeech +WORKDIR /STT/native_client +RUN make NUM_PROCESSES=$(nproc) stt -WORKDIR /DeepSpeech +WORKDIR /STT RUN cd native_client/python && make NUM_PROCESSES=$(nproc) bindings RUN pip3 install --upgrade native_client/python/dist/*.whl @@ -176,8 +176,8 @@ RUN pip3 install --upgrade native_client/ctcdecode/dist/*.whl # Allow Python printing utf-8 ENV PYTHONIOENCODING UTF-8 -# Build KenLM in /DeepSpeech/native_client/kenlm folder -WORKDIR /DeepSpeech/native_client +# Build KenLM in /STT/native_client/kenlm folder +WORKDIR /STT/native_client RUN rm -rf kenlm && \ git clone https://github.com/kpu/kenlm && \ cd kenlm && \ @@ -188,4 +188,4 @@ RUN rm -rf kenlm && \ make -j $(nproc) # Done -WORKDIR /DeepSpeech +WORKDIR /STT diff --git a/Dockerfile.train.tmpl b/Dockerfile.train.tmpl index 9baa76d9..d306dcaf 100644 --- a/Dockerfile.train.tmpl +++ b/Dockerfile.train.tmpl @@ -3,8 +3,8 @@ FROM tensorflow/tensorflow:1.15.4-gpu-py3 ENV DEBIAN_FRONTEND=noninteractive -ENV DEEPSPEECH_REPO=#DEEPSPEECH_REPO# -ENV DEEPSPEECH_SHA=#DEEPSPEECH_SHA# +ENV STT_REPO=#STT_REPO# +ENV STT_SHA=#STT_SHA# RUN apt-get update && apt-get install -y --no-install-recommends \ apt-utils \ @@ -20,7 +20,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ unzip \ wget -# We need to remove it because it's breaking deepspeech install later with +# We need to remove it because it's breaking STT install later with # weird errors about setuptools RUN apt-get purge -y python3-xdg @@ -31,10 +31,10 @@ RUN apt-get install -y --no-install-recommends libopus0 libsndfile1 RUN 
rm -rf /var/lib/apt/lists/* WORKDIR / -RUN git clone $DEEPSPEECH_REPO DeepSpeech +RUN git clone $STT_REPO STT -WORKDIR /DeepSpeech -RUN git checkout $DEEPSPEECH_SHA +WORKDIR /STT +RUN git checkout $STT_SHA # Build CTC decoder first, to avoid clashes on incompatible versions upgrades RUN cd native_client/ctcdecode && make NUM_PROCESSES=$(nproc) bindings @@ -43,7 +43,7 @@ RUN pip3 install --upgrade native_client/ctcdecode/dist/*.whl # Prepare deps RUN pip3 install --upgrade pip==20.2.2 wheel==0.34.2 setuptools==49.6.0 -# Install DeepSpeech +# Install STT # - No need for the decoder since we did it earlier # - There is already correct TensorFlow GPU installed on the base image, # we don't want to break that @@ -54,7 +54,7 @@ RUN python3 util/taskcluster.py --source tensorflow --branch r1.15 \ --artifact convert_graphdef_memmapped_format --target . # Build KenLM to generate new scorers -WORKDIR /DeepSpeech/native_client +WORKDIR /STT/native_client RUN rm -rf kenlm && \ git clone https://github.com/kpu/kenlm && \ cd kenlm && \ @@ -63,6 +63,6 @@ RUN rm -rf kenlm && \ cd build && \ cmake .. && \ make -j $(nproc) -WORKDIR /DeepSpeech +WORKDIR /STT RUN ./bin/run-ldc93s1.sh diff --git a/GRAPH_VERSION b/GRAPH_VERSION index b9a65815..06f18ad2 120000 --- a/GRAPH_VERSION +++ b/GRAPH_VERSION @@ -1 +1 @@ -training/deepspeech_training/GRAPH_VERSION \ No newline at end of file +training/coqui_stt_training/GRAPH_VERSION \ No newline at end of file diff --git a/Makefile b/Makefile index 2d28d24b..6953c437 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ -DEEPSPEECH_REPO ?= https://github.com/mozilla/DeepSpeech.git -DEEPSPEECH_SHA ?= origin/master +STT_REPO ?= https://github.com/coqui-ai/STT.git +STT_SHA ?= origin/main Dockerfile%: Dockerfile%.tmpl sed \ - -e "s|#DEEPSPEECH_REPO#|$(DEEPSPEECH_REPO)|g" \ - -e "s|#DEEPSPEECH_SHA#|$(DEEPSPEECH_SHA)|g" \ + -e "s|#STT_REPO#|$(STT_REPO)|g" \ + -e "s|#STT_SHA#|$(STT_SHA)|g" \ < $< > $@ diff --git a/RELEASE.rst b/RELEASE.rst deleted file mode 100644 index 4e9143c0..00000000 --- a/RELEASE.rst +++ /dev/null @@ -1,12 +0,0 @@ - -Making a (new) release of the codebase -====================================== - - -* Update version in VERSION file, commit -* Open PR, ensure all tests are passing properly -* Merge the PR -* Fetch the new master, tag it with (hopefully) the same version as in VERSION -* Push that to Github -* New build should be triggered and new packages should be made -* TaskCluster should schedule a merge build **including** a "DeepSpeech Packages" task diff --git a/SUPPORT.rst b/SUPPORT.rst index d72a7418..ad8e64d5 100644 --- a/SUPPORT.rst +++ b/SUPPORT.rst @@ -5,8 +5,8 @@ Contact/Getting Help There are several ways to contact us or to get help: -#. `Discourse Forums `_ - The `Deep Speech category on Discourse `_ is the first place to look. Search for keywords related to your question or problem to see if someone else has run into it already. If you can't find anything relevant there, search on our `issue tracker `_ to see if there is an existing issue about your problem. +#. `GitHub Discussions `_ - `GitHub Discussions `_ is the first place to look. Search for keywords related to your question or problem to see if someone else has run into it already. If you can't find anything relevant there, search on our `issue tracker `_ to see if there is an existing issue about your problem. -#. 
`Matrix chat `_ - If your question is not addressed by either the `FAQ `_ or `Discourse Forums `_\ , you can contact us on the ``#machinelearning`` channel on `Mozilla Matrix `_\ ; people there can try to answer/help +#. `Matrix chat `_ - If your question is not addressed on `GitHub Discussions `_\ , you can contact us on the ``#stt:matrix.org`` `channel on Matrix `_. -#. `Create a new issue `_ - Finally, if you have a bug report or a feature request that isn't already covered by an existing issue, please open an issue in our repo and fill the appropriate information on your hardware and software setup. +#. `Create a new issue `_ - Finally, if you have a bug report or a feature request that isn't already covered by an existing issue, please open an issue in our repo and fill in the appropriate information about your hardware and software setup. diff --git a/VERSION b/VERSION index 8a3ed242..9b8b7c93 120000 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -training/deepspeech_training/VERSION \ No newline at end of file +training/coqui_stt_training/VERSION \ No newline at end of file diff --git a/bin/compare_samples.py b/bin/compare_samples.py index 19a60575..3bef72ca 100755 --- a/bin/compare_samples.py +++ b/bin/compare_samples.py @@ -6,8 +6,8 @@ import sys import argparse import numpy as np -from deepspeech_training.util.audio import AUDIO_TYPE_NP, mean_dbfs -from deepspeech_training.util.sample_collections import load_sample +from coqui_stt_training.util.audio import AUDIO_TYPE_NP, mean_dbfs +from coqui_stt_training.util.sample_collections import load_sample def fail(message): diff --git a/bin/data_set_tool.py b/bin/data_set_tool.py index 604684b9..521dda21 100755 --- a/bin/data_set_tool.py +++ b/bin/data_set_tool.py @@ -8,20 +8,20 @@ import argparse import progressbar from pathlib import Path -from deepspeech_training.util.audio import ( +from coqui_stt_training.util.audio import ( AUDIO_TYPE_PCM, AUDIO_TYPE_OPUS, AUDIO_TYPE_WAV, change_audio_types, ) -from deepspeech_training.util.downloader import SIMPLE_BAR -from deepspeech_training.util.sample_collections import ( +from coqui_stt_training.util.downloader import SIMPLE_BAR +from coqui_stt_training.util.sample_collections import ( CSVWriter, DirectSDBWriter, TarWriter, samples_from_sources, ) -from deepspeech_training.util.augmentations import ( +from coqui_stt_training.util.augmentations import ( parse_augmentations, apply_sample_augmentations, SampleAugmentation diff --git a/bin/import_aidatatang.py b/bin/import_aidatatang.py index c53eba09..8eac7de6 100755 --- a/bin/import_aidatatang.py +++ b/bin/import_aidatatang.py @@ -5,7 +5,7 @@ import tarfile import pandas -from deepspeech_training.util.importers import get_importers_parser +from coqui_stt_training.util.importers import get_importers_parser COLUMN_NAMES = ["wav_filename", "wav_filesize", "transcript"] diff --git a/bin/import_aishell.py b/bin/import_aishell.py index 341d0d88..3ca71f02 100755 --- a/bin/import_aishell.py +++ b/bin/import_aishell.py @@ -5,7 +5,7 @@ import tarfile import pandas -from deepspeech_training.util.importers import get_importers_parser +from coqui_stt_training.util.importers import get_importers_parser COLUMNNAMES = ["wav_filename", "wav_filesize", "transcript"] diff --git a/bin/import_ccpmf.py b/bin/import_ccpmf.py index 0895b144..0d670a43 100755 --- a/bin/import_ccpmf.py +++ b/bin/import_ccpmf.py @@ -30,9 +30,9 @@ except ImportError as ex: import requests import json -from deepspeech_training.util.downloader import SIMPLE_BAR, maybe_download -from
deepspeech_training.util.helpers import secs_to_hours -from deepspeech_training.util.importers import ( +from coqui_stt_training.util.downloader import SIMPLE_BAR, maybe_download +from coqui_stt_training.util.helpers import secs_to_hours +from coqui_stt_training.util.importers import ( get_counter, get_importers_parser, get_imported_samples, diff --git a/bin/import_cv.py b/bin/import_cv.py index 392a1301..a59c9a25 100755 --- a/bin/import_cv.py +++ b/bin/import_cv.py @@ -10,13 +10,13 @@ from multiprocessing import Pool import progressbar import sox -from deepspeech_training.util.downloader import SIMPLE_BAR, maybe_download -from deepspeech_training.util.importers import ( +from coqui_stt_training.util.downloader import SIMPLE_BAR, maybe_download +from coqui_stt_training.util.importers import ( get_counter, get_imported_samples, print_import_report, ) -from deepspeech_training.util.importers import validate_label_eng as validate_label +from coqui_stt_training.util.importers import validate_label_eng as validate_label FIELDNAMES = ["wav_filename", "wav_filesize", "transcript"] SAMPLE_RATE = 16000 @@ -35,7 +35,7 @@ def _download_and_preprocess_data(target_dir): archive_path = maybe_download(ARCHIVE_NAME, target_dir, ARCHIVE_URL) # Conditionally extract common voice data _maybe_extract(target_dir, ARCHIVE_DIR_NAME, archive_path) - # Conditionally convert common voice CSV files and mp3 data to DeepSpeech CSVs and wav + # Conditionally convert common voice CSV files and mp3 data to Coqui STT CSVs and wav _maybe_convert_sets(target_dir, ARCHIVE_DIR_NAME) diff --git a/bin/import_cv2.py b/bin/import_cv2.py index 19a5741c..fcc3635a 100755 --- a/bin/import_cv2.py +++ b/bin/import_cv2.py @@ -3,7 +3,7 @@ Broadly speaking, this script takes the audio downloaded from Common Voice for a certain language, in addition to the *.tsv files output by CorporaCreator, and the script formats the data and transcripts to be in a state usable by -DeepSpeech.py +train.py Use "python3 import_cv2.py -h" for help """ import csv @@ -15,8 +15,8 @@ from multiprocessing import Pool import progressbar import sox -from deepspeech_training.util.downloader import SIMPLE_BAR -from deepspeech_training.util.importers import ( +from coqui_stt_training.util.downloader import SIMPLE_BAR +from coqui_stt_training.util.importers import ( get_counter, get_imported_samples, get_importers_parser, @@ -138,9 +138,9 @@ def _maybe_convert_set(dataset, tsv_dir, audio_dir, filter_obj, space_after_ever print_import_report(counter, SAMPLE_RATE, MAX_SECS) output_csv = os.path.join(os.path.abspath(audio_dir), dataset + ".csv") - print("Saving new DeepSpeech-formatted CSV file to: ", output_csv) + print("Saving new Coqui STT-formatted CSV file to: ", output_csv) with open(output_csv, "w", encoding="utf-8", newline="") as output_csv_file: - print("Writing CSV file for DeepSpeech.py as: ", output_csv) + print("Writing CSV file for train.py as: ", output_csv) writer = csv.DictWriter(output_csv_file, fieldnames=FIELDNAMES) writer.writeheader() bar = progressbar.ProgressBar(max_value=len(rows), widgets=SIMPLE_BAR) diff --git a/bin/import_fisher.py b/bin/import_fisher.py index 1b5a495c..9c6f8a7b 100755 --- a/bin/import_fisher.py +++ b/bin/import_fisher.py @@ -11,7 +11,7 @@ import librosa import pandas import soundfile # <= Has an external dependency on libsndfile -from deepspeech_training.util.importers import validate_label_eng as validate_label +from coqui_stt_training.util.importers import validate_label_eng as validate_label # Prerequisite: Having the 
sph2pipe tool in your PATH: # https://www.ldc.upenn.edu/language-resources/tools/sphere-conversion-tools diff --git a/bin/import_freestmandarin.py b/bin/import_freestmandarin.py index 55ce9128..f1838d91 100755 --- a/bin/import_freestmandarin.py +++ b/bin/import_freestmandarin.py @@ -6,7 +6,7 @@ import tarfile import numpy as np import pandas -from deepspeech_training.util.importers import get_importers_parser +from coqui_stt_training.util.importers import get_importers_parser COLUMN_NAMES = ["wav_filename", "wav_filesize", "transcript"] diff --git a/bin/import_gram_vaani.py b/bin/import_gram_vaani.py index 71fcee08..80bf0241 100755 --- a/bin/import_gram_vaani.py +++ b/bin/import_gram_vaani.py @@ -12,7 +12,7 @@ import pandas as pd from sox import Transformer import swifter -from deepspeech_training.util.importers import get_importers_parser, get_validate_label +from coqui_stt_training.util.importers import get_importers_parser, get_validate_label __version__ = "0.1.0" _logger = logging.getLogger(__name__) diff --git a/bin/import_ldc93s1.py b/bin/import_ldc93s1.py index 86a00d74..85088b93 100755 --- a/bin/import_ldc93s1.py +++ b/bin/import_ldc93s1.py @@ -4,7 +4,7 @@ import sys import pandas -from deepspeech_training.util.downloader import maybe_download +from coqui_stt_training.util.downloader import maybe_download def _download_and_preprocess_data(data_dir): diff --git a/bin/import_librivox.py b/bin/import_librivox.py index 32c1d20a..491488fa 100755 --- a/bin/import_librivox.py +++ b/bin/import_librivox.py @@ -12,7 +12,7 @@ import progressbar from sox import Transformer from tensorflow.python.platform import gfile -from deepspeech_training.util.downloader import maybe_download +from coqui_stt_training.util.downloader import maybe_download SAMPLE_RATE = 16000 diff --git a/bin/import_lingua_libre.py b/bin/import_lingua_libre.py index 956d7a0b..1c8f31ae 100755 --- a/bin/import_lingua_libre.py +++ b/bin/import_lingua_libre.py @@ -12,8 +12,8 @@ from multiprocessing import Pool import progressbar import sox -from deepspeech_training.util.downloader import SIMPLE_BAR, maybe_download -from deepspeech_training.util.importers import ( +from coqui_stt_training.util.downloader import SIMPLE_BAR, maybe_download +from coqui_stt_training.util.importers import ( get_counter, get_imported_samples, get_importers_parser, diff --git a/bin/import_m-ailabs.py b/bin/import_m-ailabs.py index bbaa744b..0e655612 100755 --- a/bin/import_m-ailabs.py +++ b/bin/import_m-ailabs.py @@ -10,8 +10,8 @@ from multiprocessing import Pool import progressbar -from deepspeech_training.util.downloader import SIMPLE_BAR, maybe_download -from deepspeech_training.util.importers import ( +from coqui_stt_training.util.downloader import SIMPLE_BAR, maybe_download +from coqui_stt_training.util.importers import ( get_counter, get_imported_samples, get_importers_parser, diff --git a/bin/import_magicdata.py b/bin/import_magicdata.py index c8502784..8b289804 100755 --- a/bin/import_magicdata.py +++ b/bin/import_magicdata.py @@ -6,7 +6,7 @@ import wave import pandas -from deepspeech_training.util.importers import get_importers_parser +from coqui_stt_training.util.importers import get_importers_parser COLUMN_NAMES = ["wav_filename", "wav_filesize", "transcript"] diff --git a/bin/import_primewords.py b/bin/import_primewords.py index 08f3302a..4643bd39 100755 --- a/bin/import_primewords.py +++ b/bin/import_primewords.py @@ -7,7 +7,7 @@ import tarfile import numpy as np import pandas -from deepspeech_training.util.importers import 
get_importers_parser +from coqui_stt_training.util.importers import get_importers_parser COLUMN_NAMES = ["wav_filename", "wav_filesize", "transcript"] diff --git a/bin/import_slr57.py b/bin/import_slr57.py index 57588696..94869c21 100755 --- a/bin/import_slr57.py +++ b/bin/import_slr57.py @@ -9,8 +9,8 @@ from multiprocessing import Pool import progressbar -from deepspeech_training.util.downloader import SIMPLE_BAR, maybe_download -from deepspeech_training.util.importers import ( +from coqui_stt_training.util.downloader import SIMPLE_BAR, maybe_download +from coqui_stt_training.util.importers import ( get_counter, get_imported_samples, get_importers_parser, diff --git a/bin/import_swb.py b/bin/import_swb.py index c55ce298..b192d9f8 100755 --- a/bin/import_swb.py +++ b/bin/import_swb.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # ensure that you have downloaded the LDC dataset LDC97S62 and tar exists in a folder e.g. # ./data/swb/swb1_LDC97S62.tgz -# from the deepspeech directory run with: ./bin/import_swb.py ./data/swb/ +# from the Coqui STT directory run with: ./bin/import_swb.py ./data/swb/ import codecs import fnmatch import os @@ -17,7 +17,7 @@ import pandas import requests import soundfile # <= Has an external dependency on libsndfile -from deepspeech_training.util.importers import validate_label_eng as validate_label +from coqui_stt_training.util.importers import validate_label_eng as validate_label # ARCHIVE_NAME refers to ISIP alignments from 01/29/03 ARCHIVE_NAME = "switchboard_word_alignments.tar.gz" diff --git a/bin/import_swc.py b/bin/import_swc.py index 3775de05..d660b300 100755 --- a/bin/import_swc.py +++ b/bin/import_swc.py @@ -1,6 +1,6 @@ #!/usr/bin/env python """ -Downloads and prepares (parts of) the "Spoken Wikipedia Corpora" for DeepSpeech.py +Downloads and prepares (parts of) the "Spoken Wikipedia Corpora" for train.py Use "python3 import_swc.py -h" for help """ @@ -22,8 +22,8 @@ from multiprocessing.pool import ThreadPool import progressbar import sox -from deepspeech_training.util.downloader import SIMPLE_BAR, maybe_download -from deepspeech_training.util.importers import validate_label_eng as validate_label +from coqui_stt_training.util.downloader import SIMPLE_BAR, maybe_download +from coqui_stt_training.util.importers import validate_label_eng as validate_label from ds_ctcdecoder import Alphabet SWC_URL = "https://www2.informatik.uni-hamburg.de/nats/pub/SWC/SWC_{language}.tar" diff --git a/bin/import_ted.py b/bin/import_ted.py index bad1452f..f88a248f 100755 --- a/bin/import_ted.py +++ b/bin/import_ted.py @@ -10,8 +10,8 @@ import pandas from sox import Transformer from tensorflow.python.platform import gfile -from deepspeech_training.util.downloader import maybe_download -from deepspeech_training.util.stm import parse_stm_file +from coqui_stt_training.util.downloader import maybe_download +from coqui_stt_training.util.stm import parse_stm_file def _download_and_preprocess_data(data_dir): diff --git a/bin/import_ts.py b/bin/import_ts.py index e0130130..0ce3fdf2 100755 --- a/bin/import_ts.py +++ b/bin/import_ts.py @@ -10,8 +10,8 @@ import progressbar import sox import unidecode -from deepspeech_training.util.downloader import SIMPLE_BAR, maybe_download -from deepspeech_training.util.importers import ( +from coqui_stt_training.util.downloader import SIMPLE_BAR, maybe_download +from coqui_stt_training.util.importers import ( get_counter, get_imported_samples, get_importers_parser, @@ -25,7 +25,7 @@ MAX_SECS = 15 ARCHIVE_NAME = "2019-04-11_fr_FR" ARCHIVE_DIR_NAME = "ts_" + 
ARCHIVE_NAME ARCHIVE_URL = ( "https://deepspeech-storage-mirror.s3.fr-par.scw.cloud/" + ARCHIVE_NAME + ".zip" ) @@ -38,7 +38,7 @@ def _download_and_preprocess_data(target_dir, english_compatible=False): ) # Conditionally extract archive data _maybe_extract(target_dir, ARCHIVE_DIR_NAME, archive_path) - # Conditionally convert TrainingSpeech data to DeepSpeech CSVs and wav + # Conditionally convert TrainingSpeech data to Coqui STT CSVs and wav _maybe_convert_sets( target_dir, ARCHIVE_DIR_NAME, english_compatible=english_compatible ) diff --git a/bin/import_tuda.py b/bin/import_tuda.py index da0cb42b..16f4dcc8 100755 --- a/bin/import_tuda.py +++ b/bin/import_tuda.py @@ -1,6 +1,6 @@ #!/usr/bin/env python """ -Downloads and prepares (parts of) the "German Distant Speech" corpus (TUDA) for DeepSpeech.py +Downloads and prepares (parts of) the "German Distant Speech" corpus (TUDA) for train.py Use "python3 import_tuda.py -h" for help """ import argparse @@ -14,8 +14,8 @@ from collections import Counter import progressbar -from deepspeech_training.util.downloader import SIMPLE_BAR, maybe_download -from deepspeech_training.util.importers import validate_label_eng as validate_label +from coqui_stt_training.util.downloader import SIMPLE_BAR, maybe_download +from coqui_stt_training.util.importers import validate_label_eng as validate_label from ds_ctcdecoder import Alphabet TUDA_VERSION = "v2" diff --git a/bin/import_vctk.py b/bin/import_vctk.py index f9c86799..b2b85b6d 100755 --- a/bin/import_vctk.py +++ b/bin/import_vctk.py @@ -11,8 +11,8 @@ from zipfile import ZipFile import librosa import progressbar -from deepspeech_training.util.downloader import SIMPLE_BAR, maybe_download -from deepspeech_training.util.importers import ( +from coqui_stt_training.util.downloader import SIMPLE_BAR, maybe_download +from coqui_stt_training.util.importers import ( get_counter, get_imported_samples, print_import_report, @@ -35,7 +35,7 @@ def _download_and_preprocess_data(target_dir): archive_path = maybe_download(ARCHIVE_NAME, target_dir, ARCHIVE_URL) # Conditionally extract common voice data _maybe_extract(target_dir, ARCHIVE_DIR_NAME, archive_path) - # Conditionally convert common voice CSV files and mp3 data to DeepSpeech CSVs and wav + # Conditionally convert common voice CSV files and mp3 data to Coqui STT CSVs and wav _maybe_convert_sets(target_dir, ARCHIVE_DIR_NAME) diff --git a/bin/import_voxforge.py b/bin/import_voxforge.py index cae5f744..b01dca72 100755 --- a/bin/import_voxforge.py +++ b/bin/import_voxforge.py @@ -14,7 +14,7 @@ from os import makedirs, path import pandas from bs4 import BeautifulSoup from tensorflow.python.platform import gfile -from deepspeech_training.util.downloader import maybe_download +from coqui_stt_training.util.downloader import maybe_download """The number of jobs to run in parallel""" NUM_PARALLEL = 8 diff --git a/bin/play.py b/bin/play.py index 60383344..59433e18 100755 --- a/bin/play.py +++ b/bin/play.py @@ -1,6 +1,6 @@ #!/usr/bin/env python """ -Tool for playing (and augmenting) single samples or samples from Sample Databases (SDB files) and DeepSpeech CSV files +Tool for playing (and augmenting) single samples or samples from Sample Databases (SDB files) and 🐸STT CSV files Use "python3 play.py -h" for help """ @@ -9,9 +9,9 @@ import sys import random import argparse -from deepspeech_training.util.audio import get_loadable_audio_type_from_extension, AUDIO_TYPE_PCM, AUDIO_TYPE_WAV -from
deepspeech_training.util.sample_collections import SampleList, LabeledSample, samples_from_source -from deepspeech_training.util.augmentations import parse_augmentations, apply_sample_augmentations, SampleAugmentation +from coqui_stt_training.util.audio import get_loadable_audio_type_from_extension, AUDIO_TYPE_PCM, AUDIO_TYPE_WAV +from coqui_stt_training.util.sample_collections import SampleList, LabeledSample, samples_from_source +from coqui_stt_training.util.augmentations import parse_augmentations, apply_sample_augmentations, SampleAugmentation def get_samples_in_play_order(): @@ -68,7 +68,7 @@ def play_collection(): def handle_args(): parser = argparse.ArgumentParser( description="Tool for playing (and augmenting) single samples or samples from Sample Databases (SDB files) " - "and DeepSpeech CSV files" + "and Coqui STT CSV files" ) parser.add_argument("source", help="Sample DB, CSV or WAV file to play samples from") parser.add_argument( diff --git a/bin/run-ldc93s1.sh b/bin/run-ldc93s1.sh index 4bee5c70..3f635da5 100755 --- a/bin/run-ldc93s1.sh +++ b/bin/run-ldc93s1.sh @@ -1,7 +1,7 @@ #!/bin/sh set -xe -if [ ! -f DeepSpeech.py ]; then - echo "Please make sure you run this from DeepSpeech's top level directory." +if [ ! -f train.py ]; then + echo "Please make sure you run this from STT's top level directory." exit 1 fi; @@ -20,7 +20,7 @@ fi # and when trying to run on multiple devices (like GPUs), this will break export CUDA_VISIBLE_DEVICES=0 -python -u DeepSpeech.py --noshow_progressbar \ +python -u train.py --noshow_progressbar \ --train_files data/ldc93s1/ldc93s1.csv \ --test_files data/ldc93s1/ldc93s1.csv \ --train_batch_size 1 \ diff --git a/bin/run-tc-graph_augmentations.sh b/bin/run-tc-graph_augmentations.sh index 9b6181ae..c958ef2e 100755 --- a/bin/run-tc-graph_augmentations.sh +++ b/bin/run-tc-graph_augmentations.sh @@ -14,7 +14,7 @@ fi; # and when trying to run on multiple devices (like GPUs), this will break export CUDA_VISIBLE_DEVICES=0 -python -u DeepSpeech.py --noshow_progressbar --noearly_stop \ +python -u train.py --noshow_progressbar --noearly_stop \ --train_files ${ldc93s1_csv} --train_batch_size 1 \ --scorer "" \ --augment dropout \ diff --git a/bin/run-tc-ldc93s1_checkpoint.sh b/bin/run-tc-ldc93s1_checkpoint.sh index 9dc4e84e..c499c5a8 100755 --- a/bin/run-tc-ldc93s1_checkpoint.sh +++ b/bin/run-tc-ldc93s1_checkpoint.sh @@ -14,7 +14,7 @@ fi; # and when trying to run on multiple devices (like GPUs), this will break export CUDA_VISIBLE_DEVICES=0 -python -u DeepSpeech.py --noshow_progressbar --noearly_stop \ +python -u train.py --noshow_progressbar --noearly_stop \ --train_files ${ldc93s1_csv} --train_batch_size 1 \ --dev_files ${ldc93s1_csv} --dev_batch_size 1 \ --test_files ${ldc93s1_csv} --test_batch_size 1 \ diff --git a/bin/run-tc-ldc93s1_checkpoint_bytes.sh b/bin/run-tc-ldc93s1_checkpoint_bytes.sh index d6fe98e9..8af85a44 100755 --- a/bin/run-tc-ldc93s1_checkpoint_bytes.sh +++ b/bin/run-tc-ldc93s1_checkpoint_bytes.sh @@ -14,7 +14,7 @@ fi; # and when trying to run on multiple devices (like GPUs), this will break export CUDA_VISIBLE_DEVICES=0 -python -u DeepSpeech.py --noshow_progressbar --noearly_stop \ +python -u train.py --noshow_progressbar --noearly_stop \ --train_files ${ldc93s1_csv} --train_batch_size 1 \ --dev_files ${ldc93s1_csv} --dev_batch_size 1 \ --test_files ${ldc93s1_csv} --test_batch_size 1 \ diff --git a/bin/run-tc-ldc93s1_checkpoint_sdb.sh b/bin/run-tc-ldc93s1_checkpoint_sdb.sh index c811f984..d3006f30 100755 --- 
a/bin/run-tc-ldc93s1_checkpoint_sdb.sh +++ b/bin/run-tc-ldc93s1_checkpoint_sdb.sh @@ -20,7 +20,7 @@ fi; # and when trying to run on multiple devices (like GPUs), this will break export CUDA_VISIBLE_DEVICES=0 -python -u DeepSpeech.py --noshow_progressbar --noearly_stop \ +python -u train.py --noshow_progressbar --noearly_stop \ --train_files ${ldc93s1_sdb} --train_batch_size 1 \ --dev_files ${ldc93s1_sdb} --dev_batch_size 1 \ --test_files ${ldc93s1_sdb} --test_batch_size 1 \ diff --git a/bin/run-tc-ldc93s1_new.sh b/bin/run-tc-ldc93s1_new.sh index 8e9cf4d4..fb1d48ca 100755 --- a/bin/run-tc-ldc93s1_new.sh +++ b/bin/run-tc-ldc93s1_new.sh @@ -17,7 +17,7 @@ fi; # and when trying to run on multiple devices (like GPUs), this will break export CUDA_VISIBLE_DEVICES=0 -python -u DeepSpeech.py --noshow_progressbar --noearly_stop \ +python -u train.py --noshow_progressbar --noearly_stop \ --train_files ${ldc93s1_csv} --train_batch_size 1 \ --feature_cache '/tmp/ldc93s1_cache' \ --dev_files ${ldc93s1_csv} --dev_batch_size 1 \ diff --git a/bin/run-tc-ldc93s1_new_bytes.sh b/bin/run-tc-ldc93s1_new_bytes.sh index 5ce787d3..2296ed1f 100755 --- a/bin/run-tc-ldc93s1_new_bytes.sh +++ b/bin/run-tc-ldc93s1_new_bytes.sh @@ -17,7 +17,7 @@ fi; # and when trying to run on multiple devices (like GPUs), this will break export CUDA_VISIBLE_DEVICES=0 -python -u DeepSpeech.py --noshow_progressbar --noearly_stop \ +python -u train.py --noshow_progressbar --noearly_stop \ --train_files ${ldc93s1_csv} --train_batch_size 1 \ --feature_cache '/tmp/ldc93s1_cache' \ --dev_files ${ldc93s1_csv} --dev_batch_size 1 \ diff --git a/bin/run-tc-ldc93s1_new_bytes_tflite.sh b/bin/run-tc-ldc93s1_new_bytes_tflite.sh index f1a79f12..3cb8da59 100755 --- a/bin/run-tc-ldc93s1_new_bytes_tflite.sh +++ b/bin/run-tc-ldc93s1_new_bytes_tflite.sh @@ -16,7 +16,7 @@ fi; # and when trying to run on multiple devices (like GPUs), this will break export CUDA_VISIBLE_DEVICES=0 -python -u DeepSpeech.py --noshow_progressbar \ +python -u train.py --noshow_progressbar \ --n_hidden 100 \ --checkpoint_dir '/tmp/ckpt_bytes' \ --export_dir '/tmp/train_bytes_tflite' \ diff --git a/bin/run-tc-ldc93s1_new_metrics.sh b/bin/run-tc-ldc93s1_new_metrics.sh index 01403bf1..6077cb41 100755 --- a/bin/run-tc-ldc93s1_new_metrics.sh +++ b/bin/run-tc-ldc93s1_new_metrics.sh @@ -17,7 +17,7 @@ fi; # and when trying to run on multiple devices (like GPUs), this will break export CUDA_VISIBLE_DEVICES=0 -python -u DeepSpeech.py --noshow_progressbar --noearly_stop \ +python -u train.py --noshow_progressbar --noearly_stop \ --train_files ${ldc93s1_csv} --train_batch_size 1 \ --dev_files ${ldc93s1_csv} --dev_batch_size 1 \ --test_files ${ldc93s1_csv} --test_batch_size 1 \ diff --git a/bin/run-tc-ldc93s1_new_sdb.sh b/bin/run-tc-ldc93s1_new_sdb.sh index 6cd4a450..47c8eefc 100755 --- a/bin/run-tc-ldc93s1_new_sdb.sh +++ b/bin/run-tc-ldc93s1_new_sdb.sh @@ -23,7 +23,7 @@ fi; # and when trying to run on multiple devices (like GPUs), this will break export CUDA_VISIBLE_DEVICES=0 -python -u DeepSpeech.py --noshow_progressbar --noearly_stop \ +python -u train.py --noshow_progressbar --noearly_stop \ --train_files ${ldc93s1_sdb} --train_batch_size 1 \ --dev_files ${ldc93s1_sdb} --dev_batch_size 1 \ --test_files ${ldc93s1_sdb} --test_batch_size 1 \ diff --git a/bin/run-tc-ldc93s1_new_sdb_csv.sh b/bin/run-tc-ldc93s1_new_sdb_csv.sh index ec3e7774..3a1f61ef 100755 --- a/bin/run-tc-ldc93s1_new_sdb_csv.sh +++ b/bin/run-tc-ldc93s1_new_sdb_csv.sh @@ -23,7 +23,7 @@ fi; # and when trying to run on multiple 
devices (like GPUs), this will break export CUDA_VISIBLE_DEVICES=0 -python -u DeepSpeech.py --noshow_progressbar --noearly_stop \ +python -u train.py --noshow_progressbar --noearly_stop \ --train_files ${ldc93s1_sdb},${ldc93s1_csv} --train_batch_size 1 \ --feature_cache '/tmp/ldc93s1_cache_sdb_csv' \ --dev_files ${ldc93s1_sdb},${ldc93s1_csv} --dev_batch_size 1 \ diff --git a/bin/run-tc-ldc93s1_singleshotinference.sh b/bin/run-tc-ldc93s1_singleshotinference.sh index 997bf08f..cf5e4abb 100755 --- a/bin/run-tc-ldc93s1_singleshotinference.sh +++ b/bin/run-tc-ldc93s1_singleshotinference.sh @@ -14,7 +14,7 @@ fi; # and when trying to run on multiple devices (like GPUs), this will break export CUDA_VISIBLE_DEVICES=0 -python -u DeepSpeech.py --noshow_progressbar --noearly_stop \ +python -u train.py --noshow_progressbar --noearly_stop \ --train_files ${ldc93s1_csv} --train_batch_size 1 \ --dev_files ${ldc93s1_csv} --dev_batch_size 1 \ --test_files ${ldc93s1_csv} --test_batch_size 1 \ @@ -23,7 +23,7 @@ python -u DeepSpeech.py --noshow_progressbar --noearly_stop \ --learning_rate 0.001 --dropout_rate 0.05 \ --scorer_path 'data/smoke_test/pruned_lm.scorer' -python -u DeepSpeech.py \ +python -u train.py \ --n_hidden 100 \ --checkpoint_dir '/tmp/ckpt' \ --scorer_path 'data/smoke_test/pruned_lm.scorer' \ diff --git a/bin/run-tc-ldc93s1_tflite.sh b/bin/run-tc-ldc93s1_tflite.sh index f7daca21..ca9fd976 100755 --- a/bin/run-tc-ldc93s1_tflite.sh +++ b/bin/run-tc-ldc93s1_tflite.sh @@ -16,7 +16,7 @@ fi; # and when trying to run on multiple devices (like GPUs), this will break export CUDA_VISIBLE_DEVICES=0 -python -u DeepSpeech.py --noshow_progressbar \ +python -u train.py --noshow_progressbar \ --n_hidden 100 \ --checkpoint_dir '/tmp/ckpt' \ --export_dir '/tmp/train_tflite' \ @@ -26,7 +26,7 @@ python -u DeepSpeech.py --noshow_progressbar \ mkdir /tmp/train_tflite/en-us -python -u DeepSpeech.py --noshow_progressbar \ +python -u train.py --noshow_progressbar \ --n_hidden 100 \ --checkpoint_dir '/tmp/ckpt' \ --export_dir '/tmp/train_tflite/en-us' \ diff --git a/bin/run-tc-transfer.sh b/bin/run-tc-transfer.sh index aae6d71a..4a0edeab 100755 --- a/bin/run-tc-transfer.sh +++ b/bin/run-tc-transfer.sh @@ -29,7 +29,7 @@ for LOAD in 'init' 'last' 'auto'; do echo "########################################################" echo "#### Train ENGLISH model with just --checkpoint_dir ####" echo "########################################################" - python -u DeepSpeech.py --noshow_progressbar --noearly_stop \ + python -u train.py --noshow_progressbar --noearly_stop \ --alphabet_config_path "./data/alphabet.txt" \ --load_train "$LOAD" \ --train_files "${ldc93s1_csv}" --train_batch_size 1 \ @@ -43,7 +43,7 @@ for LOAD in 'init' 'last' 'auto'; do echo "##############################################################################" echo "#### Train ENGLISH model with --save_checkpoint_dir --load_checkpoint_dir ####" echo "##############################################################################" - python -u DeepSpeech.py --noshow_progressbar --noearly_stop \ + python -u train.py --noshow_progressbar --noearly_stop \ --alphabet_config_path "./data/alphabet.txt" \ --load_train "$LOAD" \ --train_files "${ldc93s1_csv}" --train_batch_size 1 \ @@ -58,7 +58,7 @@ for LOAD in 'init' 'last' 'auto'; do echo "####################################################################################" echo "#### Transfer to RUSSIAN model with --save_checkpoint_dir --load_checkpoint_dir ####" echo 
"####################################################################################" - python -u DeepSpeech.py --noshow_progressbar --noearly_stop \ + python -u train.py --noshow_progressbar --noearly_stop \ --drop_source_layers 1 \ --alphabet_config_path "${ru_dir}/alphabet.ru" \ --load_train 'last' \ diff --git a/data/README.rst b/data/README.rst index f731a31c..289146c9 100644 --- a/data/README.rst +++ b/data/README.rst @@ -3,9 +3,9 @@ Language-Specific Data This directory contains language-specific data files. Most importantly, you will find here: -1. A list of unique characters for the target language (e.g. English) in ``data/alphabet.txt``. After installing the training code, you can check ``python -m deepspeech_training.util.check_characters --help`` for a tool that creates an alphabet file from a list of training CSV files. +1. A list of unique characters for the target language (e.g. English) in ``data/alphabet.txt``. After installing the training code, you can check ``python -m coqui_stt_training.util.check_characters --help`` for a tool that creates an alphabet file from a list of training CSV files. 2. A script used to generate a binary n-gram language model: ``data/lm/generate_lm.py``. -For more information on how to build these resources from scratch, see the ``External scorer scripts`` section on `deepspeech.readthedocs.io `_. +For more information on how to build these resources from scratch, see the ``External scorer scripts`` section on `stt.readthedocs.io `_. diff --git a/data/lm/generate_lm.py b/data/lm/generate_lm.py index 659d5077..47941437 100644 --- a/data/lm/generate_lm.py +++ b/data/lm/generate_lm.py @@ -130,7 +130,7 @@ def build_lm(args, data_lower, vocab_str): def main(): parser = argparse.ArgumentParser( - description="Generate lm.binary and top-k vocab for DeepSpeech." + description="Generate lm.binary and top-k vocab for Coqui STT." ) parser.add_argument( "--input_txt", diff --git a/doc/DeepSpeech.rst b/doc/Architecture.rst similarity index 99% rename from doc/DeepSpeech.rst rename to doc/Architecture.rst index 3d74d22e..a701a888 100644 --- a/doc/DeepSpeech.rst +++ b/doc/Architecture.rst @@ -1,5 +1,5 @@ -DeepSpeech Model -================ +STT Model +========= The aim of this project is to create a simple, open, and ubiquitous speech recognition engine. Simple, in that the engine should not require server-class diff --git a/doc/BUILDING.rst b/doc/BUILDING.rst index 56484205..fea38f40 100644 --- a/doc/BUILDING.rst +++ b/doc/BUILDING.rst @@ -1,12 +1,12 @@ .. _build-native-client: -Building DeepSpeech Binaries -============================ +Building Coqui STT Binaries +=========================== This section describes how to rebuild binaries. We have already several prebuilt binaries for all the supported platform, it is highly advised to use them except if you know what you are doing. -If you'd like to build the DeepSpeech binaries yourself, you'll need the following pre-requisites downloaded and installed: +If you'd like to build the 🐸STT binaries yourself, you'll need the following pre-requisites downloaded and installed: * `Bazel 3.1.0 `_ * `General TensorFlow r2.3 requirements `_ @@ -26,18 +26,18 @@ If you'd like to build the language bindings or the decoder package, you'll also Dependencies ------------ -If you follow these instructions, you should compile your own binaries of DeepSpeech (built on TensorFlow using Bazel). +If you follow these instructions, you should compile your own binaries of 🐸STT (built on TensorFlow using Bazel). 
For more information on configuring TensorFlow, read the docs up to the end of `"Configure the Build" `_. Checkout source code ^^^^^^^^^^^^^^^^^^^^ -Clone DeepSpeech source code (TensorFlow will come as a submdule): +Clone 🐸STT source code (TensorFlow will come as a submodule): .. code-block:: - git clone https://github.com/mozilla/DeepSpeech.git + git clone https://github.com/coqui-ai/STT.git git submodule sync tensorflow/ git submodule update --init tensorflow/ @@ -56,24 +56,24 @@ After you have installed the correct version of Bazel, configure TensorFlow: cd tensorflow ./configure -Compile DeepSpeech ------------------- +Compile Coqui STT +----------------- -Compile ``libdeepspeech.so`` +Compile ``libstt.so`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Within your TensorFlow directory, there should be a symbolic link to the DeepSpeech ``native_client`` directory. If it is not present, create it with the follow command: +Within your TensorFlow directory, there should be a symbolic link to the 🐸STT ``native_client`` directory. If it is not present, create it with the following command: .. code-block:: cd tensorflow ln -s ../native_client -You can now use Bazel to build the main DeepSpeech library, ``libdeepspeech.so``. Add ``--config=cuda`` if you want a CUDA build. +You can now use Bazel to build the main 🐸STT library, ``libstt.so``. Add ``--config=cuda`` if you want a CUDA build. .. code-block:: - bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic -c opt --copt=-O3 --copt="-D_GLIBCXX_USE_CXX11_ABI=0" --copt=-fvisibility=hidden //native_client:libdeepspeech.so + bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic -c opt --copt=-O3 --copt="-D_GLIBCXX_USE_CXX11_ABI=0" --copt=-fvisibility=hidden //native_client:libstt.so The generated binaries will be saved to ``bazel-bin/native_client/``. @@ -82,24 +82,24 @@ The generated binaries will be saved to ``bazel-bin/native_client/``. Compile ``generate_scorer_package`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Following the same setup as for ``libdeepspeech.so`` above, you can rebuild the ``generate_scorer_package`` binary by adding its target to the command line: ``//native_client:generate_scorer_package``. +Following the same setup as for ``libstt.so`` above, you can rebuild the ``generate_scorer_package`` binary by adding its target to the command line: ``//native_client:generate_scorer_package``. Using the example from above you can build the library and that binary at the same time: .. code-block:: - bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic -c opt --copt=-O3 --copt="-D_GLIBCXX_USE_CXX11_ABI=0" --copt=-fvisibility=hidden //native_client:libdeepspeech.so //native_client:generate_scorer_package + bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic -c opt --copt=-O3 --copt="-D_GLIBCXX_USE_CXX11_ABI=0" --copt=-fvisibility=hidden //native_client:libstt.so //native_client:generate_scorer_package The generated binaries will be saved to ``bazel-bin/native_client/``. Compile Language Bindings ^^^^^^^^^^^^^^^^^^^^^^^^^ -Now, ``cd`` into the ``DeepSpeech/native_client`` directory and use the ``Makefile`` to build all the language bindings (C++ client, Python package, Nodejs package, etc.).
+Now, ``cd`` into the ``STT/native_client`` directory and use the ``Makefile`` to build all the language bindings (C++ client, Python package, Nodejs package, etc.).

.. code-block::

-   cd ../DeepSpeech/native_client
-   make deepspeech
+   cd ../STT/native_client
+   make stt

Installing your own Binaries
----------------------------

@@ -121,9 +121,9 @@ Included are a set of generated Python bindings. After following the above build

   cd native_client/python
   make bindings
-   pip install dist/deepspeech*
+   pip install dist/stt-*

-The API mirrors the C++ API and is demonstrated in `client.py `_. Refer to `deepspeech.h `_ for documentation.
+The API mirrors the C++ API and is demonstrated in `client.py `_. Refer to `coqui-stt.h `_ for documentation.

Install NodeJS / ElectronJS bindings
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

@@ -136,7 +136,7 @@ After following the above build and installation instructions, the Node.JS bindi

   make build
   make npm-pack

-This will create the package ``deepspeech-VERSION.tgz`` in ``native_client/javascript``.
+This will create the package ``stt-VERSION.tgz`` in ``native_client/javascript``.

Install the CTC decoder package
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

@@ -196,23 +196,23 @@
So your command line for ``RPi3`` and ``ARMv7`` should look like:

.. code-block::

-   bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic --config=rpi3 --config=rpi3_opt -c opt --copt=-O3 --copt=-fvisibility=hidden //native_client:libdeepspeech.so
+   bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic --config=rpi3 --config=rpi3_opt -c opt --copt=-O3 --copt=-fvisibility=hidden //native_client:libstt.so

And your command line for ``LePotato`` and ``ARM64`` should look like:

.. code-block::

-   bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic --config=rpi3-armv8 --config=rpi3-armv8_opt -c opt --copt=-O3 --copt=-fvisibility=hidden //native_client:libdeepspeech.so
+   bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic --config=rpi3-armv8 --config=rpi3-armv8_opt -c opt --copt=-O3 --copt=-fvisibility=hidden //native_client:libstt.so

While we test only on RPi3 Raspbian Buster and LePotato ARMBian Buster, anything compatible with ``armv7-a cortex-a53`` or ``armv8-a cortex-a53`` should be fine.

-The ``deepspeech`` binary can also be cross-built, with ``TARGET=rpi3`` or ``TARGET=rpi3-armv8``. This might require you to setup a system tree using the tool ``multistrap`` and the multitrap configuration files: ``native_client/multistrap_armbian64_buster.conf`` and ``native_client/multistrap_raspbian_buster.conf``.
+The ``stt`` binary can also be cross-built, with ``TARGET=rpi3`` or ``TARGET=rpi3-armv8``. This might require you to set up a system tree using the tool ``multistrap`` and the multistrap configuration files: ``native_client/multistrap_armbian64_buster.conf`` and ``native_client/multistrap_raspbian_buster.conf``.

The path of the system tree can be overridden from the default values defined in ``definitions.mk`` through the ``RASPBIAN`` ``make`` variable.

.. code-block::

-   cd ../DeepSpeech/native_client
-   make TARGET= deepspeech
+   cd ../STT/native_client
+   make TARGET= stt

Android devices support
-----------------------

@@ -224,64 +224,64 @@ Please refer to TensorFlow documentation on how to setup the environment to buil

Using the library from Android project
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-We provide uptodate and tested ``libdeepspeech`` usable as an ``AAR`` package,
-for Android versions starting with 7.0 to 11.0. The package is published on
-`JCenter `_,
-and the ``JCenter`` repository should be available by default in any Android
-project. Please make sure your project is setup to pull from this repository.
-You can then include the library by just adding this line to your
-``gradle.build``, adjusting ``VERSION`` to the version you need:
+Due to the discontinuation of Bintray JCenter, we do not have pre-built Android packages published for now. We are working to move to Maven Central and will update this section when it's available.

-.. code-block::
+.. We provide up-to-date and tested ``libstt`` usable as an ``AAR`` package,
+   for Android versions starting with 7.0 to 11.0. The package is published on
+   `JCenter `_,
+   and the ``JCenter`` repository should be available by default in any Android
+   project. Please make sure your project is setup to pull from this repository.
+   You can then include the library by just adding this line to your
+   ``gradle.build``, adjusting ``VERSION`` to the version you need:
+
+   .. code-block::
+
+      implementation 'stt.coqui.ai:libstt:VERSION@aar'

-   implementation 'deepspeech.mozilla.org:libdeepspeech:VERSION@aar'
-
-Building ``libdeepspeech.so``
+Building ``libstt.so``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-You can build the ``libdeepspeech.so`` using (ARMv7):
+You can build ``libstt.so`` using (ARMv7):

.. code-block::

-   bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic --config=android --config=android_arm --define=runtime=tflite --action_env ANDROID_NDK_API_LEVEL=21 --cxxopt=-std=c++14 --copt=-D_GLIBCXX_USE_C99 //native_client:libdeepspeech.so
+   bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic --config=android --config=android_arm --define=runtime=tflite --action_env ANDROID_NDK_API_LEVEL=21 --cxxopt=-std=c++14 --copt=-D_GLIBCXX_USE_C99 //native_client:libstt.so

Or (ARM64):

.. code-block::

-   bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic --config=android --config=android_arm64 --define=runtime=tflite --action_env ANDROID_NDK_API_LEVEL=21 --cxxopt=-std=c++14 --copt=-D_GLIBCXX_USE_C99 //native_client:libdeepspeech.so
+   bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic --config=android --config=android_arm64 --define=runtime=tflite --action_env ANDROID_NDK_API_LEVEL=21 --cxxopt=-std=c++14 --copt=-D_GLIBCXX_USE_C99 //native_client:libstt.so

-Building ``libdeepspeech.aar``
+Building ``libstt.aar``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

In the unlikely event you have to rebuild the JNI bindings, source code is
-available under the ``libdeepspeech`` subdirectory. Building depends on shared
-object: please ensure to place ``libdeepspeech.so`` into the
-``libdeepspeech/libs/{arm64-v8a,armeabi-v7a,x86_64}/`` matching subdirectories.
+available under the ``libstt`` subdirectory. Building depends on shared
+object: please ensure you place ``libstt.so`` into the matching
+``libstt/libs/{arm64-v8a,armeabi-v7a,x86_64}/`` subdirectories.

Building the bindings is managed by ``gradle`` and should be limited to issuing
-``./gradlew libdeepspeech:build``, producing an ``AAR`` package in
-``./libdeepspeech/build/outputs/aar/``.
+``./gradlew libstt:build``, producing an ``AAR`` package in
+``./libstt/build/outputs/aar/``.

Please note that you might have to copy the file to a local Maven repository and adapt file naming (when missing, the error message should states what filename it expects and where).

-Building C++ ``deepspeech`` binary
+Building C++ ``stt`` binary
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-Building the ``deepspeech`` binary will happen through ``ndk-build`` (ARMv7):
+Building the ``stt`` binary will happen through ``ndk-build`` (ARMv7):

.. code-block::

-   cd ../DeepSpeech/native_client
+   cd ../STT/native_client
   $ANDROID_NDK_HOME/ndk-build APP_PLATFORM=android-21 APP_BUILD_SCRIPT=$(pwd)/Android.mk NDK_PROJECT_PATH=$(pwd) APP_STL=c++_shared TFDIR=$(pwd)/../tensorflow/ TARGET_ARCH_ABI=armeabi-v7a

And (ARM64):

.. code-block::

-   cd ../DeepSpeech/native_client
+   cd ../STT/native_client
   $ANDROID_NDK_HOME/ndk-build APP_PLATFORM=android-21 APP_BUILD_SCRIPT=$(pwd)/Android.mk NDK_PROJECT_PATH=$(pwd) APP_STL=c++_shared TFDIR=$(pwd)/../tensorflow/ TARGET_ARCH_ABI=arm64-v8a

Android demo APK
@@ -303,13 +305,13 @@ demo of one usage of the application.
For example, it's only able to read PCM mono 16kHz 16-bits file and it might fail on some WAVE file that are not following exactly the specification.

-Running ``deepspeech`` via adb
+Running ``stt`` via adb
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

You should use ``adb push`` to send data to device, please refer to Android documentation on how to use that.

-Please push DeepSpeech data to ``/sdcard/deepspeech/``\ , including:
+Please push 🐸STT data to ``/sdcard/STT/``\ , including:

* ``output_graph.tflite`` which is the TF Lite model

@@ -319,8 +321,8 @@ Please push DeepSpeech data to ``/sdcard/deepspeech/``\ , including:

Then, push binaries from ``native_client.tar.xz`` to ``/data/local/tmp/ds``\ :

-* ``deepspeech``
-* ``libdeepspeech.so``
+* ``stt``
+* ``libstt.so``
* ``libc++_shared.so``

You should then be able to run as usual, using a shell from ``adb shell``\ :

.. code-block::

   user@device$ cd /data/local/tmp/ds/
-   user@device$ LD_LIBRARY_PATH=$(pwd)/ ./deepspeech [...]
+   user@device$ LD_LIBRARY_PATH=$(pwd)/ ./stt [...]

Please note that Android linker does not support ``rpath`` so you have to set ``LD_LIBRARY_PATH``. Properly wrapped / packaged bindings does embed the library
diff --git a/doc/BUILDING_DotNet.rst b/doc/BUILDING_DotNet.rst
index a85598e0..9421918f 100644
--- a/doc/BUILDING_DotNet.rst
+++ b/doc/BUILDING_DotNet.rst
@@ -1,9 +1,9 @@
.. _build-native-client-dotnet:

-Building DeepSpeech native client for Windows
-=============================================
+Building Coqui STT native client for Windows
+============================================

-Now we can build the native client of DeepSpeech and run inference on Windows using the C# client, to do that we need to compile the ``native_client``.
+Now we can build the native client of 🐸STT and run inference on Windows using the C# client. To do that, we need to compile the ``native_client``.
**Table of Contents**

@@ -44,11 +44,11 @@ We highly recommend sticking to the recommended versions of CUDA/cuDNN in order

Getting the code
----------------

-We need to clone ``mozilla/DeepSpeech``.
+We need to clone ``coqui-ai/STT``.

.. code-block:: bash

-   git clone https://github.com/mozilla/DeepSpeech
+   git clone https://github.com/coqui-ai/STT
   git submodule sync tensorflow/
   git submodule update --init tensorflow/

@@ -61,8 +61,8 @@ There should already be a symbolic link, for this example let's suppose that we

   .
   ├── D:\
-   │ ├── cloned # Contains DeepSpeech and tensorflow side by side
-   │ │ └── DeepSpeech # Root of the cloned DeepSpeech
+   │ ├── cloned # Contains 🐸STT and tensorflow side by side
+   │ │ └── STT # Root of the cloned 🐸STT
   │ │ ├── tensorflow # Root of the cloned mozilla/tensorflow
   └── ...

@@ -71,7 +71,7 @@ Change your path accordingly to your path structure, for the structure above we

.. code-block:: bash

-   mklink /d "D:\cloned\DeepSpeech\tensorflow\native_client" "D:\cloned\DeepSpeech\native_client"
+   mklink /d "D:\cloned\STT\tensorflow\native_client" "D:\cloned\STT\native_client"

Adding environment variables
----------------------------

@@ -119,7 +119,7 @@ Building the native_client

There's one last command to run before building, you need to run the `configure.py `_ inside ``tensorflow`` cloned directory.

-At this point we are ready to start building the ``native_client``, go to ``tensorflow`` sub-directory, following our examples should be ``D:\cloned\DeepSpeech\tensorflow``.
+At this point we are ready to start building the ``native_client``. Go to the ``tensorflow`` sub-directory; following our example, it should be ``D:\cloned\STT\tensorflow``.

CPU
~~~
@@ -128,7 +128,7 @@ We will add AVX/AVX2 support in the command, please make sure that your CPU supp

.. code-block:: bash

-   bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" -c opt --copt=/arch:AVX --copt=/arch:AVX2 //native_client:libdeepspeech.so
+   bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" -c opt --copt=/arch:AVX --copt=/arch:AVX2 //native_client:libstt.so

GPU with CUDA
~~~~~~~~~~~~~
@@ -137,11 +137,11 @@ If you enabled CUDA in `configure.py `_ in your DeepSpeech directory and open the Visual Studio solution, then we need to build in debug or release mode, finally we just need to copy ``libdeepspeech.so`` to the generated ``x64/Debug`` or ``x64/Release`` directory.
+As for now we can only use the generated ``libstt.so`` with the C# clients. Go to `native_client/dotnet/ `_ in your STT directory and open the Visual Studio solution; then build in Debug or Release mode and, finally, copy ``libstt.so`` to the generated ``x64/Debug`` or ``x64/Release`` directory.
diff --git a/doc/Contributed-Examples.rst b/doc/Contributed-Examples.rst
index 7eaba452..a4c08a86 100644
--- a/doc/Contributed-Examples.rst
+++ b/doc/Contributed-Examples.rst
@@ -1,4 +1,4 @@
User contributed examples
=========================

-There are also several user contributed examples available on a separate examples repository: `https://github.com/mozilla/DeepSpeech-examples `_.
+There are also several user contributed examples available on a separate examples repository: `https://github.com/coqui-ai/STT-examples `_.
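+
+For example, to fetch the examples and browse them locally (a quick sketch; the layout of the examples repository may change over time):
+
+.. code-block:: bash
+
+   git clone https://github.com/coqui-ai/STT-examples
+   ls STT-examples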
diff --git a/doc/Decoder.rst b/doc/Decoder.rst index da974bc4..1ca2026b 100644 --- a/doc/Decoder.rst +++ b/doc/Decoder.rst @@ -6,7 +6,7 @@ CTC beam search decoder Introduction ^^^^^^^^^^^^ -DeepSpeech uses the `Connectionist Temporal Classification `_ loss function. For an excellent explanation of CTC and its usage, see this Distill article: `Sequence Modeling with CTC `_. This document assumes the reader is familiar with the concepts described in that article, and describes DeepSpeech specific behaviors that developers building systems with DeepSpeech should know to avoid problems. +🐸STT uses the `Connectionist Temporal Classification `_ loss function. For an excellent explanation of CTC and its usage, see this Distill article: `Sequence Modeling with CTC `_. This document assumes the reader is familiar with the concepts described in that article, and describes 🐸STT specific behaviors that developers building systems with 🐸STT should know to avoid problems. Note: Documentation for the tooling for creating custom scorer packages is available in :ref:`scorer-scripts`. @@ -16,19 +16,19 @@ The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "S External scorer ^^^^^^^^^^^^^^^ -DeepSpeech clients support OPTIONAL use of an external language model to improve the accuracy of the predicted transcripts. In the code, command line parameters, and documentation, this is referred to as a "scorer". The scorer is used to compute the likelihood (also called a score, hence the name "scorer") of sequences of words or characters in the output, to guide the decoder towards more likely results. This improves accuracy significantly. +🐸STT clients support OPTIONAL use of an external language model to improve the accuracy of the predicted transcripts. In the code, command line parameters, and documentation, this is referred to as a "scorer". The scorer is used to compute the likelihood (also called a score, hence the name "scorer") of sequences of words or characters in the output, to guide the decoder towards more likely results. This improves accuracy significantly. -The use of an external scorer is fully optional. When an external scorer is not specified, DeepSpeech still uses a beam search decoding algorithm, but without any outside scoring. +The use of an external scorer is fully optional. When an external scorer is not specified, 🐸STT still uses a beam search decoding algorithm, but without any outside scoring. -Currently, the DeepSpeech external scorer is implemented with `KenLM `_, plus some tooling to package the necessary files and metadata into a single ``.scorer`` package. The tooling lives in ``data/lm/``. The scripts included in ``data/lm/`` can be used and modified to build your own language model based on your particular use case or language. See :ref:`scorer-scripts` for more details on how to reproduce our scorer file as well as create your own. +Currently, the 🐸STT external scorer is implemented with `KenLM `_, plus some tooling to package the necessary files and metadata into a single ``.scorer`` package. The tooling lives in ``data/lm/``. The scripts included in ``data/lm/`` can be used and modified to build your own language model based on your particular use case or language. See :ref:`scorer-scripts` for more details on how to reproduce our scorer file as well as create your own. 
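+
+For a concrete sense of where the scorer enters the picture, this is how it is typically passed to the command-line client at inference time (a sketch reusing the release file names from the usage docs; ``--scorer`` is optional and can be omitted for plain beam search):
+
+.. code-block:: bash
+
+   ./stt --model coqui-stt-0.9.3-models.pbmm \
+         --scorer coqui-stt-0.9.3-models.scorer \
+         --audio audio_input.wav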
-The scripts are geared towards replicating the language model files we release as part of `DeepSpeech model releases `_, but modifying them to use different datasets or language model construction parameters should be simple.
+The scripts are geared towards replicating the language model files we release as part of `STT model releases `_, but modifying them to use different datasets or language model construction parameters should be simple.

Decoding modes
^^^^^^^^^^^^^^

-DeepSpeech currently supports two modes of operation with significant differences at both training and decoding time. Note that Bytes output mode is experimental and has not been tested for languages other than Chinese Mandarin.
+🐸STT currently supports two modes of operation with significant differences at both training and decoding time. Note that Bytes output mode is experimental and has not been tested for languages other than Chinese Mandarin.

Default mode (alphabet based)
diff --git a/doc/Error-Codes.rst b/doc/Error-Codes.rst
index 361ca025..932d6b46 100644
--- a/doc/Error-Codes.rst
+++ b/doc/Error-Codes.rst
@@ -5,7 +5,7 @@

Below is the definition for all error codes used in the API, their numerical values, and a human readable description.

-.. literalinclude:: ../native_client/deepspeech.h
+.. literalinclude:: ../native_client/coqui-stt.h
   :language: c
   :start-after: sphinx-doc: error_code_listing_start
   :end-before: sphinx-doc: error_code_listing_end
diff --git a/doc/Flags.rst b/doc/Flags.rst
index 66b26f0c..deb3e65a 100644
--- a/doc/Flags.rst
+++ b/doc/Flags.rst
@@ -3,12 +3,12 @@
Command-line flags for the training scripts
===========================================

-Below you can find the definition of all command-line flags supported by the training scripts. This includes ``DeepSpeech.py``, ``evaluate.py``, ``evaluate_tflite.py``, ``transcribe.py`` and ``lm_optimizer.py``.
+Below you can find the definition of all command-line flags supported by the training scripts. This includes ``train.py``, ``evaluate.py``, ``evaluate_tflite.py``, ``transcribe.py`` and ``lm_optimizer.py``.

Flags
-----

-.. literalinclude:: ../training/deepspeech_training/util/flags.py
+.. literalinclude:: ../training/coqui_stt_training/util/flags.py
   :language: python
   :linenos:
   :lineno-match:
diff --git a/doc/HotWordBoosting-Examples.rst b/doc/HotWordBoosting-Examples.rst
index a234c89c..deb32ddd 100644
--- a/doc/HotWordBoosting-Examples.rst
+++ b/doc/HotWordBoosting-Examples.rst
@@ -1,7 +1,7 @@
Hot-word boosting API Usage example
===================================

-With DeepSpeech 0.9 release a new API feature was introduced that allows boosting probability from the scorer of given words. It is exposed in all bindings (C, Python, JS, Java and .Net).
+With the 🐸STT 0.9 release, a new API feature was introduced that allows boosting the scorer probability of given words. It is exposed in all bindings (C, Python, JS, Java and .Net).

Currently, it provides three methods for the Model class:

@@ -19,11 +19,11 @@ It is worth noting that boosting non-existent words in scorer (mostly proper nou

Adjusting the boosting value
----------------------------

-For hot-word boosting it is hard to determine what the optimal value that one might be searching for is. Additionally, this is dependant on the input audio file. In practice, as it was reported by DeepSpeech users, the value should be not bigger than 20.0 for positive value boosting. Nevertheless, each usecase is different and you might need to adjust values on your own.
+For hot-word boosting it is hard to determine the optimal value one might be searching for. Additionally, this is dependent on the input audio file. In practice, as reported by 🐸STT users, the value should be no bigger than 20.0 for positive value boosting. Nevertheless, each use case is different and you might need to adjust values on your own.

-There is a user contributed script available on ``DeepSpeech-examples`` repository for adjusting boost values:
+There is a user contributed script available in the ``STT-examples`` repository for adjusting boost values:

-`https://github.com/mozilla/DeepSpeech-examples/tree/master/hotword_adjusting `_.
+`https://github.com/coqui-ai/STT-examples/tree/master/hotword_adjusting `_.

Positive value boosting
diff --git a/doc/Makefile b/doc/Makefile
index 0980ab24..2eb83d83 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -4,7 +4,7 @@
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
-SPHINXPROJ = DeepSpeech
+SPHINXPROJ = "Coqui STT"
SOURCEDIR = .
BUILDDIR = .build
diff --git a/doc/ParallelOptimization.rst b/doc/ParallelOptimization.rst
index e0d3734c..9f6dca36 100644
--- a/doc/ParallelOptimization.rst
+++ b/doc/ParallelOptimization.rst
@@ -1,7 +1,7 @@
Parallel Optimization
=====================

-This is how we implement optimization of the DeepSpeech model across GPUs on a
+This is how we implement optimization of the 🐸STT model across GPUs on a
single host. Parallel optimization can take on various forms. For example one can use asynchronous updates of the model, synchronous updates of the model, or some combination of the two.
diff --git a/doc/Scorer.rst b/doc/Scorer.rst
index 1f374604..881a3f91 100644
--- a/doc/Scorer.rst
+++ b/doc/Scorer.rst
@@ -3,11 +3,11 @@
External scorer scripts
=======================

-DeepSpeech pre-trained models include an external scorer. This document explains how to reproduce our external scorer, as well as adapt the scripts to create your own.
+🐸STT pre-trained models include an external scorer. This document explains how to reproduce our external scorer, as well as adapt the scripts to create your own.

The scorer is composed of two sub-components, a KenLM language model and a trie data structure containing all words in the vocabulary. In order to create the scorer package, first we must create a KenLM language model (using ``data/lm/generate_lm.py``, and then use ``generate_scorer_package`` to create the final package file including the trie data structure.

-The ``generate_scorer_package`` binary is part of the native client package that is included with official releases. You can find the appropriate archive for your platform in the `GitHub release downloads `_. The native client package is named ``native_client.{arch}.{config}.{plat}.tar.xz``, where ``{arch}`` is the architecture the binary was built for, for example ``amd64`` or ``arm64``, ``config`` is the build configuration, which for building decoder packages does not matter, and ``{plat}`` is the platform the binary was built-for, for example ``linux`` or ``osx``. If you wanted to run the ``generate_scorer_package`` binary on a Linux desktop, you would download ``native_client.amd64.cpu.linux.tar.xz``.
+The ``generate_scorer_package`` binary is part of the native client package that is included with official releases. You can find the appropriate archive for your platform in the `GitHub release downloads `_. The native client package is named ``native_client.{arch}.{config}.{plat}.tar.xz``, where ``{arch}`` is the architecture the binary was built for, for example ``amd64`` or ``arm64``, ``{config}`` is the build configuration, which for building decoder packages does not matter, and ``{plat}`` is the platform the binary was built for, for example ``linux`` or ``osx``. If you wanted to run the ``generate_scorer_package`` binary on a Linux desktop, you would download ``native_client.amd64.cpu.linux.tar.xz``.

Reproducing our external scorer
-------------------------------
@@ -26,7 +26,7 @@ Then use the ``generate_lm.py`` script to generate ``lm.binary`` and ``vocab-500

As input you can use a plain text (e.g. ``file.txt``) or gzipped (e.g. ``file.txt.gz``) text file with one sentence in each line.

-If you are using a container created from ``Dockerfile.build``, you can use ``--kenlm_bins /DeepSpeech/native_client/kenlm/build/bin/``.
+If you are using a container created from ``Dockerfile.build``, you can use ``--kenlm_bins /STT/native_client/kenlm/build/bin/``.
Else you have to build `KenLM `_ first and then pass the build directory to the script.

.. code-block:: bash

@@ -44,7 +44,7 @@ Afterwards you can use ``generate_scorer_package`` to generate the scorer packag

   cd data/lm
   # Download and extract appropriate native_client package:
-   curl -LO http://github.com/mozilla/DeepSpeech/releases/...
+   curl -LO http://github.com/coqui-ai/STT/releases/...
   tar xvf native_client.*.tar.xz
   ./generate_scorer_package --alphabet ../alphabet.txt --lm lm.binary --vocab vocab-500000.txt \
     --package kenlm.scorer --default_alpha 0.931289039105002 --default_beta 1.1834137581510284

@@ -59,6 +59,6 @@ Building your own scorer can be useful if you're using models in a narrow usage

The LibriSpeech LM training text used by our scorer is around 4GB uncompressed, which should give an idea of the size of a corpus needed for a reasonable language model for general speech recognition. For more constrained use cases with smaller vocabularies, you don't need as much data, but you should still try to gather as much as you can.

-With a text corpus in hand, you can then re-use ``generate_lm.py`` and ``generate_scorer_package`` to create your own scorer that is compatible with DeepSpeech clients and language bindings. Before building the language model, you must first familiarize yourself with the `KenLM toolkit `_. Most of the options exposed by the ``generate_lm.py`` script are simply forwarded to KenLM options of the same name, so you must read the KenLM documentation in order to fully understand their behavior.
+With a text corpus in hand, you can then re-use ``generate_lm.py`` and ``generate_scorer_package`` to create your own scorer that is compatible with 🐸STT clients and language bindings. Before building the language model, you must first familiarize yourself with the `KenLM toolkit `_. Most of the options exposed by the ``generate_lm.py`` script are simply forwarded to KenLM options of the same name, so you must read the KenLM documentation in order to fully understand their behavior.

After using ``generate_lm.py`` to create a KenLM language model binary file, you can use ``generate_scorer_package`` to create a scorer package as described in the previous section. Note that we have a :github:`lm_optimizer.py script ` which can be used to find good default values for alpha and beta. To use it, you must first generate a package with any value set for default alpha and beta flags. For this step, it doesn't matter what values you use, as they'll be overridden by ``lm_optimizer.py`` later. Then, use ``lm_optimizer.py`` with this scorer file to find good alpha and beta values. Finally, use ``generate_scorer_package`` again, this time with the new values.
diff --git a/doc/TRAINING.rst b/doc/TRAINING.rst
index a5a08e24..97703f4a 100644
--- a/doc/TRAINING.rst
+++ b/doc/TRAINING.rst
@@ -15,11 +15,11 @@ Prerequisites for training a model

Getting the training code
^^^^^^^^^^^^^^^^^^^^^^^^^

-Clone the latest released stable branch from Github (e.g. 0.9.3, check `here `_):
+Clone the latest released stable branch from GitHub (e.g. 0.9.3, check `here `_):

.. code-block:: bash

-   git clone --branch v0.9.3 https://github.com/mozilla/DeepSpeech
+   git clone --branch v0.9.3 https://github.com/coqui-ai/STT

If you plan on committing code or you want to report bugs, please use the master branch.

@@ -28,31 +28,31 @@ Creating a virtual environment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Throughout the documentation we assume you are using **virtualenv** to manage your Python environments. This setup is the one used and recommended by the project authors and is the easiest way to make sure you won't run into environment issues. If you're using **Anaconda, Miniconda or Mamba**, first read the instructions at :ref:`training-with-conda` and then continue from the installation step below.

-In creating a virtual environment you will create a directory containing a ``python3`` binary and everything needed to run deepspeech. You can use whatever directory you want. For the purpose of the documentation, we will rely on ``$HOME/tmp/deepspeech-train-venv``. You can create it using this command:
+In creating a virtual environment you will create a directory containing a ``python3`` binary and everything needed to run 🐸STT. You can use whatever directory you want. For the purpose of the documentation, we will rely on ``$HOME/tmp/coqui-stt-train-venv``. You can create it using this command:

.. code-block::

-   $ python3 -m venv $HOME/tmp/deepspeech-train-venv/
+   $ python3 -m venv $HOME/tmp/coqui-stt-train-venv/

Once this command completes successfully, the environment will be ready to be activated.

Activating the environment
^^^^^^^^^^^^^^^^^^^^^^^^^^

-Each time you need to work with DeepSpeech, you have to *activate* this virtual environment. This is done with this simple command:
+Each time you need to work with 🐸STT, you have to *activate* this virtual environment. This is done with this simple command:

.. code-block::

-   $ source $HOME/tmp/deepspeech-train-venv/bin/activate
+   $ source $HOME/tmp/coqui-stt-train-venv/bin/activate

-Installing DeepSpeech Training Code and its dependencies
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Installing Coqui STT Training Code and its dependencies
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Install the required dependencies using ``pip3``\ :

.. code-block:: bash

-   cd DeepSpeech
+   cd STT
   pip3 install --upgrade pip==20.2.2 wheel==0.34.2 setuptools==49.6.0
   pip3 install --upgrade -e .

@@ -95,11 +95,11 @@ This should ensure that you'll re-use the upstream Python 3 TensorFlow GPU-enabl

   make Dockerfile.train

-If you want to specify a different DeepSpeech repository / branch, you can pass ``DEEPSPEECH_REPO`` or ``DEEPSPEECH_SHA`` parameters:
+If you want to specify a different 🐸STT repository / branch, you can pass ``STT_REPO`` or ``STT_SHA`` parameters:

.. code-block:: bash

-   make Dockerfile.train DEEPSPEECH_REPO=git://your/fork DEEPSPEECH_SHA=origin/your-branch
+   make Dockerfile.train STT_REPO=git://your/fork STT_SHA=origin/your-branch

Common Voice training data
^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -112,7 +112,7 @@ After extraction of such a data set, you'll find the following contents:

* the ``*.tsv`` files output by CorporaCreator for the downloaded language
* the mp3 audio files they reference in a ``clips`` sub-directory.

-For bringing this data into a form that DeepSpeech understands, you have to run the CommonVoice v2.0 importer (\ ``bin/import_cv2.py``\ ):
+To bring this data into a form that 🐸STT understands, you have to run the CommonVoice v2.0 importer (\ ``bin/import_cv2.py``\ ):

.. code-block:: bash

@@ -134,22 +134,22 @@ The CSV files comprise of the following fields:

* ``wav_filesize`` - samples size given in bytes, used for sorting the data before training. Expects integer.
* ``transcript`` - transcription target for the sample.

-To use Common Voice data during training, validation and testing, you pass (comma separated combinations of) their filenames into ``--train_files``\ , ``--dev_files``\ , ``--test_files`` parameters of ``DeepSpeech.py``.
+To use Common Voice data during training, validation and testing, you pass (comma-separated combinations of) their filenames into the ``--train_files``\ , ``--dev_files`` and ``--test_files`` parameters of ``train.py``.

-If, for example, Common Voice language ``en`` was extracted to ``../data/CV/en/``\ , ``DeepSpeech.py`` could be called like this:
+If, for example, Common Voice language ``en`` was extracted to ``../data/CV/en/``\ , ``train.py`` could be called like this:

.. code-block:: bash

-   python3 DeepSpeech.py --train_files ../data/CV/en/clips/train.csv --dev_files ../data/CV/en/clips/dev.csv --test_files ../data/CV/en/clips/test.csv
+   python3 train.py --train_files ../data/CV/en/clips/train.csv --dev_files ../data/CV/en/clips/dev.csv --test_files ../data/CV/en/clips/test.csv

Training a model
^^^^^^^^^^^^^^^^

-The central (Python) script is ``DeepSpeech.py`` in the project's root directory. For its list of command line options, you can call:
+The central (Python) script is ``train.py`` in the project's root directory. For its list of command line options, you can call:

.. code-block:: bash

-   python3 DeepSpeech.py --helpfull
+   python3 train.py --helpfull

To get the output of this in a slightly better-formatted way, you can also look at the flag definitions in :ref:`training-flags`.

For executing pre-configured training scenarios, there is a collection of conven
**If you experience GPU OOM errors while training, try reducing the batch size with the ``--train_batch_size``\ , ``--dev_batch_size`` and ``--test_batch_size`` parameters.**

-As a simple first example you can open a terminal, change to the directory of the DeepSpeech checkout, activate the virtualenv created above, and run:
+As a simple first example you can open a terminal, change to the directory of the 🐸STT checkout, activate the virtualenv created above, and run:

.. code-block:: bash

This script will train on a small sample dataset composed of just a single audio file, the sample file for the `TIMIT Acoustic-Phonetic Continuous Speech Corpus `_, which can be overfitted on a GPU in a few minutes for demonstration purposes. From here, you can alter any variables with regards to what dataset is used, how many training iterations are run and the default values of the network parameters.

-Feel also free to pass additional (or overriding) ``DeepSpeech.py`` parameters to these scripts. Then, just run the script to train the modified network.
+Feel free to also pass additional (or overriding) ``train.py`` parameters to these scripts. Then, just run the script to train the modified network.

-Each dataset has a corresponding importer script in ``bin/`` that can be used to download (if it's freely available) and preprocess the dataset. See ``bin/import_librivox.py`` for an example of how to import and preprocess a large dataset for training with DeepSpeech.
+Each dataset has a corresponding importer script in ``bin/`` that can be used to download (if it's freely available) and preprocess the dataset. See ``bin/import_librivox.py`` for an example of how to import and preprocess a large dataset for training with 🐸STT.

Some importers might require additional code to properly handled your locale-specific requirements. Such handling is dealt with ``--validate_label_locale`` flag that allows you to source out-of-tree Python script that defines a ``validate_label`` function. Please refer to ``util/importers.py`` for implementation example of that function. If you don't provide this argument, the default ``validate_label`` function will be used. This one is only intended for English language, so you might have consistency issues in your data for other languages.

@@ -191,10 +191,10 @@ Automatic Mixed Precision (AMP) training on GPU for TensorFlow has been recently

Mixed precision training makes use of both FP32 and FP16 precisions where appropriate. FP16 operations can leverage the Tensor cores on NVIDIA GPUs (Volta, Turing or newer architectures) for improved throughput. Mixed precision training also often allows larger batch sizes. Automatic mixed precision training can be enabled by including the flag `--automatic_mixed_precision` at training time:

```
-python3 DeepSpeech.py --train_files ./train.csv --dev_files ./dev.csv --test_files ./test.csv --automatic_mixed_precision
+python3 train.py --train_files ./train.csv --dev_files ./dev.csv --test_files ./test.csv --automatic_mixed_precision
```

-On a Volta generation V100 GPU, automatic mixed precision speeds up DeepSpeech training and evaluation by ~30%-40%.
+On a Volta generation V100 GPU, automatic mixed precision speeds up 🐸STT training and evaluation by ~30%-40%.

Checkpointing
^^^^^^^^^^^^^
@@ -212,7 +212,7 @@ Refer to the :ref:`usage instructions ` for information on running a

Exporting a model for TFLite
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-If you want to experiment with the TF Lite engine, you need to export a model that is compatible with it, then use the ``--export_tflite`` flags. If you already have a trained model, you can re-export it for TFLite by running ``DeepSpeech.py`` again and specifying the same ``checkpoint_dir`` that you used for training, as well as passing ``--export_tflite --export_dir /model/export/destination``. If you changed the alphabet you also need to add the ``--alphabet_config_path my-new-language-alphabet.txt`` flag.
+If you want to experiment with the TF Lite engine, you need to export a model that is compatible with it, then use the ``--export_tflite`` flags. If you already have a trained model, you can re-export it for TFLite by running ``train.py`` again and specifying the same ``checkpoint_dir`` that you used for training, as well as passing ``--export_tflite --export_dir /model/export/destination``. If you changed the alphabet you also need to add the ``--alphabet_config_path my-new-language-alphabet.txt`` flag.

Making a mmap-able model for inference
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -236,9 +236,9 @@ Upon sucessfull run, it should report about conversion of a non-zero number of n

Continuing training from a release model
----------------------------------------

-There are currently two supported approaches to make use of a pre-trained DeepSpeech model: fine-tuning or transfer-learning. Choosing which one to use is a simple decision, and it depends on your target dataset. Does your data use the same alphabet as the release model? If "Yes": fine-tune. If "No" use transfer-learning.
+There are currently two supported approaches to make use of a pre-trained 🐸STT model: fine-tuning or transfer-learning. Choosing which one to use is a simple decision, and it depends on your target dataset. Does your data use the same alphabet as the release model? If "Yes": fine-tune. If "No" use transfer-learning.

-If your own data uses the *extact* same alphabet as the English release model (i.e. `a-z` plus `'`) then the release model's output layer will match your data, and you can just fine-tune the existing parameters. However, if you want to use a new alphabet (e.g. Cyrillic `а`, `б`, `д`), the output layer of a release DeepSpeech model will *not* match your data. In this case, you should use transfer-learning (i.e. remove the trained model's output layer, and reinitialize a new output layer that matches your target character set.
+If your own data uses the *exact* same alphabet as the English release model (i.e. `a-z` plus `'`) then the release model's output layer will match your data, and you can just fine-tune the existing parameters. However, if you want to use a new alphabet (e.g. Cyrillic `а`, `б`, `д`), the output layer of a release 🐸STT model will *not* match your data. In this case, you should use transfer-learning (i.e. remove the trained model's output layer, and reinitialize a new output layer that matches your target character set).

N.B. - If you have access to a pre-trained model which uses UTF-8 bytes at the output layer you can always fine-tune, because any alphabet should be encodable as UTF-8.

@@ -247,14 +247,14 @@
Fine-Tuning (same alphabet)
^^^^^^^^^^^^^^^^^^^^^^^^^^^

-If you'd like to use one of the pre-trained models to bootstrap your training process (fine tuning), you can do so by using the ``--checkpoint_dir`` flag in ``DeepSpeech.py``. Specify the path where you downloaded the checkpoint from the release, and training will resume from the pre-trained model.
+If you'd like to use one of the pre-trained models to bootstrap your training process (fine tuning), you can do so by using the ``--checkpoint_dir`` flag in ``train.py``. Specify the path where you downloaded the checkpoint from the release, and training will resume from the pre-trained model.

For example, if you want to fine tune the entire graph using your own data in ``my-train.csv``\ , ``my-dev.csv`` and ``my-test.csv``\ , for three epochs, you can something like the following, tuning the hyperparameters as needed:

.. code-block:: bash

   mkdir fine_tuning_checkpoints
-   python3 DeepSpeech.py --n_hidden 2048 --checkpoint_dir path/to/checkpoint/folder --epochs 3 --train_files my-train.csv --dev_files my-dev.csv --test_files my_dev.csv --learning_rate 0.0001
+   python3 train.py --n_hidden 2048 --checkpoint_dir path/to/checkpoint/folder --epochs 3 --train_files my-train.csv --dev_files my-dev.csv --test_files my-test.csv --learning_rate 0.0001

Notes about the release checkpoints: the released models were trained with ``--n_hidden 2048``\ , so you need to use that same value when initializing from the release models. Since v0.6.0, the release models are also trained with ``--train_cudnn``\ , so you'll need to specify that as well. If you don't have a CUDA compatible GPU, then you can workaround it by using the ``--load_cudnn`` flag. Use ``--helpfull`` to get more information on how the flags work.

@@ -270,17 +270,17 @@ If you try to load a release model without following these steps, you'll get an

Transfer-Learning (new alphabet)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-If you want to continue training an alphabet-based DeepSpeech model (i.e. not a UTF-8 model) on a new language, or if you just want to add new characters to your custom alphabet, you will probably want to use transfer-learning instead of fine-tuning. If you're starting with a pre-trained UTF-8 model -- even if your data comes from a different language or uses a different alphabet -- the model will be able to predict your new transcripts, and you should use fine-tuning instead.
+If you want to continue training an alphabet-based 🐸STT model (i.e. not a UTF-8 model) on a new language, or if you just want to add new characters to your custom alphabet, you will probably want to use transfer-learning instead of fine-tuning. If you're starting with a pre-trained UTF-8 model -- even if your data comes from a different language or uses a different alphabet -- the model will be able to predict your new transcripts, and you should use fine-tuning instead.

-In a nutshell, DeepSpeech's transfer-learning allows you to remove certain layers from a pre-trained model, initialize new layers for your target data, stitch together the old and new layers, and update all layers via gradient descent. You will remove the pre-trained output layer (and optionally more layers) and reinitialize parameters to fit your target alphabet. The simplest case of transfer-learning is when you remove just the output layer.
+In a nutshell, 🐸STT's transfer-learning allows you to remove certain layers from a pre-trained model, initialize new layers for your target data, stitch together the old and new layers, and update all layers via gradient descent. You will remove the pre-trained output layer (and optionally more layers) and reinitialize parameters to fit your target alphabet. The simplest case of transfer-learning is when you remove just the output layer.

-In DeepSpeech's implementation of transfer-learning, all removed layers will be contiguous, starting from the output layer. The key flag you will want to experiment with is ``--drop_source_layers``. This flag accepts an integer from ``1`` to ``5`` and allows you to specify how many layers you want to remove from the pre-trained model. For example, if you supplied ``--drop_source_layers 3``, you will drop the last three layers of the pre-trained model: the output layer, penultimate layer, and LSTM layer. All dropped layers will be reinintialized, and (crucially) the output layer will be defined to match your supplied target alphabet.
+In 🐸STT's implementation of transfer-learning, all removed layers will be contiguous, starting from the output layer. The key flag you will want to experiment with is ``--drop_source_layers``. This flag accepts an integer from ``1`` to ``5`` and allows you to specify how many layers you want to remove from the pre-trained model. For example, if you supplied ``--drop_source_layers 3``, you will drop the last three layers of the pre-trained model: the output layer, penultimate layer, and LSTM layer. All dropped layers will be reinintialized, and (crucially) the output layer will be defined to match your supplied target alphabet. You need to specify the location of the pre-trained model with ``--load_checkpoint_dir`` and define where your new model checkpoints will be saved with ``--save_checkpoint_dir``. You need to specify how many layers to remove (aka "drop") from the pre-trained model: ``--drop_source_layers``. You also need to supply your new alphabet file using the standard ``--alphabet_config_path`` (remember, using a new alphabet is the whole reason you want to use transfer-learning). .. code-block:: bash - python3 DeepSpeech.py \ + python3 train.py \ --drop_source_layers 1 \ --alphabet_config_path my-new-language-alphabet.txt \ --save_checkpoint_dir path/to/output-checkpoint/folder \ @@ -292,7 +292,7 @@ You need to specify the location of the pre-trained model with ``--load_checkpoi UTF-8 mode ^^^^^^^^^^ -DeepSpeech includes a UTF-8 operating mode which can be useful to model languages with very large alphabets, such as Chinese Mandarin. For details on how it works and how to use it, see :ref:`decoder-docs`. +🐸STT includes a UTF-8 operating mode which can be useful to model languages with very large alphabets, such as Chinese Mandarin. For details on how it works and how to use it, see :ref:`decoder-docs`. .. _training-data-augmentation: @@ -314,7 +314,7 @@ For example, for the ``overlay`` augmentation: .. code-block:: - python3 DeepSpeech.py --augment overlay[p=0.1,source=/path/to/audio.sdb,snr=20.0] ... + python3 train.py --augment overlay[p=0.1,source=/path/to/audio.sdb,snr=20.0] ... In the documentation below, whenever a value is specified as ```` or ````, it supports one of the follow formats: @@ -485,7 +485,7 @@ Example training with all augmentations: .. code-block:: bash - python -u DeepSpeech.py \ + python -u train.py \ --train_files "train.sdb" \ --feature_cache ./feature.cache \ --cache_for_epochs 10 \ @@ -541,5 +541,5 @@ To prevent common problems, make sure you **always use a separate environment wh .. code-block:: bash - (base) $ conda create -n deepspeech python=3.7 - (base) $ conda activate deepspeech + (base) $ conda create -n coqui-stt python=3.7 + (base) $ conda activate coqui-stt diff --git a/doc/USING.rst b/doc/USING.rst index 3380144a..370304e8 100644 --- a/doc/USING.rst +++ b/doc/USING.rst @@ -3,7 +3,7 @@ Using a Pre-trained Model ========================= -Inference using a DeepSpeech pre-trained model can be done with a client/language binding package. We have four clients/language bindings in this repository, listed below, and also a few community-maintained clients/language bindings in other repositories, listed `further down in this README <#third-party-bindings>`_. +Inference using a 🐸STT pre-trained model can be done with a client/language binding package. 
We have four clients/language bindings in this repository, listed below, and also a few community-maintained clients/language bindings in other repositories, listed `further down in this README <#third-party-bindings>`_. * :ref:`The C API `. * :ref:`The Python package/language binding ` @@ -13,7 +13,7 @@ Inference using a DeepSpeech pre-trained model can be done with a client/languag .. _runtime-deps: -Running ``deepspeech`` might, see below, require some runtime dependencies to be already installed on your system: +Running ``stt`` might, see below, require some runtime dependencies to be already installed on your system: * ``sox`` - The Python and Node.JS clients use SoX to resample files to 16kHz. * ``libgomp1`` - libsox (statically linked into the clients) depends on OpenMP. Some people have had to install this manually. @@ -33,23 +33,23 @@ The GPU capable builds (Python, NodeJS, C++, etc) depend on CUDA 10.1 and CuDNN Getting the pre-trained model ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you want to use the pre-trained English model for performing speech-to-text, you can download it (along with other important inference material) from the DeepSpeech `releases page `_. Alternatively, you can run the following command to download the model files in your current directory: +If you want to use the pre-trained English model for performing speech-to-text, you can download it (along with other important inference material) from the 🐸STT `releases page `_. Alternatively, you can run the following command to download the model files in your current directory: .. code-block:: bash - wget https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models.pbmm - wget https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models.scorer + wget https://github.com/coqui-ai/STT/releases/download/v0.9.3/coqui-stt-0.9.3-models.pbmm + wget https://github.com/coqui-ai/STT/releases/download/v0.9.3/coqui-stt-0.9.3-models.scorer -There are several pre-trained model files available in official releases. Files ending in ``.pbmm`` are compatible with clients and language bindings built against the standard TensorFlow runtime. Usually these packages are simply called ``deepspeech``. These files are also compatible with CUDA enabled clients and language bindings. These packages are usually called ``deepspeech-gpu``. Files ending in ``.tflite`` are compatible with clients and language bindings built against the `TensorFlow Lite runtime `_. These models are optimized for size and performance in low power devices. On desktop platforms, the compatible packages are called ``deepspeech-tflite``. On Android and Raspberry Pi, we only publish TensorFlow Lite enabled packages, and they are simply called ``deepspeech``. You can see a full list of supported platforms and which TensorFlow runtime is supported at :ref:`supported-platforms-inference`. +There are several pre-trained model files available in official releases. Files ending in ``.pbmm`` are compatible with clients and language bindings built against the standard TensorFlow runtime. Usually these packages are simply called ``stt``. These files are also compatible with CUDA enabled clients and language bindings. These packages are usually called ``stt-gpu``. Files ending in ``.tflite`` are compatible with clients and language bindings built against the `TensorFlow Lite runtime `_. These models are optimized for size and performance in low power devices. On desktop platforms, the compatible packages are called ``stt-tflite``. 
On Android and Raspberry Pi, we only publish TensorFlow Lite enabled packages, and they are simply called ``stt``. You can see a full list of supported platforms and which TensorFlow runtime is supported at :ref:`supported-platforms-inference`. +--------------------+---------------------+---------------------+ | Package/Model type | .pbmm | .tflite | +====================+=====================+=====================+ -| deepspeech | Depends on platform | Depends on platform | +| stt | Depends on platform | Depends on platform | +--------------------+---------------------+---------------------+ -| deepspeech-gpu | ✅ | ❌ | +| stt-gpu | ✅ | ❌ | +--------------------+---------------------+---------------------+ -| deepspeech-tflite | ❌ | ✅ | +| stt-tflite | ❌ | ✅ | +--------------------+---------------------+---------------------+ Finally, the pre-trained model files also include files ending in ``.scorer``. These are external scorers (language models) that are used at inference time in conjunction with an acoustic model (``.pbmm`` or ``.tflite`` file) to produce transcriptions. We also provide further documentation on :ref:`the decoding process ` and :ref:`how scorers are generated `. @@ -61,82 +61,82 @@ The release notes include detailed information on how the released models were t The process for training an acoustic model is described in :ref:`training-docs`. In particular, fine tuning a release model using your own data can be a good way to leverage relatively smaller amounts of data that would not be sufficient for training a new model from scratch. See the :ref:`fine tuning and transfer learning sections ` for more information. :ref:`Data augmentation ` can also be a good way to increase the value of smaller training sets. -Creating your own external scorer from text data is another way that you can adapt the model to your specific needs. The process and tools used to generate an external scorer package are described in :ref:`scorer-scripts` and an overview of how the external scorer is used by DeepSpeech to perform inference is available in :ref:`decoder-docs`. Generating a smaller scorer from a single purpose text dataset is a quick process and can bring significant accuracy improvements, specially for more constrained, limited vocabulary applications. +Creating your own external scorer from text data is another way that you can adapt the model to your specific needs. The process and tools used to generate an external scorer package are described in :ref:`scorer-scripts` and an overview of how the external scorer is used by 🐸STT to perform inference is available in :ref:`decoder-docs`. Generating a smaller scorer from a single purpose text dataset is a quick process and can bring significant accuracy improvements, specially for more constrained, limited vocabulary applications. Model compatibility ^^^^^^^^^^^^^^^^^^^ -DeepSpeech models are versioned to keep you from trying to use an incompatible graph with a newer client after a breaking change was made to the code. If you get an error saying your model file version is too old for the client, you should either upgrade to a newer model release, re-export your model from the checkpoint using a newer version of the code, or downgrade your client if you need to use the old model and can't re-export it. +🐸STT models are versioned to keep you from trying to use an incompatible graph with a newer client after a breaking change was made to the code. 
If you get an error saying your model file version is too old for the client, you should either upgrade to a newer model release, re-export your model from the checkpoint using a newer version of the code, or downgrade your client if you need to use the old model and can't re-export it. .. _py-usage: Using the Python package ^^^^^^^^^^^^^^^^^^^^^^^^ -Pre-built binaries which can be used for performing inference with a trained model can be installed with ``pip3``. You can then use the ``deepspeech`` binary to do speech-to-text on an audio file: +Pre-built binaries which can be used for performing inference with a trained model can be installed with ``pip3``. You can then use the ``stt`` binary to do speech-to-text on an audio file: For the Python bindings, it is highly recommended that you perform the installation within a Python 3.5 or later virtual environment. You can find more information about those in `this documentation `_. We will continue under the assumption that you already have your system properly setup to create new virtual environments. -Create a DeepSpeech virtual environment -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Create a Coqui STT virtual environment +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In creating a virtual environment you will create a directory containing a ``python3`` binary and everything needed to run deepspeech. You can use whatever directory you want. For the purpose of the documentation, we will rely on ``$HOME/tmp/deepspeech-venv``. You can create it using this command: +In creating a virtual environment you will create a directory containing a ``python3`` binary and everything needed to run 🐸STT. You can use whatever directory you want. For the purpose of the documentation, we will rely on ``$HOME/tmp/coqui-stt-venv``. You can create it using this command: .. code-block:: - $ virtualenv -p python3 $HOME/tmp/deepspeech-venv/ + $ virtualenv -p python3 $HOME/tmp/coqui-stt-venv/ Once this command completes successfully, the environment will be ready to be activated. Activating the environment ~~~~~~~~~~~~~~~~~~~~~~~~~~ -Each time you need to work with DeepSpeech, you have to *activate* this virtual environment. This is done with this simple command: +Each time you need to work with 🐸STT, you have to *activate* this virtual environment. This is done with this simple command: .. code-block:: - $ source $HOME/tmp/deepspeech-venv/bin/activate + $ source $HOME/tmp/coqui-stt-venv/bin/activate -Installing DeepSpeech Python bindings -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Installing Coqui STT Python bindings +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Once your environment has been set-up and loaded, you can use ``pip3`` to manage packages locally. On a fresh setup of the ``virtualenv``\ , you will have to install the DeepSpeech wheel. You can check if ``deepspeech`` is already installed with ``pip3 list``. +Once your environment has been set-up and loaded, you can use ``pip3`` to manage packages locally. On a fresh setup of the ``virtualenv``\ , you will have to install the 🐸STT wheel. You can check if ``stt`` is already installed with ``pip3 list``. To perform the installation, just use ``pip3`` as such: .. code-block:: - $ pip3 install deepspeech + $ pip3 install stt -If ``deepspeech`` is already installed, you can update it as such: +If ``stt`` is already installed, you can update it as such: .. 
.. _py-usage:

Using the Python package
^^^^^^^^^^^^^^^^^^^^^^^^

-Pre-built binaries which can be used for performing inference with a trained model can be installed with ``pip3``. You can then use the ``deepspeech`` binary to do speech-to-text on an audio file:
+Pre-built binaries which can be used for performing inference with a trained model can be installed with ``pip3``. You can then use the ``stt`` binary to do speech-to-text on an audio file:

For the Python bindings, it is highly recommended that you perform the installation within a Python 3.5 or later virtual environment. You can find more information about those in `this documentation `_. We will continue under the assumption that you already have your system properly set up to create new virtual environments.

-Create a DeepSpeech virtual environment
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Create a Coqui STT virtual environment
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-In creating a virtual environment you will create a directory containing a ``python3`` binary and everything needed to run deepspeech. You can use whatever directory you want. For the purpose of the documentation, we will rely on ``$HOME/tmp/deepspeech-venv``. You can create it using this command:
+In creating a virtual environment you will create a directory containing a ``python3`` binary and everything needed to run 🐸STT. You can use whatever directory you want. For the purpose of the documentation, we will rely on ``$HOME/tmp/coqui-stt-venv``. You can create it using this command:

.. code-block::

-   $ virtualenv -p python3 $HOME/tmp/deepspeech-venv/
+   $ virtualenv -p python3 $HOME/tmp/coqui-stt-venv/

Once this command completes successfully, the environment will be ready to be activated.

Activating the environment
~~~~~~~~~~~~~~~~~~~~~~~~~~

-Each time you need to work with DeepSpeech, you have to *activate* this virtual environment. This is done with this simple command:
+Each time you need to work with 🐸STT, you have to *activate* this virtual environment. This is done with this simple command:

.. code-block::

-   $ source $HOME/tmp/deepspeech-venv/bin/activate
+   $ source $HOME/tmp/coqui-stt-venv/bin/activate

-Installing DeepSpeech Python bindings
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Installing Coqui STT Python bindings
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-Once your environment has been set-up and loaded, you can use ``pip3`` to manage packages locally. On a fresh setup of the ``virtualenv``\ , you will have to install the DeepSpeech wheel. You can check if ``deepspeech`` is already installed with ``pip3 list``.
+Once your environment has been set up and loaded, you can use ``pip3`` to manage packages locally. On a fresh setup of the ``virtualenv``\ , you will have to install the 🐸STT wheel. You can check if ``stt`` is already installed with ``pip3 list``.

To perform the installation, just use ``pip3`` as such:

.. code-block::

-   $ pip3 install deepspeech
+   $ pip3 install stt

-If ``deepspeech`` is already installed, you can update it as such:
+If ``stt`` is already installed, you can update it as such:

.. code-block::

-   $ pip3 install --upgrade deepspeech
+   $ pip3 install --upgrade stt

Alternatively, if you have a supported NVIDIA GPU on Linux, you can install the GPU specific package as follows:

.. code-block::

-   $ pip3 install deepspeech-gpu
+   $ pip3 install stt-gpu

-See the `release notes <https://github.com/mozilla/DeepSpeech/releases>`_ to find which GPUs are supported. Please ensure you have the required `CUDA dependency <#cuda-dependency>`_.
+See the `release notes <https://github.com/coqui-ai/STT/releases>`_ to find which GPUs are supported. Please ensure you have the required `CUDA dependency <#cuda-dependency>`_.

-You can update ``deepspeech-gpu`` as follows:
+You can update ``stt-gpu`` as follows:

.. code-block::

-   $ pip3 install --upgrade deepspeech-gpu
+   $ pip3 install --upgrade stt-gpu

-In both cases, ``pip3`` should take care of installing all the required dependencies. After installation has finished, you should be able to call ``deepspeech`` from the command-line.
+In both cases, ``pip3`` should take care of installing all the required dependencies. After installation has finished, you should be able to call ``stt`` from the command-line.

Note: the following command assumes you `downloaded the pre-trained model <#getting-the-pre-trained-model>`_.

.. code-block:: bash

-   deepspeech --model deepspeech-0.9.3-models.pbmm --scorer deepspeech-0.9.3-models.scorer --audio my_audio_file.wav
+   stt --model stt-0.9.3-models.pbmm --scorer stt-0.9.3-models.scorer --audio my_audio_file.wav

The ``--scorer`` argument is optional, and represents an external language model to be used when transcribing the audio.
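+The same transcription can be done programmatically with the Python API. A minimal sketch, assuming a 16-bit, mono WAV file recorded at the sample rate the model expects (16kHz for the release models):
+
+.. code-block:: python
+
+   import wave
+
+   import numpy as np
+   from stt import Model
+
+   model = Model('stt-0.9.3-models.pbmm')
+   model.enableExternalScorer('stt-0.9.3-models.scorer')
+
+   # Read the samples into a 16-bit integer buffer
+   with wave.open('my_audio_file.wav') as wav:
+       audio = np.frombuffer(wav.readframes(wav.getnframes()), dtype=np.int16)
+
+   print(model.stt(audio))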
@@ -151,7 +151,9 @@ You can download the JS bindings using ``npm``\ :

.. code-block:: bash

-   npm install deepspeech
+   npm install stt
+
+Special thanks to `Huan - Google Developers Experts in Machine Learning (ML GDE) <https://github.com/huan>`_ for providing the STT project name on npmjs.org

Please note that as of now, we support:

- Node.JS versions 4 to 13.
@@ -163,9 +165,9 @@ Alternatively, if you're using Linux and have a supported NVIDIA GPU, you can in

.. code-block:: bash

-   npm install deepspeech-gpu
+   npm install stt-gpu

-See the `release notes <https://github.com/mozilla/DeepSpeech/releases>`_ to find which GPUs are supported. Please ensure you have the required `CUDA dependency <#cuda-dependency>`_.
+See the `release notes <https://github.com/coqui-ai/STT/releases>`_ to find which GPUs are supported. Please ensure you have the required `CUDA dependency <#cuda-dependency>`_.

See the :ref:`TypeScript client ` for an example of how to use the bindings programmatically.

@@ -174,7 +176,7 @@ See the :ref:`TypeScript client ` for an example of how to use t

Using the command-line client
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-To download the pre-built binaries for the ``deepspeech`` command-line (compiled C++) client, use ``util/taskcluster.py``\ :
+To download the pre-built binaries for the ``stt`` command-line (compiled C++) client, use ``util/taskcluster.py``\ :

.. code-block:: bash

@@ -192,17 +194,17 @@ also, if you need some binaries different than current master, like ``v0.2.0-alp

   python3 util/taskcluster.py --branch "v0.2.0-alpha.6" --target "."

-The script ``taskcluster.py`` will download ``native_client.tar.xz`` (which includes the ``deepspeech`` binary and associated libraries) and extract it into the current folder. Also, ``taskcluster.py`` will download binaries for Linux/x86_64 by default, but you can override that behavior with the ``--arch`` parameter. See the help info with ``python util/taskcluster.py -h`` for more details. Specific branches of DeepSpeech or TensorFlow can be specified as well.
+The script ``taskcluster.py`` will download ``native_client.tar.xz`` (which includes the ``stt`` binary and associated libraries) and extract it into the current folder. Also, ``taskcluster.py`` will download binaries for Linux/x86_64 by default, but you can override that behavior with the ``--arch`` parameter. See the help info with ``python util/taskcluster.py -h`` for more details. Specific branches of 🐸STT or TensorFlow can be specified as well.

-Alternatively you may manually download the ``native_client.tar.xz`` from the [releases](https://github.com/mozilla/DeepSpeech/releases).
+Alternatively, you may manually download the ``native_client.tar.xz`` from the `releases page <https://github.com/coqui-ai/STT/releases>`_.

Note: the following command assumes you `downloaded the pre-trained model <#getting-the-pre-trained-model>`_.

.. code-block:: bash

-   ./deepspeech --model deepspeech-0.9.3-models.pbmm --scorer deepspeech-0.9.3-models.scorer --audio audio_input.wav
+   ./stt --model coqui-stt-0.9.3-models.pbmm --scorer coqui-stt-0.9.3-models.scorer --audio audio_input.wav

-See the help output with ``./deepspeech -h`` for more details.
+See the help output with ``./stt -h`` for more details.

Installing bindings from source
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

@@ -212,28 +214,27 @@ If pre-built binaries aren't available for your system, you'll need to install t

Dockerfile for building from source
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-We provide ``Dockerfile.build`` to automatically build ``libdeepspeech.so``, the C++ native client, Python bindings, and KenLM.
+We provide ``Dockerfile.build`` to automatically build ``libstt.so``, the C++ native client, Python bindings, and KenLM.

You need to generate the Dockerfile from the template using:

.. code-block:: bash

   make Dockerfile.build

-If you want to specify a different DeepSpeech repository / branch, you can pass ``DEEPSPEECH_REPO`` or ``DEEPSPEECH_SHA`` parameters:
+If you want to specify a different repository / branch, you can pass ``STT_REPO`` or ``STT_SHA`` parameters:

.. code-block:: bash

-   make Dockerfile.build DEEPSPEECH_REPO=git://your/fork DEEPSPEECH_SHA=origin/your-branch
+   make Dockerfile.build STT_REPO=git://your/fork STT_SHA=origin/your-branch

-Third party bindings
-^^^^^^^^^^^^^^^^^^^^
+.. Third party bindings
+   ^^^^^^^^^^^^^^^^^^^^

-In addition to the bindings above, third party developers have started to provide bindings to other languages:
+   In addition to the bindings above, third party developers have started to provide bindings to other languages:
-
-* `Asticode `_ provides `Golang `_ bindings in its `go-astideepspeech `_ repo.
-* `RustAudio `_ provide a `Rust `_ binding, the installation and use of which is described in their `deepspeech-rs `_ repo.
-* `stes `_ provides preliminary `PKGBUILDs `_ to install the client and python bindings on `Arch Linux `_ in the `arch-deepspeech `_ repo.
-* `gst-deepspeech `_ provides a `GStreamer `_ plugin which can be used from any language with GStreamer bindings.
-* `thecodrr `_ provides `Vlang `_ bindings. The installation and use of which is described in their `vspeech `_ repo.
-* `eagledot `_ provides `NIM-lang `_ bindings. The installation and use of which is described in their `nim-deepspeech `_ repo.
+   * `Asticode `_ provides `Golang `_ bindings in its `go-astideepspeech `_ repo.
+   * `RustAudio `_ provide a `Rust `_ binding, the installation and use of which is described in their `deepspeech-rs `_ repo.
+ * `stes `_ provides preliminary `PKGBUILDs `_ to install the client and python bindings on `Arch Linux `_ in the `arch-deepspeech `_ repo. + * `gst-deepspeech `_ provides a `GStreamer `_ plugin which can be used from any language with GStreamer bindings. + * `thecodrr `_ provides `Vlang `_ bindings. The installation and use of which is described in their `vspeech `_ repo. + * `eagledot `_ provides `NIM-lang `_ bindings. The installation and use of which is described in their `nim-deepspeech `_ repo. diff --git a/doc/conf.py b/doc/conf.py index 401ba08b..45fa6d49 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# DeepSpeech documentation build configuration file, created by +# Coqui STT documentation build configuration file, created by # sphinx-quickstart on Thu Feb 2 21:20:39 2017. # # This file is execfile()d with the current directory set to its @@ -24,7 +24,7 @@ import sys sys.path.insert(0, os.path.abspath('../')) -autodoc_mock_imports = ['deepspeech'] +autodoc_mock_imports = ['stt'] # This is in fact only relevant on ReadTheDocs, but we want to run the same way # on our CI as in RTD to avoid regressions on RTD that we would not catch on @@ -45,9 +45,9 @@ import semver # -- Project information ----------------------------------------------------- -project = u'DeepSpeech' -copyright = '2019-2020 Mozilla Corporation, 2020 DeepSpeech authors' -author = 'DeepSpeech authors' +project = u'Coqui STT' +copyright = '2019-2020 Mozilla Corporation, 2020 DeepSpeech authors, 2021 Coqui GmbH' +author = 'Coqui GmbH' with open('../VERSION', 'r') as ver: v = ver.read().strip() @@ -147,7 +147,7 @@ html_static_path = ['.static'] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. -htmlhelp_basename = 'DeepSpeechdoc' +htmlhelp_basename = 'STTdoc' # -- Options for LaTeX output --------------------------------------------- @@ -174,8 +174,8 @@ latex_elements = { # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'DeepSpeech.tex', u'DeepSpeech Documentation', - u'DeepSpeech authors', 'manual'), + (master_doc, 'STT.tex', u'Coqui STT Documentation', + u'Coqui GmbH', 'manual'), ] @@ -184,7 +184,7 @@ latex_documents = [ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ - (master_doc, 'deepspeech', u'DeepSpeech Documentation', + (master_doc, 'stt', u'Coqui STT Documentation', [author], 1) ] @@ -195,8 +195,8 @@ man_pages = [ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'DeepSpeech', u'DeepSpeech Documentation', - author, 'DeepSpeech', 'One line description of project.', + (master_doc, 'STT', u'Coqui STT Documentation', + author, 'STT', 'One line description of project.', 'Miscellaneous'), ] @@ -206,5 +206,5 @@ texinfo_documents = [ # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'https://docs.python.org/': None} -extlinks = {'github': ('https://github.com/mozilla/DeepSpeech/blob/v{}/%s'.format(release), +extlinks = {'github': ('https://github.com/coqui-ai/STT/blob/v{}/%s'.format(release), '%s')} diff --git a/doc/doxygen-c.conf b/doc/doxygen-c.conf index f36f57b2..ec2ac239 100644 --- a/doc/doxygen-c.conf +++ b/doc/doxygen-c.conf @@ -790,7 +790,7 @@ WARN_LOGFILE = # spaces. 
See also FILE_PATTERNS and EXTENSION_MAPPING # Note: If this tag is empty the current directory is searched. -INPUT = native_client/deepspeech.h +INPUT = native_client/coqui-stt.h # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses diff --git a/doc/index.rst b/doc/index.rst index 33285c67..a22b8f21 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -1,54 +1,54 @@ -.. DeepSpeech documentation master file, created by +.. Coqui STT documentation master file, created by sphinx-quickstart on Thu Feb 2 21:20:39 2017. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. -Welcome to DeepSpeech's documentation! -====================================== +Coqui STT +========= -DeepSpeech is an open source Speech-To-Text engine, using a model trained by machine learning techniques based on `Baidu's Deep Speech research paper `_. Project DeepSpeech uses Google's `TensorFlow `_ to make the implementation easier. +Coqui STT (🐸STT) is an open source Speech-To-Text engine, using a model trained by machine learning techniques based on `Baidu's Deep Speech research paper `_. 🐸STT uses Google's `TensorFlow `_ to make the implementation easier. -To install and use DeepSpeech all you have to do is: +To install and use 🐸STT all you have to do is: .. code-block:: bash # Create and activate a virtualenv - virtualenv -p python3 $HOME/tmp/deepspeech-venv/ - source $HOME/tmp/deepspeech-venv/bin/activate + virtualenv -p python3 $HOME/tmp/stt/ + source $HOME/tmp/stt/bin/activate - # Install DeepSpeech - pip3 install deepspeech + # Install 🐸STT + pip3 install stt # Download pre-trained English model files - curl -LO https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models.pbmm - curl -LO https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models.scorer + curl -LO https://github.com/coqui-ai/STT/releases/download/v0.9.3/coqui-stt-0.9.3-models.pbmm + curl -LO https://github.com/coqui-ai/STT/releases/download/v0.9.3/coqui-stt-0.9.3-models.scorer # Download example audio files - curl -LO https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/audio-0.9.3.tar.gz + curl -LO https://github.com/coqui-ai/STT/releases/download/v0.9.3/audio-0.9.3.tar.gz tar xvf audio-0.9.3.tar.gz # Transcribe an audio file - deepspeech --model deepspeech-0.9.3-models.pbmm --scorer deepspeech-0.9.3-models.scorer --audio audio/2830-3980-0043.wav + stt --model coqui-stt-0.9.3-models.pbmm --scorer coqui-stt-0.9.3-models.scorer --audio audio/2830-3980-0043.wav -A pre-trained English model is available for use and can be downloaded following the instructions in :ref:`the usage docs `. For the latest release, including pre-trained models and checkpoints, `see the GitHub releases page `_. +A pre-trained English model is available for use and can be downloaded following the instructions in :ref:`the usage docs `. For the latest release, including pre-trained models and checkpoints, `see the GitHub releases page `_. -Quicker inference can be performed using a supported NVIDIA GPU on Linux. See the `release notes `_ to find which GPUs are supported. To run ``deepspeech`` on a GPU, install the GPU specific package: +Quicker inference can be performed using a supported NVIDIA GPU on Linux. See the `release notes `_ to find which GPUs are supported. To run ``stt`` on a GPU, install the GPU specific package: .. 
code-block:: bash # Create and activate a virtualenv - virtualenv -p python3 $HOME/tmp/deepspeech-gpu-venv/ - source $HOME/tmp/deepspeech-gpu-venv/bin/activate + virtualenv -p python3 $HOME/tmp/coqui-stt-gpu-venv/ + source $HOME/tmp/coqui-stt-gpu-venv/bin/activate - # Install DeepSpeech CUDA enabled package - pip3 install deepspeech-gpu + # Install 🐸STT CUDA enabled package + pip3 install stt-gpu # Transcribe an audio file. - deepspeech --model deepspeech-0.9.3-models.pbmm --scorer deepspeech-0.9.3-models.scorer --audio audio/2830-3980-0043.wav + stt --model coqui-stt-0.9.3-models.pbmm --scorer coqui-stt-0.9.3-models.scorer --audio audio/2830-3980-0043.wav Please ensure you have the required :ref:`CUDA dependencies `. -See the output of ``deepspeech -h`` for more information on the use of ``deepspeech``. (If you experience problems running ``deepspeech``, please check :ref:`required runtime dependencies `). +See the output of ``stt -h`` for more information on the use of ``stt``. (If you experience problems running ``stt``, please check :ref:`required runtime dependencies `). .. toctree:: :maxdepth: 2 @@ -78,7 +78,7 @@ See the output of ``deepspeech -h`` for more information on the use of ``deepspe :maxdepth: 2 :caption: Architecture and training - DeepSpeech + Architecture Geometry diff --git a/doc/make.bat b/doc/make.bat index cfcbc831..277fcf31 100644 --- a/doc/make.bat +++ b/doc/make.bat @@ -9,7 +9,7 @@ if "%SPHINXBUILD%" == "" ( ) set SOURCEDIR=. set BUILDDIR=.build -set SPHINXPROJ=DeepSpeech +set SPHINXPROJ="Coqui STT" if "%1" == "" goto help diff --git a/evaluate.py b/evaluate.py index dc502542..eca856b2 100644 --- a/evaluate.py +++ b/evaluate.py @@ -4,7 +4,7 @@ from __future__ import absolute_import, division, print_function if __name__ == '__main__': try: - from deepspeech_training import evaluate as ds_evaluate + from coqui_stt_training import evaluate as ds_evaluate except ImportError: print('Training package is not installed. See training documentation.') raise diff --git a/evaluate_tflite.py b/evaluate_tflite.py index 0d462615..e9b44725 100644 --- a/evaluate_tflite.py +++ b/evaluate_tflite.py @@ -11,8 +11,8 @@ import os import sys from deepspeech import Model -from deepspeech_training.util.evaluate_tools import calculate_and_print_report -from deepspeech_training.util.flags import create_flags +from coqui_stt_training.util.evaluate_tools import calculate_and_print_report +from coqui_stt_training.util.flags import create_flags from functools import partial from multiprocessing import JoinableQueue, Process, cpu_count, Manager from six.moves import zip, range diff --git a/examples/README.rst b/examples/README.rst index f5ebb1bd..2d71bc17 100644 --- a/examples/README.rst +++ b/examples/README.rst @@ -1,6 +1,6 @@ Examples ======== -DeepSpeech examples were moved to a separate repository. +🐸STT examples were moved to a separate repository. 
-New location: https://github.com/mozilla/DeepSpeech-examples +New location: https://github.com/coqui-ai/STT-examples diff --git a/lm_optimizer.py b/lm_optimizer.py index 25d8a05e..74a02dc7 100644 --- a/lm_optimizer.py +++ b/lm_optimizer.py @@ -7,12 +7,12 @@ import optuna import sys import tensorflow.compat.v1 as tfv1 -from deepspeech_training.evaluate import evaluate -from deepspeech_training.train import create_model -from deepspeech_training.util.config import Config, initialize_globals -from deepspeech_training.util.flags import create_flags, FLAGS -from deepspeech_training.util.logging import log_error -from deepspeech_training.util.evaluate_tools import wer_cer_batch +from coqui_stt_training.evaluate import evaluate +from coqui_stt_training.train import create_model +from coqui_stt_training.util.config import Config, initialize_globals +from coqui_stt_training.util.flags import create_flags, FLAGS +from coqui_stt_training.util.logging import log_error +from coqui_stt_training.util.evaluate_tools import wer_cer_batch from ds_ctcdecoder import Scorer diff --git a/native_client/BUILD b/native_client/BUILD index d25454a1..ead08f6a 100644 --- a/native_client/BUILD +++ b/native_client/BUILD @@ -1,4 +1,4 @@ -# Description: Deepspeech native client library. +# Description: Coqui STT native client library. load("@org_tensorflow//tensorflow:tensorflow.bzl", "tf_cc_shared_object", "tf_copts", "lrt_if_needed") load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda") @@ -112,10 +112,10 @@ cc_library( ) cc_library( - name = "deepspeech_bundle", + name = "coqui_stt_bundle", srcs = [ "deepspeech.cc", - "deepspeech.h", + "coqui-stt.h", "deepspeech_errors.cc", "modelstate.cc", "modelstate.h", @@ -165,7 +165,7 @@ cc_library( #"//tensorflow/core:all_kernels", ### => Trying to be more fine-grained ### Use bin/ops_in_graph.py to list all the ops used by a frozen graph. - ### CPU only build, libdeepspeech.so file size reduced by ~50% + ### CPU only build, libstt.so file size reduced by ~50% "//tensorflow/core/kernels:spectrogram_op", # AudioSpectrogram "//tensorflow/core/kernels:bias_op", # BiasAdd "//tensorflow/core/kernels:cast_op", # Cast @@ -205,24 +205,24 @@ cc_library( ) tf_cc_shared_object( - name = "libdeepspeech.so", - deps = [":deepspeech_bundle"], + name = "libstt.so", + deps = [":coqui_stt_bundle"], ) ios_static_framework( - name = "deepspeech_ios", - deps = [":deepspeech_bundle"], + name = "coqui_stt_ios", + deps = [":coqui_stt_bundle"], families = ["iphone", "ipad"], minimum_os_version = "9.0", linkopts = ["-lstdc++"], ) genrule( - name = "libdeepspeech_so_dsym", - srcs = [":libdeepspeech.so"], - outs = ["libdeepspeech.so.dSYM"], + name = "libstt_so_dsym", + srcs = [":libstt.so"], + outs = ["libstt.so.dSYM"], output_to_bindir = True, - cmd = "dsymutil $(location :libdeepspeech.so) -o $@" + cmd = "dsymutil $(location :libstt.so) -o $@" ) cc_binary( diff --git a/native_client/CODINGSTYLE.md b/native_client/CODINGSTYLE.md index ddb8fc82..127b959d 100644 --- a/native_client/CODINGSTYLE.md +++ b/native_client/CODINGSTYLE.md @@ -1,5 +1,5 @@ This file contains some notes on coding style within the C++ portion of the -DeepSpeech project. It is very much a work in progress and incomplete. +🐸STT project. It is very much a work in progress and incomplete. 
General ======= diff --git a/native_client/Makefile b/native_client/Makefile index b645499c..15e1092f 100644 --- a/native_client/Makefile +++ b/native_client/Makefile @@ -13,35 +13,35 @@ include definitions.mk -default: $(DEEPSPEECH_BIN) +default: $(STT_BIN) clean: rm -f deepspeech -$(DEEPSPEECH_BIN): client.cc Makefile +$(STT_BIN): client.cc Makefile $(CXX) $(CFLAGS) $(CFLAGS_DEEPSPEECH) $(SOX_CFLAGS) client.cc $(LDFLAGS) $(SOX_LDFLAGS) ifeq ($(OS),Darwin) - install_name_tool -change bazel-out/local-opt/bin/native_client/libdeepspeech.so @rpath/libdeepspeech.so deepspeech + install_name_tool -change bazel-out/local-opt/bin/native_client/libstt.so @rpath/libstt.so stt endif -run: $(DEEPSPEECH_BIN) - ${META_LD_LIBRARY_PATH}=${TFDIR}/bazel-bin/native_client:${${META_LD_LIBRARY_PATH}} ./deepspeech ${ARGS} +run: $(STT_BIN) + ${META_LD_LIBRARY_PATH}=${TFDIR}/bazel-bin/native_client:${${META_LD_LIBRARY_PATH}} ./stt ${ARGS} -debug: $(DEEPSPEECH_BIN) - ${META_LD_LIBRARY_PATH}=${TFDIR}/bazel-bin/native_client:${${META_LD_LIBRARY_PATH}} gdb --args ./deepspeech ${ARGS} +debug: $(STT_BIN) + ${META_LD_LIBRARY_PATH}=${TFDIR}/bazel-bin/native_client:${${META_LD_LIBRARY_PATH}} gdb --args ./stt ${ARGS} -install: $(DEEPSPEECH_BIN) +install: $(STT_BIN) install -d ${PREFIX}/lib install -m 0644 ${TFDIR}/bazel-bin/native_client/libdeepspeech.so ${PREFIX}/lib/ install -d ${PREFIX}/include - install -m 0644 deepspeech.h ${PREFIX}/include + install -m 0644 coqui-stt.h ${PREFIX}/include install -d ${PREFIX}/bin - install -m 0755 deepspeech ${PREFIX}/bin/ + install -m 0755 stt ${PREFIX}/bin/ uninstall: - rm -f ${PREFIX}/bin/deepspeech + rm -f ${PREFIX}/bin/stt rmdir --ignore-fail-on-non-empty ${PREFIX}/bin - rm -f ${PREFIX}/lib/libdeepspeech.so + rm -f ${PREFIX}/lib/libstt.so rmdir --ignore-fail-on-non-empty ${PREFIX}/lib print-toolchain: diff --git a/native_client/args.h b/native_client/args.h index 069347e0..04c5eb88 100644 --- a/native_client/args.h +++ b/native_client/args.h @@ -8,7 +8,7 @@ #endif #include -#include "deepspeech.h" +#include "coqui-stt.h" char* model = NULL; @@ -47,7 +47,7 @@ void PrintHelp(const char* bin) std::cout << "Usage: " << bin << " --model MODEL [--scorer SCORER] --audio AUDIO [-t] [-e]\n" "\n" - "Running DeepSpeech inference.\n" + "Running Coqui STT inference.\n" "\n" "\t--model MODEL\t\t\tPath to the model (protocol buffer binary file)\n" "\t--scorer SCORER\t\t\tPath to the external scorer file\n" @@ -65,7 +65,7 @@ void PrintHelp(const char* bin) "\t--help\t\t\t\tShow help\n" "\t--version\t\t\tPrint version and exits\n"; char* version = DS_Version(); - std::cerr << "DeepSpeech " << version << "\n"; + std::cerr << "Coqui STT " << version << "\n"; DS_FreeString(version); exit(1); } @@ -170,7 +170,7 @@ bool ProcessArgs(int argc, char** argv) if (has_versions) { char* version = DS_Version(); - std::cout << "DeepSpeech " << version << "\n"; + std::cout << "Coqui " << version << "\n"; DS_FreeString(version); return false; } diff --git a/native_client/bazel_workspace_status_cmd.sh b/native_client/bazel_workspace_status_cmd.sh index a1a5a2a0..1af17e26 100755 --- a/native_client/bazel_workspace_status_cmd.sh +++ b/native_client/bazel_workspace_status_cmd.sh @@ -22,8 +22,8 @@ echo "STABLE_TF_GIT_VERSION ${tf_git_rev}" pushd $(dirname "$0") ds_git_rev=$(git describe --long --tags) echo "STABLE_DS_GIT_VERSION ${ds_git_rev}" -ds_version=$(cat ../training/deepspeech_training/VERSION) +ds_version=$(cat ../training/coqui_stt_training/VERSION) echo "STABLE_DS_VERSION ${ds_version}" 
-ds_graph_version=$(cat ../training/deepspeech_training/GRAPH_VERSION) +ds_graph_version=$(cat ../training/coqui_stt_training/GRAPH_VERSION) echo "STABLE_DS_GRAPH_VERSION ${ds_graph_version}" popd diff --git a/native_client/client.cc b/native_client/client.cc index 7d88b4d6..70c199e7 100644 --- a/native_client/client.cc +++ b/native_client/client.cc @@ -34,7 +34,7 @@ #endif // NO_DIR #include -#include "deepspeech.h" +#include "coqui-stt.h" #include "args.h" typedef struct { @@ -406,7 +406,7 @@ ProcessFile(ModelState* context, const char* path, bool show_times) { ds_audio_buffer audio = GetAudioBuffer(path, DS_GetModelSampleRate(context)); - // Pass audio to DeepSpeech + // Pass audio to STT // We take half of buffer_size because buffer is a char* while // LocalDsSTT() expected a short* ds_result result = LocalDsSTT(context, @@ -450,7 +450,7 @@ main(int argc, char **argv) return 1; } - // Initialise DeepSpeech + // Initialise STT ModelState* ctx; // sphinx-doc: c_ref_model_start int status = DS_CreateModel(model, &ctx); diff --git a/native_client/deepspeech.h b/native_client/coqui-stt.h similarity index 93% rename from native_client/deepspeech.h rename to native_client/coqui-stt.h index fbec4721..24c7ef66 100644 --- a/native_client/deepspeech.h +++ b/native_client/coqui-stt.h @@ -1,5 +1,5 @@ -#ifndef DEEPSPEECH_H -#define DEEPSPEECH_H +#ifndef COQUI_STT_H +#define COQUI_STT_H #ifdef __cplusplus extern "C" { @@ -7,12 +7,12 @@ extern "C" { #ifndef SWIG #if defined _MSC_VER - #define DEEPSPEECH_EXPORT __declspec(dllexport) + #define STT_EXPORT __declspec(dllexport) #else - #define DEEPSPEECH_EXPORT __attribute__ ((visibility("default"))) + #define STT_EXPORT __attribute__ ((visibility("default"))) #endif /*End of _MSC_VER*/ #else - #define DEEPSPEECH_EXPORT + #define STT_EXPORT #endif typedef struct ModelState ModelState; @@ -96,14 +96,14 @@ DS_FOR_EACH_ERROR(DEFINE) }; /** - * @brief An object providing an interface to a trained DeepSpeech model. + * @brief An object providing an interface to a trained Coqui STT model. * * @param aModelPath The path to the frozen model graph. * @param[out] retval a ModelState pointer * * @return Zero on success, non-zero on failure. */ -DEEPSPEECH_EXPORT +STT_EXPORT int DS_CreateModel(const char* aModelPath, ModelState** retval); @@ -116,7 +116,7 @@ int DS_CreateModel(const char* aModelPath, * * @return Beam width value used by the model. */ -DEEPSPEECH_EXPORT +STT_EXPORT unsigned int DS_GetModelBeamWidth(const ModelState* aCtx); /** @@ -128,7 +128,7 @@ unsigned int DS_GetModelBeamWidth(const ModelState* aCtx); * * @return Zero on success, non-zero on failure. */ -DEEPSPEECH_EXPORT +STT_EXPORT int DS_SetModelBeamWidth(ModelState* aCtx, unsigned int aBeamWidth); @@ -139,13 +139,13 @@ int DS_SetModelBeamWidth(ModelState* aCtx, * * @return Sample rate expected by the model for its input. */ -DEEPSPEECH_EXPORT +STT_EXPORT int DS_GetModelSampleRate(const ModelState* aCtx); /** * @brief Frees associated resources and destroys model object. */ -DEEPSPEECH_EXPORT +STT_EXPORT void DS_FreeModel(ModelState* ctx); /** @@ -156,7 +156,7 @@ void DS_FreeModel(ModelState* ctx); * * @return Zero on success, non-zero on failure (invalid arguments). */ -DEEPSPEECH_EXPORT +STT_EXPORT int DS_EnableExternalScorer(ModelState* aCtx, const char* aScorerPath); @@ -171,7 +171,7 @@ int DS_EnableExternalScorer(ModelState* aCtx, * * @return Zero on success, non-zero on failure (invalid arguments). 
*/ -DEEPSPEECH_EXPORT +STT_EXPORT int DS_AddHotWord(ModelState* aCtx, const char* word, float boost); @@ -184,7 +184,7 @@ int DS_AddHotWord(ModelState* aCtx, * * @return Zero on success, non-zero on failure (invalid arguments). */ -DEEPSPEECH_EXPORT +STT_EXPORT int DS_EraseHotWord(ModelState* aCtx, const char* word); @@ -195,7 +195,7 @@ int DS_EraseHotWord(ModelState* aCtx, * * @return Zero on success, non-zero on failure (invalid arguments). */ -DEEPSPEECH_EXPORT +STT_EXPORT int DS_ClearHotWords(ModelState* aCtx); /** @@ -205,7 +205,7 @@ int DS_ClearHotWords(ModelState* aCtx); * * @return Zero on success, non-zero on failure. */ -DEEPSPEECH_EXPORT +STT_EXPORT int DS_DisableExternalScorer(ModelState* aCtx); /** @@ -217,13 +217,13 @@ int DS_DisableExternalScorer(ModelState* aCtx); * * @return Zero on success, non-zero on failure. */ -DEEPSPEECH_EXPORT +STT_EXPORT int DS_SetScorerAlphaBeta(ModelState* aCtx, float aAlpha, float aBeta); /** - * @brief Use the DeepSpeech model to convert speech to text. + * @brief Use the Coqui STT model to convert speech to text. * * @param aCtx The ModelState pointer for the model to use. * @param aBuffer A 16-bit, mono raw audio signal at the appropriate @@ -233,13 +233,13 @@ int DS_SetScorerAlphaBeta(ModelState* aCtx, * @return The STT result. The user is responsible for freeing the string using * {@link DS_FreeString()}. Returns NULL on error. */ -DEEPSPEECH_EXPORT +STT_EXPORT char* DS_SpeechToText(ModelState* aCtx, const short* aBuffer, unsigned int aBufferSize); /** - * @brief Use the DeepSpeech model to convert speech to text and output results + * @brief Use the Coqui STT model to convert speech to text and output results * including metadata. * * @param aCtx The ModelState pointer for the model to use. @@ -253,7 +253,7 @@ char* DS_SpeechToText(ModelState* aCtx, * user is responsible for freeing Metadata by calling {@link DS_FreeMetadata()}. * Returns NULL on error. */ -DEEPSPEECH_EXPORT +STT_EXPORT Metadata* DS_SpeechToTextWithMetadata(ModelState* aCtx, const short* aBuffer, unsigned int aBufferSize, @@ -270,7 +270,7 @@ Metadata* DS_SpeechToTextWithMetadata(ModelState* aCtx, * * @return Zero for success, non-zero on failure. */ -DEEPSPEECH_EXPORT +STT_EXPORT int DS_CreateStream(ModelState* aCtx, StreamingState** retval); @@ -282,7 +282,7 @@ int DS_CreateStream(ModelState* aCtx, * appropriate sample rate (matching what the model was trained on). * @param aBufferSize The number of samples in @p aBuffer. */ -DEEPSPEECH_EXPORT +STT_EXPORT void DS_FeedAudioContent(StreamingState* aSctx, const short* aBuffer, unsigned int aBufferSize); @@ -295,7 +295,7 @@ void DS_FeedAudioContent(StreamingState* aSctx, * @return The STT intermediate result. The user is responsible for freeing the * string using {@link DS_FreeString()}. */ -DEEPSPEECH_EXPORT +STT_EXPORT char* DS_IntermediateDecode(const StreamingState* aSctx); /** @@ -310,7 +310,7 @@ char* DS_IntermediateDecode(const StreamingState* aSctx); * responsible for freeing Metadata by calling {@link DS_FreeMetadata()}. * Returns NULL on error. */ -DEEPSPEECH_EXPORT +STT_EXPORT Metadata* DS_IntermediateDecodeWithMetadata(const StreamingState* aSctx, unsigned int aNumResults); @@ -325,7 +325,7 @@ Metadata* DS_IntermediateDecodeWithMetadata(const StreamingState* aSctx, * * @note This method will free the state pointer (@p aSctx). 
*/ -DEEPSPEECH_EXPORT +STT_EXPORT char* DS_FinishStream(StreamingState* aSctx); /** @@ -343,7 +343,7 @@ char* DS_FinishStream(StreamingState* aSctx); * * @note This method will free the state pointer (@p aSctx). */ -DEEPSPEECH_EXPORT +STT_EXPORT Metadata* DS_FinishStreamWithMetadata(StreamingState* aSctx, unsigned int aNumResults); @@ -356,19 +356,19 @@ Metadata* DS_FinishStreamWithMetadata(StreamingState* aSctx, * * @note This method will free the state pointer (@p aSctx). */ -DEEPSPEECH_EXPORT +STT_EXPORT void DS_FreeStream(StreamingState* aSctx); /** * @brief Free memory allocated for metadata information. */ -DEEPSPEECH_EXPORT +STT_EXPORT void DS_FreeMetadata(Metadata* m); /** - * @brief Free a char* string returned by the DeepSpeech API. + * @brief Free a char* string returned by the Coqui STT API. */ -DEEPSPEECH_EXPORT +STT_EXPORT void DS_FreeString(char* str); /** @@ -377,7 +377,7 @@ void DS_FreeString(char* str); * * @return The version string. */ -DEEPSPEECH_EXPORT +STT_EXPORT char* DS_Version(); /** @@ -386,13 +386,13 @@ char* DS_Version(); * * @return The error description. */ -DEEPSPEECH_EXPORT +STT_EXPORT char* DS_ErrorCodeToErrorMessage(int aErrorCode); -#undef DEEPSPEECH_EXPORT +#undef STT_EXPORT #ifdef __cplusplus } #endif -#endif /* DEEPSPEECH_H */ +#endif /* COQUI_STT_H */ diff --git a/native_client/ctcdecode/scorer.cpp b/native_client/ctcdecode/scorer.cpp index 5f25a335..b77c63f7 100644 --- a/native_client/ctcdecode/scorer.cpp +++ b/native_client/ctcdecode/scorer.cpp @@ -125,7 +125,7 @@ int Scorer::load_trie(std::ifstream& fin, const std::string& file_path) if (version < FILE_VERSION) { std::cerr << "Update your scorer file."; } else { - std::cerr << "Downgrade your scorer file or update your version of DeepSpeech."; + std::cerr << "Downgrade your scorer file or update your version of Coqui STT."; } std::cerr << std::endl; return DS_ERR_SCORER_VERSION_MISMATCH; diff --git a/native_client/ctcdecode/scorer.h b/native_client/ctcdecode/scorer.h index 5aee1046..67ea96d3 100644 --- a/native_client/ctcdecode/scorer.h +++ b/native_client/ctcdecode/scorer.h @@ -13,7 +13,7 @@ #include "path_trie.h" #include "alphabet.h" -#include "deepspeech.h" +#include "coqui-stt.h" const double OOV_SCORE = -1000.0; const std::string START_TOKEN = ""; diff --git a/native_client/ctcdecode/setup.py b/native_client/ctcdecode/setup.py index 82e702a8..e18578af 100644 --- a/native_client/ctcdecode/setup.py +++ b/native_client/ctcdecode/setup.py @@ -51,7 +51,7 @@ def maybe_rebuild(srcs, out_name, build_dir): num_parallel=known_args.num_processes, debug=debug) -project_version = read('../../training/deepspeech_training/VERSION').strip() +project_version = read('../../training/coqui_stt_training/VERSION').strip() build_dir = 'temp_build/temp_build' diff --git a/native_client/ctcdecode/swigwrapper.i b/native_client/ctcdecode/swigwrapper.i index 683a3426..facc83eb 100644 --- a/native_client/ctcdecode/swigwrapper.i +++ b/native_client/ctcdecode/swigwrapper.i @@ -44,14 +44,14 @@ namespace std { %constant const char* __version__ = ds_version(); %constant const char* __git_version__ = ds_git_version(); -// Import only the error code enum definitions from deepspeech.h +// Import only the error code enum definitions from coqui-stt.h // We can't just do |%ignore "";| here because it affects this file globally (even // files %include'd above). That causes SWIG to lose destructor information and // leads to leaks of the wrapper objects. 
// Instead we ignore functions and classes (structs), which are the only other -// things in deepspeech.h. If we add some new construct to deepspeech.h we need +// things in coqui-stt.h. If we add some new construct to coqui-stt.h we need // to update the ignore rules here to avoid exposing unwanted APIs in the decoder // package. %rename("$ignore", %$isfunction) ""; %rename("$ignore", %$isclass) ""; -%include "../deepspeech.h" +%include "../coqui-stt.h" diff --git a/native_client/deepspeech.cc b/native_client/deepspeech.cc index 57f77ba1..945f870f 100644 --- a/native_client/deepspeech.cc +++ b/native_client/deepspeech.cc @@ -9,7 +9,7 @@ #include #include -#include "deepspeech.h" +#include "coqui-stt.h" #include "alphabet.h" #include "modelstate.h" @@ -25,7 +25,7 @@ #ifdef __ANDROID__ #include -#define LOG_TAG "libdeepspeech" +#define LOG_TAG "libstt" #define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__) #define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__) #else @@ -269,12 +269,12 @@ DS_CreateModel(const char* aModelPath, *retval = nullptr; std::cerr << "TensorFlow: " << tf_local_git_version() << std::endl; - std::cerr << "DeepSpeech: " << ds_git_version() << std::endl; + std::cerr << " Coqui STT: " << ds_git_version() << std::endl; #ifdef __ANDROID__ LOGE("TensorFlow: %s", tf_local_git_version()); LOGD("TensorFlow: %s", tf_local_git_version()); - LOGE("DeepSpeech: %s", ds_git_version()); - LOGD("DeepSpeech: %s", ds_git_version()); + LOGE(" Coqui STT: %s", ds_git_version()); + LOGD(" Coqui STT: %s", ds_git_version()); #endif if (!aModelPath || strlen(aModelPath) < 1) { diff --git a/native_client/deepspeech_errors.cc b/native_client/deepspeech_errors.cc index 1f1e4d8d..912b087d 100644 --- a/native_client/deepspeech_errors.cc +++ b/native_client/deepspeech_errors.cc @@ -1,4 +1,4 @@ -#include "deepspeech.h" +#include "coqui-stt.h" #include char* diff --git a/native_client/definitions.mk b/native_client/definitions.mk index 737ec8f9..c8fee508 100644 --- a/native_client/definitions.mk +++ b/native_client/definitions.mk @@ -18,8 +18,8 @@ ifeq ($(findstring _NT,$(OS)),_NT) PLATFORM_EXE_SUFFIX := .exe endif -DEEPSPEECH_BIN := deepspeech$(PLATFORM_EXE_SUFFIX) -CFLAGS_DEEPSPEECH := -std=c++11 -o $(DEEPSPEECH_BIN) +STT_BIN := stt$(PLATFORM_EXE_SUFFIX) +CFLAGS_DEEPSPEECH := -std=c++11 -o $(STT_BIN) LINK_DEEPSPEECH := -ldeepspeech LINK_PATH_DEEPSPEECH := -L${TFDIR}/bazel-bin/native_client @@ -63,7 +63,7 @@ TOOL_LD := link.exe TOOL_LIBEXE := lib.exe LINK_DEEPSPEECH := $(TFDIR)\bazel-bin\native_client\libdeepspeech.so.if.lib LINK_PATH_DEEPSPEECH := -CFLAGS_DEEPSPEECH := -nologo -Fe$(DEEPSPEECH_BIN) +CFLAGS_DEEPSPEECH := -nologo -Fe$(STT_BIN) SOX_CFLAGS := SOX_LDFLAGS := PYTHON_PACKAGES := numpy${NUMPY_BUILD_VERSION} diff --git a/native_client/generate_scorer_package.cpp b/native_client/generate_scorer_package.cpp index 0af0bfd9..f4e7c07b 100644 --- a/native_client/generate_scorer_package.cpp +++ b/native_client/generate_scorer_package.cpp @@ -11,7 +11,7 @@ using namespace std; #include "ctcdecode/decoder_utils.h" #include "ctcdecode/scorer.h" #include "alphabet.h" -#include "deepspeech.h" +#include "coqui-stt.h" namespace po = boost::program_options; diff --git a/native_client/java/README.md b/native_client/java/README.md index 89ebc594..f21b61c4 100644 --- a/native_client/java/README.md +++ b/native_client/java/README.md @@ -1 +1 @@ -Full project description and documentation on GitHub: 
[https://github.com/mozilla/DeepSpeech](https://github.com/mozilla/DeepSpeech). +Full project description and documentation on [https://stt.readthedocs.io/](https://stt.readthedocs.io/). diff --git a/native_client/java/jni/deepspeech.i b/native_client/java/jni/deepspeech.i index cd5a97a5..99a66d71 100644 --- a/native_client/java/jni/deepspeech.i +++ b/native_client/java/jni/deepspeech.i @@ -2,7 +2,7 @@ %{ #define SWIG_FILE_WITH_INIT -#include "../../deepspeech.h" +#include "../../coqui-stt.h" %} %include "typemaps.i" @@ -71,4 +71,4 @@ %ignore "Metadata::transcripts"; %ignore "CandidateTranscript::tokens"; -%include "../deepspeech.h" +%include "../coqui-stt.h" diff --git a/native_client/javascript/Makefile b/native_client/javascript/Makefile index 05eaff55..c2670dea 100644 --- a/native_client/javascript/Makefile +++ b/native_client/javascript/Makefile @@ -2,8 +2,8 @@ NODE_BUILD_TOOL ?= node-pre-gyp NODE_ABI_TARGET ?= NODE_BUILD_VERBOSE ?= --verbose NPM_TOOL ?= npm -PROJECT_NAME ?= deepspeech -PROJECT_VERSION ?= $(shell cat ../../training/deepspeech_training/VERSION | tr -d '\n') +PROJECT_NAME ?= stt +PROJECT_VERSION ?= $(shell cat ../../training/coqui_stt_training/VERSION | tr -d '\n') NPM_ROOT ?= $(shell npm root) NODE_PRE_GYP_ABI_CROSSWALK_FILE ?= $(NPM_ROOT)/../abi_crosswalk_priv.json diff --git a/native_client/javascript/README.md b/native_client/javascript/README.md index b77e0318..f703b42a 100644 --- a/native_client/javascript/README.md +++ b/native_client/javascript/README.md @@ -1 +1,3 @@ -Full project description and documentation on [https://deepspeech.readthedocs.io/](https://deepspeech.readthedocs.io/). +Full project description and documentation on [https://stt.readthedocs.io/](https://stt.readthedocs.io/). + +Special thanks to [Huan - Google Developers Experts in Machine Learning (ML GDE)](https://github.com/huan) for providing the STT project name on npmjs.org diff --git a/native_client/javascript/client.ts b/native_client/javascript/client.ts index 3e5f1305..c396f4bf 100644 --- a/native_client/javascript/client.ts +++ b/native_client/javascript/client.ts @@ -14,7 +14,7 @@ const Duplex = require("stream").Duplex; class VersionAction extends argparse.Action { call(parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: string | string[], optionString: string | null) { - console.log('DeepSpeech ' + Ds.Version()); + console.log('Coqui STT ' + Ds.Version()); let runtime = 'Node'; if (process.versions.electron) { runtime = 'Electron'; @@ -24,7 +24,7 @@ class VersionAction extends argparse.Action { } } -let parser = new argparse.ArgumentParser({addHelp: true, description: 'Running DeepSpeech inference.'}); +let parser = new argparse.ArgumentParser({addHelp: true, description: 'Running Coqui STT inference.'}); parser.addArgument(['--model'], {required: true, help: 'Path to the model (protocol buffer binary file)'}); parser.addArgument(['--scorer'], {help: 'Path to the external scorer file'}); parser.addArgument(['--audio'], {required: true, help: 'Path to the audio file to run (WAV format)'}); diff --git a/native_client/javascript/deepspeech.i b/native_client/javascript/deepspeech.i index e311a41b..26178cf6 100644 --- a/native_client/javascript/deepspeech.i +++ b/native_client/javascript/deepspeech.i @@ -5,7 +5,7 @@ #define SWIG_FILE_WITH_INIT #include #include -#include "deepspeech.h" +#include "coqui-stt.h" using namespace v8; using namespace node; @@ -95,4 +95,4 @@ using namespace node; %rename ("%(strip:[DS_])s") ""; -%include "../deepspeech.h" +%include 
"../coqui-stt.h" diff --git a/native_client/javascript/index.ts b/native_client/javascript/index.ts index 44edec47..9ad915fb 100644 --- a/native_client/javascript/index.ts +++ b/native_client/javascript/index.ts @@ -1,7 +1,7 @@ import binary from 'node-pre-gyp'; import path from 'path'; -// 'lib', 'binding', 'v0.1.1', ['node', 'v' + process.versions.modules, process.platform, process.arch].join('-'), 'deepspeech-bindings.node') +// 'lib', 'binding', 'v0.1.1', ['node', 'v' + process.versions.modules, process.platform, process.arch].join('-'), 'stt-bindings.node') const binding_path = binary.find(path.resolve(path.join(__dirname, 'package.json'))); // On Windows, we can't rely on RPATH being set to $ORIGIN/../ or on @@ -62,7 +62,7 @@ export interface Metadata { } /** - * Provides an interface to a DeepSpeech stream. The constructor cannot be called + * Provides an interface to a Coqui STT stream. The constructor cannot be called * directly, use :js:func:`Model.createStream`. */ class StreamImpl { @@ -142,7 +142,7 @@ class StreamImpl { export type Stream = StreamImpl; /** - * An object providing an interface to a trained DeepSpeech model. + * An object providing an interface to a trained Coqui STT model. */ export class Model { /** @internal */ @@ -282,7 +282,7 @@ export class Model { } /** - * Use the DeepSpeech model to perform Speech-To-Text. + * Use the Coqui STT model to perform Speech-To-Text. * * @param aBuffer A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on). * @@ -293,7 +293,7 @@ export class Model { } /** - * Use the DeepSpeech model to perform Speech-To-Text and output metadata + * Use the Coqui STT model to perform Speech-To-Text and output metadata * about the results. * * @param aBuffer A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on). 
diff --git a/native_client/javascript/package.json.in b/native_client/javascript/package.json.in index 2494577f..f6011925 100644 --- a/native_client/javascript/package.json.in +++ b/native_client/javascript/package.json.in @@ -1,15 +1,15 @@ { "name" : "$(PROJECT_NAME)", "version" : "$(PROJECT_VERSION)", - "description" : "DeepSpeech NodeJS bindings", + "description" : "Coqui STT NodeJS bindings", "main" : "./index.js", "types": "./index.d.ts", "bin": { - "deepspeech": "./client.js" + "stt": "./client.js" }, - "author" : "DeepSpeech authors", + "author" : "Coqui GmbH", "license": "MPL-2.0", - "homepage": "https://github.com/mozilla/DeepSpeech/tree/v$(PROJECT_VERSION)#project-deepspeech", + "homepage": "https://github.com/coqui-ai/STT", "files": [ "README.md", "client.js", @@ -18,18 +18,18 @@ "lib/*" ], "bugs": { - "url": "https://github.com/mozilla/DeepSpeech/issues" + "url": "https://github.com/coqui-ai/STT/issues" }, "repository" : { "type" : "git", - "url" : "git://github.com/mozilla/DeepSpeech.git" + "url" : "git://github.com/coqui-ai/STT.git" }, "binary": { - "module_name" : "deepspeech", + "module_name" : "stt", "module_path" : "./lib/binding/v{version}/{platform}-{arch}/{node_abi}/", "remote_path" : "./v{version}/{configuration}/", "package_name": "{module_name}-v{version}-{node_abi}-{platform}-{arch}.tar.gz", - "host" : "https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.v1.0.0-warpctc.arm/artifacts/public/" + "host" : "https://host.invalid" }, "dependencies" : { "node-pre-gyp": "0.15.x", diff --git a/native_client/kenlm/README.mozilla b/native_client/kenlm/README.coqui similarity index 100% rename from native_client/kenlm/README.mozilla rename to native_client/kenlm/README.coqui diff --git a/native_client/modelstate.h b/native_client/modelstate.h index 4beb78b4..08034759 100644 --- a/native_client/modelstate.h +++ b/native_client/modelstate.h @@ -3,7 +3,7 @@ #include -#include "deepspeech.h" +#include "coqui-stt.h" #include "alphabet.h" #include "ctcdecode/scorer.h" diff --git a/native_client/python/README.rst b/native_client/python/README.rst index 04d6bb29..a4861429 100644 --- a/native_client/python/README.rst +++ b/native_client/python/README.rst @@ -1 +1 @@ -Full project description and documentation on `https://deepspeech.readthedocs.io/ `_ +Full project description and documentation on `https://stt.readthedocs.io/ `_ diff --git a/native_client/python/__init__.py b/native_client/python/__init__.py index d2aea13b..49b5b3d3 100644 --- a/native_client/python/__init__.py +++ b/native_client/python/__init__.py @@ -17,14 +17,14 @@ if platform.system().lower() == "windows": # directory for the dynamic linker os.environ['PATH'] = dslib_path + ';' + os.environ['PATH'] -import deepspeech +import stt # rename for backwards compatibility -from deepspeech.impl import Version as version +from stt.impl import Version as version class Model(object): """ - Class holding a DeepSpeech model + Class holding a Coqui STT model :param aModelPath: Path to model file to load :type aModelPath: str @@ -33,14 +33,14 @@ class Model(object): # make sure the attribute is there if CreateModel fails self._impl = None - status, impl = deepspeech.impl.CreateModel(model_path) + status, impl = stt.impl.CreateModel(model_path) if status != 0: - raise RuntimeError("CreateModel failed with '{}' (0x{:X})".format(deepspeech.impl.ErrorCodeToErrorMessage(status),status)) + raise RuntimeError("CreateModel failed with '{}' 
(0x{:X})".format(stt.impl.ErrorCodeToErrorMessage(status),status)) self._impl = impl def __del__(self): if self._impl: - deepspeech.impl.FreeModel(self._impl) + stt.impl.FreeModel(self._impl) self._impl = None def beamWidth(self): @@ -51,7 +51,7 @@ class Model(object): :return: Beam width value used by the model. :type: int """ - return deepspeech.impl.GetModelBeamWidth(self._impl) + return stt.impl.GetModelBeamWidth(self._impl) def setBeamWidth(self, beam_width): """ @@ -63,7 +63,7 @@ class Model(object): :return: Zero on success, non-zero on failure. :type: int """ - return deepspeech.impl.SetModelBeamWidth(self._impl, beam_width) + return stt.impl.SetModelBeamWidth(self._impl, beam_width) def sampleRate(self): """ @@ -72,7 +72,7 @@ class Model(object): :return: Sample rate. :type: int """ - return deepspeech.impl.GetModelSampleRate(self._impl) + return stt.impl.GetModelSampleRate(self._impl) def enableExternalScorer(self, scorer_path): """ @@ -83,9 +83,9 @@ class Model(object): :throws: RuntimeError on error """ - status = deepspeech.impl.EnableExternalScorer(self._impl, scorer_path) + status = stt.impl.EnableExternalScorer(self._impl, scorer_path) if status != 0: - raise RuntimeError("EnableExternalScorer failed with '{}' (0x{:X})".format(deepspeech.impl.ErrorCodeToErrorMessage(status),status)) + raise RuntimeError("EnableExternalScorer failed with '{}' (0x{:X})".format(stt.impl.ErrorCodeToErrorMessage(status),status)) def disableExternalScorer(self): """ @@ -93,7 +93,7 @@ class Model(object): :return: Zero on success, non-zero on failure. """ - return deepspeech.impl.DisableExternalScorer(self._impl) + return stt.impl.DisableExternalScorer(self._impl) def addHotWord(self, word, boost): """ @@ -109,9 +109,9 @@ class Model(object): :throws: RuntimeError on error """ - status = deepspeech.impl.AddHotWord(self._impl, word, boost) + status = stt.impl.AddHotWord(self._impl, word, boost) if status != 0: - raise RuntimeError("AddHotWord failed with '{}' (0x{:X})".format(deepspeech.impl.ErrorCodeToErrorMessage(status),status)) + raise RuntimeError("AddHotWord failed with '{}' (0x{:X})".format(stt.impl.ErrorCodeToErrorMessage(status),status)) def eraseHotWord(self, word): """ @@ -122,9 +122,9 @@ class Model(object): :throws: RuntimeError on error """ - status = deepspeech.impl.EraseHotWord(self._impl, word) + status = stt.impl.EraseHotWord(self._impl, word) if status != 0: - raise RuntimeError("EraseHotWord failed with '{}' (0x{:X})".format(deepspeech.impl.ErrorCodeToErrorMessage(status),status)) + raise RuntimeError("EraseHotWord failed with '{}' (0x{:X})".format(stt.impl.ErrorCodeToErrorMessage(status),status)) def clearHotWords(self): """ @@ -132,9 +132,9 @@ class Model(object): :throws: RuntimeError on error """ - status = deepspeech.impl.ClearHotWords(self._impl) + status = stt.impl.ClearHotWords(self._impl) if status != 0: - raise RuntimeError("ClearHotWords failed with '{}' (0x{:X})".format(deepspeech.impl.ErrorCodeToErrorMessage(status),status)) + raise RuntimeError("ClearHotWords failed with '{}' (0x{:X})".format(stt.impl.ErrorCodeToErrorMessage(status),status)) def setScorerAlphaBeta(self, alpha, beta): """ @@ -149,11 +149,11 @@ class Model(object): :return: Zero on success, non-zero on failure. :type: int """ - return deepspeech.impl.SetScorerAlphaBeta(self._impl, alpha, beta) + return stt.impl.SetScorerAlphaBeta(self._impl, alpha, beta) def stt(self, audio_buffer): """ - Use the DeepSpeech model to perform Speech-To-Text. + Use the Coqui STT model to perform Speech-To-Text. 
:param audio_buffer: A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on). :type audio_buffer: numpy.int16 array @@ -161,11 +161,11 @@ class Model(object): :return: The STT result. :type: str """ - return deepspeech.impl.SpeechToText(self._impl, audio_buffer) + return stt.impl.SpeechToText(self._impl, audio_buffer) def sttWithMetadata(self, audio_buffer, num_results=1): """ - Use the DeepSpeech model to perform Speech-To-Text and return results including metadata. + Use the Coqui STT model to perform Speech-To-Text and return results including metadata. :param audio_buffer: A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on). :type audio_buffer: numpy.int16 array @@ -176,7 +176,7 @@ class Model(object): :return: Metadata object containing multiple candidate transcripts. Each transcript has per-token metadata including timing information. :type: :func:`Metadata` """ - return deepspeech.impl.SpeechToTextWithMetadata(self._impl, audio_buffer, num_results) + return stt.impl.SpeechToTextWithMetadata(self._impl, audio_buffer, num_results) def createStream(self): """ @@ -188,15 +188,15 @@ class Model(object): :throws: RuntimeError on error """ - status, ctx = deepspeech.impl.CreateStream(self._impl) + status, ctx = stt.impl.CreateStream(self._impl) if status != 0: - raise RuntimeError("CreateStream failed with '{}' (0x{:X})".format(deepspeech.impl.ErrorCodeToErrorMessage(status),status)) + raise RuntimeError("CreateStream failed with '{}' (0x{:X})".format(stt.impl.ErrorCodeToErrorMessage(status),status)) return Stream(ctx) class Stream(object): """ - Class wrapping a DeepSpeech stream. The constructor cannot be called directly. + Class wrapping a stt stream. The constructor cannot be called directly. Use :func:`Model.createStream()` """ def __init__(self, native_stream): @@ -217,7 +217,7 @@ class Stream(object): """ if not self._impl: raise RuntimeError("Stream object is not valid. Trying to feed an already finished stream?") - deepspeech.impl.FeedAudioContent(self._impl, audio_buffer) + stt.impl.FeedAudioContent(self._impl, audio_buffer) def intermediateDecode(self): """ @@ -230,7 +230,7 @@ class Stream(object): """ if not self._impl: raise RuntimeError("Stream object is not valid. Trying to decode an already finished stream?") - return deepspeech.impl.IntermediateDecode(self._impl) + return stt.impl.IntermediateDecode(self._impl) def intermediateDecodeWithMetadata(self, num_results=1): """ @@ -246,7 +246,7 @@ class Stream(object): """ if not self._impl: raise RuntimeError("Stream object is not valid. Trying to decode an already finished stream?") - return deepspeech.impl.IntermediateDecodeWithMetadata(self._impl, num_results) + return stt.impl.IntermediateDecodeWithMetadata(self._impl, num_results) def finishStream(self): """ @@ -261,7 +261,7 @@ class Stream(object): """ if not self._impl: raise RuntimeError("Stream object is not valid. Trying to finish an already finished stream?") - result = deepspeech.impl.FinishStream(self._impl) + result = stt.impl.FinishStream(self._impl) self._impl = None return result @@ -282,7 +282,7 @@ class Stream(object): """ if not self._impl: raise RuntimeError("Stream object is not valid. 
Trying to finish an already finished stream?") - result = deepspeech.impl.FinishStreamWithMetadata(self._impl, num_results) + result = stt.impl.FinishStreamWithMetadata(self._impl, num_results) self._impl = None return result @@ -295,12 +295,12 @@ class Stream(object): """ if not self._impl: raise RuntimeError("Stream object is not valid. Trying to free an already finished stream?") - deepspeech.impl.FreeStream(self._impl) + stt.impl.FreeStream(self._impl) self._impl = None # This is only for documentation purpose -# Metadata, CandidateTranscript and TokenMetadata should be in sync with native_client/deepspeech.h +# Metadata, CandidateTranscript and TokenMetadata should be in sync with native_client/coqui-stt.h class TokenMetadata(object): """ Stores each individual character, along with its timing information diff --git a/native_client/python/client.py b/native_client/python/client.py index ca1c8e92..85290c76 100644 --- a/native_client/python/client.py +++ b/native_client/python/client.py @@ -10,7 +10,7 @@ import sys import wave import json -from deepspeech import Model, version +from stt import Model, version from timeit import default_timer as timer try: @@ -83,12 +83,12 @@ class VersionAction(argparse.Action): super(VersionAction, self).__init__(nargs=0, *args, **kwargs) def __call__(self, *args, **kwargs): - print('DeepSpeech ', version()) + print('Coqui STT ', version()) exit(0) def main(): - parser = argparse.ArgumentParser(description='Running DeepSpeech inference.') + parser = argparse.ArgumentParser(description='Running Coqui STT inference.') parser.add_argument('--model', required=True, help='Path to the model (protocol buffer binary file)') parser.add_argument('--scorer', required=False, diff --git a/native_client/python/impl.i b/native_client/python/impl.i index 3ee4b516..b7cc0476 100644 --- a/native_client/python/impl.i +++ b/native_client/python/impl.i @@ -2,7 +2,7 @@ %{ #define SWIG_FILE_WITH_INIT -#include "deepspeech.h" +#include "coqui-stt.h" %} %include "numpy.i" @@ -125,4 +125,4 @@ static PyObject *parent_reference() { %rename ("%(strip:[DS_])s") ""; -%include "../deepspeech.h" +%include "../coqui-stt.h" diff --git a/native_client/python/setup.py b/native_client/python/setup.py index 0e1d0e62..093b85cd 100755 --- a/native_client/python/setup.py +++ b/native_client/python/setup.py @@ -24,14 +24,14 @@ def main(): numpy_include = os.getenv('NUMPY_INCLUDE', numpy_include) numpy_min_ver = os.getenv('NUMPY_DEP_VERSION', '') - project_name = 'deepspeech' + project_name = 'stt' if '--project_name' in sys.argv: project_name_idx = sys.argv.index('--project_name') project_name = sys.argv[project_name_idx + 1] sys.argv.remove('--project_name') sys.argv.pop(project_name_idx) - with open('../../training/deepspeech_training/VERSION', 'r') as ver: + with open('../../training/coqui_stt_training/VERSION', 'r') as ver: project_version = ver.read().strip() class BuildExtFirst(build): @@ -59,7 +59,7 @@ def main(): raise AssertionError('os.name == java not expected') - ds_ext = Extension(name='deepspeech._impl', + ds_ext = Extension(name='stt._impl', sources=['impl.i'], include_dirs=[numpy_include, '../'], library_dirs=list(map(lambda x: x.strip(), lib_dirs_split(os.getenv('MODEL_LDFLAGS', '')))), @@ -67,24 +67,24 @@ def main(): swig_opts=['-c++', '-keyword']) setup(name=project_name, - description='A library for running inference on a DeepSpeech model', + description='A library for running inference on a Coqui STT model', long_description=read('README.rst'), 
long_description_content_type='text/x-rst; charset=UTF-8', - author='Mozilla', + author='Coqui GmbH', version=project_version, - package_dir={'deepspeech': '.'}, + package_dir={'stt': '.'}, cmdclass={'build': BuildExtFirst}, license='MPL-2.0', - url='https://github.com/mozilla/DeepSpeech', + url='https://github.com/coqui-ai/STT', project_urls={ - 'Documentation': 'https://github.com/mozilla/DeepSpeech/tree/v{}#project-deepspeech'.format(project_version), - 'Tracker': 'https://github.com/mozilla/DeepSpeech/issues', - 'Repository': 'https://github.com/mozilla/DeepSpeech/tree/v{}'.format(project_version), - 'Discussions': 'https://discourse.mozilla.org/c/deep-speech', + 'Documentation': 'https://stt.readthedocs.io', + 'Tracker': 'https://github.com/coqui-ai/STT/issues', + 'Repository': 'https://github.com/coqui-ai/STT/tree/v{}'.format(project_version), + 'Discussions': 'https://github.com/coqui-ai/STT/discussions', }, ext_modules=[ds_ext], - py_modules=['deepspeech', 'deepspeech.client', 'deepspeech.impl'], - entry_points={'console_scripts':['deepspeech=deepspeech.client:main']}, + py_modules=['stt', 'stt.client', 'stt.impl'], + entry_points={'console_scripts':['stt=stt.client:main']}, install_requires=['numpy%s' % numpy_min_ver], include_package_data=True, classifiers=[ diff --git a/native_client/swift/deepspeech-ios.podspec b/native_client/swift/deepspeech-ios.podspec index ad9eccf1..ea28a6d1 100644 --- a/native_client/swift/deepspeech-ios.podspec +++ b/native_client/swift/deepspeech-ios.podspec @@ -1,16 +1,16 @@ # Pull in version from outside -version = File.read(File.join(__dir__, "../../training/deepspeech_training/VERSION")).split("\n")[0] +version = File.read(File.join(__dir__, "../../training/coqui_stt_training/VERSION")).split("\n")[0] Pod::Spec.new do |s| - s.name = "deepspeech-ios" + s.name = "stt-ios" s.version = version - s.summary = "DeepSpeech" - s.homepage = "https://github.com/mozilla/DeepSpeech" + s.summary = "Coqui STT" + s.homepage = "https://github.com/coqui-ai/STT" s.license = "Mozilla Public License 2.0" - s.authors = "DeepSpeech authors" + s.authors = "Coqui GmbH" s.platforms = { :ios => "9.0" } - s.source = { :git => "https://github.com/mozilla/DeepSpeech.git", :tag => "v#{s.version}" } + s.source = { :git => "https://github.com/coqui-ai/STT.git", :tag => "v#{s.version}" } # Assuming taskcluster build location. Depending on your Xcode setup, this might be in # build/Release-iphoneos/deepspeech_ios.framework instead. 
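The Python bindings renamed above also expose the streaming interface declared in ``native_client/coqui-stt.h``. A minimal sketch of the streaming flow; the zero-filled buffer merely stands in for 16kHz audio arriving in chunks, for example from a microphone, and the model file name is the 0.9.3 release artifact:

.. code-block:: python

   import numpy as np
   from stt import Model

   model = Model('coqui-stt-0.9.3-models.pbmm')

   stream = model.createStream()
   # Feed one second of 16kHz audio in 20 ms chunks (320 samples each)
   chunk = np.zeros(320, dtype=np.int16)
   for _ in range(50):
       stream.feedAudioContent(chunk)

   print(stream.intermediateDecode())  # partial transcript so far
   print(stream.finishStream())        # final transcript; frees the stream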
diff --git a/native_client/swift/deepspeech_ios.xcodeproj/project.pbxproj b/native_client/swift/deepspeech_ios.xcodeproj/project.pbxproj index 488f008c..3f3a2d7f 100644 --- a/native_client/swift/deepspeech_ios.xcodeproj/project.pbxproj +++ b/native_client/swift/deepspeech_ios.xcodeproj/project.pbxproj @@ -9,7 +9,7 @@ /* Begin PBXBuildFile section */ 505B136B24960D550007DADA /* deepspeech_ios.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 505B136124960D550007DADA /* deepspeech_ios.framework */; }; 505B137224960D550007DADA /* deepspeech_ios.h in Headers */ = {isa = PBXBuildFile; fileRef = 505B136424960D550007DADA /* deepspeech_ios.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 505B137D24961AF20007DADA /* deepspeech.h in Headers */ = {isa = PBXBuildFile; fileRef = 505B137C24961AF20007DADA /* deepspeech.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 505B137D24961AF20007DADA /* coqui-stt.h in Headers */ = {isa = PBXBuildFile; fileRef = 505B137C24961AF20007DADA /* coqui-stt.h */; settings = {ATTRIBUTES = (Private, ); }; }; 505B137F24961BA70007DADA /* DeepSpeech.swift in Sources */ = {isa = PBXBuildFile; fileRef = 505B137E24961BA70007DADA /* DeepSpeech.swift */; }; AD2FD0F925678F8800314F2E /* deepspeech_ios.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = AD2FD0F825678F8800314F2E /* deepspeech_ios.framework */; }; /* End PBXBuildFile section */ @@ -43,7 +43,7 @@ 505B136524960D550007DADA /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; 505B136A24960D550007DADA /* deepspeech_iosTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = deepspeech_iosTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; 505B137B249619C90007DADA /* deepspeech_ios.modulemap */ = {isa = PBXFileReference; lastKnownFileType = "sourcecode.module-map"; path = deepspeech_ios.modulemap; sourceTree = ""; }; - 505B137C24961AF20007DADA /* deepspeech.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = deepspeech.h; path = ../../deepspeech.h; sourceTree = ""; }; + 505B137C24961AF20007DADA /* coqui-stt.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = coqui-stt.h; path = ../../coqui-stt.h; sourceTree = ""; }; 505B137E24961BA70007DADA /* DeepSpeech.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = DeepSpeech.swift; sourceTree = ""; }; AD2FD0F825678F8800314F2E /* deepspeech_ios.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; path = deepspeech_ios.framework; sourceTree = ""; }; /* End PBXFileReference section */ @@ -89,7 +89,7 @@ 505B136324960D550007DADA /* deepspeech_ios */ = { isa = PBXGroup; children = ( - 505B137C24961AF20007DADA /* deepspeech.h */, + 505B137C24961AF20007DADA /* coqui-stt.h */, 505B136424960D550007DADA /* deepspeech_ios.h */, 505B137E24961BA70007DADA /* DeepSpeech.swift */, 505B137B249619C90007DADA /* deepspeech_ios.modulemap */, @@ -114,7 +114,7 @@ buildActionMask = 2147483647; files = ( 505B137224960D550007DADA /* deepspeech_ios.h in Headers */, - 505B137D24961AF20007DADA /* deepspeech.h in Headers */, + 505B137D24961AF20007DADA /* coqui-stt.h in Headers */, ); runOnlyForDeploymentPostprocessing = 0; }; diff --git a/native_client/swift/deepspeech_ios/DeepSpeech.swift b/native_client/swift/deepspeech_ios/DeepSpeech.swift index 77a6a818..5d254c99 100644 --- a/native_client/swift/deepspeech_ios/DeepSpeech.swift +++ 
b/native_client/swift/deepspeech_ios/DeepSpeech.swift @@ -9,7 +9,7 @@ import deepspeech_ios.libdeepspeech_Private public enum DeepSpeechError: Error { - // Should be kept in sync with deepspeech.h + // Should be kept in sync with coqui-stt.h case noModel(errorCode: Int32) case invalidAlphabet(errorCode: Int32) case invalidShape(errorCode: Int32) diff --git a/native_client/swift/deepspeech_ios/deepspeech_ios.modulemap b/native_client/swift/deepspeech_ios/deepspeech_ios.modulemap index 078ac915..de2e5365 100644 --- a/native_client/swift/deepspeech_ios/deepspeech_ios.modulemap +++ b/native_client/swift/deepspeech_ios/deepspeech_ios.modulemap @@ -5,7 +5,7 @@ framework module deepspeech_ios { module * { export * } explicit module libdeepspeech_Private { - header "deepspeech.h" + header "coqui-stt.h" export * link "deepspeech" } diff --git a/native_client/swift/deepspeech_ios_test/ContentView.swift b/native_client/swift/deepspeech_ios_test/ContentView.swift index 0eb7c776..424a6d6f 100644 --- a/native_client/swift/deepspeech_ios_test/ContentView.swift +++ b/native_client/swift/deepspeech_ios_test/ContentView.swift @@ -3,8 +3,8 @@ // deepspeech_ios_test // // Created by Reuben Morais on 15.06.20. -// Copyright © 2020 Mozilla. All rights reserved. -// +// Copyright © 2020 Mozilla +// Copyright © 2021 Coqui GmbH import SwiftUI @@ -14,7 +14,7 @@ struct ContentView: View { var body: some View { VStack { - Text("DeepSpeech iOS Demo") + Text("Coqui STT iOS Demo") .font(.system(size: 30)) Button("Recognize files", action: recognizeFiles) .padding(30) diff --git a/native_client/swift/deepspeech_ios_test/SpeechRecognitionImpl.swift b/native_client/swift/deepspeech_ios_test/SpeechRecognitionImpl.swift index ecb22059..02a27d6d 100644 --- a/native_client/swift/deepspeech_ios_test/SpeechRecognitionImpl.swift +++ b/native_client/swift/deepspeech_ios_test/SpeechRecognitionImpl.swift @@ -1,11 +1,11 @@ // -// DeepSpeech.swift +// SpeechRecognitionImpl.swift // deepspeech_ios_test // // Created by Erik Ziegler on 27.07.20. -// Copyright © 2020 Mozilla. All rights reserved. -// - +// Copyright © 2020 Mozilla +// Copyright © 2020 Erik Ziegler +// Copyright © 2021 Coqui GmbH import Foundation import AVFoundation import AudioToolbox diff --git a/native_client/test/concurrent_streams.py b/native_client/test/concurrent_streams.py index e435b43f..8bce5807 100644 --- a/native_client/test/concurrent_streams.py +++ b/native_client/test/concurrent_streams.py @@ -6,11 +6,11 @@ import argparse import numpy as np import wave -from deepspeech import Model +from stt import Model def main(): - parser = argparse.ArgumentParser(description='Running DeepSpeech inference.') + parser = argparse.ArgumentParser(description='Running STT inference.') parser.add_argument('--model', required=True, help='Path to the model (protocol buffer binary file)') parser.add_argument('--scorer', nargs='?', diff --git a/native_client/tflitemodelstate.cc b/native_client/tflitemodelstate.cc index 50a68a4b..eb8a476c 100644 --- a/native_client/tflitemodelstate.cc +++ b/native_client/tflitemodelstate.cc @@ -255,8 +255,7 @@ TFLiteModelState::init(const char* model_path) std::cerr << "Specified model file version (" << *graph_version << ") is " << "incompatible with minimum version supported by this client (" << ds_graph_version() << "). 
See " - << "https://github.com/mozilla/DeepSpeech/blob/" - << ds_git_version() << "/doc/USING.rst#model-compatibility " + << "https://stt.readthedocs.io/en/latest/USING.html#model-compatibility " << "for more information" << std::endl; return DS_ERR_MODEL_INCOMPATIBLE; } diff --git a/native_client/tfmodelstate.cc b/native_client/tfmodelstate.cc index 65328e30..a81412da 100644 --- a/native_client/tfmodelstate.cc +++ b/native_client/tfmodelstate.cc @@ -91,8 +91,7 @@ TFModelState::init(const char* model_path) std::cerr << "Specified model file version (" << graph_version << ") is " << "incompatible with minimum version supported by this client (" << ds_graph_version() << "). See " - << "https://github.com/mozilla/DeepSpeech/blob/" - << ds_git_version() << "/doc/USING.rst#model-compatibility " + << "https://stt.readthedocs.io/en/latest/USING.html#model-compatibility " << "for more information" << std::endl; return DS_ERR_MODEL_INCOMPATIBLE; } diff --git a/setup.py b/setup.py index b16e6552..daca12e1 100644 --- a/setup.py +++ b/setup.py @@ -95,11 +95,11 @@ def main(): install_requires = install_requires + tensorflow_pypi_dep setup( - name='deepspeech_training', + name='coqui_stt_training', version=version, - description='Training code for DeepSpeech', - url='https://github.com/mozilla/DeepSpeech', - author='DeepSpeech authors', + description='Training code for Coqui STT', + url='https://github.com/coqui-ai/STT', + author='Coqui STT authors', license='MPL-2.0', # Classifiers help users find your project by categorizing it. # @@ -118,7 +118,7 @@ def main(): # If there are data files included in your packages that need to be # installed, specify them here. package_data={ - 'deepspeech_training': [ + 'coqui_stt_training': [ 'VERSION', 'GRAPH_VERSION', ], diff --git a/stats.py b/stats.py index 569f2743..dbd8ecd3 100644 --- a/stats.py +++ b/stats.py @@ -3,7 +3,7 @@ import argparse import functools import pandas -from deepspeech_training.util.helpers import secs_to_hours +from coqui_stt_training.util.helpers import secs_to_hours from pathlib import Path diff --git a/taskcluster/tc-all-vars.sh b/taskcluster/tc-all-vars.sh index ef1cba84..5f2e04ae 100755 --- a/taskcluster/tc-all-vars.sh +++ b/taskcluster/tc-all-vars.sh @@ -53,7 +53,7 @@ export DS_TFDIR=${DS_ROOT_TASK}/DeepSpeech/ds/tensorflow export DS_DSDIR=${DS_ROOT_TASK}/DeepSpeech/ds export DS_EXAMPLEDIR=${DS_ROOT_TASK}/DeepSpeech/examples -export DS_VERSION="$(cat ${DS_DSDIR}/training/deepspeech_training/VERSION)" +export DS_VERSION="$(cat ${DS_DSDIR}/training/coqui_stt_training/VERSION)" export GRADLE_USER_HOME=${DS_ROOT_TASK}/gradle-cache export ANDROID_SDK_HOME=${DS_ROOT_TASK}/DeepSpeech/Android/SDK/ diff --git a/taskcluster/tc-package.sh b/taskcluster/tc-package.sh index 65280551..278e6564 100755 --- a/taskcluster/tc-package.sh +++ b/taskcluster/tc-package.sh @@ -22,13 +22,13 @@ package_native_client() fi; ${TAR} -cf - \ - -C ${tensorflow_dir}/bazel-bin/native_client/ libdeepspeech.so \ - -C ${tensorflow_dir}/bazel-bin/native_client/ libdeepspeech.so.if.lib \ + -C ${tensorflow_dir}/bazel-bin/native_client/ libstt.so \ + -C ${tensorflow_dir}/bazel-bin/native_client/ libstt.so.if.lib \ -C ${tensorflow_dir}/bazel-bin/native_client/ generate_scorer_package \ -C ${deepspeech_dir}/ LICENSE \ - -C ${deepspeech_dir}/native_client/ deepspeech${PLATFORM_EXE_SUFFIX} \ - -C ${deepspeech_dir}/native_client/ deepspeech.h \ - -C ${deepspeech_dir}/native_client/kenlm/ README.mozilla \ + -C ${deepspeech_dir}/native_client/ stt${PLATFORM_EXE_SUFFIX} \ + -C 
${deepspeech_dir}/native_client/ coqui-stt.h \ + -C ${deepspeech_dir}/native_client/kenlm/ README.coqui \ | ${XZ} > "${artifacts_dir}/${artifact_name}" } @@ -56,13 +56,13 @@ package_native_client_ndk() fi; tar -cf - \ - -C ${deepspeech_dir}/native_client/libs/${arch_abi}/ deepspeech \ - -C ${deepspeech_dir}/native_client/libs/${arch_abi}/ libdeepspeech.so \ + -C ${deepspeech_dir}/native_client/libs/${arch_abi}/ stt \ + -C ${deepspeech_dir}/native_client/libs/${arch_abi}/ libstt.so \ -C ${tensorflow_dir}/bazel-bin/native_client/ generate_scorer_package \ -C ${deepspeech_dir}/native_client/libs/${arch_abi}/ libc++_shared.so \ - -C ${deepspeech_dir}/native_client/ deepspeech.h \ + -C ${deepspeech_dir}/native_client/ coqui-stt.h \ -C ${deepspeech_dir}/ LICENSE \ - -C ${deepspeech_dir}/native_client/kenlm/ README.mozilla \ + -C ${deepspeech_dir}/native_client/kenlm/ README.coqui \ | pixz -9 > "${artifacts_dir}/${artifact_name}" } @@ -83,5 +83,5 @@ package_libdeepspeech_as_zip() echo "Please specify artifact name." fi; - zip -r9 --junk-paths "${artifacts_dir}/${artifact_name}" ${tensorflow_dir}/bazel-bin/native_client/libdeepspeech.so + zip -r9 --junk-paths "${artifacts_dir}/${artifact_name}" ${tensorflow_dir}/bazel-bin/native_client/libstt.so } diff --git a/tests/test_importers.py b/tests/test_importers.py index 8f52a21c..a897d0fd 100644 --- a/tests/test_importers.py +++ b/tests/test_importers.py @@ -1,7 +1,7 @@ import unittest from argparse import Namespace -from deepspeech_training.util.importers import validate_label_eng, get_validate_label +from coqui_stt_training.util.importers import validate_label_eng, get_validate_label from pathlib import Path def from_here(path): diff --git a/tests/test_value_range.py b/tests/test_value_range.py index c08dcbd5..d10c0029 100644 --- a/tests/test_value_range.py +++ b/tests/test_value_range.py @@ -2,7 +2,7 @@ import unittest import numpy as np import tensorflow as tf -from deepspeech_training.util.helpers import ValueRange, get_value_range, pick_value_from_range, tf_pick_value_from_range +from coqui_stt_training.util.helpers import ValueRange, get_value_range, pick_value_from_range, tf_pick_value_from_range class TestValueRange(unittest.TestCase): diff --git a/DeepSpeech.py b/train.py similarity index 83% rename from DeepSpeech.py rename to train.py index 0fa4ae8a..caf4e1d4 100755 --- a/DeepSpeech.py +++ b/train.py @@ -4,7 +4,7 @@ from __future__ import absolute_import, division, print_function if __name__ == '__main__': try: - from deepspeech_training import train as ds_train + from coqui_stt_training import train as ds_train except ImportError: print('Training package is not installed. 
See training documentation.') raise diff --git a/training/deepspeech_training/GRAPH_VERSION b/training/coqui_stt_training/GRAPH_VERSION similarity index 100% rename from training/deepspeech_training/GRAPH_VERSION rename to training/coqui_stt_training/GRAPH_VERSION diff --git a/training/deepspeech_training/VERSION b/training/coqui_stt_training/VERSION similarity index 100% rename from training/deepspeech_training/VERSION rename to training/coqui_stt_training/VERSION diff --git a/training/deepspeech_training/__init__.py b/training/coqui_stt_training/__init__.py similarity index 100% rename from training/deepspeech_training/__init__.py rename to training/coqui_stt_training/__init__.py diff --git a/training/deepspeech_training/evaluate.py b/training/coqui_stt_training/evaluate.py similarity index 100% rename from training/deepspeech_training/evaluate.py rename to training/coqui_stt_training/evaluate.py diff --git a/training/deepspeech_training/train.py b/training/coqui_stt_training/train.py similarity index 100% rename from training/deepspeech_training/train.py rename to training/coqui_stt_training/train.py diff --git a/training/deepspeech_training/util/__init__.py b/training/coqui_stt_training/util/__init__.py similarity index 100% rename from training/deepspeech_training/util/__init__.py rename to training/coqui_stt_training/util/__init__.py diff --git a/training/deepspeech_training/util/audio.py b/training/coqui_stt_training/util/audio.py similarity index 100% rename from training/deepspeech_training/util/audio.py rename to training/coqui_stt_training/util/audio.py diff --git a/training/deepspeech_training/util/augmentations.py b/training/coqui_stt_training/util/augmentations.py similarity index 100% rename from training/deepspeech_training/util/augmentations.py rename to training/coqui_stt_training/util/augmentations.py diff --git a/training/deepspeech_training/util/check_characters.py b/training/coqui_stt_training/util/check_characters.py similarity index 100% rename from training/deepspeech_training/util/check_characters.py rename to training/coqui_stt_training/util/check_characters.py diff --git a/training/deepspeech_training/util/checkpoints.py b/training/coqui_stt_training/util/checkpoints.py similarity index 100% rename from training/deepspeech_training/util/checkpoints.py rename to training/coqui_stt_training/util/checkpoints.py diff --git a/training/deepspeech_training/util/config.py b/training/coqui_stt_training/util/config.py similarity index 98% rename from training/deepspeech_training/util/config.py rename to training/coqui_stt_training/util/config.py index 358aa6ab..6339ab25 100755 --- a/training/deepspeech_training/util/config.py +++ b/training/coqui_stt_training/util/config.py @@ -58,7 +58,7 @@ def initialize_globals(): # Set default checkpoint dir if not FLAGS.checkpoint_dir: - FLAGS.checkpoint_dir = xdg.save_data_path(os.path.join('deepspeech', 'checkpoints')) + FLAGS.checkpoint_dir = xdg.save_data_path(os.path.join('stt', 'checkpoints')) if FLAGS.load_train not in ['last', 'best', 'init', 'auto']: FLAGS.load_train = 'auto' @@ -68,7 +68,7 @@ def initialize_globals(): # Set default summary dir if not FLAGS.summary_dir: - FLAGS.summary_dir = xdg.save_data_path(os.path.join('deepspeech', 'summaries')) + FLAGS.summary_dir = xdg.save_data_path(os.path.join('stt', 'summaries')) # Standard session configuration that'll be used for all new sessions. 
c.session_config = tfv1.ConfigProto(allow_soft_placement=True, log_device_placement=FLAGS.log_placement, diff --git a/training/deepspeech_training/util/downloader.py b/training/coqui_stt_training/util/downloader.py similarity index 100% rename from training/deepspeech_training/util/downloader.py rename to training/coqui_stt_training/util/downloader.py diff --git a/training/deepspeech_training/util/evaluate_tools.py b/training/coqui_stt_training/util/evaluate_tools.py similarity index 100% rename from training/deepspeech_training/util/evaluate_tools.py rename to training/coqui_stt_training/util/evaluate_tools.py diff --git a/training/deepspeech_training/util/feeding.py b/training/coqui_stt_training/util/feeding.py similarity index 100% rename from training/deepspeech_training/util/feeding.py rename to training/coqui_stt_training/util/feeding.py diff --git a/training/deepspeech_training/util/flags.py b/training/coqui_stt_training/util/flags.py similarity index 94% rename from training/deepspeech_training/util/flags.py rename to training/coqui_stt_training/util/flags.py index fcbd6dd0..69678bb6 100644 --- a/training/deepspeech_training/util/flags.py +++ b/training/coqui_stt_training/util/flags.py @@ -83,9 +83,9 @@ def create_flags(): # Checkpointing - f.DEFINE_string('checkpoint_dir', '', 'directory from which checkpoints are loaded and to which they are saved - defaults to directory "deepspeech/checkpoints" within user\'s data home specified by the XDG Base Directory Specification') - f.DEFINE_string('load_checkpoint_dir', '', 'directory in which checkpoints are stored - defaults to directory "deepspeech/checkpoints" within user\'s data home specified by the XDG Base Directory Specification') - f.DEFINE_string('save_checkpoint_dir', '', 'directory to which checkpoints are saved - defaults to directory "deepspeech/checkpoints" within user\'s data home specified by the XDG Base Directory Specification') + f.DEFINE_string('checkpoint_dir', '', 'directory from which checkpoints are loaded and to which they are saved - defaults to directory "stt/checkpoints" within user\'s data home specified by the XDG Base Directory Specification') + f.DEFINE_string('load_checkpoint_dir', '', 'directory in which checkpoints are stored - defaults to directory "stt/checkpoints" within user\'s data home specified by the XDG Base Directory Specification') + f.DEFINE_string('save_checkpoint_dir', '', 'directory to which checkpoints are saved - defaults to directory "stt/checkpoints" within user\'s data home specified by the XDG Base Directory Specification') f.DEFINE_integer('checkpoint_secs', 600, 'checkpoint saving interval in seconds') f.DEFINE_integer('max_to_keep', 5, 'number of checkpoint files to keep - default value is 5') f.DEFINE_string('load_train', 'auto', 'what checkpoint to load before starting the training process. "last" for loading most recent epoch checkpoint, "best" for loading best validation loss checkpoint, "init" for initializing a new checkpoint, "auto" for trying several options.') @@ -109,7 +109,7 @@ def create_flags(): f.DEFINE_string('export_author_id', 'author', 'author of the exported model. GitHub user or organization name used to uniquely identify the author of this model') f.DEFINE_string('export_model_name', 'model', 'name of the exported model. Must not contain forward slashes.') - f.DEFINE_string('export_model_version', '0.0.1', 'semantic version of the exported model. See https://semver.org/. 
This is fully controlled by you as author of the model and has no required connection with DeepSpeech versions') + f.DEFINE_string('export_model_version', '0.0.1', 'semantic version of the exported model. See https://semver.org/. This is fully controlled by you as author of the model and has no required connection with Coqui STT versions') def str_val_equals_help(name, val_desc): f.DEFINE_string(name, '<{}>'.format(val_desc), val_desc) @@ -117,8 +117,8 @@ def create_flags(): str_val_equals_help('export_contact_info', 'public contact information of the author. Can be an email address, or a link to a contact form, issue tracker, or discussion forum. Must provide a way to reach the model authors') str_val_equals_help('export_license', 'SPDX identifier of the license of the exported model. See https://spdx.org/licenses/. If the license does not have an SPDX identifier, use the license name.') str_val_equals_help('export_language', 'language the model was trained on - IETF BCP 47 language tag including at least language, script and region subtags. E.g. "en-Latn-UK" or "de-Latn-DE" or "cmn-Hans-CN". Include as much info as you can without loss of precision. For example, if a model is trained on Scottish English, include the variant subtag: "en-Latn-GB-Scotland".') - str_val_equals_help('export_min_ds_version', 'minimum DeepSpeech version (inclusive) the exported model is compatible with') - str_val_equals_help('export_max_ds_version', 'maximum DeepSpeech version (inclusive) the exported model is compatible with') + str_val_equals_help('export_min_ds_version', 'minimum Coqui STT version (inclusive) the exported model is compatible with') + str_val_equals_help('export_max_ds_version', 'maximum Coqui STT version (inclusive) the exported model is compatible with') str_val_equals_help('export_description', 'Freeform description of the model being exported. Markdown accepted. You can also leave this flag unchanged and edit the generated .md file directly. 
Useful things to describe are demographic and acoustic characteristics of the data used to train the model, any architectural changes, names of public datasets that were used when applicable, hyperparameters used for training, evaluation results on standard benchmark datasets, etc.') # Reporting @@ -129,7 +129,7 @@ def create_flags(): f.DEFINE_boolean('log_placement', False, 'whether to log device placement of the operators to the console') f.DEFINE_integer('report_count', 5, 'number of phrases for each of best WER, median WER and worst WER to print out during a WER report') - f.DEFINE_string('summary_dir', '', 'target directory for TensorBoard summaries - defaults to directory "deepspeech/summaries" within user\'s data home specified by the XDG Base Directory Specification') + f.DEFINE_string('summary_dir', '', 'target directory for TensorBoard summaries - defaults to directory "stt/summaries" within user\'s data home specified by the XDG Base Directory Specification') f.DEFINE_string('test_output_file', '', 'path to a file to save all src/decoded/distance/loss tuples generated during a test epoch') diff --git a/training/deepspeech_training/util/gpu.py b/training/coqui_stt_training/util/gpu.py similarity index 100% rename from training/deepspeech_training/util/gpu.py rename to training/coqui_stt_training/util/gpu.py diff --git a/training/deepspeech_training/util/helpers.py b/training/coqui_stt_training/util/helpers.py similarity index 97% rename from training/deepspeech_training/util/helpers.py rename to training/coqui_stt_training/util/helpers.py index 7545c8ee..ab5a7d8a 100644 --- a/training/deepspeech_training/util/helpers.py +++ b/training/coqui_stt_training/util/helpers.py @@ -47,14 +47,14 @@ def check_ctcdecoder_version(): from ds_ctcdecoder import __version__ as decoder_version except ImportError as e: if e.msg.find('__version__') > 0: - print("DeepSpeech version ({ds_version}) requires CTC decoder to expose __version__. " + print("Coqui STT version ({ds_version}) requires CTC decoder to expose __version__. " "Please upgrade the ds_ctcdecoder package to version {ds_version}".format(ds_version=ds_version_s)) sys.exit(1) raise e rv = semver.compare(ds_version_s, decoder_version) if rv != 0: - print("DeepSpeech version ({}) and CTC decoder version ({}) do not match. " + print("Coqui STT version ({}) and CTC decoder version ({}) do not match. 
" "Please ensure matching versions are in use.".format(ds_version_s, decoder_version)) sys.exit(1) diff --git a/training/deepspeech_training/util/importers.py b/training/coqui_stt_training/util/importers.py similarity index 100% rename from training/deepspeech_training/util/importers.py rename to training/coqui_stt_training/util/importers.py diff --git a/training/deepspeech_training/util/io.py b/training/coqui_stt_training/util/io.py similarity index 100% rename from training/deepspeech_training/util/io.py rename to training/coqui_stt_training/util/io.py diff --git a/training/deepspeech_training/util/logging.py b/training/coqui_stt_training/util/logging.py similarity index 100% rename from training/deepspeech_training/util/logging.py rename to training/coqui_stt_training/util/logging.py diff --git a/training/deepspeech_training/util/sample_collections.py b/training/coqui_stt_training/util/sample_collections.py similarity index 99% rename from training/deepspeech_training/util/sample_collections.py rename to training/coqui_stt_training/util/sample_collections.py index adb5ee95..5a467d50 100644 --- a/training/deepspeech_training/util/sample_collections.py +++ b/training/coqui_stt_training/util/sample_collections.py @@ -514,7 +514,7 @@ class SampleList: class CSV(SampleList): - """Sample collection reader for reading a DeepSpeech CSV file + """Sample collection reader for reading a Coqui STT CSV file Automatically orders samples by CSV column wav_filesize (if available).""" def __init__(self, csv_filename, labeled=None, reverse=False): """ diff --git a/training/deepspeech_training/util/stm.py b/training/coqui_stt_training/util/stm.py similarity index 100% rename from training/deepspeech_training/util/stm.py rename to training/coqui_stt_training/util/stm.py diff --git a/training/deepspeech_training/util/taskcluster.py b/training/coqui_stt_training/util/taskcluster.py similarity index 100% rename from training/deepspeech_training/util/taskcluster.py rename to training/coqui_stt_training/util/taskcluster.py diff --git a/training/deepspeech_training/util/text.py b/training/coqui_stt_training/util/text.py similarity index 100% rename from training/deepspeech_training/util/text.py rename to training/coqui_stt_training/util/text.py diff --git a/transcribe.py b/transcribe.py index 665921a2..1695d7b6 100755 --- a/transcribe.py +++ b/transcribe.py @@ -13,11 +13,11 @@ import logging logging.getLogger('sox').setLevel(logging.ERROR) import glob -from deepspeech_training.util.audio import AudioFile -from deepspeech_training.util.config import Config, initialize_globals -from deepspeech_training.util.feeding import split_audio_file -from deepspeech_training.util.flags import create_flags, FLAGS -from deepspeech_training.util.logging import log_error, log_info, log_progress, create_progressbar +from coqui_stt_training.util.audio import AudioFile +from coqui_stt_training.util.config import Config, initialize_globals +from coqui_stt_training.util.feeding import split_audio_file +from coqui_stt_training.util.flags import create_flags, FLAGS +from coqui_stt_training.util.logging import log_error, log_info, log_progress, create_progressbar from ds_ctcdecoder import ctc_beam_search_decoder_batch, Scorer from multiprocessing import Process, cpu_count @@ -28,8 +28,8 @@ def fail(message, code=1): def transcribe_file(audio_path, tlog_path): - from deepspeech_training.train import create_model # pylint: disable=cyclic-import,import-outside-toplevel - from deepspeech_training.util.checkpoints import 
load_graph_for_evaluation + from coqui_stt_training.train import create_model # pylint: disable=cyclic-import,import-outside-toplevel + from coqui_stt_training.util.checkpoints import load_graph_for_evaluation initialize_globals() scorer = Scorer(FLAGS.lm_alpha, FLAGS.lm_beta, FLAGS.scorer_path, Config.alphabet) try: diff --git a/util/taskcluster.py b/util/taskcluster.py index 9ef72f41..efd3242a 100644 --- a/util/taskcluster.py +++ b/util/taskcluster.py @@ -4,7 +4,7 @@ from __future__ import absolute_import, division, print_function if __name__ == '__main__': try: - from deepspeech_training.util import taskcluster as dsu_taskcluster + from coqui_stt_training.util import taskcluster as dsu_taskcluster except ImportError: print('Training package is not installed. See training documentation.') raise From 915886b3b77f26e92bcdc6ca5ec86fe89fd21b56 Mon Sep 17 00:00:00 2001 From: Kelly Davis Date: Fri, 5 Mar 2021 12:52:21 +0100 Subject: [PATCH 2/8] Main README logo --- README.rst | 20 +++++++------------- images/coqui-STT-logo-green.png | Bin 0 -> 61544 bytes 2 files changed, 7 insertions(+), 13 deletions(-) create mode 100644 images/coqui-STT-logo-green.png diff --git a/README.rst b/README.rst index 26a48afc..3347f0b4 100644 --- a/README.rst +++ b/README.rst @@ -1,22 +1,16 @@ -Project DeepSpeech -================== +.. image:: images/coqui-STT-logo-green.png + :alt: Coqui STT logo -.. image:: https://readthedocs.org/projects/deepspeech/badge/?version=latest - :target: https://deepspeech.readthedocs.io/?badge=latest +.. image:: https://readthedocs.org/projects/stt/badge/?version=latest + :target: https://stt.readthedocs.io/?badge=latest :alt: Documentation +**Coqui STT** is an open-source Speech-To-Text engine, using a model trained by machine learning techniques based on `Baidu's Deep Speech research paper `_. 🐸STT uses Google's `TensorFlow `_ to make the implementation easier. -.. image:: https://community-tc.services.mozilla.com/api/github/v1/repository/mozilla/DeepSpeech/master/badge.svg - :target: https://community-tc.services.mozilla.com/api/github/v1/repository/mozilla/DeepSpeech/master/latest - :alt: Task Status + **Documentation** for installation, usage, and training models is available on `stt.readthedocs.io `_. - -DeepSpeech is an open-source Speech-To-Text engine, using a model trained by machine learning techniques based on `Baidu's Deep Speech research paper `_. Project DeepSpeech uses Google's `TensorFlow `_ to make the implementation easier. -Documentation for installation, usage, and training models are available on `deepspeech.readthedocs.io `_. -For the latest release, including pre-trained models and checkpoints, `see the latest release on GitHub `_. +For the **latest release**, including pre-trained models and checkpoints, `see the latest release on GitHub `_. For contribution guidelines, see `CONTRIBUTING.rst `_.
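The training-package renames in the first patch are equally mechanical: only the package name changes, and the default XDG data directories move from deepspeech/ to stt/. A short sketch, under the assumption that config.py keeps its existing pyxdg import (from xdg import BaseDirectory as xdg), which the hunks themselves do not show:

    # Renamed training imports plus the new default checkpoint/summary
    # locations (per the config.py and flags.py hunks above).
    import os
    from xdg import BaseDirectory as xdg  # assumed pyxdg import, as in upstream config.py

    from coqui_stt_training.util.helpers import secs_to_hours  # was deepspeech_training.util.helpers

    print(secs_to_hours(7261))  # same helper that stats.py imports above
    print(xdg.save_data_path(os.path.join('stt', 'checkpoints')))  # e.g. ~/.local/share/stt/checkpoints
    print(xdg.save_data_path(os.path.join('stt', 'summaries')))    # e.g. ~/.local/share/stt/summaries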
diff --git a/images/coqui-STT-logo-green.png b/images/coqui-STT-logo-green.png new file mode 100644 index 0000000000000000000000000000000000000000..2afec422d08f03f6f0dd6f222d38f8c2d818fcd3 GIT binary patch literal 61544 [61544 bytes of binary PNG data omitted]
z%r~^=>#MDw)kKgE)pB+;uI!iVoHKS29j}(IK|03I1oL*93k|uiqc{%=X@bSQs3FDN ztVWZ`ziIs(2NQh<%5#n)OC(=DS;z`yB{_%!=7ebJ9b)u8x3J0#F1a4k;)@9~RMzu! zT$S$~Lh0oaaS>r&Q*-E8W}Y8+|CTGhPb#o6QI(n1)vNGTACctl_}bKGStdsqVCzzD zLw&SjTnhLUa^%%Y=KV+F(tcd%GOxUas_!oAM9qClH7=nB&VOej>ZD@{y^QP{D;cfY z5PCMiL*s>GSMn zT#GTdrJx>j*LBE_hMe_HIsNA@!?f4D$P4sVL$pbPaAHs7_E$z6kI8-!V3tDH9W$yW z9a;vlJ_NlB!Uy^ZpMcW=R#EDMI};cNJ4_4>!~xT&1;tV&6QQ0k`v6TMDNlV7witjT z?Bs+RtZXA)oFa0R3nbN;H4ATdUfQSI2%_cm+!2Ak%B+^B#a*M-#`9@qpmyb3A8ckp z+lD3}Cy_2vxoV zWqXvW5g1FSk88O3`Acr8z-?9bcSHsu&3<6eXU za9z)gLw(J&0ZL$ocjxV+z{hQDj$r*`sQ2)cYWH^}V$?GVk@H+z%lMCsPWvH7et{Hk z!|Q$mjU2DBtP{Ed)9j$~1#m8B3bX@DmrY>9=ZD*VqOaA z?I14x4SfHlIdPGGBcuiDc)*YCn_R{1sR=)QpWemC#!fjsJz%(wE#E57*Vjm5&i%E{ z`t8Rn1YXOv%?NuJjoe6Kuv}a^AV852%FAI)uiMJc<4pgvM{YaL?{e_W^m9VTwL5Eq z>;P|Q*G!C|Tg%tNY;1*F-}fMxS0(Z+UaMdn;PwzF+l}^_Ur3c_lmP>Zv-0QGJ1XT& z&dRlVmF;(o$1l_CE>&5tmqY@j>vDD@Z4k=$>zw^n-XAd2BXEA&LBJ7jtun)iE(7P~ z`2jWq@V!OKEe`nS%>;S8{)-fFB0z*g8tuW7$~?$14>!GE1Fw9;i`-H|tLxX+G;s8%I&6f|d+VD|Gj;rn zT<`OA*$X&nL=WpiYHw5YVCSt^g$?Jyfq(TBr6hc^$^V>+LvH6$WY|9&rpHyZlc_H< zK4qC#$Z?_B@l?X!^E6;IzNW0Ho}~mgaKAtPM{Dn!Zk@-QWpJOa?Z%sqt(DvgVIBs` zdvf5)dSkjyMop-457e|X`mhdEm2Z&hF)pgIj&(1Yk!sowc@v`1F(Up=5g@X8BKoI9 z2xEPq_^CZ2Vi|-Z%p`aabvGL8h`i)f|3+b9)}kbJcJ-@YUA~x91b;k`b4pZVT0FFt#T!X$z9?IQzV4s6E1)B6v!4;pX5{4W;NVMtw+=uM-3BKPh-dFB#30e^xQ z_%4(SB=_LyPkVLReIjj2?Qj=3w4 znO^>e;AFCTjz9D{>u~2RnD>7pn3mvhJe89%V%68JW14m4v$ZbXoi-Z>W|6+y3INsb zz3Jv83&x{1_YA&59438Gxjkm`PL-{-bF1S>W05^5TWlar$CZ9H5CaW!mV+Fyg6t%l z6P9kgpG1wc93+6);6p{keejp>{f9bDrj9vdoAtIMe^E;H_t~`{lyCiyafmM|-|S6t z=F|>mL@Zpn&mSjzz^&&x7adF8>7x#S9p+$4uQSxrA0~()YKCe>4+`Ugctyd+#!cES zsDS<7&N+ z6OMv}Of5d|xj4;@9w^sj|NH``|J6s}y>+6xMbk0M=ga(R#{Da&x~{vCIT|+e7e-#6 zcj}Lq>Efl2?JB>Ae+fPLPTOM}0CJkXU_^y!$jBf z{gFNtS}GC#&;6s zvMS_pD)%s9nICX@VTQ0_MgTPIQrtFX=}B86l;S%%SGTvMgwjZg+K!)Xa*8d6^Hu_u z5--2ss&{Qb5G$Jgr|kCZr4on00mQqxJ3eGK6^K^?Mbpq63D^VeYF%c;sM z@_&i!jk9>tQm&pQ^mKB%P+Mph?{5FX8Ib+s(G4?iPpe-p5&J031Uw|7Ow=DFnW!(I z8<_u{Xn-rDEL+@D`^O;h@Ig-<)fEIP49Z5qI!j?nI4bCz>maf9|i&9n{v%ZGH0%DX%H+ zlXS?qH@(M(G*7i1N&hTVcPT)+Sz6q&Q%&%>-f@rKptcZM*}G` zt21>s*`fIb7|*@ODVf;{km_BQ>>Rwm-v}f)xp1f&$_SzNGY9YIfAT@s(XVkt?BaaR zpYVS{^58`0Bg3{zEjg`n&b3sTXfrI`@M>%p%$D@ZnV_?k(zukORt7 z4zS&*zi9y$+${w#ucx~$*|R;KmxbxCCHqYL2Hh|GV7_JzdD|e=Cjs4yPmy@h1?~Z8 zL?135K(ZooX0bV2WiVhFH)O7i5Ofmi z4~RoqP+a7?X0S<6vd@8JCbz%GXZBpPx2ajDcYyue`VNTR77U#z8hEpbTXL(Svd+wN z7l}hN^8d_R z-t^X%CwNCoQVu@*d@`8`1|B~%yWjsa1 zENcZvt1%LQlJ4uQ?g*KBcq>W8C?7K#N3tGo8M%pVT;1vr)nf%tG?dXLx+)LaxD{wQ zHDa7uw4x$`ZK{D4FX<$v*5w}*i`&JOF*7NNKLL7=$4*3oEjrj5l>q1-far%} za}MeLi!+)!Gk8kyuJY!p&b+o)41~#h-SBp6puM=a#`&Rqw-#AZi3iw^!LV~EoArfG za{Y|7xfLi(48ZjIB5aMIWvhK>`VZOg`Zuo%++ZZ4uLi(Vmd(1fpH(%+Ltf2_Yjtus z!1}YRqHjE>8@wG(9^I6HF5_ly!>?8 z8*Pak*2e|iC&==A!SXsqXY16m2lo#x@KO$EkN!pj(KX^8!NMyX-9MUxmV@b{sHYwk zuh_rA-Y8d{X#iqDcXPCiLk+$ifPkIywHOMNcahZLvzn0vVbM;e^Yt=Ee$dRc+&Xjb zyLbf%4LeB`bl$Rn!XR2NW&N})l$p*}gYHeUWkTZ0$X}hLplD^q|B-uz#8ZMxcU@Pu z0S3ExLocqf>Q`Z#Qsrh|b3tV(_eiM#)6JQ5kt5{yY#zxr{`{?}u}P4}=N<9{;|Cub z23g6UzRcyr8Yg_;4C5=YkvQIFT#J$+Q+s@t#4{UU29-a{BWL5gQ`j`|PybEOGDEpl zivNN-i=F8(Z;WmWVJuNYkb7R}Lg;`!QUB!+3m@8Ot?z)n)f_lo_-F>|(8KyH@zS{K z)Xo1L$lcaO(UhDtGzbBz&?T<^v?a|l1p9T9Q3u&d`2V!T2*2Z;i{f_!lKaq^2Y-(y zpDU-{WlZF3@ha`^H4>LucPIz2|NG)6y*~^whHUy;GnR5dl^q!w;T>~_cO8%h8`pX3 zcmKtoje&vrm%0a5F<~@L3s_)WkU79uQST4FSphAWnWKRi`Zn32=p;N)Na79Y8T$^- z9?`#|=W+PSsGPHn{#%K@YMFfm>Oc=ucGGhA)fog0usy<_lOC}b@|VW%b^1n~1+?Nf zZ!;Tf?HxAeH#I@`^MA*_7!VVWuwwo#?iuG0`$x!TR)u+^Z*+E(_Bxjnzp|@u3zXVJ zzG||zk@ltRzo75BSUaF8XC#fHNH=|8%e-X~aW{(d9BP2nf#b)A-w+w|L}novN&IWv 
zHe`zBaH$+c<_(Xb6HWVL(Ut8y`6IpYV}Z+mqGu(IxwmyV-dV<)Nl`ZLaqqZ26%d31 z7hq zo}`q+SzGAqW5O{_(y2WAV?(=N-LKoeTcJczgO8FGu^Xv>g^X_Xe+w=K+zHeqP*Bha z_iM@^?I`ZpLX?A~_jc~U;Cc{%u*c$?I|KBPF*0kd7XX4l2ZeziIq<%3KssPJ#a$ybY!&qV<={x<2LKIA^(H0!YC<*k+a{cZV!tgeTbUH#Qy^xC4&PcqNG zi?j79OMG+hXB`(S;IsTfbMpe)`v ze=i{=9nvk`BAwD9As`)#w4`*GNGK^GNW+p63QKoLHv$5}(kYS)EK9sM&-3{Gz4HfL z7Z=yLXXebzXU?1$7s6PRm@#EwMCcdjTM7R&YMl1e#Av*e@v-~;0 zrPk&53Tt%icXs@?;~Zv-9t=TqW)f@e`kQZ$&UBA%-u1n?)f5eD`dR@z5Epxa{BDlu zyI5RpCXg*dg3la5AD;)}mofz8|7e?NexMdKD>^pXjXe_1Ld*Nz?Fl{D85|p#@<`^3 zkpqF5*q4)+m&?TVUNOf9e2j#pkJ8K%IjkPJU)hYwQrlcxIZ}VQzc^ahcry)pwTr$} zM(yPQCXL-iU8(tqH}O#G@VL79b$jYj2rjeqchtf1IO7L*I9ef{E^w(}O(0MQjXKiB)hg?{gEG zva97>mEIPlFyvLt=DZy9bKRGYZeuTEycI|p@a!}z zw}y50!rwQOXe^t|+cz&lR}h1Ul1~|9^&?W;P<2Id z&VtyBVz=oc4qX-b8)Zeqe*HY_p`Z|%d9SyJ)V^&{-ls>~s2BncNa8S#N7v7sD@Ezyn9cf)+)&%3y>n;6%--5oVm{-2s`kR>}*FB<5=fx ziRjUXHzN*gQtrHD>cKN{Y>*blVM)4}GVG(!p3xSbmEZIfwa8WJdhIG{%rX4iw@ zU~4}pWTim2$!`?%kxOTicP9s`cElO!yY-MF;GTbuMIa%spwId!|EEC^n& ze??$i#=};?(bJ8MHSng9`=zbF#4o*sH%rb(ZBeVpi})?bwm`<%tgicc*3!h*g%ZM`r-m=W zd7QMKmhyu6IOULfTn2Ts1EVM>Y&Ky(dog%;mWkTSH40+_Cr@!Kr6}~)XRqxLHu`l$;U@*vaz$jJ1^6W5w#x?HH0%urU z>*tum*Mp-*O6=EAfp+?+!;S|<=8^j2^1ua5+eHe>-9PgC{rT5^{9{cekjhD8c?2>a z?h(4X&E|%s*yJ_rgGz1=kKlv1p>9d04dg+x$9w%~fiwue zvAmy}g9XC4EJdF)KQ-Q=2nn59Qrg!n^vcw~DxKVl7AVH-JEO3ga!jQ-qewqH3bOJy z-pkb5@P!Tgyd%+9gPEO+{(s`t&zNJ1ql$?gE zzKEl;2pp3CXvM<|y!_e>w7^8J-HmG$x7Rhbi|zBd2{}F*<;xY@6m!7i=J<7czABV3 zTWQo)9l`kYeR6SHSA9pg?9oFeiqB!P3=G^RG`Yk;FBXVU>w(K>=UJ*i%A_;WV_^ArkO)!T&6hI_zia8U9RW{2KB1up)`RuL#$JR750fRUvunh zSIG{We&0af{Q@q0kgeJyEkxo%^Cqi4SD(Nfp8cqTNRn~dUP6cvMVx}>)$nYM0u*}0 z2%4@5I?deA3`jDfhBoKBgp=S@U~MW(lkg7Za`wx1yi~!KVCO-neJD#P+oe6`bISKe ztz+K-uM$pQNz7Qp(s=e0*;-pfl7mbRI?fFk)a!9`btmKD@^{ON8Mn5O++xw;qO4@x zk3587!#esQ9}POaZ^^q?hY4{vv4=GifRXpZsXW%$q}t3f6xKjGgUGyVq~!<-XdP@F zx%*ByL9QjC=TquYn-=9_lli@WSlf~$3N-TCi=Sf{BbCbNF8_EO&5Y;4*F$&FI^%Z- zdRKYa;b`nPx|g^8u{JkkG~2|Mi!6j0aPs~Wdb9W$c@HONMc0QZcY^k(G;V=rvedgm zIEar=o;=%)(3gptCFr@>?*1s$S5qD79HOGab$8jESNKMuQ~(49dlrwCnX}&!9NqNi z&B*iH`V=2fdq_0WQ~UASjW-dL=?zev`dKc1S4q&O7&D}@uZ%sUjoiP*GQQzzT(&!x zL_n&;gwlQHZ_)_kKrYt#t<@1kYt&bWlF$6{B1^=bAyi-^VB)OwdHUX@q|A46n|64< zK~U=cq1bst>q_qu2yq!*OY$wI4LRP6(ZftS-lHWWf++ySbzpnW*Kh$%l-j{7XyT{a7wV=NNN8|7@l z?o{$>q~hUcO<{-ifTg1wpmW!_?0$*mhzkYHL`%KTX>v&U9xRIqOHVc$=#KvS=ebR{ z_gJX&yZ#N836d83!^lG!Y~Z$Q`kdtmW1FM$p@j6Ou#tiBCc)9)5pnTzu3m!dQoy7Vk@|j_fjk zQx3utITbN_9eufIcdS*07FBIDD`Ng;GGo`M+41gln9XebR-|7~Z;lUQhRTo)UAlY9 z_d>nxXI^OaDx$HVFp&g9R1<~gayTUJ$o?McRIowWE?}g2ce7-75 z0+wL>8PDSMFj1`8Rn>pag#spWBuS7nXBBwJpR$0lDT7i(M^}wKezD7-#JFLUxQ2=) zMy@j!W|Pgx9f0a2A-bm|#j#1fdC_Tr%%1_y8)3!>M@Nt2SM{gV6WJ=k2i;FRi zMM1~HQ)m8h9Pmrs~sC+oDeI43DD{5ixTY&%=7f1Jo9dF$S4_3MN z9t;~|i3P-RIVc=l43#>TgX|Epfs8A^K*|S81eM}>a1{F+TYqQWQ_fgFFG<%%X1kYa zSOR#2CGvSRgkhGKdA*iyr2GN!u>(^AH*^bwO2nrSV~W5M_!w+ld zj<+@z5kxYSYYzUYo!{5J5TL#{q{8uU#Y$~Q3)F|~G=psObcd~Ir7&-iX|B&DQVC3J z{U`l2McqvJ`Ao@%67)7sB2}NFB0zJ~gYB33dfeaU)f;_WjVCXM%35=^V5bRDCu%sX z1Uon84$uQ>x5?toI6nRY=AXWrpT)7$Zj407G`=1fb$hFUjh-2%ML;7GiA${C2ij6e zKqDuItd$Hn*kvpw4+#SgDKBpc^5Oa(8=>PWIi759cpIn zq20t9QaOc)L=-Gbd8~(W1C~bJtGzYYRDJ9e>(qt16Rh}!i}^Fs_NT1SV4-9%Ops2= zhnP0tW!;w98xnMV&{p@G%S%4gt<2Z^Y08FV%b8MlHSoS@#kZ2b-V%zGZ|C}_undaE zf7ZABin)BWR4rq!_u}?j7~Ndc;eZqYtoFN`$V!Kchm zy%ALA9EpFBf=hf&){xiUQprrnpa;WE4#(#KoCI6+@4H$OGp>f#kjbjNK& z%7mvJClyi985IbD*7C#6kLo>=fOOh&(eh_K=@fC|nnH3w1seP9b3yLjZ*_yqUm7se zeeQB@=P2;lZC3oMj@x*Bmw7L5UGrvo_>1xSAfR5*nc%Tlbi6bn2rM5Rhy`@rxn}1^ z5UDr{a|hHUJ_&41CWw9>_?`Z{WM=m58MNES+6tUk=Lp>k*Kw6Eo6Cov?rO>`pd>5! 
zR|@4$<14>hO3r8|A9$`MMLl(e&b=?GvhR7HGN4OZ+XgMHmU+;d(oe^fdYb*15O0e~ z(|0Y}FL)g$z}G`#F+44m;XklpH`eeWgWeJiOFnv<;kz>9rYV*ft&V1BVo@TKg$iYO z;;cmbWp7M%rwrnIYro8v0w~rMswuZGO)>qTaxo9?LUH5Xywt-8;^@3osb1X`JXT#7 z7JG8|o(p%xnXr*4R49^O2s;{CBn&$>|5cNkBopHuxV#y{hu$vK*{WSacp`Xv+Tj<@ zG9VgXev1SXRJ<q&BCoaECGWl zf4578{b{iXHAPRXfZO^Ygj(y*DRJ3$Ys$s6#_+7`*!KXluinRS8@uO-O<85#mY9^e zZx?Pxwx@jJGkCB4<0f#h-cUuP=NNHSQpl0Y4;g)}DEnGLY-#jDg$pZV-kw29?x`TB z5=1$!8*+-ZkhOy-5-AnPpnWaHp88Y`4v`3Y%?FiEQFX8H$|F;v4;8=^EebLezaheU zftyD`$U?{-nP_KQ9IhFaLc(bMrn$%U-Sl7?V!OPX_ z0*$*FMXqss+P6B+8N0u$v2+ zh^oMd&MG&sOE`cT@z%2&iWM44NCXkN)Z=H3_ngw{N(XM6#HxaTWc&B{^O8T9i4o6G z`QLzo_;&`Oe$PFL+ZJ27idEA)h)^w1`8`I?5TOPr*LuuH@=iBim}pY`=hs|cpp*L# z+{RQ>&TrHfhL0B6V77w-@r6~@(kXAN9Zc&BoRk!MWE0fkY{c1^#$3X4BHPz}IQLmR zcmHh#t!H_*+fggn+`fi{%CP&BQY28nC^J0y`bnj2t)%tQCV~L^LxgtpMqJ-SN78w% z3s@>Xov>Owr!lRuJZ^@^-m=>}v1w$Za*T%)3Nmgu0N1x7qHV=Po?#K8uE=&qCa#XC zlv~?ESwWY(Fbx_ERf;8iEBwy%wh4cNSic?`vCguQlKN@-(Z_9UXs?*i)uv-6pEEIJ z<0i;;Vkn>!diz7^nCLNB32jVPUzg}={*&~pU)uI1#V zqbs%AzDMJ?`PFY(d;|5Fx`KI02$Mmi-FbMZ$ak_I(1ZU-cWeqhB3l=Ija-+P>e7(8 z+@eNoey69}c@w1E;w^cZ>N-Xl_f8+EqZDucoHLFOiXp+xI*35k%~oND)$rgG(V`3) z^nGAMwn73r)tjoh#zU0+ZhL@KMro5Awjv{+y-0sO)UZ{fL>Tv3GXy0f9g6`ANT(uD z(5DLD5-EryI1)8IeXPar=y0z<)7cr#V?-KH6UE6#A1m5f6Q7by7oI4X7q=@;g+zpT z1vc1Ok%CgI0svF)R_#g;?)0?&n1uw7WM&AFBBKzL>DP~$sFcDUR)5}Cf60Pd72dfC zdhbz*vKYAMLtE9TEDtYI4`|VRShV{U{NuRiXH9q;qLEYOhluG)KGWm@s%@PLJ zJFANZccb=PP@pi;g7PaHAyx&X2Z%H)1Ect$Bw9CHW0Zgb5*$+TuF%o4Q?x@vUyiwp zk1h%BZOrZahc9wEUi#f`0gK^WdaeY&B{TS)SbFb$b5}z<3ol6l! zf*x;K+*+!yGs~?6qGe%56QC5M8c~RP=^UyNx`KXM;s)~p{W2H##3WCj(=#h7$S0*@ z{Qlf$1Gf?6)W`|)JX0+M2Va$Bhccqv%p@Z|t(Y$Mo94-M?y$i=@RJCRFoAc8 z1=^jY6wIg~8k$Ai(ayKBIv@_b$BO={CBZ6_oKNHJE%6@?Fr$jDaow<*`}@l*f2(67 zdQor9vo8FztR;9zmo|W4d=c7uym?F&QU0>7KE=K2>qgVm$6D^i&ph0Y;ZW;u&s4T=d`Q;S zdekoan3QG$51sZ)X~A{IUSx9y4_qUv--_^wYNx>y8T!)l{CX8NF*MnvMECcE1==bv z7$Vrobx38SB{^&uK_-Sz?h?d_bT&Gy-Y>Smw(4fL5{gMFijbg7@wP&@5&LP!Scl>3 zMJuQ@R-TFD&8+>;PEK+?kslhopLN=8`0s02Dt!_G)z%!uME_FI*NU+^a{w*l0JQXT z<7XyWHw$u>QWh_EHJ{J2HnguOH-1UYk!9K}0<=pIp0KK$vWIybMaB)!-o0m0RZv(* zQ4YQzY%&qXQR`O&{9Wcp(AR_IE*ff%F}{SfN4N2e`QOT4zpRW+BWssT?_~pd(o0^+ zvr5NA|4W+Yv*ZbZm*rtpTgsC@#0uS0x$M6AF`#6(T{us zT0YFfW`l^-lm7dxJvfpDF|PyjY}{X&d^dDD3XbRdtYhwR19;{4qX|Q|nS zUU2-&oaz>W`WXOy>S`YUW>I zX9i{<>D6tA;ZY%gk&QGVcQszVBvt?TmxGlJx17M9k6y5SY2UNkE;4+CH`tja8?};4v@p z{*Dx^rlR3Q?O_T2)@1?5=G?DKzPlr}irR~AAZZSYjB|fEQ*KPjF&0wgrn8$T!oGL7 zGf%$%JG+1Ce0Nny$uC() z#=ENmzf-ygi;$WM;o_X;;N>%`g|G_gHk6 z!Q(`LMe^kcTWXDdR{SIonDX#7;J^je3%yG~+uWM2oE{Cxud27sr$BWPzgFSzJlDv_ zpFJ?qf7(9V)B&H$e=PgfZ&iCf_I=5tDhq!ud6kl>Zi%$OCLj)Ip`XZg{WDB!)lKCm zXodYk7ct9!3=Prwaq>1h_^m&b*9bL^=NZzhZbUXq?1yyoPnEd=Sa6XFgvlU@72GS2 z4OShOx!YMpW|~NQ@BfX!`-YRxyv=`746q6(JOvDy*Z80-z-RVc58F$N+MBW&i&ph# zv`t8Pcmjjj_XAM-Npk6zfCy-l#tC?6?#EhAG@x`5(L)deE|p|+8T$V=1K?G(pOfxc z9a{An()Bw|Ba5r{q80Toqtt8Pd9q)Bs2CF z=EQSjwEpUTm01gp=L8tL&i3u>khMjHEBiLKQqSs1hHg&dpQ@_>N_{H)Ibj}GP6?}_ z>bH|_zUVh!MFJ2RGoT-OfvL#S$qd z0l&A<&WY)km(}K;no#IUm%Y&><-BMvP`PUAsPhJ}-lx_R7zIOl?6YpT>4nOpti+{{~A8%3?D@;4OmJ*~ILWWvBZt*(pP?NNewa3{Y zq^serL*U1a)>k&;EjZRnmF@;o;yF zHt;wY-tkf;)U*f87t0aa$<3%hAvZvz70wB4!_kKAOM@!uCR)5{EIsd~Rq8@+*f-Au zJCw(73phw_HI3&NA<~meu=%-Vui?I;{gAFb{$OC~+sa_9bL8q*^iH?dOS4d@nV?SQ z^2?%r(0i8WfCy&5mXbr_EIASGGHX^Ry7g{C8rubnE7-Jw)@9F5JGgMUDTt3*4v_Y# zc=V)q&%;^X_WII--`ioWZtMYkP}z#wZCLuh#92j%kuM!JtE@4YiQJ0oFZ=VpJr=dHTdlk<8{p1@{3)NbDq4 zkn>VCoBve@5HgwP&}qNlM;;~}(wl9-!e|(4jWt$Ua{fsFs6U@F%{4ppym5GBEaSrijEN0@%X5 zT46KU;y0KAKz+b$ z9Lub;Y5naB{V%Jv;Da^Cx06RTK!)~Rlii+RyDoNiM{MeNU%~Zv9{|QT+(Cb5Wl}K~ zj(P9dL-r1gW;mL?R{N+2Xh*!dxmvLE5|G{Zan(F*vX*al 
z^7KJp;=R8BWPe7yzWI#2!sMjr8vDD?yN4$@%6p$(Bs0{``pmtp1_OfTkhfc@qn}^? zdAwfdeawcIH1SCuk)$sF!0!RG*Ajq49f$T8AMw;@gK}Rm{Kp;DQm=2o3NXpl!?}j; z0SuCX)a$L}GMnWJu1*cqS}sR`8t?T!xaT7W-)kn2$nP{jOVZY8WN?f?sqe6aaW0Fc63cq)lv zI2LzI`gU)|E7hCd&4YpTQh}eU40r2YPf3ArGf=#O&BEz{I*4PYfiTr#RYI zk}`p;*jG!}Cu^Bns>ElszkH`Oz7q$uL68!g{s|o8A@^B95O<1)m%MYv=1-A%lx-4k z_RAyyGNcIcF*F_g*mPWAlT~*>743de<2_IMztN?_72PDU{SBZMJL+L7?CB&THn_ih zIX78#lmq92benFufVCcbQE>Jbi3DI@SBYo%Dq{2JCnFT3qy(X;AR)FUPQ=@1(Z5Px zyKrahIUe2;CFlG#GAsJz+(YfUa((f9*&vAM-MPw(UJL7-J5d$X{yv8aiP*xnxtSPsq-Sc;tEnwwuil|ll> zseyE(i276I9hsxhzq3L#dD*?Ojah1LCL)ne-p*VN$z!!E?wv-u1G_X8wLVsWwNKR) zIFPIV19|sG9r)_R132Meaxw>i&QMsB-TbH4#{ zn(gN1z)pqmZRB_Dw*SS>LGDnL^!vb718VkjyWA?^oYCMv&LXUx=skO<_bGa_&F)W7>S`<`~#|8L!d*Yw9nA1rQm`e{!7`M&x7M%-KUBWmTgt_vDA96VrT^pqfHFdIt*&v{?%H+Sbw0V5`@kW zk@Av%%OLvVwkdz+)GnwhCF*pdQmVZviy9n@ kEbPl%#fR+wCvz3zjxksMl~h8kAP68;MNNe&IdJ&@17HoUPXGV_ literal 0 HcmV?d00001 From 99fc28a6c753647c1e172466dcc77b1cacfa74f5 Mon Sep 17 00:00:00 2001 From: Kelly Davis Date: Fri, 5 Mar 2021 16:46:18 +0100 Subject: [PATCH 3/8] More rebranding --- ISSUE_TEMPLATE.md | 2 +- doc/Decoder.rst | 2 +- evaluate_tflite.py | 8 +-- native_client/Android.mk | 2 +- native_client/CODINGSTYLE.md | 2 +- native_client/Makefile | 2 +- native_client/definitions.mk | 6 +-- .../dotnet/DeepSpeechClient/NativeImp.cs | 52 +++++++++---------- .../{deepspeech.nuspec.in => stt.nuspec.in} | 12 ++--- native_client/java/Makefile | 14 ++--- native_client/java/app/build.gradle | 4 +- .../ExampleInstrumentedTest.java | 4 +- .../java/app/src/main/AndroidManifest.xml | 2 +- .../sttexampleapp}/DeepSpeechActivity.java | 4 +- .../coqui/sttexampleapp}/ExampleUnitTest.java | 2 +- .../java/{libdeepspeech => libstt}/.gitignore | 0 .../{libdeepspeech => libstt}/CMakeLists.txt | 0 .../{libdeepspeech => libstt}/build.gradle | 24 ++++----- .../gradle.properties | 0 .../{libdeepspeech => libstt}/libs/.gitignore | 0 .../proguard-rules.pro | 0 .../java/ai/coqui/libstt}/test/BasicTest.java | 8 +-- .../src/main/AndroidManifest.xml | 2 +- .../ai/coqui/libstt}/DeepSpeechModel.java | 2 +- .../libstt}/DeepSpeechStreamingState.java | 2 +- .../libstt_doc}/CandidateTranscript.java | 2 +- .../libstt_doc}/DeepSpeech_Error_Codes.java | 2 +- .../java/ai/coqui/libstt_doc}/Metadata.java | 2 +- .../main/java/ai/coqui/libstt_doc}/README.rst | 2 +- .../ai/coqui/libstt_doc}/TokenMetadata.java | 2 +- .../src/main/res/values/strings.xml | 0 .../ai/coqui/libstt}/ExampleUnitTest.java | 2 +- native_client/javascript/package.json.in | 2 +- native_client/python/setup.py | 2 +- .../deepspeech_ios.xcodeproj/project.pbxproj | 2 +- .../swift/deepspeech_ios/DeepSpeech.swift | 4 +- .../swift/deepspeech_ios/deepspeech_ios.h | 4 +- .../project.pbxproj | 2 +- .../deepspeech_ios_test/AppDelegate.swift | 4 +- .../deepspeech_ios_test/AudioContext.swift | 5 +- .../deepspeech_ios_test/SceneDelegate.swift | 3 +- .../deepspeech_ios_testTests.swift | 3 +- taskcluster/README.rst | 2 +- taskcluster/android-build-dbg.sh | 2 +- taskcluster/android-build.sh | 2 +- taskcluster/arm64-build-dbg.sh | 2 +- taskcluster/arm64-build.sh | 2 +- taskcluster/cuda-build-dbg.sh | 2 +- taskcluster/cuda-build.sh | 2 +- taskcluster/examples-base.tyml | 2 +- taskcluster/host-build-dbg.sh | 2 +- taskcluster/host-build.sh | 2 +- taskcluster/rpi3-build-dbg.sh | 2 +- taskcluster/rpi3-build.sh | 2 +- taskcluster/tc-all-utils.sh | 2 +- taskcluster/tc-android-utils.sh | 2 +- taskcluster/tc-build-utils.sh | 12 ++--- taskcluster/tc-dotnet-utils.sh | 2 +- taskcluster/win-build.sh | 10 ++-- 
taskcluster/win-opt-base.tyml | 2 +- 60 files changed, 128 insertions(+), 125 deletions(-) rename native_client/dotnet/nupkg/{deepspeech.nuspec.in => stt.nuspec.in} (54%) rename native_client/java/app/src/androidTest/java/{org/deepspeech => ai/coqui/sttexampleapp}/ExampleInstrumentedTest.java (85%) rename native_client/java/app/src/main/java/{org/deepspeech => ai/coqui/sttexampleapp}/DeepSpeechActivity.java (98%) rename native_client/java/{libdeepspeech/src/test/java/org/deepspeech/libdeepspeech => app/src/test/java/ai/coqui/sttexampleapp}/ExampleUnitTest.java (90%) rename native_client/java/{libdeepspeech => libstt}/.gitignore (100%) rename native_client/java/{libdeepspeech => libstt}/CMakeLists.txt (100%) rename native_client/java/{libdeepspeech => libstt}/build.gradle (79%) rename native_client/java/{libdeepspeech => libstt}/gradle.properties (100%) rename native_client/java/{libdeepspeech => libstt}/libs/.gitignore (100%) rename native_client/java/{libdeepspeech => libstt}/proguard-rules.pro (100%) rename native_client/java/{libdeepspeech/src/androidTest/java/org/deepspeech/libdeepspeech => libstt/src/androidTest/java/ai/coqui/libstt}/test/BasicTest.java (95%) rename native_client/java/{libdeepspeech => libstt}/src/main/AndroidManifest.xml (60%) rename native_client/java/{libdeepspeech/src/main/java/org/deepspeech/libdeepspeech => libstt/src/main/java/ai/coqui/libstt}/DeepSpeechModel.java (99%) rename native_client/java/{libdeepspeech/src/main/java/org/deepspeech/libdeepspeech => libstt/src/main/java/ai/coqui/libstt}/DeepSpeechStreamingState.java (87%) rename native_client/java/{libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc => libstt/src/main/java/ai/coqui/libstt_doc}/CandidateTranscript.java (98%) rename native_client/java/{libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc => libstt/src/main/java/ai/coqui/libstt_doc}/DeepSpeech_Error_Codes.java (98%) rename native_client/java/{libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc => libstt/src/main/java/ai/coqui/libstt_doc}/Metadata.java (97%) rename native_client/java/{libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc => libstt/src/main/java/ai/coqui/libstt_doc}/README.rst (53%) rename native_client/java/{libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc => libstt/src/main/java/ai/coqui/libstt_doc}/TokenMetadata.java (97%) rename native_client/java/{libdeepspeech => libstt}/src/main/res/values/strings.xml (100%) rename native_client/java/{app/src/test/java/org/deepspeech => libstt/src/test/java/ai/coqui/libstt}/ExampleUnitTest.java (93%) diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md index 08345c3a..2d72979b 100644 --- a/ISSUE_TEMPLATE.md +++ b/ISSUE_TEMPLATE.md @@ -1,4 +1,4 @@ -For support and discussions, please use our [Discourse forums](https://discourse.mozilla.org/c/deep-speech). +For support and discussions, please use [GitHub Discussions](https://github.com/coqui-ai/STT/discussions). If you've found a bug, or have a feature request, then please create an issue with the following information: diff --git a/doc/Decoder.rst b/doc/Decoder.rst index 1ca2026b..471f1234 100644 --- a/doc/Decoder.rst +++ b/doc/Decoder.rst @@ -22,7 +22,7 @@ The use of an external scorer is fully optional. When an external scorer is not Currently, the 🐸STT external scorer is implemented with `KenLM `_, plus some tooling to package the necessary files and metadata into a single ``.scorer`` package. The tooling lives in ``data/lm/``. 
The scripts included in ``data/lm/`` can be used and modified to build your own language model based on your particular use case or language. See :ref:`scorer-scripts` for more details on how to reproduce our scorer file as well as create your own. -The scripts are geared towards replicating the language model files we release as part of `STT model releases `_, but modifying them to use different datasets or language model construction parameters should be simple. +The scripts are geared towards replicating the language model files we release as part of `STT model releases `_, but modifying them to use different datasets or language model construction parameters should be simple. Decoding modes diff --git a/evaluate_tflite.py b/evaluate_tflite.py index e9b44725..af3c2385 100644 --- a/evaluate_tflite.py +++ b/evaluate_tflite.py @@ -19,14 +19,14 @@ from six.moves import zip, range r''' This module should be self-contained: - - build libdeepspeech.so with TFLite: - - bazel build [...] --define=runtime=tflite [...] //native_client:libdeepspeech.so + - build libstt.so with TFLite: + - bazel build [...] --define=runtime=tflite [...] //native_client:libstt.so - make -C native_client/python/ TFDIR=... bindings - setup a virtualenv - - pip install native_client/python/dist/deepspeech*.whl + - pip install native_client/python/dist/*.whl - pip install -r requirements_eval_tflite.txt -Then run with a TF Lite model, a scorer and a CSV test file +Then run with a TFLite model, a scorer and a CSV test file ''' def tflite_worker(model, scorer, queue_in, queue_out, gpu_mask): diff --git a/native_client/Android.mk b/native_client/Android.mk index d21551fd..6ffb0101 100644 --- a/native_client/Android.mk +++ b/native_client/Android.mk @@ -2,7 +2,7 @@ LOCAL_PATH := $(call my-dir) include $(CLEAR_VARS) LOCAL_MODULE := deepspeech-prebuilt -LOCAL_SRC_FILES := $(TFDIR)/bazel-bin/native_client/libdeepspeech.so +LOCAL_SRC_FILES := $(TFDIR)/bazel-bin/native_client/libstt.so include $(PREBUILT_SHARED_LIBRARY) include $(CLEAR_VARS) diff --git a/native_client/CODINGSTYLE.md b/native_client/CODINGSTYLE.md index 127b959d..f0e4ec48 100644 --- a/native_client/CODINGSTYLE.md +++ b/native_client/CODINGSTYLE.md @@ -25,4 +25,4 @@ File naming Doubts ====== -If in doubt, please ask on our Matrix chat channel: https://chat.mozilla.org/#/room/#machinelearning:mozilla.org +If in doubt, please ask on our Matrix chat channel: https://matrix.to/#/#stt:matrix.org?via=matrix.org diff --git a/native_client/Makefile b/native_client/Makefile index 15e1092f..e9594d48 100644 --- a/native_client/Makefile +++ b/native_client/Makefile @@ -32,7 +32,7 @@ debug: $(STT_BIN) install: $(STT_BIN) install -d ${PREFIX}/lib - install -m 0644 ${TFDIR}/bazel-bin/native_client/libdeepspeech.so ${PREFIX}/lib/ + install -m 0644 ${TFDIR}/bazel-bin/native_client/libstt.so ${PREFIX}/lib/ install -d ${PREFIX}/include install -m 0644 coqui-stt.h ${PREFIX}/include install -d ${PREFIX}/bin diff --git a/native_client/definitions.mk b/native_client/definitions.mk index c8fee508..b6ca13e4 100644 --- a/native_client/definitions.mk +++ b/native_client/definitions.mk @@ -20,7 +20,7 @@ endif STT_BIN := stt$(PLATFORM_EXE_SUFFIX) CFLAGS_DEEPSPEECH := -std=c++11 -o $(STT_BIN) -LINK_DEEPSPEECH := -ldeepspeech +LINK_DEEPSPEECH := -lstt LINK_PATH_DEEPSPEECH := -L${TFDIR}/bazel-bin/native_client ifeq ($(TARGET),host) @@ -61,7 +61,7 @@ TOOL_CC := cl.exe TOOL_CXX := cl.exe TOOL_LD := link.exe TOOL_LIBEXE := lib.exe -LINK_DEEPSPEECH := 
$(TFDIR)\bazel-bin\native_client\libdeepspeech.so.if.lib +LINK_DEEPSPEECH := $(TFDIR)\bazel-bin\native_client\libstt.so.if.lib LINK_PATH_DEEPSPEECH := CFLAGS_DEEPSPEECH := -nologo -Fe$(STT_BIN) SOX_CFLAGS := @@ -182,7 +182,7 @@ define copy_missing_libs new_missing="$$( (for f in $$(otool -L $$lib 2>/dev/null | tail -n +2 | awk '{ print $$1 }' | grep -v '$$lib'); do ls -hal $$f; done;) 2>&1 | grep 'No such' | cut -d':' -f2 | xargs basename -a)"; \ missing_libs="$$missing_libs $$new_missing"; \ elif [ "$(OS)" = "${TC_MSYS_VERSION}" ]; then \ - missing_libs="libdeepspeech.so"; \ + missing_libs="libstt.so"; \ else \ missing_libs="$$missing_libs $$($(LDD) $$lib | grep 'not found' | awk '{ print $$1 }')"; \ fi; \ diff --git a/native_client/dotnet/DeepSpeechClient/NativeImp.cs b/native_client/dotnet/DeepSpeechClient/NativeImp.cs index 1a7dacac..49532360 100644 --- a/native_client/dotnet/DeepSpeechClient/NativeImp.cs +++ b/native_client/dotnet/DeepSpeechClient/NativeImp.cs @@ -6,107 +6,107 @@ using System.Runtime.InteropServices; namespace DeepSpeechClient { /// - /// Wrapper for the native implementation of "libdeepspeech.so" + /// Wrapper for the native implementation of "libstt.so" /// internal static class NativeImp { #region Native Implementation - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl, + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi, SetLastError = true)] internal static extern IntPtr DS_Version(); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl)] + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] internal unsafe static extern ErrorCodes DS_CreateModel(string aModelPath, ref IntPtr** pint); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl)] + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] internal unsafe static extern IntPtr DS_ErrorCodeToErrorMessage(int aErrorCode); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl)] + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] internal unsafe static extern uint DS_GetModelBeamWidth(IntPtr** aCtx); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl)] + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] internal unsafe static extern ErrorCodes DS_SetModelBeamWidth(IntPtr** aCtx, uint aBeamWidth); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl)] + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] internal unsafe static extern ErrorCodes DS_CreateModel(string aModelPath, uint aBeamWidth, ref IntPtr** pint); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl)] + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] internal unsafe static extern int DS_GetModelSampleRate(IntPtr** aCtx); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl)] + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] internal static unsafe extern ErrorCodes DS_EnableExternalScorer(IntPtr** aCtx, string aScorerPath); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl)] + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] internal static unsafe extern ErrorCodes DS_AddHotWord(IntPtr** aCtx, string aWord, float aBoost); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl)] + [DllImport("libstt.so", 
CallingConvention = CallingConvention.Cdecl)] internal static unsafe extern ErrorCodes DS_EraseHotWord(IntPtr** aCtx, string aWord); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl)] + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] internal static unsafe extern ErrorCodes DS_ClearHotWords(IntPtr** aCtx); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl)] + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] internal static unsafe extern ErrorCodes DS_DisableExternalScorer(IntPtr** aCtx); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl)] + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] internal static unsafe extern ErrorCodes DS_SetScorerAlphaBeta(IntPtr** aCtx, float aAlpha, float aBeta); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl, + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi, SetLastError = true)] internal static unsafe extern IntPtr DS_SpeechToText(IntPtr** aCtx, short[] aBuffer, uint aBufferSize); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl, SetLastError = true)] + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl, SetLastError = true)] internal static unsafe extern IntPtr DS_SpeechToTextWithMetadata(IntPtr** aCtx, short[] aBuffer, uint aBufferSize, uint aNumResults); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl)] + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] internal static unsafe extern void DS_FreeModel(IntPtr** aCtx); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl)] + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] internal static unsafe extern ErrorCodes DS_CreateStream(IntPtr** aCtx, ref IntPtr** retval); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl)] + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] internal static unsafe extern void DS_FreeStream(IntPtr** aSctx); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl)] + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] internal static unsafe extern void DS_FreeMetadata(IntPtr metadata); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl)] + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] internal static unsafe extern void DS_FreeString(IntPtr str); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl, + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi, SetLastError = true)] internal static unsafe extern void DS_FeedAudioContent(IntPtr** aSctx, short[] aBuffer, uint aBufferSize); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl)] + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] internal static unsafe extern IntPtr DS_IntermediateDecode(IntPtr** aSctx); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl)] + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] internal static unsafe extern IntPtr DS_IntermediateDecodeWithMetadata(IntPtr** aSctx, uint aNumResults); - [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl, + [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi, 
SetLastError = true)]
        internal static unsafe extern IntPtr DS_FinishStream(IntPtr** aSctx);

-        [DllImport("libdeepspeech.so", CallingConvention = CallingConvention.Cdecl)]
+        [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)]
        internal static unsafe extern IntPtr DS_FinishStreamWithMetadata(IntPtr** aSctx, uint aNumResults);
        #endregion
diff --git a/native_client/dotnet/nupkg/deepspeech.nuspec.in b/native_client/dotnet/nupkg/stt.nuspec.in
similarity index 54%
rename from native_client/dotnet/nupkg/deepspeech.nuspec.in
rename to native_client/dotnet/nupkg/stt.nuspec.in
index 68e192c8..5c563bb6 100644
--- a/native_client/dotnet/nupkg/deepspeech.nuspec.in
+++ b/native_client/dotnet/nupkg/stt.nuspec.in
@@ -3,14 +3,14 @@
     <id>$NUPKG_ID</id>
     <version>$NUPKG_VERSION</version>
-    <title>DeepSpeech</title>
-    <authors>DeepSpeech authors</authors>
-    <owners>DeepSpeech authors</owners>
+    <title>Coqui STT</title>
+    <authors>Coqui GmbH</authors>
+    <owners>Coqui GmbH</owners>
     <license type="expression">MPL-2.0</license>
-    <projectUrl>http://github.com/mozilla/DeepSpeech</projectUrl>
+    <projectUrl>http://github.com/coqui-ai/STT</projectUrl>
     <requireLicenseAcceptance>false</requireLicenseAcceptance>
-    <description>A library for running inference with a DeepSpeech model</description>
-    <copyright>Copyright (c) 2019-2020 Mozilla Corporation, 2020 DeepSpeech authors</copyright>
+    <description>A library for doing speech recognition using a Coqui STT model</description>
+    <copyright>Copyright (c) 2019-2020 Mozilla Corporation, (c) 2020 DeepSpeech authors, (c) 2021 Coqui GmbH</copyright>
     <tags>native speech speech_recognition</tags>
diff --git a/native_client/java/Makefile b/native_client/java/Makefile
index 90493621..31f5c078 100644
--- a/native_client/java/Makefile
+++ b/native_client/java/Makefile
@@ -2,7 +2,7 @@
 
 include ../definitions.mk
 
-ARCHS := $(shell grep 'ABI_FILTERS' libdeepspeech/gradle.properties | cut -d'=' -f2 | sed -e 's/;/ /g')
+ARCHS := $(shell grep 'ABI_FILTERS' libstt/gradle.properties | cut -d'=' -f2 | sed -e 's/;/ /g')
 GRADLE ?= ./gradlew
 
 all: apk
@@ -14,13 +14,13 @@ apk-clean:
 	$(GRADLE) clean
 
 libs-clean:
-	rm -fr libdeepspeech/libs/*/libdeepspeech.so
+	rm -fr libstt/libs/*/libstt.so
 
-libdeepspeech/libs/%/libdeepspeech.so:
-	-mkdir libdeepspeech/libs/$*/
-	cp ${TFDIR}/bazel-out/$*-*/bin/native_client/libdeepspeech.so libdeepspeech/libs/$*/
+libstt/libs/%/libstt.so:
+	-mkdir libstt/libs/$*/
+	cp ${TFDIR}/bazel-out/$*-*/bin/native_client/libstt.so libstt/libs/$*/
 
-apk: apk-clean bindings $(patsubst %,libdeepspeech/libs/%/libdeepspeech.so,$(ARCHS))
+apk: apk-clean bindings $(patsubst %,libstt/libs/%/libstt.so,$(ARCHS))
 	$(GRADLE) build
 
 maven-bundle: apk
@@ -28,4 +28,4 @@ maven-bundle: apk
 	$(GRADLE) zipMavenArtifacts
 
 bindings: clean ds-swig
-	$(DS_SWIG_ENV) swig -c++ -java -package org.deepspeech.libdeepspeech -outdir libdeepspeech/src/main/java/org/deepspeech/libdeepspeech/ -o jni/deepspeech_wrap.cpp jni/deepspeech.i
+	$(DS_SWIG_ENV) swig -c++ -java -package ai.coqui.libstt -outdir libstt/src/main/java/ai/coqui/libstt/ -o jni/deepspeech_wrap.cpp jni/deepspeech.i
diff --git a/native_client/java/app/build.gradle b/native_client/java/app/build.gradle
index 3b5b124a..11ef0ee5 100644
--- a/native_client/java/app/build.gradle
+++ b/native_client/java/app/build.gradle
@@ -4,7 +4,7 @@ android {
     compileSdkVersion 27
 
     defaultConfig {
-        applicationId "org.deepspeech"
+        applicationId "ai.coqui.sttexampleapp"
         minSdkVersion 21
         targetSdkVersion 27
         versionName androidGitVersion.name()
@@ -28,7 +28,7 @@ android {
 
 dependencies {
     implementation fileTree(dir: 'libs', include: ['*.jar'])
-    implementation project(':libdeepspeech')
+    implementation project(':libstt')
     implementation 'com.android.support:appcompat-v7:27.1.1'
     implementation 'com.android.support.constraint:constraint-layout:1.1.3'
     testImplementation 'junit:junit:4.12'
diff --git a/native_client/java/app/src/androidTest/java/org/deepspeech/ExampleInstrumentedTest.java b/native_client/java/app/src/androidTest/java/ai/coqui/sttexampleapp/ExampleInstrumentedTest.java
similarity index 85%
rename from native_client/java/app/src/androidTest/java/org/deepspeech/ExampleInstrumentedTest.java
rename to native_client/java/app/src/androidTest/java/ai/coqui/sttexampleapp/ExampleInstrumentedTest.java
index ea6458a1..0a68a324 100644
--- a/native_client/java/app/src/androidTest/java/org/deepspeech/ExampleInstrumentedTest.java
+++ b/native_client/java/app/src/androidTest/java/ai/coqui/sttexampleapp/ExampleInstrumentedTest.java
@@ -1,4 +1,4 @@
-package org.deepspeech;
+package ai.coqui.sttexampleapp;
 
 import android.content.Context;
 import android.support.test.InstrumentationRegistry;
@@ -21,6 +21,6 @@ public class ExampleInstrumentedTest {
         // Context of the app under test.
         Context appContext = InstrumentationRegistry.getTargetContext();
 
-        assertEquals("org.deepspeech", appContext.getPackageName());
+        assertEquals("ai.coqui.sttexampleapp", appContext.getPackageName());
     }
 }
diff --git a/native_client/java/app/src/main/AndroidManifest.xml b/native_client/java/app/src/main/AndroidManifest.xml
index 668ef13f..e9a371d1 100644
--- a/native_client/java/app/src/main/AndroidManifest.xml
+++ b/native_client/java/app/src/main/AndroidManifest.xml
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="utf-8"?>
 <manifest xmlns:android="http://schemas.android.com/apk/res/android"
-    package="org.deepspeech">
+    package="ai.coqui.sttexampleapp">
diff --git a/native_client/java/libdeepspeech/src/main/AndroidManifest.xml b/native_client/java/libstt/src/main/AndroidManifest.xml
similarity index 60%
rename from native_client/java/libdeepspeech/src/main/AndroidManifest.xml
rename to native_client/java/libstt/src/main/AndroidManifest.xml
--- a/native_client/java/libdeepspeech/src/main/AndroidManifest.xml
+++ b/native_client/java/libstt/src/main/AndroidManifest.xml
@@ -1,4 +1,4 @@
 <manifest xmlns:android="http://schemas.android.com/apk/res/android"
-    package="org.deepspeech.libdeepspeech" />
+    package="ai.coqui.libstt" />
diff --git a/native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech/DeepSpeechModel.java b/native_client/java/libstt/src/main/java/ai/coqui/libstt/DeepSpeechModel.java
similarity index 99%
rename from native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech/DeepSpeechModel.java
rename to native_client/java/libstt/src/main/java/ai/coqui/libstt/DeepSpeechModel.java
index a9f17b44..9e3d6b26 100644
--- a/native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech/DeepSpeechModel.java
+++ b/native_client/java/libstt/src/main/java/ai/coqui/libstt/DeepSpeechModel.java
@@ -1,4 +1,4 @@
-package org.deepspeech.libdeepspeech;
+package ai.coqui.libstt;
 
 /**
  * @brief Exposes a DeepSpeech model in Java
diff --git a/native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech/DeepSpeechStreamingState.java b/native_client/java/libstt/src/main/java/ai/coqui/libstt/DeepSpeechStreamingState.java
similarity index 87%
rename from native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech/DeepSpeechStreamingState.java
rename to native_client/java/libstt/src/main/java/ai/coqui/libstt/DeepSpeechStreamingState.java
index cd9aafe4..63d18a10 100644
--- a/native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech/DeepSpeechStreamingState.java
+++ b/native_client/java/libstt/src/main/java/ai/coqui/libstt/DeepSpeechStreamingState.java
@@ -1,4 +1,4 @@
-package org.deepspeech.libdeepspeech;
+package ai.coqui.libstt;
 
 public final class DeepSpeechStreamingState {
     private SWIGTYPE_p_StreamingState _sp;
diff --git a/native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc/CandidateTranscript.java b/native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/CandidateTranscript.java
similarity index 98%
rename from native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc/CandidateTranscript.java
rename to native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/CandidateTranscript.java
index 53a0ef64..1ee01eb8 100644
--- a/native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc/CandidateTranscript.java
+++ b/native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/CandidateTranscript.java
@@ -6,7 +6,7 @@
 * the SWIG interface file instead.
 * ----------------------------------------------------------------------------- */
 
-package org.deepspeech.libdeepspeech;
+package ai.coqui.libstt;
 
 /**
 * A single transcript computed by the model, including a confidence
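The Java sources being renamed here are SWIG-generated, so the package move from ``org.deepspeech.libdeepspeech`` to ``ai.coqui.libstt`` has to be mirrored in the generator invocation, not just in the checked-in files. As a rough sketch — assuming SWIG 4.0+ on the ``PATH`` and ``native_client/java/`` as the working directory — the two invocations after this patch look as follows (the first mirrors the ``bindings`` rule in the Java Makefile above, the second the regeneration command in the ``libstt_doc`` README below):

.. code-block:: bash

   # Runtime bindings (the Makefile rule additionally prefixes this with $(DS_SWIG_ENV))
   swig -c++ -java -package ai.coqui.libstt \
        -outdir libstt/src/main/java/ai/coqui/libstt/ \
        -o jni/deepspeech_wrap.cpp jni/deepspeech.i

   # Documentation variant used for the libstt_doc sources (adds -doxygen)
   swig -c++ -java -doxygen -package ai.coqui.libstt \
        -outdir libstt/src/main/java/ai/coqui/libstt_doc \
        -o jni/deepspeech_wrap.cpp jni/deepspeech.i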
diff --git a/native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc/DeepSpeech_Error_Codes.java b/native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/DeepSpeech_Error_Codes.java similarity index 98% rename from native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc/DeepSpeech_Error_Codes.java rename to native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/DeepSpeech_Error_Codes.java index 0a05439d..93147fb8 100644 --- a/native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc/DeepSpeech_Error_Codes.java +++ b/native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/DeepSpeech_Error_Codes.java @@ -6,7 +6,7 @@ * the SWIG interface file instead. * ----------------------------------------------------------------------------- */ -package org.deepspeech.libdeepspeech; +package ai.coqui.libstt; public enum DeepSpeech_Error_Codes { ERR_OK(0x0000), diff --git a/native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc/Metadata.java b/native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/Metadata.java similarity index 97% rename from native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc/Metadata.java rename to native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/Metadata.java index b85fc82e..7cc73b49 100644 --- a/native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc/Metadata.java +++ b/native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/Metadata.java @@ -6,7 +6,7 @@ * the SWIG interface file instead. * ----------------------------------------------------------------------------- */ -package org.deepspeech.libdeepspeech; +package ai.coqui.libstt; /** * An array of CandidateTranscript objects computed by the model. diff --git a/native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc/README.rst b/native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/README.rst similarity index 53% rename from native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc/README.rst rename to native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/README.rst index 0181ab31..d9078803 100644 --- a/native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc/README.rst +++ b/native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/README.rst @@ -8,4 +8,4 @@ To update, please install SWIG (4.0 at least) and then run from native_client/ja .. code-block:: - swig -c++ -java -doxygen -package org.deepspeech.libdeepspeech -outdir libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc -o jni/deepspeech_wrap.cpp jni/deepspeech.i + swig -c++ -java -doxygen -package ai.coqui.libstt -outdir libstt/src/main/java/ai/coqui/libstt_doc -o jni/deepspeech_wrap.cpp jni/deepspeech.i diff --git a/native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc/TokenMetadata.java b/native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/TokenMetadata.java similarity index 97% rename from native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc/TokenMetadata.java rename to native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/TokenMetadata.java index 45ed9052..6e147eca 100644 --- a/native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc/TokenMetadata.java +++ b/native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/TokenMetadata.java @@ -6,7 +6,7 @@ * the SWIG interface file instead. 
* ----------------------------------------------------------------------------- */ -package org.deepspeech.libdeepspeech; +package ai.coqui.libstt; /** * Stores text of an individual token, along with its timing information diff --git a/native_client/java/libdeepspeech/src/main/res/values/strings.xml b/native_client/java/libstt/src/main/res/values/strings.xml similarity index 100% rename from native_client/java/libdeepspeech/src/main/res/values/strings.xml rename to native_client/java/libstt/src/main/res/values/strings.xml diff --git a/native_client/java/app/src/test/java/org/deepspeech/ExampleUnitTest.java b/native_client/java/libstt/src/test/java/ai/coqui/libstt/ExampleUnitTest.java similarity index 93% rename from native_client/java/app/src/test/java/org/deepspeech/ExampleUnitTest.java rename to native_client/java/libstt/src/test/java/ai/coqui/libstt/ExampleUnitTest.java index a40f0582..64262910 100644 --- a/native_client/java/app/src/test/java/org/deepspeech/ExampleUnitTest.java +++ b/native_client/java/libstt/src/test/java/ai/coqui/libstt/ExampleUnitTest.java @@ -1,4 +1,4 @@ -package org.deepspeech; +package ai.coqui.libstt; import org.junit.Test; diff --git a/native_client/javascript/package.json.in b/native_client/javascript/package.json.in index f6011925..b02a174c 100644 --- a/native_client/javascript/package.json.in +++ b/native_client/javascript/package.json.in @@ -1,7 +1,7 @@ { "name" : "$(PROJECT_NAME)", "version" : "$(PROJECT_VERSION)", - "description" : "Coqui STT NodeJS bindings", + "description" : "A library for doing speech recognition using a Coqui STT model", "main" : "./index.js", "types": "./index.d.ts", "bin": { diff --git a/native_client/python/setup.py b/native_client/python/setup.py index 093b85cd..8db083a0 100755 --- a/native_client/python/setup.py +++ b/native_client/python/setup.py @@ -67,7 +67,7 @@ def main(): swig_opts=['-c++', '-keyword']) setup(name=project_name, - description='A library for running inference on a Coqui STT model', + description='A library for doing speech recognition using a Coqui STT model', long_description=read('README.rst'), long_description_content_type='text/x-rst; charset=UTF-8', author='Coqui GmbH', diff --git a/native_client/swift/deepspeech_ios.xcodeproj/project.pbxproj b/native_client/swift/deepspeech_ios.xcodeproj/project.pbxproj index 3f3a2d7f..cdd0419b 100644 --- a/native_client/swift/deepspeech_ios.xcodeproj/project.pbxproj +++ b/native_client/swift/deepspeech_ios.xcodeproj/project.pbxproj @@ -166,7 +166,7 @@ attributes = { LastSwiftUpdateCheck = 1150; LastUpgradeCheck = 1150; - ORGANIZATIONNAME = Mozilla; + ORGANIZATIONNAME = "Coqui GmbH"; TargetAttributes = { 505B136024960D550007DADA = { CreatedOnToolsVersion = 11.5; diff --git a/native_client/swift/deepspeech_ios/DeepSpeech.swift b/native_client/swift/deepspeech_ios/DeepSpeech.swift index 5d254c99..f6f4d983 100644 --- a/native_client/swift/deepspeech_ios/DeepSpeech.swift +++ b/native_client/swift/deepspeech_ios/DeepSpeech.swift @@ -3,8 +3,8 @@ // deepspeech_ios // // Created by Reuben Morais on 14.06.20. -// Copyright © 2020 Mozilla. All rights reserved. 
-// +// Copyright © 2020 Mozilla +// Copyright © 2021 Coqui GmbH import deepspeech_ios.libdeepspeech_Private diff --git a/native_client/swift/deepspeech_ios/deepspeech_ios.h b/native_client/swift/deepspeech_ios/deepspeech_ios.h index a40fb954..271129bc 100644 --- a/native_client/swift/deepspeech_ios/deepspeech_ios.h +++ b/native_client/swift/deepspeech_ios/deepspeech_ios.h @@ -3,8 +3,8 @@ // deepspeech_ios // // Created by Reuben Morais on 14.06.20. -// Copyright © 2020 Mozilla. All rights reserved. -// +// Copyright © 2020 Mozilla +// Copyright © 2021 Coqui GmbH #import diff --git a/native_client/swift/deepspeech_ios_test.xcodeproj/project.pbxproj b/native_client/swift/deepspeech_ios_test.xcodeproj/project.pbxproj index 524126c8..a7d03464 100644 --- a/native_client/swift/deepspeech_ios_test.xcodeproj/project.pbxproj +++ b/native_client/swift/deepspeech_ios_test.xcodeproj/project.pbxproj @@ -236,7 +236,7 @@ attributes = { LastSwiftUpdateCheck = 1150; LastUpgradeCheck = 1150; - ORGANIZATIONNAME = Mozilla; + ORGANIZATIONNAME = "Coqui GmbH"; TargetAttributes = { 50F787EE2497683900D52237 = { CreatedOnToolsVersion = 11.5; diff --git a/native_client/swift/deepspeech_ios_test/AppDelegate.swift b/native_client/swift/deepspeech_ios_test/AppDelegate.swift index 32753486..46ac2aea 100644 --- a/native_client/swift/deepspeech_ios_test/AppDelegate.swift +++ b/native_client/swift/deepspeech_ios_test/AppDelegate.swift @@ -3,8 +3,8 @@ // deepspeech_ios_test // // Created by Reuben Morais on 15.06.20. -// Copyright © 2020 Mozilla. All rights reserved. -// +// Copyright © 2020 Mozilla +// Copyright © 2021 Coqui GmbH import UIKit diff --git a/native_client/swift/deepspeech_ios_test/AudioContext.swift b/native_client/swift/deepspeech_ios_test/AudioContext.swift index 60999bd3..10f7dfba 100644 --- a/native_client/swift/deepspeech_ios_test/AudioContext.swift +++ b/native_client/swift/deepspeech_ios_test/AudioContext.swift @@ -3,8 +3,9 @@ // deepspeech_ios_test // // Created by Erik Ziegler on 27.07.20. -// Copyright © 2020 Mozilla. All rights reserved. -// +// Copyright © 2020 Mozilla +// Copyright © 2020 Erik Ziegler +// Copyright © 2020 Coqui GmbH import Foundation import AVFoundation diff --git a/native_client/swift/deepspeech_ios_test/SceneDelegate.swift b/native_client/swift/deepspeech_ios_test/SceneDelegate.swift index 40d85e4a..5a5fb54d 100644 --- a/native_client/swift/deepspeech_ios_test/SceneDelegate.swift +++ b/native_client/swift/deepspeech_ios_test/SceneDelegate.swift @@ -3,7 +3,8 @@ // deepspeech_ios_test // // Created by Reuben Morais on 15.06.20. -// Copyright © 2020 Mozilla. All rights reserved. +// Copyright © 2020 Mozilla +// Copyright © 2021 Coqui GmbH // import UIKit diff --git a/native_client/swift/deepspeech_ios_testTests/deepspeech_ios_testTests.swift b/native_client/swift/deepspeech_ios_testTests/deepspeech_ios_testTests.swift index 0e5b449d..4bd823da 100644 --- a/native_client/swift/deepspeech_ios_testTests/deepspeech_ios_testTests.swift +++ b/native_client/swift/deepspeech_ios_testTests/deepspeech_ios_testTests.swift @@ -3,7 +3,8 @@ // deepspeech_ios_testTests // // Created by Reuben Morais on 15.06.20. -// Copyright © 2020 Mozilla. All rights reserved. 
+// Copyright © 2020 Mozilla +// Copyright © 2021 Coqui GmbH // import XCTest diff --git a/taskcluster/README.rst b/taskcluster/README.rst index 1c71b295..0ffb7c6e 100644 --- a/taskcluster/README.rst +++ b/taskcluster/README.rst @@ -2,6 +2,6 @@ Taskcluster =========== -This directory contains files associated with Taskcluster -- a task execution framework for Mozilla's Continuous Integration system. +This directory contains files associated with Taskcluster -- a task execution framework developed by Mozilla for use in Continuous Integration systems. Please consult the `existing Taskcluster documentation `_. diff --git a/taskcluster/android-build-dbg.sh b/taskcluster/android-build-dbg.sh index 9646bb3d..3825b41c 100755 --- a/taskcluster/android-build-dbg.sh +++ b/taskcluster/android-build-dbg.sh @@ -9,7 +9,7 @@ source $(dirname "$0")/tc-tests-utils.sh source $(dirname "$0")/tf_tc-vars.sh BAZEL_TARGETS=" -//native_client:libdeepspeech.so +//native_client:libstt.so " if [ "${arm_flavor}" = "armeabi-v7a" ]; then diff --git a/taskcluster/android-build.sh b/taskcluster/android-build.sh index 2b9e0393..4f64b4e8 100644 --- a/taskcluster/android-build.sh +++ b/taskcluster/android-build.sh @@ -9,7 +9,7 @@ source $(dirname "$0")/tc-tests-utils.sh source $(dirname "$0")/tf_tc-vars.sh BAZEL_TARGETS=" -//native_client:libdeepspeech.so +//native_client:libstt.so //native_client:generate_scorer_package " diff --git a/taskcluster/arm64-build-dbg.sh b/taskcluster/arm64-build-dbg.sh index 20d4cd87..ed1ec012 100755 --- a/taskcluster/arm64-build-dbg.sh +++ b/taskcluster/arm64-build-dbg.sh @@ -7,7 +7,7 @@ source $(dirname "$0")/tc-tests-utils.sh source $(dirname "$0")/tf_tc-vars.sh BAZEL_TARGETS=" -//native_client:libdeepspeech.so +//native_client:libstt.so " BAZEL_BUILD_FLAGS="${BAZEL_ARM64_FLAGS} ${BAZEL_EXTRA_FLAGS}" diff --git a/taskcluster/arm64-build.sh b/taskcluster/arm64-build.sh index 1ca4028e..eda65194 100644 --- a/taskcluster/arm64-build.sh +++ b/taskcluster/arm64-build.sh @@ -7,7 +7,7 @@ source $(dirname "$0")/tc-tests-utils.sh source $(dirname "$0")/tf_tc-vars.sh BAZEL_TARGETS=" -//native_client:libdeepspeech.so +//native_client:libstt.so //native_client:generate_scorer_package " diff --git a/taskcluster/cuda-build-dbg.sh b/taskcluster/cuda-build-dbg.sh index e5e44d78..3163346b 100755 --- a/taskcluster/cuda-build-dbg.sh +++ b/taskcluster/cuda-build-dbg.sh @@ -7,7 +7,7 @@ source $(dirname "$0")/tc-tests-utils.sh source $(dirname "$0")/tf_tc-vars.sh BAZEL_TARGETS=" -//native_client:libdeepspeech.so +//native_client:libstt.so " BAZEL_ENV_FLAGS="TF_NEED_CUDA=1 ${TF_CUDA_FLAGS}" diff --git a/taskcluster/cuda-build.sh b/taskcluster/cuda-build.sh index f8213f81..e9ffd120 100755 --- a/taskcluster/cuda-build.sh +++ b/taskcluster/cuda-build.sh @@ -7,7 +7,7 @@ source $(dirname "$0")/tc-tests-utils.sh source $(dirname "$0")/tf_tc-vars.sh BAZEL_TARGETS=" -//native_client:libdeepspeech.so +//native_client:libstt.so //native_client:generate_scorer_package " diff --git a/taskcluster/examples-base.tyml b/taskcluster/examples-base.tyml index 643f8d88..11aa9267 100644 --- a/taskcluster/examples-base.tyml +++ b/taskcluster/examples-base.tyml @@ -21,7 +21,7 @@ then: DEEPSPEECH_MODEL: "https://github.com/reuben/DeepSpeech/releases/download/v0.8.0/models_0.8.tar.gz" DEEPSPEECH_AUDIO: "https://github.com/mozilla/DeepSpeech/releases/download/v0.4.1/audio-0.4.1.tar.gz" PIP_DEFAULT_TIMEOUT: "60" - EXAMPLES_CLONE_URL: "https://github.com/mozilla/DeepSpeech-examples" + EXAMPLES_CLONE_URL: 
"https://github.com/coqui-ai/STT-examples" EXAMPLES_CHECKOUT_TARGET: "master" command: diff --git a/taskcluster/host-build-dbg.sh b/taskcluster/host-build-dbg.sh index 2ced0313..21e92b44 100755 --- a/taskcluster/host-build-dbg.sh +++ b/taskcluster/host-build-dbg.sh @@ -9,7 +9,7 @@ source $(dirname "$0")/tc-tests-utils.sh source $(dirname "$0")/tf_tc-vars.sh BAZEL_TARGETS=" -//native_client:libdeepspeech.so +//native_client:libstt.so " if [ "${runtime}" = "tflite" ]; then diff --git a/taskcluster/host-build.sh b/taskcluster/host-build.sh index 84f07302..38d03482 100755 --- a/taskcluster/host-build.sh +++ b/taskcluster/host-build.sh @@ -9,7 +9,7 @@ source $(dirname "$0")/tc-tests-utils.sh source $(dirname "$0")/tf_tc-vars.sh BAZEL_TARGETS=" -//native_client:libdeepspeech.so +//native_client:libstt.so //native_client:generate_scorer_package " diff --git a/taskcluster/rpi3-build-dbg.sh b/taskcluster/rpi3-build-dbg.sh index c0652eae..62585ab4 100755 --- a/taskcluster/rpi3-build-dbg.sh +++ b/taskcluster/rpi3-build-dbg.sh @@ -7,7 +7,7 @@ source $(dirname "$0")/tc-tests-utils.sh source $(dirname "$0")/tf_tc-vars.sh BAZEL_TARGETS=" -//native_client:libdeepspeech.so +//native_client:libstt.so " BAZEL_BUILD_FLAGS="${BAZEL_ARM_FLAGS} ${BAZEL_EXTRA_FLAGS}" diff --git a/taskcluster/rpi3-build.sh b/taskcluster/rpi3-build.sh index eabff730..44c5bcef 100755 --- a/taskcluster/rpi3-build.sh +++ b/taskcluster/rpi3-build.sh @@ -7,7 +7,7 @@ source $(dirname "$0")/tc-tests-utils.sh source $(dirname "$0")/tf_tc-vars.sh BAZEL_TARGETS=" -//native_client:libdeepspeech.so +//native_client:libstt.so //native_client:generate_scorer_package " diff --git a/taskcluster/tc-all-utils.sh b/taskcluster/tc-all-utils.sh index 504954d1..2d746fdf 100755 --- a/taskcluster/tc-all-utils.sh +++ b/taskcluster/tc-all-utils.sh @@ -156,7 +156,7 @@ verify_bazel_rebuild() cp ${DS_ROOT_TASK}/DeepSpeech/ds/tensorflow/bazel*.log ${TASKCLUSTER_ARTIFACTS}/ - spurious_rebuilds=$(grep 'Executing action' "${bazel_explain_file}" | grep 'Compiling' | grep -v -E 'no entry in the cache|[for host]|unconditional execution is requested|Executing genrule //native_client:workspace_status|Compiling native_client/workspace_status.cc|Linking native_client/libdeepspeech.so' | wc -l) + spurious_rebuilds=$(grep 'Executing action' "${bazel_explain_file}" | grep 'Compiling' | grep -v -E 'no entry in the cache|[for host]|unconditional execution is requested|Executing genrule //native_client:workspace_status|Compiling native_client/workspace_status.cc|Linking native_client/libstt.so' | wc -l) if [ "${spurious_rebuilds}" -ne 0 ]; then echo "Bazel rebuilds some file it should not, please check." 
diff --git a/taskcluster/tc-android-utils.sh b/taskcluster/tc-android-utils.sh index 8f4eb69c..fa43a23b 100755 --- a/taskcluster/tc-android-utils.sh +++ b/taskcluster/tc-android-utils.sh @@ -39,7 +39,7 @@ do_deepspeech_java_apk_build() mkdir native_client/java/libdeepspeech/libs/${nc_dir} - curl -L https://community-tc.services.mozilla.com/api/queue/v1/task/${dep}/artifacts/public/native_client.tar.xz | tar -C native_client/java/libdeepspeech/libs/${nc_dir}/ -Jxvf - libdeepspeech.so + curl -L https://community-tc.services.mozilla.com/api/queue/v1/task/${dep}/artifacts/public/native_client.tar.xz | tar -C native_client/java/libdeepspeech/libs/${nc_dir}/ -Jxvf - libstt.so fi; done; diff --git a/taskcluster/tc-build-utils.sh b/taskcluster/tc-build-utils.sh index e823175c..f409731f 100755 --- a/taskcluster/tc-build-utils.sh +++ b/taskcluster/tc-build-utils.sh @@ -17,9 +17,9 @@ do_deepspeech_python_build() SETUP_FLAGS="" if [ "${package_option}" = "--cuda" ]; then - SETUP_FLAGS="--project_name deepspeech-gpu" + SETUP_FLAGS="--project_name stt-gpu" elif [ "${package_option}" = "--tflite" ]; then - SETUP_FLAGS="--project_name deepspeech-tflite" + SETUP_FLAGS="--project_name stt-tflite" fi for pyver_conf in ${SUPPORTED_PYTHON_VERSIONS}; do @@ -139,7 +139,7 @@ do_deepspeech_nodejs_build() done; if [ "${rename_to_gpu}" = "--cuda" ]; then - make -C native_client/javascript clean npm-pack PROJECT_NAME=deepspeech-gpu + make -C native_client/javascript clean npm-pack PROJECT_NAME=stt-gpu else make -C native_client/javascript clean npm-pack fi @@ -312,7 +312,7 @@ do_nuget_build() cd ${DS_DSDIR}/native_client/dotnet - cp ${DS_TFDIR}/bazel-bin/native_client/libdeepspeech.so nupkg/build + cp ${DS_TFDIR}/bazel-bin/native_client/libstt.so nupkg/build # We copy the generated clients for .NET into the Nuget framework dirs @@ -332,9 +332,9 @@ do_nuget_build() sed \ -e "s/\$NUPKG_ID/${PROJECT_NAME}/" \ -e "s/\$NUPKG_VERSION/${PROJECT_VERSION}/" \ - nupkg/deepspeech.nuspec.in > nupkg/deepspeech.nuspec && cat nupkg/deepspeech.nuspec + nupkg/stt.nuspec.in > nupkg/stt.nuspec && cat nupkg/stt.nuspec - nuget pack nupkg/deepspeech.nuspec + nuget pack nupkg/stt.nuspec } do_deepspeech_ios_framework_build() diff --git a/taskcluster/tc-dotnet-utils.sh b/taskcluster/tc-dotnet-utils.sh index e62ad6e2..07cd331d 100755 --- a/taskcluster/tc-dotnet-utils.sh +++ b/taskcluster/tc-dotnet-utils.sh @@ -36,7 +36,7 @@ install_nuget() nuget install NAudio -Version 1.10.0 cp NAudio*/lib/net35/NAudio.dll ${TASKCLUSTER_TMP_DIR}/ds/ - cp ${PROJECT_NAME}.${DS_VERSION}/build/libdeepspeech.so ${TASKCLUSTER_TMP_DIR}/ds/ + cp ${PROJECT_NAME}.${DS_VERSION}/build/libstt.so ${TASKCLUSTER_TMP_DIR}/ds/ cp ${PROJECT_NAME}.${DS_VERSION}/lib/net46/DeepSpeechClient.dll ${TASKCLUSTER_TMP_DIR}/ds/ ls -hal ${TASKCLUSTER_TMP_DIR}/ds/ diff --git a/taskcluster/win-build.sh b/taskcluster/win-build.sh index 8eb6b0e9..7967a2cf 100755 --- a/taskcluster/win-build.sh +++ b/taskcluster/win-build.sh @@ -9,20 +9,20 @@ source $(dirname "$0")/tc-tests-utils.sh source $(dirname "$0")/tf_tc-vars.sh BAZEL_TARGETS=" -//native_client:libdeepspeech.so +//native_client:libstt.so //native_client:generate_scorer_package " if [ "${package_option}" = "--cuda" ]; then BAZEL_ENV_FLAGS="TF_NEED_CUDA=1 ${TF_CUDA_FLAGS}" BAZEL_BUILD_FLAGS="${BAZEL_CUDA_FLAGS} ${BAZEL_EXTRA_FLAGS} ${BAZEL_OPT_FLAGS}" - PROJECT_NAME="DeepSpeech-GPU" + PROJECT_NAME="STT-GPU" elif [ "${package_option}" = "--tflite" ]; then - PROJECT_NAME="DeepSpeech-TFLite" + PROJECT_NAME="STT-TFLite" 
BAZEL_BUILD_FLAGS="--define=runtime=tflite ${BAZEL_OPT_FLAGS} ${BAZEL_EXTRA_FLAGS}" BAZEL_ENV_FLAGS="TF_NEED_CUDA=0" else - PROJECT_NAME="DeepSpeech" + PROJECT_NAME="STT" BAZEL_BUILD_FLAGS="${BAZEL_OPT_FLAGS} ${BAZEL_EXTRA_FLAGS}" BAZEL_ENV_FLAGS="TF_NEED_CUDA=0" fi @@ -32,7 +32,7 @@ SYSTEM_TARGET=host-win do_bazel_build if [ "${package_option}" = "--cuda" ]; then - cp ${DS_ROOT_TASK}/DeepSpeech/ds/tensorflow/bazel-bin/native_client/liblibdeepspeech.so.ifso ${DS_ROOT_TASK}/DeepSpeech/ds/tensorflow/bazel-bin/native_client/libdeepspeech.so.if.lib + cp ${DS_ROOT_TASK}/DeepSpeech/ds/tensorflow/bazel-bin/native_client/liblibstt.so.ifso ${DS_ROOT_TASK}/DeepSpeech/ds/tensorflow/bazel-bin/native_client/libstt.so.if.lib fi export PATH=$PATH:$(cygpath ${ChocolateyInstall})/bin:'/c/Program Files/nodejs/' diff --git a/taskcluster/win-opt-base.tyml b/taskcluster/win-opt-base.tyml index 0247f140..0eccf928 100644 --- a/taskcluster/win-opt-base.tyml +++ b/taskcluster/win-opt-base.tyml @@ -29,7 +29,7 @@ payload: TC_MSYS_VERSION: 'MSYS_NT-6.3-9600' MSYS: 'winsymlinks:nativestrict' GIT_LFS_SKIP_SMUDGE: '1' - EXAMPLES_CLONE_URL: "https://github.com/mozilla/DeepSpeech-examples" + EXAMPLES_CLONE_URL: "https://github.com/coqui-ai/STT-examples" EXAMPLES_CHECKOUT_TARGET: "master" command: From f33f0b382d7158e7b88be55f956e168742bc385c Mon Sep 17 00:00:00 2001 From: Kelly Davis Date: Sun, 7 Mar 2021 11:14:16 +0100 Subject: [PATCH 4/8] More rebranding, Python and JS packages verified --- CODE_OF_CONDUCT.md | 139 ++++++++++++++++-- CONTRIBUTING.rst | 6 +- README.rst | 4 + doc/BUILDING_DotNet.rst | 2 +- doc/USING.rst | 2 +- doc/conf.py | 2 +- doc/doxygen-java.conf | 2 +- doc/index.rst | 2 +- evaluate_tflite.py | 2 +- native_client/generate_scorer_package.cpp | 2 +- native_client/java/libstt/CMakeLists.txt | 6 +- native_client/java/libstt/build.gradle | 2 +- .../libstt/src/main/res/values/strings.xml | 2 +- native_client/java/settings.gradle | 2 +- native_client/javascript/Makefile | 14 +- native_client/javascript/binding.gyp | 4 +- .../javascript/{deepspeech.i => stt.i} | 0 native_client/python/Makefile | 4 +- native_client/tflitemodelstate.cc | 2 +- taskcluster/examples-base.tyml | 2 +- taskcluster/win-opt-base.tyml | 2 +- 21 files changed, 162 insertions(+), 41 deletions(-) rename native_client/javascript/{deepspeech.i => stt.i} (100%) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 498baa3f..bdb48cd1 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,15 +1,132 @@ -# Community Participation Guidelines +# Contributor Covenant Code of Conduct -This repository is governed by Mozilla's code of conduct and etiquette guidelines. -For more details, please read the -[Mozilla Community Participation Guidelines](https://www.mozilla.org/about/governance/policies/participation/). +## Our Pledge -## How to Report -For more information on how to report violations of the Community Participation Guidelines, please read our '[How to Report](https://www.mozilla.org/about/governance/policies/participation/reporting/)' page. +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual identity +and orientation. 
- +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement by emailing +[coc-report@coqui.ai](mailto:coc-report@coqui.ai). +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. 
+ +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +[https://www.contributor-covenant.org/version/2/0/code_of_conduct.html][v2.0]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available +at [https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.0]: https://www.contributor-covenant.org/version/2/0/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index f12967da..1d65f660 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -3,7 +3,7 @@ Contribution guidelines Welcome to the 🐸STT project! We are excited to see your interest, and appreciate your support! -This repository is governed by Mozilla's code of conduct and etiquette guidelines. For more details, please read the `Mozilla Community Participation Guidelines `_. +This repository is governed by the Contributor Covenant Code of Conduct. For more details, see the `CODE_OF_CONDUCT.md `_. How to Make a Good Pull Request ------------------------------- @@ -47,9 +47,9 @@ Before making a Pull Request for Python code changes, check your changes for bas .. code-block:: bash pip install pylint cardboardlint - cardboardlinter --refspec master + cardboardlinter --refspec main -This will compare the code against master and run the linter on all the changes. We plan to introduce more linter checks (e.g. for C++) in the future. To run it automatically as a git pre-commit hook, do the following: +This will compare the code against the main branch and run the linter on all the changes. We plan to introduce more linter checks (e.g. for C++) in the future. To run it automatically as a git pre-commit hook, do the following: .. code-block:: bash diff --git a/README.rst b/README.rst index 3347f0b4..d9c3af91 100644 --- a/README.rst +++ b/README.rst @@ -6,6 +6,8 @@ :target: https://stt.readthedocs.io/?badge=latest :alt: Documentation +|Contributor Covenant| + **Coqui STT** is an open-source Speech-To-Text engine, using a model trained by machine learning techniques based on `Baidu's Deep Speech research paper `_. 🐸STT uses Google's `TensorFlow `_ to make the implementation easier. **Documentation** for installation, usage, and training models are available on `stt.readthedocs.io `_. @@ -15,3 +17,5 @@ For the **latest release**, including pre-trained models and checkpoints, `see t For contribution guidelines, see `CONTRIBUTING.rst `_. 
For contact and support information, see `SUPPORT.rst `_. + +.. |Contributor Covenant| image:: https://img.shields.io/badge/Contributor%20Covenant-2.0-4baaaa.svg :target: CODE_OF_CONDUCT.md diff --git a/doc/BUILDING_DotNet.rst b/doc/BUILDING_DotNet.rst index 9421918f..5bc73175 100644 --- a/doc/BUILDING_DotNet.rst +++ b/doc/BUILDING_DotNet.rst @@ -144,4 +144,4 @@ Be patient, if you enabled AVX/AVX2 and CUDA it will take a long time. Finally y Using the generated library --------------------------- -As for now we can only use the generated ``libstt.so`` with the C# clients, go to `native_client/dotnet/ `_ in your STT directory and open the Visual Studio solution, then we need to build in debug or release mode, finally we just need to copy ``libstt.so`` to the generated ``x64/Debug`` or ``x64/Release`` directory. +As for now we can only use the generated ``libstt.so`` with the C# clients, go to `native_client/dotnet/ `_ in your STT directory and open the Visual Studio solution, then we need to build in debug or release mode, finally we just need to copy ``libstt.so`` to the generated ``x64/Debug`` or ``x64/Release`` directory. diff --git a/doc/USING.rst b/doc/USING.rst index 370304e8..9ca9c4d4 100644 --- a/doc/USING.rst +++ b/doc/USING.rst @@ -188,7 +188,7 @@ or if you're on macOS: python3 util/taskcluster.py --arch osx --target . -also, if you need some binaries different than current master, like ``v0.2.0-alpha.6``\ , you can use ``--branch``\ : +also, if you need some binaries different than current main branch, like ``v0.2.0-alpha.6``\ , you can use ``--branch``\ : .. code-block:: bash diff --git a/doc/conf.py b/doc/conf.py index 45fa6d49..dc447452 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -99,7 +99,7 @@ templates_path = ['.templates'] # source_suffix = ['.rst', '.md'] source_suffix = '.rst' -# The master toctree document. +# The main toctree document. master_doc = 'index' # The language for content autogenerated by Sphinx. Refer to documentation diff --git a/doc/doxygen-java.conf b/doc/doxygen-java.conf index 9516d6ec..c6474d7a 100644 --- a/doc/doxygen-java.conf +++ b/doc/doxygen-java.conf @@ -790,7 +790,7 @@ WARN_LOGFILE = # spaces. See also FILE_PATTERNS and EXTENSION_MAPPING # Note: If this tag is empty the current directory is searched. -INPUT = native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech/ native_client/java/libdeepspeech/src/main/java/org/deepspeech/libdeepspeech_doc/ +INPUT = native_client/java/libstt/src/main/java/ai/coqui/libstt/ native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/ # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses diff --git a/doc/index.rst b/doc/index.rst index a22b8f21..edb048f8 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -1,4 +1,4 @@ -.. Coqui STT documentation master file, created by +.. Coqui STT documentation main file, created by sphinx-quickstart on Thu Feb 2 21:20:39 2017. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. 
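The CONTRIBUTING.rst hunk above moves the lint baseline from master to main; the pre-commit snippet it points at falls outside the diff context shown here. A minimal sketch of such a hook, assuming it simply re-runs the same linter against the renamed default branch (hypothetical — not the hook the repository actually ships):

    #!/bin/bash
    # .git/hooks/pre-commit (hypothetical) -- abort the commit on lint failures.
    set -e
    # Lint only the changes relative to the default branch, per CONTRIBUTING.rst.
    cardboardlinter --refspec main

To activate a hook like this, it must be marked executable (chmod +x .git/hooks/pre-commit).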
diff --git a/evaluate_tflite.py b/evaluate_tflite.py index af3c2385..d8cff40f 100644 --- a/evaluate_tflite.py +++ b/evaluate_tflite.py @@ -10,7 +10,7 @@ import csv import os import sys -from deepspeech import Model +from stt import Model from coqui_stt_training.util.evaluate_tools import calculate_and_print_report from coqui_stt_training.util.flags import create_flags from functools import partial diff --git a/native_client/generate_scorer_package.cpp b/native_client/generate_scorer_package.cpp index f4e7c07b..0cadb429 100644 --- a/native_client/generate_scorer_package.cpp +++ b/native_client/generate_scorer_package.cpp @@ -103,7 +103,7 @@ main(int argc, char** argv) ("package", po::value(), "Path to save scorer package.") ("default_alpha", po::value(), "Default value of alpha hyperparameter (float).") ("default_beta", po::value(), "Default value of beta hyperparameter (float).") - ("force_bytes_output_mode", po::value(), "Boolean flag, force set or unset bytes output mode in the scorer package. If not set, infers from the vocabulary. See for further explanation.") + ("force_bytes_output_mode", po::value(), "Boolean flag, force set or unset bytes output mode in the scorer package. If not set, infers from the vocabulary. See for further explanation.") ; po::variables_map vm; diff --git a/native_client/java/libstt/CMakeLists.txt b/native_client/java/libstt/CMakeLists.txt index c64ea47e..6009c166 100644 --- a/native_client/java/libstt/CMakeLists.txt +++ b/native_client/java/libstt/CMakeLists.txt @@ -26,12 +26,12 @@ add_library( deepspeech-lib set_target_properties( deepspeech-lib PROPERTIES IMPORTED_LOCATION - ${CMAKE_SOURCE_DIR}/libs/${ANDROID_ABI}/libdeepspeech.so ) + ${CMAKE_SOURCE_DIR}/libs/${ANDROID_ABI}/libstt.so ) add_custom_command( TARGET deepspeech-jni POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy - ${CMAKE_SOURCE_DIR}/libs/${ANDROID_ABI}/libdeepspeech.so - ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libdeepspeech.so ) + ${CMAKE_SOURCE_DIR}/libs/${ANDROID_ABI}/libstt.so + ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libstt.so ) # Searches for a specified prebuilt library and stores the path as a diff --git a/native_client/java/libstt/build.gradle b/native_client/java/libstt/build.gradle index 3d1364e1..286b2db5 100644 --- a/native_client/java/libstt/build.gradle +++ b/native_client/java/libstt/build.gradle @@ -44,7 +44,7 @@ android { installOptions "-d","-t" } - // Avoid scanning libdeepspeech_doc + // Avoid scanning libstt_doc sourceSets { main.java.srcDirs = [ 'src/main/java/ai/coqui/libstt/' ] } diff --git a/native_client/java/libstt/src/main/res/values/strings.xml b/native_client/java/libstt/src/main/res/values/strings.xml index 7628d1ac..8dd7c26e 100644 --- a/native_client/java/libstt/src/main/res/values/strings.xml +++ b/native_client/java/libstt/src/main/res/values/strings.xml @@ -1,3 +1,3 @@ - libdeepspeech + libstt diff --git a/native_client/java/settings.gradle b/native_client/java/settings.gradle index a31d5636..78868112 100644 --- a/native_client/java/settings.gradle +++ b/native_client/java/settings.gradle @@ -1 +1 @@ -include ':app', ':libdeepspeech' +include ':app', ':libstt' diff --git a/native_client/javascript/Makefile b/native_client/javascript/Makefile index c2670dea..70f7686f 100644 --- a/native_client/javascript/Makefile +++ b/native_client/javascript/Makefile @@ -27,11 +27,11 @@ endif default: build clean: - rm -f deepspeech_wrap.cxx package.json package-lock.json + rm -f stt_wrap.cxx package.json package-lock.json rm -rf ./build/ clean-npm-pack: - rm -fr ./deepspeech-*.tgz + rm -fr 
./stt-*.tgz really-clean: clean clean-npm-pack rm -fr ./node_modules/ @@ -46,14 +46,14 @@ package.json: package.json.in npm-dev: package.json $(NPM_TOOL) install --prefix=$(NPM_ROOT)/../ --ignore-scripts --force --verbose --production=false . -configure: deepspeech_wrap.cxx package.json npm-dev +configure: stt_wrap.cxx package.json npm-dev PATH="$(NODE_MODULES_BIN):${PATH}" $(NODE_BUILD_TOOL) configure $(NODE_BUILD_VERBOSE) -build: configure deepspeech_wrap.cxx +build: configure stt_wrap.cxx PATH="$(NODE_MODULES_BIN):${PATH}" NODE_PRE_GYP_ABI_CROSSWALK=$(NODE_PRE_GYP_ABI_CROSSWALK_FILE) AS=$(AS) CC=$(CC) CXX=$(CXX) LD=$(LD) CFLAGS="$(CFLAGS)" CXXFLAGS="$(CXXFLAGS)" LDFLAGS="$(RPATH_NODEJS) $(LDFLAGS)" LIBS=$(LIBS) $(NODE_BUILD_TOOL) $(NODE_PLATFORM_TARGET) $(NODE_RUNTIME) $(NODE_ABI_TARGET) $(NODE_DEVDIR) $(NODE_DIST_URL) --no-color rebuild $(NODE_BUILD_VERBOSE) copy-deps: build - $(call copy_missing_libs,lib/binding/*/*/*/deepspeech.node,lib/binding/*/*/) + $(call copy_missing_libs,lib/binding/*/*/*/stt.node,lib/binding/*/*/) node-wrapper: copy-deps build PATH="$(NODE_MODULES_BIN):${PATH}" NODE_PRE_GYP_ABI_CROSSWALK=$(NODE_PRE_GYP_ABI_CROSSWALK_FILE) $(NODE_BUILD_TOOL) $(NODE_PLATFORM_TARGET) $(NODE_RUNTIME) $(NODE_ABI_TARGET) $(NODE_DEVDIR) $(NODE_DIST_URL) --no-color package $(NODE_BUILD_VERBOSE) @@ -61,5 +61,5 @@ node-wrapper: copy-deps build npm-pack: clean package.json index.ts npm-dev PATH="$(NODE_MODULES_BIN):${PATH}" tsc && $(NPM_TOOL) pack $(NODE_BUILD_VERBOSE) -deepspeech_wrap.cxx: deepspeech.i ds-swig - $(DS_SWIG_ENV) swig -c++ -javascript -node deepspeech.i +stt_wrap.cxx: stt.i ds-swig + $(DS_SWIG_ENV) swig -c++ -javascript -node stt.i diff --git a/native_client/javascript/binding.gyp b/native_client/javascript/binding.gyp index 65f56abb..594f1f39 100644 --- a/native_client/javascript/binding.gyp +++ b/native_client/javascript/binding.gyp @@ -1,8 +1,8 @@ { "targets": [ { - "target_name": "deepspeech", - "sources": [ "deepspeech_wrap.cxx" ], + "target_name": "stt", + "sources": [ "stt_wrap.cxx" ], "libraries": [ "$(LIBS)" ], diff --git a/native_client/javascript/deepspeech.i b/native_client/javascript/stt.i similarity index 100% rename from native_client/javascript/deepspeech.i rename to native_client/javascript/stt.i diff --git a/native_client/python/Makefile b/native_client/python/Makefile index 10924654..a18f3fe0 100644 --- a/native_client/python/Makefile +++ b/native_client/python/Makefile @@ -3,7 +3,7 @@ include ../definitions.mk bindings-clean: - rm -rf dist temp_build deepspeech.egg-info MANIFEST.in temp_lib + rm -rf dist temp_build stt.egg-info MANIFEST.in temp_lib rm -f impl_wrap.cpp impl.py # Enforce PATH here because swig calls from build_ext looses track of some @@ -14,7 +14,7 @@ bindings-build: ds-swig MANIFEST.in: bindings-build > $@ - $(call copy_missing_libs,temp_build/deepspeech/*.so,temp_build/deepspeech/lib,$@) + $(call copy_missing_libs,temp_build/stt/*.so,temp_build/stt/lib,$@) # On Unix, _wrap.o gets generated # On Windows, _wrap.obj it is diff --git a/native_client/tflitemodelstate.cc b/native_client/tflitemodelstate.cc index eb8a476c..fe8d20ca 100644 --- a/native_client/tflitemodelstate.cc +++ b/native_client/tflitemodelstate.cc @@ -4,7 +4,7 @@ #ifdef __ANDROID__ #include -#define LOG_TAG "libdeepspeech" +#define LOG_TAG "libstt" #define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__) #define LOGE(...) 
__android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__) #else diff --git a/taskcluster/examples-base.tyml b/taskcluster/examples-base.tyml index 11aa9267..8c2f3e7f 100644 --- a/taskcluster/examples-base.tyml +++ b/taskcluster/examples-base.tyml @@ -22,7 +22,7 @@ then: DEEPSPEECH_AUDIO: "https://github.com/mozilla/DeepSpeech/releases/download/v0.4.1/audio-0.4.1.tar.gz" PIP_DEFAULT_TIMEOUT: "60" EXAMPLES_CLONE_URL: "https://github.com/coqui-ai/STT-examples" - EXAMPLES_CHECKOUT_TARGET: "master" + EXAMPLES_CHECKOUT_TARGET: "main" command: - "/bin/bash" diff --git a/taskcluster/win-opt-base.tyml b/taskcluster/win-opt-base.tyml index 0eccf928..d67c98a8 100644 --- a/taskcluster/win-opt-base.tyml +++ b/taskcluster/win-opt-base.tyml @@ -30,7 +30,7 @@ payload: MSYS: 'winsymlinks:nativestrict' GIT_LFS_SKIP_SMUDGE: '1' EXAMPLES_CLONE_URL: "https://github.com/coqui-ai/STT-examples" - EXAMPLES_CHECKOUT_TARGET: "master" + EXAMPLES_CHECKOUT_TARGET: "main" command: - >- From 95f122806e7653a7d9b857d68054b6d673ada5d6 Mon Sep 17 00:00:00 2001 From: Kelly Davis Date: Sun, 7 Mar 2021 11:34:01 +0100 Subject: [PATCH 5/8] More rebranding, Java package, C++ impl --- native_client/Android.mk | 6 +++--- native_client/BUILD | 6 +++--- native_client/Makefile | 2 +- native_client/java/Makefile | 4 ++-- .../ai/coqui/sttexampleapp/DeepSpeechActivity.java | 4 ++-- .../java/app/src/main/res/values/strings.xml | 2 +- native_client/java/jni/{deepspeech.i => stt.i} | 0 native_client/java/libstt/CMakeLists.txt | 14 +++++++------- .../main/java/ai/coqui/libstt/DeepSpeechModel.java | 4 ++-- .../src/main/java/ai/coqui/libstt_doc/README.rst | 2 +- native_client/{deepspeech.cc => stt.cc} | 0 .../{deepspeech_errors.cc => stt_errors.cc} | 0 12 files changed, 22 insertions(+), 22 deletions(-) rename native_client/java/jni/{deepspeech.i => stt.i} (100%) rename native_client/{deepspeech.cc => stt.cc} (100%) rename native_client/{deepspeech_errors.cc => stt_errors.cc} (100%) diff --git a/native_client/Android.mk b/native_client/Android.mk index 6ffb0101..49bf8f93 100644 --- a/native_client/Android.mk +++ b/native_client/Android.mk @@ -1,14 +1,14 @@ LOCAL_PATH := $(call my-dir) include $(CLEAR_VARS) -LOCAL_MODULE := deepspeech-prebuilt +LOCAL_MODULE := stt-prebuilt LOCAL_SRC_FILES := $(TFDIR)/bazel-bin/native_client/libstt.so include $(PREBUILT_SHARED_LIBRARY) include $(CLEAR_VARS) LOCAL_CPP_EXTENSION := .cc .cxx .cpp -LOCAL_MODULE := deepspeech +LOCAL_MODULE := stt LOCAL_SRC_FILES := client.cc -LOCAL_SHARED_LIBRARIES := deepspeech-prebuilt +LOCAL_SHARED_LIBRARIES := stt-prebuilt LOCAL_LDFLAGS := -Wl,--no-as-needed include $(BUILD_EXECUTABLE) diff --git a/native_client/BUILD b/native_client/BUILD index ead08f6a..e905fb94 100644 --- a/native_client/BUILD +++ b/native_client/BUILD @@ -114,9 +114,9 @@ cc_library( cc_library( name = "coqui_stt_bundle", srcs = [ - "deepspeech.cc", + "stt.cc", "coqui-stt.h", - "deepspeech_errors.cc", + "stt_errors.cc", "modelstate.cc", "modelstate.h", "workspace_status.cc", @@ -229,7 +229,7 @@ cc_binary( name = "generate_scorer_package", srcs = [ "generate_scorer_package.cpp", - "deepspeech_errors.cc", + "stt_errors.cc", ], copts = ["-std=c++11"], deps = [ diff --git a/native_client/Makefile b/native_client/Makefile index e9594d48..9fbc99a1 100644 --- a/native_client/Makefile +++ b/native_client/Makefile @@ -16,7 +16,7 @@ include definitions.mk default: $(STT_BIN) clean: - rm -f deepspeech + rm -f stt $(STT_BIN): client.cc Makefile $(CXX) $(CFLAGS) $(CFLAGS_DEEPSPEECH) $(SOX_CFLAGS) client.cc $(LDFLAGS) 
$(SOX_LDFLAGS) diff --git a/native_client/java/Makefile b/native_client/java/Makefile index 31f5c078..21746f87 100644 --- a/native_client/java/Makefile +++ b/native_client/java/Makefile @@ -8,7 +8,7 @@ GRADLE ?= ./gradlew all: apk clean: apk-clean - rm -rf *.java jni/deepspeech_wrap.cpp + rm -rf *.java jni/stt_wrap.cpp apk-clean: $(GRADLE) clean @@ -28,4 +28,4 @@ maven-bundle: apk $(GRADLE) zipMavenArtifacts bindings: clean ds-swig - $(DS_SWIG_ENV) swig -c++ -java -package ai.coqui.libstt -outdir libstt/src/main/java/ai/coqui/libstt/ -o jni/deepspeech_wrap.cpp jni/deepspeech.i + $(DS_SWIG_ENV) swig -c++ -java -package ai.coqui.libstt -outdir libstt/src/main/java/ai/coqui/libstt/ -o jni/stt_wrap.cpp jni/stt.i diff --git a/native_client/java/app/src/main/java/ai/coqui/sttexampleapp/DeepSpeechActivity.java b/native_client/java/app/src/main/java/ai/coqui/sttexampleapp/DeepSpeechActivity.java index b1a1818c..7f0836d9 100644 --- a/native_client/java/app/src/main/java/ai/coqui/sttexampleapp/DeepSpeechActivity.java +++ b/native_client/java/app/src/main/java/ai/coqui/sttexampleapp/DeepSpeechActivity.java @@ -132,10 +132,10 @@ public class DeepSpeechActivity extends AppCompatActivity { this._tfliteModel = (EditText) findViewById(R.id.tfliteModel); this._audioFile = (EditText) findViewById(R.id.audioFile); - this._tfliteModel.setText("/sdcard/deepspeech/output_graph.tflite"); + this._tfliteModel.setText("/sdcard/stt/output_graph.tflite"); this._tfliteStatus.setText("Ready, waiting ..."); - this._audioFile.setText("/sdcard/deepspeech/audio.wav"); + this._audioFile.setText("/sdcard/stt/audio.wav"); this._startInference = (Button) findViewById(R.id.btnStartInference); } diff --git a/native_client/java/app/src/main/res/values/strings.xml b/native_client/java/app/src/main/res/values/strings.xml index 9dff9e7b..528244a7 100644 --- a/native_client/java/app/src/main/res/values/strings.xml +++ b/native_client/java/app/src/main/res/values/strings.xml @@ -1,3 +1,3 @@ - DeepSpeech + Coqui STT diff --git a/native_client/java/jni/deepspeech.i b/native_client/java/jni/stt.i similarity index 100% rename from native_client/java/jni/deepspeech.i rename to native_client/java/jni/stt.i diff --git a/native_client/java/libstt/CMakeLists.txt b/native_client/java/libstt/CMakeLists.txt index 6009c166..040d17a5 100644 --- a/native_client/java/libstt/CMakeLists.txt +++ b/native_client/java/libstt/CMakeLists.txt @@ -11,24 +11,24 @@ cmake_minimum_required(VERSION 3.4.1) # Gradle automatically packages shared libraries with your APK. add_library( # Sets the name of the library. - deepspeech-jni + stt-jni # Sets the library as a shared library. SHARED # Provides a relative path to your source file(s). - ../jni/deepspeech_wrap.cpp ) + ../jni/stt_wrap.cpp ) -add_library( deepspeech-lib +add_library( stt-lib SHARED IMPORTED ) -set_target_properties( deepspeech-lib +set_target_properties( stt-lib PROPERTIES IMPORTED_LOCATION ${CMAKE_SOURCE_DIR}/libs/${ANDROID_ABI}/libstt.so ) -add_custom_command( TARGET deepspeech-jni POST_BUILD +add_custom_command( TARGET stt-jni POST_BUILD COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_SOURCE_DIR}/libs/${ANDROID_ABI}/libstt.so ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/libstt.so ) @@ -52,9 +52,9 @@ find_library( # Sets the name of the path variable. # build script, prebuilt third-party libraries, or system libraries. target_link_libraries( # Specifies the target library. - deepspeech-jni + stt-jni - deepspeech-lib + stt-lib # Links the target library to the log library # included in the NDK. 
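These hunks complete the rename of the Android JNI pair: CMake now builds stt-jni from jni/stt_wrap.cpp and imports the prebuilt libstt.so as stt-lib. After a rename of this kind, a stale binary can easily survive in a cached Gradle build, so a quick packaging audit is worthwhile — a sketch below, where the APK path is an assumption and the expected .so names follow from the CMake targets above:

    #!/bin/bash
    set -e
    # Hypothetical debug-build output path; adjust to the actual Gradle module.
    APK="app/build/outputs/apk/debug/app-debug.apk"

    # List the packaged native libraries; per ABI we expect libstt.so
    # (the imported prebuilt) and libstt-jni.so (the SWIG wrapper).
    unzip -l "$APK" | grep -E 'lib/.*\.so$'

    # Fail loudly if any pre-rename binary is still being packaged.
    if unzip -l "$APK" | grep -q 'libdeepspeech'; then
      echo "stale libdeepspeech artifact found in $APK" >&2
      exit 1
    fi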
diff --git a/native_client/java/libstt/src/main/java/ai/coqui/libstt/DeepSpeechModel.java b/native_client/java/libstt/src/main/java/ai/coqui/libstt/DeepSpeechModel.java index 9e3d6b26..b349fae7 100644 --- a/native_client/java/libstt/src/main/java/ai/coqui/libstt/DeepSpeechModel.java +++ b/native_client/java/libstt/src/main/java/ai/coqui/libstt/DeepSpeechModel.java @@ -6,8 +6,8 @@ package ai.coqui.libstt; public class DeepSpeechModel { static { - System.loadLibrary("deepspeech-jni"); - System.loadLibrary("deepspeech"); + System.loadLibrary("stt-jni"); + System.loadLibrary("stt"); } // FIXME: We should have something better than those SWIGTYPE_* diff --git a/native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/README.rst b/native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/README.rst index d9078803..e85fb9b4 100644 --- a/native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/README.rst +++ b/native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/README.rst @@ -8,4 +8,4 @@ To update, please install SWIG (4.0 at least) and then run from native_client/ja .. code-block:: - swig -c++ -java -doxygen -package ai.coqui.libstt -outdir libstt/src/main/java/ai/coqui/libstt_doc -o jni/deepspeech_wrap.cpp jni/deepspeech.i + swig -c++ -java -doxygen -package ai.coqui.libstt -outdir libstt/src/main/java/ai/coqui/libstt_doc -o jni/stt_wrap.cpp jni/stt.i diff --git a/native_client/deepspeech.cc b/native_client/stt.cc similarity index 100% rename from native_client/deepspeech.cc rename to native_client/stt.cc diff --git a/native_client/deepspeech_errors.cc b/native_client/stt_errors.cc similarity index 100% rename from native_client/deepspeech_errors.cc rename to native_client/stt_errors.cc From 136ca35ca2741d5f8455c4b68bcc6b3ad90fb555 Mon Sep 17 00:00:00 2001 From: Kelly Davis Date: Sun, 7 Mar 2021 11:37:17 +0100 Subject: [PATCH 6/8] Contributor covenant badge --- README.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.rst b/README.rst index d9c3af91..1ef9d14e 100644 --- a/README.rst +++ b/README.rst @@ -6,7 +6,9 @@ :target: https://stt.readthedocs.io/?badge=latest :alt: Documentation -|Contributor Covenant| +.. image:: https://img.shields.io/badge/Contributor%20Covenant-2.0-4baaaa.svg + :target: CODE_OF_CONDUCT.md + :alt: Contributor Covenant **Coqui STT** is an open-source Speech-To-Text engine, using a model trained by machine learning techniques based on `Baidu's Deep Speech research paper `_. 🐸STT uses Google's `TensorFlow `_ to make the implementation easier. @@ -17,5 +19,3 @@ For the **latest release**, including pre-trained models and checkpoints, `see t For contribution guidelines, see `CONTRIBUTING.rst `_. For contact and support information, see `SUPPORT.rst `_. - -.. 
|Contributor Covenant| image:: https://img.shields.io/badge/Contributor%20Covenant-2.0-4baaaa.svg :target: CODE_OF_CONDUCT.md From 6d4d1a71531a3369fa5225770cd81c2c011c0d9e Mon Sep 17 00:00:00 2001 From: Kelly Davis Date: Sun, 7 Mar 2021 14:29:02 +0100 Subject: [PATCH 7/8] More rebranding, API names, iOS, .NET --- .taskcluster.yml => .taskcluster.yml.disabled | 0 bin/run-ldc93s1.sh | 2 +- doc/BUILDING.rst | 14 +- doc/C-API.rst | 84 +++---- doc/DotNet-API.rst | 34 +-- doc/DotNet-Examples.rst | 8 +- doc/Java-API.rst | 18 +- doc/Java-Examples.rst | 8 +- doc/SUPPORTED_PLATFORMS.rst | 28 +-- doc/Structs.rst | 6 +- doc/conf.py | 6 +- ds_lib.supp | 2 +- ds_openfst.supp | 4 +- native_client/args.h | 8 +- native_client/client.cc | 58 ++--- native_client/coqui-stt.h | 140 +++++------ native_client/ctcdecode/__init__.py | 2 +- native_client/ctcdecode/scorer.cpp | 12 +- .../DeepSpeechClient/Enums/ErrorCodes.cs | 33 --- .../dotnet/{DeepSpeech.sln => STT.sln} | 4 +- .../dotnet/STTClient/Enums/ErrorCodes.cs | 33 +++ .../Extensions/NativeExtensions.cs | 8 +- .../Interfaces/ISTT.cs} | 26 +- .../Models/CandidateTranscript.cs | 2 +- .../Models/Metadata.cs | 2 +- .../Models/Stream.cs} | 8 +- .../Models/TokenMetadata.cs | 2 +- .../NativeImp.cs | 54 ++--- .../DeepSpeech.cs => STTClient/STT.cs} | 92 ++++---- .../STTClient.csproj} | 0 .../Structs/CandidateTranscript.cs | 2 +- .../Structs/Metadata.cs | 2 +- .../Structs/TokenMetadata.cs | 2 +- .../App.config | 0 .../Program.cs | 8 +- .../Properties/AssemblyInfo.cs | 8 +- .../STTConsole.csproj} | 8 +- .../arctic_a0024.wav | Bin .../packages.config | 0 .../{DeepSpeechWPF => STTWPF}/.gitignore | 0 .../{DeepSpeechWPF => STTWPF}/App.config | 0 .../dotnet/{DeepSpeechWPF => STTWPF}/App.xaml | 4 +- .../{DeepSpeechWPF => STTWPF}/App.xaml.cs | 18 +- .../{DeepSpeechWPF => STTWPF}/MainWindow.xaml | 4 +- .../MainWindow.xaml.cs | 4 +- .../Properties/AssemblyInfo.cs | 8 +- .../Properties/Resources.Designer.cs | 4 +- .../Properties/Resources.resx | 0 .../Properties/Settings.Designer.cs | 2 +- .../Properties/Settings.settings | 0 .../STT.WPF.csproj} | 8 +- .../DeepSpeech.WPF.sln => STTWPF/STT.WPF.sln} | 4 +- .../ViewModels/BindableBase.cs | 2 +- .../ViewModels/MainWindowViewModel.cs | 16 +- .../{DeepSpeechWPF => STTWPF}/packages.config | 0 .../nupkg/{stt.nuspec.in => STT.spec.in} | 0 .../build/{DeepSpeech.targets => STT.targets} | 0 native_client/generate_scorer_package.cpp | 4 +- .../java/app/src/main/AndroidManifest.xml | 2 +- ...epSpeechActivity.java => STTActivity.java} | 10 +- ...ivity_deep_speech.xml => activity_stt.xml} | 2 +- native_client/java/jni/stt.i | 16 +- .../java/ai/coqui/libstt/test/BasicTest.java | 32 +-- .../{DeepSpeechModel.java => STTModel.java} | 30 +-- ...amingState.java => STTStreamingState.java} | 4 +- .../coqui/libstt_doc/CandidateTranscript.java | 2 +- .../java/ai/coqui/libstt_doc/Metadata.java | 2 +- ..._Error_Codes.java => STT_Error_Codes.java} | 18 +- .../ai/coqui/libstt_doc/TokenMetadata.java | 2 +- native_client/javascript/stt.i | 18 +- native_client/modelstate.cc | 2 +- native_client/modelstate.h | 2 +- native_client/python/impl.i | 22 +- native_client/stt.cc | 102 ++++---- native_client/stt_errors.cc | 4 +- .../deepspeech_ios/deepspeech_ios.modulemap | 12 - ...deepspeech-ios.podspec => stt-ios.podspec} | 6 +- .../project.pbxproj | 106 ++++----- .../contents.xcworkspacedata | 2 +- .../xcshareddata/IDEWorkspaceChecks.plist | 0 .../xcshareddata/xcschemes/stt_ios.xcscheme} | 18 +- .../contents.xcworkspacedata | 4 +- 
.../xcshareddata/IDEWorkspaceChecks.plist | 0 .../xcshareddata/WorkspaceSettings.xcsettings | 0 .../{deepspeech_ios => stt_ios}/Info.plist | 0 .../DeepSpeech.swift => stt_ios/STT.swift} | 222 +++++++++--------- .../deepspeech_ios.h => stt_ios/stt_ios.h} | 6 +- native_client/swift/stt_ios/stt_ios.modulemap | 12 + .../project.pbxproj | 152 ++++++------ .../contents.xcworkspacedata | 2 +- .../xcshareddata/IDEWorkspaceChecks.plist | 0 .../xcschemes/stt_ios_test.xcscheme} | 30 +-- .../AppDelegate.swift | 2 +- .../AppIcon.appiconset/Contents.json | 0 .../Assets.xcassets/Contents.json | 0 .../AudioContext.swift | 4 +- .../Base.lproj/LaunchScreen.storyboard | 0 .../ContentView.swift | 2 +- .../Info.plist | 0 .../Preview Assets.xcassets/Contents.json | 0 .../SceneDelegate.swift | 2 +- .../SpeechRecognitionImpl.swift | 16 +- .../Info.plist | 0 .../stt_ios_testTests.swift} | 8 +- .../Info.plist | 0 .../stt_ios_testUITests.swift} | 10 +- native_client/tflitemodelstate.cc | 24 +- native_client/tfmodelstate.cc | 24 +- requirements_eval_tflite.txt | 2 +- taskcluster/tc-netframework-ds-tests.sh | 8 +- tensorflow_full_runtime.supp | 30 +-- training/coqui_stt_training/train.py | 2 +- 112 files changed, 891 insertions(+), 891 deletions(-) rename .taskcluster.yml => .taskcluster.yml.disabled (100%) delete mode 100644 native_client/dotnet/DeepSpeechClient/Enums/ErrorCodes.cs rename native_client/dotnet/{DeepSpeech.sln => STT.sln} (79%) create mode 100644 native_client/dotnet/STTClient/Enums/ErrorCodes.cs rename native_client/dotnet/{DeepSpeechClient => STTClient}/Extensions/NativeExtensions.cs (95%) rename native_client/dotnet/{DeepSpeechClient/Interfaces/IDeepSpeech.cs => STTClient/Interfaces/ISTT.cs} (88%) rename native_client/dotnet/{DeepSpeechClient => STTClient}/Models/CandidateTranscript.cs (93%) rename native_client/dotnet/{DeepSpeechClient => STTClient}/Models/Metadata.cs (89%) rename native_client/dotnet/{DeepSpeechClient/Models/DeepSpeechStream.cs => STTClient/Models/Stream.cs} (81%) rename native_client/dotnet/{DeepSpeechClient => STTClient}/Models/TokenMetadata.cs (93%) rename native_client/dotnet/{DeepSpeechClient => STTClient}/NativeImp.cs (59%) rename native_client/dotnet/{DeepSpeechClient/DeepSpeech.cs => STTClient/STT.cs} (75%) rename native_client/dotnet/{DeepSpeechClient/DeepSpeechClient.csproj => STTClient/STTClient.csproj} (100%) rename native_client/dotnet/{DeepSpeechClient => STTClient}/Structs/CandidateTranscript.cs (94%) rename native_client/dotnet/{DeepSpeechClient => STTClient}/Structs/Metadata.cs (92%) rename native_client/dotnet/{DeepSpeechClient => STTClient}/Structs/TokenMetadata.cs (93%) rename native_client/dotnet/{DeepSpeechConsole => STTConsole}/App.config (100%) rename native_client/dotnet/{DeepSpeechConsole => STTConsole}/Program.cs (96%) rename native_client/dotnet/{DeepSpeechConsole => STTConsole}/Properties/AssemblyInfo.cs (85%) rename native_client/dotnet/{DeepSpeechConsole/DeepSpeechConsole.csproj => STTConsole/STTConsole.csproj} (93%) rename native_client/dotnet/{DeepSpeechConsole => STTConsole}/arctic_a0024.wav (100%) rename native_client/dotnet/{DeepSpeechConsole => STTConsole}/packages.config (100%) rename native_client/dotnet/{DeepSpeechWPF => STTWPF}/.gitignore (100%) rename native_client/dotnet/{DeepSpeechWPF => STTWPF}/App.config (100%) rename native_client/dotnet/{DeepSpeechWPF => STTWPF}/App.xaml (74%) rename native_client/dotnet/{DeepSpeechWPF => STTWPF}/App.xaml.cs (61%) rename native_client/dotnet/{DeepSpeechWPF => STTWPF}/MainWindow.xaml (98%) rename 
native_client/dotnet/{DeepSpeechWPF => STTWPF}/MainWindow.xaml.cs (87%) rename native_client/dotnet/{DeepSpeechWPF => STTWPF}/Properties/AssemblyInfo.cs (91%) rename native_client/dotnet/{DeepSpeechWPF => STTWPF}/Properties/Resources.Designer.cs (94%) rename native_client/dotnet/{DeepSpeechWPF => STTWPF}/Properties/Resources.resx (100%) rename native_client/dotnet/{DeepSpeechWPF => STTWPF}/Properties/Settings.Designer.cs (96%) rename native_client/dotnet/{DeepSpeechWPF => STTWPF}/Properties/Settings.settings (100%) rename native_client/dotnet/{DeepSpeechWPF/DeepSpeech.WPF.csproj => STTWPF/STT.WPF.csproj} (95%) rename native_client/dotnet/{DeepSpeechWPF/DeepSpeech.WPF.sln => STTWPF/STT.WPF.sln} (80%) rename native_client/dotnet/{DeepSpeechWPF => STTWPF}/ViewModels/BindableBase.cs (98%) rename native_client/dotnet/{DeepSpeechWPF => STTWPF}/ViewModels/MainWindowViewModel.cs (97%) rename native_client/dotnet/{DeepSpeechWPF => STTWPF}/packages.config (100%) rename native_client/dotnet/nupkg/{stt.nuspec.in => STT.spec.in} (100%) rename native_client/dotnet/nupkg/build/{DeepSpeech.targets => STT.targets} (100%) rename native_client/java/app/src/main/java/ai/coqui/sttexampleapp/{DeepSpeechActivity.java => STTActivity.java} (95%) rename native_client/java/app/src/main/res/layout/{activity_deep_speech.xml => activity_stt.xml} (99%) rename native_client/java/libstt/src/main/java/ai/coqui/libstt/{DeepSpeechModel.java => STTModel.java} (88%) rename native_client/java/libstt/src/main/java/ai/coqui/libstt/{DeepSpeechStreamingState.java => STTStreamingState.java} (60%) rename native_client/java/libstt/src/main/java/ai/coqui/libstt_doc/{DeepSpeech_Error_Codes.java => STT_Error_Codes.java} (76%) delete mode 100644 native_client/swift/deepspeech_ios/deepspeech_ios.modulemap rename native_client/swift/{deepspeech-ios.podspec => stt-ios.podspec} (77%) rename native_client/swift/{deepspeech_ios.xcodeproj => stt_ios.xcodeproj}/project.pbxproj (77%) rename native_client/swift/{deepspeech_ios.xcodeproj => stt_ios.xcodeproj}/project.xcworkspace/contents.xcworkspacedata (68%) rename native_client/swift/{deepspeech_ios.xcodeproj => stt_ios.xcodeproj}/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist (100%) rename native_client/swift/{deepspeech_ios.xcodeproj/xcshareddata/xcschemes/deepspeech_ios.xcscheme => stt_ios.xcodeproj/xcshareddata/xcschemes/stt_ios.xcscheme} (81%) rename native_client/swift/{deepspeech_ios.xcworkspace => stt_ios.xcworkspace}/contents.xcworkspacedata (55%) rename native_client/swift/{deepspeech_ios.xcworkspace => stt_ios.xcworkspace}/xcshareddata/IDEWorkspaceChecks.plist (100%) rename native_client/swift/{deepspeech_ios.xcworkspace => stt_ios.xcworkspace}/xcshareddata/WorkspaceSettings.xcsettings (100%) rename native_client/swift/{deepspeech_ios => stt_ios}/Info.plist (100%) rename native_client/swift/{deepspeech_ios/DeepSpeech.swift => stt_ios/STT.swift} (64%) rename native_client/swift/{deepspeech_ios/deepspeech_ios.h => stt_ios/stt_ios.h} (64%) create mode 100644 native_client/swift/stt_ios/stt_ios.modulemap rename native_client/swift/{deepspeech_ios_test.xcodeproj => stt_ios_test.xcodeproj}/project.pbxproj (76%) rename native_client/swift/{deepspeech_ios_test.xcodeproj => stt_ios_test.xcodeproj}/project.xcworkspace/contents.xcworkspacedata (66%) rename native_client/swift/{deepspeech_ios_test.xcodeproj => stt_ios_test.xcodeproj}/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist (100%) rename 
native_client/swift/{deepspeech_ios_test.xcodeproj/xcshareddata/xcschemes/deepspeech_ios_test.xcscheme => stt_ios_test.xcodeproj/xcshareddata/xcschemes/stt_ios_test.xcscheme} (75%) rename native_client/swift/{deepspeech_ios_test => stt_ios_test}/AppDelegate.swift (98%) rename native_client/swift/{deepspeech_ios_test => stt_ios_test}/Assets.xcassets/AppIcon.appiconset/Contents.json (100%) rename native_client/swift/{deepspeech_ios_test => stt_ios_test}/Assets.xcassets/Contents.json (100%) rename native_client/swift/{deepspeech_ios_test => stt_ios_test}/AudioContext.swift (98%) rename native_client/swift/{deepspeech_ios_test => stt_ios_test}/Base.lproj/LaunchScreen.storyboard (100%) rename native_client/swift/{deepspeech_ios_test => stt_ios_test}/ContentView.swift (98%) rename native_client/swift/{deepspeech_ios_test => stt_ios_test}/Info.plist (100%) rename native_client/swift/{deepspeech_ios_test => stt_ios_test}/Preview Content/Preview Assets.xcassets/Contents.json (100%) rename native_client/swift/{deepspeech_ios_test => stt_ios_test}/SceneDelegate.swift (99%) rename native_client/swift/{deepspeech_ios_test => stt_ios_test}/SpeechRecognitionImpl.swift (95%) rename native_client/swift/{deepspeech_ios_testTests => stt_ios_testTests}/Info.plist (100%) rename native_client/swift/{deepspeech_ios_testTests/deepspeech_ios_testTests.swift => stt_ios_testTests/stt_ios_testTests.swift} (85%) rename native_client/swift/{deepspeech_ios_testUITests => stt_ios_testUITests}/Info.plist (100%) rename native_client/swift/{deepspeech_ios_testUITests/deepspeech_ios_testUITests.swift => stt_ios_testUITests/stt_ios_testUITests.swift} (88%) diff --git a/.taskcluster.yml b/.taskcluster.yml.disabled similarity index 100% rename from .taskcluster.yml rename to .taskcluster.yml.disabled diff --git a/bin/run-ldc93s1.sh b/bin/run-ldc93s1.sh index 3f635da5..d19722b0 100755 --- a/bin/run-ldc93s1.sh +++ b/bin/run-ldc93s1.sh @@ -13,7 +13,7 @@ fi; if [ -d "${COMPUTE_KEEP_DIR}" ]; then checkpoint_dir=$COMPUTE_KEEP_DIR else - checkpoint_dir=$(python -c 'from xdg import BaseDirectory as xdg; print(xdg.save_data_path("deepspeech/ldc93s1"))') + checkpoint_dir=$(python -c 'from xdg import BaseDirectory as xdg; print(xdg.save_data_path("stt/ldc93s1"))') fi # Force only one visible device because we have a single-sample dataset diff --git a/doc/BUILDING.rst b/doc/BUILDING.rst index fea38f40..3f0457d4 100644 --- a/doc/BUILDING.rst +++ b/doc/BUILDING.rst @@ -60,7 +60,7 @@ Compile Coqui STT ----------------- Compile ``libstt.so`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^^^^ Within your TensorFlow directory, there should be a symbolic link to the 🐸STT ``native_client`` directory. 
If it is not present, create it with the follow command: @@ -238,8 +238,8 @@ Due to the discontinuation of Bintray JCenter we do not have pre-built Android p implementation 'stt.coqui.ai:libstt:VERSION@aar' -Building ``libstt.so`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Building ``libstt.so`` for Android +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ You can build the ``libstt.so`` using (ARMv7): @@ -254,7 +254,7 @@ Or (ARM64): bazel build --workspace_status_command="bash native_client/bazel_workspace_status_cmd.sh" --config=monolithic --config=android --config=android_arm64 --define=runtime=tflite --action_env ANDROID_NDK_API_LEVEL=21 --cxxopt=-std=c++14 --copt=-D_GLIBCXX_USE_C99 //native_client:libstt.so Building ``libstt.aar`` -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^^^^^^ In the unlikely event you have to rebuild the JNI bindings, source code is available under the ``libstt`` subdirectory. Building depends on shared @@ -270,7 +270,7 @@ and adapt file naming (when missing, the error message should states what filename it expects and where). Building C++ ``stt`` binary -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^^^^^^^^^^ Building the ``stt`` binary will happen through ``ndk-build`` (ARMv7): @@ -306,7 +306,7 @@ mono 16kHz 16-bits file and it might fail on some WAVE file that are not following exactly the specification. Running ``stt`` via adb -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^^^^^^ You should use ``adb push`` to send data to device, please refer to Android documentation on how to use that. @@ -349,7 +349,7 @@ to leverage GPU / DSP / NPU * Hexagon, the Qualcomm-specific DSP This is highly experimental: -* Requires passing environment variable ``DS_TFLITE_DELEGATE`` with values of +* Requires passing environment variable ``STT_TFLITE_DELEGATE`` with values of ``gpu``, ``nnapi`` or ``hexagon`` (only one at a time) * Might require exported model changes (some Op might not be supported) * We can't guarantee it will work, nor it will be faster than default diff --git a/doc/C-API.rst b/doc/C-API.rst index d9c2da1d..b76c06b8 100644 --- a/doc/C-API.rst +++ b/doc/C-API.rst @@ -10,65 +10,65 @@ C API See also the list of error codes including descriptions for each error in :ref:`error-codes`. -.. doxygenfunction:: DS_CreateModel - :project: deepspeech-c +.. doxygenfunction:: STT_CreateModel + :project: stt-c -.. doxygenfunction:: DS_FreeModel - :project: deepspeech-c +.. doxygenfunction:: STT_FreeModel + :project: stt-c -.. doxygenfunction:: DS_EnableExternalScorer - :project: deepspeech-c +.. doxygenfunction:: STT_EnableExternalScorer + :project: stt-c -.. doxygenfunction:: DS_DisableExternalScorer - :project: deepspeech-c +.. doxygenfunction:: STT_DisableExternalScorer + :project: stt-c -.. doxygenfunction:: DS_AddHotWord - :project: deepspeech-c +.. doxygenfunction:: STT_AddHotWord + :project: stt-c -.. doxygenfunction:: DS_EraseHotWord - :project: deepspeech-c +.. doxygenfunction:: STT_EraseHotWord + :project: stt-c -.. doxygenfunction:: DS_ClearHotWords - :project: deepspeech-c +.. doxygenfunction:: STT_ClearHotWords + :project: stt-c -.. doxygenfunction:: DS_SetScorerAlphaBeta - :project: deepspeech-c +.. doxygenfunction:: STT_SetScorerAlphaBeta + :project: stt-c -.. doxygenfunction:: DS_GetModelSampleRate - :project: deepspeech-c +.. doxygenfunction:: STT_GetModelSampleRate + :project: stt-c -.. doxygenfunction:: DS_SpeechToText - :project: deepspeech-c +.. doxygenfunction:: STT_SpeechToText + :project: stt-c -.. 
doxygenfunction:: DS_SpeechToTextWithMetadata - :project: deepspeech-c +.. doxygenfunction:: STT_SpeechToTextWithMetadata + :project: stt-c -.. doxygenfunction:: DS_CreateStream - :project: deepspeech-c +.. doxygenfunction:: STT_CreateStream + :project: stt-c -.. doxygenfunction:: DS_FeedAudioContent - :project: deepspeech-c +.. doxygenfunction:: STT_FeedAudioContent + :project: stt-c -.. doxygenfunction:: DS_IntermediateDecode - :project: deepspeech-c +.. doxygenfunction:: STT_IntermediateDecode + :project: stt-c -.. doxygenfunction:: DS_IntermediateDecodeWithMetadata - :project: deepspeech-c +.. doxygenfunction:: STT_IntermediateDecodeWithMetadata + :project: stt-c -.. doxygenfunction:: DS_FinishStream - :project: deepspeech-c +.. doxygenfunction:: STT_FinishStream + :project: stt-c -.. doxygenfunction:: DS_FinishStreamWithMetadata - :project: deepspeech-c +.. doxygenfunction:: STT_FinishStreamWithMetadata + :project: stt-c -.. doxygenfunction:: DS_FreeStream - :project: deepspeech-c +.. doxygenfunction:: STT_FreeStream + :project: stt-c -.. doxygenfunction:: DS_FreeMetadata - :project: deepspeech-c +.. doxygenfunction:: STT_FreeMetadata + :project: stt-c -.. doxygenfunction:: DS_FreeString - :project: deepspeech-c +.. doxygenfunction:: STT_FreeString + :project: stt-c -.. doxygenfunction:: DS_Version - :project: deepspeech-c +.. doxygenfunction:: STT_Version + :project: stt-c diff --git a/doc/DotNet-API.rst b/doc/DotNet-API.rst index 92342ded..bba28896 100644 --- a/doc/DotNet-API.rst +++ b/doc/DotNet-API.rst @@ -2,18 +2,18 @@ ============== -DeepSpeech Class +STT Class ---------------- -.. doxygenclass:: DeepSpeechClient::DeepSpeech - :project: deepspeech-dotnet +.. doxygenclass:: STTClient::STT + :project: stt-dotnet :members: -DeepSpeechStream Class +Stream Class ---------------------- -.. doxygenclass:: DeepSpeechClient::Models::DeepSpeechStream - :project: deepspeech-dotnet +.. doxygenclass:: STTClient::Models::Stream + :project: stt-dotnet :members: ErrorCodes @@ -21,33 +21,33 @@ ErrorCodes See also the main definition including descriptions for each error in :ref:`error-codes`. -.. doxygenenum:: DeepSpeechClient::Enums::ErrorCodes - :project: deepspeech-dotnet +.. doxygenenum:: STTClient::Enums::ErrorCodes + :project: stt-dotnet Metadata -------- -.. doxygenclass:: DeepSpeechClient::Models::Metadata - :project: deepspeech-dotnet +.. doxygenclass:: STTClient::Models::Metadata + :project: stt-dotnet :members: Transcripts CandidateTranscript ------------------- -.. doxygenclass:: DeepSpeechClient::Models::CandidateTranscript - :project: deepspeech-dotnet +.. doxygenclass:: STTClient::Models::CandidateTranscript + :project: stt-dotnet :members: Tokens, Confidence TokenMetadata ------------- -.. doxygenclass:: DeepSpeechClient::Models::TokenMetadata - :project: deepspeech-dotnet +.. doxygenclass:: STTClient::Models::TokenMetadata + :project: stt-dotnet :members: Text, Timestep, StartTime -DeepSpeech Interface +STT Interface -------------------- -.. doxygeninterface:: DeepSpeechClient::Interfaces::IDeepSpeech - :project: deepspeech-dotnet +.. doxygeninterface:: STTClient::Interfaces::ISTT + :project: stt-dotnet :members: diff --git a/doc/DotNet-Examples.rst b/doc/DotNet-Examples.rst index a00ee833..beec6243 100644 --- a/doc/DotNet-Examples.rst +++ b/doc/DotNet-Examples.rst @@ -1,12 +1,12 @@ .NET API Usage example ====================== -Examples are from `native_client/dotnet/DeepSpeechConsole/Program.cs`. +Examples are from `native_client/dotnet/STTConsole/Program.cs`. 
Creating a model instance and loading model ------------------------------------------- -.. literalinclude:: ../native_client/dotnet/DeepSpeechConsole/Program.cs +.. literalinclude:: ../native_client/dotnet/STTConsole/Program.cs :language: csharp :linenos: :lineno-match: @@ -16,7 +16,7 @@ Creating a model instance and loading model Performing inference -------------------- -.. literalinclude:: ../native_client/dotnet/DeepSpeechConsole/Program.cs +.. literalinclude:: ../native_client/dotnet/STTConsole/Program.cs :language: csharp :linenos: :lineno-match: @@ -26,4 +26,4 @@ Performing inference Full source code ---------------- -See :download:`Full source code<../native_client/dotnet/DeepSpeechConsole/Program.cs>`. +See :download:`Full source code<../native_client/dotnet/STTConsole/Program.cs>`. diff --git a/doc/Java-API.rst b/doc/Java-API.rst index a61bd1b1..69603141 100644 --- a/doc/Java-API.rst +++ b/doc/Java-API.rst @@ -1,29 +1,29 @@ Java ==== -DeepSpeechModel +STTModel --------------- -.. doxygenclass:: org::deepspeech::libdeepspeech::DeepSpeechModel - :project: deepspeech-java +.. doxygenclass:: ai::coqui::libstt::STTModel + :project: stt-java :members: Metadata -------- -.. doxygenclass:: org::deepspeech::libdeepspeech::Metadata - :project: deepspeech-java +.. doxygenclass:: ai::coqui::libstt::Metadata + :project: stt-java :members: getNumTranscripts, getTranscript CandidateTranscript ------------------- -.. doxygenclass:: org::deepspeech::libdeepspeech::CandidateTranscript - :project: deepspeech-java +.. doxygenclass:: ai::coqui::libstt::CandidateTranscript + :project: stt-java :members: getNumTokens, getConfidence, getToken TokenMetadata ------------- -.. doxygenclass:: org::deepspeech::libdeepspeech::TokenMetadata - :project: deepspeech-java +.. doxygenclass:: ai::coqui::libstt::TokenMetadata + :project: stt-java :members: getText, getTimestep, getStartTime diff --git a/doc/Java-Examples.rst b/doc/Java-Examples.rst index 04836ed5..834354df 100644 --- a/doc/Java-Examples.rst +++ b/doc/Java-Examples.rst @@ -1,12 +1,12 @@ Java API Usage example ====================== -Examples are from `native_client/java/app/src/main/java/org/deepspeech/DeepSpeechActivity.java`. +Examples are from `native_client/java/app/src/main/java/ai/coqui/STTActivity.java`. Creating a model instance and loading model ------------------------------------------- -.. literalinclude:: ../native_client/java/app/src/main/java/org/deepspeech/DeepSpeechActivity.java +.. literalinclude:: ../native_client/java/app/src/main/java/ai/coqui/STTActivity.java :language: java :linenos: :lineno-match: @@ -16,7 +16,7 @@ Creating a model instance and loading model Performing inference -------------------- -.. literalinclude:: ../native_client/java/app/src/main/java/org/deepspeech/DeepSpeechActivity.java +.. literalinclude:: ../native_client/java/app/src/main/java/ai/coqui/STTActivity.java :language: java :linenos: :lineno-match: @@ -26,4 +26,4 @@ Performing inference Full source code ---------------- -See :download:`Full source code<../native_client/java/app/src/main/java/org/deepspeech/DeepSpeechActivity.java>`. +See :download:`Full source code<../native_client/java/app/src/main/java/ai/coqui/STTActivity.java>`. 
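The C API documentation above renames every exported symbol from the DS_ prefix to STT_, and the .NET and Java pages rename the corresponding classes and Doxygen projects to match. A rename of this breadth is easy to leave half-finished, so a repo-wide audit for stragglers is cheap insurance; a sketch follows (the symbol list is an assumption drawn from the functions documented above, not an exhaustive inventory — e.g. renamed environment variables such as STT_TFLITE_DELEGATE are not covered):

    #!/bin/bash
    # Report any DS_-prefixed C API references that survived the rebranding.
    # Exits 0 either way; the output is meant for human review.
    grep -rn \
      --include='*.rst' --include='*.h' --include='*.cc' --include='*.supp' \
      -E 'DS_(CreateModel|FreeModel|EnableExternalScorer|DisableExternalScorer|AddHotWord|EraseHotWord|ClearHotWords|SetScorerAlphaBeta|GetModelSampleRate|SpeechToText|CreateStream|FeedAudioContent|IntermediateDecode|FinishStream|FreeStream|FreeMetadata|FreeString|Version)' \
      doc/ native_client/ || echo "no stale DS_ symbols found"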
diff --git a/doc/SUPPORTED_PLATFORMS.rst b/doc/SUPPORTED_PLATFORMS.rst index 1ccfb7e3..800d92f2 100644 --- a/doc/SUPPORTED_PLATFORMS.rst +++ b/doc/SUPPORTED_PLATFORMS.rst @@ -9,61 +9,61 @@ Linux / AMD64 without GPU ^^^^^^^^^^^^^^^^^^^^^^^^^ * x86-64 CPU with AVX/FMA (one can rebuild without AVX/FMA, but it might slow down inference) * Ubuntu 14.04+ (glibc >= 2.19, libstdc++6 >= 4.8) -* Full TensorFlow runtime (``deepspeech`` packages) -* TensorFlow Lite runtime (``deepspeech-tflite`` packages) +* Full TensorFlow runtime (``stt`` packages) +* TensorFlow Lite runtime (``stt-tflite`` packages) Linux / AMD64 with GPU ^^^^^^^^^^^^^^^^^^^^^^ * x86-64 CPU with AVX/FMA (one can rebuild without AVX/FMA, but it might slow down inference) * Ubuntu 14.04+ (glibc >= 2.19, libstdc++6 >= 4.8) * CUDA 10.0 (and capable GPU) -* Full TensorFlow runtime (``deepspeech`` packages) -* TensorFlow Lite runtime (``deepspeech-tflite`` packages) +* Full TensorFlow runtime (``stt`` packages) +* TensorFlow Lite runtime (``stt-tflite`` packages) Linux / ARMv7 ^^^^^^^^^^^^^ * Cortex-A53 compatible ARMv7 SoC with Neon support * Raspbian Buster-compatible distribution -* TensorFlow Lite runtime (``deepspeech-tflite`` packages) +* TensorFlow Lite runtime (``stt-tflite`` packages) Linux / Aarch64 ^^^^^^^^^^^^^^^ * Cortex-A72 compatible Aarch64 SoC * ARMbian Buster-compatible distribution -* TensorFlow Lite runtime (``deepspeech-tflite`` packages) +* TensorFlow Lite runtime (``stt-tflite`` packages) Android / ARMv7 ^^^^^^^^^^^^^^^ * ARMv7 SoC with Neon support * Android 7.0-10.0 * NDK API level >= 21 -* TensorFlow Lite runtime (``deepspeech-tflite`` packages) +* TensorFlow Lite runtime (``stt-tflite`` packages) Android / Aarch64 ^^^^^^^^^^^^^^^^^ * Aarch64 SoC * Android 7.0-10.0 * NDK API level >= 21 -* TensorFlow Lite runtime (``deepspeech-tflite`` packages) +* TensorFlow Lite runtime (``stt-tflite`` packages) macOS / AMD64 ^^^^^^^^^^^^^ * x86-64 CPU with AVX/FMA (one can rebuild without AVX/FMA, but it might slow down inference) * macOS >= 10.10 -* Full TensorFlow runtime (``deepspeech`` packages) -* TensorFlow Lite runtime (``deepspeech-tflite`` packages) +* Full TensorFlow runtime (``stt`` packages) +* TensorFlow Lite runtime (``stt-tflite`` packages) Windows / AMD64 without GPU ^^^^^^^^^^^^^^^^^^^^^^^^^^^ * x86-64 CPU with AVX/FMA (one can rebuild without AVX/FMA, but it might slow down inference) * Windows Server >= 2012 R2 ; Windows >= 8.1 -* Full TensorFlow runtime (``deepspeech`` packages) -* TensorFlow Lite runtime (``deepspeech-tflite`` packages) +* Full TensorFlow runtime (``stt`` packages) +* TensorFlow Lite runtime (``stt-tflite`` packages) Windows / AMD64 with GPU ^^^^^^^^^^^^^^^^^^^^^^^^ * x86-64 CPU with AVX/FMA (one can rebuild without AVX/FMA, but it might slow down inference) * Windows Server >= 2012 R2 ; Windows >= 8.1 * CUDA 10.0 (and capable GPU) -* Full TensorFlow runtime (``deepspeech`` packages) -* TensorFlow Lite runtime (``deepspeech-tflite`` packages) +* Full TensorFlow runtime (``stt`` packages) +* TensorFlow Lite runtime (``stt-tflite`` packages) diff --git a/doc/Structs.rst b/doc/Structs.rst index 5d532277..14869dd2 100644 --- a/doc/Structs.rst +++ b/doc/Structs.rst @@ -5,19 +5,19 @@ Metadata -------- .. doxygenstruct:: Metadata - :project: deepspeech-c + :project: stt-c :members: CandidateTranscript ------------------- .. doxygenstruct:: CandidateTranscript - :project: deepspeech-c + :project: stt-c :members: TokenMetadata ------------- .. 
doxygenstruct:: TokenMetadata - :project: deepspeech-c + :project: stt-c :members: diff --git a/doc/conf.py b/doc/conf.py index dc447452..92b315e9 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -81,9 +81,9 @@ extensions = [ breathe_projects = { - "deepspeech-c": "xml-c/", - "deepspeech-java": "xml-java/", - "deepspeech-dotnet": "xml-dotnet/", + "stt-c": "xml-c/", + "stt-java": "xml-java/", + "stt-dotnet": "xml-dotnet/", } js_source_path = "../native_client/javascript/index.ts" diff --git a/ds_lib.supp b/ds_lib.supp index d7748e34..98f40177 100644 --- a/ds_lib.supp +++ b/ds_lib.supp @@ -5,6 +5,6 @@ fun:_Znwm fun:_ZN6tflite20DefaultErrorReporterEv fun:_ZN16TFLiteModelState4initEPKc - fun:DS_CreateModel + fun:STT_CreateModel fun:main } diff --git a/ds_openfst.supp b/ds_openfst.supp index 378659db..8cb96016 100644 --- a/ds_openfst.supp +++ b/ds_openfst.supp @@ -815,7 +815,7 @@ fun:_ZN6Scorer9load_trieERSt14basic_ifstreamIcSt11char_traitsIcEERKNSt7__cxx1112basic_stringIcS2_SaIcEEE fun:_ZN6Scorer7load_lmERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE fun:_ZN6Scorer4initERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERK8Alphabet - fun:DS_EnableExternalScorer + fun:STT_EnableExternalScorer fun:main } { @@ -831,7 +831,7 @@ fun:_ZN6Scorer9load_trieERSt14basic_ifstreamIcSt11char_traitsIcEERKNSt7__cxx1112basic_stringIcS2_SaIcEEE fun:_ZN6Scorer7load_lmERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE fun:_ZN6Scorer4initERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERK8Alphabet - fun:DS_EnableExternalScorer + fun:STT_EnableExternalScorer fun:main } { diff --git a/native_client/args.h b/native_client/args.h index 04c5eb88..30ed3181 100644 --- a/native_client/args.h +++ b/native_client/args.h @@ -64,9 +64,9 @@ void PrintHelp(const char* bin) "\t--hot_words\t\t\tHot-words and their boosts. 
Word:Boost pairs are comma-separated\n" "\t--help\t\t\t\tShow help\n" "\t--version\t\t\tPrint version and exits\n"; - char* version = DS_Version(); + char* version = STT_Version(); std::cerr << "Coqui STT " << version << "\n"; - DS_FreeString(version); + STT_FreeString(version); exit(1); } @@ -169,9 +169,9 @@ bool ProcessArgs(int argc, char** argv) } if (has_versions) { - char* version = DS_Version(); + char* version = STT_Version(); std::cout << "Coqui " << version << "\n"; - DS_FreeString(version); + STT_FreeString(version); return false; } diff --git a/native_client/client.cc b/native_client/client.cc index 70c199e7..93afa555 100644 --- a/native_client/client.cc +++ b/native_client/client.cc @@ -168,17 +168,17 @@ LocalDsSTT(ModelState* aCtx, const short* aBuffer, size_t aBufferSize, // sphinx-doc: c_ref_inference_start if (extended_output) { - Metadata *result = DS_SpeechToTextWithMetadata(aCtx, aBuffer, aBufferSize, 1); + Metadata *result = STT_SpeechToTextWithMetadata(aCtx, aBuffer, aBufferSize, 1); res.string = CandidateTranscriptToString(&result->transcripts[0]); - DS_FreeMetadata(result); + STT_FreeMetadata(result); } else if (json_output) { - Metadata *result = DS_SpeechToTextWithMetadata(aCtx, aBuffer, aBufferSize, json_candidate_transcripts); + Metadata *result = STT_SpeechToTextWithMetadata(aCtx, aBuffer, aBufferSize, json_candidate_transcripts); res.string = MetadataToJSON(result); - DS_FreeMetadata(result); + STT_FreeMetadata(result); } else if (stream_size > 0) { StreamingState* ctx; - int status = DS_CreateStream(aCtx, &ctx); - if (status != DS_ERR_OK) { + int status = STT_CreateStream(aCtx, &ctx); + if (status != STT_ERR_OK) { res.string = strdup(""); return res; } @@ -187,28 +187,28 @@ LocalDsSTT(ModelState* aCtx, const short* aBuffer, size_t aBufferSize, const char *prev = nullptr; while (off < aBufferSize) { size_t cur = aBufferSize - off > stream_size ? stream_size : aBufferSize - off; - DS_FeedAudioContent(ctx, aBuffer + off, cur); + STT_FeedAudioContent(ctx, aBuffer + off, cur); off += cur; prev = last; - const char* partial = DS_IntermediateDecode(ctx); + const char* partial = STT_IntermediateDecode(ctx); if (last == nullptr || strcmp(last, partial)) { printf("%s\n", partial); last = partial; } else { - DS_FreeString((char *) partial); + STT_FreeString((char *) partial); } if (prev != nullptr && prev != last) { - DS_FreeString((char *) prev); + STT_FreeString((char *) prev); } } if (last != nullptr) { - DS_FreeString((char *) last); + STT_FreeString((char *) last); } - res.string = DS_FinishStream(ctx); + res.string = STT_FinishStream(ctx); } else if (extended_stream_size > 0) { StreamingState* ctx; - int status = DS_CreateStream(aCtx, &ctx); - if (status != DS_ERR_OK) { + int status = STT_CreateStream(aCtx, &ctx); + if (status != STT_ERR_OK) { res.string = strdup(""); return res; } @@ -217,10 +217,10 @@ LocalDsSTT(ModelState* aCtx, const short* aBuffer, size_t aBufferSize, const char *prev = nullptr; while (off < aBufferSize) { size_t cur = aBufferSize - off > extended_stream_size ? 
extended_stream_size : aBufferSize - off; - DS_FeedAudioContent(ctx, aBuffer + off, cur); + STT_FeedAudioContent(ctx, aBuffer + off, cur); off += cur; prev = last; - const Metadata* result = DS_IntermediateDecodeWithMetadata(ctx, 1); + const Metadata* result = STT_IntermediateDecodeWithMetadata(ctx, 1); const char* partial = CandidateTranscriptToString(&result->transcripts[0]); if (last == nullptr || strcmp(last, partial)) { printf("%s\n", partial); @@ -231,14 +231,14 @@ LocalDsSTT(ModelState* aCtx, const short* aBuffer, size_t aBufferSize, if (prev != nullptr && prev != last) { free((char *) prev); } - DS_FreeMetadata((Metadata *)result); + STT_FreeMetadata((Metadata *)result); } - const Metadata* result = DS_FinishStreamWithMetadata(ctx, 1); + const Metadata* result = STT_FinishStreamWithMetadata(ctx, 1); res.string = CandidateTranscriptToString(&result->transcripts[0]); - DS_FreeMetadata((Metadata *)result); + STT_FreeMetadata((Metadata *)result); free((char *) last); } else { - res.string = DS_SpeechToText(aCtx, aBuffer, aBufferSize); + res.string = STT_SpeechToText(aCtx, aBuffer, aBufferSize); } // sphinx-doc: c_ref_inference_stop @@ -404,7 +404,7 @@ GetAudioBuffer(const char* path, int desired_sample_rate) void ProcessFile(ModelState* context, const char* path, bool show_times) { - ds_audio_buffer audio = GetAudioBuffer(path, DS_GetModelSampleRate(context)); + ds_audio_buffer audio = GetAudioBuffer(path, STT_GetModelSampleRate(context)); // Pass audio to STT // We take half of buffer_size because buffer is a char* while @@ -418,7 +418,7 @@ ProcessFile(ModelState* context, const char* path, bool show_times) if (result.string) { printf("%s\n", result.string); - DS_FreeString((char*)result.string); + STT_FreeString((char*)result.string); } if (show_times) { @@ -453,16 +453,16 @@ main(int argc, char **argv) // Initialise STT ModelState* ctx; // sphinx-doc: c_ref_model_start - int status = DS_CreateModel(model, &ctx); + int status = STT_CreateModel(model, &ctx); if (status != 0) { - char* error = DS_ErrorCodeToErrorMessage(status); + char* error = STT_ErrorCodeToErrorMessage(status); fprintf(stderr, "Could not create model: %s\n", error); free(error); return 1; } if (set_beamwidth) { - status = DS_SetModelBeamWidth(ctx, beam_width); + status = STT_SetModelBeamWidth(ctx, beam_width); if (status != 0) { fprintf(stderr, "Could not set model beam width.\n"); return 1; @@ -470,13 +470,13 @@ main(int argc, char **argv) } if (scorer) { - status = DS_EnableExternalScorer(ctx, scorer); + status = STT_EnableExternalScorer(ctx, scorer); if (status != 0) { fprintf(stderr, "Could not enable external scorer.\n"); return 1; } if (set_alphabeta) { - status = DS_SetScorerAlphaBeta(ctx, lm_alpha, lm_beta); + status = STT_SetScorerAlphaBeta(ctx, lm_alpha, lm_beta); if (status != 0) { fprintf(stderr, "Error setting scorer alpha and beta.\n"); return 1; @@ -494,7 +494,7 @@ main(int argc, char **argv) // so, check the boost string before we turn it into a float bool boost_is_valid = (pair_[1].find_first_not_of("-.0123456789") == std::string::npos); float boost = strtof((pair_[1]).c_str(),0); - status = DS_AddHotWord(ctx, word, boost); + status = STT_AddHotWord(ctx, word, boost); if (status != 0 || !boost_is_valid) { fprintf(stderr, "Could not enable hot-word.\n"); return 1; @@ -555,7 +555,7 @@ main(int argc, char **argv) sox_quit(); #endif // NO_SOX - DS_FreeModel(ctx); + STT_FreeModel(ctx); return 0; } diff --git a/native_client/coqui-stt.h b/native_client/coqui-stt.h index 24c7ef66..7794bc79 100644 --- 
a/native_client/coqui-stt.h +++ b/native_client/coqui-stt.h @@ -61,37 +61,37 @@ typedef struct Metadata { // sphinx-doc: error_code_listing_start -#define DS_FOR_EACH_ERROR(APPLY) \ - APPLY(DS_ERR_OK, 0x0000, "No error.") \ - APPLY(DS_ERR_NO_MODEL, 0x1000, "Missing model information.") \ - APPLY(DS_ERR_INVALID_ALPHABET, 0x2000, "Invalid alphabet embedded in model. (Data corruption?)") \ - APPLY(DS_ERR_INVALID_SHAPE, 0x2001, "Invalid model shape.") \ - APPLY(DS_ERR_INVALID_SCORER, 0x2002, "Invalid scorer file.") \ - APPLY(DS_ERR_MODEL_INCOMPATIBLE, 0x2003, "Incompatible model.") \ - APPLY(DS_ERR_SCORER_NOT_ENABLED, 0x2004, "External scorer is not enabled.") \ - APPLY(DS_ERR_SCORER_UNREADABLE, 0x2005, "Could not read scorer file.") \ - APPLY(DS_ERR_SCORER_INVALID_LM, 0x2006, "Could not recognize language model header in scorer.") \ - APPLY(DS_ERR_SCORER_NO_TRIE, 0x2007, "Reached end of scorer file before loading vocabulary trie.") \ - APPLY(DS_ERR_SCORER_INVALID_TRIE, 0x2008, "Invalid magic in trie header.") \ - APPLY(DS_ERR_SCORER_VERSION_MISMATCH, 0x2009, "Scorer file version does not match expected version.") \ - APPLY(DS_ERR_FAIL_INIT_MMAP, 0x3000, "Failed to initialize memory mapped model.") \ - APPLY(DS_ERR_FAIL_INIT_SESS, 0x3001, "Failed to initialize the session.") \ - APPLY(DS_ERR_FAIL_INTERPRETER, 0x3002, "Interpreter failed.") \ - APPLY(DS_ERR_FAIL_RUN_SESS, 0x3003, "Failed to run the session.") \ - APPLY(DS_ERR_FAIL_CREATE_STREAM, 0x3004, "Error creating the stream.") \ - APPLY(DS_ERR_FAIL_READ_PROTOBUF, 0x3005, "Error reading the proto buffer model file.") \ - APPLY(DS_ERR_FAIL_CREATE_SESS, 0x3006, "Failed to create session.") \ - APPLY(DS_ERR_FAIL_CREATE_MODEL, 0x3007, "Could not allocate model state.") \ - APPLY(DS_ERR_FAIL_INSERT_HOTWORD, 0x3008, "Could not insert hot-word.") \ - APPLY(DS_ERR_FAIL_CLEAR_HOTWORD, 0x3009, "Could not clear hot-words.") \ - APPLY(DS_ERR_FAIL_ERASE_HOTWORD, 0x3010, "Could not erase hot-word.") +#define STT_FOR_EACH_ERROR(APPLY) \ + APPLY(STT_ERR_OK, 0x0000, "No error.") \ + APPLY(STT_ERR_NO_MODEL, 0x1000, "Missing model information.") \ + APPLY(STT_ERR_INVALID_ALPHABET, 0x2000, "Invalid alphabet embedded in model. 
(Data corruption?)") \ + APPLY(STT_ERR_INVALID_SHAPE, 0x2001, "Invalid model shape.") \ + APPLY(STT_ERR_INVALID_SCORER, 0x2002, "Invalid scorer file.") \ + APPLY(STT_ERR_MODEL_INCOMPATIBLE, 0x2003, "Incompatible model.") \ + APPLY(STT_ERR_SCORER_NOT_ENABLED, 0x2004, "External scorer is not enabled.") \ + APPLY(STT_ERR_SCORER_UNREADABLE, 0x2005, "Could not read scorer file.") \ + APPLY(STT_ERR_SCORER_INVALID_LM, 0x2006, "Could not recognize language model header in scorer.") \ + APPLY(STT_ERR_SCORER_NO_TRIE, 0x2007, "Reached end of scorer file before loading vocabulary trie.") \ + APPLY(STT_ERR_SCORER_INVALID_TRIE, 0x2008, "Invalid magic in trie header.") \ + APPLY(STT_ERR_SCORER_VERSION_MISMATCH, 0x2009, "Scorer file version does not match expected version.") \ + APPLY(STT_ERR_FAIL_INIT_MMAP, 0x3000, "Failed to initialize memory mapped model.") \ + APPLY(STT_ERR_FAIL_INIT_SESS, 0x3001, "Failed to initialize the session.") \ + APPLY(STT_ERR_FAIL_INTERPRETER, 0x3002, "Interpreter failed.") \ + APPLY(STT_ERR_FAIL_RUN_SESS, 0x3003, "Failed to run the session.") \ + APPLY(STT_ERR_FAIL_CREATE_STREAM, 0x3004, "Error creating the stream.") \ + APPLY(STT_ERR_FAIL_READ_PROTOBUF, 0x3005, "Error reading the proto buffer model file.") \ + APPLY(STT_ERR_FAIL_CREATE_SESS, 0x3006, "Failed to create session.") \ + APPLY(STT_ERR_FAIL_CREATE_MODEL, 0x3007, "Could not allocate model state.") \ + APPLY(STT_ERR_FAIL_INSERT_HOTWORD, 0x3008, "Could not insert hot-word.") \ + APPLY(STT_ERR_FAIL_CLEAR_HOTWORD, 0x3009, "Could not clear hot-words.") \ + APPLY(STT_ERR_FAIL_ERASE_HOTWORD, 0x3010, "Could not erase hot-word.") // sphinx-doc: error_code_listing_end -enum DeepSpeech_Error_Codes +enum STT_Error_Codes { #define DEFINE(NAME, VALUE, DESC) NAME = VALUE, -DS_FOR_EACH_ERROR(DEFINE) +STT_FOR_EACH_ERROR(DEFINE) #undef DEFINE }; @@ -104,49 +104,49 @@ DS_FOR_EACH_ERROR(DEFINE) * @return Zero on success, non-zero on failure. */ STT_EXPORT -int DS_CreateModel(const char* aModelPath, +int STT_CreateModel(const char* aModelPath, ModelState** retval); /** - * @brief Get beam width value used by the model. If {@link DS_SetModelBeamWidth} + * @brief Get beam width value used by the model. If {@link STT_SetModelBeamWidth} * was not called before, will return the default value loaded from the * model file. * - * @param aCtx A ModelState pointer created with {@link DS_CreateModel}. + * @param aCtx A ModelState pointer created with {@link STT_CreateModel}. * * @return Beam width value used by the model. */ STT_EXPORT -unsigned int DS_GetModelBeamWidth(const ModelState* aCtx); +unsigned int STT_GetModelBeamWidth(const ModelState* aCtx); /** * @brief Set beam width value used by the model. * - * @param aCtx A ModelState pointer created with {@link DS_CreateModel}. + * @param aCtx A ModelState pointer created with {@link STT_CreateModel}. * @param aBeamWidth The beam width used by the model. A larger beam width value * generates better results at the cost of decoding time. * * @return Zero on success, non-zero on failure. */ STT_EXPORT -int DS_SetModelBeamWidth(ModelState* aCtx, +int STT_SetModelBeamWidth(ModelState* aCtx, unsigned int aBeamWidth); /** * @brief Return the sample rate expected by a model. * - * @param aCtx A ModelState pointer created with {@link DS_CreateModel}. + * @param aCtx A ModelState pointer created with {@link STT_CreateModel}. * * @return Sample rate expected by the model for its input. 
*/ STT_EXPORT -int DS_GetModelSampleRate(const ModelState* aCtx); +int STT_GetModelSampleRate(const ModelState* aCtx); /** * @brief Frees associated resources and destroys model object. */ STT_EXPORT -void DS_FreeModel(ModelState* ctx); +void STT_FreeModel(ModelState* ctx); /** * @brief Enable decoding using an external scorer. @@ -157,7 +157,7 @@ void DS_FreeModel(ModelState* ctx); * @return Zero on success, non-zero on failure (invalid arguments). */ STT_EXPORT -int DS_EnableExternalScorer(ModelState* aCtx, +int STT_EnableExternalScorer(ModelState* aCtx, const char* aScorerPath); /** @@ -172,7 +172,7 @@ int DS_EnableExternalScorer(ModelState* aCtx, * @return Zero on success, non-zero on failure (invalid arguments). */ STT_EXPORT -int DS_AddHotWord(ModelState* aCtx, +int STT_AddHotWord(ModelState* aCtx, const char* word, float boost); @@ -185,7 +185,7 @@ int DS_AddHotWord(ModelState* aCtx, * @return Zero on success, non-zero on failure (invalid arguments). */ STT_EXPORT -int DS_EraseHotWord(ModelState* aCtx, +int STT_EraseHotWord(ModelState* aCtx, const char* word); /** @@ -196,7 +196,7 @@ int DS_EraseHotWord(ModelState* aCtx, * @return Zero on success, non-zero on failure (invalid arguments). */ STT_EXPORT -int DS_ClearHotWords(ModelState* aCtx); +int STT_ClearHotWords(ModelState* aCtx); /** * @brief Disable decoding using an external scorer. @@ -206,7 +206,7 @@ int DS_ClearHotWords(ModelState* aCtx); * @return Zero on success, non-zero on failure. */ STT_EXPORT -int DS_DisableExternalScorer(ModelState* aCtx); +int STT_DisableExternalScorer(ModelState* aCtx); /** * @brief Set hyperparameters alpha and beta of the external scorer. @@ -218,7 +218,7 @@ int DS_DisableExternalScorer(ModelState* aCtx); * @return Zero on success, non-zero on failure. */ STT_EXPORT -int DS_SetScorerAlphaBeta(ModelState* aCtx, +int STT_SetScorerAlphaBeta(ModelState* aCtx, float aAlpha, float aBeta); @@ -231,10 +231,10 @@ int DS_SetScorerAlphaBeta(ModelState* aCtx, * @param aBufferSize The number of samples in the audio signal. * * @return The STT result. The user is responsible for freeing the string using - * {@link DS_FreeString()}. Returns NULL on error. + * {@link STT_FreeString()}. Returns NULL on error. */ STT_EXPORT -char* DS_SpeechToText(ModelState* aCtx, +char* STT_SpeechToText(ModelState* aCtx, const short* aBuffer, unsigned int aBufferSize); @@ -250,19 +250,19 @@ char* DS_SpeechToText(ModelState* aCtx, * * @return Metadata struct containing multiple CandidateTranscript structs. Each * transcript has per-token metadata including timing information. The - * user is responsible for freeing Metadata by calling {@link DS_FreeMetadata()}. + * user is responsible for freeing Metadata by calling {@link STT_FreeMetadata()}. * Returns NULL on error. */ STT_EXPORT -Metadata* DS_SpeechToTextWithMetadata(ModelState* aCtx, +Metadata* STT_SpeechToTextWithMetadata(ModelState* aCtx, const short* aBuffer, unsigned int aBufferSize, unsigned int aNumResults); /** * @brief Create a new streaming inference state. The streaming state returned - * by this function can then be passed to {@link DS_FeedAudioContent()} - * and {@link DS_FinishStream()}. + * by this function can then be passed to {@link STT_FeedAudioContent()} + * and {@link STT_FinishStream()}. * * @param aCtx The ModelState pointer for the model to use. * @param[out] retval an opaque pointer that represents the streaming state. Can @@ -271,80 +271,80 @@ Metadata* DS_SpeechToTextWithMetadata(ModelState* aCtx, * @return Zero for success, non-zero on failure. 
*/ STT_EXPORT -int DS_CreateStream(ModelState* aCtx, +int STT_CreateStream(ModelState* aCtx, StreamingState** retval); /** * @brief Feed audio samples to an ongoing streaming inference. * - * @param aSctx A streaming state pointer returned by {@link DS_CreateStream()}. + * @param aSctx A streaming state pointer returned by {@link STT_CreateStream()}. * @param aBuffer An array of 16-bit, mono raw audio samples at the * appropriate sample rate (matching what the model was trained on). * @param aBufferSize The number of samples in @p aBuffer. */ STT_EXPORT -void DS_FeedAudioContent(StreamingState* aSctx, +void STT_FeedAudioContent(StreamingState* aSctx, const short* aBuffer, unsigned int aBufferSize); /** * @brief Compute the intermediate decoding of an ongoing streaming inference. * - * @param aSctx A streaming state pointer returned by {@link DS_CreateStream()}. + * @param aSctx A streaming state pointer returned by {@link STT_CreateStream()}. * * @return The STT intermediate result. The user is responsible for freeing the - * string using {@link DS_FreeString()}. + * string using {@link STT_FreeString()}. */ STT_EXPORT -char* DS_IntermediateDecode(const StreamingState* aSctx); +char* STT_IntermediateDecode(const StreamingState* aSctx); /** * @brief Compute the intermediate decoding of an ongoing streaming inference, * return results including metadata. * - * @param aSctx A streaming state pointer returned by {@link DS_CreateStream()}. + * @param aSctx A streaming state pointer returned by {@link STT_CreateStream()}. * @param aNumResults The number of candidate transcripts to return. * * @return Metadata struct containing multiple candidate transcripts. Each transcript * has per-token metadata including timing information. The user is - * responsible for freeing Metadata by calling {@link DS_FreeMetadata()}. + * responsible for freeing Metadata by calling {@link STT_FreeMetadata()}. * Returns NULL on error. */ STT_EXPORT -Metadata* DS_IntermediateDecodeWithMetadata(const StreamingState* aSctx, +Metadata* STT_IntermediateDecodeWithMetadata(const StreamingState* aSctx, unsigned int aNumResults); /** * @brief Compute the final decoding of an ongoing streaming inference and return * the result. Signals the end of an ongoing streaming inference. * - * @param aSctx A streaming state pointer returned by {@link DS_CreateStream()}. + * @param aSctx A streaming state pointer returned by {@link STT_CreateStream()}. * * @return The STT result. The user is responsible for freeing the string using - * {@link DS_FreeString()}. + * {@link STT_FreeString()}. * * @note This method will free the state pointer (@p aSctx). */ STT_EXPORT -char* DS_FinishStream(StreamingState* aSctx); +char* STT_FinishStream(StreamingState* aSctx); /** * @brief Compute the final decoding of an ongoing streaming inference and return * results including metadata. Signals the end of an ongoing streaming * inference. * - * @param aSctx A streaming state pointer returned by {@link DS_CreateStream()}. + * @param aSctx A streaming state pointer returned by {@link STT_CreateStream()}. * @param aNumResults The number of candidate transcripts to return. * * @return Metadata struct containing multiple candidate transcripts. Each transcript * has per-token metadata including timing information. The user is - * responsible for freeing Metadata by calling {@link DS_FreeMetadata()}. + * responsible for freeing Metadata by calling {@link STT_FreeMetadata()}. * Returns NULL on error. * * @note This method will free the state pointer (@p aSctx). 
*/ STT_EXPORT -Metadata* DS_FinishStreamWithMetadata(StreamingState* aSctx, +Metadata* STT_FinishStreamWithMetadata(StreamingState* aSctx, unsigned int aNumResults); /** @@ -352,42 +352,42 @@ Metadata* DS_FinishStreamWithMetadata(StreamingState* aSctx, * can be used if you no longer need the result of an ongoing streaming * inference and don't want to perform a costly decode operation. * - * @param aSctx A streaming state pointer returned by {@link DS_CreateStream()}. + * @param aSctx A streaming state pointer returned by {@link STT_CreateStream()}. * * @note This method will free the state pointer (@p aSctx). */ STT_EXPORT -void DS_FreeStream(StreamingState* aSctx); +void STT_FreeStream(StreamingState* aSctx); /** * @brief Free memory allocated for metadata information. */ STT_EXPORT -void DS_FreeMetadata(Metadata* m); +void STT_FreeMetadata(Metadata* m); /** * @brief Free a char* string returned by the Coqui STT API. */ STT_EXPORT -void DS_FreeString(char* str); +void STT_FreeString(char* str); /** * @brief Returns the version of this library. The returned version is a semantic - * version (SemVer 2.0.0). The string returned must be freed with {@link DS_FreeString()}. + * version (SemVer 2.0.0). The string returned must be freed with {@link STT_FreeString()}. * * @return The version string. */ STT_EXPORT -char* DS_Version(); +char* STT_Version(); /** * @brief Returns a textual description corresponding to an error code. - * The string returned must be freed with @{link DS_FreeString()}. + * The string returned must be freed with {@link STT_FreeString()}. * * @return The error description. */ STT_EXPORT -char* DS_ErrorCodeToErrorMessage(int aErrorCode); +char* STT_ErrorCodeToErrorMessage(int aErrorCode); #undef STT_EXPORT diff --git a/native_client/ctcdecode/__init__.py b/native_client/ctcdecode/__init__.py index 80edc51d..fc8f3255 100644 --- a/native_client/ctcdecode/__init__.py +++ b/native_client/ctcdecode/__init__.py @@ -9,7 +9,7 @@ __version__ = swigwrapper.__version__.decode('utf-8') # Hack: import error codes by matching on their names, as SWIG unfortunately # does not support binding enums to Python in a scoped manner yet.
for symbol in dir(swigwrapper): - if symbol.startswith('DS_ERR_'): + if symbol.startswith('STT_ERR_'): globals()[symbol] = getattr(swigwrapper, symbol) class Scorer(swigwrapper.Scorer): diff --git a/native_client/ctcdecode/scorer.cpp b/native_client/ctcdecode/scorer.cpp index b77c63f7..e5c6c359 100644 --- a/native_client/ctcdecode/scorer.cpp +++ b/native_client/ctcdecode/scorer.cpp @@ -74,13 +74,13 @@ int Scorer::load_lm(const std::string& lm_path) // Check if file is readable to avoid KenLM throwing an exception const char* filename = lm_path.c_str(); if (access(filename, R_OK) != 0) { - return DS_ERR_SCORER_UNREADABLE; + return STT_ERR_SCORER_UNREADABLE; } // Check if the file format is valid to avoid KenLM throwing an exception lm::ngram::ModelType model_type; if (!lm::ngram::RecognizeBinary(filename, model_type)) { - return DS_ERR_SCORER_INVALID_LM; + return STT_ERR_SCORER_INVALID_LM; } // Load the LM @@ -97,7 +97,7 @@ int Scorer::load_lm(const std::string& lm_path) uint64_t trie_offset = language_model_->GetEndOfSearchOffset(); if (package_size <= trie_offset) { // File ends without a trie structure - return DS_ERR_SCORER_NO_TRIE; + return STT_ERR_SCORER_NO_TRIE; } // Read metadata and trie from file @@ -113,7 +113,7 @@ int Scorer::load_trie(std::ifstream& fin, const std::string& file_path) if (magic != MAGIC) { std::cerr << "Error: Can't parse scorer file, invalid header. Try updating " "your scorer file." << std::endl; - return DS_ERR_SCORER_INVALID_TRIE; + return STT_ERR_SCORER_INVALID_TRIE; } int version; @@ -128,7 +128,7 @@ int Scorer::load_trie(std::ifstream& fin, const std::string& file_path) std::cerr << "Downgrade your scorer file or update your version of Coqui STT."; } std::cerr << std::endl; - return DS_ERR_SCORER_VERSION_MISMATCH; + return STT_ERR_SCORER_VERSION_MISMATCH; } fin.read(reinterpret_cast(&is_utf8_mode_), sizeof(is_utf8_mode_)); @@ -143,7 +143,7 @@ int Scorer::load_trie(std::ifstream& fin, const std::string& file_path) opt.mode = fst::FstReadOptions::MAP; opt.source = file_path; dictionary.reset(FstType::Read(fin, opt)); - return DS_ERR_OK; + return STT_ERR_OK; } bool Scorer::save_dictionary(const std::string& path, bool append_instead_of_overwrite) diff --git a/native_client/dotnet/DeepSpeechClient/Enums/ErrorCodes.cs b/native_client/dotnet/DeepSpeechClient/Enums/ErrorCodes.cs deleted file mode 100644 index cbcb8f43..00000000 --- a/native_client/dotnet/DeepSpeechClient/Enums/ErrorCodes.cs +++ /dev/null @@ -1,33 +0,0 @@ -namespace DeepSpeechClient.Enums -{ - /// - /// Error codes from the native DeepSpeech binary. 
- /// - internal enum ErrorCodes - { - // OK - DS_ERR_OK = 0x0000, - - // Missing invormations - DS_ERR_NO_MODEL = 0x1000, - - // Invalid parameters - DS_ERR_INVALID_ALPHABET = 0x2000, - DS_ERR_INVALID_SHAPE = 0x2001, - DS_ERR_INVALID_SCORER = 0x2002, - DS_ERR_MODEL_INCOMPATIBLE = 0x2003, - DS_ERR_SCORER_NOT_ENABLED = 0x2004, - - // Runtime failures - DS_ERR_FAIL_INIT_MMAP = 0x3000, - DS_ERR_FAIL_INIT_SESS = 0x3001, - DS_ERR_FAIL_INTERPRETER = 0x3002, - DS_ERR_FAIL_RUN_SESS = 0x3003, - DS_ERR_FAIL_CREATE_STREAM = 0x3004, - DS_ERR_FAIL_READ_PROTOBUF = 0x3005, - DS_ERR_FAIL_CREATE_SESS = 0x3006, - DS_ERR_FAIL_INSERT_HOTWORD = 0x3008, - DS_ERR_FAIL_CLEAR_HOTWORD = 0x3009, - DS_ERR_FAIL_ERASE_HOTWORD = 0x3010 - } -} diff --git a/native_client/dotnet/DeepSpeech.sln b/native_client/dotnet/STT.sln similarity index 79% rename from native_client/dotnet/DeepSpeech.sln rename to native_client/dotnet/STT.sln index 78afe7db..58fd6c8e 100644 --- a/native_client/dotnet/DeepSpeech.sln +++ b/native_client/dotnet/STT.sln @@ -2,9 +2,9 @@ Microsoft Visual Studio Solution File, Format Version 12.00 # Visual Studio Version 16 VisualStudioVersion = 16.0.30204.135 MinimumVisualStudioVersion = 10.0.40219.1 -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "DeepSpeechClient", "DeepSpeechClient\DeepSpeechClient.csproj", "{56DE4091-BBBE-47E4-852D-7268B33B971F}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "STTClient", "STTClient\STTClient.csproj", "{56DE4091-BBBE-47E4-852D-7268B33B971F}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "DeepSpeechConsole", "DeepSpeechConsole\DeepSpeechConsole.csproj", "{312965E5-C4F6-4D95-BA64-79906B8BC7AC}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "STTConsole", "STTConsole\STTConsole.csproj", "{312965E5-C4F6-4D95-BA64-79906B8BC7AC}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution diff --git a/native_client/dotnet/STTClient/Enums/ErrorCodes.cs b/native_client/dotnet/STTClient/Enums/ErrorCodes.cs new file mode 100644 index 00000000..b3e76456 --- /dev/null +++ b/native_client/dotnet/STTClient/Enums/ErrorCodes.cs @@ -0,0 +1,33 @@ +namespace STTClient.Enums +{ + /// + /// Error codes from the native Coqui STT binary. 
+ /// + internal enum ErrorCodes + { + // OK + STT_ERR_OK = 0x0000, + + // Missing information + STT_ERR_NO_MODEL = 0x1000, + + // Invalid parameters + STT_ERR_INVALID_ALPHABET = 0x2000, + STT_ERR_INVALID_SHAPE = 0x2001, + STT_ERR_INVALID_SCORER = 0x2002, + STT_ERR_MODEL_INCOMPATIBLE = 0x2003, + STT_ERR_SCORER_NOT_ENABLED = 0x2004, + + // Runtime failures + STT_ERR_FAIL_INIT_MMAP = 0x3000, + STT_ERR_FAIL_INIT_SESS = 0x3001, + STT_ERR_FAIL_INTERPRETER = 0x3002, + STT_ERR_FAIL_RUN_SESS = 0x3003, + STT_ERR_FAIL_CREATE_STREAM = 0x3004, + STT_ERR_FAIL_READ_PROTOBUF = 0x3005, + STT_ERR_FAIL_CREATE_SESS = 0x3006, + STT_ERR_FAIL_INSERT_HOTWORD = 0x3008, + STT_ERR_FAIL_CLEAR_HOTWORD = 0x3009, + STT_ERR_FAIL_ERASE_HOTWORD = 0x3010 + } +} diff --git a/native_client/dotnet/DeepSpeechClient/Extensions/NativeExtensions.cs b/native_client/dotnet/STTClient/Extensions/NativeExtensions.cs similarity index 95% rename from native_client/dotnet/DeepSpeechClient/Extensions/NativeExtensions.cs rename to native_client/dotnet/STTClient/Extensions/NativeExtensions.cs index 9325f4b8..297a311d 100644 --- a/native_client/dotnet/DeepSpeechClient/Extensions/NativeExtensions.cs +++ b/native_client/dotnet/STTClient/Extensions/NativeExtensions.cs @@ -1,9 +1,9 @@ -using DeepSpeechClient.Structs; +using STTClient.Structs; using System; using System.Runtime.InteropServices; using System.Text; -namespace DeepSpeechClient.Extensions +namespace STTClient.Extensions { internal static class NativeExtensions { @@ -20,7 +20,7 @@ namespace DeepSpeechClient.Extensions byte[] buffer = new byte[len]; Marshal.Copy(intPtr, buffer, 0, buffer.Length); if (releasePtr) - NativeImp.DS_FreeString(intPtr); + NativeImp.STT_FreeString(intPtr); string result = Encoding.UTF8.GetString(buffer); return result; } @@ -86,7 +86,7 @@ namespace DeepSpeechClient.Extensions metadata.transcripts += sizeOfCandidateTranscript; } - NativeImp.DS_FreeMetadata(intPtr); + NativeImp.STT_FreeMetadata(intPtr); return managedMetadata; } } diff --git a/native_client/dotnet/DeepSpeechClient/Interfaces/IDeepSpeech.cs b/native_client/dotnet/STTClient/Interfaces/ISTT.cs similarity index 88% rename from native_client/dotnet/DeepSpeechClient/Interfaces/IDeepSpeech.cs rename to native_client/dotnet/STTClient/Interfaces/ISTT.cs index fca21a57..7486796d 100644 --- a/native_client/dotnet/DeepSpeechClient/Interfaces/IDeepSpeech.cs +++ b/native_client/dotnet/STTClient/Interfaces/ISTT.cs @@ -1,13 +1,13 @@ -using DeepSpeechClient.Models; +using STTClient.Models; using System; using System.IO; -namespace DeepSpeechClient.Interfaces +namespace STTClient.Interfaces { /// - /// Client interface for DeepSpeech + /// Client interface for Coqui STT /// - public interface IDeepSpeech : IDisposable + public interface ISTT : IDisposable { /// /// Return version of this library. The returned version is a semantic version @@ -80,7 +80,7 @@ namespace DeepSpeechClient.Interfaces unsafe void SetScorerAlphaBeta(float aAlpha, float aBeta); /// - /// Use the DeepSpeech model to perform Speech-To-Text. + /// Use the STT model to perform Speech-To-Text. /// /// A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on). /// The number of samples in the audio signal. /// unsafe string SpeechToText(short[] aBuffer, uint aBufferSize); /// - /// Use the DeepSpeech model to perform Speech-To-Text, return results including metadata. + /// Use the STT model to perform Speech-To-Text, return results including metadata.
/// /// A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on). /// The number of samples in the audio signal. @@ -104,26 +104,26 @@ namespace DeepSpeechClient.Interfaces /// This can be used if you no longer need the result of an ongoing streaming /// inference and don't want to perform a costly decode operation. /// - unsafe void FreeStream(DeepSpeechStream stream); + unsafe void FreeStream(Stream stream); /// /// Creates a new streaming inference state. /// - unsafe DeepSpeechStream CreateStream(); + unsafe Stream CreateStream(); /// /// Feeds audio samples to an ongoing streaming inference. /// /// Instance of the stream to feed the data. /// An array of 16-bit, mono raw audio samples at the appropriate sample rate (matching what the model was trained on). - unsafe void FeedAudioContent(DeepSpeechStream stream, short[] aBuffer, uint aBufferSize); + unsafe void FeedAudioContent(Stream stream, short[] aBuffer, uint aBufferSize); /// /// Computes the intermediate decoding of an ongoing streaming inference. /// /// Instance of the stream to decode. /// The STT intermediate result. - unsafe string IntermediateDecode(DeepSpeechStream stream); + unsafe string IntermediateDecode(Stream stream); /// /// Computes the intermediate decoding of an ongoing streaming inference, including metadata. @@ -131,14 +131,14 @@ namespace DeepSpeechClient.Interfaces /// Instance of the stream to decode. /// Maximum number of candidate transcripts to return. Returned list might be smaller than this. /// The extended metadata result. - unsafe Metadata IntermediateDecodeWithMetadata(DeepSpeechStream stream, uint aNumResults); + unsafe Metadata IntermediateDecodeWithMetadata(Stream stream, uint aNumResults); /// /// Closes the ongoing streaming inference, returns the STT result over the whole audio signal. /// /// Instance of the stream to finish. /// The STT result. - unsafe string FinishStream(DeepSpeechStream stream); + unsafe string FinishStream(Stream stream); /// /// Closes the ongoing streaming inference, returns the STT result over the whole audio signal, including metadata. @@ -146,6 +146,6 @@ namespace DeepSpeechClient.Interfaces /// Instance of the stream to finish. /// Maximum number of candidate transcripts to return. Returned list might be smaller than this. /// The extended metadata result. - unsafe Metadata FinishStreamWithMetadata(DeepSpeechStream stream, uint aNumResults); + unsafe Metadata FinishStreamWithMetadata(Stream stream, uint aNumResults); } } diff --git a/native_client/dotnet/DeepSpeechClient/Models/CandidateTranscript.cs b/native_client/dotnet/STTClient/Models/CandidateTranscript.cs similarity index 93% rename from native_client/dotnet/DeepSpeechClient/Models/CandidateTranscript.cs rename to native_client/dotnet/STTClient/Models/CandidateTranscript.cs index cc6b5d28..f158e2c2 100644 --- a/native_client/dotnet/DeepSpeechClient/Models/CandidateTranscript.cs +++ b/native_client/dotnet/STTClient/Models/CandidateTranscript.cs @@ -1,4 +1,4 @@ -namespace DeepSpeechClient.Models +namespace STTClient.Models { /// /// Stores the entire CTC output as an array of character metadata objects. 
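These managed models are thin mirrors of the native Metadata / CandidateTranscript / TokenMetadata structs. A short C sketch of reading the same data through the renamed API (field names as declared in native_client/coqui-stt.h; requesting a single candidate is an arbitrary choice here):

    #include <stdio.h>
    #include "coqui-stt.h"

    /* Print the best transcript with per-token timing information. */
    void print_best_transcript(ModelState* ctx, const short* buffer,
                               unsigned int buffer_size)
    {
      Metadata* m = STT_SpeechToTextWithMetadata(ctx, buffer, buffer_size, 1);
      if (m == NULL)
        return;  /* NULL on error */
      const CandidateTranscript* t = &m->transcripts[0];
      printf("confidence: %f\n", t->confidence);
      for (unsigned int i = 0; i < t->num_tokens; ++i) {
        printf("%s (starts at %.2fs)\n", t->tokens[i].text,
               t->tokens[i].start_time);
      }
      STT_FreeMetadata(m);  /* frees the transcripts and their tokens */
    }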
diff --git a/native_client/dotnet/DeepSpeechClient/Models/Metadata.cs b/native_client/dotnet/STTClient/Models/Metadata.cs similarity index 89% rename from native_client/dotnet/DeepSpeechClient/Models/Metadata.cs rename to native_client/dotnet/STTClient/Models/Metadata.cs index fb6c613d..537a22e8 100644 --- a/native_client/dotnet/DeepSpeechClient/Models/Metadata.cs +++ b/native_client/dotnet/STTClient/Models/Metadata.cs @@ -1,4 +1,4 @@ -namespace DeepSpeechClient.Models +namespace STTClient.Models { /// /// Stores the entire CTC output as an array of character metadata objects. diff --git a/native_client/dotnet/DeepSpeechClient/Models/DeepSpeechStream.cs b/native_client/dotnet/STTClient/Models/Stream.cs similarity index 81% rename from native_client/dotnet/DeepSpeechClient/Models/DeepSpeechStream.cs rename to native_client/dotnet/STTClient/Models/Stream.cs index e4605f5e..49f92dfa 100644 --- a/native_client/dotnet/DeepSpeechClient/Models/DeepSpeechStream.cs +++ b/native_client/dotnet/STTClient/Models/Stream.cs @@ -1,19 +1,19 @@ using System; -namespace DeepSpeechClient.Models +namespace STTClient.Models { /// /// Wrapper of the pointer used for the decoding stream. /// - public class DeepSpeechStream : IDisposable + public class Stream : IDisposable { private unsafe IntPtr** _streamingStatePp; /// - /// Initializes a new instance of . + /// Initializes a new instance of . /// /// Native pointer of the native stream. - public unsafe DeepSpeechStream(IntPtr** streamingStatePP) + public unsafe Stream(IntPtr** streamingStatePP) { _streamingStatePp = streamingStatePP; } diff --git a/native_client/dotnet/DeepSpeechClient/Models/TokenMetadata.cs b/native_client/dotnet/STTClient/Models/TokenMetadata.cs similarity index 93% rename from native_client/dotnet/DeepSpeechClient/Models/TokenMetadata.cs rename to native_client/dotnet/STTClient/Models/TokenMetadata.cs index 5f2dea56..c5ef94d8 100644 --- a/native_client/dotnet/DeepSpeechClient/Models/TokenMetadata.cs +++ b/native_client/dotnet/STTClient/Models/TokenMetadata.cs @@ -1,4 +1,4 @@ -namespace DeepSpeechClient.Models +namespace STTClient.Models { /// /// Stores each individual character, along with its timing information. 
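The Stream model above wraps the native streaming state; the C-side lifecycle it abstracts looks roughly like the following sketch, mirroring the loop in native_client/client.cc (the 2048-sample chunk size is an arbitrary illustration):

    #include <stdio.h>
    #include "coqui-stt.h"

    /* Feed audio in fixed-size chunks and print partial results as we go.
     * Returns the final transcript; the caller frees it with STT_FreeString. */
    char* stream_transcribe(ModelState* ctx, const short* buffer,
                            unsigned int buffer_size)
    {
      StreamingState* sctx;
      if (STT_CreateStream(ctx, &sctx) != STT_ERR_OK)
        return NULL;
      const unsigned int chunk = 2048;  /* illustrative, not a required size */
      for (unsigned int off = 0; off < buffer_size; off += chunk) {
        unsigned int cur = buffer_size - off < chunk ? buffer_size - off : chunk;
        STT_FeedAudioContent(sctx, buffer + off, cur);
        char* partial = STT_IntermediateDecode(sctx);
        printf("partial: %s\n", partial);
        STT_FreeString(partial);
      }
      /* STT_FinishStream also frees the streaming state itself. */
      return STT_FinishStream(sctx);
    }

For abandoning a stream without the final (costly) decode, STT_FreeStream releases the state instead.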
diff --git a/native_client/dotnet/DeepSpeechClient/NativeImp.cs b/native_client/dotnet/STTClient/NativeImp.cs similarity index 59% rename from native_client/dotnet/DeepSpeechClient/NativeImp.cs rename to native_client/dotnet/STTClient/NativeImp.cs index 49532360..a3491171 100644 --- a/native_client/dotnet/DeepSpeechClient/NativeImp.cs +++ b/native_client/dotnet/STTClient/NativeImp.cs @@ -1,9 +1,9 @@ -using DeepSpeechClient.Enums; +using STTClient.Enums; using System; using System.Runtime.InteropServices; -namespace DeepSpeechClient +namespace STTClient { /// /// Wrapper for the native implementation of "libstt.so" @@ -13,101 +13,101 @@ namespace DeepSpeechClient #region Native Implementation [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi, SetLastError = true)] - internal static extern IntPtr DS_Version(); + internal static extern IntPtr STT_Version(); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] - internal unsafe static extern ErrorCodes DS_CreateModel(string aModelPath, + internal unsafe static extern ErrorCodes STT_CreateModel(string aModelPath, ref IntPtr** pint); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] - internal unsafe static extern IntPtr DS_ErrorCodeToErrorMessage(int aErrorCode); + internal unsafe static extern IntPtr STT_ErrorCodeToErrorMessage(int aErrorCode); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] - internal unsafe static extern uint DS_GetModelBeamWidth(IntPtr** aCtx); + internal unsafe static extern uint STT_GetModelBeamWidth(IntPtr** aCtx); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] - internal unsafe static extern ErrorCodes DS_SetModelBeamWidth(IntPtr** aCtx, + internal unsafe static extern ErrorCodes STT_SetModelBeamWidth(IntPtr** aCtx, uint aBeamWidth); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] - internal unsafe static extern ErrorCodes DS_CreateModel(string aModelPath, + internal unsafe static extern ErrorCodes STT_CreateModel(string aModelPath, uint aBeamWidth, ref IntPtr** pint); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] - internal unsafe static extern int DS_GetModelSampleRate(IntPtr** aCtx); + internal unsafe static extern int STT_GetModelSampleRate(IntPtr** aCtx); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] - internal static unsafe extern ErrorCodes DS_EnableExternalScorer(IntPtr** aCtx, + internal static unsafe extern ErrorCodes STT_EnableExternalScorer(IntPtr** aCtx, string aScorerPath); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] - internal static unsafe extern ErrorCodes DS_AddHotWord(IntPtr** aCtx, + internal static unsafe extern ErrorCodes STT_AddHotWord(IntPtr** aCtx, string aWord, float aBoost); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] - internal static unsafe extern ErrorCodes DS_EraseHotWord(IntPtr** aCtx, + internal static unsafe extern ErrorCodes STT_EraseHotWord(IntPtr** aCtx, string aWord); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] - internal static unsafe extern ErrorCodes DS_ClearHotWords(IntPtr** aCtx); + internal static unsafe extern ErrorCodes STT_ClearHotWords(IntPtr** aCtx); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] - internal static unsafe extern ErrorCodes DS_DisableExternalScorer(IntPtr** aCtx); + internal static unsafe extern ErrorCodes STT_DisableExternalScorer(IntPtr** aCtx); 
[DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] - internal static unsafe extern ErrorCodes DS_SetScorerAlphaBeta(IntPtr** aCtx, + internal static unsafe extern ErrorCodes STT_SetScorerAlphaBeta(IntPtr** aCtx, float aAlpha, float aBeta); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi, SetLastError = true)] - internal static unsafe extern IntPtr DS_SpeechToText(IntPtr** aCtx, + internal static unsafe extern IntPtr STT_SpeechToText(IntPtr** aCtx, short[] aBuffer, uint aBufferSize); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl, SetLastError = true)] - internal static unsafe extern IntPtr DS_SpeechToTextWithMetadata(IntPtr** aCtx, + internal static unsafe extern IntPtr STT_SpeechToTextWithMetadata(IntPtr** aCtx, short[] aBuffer, uint aBufferSize, uint aNumResults); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] - internal static unsafe extern void DS_FreeModel(IntPtr** aCtx); + internal static unsafe extern void STT_FreeModel(IntPtr** aCtx); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] - internal static unsafe extern ErrorCodes DS_CreateStream(IntPtr** aCtx, + internal static unsafe extern ErrorCodes STT_CreateStream(IntPtr** aCtx, ref IntPtr** retval); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] - internal static unsafe extern void DS_FreeStream(IntPtr** aSctx); + internal static unsafe extern void STT_FreeStream(IntPtr** aSctx); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] - internal static unsafe extern void DS_FreeMetadata(IntPtr metadata); + internal static unsafe extern void STT_FreeMetadata(IntPtr metadata); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] - internal static unsafe extern void DS_FreeString(IntPtr str); + internal static unsafe extern void STT_FreeString(IntPtr str); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi, SetLastError = true)] - internal static unsafe extern void DS_FeedAudioContent(IntPtr** aSctx, + internal static unsafe extern void STT_FeedAudioContent(IntPtr** aSctx, short[] aBuffer, uint aBufferSize); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] - internal static unsafe extern IntPtr DS_IntermediateDecode(IntPtr** aSctx); + internal static unsafe extern IntPtr STT_IntermediateDecode(IntPtr** aSctx); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] - internal static unsafe extern IntPtr DS_IntermediateDecodeWithMetadata(IntPtr** aSctx, + internal static unsafe extern IntPtr STT_IntermediateDecodeWithMetadata(IntPtr** aSctx, uint aNumResults); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi, SetLastError = true)] - internal static unsafe extern IntPtr DS_FinishStream(IntPtr** aSctx); + internal static unsafe extern IntPtr STT_FinishStream(IntPtr** aSctx); [DllImport("libstt.so", CallingConvention = CallingConvention.Cdecl)] - internal static unsafe extern IntPtr DS_FinishStreamWithMetadata(IntPtr** aSctx, + internal static unsafe extern IntPtr STT_FinishStreamWithMetadata(IntPtr** aSctx, uint aNumResults); #endregion } diff --git a/native_client/dotnet/DeepSpeechClient/DeepSpeech.cs b/native_client/dotnet/STTClient/STT.cs similarity index 75% rename from native_client/dotnet/DeepSpeechClient/DeepSpeech.cs rename to native_client/dotnet/STTClient/STT.cs index 79b276c2..60eeda9f 100644 --- 
a/native_client/dotnet/DeepSpeechClient/DeepSpeech.cs +++ b/native_client/dotnet/STTClient/STT.cs @@ -1,34 +1,34 @@ -using DeepSpeechClient.Interfaces; -using DeepSpeechClient.Extensions; +using STTClient.Interfaces; +using STTClient.Extensions; using System; using System.IO; -using DeepSpeechClient.Enums; -using DeepSpeechClient.Models; +using STTClient.Enums; +using STTClient.Models; -namespace DeepSpeechClient +namespace STTClient { /// - /// Concrete implementation of . + /// Concrete implementation of . /// - public class DeepSpeech : IDeepSpeech + public class STT : ISTT { private unsafe IntPtr** _modelStatePP; - + /// - /// Initializes a new instance of class and creates a new acoustic model. + /// Initializes a new instance of class and creates a new acoustic model. /// /// The path to the frozen model graph. /// Thrown when the native binary failed to create the model. - public DeepSpeech(string aModelPath) + public STT(string aModelPath) { CreateModel(aModelPath); } - #region IDeepSpeech + #region ISTT /// - /// Create an object providing an interface to a trained DeepSpeech model. + /// Create an object providing an interface to a trained STT model. /// /// The path to the frozen model graph. /// Thrown when the native binary failed to create the model. @@ -48,7 +48,7 @@ namespace DeepSpeechClient { throw new FileNotFoundException(exceptionMessage); } - var resultCode = NativeImp.DS_CreateModel(aModelPath, + var resultCode = NativeImp.STT_CreateModel(aModelPath, ref _modelStatePP); EvaluateResultCode(resultCode); } @@ -60,7 +60,7 @@ namespace DeepSpeechClient /// Beam width value used by the model. public unsafe uint GetModelBeamWidth() { - return NativeImp.DS_GetModelBeamWidth(_modelStatePP); + return NativeImp.STT_GetModelBeamWidth(_modelStatePP); } /// @@ -70,13 +70,13 @@ namespace DeepSpeechClient /// Thrown on failure. public unsafe void SetModelBeamWidth(uint aBeamWidth) { - var resultCode = NativeImp.DS_SetModelBeamWidth(_modelStatePP, aBeamWidth); + var resultCode = NativeImp.STT_SetModelBeamWidth(_modelStatePP, aBeamWidth); EvaluateResultCode(resultCode); } /// /// Add a hot-word. - /// + /// /// Words that don't occur in the scorer (e.g. proper nouns) or strings that contain spaces won't be taken into account. /// /// Some word @@ -84,7 +84,7 @@ namespace DeepSpeechClient /// Thrown on failure. public unsafe void AddHotWord(string aWord, float aBoost) { - var resultCode = NativeImp.DS_AddHotWord(_modelStatePP, aWord, aBoost); + var resultCode = NativeImp.STT_AddHotWord(_modelStatePP, aWord, aBoost); EvaluateResultCode(resultCode); } @@ -95,7 +95,7 @@ namespace DeepSpeechClient /// Thrown on failure. public unsafe void EraseHotWord(string aWord) { - var resultCode = NativeImp.DS_EraseHotWord(_modelStatePP, aWord); + var resultCode = NativeImp.STT_EraseHotWord(_modelStatePP, aWord); EvaluateResultCode(resultCode); } @@ -105,7 +105,7 @@ namespace DeepSpeechClient /// Thrown on failure. public unsafe void ClearHotWords() { - var resultCode = NativeImp.DS_ClearHotWords(_modelStatePP); + var resultCode = NativeImp.STT_ClearHotWords(_modelStatePP); EvaluateResultCode(resultCode); } @@ -115,7 +115,7 @@ namespace DeepSpeechClient /// Sample rate. public unsafe int GetModelSampleRate() { - return NativeImp.DS_GetModelSampleRate(_modelStatePP); + return NativeImp.STT_GetModelSampleRate(_modelStatePP); } /// @@ -124,9 +124,9 @@ namespace DeepSpeechClient /// Native result code. 
private void EvaluateResultCode(ErrorCodes resultCode) { - if (resultCode != ErrorCodes.DS_ERR_OK) + if (resultCode != ErrorCodes.STT_ERR_OK) { - throw new ArgumentException(NativeImp.DS_ErrorCodeToErrorMessage((int)resultCode).PtrToString()); + throw new ArgumentException(NativeImp.STT_ErrorCodeToErrorMessage((int)resultCode).PtrToString()); } } @@ -135,7 +135,7 @@ namespace DeepSpeechClient /// public unsafe void Dispose() { - NativeImp.DS_FreeModel(_modelStatePP); + NativeImp.STT_FreeModel(_modelStatePP); } /// @@ -155,7 +155,7 @@ namespace DeepSpeechClient throw new FileNotFoundException($"Cannot find the scorer file: {aScorerPath}"); } - var resultCode = NativeImp.DS_EnableExternalScorer(_modelStatePP, aScorerPath); + var resultCode = NativeImp.STT_EnableExternalScorer(_modelStatePP, aScorerPath); EvaluateResultCode(resultCode); } @@ -165,7 +165,7 @@ namespace DeepSpeechClient /// Thrown when an external scorer is not enabled. public unsafe void DisableExternalScorer() { - var resultCode = NativeImp.DS_DisableExternalScorer(_modelStatePP); + var resultCode = NativeImp.STT_DisableExternalScorer(_modelStatePP); EvaluateResultCode(resultCode); } @@ -177,7 +177,7 @@ namespace DeepSpeechClient /// Thrown when an external scorer is not enabled. public unsafe void SetScorerAlphaBeta(float aAlpha, float aBeta) { - var resultCode = NativeImp.DS_SetScorerAlphaBeta(_modelStatePP, + var resultCode = NativeImp.STT_SetScorerAlphaBeta(_modelStatePP, aAlpha, aBeta); EvaluateResultCode(resultCode); @@ -188,9 +188,9 @@ namespace DeepSpeechClient /// /// Instance of the stream to feed the data. /// An array of 16-bit, mono raw audio samples at the appropriate sample rate (matching what the model was trained on). - public unsafe void FeedAudioContent(DeepSpeechStream stream, short[] aBuffer, uint aBufferSize) + public unsafe void FeedAudioContent(Stream stream, short[] aBuffer, uint aBufferSize) { - NativeImp.DS_FeedAudioContent(stream.GetNativePointer(), aBuffer, aBufferSize); + NativeImp.STT_FeedAudioContent(stream.GetNativePointer(), aBuffer, aBufferSize); } /// @@ -198,9 +198,9 @@ namespace DeepSpeechClient /// /// Instance of the stream to finish. /// The STT result. - public unsafe string FinishStream(DeepSpeechStream stream) + public unsafe string FinishStream(Stream stream) { - return NativeImp.DS_FinishStream(stream.GetNativePointer()).PtrToString(); + return NativeImp.STT_FinishStream(stream.GetNativePointer()).PtrToString(); } /// @@ -209,9 +209,9 @@ namespace DeepSpeechClient /// Instance of the stream to finish. /// Maximum number of candidate transcripts to return. Returned list might be smaller than this. /// The extended metadata result. - public unsafe Metadata FinishStreamWithMetadata(DeepSpeechStream stream, uint aNumResults) + public unsafe Metadata FinishStreamWithMetadata(Stream stream, uint aNumResults) { - return NativeImp.DS_FinishStreamWithMetadata(stream.GetNativePointer(), aNumResults).PtrToMetadata(); + return NativeImp.STT_FinishStreamWithMetadata(stream.GetNativePointer(), aNumResults).PtrToMetadata(); } /// @@ -219,9 +219,9 @@ namespace DeepSpeechClient /// /// Instance of the stream to decode. /// The STT intermediate result. 
- public unsafe string IntermediateDecode(DeepSpeechStream stream) + public unsafe string IntermediateDecode(Stream stream) { - return NativeImp.DS_IntermediateDecode(stream.GetNativePointer()).PtrToString(); + return NativeImp.STT_IntermediateDecode(stream.GetNativePointer()).PtrToString(); } /// @@ -230,9 +230,9 @@ namespace DeepSpeechClient /// Instance of the stream to decode. /// Maximum number of candidate transcripts to return. Returned list might be smaller than this. /// The STT intermediate result. - public unsafe Metadata IntermediateDecodeWithMetadata(DeepSpeechStream stream, uint aNumResults) + public unsafe Metadata IntermediateDecodeWithMetadata(Stream stream, uint aNumResults) { - return NativeImp.DS_IntermediateDecodeWithMetadata(stream.GetNativePointer(), aNumResults).PtrToMetadata(); + return NativeImp.STT_IntermediateDecodeWithMetadata(stream.GetNativePointer(), aNumResults).PtrToMetadata(); } /// @@ -241,18 +241,18 @@ namespace DeepSpeechClient /// public unsafe string Version() { - return NativeImp.DS_Version().PtrToString(); + return NativeImp.STT_Version().PtrToString(); } /// /// Creates a new streaming inference state. /// - public unsafe DeepSpeechStream CreateStream() + public unsafe Stream CreateStream() { IntPtr** streamingStatePointer = null; - var resultCode = NativeImp.DS_CreateStream(_modelStatePP, ref streamingStatePointer); + var resultCode = NativeImp.STT_CreateStream(_modelStatePP, ref streamingStatePointer); EvaluateResultCode(resultCode); - return new DeepSpeechStream(streamingStatePointer); + return new Stream(streamingStatePointer); } /// @@ -260,25 +260,25 @@ namespace DeepSpeechClient /// This can be used if you no longer need the result of an ongoing streaming /// inference and don't want to perform a costly decode operation. /// - public unsafe void FreeStream(DeepSpeechStream stream) + public unsafe void FreeStream(Stream stream) { - NativeImp.DS_FreeStream(stream.GetNativePointer()); + NativeImp.STT_FreeStream(stream.GetNativePointer()); stream.Dispose(); } /// - /// Use the DeepSpeech model to perform Speech-To-Text. + /// Use the STT model to perform Speech-To-Text. /// /// A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on). /// The number of samples in the audio signal. /// The STT result. Returns NULL on error. public unsafe string SpeechToText(short[] aBuffer, uint aBufferSize) { - return NativeImp.DS_SpeechToText(_modelStatePP, aBuffer, aBufferSize).PtrToString(); + return NativeImp.STT_SpeechToText(_modelStatePP, aBuffer, aBufferSize).PtrToString(); } /// - /// Use the DeepSpeech model to perform Speech-To-Text, return results including metadata. + /// Use the STT model to perform Speech-To-Text, return results including metadata. /// /// A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on). /// The number of samples in the audio signal. @@ -286,7 +286,7 @@ namespace DeepSpeechClient /// The extended metadata. Returns NULL on error. 
         public unsafe Metadata SpeechToTextWithMetadata(short[] aBuffer, uint aBufferSize, uint aNumResults)
         {
-            return NativeImp.DS_SpeechToTextWithMetadata(_modelStatePP, aBuffer, aBufferSize, aNumResults).PtrToMetadata();
+            return NativeImp.STT_SpeechToTextWithMetadata(_modelStatePP, aBuffer, aBufferSize, aNumResults).PtrToMetadata();
         }
         #endregion
diff --git a/native_client/dotnet/DeepSpeechClient/DeepSpeechClient.csproj b/native_client/dotnet/STTClient/STTClient.csproj
similarity index 100%
rename from native_client/dotnet/DeepSpeechClient/DeepSpeechClient.csproj
rename to native_client/dotnet/STTClient/STTClient.csproj
diff --git a/native_client/dotnet/DeepSpeechClient/Structs/CandidateTranscript.cs b/native_client/dotnet/STTClient/Structs/CandidateTranscript.cs
similarity index 94%
rename from native_client/dotnet/DeepSpeechClient/Structs/CandidateTranscript.cs
rename to native_client/dotnet/STTClient/Structs/CandidateTranscript.cs
index 54581f6f..4743810b 100644
--- a/native_client/dotnet/DeepSpeechClient/Structs/CandidateTranscript.cs
+++ b/native_client/dotnet/STTClient/Structs/CandidateTranscript.cs
@@ -1,7 +1,7 @@
 using System;
 using System.Runtime.InteropServices;

-namespace DeepSpeechClient.Structs
+namespace STTClient.Structs
 {
     [StructLayout(LayoutKind.Sequential)]
     internal unsafe struct CandidateTranscript
diff --git a/native_client/dotnet/DeepSpeechClient/Structs/Metadata.cs b/native_client/dotnet/STTClient/Structs/Metadata.cs
similarity index 92%
rename from native_client/dotnet/DeepSpeechClient/Structs/Metadata.cs
rename to native_client/dotnet/STTClient/Structs/Metadata.cs
index 0a9beddc..f2db6bcd 100644
--- a/native_client/dotnet/DeepSpeechClient/Structs/Metadata.cs
+++ b/native_client/dotnet/STTClient/Structs/Metadata.cs
@@ -1,7 +1,7 @@
 using System;
 using System.Runtime.InteropServices;

-namespace DeepSpeechClient.Structs
+namespace STTClient.Structs
 {
     [StructLayout(LayoutKind.Sequential)]
     internal unsafe struct Metadata
diff --git a/native_client/dotnet/DeepSpeechClient/Structs/TokenMetadata.cs b/native_client/dotnet/STTClient/Structs/TokenMetadata.cs
similarity index 93%
rename from native_client/dotnet/DeepSpeechClient/Structs/TokenMetadata.cs
rename to native_client/dotnet/STTClient/Structs/TokenMetadata.cs
index 1c660c71..a21c1d26 100644
--- a/native_client/dotnet/DeepSpeechClient/Structs/TokenMetadata.cs
+++ b/native_client/dotnet/STTClient/Structs/TokenMetadata.cs
@@ -1,7 +1,7 @@
 using System;
 using System.Runtime.InteropServices;

-namespace DeepSpeechClient.Structs
+namespace STTClient.Structs
 {
     [StructLayout(LayoutKind.Sequential)]
     internal unsafe struct TokenMetadata
diff --git a/native_client/dotnet/DeepSpeechConsole/App.config b/native_client/dotnet/STTConsole/App.config
similarity index 100%
rename from native_client/dotnet/DeepSpeechConsole/App.config
rename to native_client/dotnet/STTConsole/App.config
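Taken together, the renamed client surface composes as in the following minimal sketch. It is illustrative only, not part of the patch: the STT, ISTT and Stream types and their methods are the ones renamed in the hunks above, while the model and scorer file names and the silent audio buffer are placeholders.

    using System;
    using STTClient;
    using STTClient.Interfaces;
    using STTClient.Models;

    class UsageSketch
    {
        static void Main()
        {
            // Placeholder file names; substitute a real acoustic model and scorer.
            using (ISTT stt = new STT("coqui-stt-0.8.0-models.pbmm"))
            {
                stt.EnableExternalScorer("kenlm.scorer");

                // One second of silence at 16 kHz stands in for real audio.
                short[] audio = new short[16000];

                // One-shot decoding of a complete buffer.
                Console.WriteLine(stt.SpeechToText(audio, (uint)audio.Length));

                // Streaming: create a stream, feed chunks, peek, then finish.
                Stream stream = stt.CreateStream();
                stt.FeedAudioContent(stream, audio, (uint)audio.Length);
                Console.WriteLine(stt.IntermediateDecode(stream));
                Console.WriteLine(stt.FinishStream(stream));
            }
        }
    }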
Console.WriteLine("Loading model..."); stopwatch.Start(); // sphinx-doc: csharp_ref_model_start - using (IDeepSpeech sttClient = new DeepSpeech(model ?? "output_graph.pbmm")) + using (ISTT sttClient = new STT(model ?? "output_graph.pbmm")) { // sphinx-doc: csharp_ref_model_stop stopwatch.Stop(); diff --git a/native_client/dotnet/DeepSpeechConsole/Properties/AssemblyInfo.cs b/native_client/dotnet/STTConsole/Properties/AssemblyInfo.cs similarity index 85% rename from native_client/dotnet/DeepSpeechConsole/Properties/AssemblyInfo.cs rename to native_client/dotnet/STTConsole/Properties/AssemblyInfo.cs index 845851a1..f7600c7c 100644 --- a/native_client/dotnet/DeepSpeechConsole/Properties/AssemblyInfo.cs +++ b/native_client/dotnet/STTConsole/Properties/AssemblyInfo.cs @@ -5,12 +5,12 @@ using System.Runtime.InteropServices; // General Information about an assembly is controlled through the following // set of attributes. Change these attribute values to modify the information // associated with an assembly. -[assembly: AssemblyTitle("DeepSpeechConsole")] +[assembly: AssemblyTitle("STTConsole")] [assembly: AssemblyDescription("")] [assembly: AssemblyConfiguration("")] -[assembly: AssemblyCompany("")] -[assembly: AssemblyProduct("CSharpExamples")] -[assembly: AssemblyCopyright("Copyright © 2018")] +[assembly: AssemblyCompany("Coqui GmbH")] +[assembly: AssemblyProduct("STTConsole")] +[assembly: AssemblyCopyright("Copyright © 2018-2020 Mozilla, © 2021 Coqui GmbH")] [assembly: AssemblyTrademark("")] [assembly: AssemblyCulture("")] diff --git a/native_client/dotnet/DeepSpeechConsole/DeepSpeechConsole.csproj b/native_client/dotnet/STTConsole/STTConsole.csproj similarity index 93% rename from native_client/dotnet/DeepSpeechConsole/DeepSpeechConsole.csproj rename to native_client/dotnet/STTConsole/STTConsole.csproj index a05fca61..54e11eb0 100644 --- a/native_client/dotnet/DeepSpeechConsole/DeepSpeechConsole.csproj +++ b/native_client/dotnet/STTConsole/STTConsole.csproj @@ -6,8 +6,8 @@ AnyCPU {312965E5-C4F6-4D95-BA64-79906B8BC7AC} Exe - DeepSpeechConsole - DeepSpeechConsole + STTConsole + STTConsole v4.6.2 512 true @@ -56,9 +56,9 @@ - + {56DE4091-BBBE-47E4-852D-7268B33B971F} - DeepSpeechClient + STTClient diff --git a/native_client/dotnet/DeepSpeechConsole/arctic_a0024.wav b/native_client/dotnet/STTConsole/arctic_a0024.wav similarity index 100% rename from native_client/dotnet/DeepSpeechConsole/arctic_a0024.wav rename to native_client/dotnet/STTConsole/arctic_a0024.wav diff --git a/native_client/dotnet/DeepSpeechConsole/packages.config b/native_client/dotnet/STTConsole/packages.config similarity index 100% rename from native_client/dotnet/DeepSpeechConsole/packages.config rename to native_client/dotnet/STTConsole/packages.config diff --git a/native_client/dotnet/DeepSpeechWPF/.gitignore b/native_client/dotnet/STTWPF/.gitignore similarity index 100% rename from native_client/dotnet/DeepSpeechWPF/.gitignore rename to native_client/dotnet/STTWPF/.gitignore diff --git a/native_client/dotnet/DeepSpeechWPF/App.config b/native_client/dotnet/STTWPF/App.config similarity index 100% rename from native_client/dotnet/DeepSpeechWPF/App.config rename to native_client/dotnet/STTWPF/App.config diff --git a/native_client/dotnet/DeepSpeechWPF/App.xaml b/native_client/dotnet/STTWPF/App.xaml similarity index 74% rename from native_client/dotnet/DeepSpeechWPF/App.xaml rename to native_client/dotnet/STTWPF/App.xaml index 16ebb0d4..97292db8 100644 --- a/native_client/dotnet/DeepSpeechWPF/App.xaml +++ 
diff --git a/native_client/dotnet/DeepSpeechWPF/.gitignore b/native_client/dotnet/STTWPF/.gitignore
similarity index 100%
rename from native_client/dotnet/DeepSpeechWPF/.gitignore
rename to native_client/dotnet/STTWPF/.gitignore
diff --git a/native_client/dotnet/DeepSpeechWPF/App.config b/native_client/dotnet/STTWPF/App.config
similarity index 100%
rename from native_client/dotnet/DeepSpeechWPF/App.config
rename to native_client/dotnet/STTWPF/App.config
diff --git a/native_client/dotnet/DeepSpeechWPF/App.xaml b/native_client/dotnet/STTWPF/App.xaml
similarity index 74%
rename from native_client/dotnet/DeepSpeechWPF/App.xaml
rename to native_client/dotnet/STTWPF/App.xaml
index 16ebb0d4..97292db8 100644
--- a/native_client/dotnet/DeepSpeechWPF/App.xaml
+++ b/native_client/dotnet/STTWPF/App.xaml
@@ -1,8 +1,8 @@
-<Application x:Class="DeepSpeech.WPF.App"
+<Application x:Class="STT.WPF.App"
              xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
              xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
-             xmlns:local="clr-namespace:DeepSpeech.WPF"
+             xmlns:local="clr-namespace:STT.WPF"
              StartupUri="MainWindow.xaml">
     <Application.Resources>

diff --git a/native_client/dotnet/DeepSpeechWPF/App.xaml.cs b/native_client/dotnet/STTWPF/App.xaml.cs
similarity index 61%
rename from native_client/dotnet/DeepSpeechWPF/App.xaml.cs
rename to native_client/dotnet/STTWPF/App.xaml.cs
index d4b87d6e..80dd818a 100644
--- a/native_client/dotnet/DeepSpeechWPF/App.xaml.cs
+++ b/native_client/dotnet/STTWPF/App.xaml.cs
@@ -1,10 +1,10 @@
 using CommonServiceLocator;
-using DeepSpeech.WPF.ViewModels;
-using DeepSpeechClient.Interfaces;
+using STT.WPF.ViewModels;
+using STTClient.Interfaces;
 using GalaSoft.MvvmLight.Ioc;
 using System.Windows;

-namespace DeepSpeechWPF
+namespace STTWPF
 {
     ///
     /// Interaction logic for App.xaml
@@ -18,11 +18,11 @@ namespace DeepSpeechWPF
             try
             {
-                //Register instance of DeepSpeech
-                DeepSpeechClient.DeepSpeech deepSpeechClient =
-                    new DeepSpeechClient.DeepSpeech("deepspeech-0.8.0-models.pbmm");
+                //Register instance of STT
+                STTClient.STT client =
+                    new STTClient.STT("coqui-stt-0.8.0-models.pbmm");

-                SimpleIoc.Default.Register<IDeepSpeech>(() => deepSpeechClient);
+                SimpleIoc.Default.Register<ISTT>(() => client);
                 SimpleIoc.Default.Register<MainWindowViewModel>();
             }
             catch (System.Exception ex)
@@ -35,8 +35,8 @@ namespace DeepSpeechWPF
         protected override void OnExit(ExitEventArgs e)
         {
             base.OnExit(e);
-            //Dispose instance of DeepSpeech
-            ServiceLocator.Current.GetInstance<IDeepSpeech>()?.Dispose();
+            //Dispose instance of STT
+            ServiceLocator.Current.GetInstance<ISTT>()?.Dispose();
         }
     }
 }
diff --git a/native_client/dotnet/DeepSpeechWPF/MainWindow.xaml b/native_client/dotnet/STTWPF/MainWindow.xaml
similarity index 98%
rename from native_client/dotnet/DeepSpeechWPF/MainWindow.xaml
rename to native_client/dotnet/STTWPF/MainWindow.xaml
index 4fbe5e72..569f6ad2 100644
--- a/native_client/dotnet/DeepSpeechWPF/MainWindow.xaml
+++ b/native_client/dotnet/STTWPF/MainWindow.xaml
@@ -1,10 +1,10 @@
-<Window x:Class="DeepSpeech.WPF.MainWindow"
+<Window x:Class="STT.WPF.MainWindow"
         xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
         xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
-        xmlns:local="clr-namespace:DeepSpeech.WPF"
+        xmlns:local="clr-namespace:STT.WPF"
     /// Interaction logic for MainWindow.xaml
diff --git a/native_client/dotnet/DeepSpeechWPF/Properties/AssemblyInfo.cs b/native_client/dotnet/STTWPF/Properties/AssemblyInfo.cs
similarity index 91%
rename from native_client/dotnet/DeepSpeechWPF/Properties/AssemblyInfo.cs
rename to native_client/dotnet/STTWPF/Properties/AssemblyInfo.cs
index f9ae7d76..f2e32102 100644
--- a/native_client/dotnet/DeepSpeechWPF/Properties/AssemblyInfo.cs
+++ b/native_client/dotnet/STTWPF/Properties/AssemblyInfo.cs
@@ -7,12 +7,12 @@ using System.Windows;
 // General Information about an assembly is controlled through the following
 // set of attributes. Change these attribute values to modify the information
 // associated with an assembly.
-[assembly: AssemblyTitle("DeepSpeech.WPF")]
+[assembly: AssemblyTitle("STT.WPF")]
 [assembly: AssemblyDescription("")]
 [assembly: AssemblyConfiguration("")]
-[assembly: AssemblyCompany("")]
-[assembly: AssemblyProduct("DeepSpeech.WPF.SingleFiles")]
-[assembly: AssemblyCopyright("Copyright © 2018")]
+[assembly: AssemblyCompany("Coqui GmbH")]
+[assembly: AssemblyProduct("STT.WPF.SingleFiles")]
+[assembly: AssemblyCopyright("Copyright © 2018-2020 Mozilla, © 2021 Coqui GmbH")]
 [assembly: AssemblyTrademark("")]
 [assembly: AssemblyCulture("")]
diff --git a/native_client/dotnet/DeepSpeechWPF/Properties/Resources.Designer.cs b/native_client/dotnet/STTWPF/Properties/Resources.Designer.cs
similarity index 94%
rename from native_client/dotnet/DeepSpeechWPF/Properties/Resources.Designer.cs
rename to native_client/dotnet/STTWPF/Properties/Resources.Designer.cs
index 2da2b4b2..2478decd 100644
--- a/native_client/dotnet/DeepSpeechWPF/Properties/Resources.Designer.cs
+++ b/native_client/dotnet/STTWPF/Properties/Resources.Designer.cs
@@ -8,7 +8,7 @@
 //
 //------------------------------------------------------------------------------

-namespace DeepSpeech.WPF.Properties {
+namespace STT.WPF.Properties {
     using System;

@@ -39,7 +39,7 @@ namespace DeepSpeech.WPF.Properties {
         internal static global::System.Resources.ResourceManager ResourceManager {
             get {
                 if (object.ReferenceEquals(resourceMan, null)) {
-                    global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("DeepSpeech.WPF.Properties.Resources", typeof(Resources).Assembly);
+                    global::System.Resources.ResourceManager temp = new global::System.Resources.ResourceManager("STT.WPF.Properties.Resources", typeof(Resources).Assembly);
                     resourceMan = temp;
                 }
                 return resourceMan;
diff --git a/native_client/dotnet/DeepSpeechWPF/Properties/Resources.resx b/native_client/dotnet/STTWPF/Properties/Resources.resx
similarity index 100%
rename from native_client/dotnet/DeepSpeechWPF/Properties/Resources.resx
rename to native_client/dotnet/STTWPF/Properties/Resources.resx
diff --git a/native_client/dotnet/DeepSpeechWPF/Properties/Settings.Designer.cs b/native_client/dotnet/STTWPF/Properties/Settings.Designer.cs
similarity index 96%
rename from native_client/dotnet/DeepSpeechWPF/Properties/Settings.Designer.cs
rename to native_client/dotnet/STTWPF/Properties/Settings.Designer.cs
index 0f464bc4..de63d157 100644
--- a/native_client/dotnet/DeepSpeechWPF/Properties/Settings.Designer.cs
+++ b/native_client/dotnet/STTWPF/Properties/Settings.Designer.cs
@@ -8,7 +8,7 @@
 //
 //------------------------------------------------------------------------------

-namespace DeepSpeech.WPF.Properties {
+namespace STT.WPF.Properties {

     [global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()]
diff --git a/native_client/dotnet/DeepSpeechWPF/Properties/Settings.settings b/native_client/dotnet/STTWPF/Properties/Settings.settings
similarity index 100%
rename from native_client/dotnet/DeepSpeechWPF/Properties/Settings.settings
rename to native_client/dotnet/STTWPF/Properties/Settings.settings
diff --git a/native_client/dotnet/DeepSpeechWPF/DeepSpeech.WPF.csproj b/native_client/dotnet/STTWPF/STT.WPF.csproj
similarity index 95%
rename from native_client/dotnet/DeepSpeechWPF/DeepSpeech.WPF.csproj
rename to native_client/dotnet/STTWPF/STT.WPF.csproj
index 7f46a31e..160adafe 100644
--- a/native_client/dotnet/DeepSpeechWPF/DeepSpeech.WPF.csproj
+++ b/native_client/dotnet/STTWPF/STT.WPF.csproj
@@ -6,8 +6,8 @@
     <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
     <ProjectGuid>{54BFD766-4305-4F4C-BA59-AF45505DF3C1}</ProjectGuid>
     <OutputType>WinExe</OutputType>
-    <RootNamespace>DeepSpeech.WPF</RootNamespace>
-    <AssemblyName>DeepSpeech.WPF</AssemblyName>
+    <RootNamespace>STT.WPF</RootNamespace>
+    <AssemblyName>STT.WPF</AssemblyName>
     <TargetFrameworkVersion>v4.6.2</TargetFrameworkVersion>
     <FileAlignment>512</FileAlignment>
     <ProjectTypeGuids>{60dc8134-eba5-43b8-bcc9-bb4bc16c2548};{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}</ProjectTypeGuids>
@@ -131,9 +131,9 @@
   <ItemGroup>
-    <ProjectReference Include="..\DeepSpeechClient\DeepSpeechClient.csproj">
+    <ProjectReference Include="..\STTClient\STTClient.csproj">
       <Project>{56de4091-bbbe-47e4-852d-7268b33b971f}</Project>
-      <Name>DeepSpeechClient</Name>
+      <Name>STTClient</Name>
     </ProjectReference>
   </ItemGroup>
diff --git a/native_client/dotnet/DeepSpeechWPF/DeepSpeech.WPF.sln b/native_client/dotnet/STTWPF/STT.WPF.sln
similarity index 80%
rename from native_client/dotnet/DeepSpeechWPF/DeepSpeech.WPF.sln
rename to native_client/dotnet/STTWPF/STT.WPF.sln
index cd29025e..96c87ee5 100644
--- a/native_client/dotnet/DeepSpeechWPF/DeepSpeech.WPF.sln
+++ b/native_client/dotnet/STTWPF/STT.WPF.sln
@@ -3,9 +3,9 @@ Microsoft Visual Studio Solution File, Format Version 12.00
 # Visual Studio 15
 VisualStudioVersion = 15.0.28307.421
 MinimumVisualStudioVersion = 10.0.40219.1
-Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "DeepSpeech.WPF", "DeepSpeech.WPF.csproj", "{54BFD766-4305-4F4C-BA59-AF45505DF3C1}"
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "STT.WPF", "STT.WPF.csproj", "{54BFD766-4305-4F4C-BA59-AF45505DF3C1}"
 EndProject
-Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "DeepSpeechClient", "..\DeepSpeechClient\DeepSpeechClient.csproj", "{56DE4091-BBBE-47E4-852D-7268B33B971F}"
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "STTClient", "..\STTClient\STTClient.csproj", "{56DE4091-BBBE-47E4-852D-7268B33B971F}"
 EndProject
 Global
 	GlobalSection(SolutionConfigurationPlatforms) = preSolution
diff --git a/native_client/dotnet/DeepSpeechWPF/ViewModels/BindableBase.cs b/native_client/dotnet/STTWPF/ViewModels/BindableBase.cs
similarity index 98%
rename from native_client/dotnet/DeepSpeechWPF/ViewModels/BindableBase.cs
rename to native_client/dotnet/STTWPF/ViewModels/BindableBase.cs
index 909327ee..e5187cd6 100644
--- a/native_client/dotnet/DeepSpeechWPF/ViewModels/BindableBase.cs
+++ b/native_client/dotnet/STTWPF/ViewModels/BindableBase.cs
@@ -3,7 +3,7 @@ using System.Collections.Generic;
 using System.ComponentModel;
 using System.Runtime.CompilerServices;

-namespace DeepSpeech.WPF.ViewModels
+namespace STT.WPF.ViewModels
 {
     ///
     /// Implementation of <see cref="INotifyPropertyChanged"/> to simplify models.
diff --git a/native_client/dotnet/DeepSpeechWPF/ViewModels/MainWindowViewModel.cs b/native_client/dotnet/STTWPF/ViewModels/MainWindowViewModel.cs
similarity index 97%
rename from native_client/dotnet/DeepSpeechWPF/ViewModels/MainWindowViewModel.cs
rename to native_client/dotnet/STTWPF/ViewModels/MainWindowViewModel.cs
index 230fd42a..0ed4822b 100644
--- a/native_client/dotnet/DeepSpeechWPF/ViewModels/MainWindowViewModel.cs
+++ b/native_client/dotnet/STTWPF/ViewModels/MainWindowViewModel.cs
@@ -3,8 +3,8 @@ using CSCore;
 using CSCore.CoreAudioAPI;
 using CSCore.SoundIn;
 using CSCore.Streams;
-using DeepSpeechClient.Interfaces;
-using DeepSpeechClient.Models;
+using STTClient.Interfaces;
+using STTClient.Models;
 using GalaSoft.MvvmLight.CommandWpf;
 using Microsoft.Win32;
 using System;
@@ -15,7 +15,7 @@ using System.IO;
 using System.Threading;
 using System.Threading.Tasks;

-namespace DeepSpeech.WPF.ViewModels
+namespace STT.WPF.ViewModels
 {
     ///
     /// View model of the MainWindow View.
@@ -27,7 +27,7 @@ namespace DeepSpeech.WPF.ViewModels
         private const string ScorerPath = "kenlm.scorer";
         #endregion

-        private readonly IDeepSpeech _sttClient;
+        private readonly ISTT _sttClient;

         #region Commands
         ///
@@ -62,7 +62,7 @@ namespace DeepSpeech.WPF.ViewModels
         ///
         /// Stream used to feed data into the acoustic model.
         ///
-        private DeepSpeechStream _sttStream;
+        private Stream _sttStream;

         ///
         /// Records the audio of the selected device.
@@ -75,7 +75,7 @@ namespace DeepSpeech.WPF.ViewModels
         private SoundInSource _soundInSource;

         ///
-        /// Target wave source.(16KHz Mono 16bit for DeepSpeech)
+        /// Target wave source.(16KHz Mono 16bit for STT)
         ///
         private IWaveSource _convertedSource;

@@ -200,7 +200,7 @@ namespace DeepSpeech.WPF.ViewModels
         #endregion

         #region Ctors
-        public MainWindowViewModel(IDeepSpeech sttClient)
+        public MainWindowViewModel(ISTT sttClient)
         {
             _sttClient = sttClient;

@@ -290,7 +290,7 @@ namespace DeepSpeech.WPF.ViewModels
                 //read data from the converedSource
                 //important: don't use the e.Data here
                 //the e.Data contains the raw data provided by the
-                //soundInSource which won't have the deepspeech required audio format
+                //soundInSource which won't have the STT required audio format
                 byte[] buffer = new byte[_convertedSource.WaveFormat.BytesPerSecond / 2];

                 int read;
diff --git a/native_client/dotnet/DeepSpeechWPF/packages.config b/native_client/dotnet/STTWPF/packages.config
similarity index 100%
rename from native_client/dotnet/DeepSpeechWPF/packages.config
rename to native_client/dotnet/STTWPF/packages.config
diff --git a/native_client/dotnet/nupkg/stt.nuspec.in b/native_client/dotnet/nupkg/STT.spec.in
similarity index 100%
rename from native_client/dotnet/nupkg/stt.nuspec.in
rename to native_client/dotnet/nupkg/STT.spec.in
diff --git a/native_client/dotnet/nupkg/build/DeepSpeech.targets b/native_client/dotnet/nupkg/build/STT.targets
similarity index 100%
rename from native_client/dotnet/nupkg/build/DeepSpeech.targets
rename to native_client/dotnet/nupkg/build/STT.targets
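The WPF view model above converts whatever the capture device delivers into the 16 kHz, 16-bit, mono format the model expects before feeding the stream. A sketch of that conversion chain using CSCore's fluent converters, as suggested by the _convertedSource hunks; the wrapper method is hypothetical.

    using CSCore;
    using CSCore.SoundIn;
    using CSCore.Streams;

    static class CaptureSetup
    {
        // Convert whatever the capture device delivers into the
        // 16 kHz / 16-bit / mono format the acoustic model expects.
        public static IWaveSource CreateConvertedSource(WasapiCapture capture)
        {
            var soundInSource = new SoundInSource(capture) { FillWithZeros = false };
            return soundInSource
                .ChangeSampleRate(16000) // model sample rate
                .ToSampleSource()
                .ToWaveSource(16)        // 16 bits per sample
                .ToMono();
        }
    }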
diff --git a/native_client/generate_scorer_package.cpp b/native_client/generate_scorer_package.cpp
index 0cadb429..dbc4bcd9 100644
--- a/native_client/generate_scorer_package.cpp
+++ b/native_client/generate_scorer_package.cpp
@@ -66,9 +66,9 @@ create_package(absl::optional<string> alphabet_path,
     scorer.set_utf8_mode(force_bytes_output_mode.value());
     scorer.reset_params(default_alpha, default_beta);
     int err = scorer.load_lm(lm_path);
-    if (err != DS_ERR_SCORER_NO_TRIE) {
+    if (err != STT_ERR_SCORER_NO_TRIE) {
         cerr << "Error loading language model file: "
-             << (err == DS_ERR_SCORER_UNREADABLE ? "Can't open binary LM file." : DS_ErrorCodeToErrorMessage(err))
+             << (err == STT_ERR_SCORER_UNREADABLE ? "Can't open binary LM file." : STT_ErrorCodeToErrorMessage(err))
              << "\n";
         return 1;
     }
diff --git a/native_client/java/app/src/main/AndroidManifest.xml b/native_client/java/app/src/main/AndroidManifest.xml
index e9a371d1..dcf69307 100644
--- a/native_client/java/app/src/main/AndroidManifest.xml
+++ b/native_client/java/app/src/main/AndroidManifest.xml
@@ -9,7 +9,7 @@
         android:roundIcon="@mipmap/ic_launcher_round"
         android:supportsRtl="true"
         android:theme="@style/AppTheme">
-        <activity android:name=".DeepSpeechActivity">
+        <activity android:name=".STTActivity">
diff --git a/native_client/java/app/src/main/java/ai/coqui/sttexampleapp/DeepSpeechActivity.java b/native_client/java/app/src/main/java/ai/coqui/sttexampleapp/STTActivity.java
similarity index 95%
rename from native_client/java/app/src/main/java/ai/coqui/sttexampleapp/DeepSpeechActivity.java
rename to native_client/java/app/src/main/java/ai/coqui/sttexampleapp/STTActivity.java
index 7f0836d9..32395fdf 100644
--- a/native_client/java/app/src/main/java/ai/coqui/sttexampleapp/DeepSpeechActivity.java
+++ b/native_client/java/app/src/main/java/ai/coqui/sttexampleapp/STTActivity.java
@@ -16,11 +16,11 @@ import java.io.IOException;
 import java.nio.ByteOrder;
 import java.nio.ByteBuffer;

-import ai.coqui.libstt.DeepSpeechModel;
+import ai.coqui.libstt.STTModel;

-public class DeepSpeechActivity extends AppCompatActivity {
+public class STTActivity extends AppCompatActivity {

-    DeepSpeechModel _m = null;
+    STTModel _m = null;

     EditText _tfliteModel;
     EditText _audioFile;
@@ -50,7 +50,7 @@ public class DeepSpeechActivity extends AppCompatActivity {
             this._tfliteStatus.setText("Creating model");
             if (this._m == null) {
                 // sphinx-doc: java_ref_model_start
-                this._m = new DeepSpeechModel(tfliteModel);
+                this._m = new STTModel(tfliteModel);
                 this._m.setBeamWidth(BEAM_WIDTH);
                 // sphinx-doc: java_ref_model_stop
             }
@@ -124,7 +124,7 @@ public class DeepSpeechActivity extends AppCompatActivity {
     @Override
     protected void onCreate(Bundle savedInstanceState) {
         super.onCreate(savedInstanceState);
-        setContentView(R.layout.activity_deep_speech);
+        setContentView(R.layout.activity_stt);

         this._decodedString = (TextView) findViewById(R.id.decodedString);
         this._tfliteStatus = (TextView) findViewById(R.id.tfliteStatus);
diff --git a/native_client/java/app/src/main/res/layout/activity_deep_speech.xml b/native_client/java/app/src/main/res/layout/activity_stt.xml
similarity index 99%
rename from native_client/java/app/src/main/res/layout/activity_deep_speech.xml
rename to native_client/java/app/src/main/res/layout/activity_stt.xml
index 02c383d4..849b9e8d 100644
--- a/native_client/java/app/src/main/res/layout/activity_deep_speech.xml
+++ b/native_client/java/app/src/main/res/layout/activity_stt.xml
@@ -4,7 +4,7 @@
     xmlns:tools="http://schemas.android.com/tools"
     android:layout_width="match_parent"
     android:layout_height="match_parent"
-    tools:context=".DeepSpeechActivity">
+    tools:context=".STTActivity">