From f9e05fe0c346aaa3303047a6014e48079f72c551 Mon Sep 17 00:00:00 2001
From: Alexandre Lissy
Date: Tue, 10 Mar 2020 12:13:12 +0100
Subject: [PATCH 1/3] Share argparser amongst importers

---
 bin/import_aidatatang.py     |  4 ++--
 bin/import_aishell.py        |  4 ++--
 bin/import_cv2.py            |  6 +++---
 bin/import_freestmandarin.py |  4 ++--
 bin/import_gram_vaani.py     | 10 +++++++---
 bin/import_lingua_libre.py   |  7 +++----
 bin/import_m-ailabs.py       |  5 +++--
 bin/import_magicdata.py      |  4 ++--
 bin/import_primewords.py     |  4 ++--
 bin/import_slr57.py          |  7 +++----
 bin/import_ts.py             |  7 +++----
 util/importers.py            | 28 ++++++++++++++++++++++++++++
 util/test_importers.py       | 12 ++++++++++++
 util/text.py                 | 23 -----------------------
 14 files changed, 72 insertions(+), 53 deletions(-)
 create mode 100644 util/importers.py
 create mode 100644 util/test_importers.py

diff --git a/bin/import_aidatatang.py b/bin/import_aidatatang.py
index d1367281..703c570f 100755
--- a/bin/import_aidatatang.py
+++ b/bin/import_aidatatang.py
@@ -7,7 +7,7 @@ import os
 import sys
 sys.path.insert(1, os.path.join(sys.path[0], '..'))
 
-import argparse
+from util.importers import get_importers_parser
 import glob
 import pandas
 import tarfile
@@ -81,7 +81,7 @@ def preprocess_data(tgz_file, target_dir):
 
 def main():
     # https://www.openslr.org/62/
-    parser = argparse.ArgumentParser(description='Import aidatatang_200zh corpus')
+    parser = get_importers_parser(description='Import aidatatang_200zh corpus')
     parser.add_argument('tgz_file', help='Path to aidatatang_200zh.tgz')
     parser.add_argument('--target_dir', default='', help='Target folder to extract files into and put the resulting CSVs. Defaults to same folder as the main archive.')
     params = parser.parse_args()
diff --git a/bin/import_aishell.py b/bin/import_aishell.py
index 5de1121b..939b5c92 100755
--- a/bin/import_aishell.py
+++ b/bin/import_aishell.py
@@ -7,7 +7,7 @@ import os
 import sys
 sys.path.insert(1, os.path.join(sys.path[0], '..'))
 
-import argparse
+from util.importers import get_importers_parser
 import glob
 import tarfile
 import pandas
@@ -80,7 +80,7 @@ def preprocess_data(tgz_file, target_dir):
 
 def main():
     # http://www.openslr.org/33/
-    parser = argparse.ArgumentParser(description='Import AISHELL corpus')
+    parser = get_importers_parser(description='Import AISHELL corpus')
     parser.add_argument('aishell_tgz_file', help='Path to data_aishell.tgz')
     parser.add_argument('--target_dir', default='', help='Target folder to extract files into and put the resulting CSVs. Defaults to same folder as the main archive.')
     params = parser.parse_args()
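At this point get_importers_parser() is only a thin wrapper around argparse.ArgumentParser, but routing every importer through it means that any common flag added later (as patch 2 does with --validate_label_locale) reaches all importers at once. A minimal sketch of the importer skeleton this refactor produces — the corpus name and arguments here are hypothetical, not from the patch:

#!/usr/bin/env python
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))

from util.importers import get_importers_parser

def main():
    # Same pattern as the importers above: build the parser through the
    # shared factory instead of calling argparse directly.
    parser = get_importers_parser(description='Import some_corpus')  # hypothetical corpus
    parser.add_argument('archive', help='Path to some_corpus.tar.gz')
    params = parser.parse_args()
    print(params.archive)

if __name__ == "__main__":
    main()
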
diff --git a/bin/import_cv2.py b/bin/import_cv2.py
index acea122b..a4aba6bc 100755
--- a/bin/import_cv2.py
+++ b/bin/import_cv2.py
@@ -16,7 +16,6 @@ sys.path.insert(1, os.path.join(sys.path[0], '..'))
 
 import csv
 import sox
-import argparse
 import subprocess
 import progressbar
 import unicodedata
@@ -26,7 +25,8 @@ from threading import RLock
 from multiprocessing.dummy import Pool
 from multiprocessing import cpu_count
 from util.downloader import SIMPLE_BAR
-from util.text import Alphabet, validate_label
+from util.text import Alphabet
+from util.importers import get_importers_parser, validate_label_eng as validate_label
 from util.helpers import secs_to_hours
 
 
@@ -136,7 +136,7 @@ def _maybe_convert_wav(mp3_filename, wav_filename):
 
 
 if __name__ == "__main__":
-    PARSER = argparse.ArgumentParser(description='Import CommonVoice v2.0 corpora')
+    PARSER = get_importers_parser(description='Import CommonVoice v2.0 corpora')
     PARSER.add_argument('tsv_dir', help='Directory containing tsv files')
     PARSER.add_argument('--audio_dir', help='Directory containing the audio clips - defaults to "<tsv_dir>/clips"')
     PARSER.add_argument('--filter_alphabet', help='Exclude samples with characters not in provided alphabet')
diff --git a/bin/import_freestmandarin.py b/bin/import_freestmandarin.py
index e600befb..8e6f5615 100755
--- a/bin/import_freestmandarin.py
+++ b/bin/import_freestmandarin.py
@@ -7,7 +7,7 @@ import os
 import sys
 sys.path.insert(1, os.path.join(sys.path[0], '..'))
 
-import argparse
+from util.importers import get_importers_parser
 import glob
 import numpy as np
 import pandas
@@ -81,7 +81,7 @@ def preprocess_data(tgz_file, target_dir):
 
 def main():
     # https://www.openslr.org/38/
-    parser = argparse.ArgumentParser(description='Import Free ST Chinese Mandarin corpus')
+    parser = get_importers_parser(description='Import Free ST Chinese Mandarin corpus')
     parser.add_argument('tgz_file', help='Path to ST-CMDS-20170001_1-OS.tar.gz')
     parser.add_argument('--target_dir', default='', help='Target folder to extract files into and put the resulting CSVs. Defaults to same folder as the main archive.')
     params = parser.parse_args()
diff --git a/bin/import_gram_vaani.py b/bin/import_gram_vaani.py
index e1fdd078..500ed5de 100755
--- a/bin/import_gram_vaani.py
+++ b/bin/import_gram_vaani.py
@@ -1,12 +1,16 @@
 #!/usr/bin/env python
+# Make sure we can import stuff from util/
+# This script needs to be run from the root of the DeepSpeech repository
 import os
-import csv
 import sys
+sys.path.insert(1, os.path.join(sys.path[0], '..'))
+
+import csv
 import math
 import urllib
 import logging
-import argparse
+from util.importers import get_importers_parser
 import subprocess
 from os import path
 from pathlib import Path
@@ -38,7 +42,7 @@ def parse_args(args):
     Returns:
       :obj:`argparse.Namespace`: command line parameters namespace
     """
-    parser = argparse.ArgumentParser(
+    parser = get_importers_parser(
         description="Imports GramVaani data for Deep Speech"
     )
     parser.add_argument(
diff --git a/bin/import_lingua_libre.py b/bin/import_lingua_libre.py
index ae893350..b9d6106a 100755
--- a/bin/import_lingua_libre.py
+++ b/bin/import_lingua_libre.py
@@ -3,13 +3,12 @@ from __future__ import absolute_import, division, print_function
 
 # Make sure we can import stuff from util/
 # This script needs to be run from the root of the DeepSpeech repository
-import argparse
 import os
 import sys
-
-
 sys.path.insert(1, os.path.join(sys.path[0], '..'))
 
+from util.importers import get_importers_parser
+
 import csv
 import re
 import sox
@@ -173,7 +172,7 @@ def _maybe_convert_wav(ogg_filename, wav_filename):
         print('SoX processing error', ex, ogg_filename, wav_filename)
 
 def handle_args():
-    parser = argparse.ArgumentParser(description='Importer for LinguaLibre dataset. Check https://lingualibre.fr/wiki/Help:Download_from_LinguaLibre for details.')
+    parser = get_importers_parser(description='Importer for LinguaLibre dataset. Check https://lingualibre.fr/wiki/Help:Download_from_LinguaLibre for details.')
     parser.add_argument(dest='target_dir')
     parser.add_argument('--qId', type=int, required=True, help='LinguaLibre language qId')
     parser.add_argument('--iso639-3', type=str, required=True, help='ISO639-3 language code')
diff --git a/bin/import_m-ailabs.py b/bin/import_m-ailabs.py
index 060e8f2a..16d1bf54 100755
--- a/bin/import_m-ailabs.py
+++ b/bin/import_m-ailabs.py
@@ -4,12 +4,13 @@ from __future__ import absolute_import, division, print_function
 
 # Make sure we can import stuff from util/
 # This script needs to be run from the root of the DeepSpeech repository
-import argparse
 import os
 import sys
 
 sys.path.insert(1, os.path.join(sys.path[0], '..'))
 
+from util.importers import get_importers_parser
+
 import csv
 import subprocess
 import progressbar
@@ -168,7 +169,7 @@ def _maybe_convert_sets(target_dir, extracted_data):
 
 
 def handle_args():
-    parser = argparse.ArgumentParser(description='Importer for M-AILABS dataset. https://www.caito.de/2019/01/the-m-ailabs-speech-dataset/.')
+    parser = get_importers_parser(description='Importer for M-AILABS dataset. https://www.caito.de/2019/01/the-m-ailabs-speech-dataset/.')
     parser.add_argument(dest='target_dir')
     parser.add_argument('--filter_alphabet', help='Exclude samples with characters not in provided alphabet')
    parser.add_argument('--normalize', action='store_true', help='Converts diacritic characters to their base ones')
diff --git a/bin/import_magicdata.py b/bin/import_magicdata.py
index 2ec01549..27dbf74a 100755
--- a/bin/import_magicdata.py
+++ b/bin/import_magicdata.py
@@ -7,7 +7,7 @@ import os
 import sys
 sys.path.insert(1, os.path.join(sys.path[0], '..'))
 
-import argparse
+from util.importers import get_importers_parser
 import glob
 import pandas
 import tarfile
@@ -99,7 +99,7 @@ def preprocess_data(folder_with_archives, target_dir):
 
 def main():
     # https://openslr.org/68/
-    parser = argparse.ArgumentParser(description='Import MAGICDATA corpus')
+    parser = get_importers_parser(description='Import MAGICDATA corpus')
     parser.add_argument('folder_with_archives', help='Path to folder containing magicdata_{train,dev,test}.tar.gz')
     parser.add_argument('--target_dir', default='', help='Target folder to extract files into and put the resulting CSVs. Defaults to a folder called magicdata next to the archives')
     params = parser.parse_args()
diff --git a/bin/import_primewords.py b/bin/import_primewords.py
index 63f21cf7..0d6fdc52 100755
--- a/bin/import_primewords.py
+++ b/bin/import_primewords.py
@@ -7,7 +7,7 @@ import os
 import sys
 sys.path.insert(1, os.path.join(sys.path[0], '..'))
 
-import argparse
+from util.importers import get_importers_parser
 import glob
 import json
 import numpy as np
@@ -93,7 +93,7 @@ def preprocess_data(tgz_file, target_dir):
 
 def main():
     # https://www.openslr.org/47/
-    parser = argparse.ArgumentParser(description='Import Primewords Chinese corpus set 1')
+    parser = get_importers_parser(description='Import Primewords Chinese corpus set 1')
     parser.add_argument('tgz_file', help='Path to primewords_md_2018_set1.tar.gz')
     parser.add_argument('--target_dir', default='', help='Target folder to extract files into and put the resulting CSVs. Defaults to same folder as the main archive.')
     params = parser.parse_args()
diff --git a/bin/import_slr57.py b/bin/import_slr57.py
index 5dde767a..16bac05b 100755
--- a/bin/import_slr57.py
+++ b/bin/import_slr57.py
@@ -3,13 +3,12 @@ from __future__ import absolute_import, division, print_function
 
 # Make sure we can import stuff from util/
 # This script needs to be run from the root of the DeepSpeech repository
-import argparse
 import os
 import sys
-
-
 sys.path.insert(1, os.path.join(sys.path[0], '..'))
 
+from util.importers import get_importers_parser
+
 import csv
 import re
 import sox
@@ -195,7 +194,7 @@ def _maybe_convert_sets(target_dir, extracted_data):
     print('Final amount of imported audio: %s.' % secs_to_hours(counter['total_time'] / SAMPLE_RATE))
 
 def handle_args():
-    parser = argparse.ArgumentParser(description='Importer for African Accented French dataset. More information on http://www.openslr.org/57/.')
+    parser = get_importers_parser(description='Importer for African Accented French dataset. More information on http://www.openslr.org/57/.')
     parser.add_argument(dest='target_dir')
     parser.add_argument('--filter_alphabet', help='Exclude samples with characters not in provided alphabet')
     parser.add_argument('--normalize', action='store_true', help='Converts diacritic characters to their base ones')
diff --git a/bin/import_ts.py b/bin/import_ts.py
index 4aaa058c..363e639e 100755
--- a/bin/import_ts.py
+++ b/bin/import_ts.py
@@ -3,14 +3,13 @@ from __future__ import absolute_import, division, print_function
 
 # Make sure we can import stuff from util/
 # This script needs to be run from the root of the DeepSpeech repository
-import argparse
 import os
 import re
 import sys
-
-
 sys.path.insert(1, os.path.join(sys.path[0], '..'))
 
+from util.importers import get_importers_parser
+
 import csv
 import unidecode
 import zipfile
@@ -186,7 +185,7 @@ def cleanup_transcript(text, english_compatible=False):
 
 
 def handle_args():
-    parser = argparse.ArgumentParser(description='Importer for TrainingSpeech dataset.')
+    parser = get_importers_parser(description='Importer for TrainingSpeech dataset.')
     parser.add_argument(dest='target_dir')
     parser.add_argument('--english-compatible', action='store_true', dest='english_compatible', help='Remove diacritics and other non-ascii chars.')
     return parser.parse_args()
diff --git a/util/importers.py b/util/importers.py
new file mode 100644
index 00000000..9f7ba8df
--- /dev/null
+++ b/util/importers.py
@@ -0,0 +1,28 @@
+import argparse
+import re
+
+def get_importers_parser(description):
+    parser = argparse.ArgumentParser(description=description)
+    return parser
+
+# Validate and normalize transcriptions. Returns a cleaned version of the label
+# or None if it's invalid.
+def validate_label_eng(label):
+    # For now we can only handle [a-z ']
+    if re.search(r"[0-9]|[(<\[\]&*{]", label) is not None:
+        return None
+
+    label = label.replace("-", " ")
+    label = label.replace("_", " ")
+    label = re.sub("[ ]{2,}", " ", label)
+    label = label.replace(".", "")
+    label = label.replace(",", "")
+    label = label.replace(";", "")
+    label = label.replace("?", "")
+    label = label.replace("!", "")
+    label = label.replace(":", "")
+    label = label.replace("\"", "")
+    label = label.strip()
+    label = label.lower()
+
+    return label if label else None
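The extracted helper keeps the semantics of the old util/text.py function: digits and bracket-like characters invalidate a transcript outright, hyphens and underscores become spaces, common punctuation is stripped, and the result is lowercased. A few illustrative calls, consistent with the rules above (the inputs are made up; only the digits case is taken from the unit test below):

from util.importers import validate_label_eng

assert validate_label_eng("Hello, World!") == "hello world"
assert validate_label_eng("don't stop") == "don't stop"       # apostrophes survive
assert validate_label_eng("this is a 1 2 3 test") is None     # digits reject the sample
assert validate_label_eng("...") is None                      # nothing left after cleaning
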
diff --git a/util/test_importers.py b/util/test_importers.py
new file mode 100644
index 00000000..884c2193
--- /dev/null
+++ b/util/test_importers.py
@@ -0,0 +1,12 @@
+import unittest
+
+from .importers import validate_label_eng
+
+class TestValidateLabelEng(unittest.TestCase):
+
+    def test_numbers(self):
+        label = validate_label_eng("this is a 1 2 3 test")
+        self.assertEqual(label, None)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/util/text.py b/util/text.py
index af958191..60bfe9f1 100644
--- a/util/text.py
+++ b/util/text.py
@@ -1,7 +1,6 @@
 from __future__ import absolute_import, division, print_function
 
 import numpy as np
-import re
 import struct
 
 from six.moves import range
@@ -166,25 +165,3 @@ def levenshtein(a, b):
                 current[j] = min(add, delete, change)
 
     return current[n]
-
-# Validate and normalize transcriptions. Returns a cleaned version of the label
-# or None if it's invalid.
-def validate_label(label):
-    # For now we can only handle [a-z ']
-    if re.search(r"[0-9]|[(<\[\]&*{]", label) is not None:
-        return None
-
-    label = label.replace("-", " ")
-    label = label.replace("_", " ")
-    label = re.sub("[ ]{2,}", " ", label)
-    label = label.replace(".", "")
-    label = label.replace(",", "")
-    label = label.replace(";", "")
-    label = label.replace("?", "")
-    label = label.replace("!", "")
-    label = label.replace(":", "")
-    label = label.replace("\"", "")
-    label = label.strip()
-    label = label.lower()
-
-    return label if label else None
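The accompanying test is minimal but establishes the harness. Because test_importers.py uses a relative import ("from .importers import ..."), it presumably has to be run module-style from the repository root rather than as a script:

# Either target the module directly:
#
#   python -m unittest util.test_importers
#
# or use unittest's discovery over the util/ package:
#
#   python -m unittest discover -s util -p "test_*.py"
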
From ce59228824b4a6430b03cbbc88ebbb0321642d0b Mon Sep 17 00:00:00 2001
From: Alexandre Lissy
Date: Tue, 10 Mar 2020 16:22:08 +0100
Subject: [PATCH 2/3] Localizable validate_label

Fixes #2804
---
 bin/import_cv.py                      |  2 +-
 bin/import_cv2.py                     |  3 ++-
 bin/import_fisher.py                  |  2 +-
 bin/import_gram_vaani.py              |  5 ++---
 bin/import_lingua_libre.py            |  6 ++++--
 bin/import_m-ailabs.py                |  5 +++--
 bin/import_slr57.py                   |  5 +++--
 bin/import_swb.py                     |  2 +-
 bin/import_swc.py                     |  3 ++-
 bin/import_ts.py                      |  4 ++--
 bin/import_tuda.py                    |  3 ++-
 requirements_tests.txt                |  1 +
 util/importers.py                     | 28 +++++++++++++++++++++++++++
 util/test_data/validate_locale_fra.py |  2 ++
 util/test_importers.py                | 28 ++++++++++++++++++++++++++-
 15 files changed, 81 insertions(+), 18 deletions(-)
 create mode 100644 util/test_data/validate_locale_fra.py

diff --git a/bin/import_cv.py b/bin/import_cv.py
index 7dd04d84..a9b9447e 100755
--- a/bin/import_cv.py
+++ b/bin/import_cv.py
@@ -18,7 +18,7 @@ from os import path
 from threading import RLock
 from multiprocessing.dummy import Pool
 from multiprocessing import cpu_count
-from util.text import validate_label
+from util.importers import validate_label_eng as validate_label
 from util.downloader import maybe_download, SIMPLE_BAR
 
 FIELDNAMES = ['wav_filename', 'wav_filesize', 'transcript']
diff --git a/bin/import_cv2.py b/bin/import_cv2.py
index a4aba6bc..7f8222d7 100755
--- a/bin/import_cv2.py
+++ b/bin/import_cv2.py
@@ -26,7 +26,7 @@ from multiprocessing.dummy import Pool
 from multiprocessing import cpu_count
 from util.downloader import SIMPLE_BAR
 from util.text import Alphabet
-from util.importers import get_importers_parser, validate_label_eng as validate_label
+from util.importers import get_importers_parser, get_validate_label
 from util.helpers import secs_to_hours
 
 
@@ -144,6 +144,7 @@ if __name__ == "__main__":
     PARSER.add_argument('--space_after_every_character', action='store_true', help='To help transcript join by white space')
 
     PARAMS = PARSER.parse_args()
+    validate_label = get_validate_label(PARAMS)
 
     AUDIO_DIR = PARAMS.audio_dir if PARAMS.audio_dir else os.path.join(PARAMS.tsv_dir, 'clips')
     ALPHABET = Alphabet(PARAMS.filter_alphabet) if PARAMS.filter_alphabet else None
diff --git a/bin/import_fisher.py b/bin/import_fisher.py
index e3340244..dd054765 100755
--- a/bin/import_fisher.py
+++ b/bin/import_fisher.py
@@ -19,7 +19,7 @@ import unicodedata
 import librosa
 import soundfile # <= Has an external dependency on libsndfile
 
-from util.text import validate_label
+from util.importers import validate_label_eng as validate_label
 
 def _download_and_preprocess_data(data_dir):
     # Assume data_dir contains extracted LDC2004S13, LDC2004T19, LDC2005S13, LDC2005T19
diff --git a/bin/import_gram_vaani.py b/bin/import_gram_vaani.py
index 500ed5de..141478b8 100755
--- a/bin/import_gram_vaani.py
+++ b/bin/import_gram_vaani.py
@@ -10,7 +10,7 @@ import csv
 import math
 import urllib
 import logging
-from util.importers import get_importers_parser
+from util.importers import get_importers_parser, get_validate_label
 import subprocess
 from os import path
 from pathlib import Path
@@ -19,8 +19,6 @@ import swifter
 import pandas as pd
 from sox import Transformer
 
-from util.text import validate_label
-
 __version__ = "0.1.0"
 _logger = logging.getLogger(__name__)
 
@@ -290,6 +288,7 @@ def main(args):
       args ([str]): command line parameter list
     """
     args = parse_args(args)
+    validate_label = get_validate_label(args)
     setup_logging(args.loglevel)
     _logger.info("Starting GramVaani importer...")
     _logger.info("Starting loading GramVaani csv...")
diff --git a/bin/import_lingua_libre.py b/bin/import_lingua_libre.py
index b9d6106a..bc11203d 100755
--- a/bin/import_lingua_libre.py
+++ b/bin/import_lingua_libre.py
@@ -7,8 +7,9 @@ import os
 import sys
 sys.path.insert(1, os.path.join(sys.path[0], '..'))
 
-from util.importers import get_importers_parser
+from util.importers import get_importers_parser, get_validate_label
+import argparse
 
 import csv
 import re
 import sox
@@ -26,7 +27,7 @@ from os import path
 from glob import glob
 
 from util.downloader import maybe_download
-from util.text import Alphabet, validate_label
+from util.text import Alphabet
 from util.helpers import secs_to_hours
 
 FIELDNAMES = ['wav_filename', 'wav_filesize', 'transcript']
@@ -185,6 +186,7 @@ def handle_args():
 if __name__ == "__main__":
     CLI_ARGS = handle_args()
     ALPHABET = Alphabet(CLI_ARGS.filter_alphabet) if CLI_ARGS.filter_alphabet else None
+    validate_label = get_validate_label(CLI_ARGS)
 
     bogus_regexes = []
     if CLI_ARGS.bogus_records:
diff --git a/bin/import_m-ailabs.py b/bin/import_m-ailabs.py
index 16d1bf54..540c8139 100755
--- a/bin/import_m-ailabs.py
+++ b/bin/import_m-ailabs.py
@@ -9,7 +9,7 @@ import sys
 
 sys.path.insert(1, os.path.join(sys.path[0], '..'))
 
-from util.importers import get_importers_parser
+from util.importers import get_importers_parser, get_validate_label
 
 import csv
 import subprocess
@@ -26,7 +26,7 @@ from os import path
 from glob import glob
 
 from util.downloader import maybe_download
-from util.text import Alphabet, validate_label
+from util.text import Alphabet
 from util.helpers import secs_to_hours
 
 FIELDNAMES = ['wav_filename', 'wav_filesize', 'transcript']
@@ -182,6 +182,7 @@ if __name__ == "__main__":
     CLI_ARGS = handle_args()
     ALPHABET = Alphabet(CLI_ARGS.filter_alphabet) if CLI_ARGS.filter_alphabet else None
     SKIP_LIST = filter(None, CLI_ARGS.skiplist.split(','))
+    validate_label = get_validate_label(CLI_ARGS)
 
     def label_filter(label):
         if CLI_ARGS.normalize:
diff --git a/bin/import_slr57.py b/bin/import_slr57.py
index 16bac05b..b5bbef9c 100755
--- a/bin/import_slr57.py
+++ b/bin/import_slr57.py
@@ -7,7 +7,7 @@ import os
 import sys
 sys.path.insert(1, os.path.join(sys.path[0], '..'))
 
-from util.importers import get_importers_parser
+from util.importers import get_importers_parser, get_validate_label
 
 import csv
 import re
@@ -27,7 +27,7 @@ from os import path
 from glob import glob
 
 from util.downloader import maybe_download
-from util.text import Alphabet, validate_label
+from util.text import Alphabet
 from util.helpers import secs_to_hours
 
 FIELDNAMES = ['wav_filename', 'wav_filesize', 'transcript']
@@ -203,6 +203,7 @@ def handle_args():
 if __name__ == "__main__":
     CLI_ARGS = handle_args()
     ALPHABET = Alphabet(CLI_ARGS.filter_alphabet) if CLI_ARGS.filter_alphabet else None
+    validate_label = get_validate_label(CLI_ARGS)
 
     def label_filter(label):
         if CLI_ARGS.normalize:
diff --git a/bin/import_swb.py b/bin/import_swb.py
index e4261aa2..b682ae30 100755
--- a/bin/import_swb.py
+++ b/bin/import_swb.py
@@ -20,7 +20,7 @@ import wave
 import codecs
 import tarfile
 import requests
-from util.text import validate_label
+from util.importers import validate_label_eng as validate_label
 import librosa
 import soundfile # <= Has an external dependency on libsndfile
 
diff --git a/bin/import_swc.py b/bin/import_swc.py
index 93410805..e5114156 100755
--- a/bin/import_swc.py
+++ b/bin/import_swc.py
@@ -27,7 +27,8 @@ from os import path
 from glob import glob
 from collections import Counter
 from multiprocessing.pool import ThreadPool
-from util.text import Alphabet, validate_label
+from util.text import Alphabet
+from util.importers import validate_label_eng as validate_label
 from util.downloader import maybe_download, SIMPLE_BAR
 
 SWC_URL = "https://www2.informatik.uni-hamburg.de/nats/pub/SWC/SWC_{language}.tar"
diff --git a/bin/import_ts.py b/bin/import_ts.py
index 363e639e..d899f1a3 100755
--- a/bin/import_ts.py
+++ b/bin/import_ts.py
@@ -8,7 +8,7 @@ import re
 import sys
 sys.path.insert(1, os.path.join(sys.path[0], '..'))
 
-from util.importers import get_importers_parser
+from util.importers import get_importers_parser, get_validate_label
 
 import csv
 import unidecode
@@ -25,7 +25,6 @@ from util.downloader import SIMPLE_BAR
 from os import path
 
 from util.downloader import maybe_download
-from util.text import validate_label
 from util.helpers import secs_to_hours
 
 FIELDNAMES = ['wav_filename', 'wav_filesize', 'transcript']
@@ -193,4 +192,5 @@ def handle_args():
 
 if __name__ == "__main__":
     cli_args = handle_args()
+    validate_label = get_validate_label(cli_args)
     _download_and_preprocess_data(cli_args.target_dir, cli_args.english_compatible)
diff --git a/bin/import_tuda.py b/bin/import_tuda.py
index 89590144..857be405 100755
--- a/bin/import_tuda.py
+++ b/bin/import_tuda.py
@@ -21,7 +21,8 @@ import xml.etree.cElementTree as ET
 from os import path
 from collections import Counter
 
-from util.text import Alphabet, validate_label
+from util.text import Alphabet
+from util.importers import validate_label_eng as validate_label
 from util.downloader import maybe_download, SIMPLE_BAR
 
 TUDA_VERSION = 'v2'
diff --git a/requirements_tests.txt b/requirements_tests.txt
index b998a06a..1e472e22 100644
--- a/requirements_tests.txt
+++ b/requirements_tests.txt
@@ -1 +1,2 @@
 absl-py
+argparse
diff --git a/util/importers.py b/util/importers.py
index 9f7ba8df..3efec973 100644
--- a/util/importers.py
+++ b/util/importers.py
@@ -1,10 +1,38 @@
 import argparse
+import importlib
+import os
 import re
+import sys
 
 def get_importers_parser(description):
     parser = argparse.ArgumentParser(description=description)
+    parser.add_argument('--validate_label_locale', help='Path to a Python file defining a |validate_label| function for your locale. WARNING: THIS WILL ADD THIS FILE\'S DIRECTORY INTO PYTHONPATH.')
     return parser
 
+def get_validate_label(args):
+    """
+    Expects an argparse.Namespace argument to search for validate_label_locale parameter.
+    If found, this will modify Python's library search path and add the directory of the
+    file pointed by the validate_label_locale argument.
+
+    :param args: The importer's CLI argument object
+    :type args: argparse.Namespace
+
+    :return: The user-supplied validate_label function
+    :rtype: function
+    """
+    if 'validate_label_locale' not in args or (args.validate_label_locale is None):
+        print('WARNING: No --validate_label_locale specified, you might end up with an inconsistent dataset.')
+        return validate_label_eng
+    if not os.path.exists(os.path.abspath(args.validate_label_locale)):
+        print('ERROR: Nonexistent --validate_label_locale specified. Please check.')
+        return None
+    module_dir = os.path.abspath(os.path.dirname(args.validate_label_locale))
+    sys.path.insert(1, module_dir)
+    fname = os.path.basename(args.validate_label_locale).replace('.py', '')
+    locale_module = importlib.import_module(fname, package=None)
+    return locale_module.validate_label
+
 # Validate and normalize transcriptions. Returns a cleaned version of the label
 # or None if it's invalid.
 def validate_label_eng(label):
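A locale file only has to expose a validate_label(label) function; the trivial util/test_data/validate_locale_fra.py added next is the test fixture. A more realistic French validator might look like the following — the file name and the cleaning rules are hypothetical, not part of the patch:

# validate_locale_fr.py -- hypothetical example, passed via --validate_label_locale
import re

def validate_label(label):
    # Reject digits, but keep the accented characters French needs.
    if re.search(r"[0-9]", label) is not None:
        return None
    label = re.sub(r"[.,;?!:\"]", "", label)      # strip punctuation
    label = re.sub(r" {2,}", " ", label).strip().lower()
    return label if label else None

An importer would then be invoked along the lines of (paths are illustrative):

    bin/import_cv2.py --validate_label_locale path/to/validate_locale_fr.py /data/cv/fr
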
diff --git a/util/test_data/validate_locale_fra.py b/util/test_data/validate_locale_fra.py
new file mode 100644
index 00000000..4265fcde
--- /dev/null
+++ b/util/test_data/validate_locale_fra.py
@@ -0,0 +1,2 @@
+def validate_label(label):
+    return label
diff --git a/util/test_importers.py b/util/test_importers.py
index 884c2193..281e4ee1 100644
--- a/util/test_importers.py
+++ b/util/test_importers.py
@@ -1,6 +1,7 @@
 import unittest
 
-from .importers import validate_label_eng
+from argparse import Namespace
+from .importers import validate_label_eng, get_validate_label
 
 class TestValidateLabelEng(unittest.TestCase):
 
@@ -8,5 +9,30 @@ class TestValidateLabelEng(unittest.TestCase):
         label = validate_label_eng("this is a 1 2 3 test")
         self.assertEqual(label, None)
 
+class TestGetValidateLabel(unittest.TestCase):
+
+    def test_no_validate_label_locale(self):
+        f = get_validate_label(Namespace())
+        self.assertEqual(f('toto'), 'toto')
+        self.assertEqual(f('toto1234'), None)
+        self.assertEqual(f('toto1234[{[{[]'), None)
+
+    def test_validate_label_locale_default(self):
+        f = get_validate_label(Namespace(validate_label_locale=None))
+        self.assertEqual(f('toto'), 'toto')
+        self.assertEqual(f('toto1234'), None)
+        self.assertEqual(f('toto1234[{[{[]'), None)
+
+    def test_get_validate_label_missing(self):
+        args = Namespace(validate_label_locale='util/test_data/validate_locale_ger.py')
+        f = get_validate_label(args)
+        self.assertEqual(f, None)
+
+    def test_get_validate_label(self):
+        args = Namespace(validate_label_locale='util/test_data/validate_locale_fra.py')
+        f = get_validate_label(args)
+        l = f('toto')
+        self.assertEqual(l, 'toto')
+
 if __name__ == '__main__':
     unittest.main()

From 7b2a409f9fcc9a69d30c0d5e6202634e928ba670 Mon Sep 17 00:00:00 2001
From: Alexandre Lissy
Date: Wed, 11 Mar 2020 13:37:18 +0100
Subject: [PATCH 3/3] Converting importers from multiprocessing.dummy to multiprocessing

Fixes #2817
---
 bin/import_cv.py           |  93 +++++++++++++++----------------
 bin/import_cv2.py          | 110 ++++++++++++++++++-------------------
 bin/import_lingua_libre.py | 100 ++++++++++++++++-----------------
 bin/import_m-ailabs.py     |  91 +++++++++++++++---------------
 bin/import_slr57.py        |  89 +++++++++++++++---------------
 bin/import_ts.py           | 109 ++++++++++++++++++------------------
 bin/import_vctk.py         |  66 +++++++++++-----------
 requirements_tests.txt     |   1 +
 util/importers.py          |  21 +++++++
 9 files changed, 340 insertions(+), 340 deletions(-)

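multiprocessing.dummy.Pool runs thread workers inside one process, so the importers could get away with closures that mutated shared locals under an RLock. A real multiprocessing.Pool pickles each work item, runs it in a separate process, and pickles the result back — so the per-sample function must be a module-level function and cannot mutate shared state. That is why every one_sample below moves to top level and returns its own (counter, rows) contribution for the parent to merge. A minimal sketch of the pattern; it assumes the counter is a collections.Counter, which is what the `counter += processed[0]` merging in the hunks below implies (the actual get_counter() hunk is truncated at the end of this series):

import collections
from multiprocessing import Pool

def one_sample(sample):
    # Must live at module level: Pool pickles the function reference and the
    # sample, runs it in a child process, and pickles the result back.
    counter = collections.Counter()
    rows = []
    if len(sample) % 2 == 0:        # stand-in for the real validation logic
        rows.append(sample)
        counter['all'] += 1
    else:
        counter['failed'] += 1
    return (counter, rows)

if __name__ == '__main__':
    samples = ['ab', 'abc', 'abcd']
    counter = collections.Counter()
    rows = []
    with Pool() as pool:
        # Children never share state; the parent folds the partial results.
        for partial_counter, partial_rows in pool.imap_unordered(one_sample, samples):
            counter += partial_counter
            rows += partial_rows
    print(counter, rows)
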
diff --git a/bin/import_cv.py b/bin/import_cv.py
index a9b9447e..ec326d8c 100755
--- a/bin/import_cv.py
+++ b/bin/import_cv.py
@@ -15,10 +15,8 @@ import progressbar
 
 from glob import glob
 from os import path
-from threading import RLock
-from multiprocessing.dummy import Pool
-from multiprocessing import cpu_count
-from util.importers import validate_label_eng as validate_label
+from multiprocessing import Pool
+from util.importers import validate_label_eng as validate_label, get_counter, get_imported_samples, print_import_report
 from util.downloader import maybe_download, SIMPLE_BAR
 
 FIELDNAMES = ['wav_filename', 'wav_filesize', 'transcript']
@@ -53,6 +51,38 @@ def _maybe_convert_sets(target_dir, extracted_data):
     for source_csv in glob(path.join(extracted_dir, '*.csv')):
         _maybe_convert_set(extracted_dir, source_csv, path.join(target_dir, os.path.split(source_csv)[-1]))
 
+def one_sample(sample):
+    mp3_filename = sample[0]
+    # Storing wav files next to the mp3 ones - just with a different suffix
+    wav_filename = path.splitext(mp3_filename)[0] + ".wav"
+    _maybe_convert_wav(mp3_filename, wav_filename)
+    frames = int(subprocess.check_output(['soxi', '-s', wav_filename], stderr=subprocess.STDOUT))
+    file_size = -1
+    if path.exists(wav_filename):
+        file_size = path.getsize(wav_filename)
+        frames = int(subprocess.check_output(['soxi', '-s', wav_filename], stderr=subprocess.STDOUT))
+    label = validate_label(sample[1])
+    rows = []
+    counter = get_counter()
+    if file_size == -1:
+        # Excluding samples that failed upon conversion
+        counter['failed'] += 1
+    elif label is None:
+        # Excluding samples that failed on label validation
+        counter['invalid_label'] += 1
+    elif int(frames/SAMPLE_RATE*1000/10/2) < len(str(label)):
+        # Excluding samples that are too short to fit the transcript
+        counter['too_short'] += 1
+    elif frames/SAMPLE_RATE > MAX_SECS:
+        # Excluding very long samples to keep a reasonable batch-size
+        counter['too_long'] += 1
+    else:
+        # This one is good - keep it for the target CSV
+        rows.append((wav_filename, file_size, label))
+        counter['all'] += 1
+        counter['total_time'] += frames
+    return (counter, rows)
+
 def _maybe_convert_set(extracted_dir, source_csv, target_csv):
     print()
     if path.exists(target_csv):
@@ -63,48 +93,19 @@ def _maybe_convert_set(extracted_dir, source_csv, target_csv):
     with open(source_csv) as source_csv_file:
         reader = csv.DictReader(source_csv_file)
         for row in reader:
-            samples.append((row['filename'], row['text']))
+            samples.append((os.path.join(extracted_dir, row['filename']), row['text']))
 
     # Mutable counters for the concurrent embedded routine
-    counter = { 'all': 0, 'failed': 0, 'invalid_label': 0, 'too_short': 0, 'too_long': 0 }
-    lock = RLock()
+    counter = get_counter()
     num_samples = len(samples)
     rows = []
 
-    def one_sample(sample):
-        mp3_filename = path.join(*(sample[0].split('/')))
-        mp3_filename = path.join(extracted_dir, mp3_filename)
-        # Storing wav files next to the mp3 ones - just with a different suffix
-        wav_filename = path.splitext(mp3_filename)[0] + ".wav"
-        _maybe_convert_wav(mp3_filename, wav_filename)
-        frames = int(subprocess.check_output(['soxi', '-s', wav_filename], stderr=subprocess.STDOUT))
-        file_size = -1
-        if path.exists(wav_filename):
-            file_size = path.getsize(wav_filename)
-            frames = int(subprocess.check_output(['soxi', '-s', wav_filename], stderr=subprocess.STDOUT))
-        label = validate_label(sample[1])
-        with lock:
-            if file_size == -1:
-                # Excluding samples that failed upon conversion
-                counter['failed'] += 1
-            elif label is None:
-                # Excluding samples that failed on label validation
-                counter['invalid_label'] += 1
-            elif int(frames/SAMPLE_RATE*1000/10/2) < len(str(label)):
-                # Excluding samples that are too short to fit the transcript
-                counter['too_short'] += 1
-            elif frames/SAMPLE_RATE > MAX_SECS:
-                # Excluding very long samples to keep a reasonable batch-size
-                counter['too_long'] += 1
-            else:
-                # This one is good - keep it for the target CSV
-                rows.append((wav_filename, file_size, label))
-                counter['all'] += 1
-
     print('Importing mp3 files...')
-    pool = Pool(cpu_count())
+    pool = Pool()
     bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
-    for i, _ in enumerate(pool.imap_unordered(one_sample, samples), start=1):
+    for i, processed in enumerate(pool.imap_unordered(one_sample, samples), start=1):
+        counter += processed[0]
+        rows += processed[1]
         bar.update(i)
     bar.update(num_samples)
     pool.close()
@@ -118,15 +119,11 @@ def _maybe_convert_set(extracted_dir, source_csv, target_csv):
         for filename, file_size, transcript in bar(rows):
             writer.writerow({ 'wav_filename': filename, 'wav_filesize': file_size, 'transcript': transcript })
 
-    print('Imported %d samples.' % (counter['all'] - counter['failed'] - counter['too_short'] - counter['too_long']))
-    if counter['failed'] > 0:
-        print('Skipped %d samples that failed upon conversion.' % counter['failed'])
-    if counter['invalid_label'] > 0:
-        print('Skipped %d samples that failed on transcript validation.' % counter['invalid_label'])
-    if counter['too_short'] > 0:
-        print('Skipped %d samples that were too short to match the transcript.' % counter['too_short'])
-    if counter['too_long'] > 0:
-        print('Skipped %d samples that were longer than %d seconds.' % (counter['too_long'], MAX_SECS))
+    imported_samples = get_imported_samples(counter)
+    assert counter['all'] == num_samples
+    assert len(rows) == imported_samples
+
+    print_import_report(counter, SAMPLE_RATE, MAX_SECS)
 
 def _maybe_convert_wav(mp3_filename, wav_filename):
     if not path.exists(wav_filename):
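The too-short guard `int(frames/SAMPLE_RATE*1000/10/2) < len(str(label))` deserves a quick unpacking: frames/SAMPLE_RATE*1000 is the clip length in milliseconds, and /10/2 turns that into a number of 20 ms steps — presumably one acoustic-model output step per 20 ms of audio. A transcript cannot be aligned against fewer output steps than it has characters, so such samples are dropped. Worked through for a hypothetical clip:

# A 5-second clip at 16 kHz:
frames = 5 * 16000                 # 80000 samples
ms = frames / 16000 * 1000         # 5000 ms
steps = ms / 10 / 2                # 250 output steps (~one per 20 ms)
# A transcript longer than 250 characters could never fit into
# 250 CTC output steps, so the sample is counted as 'too_short'.
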
diff --git a/bin/import_cv2.py b/bin/import_cv2.py
index 7f8222d7..474202be 100755
--- a/bin/import_cv2.py
+++ b/bin/import_cv2.py
@@ -21,13 +21,10 @@ import progressbar
 import unicodedata
 
 from os import path
-from threading import RLock
-from multiprocessing.dummy import Pool
-from multiprocessing import cpu_count
+from multiprocessing import Pool
 from util.downloader import SIMPLE_BAR
 from util.text import Alphabet
-from util.importers import get_importers_parser, get_validate_label
-from util.helpers import secs_to_hours
+from util.importers import get_importers_parser, get_validate_label, get_counter, get_imported_samples, print_import_report
 
 FIELDNAMES = ['wav_filename', 'wav_filesize', 'transcript']
@@ -35,15 +32,50 @@ SAMPLE_RATE = 16000
 MAX_SECS = 10
 
 
-def _preprocess_data(tsv_dir, audio_dir, label_filter, space_after_every_character=False):
+def _preprocess_data(tsv_dir, audio_dir, space_after_every_character=False):
     for dataset in ['train', 'test', 'dev', 'validated', 'other']:
         input_tsv = path.join(path.abspath(tsv_dir), dataset+".tsv")
         if os.path.isfile(input_tsv):
             print("Loading TSV file: ", input_tsv)
-            _maybe_convert_set(input_tsv, audio_dir, label_filter, space_after_every_character)
+            _maybe_convert_set(input_tsv, audio_dir, space_after_every_character)
 
+def one_sample(sample):
+    """ Take an audio file, and optionally convert it to 16kHz WAV """
+    mp3_filename = sample[0]
+    if not path.splitext(mp3_filename.lower())[1] == '.mp3':
+        mp3_filename += ".mp3"
+    # Storing wav files next to the mp3 ones - just with a different suffix
+    wav_filename = path.splitext(mp3_filename)[0] + ".wav"
+    _maybe_convert_wav(mp3_filename, wav_filename)
+    file_size = -1
+    frames = 0
+    if path.exists(wav_filename):
+        file_size = path.getsize(wav_filename)
+        frames = int(subprocess.check_output(['soxi', '-s', wav_filename], stderr=subprocess.STDOUT))
+    label = label_filter_fun(sample[1])
+    rows = []
+    counter = get_counter()
+    if file_size == -1:
+        # Excluding samples that failed upon conversion
+        counter['failed'] += 1
+    elif label is None:
+        # Excluding samples that failed on label validation
+        counter['invalid_label'] += 1
+    elif int(frames/SAMPLE_RATE*1000/10/2) < len(str(label)):
+        # Excluding samples that are too short to fit the transcript
+        counter['too_short'] += 1
+    elif frames/SAMPLE_RATE > MAX_SECS:
+        # Excluding very long samples to keep a reasonable batch-size
+        counter['too_long'] += 1
+    else:
+        # This one is good - keep it for the target CSV
+        rows.append((os.path.split(wav_filename)[-1], file_size, label))
+        counter['all'] += 1
+        counter['total_time'] += frames
+
+    return (counter, rows)
+
-def _maybe_convert_set(input_tsv, audio_dir, label_filter, space_after_every_character=None):
+def _maybe_convert_set(input_tsv, audio_dir, space_after_every_character=None):
     output_csv = path.join(audio_dir, os.path.split(input_tsv)[-1].replace('tsv', 'csv'))
     print("Saving new DeepSpeech-formatted CSV file to: ", output_csv)
@@ -52,51 +84,18 @@ def _maybe_convert_set(input_tsv, audio_dir, label_filter, space_after_every_cha
     with open(input_tsv, encoding='utf-8') as input_tsv_file:
         reader = csv.DictReader(input_tsv_file, delimiter='\t')
         for row in reader:
-            samples.append((row['path'], row['sentence']))
+            samples.append((path.join(audio_dir, row['path']), row['sentence']))
 
-    # Keep track of how many samples are good vs. problematic
-    counter = {'all': 0, 'failed': 0, 'invalid_label': 0, 'too_short': 0, 'too_long': 0, 'total_time': 0}
-    lock = RLock()
+    counter = get_counter()
     num_samples = len(samples)
     rows = []
 
-    def one_sample(sample):
-        """ Take a audio file, and optionally convert it to 16kHz WAV """
-        mp3_filename = path.join(audio_dir, sample[0])
-        if not path.splitext(mp3_filename.lower())[1] == '.mp3':
-            mp3_filename += ".mp3"
-        # Storing wav files next to the mp3 ones - just with a different suffix
-        wav_filename = path.splitext(mp3_filename)[0] + ".wav"
-        _maybe_convert_wav(mp3_filename, wav_filename)
-        file_size = -1
-        frames = 0
-        if path.exists(wav_filename):
-            file_size = path.getsize(wav_filename)
-            frames = int(subprocess.check_output(['soxi', '-s', wav_filename], stderr=subprocess.STDOUT))
-        label = label_filter(sample[1])
-        with lock:
-            if file_size == -1:
-                # Excluding samples that failed upon conversion
-                counter['failed'] += 1
-            elif label is None:
-                # Excluding samples that failed on label validation
-                counter['invalid_label'] += 1
-            elif int(frames/SAMPLE_RATE*1000/10/2) < len(str(label)):
-                # Excluding samples that are too short to fit the transcript
-                counter['too_short'] += 1
-            elif frames/SAMPLE_RATE > MAX_SECS:
-                # Excluding very long samples to keep a reasonable batch-size
-                counter['too_long'] += 1
-            else:
-                # This one is good - keep it for the target CSV
-                rows.append((os.path.split(wav_filename)[-1], file_size, label))
-                counter['all'] += 1
-                counter['total_time'] += frames
-
     print("Importing mp3 files...")
-    pool = Pool(cpu_count())
+    pool = Pool()
     bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
-    for i, _ in enumerate(pool.imap_unordered(one_sample, samples), start=1):
+    for i, processed in enumerate(pool.imap_unordered(one_sample, samples), start=1):
+        counter += processed[0]
+        rows += processed[1]
         bar.update(i)
     bar.update(num_samples)
     pool.close()
@@ -113,16 +112,11 @@ def _maybe_convert_set(input_tsv, audio_dir, label_filter, space_after_every_cha
         else:
             writer.writerow({'wav_filename': filename, 'wav_filesize': file_size, 'transcript': transcript})
 
-    print('Imported %d samples.' % (counter['all'] - counter['failed'] - counter['too_short'] - counter['too_long']))
-    if counter['failed'] > 0:
-        print('Skipped %d samples that failed upon conversion.' % counter['failed'])
-    if counter['invalid_label'] > 0:
-        print('Skipped %d samples that failed on transcript validation.' % counter['invalid_label'])
-    if counter['too_short'] > 0:
-        print('Skipped %d samples that were too short to match the transcript.' % counter['too_short'])
-    if counter['too_long'] > 0:
-        print('Skipped %d samples that were longer than %d seconds.' % (counter['too_long'], MAX_SECS))
-    print('Final amount of imported audio: %s.' % secs_to_hours(counter['total_time'] / SAMPLE_RATE))
+    imported_samples = get_imported_samples(counter)
+    assert counter['all'] == num_samples
+    assert len(rows) == imported_samples
+
+    print_import_report(counter, SAMPLE_RATE, MAX_SECS)
 
 
 def _maybe_convert_wav(mp3_filename, wav_filename):
@@ -162,4 +156,4 @@ if __name__ == "__main__":
             label = None
         return label
 
-    _preprocess_data(PARAMS.tsv_dir, AUDIO_DIR, label_filter_fun, PARAMS.space_after_every_character)
+    _preprocess_data(PARAMS.tsv_dir, AUDIO_DIR, PARAMS.space_after_every_character)
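One subtlety in import_cv2.py: the module-level one_sample() calls label_filter_fun, which is only defined inside the __main__ block. With a fork-based start method (the multiprocessing default on Linux), child processes inherit the parent's module globals at fork time, so the lookup succeeds; under a spawn start method it would raise NameError. A toy illustration of that inherited-global pattern — names here are illustrative, not from the patch:

import multiprocessing

def worker(x):
    # CONFIG is not passed in: the child finds it in its inherited globals.
    return CONFIG['scale'] * x

if __name__ == '__main__':
    # Defined before the Pool starts; fork-based children inherit it.
    CONFIG = {'scale': 10}
    with multiprocessing.Pool() as pool:
        print(pool.map(worker, [1, 2, 3]))  # [10, 20, 30] with the fork start method
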
diff --git a/bin/import_lingua_libre.py b/bin/import_lingua_libre.py
index bc11203d..493f28a0 100755
--- a/bin/import_lingua_libre.py
+++ b/bin/import_lingua_libre.py
@@ -7,7 +7,7 @@ import os
 import sys
 sys.path.insert(1, os.path.join(sys.path[0], '..'))
 
-from util.importers import get_importers_parser, get_validate_label
+from util.importers import get_importers_parser, get_validate_label, get_counter, get_imported_samples, print_import_report
 
 import argparse
 import csv
@@ -18,9 +18,7 @@ import subprocess
 import progressbar
 import unicodedata
 
-from threading import RLock
-from multiprocessing.dummy import Pool
-from multiprocessing import cpu_count
+from multiprocessing import Pool
 from util.downloader import SIMPLE_BAR
 
 from os import path
@@ -28,7 +26,6 @@ from glob import glob
 
 from util.downloader import maybe_download
 from util.text import Alphabet
-from util.helpers import secs_to_hours
 
 FIELDNAMES = ['wav_filename', 'wav_filesize', 'transcript']
 SAMPLE_RATE = 16000
@@ -61,6 +58,41 @@ def _maybe_extract(target_dir, extracted_data, archive_path):
     else:
         print('Found directory "%s" - not extracting it from archive.' % archive_path)
 
+def one_sample(sample):
+    """ Take an audio file, and optionally convert it to 16kHz WAV """
+    ogg_filename = sample[0]
+    # Storing wav files next to the ogg ones - just with a different suffix
+    wav_filename = path.splitext(ogg_filename)[0] + ".wav"
+    _maybe_convert_wav(ogg_filename, wav_filename)
+    file_size = -1
+    frames = 0
+    if path.exists(wav_filename):
+        file_size = path.getsize(wav_filename)
+        frames = int(subprocess.check_output(['soxi', '-s', wav_filename], stderr=subprocess.STDOUT))
+    label = label_filter(sample[1])
+    rows = []
+    counter = get_counter()
+
+    if file_size == -1:
+        # Excluding samples that failed upon conversion
+        counter['failed'] += 1
+    elif label is None:
+        # Excluding samples that failed on label validation
+        counter['invalid_label'] += 1
+    elif int(frames/SAMPLE_RATE*1000/10/2) < len(str(label)):
+        # Excluding samples that are too short to fit the transcript
+        counter['too_short'] += 1
+    elif frames/SAMPLE_RATE > MAX_SECS:
+        # Excluding very long samples to keep a reasonable batch-size
+        counter['too_long'] += 1
+    else:
+        # This one is good - keep it for the target CSV
+        rows.append((wav_filename, file_size, label))
+        counter['all'] += 1
+        counter['total_time'] += frames
+
+    return (counter, rows)
+
 def _maybe_convert_sets(target_dir, extracted_data):
     extracted_dir = path.join(target_dir, extracted_data)
     # override existing CSV with normalized one
@@ -76,49 +108,18 @@ def _maybe_convert_sets(target_dir, extracted_data):
     for record in glob(glob_dir, recursive=True):
         record_file = record.replace(ogg_root_dir + os.path.sep, '')
         if record_filter(record_file):
-            samples.append((record_file, os.path.splitext(os.path.basename(record_file))[0]))
+            samples.append((os.path.join(ogg_root_dir, record_file), os.path.splitext(os.path.basename(record_file))[0]))
 
-    # Keep track of how many samples are good vs. problematic
-    counter = {'all': 0, 'failed': 0, 'invalid_label': 0, 'too_short': 0, 'too_long': 0, 'total_time': 0}
-    lock = RLock()
+    counter = get_counter()
     num_samples = len(samples)
     rows = []
 
-    def one_sample(sample):
-        """ Take a audio file, and optionally convert it to 16kHz WAV """
-        ogg_filename = path.join(ogg_root_dir, sample[0])
-        # Storing wav files next to the ogg ones - just with a different suffix
-        wav_filename = path.splitext(ogg_filename)[0] + ".wav"
-        _maybe_convert_wav(ogg_filename, wav_filename)
-        file_size = -1
-        frames = 0
-        if path.exists(wav_filename):
-            file_size = path.getsize(wav_filename)
-            frames = int(subprocess.check_output(['soxi', '-s', wav_filename], stderr=subprocess.STDOUT))
-        label = label_filter(sample[1])
-        with lock:
-            if file_size == -1:
-                # Excluding samples that failed upon conversion
-                counter['failed'] += 1
-            elif label is None:
-                # Excluding samples that failed on label validation
-                counter['invalid_label'] += 1
-            elif int(frames/SAMPLE_RATE*1000/10/2) < len(str(label)):
-                # Excluding samples that are too short to fit the transcript
-                counter['too_short'] += 1
-            elif frames/SAMPLE_RATE > MAX_SECS:
-                # Excluding very long samples to keep a reasonable batch-size
-                counter['too_long'] += 1
-            else:
-                # This one is good - keep it for the target CSV
-                rows.append((wav_filename, file_size, label))
-                counter['all'] += 1
-                counter['total_time'] += frames
-
     print("Importing ogg files...")
-    pool = Pool(cpu_count())
+    pool = Pool()
     bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
-    for i, _ in enumerate(pool.imap_unordered(one_sample, samples), start=1):
+    for i, processed in enumerate(pool.imap_unordered(one_sample, samples), start=1):
+        counter += processed[0]
+        rows += processed[1]
         bar.update(i)
     bar.update(num_samples)
     pool.close()
@@ -152,16 +153,11 @@ def _maybe_convert_sets(target_dir, extracted_data):
                     transcript=transcript,
                 ))
 
-    print('Imported %d samples.' % (counter['all'] - counter['failed'] - counter['too_short'] - counter['too_long']))
-    if counter['failed'] > 0:
-        print('Skipped %d samples that failed upon conversion.' % counter['failed'])
-    if counter['invalid_label'] > 0:
-        print('Skipped %d samples that failed on transcript validation.' % counter['invalid_label'])
-    if counter['too_short'] > 0:
-        print('Skipped %d samples that were too short to match the transcript.' % counter['too_short'])
-    if counter['too_long'] > 0:
-        print('Skipped %d samples that were longer than %d seconds.' % (counter['too_long'], MAX_SECS))
-    print('Final amount of imported audio: %s.' % secs_to_hours(counter['total_time'] / SAMPLE_RATE))
+    imported_samples = get_imported_samples(counter)
+    assert counter['all'] == num_samples
+    assert len(rows) == imported_samples
+
+    print_import_report(counter, SAMPLE_RATE, MAX_SECS)
 
 def _maybe_convert_wav(ogg_filename, wav_filename):
     if not path.exists(wav_filename):
diff --git a/bin/import_m-ailabs.py b/bin/import_m-ailabs.py
index 540c8139..dc5b7cfe 100755
--- a/bin/import_m-ailabs.py
+++ b/bin/import_m-ailabs.py
@@ -9,7 +9,7 @@ import sys
 
 sys.path.insert(1, os.path.join(sys.path[0], '..'))
 
-from util.importers import get_importers_parser, get_validate_label
+from util.importers import get_importers_parser, get_validate_label, get_counter, get_imported_samples, print_import_report
 
 import csv
 import subprocess
@@ -17,9 +17,7 @@ import progressbar
 import unicodedata
 import tarfile
 
-from threading import RLock
-from multiprocessing.dummy import Pool
-from multiprocessing import cpu_count
+from multiprocessing import Pool
 from util.downloader import SIMPLE_BAR
 
 from os import path
@@ -27,7 +25,6 @@ from glob import glob
 
 from util.downloader import maybe_download
 from util.text import Alphabet
-from util.helpers import secs_to_hours
 
 FIELDNAMES = ['wav_filename', 'wav_filesize', 'transcript']
 SAMPLE_RATE = 16000
@@ -63,6 +60,38 @@ def _maybe_extract(target_dir, extracted_data, archive_path):
         print('Found directory "%s" - not extracting it from archive.' % archive_path)
 
+def one_sample(sample):
+    """ Take an audio file, and optionally convert it to 16kHz WAV """
+    wav_filename = sample[0]
+    file_size = -1
+    frames = 0
+    if path.exists(wav_filename):
+        file_size = path.getsize(wav_filename)
+        frames = int(subprocess.check_output(['soxi', '-s', wav_filename], stderr=subprocess.STDOUT))
+    label = label_filter(sample[1])
+    counter = get_counter()
+    rows = []
+
+    if file_size == -1:
+        # Excluding samples that failed upon conversion
+        print("conversion failure", wav_filename)
+        counter['failed'] += 1
+    elif label is None:
+        # Excluding samples that failed on label validation
+        counter['invalid_label'] += 1
+    elif int(frames/SAMPLE_RATE*1000/15/2) < len(str(label)):
+        # Excluding samples that are too short to fit the transcript
+        counter['too_short'] += 1
+    elif frames/SAMPLE_RATE > MAX_SECS:
+        # Excluding very long samples to keep a reasonable batch-size
+        counter['too_long'] += 1
+    else:
+        # This one is good - keep it for the target CSV
+        rows.append((wav_filename, file_size, label))
+        counter['all'] += 1
+        counter['total_time'] += frames
+    return (counter, rows)
+
 def _maybe_convert_sets(target_dir, extracted_data):
     extracted_dir = path.join(target_dir, extracted_data)
     # override existing CSV with normalized one
@@ -85,44 +114,16 @@ def _maybe_convert_sets(target_dir, extracted_data):
             transcript = re[2]
             samples.append((audio, transcript))
 
-    # Keep track of how many samples are good vs. problematic
-    counter = {'all': 0, 'failed': 0, 'invalid_label': 0, 'too_short': 0, 'too_long': 0, 'total_time': 0}
-    lock = RLock()
+    counter = get_counter()
     num_samples = len(samples)
     rows = []
 
-    def one_sample(sample):
-        """ Take a audio file, and optionally convert it to 16kHz WAV """
-        wav_filename = sample[0]
-        file_size = -1
-        frames = 0
-        if path.exists(wav_filename):
-            file_size = path.getsize(wav_filename)
-            frames = int(subprocess.check_output(['soxi', '-s', wav_filename], stderr=subprocess.STDOUT))
-        label = label_filter(sample[1])
-        with lock:
-            if file_size == -1:
-                # Excluding samples that failed upon conversion
-                counter['failed'] += 1
-            elif label is None:
-                # Excluding samples that failed on label validation
-                counter['invalid_label'] += 1
-            elif int(frames/SAMPLE_RATE*1000/15/2) < len(str(label)):
-                # Excluding samples that are too short to fit the transcript
-                counter['too_short'] += 1
-            elif frames/SAMPLE_RATE > MAX_SECS:
-                # Excluding very long samples to keep a reasonable batch-size
-                counter['too_long'] += 1
-            else:
-                # This one is good - keep it for the target CSV
-                rows.append((wav_filename, file_size, label))
-                counter['all'] += 1
-                counter['total_time'] += frames
-
     print("Importing WAV files...")
-    pool = Pool(cpu_count())
+    pool = Pool()
     bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
-    for i, _ in enumerate(pool.imap_unordered(one_sample, samples), start=1):
+    for i, processed in enumerate(pool.imap_unordered(one_sample, samples), start=1):
+        counter += processed[0]
+        rows += processed[1]
         bar.update(i)
     bar.update(num_samples)
     pool.close()
@@ -156,17 +157,11 @@ def _maybe_convert_sets(target_dir, extracted_data):
                     transcript=transcript,
                 ))
 
-    print('Imported %d samples.' % (counter['all'] - counter['failed'] - counter['too_short'] - counter['too_long']))
-    if counter['failed'] > 0:
-        print('Skipped %d samples that failed upon conversion.' % counter['failed'])
-    if counter['invalid_label'] > 0:
-        print('Skipped %d samples that failed on transcript validation.' % counter['invalid_label'])
-    if counter['too_short'] > 0:
-        print('Skipped %d samples that were too short to match the transcript.' % counter['too_short'])
-    if counter['too_long'] > 0:
-        print('Skipped %d samples that were longer than %d seconds.' % (counter['too_long'], MAX_SECS))
-    print('Final amount of imported audio: %s.' % secs_to_hours(counter['total_time'] / SAMPLE_RATE))
+    imported_samples = get_imported_samples(counter)
+    assert counter['all'] == num_samples
+    assert len(rows) == imported_samples
+    print_import_report(counter, SAMPLE_RATE, MAX_SECS)
 
 def handle_args():
     parser = get_importers_parser(description='Importer for M-AILABS dataset. https://www.caito.de/2019/01/the-m-ailabs-speech-dataset/.')
diff --git a/bin/import_slr57.py b/bin/import_slr57.py
index b5bbef9c..f11a78ed 100755
--- a/bin/import_slr57.py
+++ b/bin/import_slr57.py
@@ -7,7 +7,7 @@ import os
 import sys
 sys.path.insert(1, os.path.join(sys.path[0], '..'))
 
-from util.importers import get_importers_parser, get_validate_label
+from util.importers import get_importers_parser, get_validate_label, get_counter, get_imported_samples, print_import_report
 
 import csv
 import re
@@ -18,9 +18,7 @@ import progressbar
 import unicodedata
 import tarfile
 
-from threading import RLock
-from multiprocessing.dummy import Pool
-from multiprocessing import cpu_count
+from multiprocessing import Pool
 from util.downloader import SIMPLE_BAR
 
 from os import path
@@ -62,6 +60,37 @@ def _maybe_extract(target_dir, extracted_data, archive_path):
     else:
         print('Found directory "%s" - not extracting it from archive.' % archive_path)
 
+def one_sample(sample):
+    """ Take an audio file, and optionally convert it to 16kHz WAV """
+    wav_filename = sample[0]
+    file_size = -1
+    frames = 0
+    if path.exists(wav_filename):
+        file_size = path.getsize(wav_filename)
+        frames = int(subprocess.check_output(['soxi', '-s', wav_filename], stderr=subprocess.STDOUT))
+    label = label_filter(sample[1])
+    counter = get_counter()
+    rows = []
+    if file_size == -1:
+        # Excluding samples that failed upon conversion
+        counter['failed'] += 1
+    elif label is None:
+        # Excluding samples that failed on label validation
+        counter['invalid_label'] += 1
+    elif int(frames/SAMPLE_RATE*1000/15/2) < len(str(label)):
+        # Excluding samples that are too short to fit the transcript
+        counter['too_short'] += 1
+    elif frames/SAMPLE_RATE > MAX_SECS:
+        # Excluding very long samples to keep a reasonable batch-size
+        counter['too_long'] += 1
+    else:
+        # This one is good - keep it for the target CSV
+        rows.append((wav_filename, file_size, label))
+        counter['all'] += 1
+        counter['total_time'] += frames
+
+    return (counter, rows)
+
 def _maybe_convert_sets(target_dir, extracted_data):
     extracted_dir = path.join(target_dir, extracted_data)
     # override existing CSV with normalized one
@@ -112,43 +141,16 @@ def _maybe_convert_sets(target_dir, extracted_data):
             samples.append((record, transcripts[record_file]))
 
     # Keep track of how many samples are good vs. problematic
-    counter = {'all': 0, 'failed': 0, 'invalid_label': 0, 'too_short': 0, 'too_long': 0, 'total_time': 0}
-    lock = RLock()
+    counter = get_counter()
     num_samples = len(samples)
     rows = []
 
-    def one_sample(sample):
-        """ Take a audio file, and optionally convert it to 16kHz WAV """
-        wav_filename = sample[0]
-        file_size = -1
-        frames = 0
-        if path.exists(wav_filename):
-            file_size = path.getsize(wav_filename)
-            frames = int(subprocess.check_output(['soxi', '-s', wav_filename], stderr=subprocess.STDOUT))
-        label = label_filter(sample[1])
-        with lock:
-            if file_size == -1:
-                # Excluding samples that failed upon conversion
-                counter['failed'] += 1
-            elif label is None:
-                # Excluding samples that failed on label validation
-                counter['invalid_label'] += 1
-            elif int(frames/SAMPLE_RATE*1000/15/2) < len(str(label)):
-                # Excluding samples that are too short to fit the transcript
-                counter['too_short'] += 1
-            elif frames/SAMPLE_RATE > MAX_SECS:
-                # Excluding very long samples to keep a reasonable batch-size
-                counter['too_long'] += 1
-            else:
-                # This one is good - keep it for the target CSV
-                rows.append((wav_filename, file_size, label))
-                counter['all'] += 1
-                counter['total_time'] += frames
-
     print("Importing WAV files...")
-    pool = Pool(cpu_count())
+    pool = Pool()
     bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
-    for i, _ in enumerate(pool.imap_unordered(one_sample, samples), start=1):
+    for i, processed in enumerate(pool.imap_unordered(one_sample, samples), start=1):
+        counter += processed[0]
+        rows += processed[1]
         bar.update(i)
     bar.update(num_samples)
     pool.close()
@@ -182,16 +184,11 @@ def _maybe_convert_sets(target_dir, extracted_data):
                     transcript=transcript,
                 ))
 
-    print('Imported %d samples.' % (counter['all'] - counter['failed'] - counter['too_short'] - counter['too_long']))
-    if counter['failed'] > 0:
-        print('Skipped %d samples that failed upon conversion.' % counter['failed'])
-    if counter['invalid_label'] > 0:
-        print('Skipped %d samples that failed on transcript validation.' % counter['invalid_label'])
-    if counter['too_short'] > 0:
-        print('Skipped %d samples that were too short to match the transcript.' % counter['too_short'])
-    if counter['too_long'] > 0:
-        print('Skipped %d samples that were longer than %d seconds.' % (counter['too_long'], MAX_SECS))
-    print('Final amount of imported audio: %s.' % secs_to_hours(counter['total_time'] / SAMPLE_RATE))
+    imported_samples = get_imported_samples(counter)
+    assert counter['all'] == num_samples
+    assert len(rows) == imported_samples
+
+    print_import_report(counter, SAMPLE_RATE, MAX_SECS)
 
 def handle_args():
     parser = get_importers_parser(description='Importer for African Accented French dataset. More information on http://www.openslr.org/57/.')
-    counter = {'all': 0, 'failed': 0, 'invalid_label': 0, 'too_short': 0, 'too_long': 0, 'total_time': 0}
-    lock = RLock()
+    for line in data:
+        line['path'] = os.path.join(extracted_dir, line['path'])
+
     num_samples = len(data)
     rows = []
+    counter = get_counter()
 
-    wav_root_dir = extracted_dir
-
-    def one_sample(sample):
-        """ Take a audio file, and optionally convert it to 16kHz WAV """
-        orig_filename = path.join(wav_root_dir, sample['path'])
-        # Storing wav files next to the wav ones - just with a different suffix
-        wav_filename = path.splitext(orig_filename)[0] + ".converted.wav"
-        _maybe_convert_wav(orig_filename, wav_filename)
-        file_size = -1
-        frames = 0
-        if path.exists(wav_filename):
-            file_size = path.getsize(wav_filename)
-            frames = int(subprocess.check_output(['soxi', '-s', wav_filename], stderr=subprocess.STDOUT))
-        label = sample['text']
-        with lock:
-            if file_size == -1:
-                # Excluding samples that failed upon conversion
-                counter['failed'] += 1
-            elif label is None:
-                # Excluding samples that failed on label validation
-                counter['invalid_label'] += 1
-            elif int(frames/SAMPLE_RATE*1000/10/2) < len(str(label)):
-                # Excluding samples that are too short to fit the transcript
-                counter['too_short'] += 1
-            elif frames/SAMPLE_RATE > MAX_SECS:
-                # Excluding very long samples to keep a reasonable batch-size
-                counter['too_long'] += 1
-            else:
-                # This one is good - keep it for the target CSV
-                rows.append((wav_filename, file_size, label))
-            counter['all'] += 1
-            counter['total_time'] += frames
-
-    print("Importing wav files...")
-    pool = Pool(cpu_count())
+    print("Importing {} wav files...".format(num_samples))
+    pool = Pool()
     bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
-    for i, _ in enumerate(pool.imap_unordered(one_sample, data), start=1):
+    for i, processed in enumerate(pool.imap_unordered(one_sample, data), start=1):
+        counter += processed[0]
+        rows += processed[1]
         bar.update(i)
     bar.update(num_samples)
     pool.close()
@@ -131,7 +136,6 @@ def _maybe_convert_sets(target_dir, extracted_data, english_compatible=False):
         test_writer.writeheader()
 
         for i, item in enumerate(rows):
-            print('item', item)
            transcript = validate_label(cleanup_transcript(item[2], english_compatible=english_compatible))
            if not transcript:
                continue
@@ -149,16 +153,11 @@ def _maybe_convert_sets(target_dir, extracted_data, english_compatible=False):
                transcript=transcript,
            ))
 
-    print('Imported %d samples.' % (counter['all'] - counter['failed'] - counter['too_short'] - counter['too_long']))
-    if counter['failed'] > 0:
-        print('Skipped %d samples that failed upon conversion.' % counter['failed'])
-    if counter['invalid_label'] > 0:
-        print('Skipped %d samples that failed on transcript validation.' % counter['invalid_label'])
-    if counter['too_short'] > 0:
-        print('Skipped %d samples that were too short to match the transcript.' % counter['too_short'])
-    if counter['too_long'] > 0:
-        print('Skipped %d samples that were longer than %d seconds.' % (counter['too_long'], MAX_SECS))
-    print('Final amount of imported audio: %s.' % secs_to_hours(counter['total_time'] / SAMPLE_RATE))
+    imported_samples = get_imported_samples(counter)
+    assert counter['all'] == num_samples
+    assert len(rows) == imported_samples
+
+    print_import_report(counter, SAMPLE_RATE, MAX_SECS)
 
 def _maybe_convert_wav(orig_filename, wav_filename):
     if not path.exists(wav_filename):
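
The same refactor pattern as in the other importers: `one_sample` is hoisted to module level - presumably because `multiprocessing.Pool`, which the patch switches to from the thread-backed `multiprocessing.dummy.Pool`, pickles the mapped function and cannot pickle a nested closure - and it returns `(counter, rows)` for the parent process to merge. A runnable sketch of that plumbing (toy `one_sample`; filenames invented):

from collections import Counter
from multiprocessing import Pool

def one_sample(sample):
    # Module-level, hence picklable; returns results instead of touching shared state
    counter = Counter({'all': 1})
    rows = [(sample, 0, '')]  # (wav_filename, wav_filesize, transcript)
    return counter, rows

if __name__ == '__main__':
    counter, rows = Counter(), []
    with Pool() as pool:
        for partial_counter, partial_rows in pool.imap_unordered(one_sample, ['a.wav', 'b.wav']):
            counter += partial_counter
            rows += partial_rows
    print(counter['all'], len(rows))  # 2 2
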
diff --git a/bin/import_vctk.py b/bin/import_vctk.py
index 59e1fafe..68477706 100755
--- a/bin/import_vctk.py
+++ b/bin/import_vctk.py
@@ -14,13 +14,14 @@ import sys
 
 sys.path.insert(1, os.path.join(sys.path[0], ".."))
 
+from util.importers import get_counter, get_imported_samples, print_import_report
+
 import re
 import librosa
 import progressbar
 
 from os import path
-from multiprocessing.dummy import Pool
-from multiprocessing import cpu_count
+from multiprocessing import Pool
 from util.downloader import maybe_download, SIMPLE_BAR
 from zipfile import ZipFile
 
@@ -61,47 +62,46 @@ def _maybe_convert_sets(target_dir, extracted_data):
     extracted_dir = path.join(target_dir, extracted_data, "wav48")
     txt_dir = path.join(target_dir, extracted_data, "txt")
 
-    cnt = 1
     directory = os.path.expanduser(extracted_dir)
     srtd = len(sorted(os.listdir(directory)))
+    all_samples = []
 
     for target in sorted(os.listdir(directory)):
-        print(f"\nSpeaker {cnt} of {srtd}")
-        _maybe_convert_set(path.join(extracted_dir, os.path.split(target)[-1]))
-        cnt += 1
-
-    _write_csv(extracted_dir, txt_dir, target_dir)
-
-
-def _maybe_convert_set(target_csv):
-    def one_sample(sample):
-        if is_audio_file(sample):
-            sample = os.path.join(target_csv, sample)
-
-            y, sr = librosa.load(sample, sr=16000)
-
-            # Trim the beginning and ending silence
-            yt, index = librosa.effects.trim(y)  # pylint: disable=unused-variable
-
-            duration = librosa.get_duration(yt, sr)
-            if duration > MAX_SECS or duration < MIN_SECS:
-                os.remove(sample)
-            else:
-                librosa.output.write_wav(sample, yt, sr)
-
-    samples = sorted(os.listdir(target_csv))
-
-    num_samples = len(samples)
+        all_samples += _maybe_prepare_set(path.join(extracted_dir, os.path.split(target)[-1]))
+
+    num_samples = len(all_samples)
 
     print(f"Converting wav files to {SAMPLE_RATE}hz...")
-    pool = Pool(cpu_count())
+    pool = Pool()
     bar = progressbar.ProgressBar(max_value=num_samples, widgets=SIMPLE_BAR)
-    for i, _ in enumerate(pool.imap_unordered(one_sample, samples), start=1):
+    for i, _ in enumerate(pool.imap_unordered(one_sample, all_samples), start=1):
         bar.update(i)
     bar.update(num_samples)
     pool.close()
     pool.join()
 
+    _write_csv(extracted_dir, txt_dir, target_dir)
+
+def one_sample(sample):
+    if is_audio_file(sample):
+        y, sr = librosa.load(sample, sr=16000)
+
+        # Trim the beginning and ending silence
+        yt, index = librosa.effects.trim(y)  # pylint: disable=unused-variable
+
+        duration = librosa.get_duration(yt, sr)
+        if duration > MAX_SECS or duration < MIN_SECS:
+            os.remove(sample)
+        else:
+            librosa.output.write_wav(sample, yt, sr)
+
+
+def _maybe_prepare_set(target_csv):
+    samples = sorted(os.listdir(target_csv))
+    new_samples = []
+    for s in samples:
+        new_samples.append(os.path.join(target_csv, s))
+    samples = new_samples
+    return samples
 
 def _write_csv(extracted_dir, txt_dir, target_dir):
     print(f"Writing CSV file")
@@ -196,8 +196,8 @@ def load_txts(directory):
 AUDIO_EXTENSIONS = [".wav", "WAV"]
 
-def is_audio_file(filename):
-    return any(filename.endswith(extension) for extension in AUDIO_EXTENSIONS)
+def is_audio_file(filepath):
+    return any(os.path.basename(filepath).endswith(extension) for extension in AUDIO_EXTENSIONS)
 
 
 if __name__ == "__main__":
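
A side note on the new `_maybe_prepare_set`: the copy loop above is equivalent to a single list comprehension, which might read more idiomatically (a sketch of the equivalent form, not what the patch does):

import os

def _maybe_prepare_set(target_csv):
    # Same result as the loop version: the speaker's files, sorted, joined to their directory
    return [os.path.join(target_csv, s) for s in sorted(os.listdir(target_csv))]
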
diff --git a/requirements_tests.txt b/requirements_tests.txt
index 1e472e22..de689076 100644
--- a/requirements_tests.txt
+++ b/requirements_tests.txt
@@ -1,2 +1,3 @@
 absl-py
 argparse
+semver
diff --git a/util/importers.py b/util/importers.py
index 3efec973..50b87fa0 100644
--- a/util/importers.py
+++ b/util/importers.py
@@ -4,6 +4,27 @@ import os
 import re
 import sys
 
+from util.helpers import secs_to_hours
+from collections import Counter
+
+def get_counter():
+    return Counter({'all': 0, 'failed': 0, 'invalid_label': 0, 'too_short': 0, 'too_long': 0, 'total_time': 0})
+
+def get_imported_samples(counter):
+    return counter['all'] - counter['failed'] - counter['too_short'] - counter['too_long'] - counter['invalid_label']
+
+def print_import_report(counter, sample_rate, max_secs):
+    print('Imported %d samples.' % (get_imported_samples(counter)))
+    if counter['failed'] > 0:
+        print('Skipped %d samples that failed upon conversion.' % counter['failed'])
+    if counter['invalid_label'] > 0:
+        print('Skipped %d samples that failed on transcript validation.' % counter['invalid_label'])
+    if counter['too_short'] > 0:
+        print('Skipped %d samples that were too short to match the transcript.' % counter['too_short'])
+    if counter['too_long'] > 0:
+        print('Skipped %d samples that were longer than %d seconds.' % (counter['too_long'], max_secs))
+    print('Final amount of imported audio: %s.' % secs_to_hours(counter['total_time'] / sample_rate))
+
 def get_importers_parser(description):
     parser = argparse.ArgumentParser(description=description)
     parser.add_argument('--validate_label_locale', help='Path to a Python file defining a |validate_label| function for your locale. WARNING: THIS WILL ADD THIS FILE\'s DIRECTORY INTO PYTHONPATH.')
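
Taken together, the new helpers compose like this; a hypothetical end-of-import summary (all counts invented for the sketch):

from util.importers import get_counter, get_imported_samples, print_import_report

SAMPLE_RATE = 16000
MAX_SECS = 15  # assumed limit for this sketch

counter = get_counter()
# Tallies as they would arrive merged from the worker pool
counter.update({'all': 120, 'failed': 3, 'invalid_label': 2, 'too_short': 1, 'too_long': 4, 'total_time': 1760000})
assert get_imported_samples(counter) == 110  # 120 - 3 - 2 - 1 - 4
print_import_report(counter, SAMPLE_RATE, MAX_SECS)  # prints the skip/import summary
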