diff --git a/bin/import_fisher.py b/bin/import_fisher.py
index 9a9a2eef..e4f95983 100755
--- a/bin/import_fisher.py
+++ b/bin/import_fisher.py
@@ -16,8 +16,8 @@ import os
 import pandas
 import subprocess
 import unicodedata
-import wave
-import audioop
+import librosa
+import resampy
 
 from util.text import validate_label
 
@@ -142,7 +142,7 @@ def _split_wav_and_sentences(data_dir, trans_data, original_data, converted_data
 
     print("splitting {} according to {}".format(wav_files, trans_file))
 
-    origAudios = [wave.open(wav_file, "r") for wav_file in wav_files]
+    origAudios = [librosa.load(wav_file, sr=None, mono=False) for wav_file in wav_files]
 
     # Loop over segments and split wav_file for each segment
     for segment in segments:
@@ -160,26 +160,20 @@ def _split_wav_and_sentences(data_dir, trans_data, original_data, converted_data
             if transcript != None:
                 files.append((os.path.abspath(new_wav_file), new_wav_filesize, transcript))
 
-    # Close origAudios
-    for origAudio in origAudios:
-        origAudio.close()
-
     return pandas.DataFrame(data=files, columns=["wav_filename", "wav_filesize", "transcript"])
 
+def _split_audio(origAudio, start_time, stop_time):
+    audioData, frameRate = origAudio
+    nChannels = len(audioData.shape)
+    startIndex = int(start_time * frameRate)
+    stopIndex = int(stop_time * frameRate)
+    return audioData[startIndex: stopIndex] if 1 == nChannels else audioData[:, startIndex: stopIndex]
+
 def _split_and_resample_wav(origAudio, start_time, stop_time, new_wav_file):
-    nChannels = origAudio.getnchannels()
-    sampleWidth = origAudio.getsampwidth()
-    frameRate = origAudio.getframerate()
-    origAudio.setpos(int(start_time * frameRate))
-    chunkData = origAudio.readframes(int((stop_time - start_time) * frameRate))
-    # by doubling the frame-rate we effectively go from 8 kHz to 16 kHz
-    chunkData, _ = audioop.ratecv(chunkData, sampleWidth, nChannels, frameRate, 2 * frameRate, None)
-    chunkAudio = wave.open(new_wav_file, "w")
-    chunkAudio.setnchannels(nChannels)
-    chunkAudio.setsampwidth(sampleWidth)
-    chunkAudio.setframerate(2 * frameRate)
-    chunkAudio.writeframes(chunkData)
-    chunkAudio.close()
+    frameRate = origAudio[1]
+    chunkData = _split_audio(origAudio, start_time, stop_time)
+    chunkData = resampy.resample(chunkData, frameRate, 16000)
+    librosa.output.write_wav(new_wav_file, chunkData, 16000)
 
 def _split_sets(filelist):
     # We initially split the entire set into 80% train and 20% test, then
diff --git a/requirements.txt b/requirements.txt
index 8feeb860..22f03afd 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -15,3 +15,5 @@ requests
 tables
 attrdict
 setuptools
+librosa
+resampy