Remove sample rate parameter usage from examples

This commit is contained in:
Reuben Morais 2019-10-10 14:20:44 +02:00
parent 11ad23cc1f
commit baaa5842b2
4 changed files with 7 additions and 7 deletions

View File

@@ -95,7 +95,7 @@ const ffmpeg = spawn('ffmpeg', [
 ]);
 let audioLength = 0;
-let sctx = model.createStream(AUDIO_SAMPLE_RATE);
+let sctx = model.createStream();
 function finishStream() {
   const model_load_start = process.hrtime();
@@ -108,7 +108,7 @@ function finishStream() {
 function intermediateDecode() {
   finishStream();
-  sctx = model.createStream(AUDIO_SAMPLE_RATE);
+  sctx = model.createStream();
 }
 function feedAudioContent(chunk) {

View File

@@ -130,7 +130,7 @@ namespace DeepSpeechWPF
     watch.Start();
     await Task.Run(() =>
     {
-        string speechResult = _sttClient.SpeechToText(waveBuffer.ShortBuffer, Convert.ToUInt32(waveBuffer.MaxSize / 2), 16000);
+        string speechResult = _sttClient.SpeechToText(waveBuffer.ShortBuffer, Convert.ToUInt32(waveBuffer.MaxSize / 2));
     watch.Stop();
     Dispatcher.Invoke(() =>
     {
@@ -250,7 +250,7 @@ namespace DeepSpeechWPF
     private void BtnStartRecording_Click(object sender, RoutedEventArgs e)
     {
-        _sttClient.CreateStream(16000);
+        _sttClient.CreateStream();
         _audioCapture.Start();
         btnStartRecording.IsEnabled = false;
         btnStopRecording.IsEnabled = true;

View File

@@ -64,7 +64,7 @@ audioStream.on('finish', () => {
   const audioLength = (audioBuffer.length / 2) * ( 1 / 16000);
   console.log('audio length', audioLength);
-  let result = model.stt(audioBuffer.slice(0, audioBuffer.length / 2), 16000);
+  let result = model.stt(audioBuffer.slice(0, audioBuffer.length / 2));
   console.log('result:', result);
 });

View File

@@ -44,12 +44,12 @@ Returns a list [Inference, Inference Time, Audio Length]
 '''
 def stt(ds, audio, fs):
     inference_time = 0.0
-    audio_length = len(audio) * (1 / 16000)
+    audio_length = len(audio) * (1 / fs)
     # Run Deepspeech
     logging.debug('Running inference...')
     inference_start = timer()
-    output = ds.stt(audio, fs)
+    output = ds.stt(audio)
     inference_end = timer() - inference_start
     inference_time += inference_end
     logging.debug('Inference took %0.3fs for %0.3fs audio file.' % (inference_end, audio_length))