From baaa5842b28aa14829739ece4f40f7c1b8e05635 Mon Sep 17 00:00:00 2001
From: Reuben Morais
Date: Thu, 10 Oct 2019 14:20:44 +0200
Subject: [PATCH] Remove sample rate parameter usage from examples

---
 examples/ffmpeg_vad_streaming/index.js                  | 4 ++--
 examples/net_framework/DeepSpeechWPF/MainWindow.xaml.cs | 4 ++--
 examples/nodejs_wav/index.js                            | 2 +-
 examples/vad_transcriber/wavTranscriber.py              | 4 ++--
 4 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/examples/ffmpeg_vad_streaming/index.js b/examples/ffmpeg_vad_streaming/index.js
index 8aef749b..d64cc9f8 100644
--- a/examples/ffmpeg_vad_streaming/index.js
+++ b/examples/ffmpeg_vad_streaming/index.js
@@ -95,7 +95,7 @@ const ffmpeg = spawn('ffmpeg', [
 ]);
 
 let audioLength = 0;
-let sctx = model.createStream(AUDIO_SAMPLE_RATE);
+let sctx = model.createStream();
 
 function finishStream() {
     const model_load_start = process.hrtime();
@@ -108,7 +108,7 @@ function finishStream() {
 
 function intermediateDecode() {
     finishStream();
-    sctx = model.createStream(AUDIO_SAMPLE_RATE);
+    sctx = model.createStream();
 }
 
 function feedAudioContent(chunk) {
diff --git a/examples/net_framework/DeepSpeechWPF/MainWindow.xaml.cs b/examples/net_framework/DeepSpeechWPF/MainWindow.xaml.cs
index 8b38316c..e332da6d 100644
--- a/examples/net_framework/DeepSpeechWPF/MainWindow.xaml.cs
+++ b/examples/net_framework/DeepSpeechWPF/MainWindow.xaml.cs
@@ -130,7 +130,7 @@ namespace DeepSpeechWPF
             watch.Start();
             await Task.Run(() =>
             {
-                string speechResult = _sttClient.SpeechToText(waveBuffer.ShortBuffer, Convert.ToUInt32(waveBuffer.MaxSize / 2), 16000);
+                string speechResult = _sttClient.SpeechToText(waveBuffer.ShortBuffer, Convert.ToUInt32(waveBuffer.MaxSize / 2));
                 watch.Stop();
                 Dispatcher.Invoke(() =>
                 {
@@ -250,7 +250,7 @@ namespace DeepSpeechWPF
 
         private void BtnStartRecording_Click(object sender, RoutedEventArgs e)
         {
-            _sttClient.CreateStream(16000);
+            _sttClient.CreateStream();
             _audioCapture.Start();
             btnStartRecording.IsEnabled = false;
             btnStopRecording.IsEnabled = true;
diff --git a/examples/nodejs_wav/index.js b/examples/nodejs_wav/index.js
index a5432217..20ccb2ab 100644
--- a/examples/nodejs_wav/index.js
+++ b/examples/nodejs_wav/index.js
@@ -64,7 +64,7 @@ audioStream.on('finish', () => {
 
     const audioLength = (audioBuffer.length / 2) * ( 1 / 16000);
     console.log('audio length', audioLength);
 
-    let result = model.stt(audioBuffer.slice(0, audioBuffer.length / 2), 16000);
+    let result = model.stt(audioBuffer.slice(0, audioBuffer.length / 2));
     console.log('result:', result);
 });
diff --git a/examples/vad_transcriber/wavTranscriber.py b/examples/vad_transcriber/wavTranscriber.py
index 9f21f362..727dc5cf 100644
--- a/examples/vad_transcriber/wavTranscriber.py
+++ b/examples/vad_transcriber/wavTranscriber.py
@@ -44,12 +44,12 @@ Returns a list [Inference, Inference Time, Audio Length]
 '''
 def stt(ds, audio, fs):
     inference_time = 0.0
-    audio_length = len(audio) * (1 / 16000)
+    audio_length = len(audio) * (1 / fs)
 
     # Run Deepspeech
     logging.debug('Running inference...')
     inference_start = timer()
-    output = ds.stt(audio, fs)
+    output = ds.stt(audio)
     inference_end = timer() - inference_start
     inference_time += inference_end
     logging.debug('Inference took %0.3fs for %0.3fs audio file.' % (inference_end, audio_length))
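
Note (not part of the patch): after this change, none of the example calls
pass a sample rate; the expected rate is taken from the loaded model itself,
so callers only hand over 16-bit PCM audio. Below is a minimal sketch of the
resulting Node.js calling convention. It assumes `model` is a DeepSpeech
Model loaded as in the examples above (construction omitted), that
`audioBuffer` and `chunk` stand for mono PCM buffers at the model's sample
rate, and that `feedAudioContent`/`finishStream` are used the way the
streaming example uses them.

    // One-shot recognition: only the audio buffer is passed now.
    let result = model.stt(audioBuffer);
    console.log('result:', result);

    // Streaming recognition: the stream is created without a sample rate.
    let sctx = model.createStream();
    model.feedAudioContent(sctx, chunk);   // feed chunks as they arrive
    console.log('result:', model.finishStream(sctx));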