diff --git a/examples/ffmpeg_vad_streaming/index.js b/examples/ffmpeg_vad_streaming/index.js
index 8aef749b..d64cc9f8 100644
--- a/examples/ffmpeg_vad_streaming/index.js
+++ b/examples/ffmpeg_vad_streaming/index.js
@@ -95,7 +95,7 @@ const ffmpeg = spawn('ffmpeg', [
 ]);
 
 let audioLength = 0;
-let sctx = model.createStream(AUDIO_SAMPLE_RATE);
+let sctx = model.createStream();
 
 function finishStream() {
     const model_load_start = process.hrtime();
@@ -108,7 +108,7 @@ function finishStream() {
 
 function intermediateDecode() {
     finishStream();
-    sctx = model.createStream(AUDIO_SAMPLE_RATE);
+    sctx = model.createStream();
 }
 
 function feedAudioContent(chunk) {
diff --git a/examples/net_framework/DeepSpeechWPF/MainWindow.xaml.cs b/examples/net_framework/DeepSpeechWPF/MainWindow.xaml.cs
index 8b38316c..e332da6d 100644
--- a/examples/net_framework/DeepSpeechWPF/MainWindow.xaml.cs
+++ b/examples/net_framework/DeepSpeechWPF/MainWindow.xaml.cs
@@ -130,7 +130,7 @@ namespace DeepSpeechWPF
             watch.Start();
             await Task.Run(() =>
             {
-                string speechResult = _sttClient.SpeechToText(waveBuffer.ShortBuffer, Convert.ToUInt32(waveBuffer.MaxSize / 2), 16000);
+                string speechResult = _sttClient.SpeechToText(waveBuffer.ShortBuffer, Convert.ToUInt32(waveBuffer.MaxSize / 2));
                 watch.Stop();
                 Dispatcher.Invoke(() =>
                 {
@@ -250,7 +250,7 @@ namespace DeepSpeechWPF
 
         private void BtnStartRecording_Click(object sender, RoutedEventArgs e)
         {
-            _sttClient.CreateStream(16000);
+            _sttClient.CreateStream();
             _audioCapture.Start();
             btnStartRecording.IsEnabled = false;
             btnStopRecording.IsEnabled = true;
diff --git a/examples/nodejs_wav/index.js b/examples/nodejs_wav/index.js
index a5432217..20ccb2ab 100644
--- a/examples/nodejs_wav/index.js
+++ b/examples/nodejs_wav/index.js
@@ -64,7 +64,7 @@ audioStream.on('finish', () => {
     const audioLength = (audioBuffer.length / 2) * ( 1 / 16000);
     console.log('audio length', audioLength);
 
-    let result = model.stt(audioBuffer.slice(0, audioBuffer.length / 2), 16000);
+    let result = model.stt(audioBuffer.slice(0, audioBuffer.length / 2));
 
     console.log('result:', result);
 });
diff --git a/examples/vad_transcriber/wavTranscriber.py b/examples/vad_transcriber/wavTranscriber.py
index 9f21f362..727dc5cf 100644
--- a/examples/vad_transcriber/wavTranscriber.py
+++ b/examples/vad_transcriber/wavTranscriber.py
@@ -44,12 +44,12 @@ Returns a list [Inference, Inference Time, Audio Length]
 '''
 def stt(ds, audio, fs):
     inference_time = 0.0
-    audio_length = len(audio) * (1 / 16000)
+    audio_length = len(audio) * (1 / fs)
 
     # Run Deepspeech
     logging.debug('Running inference...')
     inference_start = timer()
-    output = ds.stt(audio, fs)
+    output = ds.stt(audio)
     inference_end = timer() - inference_start
     inference_time += inference_end
     logging.debug('Inference took %0.3fs for %0.3fs audio file.' % (inference_end, audio_length))
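
Across all four examples the change is the same: the sample-rate argument is dropped from stt/SpeechToText/createStream/CreateStream, since the model now carries its own expected sample rate. wavTranscriber.py additionally computes audio_length from the actual fs instead of a hardcoded 16000. A minimal sketch of the new Node.js calling convention, not part of the diff: paths are placeholders, and the exact Model constructor arguments vary between DeepSpeech releases.

    const DeepSpeech = require('deepspeech');
    const fs = require('fs');

    // Placeholder model path; constructor arguments differ across releases.
    const model = new DeepSpeech.Model('output_graph.pbmm');

    // Raw 16-bit mono PCM, assumed to already be at the model's sample rate
    // (16 kHz for the stock English models). No rate argument is passed.
    const audioBuffer = fs.readFileSync('audio.raw');
    console.log('result:', model.stt(audioBuffer));

    // The streaming API is likewise created without a rate argument.
    let sctx = model.createStream();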