diff --git a/native_client/javascript/client.js b/native_client/javascript/client.js
index 8203df12..b504650f 100644
--- a/native_client/javascript/client.js
+++ b/native_client/javascript/client.js
@@ -109,12 +109,10 @@ audioStream.on('finish', () => {
   console.error('Running inference.');
   const audioLength = (audioBuffer.length / 2) * (1 / desired_sample_rate);
 
-  // We take half of the buffer_size because buffer is a char* while
-  // LocalDsSTT() expected a short*
   if (args['extended']) {
-    console.log(metadataToString(model.sttWithMetadata(audioBuffer.slice(0, audioBuffer.length / 2))));
+    console.log(metadataToString(model.sttWithMetadata(audioBuffer)));
   } else {
-    console.log(model.stt(audioBuffer.slice(0, audioBuffer.length / 2)));
+    console.log(model.stt(audioBuffer));
   }
   const inference_stop = process.hrtime(inference_start);
   console.error('Inference took %ds for %ds audio file.', totalTime(inference_stop), audioLength.toPrecision(4));
diff --git a/native_client/javascript/deepspeech.i b/native_client/javascript/deepspeech.i
index 80b00a27..006f78cf 100644
--- a/native_client/javascript/deepspeech.i
+++ b/native_client/javascript/deepspeech.i
@@ -18,8 +18,12 @@ using namespace node;
   char* bufferData = Buffer::Data(bufferObj);
   size_t bufferLength = Buffer::Length(bufferObj);
 
+  if (bufferLength % 2 != 0) {
+    SWIG_exception_fail(SWIG_ERROR, "Buffer length must be even. Make sure your input audio is 16-bits per sample.");
+  }
+
   $1 = ($1_ltype)bufferData;
-  $2 = ($2_ltype)bufferLength;
+  $2 = ($2_ltype)(bufferLength / 2);
 }
 
 // apply to DS_FeedAudioContent and DS_SpeechToText
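
Note: with this change, callers pass the Node.js Buffer of raw 16-bit PCM straight through, and the SWIG typemap converts the byte count into a sample count instead of requiring the caller to slice the buffer in half. A minimal usage sketch, assuming a Model constructor that takes only a model path; the file names are hypothetical and for illustration only:

    const DeepSpeech = require('deepspeech');
    const Fs = require('fs');

    // Hypothetical model path, for illustration only.
    const model = new DeepSpeech.Model('deepspeech.pbmm');

    // Hypothetical input: raw 16-bit little-endian PCM at the model's
    // sample rate (e.g. a WAV file with its header already stripped).
    const audioBuffer = Fs.readFileSync('audio.raw');

    // The binding now takes the whole Buffer and derives the sample
    // count itself (bufferLength / 2); an odd byte length raises an
    // error instead of silently truncating the audio, so no manual
    // audioBuffer.slice(0, audioBuffer.length / 2) is needed.
    console.log(model.stt(audioBuffer));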