Adjust Buffer length to account for element size inside the JS binding

This commit is contained in:
Reuben Morais 2020-01-27 18:20:57 +01:00
parent 502436f8f3
commit 8d42c2bdd9
2 changed files with 7 additions and 5 deletions

View File

@@ -109,12 +109,10 @@ audioStream.on('finish', () => {
console.error('Running inference.');
const audioLength = (audioBuffer.length / 2) * (1 / desired_sample_rate);
// We take half of the buffer_size because buffer is a char* while
// LocalDsSTT() expected a short*
if (args['extended']) {
console.log(metadataToString(model.sttWithMetadata(audioBuffer.slice(0, audioBuffer.length / 2))));
console.log(metadataToString(model.sttWithMetadata(audioBuffer)));
} else {
console.log(model.stt(audioBuffer.slice(0, audioBuffer.length / 2)));
console.log(model.stt(audioBuffer));
}
const inference_stop = process.hrtime(inference_start);
console.error('Inference took %ds for %ds audio file.', totalTime(inference_stop), audioLength.toPrecision(4));

View File

@@ -18,8 +18,12 @@ using namespace node;
char* bufferData = Buffer::Data(bufferObj);
size_t bufferLength = Buffer::Length(bufferObj);
if (bufferLength % 2 != 0) {
SWIG_exception_fail(SWIG_ERROR, "Buffer length must be even. Make sure your input audio is 16-bits per sample.");
}
$1 = ($1_ltype)bufferData;
$2 = ($2_ltype)bufferLength;
$2 = ($2_ltype)(bufferLength / 2);
}
// apply to DS_FeedAudioContent and DS_SpeechToText