Merge pull request #2981 from mozilla/pr2980-tests
PR #2980 + tests (Fixes #2979)
commit 9a7ec1ae0d
@@ -134,6 +134,11 @@ if (!args['stream']) {
     stream.feedAudioContent(chunk);
   });
   conversionStream.on('end', () => {
-    console.log(stream.finishStream());
+    if (args['extended']) {
+      let metadata = stream.finishStreamWithMetadata();
+      console.log(candidateTranscriptToString(metadata.transcripts[0]));
+    } else {
+      console.log(stream.finishStream());
+    }
   });
 }
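For orientation, a minimal standalone sketch of the extended streaming path this hunk wires up, assuming the deepspeech npm package; the model and scorer paths and the silent audio buffer are placeholders, and the token join only approximates what a candidateTranscriptToString-style helper would do, not the client's exact code:

const Ds = require('deepspeech');                      // assumed package name

const model = new Ds.Model('deepspeech.pbmm');         // placeholder model path
model.enableExternalScorer('kenlm.scorer');            // placeholder scorer path

const stream = model.createStream();
stream.feedAudioContent(Buffer.alloc(16000 * 2));      // 1 s of silence: 16-bit PCM, 16 kHz mono

// With --extended the client calls finishStreamWithMetadata() instead of
// finishStream(), getting candidate transcripts with per-token metadata.
const metadata = stream.finishStreamWithMetadata();    // one candidate by default (assumed)
const best = metadata.transcripts[0];
console.log(best.tokens.map((t) => t.text).join(''));  // roughly candidateTranscriptToString

Ds.FreeMetadata(metadata);                             // caller is responsible for freeing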
native_client/javascript/index.d.ts
@@ -103,7 +103,7 @@ stt(aBuffer: object): string;
  * @return :js:func:`Metadata` object containing multiple candidate transcripts. Each transcript has per-token metadata including timing information.
  * The user is responsible for freeing Metadata by calling :js:func:`FreeMetadata`. Returns undefined on error.
  */
-sttWithMetadata(aBuffer: object, aNumResults: number): Metadata;
+sttWithMetadata(aBuffer: object, aNumResults?: number): Metadata;
 
 /**
  * Create a new streaming inference state. One can then call :js:func:`Stream.feedAudioContent` and :js:func:`Stream.finishStream` on the returned stream object.
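Marking aNumResults optional keeps single-candidate call sites unchanged while still allowing an explicit candidate count. A hedged sketch; the package name, paths, and the default of one candidate are assumptions:

const Ds = require('deepspeech');                       // assumed package name

const model = new Ds.Model('deepspeech.pbmm');          // placeholder model path
const audio = Buffer.alloc(16000 * 2);                   // 1 s of silent 16-bit PCM @ 16 kHz

const single = model.sttWithMetadata(audio);             // aNumResults omitted: one candidate (assumed default)
const several = model.sttWithMetadata(audio, 3);         // explicitly request up to three candidates

console.log(single.transcripts.length, several.transcripts.length);

Ds.FreeMetadata(single);                                  // each Metadata object must be freed separately
Ds.FreeMetadata(several);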
@@ -143,7 +143,7 @@ intermediateDecode(aSctx: Stream): string;
  *
  * @return :js:func:`Metadata` object containing multiple candidate transcripts. Each transcript has per-token metadata including timing information. The user is responsible for freeing Metadata by calling :js:func:`FreeMetadata`. Returns undefined on error.
  */
-intermediateDecodeWithMetadata (aNumResults: number): Metadata;
+intermediateDecodeWithMetadata (aNumResults?: number): Metadata;
 
 /**
  * Compute the final decoding of an ongoing streaming inference and return the result. Signals the end of an ongoing streaming inference.
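The same optional-parameter treatment applies to intermediate decoding during a live stream. A sketch under the same assumptions, with silent chunks standing in for real audio:

const Ds = require('deepspeech');                         // assumed package name

const model = new Ds.Model('deepspeech.pbmm');            // placeholder model path
const stream = model.createStream();

for (let i = 0; i < 4; i++) {
  stream.feedAudioContent(Buffer.alloc(4000 * 2));        // 250 ms of silence per chunk
  const partial = stream.intermediateDecodeWithMetadata(); // aNumResults omitted
  console.log(partial.transcripts[0].tokens.map((t) => t.text).join(''));
  Ds.FreeMetadata(partial);                                // free every intermediate Metadata
}

console.log(stream.finishStream());                        // plain-string finish for comparison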
@@ -163,7 +163,7 @@ finishStream(): string;
  *
  * This method will free the stream, it must not be used after this method is called.
  */
-finishStreamWithMetadata(aNumResults: number): Metadata;
+finishStreamWithMetadata(aNumResults?: number): Metadata;
 }
 
 /**
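As the doc comment above notes, finishStreamWithMetadata consumes the stream. A short sketch of the intended lifecycle, with the same assumed package and placeholder paths:

const Ds = require('deepspeech');                          // assumed package name

const model = new Ds.Model('deepspeech.pbmm');             // placeholder model path

let stream = model.createStream();
stream.feedAudioContent(Buffer.alloc(16000 * 2));          // 1 s of silence

const metadata = stream.finishStreamWithMetadata();        // also frees the native stream state
Ds.FreeMetadata(metadata);

// The finished stream must not be used again; create a fresh one for the next utterance.
stream = model.createStream();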
@@ -177,7 +177,7 @@ export function FreeModel(model: Model): void;
 /**
  * Free memory allocated for metadata information.
  *
- * @param metadata Object containing metadata as returned by :js:func:`Model.sttWithMetadata` or :js:func:`Model.finishStreamWithMetadata`
+ * @param metadata Object containing metadata as returned by :js:func:`Model.sttWithMetadata` or :js:func:`Stream.finishStreamWithMetadata`
  */
 export function FreeMetadata(metadata: Metadata): void;
 
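The corrected @param wording reflects that Metadata can come from either the batch API or the streaming API, and both are released the same way. A brief sketch, assumptions as above:

const Ds = require('deepspeech');                           // assumed package name

const model = new Ds.Model('deepspeech.pbmm');              // placeholder model path
const audio = Buffer.alloc(16000 * 2);                       // 1 s of silence

const fromBatch = model.sttWithMetadata(audio);              // Model.sttWithMetadata
const stream = model.createStream();
stream.feedAudioContent(audio);
const fromStream = stream.finishStreamWithMetadata();        // Stream.finishStreamWithMetadata

Ds.FreeMetadata(fromBatch);                                   // the same FreeMetadata call handles both
Ds.FreeMetadata(fromStream);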
@@ -211,7 +211,7 @@ Stream.prototype.finishStream = function() {
  */
 Stream.prototype.finishStreamWithMetadata = function(aNumResults) {
     aNumResults = aNumResults || 1;
-    result = binding.FinishStreamWithMetadata(this._impl, aNumResults);
+    let result = binding.FinishStreamWithMetadata(this._impl, aNumResults);
     this._impl = null;
     return result;
 }
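The added let matters because the original assignment targeted an undeclared identifier. A self-contained illustration of the difference, unrelated to the DeepSpeech binding itself:

'use strict';

function finishWithoutDeclaration() {
  try {
    result = 42;                      // undeclared: throws in strict mode, leaks a global otherwise
  } catch (e) {
    console.log(e.name);              // "ReferenceError"
  }
}

function finishWithDeclaration() {
  let result = 42;                    // properly scoped, as in the fixed code
  return result;
}

finishWithoutDeclaration();
console.log(finishWithDeclaration()); // 42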
@@ -230,7 +230,7 @@ function FreeModel(model) {
 /**
  * Free memory allocated for metadata information.
  *
- * @param {object} metadata Object containing metadata as returned by :js:func:`Model.sttWithMetadata` or :js:func:`Model.finishStreamWithMetadata`
+ * @param {object} metadata Object containing metadata as returned by :js:func:`Model.sttWithMetadata` or :js:func:`Stream.finishStreamWithMetadata`
  */
 function FreeMetadata(metadata) {
     return binding.FreeMetadata(metadata);
@@ -519,6 +519,12 @@ run_js_streaming_inference_tests()
   status=$?
   set -e
   assert_correct_ldc93s1_lm "${phrase_pbmodel_withlm}" "$status"
+
+  set +e
+  phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --stream --extended 2>${TASKCLUSTER_TMP_DIR}/stderr | tail -n 1)
+  status=$?
+  set -e
+  assert_correct_ldc93s1_lm "${phrase_pbmodel_withlm}" "$status"
 }
 
 run_js_streaming_prod_inference_tests()
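The new test case mirrors the existing streaming check but adds --extended. For reference, a hedged Node sketch of driving the same CLI invocation programmatically; the file names are placeholders for the variables used by the test and must exist locally for this to actually run:

const { execFile } = require('child_process');

// Same flags the added test passes to the deepspeech CLI, with --extended on top of --stream.
const args = [
  '--model', 'output_graph.pbmm',        // placeholder for ${model_name_mmap}
  '--scorer', 'kenlm.scorer',
  '--audio', 'LDC93S1.wav',              // placeholder for ${ldc93s1_sample_filename}
  '--stream',
  '--extended',
];

execFile('deepspeech', args, (err, stdout, stderr) => {
  if (err) {
    console.error('deepspeech exited with an error:', err.message);
    return;
  }
  // The shell test keeps only the last stdout line and asserts it against the expected transcript.
  const lines = stdout.trim().split('\n');
  console.log(lines[lines.length - 1]);
});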
@@ -529,4 +535,11 @@ run_js_streaming_prod_inference_tests()
   status=$?
   set -e
   assert_correct_ldc93s1_prodmodel "${phrase_pbmodel_withlm}" "$status" "${_bitrate}"
+
+  local _bitrate=$1
+  set +e
+  phrase_pbmodel_withlm=$(deepspeech --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} --scorer ${TASKCLUSTER_TMP_DIR}/kenlm.scorer --audio ${TASKCLUSTER_TMP_DIR}/${ldc93s1_sample_filename} --stream --extended 2>${TASKCLUSTER_TMP_DIR}/stderr | tail -n 1)
+  status=$?
+  set -e
+  assert_correct_ldc93s1_prodmodel "${phrase_pbmodel_withlm}" "$status" "${_bitrate}"
 }