Fix #2888: Use start-after / end-before for API example line references

This commit is contained in:
Alexandre Lissy 2020-04-07 13:30:25 +02:00
parent b5a805056f
commit 5e3c5e9131
8 changed files with 48 additions and 8 deletions

View File

@ -1,13 +1,17 @@
C API Usage example C API Usage example
=================== ===================
Examples are from `native_client/client.cc`.
Creating a model instance and loading model Creating a model instance and loading model
------------------------------------------- -------------------------------------------
.. literalinclude:: ../native_client/client.cc .. literalinclude:: ../native_client/client.cc
:language: c :language: c
:linenos: :linenos:
:lines: 370-375,386-390 :lineno-match:
:start-after: sphinx-doc: c_ref_model_start
:end-before: sphinx-doc: c_ref_model_stop
Performing inference Performing inference
-------------------- --------------------
@ -15,7 +19,9 @@ Performing inference
.. literalinclude:: ../native_client/client.cc .. literalinclude:: ../native_client/client.cc
:language: c :language: c
:linenos: :linenos:
:lines: 59-94 :lineno-match:
:start-after: sphinx-doc: c_ref_inference_start
:end-before: sphinx-doc: c_ref_inference_stop
Full source code Full source code
---------------- ----------------

View File

@ -1,13 +1,17 @@
Java API Usage example Java API Usage example
====================== ======================
Examples are from `native_client/java/app/src/main/java/org/mozilla/deepspeech/DeepSpeechActivity.java`.
Creating a model instance and loading model Creating a model instance and loading model
------------------------------------------- -------------------------------------------
.. literalinclude:: ../native_client/java/app/src/main/java/org/mozilla/deepspeech/DeepSpeechActivity.java .. literalinclude:: ../native_client/java/app/src/main/java/org/mozilla/deepspeech/DeepSpeechActivity.java
:language: java :language: java
:linenos: :linenos:
:lines: 52 :lineno-match:
:start-after: sphinx-doc: java_ref_model_start
:end-before: sphinx-doc: java_ref_model_stop
Performing inference Performing inference
-------------------- --------------------
@ -15,7 +19,9 @@ Performing inference
.. literalinclude:: ../native_client/java/app/src/main/java/org/mozilla/deepspeech/DeepSpeechActivity.java .. literalinclude:: ../native_client/java/app/src/main/java/org/mozilla/deepspeech/DeepSpeechActivity.java
:language: java :language: java
:linenos: :linenos:
:lines: 101 :lineno-match:
:start-after: sphinx-doc: java_ref_inference_start
:end-before: sphinx-doc: java_ref_inference_stop
Full source code Full source code
---------------- ----------------

View File

@ -1,13 +1,17 @@
JavaScript API Usage example JavaScript API Usage example
============================= =============================
Examples are from `native_client/javascript/client.ts`.
Creating a model instance and loading model Creating a model instance and loading model
------------------------------------------- -------------------------------------------
.. literalinclude:: ../native_client/javascript/client.ts .. literalinclude:: ../native_client/javascript/client.ts
:language: javascript :language: javascript
:linenos: :linenos:
:lines: 49,54 :lineno-match:
:start-after: sphinx-doc: js_ref_model_start
:end-before: sphinx-doc: js_ref_model_stop
Performing inference Performing inference
-------------------- --------------------
@ -15,7 +19,9 @@ Performing inference
.. literalinclude:: ../native_client/javascript/client.ts .. literalinclude:: ../native_client/javascript/client.ts
:language: javascript :language: javascript
:linenos: :linenos:
:lines: 114,118 :lineno-match:
:start-after: sphinx-doc: js_ref_inference_start
:end-before: sphinx-doc: js_ref_inference_stop
Full source code Full source code
---------------- ----------------

View File

@ -1,13 +1,17 @@
Python API Usage example Python API Usage example
======================== ========================
Examples are from `native_client/python/client.py`.
Creating a model instance and loading model Creating a model instance and loading model
------------------------------------------- -------------------------------------------
.. literalinclude:: ../native_client/python/client.py .. literalinclude:: ../native_client/python/client.py
:language: python :language: python
:linenos: :linenos:
:lines: 111,123 :lineno-match:
:start-after: sphinx-doc: python_ref_model_start
:end-before: sphinx-doc: python_ref_model_stop
Performing inference Performing inference
-------------------- --------------------
@ -15,7 +19,9 @@ Performing inference
.. literalinclude:: ../native_client/python/client.py .. literalinclude:: ../native_client/python/client.py
:language: python :language: python
:linenos: :linenos:
:lines: 143-148 :lineno-match:
:start-after: sphinx-doc: python_ref_inference_start
:end-before: sphinx-doc: python_ref_inference_stop
Full source code Full source code
---------------- ----------------

View File

@ -162,6 +162,7 @@ LocalDsSTT(ModelState* aCtx, const short* aBuffer, size_t aBufferSize,
clock_t ds_start_time = clock(); clock_t ds_start_time = clock();
// sphinx-doc: c_ref_inference_start
if (extended_output) { if (extended_output) {
Metadata *result = DS_SpeechToTextWithMetadata(aCtx, aBuffer, aBufferSize, 1); Metadata *result = DS_SpeechToTextWithMetadata(aCtx, aBuffer, aBufferSize, 1);
res.string = CandidateTranscriptToString(&result->transcripts[0]); res.string = CandidateTranscriptToString(&result->transcripts[0]);
@ -198,6 +199,7 @@ LocalDsSTT(ModelState* aCtx, const short* aBuffer, size_t aBufferSize,
} else { } else {
res.string = DS_SpeechToText(aCtx, aBuffer, aBufferSize); res.string = DS_SpeechToText(aCtx, aBuffer, aBufferSize);
} }
// sphinx-doc: c_ref_inference_stop
clock_t ds_end_infer = clock(); clock_t ds_end_infer = clock();
@ -393,6 +395,7 @@ main(int argc, char **argv)
// Initialise DeepSpeech // Initialise DeepSpeech
ModelState* ctx; ModelState* ctx;
// sphinx-doc: c_ref_model_start
int status = DS_CreateModel(model, &ctx); int status = DS_CreateModel(model, &ctx);
if (status != 0) { if (status != 0) {
fprintf(stderr, "Could not create model.\n"); fprintf(stderr, "Could not create model.\n");
@ -421,6 +424,7 @@ main(int argc, char **argv)
} }
} }
} }
// sphinx-doc: c_ref_model_stop
#ifndef NO_SOX #ifndef NO_SOX
// Initialise SOX // Initialise SOX

View File

@ -49,8 +49,10 @@ public class DeepSpeechActivity extends AppCompatActivity {
private void newModel(String tfliteModel) { private void newModel(String tfliteModel) {
this._tfliteStatus.setText("Creating model"); this._tfliteStatus.setText("Creating model");
if (this._m == null) { if (this._m == null) {
// sphinx-doc: java_ref_model_start
this._m = new DeepSpeechModel(tfliteModel); this._m = new DeepSpeechModel(tfliteModel);
this._m.setBeamWidth(BEAM_WIDTH); this._m.setBeamWidth(BEAM_WIDTH);
// sphinx-doc: java_ref_model_stop
} }
} }
@ -98,7 +100,9 @@ public class DeepSpeechActivity extends AppCompatActivity {
long inferenceStartTime = System.currentTimeMillis(); long inferenceStartTime = System.currentTimeMillis();
// sphinx-doc: java_ref_inference_start
String decoded = this._m.stt(shorts, shorts.length); String decoded = this._m.stt(shorts, shorts.length);
// sphinx-doc: java_ref_inference_stop
inferenceExecTime = System.currentTimeMillis() - inferenceStartTime; inferenceExecTime = System.currentTimeMillis() - inferenceStartTime;

View File

@ -44,6 +44,7 @@ function candidateTranscriptToString(transcript: Ds.CandidateTranscript): string
return retval; return retval;
} }
// sphinx-doc: js_ref_model_start
console.error('Loading model from file %s', args['model']); console.error('Loading model from file %s', args['model']);
const model_load_start = process.hrtime(); const model_load_start = process.hrtime();
let model = new Ds.Model(args['model']); let model = new Ds.Model(args['model']);
@ -53,6 +54,7 @@ console.error('Loaded model in %ds.', totalTime(model_load_end));
if (args['beam_width']) { if (args['beam_width']) {
model.setBeamWidth(args['beam_width']); model.setBeamWidth(args['beam_width']);
} }
// sphinx-doc: js_ref_model_stop
let desired_sample_rate = model.sampleRate(); let desired_sample_rate = model.sampleRate();
@ -110,6 +112,7 @@ audioStream.on('finish', () => {
console.error('Running inference.'); console.error('Running inference.');
const audioLength = (audioBuffer.length / 2) * (1 / desired_sample_rate); const audioLength = (audioBuffer.length / 2) * (1 / desired_sample_rate);
// sphinx-doc: js_ref_inference_start
if (args['extended']) { if (args['extended']) {
let metadata = model.sttWithMetadata(audioBuffer, 1); let metadata = model.sttWithMetadata(audioBuffer, 1);
console.log(candidateTranscriptToString(metadata.transcripts[0])); console.log(candidateTranscriptToString(metadata.transcripts[0]));
@ -117,6 +120,7 @@ audioStream.on('finish', () => {
} else { } else {
console.log(model.stt(audioBuffer)); console.log(model.stt(audioBuffer));
} }
// sphinx-doc: js_ref_inference_stop
const inference_stop = process.hrtime(inference_start); const inference_stop = process.hrtime(inference_start);
console.error('Inference took %ds for %ds audio file.', totalTime(inference_stop), audioLength.toPrecision(4)); console.error('Inference took %ds for %ds audio file.', totalTime(inference_stop), audioLength.toPrecision(4));
Ds.FreeModel(model); Ds.FreeModel(model);

View File

@ -111,7 +111,9 @@ def main():
print('Loading model from file {}'.format(args.model), file=sys.stderr) print('Loading model from file {}'.format(args.model), file=sys.stderr)
model_load_start = timer() model_load_start = timer()
# sphinx-doc: python_ref_model_start
ds = Model(args.model) ds = Model(args.model)
# sphinx-doc: python_ref_model_stop
model_load_end = timer() - model_load_start model_load_end = timer() - model_load_start
print('Loaded model in {:.3}s.'.format(model_load_end), file=sys.stderr) print('Loaded model in {:.3}s.'.format(model_load_end), file=sys.stderr)
@ -143,12 +145,14 @@ def main():
print('Running inference.', file=sys.stderr) print('Running inference.', file=sys.stderr)
inference_start = timer() inference_start = timer()
# sphinx-doc: python_ref_inference_start
if args.extended: if args.extended:
print(metadata_to_string(ds.sttWithMetadata(audio, 1).transcripts[0])) print(metadata_to_string(ds.sttWithMetadata(audio, 1).transcripts[0]))
elif args.json: elif args.json:
print(metadata_json_output(ds.sttWithMetadata(audio, 3))) print(metadata_json_output(ds.sttWithMetadata(audio, 3)))
else: else:
print(ds.stt(audio)) print(ds.stt(audio))
# sphinx-doc: python_ref_inference_stop
inference_end = timer() - inference_start inference_end = timer() - inference_start
print('Inference took %0.3fs for %0.3fs audio file.' % (inference_end, audio_length), file=sys.stderr) print('Inference took %0.3fs for %0.3fs audio file.' % (inference_end, audio_length), file=sys.stderr)