Sync all the docs with sample rate changes
X-DeepSpeech: NOBUILD
This commit is contained in:
parent 9200b720c3
commit 2b68c56025
@@ -103,7 +103,7 @@ void DS_DiscardStream(StreamingState* aSctx)
  *
  * @param aCtx The ModelState pointer for the model to use.
  * @param aBuffer A 16-bit, mono raw audio signal at the appropriate
- *                sample rate.
+ *                sample rate (matching what the model was trained on).
  * @param aBufferSize The number of samples in the audio signal.
  * @param aSampleRate UNUSED, DEPRECATED.
  *
@@ -124,7 +124,7 @@ char* DS_SpeechToText(ModelState* aCtx,
  *
  * @param aCtx The ModelState pointer for the model to use.
  * @param aBuffer A 16-bit, mono raw audio signal at the appropriate
- *                sample rate.
+ *                sample rate (matching what the model was trained on).
  * @param aBufferSize The number of samples in the audio signal.
  * @param aSampleRate UNUSED, DEPRECATED.
  *
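Every hunk in this commit makes the same point: the audio handed to the API must be 16-bit, mono, and sampled at the rate the model was trained on, while the explicit aSampleRate argument is now unused. A minimal sketch of validating a WAV file against that requirement before calling any of the bindings; the 16000 Hz value is an assumption matching the released English models, not something stated in this diff:

import wave

EXPECTED_RATE = 16000  # assumption: the released English models were trained on 16 kHz audio

def load_pcm16(path, expected_rate=EXPECTED_RATE):
    """Return the raw 16-bit mono samples of a WAV file, refusing mismatched audio."""
    with wave.open(path, "rb") as wav:
        if wav.getsampwidth() != 2:
            raise ValueError("expected 16-bit samples, got %d-bit" % (8 * wav.getsampwidth()))
        if wav.getnchannels() != 1:
            raise ValueError("expected mono audio, got %d channels" % wav.getnchannels())
        if wav.getframerate() != expected_rate:
            # resample offline (e.g. with sox or ffmpeg) rather than feeding a mismatched rate
            raise ValueError("expected %d Hz, got %d Hz" % (expected_rate, wav.getframerate()))
        return wav.readframes(wav.getnframes())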
@@ -148,7 +148,7 @@ namespace DeepSpeechClient
        /// <summary>
        /// Feeds audio samples to an ongoing streaming inference.
        /// </summary>
-       /// <param name="aBuffer">An array of 16-bit, mono raw audio samples at the appropriate sample rate.</param>
+       /// <param name="aBuffer">An array of 16-bit, mono raw audio samples at the appropriate sample rate (matching what the model was trained on).</param>
        public unsafe void FeedAudioContent(short[] aBuffer, uint aBufferSize)
        {
            NativeImp.DS_FeedAudioContent(_streamingStatePP, aBuffer, aBufferSize);
@@ -229,7 +229,7 @@ namespace DeepSpeechClient
        /// <summary>
        /// Use the DeepSpeech model to perform Speech-To-Text.
        /// </summary>
-       /// <param name="aBuffer">A 16-bit, mono raw audio signal at the appropriate sample rate.</param>
+       /// <param name="aBuffer">A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on).</param>
        /// <param name="aBufferSize">The number of samples in the audio signal.</param>
        /// <returns>The STT result. The user is responsible for freeing the string. Returns NULL on error.</returns>
        public unsafe string SpeechToText(short[] aBuffer, uint aBufferSize)
@@ -240,7 +240,7 @@ namespace DeepSpeechClient
        /// <summary>
        /// Use the DeepSpeech model to perform Speech-To-Text.
        /// </summary>
-       /// <param name="aBuffer">A 16-bit, mono raw audio signal at the appropriate sample rate.</param>
+       /// <param name="aBuffer">A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on).</param>
        /// <param name="aBufferSize">The number of samples in the audio signal.</param>
        /// <returns>The extended metadata. The user is responsible for freeing the struct. Returns NULL on error.</returns>
        public unsafe Models.Metadata SpeechToTextWithMetadata(short[] aBuffer, uint aBufferSize)
@@ -40,7 +40,7 @@ namespace DeepSpeechClient.Interfaces
        /// <summary>
        /// Use the DeepSpeech model to perform Speech-To-Text.
        /// </summary>
-       /// <param name="aBuffer">A 16-bit, mono raw audio signal at the appropriate sample rate.</param>
+       /// <param name="aBuffer">A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on).</param>
        /// <param name="aBufferSize">The number of samples in the audio signal.</param>
        /// <returns>The STT result. The user is responsible for freeing the string. Returns NULL on error.</returns>
        unsafe string SpeechToText(short[] aBuffer,
@@ -49,7 +49,7 @@ namespace DeepSpeechClient.Interfaces
        /// <summary>
        /// Use the DeepSpeech model to perform Speech-To-Text.
        /// </summary>
-       /// <param name="aBuffer">A 16-bit, mono raw audio signal at the appropriate sample rate.</param>
+       /// <param name="aBuffer">A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on).</param>
        /// <param name="aBufferSize">The number of samples in the audio signal.</param>
        /// <returns>The extended metadata result. The user is responsible for freeing the struct. Returns NULL on error.</returns>
        unsafe Metadata SpeechToTextWithMetadata(short[] aBuffer,
@@ -81,7 +81,7 @@ namespace DeepSpeechClient.Interfaces
        /// <summary>
        /// Feeds audio samples to an ongoing streaming inference.
        /// </summary>
-       /// <param name="aBuffer">An array of 16-bit, mono raw audio samples at the appropriate sample rate.</param>
+       /// <param name="aBuffer">An array of 16-bit, mono raw audio samples at the appropriate sample rate (matching what the model was trained on).</param>
        unsafe void FeedAudioContent(short[] aBuffer, uint aBufferSize);

        /// <summary>
@@ -57,7 +57,7 @@ public class DeepSpeechModel {
    * @brief Use the DeepSpeech model to perform Speech-To-Text.
    *
    * @param buffer A 16-bit, mono raw audio signal at the appropriate
-   *               sample rate.
+   *               sample rate (matching what the model was trained on).
    * @param buffer_size The number of samples in the audio signal.
    *
    * @return The STT result.
@@ -71,7+71,7 @@ public class DeepSpeechModel {
    * about the results.
    *
    * @param buffer A 16-bit, mono raw audio signal at the appropriate
-   *               sample rate.
+   *               sample rate (matching what the model was trained on).
    * @param buffer_size The number of samples in the audio signal.
    *
    * @return Outputs a Metadata object of individual letters along with their timing information.
@@ -98,7 +98,7 @@ public class DeepSpeechModel {
    *
    * @param cctx A streaming state pointer returned by createStream().
    * @param buffer An array of 16-bit, mono raw audio samples at the
-   *               appropriate sample rate.
+   *               appropriate sample rate (matching what the model was trained on).
    * @param buffer_size The number of samples in @p buffer.
    */
   public void feedAudioContent(DeepSpeechStreamingState ctx, short[] buffer, int buffer_size) {
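The streaming entry points touched in this commit (DS_FeedAudioContent, FeedAudioContent, feedAudioContent) all accept the same kind of buffer, only delivered in chunks. A rough sketch of that loop against the Python binding, assuming the createStream/feedAudioContent/finishStream names that appear in the docstrings further down; exact signatures shifted between 0.x releases, so treat this as illustrative only:

import numpy as np

CHUNK = 1024  # samples per feed; an arbitrary choice, nothing in the API mandates it

def stream_transcribe(model, pcm_bytes):
    """Feed 16-bit mono samples (already at the model's training rate) to a stream in chunks."""
    samples = np.frombuffer(pcm_bytes, dtype=np.int16)
    sctx = model.createStream()  # name taken from the docstrings in this diff
    for start in range(0, len(samples), CHUNK):
        chunk = samples[start:start + CHUNK]
        # (aSctx, aBuffer, aBufferSize) as documented below; some releases infer the size from the array
        model.feedAudioContent(sctx, chunk, len(chunk))
    return model.finishStream(sctx)  # assumed finalizer name; check the binding for your release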
@@ -64,7 +64,7 @@ Model.prototype.enableDecoderWithLM = function() {
 /**
  * Use the DeepSpeech model to perform Speech-To-Text.
  *
- * @param {object} aBuffer A 16-bit, mono raw audio signal at the appropriate sample rate.
+ * @param {object} aBuffer A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on).
  * @param {number} aBufferSize The number of samples in the audio signal.
  *
  * @return {string} The STT result. Returns undefined on error.
@@ -78,7 +78,7 @@ Model.prototype.stt = function() {
  * Use the DeepSpeech model to perform Speech-To-Text and output metadata
  * about the results.
  *
- * @param {object} aBuffer A 16-bit, mono raw audio signal at the appropriate sample rate.
+ * @param {object} aBuffer A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on).
  * @param {number} aBufferSize The number of samples in the audio signal.
  *
  * @return {object} Outputs a :js:func:`Metadata` struct of individual letters along with their timing information. The user is responsible for freeing Metadata by calling :js:func:`FreeMetadata`. Returns undefined on error.
@@ -111,7 +111,7 @@ Model.prototype.createStream = function() {
  *
  * @param {object} aSctx A streaming state returned by :js:func:`Model.setupStream`.
  * @param {buffer} aBuffer An array of 16-bit, mono raw audio samples at the
- *                 appropriate sample rate.
+ *                 appropriate sample rate (matching what the model was trained on).
  * @param {number} aBufferSize The number of samples in @param aBuffer.
  */
 Model.prototype.feedAudioContent = function() {
@@ -69,7 +69,7 @@ class Model(object):
         """
         Use the DeepSpeech model to perform Speech-To-Text.

-        :param aBuffer: A 16-bit, mono raw audio signal at the appropriate sample rate.
+        :param aBuffer: A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on).
         :type aBuffer: int array

         :param aBufferSize: The number of samples in the audio signal.
@@ -84,7 +84,7 @@ class Model(object):
         """
         Use the DeepSpeech model to perform Speech-To-Text and output metadata about the results.

-        :param aBuffer: A 16-bit, mono raw audio signal at the appropriate sample rate.
+        :param aBuffer: A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on).
         :type aBuffer: int array

         :param aBufferSize: The number of samples in the audio signal.
@@ -117,7 +117,7 @@ class Model(object):
         :param aSctx: A streaming state pointer returned by :func:`createStream()`.
         :type aSctx: object

-        :param aBuffer: An array of 16-bit, mono raw audio samples at the appropriate sample rate.
+        :param aBuffer: An array of 16-bit, mono raw audio samples at the appropriate sample rate (matching what the model was trained on).
         :type aBuffer: int array

         :param aBufferSize: The number of samples in @p aBuffer.
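For completeness, a hedged sketch of the batch calls whose docstrings are updated above, through the Python package. The constructor arguments, the model filename, and whether the buffer size is passed explicitly all varied across 0.x releases; everything here is illustrative rather than the definitive API:

import numpy as np
from deepspeech import Model  # the PyPI package exposes the Model class documented above

model = Model("output_graph.pbmm")       # assumed model path and single-argument constructor
pcm = load_pcm16("audio.wav")            # helper from the earlier sketch; enforces 16-bit mono at the right rate
audio = np.frombuffer(pcm, dtype=np.int16)

text = model.stt(audio, len(audio))                  # (aBuffer, aBufferSize) per the docstring above
metadata = model.sttWithMetadata(audio, len(audio))  # assumed name for the metadata variant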