diff --git a/native_client/deepspeech_compat.h b/native_client/deepspeech_compat.h
index 3709ae4f..57f3d16c 100644
--- a/native_client/deepspeech_compat.h
+++ b/native_client/deepspeech_compat.h
@@ -103,7 +103,7 @@ void DS_DiscardStream(StreamingState* aSctx)
*
* @param aCtx The ModelState pointer for the model to use.
* @param aBuffer A 16-bit, mono raw audio signal at the appropriate
- * sample rate.
+ * sample rate (matching what the model was trained on).
* @param aBufferSize The number of samples in the audio signal.
* @param aSampleRate UNUSED, DEPRECATED.
*
@@ -124,7 +124,7 @@ char* DS_SpeechToText(ModelState* aCtx,
*
* @param aCtx The ModelState pointer for the model to use.
* @param aBuffer A 16-bit, mono raw audio signal at the appropriate
- * sample rate.
+ * sample rate (matching what the model was trained on).
* @param aBufferSize The number of samples in the audio signal.
* @param aSampleRate UNUSED, DEPRECATED.
*
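
For context (not part of the patch): the requirement these comments describe, 16-bit mono samples at the rate the model was trained on, is easy to get wrong on the caller side. Below is a minimal Python sketch of the kind of up-front check a caller can do before handing a buffer to DS_SpeechToText or a streaming feed; the 16 kHz value is only an example (the rate the released English models were trained on), and the helper name is made up for illustration.

    import wave

    import numpy as np

    # Example only: substitute whatever rate your model was actually trained on.
    MODEL_SAMPLE_RATE = 16000

    def load_model_ready_audio(path):
        """Read a WAV file and verify it matches the documented input format."""
        with wave.open(path, 'rb') as wav:
            if wav.getnchannels() != 1:
                raise ValueError('expected mono, got %d channels' % wav.getnchannels())
            if wav.getsampwidth() != 2:
                raise ValueError('expected 16-bit samples, got %d bytes/sample' % wav.getsampwidth())
            if wav.getframerate() != MODEL_SAMPLE_RATE:
                raise ValueError('expected %d Hz, got %d Hz'
                                 % (MODEL_SAMPLE_RATE, wav.getframerate()))
            frames = wav.readframes(wav.getnframes())
        # One int16 per sample; the length of this array is the "number of
        # samples" that the aBufferSize parameter refers to.
        return np.frombuffer(frames, dtype=np.int16)
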
diff --git a/native_client/dotnet/DeepSpeechClient/DeepSpeech.cs b/native_client/dotnet/DeepSpeechClient/DeepSpeech.cs
index 84a7fa2b..25fcc109 100644
--- a/native_client/dotnet/DeepSpeechClient/DeepSpeech.cs
+++ b/native_client/dotnet/DeepSpeechClient/DeepSpeech.cs
@@ -148,7 +148,7 @@ namespace DeepSpeechClient
/// <summary>
/// Feeds audio samples to an ongoing streaming inference.
/// </summary>
- /// <param name="aBuffer">An array of 16-bit, mono raw audio samples at the appropriate sample rate.</param>
+ /// <param name="aBuffer">An array of 16-bit, mono raw audio samples at the appropriate sample rate (matching what the model was trained on).</param>
public unsafe void FeedAudioContent(short[] aBuffer, uint aBufferSize)
{
NativeImp.DS_FeedAudioContent(_streamingStatePP, aBuffer, aBufferSize);
@@ -229,7 +229,7 @@ namespace DeepSpeechClient
/// <summary>
/// Use the DeepSpeech model to perform Speech-To-Text.
/// </summary>
- /// <param name="aBuffer">A 16-bit, mono raw audio signal at the appropriate sample rate.</param>
+ /// <param name="aBuffer">A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on).</param>
/// <param name="aBufferSize">The number of samples in the audio signal.</param>
/// <returns>The STT result. The user is responsible for freeing the string. Returns NULL on error.</returns>
public unsafe string SpeechToText(short[] aBuffer, uint aBufferSize)
@@ -240,7 +240,7 @@ namespace DeepSpeechClient
/// <summary>
/// Use the DeepSpeech model to perform Speech-To-Text.
/// </summary>
- /// <param name="aBuffer">A 16-bit, mono raw audio signal at the appropriate sample rate.</param>
+ /// <param name="aBuffer">A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on).</param>
/// <param name="aBufferSize">The number of samples in the audio signal.</param>
/// <returns>The extended metadata. The user is responsible for freeing the struct. Returns NULL on error.</returns>
public unsafe Models.Metadata SpeechToTextWithMetadata(short[] aBuffer, uint aBufferSize)
diff --git a/native_client/dotnet/DeepSpeechClient/Interfaces/IDeepSpeech.cs b/native_client/dotnet/DeepSpeechClient/Interfaces/IDeepSpeech.cs
index da18a8ba..79af2964 100644
--- a/native_client/dotnet/DeepSpeechClient/Interfaces/IDeepSpeech.cs
+++ b/native_client/dotnet/DeepSpeechClient/Interfaces/IDeepSpeech.cs
@@ -40,7 +40,7 @@ namespace DeepSpeechClient.Interfaces
/// <summary>
/// Use the DeepSpeech model to perform Speech-To-Text.
/// </summary>
- /// <param name="aBuffer">A 16-bit, mono raw audio signal at the appropriate sample rate.</param>
+ /// <param name="aBuffer">A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on).</param>
/// <param name="aBufferSize">The number of samples in the audio signal.</param>
/// <returns>The STT result. The user is responsible for freeing the string. Returns NULL on error.</returns>
unsafe string SpeechToText(short[] aBuffer,
@@ -49,7 +49,7 @@ namespace DeepSpeechClient.Interfaces
/// <summary>
/// Use the DeepSpeech model to perform Speech-To-Text.
/// </summary>
- /// <param name="aBuffer">A 16-bit, mono raw audio signal at the appropriate sample rate.</param>
+ /// <param name="aBuffer">A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on).</param>
/// <param name="aBufferSize">The number of samples in the audio signal.</param>
/// <returns>The extended metadata result. The user is responsible for freeing the struct. Returns NULL on error.</returns>
unsafe Metadata SpeechToTextWithMetadata(short[] aBuffer,
@@ -81,7 +81,7 @@ namespace DeepSpeechClient.Interfaces
/// <summary>
/// Feeds audio samples to an ongoing streaming inference.
/// </summary>
- /// <param name="aBuffer">An array of 16-bit, mono raw audio samples at the appropriate sample rate.</param>
+ /// <param name="aBuffer">An array of 16-bit, mono raw audio samples at the appropriate sample rate (matching what the model was trained on).</param>
unsafe void FeedAudioContent(short[] aBuffer, uint aBufferSize);

/// <summary>
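
Not part of the patch: the .NET comments above also stress the 16-bit, mono requirement. A small illustrative NumPy helper (the name and the float-in-[-1, 1] convention are made up for the example) for getting arbitrary float or multi-channel input into that shape:

    import numpy as np

    def to_mono_int16(samples):
        """Down-mix float audio shaped (n,) or (n, channels) to 16-bit mono."""
        samples = np.asarray(samples, dtype=np.float32)
        if samples.ndim == 2:
            # Average the channels into a single mono track.
            samples = samples.mean(axis=1)
        # Scale into the int16 range, clipping to avoid wrap-around on overflow.
        return (np.clip(samples, -1.0, 1.0) * 32767.0).astype(np.int16)
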
diff --git a/native_client/java/libdeepspeech/src/main/java/org/mozilla/deepspeech/libdeepspeech/DeepSpeechModel.java b/native_client/java/libdeepspeech/src/main/java/org/mozilla/deepspeech/libdeepspeech/DeepSpeechModel.java
index df0c047b..3a665c5e 100644
--- a/native_client/java/libdeepspeech/src/main/java/org/mozilla/deepspeech/libdeepspeech/DeepSpeechModel.java
+++ b/native_client/java/libdeepspeech/src/main/java/org/mozilla/deepspeech/libdeepspeech/DeepSpeechModel.java
@@ -57,7 +57,7 @@ public class DeepSpeechModel {
* @brief Use the DeepSpeech model to perform Speech-To-Text.
*
* @param buffer A 16-bit, mono raw audio signal at the appropriate
- * sample rate.
+ * sample rate (matching what the model was trained on).
* @param buffer_size The number of samples in the audio signal.
*
* @return The STT result.
@@ -71,7 +71,7 @@ public class DeepSpeechModel {
* about the results.
*
* @param buffer A 16-bit, mono raw audio signal at the appropriate
- * sample rate.
+ * sample rate (matching what the model was trained on).
* @param buffer_size The number of samples in the audio signal.
*
* @return Outputs a Metadata object of individual letters along with their timing information.
@@ -98,7 +98,7 @@ public class DeepSpeechModel {
*
* @param cctx A streaming state pointer returned by createStream().
* @param buffer An array of 16-bit, mono raw audio samples at the
- * appropriate sample rate.
+ * appropriate sample rate (matching what the model was trained on).
* @param buffer_size The number of samples in @p buffer.
*/
public void feedAudioContent(DeepSpeechStreamingState ctx, short[] buffer, int buffer_size) {
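
Not part of the patch: if the source audio is not already at the model's training rate, it has to be resampled before calling the stt or feedAudioContent entry points. A deliberately crude linear-interpolation sketch follows; a real pipeline should use a proper resampler (sox, ffmpeg, libsamplerate, and so on):

    import numpy as np

    def resample_linear(samples, source_rate, target_rate):
        """Crude linear-interpolation resampling of int16 mono audio."""
        if source_rate == target_rate:
            return samples
        # Time of each source sample and of each desired target sample.
        source_times = np.arange(len(samples)) / float(source_rate)
        target_count = int(round(len(samples) * target_rate / float(source_rate)))
        target_times = np.arange(target_count) / float(target_rate)
        resampled = np.interp(target_times, source_times, samples.astype(np.float32))
        return resampled.astype(np.int16)
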
diff --git a/native_client/javascript/index.js b/native_client/javascript/index.js
index ab7f2e2d..f6446f4d 100644
--- a/native_client/javascript/index.js
+++ b/native_client/javascript/index.js
@@ -64,7 +64,7 @@ Model.prototype.enableDecoderWithLM = function() {
/**
* Use the DeepSpeech model to perform Speech-To-Text.
*
- * @param {object} aBuffer A 16-bit, mono raw audio signal at the appropriate sample rate.
+ * @param {object} aBuffer A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on).
* @param {number} aBufferSize The number of samples in the audio signal.
*
* @return {string} The STT result. Returns undefined on error.
@@ -78,7 +78,7 @@ Model.prototype.stt = function() {
* Use the DeepSpeech model to perform Speech-To-Text and output metadata
* about the results.
*
- * @param {object} aBuffer A 16-bit, mono raw audio signal at the appropriate sample rate.
+ * @param {object} aBuffer A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on).
* @param {number} aBufferSize The number of samples in the audio signal.
*
* @return {object} Outputs a :js:func:`Metadata` struct of individual letters along with their timing information. The user is responsible for freeing Metadata by calling :js:func:`FreeMetadata`. Returns undefined on error.
@@ -111,7 +111,7 @@ Model.prototype.createStream = function() {
*
* @param {object} aSctx A streaming state returned by :js:func:`Model.setupStream`.
* @param {buffer} aBuffer An array of 16-bit, mono raw audio samples at the
- * appropriate sample rate.
+ * appropriate sample rate (matching what the model was trained on).
* @param {number} aBufferSize The number of samples in @param aBuffer.
*/
Model.prototype.feedAudioContent = function() {
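
Not part of the patch: the streaming feed call documented above is meant to be invoked repeatedly with successive pieces of the signal. A short Python sketch of slicing a prepared int16 buffer into chunks (the chunk size is arbitrary here), where each chunk is what a caller would hand to the binding's feedAudioContent / DS_FeedAudioContent:

    import numpy as np

    CHUNK_SAMPLES = 1024  # arbitrary illustrative chunk size

    def audio_chunks(samples, chunk_size=CHUNK_SAMPLES):
        """Yield successive int16 chunks of a mono buffer already at the model's rate."""
        samples = np.asarray(samples, dtype=np.int16)
        for start in range(0, len(samples), chunk_size):
            # Each chunk is fed to the streaming state, one piece at a time.
            yield samples[start:start + chunk_size]
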
diff --git a/native_client/python/__init__.py b/native_client/python/__init__.py
index 14618ded..62ea1eb5 100644
--- a/native_client/python/__init__.py
+++ b/native_client/python/__init__.py
@@ -69,7 +69,7 @@ class Model(object):
"""
Use the DeepSpeech model to perform Speech-To-Text.
- :param aBuffer: A 16-bit, mono raw audio signal at the appropriate sample rate.
+ :param aBuffer: A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on).
:type aBuffer: int array
:param aBufferSize: The number of samples in the audio signal.
@@ -84,7 +84,7 @@ class Model(object):
"""
Use the DeepSpeech model to perform Speech-To-Text and output metadata about the results.
- :param aBuffer: A 16-bit, mono raw audio signal at the appropriate sample rate.
+ :param aBuffer: A 16-bit, mono raw audio signal at the appropriate sample rate (matching what the model was trained on).
:type aBuffer: int array
:param aBufferSize: The number of samples in the audio signal.
@@ -117,7 +117,7 @@ class Model(object):
:param aSctx: A streaming state pointer returned by :func:`createStream()`.
:type aSctx: object
- :param aBuffer: An array of 16-bit, mono raw audio samples at the appropriate sample rate.
+ :param aBuffer: An array of 16-bit, mono raw audio samples at the appropriate sample rate (matching what the model was trained on).
:type aBuffer: int array
:param aBufferSize: The number of samples in @p aBuffer.
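
Not part of the patch: putting the documented Python signature together, a batch transcription sketch. The Model constructor arguments have changed between DeepSpeech releases, so the path and arguments below are placeholders; the stt() call mirrors the aBuffer / aBufferSize parameters documented in this file.

    import wave

    import numpy as np
    from deepspeech import Model  # package built from native_client/python

    # Placeholder path; constructor arguments (beam width, etc.) vary by release.
    ds = Model('output_graph.pbmm')

    # The WAV must already be 16-bit mono at the rate the model was trained on.
    with wave.open('utterance.wav', 'rb') as wav:
        audio = np.frombuffer(wav.readframes(wav.getnframes()), dtype=np.int16)

    # stt mirrors the aBuffer / aBufferSize parameters documented above.
    print(ds.stt(audio, len(audio)))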