Adapt Java bindings to const structs

Reuben Morais 2020-03-18 19:49:14 +01:00
parent 1547498e82
commit ee30a1c9de
7 changed files with 122 additions and 106 deletions

View File

@@ -6,6 +6,8 @@
 %}
 %include "typemaps.i"
+%include "enums.swg"
+%javaconst(1);
 %include "arrays_java.i"
 // apply to DS_FeedAudioContent and DS_SpeechToText
@@ -15,12 +17,6 @@
 %pointer_functions(ModelState*, modelstatep);
 %pointer_functions(StreamingState*, streamingstatep);
-%typemap(newfree) char* "DS_FreeString($1);";
-%include "carrays.i"
-%array_functions(struct TokenMetadata, TokenMetadata_array);
-%array_functions(struct CandidateTranscript, CandidateTranscript_array);
 %extend struct CandidateTranscript {
   /**
    * Retrieve one TokenMetadata element
@@ -29,8 +25,8 @@
    *
    * @return The TokenMetadata requested or null
    */
-  TokenMetadata getToken(int i) {
-    return TokenMetadata_array_getitem(self->tokens, i);
+  const TokenMetadata& getToken(int i) {
+    return self->tokens[i];
   }
 }
@@ -42,8 +38,8 @@
    *
    * @return The CandidateTranscript requested or null
    */
-  CandidateTranscript getTranscript(int i) {
-    return CandidateTranscript_array_getitem(self->transcripts, i);
+  const CandidateTranscript& getTranscript(int i) {
+    return self->transcripts[i];
   }
   ~Metadata() {
@@ -58,9 +54,11 @@
 %nodefaultctor TokenMetadata;
 %nodefaultdtor TokenMetadata;
+%typemap(newfree) char* "DS_FreeString($1);";
 %newobject DS_SpeechToText;
 %newobject DS_IntermediateDecode;
 %newobject DS_FinishStream;
+%newobject DS_ErrorCodeToErrorMessage;
 %rename ("%(strip:[DS_])s") "";
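
With the %array_functions copies gone, getToken and getTranscript index straight into the structs' own arrays and hand back const references, and the relocated newfree typemap (together with the new %newobject on DS_ErrorCodeToErrorMessage) routes freshly allocated strings through DS_FreeString. For Java callers this means the objects returned by these accessors are views into the parent Metadata rather than independent copies. A minimal sketch of the resulting calling pattern, assuming the generated getText() getter kept further down in TokenMetadata.java; the helper name is illustrative, and the point is to copy values into plain Java types while the owning Metadata is still alive:

import org.mozilla.deepspeech.libdeepspeech.CandidateTranscript;

public class TranscriptTextExample {
    // Illustrative helper (not part of the bindings): flattens one candidate
    // transcript into a plain Java String so nothing keeps pointing into the
    // native Metadata tree after it is freed.
    static String transcriptText(CandidateTranscript transcript) {
        StringBuilder text = new StringBuilder();
        for (int i = 0; i < transcript.getNum_tokens(); i++) {
            text.append(transcript.getToken(i).getText());
        }
        return text.toString();
    }
}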

View File

@@ -11,8 +11,15 @@ public class DeepSpeechModel {
     }
     // FIXME: We should have something better than those SWIGTYPE_*
-    SWIGTYPE_p_p_ModelState _mspp;
-    SWIGTYPE_p_ModelState _msp;
+    private SWIGTYPE_p_p_ModelState _mspp;
+    private SWIGTYPE_p_ModelState _msp;
+    private void evaluateErrorCode(int errorCode) {
+        DeepSpeech_Error_Codes code = DeepSpeech_Error_Codes.swigToEnum(errorCode);
+        if (code != DeepSpeech_Error_Codes.ERR_OK) {
+            throw new RuntimeException("Error: " + impl.ErrorCodeToErrorMessage(errorCode) + " (0x" + Integer.toHexString(errorCode) + ").");
+        }
+    }
     /**
      * @brief An object providing an interface to a trained DeepSpeech model.
@@ -20,10 +27,12 @@ public class DeepSpeechModel {
      * @constructor
      *
      * @param modelPath The path to the frozen model graph.
+     *
+     * @throws RuntimeException on failure.
      */
     public DeepSpeechModel(String modelPath) {
         this._mspp = impl.new_modelstatep();
-        impl.CreateModel(modelPath, this._mspp);
+        evaluateErrorCode(impl.CreateModel(modelPath, this._mspp));
         this._msp = impl.modelstatep_value(this._mspp);
     }
@@ -43,10 +52,10 @@ public class DeepSpeechModel {
      * @param aBeamWidth The beam width used by the model. A larger beam width value
      * generates better results at the cost of decoding time.
      *
-     * @return Zero on success, non-zero on failure.
+     * @throws RuntimeException on failure.
      */
-    public int setBeamWidth(long beamWidth) {
-        return impl.SetModelBeamWidth(this._msp, beamWidth);
+    public void setBeamWidth(long beamWidth) {
+        evaluateErrorCode(impl.SetModelBeamWidth(this._msp, beamWidth));
     }
     /**
@@ -70,19 +79,19 @@ public class DeepSpeechModel {
      *
      * @param scorer The path to the external scorer file.
      *
-     * @return Zero on success, non-zero on failure (invalid arguments).
+     * @throws RuntimeException on failure.
      */
     public void enableExternalScorer(String scorer) {
-        impl.EnableExternalScorer(this._msp, scorer);
+        evaluateErrorCode(impl.EnableExternalScorer(this._msp, scorer));
     }
     /**
      * @brief Disable decoding using an external scorer.
      *
-     * @return Zero on success, non-zero on failure (invalid arguments).
+     * @throws RuntimeException on failure.
      */
     public void disableExternalScorer() {
-        impl.DisableExternalScorer(this._msp);
+        evaluateErrorCode(impl.DisableExternalScorer(this._msp));
     }
     /**
@@ -91,10 +100,10 @@ public class DeepSpeechModel {
      * @param alpha The alpha hyperparameter of the decoder. Language model weight.
      * @param beta The beta hyperparameter of the decoder. Word insertion weight.
      *
-     * @return Zero on success, non-zero on failure (invalid arguments).
+     * @throws RuntimeException on failure.
      */
     public void setScorerAlphaBeta(float alpha, float beta) {
-        impl.SetScorerAlphaBeta(this._msp, alpha, beta);
+        evaluateErrorCode(impl.SetScorerAlphaBeta(this._msp, alpha, beta));
     }
     /*
@@ -132,10 +141,12 @@ public class DeepSpeechModel {
      * and finishStream().
      *
      * @return An opaque object that represents the streaming state.
+     *
+     * @throws RuntimeException on failure.
      */
     public DeepSpeechStreamingState createStream() {
         SWIGTYPE_p_p_StreamingState ssp = impl.new_streamingstatep();
-        impl.CreateStream(this._msp, ssp);
+        evaluateErrorCode(impl.CreateStream(this._msp, ssp));
         return new DeepSpeechStreamingState(impl.streamingstatep_value(ssp));
     }
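
Every native call in this class now goes through evaluateErrorCode, so failures surface as a RuntimeException carrying the DeepSpeech error message instead of an int status. A minimal setup sketch under that behaviour; the model and scorer paths are placeholders:

import org.mozilla.deepspeech.libdeepspeech.DeepSpeechModel;

public class ModelSetupExample {
    public static void main(String[] args) {
        try {
            // Construction and configuration now throw on failure instead of
            // returning error codes, so one try/catch covers the whole setup.
            DeepSpeechModel model = new DeepSpeechModel("output_graph.pbmm"); // placeholder path
            model.setBeamWidth(500);
            model.enableExternalScorer("external.scorer");                    // placeholder path
            model.setScorerAlphaBeta(0.75f, 1.85f);
        } catch (RuntimeException e) {
            System.err.println("DeepSpeech setup failed: " + e.getMessage());
        }
    }
}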

View File

@@ -9,8 +9,8 @@
 package org.mozilla.deepspeech.libdeepspeech;
 /**
- * A single transcript computed by the model, including a confidence value and
- * the metadata for its constituent tokens.
+ * A single transcript computed by the model, including a confidence<br>
+ * value and the metadata for its constituent tokens.
  */
 public class CandidateTranscript {
   private transient long swigCPtr;
@@ -35,13 +35,6 @@ public class CandidateTranscript {
     }
   }
-  /**
-   * Array of TokenMetadata objects
-   */
-  public void setTokens(TokenMetadata value) {
-    implJNI.CandidateTranscript_tokens_set(swigCPtr, this, TokenMetadata.getCPtr(value), value);
-  }
   /**
    * Array of TokenMetadata objects
    */
@@ -53,29 +46,13 @@ public class CandidateTranscript {
   /**
    * Size of the tokens array
    */
-  public void setNum_tokens(int value) {
-    implJNI.CandidateTranscript_num_tokens_set(swigCPtr, this, value);
-  }
-  /**
-   * Size of the tokens array
-   */
-  public int getNum_tokens() {
+  public long getNum_tokens() {
     return implJNI.CandidateTranscript_num_tokens_get(swigCPtr, this);
   }
   /**
-   * Approximated confidence value for this transcript. This is roughly the
-   * sum of the acoustic model logit values for each timestep/character that
-   * contributed to the creation of this transcript.
-   */
-  public void setConfidence(double value) {
-    implJNI.CandidateTranscript_confidence_set(swigCPtr, this, value);
-  }
-  /**
-   * Approximated confidence value for this transcript. This is roughly the
-   * sum of the acoustic model logit values for each timestep/character that
+   * Approximated confidence value for this transcript. This is roughly the<br>
+   * sum of the acoustic model logit values for each timestep/character that<br>
    * contributed to the creation of this transcript.
   */
   public double getConfidence() {
@@ -83,14 +60,14 @@ public class CandidateTranscript {
   }
   /**
-   * Retrieve one TokenMetadata element
-   *
-   * @param i Array index of the TokenMetadata to get
-   *
+   * Retrieve one TokenMetadata element<br>
+   * <br>
+   * @param i Array index of the TokenMetadata to get<br>
+   * <br>
    * @return The TokenMetadata requested or null
    */
   public TokenMetadata getToken(int i) {
-    return new TokenMetadata(implJNI.CandidateTranscript_getToken(swigCPtr, this, i), true);
+    return new TokenMetadata(implJNI.CandidateTranscript_getToken(swigCPtr, this, i), false);
   }
 }

View File

@@ -0,0 +1,65 @@
/* ----------------------------------------------------------------------------
* This file was automatically generated by SWIG (http://www.swig.org).
* Version 4.0.1
*
* Do not make changes to this file unless you know what you are doing--modify
* the SWIG interface file instead.
* ----------------------------------------------------------------------------- */
package org.mozilla.deepspeech.libdeepspeech;
public enum DeepSpeech_Error_Codes {
ERR_OK(0x0000),
ERR_NO_MODEL(0x1000),
ERR_INVALID_ALPHABET(0x2000),
ERR_INVALID_SHAPE(0x2001),
ERR_INVALID_SCORER(0x2002),
ERR_MODEL_INCOMPATIBLE(0x2003),
ERR_SCORER_NOT_ENABLED(0x2004),
ERR_FAIL_INIT_MMAP(0x3000),
ERR_FAIL_INIT_SESS(0x3001),
ERR_FAIL_INTERPRETER(0x3002),
ERR_FAIL_RUN_SESS(0x3003),
ERR_FAIL_CREATE_STREAM(0x3004),
ERR_FAIL_READ_PROTOBUF(0x3005),
ERR_FAIL_CREATE_SESS(0x3006),
ERR_FAIL_CREATE_MODEL(0x3007);
public final int swigValue() {
return swigValue;
}
public static DeepSpeech_Error_Codes swigToEnum(int swigValue) {
DeepSpeech_Error_Codes[] swigValues = DeepSpeech_Error_Codes.class.getEnumConstants();
if (swigValue < swigValues.length && swigValue >= 0 && swigValues[swigValue].swigValue == swigValue)
return swigValues[swigValue];
for (DeepSpeech_Error_Codes swigEnum : swigValues)
if (swigEnum.swigValue == swigValue)
return swigEnum;
throw new IllegalArgumentException("No enum " + DeepSpeech_Error_Codes.class + " with value " + swigValue);
}
@SuppressWarnings("unused")
private DeepSpeech_Error_Codes() {
this.swigValue = SwigNext.next++;
}
@SuppressWarnings("unused")
private DeepSpeech_Error_Codes(int swigValue) {
this.swigValue = swigValue;
SwigNext.next = swigValue+1;
}
@SuppressWarnings("unused")
private DeepSpeech_Error_Codes(DeepSpeech_Error_Codes swigEnum) {
this.swigValue = swigEnum.swigValue;
SwigNext.next = this.swigValue+1;
}
private final int swigValue;
private static class SwigNext {
private static int next = 0;
}
}
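
Because the interface file now pulls in enums.swg with %javaconst(1), these codes reach Java as a real enum with the native values baked into the constants, which is what evaluateErrorCode relies on. A small sketch of mapping a raw status code by hand, using only the generated swigToEnum and swigValue helpers:

import org.mozilla.deepspeech.libdeepspeech.DeepSpeech_Error_Codes;

public class ErrorCodeExample {
    public static void main(String[] args) {
        // 0x3000 corresponds to ERR_FAIL_INIT_MMAP in the table above.
        DeepSpeech_Error_Codes code = DeepSpeech_Error_Codes.swigToEnum(0x3000);
        System.out.println(code + " = 0x" + Integer.toHexString(code.swigValue()));
    }
}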

View File

@@ -39,13 +39,6 @@ public class Metadata {
     }
   }
-  /**
-   * Array of CandidateTranscript objects
-   */
-  public void setTranscripts(CandidateTranscript value) {
-    implJNI.Metadata_transcripts_set(swigCPtr, this, CandidateTranscript.getCPtr(value), value);
-  }
   /**
    * Array of CandidateTranscript objects
    */
@@ -57,26 +50,19 @@ public class Metadata {
   /**
    * Size of the transcripts array
    */
-  public void setNum_transcripts(int value) {
-    implJNI.Metadata_num_transcripts_set(swigCPtr, this, value);
-  }
-  /**
-   * Size of the transcripts array
-   */
-  public int getNum_transcripts() {
+  public long getNum_transcripts() {
     return implJNI.Metadata_num_transcripts_get(swigCPtr, this);
   }
   /**
-   * Retrieve one CandidateTranscript element
-   *
-   * @param i Array index of the CandidateTranscript to get
-   *
+   * Retrieve one CandidateTranscript element<br>
+   * <br>
+   * @param i Array index of the CandidateTranscript to get<br>
+   * <br>
   * @return The CandidateTranscript requested or null
    */
   public CandidateTranscript getTranscript(int i) {
-    return new CandidateTranscript(implJNI.Metadata_getTranscript(swigCPtr, this, i), true);
+    return new CandidateTranscript(implJNI.Metadata_getTranscript(swigCPtr, this, i), false);
   }
 }
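
With the setters removed and the array size exposed as a long, Metadata is effectively read-only from Java, and getTranscript now wraps the native pointer without claiming ownership of it. A short sketch of picking the most confident candidate; the helper name is illustrative:

import org.mozilla.deepspeech.libdeepspeech.CandidateTranscript;
import org.mozilla.deepspeech.libdeepspeech.Metadata;

public class BestTranscriptExample {
    // Illustrative helper: returns the candidate with the highest confidence,
    // or null when the metadata holds no transcripts.
    static CandidateTranscript bestTranscript(Metadata metadata) {
        CandidateTranscript best = null;
        for (int i = 0; i < metadata.getNum_transcripts(); i++) {
            CandidateTranscript candidate = metadata.getTranscript(i);
            if (best == null || candidate.getConfidence() > best.getConfidence()) {
                best = candidate;
            }
        }
        return best;
    }
}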

View File

@@ -4,7 +4,7 @@ Javadoc for Sphinx
 This code is only here for reference for documentation generation.
-To update, please build SWIG (4.0 at least) and then run from native_client/java:
+To update, please install SWIG (4.0 at least) and then run from native_client/java:
 .. code-block::

View File

@@ -34,13 +34,6 @@ public class TokenMetadata {
     }
   }
-  /**
-   * The text corresponding to this token
-   */
-  public void setText(String value) {
-    implJNI.TokenMetadata_text_set(swigCPtr, this, value);
-  }
   /**
    * The text corresponding to this token
   */
@@ -51,24 +44,10 @@ public class TokenMetadata {
   /**
    * Position of the token in units of 20ms
   */
-  public void setTimestep(int value) {
-    implJNI.TokenMetadata_timestep_set(swigCPtr, this, value);
-  }
-  /**
-   * Position of the token in units of 20ms
-   */
-  public int getTimestep() {
+  public long getTimestep() {
     return implJNI.TokenMetadata_timestep_get(swigCPtr, this);
   }
-  /**
-   * Position of the token in seconds
-   */
-  public void setStart_time(float value) {
-    implJNI.TokenMetadata_start_time_set(swigCPtr, this, value);
-  }
   /**
    * Position of the token in seconds
   */
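
The timestep accessor keeps its 20 ms unit but now reports the value as a long, next to the start time in seconds whose getter follows the retained comment above. A small timing sketch under those assumptions (the getStart_time() getter itself falls outside this diff and is assumed to return a float):

import org.mozilla.deepspeech.libdeepspeech.CandidateTranscript;
import org.mozilla.deepspeech.libdeepspeech.TokenMetadata;

public class TokenTimingExample {
    // Illustrative helper: prints each token with its position both as the
    // raw 20 ms timestep index and as the start time in seconds.
    static void printTiming(CandidateTranscript transcript) {
        for (int i = 0; i < transcript.getNum_tokens(); i++) {
            TokenMetadata token = transcript.getToken(i);
            System.out.printf("%s\ttimestep=%d (%d ms)\tstart=%.2fs%n",
                    token.getText(), token.getTimestep(),
                    token.getTimestep() * 20, token.getStart_time());
        }
    }
}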