Merge pull request #1938 from coqui-ai/non-quantized-export

Non-quantized export + better error message on missing alphabet
Authored by Reuben Morais on 2021-08-12 11:29:21 +02:00; committed by GitHub.
commit 4fc60bf5e9
2 changed files with 13 additions and 2 deletions


@@ -1116,7 +1116,10 @@ def export():
         input_tensors=inputs.values(),
         output_tensors=outputs.values(),
     )
-    converter.optimizations = [tf.lite.Optimize.DEFAULT]
+
+    if Config.export_quantize:
+        converter.optimizations = [tf.lite.Optimize.DEFAULT]
+
     # AudioSpectrogram and Mfcc ops are custom but have built-in kernels in TFLite
     converter.allow_custom_ops = True
     tflite_model = converter.convert()
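For context, the converter's optimizations list is the switch that enables TFLite post-training quantization; when it stays empty, the exported model keeps full-precision float weights, which is exactly what the new flag makes possible. A minimal standalone sketch of the same pattern, assuming a hypothetical SavedModel path (the PR's converter is actually built from the session's input/output tensors):

    import tensorflow as tf

    # Hypothetical paths; STT's export() constructs the converter from the
    # training graph's input/output tensors instead of a SavedModel.
    converter = tf.lite.TFLiteConverter.from_saved_model("/tmp/stt-saved-model")

    export_quantize = True  # stand-in for the new --export_quantize flag
    if export_quantize:
        # Post-training dynamic-range quantization: substantially smaller
        # weights at some potential cost in accuracy.
        converter.optimizations = [tf.lite.Optimize.DEFAULT]

    # AudioSpectrogram and Mfcc are custom ops with built-in TFLite kernels.
    converter.allow_custom_ops = True

    tflite_model = converter.convert()
    with open("/tmp/output_graph.tflite", "wb") as f:
        f.write(tflite_model)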
@@ -1256,6 +1259,10 @@ def early_training_checks():
             "for loading and saving."
         )

+    if not Config.alphabet_config_path and not Config.bytes_output_mode:
+        log_error("Missing --alphabet_config_path flag, can't continue")
+        sys.exit(1)
+

 def main():
     initialize_globals_from_cli()
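The new guard fails fast: without an alphabet file and without bytes output mode, the model has no way to map outputs to labels, and per the PR title the previous failure mode was a much less helpful error later in training setup. A self-contained sketch of the pattern, with stand-ins for the global Config object and the project's log_error helper:

    import sys

    def log_error(msg):
        # Stand-in for STT's logging helper.
        print("ERROR:", msg, file=sys.stderr)

    def early_training_checks(config):
        # Fail fast with an actionable message instead of an obscure
        # crash further into training setup.
        if not config.alphabet_config_path and not config.bytes_output_mode:
            log_error("Missing --alphabet_config_path flag, can't continue")
            sys.exit(1)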


@@ -22,7 +22,7 @@ class _ConfigSingleton:
     _config = None

     def __getattr__(self, name):
-        if not _ConfigSingleton._config:
+        if _ConfigSingleton._config is None:
             raise RuntimeError("Global configuration not yet initialized.")
         if not hasattr(_ConfigSingleton._config, name):
             raise RuntimeError(
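The is None change is subtle but real: not obj tests truthiness, and a dict-like config object is falsy when empty, so a config that has been initialized but happens to hold no entries would wrongly be reported as "not yet initialized". Comparing against None tests exactly what the guard means. A minimal illustration with a hypothetical dict-backed config:

    class DictConfig(dict):
        """Hypothetical dict-like config, falsy when it holds no keys."""

    _config = DictConfig()    # initialized, but currently empty

    print(not _config)        # True  -- truthiness confuses empty with missing
    print(_config is None)    # False -- identity check only trips when unset

    _config = None
    print(_config is None)    # True  -- genuinely uninitialized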
@@ -478,6 +478,10 @@ class _SttConfig(Coqpit):
     export_tflite: bool = field(
         default=False, metadata=dict(help="export a graph ready for TF Lite engine")
     )
+    export_quantize: bool = field(
+        default=True,
+        metadata=dict(help="export a quantized model (optimized for size)"),
+    )
     n_steps: int = field(
         default=16,
         metadata=dict(
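Coqpit turns each dataclass field into a config entry whose help metadata surfaces in the generated CLI, so this addition is what exposes the value the export code reads as Config.export_quantize, with quantized output remaining the default. A minimal sketch of the same field in isolation, assuming coqpit's usual @dataclass usage (ExportConfig is a hypothetical stand-in for _SttConfig):

    from dataclasses import dataclass, field
    from coqpit import Coqpit

    @dataclass
    class ExportConfig(Coqpit):
        # Mirrors the new field: quantized export stays the default,
        # and users opt out to get a full-precision TFLite model.
        export_quantize: bool = field(
            default=True,
            metadata=dict(help="export a quantized model (optimized for size)"),
        )

    config = ExportConfig()
    print(config.export_quantize)  # True unless overridden from the CLI

Overriding the default from the command line makes export() skip the converter.optimizations assignment and write a non-quantized model, which is the behavior the PR title refers to.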