Add an --export_quantize flag to control TFLite export quantization
This commit is contained in:
parent
c3cc7aae2e
commit
3cff3dd0de
|
@@ -1116,7 +1116,10 @@ def export():
         input_tensors=inputs.values(),
         output_tensors=outputs.values(),
     )
-    converter.optimizations = [tf.lite.Optimize.DEFAULT]
+
+    if Config.export_quantize:
+        converter.optimizations = [tf.lite.Optimize.DEFAULT]
+
     # AudioSpectrogram and Mfcc ops are custom but have built-in kernels in TFLite
     converter.allow_custom_ops = True
     tflite_model = converter.convert()
|
|
@@ -22,7 +22,7 @@ class _ConfigSingleton:
     _config = None
 
     def __getattr__(self, name):
-        if not _ConfigSingleton._config:
+        if _ConfigSingleton._config is None:
             raise RuntimeError("Global configuration not yet initialized.")
         if not hasattr(_ConfigSingleton._config, name):
             raise RuntimeError(
@@ -478,6 +478,10 @@ class _SttConfig(Coqpit):
     export_tflite: bool = field(
         default=False, metadata=dict(help="export a graph ready for TF Lite engine")
     )
+    export_quantize: bool = field(
+        default=True,
+        metadata=dict(help="export a quantized model (optimized for size)"),
+    )
     n_steps: int = field(
         default=16,
         metadata=dict(
|
Loading…
Reference in New Issue