[lite] Add allow_fp16_precision_for_fp32 field in NNAPI Delegate settings,

and deprecate the corresponding field in the general TF Lite settings.

PiperOrigin-RevId: 337828762
Change-Id: I84e8fbf359d3da66d51b825af2a6c738f624828d
This commit is contained in:
Fergus Henderson 2020-10-19 04:31:46 -07:00 committed by TensorFlower Gardener
parent 374eaebcef
commit ab4fc59e91
2 changed files with 6 insertions and 0 deletions

View File

@@ -127,6 +127,11 @@ message NNAPISettings {
// dynamic dimensions of the model.
// By default this is set to false.
optional bool allow_dynamic_dimensions = 9;
// Whether to allow the NNAPI accelerator to optionally use lower-precision
// float16 (16-bit floating point) arithmetic when doing calculations on
// float32 (32-bit floating point).
optional bool allow_fp16_precision_for_fp32 = 10;
}
// Which GPU backend to select. Default behaviour on Android is to try OpenCL

View File

@@ -96,6 +96,7 @@ class NnapiPlugin : public DelegatePluginInterface {
!nnapi_settings->allow_nnapi_cpu_on_android_10_plus();
options_.execution_priority =
ConvertExecutionPriority(nnapi_settings->execution_priority());
options_.allow_fp16 = nnapi_settings->allow_fp16_precision_for_fp32();
}
private: