Use explicit inference priorities instead of setting just allow_precision_loss
PiperOrigin-RevId: 283556973 Change-Id: If0f38f068e9a743ebb32f149a98759ce1ea4abef
This commit is contained in:
parent
6547b6511b
commit
e2c953b991
@@ -60,7 +60,9 @@ TfLiteDelegatePtr CreateGPUDelegate(Settings* s) {
   TfLiteGpuDelegateOptionsV2 gpu_opts = TfLiteGpuDelegateOptionsV2Default();
   gpu_opts.inference_preference =
       TFLITE_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED;
-  gpu_opts.is_precision_loss_allowed = s->allow_fp16 ? 1 : 0;
+  gpu_opts.inference_priority1 =
+      s->allow_fp16 ? TFLITE_GPU_INFERENCE_PRIORITY_MIN_LATENCY
+                    : TFLITE_GPU_INFERENCE_PRIORITY_MAX_PRECISION;
   return evaluation::CreateGPUDelegate(s->model, &gpu_opts);
 #else
   return evaluation::CreateGPUDelegate(s->model);
Loading…
Reference in New Issue
Block a user