Avoid using Eigen ThreadPool when thread count is 1

PiperOrigin-RevId: 239232993
This commit is contained in:
Jared Duke 2019-03-19 11:28:41 -07:00 committed by TensorFlower Gardener
parent 067a3a6c3f
commit eb1d8f7954

View File

@@ -526,15 +526,19 @@ void EvalFloat(TfLiteContext* context, TfLiteNode* node,
   float output_activation_min, output_activation_max;
   CalculateActivationRange(params->activation, &output_activation_min,
                            &output_activation_max);
-  KernelType effective_kernel_type;
-  if ((kernel_type == kMultithreadOptimized) &&
-      (params->dilation_width_factor != 1 ||
-       params->dilation_height_factor != 1)) {
-    // kMultithreadOptimized does not support dilation.
-    // Therefore, fallback to optimized.
-    effective_kernel_type = kGenericOptimized;
-  } else {
-    effective_kernel_type = kernel_type;
+  KernelType effective_kernel_type = kernel_type;
+  if (kernel_type == kMultithreadOptimized) {
+    if (context->recommended_num_threads == 1) {
+      // Use of kMultithreadOptimized is precomputed during |Prepare()|, whereas
+      // the actual thread count can change at any time. If the client requests
+      // a single thread (after Prepare()), fall back to optimized.
+      effective_kernel_type = kGenericOptimized;
+    } else if ((params->dilation_width_factor != 1) ||
+               (params->dilation_height_factor != 1)) {
+      // kMultithreadOptimized does not support dilation.
+      // Therefore, fallback to optimized.
+      effective_kernel_type = kGenericOptimized;
+    }
   }
   ConvParams op_params;
   op_params.padding_type = RuntimePaddingType(params->padding);