Update TFLite NNAPI delegate with NNAPI 1.2 features.

- Only try to delegate to NNAPI 1.2+ if there is at least one accelerator available.

PiperOrigin-RevId: 238036055
This commit is contained in:
A. Unique TensorFlower 2019-03-12 09:52:13 -07:00 committed by TensorFlower Gardener
parent f1d30ce1be
commit bb415e41f6
4 changed files with 51 additions and 1 deletions

View File

@@ -1286,7 +1286,18 @@ TfLiteDelegate* NnApiDelegate() {
!nnapi->nnapi_exists) {
return kTfLiteOk;
}
// For NNAPI 1.2+, check if there is any accelerator available.
// If not, don't delegate to NNAPI's CPU reference implementation.
if (nnapi->android_sdk_version >= kMinSdkVersionForNNAPI12) {
uint32_t device_count = 0;
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context, nnapi->ANeuralNetworks_getDeviceCount(&device_count));
// Any available accelerator will make the device_count larger than 1.
// More sophisticated check and whitelisting can be added later.
if (device_count <= 1) {
return kTfLiteOk;
}
}
// Allocate one element in vector already since TensorFlow Lite uses
// the first value as the number of nodes. The actual value will be set
// later, after the vector has been filled.

View File

@@ -132,6 +132,25 @@ enum {
ANEURALNETWORKS_PADDING_VALID = 2,
};
/**
 * Device types.
 *
 * The type of NNAPI device, as returned through the int32_t* out-param of
 * ANeuralNetworksDevice_getType(). NOTE(review): presumably mirrors NNAPI's
 * DeviceTypeCode enum in NeuralNetworks.h (API level 29) — the numeric
 * values below must stay in sync with the platform definition; verify
 * against the NDK header.
 */
enum {
/** The device type cannot be provided. */
ANEURALNETWORKS_DEVICE_UNKNOWN = 0,
/** The device does not fall into any category below. */
ANEURALNETWORKS_DEVICE_OTHER = 1,
/** The device runs NNAPI models on single or multi-core CPU. */
ANEURALNETWORKS_DEVICE_CPU = 2,
/** The device can run NNAPI models and also accelerate graphics APIs such
 * as OpenGL ES and Vulkan. */
ANEURALNETWORKS_DEVICE_GPU = 3,
/** Dedicated accelerator for Machine Learning workloads. */
ANEURALNETWORKS_DEVICE_ACCELERATOR = 4,
};
/**
* ANeuralNetworksMemory is an opaque type that represents memory.
*

View File

@@ -170,6 +170,7 @@ const NnApi LoadNnApi() {
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksDevice_getVersion);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksDevice_getFeatureLevel);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks, ANeuralNetworksDevice_getType);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,
ANeuralNetworksModel_getSupportedOperationsForDevices);
LOAD_FUNCTION_OPTIONAL(libneuralnetworks,

View File

@@ -694,6 +694,25 @@ struct NnApi {
int (*ANeuralNetworksDevice_getFeatureLevel)(
const ANeuralNetworksDevice* device, int64_t* featureLevel);
/**
* Get the type of a given device.
*
* The device type can be used to help application developers to distribute
* Machine Learning workloads and other workloads such as graphical rendering.
* E.g., for an app which renders AR scenes based on real time object
* detection results, the developer could choose an ACCELERATOR type device
* for ML workloads, and reserve GPU for graphical rendering.
*
* @param device The representation of the specified device.
* @param type The returned {@link DeviceTypeCode} of the specified device.
*
* @return ANEURALNETWORKS_NO_ERROR if successful.
*
* Available since API level 29.
*/
int (*ANeuralNetworksDevice_getType)(const ANeuralNetworksDevice* device,
int32_t* type);
/**
* Get the supported operations for a specified set of devices. If multiple
* devices are selected, the supported operation list is a union of supported