Fix model delegation failure when accelerator name is specified for NNAPI version < 1.2
Before NNAPI 1.2 it was not possible to query drivers for the operations they support, so the delegate returned an error when calling the GetNodesSupportedByAccelerator function. With this fix, the supported-operations check is performed only if the current NNAPI version is at least 1.2.
PiperOrigin-RevId: 304343270
Change-Id: I090a4ec6dbb62919b2edc681fc011329eb14a668
commit 6e38f2672b (parent f0412987d7)
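In short, the delegate now applies the accelerator-specific operation filter only when the runtime can actually answer the query, i.e. on NNAPI 1.2 (Android API 29) or newer; on older runtimes it skips the filter instead of failing delegation. A condensed sketch of the new gate, using the identifiers from the first hunk below (surrounding setup omitted):

    // Only ask the driver which nodes it supports when the runtime can
    // answer the question (NNAPI feature level 1.2 / Android SDK 29+).
    if (is_accelerator_specified &&
        nnapi->android_sdk_version >= kMinSdkVersionForNNAPI12) {
      TF_LITE_ENSURE_STATUS(GetNodesSupportedByAccelerator(
          context, delegate, nnapi, supported_nodes, &nodes_to_delegate,
          &num_partitions, &params_array, nnapi_errno));
    }
    // Otherwise fall through to the default partitioning without per-device
    // filtering, as exercised by the new SDK-28 test further down.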
@@ -4260,8 +4260,10 @@ TfLiteStatus StatefulNnApiDelegate::DoPrepare(TfLiteContext* context,
   int num_partitions;
   TfLiteDelegateParams* params_array;
-  if (is_accelerator_specified) {
-    // Filtering out nodes not supported by target accelerators
+  if (is_accelerator_specified &&
+      nnapi->android_sdk_version >= kMinSdkVersionForNNAPI12) {
+    // Filtering out nodes not supported by target accelerators.
+    // Cannot query supported operation before NNAPI 1.2
     TF_LITE_ENSURE_STATUS(GetNodesSupportedByAccelerator(
         context, delegate, nnapi, supported_nodes, &nodes_to_delegate,
         &num_partitions, &params_array, nnapi_errno));
@@ -546,6 +546,26 @@ TEST_F(UnsupportedOperationOnDeviceTest, ShouldCacheModelCompilation) {
   EXPECT_EQ(should_cache_model_compilation_model_create_count, 1);
 }
 
+TEST_F(UnsupportedOperationOnDeviceTest,
+       ShouldNotApplySupportedOperationsFilterBeforeAndroidSdk29) {
+  nnapi_mock_->SetAndroidSdkVersion(28, /*set_unsupported_ops_to_null=*/true);
+  nnapi_mock_->ModelCreateReturns<0>();
+  AddSubOpsAcceleratedModel m(
+      {TensorType_FLOAT32, {1, 2, 2, 1}}, {TensorType_FLOAT32, {1, 2, 2, 1}},
+      {TensorType_FLOAT32, {1, 2, 2, 1}}, {TensorType_FLOAT32, {}},
+      ActivationFunctionType_NONE, nnapi_mock_->GetNnApi(),
+      /*accelerator_name=*/"test-device");
+  std::vector<float> input1{-2.0, 0.2, 0.7, 0.9};
+  std::vector<float> input2{0.1, 0.2, 0.3, 0.5};
+  m.PopulateTensor<float>(m.input1(), input1);
+  m.PopulateTensor<float>(m.input2(), input2);
+  m.PopulateTensor<float>(m.input3(), input2);
+  m.Invoke();
+
+  // Delegation succeeded without failures and all nodes have been delegated.
+  ASSERT_EQ(m.CountOpsExecutedByCpuKernel(), 0);
+}
+
 // Model with a chain of no-op (add with zero operations)
 // interleaved with no-op custom nodes.
 class LongIdentityModel : public MultiOpModel, public AcceleratedModel {
@@ -52,6 +52,7 @@ class NnApiMock : public ::tflite::nnapi::NnApiHandler {
    nnapi_->ASharedMemory_create = [](const char* name, size_t size) -> int {
      return open("/dev/zero", O_RDWR);
    };
+   nnapi_->ANeuralNetworksEvent_free = [](ANeuralNetworksEvent* event) {};
 
    ModelCreateReturns<ANEURALNETWORKS_NO_ERROR>();
    AddOperandReturns<ANEURALNETWORKS_NO_ERROR>();
@@ -68,6 +69,8 @@ class NnApiMock : public ::tflite::nnapi::NnApiHandler {
    ExecutionSetInputFromMemoryReturns<ANEURALNETWORKS_NO_ERROR>();
    ExecutionSetOutputFromMemoryReturns<ANEURALNETWORKS_NO_ERROR>();
    ExecutionComputeReturns<ANEURALNETWORKS_NO_ERROR>();
+   ExecutionStartComputeReturns<ANEURALNETWORKS_NO_ERROR>();
+   EventWaitReturns<ANEURALNETWORKS_NO_ERROR>();
    SetNnapiSupportedDevice("test-device", android_sdk_version);
  }
@@ -50,8 +50,41 @@ void NnApiHandler::Reset() {
   *nnapi_ = *NnApiPassthroughInstance();
 }
 
-void NnApiHandler::SetAndroidSdkVersion(int version) {
+void NnApiHandler::SetAndroidSdkVersion(int version,
+                                        bool set_unsupported_ops_to_null) {
   nnapi_->android_sdk_version = version;
+
+  if (!set_unsupported_ops_to_null) {
+    return;
+  }
+
+  if (version < 29) {
+    nnapi_->ANeuralNetworks_getDeviceCount = nullptr;
+    nnapi_->ANeuralNetworks_getDevice = nullptr;
+    nnapi_->ANeuralNetworksDevice_getName = nullptr;
+    nnapi_->ANeuralNetworksDevice_getVersion = nullptr;
+    nnapi_->ANeuralNetworksDevice_getFeatureLevel = nullptr;
+    nnapi_->ANeuralNetworksDevice_getType = nullptr;
+    nnapi_->ANeuralNetworksModel_getSupportedOperationsForDevices = nullptr;
+    nnapi_->ANeuralNetworksCompilation_createForDevices = nullptr;
+    nnapi_->ANeuralNetworksCompilation_setCaching = nullptr;
+    nnapi_->ANeuralNetworksExecution_compute = nullptr;
+    nnapi_->ANeuralNetworksExecution_getOutputOperandRank = nullptr;
+    nnapi_->ANeuralNetworksExecution_getOutputOperandDimensions = nullptr;
+    nnapi_->ANeuralNetworksBurst_create = nullptr;
+    nnapi_->ANeuralNetworksBurst_free = nullptr;
+    nnapi_->ANeuralNetworksExecution_burstCompute = nullptr;
+    nnapi_->ANeuralNetworksMemory_createFromAHardwareBuffer = nullptr;
+    nnapi_->ANeuralNetworksExecution_setMeasureTiming = nullptr;
+    nnapi_->ANeuralNetworksExecution_getDuration = nullptr;
+    nnapi_->ANeuralNetworksDevice_getExtensionSupport = nullptr;
+    nnapi_->ANeuralNetworksModel_getExtensionOperandType = nullptr;
+    nnapi_->ANeuralNetworksModel_getExtensionOperationType = nullptr;
+    nnapi_->ANeuralNetworksModel_setOperandExtensionData = nullptr;
+  }
+  if (version < 28) {
+    nnapi_->ANeuralNetworksModel_relaxComputationFloat32toFloat16 = nullptr;
+  }
 }
 
 void NnApiHandler::SetDeviceName(const std::string& name) {
@@ -252,7 +252,29 @@ class NnApiHandler {
     nnapi_->ANeuralNetworksModel_getSupportedOperationsForDevices = stub;
   }
 
-  void SetAndroidSdkVersion(int version);
+  template <int Value>
+  void ExecutionStartComputeReturns() {
+    nnapi_->ANeuralNetworksExecution_startCompute =
+        [](ANeuralNetworksExecution* execution, ANeuralNetworksEvent** event) {
+          *event = reinterpret_cast<ANeuralNetworksEvent*>(1);
+          return Value;
+        };
+  }
+
+  template <int Value>
+  void EventWaitReturns() {
+    nnapi_->ANeuralNetworksEvent_wait = [](ANeuralNetworksEvent* event) {
+      return Value;
+    };
+  }
+
+  /*
+   * Sets the SDK Version in the nnapi structure.
+   * If set_unsupported_ops_to_null is set to true, all the functions not
+   * available at the given sdk level will be set to null too.
+   */
+  void SetAndroidSdkVersion(int version,
+                            bool set_unsupported_ops_to_null = false);
 
   const NnApi* GetNnApi() { return nnapi_; }
@@ -85,6 +85,93 @@ TEST_F(NnApiHandlerTest, ShouldSupportPassthroughCalls) {
   EXPECT_THAT(nnapi->ANeuralNetworks_getDeviceCount(&device_count), Eq(1));
 }
 
+TEST_F(NnApiHandlerTest, ShouldSetNnApiMembersToNullAsPerSdkVersion_NNAPI11) {
+  auto* handler = NnApiHandler::Instance();
+
+  // Setting non null values for nnapi functions
+  handler->SetNnapiSupportedDevice("devvice", 1000);
+  handler->GetSupportedOperationsForDevicesReturns<1>();
+  handler->CompilationCreateForDevicesReturns<1>();
+  handler->ExecutionComputeReturns<1>();
+  handler->MemoryCreateFromFdReturns<1>();
+
+  handler->SetAndroidSdkVersion(28, /*set_unsupported_ops_to_null=*/true);
+
+  const NnApi* nnapi = NnApiImplementation();
+
+  using ::testing::IsNull;
+
+  EXPECT_THAT(nnapi->ANeuralNetworks_getDeviceCount, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworks_getDevice, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksDevice_getName, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksDevice_getVersion, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksDevice_getFeatureLevel, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksDevice_getType, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksModel_getSupportedOperationsForDevices,
+              IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksCompilation_createForDevices, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksCompilation_setCaching, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksExecution_compute, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksExecution_getOutputOperandRank, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksExecution_getOutputOperandDimensions,
+              IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksBurst_create, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksBurst_free, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksExecution_burstCompute, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksMemory_createFromAHardwareBuffer, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksExecution_setMeasureTiming, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksExecution_getDuration, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksDevice_getExtensionSupport, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksModel_getExtensionOperandType, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksModel_getExtensionOperationType, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksModel_setOperandExtensionData, IsNull());
+}
+
+TEST_F(NnApiHandlerTest, ShouldSetNnApiMembersToNullAsPerSdkVersion_NNAPI10) {
+  auto* handler = NnApiHandler::Instance();
+
+  // Setting non null values for nnapi functions
+  handler->SetNnapiSupportedDevice("devvice", 1000);
+  handler->GetSupportedOperationsForDevicesReturns<1>();
+  handler->CompilationCreateForDevicesReturns<1>();
+  handler->ExecutionComputeReturns<1>();
+  handler->MemoryCreateFromFdReturns<1>();
+
+  handler->SetAndroidSdkVersion(27, /*set_unsupported_ops_to_null=*/true);
+
+  const NnApi* nnapi = NnApiImplementation();
+
+  using ::testing::IsNull;
+
+  EXPECT_THAT(nnapi->ANeuralNetworks_getDeviceCount, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworks_getDevice, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksDevice_getName, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksDevice_getVersion, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksDevice_getFeatureLevel, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksDevice_getType, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksModel_getSupportedOperationsForDevices,
+              IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksCompilation_createForDevices, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksCompilation_setCaching, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksExecution_compute, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksExecution_getOutputOperandRank, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksExecution_getOutputOperandDimensions,
+              IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksBurst_create, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksBurst_free, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksExecution_burstCompute, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksMemory_createFromAHardwareBuffer, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksExecution_setMeasureTiming, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksExecution_getDuration, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksDevice_getExtensionSupport, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksModel_getExtensionOperandType, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksModel_getExtensionOperationType, IsNull());
+  EXPECT_THAT(nnapi->ANeuralNetworksModel_setOperandExtensionData, IsNull());
+
+  EXPECT_THAT(nnapi->ANeuralNetworksModel_relaxComputationFloat32toFloat16,
+              IsNull());
+}
+
 void ExpectEquals(const NnApi& left, const NnApi& right) {
 #define EXPECT_NNAPI_MEMBER_EQ(name) EXPECT_EQ(left.name, right.name)