Refactors NnApiMock, extracting a class (NnApiHandler) that can be used to inject NNAPI failures in native tests.

PiperOrigin-RevId: 285772507
Change-Id: If71fda67779695bc0dff1ef3347540c2e00c8554
Parent: 0463caa7bc
Commit: 993c3f5545
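
As a hedged illustration of the intent (not part of the commit), a native test could inject NNAPI failures through the extracted handler roughly as below; the test name and body are assumptions, only the NnApiHandler calls come from this change:

#include <gtest/gtest.h>

#include "tensorflow/lite/nnapi/nnapi_handler.h"

namespace {

class NnApiFailureInjectionTest : public ::testing::Test {
 protected:
  // Restore the process-wide NnApi struct so later tests see the real NNAPI.
  void TearDown() override {
    tflite::nnapi::NnApiHandler::Instance()->Reset();
  }
};

TEST_F(NnApiFailureInjectionTest, HandlesModelCreateFailure) {
  // Make every ANeuralNetworksModel_create call report a non-zero status.
  tflite::nnapi::NnApiHandler::Instance()->ModelCreateReturns<1>();
  // ...exercise code that uses NnApiImplementation() and expect it to cope
  // with the failure (omitted in this sketch).
}

}  // namespace
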
@@ -103,6 +103,7 @@ cc_library(
    }),
    deps = [
        ":nnapi_delegate",
        "//tensorflow/lite/nnapi:nnapi_handler",
        "//tensorflow/lite/nnapi:nnapi_implementation",
        "@com_google_absl//absl/memory",
        "@com_google_googletest//:gtest",

@@ -121,7 +122,6 @@ cc_test(
    ],
    deps = [
        ":nnapi_delegate",
        ":nnapi_delegate_mock_test",
        "//tensorflow/lite:framework",
        "//tensorflow/lite:minimal_logging",
        "//tensorflow/lite/c:common",

@@ -28,134 +28,17 @@ limitations under the License.
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include "tensorflow/lite/nnapi/nnapi_handler.h"
#include "tensorflow/lite/nnapi/nnapi_implementation.h"

namespace tflite {
namespace delegate {
namespace nnapi {

class NnApiMock {
class NnApiMock : public ::tflite::nnapi::NnApiHandler {
 public:
  template <int Value>
  void GetDeviceCountReturns() {
    nnapi_->ANeuralNetworks_getDeviceCount = [](uint32_t* numDevices) -> int {
      *numDevices = 2;
      return Value;
    };
  }

  template <int Value>
  void ModelCreateReturns() {
    nnapi_->ANeuralNetworksModel_create = [](ANeuralNetworksModel** model) {
      *model = reinterpret_cast<ANeuralNetworksModel*>(1);
      return Value;
    };
  }

  template <int Value>
  void AddOperandReturns() {
    nnapi_->ANeuralNetworksModel_addOperand =
        [](ANeuralNetworksModel* model,
           const ANeuralNetworksOperandType* type) { return Value; };
  }

  template <int Value>
  void SetOperandValueReturns() {
    nnapi_->ANeuralNetworksModel_setOperandValue =
        [](ANeuralNetworksModel* model, int32_t index, const void* buffer,
           size_t length) { return Value; };
  }

  template <int Value>
  void AddOperationReturns() {
    nnapi_->ANeuralNetworksModel_addOperation =
        [](ANeuralNetworksModel* model, ANeuralNetworksOperationType type,
           uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount,
           const uint32_t* outputs) { return Value; };
  }

  template <int Value>
  void IdentifyInputAndOutputsReturns() {
    nnapi_->ANeuralNetworksModel_identifyInputsAndOutputs =
        [](ANeuralNetworksModel* model, uint32_t inputCount,
           const uint32_t* inputs, uint32_t outputCount,
           const uint32_t* outputs) { return Value; };
  }

  template <int Value>
  void RelaxComputationFloatReturns() {
    nnapi_->ANeuralNetworksModel_relaxComputationFloat32toFloat16 =
        [](ANeuralNetworksModel* model, bool allow) { return Value; };
  }

  template <int Value>
  void ModelFinishReturns() {
    nnapi_->ANeuralNetworksModel_finish = [](ANeuralNetworksModel* model) {
      return Value;
    };
  }

  template <int Value>
  void MemoryCreateFromFdReturns() {
    nnapi_->ANeuralNetworksMemory_createFromFd =
        [](size_t size, int protect, int fd, size_t offset,
           ANeuralNetworksMemory** memory) {
          *memory = reinterpret_cast<ANeuralNetworksMemory*>(2);
          return Value;
        };
  }

  template <int Value>
  void CompilationCreateReturns() {
    nnapi_->ANeuralNetworksCompilation_create =
        [](ANeuralNetworksModel* model,
           ANeuralNetworksCompilation** compilation) {
          *compilation = reinterpret_cast<ANeuralNetworksCompilation*>(3);
          return Value;
        };
  }

  template <int Value>
  void CompilationFinishReturns() {
    nnapi_->ANeuralNetworksCompilation_finish =
        [](ANeuralNetworksCompilation* compilation) { return Value; };
  }

  template <int Value>
  void ExecutionCreateReturns() {
    nnapi_->ANeuralNetworksExecution_create =
        [](ANeuralNetworksCompilation* compilation,
           ANeuralNetworksExecution** execution) {
          if (compilation == nullptr) return 1;
          *execution = reinterpret_cast<ANeuralNetworksExecution*>(4);
          return Value;
        };
  }
  template <int Value>
  void ExecutionSetInputFromMemoryReturns() {
    nnapi_->ANeuralNetworksExecution_setInputFromMemory =
        [](ANeuralNetworksExecution* execution, int32_t index,
           const ANeuralNetworksOperandType* type,
           const ANeuralNetworksMemory* memory, size_t offset,
           size_t length) { return Value; };
  }
  template <int Value>
  void ExecutionSetOutputFromMemoryReturns() {
    nnapi_->ANeuralNetworksExecution_setOutputFromMemory =
        [](ANeuralNetworksExecution* execution, int32_t index,
           const ANeuralNetworksOperandType* type,
           const ANeuralNetworksMemory* memory, size_t offset,
           size_t length) { return Value; };
  }

  template <int Value>
  void ExecutionComputeReturns() {
    nnapi_->ANeuralNetworksExecution_compute =
        [](ANeuralNetworksExecution* execution) { return Value; };
  }

  explicit NnApiMock(NnApi* nnapi, int android_sdk_version = 29)
      : nnapi_(nnapi), prev_nnapi_(*nnapi) {
      : ::tflite::nnapi::NnApiHandler(nnapi) {
    nnapi_->nnapi_exists = true;
    nnapi_->android_sdk_version = android_sdk_version;

@@ -186,14 +69,7 @@ class NnApiMock {
    ExecutionComputeReturns<0>();
  }

  ~NnApiMock() {
    // Restores global NNAPI to original value for non mocked tests
    *nnapi_ = prev_nnapi_;
  }

 private:
  NnApi* nnapi_;
  NnApi prev_nnapi_;
  ~NnApiMock() { Reset(); }
};

class NnApiDelegateMockTest : public ::testing::Test {
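
The body of NnApiDelegateMockTest is outside the hunks shown above. For orientation only, a hedged sketch of how such a fixture can install NnApiMock (the fixture name, member name, and setup below are assumptions, not the file's actual contents); it also shows why the target depends on absl/memory:

// Assumed to live in the same namespace as NnApiMock.
class ExampleDelegateMockTest : public ::testing::Test {
 protected:
  void SetUp() override {
    // Install the mock over the process-wide NnApi struct; the NnApiMock
    // destructor calls NnApiHandler::Reset() to restore it afterwards.
    nnapi_mock_ = absl::make_unique<NnApiMock>(
        const_cast<NnApi*>(NnApiImplementation()));
  }

  std::unique_ptr<NnApiMock> nnapi_mock_;
};
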
@@ -57,7 +57,7 @@ cc_library(
        "//conditions:default": ["-lrt"],
    }),
    deps = [
        "//tensorflow/lite/nnapi:nnapi_lib",
        ":nnapi_lib",
    ],
)

@@ -76,7 +76,43 @@ cc_test(
    name = "nnapi_implementation_test",
    srcs = ["nnapi_implementation_test.cc"],
    deps = [
        "//tensorflow/lite/nnapi:nnapi_implementation",
        ":nnapi_implementation",
        "@com_google_googletest//:gtest_main",
    ],
)

# Cannot inject NNAPI instance on ios and windows
cc_library(
    name = "nnapi_handler",
    srcs = select({
        "//tensorflow:ios": [],
        "//tensorflow:windows": [],
        "//conditions:default": ["nnapi_handler.cc"],
    }),
    hdrs = select({
        "//tensorflow:ios": [],
        "//tensorflow:windows": [],
        "//conditions:default": ["nnapi_handler.h"],
    }),
    deps = [
        ":nnapi_implementation",
        ":nnapi_lib",
        "//tensorflow/core/platform:logging",
        "//tensorflow/lite:framework",
    ],
)

cc_test(
    name = "nnapi_handler_test",
    srcs = ["nnapi_handler_test.cc"],
    tags = [
        "no_mac",
        "no_windows",
        "tflite_not_portable_ios",
    ],
    deps = [
        ":nnapi_handler",
        ":nnapi_implementation",
        "@com_google_googletest//:gtest_main",
    ],
)

tensorflow/lite/nnapi/nnapi_handler.cc (new file, 44 lines)
@@ -0,0 +1,44 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/nnapi/nnapi_handler.h"

#include <cstdio>

#include "tensorflow/lite/nnapi/nnapi_implementation.h"

namespace tflite {
namespace nnapi {

const NnApi* NnApiPassthroughInstance() {
  static const NnApi orig_nnapi_copy = *NnApiImplementation();
  return &orig_nnapi_copy;
}

// static
NnApiHandler* NnApiHandler::Instance() {
  // Ensuring that the original copy of nnapi is saved before we return
  // access to NnApiHandler
  NnApiPassthroughInstance();
  static NnApiHandler handler{const_cast<NnApi*>(NnApiImplementation())};
  return &handler;
}

void NnApiHandler::Reset() {
  // Restores global NNAPI to original value
  *nnapi_ = *NnApiPassthroughInstance();
}

}  // namespace nnapi
}  // namespace tflite

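A hedged sketch of the pass-through pattern that the saved copy above enables: a stub can delegate selected calls to the unaltered function pointers kept by NnApiPassthroughInstance(). The helper name and failure policy below are illustrative assumptions, not part of the commit:

#include <cstdint>

#include "tensorflow/lite/nnapi/nnapi_handler.h"
#include "tensorflow/lite/nnapi/nnapi_implementation.h"

// Installs a device-count stub that forwards the first call to the original
// NNAPI and reports an error on every later call.
void InstallFlakyDeviceCountStub() {
  tflite::nnapi::NnApiHandler::Instance()->StubGetDeviceCountWith(
      [](uint32_t* device_count) -> int {
        static int calls = 0;
        if (calls++ == 0) {
          // Defer to the unaltered copy saved by NnApiPassthroughInstance().
          return tflite::nnapi::NnApiPassthroughInstance()
              ->ANeuralNetworks_getDeviceCount(device_count);
        }
        return 1;  // any non-zero NNAPI status code signals failure
      });
}
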
tensorflow/lite/nnapi/nnapi_handler.h (new file, 197 lines)
@@ -0,0 +1,197 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_NNAPI_NNAPI_HANDLER_H_
#define TENSORFLOW_LITE_NNAPI_NNAPI_HANDLER_H_

#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/nnapi/nnapi_implementation.h"

namespace tflite {
namespace nnapi {

// Offers an interface to alter the behaviour of the NNAPI instance.
// Like NNAPI itself, it is designed to be a singleton.
// It allows the behaviour of some of the methods to be replaced with stub
// implementations and then restored to the original one using Reset().
//
class NnApiHandler {
 public:
  // No destructor defined to allow this class to be used as singleton.

  // Factory method, only one instance per process/jni library.
  static NnApiHandler* Instance();

  // Makes the current object a transparent proxy again, resetting any
  // applied changes to its methods.
  void Reset();

  // Using templates in the ...Returns methods because the functions need to be
  // stateless and the template generated code is more readable than using a
  // file-local variable in the method implementation to store the configured
  // result.

  template <int Value>
  void GetDeviceCountReturns() {
    nnapi_->ANeuralNetworks_getDeviceCount = [](uint32_t* numDevices) -> int {
      *numDevices = 2;
      return Value;
    };
  }

  void StubGetDeviceCountWith(int(stub)(uint32_t*)) {
    nnapi_->ANeuralNetworks_getDeviceCount = stub;
  }

  template <int Value>
  void ModelCreateReturns() {
    nnapi_->ANeuralNetworksModel_create = [](ANeuralNetworksModel** model) {
      *model = reinterpret_cast<ANeuralNetworksModel*>(1);
      return Value;
    };
  }

  template <int Value>
  void AddOperandReturns() {
    nnapi_->ANeuralNetworksModel_addOperand =
        [](ANeuralNetworksModel* model,
           const ANeuralNetworksOperandType* type) { return Value; };
  }

  template <int Value>
  void SetOperandValueReturns() {
    nnapi_->ANeuralNetworksModel_setOperandValue =
        [](ANeuralNetworksModel* model, int32_t index, const void* buffer,
           size_t length) { return Value; };
  }

  template <int Value>
  void AddOperationReturns() {
    nnapi_->ANeuralNetworksModel_addOperation =
        [](ANeuralNetworksModel* model, ANeuralNetworksOperationType type,
           uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount,
           const uint32_t* outputs) { return Value; };
  }

  template <int Value>
  void IdentifyInputAndOutputsReturns() {
    nnapi_->ANeuralNetworksModel_identifyInputsAndOutputs =
        [](ANeuralNetworksModel* model, uint32_t inputCount,
           const uint32_t* inputs, uint32_t outputCount,
           const uint32_t* outputs) { return Value; };
  }

  template <int Value>
  void RelaxComputationFloatReturns() {
    nnapi_->ANeuralNetworksModel_relaxComputationFloat32toFloat16 =
        [](ANeuralNetworksModel* model, bool allow) { return Value; };
  }

  template <int Value>
  void ModelFinishReturns() {
    nnapi_->ANeuralNetworksModel_finish = [](ANeuralNetworksModel* model) {
      return Value;
    };
  }

  template <int Value>
  void MemoryCreateFromFdReturns() {
    nnapi_->ANeuralNetworksMemory_createFromFd =
        [](size_t size, int protect, int fd, size_t offset,
           ANeuralNetworksMemory** memory) {
          *memory = reinterpret_cast<ANeuralNetworksMemory*>(2);
          return Value;
        };
  }

  template <int Value>
  void CompilationCreateReturns() {
    nnapi_->ANeuralNetworksCompilation_create =
        [](ANeuralNetworksModel* model,
           ANeuralNetworksCompilation** compilation) {
          *compilation = reinterpret_cast<ANeuralNetworksCompilation*>(3);
          return Value;
        };
  }

  template <int Value>
  void CompilationFinishReturns() {
    nnapi_->ANeuralNetworksCompilation_finish =
        [](ANeuralNetworksCompilation* compilation) { return Value; };
  }

  template <int Value>
  void ExecutionCreateReturns() {
    nnapi_->ANeuralNetworksExecution_create =
        [](ANeuralNetworksCompilation* compilation,
           ANeuralNetworksExecution** execution) {
          if (compilation == nullptr) return 1;
          *execution = reinterpret_cast<ANeuralNetworksExecution*>(4);
          return Value;
        };
  }
  template <int Value>
  void ExecutionSetInputFromMemoryReturns() {
    nnapi_->ANeuralNetworksExecution_setInputFromMemory =
        [](ANeuralNetworksExecution* execution, int32_t index,
           const ANeuralNetworksOperandType* type,
           const ANeuralNetworksMemory* memory, size_t offset,
           size_t length) { return Value; };
  }
  template <int Value>
  void ExecutionSetOutputFromMemoryReturns() {
    nnapi_->ANeuralNetworksExecution_setOutputFromMemory =
        [](ANeuralNetworksExecution* execution, int32_t index,
           const ANeuralNetworksOperandType* type,
           const ANeuralNetworksMemory* memory, size_t offset,
           size_t length) { return Value; };
  }

  template <int Value>
  void ExecutionComputeReturns() {
    nnapi_->ANeuralNetworksExecution_compute =
        [](ANeuralNetworksExecution* execution) { return Value; };
  }

 protected:
  explicit NnApiHandler(NnApi* nnapi) : nnapi_(nnapi) { DCHECK(nnapi); }

  NnApi* nnapi_;
};

// Returns a pointer to an unaltered instance of NNAPI. It is intended to be
// used by stub methods that want to pass through to the original
// implementation, for example:
//
// NnApiTestUtility()->StubGetDeviceWith(
//     [](uint32_t devIndex, ANeuralNetworksDevice** device) -> int {
//       static int count = 0;
//       if (count++ < 1) {
//         NnApiPassthroughInstance()->ANeuralNetworks_getDevice(
//             devIndex, device);
//       } else {
//         return ANEURALNETWORKS_BAD_DATA;
//       }
//     });
const NnApi* NnApiPassthroughInstance();

// Returns an instance of NnApiHandler that can be used to alter
// the behaviour of the TFLite-wide instance of NnApi.
NnApiHandler* NnApiProxyInstance();

}  // namespace nnapi
}  // namespace tflite

#endif  // TENSORFLOW_LITE_NNAPI_NNAPI_HANDLER_H_

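The class comment above motivates the template parameter on the *Returns methods: the stubs are installed as raw function pointers in the NnApi struct, so they must be capture-less lambdas, and the configured return code is therefore baked in at compile time as a template argument. A hedged sketch of configuring a scenario this way (the helper function is an assumption, not part of the commit):

#include "tensorflow/lite/nnapi/nnapi_handler.h"

// Configures a scenario in which model and compilation objects are created
// successfully but finishing the compilation fails.
void ConfigureFailingCompilation() {
  auto* handler = tflite::nnapi::NnApiHandler::Instance();
  // Each call instantiates a distinct capture-less lambda whose return value
  // is the template argument, letting it decay to a plain function pointer.
  handler->ModelCreateReturns<0>();
  handler->ModelFinishReturns<0>();
  handler->CompilationCreateReturns<0>();
  handler->CompilationFinishReturns<1>();  // simulated failure
}
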
tensorflow/lite/nnapi/nnapi_handler_test.cc (new file, 143 lines)
@@ -0,0 +1,143 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/nnapi/nnapi_handler.h"

#include <cstdint>
#include <cstdio>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/nnapi/nnapi_implementation.h"

namespace tflite {
namespace nnapi {

using testing::Eq;
using testing::Ne;
using testing::NotNull;

void ExpectEquals(const NnApi& left, const NnApi& right);

class NnApiHandlerTest : public ::testing::Test {
 protected:
  ~NnApiHandlerTest() override { NnApiHandler::Instance()->Reset(); }
};

TEST_F(NnApiHandlerTest, ShouldAlterNnApiInstanceBehaviour) {
  const NnApi* nnapi = NnApiImplementation();

  const auto device_count_stub = [](uint32_t* device_count) -> int {
    *device_count = 999;
    return ANEURALNETWORKS_NO_ERROR;
  };

  NnApiHandler::Instance()->StubGetDeviceCountWith(device_count_stub);

  ASSERT_THAT(nnapi->ANeuralNetworks_getDeviceCount, NotNull());

  uint32_t device_count = 0;
  nnapi->ANeuralNetworks_getDeviceCount(&device_count);
  EXPECT_THAT(device_count, Eq(999));
}

TEST_F(NnApiHandlerTest, ShouldRestoreNnApiToItsOriginalValueWithReset) {
  NnApi nnapi_orig_copy = *NnApiImplementation();

  auto device_count_override = [](uint32_t* device_count) -> int {
    *device_count = 777;
    return ANEURALNETWORKS_NO_ERROR;
  };

  NnApiHandler::Instance()->StubGetDeviceCountWith(device_count_override);

  EXPECT_THAT(nnapi_orig_copy.ANeuralNetworks_getDeviceCount,
              Ne(NnApiImplementation()->ANeuralNetworks_getDeviceCount));

  NnApiHandler::Instance()->Reset();

  ExpectEquals(nnapi_orig_copy, *NnApiImplementation());
}

int (*device_count_ptr)(uint32_t*);
TEST_F(NnApiHandlerTest, ShouldSupportPassthroughCalls) {
  const NnApi* nnapi = NnApiImplementation();
  device_count_ptr = nnapi->ANeuralNetworks_getDeviceCount;

  NnApiHandler::Instance()->StubGetDeviceCountWith(
      [](uint32_t* device_count) -> int {
        return NnApiPassthroughInstance()->ANeuralNetworks_getDeviceCount ==
               device_count_ptr;
      });

  uint32_t device_count = 0;
  EXPECT_THAT(nnapi->ANeuralNetworks_getDeviceCount(&device_count), Eq(1));
}

void ExpectEquals(const NnApi& left, const NnApi& right) {
#define EXPECT_NNAPI_MEMBER_EQ(name) EXPECT_EQ(left.name, right.name)

  EXPECT_NNAPI_MEMBER_EQ(nnapi_exists);
  EXPECT_NNAPI_MEMBER_EQ(android_sdk_version);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksMemory_createFromFd);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksMemory_free);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_create);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_free);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_finish);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_addOperand);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_setOperandValue);
  EXPECT_NNAPI_MEMBER_EQ(
      ANeuralNetworksModel_setOperandSymmPerChannelQuantParams);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_setOperandValueFromMemory);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_addOperation);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_identifyInputsAndOutputs);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_relaxComputationFloat32toFloat16);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_create);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_free);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_setPreference);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_finish);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_create);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_free);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_setInput);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_setInputFromMemory);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_setOutput);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_setOutputFromMemory);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_startCompute);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksEvent_wait);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksEvent_free);
  EXPECT_NNAPI_MEMBER_EQ(ASharedMemory_create);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworks_getDeviceCount);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworks_getDevice);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksDevice_getName);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksDevice_getVersion);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksDevice_getFeatureLevel);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksDevice_getType);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_createForDevices);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_setCaching);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_compute);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_getOutputOperandRank);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_getOutputOperandDimensions);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksBurst_create);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksBurst_free);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_burstCompute);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksMemory_createFromAHardwareBuffer);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_setMeasureTiming);
  EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_getDuration);

#undef EXPECT_NNAPI_MEMBER_EQ
}

}  // namespace nnapi
}  // namespace tflite