Make lite/tools/evaluation:utils depend on the NNAPI delegate only when building for Android.

PiperOrigin-RevId: 313615943
Change-Id: Idf3b05cfea63c9578c726e5ed7b5afacd9e1a495
Authored by Robert David on 2020-05-28 10:39:06 -07:00; committed by TensorFlower Gardener
parent c3d3ef1775
commit f1e137db12
5 changed files with 31 additions and 21 deletions


@@ -26,6 +26,7 @@ namespace tools {
class NnapiDelegateProvider : public DelegateProvider {
public:
NnapiDelegateProvider() {
#if defined(__ANDROID__)
default_params_.AddParam("use_nnapi", ToolParam::Create<bool>(false));
default_params_.AddParam("nnapi_execution_preference",
ToolParam::Create<std::string>(""));
@@ -35,6 +36,7 @@ class NnapiDelegateProvider : public DelegateProvider {
ToolParam::Create<bool>(false));
default_params_.AddParam("nnapi_allow_fp16",
ToolParam::Create<bool>(false));
#endif
}
std::vector<Flag> CreateFlags(ToolParams* params) const final;
@@ -49,18 +51,21 @@ REGISTER_DELEGATE_PROVIDER(NnapiDelegateProvider);
std::vector<Flag> NnapiDelegateProvider::CreateFlags(ToolParams* params) const {
std::vector<Flag> flags = {
CreateFlag<bool>("use_nnapi", params, "use nnapi delegate api"),
CreateFlag<std::string>("nnapi_execution_preference", params,
"execution preference for nnapi delegate. Should "
"be one of the following: fast_single_answer, "
"sustained_speed, low_power, undefined"),
CreateFlag<std::string>(
"nnapi_accelerator_name", params,
"the name of the nnapi accelerator to use (requires Android Q+)"),
CreateFlag<bool>("disable_nnapi_cpu", params,
"Disable the NNAPI CPU device"),
CreateFlag<bool>("nnapi_allow_fp16", params,
"Allow fp32 computation to be run in fp16")};
#if defined(__ANDROID__)
CreateFlag<bool>("use_nnapi", params, "use nnapi delegate api"),
CreateFlag<std::string>("nnapi_execution_preference", params,
"execution preference for nnapi delegate. Should "
"be one of the following: fast_single_answer, "
"sustained_speed, low_power, undefined"),
CreateFlag<std::string>(
"nnapi_accelerator_name", params,
"the name of the nnapi accelerator to use (requires Android Q+)"),
CreateFlag<bool>("disable_nnapi_cpu", params,
"Disable the NNAPI CPU device"),
CreateFlag<bool>("nnapi_allow_fp16", params,
"Allow fp32 computation to be run in fp16")
#endif
};
return flags;
}
@@ -98,6 +103,7 @@ void NnapiDelegateProvider::LogParams(const ToolParams& params) const {
TfLiteDelegatePtr NnapiDelegateProvider::CreateTfLiteDelegate(
const ToolParams& params) const {
TfLiteDelegatePtr delegate(nullptr, [](TfLiteDelegate*) {});
#if defined(__ANDROID__)
if (params.Get<bool>("use_nnapi")) {
StatefulNnApiDelegate::Options options;
std::string accelerator_name =
@@ -157,7 +163,7 @@ TfLiteDelegatePtr NnapiDelegateProvider::CreateTfLiteDelegate(
<< params.Get<std::string>("nnapi_execution_preference")
<< ") to be used.";
}
#endif
return delegate;
}
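
For orientation, the net effect in this file can be distilled into the short sketch below. It is illustrative only: the DelegatePtr alias and the CreateNnapiDelegateSketch function are stand-ins for the project's TfLiteDelegatePtr typedef and NnapiDelegateProvider::CreateTfLiteDelegate, and the bool argument stands in for the "use_nnapi" tool parameter.

#include <memory>

#include "tensorflow/lite/c/common.h"
#if defined(__ANDROID__)
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#endif

// Stand-in for the project's TfLiteDelegatePtr typedef.
using DelegatePtr = std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>;

// NNAPI construction is compiled only on Android; other platforms still link
// this code and simply receive an empty delegate.
DelegatePtr CreateNnapiDelegateSketch(bool use_nnapi) {
  DelegatePtr delegate(nullptr, [](TfLiteDelegate*) {});
#if defined(__ANDROID__)
  if (use_nnapi) {
    tflite::StatefulNnApiDelegate::Options options;
    delegate = DelegatePtr(
        new tflite::StatefulNnApiDelegate(options), [](TfLiteDelegate* d) {
          delete reinterpret_cast<tflite::StatefulNnApiDelegate*>(d);
        });
  }
#endif
  return delegate;  // Holds nullptr on non-Android builds or when NNAPI is off.
}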


@@ -41,10 +41,10 @@ cc_library(
copts = tflite_copts(),
deps = [
"//tensorflow/lite/c:common",
"//tensorflow/lite/delegates/nnapi:nnapi_delegate",
] + select({
"//tensorflow:android": [
"//tensorflow/lite/delegates/gpu:delegate",
"//tensorflow/lite/delegates/nnapi:nnapi_delegate",
],
"//conditions:default": [],
}) + select({


@@ -43,7 +43,11 @@ TEST(EvaluationDelegateProviderTest, CreateTfLiteDelegate) {
TEST(EvaluationDelegateProviderTest, DelegateProvidersParams) {
DelegateProviders providers;
const auto& params = providers.GetAllParams();
#if defined(__ANDROID__)
EXPECT_TRUE(params.HasParam("use_nnapi"));
#else
EXPECT_FALSE(params.HasParam("use_nnapi"));
#endif
EXPECT_TRUE(params.HasParam("use_gpu"));
int argc = 3;
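
Because "use_nnapi" is now registered only on Android, code on other platforms should not read it unconditionally. A hedged caller-side sketch, using only the ToolParams accessors that already appear in this change (HasParam and Get<bool>), with params as in the test above:

// Guard the lookup so non-Android builds fall back cleanly.
const bool use_nnapi =
    params.HasParam("use_nnapi") && params.Get<bool>("use_nnapi");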


@@ -101,20 +101,18 @@ TfLiteDelegatePtr CreateNNAPIDelegate() {
// NnApiDelegate() returns a singleton, so provide a no-op deleter.
[](TfLiteDelegate*) {});
#else
return TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
return CreateNullDelegate();
#endif // defined(__ANDROID__)
}
TfLiteDelegatePtr CreateNNAPIDelegate(StatefulNnApiDelegate::Options options) {
#if defined(__ANDROID__)
TfLiteDelegatePtr CreateNNAPIDelegate(StatefulNnApiDelegate::Options options) {
return TfLiteDelegatePtr(
new StatefulNnApiDelegate(options), [](TfLiteDelegate* delegate) {
delete reinterpret_cast<StatefulNnApiDelegate*>(delegate);
});
#else
return CreateNullDelegate();
#endif // defined(__ANDROID__)
}
#endif // defined(__ANDROID__)
#if defined(__ANDROID__)
TfLiteDelegatePtr CreateGPUDelegate(TfLiteGpuDelegateOptionsV2* options) {
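
The CreateNullDelegate() helper referenced above is not defined in this hunk; judging from the inline expression it replaces, it presumably amounts to the following one-liner (an assumption, shown here only for context):

TfLiteDelegatePtr CreateNullDelegate() {
  // Empty delegate with a no-op deleter, mirroring the removed
  // TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {}) expression.
  return TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
}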


@@ -16,12 +16,14 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_TOOLS_EVALUATION_UTILS_H_
#define TENSORFLOW_LITE_TOOLS_EVALUATION_UTILS_H_
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>
#if defined(__ANDROID__)
#include "tensorflow/lite/delegates/gpu/delegate.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#if (defined(__arm__) || defined(__aarch64__))
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_delegate.h"
#endif
@@ -33,7 +35,6 @@ limitations under the License.
#endif
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
namespace tflite {
namespace evaluation {
@@ -61,8 +62,9 @@ inline TfLiteStatus GetSortedFileNames(const std::string& directory,
}
TfLiteDelegatePtr CreateNNAPIDelegate();
#if defined(__ANDROID__)
TfLiteDelegatePtr CreateNNAPIDelegate(StatefulNnApiDelegate::Options options);
#endif
TfLiteDelegatePtr CreateGPUDelegate();
#if defined(__ANDROID__)