1. Make delegate providers free of the benchmark_model dependency.
2. Consolidate common delegate-related parameters into a single delegate_provider module.
3. Change the delegate provider API to clearly show default params and their values.

PiperOrigin-RevId: 305844062
Change-Id: Iebae441348197803d092d7af0fe84c2c04b90d0d

parent 5546f82d95
commit f74b28c47e
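In short, after this change each delegate provider declares its default parameters (and their values) in its own constructor via the protected default_params_ member, and the benchmark merges every registered provider's DefaultParams() instead of calling a per-provider AddParams() hook. The sketch below illustrates the new provider-side pattern; FooDelegateProvider and its use_foo parameter are hypothetical names used only for illustration, modeled on the DefaultExecutionProvider added by this commit.

// Minimal sketch of the post-change provider pattern. "FooDelegateProvider"
// and "use_foo" are made-up names; the structure mirrors the providers below.
#include <string>
#include <vector>

#include "tensorflow/lite/tools/benchmark/delegate_provider.h"

namespace tflite {
namespace benchmark {

class FooDelegateProvider : public DelegateProvider {
 public:
  FooDelegateProvider() {
    // Default params and their values are now declared up front in the
    // constructor instead of through a separate AddParams() override.
    default_params_.AddParam("use_foo", BenchmarkParam::Create<bool>(false));
  }

  std::vector<Flag> CreateFlags(BenchmarkParams* params) const final {
    // CreateFlag() picks up the flag's default value from default_params_.
    return {CreateFlag<bool>("use_foo", params,
                             "use the (hypothetical) foo delegate.")};
  }

  void LogParams(const BenchmarkParams& params) const final {
    TFLITE_LOG(INFO) << "Use foo : [" << params.Get<bool>("use_foo") << "]";
  }

  TfLiteDelegatePtr CreateTfLiteDelegate(
      const BenchmarkParams& params) const final {
    // No real delegate in this sketch; return an empty pointer, as the
    // DefaultExecutionProvider added in this commit does.
    return TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
  }

  std::string GetName() const final { return "Foo"; }
};
REGISTER_DELEGATE_PROVIDER(FooDelegateProvider);

}  // namespace benchmark
}  // namespace tflite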
@@ -147,31 +147,23 @@ cc_library(
        "//conditions:default": [],
    }),
    deps = [
        ":profiling_listener",
        ":benchmark_model_lib",
        ":benchmark_utils",
        ":delegate_provider_hdr",
        ":gpu_delegate_provider",
        ":hexagon_delegate_provider",
        ":external_delegate_provider",
        ":logging",
        ":nnapi_delegate_provider",
        "@com_google_absl//absl/base:core_headers",
        "@com_google_absl//absl/strings",
        ":profiling_listener",
        ":tflite_execution_providers",
        "//tensorflow/lite:framework",
        "//tensorflow/lite:string_util",
        "@ruy//ruy/profiler",
        "//tensorflow/lite/kernels:builtin_ops",
        "//tensorflow/lite/profiling:platform_profiler",
        "//tensorflow/lite/profiling:profiler",
        "//tensorflow/lite/profiling:profile_summary_formatter",
        "//tensorflow/lite/profiling:profiler",
        "//tensorflow/lite/tools/evaluation:utils",
    ] + select({
        "//tensorflow:fuchsia": [],
        "//conditions:default": [
            ":xnnpack_delegate_provider",
        ],
    }),
        "@com_google_absl//absl/base:core_headers",
        "@com_google_absl//absl/strings",
        "@ruy//ruy/profiler",
    ],
)

cc_library(
@@ -237,9 +229,41 @@ cc_library(
        ":benchmark_params",
        "//tensorflow/lite/c:common",
        "//tensorflow/lite/tools:command_line_flags",
        "//tensorflow/lite/tools/benchmark:logging",
    ],
)

# A convenient library for all inference execution providers.
cc_library(
    name = "tflite_execution_providers",
    copts = tflite_copts(),
    deps = [
        ":default_execution_provider",
        ":external_delegate_provider",
        ":gpu_delegate_provider",
        ":hexagon_delegate_provider",
        ":nnapi_delegate_provider",
    ] + select({
        "//tensorflow:fuchsia": [],
        "//conditions:default": [
            ":xnnpack_delegate_provider",
        ],
    }),
    alwayslink = 1,
)

cc_library(
    name = "default_execution_provider",
    srcs = ["default_execution_provider.cc"],
    copts = tflite_copts(),
    linkstatic = True,
    visibility = ["//visibility:public"],
    deps = [
        ":delegate_provider_hdr",
    ],
    alwayslink = 1,
)

cc_library(
    name = "gpu_delegate_provider",
    srcs = ["gpu_delegate_provider.cc"],
@@ -250,10 +274,7 @@ cc_library(
        "//conditions:default": [],
    }),
    deps = [
        ":benchmark_model_lib",
        ":benchmark_params",
        ":delegate_provider_hdr",
        ":logging",
        "//tensorflow/lite/tools/evaluation:utils",
    ] + select({
        "//tensorflow:android": [
@@ -272,10 +293,7 @@ cc_library(
    srcs = ["nnapi_delegate_provider.cc"],
    copts = common_copts,
    deps = [
        ":benchmark_model_lib",
        ":benchmark_params",
        ":delegate_provider_hdr",
        ":logging",
        "//tensorflow/lite/tools/evaluation:utils",
    ],
    alwayslink = 1,
@@ -286,10 +304,7 @@ cc_library(
    srcs = ["hexagon_delegate_provider.cc"],
    copts = common_copts,
    deps = [
        ":benchmark_model_lib",
        ":benchmark_params",
        ":delegate_provider_hdr",
        ":logging",
        "//tensorflow/lite/tools/evaluation:utils",
    ],
    alwayslink = 1,
@@ -302,9 +317,7 @@ cc_library(
    linkstatic = True,
    visibility = ["//visibility:public"],
    deps = [
        ":benchmark_model_lib",
        ":delegate_provider_hdr",
        ":logging",
        "//tensorflow/lite/tools/evaluation:utils",
    ],
    alwayslink = 1,
@@ -317,9 +330,7 @@ cc_library(
    linkstatic = True,
    visibility = ["//visibility:public"],
    deps = [
        ":benchmark_model_lib",
        ":delegate_provider_hdr",
        ":logging",
    ],
    alwayslink = 1,
)
@@ -81,14 +81,13 @@ BenchmarkParams CreateParams(int32_t num_runs, float min_secs, float max_secs,
  params.AddParam("enable_op_profiling", BenchmarkParam::Create<bool>(false));
  params.AddParam("max_profiling_buffer_entries",
                  BenchmarkParam::Create<int32_t>(1024));
  params.AddParam("max_delegated_partitions", BenchmarkParam::Create<int>(0));
  params.AddParam("profiling_output_csv_file",
                  BenchmarkParam::Create<std::string>(""));
  params.AddParam("enable_platform_tracing",
                  BenchmarkParam::Create<bool>(false));

  for (const auto& delegate_provider : GetRegisteredDelegateProviders()) {
    delegate_provider->AddParams(&params);
    params.Merge(delegate_provider->DefaultParams());
  }
  return params;
}
@@ -39,7 +39,6 @@ limitations under the License.
#include "tensorflow/lite/tools/benchmark/delegate_provider.h"
#include "tensorflow/lite/tools/benchmark/logging.h"
#include "tensorflow/lite/tools/benchmark/profiling_listener.h"
#include "tensorflow/lite/tools/evaluation/utils.h"

void RegisterSelectedOps(::tflite::MutableOpResolver* resolver);

@@ -270,13 +269,11 @@ BenchmarkParams BenchmarkTfLiteModel::DefaultParams() {
                          BenchmarkParam::Create<int32_t>(1024));
  default_params.AddParam("profiling_output_csv_file",
                          BenchmarkParam::Create<std::string>(""));
  default_params.AddParam("max_delegated_partitions",
                          BenchmarkParam::Create<int32_t>(0));
  default_params.AddParam("enable_platform_tracing",
                          BenchmarkParam::Create<bool>(false));

  for (const auto& delegate_util : GetRegisteredDelegateProviders()) {
    delegate_util->AddParams(&default_params);
    default_params.Merge(delegate_util->DefaultParams());
  }

  return default_params;
@@ -296,7 +293,7 @@ void BenchmarkTfLiteModel::CleanUp() {
BenchmarkTfLiteModel::~BenchmarkTfLiteModel() { CleanUp(); }

std::vector<Flag> BenchmarkTfLiteModel::GetFlags() {
  std::vector<Flag> flags = BenchmarkTfLiteModel::BenchmarkModel::GetFlags();
  std::vector<Flag> flags = BenchmarkModel::GetFlags();
  std::vector<Flag> specific_flags = {
      CreateFlag<std::string>("graph", &params_, "graph file name"),
      CreateFlag<std::string>("input_layer", &params_, "input layer names"),
@@ -329,8 +326,6 @@ std::vector<Flag> BenchmarkTfLiteModel::GetFlags() {
          "profiling_output_csv_file", &params_,
          "File path to export profile data as CSV, if not set "
          "prints to stdout."),
      CreateFlag<int>("max_delegated_partitions", &params_,
                      "Max partitions to be delegated."),
      CreateFlag<bool>("enable_platform_tracing", &params_,
                       "enable platform-wide tracing, only meaningful when "
                       "--enable_op_profiling is set to true.")};
@@ -374,8 +369,6 @@ void BenchmarkTfLiteModel::LogParams() {
  TFLITE_LOG(INFO) << "CSV File to export profiling data to: ["
                   << params_.Get<std::string>("profiling_output_csv_file")
                   << "]";
  TFLITE_LOG(INFO) << "Max number of delegated partitions : ["
                   << params_.Get<int32_t>("max_delegated_partitions") << "]";
  TFLITE_LOG(INFO) << "Enable platform-wide tracing: ["
                   << params_.Get<bool>("enable_platform_tracing") << "]";
@@ -0,0 +1,64 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <string>

#include "tensorflow/lite/tools/benchmark/delegate_provider.h"

namespace tflite {
namespace benchmark {

// This class actually doesn't provide any TFLite delegate instances, it simply
// provides common params and flags that are common to all actual delegate
// providers.
class DefaultExecutionProvider : public DelegateProvider {
 public:
  DefaultExecutionProvider() {
    default_params_.AddParam("num_threads", BenchmarkParam::Create<int32_t>(1));
    default_params_.AddParam("max_delegated_partitions",
                             BenchmarkParam::Create<int32_t>(0));
  }

  std::vector<Flag> CreateFlags(BenchmarkParams* params) const final;
  void LogParams(const BenchmarkParams& params) const final;
  TfLiteDelegatePtr CreateTfLiteDelegate(
      const BenchmarkParams& params) const final;
  std::string GetName() const final { return "Default-NoDelegate"; }
};
REGISTER_DELEGATE_PROVIDER(DefaultExecutionProvider);

std::vector<Flag> DefaultExecutionProvider::CreateFlags(
    BenchmarkParams* params) const {
  std::vector<Flag> flags = {
      CreateFlag<int32_t>("num_threads", params,
                          "number of threads used for inference on CPU."),
      CreateFlag<int32_t>("max_delegated_partitions", params,
                          "Max number of partitions to be delegated.")};
  return flags;
}

void DefaultExecutionProvider::LogParams(const BenchmarkParams& params) const {
  TFLITE_LOG(INFO) << "#threads used for CPU inference: ["
                   << params.Get<int32_t>("num_threads") << "]";
  TFLITE_LOG(INFO) << "Max number of delegated partitions : ["
                   << params.Get<int32_t>("max_delegated_partitions") << "]";
}

TfLiteDelegatePtr DefaultExecutionProvider::CreateTfLiteDelegate(
    const BenchmarkParams& params) const {
  return TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
}

}  // namespace benchmark
}  // namespace tflite
@@ -21,6 +21,7 @@ limitations under the License.

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/tools/benchmark/benchmark_params.h"
#include "tensorflow/lite/tools/benchmark/logging.h"
#include "tensorflow/lite/tools/command_line_flags.h"

namespace tflite {
@@ -40,9 +41,6 @@ class DelegateProvider {
  // value.
  virtual std::vector<Flag> CreateFlags(BenchmarkParams* params) const = 0;

  // Add delegate-specific benchmark pararms to 'params'
  virtual void AddParams(BenchmarkParams* params) const = 0;

  // Log benchmark params.
  virtual void LogParams(const BenchmarkParams& params) const = 0;

@@ -51,6 +49,18 @@ class DelegateProvider {
      const BenchmarkParams& params) const = 0;

  virtual std::string GetName() const = 0;

  const BenchmarkParams& DefaultParams() const { return default_params_; }

 protected:
  template <typename T>
  Flag CreateFlag(const char* name, BenchmarkParams* params,
                  const std::string& usage) const {
    return Flag(
        name, [params, name](const T& val) { params->Set<T>(name, val); },
        default_params_.Get<T>(name), usage, Flag::OPTIONAL);
  }
  BenchmarkParams default_params_;
};

using DelegateProviderPtr = std::unique_ptr<DelegateProvider>;
@@ -12,9 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/tools/benchmark/benchmark_model.h"
#include "tensorflow/lite/tools/benchmark/delegate_provider.h"
#include "tensorflow/lite/tools/benchmark/logging.h"

#if defined(_WIN32)
#include <Windows.h>
@@ -97,9 +95,14 @@ struct ExternalLib {
// the generated delegates.
class ExternalDelegateProvider : public DelegateProvider {
 public:
  std::vector<Flag> CreateFlags(BenchmarkParams* params) const final;
  ExternalDelegateProvider() {
    default_params_.AddParam("external_delegate_path",
                             BenchmarkParam::Create<std::string>(""));
    default_params_.AddParam("external_delegate_options",
                             BenchmarkParam::Create<std::string>(""));
  }

  void AddParams(BenchmarkParams* params) const final;
  std::vector<Flag> CreateFlags(BenchmarkParams* params) const final;

  void LogParams(const BenchmarkParams& params) const final;

@@ -121,13 +124,6 @@ std::vector<Flag> ExternalDelegateProvider::CreateFlags(
  return flags;
}

void ExternalDelegateProvider::AddParams(BenchmarkParams* params) const {
  params->AddParam("external_delegate_path",
                   BenchmarkParam::Create<std::string>(""));
  params->AddParam("external_delegate_options",
                   BenchmarkParam::Create<std::string>(""));
}

void ExternalDelegateProvider::LogParams(const BenchmarkParams& params) const {
  TFLITE_LOG(INFO) << "External delegate path : ["
                   << params.Get<std::string>("external_delegate_path") << "]";
@@ -14,9 +14,7 @@ limitations under the License.
==============================================================================*/
#include <string>

#include "tensorflow/lite/tools/benchmark/benchmark_model.h"
#include "tensorflow/lite/tools/benchmark/delegate_provider.h"
#include "tensorflow/lite/tools/benchmark/logging.h"
#include "tensorflow/lite/tools/evaluation/utils.h"
#if defined(__ANDROID__)
#include "tensorflow/lite/delegates/gpu/delegate.h"
@@ -34,9 +32,19 @@ namespace benchmark {

class GpuDelegateProvider : public DelegateProvider {
 public:
  std::vector<Flag> CreateFlags(BenchmarkParams* params) const final;
  GpuDelegateProvider() {
    default_params_.AddParam("use_gpu", BenchmarkParam::Create<bool>(false));
#if defined(__ANDROID__) || defined(REAL_IPHONE_DEVICE)
    default_params_.AddParam("gpu_precision_loss_allowed",
                             BenchmarkParam::Create<bool>(true));
#endif
#if defined(REAL_IPHONE_DEVICE)
    default_params_.AddParam("gpu_wait_type",
                             BenchmarkParam::Create<std::string>(""));
#endif
  }

  void AddParams(BenchmarkParams* params) const final;
  std::vector<Flag> CreateFlags(BenchmarkParams* params) const final;

  void LogParams(const BenchmarkParams& params) const final;

@@ -66,17 +74,6 @@ std::vector<Flag> GpuDelegateProvider::CreateFlags(
  return flags;
}

void GpuDelegateProvider::AddParams(BenchmarkParams* params) const {
  params->AddParam("use_gpu", BenchmarkParam::Create<bool>(false));
#if defined(__ANDROID__) || defined(REAL_IPHONE_DEVICE)
  params->AddParam("gpu_precision_loss_allowed",
                   BenchmarkParam::Create<bool>(true));
#endif
#if defined(REAL_IPHONE_DEVICE)
  params->AddParam("gpu_wait_type", BenchmarkParam::Create<std::string>(""));
#endif
}

void GpuDelegateProvider::LogParams(const BenchmarkParams& params) const {
  TFLITE_LOG(INFO) << "Use gpu : [" << params.Get<bool>("use_gpu") << "]";
#if defined(__ANDROID__) || defined(REAL_IPHONE_DEVICE)
@@ -14,9 +14,7 @@ limitations under the License.
==============================================================================*/
#include <string>

#include "tensorflow/lite/tools/benchmark/benchmark_model.h"
#include "tensorflow/lite/tools/benchmark/delegate_provider.h"
#include "tensorflow/lite/tools/benchmark/logging.h"
#include "tensorflow/lite/tools/evaluation/utils.h"

#if (defined(ANDROID) || defined(__ANDROID__)) && \
@@ -29,9 +27,19 @@ namespace benchmark {

class HexagonDelegateProvider : public DelegateProvider {
 public:
  std::vector<Flag> CreateFlags(BenchmarkParams* params) const final;
  HexagonDelegateProvider() {
#if defined(TFLITE_ENABLE_HEXAGON)
    default_params_.AddParam("use_hexagon",
                             BenchmarkParam::Create<bool>(false));
    default_params_.AddParam(
        "hexagon_lib_path",
        BenchmarkParam::Create<std::string>("/data/local/tmp"));
    default_params_.AddParam("hexagon_profiling",
                             BenchmarkParam::Create<bool>(false));
#endif
  }

  void AddParams(BenchmarkParams* params) const final;
  std::vector<Flag> CreateFlags(BenchmarkParams* params) const final;

  void LogParams(const BenchmarkParams& params) const final;

@@ -58,15 +66,6 @@ std::vector<Flag> HexagonDelegateProvider::CreateFlags(
#endif
}

void HexagonDelegateProvider::AddParams(BenchmarkParams* params) const {
#if defined(TFLITE_ENABLE_HEXAGON)
  params->AddParam("use_hexagon", BenchmarkParam::Create<bool>(false));
  params->AddParam("hexagon_lib_path",
                   BenchmarkParam::Create<std::string>("/data/local/tmp"));
  params->AddParam("hexagon_profiling", BenchmarkParam::Create<bool>(false));
#endif
}

void HexagonDelegateProvider::LogParams(const BenchmarkParams& params) const {
#if defined(TFLITE_ENABLE_HEXAGON)
  TFLITE_LOG(INFO) << "Use Hexagon : [" << params.Get<bool>("use_hexagon")
@@ -14,9 +14,7 @@ limitations under the License.
==============================================================================*/
#include <string>

#include "tensorflow/lite/tools/benchmark/benchmark_model.h"
#include "tensorflow/lite/tools/benchmark/delegate_provider.h"
#include "tensorflow/lite/tools/benchmark/logging.h"
#include "tensorflow/lite/tools/evaluation/utils.h"
#if defined(__ANDROID__)
#include "tensorflow/lite/nnapi/nnapi_util.h"
@@ -27,9 +25,17 @@ namespace benchmark {

class NnapiDelegateProvider : public DelegateProvider {
 public:
  std::vector<Flag> CreateFlags(BenchmarkParams* params) const final;
  NnapiDelegateProvider() {
    default_params_.AddParam("use_nnapi", BenchmarkParam::Create<bool>(false));
    default_params_.AddParam("nnapi_execution_preference",
                             BenchmarkParam::Create<std::string>(""));
    default_params_.AddParam("nnapi_accelerator_name",
                             BenchmarkParam::Create<std::string>(""));
    default_params_.AddParam("disable_nnapi_cpu",
                             BenchmarkParam::Create<bool>(false));
  }

  void AddParams(BenchmarkParams* params) const final;
  std::vector<Flag> CreateFlags(BenchmarkParams* params) const final;

  void LogParams(const BenchmarkParams& params) const final;

@@ -57,15 +63,6 @@ std::vector<Flag> NnapiDelegateProvider::CreateFlags(
  return flags;
}

void NnapiDelegateProvider::AddParams(BenchmarkParams* params) const {
  params->AddParam("use_nnapi", BenchmarkParam::Create<bool>(false));
  params->AddParam("nnapi_execution_preference",
                   BenchmarkParam::Create<std::string>(""));
  params->AddParam("nnapi_accelerator_name",
                   BenchmarkParam::Create<std::string>(""));
  params->AddParam("disable_nnapi_cpu", BenchmarkParam::Create<bool>(false));
}

void NnapiDelegateProvider::LogParams(const BenchmarkParams& params) const {
#if defined(__ANDROID__)
  TFLITE_LOG(INFO) << "Use nnapi : [" << params.Get<bool>("use_nnapi") << "]";
@@ -14,9 +14,7 @@ limitations under the License.
==============================================================================*/
#include <string>

#include "tensorflow/lite/tools/benchmark/benchmark_model.h"
#include "tensorflow/lite/tools/benchmark/delegate_provider.h"
#include "tensorflow/lite/tools/benchmark/logging.h"
#include "tensorflow/lite/tools/evaluation/utils.h"

namespace tflite {
@@ -24,9 +22,12 @@ namespace benchmark {

class XnnpackDelegateProvider : public DelegateProvider {
 public:
  std::vector<Flag> CreateFlags(BenchmarkParams* params) const final;
  XnnpackDelegateProvider() {
    default_params_.AddParam("use_xnnpack",
                             BenchmarkParam::Create<bool>(false));
  }

  void AddParams(BenchmarkParams* params) const final;
  std::vector<Flag> CreateFlags(BenchmarkParams* params) const final;

  void LogParams(const BenchmarkParams& params) const final;

@@ -44,10 +45,6 @@ std::vector<Flag> XnnpackDelegateProvider::CreateFlags(
  return flags;
}

void XnnpackDelegateProvider::AddParams(BenchmarkParams* params) const {
  params->AddParam("use_xnnpack", BenchmarkParam::Create<bool>(false));
}

void XnnpackDelegateProvider::LogParams(const BenchmarkParams& params) const {
  TFLITE_LOG(INFO) << "Use xnnpack : [" << params.Get<bool>("use_xnnpack")
                   << "]";
@@ -212,6 +212,7 @@ TF_LITE_CC_SRCS := $(filter-out $(CORE_CC_EXCLUDE_SRCS), $(CORE_CC_ALL_SRCS))

# Benchmark sources
BENCHMARK_SRCS_DIR := tensorflow/lite/tools/benchmark
DELEGATE_PROVIDER_SRCS_DIR := tensorflow/lite/tools/benchmark
EVALUATION_UTILS_SRCS := \
  tensorflow/lite/tools/evaluation/utils.cc
BENCHMARK_ALL_SRCS := \
@@ -228,11 +229,12 @@ BENCHMARK_LIB_SRCS := $(filter-out \
  $(BENCHMARK_MAIN_SRC) \
  $(BENCHMARK_PERF_OPTIONS_SRC) \
  $(BENCHMARK_SRCS_DIR)/benchmark_plus_flex_main.cc \
  $(BENCHMARK_SRCS_DIR)/external_delegate_provider.cc \
  $(BENCHMARK_SRCS_DIR)/gpu_delegate_provider.cc \
  $(BENCHMARK_SRCS_DIR)/hexagon_delegate_provider.cc \
  $(BENCHMARK_SRCS_DIR)/nnapi_delegate_provider.cc \
  $(BENCHMARK_SRCS_DIR)/xnnpack_delegate_provider.cc, \
  $(DELEGATE_PROVIDER_SRCS_DIR)/default_execution_provider.cc \
  $(DELEGATE_PROVIDER_SRCS_DIR)/external_delegate_provider.cc \
  $(DELEGATE_PROVIDER_SRCS_DIR)/gpu_delegate_provider.cc \
  $(DELEGATE_PROVIDER_SRCS_DIR)/hexagon_delegate_provider.cc \
  $(DELEGATE_PROVIDER_SRCS_DIR)/nnapi_delegate_provider.cc \
  $(DELEGATE_PROVIDER_SRCS_DIR)/xnnpack_delegate_provider.cc, \
  $(BENCHMARK_ALL_SRCS))

# These target-specific makefiles should modify or replace options like
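On the benchmark side, the CreateParams()/DefaultParams() hunks above boil down to roughly the following consumer pattern. This is a simplified sketch, not the verbatim benchmark code; the helper names CollectDefaultParams and CreateAllDelegates are hypothetical.

// Simplified sketch of how the benchmark consumes providers after this change;
// helper names are illustrative only.
#include <utility>
#include <vector>

#include "tensorflow/lite/tools/benchmark/benchmark_params.h"
#include "tensorflow/lite/tools/benchmark/delegate_provider.h"

namespace tflite {
namespace benchmark {

BenchmarkParams CollectDefaultParams() {  // hypothetical helper
  BenchmarkParams params;
  // Every registered provider contributes its defaults; no per-provider
  // AddParams() call is needed any more.
  for (const auto& provider : GetRegisteredDelegateProviders()) {
    params.Merge(provider->DefaultParams());
  }
  return params;
}

std::vector<TfLiteDelegatePtr> CreateAllDelegates(  // hypothetical helper
    const BenchmarkParams& params) {
  std::vector<TfLiteDelegatePtr> delegates;
  for (const auto& provider : GetRegisteredDelegateProviders()) {
    auto delegate = provider->CreateTfLiteDelegate(params);
    // Providers that are not enabled (or not applicable) return a null ptr.
    if (delegate != nullptr) delegates.push_back(std::move(delegate));
  }
  return delegates;
}

}  // namespace benchmark
}  // namespace tflite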