Move delegate providers into a separate directory as they are used for both evaluation tools and benchmark tools.

PiperOrigin-RevId: 307549889
Change-Id: I9234d5e187155b52a7518586dff01266fb426168
This commit is contained in:
Chao Mei 2020-04-20 23:38:40 -07:00 committed by TensorFlower Gardener
parent 0ede71e954
commit d524812b82
16 changed files with 277 additions and 279 deletions

View File

@ -98,13 +98,13 @@ cc_test(
deps = [
":benchmark_performance_options",
":benchmark_tflite_model_lib",
":delegate_provider_hdr",
"//tensorflow/lite:framework",
"//tensorflow/lite:string_util",
"//tensorflow/lite/c:common",
"//tensorflow/lite/testing:util",
"//tensorflow/lite/tools:command_line_flags",
"//tensorflow/lite/tools:logging",
"//tensorflow/lite/tools/delegates:delegate_provider_hdr",
"@com_google_absl//absl/algorithm",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings:str_format",
@ -139,10 +139,7 @@ cc_library(
deps = [
":benchmark_model_lib",
":benchmark_utils",
":coreml_delegate_provider",
":delegate_provider_hdr",
":profiling_listener",
":tflite_execution_providers",
"//tensorflow/lite:framework",
"//tensorflow/lite:string_util",
"//tensorflow/lite/kernels:builtin_ops",
@ -150,6 +147,8 @@ cc_library(
"//tensorflow/lite/profiling:profile_summary_formatter",
"//tensorflow/lite/profiling:profiler",
"//tensorflow/lite/tools:logging",
"//tensorflow/lite/tools/delegates:delegate_provider_hdr",
"//tensorflow/lite/tools/delegates:tflite_execution_providers",
"//tensorflow/lite/tools/evaluation:utils",
"@com_google_absl//absl/base:core_headers",
"@com_google_absl//absl/strings",
@ -210,152 +209,6 @@ cc_library(
],
)
cc_library(
name = "delegate_provider_hdr",
hdrs = [
"delegate_provider.h",
],
copts = common_copts,
deps = [
":benchmark_params",
"//tensorflow/lite/c:common",
"//tensorflow/lite/tools:command_line_flags",
"//tensorflow/lite/tools:logging",
],
)
# A convenient library for all inference execution providers.
cc_library(
name = "tflite_execution_providers",
copts = tflite_copts(),
deps = [
":default_execution_provider",
":external_delegate_provider",
":gpu_delegate_provider",
":hexagon_delegate_provider",
":nnapi_delegate_provider",
] + select({
"//tensorflow:fuchsia": [],
"//tensorflow:windows": [],
"//conditions:default": [
":xnnpack_delegate_provider",
],
}),
alwayslink = 1,
)
cc_library(
name = "default_execution_provider",
srcs = ["default_execution_provider.cc"],
copts = tflite_copts(),
linkstatic = True,
visibility = ["//visibility:public"],
deps = [
":delegate_provider_hdr",
],
alwayslink = 1,
)
cc_library(
name = "gpu_delegate_provider",
srcs = ["gpu_delegate_provider.cc"],
copts = common_copts + select({
"//tensorflow:ios": [
"-xobjective-c++",
],
"//conditions:default": [],
}),
deps = [
":delegate_provider_hdr",
"//tensorflow/lite/tools/evaluation:utils",
] + select({
"//tensorflow:android": [
"//tensorflow/lite/delegates/gpu:delegate",
],
"//tensorflow:ios": [
"//tensorflow/lite/delegates/gpu:metal_delegate",
],
"//conditions:default": [],
}),
alwayslink = 1,
)
cc_library(
name = "nnapi_delegate_provider",
srcs = ["nnapi_delegate_provider.cc"],
copts = common_copts,
deps = [
":delegate_provider_hdr",
"//tensorflow/lite/tools/evaluation:utils",
],
alwayslink = 1,
)
cc_library(
name = "hexagon_delegate_provider",
srcs = ["hexagon_delegate_provider.cc"],
copts = common_copts,
deps = [
":delegate_provider_hdr",
"//tensorflow/lite/tools/evaluation:utils",
] + select({
"//tensorflow:android_arm": [
"//tensorflow/lite/experimental/delegates/hexagon:hexagon_delegate",
],
"//tensorflow:android_arm64": [
"//tensorflow/lite/experimental/delegates/hexagon:hexagon_delegate",
],
"//conditions:default": [],
}),
alwayslink = 1,
)
cc_library(
name = "coreml_delegate_provider",
srcs = ["coreml_delegate_provider.cc"],
copts = common_copts + select({
"//tensorflow:ios": [
"-xobjective-c++",
],
"//conditions:default": [],
}),
deps = [
":delegate_provider_hdr",
"//tensorflow/lite/tools/evaluation:utils",
] + select({
"//tensorflow:ios": [
"//tensorflow/lite/experimental/delegates/coreml:coreml_delegate",
],
"//conditions:default": [],
}),
alwayslink = 1,
)
cc_library(
name = "xnnpack_delegate_provider",
srcs = ["xnnpack_delegate_provider.cc"],
copts = tflite_copts(),
linkstatic = True,
visibility = ["//visibility:public"],
deps = [
":delegate_provider_hdr",
"//tensorflow/lite/tools/evaluation:utils",
],
alwayslink = 1,
)
cc_library(
name = "external_delegate_provider",
srcs = ["external_delegate_provider.cc"],
copts = tflite_copts(),
linkstatic = True,
visibility = ["//visibility:public"],
deps = [
":delegate_provider_hdr",
],
alwayslink = 1,
)
cc_library(
name = "benchmark_utils",
srcs = [

View File

@ -29,8 +29,8 @@ limitations under the License.
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/lite/tools/benchmark/benchmark_performance_options.h"
#include "tensorflow/lite/tools/benchmark/benchmark_tflite_model.h"
#include "tensorflow/lite/tools/benchmark/delegate_provider.h"
#include "tensorflow/lite/tools/command_line_flags.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/logging.h"
namespace {
@ -88,7 +88,8 @@ BenchmarkParams CreateParams(int32_t num_runs, float min_secs, float max_secs,
params.AddParam("enable_platform_tracing",
BenchmarkParam::Create<bool>(false));
for (const auto& delegate_provider : GetRegisteredDelegateProviders()) {
for (const auto& delegate_provider :
tools::GetRegisteredDelegateProviders()) {
params.Merge(delegate_provider->DefaultParams());
}
return params;

View File

@ -36,8 +36,8 @@ limitations under the License.
#include "tensorflow/lite/profiling/profile_summary_formatter.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/tools/benchmark/benchmark_utils.h"
#include "tensorflow/lite/tools/benchmark/delegate_provider.h"
#include "tensorflow/lite/tools/benchmark/profiling_listener.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/logging.h"
void RegisterSelectedOps(::tflite::MutableOpResolver* resolver);
@ -272,8 +272,9 @@ BenchmarkParams BenchmarkTfLiteModel::DefaultParams() {
default_params.AddParam("enable_platform_tracing",
BenchmarkParam::Create<bool>(false));
for (const auto& delegate_util : GetRegisteredDelegateProviders()) {
default_params.Merge(delegate_util->DefaultParams());
for (const auto& delegate_provider :
tools::GetRegisteredDelegateProviders()) {
default_params.Merge(delegate_provider->DefaultParams());
}
return default_params;
@ -332,8 +333,9 @@ std::vector<Flag> BenchmarkTfLiteModel::GetFlags() {
flags.insert(flags.end(), specific_flags.begin(), specific_flags.end());
for (const auto& delegate_util : GetRegisteredDelegateProviders()) {
auto delegate_flags = delegate_util->CreateFlags(&params_);
for (const auto& delegate_provider :
tools::GetRegisteredDelegateProviders()) {
auto delegate_flags = delegate_provider->CreateFlags(&params_);
flags.insert(flags.end(), delegate_flags.begin(), delegate_flags.end());
}
@ -372,8 +374,9 @@ void BenchmarkTfLiteModel::LogParams() {
TFLITE_LOG(INFO) << "Enable platform-wide tracing: ["
<< params_.Get<bool>("enable_platform_tracing") << "]";
for (const auto& delegate_util : GetRegisteredDelegateProviders()) {
delegate_util->LogParams(params_);
for (const auto& delegate_provider :
tools::GetRegisteredDelegateProviders()) {
delegate_provider->LogParams(params_);
}
}
@ -615,7 +618,8 @@ TfLiteStatus BenchmarkTfLiteModel::Init() {
interpreter_->SetAllowFp16PrecisionForFp32(params_.Get<bool>("allow_fp16"));
owned_delegates_.clear();
for (const auto& delegate_provider : GetRegisteredDelegateProviders()) {
for (const auto& delegate_provider :
tools::GetRegisteredDelegateProviders()) {
auto delegate = delegate_provider->CreateTfLiteDelegate(params_);
// It's possible that a delegate of certain type won't be created as
// user-specified benchmark params tells not to.

View File

@ -0,0 +1,152 @@
load("//tensorflow/lite:build_def.bzl", "tflite_copts")
package(
default_visibility = [
"//visibility:public",
],
licenses = ["notice"], # Apache 2.0
)
common_copts = ["-Wall"] + tflite_copts()
cc_library(
name = "delegate_provider_hdr",
hdrs = [
"delegate_provider.h",
],
copts = common_copts,
deps = [
"//tensorflow/lite/c:common",
"//tensorflow/lite/tools:command_line_flags",
"//tensorflow/lite/tools:logging",
"//tensorflow/lite/tools:tool_params",
],
)
# A convenient library for all inference execution providers.
cc_library(
name = "tflite_execution_providers",
copts = tflite_copts(),
deps = [
":coreml_delegate_provider",
":default_execution_provider",
":external_delegate_provider",
":gpu_delegate_provider",
":hexagon_delegate_provider",
":nnapi_delegate_provider",
":xnnpack_delegate_provider",
],
alwayslink = 1,
)
cc_library(
name = "default_execution_provider",
srcs = ["default_execution_provider.cc"],
copts = tflite_copts(),
linkstatic = True,
visibility = ["//visibility:public"],
deps = [
":delegate_provider_hdr",
],
alwayslink = 1,
)
cc_library(
name = "gpu_delegate_provider",
srcs = ["gpu_delegate_provider.cc"],
copts = common_copts + select({
"//tensorflow:ios": [
"-xobjective-c++",
],
"//conditions:default": [],
}),
deps = [
":delegate_provider_hdr",
"//tensorflow/lite/tools/evaluation:utils",
] + select({
"//tensorflow:android": [
"//tensorflow/lite/delegates/gpu:delegate",
],
"//tensorflow:ios": [
"//tensorflow/lite/delegates/gpu:metal_delegate",
],
"//conditions:default": [],
}),
alwayslink = 1,
)
cc_library(
name = "nnapi_delegate_provider",
srcs = ["nnapi_delegate_provider.cc"],
copts = common_copts,
deps = [
":delegate_provider_hdr",
"//tensorflow/lite/tools/evaluation:utils",
],
alwayslink = 1,
)
cc_library(
name = "hexagon_delegate_provider",
srcs = ["hexagon_delegate_provider.cc"],
copts = common_copts,
deps = [
":delegate_provider_hdr",
"//tensorflow/lite/tools/evaluation:utils",
] + select({
"//tensorflow:android_arm": [
"//tensorflow/lite/experimental/delegates/hexagon:hexagon_delegate",
],
"//tensorflow:android_arm64": [
"//tensorflow/lite/experimental/delegates/hexagon:hexagon_delegate",
],
"//conditions:default": [],
}),
alwayslink = 1,
)
cc_library(
name = "coreml_delegate_provider",
srcs = ["coreml_delegate_provider.cc"],
copts = common_copts + select({
"//tensorflow:ios": [
"-xobjective-c++",
],
"//conditions:default": [],
}),
deps = [
":delegate_provider_hdr",
"//tensorflow/lite/tools/evaluation:utils",
] + select({
"//tensorflow:ios": [
"//tensorflow/lite/experimental/delegates/coreml:coreml_delegate",
],
"//conditions:default": [],
}),
alwayslink = 1,
)
cc_library(
name = "xnnpack_delegate_provider",
srcs = ["xnnpack_delegate_provider.cc"],
copts = tflite_copts(),
linkstatic = True,
visibility = ["//visibility:public"],
deps = [
":delegate_provider_hdr",
"//tensorflow/lite/tools/evaluation:utils",
],
alwayslink = 1,
)
cc_library(
name = "external_delegate_provider",
srcs = ["external_delegate_provider.cc"],
copts = tflite_copts(),
linkstatic = True,
visibility = ["//visibility:public"],
deps = [
":delegate_provider_hdr",
],
alwayslink = 1,
)

View File

@ -14,7 +14,7 @@ limitations under the License.
==============================================================================*/
#include <string>
#include "tensorflow/lite/tools/benchmark/delegate_provider.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/evaluation/utils.h"
#if defined(__APPLE__)
#if TARGET_OS_IPHONE && !TARGET_IPHONE_SIMULATOR
@ -25,28 +25,27 @@ limitations under the License.
#endif
namespace tflite {
namespace benchmark {
namespace tools {
class CoreMlDelegateProvider : public DelegateProvider {
public:
CoreMlDelegateProvider() {
#if defined(REAL_IPHONE_DEVICE)
default_params_.AddParam("use_coreml", BenchmarkParam::Create<bool>(true));
default_params_.AddParam("use_coreml", ToolParam::Create<bool>(true));
#endif
}
std::vector<Flag> CreateFlags(BenchmarkParams* params) const final;
std::vector<Flag> CreateFlags(ToolParams* params) const final;
void LogParams(const BenchmarkParams& params) const final;
void LogParams(const ToolParams& params) const final;
TfLiteDelegatePtr CreateTfLiteDelegate(
const BenchmarkParams& params) const final;
TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final;
std::string GetName() const final { return "COREML"; }
};
REGISTER_DELEGATE_PROVIDER(CoreMlDelegateProvider);
std::vector<Flag> CoreMlDelegateProvider::CreateFlags(
BenchmarkParams* params) const {
ToolParams* params) const {
#if defined(REAL_IPHONE_DEVICE)
std::vector<Flag> flags = {
CreateFlag<bool>("use_coreml", params, "use Core ML"),
@ -57,7 +56,7 @@ std::vector<Flag> CoreMlDelegateProvider::CreateFlags(
#endif
}
void CoreMlDelegateProvider::LogParams(const BenchmarkParams& params) const {
void CoreMlDelegateProvider::LogParams(const ToolParams& params) const {
#if defined(REAL_IPHONE_DEVICE)
TFLITE_LOG(INFO) << "Use Core ML : [" << params.Get<bool>("use_coreml")
<< "]";
@ -65,7 +64,7 @@ void CoreMlDelegateProvider::LogParams(const BenchmarkParams& params) const {
}
TfLiteDelegatePtr CoreMlDelegateProvider::CreateTfLiteDelegate(
const BenchmarkParams& params) const {
const ToolParams& params) const {
TfLiteDelegatePtr delegate(nullptr, [](TfLiteDelegate*) {});
#if defined(REAL_IPHONE_DEVICE)
@ -88,5 +87,5 @@ TfLiteDelegatePtr CoreMlDelegateProvider::CreateTfLiteDelegate(
return delegate;
}
} // namespace benchmark
} // namespace tools
} // namespace tflite

View File

@ -14,10 +14,10 @@ limitations under the License.
==============================================================================*/
#include <string>
#include "tensorflow/lite/tools/benchmark/delegate_provider.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
namespace tflite {
namespace benchmark {
namespace tools {
// This class actually doesn't provide any TFLite delegate instances, it simply
// provides common params and flags that are common to all actual delegate
@ -25,23 +25,22 @@ namespace benchmark {
class DefaultExecutionProvider : public DelegateProvider {
public:
DefaultExecutionProvider() {
default_params_.AddParam("num_threads", BenchmarkParam::Create<int32_t>(1));
default_params_.AddParam("num_threads", ToolParam::Create<int32_t>(1));
default_params_.AddParam("max_delegated_partitions",
BenchmarkParam::Create<int32_t>(0));
ToolParam::Create<int32_t>(0));
default_params_.AddParam("min_nodes_per_partition",
BenchmarkParam::Create<int32_t>(0));
ToolParam::Create<int32_t>(0));
}
std::vector<Flag> CreateFlags(BenchmarkParams* params) const final;
void LogParams(const BenchmarkParams& params) const final;
TfLiteDelegatePtr CreateTfLiteDelegate(
const BenchmarkParams& params) const final;
std::vector<Flag> CreateFlags(ToolParams* params) const final;
void LogParams(const ToolParams& params) const final;
TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final;
std::string GetName() const final { return "Default-NoDelegate"; }
};
REGISTER_DELEGATE_PROVIDER(DefaultExecutionProvider);
std::vector<Flag> DefaultExecutionProvider::CreateFlags(
BenchmarkParams* params) const {
ToolParams* params) const {
std::vector<Flag> flags = {
CreateFlag<int32_t>("num_threads", params,
"number of threads used for inference on CPU."),
@ -55,7 +54,7 @@ std::vector<Flag> DefaultExecutionProvider::CreateFlags(
return flags;
}
void DefaultExecutionProvider::LogParams(const BenchmarkParams& params) const {
void DefaultExecutionProvider::LogParams(const ToolParams& params) const {
TFLITE_LOG(INFO) << "#threads used for CPU inference: ["
<< params.Get<int32_t>("num_threads") << "]";
TFLITE_LOG(INFO) << "Max number of delegated partitions : ["
@ -65,9 +64,9 @@ void DefaultExecutionProvider::LogParams(const BenchmarkParams& params) const {
}
TfLiteDelegatePtr DefaultExecutionProvider::CreateTfLiteDelegate(
const BenchmarkParams& params) const {
const ToolParams& params) const {
return TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
}
} // namespace benchmark
} // namespace tools
} // namespace tflite

View File

@ -13,19 +13,19 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_TOOLS_BENCHMARK_DELEGATE_PROVIDER_H_
#define TENSORFLOW_LITE_TOOLS_BENCHMARK_DELEGATE_PROVIDER_H_
#ifndef TENSORFLOW_LITE_TOOLS_DELEGATES_DELEGATE_PROVIDER_H_
#define TENSORFLOW_LITE_TOOLS_DELEGATES_DELEGATE_PROVIDER_H_
#include <string>
#include <vector>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/tools/benchmark/benchmark_params.h"
#include "tensorflow/lite/tools/command_line_flags.h"
#include "tensorflow/lite/tools/logging.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace benchmark {
namespace tools {
// Same w/ Interpreter::TfLiteDelegatePtr to avoid pulling
// tensorflow/lite/interpreter.h dependency
@ -36,31 +36,30 @@ class DelegateProvider {
public:
virtual ~DelegateProvider() {}
// Create a list of command-line parsable flags based on benchmark params
// inside 'params' whose value will be set to the corresponding runtime flag
// value.
virtual std::vector<Flag> CreateFlags(BenchmarkParams* params) const = 0;
// Create a list of command-line parsable flags based on tool params inside
// 'params' whose value will be set to the corresponding runtime flag value.
virtual std::vector<Flag> CreateFlags(ToolParams* params) const = 0;
// Log benchmark params.
virtual void LogParams(const BenchmarkParams& params) const = 0;
// Log tool params.
virtual void LogParams(const ToolParams& params) const = 0;
// Create a TfLiteDelegate based on benchmark params.
// Create a TfLiteDelegate based on tool params.
virtual TfLiteDelegatePtr CreateTfLiteDelegate(
const BenchmarkParams& params) const = 0;
const ToolParams& params) const = 0;
virtual std::string GetName() const = 0;
const BenchmarkParams& DefaultParams() const { return default_params_; }
const ToolParams& DefaultParams() const { return default_params_; }
protected:
template <typename T>
Flag CreateFlag(const char* name, BenchmarkParams* params,
Flag CreateFlag(const char* name, ToolParams* params,
const std::string& usage) const {
return Flag(
name, [params, name](const T& val) { params->Set<T>(name, val); },
default_params_.Get<T>(name), usage, Flag::kOptional);
}
BenchmarkParams default_params_;
ToolParams default_params_;
};
using DelegateProviderPtr = std::unique_ptr<DelegateProvider>;
@ -102,7 +101,7 @@ class DelegateProviderRegistrar {
inline const DelegateProviderList& GetRegisteredDelegateProviders() {
return DelegateProviderRegistrar::GetProviders();
}
} // namespace benchmark
} // namespace tools
} // namespace tflite
#endif // TENSORFLOW_LITE_TOOLS_BENCHMARK_DELEGATE_PROVIDER_H_
#endif // TENSORFLOW_LITE_TOOLS_DELEGATES_DELEGATE_PROVIDER_H_

View File

@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/tools/benchmark/delegate_provider.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#if defined(_WIN32)
#include <Windows.h>
@ -25,7 +25,7 @@ limitations under the License.
#include <vector>
namespace tflite {
namespace benchmark {
namespace tools {
namespace {
// Library Support construct to handle dynamic library operations
#if defined(_WIN32)
@ -97,24 +97,23 @@ class ExternalDelegateProvider : public DelegateProvider {
public:
ExternalDelegateProvider() {
default_params_.AddParam("external_delegate_path",
BenchmarkParam::Create<std::string>(""));
ToolParam::Create<std::string>(""));
default_params_.AddParam("external_delegate_options",
BenchmarkParam::Create<std::string>(""));
ToolParam::Create<std::string>(""));
}
std::vector<Flag> CreateFlags(BenchmarkParams* params) const final;
std::vector<Flag> CreateFlags(ToolParams* params) const final;
void LogParams(const BenchmarkParams& params) const final;
void LogParams(const ToolParams& params) const final;
TfLiteDelegatePtr CreateTfLiteDelegate(
const BenchmarkParams& params) const final;
TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final;
std::string GetName() const final { return "EXTERNAL"; }
};
REGISTER_DELEGATE_PROVIDER(ExternalDelegateProvider);
std::vector<Flag> ExternalDelegateProvider::CreateFlags(
BenchmarkParams* params) const {
ToolParams* params) const {
std::vector<Flag> flags = {
CreateFlag<std::string>("external_delegate_path", params,
"The library path for the underlying external."),
@ -124,7 +123,7 @@ std::vector<Flag> ExternalDelegateProvider::CreateFlags(
return flags;
}
void ExternalDelegateProvider::LogParams(const BenchmarkParams& params) const {
void ExternalDelegateProvider::LogParams(const ToolParams& params) const {
TFLITE_LOG(INFO) << "External delegate path : ["
<< params.Get<std::string>("external_delegate_path") << "]";
TFLITE_LOG(INFO) << "External delegate options : ["
@ -133,7 +132,7 @@ void ExternalDelegateProvider::LogParams(const BenchmarkParams& params) const {
}
TfLiteDelegatePtr ExternalDelegateProvider::CreateTfLiteDelegate(
const BenchmarkParams& params) const {
const ToolParams& params) const {
TfLiteDelegatePtr delegate(nullptr, [](TfLiteDelegate*) {});
std::string lib_path = params.Get<std::string>("external_delegate_path");
if (!lib_path.empty()) {
@ -167,5 +166,5 @@ TfLiteDelegatePtr ExternalDelegateProvider::CreateTfLiteDelegate(
}
return delegate;
}
} // namespace benchmark
} // namespace tools
} // namespace tflite

View File

@ -14,7 +14,7 @@ limitations under the License.
==============================================================================*/
#include <string>
#include "tensorflow/lite/tools/benchmark/delegate_provider.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/evaluation/utils.h"
#if defined(__ANDROID__)
#include "tensorflow/lite/delegates/gpu/delegate.h"
@ -28,39 +28,37 @@ limitations under the License.
#endif
namespace tflite {
namespace benchmark {
namespace tools {
class GpuDelegateProvider : public DelegateProvider {
public:
GpuDelegateProvider() {
default_params_.AddParam("use_gpu", BenchmarkParam::Create<bool>(false));
default_params_.AddParam("use_gpu", ToolParam::Create<bool>(false));
#if defined(__ANDROID__) || defined(REAL_IPHONE_DEVICE)
default_params_.AddParam("gpu_precision_loss_allowed",
BenchmarkParam::Create<bool>(true));
ToolParam::Create<bool>(true));
#endif
#if defined(__ANDROID__)
default_params_.AddParam("gpu_experimental_enable_quant",
BenchmarkParam::Create<bool>(true));
ToolParam::Create<bool>(true));
#endif
#if defined(REAL_IPHONE_DEVICE)
default_params_.AddParam("gpu_wait_type",
BenchmarkParam::Create<std::string>(""));
ToolParam::Create<std::string>(""));
#endif
}
std::vector<Flag> CreateFlags(BenchmarkParams* params) const final;
std::vector<Flag> CreateFlags(ToolParams* params) const final;
void LogParams(const BenchmarkParams& params) const final;
void LogParams(const ToolParams& params) const final;
TfLiteDelegatePtr CreateTfLiteDelegate(
const BenchmarkParams& params) const final;
TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final;
std::string GetName() const final { return "GPU"; }
};
REGISTER_DELEGATE_PROVIDER(GpuDelegateProvider);
std::vector<Flag> GpuDelegateProvider::CreateFlags(
BenchmarkParams* params) const {
std::vector<Flag> GpuDelegateProvider::CreateFlags(ToolParams* params) const {
std::vector<Flag> flags = {
CreateFlag<bool>("use_gpu", params, "use gpu"),
#if defined(__ANDROID__) || defined(REAL_IPHONE_DEVICE)
@ -83,7 +81,7 @@ std::vector<Flag> GpuDelegateProvider::CreateFlags(
return flags;
}
void GpuDelegateProvider::LogParams(const BenchmarkParams& params) const {
void GpuDelegateProvider::LogParams(const ToolParams& params) const {
TFLITE_LOG(INFO) << "Use gpu : [" << params.Get<bool>("use_gpu") << "]";
#if defined(__ANDROID__) || defined(REAL_IPHONE_DEVICE)
TFLITE_LOG(INFO) << "Allow lower precision in gpu : ["
@ -100,7 +98,7 @@ void GpuDelegateProvider::LogParams(const BenchmarkParams& params) const {
}
TfLiteDelegatePtr GpuDelegateProvider::CreateTfLiteDelegate(
const BenchmarkParams& params) const {
const ToolParams& params) const {
TfLiteDelegatePtr delegate(nullptr, [](TfLiteDelegate*) {});
if (params.Get<bool>("use_gpu")) {
@ -139,8 +137,8 @@ TfLiteDelegatePtr GpuDelegateProvider::CreateTfLiteDelegate(
delegate = TfLiteDelegatePtr(TFLGpuDelegateCreate(&gpu_opts),
&TFLGpuDelegateDelete);
#else
TFLITE_LOG(WARN) << "The GPU delegate compile options are only supported "
"to be benchmarked on Android or iOS platforms.";
TFLITE_LOG(WARN) << "The GPU delegate compile options are only supported "
"on Android or iOS platforms.";
delegate = evaluation::CreateGPUDelegate();
#endif
@ -151,5 +149,5 @@ TfLiteDelegatePtr GpuDelegateProvider::CreateTfLiteDelegate(
return delegate;
}
} // namespace benchmark
} // namespace tools
} // namespace tflite

View File

@ -14,7 +14,7 @@ limitations under the License.
==============================================================================*/
#include <string>
#include "tensorflow/lite/tools/benchmark/delegate_provider.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/evaluation/utils.h"
#if (defined(ANDROID) || defined(__ANDROID__)) && \
@ -27,35 +27,32 @@ limitations under the License.
#endif
namespace tflite {
namespace benchmark {
namespace tools {
class HexagonDelegateProvider : public DelegateProvider {
public:
HexagonDelegateProvider() {
#if defined(TFLITE_ENABLE_HEXAGON)
default_params_.AddParam("use_hexagon",
BenchmarkParam::Create<bool>(false));
default_params_.AddParam(
"hexagon_lib_path",
BenchmarkParam::Create<std::string>("/data/local/tmp"));
default_params_.AddParam("use_hexagon", ToolParam::Create<bool>(false));
default_params_.AddParam("hexagon_lib_path",
ToolParam::Create<std::string>("/data/local/tmp"));
default_params_.AddParam("hexagon_profiling",
BenchmarkParam::Create<bool>(false));
ToolParam::Create<bool>(false));
#endif
}
std::vector<Flag> CreateFlags(BenchmarkParams* params) const final;
std::vector<Flag> CreateFlags(ToolParams* params) const final;
void LogParams(const BenchmarkParams& params) const final;
void LogParams(const ToolParams& params) const final;
TfLiteDelegatePtr CreateTfLiteDelegate(
const BenchmarkParams& params) const final;
TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final;
std::string GetName() const final { return "Hexagon"; }
};
REGISTER_DELEGATE_PROVIDER(HexagonDelegateProvider);
std::vector<Flag> HexagonDelegateProvider::CreateFlags(
BenchmarkParams* params) const {
ToolParams* params) const {
#if defined(TFLITE_ENABLE_HEXAGON)
std::vector<Flag> flags = {
CreateFlag<bool>("use_hexagon", params, "Use Hexagon delegate"),
@ -70,7 +67,7 @@ std::vector<Flag> HexagonDelegateProvider::CreateFlags(
#endif
}
void HexagonDelegateProvider::LogParams(const BenchmarkParams& params) const {
void HexagonDelegateProvider::LogParams(const ToolParams& params) const {
#if defined(TFLITE_ENABLE_HEXAGON)
TFLITE_LOG(INFO) << "Use Hexagon : [" << params.Get<bool>("use_hexagon")
<< "]";
@ -82,7 +79,7 @@ void HexagonDelegateProvider::LogParams(const BenchmarkParams& params) const {
}
TfLiteDelegatePtr HexagonDelegateProvider::CreateTfLiteDelegate(
const BenchmarkParams& params) const {
const ToolParams& params) const {
TfLiteDelegatePtr delegate(nullptr, [](TfLiteDelegate*) {});
#if defined(TFLITE_ENABLE_HEXAGON)
if (params.Get<bool>("use_hexagon")) {
@ -105,5 +102,5 @@ TfLiteDelegatePtr HexagonDelegateProvider::CreateTfLiteDelegate(
return delegate;
}
} // namespace benchmark
} // namespace tools
} // namespace tflite

View File

@ -14,40 +14,38 @@ limitations under the License.
==============================================================================*/
#include <string>
#include "tensorflow/lite/tools/benchmark/delegate_provider.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/evaluation/utils.h"
#if defined(__ANDROID__)
#include "tensorflow/lite/nnapi/nnapi_util.h"
#endif
namespace tflite {
namespace benchmark {
namespace tools {
class NnapiDelegateProvider : public DelegateProvider {
public:
NnapiDelegateProvider() {
default_params_.AddParam("use_nnapi", BenchmarkParam::Create<bool>(false));
default_params_.AddParam("use_nnapi", ToolParam::Create<bool>(false));
default_params_.AddParam("nnapi_execution_preference",
BenchmarkParam::Create<std::string>(""));
ToolParam::Create<std::string>(""));
default_params_.AddParam("nnapi_accelerator_name",
BenchmarkParam::Create<std::string>(""));
ToolParam::Create<std::string>(""));
default_params_.AddParam("disable_nnapi_cpu",
BenchmarkParam::Create<bool>(false));
ToolParam::Create<bool>(false));
}
std::vector<Flag> CreateFlags(BenchmarkParams* params) const final;
std::vector<Flag> CreateFlags(ToolParams* params) const final;
void LogParams(const BenchmarkParams& params) const final;
void LogParams(const ToolParams& params) const final;
TfLiteDelegatePtr CreateTfLiteDelegate(
const BenchmarkParams& params) const final;
TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final;
std::string GetName() const final { return "NNAPI"; }
};
REGISTER_DELEGATE_PROVIDER(NnapiDelegateProvider);
std::vector<Flag> NnapiDelegateProvider::CreateFlags(
BenchmarkParams* params) const {
std::vector<Flag> NnapiDelegateProvider::CreateFlags(ToolParams* params) const {
std::vector<Flag> flags = {
CreateFlag<bool>("use_nnapi", params, "use nnapi delegate api"),
CreateFlag<std::string>("nnapi_execution_preference", params,
@ -63,7 +61,7 @@ std::vector<Flag> NnapiDelegateProvider::CreateFlags(
return flags;
}
void NnapiDelegateProvider::LogParams(const BenchmarkParams& params) const {
void NnapiDelegateProvider::LogParams(const ToolParams& params) const {
#if defined(__ANDROID__)
TFLITE_LOG(INFO) << "Use nnapi : [" << params.Get<bool>("use_nnapi") << "]";
if (params.Get<bool>("use_nnapi")) {
@ -90,7 +88,7 @@ void NnapiDelegateProvider::LogParams(const BenchmarkParams& params) const {
}
TfLiteDelegatePtr NnapiDelegateProvider::CreateTfLiteDelegate(
const BenchmarkParams& params) const {
const ToolParams& params) const {
TfLiteDelegatePtr delegate(nullptr, [](TfLiteDelegate*) {});
if (params.Get<bool>("use_nnapi")) {
StatefulNnApiDelegate::Options options;
@ -150,5 +148,5 @@ TfLiteDelegatePtr NnapiDelegateProvider::CreateTfLiteDelegate(
return delegate;
}
} // namespace benchmark
} // namespace tools
} // namespace tflite

View File

@ -14,44 +14,42 @@ limitations under the License.
==============================================================================*/
#include <string>
#include "tensorflow/lite/tools/benchmark/delegate_provider.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/evaluation/utils.h"
namespace tflite {
namespace benchmark {
namespace tools {
class XnnpackDelegateProvider : public DelegateProvider {
public:
XnnpackDelegateProvider() {
default_params_.AddParam("use_xnnpack",
BenchmarkParam::Create<bool>(false));
default_params_.AddParam("use_xnnpack", ToolParam::Create<bool>(false));
}
std::vector<Flag> CreateFlags(BenchmarkParams* params) const final;
std::vector<Flag> CreateFlags(ToolParams* params) const final;
void LogParams(const BenchmarkParams& params) const final;
void LogParams(const ToolParams& params) const final;
TfLiteDelegatePtr CreateTfLiteDelegate(
const BenchmarkParams& params) const final;
TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final;
std::string GetName() const final { return "XNNPACK"; }
};
REGISTER_DELEGATE_PROVIDER(XnnpackDelegateProvider);
std::vector<Flag> XnnpackDelegateProvider::CreateFlags(
BenchmarkParams* params) const {
ToolParams* params) const {
std::vector<Flag> flags = {
CreateFlag<bool>("use_xnnpack", params, "use XNNPack")};
return flags;
}
void XnnpackDelegateProvider::LogParams(const BenchmarkParams& params) const {
void XnnpackDelegateProvider::LogParams(const ToolParams& params) const {
TFLITE_LOG(INFO) << "Use xnnpack : [" << params.Get<bool>("use_xnnpack")
<< "]";
}
TfLiteDelegatePtr XnnpackDelegateProvider::CreateTfLiteDelegate(
const BenchmarkParams& params) const {
const ToolParams& params) const {
if (params.Get<bool>("use_xnnpack")) {
return evaluation::CreateXNNPACKDelegate(
params.Get<int32_t>("num_threads"));
@ -59,5 +57,5 @@ TfLiteDelegatePtr XnnpackDelegateProvider::CreateTfLiteDelegate(
return TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
}
} // namespace benchmark
} // namespace tools
} // namespace tflite

View File

@ -57,6 +57,7 @@ cc_library(
"//conditions:default": [],
}) + select({
"//tensorflow:fuchsia": [],
"//tensorflow:windows": [],
"//conditions:default": [
"//tensorflow/lite/delegates/xnnpack:xnnpack_delegate",
],
@ -73,8 +74,8 @@ cc_library(
"//tensorflow/lite/tools:command_line_flags",
"//tensorflow/lite/tools:logging",
"//tensorflow/lite/tools:tool_params",
"//tensorflow/lite/tools/benchmark:delegate_provider_hdr",
"//tensorflow/lite/tools/benchmark:tflite_execution_providers",
"//tensorflow/lite/tools/delegates:delegate_provider_hdr",
"//tensorflow/lite/tools/delegates:tflite_execution_providers",
"//tensorflow/lite/tools/evaluation/proto:evaluation_stages_cc_proto",
],
)

View File

@ -78,7 +78,7 @@ TfLiteDelegatePtr CreateTfLiteDelegate(const TfliteInferenceParams& params,
}
DelegateProviders::DelegateProviders()
: delegates_list_(benchmark::GetRegisteredDelegateProviders()),
: delegates_list_(tools::GetRegisteredDelegateProviders()),
delegates_map_([=]() -> std::unordered_map<std::string, int> {
std::unordered_map<std::string, int> delegates_map;
for (int i = 0; i < delegates_list_.size(); ++i) {

View File

@ -20,7 +20,7 @@ limitations under the License.
#include <unordered_map>
#include <vector>
#include "tensorflow/lite/tools/benchmark/delegate_provider.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/evaluation/proto/evaluation_stages.pb.h"
#include "tensorflow/lite/tools/evaluation/utils.h"
#include "tensorflow/lite/tools/tool_params.h"
@ -73,7 +73,7 @@ class DelegateProviders {
// flags.
tools::ToolParams params_;
const benchmark::DelegateProviderList& delegates_list_;
const tools::DelegateProviderList& delegates_list_;
// Key is the delegate name, and the value is the index to the
// 'delegates_list_'.
const std::unordered_map<std::string, int> delegates_map_;

View File

@ -212,7 +212,7 @@ TF_LITE_CC_SRCS := $(filter-out $(CORE_CC_EXCLUDE_SRCS), $(CORE_CC_ALL_SRCS))
# Benchmark sources
BENCHMARK_SRCS_DIR := tensorflow/lite/tools/benchmark
DELEGATE_PROVIDER_SRCS_DIR := tensorflow/lite/tools/benchmark
DELEGATE_PROVIDER_SRCS_DIR := tensorflow/lite/tools/delegates
EVALUATION_UTILS_SRCS := \
tensorflow/lite/tools/evaluation/utils.cc
BENCHMARK_ALL_SRCS := \