Added support for the Hexagon delegate in benchmark_model.

New command-line option:
--use_hexagon=[true|false]

Refer to https://www.tensorflow.org/lite/performance/hexagon_delegate for more information about how to get the required Qualcomm Hexagon libraries on your device.
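
For example, once benchmark_model, the model file, and the Hexagon libraries have been pushed to /data/local/tmp (see the README changes below), the delegate can be enabled with the new flag. A minimal sketch of a typical invocation; the model file name is illustrative and `--graph` is the usual benchmark_model model flag:
```
adb shell /data/local/tmp/benchmark_model \
  --graph=/data/local/tmp/mobilenet_quant_v1_224.tflite \
  --use_hexagon=true
```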

PiperOrigin-RevId: 289194452
Change-Id: I33d3ea0114172fff5d57d6ad3c3c0c37a3a9f2a0
A. Unique TensorFlower 2020-01-10 17:19:51 -08:00 committed by TensorFlower Gardener
parent a28a77c0dd
commit 9901f967b1
8 changed files with 91 additions and 4 deletions


@@ -29,3 +29,16 @@ cc_library(
"@hexagon_nn//:hexagon_nn_header",
],
)
genrule(
name = "libhexagon_interface",
srcs = [] + select({
"//tensorflow:android_arm64": ["@hexagon_nn//:hexagon/arm64-v8a/libhexagon_interface.so"],
"//tensorflow:android_arm": ["@hexagon_nn//:hexagon/armeabi-v7a/libhexagon_interface.so"],
"//conditions:default": [],
}),
outs = ["libhexagon_interface.so"],
cmd = "cp $(SRCS) $(@D)",
local = 1,
output_to_bindir = 1,
)
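
As a rough sketch of how this new target is consumed (the same flow, with `--config=android_arm`, appears in the benchmark README changes below; `--config=android_arm64` is assumed here for an arm64 device):
```
bazel build --config=android_arm64 \
  tensorflow/lite/experimental/delegates/hexagon/hexagon_nn:libhexagon_interface.so
adb push bazel-bin/tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/libhexagon_interface.so /data/local/tmp
```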


@@ -29,6 +29,7 @@ cc_binary(
"//tensorflow:android": [
"-pie", # Android 5.0 and later supports only PIE
"-lm", # some builtin ops, e.g., tanh, need -lm
"-Wl,--rpath=/data/local/tmp/", # Hexagon delegate libraries should be in /data/local/tmp
],
"//conditions:default": [],
}),


@@ -34,6 +34,13 @@ and the following optional parameters:
* `run_delay`: `float` (default=-1.0) \
The delay in seconds between subsequent benchmark runs. Non-positive values
mean use no delay.
* `use_hexagon`: `bool` (default=false) \
Whether to use the Hexagon delegate. Not all devices support the Hexagon
delegate; refer to the TensorFlow Lite documentation for details on which
devices/chipsets are supported and on how to obtain the required libraries.
To use the Hexagon delegate, also build the
hexagon_nn:libhexagon_interface.so target and copy the library to the
device. All libraries should be copied to /data/local/tmp on the device.
* `use_nnapi`: `bool` (default=false) \
Whether to use [Android NNAPI](https://developer.android.com/ndk/guides/neuralnetworks/).
This API is available on recent Android devices. Note that some Android P
@@ -100,7 +107,18 @@ adb shell chmod +x /data/local/tmp/benchmark_model
adb push mobilenet_quant_v1_224.tflite /data/local/tmp
```
(5) Run the benchmark. For example:
(5) Optionally, install the Hexagon libraries on the device.
This step is only needed when using the Hexagon delegate.
```
bazel build --config=android_arm \
tensorflow/lite/experimental/delegates/hexagon/hexagon_nn:libhexagon_interface.so
adb push bazel-bin/tensorflow/lite/experimental/delegates/hexagon/hexagon_nn/libhexagon_interface.so /data/local/tmp
adb push libhexagon_nn_skel*.so /data/local/tmp
```
(6) Run the benchmark. For example:
```
adb shell /data/local/tmp/benchmark_model \


@@ -61,6 +61,7 @@ BenchmarkParams CreateParams(int32_t num_runs, float min_secs, float max_secs,
params.AddParam("input_layer_shape", BenchmarkParam::Create<std::string>(""));
params.AddParam("input_layer_value_range",
BenchmarkParam::Create<std::string>(""));
params.AddParam("use_hexagon", BenchmarkParam::Create<bool>(false));
params.AddParam("use_nnapi", BenchmarkParam::Create<bool>(false));
params.AddParam("allow_fp16", BenchmarkParam::Create<bool>(false));
params.AddParam("require_full_delegation",


@@ -223,7 +223,7 @@ TfLiteStatus PopulateInputLayerInfo(
// Populate input value range if it's specified.
std::vector<std::string> value_ranges = Split(value_ranges_string, ':');
for (const auto val : value_ranges) {
for (const auto& val : value_ranges) {
std::vector<std::string> name_range = Split(val, ',');
if (name_range.size() != 3) {
TFLITE_LOG(FATAL) << "Wrong input value range item specified: " << val;
@@ -280,6 +280,7 @@ BenchmarkParams BenchmarkTfLiteModel::DefaultParams() {
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("input_layer_value_range",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("use_hexagon", BenchmarkParam::Create<bool>(false));
default_params.AddParam("use_nnapi", BenchmarkParam::Create<bool>(false));
default_params.AddParam("nnapi_execution_preference",
BenchmarkParam::Create<std::string>(""));
@@ -330,6 +331,7 @@ std::vector<Flag> BenchmarkTfLiteModel::GetFlags() {
"layers. Each item is separated by ':', and the item value consists of "
"input layer name and integer-only range values (both low and high are "
"inclusive) separated by ',', e.g. input1,1,2:input2,0,254"),
CreateFlag<bool>("use_hexagon", &params_, "Use Hexagon delegate api"),
CreateFlag<bool>("use_nnapi", &params_, "use nnapi delegate api"),
CreateFlag<std::string>(
"nnapi_execution_preference", &params_,
@@ -374,6 +376,8 @@ void BenchmarkTfLiteModel::LogParams() {
<< params_.Get<std::string>("input_layer_value_range")
<< "]";
#if defined(__ANDROID__)
TFLITE_LOG(INFO) << "Use Hexagon : [" << params_.Get<bool>("use_hexagon")
<< "]";
TFLITE_LOG(INFO) << "Use nnapi : [" << params_.Get<bool>("use_nnapi") << "]";
if (!params_.Get<std::string>("nnapi_execution_preference").empty()) {
TFLITE_LOG(INFO) << "nnapi execution preference: ["
@@ -755,6 +759,22 @@ BenchmarkTfLiteModel::TfLiteDelegatePtrMap BenchmarkTfLiteModel::GetDelegates()
<< params_.Get<std::string>("nnapi_execution_preference")
<< ") to be used.";
}
if (params_.Get<bool>("use_hexagon")) {
const std::string libhexagon_path("/data/local/tmp");
Interpreter::TfLiteDelegatePtr delegate =
evaluation::CreateHexagonDelegate(libhexagon_path);
if (!delegate) {
// Refer to the Tensorflow Lite Hexagon delegate documentation for more
// information about how to get the required libraries.
TFLITE_LOG(WARN)
<< "Could not create Hexagon delegate: platform may not support "
"delegate or required libraries are missing";
} else {
delegates.emplace("Hexagon", std::move(delegate));
}
}
return delegates;
}


@@ -46,6 +46,7 @@ cc_library(
] + select({
"//tensorflow:android": [
"//tensorflow/lite/delegates/gpu:delegate",
"//tensorflow/lite/experimental/delegates/hexagon:hexagon_delegate",
],
"//conditions:default": [],
}),


@@ -28,6 +28,14 @@ limitations under the License.
namespace tflite {
namespace evaluation {
namespace {
Interpreter::TfLiteDelegatePtr CreateNullDelegate() {
return Interpreter::TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
}
} // namespace
std::string StripTrailingSlashes(const std::string& path) {
int end = path.size();
while (end > 0 && path[end - 1] == '/') {
@@ -105,7 +113,7 @@ Interpreter::TfLiteDelegatePtr CreateNNAPIDelegate(
delete reinterpret_cast<StatefulNnApiDelegate*>(delegate);
});
#else
return Interpreter::TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
return CreateNullDelegate();
#endif // defined(__ANDROID__)
}
@@ -126,7 +134,28 @@ Interpreter::TfLiteDelegatePtr CreateGPUDelegate() {
return CreateGPUDelegate(&options);
#else
return Interpreter::TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
return CreateNullDelegate();
#endif // defined(__ANDROID__)
}
Interpreter::TfLiteDelegatePtr CreateHexagonDelegate(
const std::string& library_directory_path) {
#if defined(__ANDROID__)
const TfLiteHexagonDelegateOptions options = {0, 0, false, false};
TfLiteDelegate* delegate = TfLiteHexagonDelegateCreate(&options);
if (delegate) {
if (library_directory_path.empty()) {
TfLiteHexagonInit();
} else {
TfLiteHexagonInitWithPath(library_directory_path.c_str());
}
}
return Interpreter::TfLiteDelegatePtr(delegate, [](TfLiteDelegate* delegate) {
TfLiteHexagonTearDown();
TfLiteHexagonDelegateDelete(delegate);
});
#else
return CreateNullDelegate();
#endif // defined(__ANDROID__)
}


@@ -22,6 +22,7 @@ limitations under the License.
#if defined(__ANDROID__)
#include "tensorflow/lite/delegates/gpu/delegate.h"
#include "tensorflow/lite/experimental/delegates/hexagon/hexagon_delegate.h"
#endif
#include "tensorflow/lite/context.h"
@@ -58,6 +59,9 @@ Interpreter::TfLiteDelegatePtr CreateGPUDelegate(
TfLiteGpuDelegateOptionsV2* options);
#endif
Interpreter::TfLiteDelegatePtr CreateHexagonDelegate(
const std::string& library_directory_path);
} // namespace evaluation
} // namespace tflite