Add C++ and Java APIs for GPU delegate whitelisting.

The GPU delegate is not supported on all Android devices due to differences in
OpenGL ES versions, driver capabilities and stability. This change provides a
whitelist that can be used to detect whether the delegate is supported on the
current device.

PiperOrigin-RevId: 314495681
Change-Id: I86c444188ebf998d6cfb1ea27428ce0900e926db
This commit is contained in:
A. Unique TensorFlower 2020-06-03 02:42:05 -07:00 committed by TensorFlower Gardener
parent 1a79e0e2f5
commit 5254b55e35
21 changed files with 1345 additions and 1 deletions

View File

@ -4,6 +4,9 @@ package(
# Java sources for the GPU delegate, including the whitelist API.
# Fix: the old single-entry `srcs` line was left in alongside the new list,
# which duplicates the keyword argument and is invalid Starlark.
filegroup(
    name = "gpu_delegate",
    srcs = [
        "GpuDelegate.java",
        "Whitelist.java",
    ],
    visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,93 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.lite.gpu;
import java.io.Closeable;
/**
* GPU Delegate Whitelisting data.
*
* <p>The GPU delegate is not supported on all Android devices, due to differences in available
* OpenGL versions, driver features, and device resources. This class provides information on
* whether the GPU delegate is suitable for the current device.
*
* <p>This API is experimental and subject to change.
*
* <p><b>WARNING:</b> the whitelist is constructed from testing done on a limited set of models. You
* should plan to verify that your own model(s) work.
*
* <p>Example usage:
*
* <pre>{@code
* Interpreter.Options options = new Interpreter.Options();
* try (Whitelist whitelist = new Whitelist()) {
* if (whitelist.isDelegateSupportedOnThisDevice()) {
* GpuDelegate.Options delegateOptions = whitelist.getBestOptionsForThisDevice();
* gpuDelegate = new GpuDelegate(delegateOptions);
* options.addDelegate(gpuDelegate);
* }
* }
* Interpreter interpreter = new Interpreter(modelBuffer, options);
* }</pre>
*/
public class Whitelist implements Closeable {

  // Sentinel meaning "no native whitelist allocated" (closed or never created).
  private static final long INVALID_WHITELIST_HANDLE = 0;
  // Name of the JNI library providing the native methods declared below.
  private static final String TFLITE_GPU_LIB = "tensorflowlite_gpu_jni";

  // Handle (native pointer) to the C++ whitelist helper; owned by this object
  // and released in close().
  private long whitelistHandle = INVALID_WHITELIST_HANDLE;

  /**
   * Whether the GPU delegate is supported on this device.
   *
   * @throws IllegalStateException if the whitelist has already been closed.
   */
  public boolean isDelegateSupportedOnThisDevice() {
    if (whitelistHandle == INVALID_WHITELIST_HANDLE) {
      throw new IllegalStateException("Trying to query a closed whitelist.");
    }
    return nativeIsDelegateSupportedOnThisDevice(whitelistHandle);
  }

  /** What options should be used for the GPU delegate. */
  public GpuDelegate.Options getBestOptionsForThisDevice() {
    // For forward compatibility, when the whitelist contains more information.
    return new GpuDelegate.Options();
  }

  /** Creates the whitelist, reading device information through the native library. */
  public Whitelist() {
    whitelistHandle = createWhitelist();
  }

  /**
   * Frees TFLite resources in C runtime.
   *
   * <p>User is expected to call this method explicitly.
   */
  @Override
  public void close() {
    if (whitelistHandle != INVALID_WHITELIST_HANDLE) {
      deleteWhitelist(whitelistHandle);
      whitelistHandle = INVALID_WHITELIST_HANDLE;
    }
  }

  static {
    // Load the JNI library up front so the native methods can be bound.
    System.loadLibrary(TFLITE_GPU_LIB);
  }

  private static native long createWhitelist();

  private static native void deleteWhitelist(long handle);

  private static native boolean nativeIsDelegateSupportedOnThisDevice(long handle);
}

View File

@ -26,7 +26,13 @@ cc_library(
],
deps = [
"//tensorflow/lite/delegates/gpu:delegate",
"//tensorflow/lite/delegates/gpu/common:gpu_info",
"//tensorflow/lite/delegates/gpu/gl:egl_environment",
"//tensorflow/lite/delegates/gpu/gl:request_gpu_info",
"//tensorflow/lite/experimental/acceleration/whitelist:android_info",
"//tensorflow/lite/experimental/acceleration/whitelist:gpu_whitelist",
"//tensorflow/lite/java/jni",
"@com_google_absl//absl/status",
],
alwayslink = 1,
)

View File

@ -15,7 +15,13 @@ limitations under the License.
#include <jni.h>
#include "absl/status/status.h"
#include "tensorflow/lite/delegates/gpu/common/gpu_info.h"
#include "tensorflow/lite/delegates/gpu/delegate.h"
#include "tensorflow/lite/delegates/gpu/gl/egl_environment.h"
#include "tensorflow/lite/delegates/gpu/gl/request_gpu_info.h"
#include "tensorflow/lite/experimental/acceleration/whitelist/android_info.h"
#include "tensorflow/lite/experimental/acceleration/whitelist/gpu_whitelist.h"
#ifdef __cplusplus
extern "C" {
@ -44,6 +50,66 @@ JNIEXPORT void JNICALL Java_org_tensorflow_lite_gpu_GpuDelegate_deleteDelegate(
TfLiteGpuDelegateV2Delete(reinterpret_cast<TfLiteDelegate*>(delegate));
}
namespace {

// Gathers the device information (Android properties and OpenGL GPU info)
// that whitelist decisions are keyed on, and answers support queries
// against the bundled GPU whitelist.
class WhitelistHelper {
 public:
  // Reads Android properties and, on SDK >= 21, the OpenGL GPU info.
  // Failures to construct the GL environment are returned as a non-OK status.
  absl::Status ReadInfo() {
    auto status = tflite::acceleration::RequestAndroidInfo(&android_info_);
    if (!status.ok()) return status;
    if (IsPre21SdkVersion(android_info_.android_sdk_version)) {
      // Weakly linked symbols may not be available on pre-21, and the GPU is
      // not supported anyway so return early.
      return absl::OkStatus();
    }
    std::unique_ptr<tflite::gpu::gl::EglEnvironment> env;
    status = tflite::gpu::gl::EglEnvironment::NewEglEnvironment(&env);
    if (!status.ok()) return status;
    status = tflite::gpu::gl::RequestGpuInfo(&gpu_info_);
    if (!status.ok()) return status;
    return absl::OkStatus();
  }

  // True iff the gathered device specs are covered by the whitelist.
  bool IsDelegateSupportedOnThisDevice() {
    return whitelist_.Includes(android_info_, gpu_info_);
  }

 private:
  // Numeric check of the SDK-version property. The original lexicographic
  // string compare (`version < "21"`) misclassifies single-digit versions
  // ("9" < "21" is false as strings); parse the decimal value instead.
  static bool IsPre21SdkVersion(const std::string& version) {
    int value = 0;
    for (char c : version) {
      if (c < '0' || c > '9') return true;  // Unparseable: treat as pre-21.
      value = value * 10 + (c - '0');
    }
    return value < 21;  // Empty string parses to 0, i.e. pre-21.
  }

  tflite::acceleration::AndroidInfo android_info_;
  tflite::gpu::GpuInfo gpu_info_;
  tflite::acceleration::GPUWhitelist whitelist_;
};

}  // namespace
// Allocates a native WhitelistHelper and reads device info into it.
// Ownership passes to the Java Whitelist object, which must call
// deleteWhitelist() with the returned handle to free it.
JNIEXPORT jlong JNICALL Java_org_tensorflow_lite_gpu_Whitelist_createWhitelist(
    JNIEnv* env, jclass clazz) {
  WhitelistHelper* whitelist = new WhitelistHelper;
  auto status = whitelist->ReadInfo();
  // Errors in ReadInfo should almost always be failures to construct the OpenGL
  // environment. Treating that as "GPU unsupported" is reasonable, and we can
  // swallow the error.
  status.IgnoreError();
  return reinterpret_cast<jlong>(whitelist);
}
// Queries the whitelist decision for this device. The Java caller guards
// against a closed (zero) handle before invoking this method.
JNIEXPORT jboolean JNICALL
Java_org_tensorflow_lite_gpu_Whitelist_nativeIsDelegateSupportedOnThisDevice(
    JNIEnv* env, jclass clazz, jlong whitelist_handle) {
  WhitelistHelper* whitelist =
      reinterpret_cast<WhitelistHelper*>(whitelist_handle);
  return whitelist->IsDelegateSupportedOnThisDevice() ? JNI_TRUE : JNI_FALSE;
}
// Frees the native WhitelistHelper created by createWhitelist().
JNIEXPORT void JNICALL Java_org_tensorflow_lite_gpu_Whitelist_deleteWhitelist(
    JNIEnv* env, jclass clazz, jlong whitelist_handle) {
  WhitelistHelper* whitelist =
      reinterpret_cast<WhitelistHelper*>(whitelist_handle);
  delete whitelist;
}
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus

View File

@ -0,0 +1,15 @@
# Accelerator whitelisting
Experimental library and tools for determining whether an accelerator engine
works well on a given device, and for a given model.
## Platform-agnostic, Android-first
Android-focused, since the much smaller set of configurations on iOS means there
is much less need for whitelisting on iOS.
## Not just for TfLite
This code lives in the TfLite codebase, since TfLite is the first open-source
customer. It is however meant to support other users (direct use of NNAPI,
mediapipe).

View File

@ -0,0 +1,157 @@
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
load("@flatbuffers//:build_defs.bzl", "flatbuffer_cc_library")
load("//tensorflow/lite:special_rules.bzl", "tflite_portable_test_suite")
package(
    default_visibility = [
        "//visibility:public",
    ],
    licenses = ["notice"],  # Apache 2.0
)

# Flatbuffer schema describing the device database decision tree.
flatbuffer_cc_library(
    name = "database_fbs",
    srcs = ["database.fbs"],
)

# Evaluation of the decision tree over a device database (see devicedb.h).
cc_library(
    name = "devicedb",
    srcs = [
        "devicedb.cc",
    ],
    hdrs = [
        "devicedb.h",
        "variables.h",
    ],
    deps = [
        ":database_fbs",
    ],
)

# Developer tool converting a JSON device database to a flatbuffer binary.
cc_binary(
    name = "json_to_fb",
    srcs = ["json_to_fb.cc"],
    deps = [
        "//tensorflow/lite/tools:command_line_flags",
        "@flatbuffers",
    ],
)

# Converts the checked-in sample JSON database to flatbuffer form for tests.
genrule(
    name = "devicedb-sample_bin",
    srcs = [
        "database.fbs",
        "devicedb-sample.json",
    ],
    outs = ["devicedb-sample.bin"],
    cmd = """
$(location :json_to_fb) \
--fbs=$(location :database.fbs) \
--json_input=$(location :devicedb-sample.json) \
--fb_output=$(@)
""",
    tools = [":json_to_fb"],
)

# Embeds the sample database binary into C++ sources for devicedb_test.
genrule(
    name = "devicedb-sample_cc",
    srcs = ["devicedb-sample.bin"],
    outs = [
        "devicedb-sample.cc",
        "devicedb-sample.h",
    ],
    # convert_file_to_c_source for some reason doesn't define the global with
    # 'extern', which is needed for global const variables in C++.
    cmd = """
$(location //tensorflow/lite/python:convert_file_to_c_source) \
--input_tflite_file $(location :devicedb-sample.bin) \
--output_header_file $(location :devicedb-sample.h) \
--output_source_file $(location :devicedb-sample.cc) \
--array_variable_name g_tflite_acceleration_devicedb_sample_binary
perl -p -i -e 's/const unsigned char/extern const unsigned char/' $(location :devicedb-sample.cc)
""",
    tools = ["//tensorflow/lite/python:convert_file_to_c_source"],
)

# Unit tests for decision-tree evaluation, driven by the sample database.
cc_test(
    name = "devicedb_test",
    srcs = [
        "devicedb-sample.cc",
        "devicedb-sample.h",
        "devicedb_test.cc",
    ],
    deps = [
        ":database_fbs",
        ":devicedb",
        "//tensorflow/lite/testing:util",
        "@com_google_googletest//:gtest",
        "@flatbuffers",
    ],
)

# Embeds the checked-in GPU whitelist flatbuffer into C++ sources.
genrule(
    name = "gpu_whitelist_binary",
    srcs = ["gpu_whitelist.bin"],
    outs = [
        "gpu_whitelist_binary.h",
        "gpu_whitelist_binary.cc",
    ],
    # convert_file_to_c_source for some reason doesn't define the global with
    # 'extern', which is needed for global const variables in C++.
    cmd = """
$(location //tensorflow/lite/python:convert_file_to_c_source) \
--input_tflite_file $(location :gpu_whitelist.bin) \
--output_header_file $(location :gpu_whitelist_binary.h) \
--output_source_file $(location :gpu_whitelist_binary.cc) \
--array_variable_name g_tflite_acceleration_gpu_whitelist_binary
perl -p -i -e 's/const unsigned char/extern const unsigned char/' $(location :gpu_whitelist_binary.cc)
""",
    tools = ["//tensorflow/lite/python:convert_file_to_c_source"],
)

# Android system-property access (SDK version, device model, etc).
cc_library(
    name = "android_info",
    srcs = ["android_info.cc"],
    hdrs = ["android_info.h"],
    deps = [
        "@com_google_absl//absl/status",
    ],
)

# GPU delegate whitelist lookup built on the embedded device database.
cc_library(
    name = "gpu_whitelist",
    srcs = [
        "gpu_whitelist.cc",
        "gpu_whitelist_binary.cc",
        "gpu_whitelist_binary.h",
    ],
    hdrs = [
        "gpu_whitelist.h",
    ],
    deps = [
        ":android_info",
        ":database_fbs",
        ":devicedb",
        "//tensorflow/lite/delegates/gpu:delegate",
        "//tensorflow/lite/delegates/gpu/common:gpu_info",
        "@com_google_absl//absl/status",
        "@com_google_absl//absl/strings",
        "@flatbuffers",
    ],
)

tflite_portable_test_suite()

View File

@ -0,0 +1,13 @@
# GPU delegate whitelist
This package provides data and code for deciding if the GPU delegate is
supported on a specific Android device.
## Customizing the GPU whitelist
- Convert from checked-in flatbuffer to json by running `flatc -t --raw-binary
--strict-json database.fbs -- gpu_whitelist.bin`
- Edit the json
- Convert from json to flatbuffer `flatc -b database.fbs --
gpu_whitelist.json`
- Rebuild ../../../java:tensorflow-lite-gpu

View File

@ -0,0 +1,52 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/acceleration/whitelist/android_info.h"
#include <string>
#include "absl/status/status.h"
#ifdef __ANDROID__
#include <sys/system_properties.h>
#endif // __ANDROID__
namespace {

// Returns the value of the named Android system property, or an empty string
// when the property is unset or when not built for Android.
std::string GetPropertyValue(const std::string& property) {
#ifdef __ANDROID__
  char value[PROP_VALUE_MAX];
  // Use the length returned by __system_property_get instead of ignoring it:
  // this does not rely on the buffer contents when the property is missing.
  const int length = __system_property_get(property.c_str(), value);
  if (length <= 0) return std::string();
  return std::string(value, length);
#else   // !__ANDROID__
  return std::string();
#endif  // __ANDROID__
}

}  // namespace
namespace tflite {
namespace acceleration {

// Fills `info_out` with the raw values of the relevant Android system
// properties. Returns InvalidArgumentError on a null output pointer.
absl::Status RequestAndroidInfo(AndroidInfo* info_out) {
  if (!info_out) {
    return absl::InvalidArgumentError("info_out may not be null");
  }
  // Each field is the unmodified property value; missing properties yield "".
  info_out->android_sdk_version = GetPropertyValue("ro.build.version.sdk");
  info_out->device = GetPropertyValue("ro.product.device");
  info_out->model = GetPropertyValue("ro.product.model");
  info_out->manufacturer = GetPropertyValue("ro.product.manufacturer");
  return absl::OkStatus();
}

}  // namespace acceleration
}  // namespace tflite

View File

@ -0,0 +1,43 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_ANDROID_INFO_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_ANDROID_INFO_H_
#include <string>
#include "absl/status/status.h"
namespace tflite {
namespace acceleration {
// Information about an Android device, used for determining whitelisting
// status. Fields hold the raw Android system-property values (as strings).
struct AndroidInfo {
  // Property ro.build.version.sdk (kept as a string; not parsed to an int).
  std::string android_sdk_version;
  // Property ro.product.model
  std::string model;
  // Property ro.product.device
  std::string device;
  // Property ro.product.manufacturer
  std::string manufacturer;
};
absl::Status RequestAndroidInfo(AndroidInfo* info_out);
} // namespace acceleration
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_ANDROID_INFO_H_

View File

@ -0,0 +1,58 @@
// Copyright 2020 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
namespace tflite.acceleration;

// How an edge's value is compared against the queried variable value.
enum Comparison : byte {
  // Edge matches only when the values are exactly equal.
  EQUAL = 0,
  // Edge matches when the queried value is at least the edge's value
  // (string comparison; see the evaluation in devicedb.cc).
  MINIMUM = 1,
}

// Mapping from available device features to whitelisting decisions. Basic usage is to:
// 1) Map easily available device data (like Android version,
//    Manufacturer, Device) to things like SoC vendor, SoC model.
// 2) Map complete device data to delegate-specific features and support status
// 3) Map delegate-specific features to delegate configuration.
//
// The structure describes a decision tree, with multiple matching branches.
// The branches are applied depth-first.
table DeviceDatabase {
  // Top-level decision tree nodes, each evaluated in order.
  root:[tflite.acceleration.DeviceDecisionTreeNode];
}

table DeviceDecisionTreeNode {
  // The variables are strings, as we have multiple clients that want to
  // introduce their own fields. Known variables are listed in variables.h.
  variable:string (shared);
  // How the edges below are matched against the variable's value.
  comparison:tflite.acceleration.Comparison;
  items:[tflite.acceleration.DeviceDecisionTreeEdge];
}

table DeviceDecisionTreeEdge {
  // Under which variable value does this item match.
  value:string (key, shared);
  // Which child branches should also be consulted and used to override this
  // node.
  children:[tflite.acceleration.DeviceDecisionTreeNode];
  // What information can be derived about this device.
  derived_properties:[tflite.acceleration.DerivedProperty];
}

// Derived variable value to combine with detected variables.
table DerivedProperty {
  variable:string (shared);
  value:string (shared);
}

root_type DeviceDatabase;

View File

@ -0,0 +1,169 @@
{
"root": [
{
"variable": "tflite.device_model",
"items": [
{
"value": "m712c",
"derived_properties": [
{
"variable": "tflite.soc_model",
"value": "exynos_7872"
}
]
},
{
"value": "sc_02l",
"derived_properties": [
{
"variable": "tflite.soc_model",
"value": "exynos_7885"
}
]
}
]
},
{
"variable": "tflite.opengl_es_version",
"items": [
{
"value": "3.1",
"children": [
{
"variable": "tflite.soc_model",
"items": [
{
"value": "exynos_7872",
"children": [
{
"variable": "tflite.android_sdk_version",
"items": [
{
"value": "24",
"derived_properties": [
{
"variable": "tflite.gpu.status",
"value": "WHITELISTED"
}
]
}
],
"comparison": "MINIMUM"
}
]
},
{
"value": "exynos_7883",
"children": [
{
"variable": "tflite.android_sdk_version",
"items": [
{
"value": "28",
"derived_properties": [
{
"variable": "tflite.gpu.status",
"value": "WHITELISTED"
}
]
}
],
"comparison": "MINIMUM"
}
]
}
]
}
]
}
]
},
{
"variable": "tflite.android_sdk_version",
"items": [
{
"value": "21",
"children": [
{
"variable": "tflite.device_model",
"items": [
{
"value": "huawei_gra_l09",
"children": [
{
"variable": "tflite.device_name",
"items": [
{
"value": "hwgra",
"derived_properties": [
{
"variable": "tflite.gpu.status",
"value": "WHITELISTED"
},
{
"variable": "tflite.gpu.opencl_status",
"value": "WHITELISTED"
}
]
}
]
}
]
}
]
}
]
},
{
"value": "24",
"children": [
{
"variable": "tflite.device_model",
"items": [
{
"value": "sm_j810f",
"children": [
{
"variable": "tflite.device_name",
"items": [
{
"value": "j8y18lte",
"derived_properties": [
{
"variable": "tflite.gpu.status",
"value": "BLACKLISTED"
}
]
}
]
}
]
},
{
"value": "sm_j810m",
"children": [
{
"variable": "tflite.device_name",
"items": [
{
"value": "j8y18lte",
"derived_properties": [
{
"variable": "tflite.gpu.opencl_status",
"value": "WHITELISTED"
}
]
}
]
}
]
}
]
}
]
}
]
}
]
}

View File

@ -0,0 +1,91 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/acceleration/whitelist/devicedb.h"
#include <map>
#include <string>
#include <vector>
#include "tensorflow/lite/experimental/acceleration/whitelist/database_generated.h"
namespace tflite {
namespace acceleration {
namespace {
std::vector<const DeviceDecisionTreeEdge*> Find(
const DeviceDecisionTreeNode* root, const std::string& value) {
std::vector<const DeviceDecisionTreeEdge*> found;
if (root->comparison() == Comparison_EQUAL) {
// Exact match.
const DeviceDecisionTreeEdge* possible =
root->items()->LookupByKey(value.c_str());
if (possible) {
found.push_back(possible);
}
} else {
// Minimum: value should be at least item's value.
for (const DeviceDecisionTreeEdge* item : *(root->items())) {
if (value >= item->value()->str()) {
found.push_back(item);
}
}
}
return found;
}
// Merges the derived properties of `item` into `variable_values`; later
// assignments overwrite earlier ones for the same variable.
void UpdateVariablesFromDeviceDecisionTreeEdges(
    std::map<std::string, std::string>* variable_values,
    const DeviceDecisionTreeEdge& item) {
  const auto* properties = item.derived_properties();
  // An edge without derived properties contributes nothing.
  if (properties == nullptr) return;
  for (const DerivedProperty* property : *properties) {
    (*variable_values)[property->variable()->str()] = property->value()->str();
  }
}
// Evaluates one decision-tree node: looks up the node's variable in
// `variable_values`, finds the matching edges, applies their derived
// properties, and recurses depth-first into each edge's children.
void Follow(const DeviceDecisionTreeNode* root,
            std::map<std::string, std::string>* variable_values) {
  if (!root->variable()) {
    return;
  }
  auto possible_value = variable_values->find(root->variable()->str());
  if (possible_value == variable_values->end()) {
    return;
  }
  std::vector<const DeviceDecisionTreeEdge*> edges =
      Find(root, possible_value->second);
  for (const DeviceDecisionTreeEdge* edge : edges) {
    UpdateVariablesFromDeviceDecisionTreeEdges(variable_values, *edge);
    if (edge->children()) {
      // Distinct name for the child node (the original shadowed `root`).
      for (const DeviceDecisionTreeNode* child : *(edge->children())) {
        Follow(child, variable_values);
      }
    }
  }
}
} // namespace
// Applies every top-level decision tree in `database` to `variable_values`.
void UpdateVariablesFromDatabase(
    std::map<std::string, std::string>* variable_values,
    const DeviceDatabase& database) {
  const auto* roots = database.root();
  // A database without a root list derives nothing.
  if (roots == nullptr) return;
  for (const DeviceDecisionTreeNode* node : *roots) {
    Follow(node, variable_values);
  }
}
} // namespace acceleration
} // namespace tflite

View File

@ -0,0 +1,38 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_DECISION_TREE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_DECISION_TREE_H_
#include <map>
#include <string>
#include "tensorflow/lite/experimental/acceleration/whitelist/database_generated.h"
namespace tflite {
namespace acceleration {
// Use the variables in `variable_values` to evaluate the decision tree in
// `database` and update the `variable_values` based on derived properties in
// the decision tree.
//
// See database.fbs for a description of the decision tree.
void UpdateVariablesFromDatabase(
std::map<std::string, std::string>* variable_values,
const DeviceDatabase& database);
} // namespace acceleration
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_DECISION_TREE_H_

View File

@ -0,0 +1,142 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/acceleration/whitelist/devicedb.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/experimental/acceleration/whitelist/database_generated.h"
#include "tensorflow/lite/experimental/acceleration/whitelist/devicedb-sample.h"
#include "tensorflow/lite/experimental/acceleration/whitelist/variables.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace acceleration {
namespace {
// Fixture exposing the compiled-in sample database flatbuffer.
class DeviceDbTest : public ::testing::Test {
 protected:
  // Parses the sample binary (generated from devicedb-sample.json).
  void LoadSample() {
    device_db_ = flatbuffers::GetRoot<DeviceDatabase>(
        g_tflite_acceleration_devicedb_sample_binary);
  }

  const DeviceDatabase* device_db_ = nullptr;
};
// The sample database parses and has the expected number of top-level trees.
TEST_F(DeviceDbTest, Load) {
  LoadSample();
  ASSERT_TRUE(device_db_);
  ASSERT_TRUE(device_db_->root());
  EXPECT_EQ(device_db_->root()->size(), 3);
}
// Device model maps to the SoC model derived in the sample database.
TEST_F(DeviceDbTest, SocLookup) {
  LoadSample();
  ASSERT_TRUE(device_db_);
  std::map<std::string, std::string> variables;

  // Find first device mapping.
  variables[kDeviceModel] = "m712c";
  UpdateVariablesFromDatabase(&variables, *device_db_);
  EXPECT_EQ(variables[kSoCModel], "exynos_7872");

  // Find second device mapping.
  variables.clear();
  variables[kDeviceModel] = "sc_02l";
  UpdateVariablesFromDatabase(&variables, *device_db_);
  EXPECT_EQ(variables[kSoCModel], "exynos_7885");

  // Make sure no results are returned without a match.
  variables.clear();
  variables[kDeviceModel] = "nosuch";
  UpdateVariablesFromDatabase(&variables, *device_db_);
  EXPECT_EQ(variables.find(kSoCModel), variables.end());
}
// GPU status is derived from OpenGL ES version + SoC + SDK version, with the
// SDK version treated as a MINIMUM comparison.
TEST_F(DeviceDbTest, StatusLookupWithSoC) {
  LoadSample();
  ASSERT_TRUE(device_db_);
  std::map<std::string, std::string> variables;

  // Find exact match.
  variables[kOpenGLESVersion] = "3.1";
  variables[kSoCModel] = "exynos_7872";
  variables[kAndroidSdkVersion] = "24";
  UpdateVariablesFromDatabase(&variables, *device_db_);
  EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusWhitelisted);

  // Ensure no results without a match.
  variables[kOpenGLESVersion] = "3.0";
  variables.erase(variables.find(gpu::kStatus));
  UpdateVariablesFromDatabase(&variables, *device_db_);
  EXPECT_EQ(variables.find(gpu::kStatus), variables.end());

  // Find no results with too low an android version.
  variables.clear();
  variables[kOpenGLESVersion] = "3.1";
  variables[kSoCModel] = "exynos_7883";
  variables[kAndroidSdkVersion] = "24";
  UpdateVariablesFromDatabase(&variables, *device_db_);
  EXPECT_EQ(variables.find(gpu::kStatus), variables.end());

  // Find a match with android version above minimum.
  variables[kAndroidSdkVersion] = "29";
  UpdateVariablesFromDatabase(&variables, *device_db_);
  EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusWhitelisted);
}
// Status can differ per device name even for the same model/SDK combination.
TEST_F(DeviceDbTest, StatusLookupWithDevice) {
  LoadSample();
  ASSERT_TRUE(device_db_);
  std::map<std::string, std::string> variables;

  // Find blacklisted device (same model, different device).
  variables[kAndroidSdkVersion] = "24";
  variables[kDeviceModel] = "sm_j810f";
  variables[kDeviceName] = "j8y18lte";
  UpdateVariablesFromDatabase(&variables, *device_db_);
  EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusBlacklisted);

  // Find whitelisted device (same model, different device).
  variables.clear();
  variables[kAndroidSdkVersion] = "24";
  variables[kDeviceModel] = "sm_j810m";
  variables[kDeviceName] = "j8y18lte";
  UpdateVariablesFromDatabase(&variables, *device_db_);
  EXPECT_EQ(variables[gpu::kOpenCLStatus], gpu::kStatusWhitelisted);
}
// A variable derived by one tree (SoC from model) feeds a later tree's lookup.
TEST_F(DeviceDbTest, StatusLookupBasedOnDerivedProperties) {
  LoadSample();
  ASSERT_TRUE(device_db_);
  std::map<std::string, std::string> variables;

  // Find status based on SoC derived from model.
  variables[kOpenGLESVersion] = "3.1";
  variables[kAndroidSdkVersion] = "24";
  variables[kDeviceModel] = "m712c";
  UpdateVariablesFromDatabase(&variables, *device_db_);
  EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusWhitelisted);
}
} // namespace
} // namespace acceleration
} // namespace tflite
// gtest entry point; routes TFLite logging to stderr before running tests.
int main(int argc, char** argv) {
  ::tflite::LogToStderr();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}

View File

@ -0,0 +1,99 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/acceleration/whitelist/gpu_whitelist.h"
#include <cctype>
#include <map>
#include <string>
#include "absl/strings/string_view.h"
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/experimental/acceleration/whitelist/database_generated.h"
#include "tensorflow/lite/experimental/acceleration/whitelist/devicedb.h"
#include "tensorflow/lite/experimental/acceleration/whitelist/gpu_whitelist_binary.h"
#include "tensorflow/lite/experimental/acceleration/whitelist/variables.h"
namespace tflite {
namespace acceleration {
namespace {
// Lower-cases `input` and replaces spaces/dashes with underscores so values
// compare consistently against the whitelist database.
std::string CanonicalizeValue(absl::string_view input) {
  // This assumes ASCII, which holds for all values we have in the whitelist.
  std::string output(input);
  for (std::size_t i = 0; i < output.size(); i++) {
    // Cast through unsigned char: passing a negative char to isalpha/tolower
    // is undefined behavior per the C standard.
    const unsigned char c = static_cast<unsigned char>(output[i]);
    if (c == ' ' || c == '-') {
      output[i] = '_';
    } else if (std::isalpha(c)) {
      output[i] = static_cast<char>(std::tolower(c));
    }
  }
  return output;
}
// Canonicalizes every value in the map in place; keys are left untouched.
void CanonicalizeValues(std::map<std::string, std::string>* variable_values) {
  for (auto it = variable_values->begin(); it != variable_values->end(); ++it) {
    it->second = CanonicalizeValue(it->second);
  }
}
} // namespace
// Constructs the whitelist from the flatbuffer bundled into the binary.
GPUWhitelist::GPUWhitelist()
    : GPUWhitelist(g_tflite_acceleration_gpu_whitelist_binary) {}

// Constructs the whitelist from caller-provided flatbuffer data. A null
// pointer leaves database_ unset, in which case no status can be derived
// and Includes() returns false.
GPUWhitelist::GPUWhitelist(const unsigned char* whitelist_flatbuffer) {
  if (!whitelist_flatbuffer) return;
  database_ = flatbuffers::GetRoot<DeviceDatabase>(whitelist_flatbuffer);
}
// Builds the variable map for the given device specs (canonicalized), then
// lets the database's decision tree derive further variables such as the
// SoC model and the GPU support status.
std::map<std::string, std::string> GPUWhitelist::CalculateVariables(
    const AndroidInfo& android_info,
    const ::tflite::gpu::GpuInfo& gpu_info) const {
  std::map<std::string, std::string> variables;

  variables[kAndroidSdkVersion] = android_info.android_sdk_version;
  variables[kDeviceModel] = android_info.model;
  variables[kDeviceName] = android_info.device;
  variables[kManufacturer] = android_info.manufacturer;
  variables[kGPUModel] = gpu_info.renderer_name;
  // "<major>.<minor>", e.g. "3.1". std::to_string replaces the original
  // snprintf into a fixed buffer followed by a manual `buffer[len] = '\0'`:
  // snprintf already NUL-terminates, and its return value can exceed the
  // buffer size on truncation, making that write out of bounds.
  variables[kOpenGLESVersion] = std::to_string(gpu_info.major_version) + "." +
                                std::to_string(gpu_info.minor_version);
  CanonicalizeValues(&variables);
  if (!database_) return variables;
  UpdateVariablesFromDatabase(&variables, *database_);
  return variables;
}
// True iff the derived GPU status variable equals the whitelisted marker.
bool GPUWhitelist::Includes(const AndroidInfo& android_info,
                            const ::tflite::gpu::GpuInfo& gpu_info) const {
  const auto variables = CalculateVariables(android_info, gpu_info);
  const auto it = variables.find(gpu::kStatus);
  return it != variables.end() &&
         it->second == std::string(gpu::kStatusWhitelisted);
}
// Returns the delegate options to use for the given device. Currently always
// the defaults; the parameters are accepted now so callers won't need to
// change when the whitelist starts informing the choice.
TfLiteGpuDelegateOptionsV2 GPUWhitelist::GetBestOptionsFor(
    const AndroidInfo& /* android_info */,
    const ::tflite::gpu::GpuInfo& /* gpu_info */) const {
  // This method is for forwards-compatibility: the whitelist may later include
  // information about which backend to choose (OpenGL/OpenCL/Vulkan) or other
  // options.
  return TfLiteGpuDelegateOptionsV2Default();
}
} // namespace acceleration
} // namespace tflite

View File

@ -0,0 +1,85 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_GPU_WHITELIST_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_GPU_WHITELIST_H_
#include <map>
#include <string>
#include "tensorflow/lite/delegates/gpu/common/gpu_info.h"
#include "tensorflow/lite/delegates/gpu/delegate.h"
#include "tensorflow/lite/experimental/acceleration/whitelist/android_info.h"
#include "tensorflow/lite/experimental/acceleration/whitelist/devicedb.h"
namespace tflite {
namespace acceleration {
// This class provides information on GPU delegate support.
//
// The GPU delegate is supported on a subset of Android devices, depending on
// Android version, OpenGL ES version, GPU chipset etc. The support is based on
// measured stability, correctness and performance. For more detail see README.md.
//
// Example usage:
// tflite::Interpreter* interpreter = ... ;
// tflite::acceleration::AndroidInfo android_info;
// tflite::gpu::GpuInfo gpu_info;
// EXPECT_OK(tflite::acceleration::RequestAndroidInfo(&android_info));
// EXPECT_OK(tflite::gpu::gl::EglEnvironment::NewEglEnvironment(&env));
// EXPECT_OK(tflite::gpu::gl::RequestGpuInfo(&tflite_gpu_info));
// tflite::acceleration::GPUWhitelist whitelist;
// TfLiteDelegate* gpu_delegate = nullptr;
//   TfLiteGpuDelegateOptionsV2 gpu_options;
//   if (whitelist.Includes(android_info, gpu_info)) {
//     gpu_options = whitelist.GetBestOptionsFor(android_info, gpu_info);
//     gpu_delegate = TfLiteGpuDelegateV2Create(&gpu_options);
// EXPECT_EQ(interpreter->ModifyGraphWithDelegate(gpu_delegate), TfLiteOk);
// } else {
// // Fallback path.
// }
class GPUWhitelist {
 public:
  // Constructs a whitelist backed by the flatbuffer database bundled into the
  // binary.
  GPUWhitelist();
  // Returns true if the provided device specs are whitelisted by the database.
  bool Includes(const AndroidInfo& android_info,
                const ::tflite::gpu::GpuInfo& gpu_info) const;
  // Returns the best TfLiteGpuDelegateOptionsV2 for the provided device specs
  // based on the database. The output can be modified as desired before passing
  // to delegate creation.
  TfLiteGpuDelegateOptionsV2 GetBestOptionsFor(
      const AndroidInfo& android_info,
      const ::tflite::gpu::GpuInfo& gpu_info) const;
  // Convert android_info and gpu_info into a set of variables used for querying
  // the whitelist, and update variables from whitelist data. See variables.h
  // and devicedb.h for more information.
  std::map<std::string, std::string> CalculateVariables(
      const AndroidInfo& android_info,
      const ::tflite::gpu::GpuInfo& gpu_info) const;

  // Not copyable or assignable.
  GPUWhitelist(const GPUWhitelist&) = delete;
  GPUWhitelist& operator=(const GPUWhitelist&) = delete;

 protected:
  // Constructs a whitelist from the given flatbuffer data, allowing
  // subclasses to inject alternative whitelist contents.
  explicit GPUWhitelist(const unsigned char* whitelist_flatbuffer);
  // NOTE(review): presumably points into the flatbuffer passed at
  // construction (not owned); confirm lifetime in the .cc file.
  const DeviceDatabase* database_;
};
} // namespace acceleration
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_GPU_WHITELIST_H_

View File

@ -0,0 +1,92 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Simple program to convert from JSON to binary flatbuffers for given schema.
//
// Used for creating the binary version of a whitelist.
//
// The flatc command line is not available in all build environments.
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "flatbuffers/idl.h" // from @flatbuffers
#include "flatbuffers/reflection.h" // from @flatbuffers
#include "flatbuffers/reflection_generated.h" // from @flatbuffers
#include "flatbuffers/util.h" // from @flatbuffers
#include "tensorflow/lite/tools/command_line_flags.h"
// Converts a JSON file to a binary flatbuffer using the given schema.
//
// Exit codes: 0 on success; 1 bad flags; 2/3 input load failure; 4 schema
// parse/deserialize failure; 5 JSON parse failure; 6 output write failure.
int main(int argc, char** argv) {
  std::string json_path, fbs_path, fb_path;
  std::vector<tflite::Flag> flags = {
      tflite::Flag::CreateFlag("json_input", &json_path,
                               "Path to input json file."),
      tflite::Flag::CreateFlag("fbs", &fbs_path,
                               "Path to flatbuffer schema to use."),
      tflite::Flag::CreateFlag("fb_output", &fb_path,
                               "Path to a output binary flatbuffer."),
  };
  const bool parse_result =
      tflite::Flags::Parse(&argc, const_cast<const char**>(argv), flags);
  if (!parse_result || json_path.empty() || fbs_path.empty() ||
      fb_path.empty()) {
    std::cerr << tflite::Flags::Usage(argv[0], flags);
    return 1;
  }
  // Load both inputs as text (binary=false).
  std::string json_contents;
  if (!flatbuffers::LoadFile(json_path.c_str(), false, &json_contents)) {
    std::cerr << "Unable to load file " << json_path << std::endl;
    return 2;
  }
  std::string fbs_contents;
  if (!flatbuffers::LoadFile(fbs_path.c_str(), false, &fbs_contents)) {
    std::cerr << "Unable to load file " << fbs_path << std::endl;
    return 3;
  }
  const char* include_directories[] = {nullptr};
  flatbuffers::Parser schema_parser;
  if (!schema_parser.Parse(fbs_contents.c_str(), include_directories)) {
    std::cerr << "Unable to parse schema " << schema_parser.error_ << std::endl;
    return 4;
  }
  // Serialize the parsed schema so reflection data is available for
  // CopyTable() below.
  schema_parser.Serialize();
  auto schema =
      reflection::GetSchema(schema_parser.builder_.GetBufferPointer());
  auto root_table = schema->root_table();
  flatbuffers::Parser parser;
  // Fix: the Deserialize() result was previously ignored; a failure here
  // would make the subsequent JSON parse misbehave silently.
  if (!parser.Deserialize(schema_parser.builder_.GetBufferPointer(),
                          schema_parser.builder_.GetSize())) {
    std::cerr << "Unable to deserialize schema " << parser.error_ << std::endl;
    return 4;
  }
  if (!parser.Parse(json_contents.c_str(), include_directories,
                    json_path.c_str())) {
    std::cerr << "Unable to parse json " << parser.error_ << std::endl;
    return 5;
  }
  // Use CopyTable() to deduplicate the strings.
  const uint8_t* buffer = parser.builder_.GetBufferPointer();
  flatbuffers::FlatBufferBuilder fbb;
  auto root_offset = flatbuffers::CopyTable(
      fbb, *schema, *root_table, *flatbuffers::GetAnyRoot(buffer), true);
  fbb.Finish(root_offset);
  // Fix: open in binary mode so the flatbuffer bytes are written verbatim
  // (text mode translates newlines on some platforms, corrupting the output),
  // and check for write errors instead of failing silently.
  std::ofstream output(fb_path, std::ios::binary);
  if (!output) {
    std::cerr << "Unable to open output file " << fb_path << std::endl;
    return 6;
  }
  output.write(reinterpret_cast<const char*>(fbb.GetBufferPointer()),
               fbb.GetSize());
  output.close();
  if (!output) {
    std::cerr << "Failed to write output file " << fb_path << std::endl;
    return 6;
  }
  return 0;
}

View File

@ -0,0 +1,87 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_VARIABLES_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_VARIABLES_H_
// This file lists generally useful whitelisting properties.
// Properties starting with "tflite." are reserved.
// Users of the whitelisting library can use arbitrary other property names.
namespace tflite {
namespace acceleration {

// System properties, not specific to any single delegate.
//
// Android properties.
//
// Android SDK version number. Android system property ro.build.version.sdk.
// E.g., "28".
constexpr char kAndroidSdkVersion[] = "tflite.android_sdk_version";
// SoC model. Looked up from database or possibly returned from Android system
// property ro.board.platform, normalized. E.g., "sdm450".
constexpr char kSoCModel[] = "tflite.soc_model";
// SoC vendor. Looked up from database. E.g., "qualcomm".
constexpr char kSoCVendor[] = "tflite.soc_vendor";
// Device manufacturer. Android API android.os.Build.MANUFACTURER, normalized.
// E.g., "google".
constexpr char kManufacturer[] = "tflite.manufacturer";
// Device model. Android API android.os.Build.MODEL, normalized.
// E.g., "pixel_2".
constexpr char kDeviceModel[] = "tflite.device_model";
// Device name. Android API android.os.Build.DEVICE, normalized.
// E.g., "walleye".
constexpr char kDeviceName[] = "tflite.device_name";

// GPU-related properties.
//
// OpenGL ES version. E.g., "3.2".
constexpr char kOpenGLESVersion[] = "tflite.opengl_es_version";
// GPU model, result of querying GL_RENDERER, normalized. E.g.,
// "adreno_(tm)_505".
constexpr char kGPUModel[] = "tflite.gpu_model";
// GPU vendor, normalized. E.g., "qualcomm".
// NOTE(review): the earlier example "adreno_(tm)_505" was a GPU model, not a
// vendor; confirm the exact normalized vendor strings against the database.
constexpr char kGPUVendor[] = "tflite.gpu_vendor";
// OpenGL driver version, result of querying GL_VERSION. E.g.,
// "opengl_es_3.2_v@328.0_(git@6fb5a5b,_ife855c4895)_(date:08/21/18)"
constexpr char kOpenGLDriverVersion[] = "tflite.opengl_driver_version";

// NNAPI-related properties.
//
// NNAPI accelerator name, returned by ANeuralNetworksDevice_getName. E.g.,
// "qti-dsp".
constexpr char kNNAPIAccelerator[] = "tflite.nnapi_accelerator";
// NNAPI accelerator feature level, returned by
// ANeuralNetworksDevice_getFeatureLevel. E.g., 29. Actual variables are named
// "tflite.nnapi_feature_level.<accelerator name>", e.g.,
// "tflite.nnapi_feature_level.qti-dsp".
constexpr char kNNAPIFeatureLevelPrefix[] = "tflite.nnapi_feature_level";

namespace gpu {
// GPU-delegate derived properties.
//
// Whether the GPU delegate works in general. Possible values are the
// SupportStatus enums ("UNSET", "UNKNOWN", "WHITELISTED", "BLACKLISTED").
constexpr char kStatus[] = "tflite.gpu.status";
// Whether OpenCL should be allowed. Possible values are the SupportStatus enums
// ("UNSET", "UNKNOWN", "WHITELISTED", "BLACKLISTED").
constexpr char kOpenCLStatus[] = "tflite.gpu.opencl_status";
// Value of kStatus / kOpenCLStatus for a supported device.
constexpr char kStatusWhitelisted[] = "WHITELISTED";
// Value of kStatus / kOpenCLStatus for an unsupported device.
constexpr char kStatusBlacklisted[] = "BLACKLISTED";
}  // namespace gpu
}  // namespace acceleration
}  // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_VARIABLES_H_

View File

@ -355,6 +355,7 @@ filegroup(
srcs = [
"src/test/java/org/tensorflow/lite/InterpreterTestHelper.java",
"src/test/java/org/tensorflow/lite/gpu/GpuDelegateTest.java",
"src/test/java/org/tensorflow/lite/gpu/WhitelistTest.java",
],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,34 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.lite.gpu;
import static com.google.common.truth.Truth.assertThat;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/** Unit tests for {@link org.tensorflow.lite.gpu.Whitelist}. */
@RunWith(JUnit4.class)
public final class WhitelistTest {

  @Test
  public void testBasic() throws Exception {
    // try-with-resources ensures the native resources backing the whitelist
    // are released even if the assertion fails.
    try (Whitelist gpuWhitelist = new Whitelist()) {
      // NOTE(review): this assumes the device running the test supports the
      // GPU delegate; a non-whitelisted test device would fail here.
      assertThat(gpuWhitelist.isDelegateSupportedOnThisDevice()).isTrue();
    }
  }
}