Rename GPU whitelist -> compatibility (list)
PiperOrigin-RevId: 315677454
Change-Id: I5df7e15e95784efd35d6f695328df4c1ec383189
@@ -5,8 +5,8 @@ package(
 filegroup(
     name = "gpu_delegate",
     srcs = [
+        "CompatibilityList.java",
         "GpuDelegate.java",
-        "Whitelist.java",
     ],
     visibility = ["//visibility:public"],
 )
@@ -18,7 +18,7 @@ package org.tensorflow.lite.gpu;
 import java.io.Closeable;
 
 /**
- * GPU Delegate Whitelisting data.
+ * GPU Delegate CompatibilityListing data.
  *
  * <p>The GPU delegate is not supported on all Android devices, due to differences in available
  * OpenGL versions, driver features, and device resources. This class provides information on
@@ -26,16 +26,16 @@ import java.io.Closeable;
  *
  * <p>This API is experimental and subject to change.
  *
- * <p><b>WARNING:</b> the whitelist is constructed from testing done on a limited set of models. You
- * should plan to verify that your own model(s) work.
+ * <p><b>WARNING:</b> the compatibilityList is constructed from testing done on a limited set of
+ * models. You should plan to verify that your own model(s) work.
  *
  * <p>Example usage:
  *
 * <pre>{@code
 * Interpreter.Options options = new Interpreter.Options();
-* try (Whitelist whitelist = new Whitelist()) {
-*   if (whitelist.isDelegateSupportedOnThisDevice()) {
-*     GpuDelegate.Options delegateOptions = whitelist.getBestOptionsForThisDevice();
+* try (CompatibilityList compatibilityList = new CompatibilityList()) {
+*   if (compatibilityList.isDelegateSupportedOnThisDevice()) {
+*     GpuDelegate.Options delegateOptions = compatibilityList.getBestOptionsForThisDevice();
 *     gpuDelegate = new GpuDelegate(delegateOptions):
 *     options.addDelegate(gpuDelegate);
 *   }
@@ -43,29 +43,29 @@ import java.io.Closeable;
 * Interpreter interpreter = new Interpreter(modelBuffer, options);
 * }</pre>
 */
-public class Whitelist implements Closeable {
+public class CompatibilityList implements Closeable {
 
-  private static final long INVALID_WHITELIST_HANDLE = 0;
+  private static final long INVALID_COMPATIBILITY_LIST_HANDLE = 0;
   private static final String TFLITE_GPU_LIB = "tensorflowlite_gpu_jni";
 
-  private long whitelistHandle = INVALID_WHITELIST_HANDLE;
+  private long compatibilityListHandle = INVALID_COMPATIBILITY_LIST_HANDLE;
 
   /** Whether the GPU delegate is supported on this device. */
   public boolean isDelegateSupportedOnThisDevice() {
-    if (whitelistHandle == INVALID_WHITELIST_HANDLE) {
-      throw new IllegalStateException("Trying to query a closed whitelist.");
+    if (compatibilityListHandle == INVALID_COMPATIBILITY_LIST_HANDLE) {
+      throw new IllegalStateException("Trying to query a closed compatibilityList.");
     }
-    return nativeIsDelegateSupportedOnThisDevice(whitelistHandle);
+    return nativeIsDelegateSupportedOnThisDevice(compatibilityListHandle);
   }
 
   /** What options should be used for the GPU delegate. */
   public GpuDelegate.Options getBestOptionsForThisDevice() {
-    // For forward compatibility, when the whitelist contains more information.
+    // For forward compatibility, when the compatibilityList contains more information.
     return new GpuDelegate.Options();
   }
 
-  public Whitelist() {
-    whitelistHandle = createWhitelist();
+  public CompatibilityList() {
+    compatibilityListHandle = createCompatibilityList();
   }
 
   /**
@@ -75,9 +75,9 @@ public class Whitelist implements Closeable {
   */
  @Override
  public void close() {
-    if (whitelistHandle != INVALID_WHITELIST_HANDLE) {
-      deleteWhitelist(whitelistHandle);
-      whitelistHandle = INVALID_WHITELIST_HANDLE;
+    if (compatibilityListHandle != INVALID_COMPATIBILITY_LIST_HANDLE) {
+      deleteCompatibilityList(compatibilityListHandle);
+      compatibilityListHandle = INVALID_COMPATIBILITY_LIST_HANDLE;
     }
   }
 
@@ -85,9 +85,9 @@ public class Whitelist implements Closeable {
     System.loadLibrary(TFLITE_GPU_LIB);
   }
 
-  private static native long createWhitelist();
+  private static native long createCompatibilityList();
 
-  private static native void deleteWhitelist(long handle);
+  private static native void deleteCompatibilityList(long handle);
 
   private static native boolean nativeIsDelegateSupportedOnThisDevice(long handle);
 }
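The renamed Java API keeps the same usage pattern as before. As a minimal end-to-end sketch (illustrative only, not part of this change; the model path, class name, and surrounding application code are assumptions), delegate selection with the new CompatibilityList could look like this:

// Illustrative sketch only. "model.tflite" and GpuDelegateExample are placeholders.
import java.io.File;
import org.tensorflow.lite.Interpreter;
import org.tensorflow.lite.gpu.CompatibilityList;
import org.tensorflow.lite.gpu.GpuDelegate;

public final class GpuDelegateExample {
  public static void main(String[] args) {
    Interpreter.Options options = new Interpreter.Options();
    GpuDelegate gpuDelegate = null;
    // The compatibility list decides whether this device gets the GPU delegate.
    try (CompatibilityList compatibilityList = new CompatibilityList()) {
      if (compatibilityList.isDelegateSupportedOnThisDevice()) {
        GpuDelegate.Options delegateOptions = compatibilityList.getBestOptionsForThisDevice();
        gpuDelegate = new GpuDelegate(delegateOptions);
        options.addDelegate(gpuDelegate);
      }
    }
    // Build the interpreter with or without the delegate, then release both.
    Interpreter interpreter = new Interpreter(new File("model.tflite"), options);
    try {
      // interpreter.run(input, output) would go here.
    } finally {
      interpreter.close();
      if (gpuDelegate != null) {
        gpuDelegate.close();
      }
    }
  }
}

Note that the delegate must stay open for the lifetime of the interpreter, which is why it is closed only after the interpreter in this sketch.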
@@ -29,8 +29,8 @@ cc_library(
         "//tensorflow/lite/delegates/gpu/common:gpu_info",
         "//tensorflow/lite/delegates/gpu/gl:egl_environment",
         "//tensorflow/lite/delegates/gpu/gl:request_gpu_info",
-        "//tensorflow/lite/experimental/acceleration/whitelist:android_info",
-        "//tensorflow/lite/experimental/acceleration/whitelist:gpu_whitelist",
+        "//tensorflow/lite/experimental/acceleration/compatibility:android_info",
+        "//tensorflow/lite/experimental/acceleration/compatibility:gpu_compatibility",
        "//tensorflow/lite/java/jni",
        "@com_google_absl//absl/status",
    ],
@@ -20,8 +20,8 @@ limitations under the License.
 #include "tensorflow/lite/delegates/gpu/delegate.h"
 #include "tensorflow/lite/delegates/gpu/gl/egl_environment.h"
 #include "tensorflow/lite/delegates/gpu/gl/request_gpu_info.h"
-#include "tensorflow/lite/experimental/acceleration/whitelist/android_info.h"
-#include "tensorflow/lite/experimental/acceleration/whitelist/gpu_whitelist.h"
+#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
+#include "tensorflow/lite/experimental/acceleration/compatibility/gpu_compatibility.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -51,7 +51,7 @@ JNIEXPORT void JNICALL Java_org_tensorflow_lite_gpu_GpuDelegate_deleteDelegate(
 }
 
 namespace {
-class WhitelistHelper {
+class CompatibilityListHelper {
  public:
  absl::Status ReadInfo() {
    auto status = tflite::acceleration::RequestAndroidInfo(&android_info_);
@@ -74,40 +74,43 @@ class WhitelistHelper {
  }
 
  bool IsDelegateSupportedOnThisDevice() {
-    return whitelist_.Includes(android_info_, gpu_info_);
+    return compatibility_list_.Includes(android_info_, gpu_info_);
  }
 
 private:
  tflite::acceleration::AndroidInfo android_info_;
  tflite::gpu::GpuInfo gpu_info_;
-  tflite::acceleration::GPUWhitelist whitelist_;
+  tflite::acceleration::GPUCompatibilityList compatibility_list_;
};
} // namespace
 
-JNIEXPORT jlong JNICALL Java_org_tensorflow_lite_gpu_Whitelist_createWhitelist(
+JNIEXPORT jlong JNICALL
+Java_org_tensorflow_lite_gpu_CompatibilityList_createCompatibilityList(
    JNIEnv* env, jclass clazz) {
-  WhitelistHelper* whitelist = new WhitelistHelper;
-  auto status = whitelist->ReadInfo();
+  CompatibilityListHelper* compatibility_list = new CompatibilityListHelper;
+  auto status = compatibility_list->ReadInfo();
  // Errors in ReadInfo should almost always be failures to construct the OpenGL
  // environment. Treating that as "GPU unsupported" is reasonable, and we can
  // swallow the error.
  status.IgnoreError();
-  return reinterpret_cast<jlong>(whitelist);
+  return reinterpret_cast<jlong>(compatibility_list);
}
 
JNIEXPORT jboolean JNICALL
-Java_org_tensorflow_lite_gpu_Whitelist_nativeIsDelegateSupportedOnThisDevice(
-    JNIEnv* env, jclass clazz, jlong whitelist_handle) {
-  WhitelistHelper* whitelist =
-      reinterpret_cast<WhitelistHelper*>(whitelist_handle);
-  return whitelist->IsDelegateSupportedOnThisDevice() ? JNI_TRUE : JNI_FALSE;
+Java_org_tensorflow_lite_gpu_CompatibilityList_nativeIsDelegateSupportedOnThisDevice(
+    JNIEnv* env, jclass clazz, jlong compatibility_list_handle) {
+  CompatibilityListHelper* compatibility_list =
+      reinterpret_cast<CompatibilityListHelper*>(compatibility_list_handle);
+  return compatibility_list->IsDelegateSupportedOnThisDevice() ? JNI_TRUE
+                                                               : JNI_FALSE;
}
 
-JNIEXPORT void JNICALL Java_org_tensorflow_lite_gpu_Whitelist_deleteWhitelist(
-    JNIEnv* env, jclass clazz, jlong whitelist_handle) {
-  WhitelistHelper* whitelist =
-      reinterpret_cast<WhitelistHelper*>(whitelist_handle);
-  delete whitelist;
+JNIEXPORT void JNICALL
+Java_org_tensorflow_lite_gpu_CompatibilityList_deleteCompatibilityList(
+    JNIEnv* env, jclass clazz, jlong compatibility_list_handle) {
+  CompatibilityListHelper* compatibility_list =
+      reinterpret_cast<CompatibilityListHelper*>(compatibility_list_handle);
+  delete compatibility_list;
}
 
#ifdef __cplusplus
@@ -111,20 +111,20 @@ cc_test(
 )
 
 genrule(
-    name = "gpu_whitelist_binary",
-    srcs = ["gpu_whitelist.bin"],
+    name = "gpu_compatibility_binary",
+    srcs = ["gpu_compatibility.bin"],
     outs = [
-        "gpu_whitelist_binary.h",
-        "gpu_whitelist_binary.cc",
+        "gpu_compatibility_binary.h",
+        "gpu_compatibility_binary.cc",
     ],
     # convert_file_to_c_source for some reason doesn't define the global with
     # 'extern', which is needed for global const variables in C++.
     cmd = """
     $(location :convert_binary_to_cc_source) \
-      --input_binary_file $(location :gpu_whitelist.bin) \
-      --output_header_file $(location :gpu_whitelist_binary.h) \
-      --output_source_file $(location :gpu_whitelist_binary.cc) \
-      --array_variable_name g_tflite_acceleration_gpu_whitelist_binary
+      --input_binary_file $(location :gpu_compatibility.bin) \
+      --output_header_file $(location :gpu_compatibility_binary.h) \
+      --output_source_file $(location :gpu_compatibility_binary.cc) \
+      --array_variable_name g_tflite_acceleration_gpu_compatibility_binary
     """,
     tools = [":convert_binary_to_cc_source"],
 )
@@ -139,14 +139,14 @@ cc_library(
 )
 
 cc_library(
-    name = "gpu_whitelist",
+    name = "gpu_compatibility",
     srcs = [
-        "gpu_whitelist.cc",
-        "gpu_whitelist_binary.cc",
-        "gpu_whitelist_binary.h",
+        "gpu_compatibility.cc",
+        "gpu_compatibility_binary.cc",
+        "gpu_compatibility_binary.h",
     ],
     hdrs = [
-        "gpu_whitelist.h",
+        "gpu_compatibility.h",
     ],
     deps = [
         ":android_info",
@@ -1,13 +1,13 @@
-# GPU delegate whitelist
+# GPU delegate compatibility database
 
 This package provides data and code for deciding if the GPU delegate is
 supported on a specific Android device.
 
-## Customizing the GPU whitelist
+## Customizing the database
 
 - Convert from checked-in flatbuffer to json by running `flatc -t --raw-binary
-  --strict-json database.fbs -- gpu_whitelist.bin`
+  --strict-json database.fbs -- gpu_compatibility.bin`
 - Edit the json
 - Convert from json to flatbuffer `flatc -b database.fbs --
-  gpu_whitelist.json`
+  gpu_compatibility.json`
 - Rebuild ../../../java:tensorflow-lite-gpu
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "tensorflow/lite/experimental/acceleration/whitelist/android_info.h"
+#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
 
 #include <string>
 
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_ANDROID_INFO_H_
-#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_ANDROID_INFO_H_
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_ANDROID_INFO_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_ANDROID_INFO_H_
 
 #include <string>
 
@@ -22,7 +22,7 @@ limitations under the License.
 namespace tflite {
 namespace acceleration {
 
-// Information about and Android device, used for determining whitelisting
+// Information about and Android device, used for determining compatibility
 // status.
 struct AndroidInfo {
   // Property ro.build.version.sdk
@@ -40,4 +40,4 @@ absl::Status RequestAndroidInfo(AndroidInfo* info_out);
 } // namespace acceleration
 } // namespace tflite
 
-#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_ANDROID_INFO_H_
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_ANDROID_INFO_H_
@@ -98,7 +98,7 @@ limitations under the License.
 
 source_template = """{license_text}
 // This is a binary file that has been converted into a C++ data array using the
-// //tensorflow/lite/experimental/acceleration/whitelist/convert_binary_to_cc_source.py
+// //tensorflow/lite/experimental/acceleration/compatibility/convert_binary_to_cc_source.py
 // script. This form is useful for compiling into a binary to simplify
 // deployment on mobile devices
 
@@ -131,7 +131,7 @@ extern const int {array_name}_len = {array_length};
 {license_text}
 
 // This is a binary file that has been converted into a C++ data array using the
-// //tensorflow/lite/experimental/acceleration/whitelist/convert_binary_to_cc_source.py
+// //tensorflow/lite/experimental/acceleration/compatibility/convert_binary_to_cc_source.py
 // script. This form is useful for compiling into a binary to simplify
 // deployment on mobile devices
 
@@ -19,7 +19,7 @@ enum Comparison : byte {
   MINIMUM = 1,
 }
 
-// Mapping from available device features to whitelisting decisions. Basic usage is to:
+// Mapping from available device features to compatibility decisions. Basic usage is to:
 // 1) Map easily available device data (like Android version,
 //    Manufacturer, Device) to things like SoC vendor, SoC model.
 // 2) Map complete device data to delegate-specific features and support status
@@ -43,7 +43,7 @@
       "derived_properties": [
         {
           "variable": "tflite.gpu.status",
-          "value": "WHITELISTED"
+          "value": "SUPPORTED"
         }
       ]
     }
@@ -63,7 +63,7 @@
       "derived_properties": [
         {
           "variable": "tflite.gpu.status",
-          "value": "WHITELISTED"
+          "value": "SUPPORTED"
         }
       ]
     }
@@ -99,11 +99,11 @@
       "derived_properties": [
         {
           "variable": "tflite.gpu.status",
-          "value": "WHITELISTED"
+          "value": "SUPPORTED"
         },
         {
           "variable": "tflite.gpu.opencl_status",
-          "value": "WHITELISTED"
+          "value": "SUPPORTED"
         }
       ]
     }
@@ -132,7 +132,7 @@
       "derived_properties": [
         {
           "variable": "tflite.gpu.status",
-          "value": "BLACKLISTED"
+          "value": "UNSUPPORTED"
         }
       ]
     }
@@ -151,7 +151,7 @@
       "derived_properties": [
         {
           "variable": "tflite.gpu.opencl_status",
-          "value": "WHITELISTED"
+          "value": "SUPPORTED"
         }
       ]
     }
@@ -12,13 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "tensorflow/lite/experimental/acceleration/whitelist/devicedb.h"
+#include "tensorflow/lite/experimental/acceleration/compatibility/devicedb.h"
 
 #include <map>
 #include <string>
 #include <vector>
 
-#include "tensorflow/lite/experimental/acceleration/whitelist/database_generated.h"
+#include "tensorflow/lite/experimental/acceleration/compatibility/database_generated.h"
 
 namespace tflite {
 namespace acceleration {
@@ -12,13 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_DECISION_TREE_H_
-#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_DECISION_TREE_H_
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_DECISION_TREE_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_DECISION_TREE_H_
 
 #include <map>
 #include <string>
 
-#include "tensorflow/lite/experimental/acceleration/whitelist/database_generated.h"
+#include "tensorflow/lite/experimental/acceleration/compatibility/database_generated.h"
 
 namespace tflite {
 namespace acceleration {
 
@@ -35,4 +35,4 @@ void UpdateVariablesFromDatabase(
 } // namespace acceleration
 } // namespace tflite
 
-#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_DECISION_TREE_H_
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_DECISION_TREE_H_
@@ -12,16 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "tensorflow/lite/experimental/acceleration/whitelist/devicedb.h"
+#include "tensorflow/lite/experimental/acceleration/compatibility/devicedb.h"
 
 #include <memory>
 #include <string>
 
 #include <gtest/gtest.h>
 #include "flatbuffers/flatbuffers.h" // from @flatbuffers
-#include "tensorflow/lite/experimental/acceleration/whitelist/database_generated.h"
-#include "tensorflow/lite/experimental/acceleration/whitelist/devicedb-sample.h"
-#include "tensorflow/lite/experimental/acceleration/whitelist/variables.h"
+#include "tensorflow/lite/experimental/acceleration/compatibility/database_generated.h"
+#include "tensorflow/lite/experimental/acceleration/compatibility/devicedb-sample.h"
+#include "tensorflow/lite/experimental/acceleration/compatibility/variables.h"
 #include "tensorflow/lite/testing/util.h"
 
 namespace tflite {
@@ -78,7 +78,7 @@ TEST_F(DeviceDbTest, StatusLookupWithSoC) {
   variables[kSoCModel] = "exynos_7872";
   variables[kAndroidSdkVersion] = "24";
   UpdateVariablesFromDatabase(&variables, *device_db_);
-  EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusWhitelisted);
+  EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusSupported);
 
   // Ensure no results without a match.
   variables[kOpenGLESVersion] = "3.0";
@@ -96,27 +96,27 @@ TEST_F(DeviceDbTest, StatusLookupWithSoC) {
   // Find a match with android version above minimum.
   variables[kAndroidSdkVersion] = "29";
   UpdateVariablesFromDatabase(&variables, *device_db_);
-  EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusWhitelisted);
+  EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusSupported);
 }
 
 TEST_F(DeviceDbTest, StatusLookupWithDevice) {
   LoadSample();
   ASSERT_TRUE(device_db_);
   std::map<std::string, std::string> variables;
-  // Find blacklisted device (same model, different device).
+  // Find unsupported device (same model, different device).
   variables[kAndroidSdkVersion] = "24";
   variables[kDeviceModel] = "sm_j810f";
   variables[kDeviceName] = "j8y18lte";
   UpdateVariablesFromDatabase(&variables, *device_db_);
-  EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusBlacklisted);
+  EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusUnsupported);
 
-  // Find whitelisted device (same model, different device).
+  // Find supported device (same model, different device).
   variables.clear();
   variables[kAndroidSdkVersion] = "24";
   variables[kDeviceModel] = "sm_j810m";
   variables[kDeviceName] = "j8y18lte";
   UpdateVariablesFromDatabase(&variables, *device_db_);
-  EXPECT_EQ(variables[gpu::kOpenCLStatus], gpu::kStatusWhitelisted);
+  EXPECT_EQ(variables[gpu::kOpenCLStatus], gpu::kStatusSupported);
 }
 
 TEST_F(DeviceDbTest, StatusLookupBasedOnDerivedProperties) {
@@ -128,7 +128,7 @@ TEST_F(DeviceDbTest, StatusLookupBasedOnDerivedProperties) {
   variables[kAndroidSdkVersion] = "24";
   variables[kDeviceModel] = "m712c";
   UpdateVariablesFromDatabase(&variables, *device_db_);
-  EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusWhitelisted);
+  EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusSupported);
 }
 
 } // namespace
Binary file not shown.
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#include "tensorflow/lite/experimental/acceleration/whitelist/gpu_whitelist.h"
+#include "tensorflow/lite/experimental/acceleration/compatibility/gpu_compatibility.h"
 
 #include <cctype>
 #include <map>
@@ -20,17 +20,17 @@ limitations under the License.
 
 #include "absl/strings/string_view.h"
 #include "flatbuffers/flatbuffers.h" // from @flatbuffers
-#include "tensorflow/lite/experimental/acceleration/whitelist/database_generated.h"
-#include "tensorflow/lite/experimental/acceleration/whitelist/devicedb.h"
-#include "tensorflow/lite/experimental/acceleration/whitelist/gpu_whitelist_binary.h"
-#include "tensorflow/lite/experimental/acceleration/whitelist/variables.h"
+#include "tensorflow/lite/experimental/acceleration/compatibility/database_generated.h"
+#include "tensorflow/lite/experimental/acceleration/compatibility/devicedb.h"
+#include "tensorflow/lite/experimental/acceleration/compatibility/gpu_compatibility_binary.h"
+#include "tensorflow/lite/experimental/acceleration/compatibility/variables.h"
 
 namespace tflite {
 namespace acceleration {
 namespace {
 
 std::string CanonicalizeValue(absl::string_view input) {
-  // This assumes ASCII, which holds for all values we have in the whitelist.
+  // This assumes ASCII, which holds for all values we have in the list.
   std::string output(input);
   for (int i = 0; i < output.size(); i++) {
     char c = output[i];
@@ -51,15 +51,17 @@ void CanonicalizeValues(std::map<std::string, std::string>* variable_values) {
 
 } // namespace
 
-GPUWhitelist::GPUWhitelist()
-    : GPUWhitelist(g_tflite_acceleration_gpu_whitelist_binary) {}
+GPUCompatibilityList::GPUCompatibilityList()
+    : GPUCompatibilityList(g_tflite_acceleration_gpu_compatibility_binary) {}
 
-GPUWhitelist::GPUWhitelist(const unsigned char* whitelist_flatbuffer) {
-  if (!whitelist_flatbuffer) return;
-  database_ = flatbuffers::GetRoot<DeviceDatabase>(whitelist_flatbuffer);
+GPUCompatibilityList::GPUCompatibilityList(
+    const unsigned char* compatibility_list_flatbuffer) {
+  if (!compatibility_list_flatbuffer) return;
+  database_ =
+      flatbuffers::GetRoot<DeviceDatabase>(compatibility_list_flatbuffer);
 }
 
-std::map<std::string, std::string> GPUWhitelist::CalculateVariables(
+std::map<std::string, std::string> GPUCompatibilityList::CalculateVariables(
     const AndroidInfo& android_info,
     const ::tflite::gpu::GpuInfo& gpu_info) const {
   std::map<std::string, std::string> variables;
@@ -80,16 +82,17 @@ std::map<std::string, std::string> GPUWhitelist::CalculateVariables(
   return variables;
 }
 
-bool GPUWhitelist::Includes(const AndroidInfo& android_info,
-                            const ::tflite::gpu::GpuInfo& gpu_info) const {
+bool GPUCompatibilityList::Includes(
+    const AndroidInfo& android_info,
+    const ::tflite::gpu::GpuInfo& gpu_info) const {
   auto variables = CalculateVariables(android_info, gpu_info);
-  return variables[gpu::kStatus] == std::string(gpu::kStatusWhitelisted);
+  return variables[gpu::kStatus] == std::string(gpu::kStatusSupported);
 }
 
-TfLiteGpuDelegateOptionsV2 GPUWhitelist::GetBestOptionsFor(
+TfLiteGpuDelegateOptionsV2 GPUCompatibilityList::GetBestOptionsFor(
     const AndroidInfo& /* android_info */,
     const ::tflite::gpu::GpuInfo& /* gpu_info */) const {
-  // This method is for forwards-compatibility: the whitelist may later include
+  // This method is for forwards-compatibility: the list may later include
   // information about which backend to choose (OpenGL/OpenCL/Vulkan) or other
   // options.
   return TfLiteGpuDelegateOptionsV2Default();
@@ -12,16 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_GPU_WHITELIST_H_
-#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_GPU_WHITELIST_H_
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_GPU_COMPATIBILITY_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_GPU_COMPATIBILITY_H_
 
 #include <map>
 #include <string>
 
 #include "tensorflow/lite/delegates/gpu/common/gpu_info.h"
 #include "tensorflow/lite/delegates/gpu/delegate.h"
-#include "tensorflow/lite/experimental/acceleration/whitelist/android_info.h"
-#include "tensorflow/lite/experimental/acceleration/whitelist/devicedb.h"
+#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
+#include "tensorflow/lite/experimental/acceleration/compatibility/devicedb.h"
 
 namespace tflite {
 namespace acceleration {
@@ -39,21 +39,21 @@ namespace acceleration {
 // EXPECT_OK(tflite::acceleration::RequestAndroidInfo(&android_info));
 // EXPECT_OK(tflite::gpu::gl::EglEnvironment::NewEglEnvironment(&env));
 // EXPECT_OK(tflite::gpu::gl::RequestGpuInfo(&tflite_gpu_info));
-// tflite::acceleration::GPUWhitelist whitelist;
+// tflite::acceleration::GPUCompatibilityList list;
 // TfLiteDelegate* gpu_delegate = nullptr;
 // TfLiteGpuDelegateOptions gpu_options;
-// if (whitelist.Includes(android_info, gpu_info)) {
-//   gpu_options = whitelist.BestOptionsFor(android_info, gpu_info);
+// if (list.Includes(android_info, gpu_info)) {
+//   gpu_options = list.BestOptionsFor(android_info, gpu_info);
 //   gpu_delegate = TfLiteGpuDelegateCreate(&gpu_options);
 //   EXPECT_EQ(interpreter->ModifyGraphWithDelegate(gpu_delegate), TfLiteOk);
 // } else {
 //   // Fallback path.
 // }
-class GPUWhitelist {
+class GPUCompatibilityList {
  public:
-  // Construct whitelist from bundled data.
-  GPUWhitelist();
-  // Returns true if the provided device specs are whitelisted by the database.
+  // Construct list from bundled data.
+  GPUCompatibilityList();
+  // Returns true if the provided device specs are supported by the database.
   bool Includes(const AndroidInfo& android_info,
                 const ::tflite::gpu::GpuInfo& gpu_info) const;
 
@@ -65,21 +65,22 @@ class GPUWhitelist {
                 const ::tflite::gpu::GpuInfo& gpu_info) const;
 
   // Convert android_info and gpu_info into a set of variables used for querying
-  // the whitelist, and update variables from whitelist data. See variables.h
+  // the list, and update variables from list data. See variables.h
   // and devicedb.h for more information.
   std::map<std::string, std::string> CalculateVariables(
       const AndroidInfo& android_info,
      const ::tflite::gpu::GpuInfo& gpu_info) const;
 
-  GPUWhitelist(const GPUWhitelist&) = delete;
-  GPUWhitelist& operator=(const GPUWhitelist&) = delete;
+  GPUCompatibilityList(const GPUCompatibilityList&) = delete;
+  GPUCompatibilityList& operator=(const GPUCompatibilityList&) = delete;
 
  protected:
-  explicit GPUWhitelist(const unsigned char* whitelist_flatbuffer);
+  explicit GPUCompatibilityList(
+      const unsigned char* compatibility_list_flatbuffer);
   const DeviceDatabase* database_;
 };
 
 } // namespace acceleration
 } // namespace tflite
 
-#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_GPU_WHITELIST_H_
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_GPU_COMPATIBILITY_H_
@@ -14,7 +14,7 @@ limitations under the License.
 ==============================================================================*/
 // Simple program to convert from JSON to binary flatbuffers for given schema.
 //
-// Used for creating the binary version of a whitelist.
+// Used for creating the binary version of a compatibility list.
 //
 // The flatc command line is not available in all build environments.
 #include <fstream>
@@ -12,12 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
-#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_VARIABLES_H_
-#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_VARIABLES_H_
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_VARIABLES_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_VARIABLES_H_
 
-// This file lists generally useful whitelisting properties.
+// This file lists generally useful compatibility properties.
 // Properties starting with "tflite." are reserved.
-// Users of the whitelisting library can use arbitrary other property names.
+// Users of the compatibility library can use arbitrary other property names.
 
 namespace tflite {
 namespace acceleration {
@@ -71,17 +71,17 @@ namespace gpu {
 // GPU-delegate derived properties.
 
 // Whether the GPU delegate works in general.
-// ("UNSET", "UNKNOWN", "WHITELISTED", "BLACKLISTED").
+// ("UNSET", "UNKNOWN", "SUPPORTED", "UNSUPPORTED").
 constexpr char kStatus[] = "tflite.gpu.status";
 
 // Whether OpenCL should be allowed. Possible values are the SupportStatus enums
-// ("UNSET", "UNKNOWN", "WHITELISTED", "BLACKLISTED").
+// ("UNSET", "UNKNOWN", "SUPPORTED", "UNSUPPORTED").
 constexpr char kOpenCLStatus[] = "tflite.gpu.opencl_status";
-constexpr char kStatusWhitelisted[] = "WHITELISTED";
-constexpr char kStatusBlacklisted[] = "BLACKLISTED";
+constexpr char kStatusSupported[] = "SUPPORTED";
+constexpr char kStatusUnsupported[] = "UNSUPPORTED";
 } // namespace gpu
 
 } // namespace acceleration
 } // namespace tflite
 
-#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_VARIABLES_H_
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_VARIABLES_H_
@@ -365,8 +365,8 @@ filegroup(
     name = "portable_gpu_tests",
     srcs = [
         "src/test/java/org/tensorflow/lite/InterpreterTestHelper.java",
+        "src/test/java/org/tensorflow/lite/gpu/CompatibilityListTest.java",
         "src/test/java/org/tensorflow/lite/gpu/GpuDelegateTest.java",
-        "src/test/java/org/tensorflow/lite/gpu/WhitelistTest.java",
     ],
     visibility = ["//visibility:public"],
 )
@@ -21,13 +21,13 @@ import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.JUnit4;
 
-/** Unit tests for {@link org.tensorflow.lite.gpu.Whitelist}. */
+/** Unit tests for {@link org.tensorflow.lite.gpu.CompatibilityList}. */
 @RunWith(JUnit4.class)
-public final class WhitelistTest {
+public final class CompatibilityListTest {
 
   @Test
   public void testBasic() throws Exception {
-    try (Whitelist whitelist = new Whitelist()) {
+    try (CompatibilityList whitelist = new CompatibilityList()) {
       assertThat(whitelist.isDelegateSupportedOnThisDevice()).isTrue();
     }
   }
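For reference, a further test along the same lines could exercise the renamed getBestOptionsForThisDevice() method. The following sketch is illustrative only and is not part of this commit; it assumes the same Truth assertThat import already used by testBasic above:

  // Hypothetical additional test, not part of this change.
  @Test
  public void testBestOptionsAvailableWhenSupported() throws Exception {
    try (CompatibilityList compatibilityList = new CompatibilityList()) {
      if (compatibilityList.isDelegateSupportedOnThisDevice()) {
        // On supported devices the list should always hand back usable delegate options.
        assertThat(compatibilityList.getBestOptionsForThisDevice()).isNotNull();
      }
    }
  }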