diff --git a/tensorflow/lite/delegates/gpu/java/src/main/java/org/tensorflow/lite/gpu/BUILD b/tensorflow/lite/delegates/gpu/java/src/main/java/org/tensorflow/lite/gpu/BUILD
index ab2ad036d66..fbd7f09ce65 100644
--- a/tensorflow/lite/delegates/gpu/java/src/main/java/org/tensorflow/lite/gpu/BUILD
+++ b/tensorflow/lite/delegates/gpu/java/src/main/java/org/tensorflow/lite/gpu/BUILD
@@ -4,6 +4,9 @@ package(
filegroup(
name = "gpu_delegate",
- srcs = ["GpuDelegate.java"],
+ srcs = [
+ "GpuDelegate.java",
+ "Whitelist.java",
+ ],
visibility = ["//visibility:public"],
)
diff --git a/tensorflow/lite/delegates/gpu/java/src/main/java/org/tensorflow/lite/gpu/Whitelist.java b/tensorflow/lite/delegates/gpu/java/src/main/java/org/tensorflow/lite/gpu/Whitelist.java
new file mode 100644
index 00000000000..c0b3bf2ca37
--- /dev/null
+++ b/tensorflow/lite/delegates/gpu/java/src/main/java/org/tensorflow/lite/gpu/Whitelist.java
@@ -0,0 +1,93 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+package org.tensorflow.lite.gpu;
+
+import java.io.Closeable;
+
+/**
+ * GPU Delegate Whitelisting data.
+ *
+ * <p>The GPU delegate is not supported on all Android devices, due to differences in available
+ * OpenGL versions, driver features, and device resources. This class provides information on
+ * whether the GPU delegate is suitable for the current device.
+ *
+ * <p>This API is experimental and subject to change.
+ *
+ * <p>WARNING: the whitelist is constructed from testing done on a limited set of models. You
+ * should plan to verify that your own model(s) work.
+ *
+ * <p>Example usage:
+ *
+ * <pre>{@code
+ * Interpreter.Options options = new Interpreter.Options();
+ * try (Whitelist whitelist = new Whitelist()) {
+ *   if (whitelist.isDelegateSupportedOnThisDevice()) {
+ *     GpuDelegate.Options delegateOptions = whitelist.getBestOptionsForThisDevice();
+ *     gpuDelegate = new GpuDelegate(delegateOptions);
+ *     options.addDelegate(gpuDelegate);
+ *   }
+ * }
+ * Interpreter interpreter = new Interpreter(modelBuffer, options);
+ * }</pre>
+ */
+public class Whitelist implements Closeable {
+
+ private static final long INVALID_WHITELIST_HANDLE = 0;
+ private static final String TFLITE_GPU_LIB = "tensorflowlite_gpu_jni";
+
+ private long whitelistHandle = INVALID_WHITELIST_HANDLE;
+
+ /** Whether the GPU delegate is supported on this device. */
+ public boolean isDelegateSupportedOnThisDevice() {
+ if (whitelistHandle == INVALID_WHITELIST_HANDLE) {
+ throw new IllegalStateException("Trying to query a closed whitelist.");
+ }
+ return nativeIsDelegateSupportedOnThisDevice(whitelistHandle);
+ }
+
+ /** What options should be used for the GPU delegate. */
+ public GpuDelegate.Options getBestOptionsForThisDevice() {
+ // Placeholder for forward compatibility: return default options until the whitelist carries more information.
+ return new GpuDelegate.Options();
+ }
+
+ public Whitelist() {
+ whitelistHandle = createWhitelist();
+ }
+
+ /**
+ * Frees TFLite resources in C runtime.
+ *
+ * <p>The user is expected to call this method explicitly.
+ */
+ @Override
+ public void close() {
+ if (whitelistHandle != INVALID_WHITELIST_HANDLE) {
+ deleteWhitelist(whitelistHandle);
+ whitelistHandle = INVALID_WHITELIST_HANDLE;
+ }
+ }
+
+ static {
+ System.loadLibrary(TFLITE_GPU_LIB);
+ }
+
+ private static native long createWhitelist();
+
+ private static native void deleteWhitelist(long handle);
+
+ private static native boolean nativeIsDelegateSupportedOnThisDevice(long handle);
+}
diff --git a/tensorflow/lite/delegates/gpu/java/src/main/native/BUILD b/tensorflow/lite/delegates/gpu/java/src/main/native/BUILD
index 774fd417758..57d6e013a4a 100644
--- a/tensorflow/lite/delegates/gpu/java/src/main/native/BUILD
+++ b/tensorflow/lite/delegates/gpu/java/src/main/native/BUILD
@@ -26,7 +26,13 @@ cc_library(
],
deps = [
"//tensorflow/lite/delegates/gpu:delegate",
+ "//tensorflow/lite/delegates/gpu/common:gpu_info",
+ "//tensorflow/lite/delegates/gpu/gl:egl_environment",
+ "//tensorflow/lite/delegates/gpu/gl:request_gpu_info",
+ "//tensorflow/lite/experimental/acceleration/whitelist:android_info",
+ "//tensorflow/lite/experimental/acceleration/whitelist:gpu_whitelist",
"//tensorflow/lite/java/jni",
+ "@com_google_absl//absl/status",
],
alwayslink = 1,
)
diff --git a/tensorflow/lite/delegates/gpu/java/src/main/native/gpu_delegate_jni.cc b/tensorflow/lite/delegates/gpu/java/src/main/native/gpu_delegate_jni.cc
index 900cc0e0d75..017ffcfcd32 100644
--- a/tensorflow/lite/delegates/gpu/java/src/main/native/gpu_delegate_jni.cc
+++ b/tensorflow/lite/delegates/gpu/java/src/main/native/gpu_delegate_jni.cc
@@ -15,7 +15,13 @@ limitations under the License.
#include <jni.h>
+#include "absl/status/status.h"
+#include "tensorflow/lite/delegates/gpu/common/gpu_info.h"
#include "tensorflow/lite/delegates/gpu/delegate.h"
+#include "tensorflow/lite/delegates/gpu/gl/egl_environment.h"
+#include "tensorflow/lite/delegates/gpu/gl/request_gpu_info.h"
+#include "tensorflow/lite/experimental/acceleration/whitelist/android_info.h"
+#include "tensorflow/lite/experimental/acceleration/whitelist/gpu_whitelist.h"
#ifdef __cplusplus
extern "C" {
@@ -44,6 +50,66 @@ JNIEXPORT void JNICALL Java_org_tensorflow_lite_gpu_GpuDelegate_deleteDelegate(
TfLiteGpuDelegateV2Delete(reinterpret_cast<TfLiteDelegate*>(delegate));
}
+namespace {
+class WhitelistHelper {
+ public:
+ absl::Status ReadInfo() {
+ auto status = tflite::acceleration::RequestAndroidInfo(&android_info_);
+ if (!status.ok()) return status;
+
+ if (android_info_.android_sdk_version < "21") {
+ // Weakly linked symbols may not be available on pre-21, and the GPU is
+ // not supported anyway, so return early.
+ return absl::OkStatus();
+ }
+
+ std::unique_ptr<tflite::gpu::gl::EglEnvironment> env;
+ status = tflite::gpu::gl::EglEnvironment::NewEglEnvironment(&env);
+ if (!status.ok()) return status;
+
+ status = tflite::gpu::gl::RequestGpuInfo(&gpu_info_);
+ if (!status.ok()) return status;
+
+ return absl::OkStatus();
+ }
+
+ bool IsDelegateSupportedOnThisDevice() {
+ return whitelist_.Includes(android_info_, gpu_info_);
+ }
+
+ private:
+ tflite::acceleration::AndroidInfo android_info_;
+ tflite::gpu::GpuInfo gpu_info_;
+ tflite::acceleration::GPUWhitelist whitelist_;
+};
+} // namespace
+
+JNIEXPORT jlong JNICALL Java_org_tensorflow_lite_gpu_Whitelist_createWhitelist(
+ JNIEnv* env, jclass clazz) {
+ WhitelistHelper* whitelist = new WhitelistHelper;
+ auto status = whitelist->ReadInfo();
+ // Errors in ReadInfo should almost always be failures to construct the OpenGL
+ // environment. Treating that as "GPU unsupported" is reasonable, and we can
+ // swallow the error.
+ status.IgnoreError();
+ return reinterpret_cast<jlong>(whitelist);
+}
+
+JNIEXPORT jboolean JNICALL
+Java_org_tensorflow_lite_gpu_Whitelist_nativeIsDelegateSupportedOnThisDevice(
+ JNIEnv* env, jclass clazz, jlong whitelist_handle) {
+ WhitelistHelper* whitelist =
+ reinterpret_cast<WhitelistHelper*>(whitelist_handle);
+ return whitelist->IsDelegateSupportedOnThisDevice() ? JNI_TRUE : JNI_FALSE;
+}
+
+JNIEXPORT void JNICALL Java_org_tensorflow_lite_gpu_Whitelist_deleteWhitelist(
+ JNIEnv* env, jclass clazz, jlong whitelist_handle) {
+ WhitelistHelper* whitelist =
+ reinterpret_cast<WhitelistHelper*>(whitelist_handle);
+ delete whitelist;
+}
+
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
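For context on how the pieces above fit together outside of JNI, here is a minimal, illustrative C++ sketch that performs the same check as `WhitelistHelper` directly; the function name `IsGpuDelegateSupported` is hypothetical and not part of this change, but every call it makes appears in the new JNI code above.

```cpp
// Illustrative sketch only: mirrors WhitelistHelper::ReadInfo() and
// IsDelegateSupportedOnThisDevice() from gpu_delegate_jni.cc without the JNI
// plumbing. The wrapper function name is hypothetical.
#include <memory>

#include "tensorflow/lite/delegates/gpu/common/gpu_info.h"
#include "tensorflow/lite/delegates/gpu/gl/egl_environment.h"
#include "tensorflow/lite/delegates/gpu/gl/request_gpu_info.h"
#include "tensorflow/lite/experimental/acceleration/whitelist/android_info.h"
#include "tensorflow/lite/experimental/acceleration/whitelist/gpu_whitelist.h"

bool IsGpuDelegateSupported() {
  tflite::acceleration::AndroidInfo android_info;
  if (!tflite::acceleration::RequestAndroidInfo(&android_info).ok()) return false;
  // Mirrors the pre-21 early-out in WhitelistHelper::ReadInfo().
  if (android_info.android_sdk_version < "21") return false;

  // An EGL context is needed before the GPU can be queried.
  std::unique_ptr<tflite::gpu::gl::EglEnvironment> env;
  if (!tflite::gpu::gl::EglEnvironment::NewEglEnvironment(&env).ok()) return false;

  tflite::gpu::GpuInfo gpu_info;
  if (!tflite::gpu::gl::RequestGpuInfo(&gpu_info).ok()) return false;

  // Consult the embedded whitelist for this (Android, GPU) combination.
  tflite::acceleration::GPUWhitelist whitelist;
  return whitelist.Includes(android_info, gpu_info);
}
```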
diff --git a/tensorflow/lite/experimental/acceleration/README.md b/tensorflow/lite/experimental/acceleration/README.md
new file mode 100644
index 00000000000..c3209fe99e9
--- /dev/null
+++ b/tensorflow/lite/experimental/acceleration/README.md
@@ -0,0 +1,15 @@
+# Accelerator whitelisting
+
+Experimental library and tools for determining whether an accelerator engine
+works well on a given device, and for a given model.
+
+## Platform-agnostic, Android-first
+
+This code is Android-focused: the much smaller set of configurations on iOS
+means there is much less need for whitelisting there.
+
+## Not just for TfLite
+
+This code lives in the TfLite codebase, since TfLite is the first open-source
+customer. It is, however, meant to support other users (direct use of NNAPI,
+MediaPipe).
diff --git a/tensorflow/lite/experimental/acceleration/whitelist/BUILD b/tensorflow/lite/experimental/acceleration/whitelist/BUILD
new file mode 100644
index 00000000000..96c3a6da8c0
--- /dev/null
+++ b/tensorflow/lite/experimental/acceleration/whitelist/BUILD
@@ -0,0 +1,157 @@
+# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+load("@flatbuffers//:build_defs.bzl", "flatbuffer_cc_library")
+load("//tensorflow/lite:special_rules.bzl", "tflite_portable_test_suite")
+
+package(
+ default_visibility = [
+ "//visibility:public",
+ ],
+ licenses = ["notice"], # Apache 2.0
+)
+
+flatbuffer_cc_library(
+ name = "database_fbs",
+ srcs = ["database.fbs"],
+)
+
+cc_library(
+ name = "devicedb",
+ srcs = [
+ "devicedb.cc",
+ ],
+ hdrs = [
+ "devicedb.h",
+ "variables.h",
+ ],
+ deps = [
+ ":database_fbs",
+ ],
+)
+
+cc_binary(
+ name = "json_to_fb",
+ srcs = ["json_to_fb.cc"],
+ deps = [
+ "//tensorflow/lite/tools:command_line_flags",
+ "@flatbuffers",
+ ],
+)
+
+genrule(
+ name = "devicedb-sample_bin",
+ srcs = [
+ "database.fbs",
+ "devicedb-sample.json",
+ ],
+ outs = ["devicedb-sample.bin"],
+ cmd = """
+ $(location :json_to_fb) \
+ --fbs=$(location :database.fbs) \
+ --json_input=$(location :devicedb-sample.json) \
+ --fb_output=$(@)
+ """,
+ tools = [":json_to_fb"],
+)
+
+genrule(
+ name = "devicedb-sample_cc",
+ srcs = ["devicedb-sample.bin"],
+ outs = [
+ "devicedb-sample.cc",
+ "devicedb-sample.h",
+ ],
+ # convert_file_to_c_source doesn't mark the global 'extern', which is needed
+ # because const globals have internal linkage in C++ by default.
+ cmd = """
+ $(location //tensorflow/lite/python:convert_file_to_c_source) \
+ --input_tflite_file $(location :devicedb-sample.bin) \
+ --output_header_file $(location :devicedb-sample.h) \
+ --output_source_file $(location :devicedb-sample.cc) \
+ --array_variable_name g_tflite_acceleration_devicedb_sample_binary
+ perl -p -i -e 's/const unsigned char/extern const unsigned char/' $(location :devicedb-sample.cc)
+ """,
+ tools = ["//tensorflow/lite/python:convert_file_to_c_source"],
+)
+
+cc_test(
+ name = "devicedb_test",
+ srcs = [
+ "devicedb-sample.cc",
+ "devicedb-sample.h",
+ "devicedb_test.cc",
+ ],
+ deps = [
+ ":database_fbs",
+ ":devicedb",
+ "//tensorflow/lite/testing:util",
+ "@com_google_googletest//:gtest",
+ "@flatbuffers",
+ ],
+)
+
+genrule(
+ name = "gpu_whitelist_binary",
+ srcs = ["gpu_whitelist.bin"],
+ outs = [
+ "gpu_whitelist_binary.h",
+ "gpu_whitelist_binary.cc",
+ ],
+ # convert_file_to_c_source doesn't mark the global 'extern', which is needed
+ # because const globals have internal linkage in C++ by default.
+ cmd = """
+ $(location //tensorflow/lite/python:convert_file_to_c_source) \
+ --input_tflite_file $(location :gpu_whitelist.bin) \
+ --output_header_file $(location :gpu_whitelist_binary.h) \
+ --output_source_file $(location :gpu_whitelist_binary.cc) \
+ --array_variable_name g_tflite_acceleration_gpu_whitelist_binary
+ perl -p -i -e 's/const unsigned char/extern const unsigned char/' $(location :gpu_whitelist_binary.cc)
+ """,
+ tools = ["//tensorflow/lite/python:convert_file_to_c_source"],
+)
+
+cc_library(
+ name = "android_info",
+ srcs = ["android_info.cc"],
+ hdrs = ["android_info.h"],
+ deps = [
+ "@com_google_absl//absl/status",
+ ],
+)
+
+cc_library(
+ name = "gpu_whitelist",
+ srcs = [
+ "gpu_whitelist.cc",
+ "gpu_whitelist_binary.cc",
+ "gpu_whitelist_binary.h",
+ ],
+ hdrs = [
+ "gpu_whitelist.h",
+ ],
+ deps = [
+ ":android_info",
+ ":database_fbs",
+ ":devicedb",
+ "//tensorflow/lite/delegates/gpu:delegate",
+ "//tensorflow/lite/delegates/gpu/common:gpu_info",
+ "@com_google_absl//absl/status",
+ "@com_google_absl//absl/strings",
+ "@flatbuffers",
+ ],
+)
+
+tflite_portable_test_suite()
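The two `convert_file_to_c_source` genrules above embed the flatbuffer binaries as C++ byte arrays, and the `perl` step adds `extern` because const globals otherwise have internal linkage and cannot be referenced from other translation units. A rough sketch of what the post-processed pair is expected to look like; the exact generated layout is an assumption, and only the array name comes from the `--array_variable_name` flag above.

```cpp
// Sketch of gpu_whitelist_binary.h / gpu_whitelist_binary.cc after the perl
// fix-up (illustrative; exact generated contents may differ).

// gpu_whitelist_binary.h
extern const unsigned char g_tflite_acceleration_gpu_whitelist_binary[];

// gpu_whitelist_binary.cc
extern const unsigned char g_tflite_acceleration_gpu_whitelist_binary[] = {
    // ... serialized tflite.acceleration.DeviceDatabase flatbuffer bytes ...
    0x0c, 0x00, 0x00, 0x00,
};
```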
diff --git a/tensorflow/lite/experimental/acceleration/whitelist/README.md b/tensorflow/lite/experimental/acceleration/whitelist/README.md
new file mode 100644
index 00000000000..24ee794aef6
--- /dev/null
+++ b/tensorflow/lite/experimental/acceleration/whitelist/README.md
@@ -0,0 +1,13 @@
+# GPU delegate whitelist
+
+This package provides data and code for deciding if the GPU delegate is
+supported on a specific Android device.
+
+## Customizing the GPU whitelist
+
+- Convert the checked-in flatbuffer to JSON by running `flatc -t --raw-binary
+  --strict-json database.fbs -- gpu_whitelist.bin`
+- Edit the JSON
+- Convert the JSON back to a flatbuffer by running `flatc -b database.fbs --
+  gpu_whitelist.json`
+- Rebuild ../../../java:tensorflow-lite-gpu
diff --git a/tensorflow/lite/experimental/acceleration/whitelist/android_info.cc b/tensorflow/lite/experimental/acceleration/whitelist/android_info.cc
new file mode 100644
index 00000000000..4618ac90807
--- /dev/null
+++ b/tensorflow/lite/experimental/acceleration/whitelist/android_info.cc
@@ -0,0 +1,52 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/acceleration/whitelist/android_info.h"
+
+#include <string>
+
+#include "absl/status/status.h"
+
+#ifdef __ANDROID__
+#include <sys/system_properties.h>
+#endif // __ANDROID__
+
+namespace {
+std::string GetPropertyValue(const std::string& property) {
+#ifdef __ANDROID__
+ char value[PROP_VALUE_MAX];
+ __system_property_get(property.c_str(), value);
+ return std::string(value);
+#else // !__ANDROID__
+ return std::string();
+#endif // __ANDROID__
+}
+} // namespace
+
+namespace tflite {
+namespace acceleration {
+
+absl::Status RequestAndroidInfo(AndroidInfo* info_out) {
+ if (!info_out) {
+ return absl::InvalidArgumentError("info_out may not be null");
+ }
+ info_out->android_sdk_version = GetPropertyValue("ro.build.version.sdk");
+ info_out->device = GetPropertyValue("ro.product.device");
+ info_out->model = GetPropertyValue("ro.product.model");
+ info_out->manufacturer = GetPropertyValue("ro.product.manufacturer");
+ return absl::OkStatus();
+}
+
+} // namespace acceleration
+} // namespace tflite
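A small usage sketch for `RequestAndroidInfo` as defined above; the `main` wrapper is illustrative. On non-Android builds every property comes back as an empty string, per `GetPropertyValue`.

```cpp
// Minimal usage sketch for RequestAndroidInfo (illustrative wrapper only).
#include <iostream>

#include "absl/status/status.h"
#include "tensorflow/lite/experimental/acceleration/whitelist/android_info.h"

int main() {
  tflite::acceleration::AndroidInfo info;
  absl::Status status = tflite::acceleration::RequestAndroidInfo(&info);
  if (!status.ok()) {
    std::cerr << "RequestAndroidInfo failed: " << status.message() << "\n";
    return 1;
  }
  // On non-Android builds all fields are empty strings.
  std::cout << "sdk=" << info.android_sdk_version << " device=" << info.device
            << " model=" << info.model
            << " manufacturer=" << info.manufacturer << "\n";
  return 0;
}
```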
diff --git a/tensorflow/lite/experimental/acceleration/whitelist/android_info.h b/tensorflow/lite/experimental/acceleration/whitelist/android_info.h
new file mode 100644
index 00000000000..81b3ee7479c
--- /dev/null
+++ b/tensorflow/lite/experimental/acceleration/whitelist/android_info.h
@@ -0,0 +1,43 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_ANDROID_INFO_H_
+#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_ANDROID_INFO_H_
+
+#include <string>
+
+#include "absl/status/status.h"
+
+namespace tflite {
+namespace acceleration {
+
+// Information about an Android device, used for determining whitelisting
+// status.
+struct AndroidInfo {
+ // Property ro.build.version.sdk
+ std::string android_sdk_version;
+ // Property ro.product.model
+ std::string model;
+ // Property ro.product.device
+ std::string device;
+ // Property ro.product.manufacturer
+ std::string manufacturer;
+};
+
+absl::Status RequestAndroidInfo(AndroidInfo* info_out);
+
+} // namespace acceleration
+} // namespace tflite
+
+#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_WHITELIST_ANDROID_INFO_H_
diff --git a/tensorflow/lite/experimental/acceleration/whitelist/database.fbs b/tensorflow/lite/experimental/acceleration/whitelist/database.fbs
new file mode 100644
index 00000000000..6340fcfcf3a
--- /dev/null
+++ b/tensorflow/lite/experimental/acceleration/whitelist/database.fbs
@@ -0,0 +1,58 @@
+// Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+namespace tflite.acceleration;
+
+enum Comparison : byte {
+ EQUAL = 0,
+ MINIMUM = 1,
+}
+
+// Mapping from available device features to whitelisting decisions. Basic usage is to:
+// 1) Map easily available device data (like Android version,
+// Manufacturer, Device) to things like SoC vendor, SoC model.
+// 2) Map complete device data to delegate-specific features and support status
+// 3) Map delegate-specific features to delegate configuration.
+//
+// The structure describes a decision tree, with multiple matching branches.
+// The branches are applied depth-first.
+table DeviceDatabase {
+ root:[tflite.acceleration.DeviceDecisionTreeNode];
+}
+
+table DeviceDecisionTreeNode {
+ // The variables are strings, as we have multiple clients that want to
+ // introduce their own fields. Known variables are listed in variables.h.
+ variable:string (shared);
+ comparison:tflite.acceleration.Comparison;
+ items:[tflite.acceleration.DeviceDecisionTreeEdge];
+}
+
+table DeviceDecisionTreeEdge {
+ // The variable value under which this item matches.
+ value:string (key, shared);
+ // Which child branches should also be consulted and used to override this
+ // node.
+ children:[tflite.acceleration.DeviceDecisionTreeNode];
+ // What information can be derived about this device.
+ derived_properties:[tflite.acceleration.DerivedProperty];
+}
+
+// Derived variable value to combine with detected variables.
+table DerivedProperty {
+ variable:string (shared);
+ value:string (shared);
+}
+
+root_type DeviceDatabase;
diff --git a/tensorflow/lite/experimental/acceleration/whitelist/devicedb-sample.json b/tensorflow/lite/experimental/acceleration/whitelist/devicedb-sample.json
new file mode 100644
index 00000000000..187989673d1
--- /dev/null
+++ b/tensorflow/lite/experimental/acceleration/whitelist/devicedb-sample.json
@@ -0,0 +1,169 @@
+{
+ "root": [
+ {
+ "variable": "tflite.device_model",
+ "items": [
+ {
+ "value": "m712c",
+ "derived_properties": [
+ {
+ "variable": "tflite.soc_model",
+ "value": "exynos_7872"
+ }
+ ]
+ },
+ {
+ "value": "sc_02l",
+ "derived_properties": [
+ {
+ "variable": "tflite.soc_model",
+ "value": "exynos_7885"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "variable": "tflite.opengl_es_version",
+ "items": [
+ {
+ "value": "3.1",
+ "children": [
+ {
+ "variable": "tflite.soc_model",
+ "items": [
+ {
+ "value": "exynos_7872",
+ "children": [
+ {
+ "variable": "tflite.android_sdk_version",
+ "items": [
+ {
+ "value": "24",
+ "derived_properties": [
+ {
+ "variable": "tflite.gpu.status",
+ "value": "WHITELISTED"
+ }
+ ]
+ }
+ ],
+ "comparison": "MINIMUM"
+ }
+ ]
+ },
+ {
+ "value": "exynos_7883",
+ "children": [
+ {
+ "variable": "tflite.android_sdk_version",
+ "items": [
+ {
+ "value": "28",
+ "derived_properties": [
+ {
+ "variable": "tflite.gpu.status",
+ "value": "WHITELISTED"
+ }
+ ]
+ }
+ ],
+ "comparison": "MINIMUM"
+ }
+ ]
+ }
+ ]
+ }
+
+ ]
+ }
+ ]
+ },
+ {
+ "variable": "tflite.android_sdk_version",
+ "items": [
+ {
+ "value": "21",
+ "children": [
+ {
+ "variable": "tflite.device_model",
+ "items": [
+ {
+ "value": "huawei_gra_l09",
+ "children": [
+ {
+ "variable": "tflite.device_name",
+ "items": [
+ {
+ "value": "hwgra",
+ "derived_properties": [
+ {
+ "variable": "tflite.gpu.status",
+ "value": "WHITELISTED"
+ },
+ {
+ "variable": "tflite.gpu.opencl_status",
+ "value": "WHITELISTED"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "value": "24",
+ "children": [
+ {
+ "variable": "tflite.device_model",
+ "items": [
+ {
+ "value": "sm_j810f",
+ "children": [
+ {
+ "variable": "tflite.device_name",
+ "items": [
+ {
+ "value": "j8y18lte",
+ "derived_properties": [
+ {
+ "variable": "tflite.gpu.status",
+ "value": "BLACKLISTED"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "value": "sm_j810m",
+ "children": [
+ {
+ "variable": "tflite.device_name",
+ "items": [
+ {
+ "value": "j8y18lte",
+ "derived_properties": [
+ {
+ "variable": "tflite.gpu.opencl_status",
+ "value": "WHITELISTED"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ ]
+}
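To make the depth-first override semantics from `database.fbs` concrete, the sketch below traces the sample database above for one device. The flatc-generated `GetDeviceDatabase` accessor follows from `root_type DeviceDatabase`; the generated header name and the `UpdateVariablesFromDatabase` entry point are assumptions, since `devicedb.h` is not shown in this diff.

```cpp
// Illustrative trace of the sample database above. The generated headers and
// the UpdateVariablesFromDatabase entry point are assumptions, not verbatim
// from this patch.
#include <map>
#include <string>

#include "tensorflow/lite/experimental/acceleration/whitelist/database_generated.h"  // assumed name
#include "tensorflow/lite/experimental/acceleration/whitelist/devicedb-sample.h"
#include "tensorflow/lite/experimental/acceleration/whitelist/devicedb.h"

void Example() {
  const tflite::acceleration::DeviceDatabase* database =
      tflite::acceleration::GetDeviceDatabase(
          g_tflite_acceleration_devicedb_sample_binary);

  // Values detected on the device.
  std::map<std::string, std::string> variables = {
      {"tflite.device_model", "m712c"},
      {"tflite.opengl_es_version", "3.1"},
      {"tflite.android_sdk_version", "26"},
  };

  // Assumed API: fills in derived properties by walking the tree depth-first.
  tflite::acceleration::UpdateVariablesFromDatabase(&variables, *database);

  // Expected derivations for this sample:
  //   tflite.device_model == "m712c"      -> tflite.soc_model = "exynos_7872"
  //   GLES 3.1 + exynos_7872 + sdk >= 24  -> tflite.gpu.status = "WHITELISTED"
}
```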
diff --git a/tensorflow/lite/experimental/acceleration/whitelist/devicedb.cc b/tensorflow/lite/experimental/acceleration/whitelist/devicedb.cc
new file mode 100644
index 00000000000..978495a3234
--- /dev/null
+++ b/tensorflow/lite/experimental/acceleration/whitelist/devicedb.cc
@@ -0,0 +1,91 @@
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+#include "tensorflow/lite/experimental/acceleration/whitelist/devicedb.h"
+
+#include <map>