Migrate the TFLite C API out of lite/experimental
Follow-up work will involve introducing a package target that bundles the native shared library with all necessary headers. RELNOTES: Migrated the TFLite C inference API out of experimental into lite/c. PiperOrigin-RevId: 282827414 Change-Id: Ibbef3dee899576b770c9410d212a0eb4087fe710
This commit is contained in:
parent
904f7fea4c
commit
0699728bf9
@ -153,6 +153,7 @@ def tflite_cc_shared_object(
|
||||
linkstatic = 1,
|
||||
deps = [],
|
||||
visibility = None,
|
||||
per_os_targets = False,
|
||||
tags = None):
|
||||
"""Builds a shared object for TFLite."""
|
||||
tf_cc_shared_object(
|
||||
@ -164,6 +165,7 @@ def tflite_cc_shared_object(
|
||||
deps = deps,
|
||||
visibility = visibility,
|
||||
tags = tags,
|
||||
per_os_targets = per_os_targets,
|
||||
)
|
||||
|
||||
def tf_to_tflite(name, src, options, out):
|
||||
|
@ -1,8 +1,128 @@
|
||||
load(
|
||||
"//tensorflow/lite:build_def.bzl",
|
||||
"tflite_cc_shared_object",
|
||||
"tflite_copts",
|
||||
)
|
||||
|
||||
package(
|
||||
default_visibility = ["//visibility:public"],
|
||||
default_visibility = [":experimental"],
|
||||
licenses = ["notice"], # Apache 2.0
|
||||
)
|
||||
|
||||
package_group(
|
||||
name = "experimental",
|
||||
packages = [
|
||||
"//tensorflow/lite/...",
|
||||
"//third_party/dart/tflite_native/...", # whitelisted
|
||||
],
|
||||
)
|
||||
|
||||
# Generates a platform-specific shared library containing the TensorFlow Lite C
|
||||
# API implementation as defined in `c_api.h`. The exact output library name
|
||||
# is platform dependent:
|
||||
# - Linux/Android: `libtensorflowlite_c.so`
|
||||
# - Mac: `libtensorflowlite_c.dylib`
|
||||
# - Windows: `tensorflowlite_c.dll`
|
||||
tflite_cc_shared_object(
|
||||
name = "tensorflowlite_c",
|
||||
linkopts = select({
|
||||
"//tensorflow:macos": [
|
||||
"-Wl,-exported_symbols_list,$(location //tensorflow/lite/c:exported_symbols.lds)",
|
||||
],
|
||||
"//tensorflow:windows": [],
|
||||
"//conditions:default": [
|
||||
"-z defs",
|
||||
"-Wl,--version-script,$(location //tensorflow/lite/c:version_script.lds)",
|
||||
],
|
||||
}),
|
||||
per_os_targets = True,
|
||||
deps = [
|
||||
":c_api",
|
||||
":c_api_experimental",
|
||||
":exported_symbols.lds",
|
||||
":version_script.lds",
|
||||
],
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "c_api_internal",
|
||||
srcs = [
|
||||
"c_api.h",
|
||||
"common.h",
|
||||
],
|
||||
hdrs = ["c_api_internal.h"],
|
||||
copts = tflite_copts(),
|
||||
visibility = ["//visibility:private"],
|
||||
deps = [
|
||||
":common",
|
||||
"//tensorflow/lite:framework",
|
||||
],
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "c_api",
|
||||
srcs = ["c_api.cc"],
|
||||
hdrs = [
|
||||
"c_api.h",
|
||||
"common.h",
|
||||
],
|
||||
copts = tflite_copts(),
|
||||
visibility = [
|
||||
":experimental",
|
||||
],
|
||||
deps = [
|
||||
":c_api_internal",
|
||||
":common",
|
||||
"//tensorflow/lite:framework",
|
||||
"//tensorflow/lite:version",
|
||||
"//tensorflow/lite/kernels:builtin_ops",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "c_api_experimental",
|
||||
srcs = ["c_api_experimental.cc"],
|
||||
hdrs = ["c_api_experimental.h"],
|
||||
copts = tflite_copts(),
|
||||
deps = [
|
||||
":c_api",
|
||||
":c_api_internal",
|
||||
"//tensorflow/lite:kernel_api",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
||||
cc_test(
|
||||
name = "c_api_test",
|
||||
size = "small",
|
||||
srcs = ["c_api_test.cc"],
|
||||
data = [
|
||||
"//tensorflow/lite:testdata/add.bin",
|
||||
"//tensorflow/lite:testdata/add_quantized.bin",
|
||||
],
|
||||
deps = [
|
||||
":c_api",
|
||||
"//tensorflow/lite/c:c_api_internal",
|
||||
"//tensorflow/lite/testing:util",
|
||||
"@com_google_googletest//:gtest",
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
name = "c_api_experimental_test",
|
||||
size = "small",
|
||||
srcs = ["c_api_experimental_test.cc"],
|
||||
data = ["//tensorflow/lite:testdata/add.bin"],
|
||||
deps = [
|
||||
":c_api",
|
||||
":c_api_experimental",
|
||||
"//tensorflow/lite:kernel_api",
|
||||
"//tensorflow/lite/testing:util",
|
||||
"@com_google_googletest//:gtest",
|
||||
],
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "common",
|
||||
srcs = ["common.c"],
|
||||
@ -13,6 +133,7 @@ cc_library(
|
||||
visibility = [
|
||||
"//tensorflow/lite:__subpackages__",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
||||
# For use with library targets that can't use relative paths.
|
||||
|
48
tensorflow/lite/c/README.md
Normal file
48
tensorflow/lite/c/README.md
Normal file
@ -0,0 +1,48 @@
|
||||
# TensorFlow Lite C API
|
||||
|
||||
This directory contains C APIs for TensorFlow Lite. This includes C APIs
|
||||
for common types, like kernels and delegates, as well as an explicit C API
|
||||
for inference.
|
||||
|
||||
## Header summary
|
||||
|
||||
Each public C header contains types and methods for specific uses:
|
||||
|
||||
* `common.h` - Contains common C enums, types and methods used throughout
|
||||
TensorFlow Lite. This includes everything from error codes, to the kernel
|
||||
and delegate APIs.
|
||||
* `builtin_op_data.h` - Contains op-specific data that is used for builtin
|
||||
kernels. This should only be used when (re)implementing a builtin operator.
|
||||
* `c_api.h` - Contains the TensorFlow Lite C API for inference. The
|
||||
functionality here is largely equivalent (though a strict subset of) the
|
||||
functionality provided by the C++ `Interpreter` API.
|
||||
* `c_api_experimental.h` - Contains experimental C API methods for inference.
|
||||
These methods are useful and usable, but aren't yet part of the stable API.
|
||||
|
||||
## Using the C API
|
||||
|
||||
See the [`c_api.h`](c_api.h) header for API usage details.
|
||||
|
||||
## Building the C API
|
||||
|
||||
A native shared library target that contains the C API for inference has been
|
||||
provided. Assuming a working [bazel](https://bazel.build/versions/master/docs/install.html)
|
||||
configuration, this can be built as follows:
|
||||
|
||||
```sh
|
||||
bazel build -c opt --cxxopt=--std=c++11 //tensorflow/lite/c:tensorflowlite_c
|
||||
```
|
||||
|
||||
and for Android (replace `android_arm` with `android_arm64` for 64-bit),
|
||||
assuming you've [configured your project for Android builds](../g3doc/guide/android.md):
|
||||
|
||||
```sh
|
||||
bazel build -c opt --cxxopt=--std=c++11 --config=android_arm \
|
||||
//tensorflow/lite/c:tensorflowlite_c
|
||||
```
|
||||
|
||||
The generated shared library will be available in your
|
||||
`bazel-bin/tensorflow/lite/c` directory. A target which packages the shared
|
||||
library together with the necessary headers (`c_api.h`, `c_api_experimental.h`
|
||||
and `common.h`) will be available soon, and will also be released as a prebuilt
|
||||
archive (together with existing prebuilt packages for Android/iOS).
|
@ -12,13 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#include "tensorflow/lite/experimental/c/c_api.h"
|
||||
#include "tensorflow/lite/c/c_api.h"
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "tensorflow/lite/c/c_api_internal.h"
|
||||
#include "tensorflow/lite/error_reporter.h"
|
||||
#include "tensorflow/lite/experimental/c/c_api_internal.h"
|
||||
#include "tensorflow/lite/experimental/c/c_api_types.h"
|
||||
#include "tensorflow/lite/interpreter.h"
|
||||
#include "tensorflow/lite/kernels/register.h"
|
||||
#include "tensorflow/lite/model.h"
|
@ -12,28 +12,59 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_C_C_API_H_
|
||||
#define TENSORFLOW_LITE_EXPERIMENTAL_C_C_API_H_
|
||||
#ifndef TENSORFLOW_LITE_C_C_API_H_
|
||||
#define TENSORFLOW_LITE_C_C_API_H_
|
||||
|
||||
#include <stdarg.h>
|
||||
#include <stdint.h>
|
||||
|
||||
// Eventually the various C APIs defined in context.h will be migrated into
|
||||
// the appropriate /c/c_api*.h header. For now, we pull in existing definitions
|
||||
// for convenience.
|
||||
#include "c_api_types.h"
|
||||
#include "common.h"
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Experimental C API for TensorFlowLite.
|
||||
//
|
||||
// The API leans towards simplicity and uniformity instead of convenience, as
|
||||
// most usage will be by language-specific wrappers.
|
||||
//
|
||||
// Conventions:
|
||||
// * We use the prefix TfLite for everything in the API.
|
||||
// * size_t is used to represent byte sizes of objects that are
|
||||
// materialized in the address space of the calling process.
|
||||
// * int is used as an index into arrays.
|
||||
/// C API for TensorFlow Lite.
|
||||
///
|
||||
/// The API leans towards simplicity and uniformity instead of convenience, as
|
||||
/// most usage will be by language-specific wrappers. It provides largely the
|
||||
/// same set of functionality as that of the C++ TensorFlow Lite `Interpreter`
|
||||
/// API, but is useful for shared libraries where having a stable ABI boundary
|
||||
/// is important.
|
||||
///
|
||||
/// Conventions:
|
||||
/// * We use the prefix TfLite for everything in the API.
|
||||
/// * size_t is used to represent byte sizes of objects that are
|
||||
/// materialized in the address space of the calling process.
|
||||
/// * int is used as an index into arrays.
|
||||
///
|
||||
/// Usage:
|
||||
/// <pre><code>
|
||||
/// // Create the model and interpreter options.
|
||||
/// TfLiteModel* model = TfLiteModelCreateFromFile("/path/to/model.tflite");
|
||||
/// TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
|
||||
/// TfLiteInterpreterOptionsSetNumThreads(options, 2);
|
||||
///
|
||||
/// // Create the interpreter.
|
||||
/// TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
|
||||
///
|
||||
/// // Allocate tensors and populate the input tensor data.
|
||||
/// TfLiteInterpreterAllocateTensors(interpreter);
|
||||
/// TfLiteTensor* input_tensor =
|
||||
/// TfLiteInterpreterGetInputTensor(interpreter, 0);
|
||||
/// TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
|
||||
/// input.size() * sizeof(float));
|
||||
///
|
||||
/// // Execute inference.
|
||||
/// TfLiteInterpreterInvoke(interpreter);
|
||||
///
|
||||
/// // Extract the output tensor data.
|
||||
/// TfLiteTensor* output_tensor =
|
||||
///     TfLiteInterpreterGetOutputTensor(interpreter, 0);
|
||||
/// TfLiteTensorCopyToBuffer(output_tensor, output.data(),
|
||||
/// output.size() * sizeof(float));
|
||||
///
|
||||
/// // Dispose of the model and interpreter objects.
|
||||
/// TfLiteInterpreterDelete(interpreter);
|
||||
/// TfLiteInterpreterOptionsDelete(options);
|
||||
/// TfLiteModelDelete(model);
|
||||
|
||||
#ifdef SWIG
|
||||
#define TFL_CAPI_EXPORT
|
||||
@ -235,4 +266,4 @@ TFL_CAPI_EXPORT extern TfLiteStatus TfLiteTensorCopyToBuffer(
|
||||
} // extern "C"
|
||||
#endif // __cplusplus
|
||||
|
||||
#endif // TENSORFLOW_LITE_EXPERIMENTAL_C_C_API_H_
|
||||
#endif // TENSORFLOW_LITE_C_C_API_H_
|
@ -13,9 +13,9 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
|
||||
#include "tensorflow/lite/experimental/c/c_api_experimental.h"
|
||||
#include "tensorflow/lite/c/c_api_experimental.h"
|
||||
|
||||
#include "tensorflow/lite/experimental/c/c_api_internal.h"
|
||||
#include "tensorflow/lite/c/c_api_internal.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_C_C_API_EXPERIMENTAL_H_
|
||||
#define TENSORFLOW_LITE_EXPERIMENTAL_C_C_API_EXPERIMENTAL_H_
|
||||
#ifndef TENSORFLOW_LITE_C_C_API_EXPERIMENTAL_H_
|
||||
#define TENSORFLOW_LITE_C_C_API_EXPERIMENTAL_H_
|
||||
|
||||
#include "tensorflow/lite/builtin_ops.h"
|
||||
#include "tensorflow/lite/experimental/c/c_api.h"
|
||||
#include "tensorflow/lite/c/c_api.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
@ -13,11 +13,11 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
|
||||
#include "tensorflow/lite/experimental/c/c_api_experimental.h"
|
||||
#include "tensorflow/lite/c/c_api_experimental.h"
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "tensorflow/lite/builtin_ops.h"
|
||||
#include "tensorflow/lite/experimental/c/c_api.h"
|
||||
#include "tensorflow/lite/c/c_api.h"
|
||||
#include "tensorflow/lite/testing/util.h"
|
||||
|
||||
namespace {
|
@ -12,16 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_C_C_API_INTERNAL_H_
|
||||
#define TENSORFLOW_LITE_EXPERIMENTAL_C_C_API_INTERNAL_H_
|
||||
#ifndef TENSORFLOW_LITE_C_C_API_INTERNAL_H_
|
||||
#define TENSORFLOW_LITE_C_C_API_INTERNAL_H_
|
||||
|
||||
#include "tensorflow/lite/experimental/c/c_api.h"
|
||||
#include "tensorflow/lite/c/common.h"
|
||||
#include "tensorflow/lite/interpreter.h"
|
||||
#include "tensorflow/lite/model.h"
|
||||
#include "tensorflow/lite/op_resolver.h"
|
||||
|
||||
// Internal structures used by the C API. These are likely to change and should
|
||||
// not be depended on.
|
||||
// not be depended on directly by any C API clients.
|
||||
//
|
||||
// NOTE: This header does not follow C conventions and does not define a C API.
|
||||
// It is effectively an (internal) implementation detail of the C API.
|
@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
|
||||
#include "tensorflow/lite/experimental/c/c_api.h"
|
||||
#include "tensorflow/lite/c/c_api.h"
|
||||
|
||||
#include <array>
|
||||
#include <fstream>
|
@ -1,120 +0,0 @@
|
||||
load(
|
||||
"//tensorflow/lite:build_def.bzl",
|
||||
"tflite_cc_shared_object",
|
||||
"tflite_copts",
|
||||
)
|
||||
|
||||
package(
|
||||
default_visibility = [":experimental"],
|
||||
licenses = ["notice"], # Apache 2.0
|
||||
)
|
||||
|
||||
package_group(
|
||||
name = "experimental",
|
||||
packages = [
|
||||
"//tensorflow/lite/experimental/...",
|
||||
"//third_party/dart/tflite_native/...", # whitelisted
|
||||
],
|
||||
)
|
||||
|
||||
tflite_cc_shared_object(
|
||||
name = "libtensorflowlite_c.so",
|
||||
linkopts = select({
|
||||
"//tensorflow:macos": [
|
||||
"-Wl,-exported_symbols_list,$(location //tensorflow/lite/experimental/c:exported_symbols.lds)",
|
||||
"-Wl,-install_name,@rpath/libtensorflowlite_c.so",
|
||||
],
|
||||
"//tensorflow:windows": [],
|
||||
"//conditions:default": [
|
||||
"-z defs",
|
||||
"-Wl,--version-script,$(location //tensorflow/lite/experimental/c:version_script.lds)",
|
||||
],
|
||||
}),
|
||||
deps = [
|
||||
":c_api",
|
||||
":c_api_experimental",
|
||||
":exported_symbols.lds",
|
||||
":version_script.lds",
|
||||
],
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "c_api_internal",
|
||||
srcs = [
|
||||
"c_api.h",
|
||||
"c_api_types.h",
|
||||
],
|
||||
hdrs = ["c_api_internal.h"],
|
||||
copts = tflite_copts(),
|
||||
visibility = [
|
||||
"//tensorflow/lite/experimental/c:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//tensorflow/lite:framework",
|
||||
"//tensorflow/lite/c:common",
|
||||
],
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "c_api",
|
||||
srcs = ["c_api.cc"],
|
||||
hdrs = [
|
||||
"c_api.h",
|
||||
"c_api_types.h",
|
||||
],
|
||||
copts = tflite_copts(),
|
||||
visibility = [
|
||||
":experimental",
|
||||
],
|
||||
deps = [
|
||||
":c_api_internal",
|
||||
"//tensorflow/lite:framework",
|
||||
"//tensorflow/lite:version",
|
||||
"//tensorflow/lite/c:common",
|
||||
"//tensorflow/lite/kernels:builtin_ops",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "c_api_experimental",
|
||||
srcs = ["c_api_experimental.cc"],
|
||||
hdrs = ["c_api_experimental.h"],
|
||||
copts = tflite_copts(),
|
||||
deps = [
|
||||
":c_api",
|
||||
":c_api_internal",
|
||||
"//tensorflow/lite:kernel_api",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
||||
cc_test(
|
||||
name = "c_api_test",
|
||||
size = "small",
|
||||
srcs = ["c_api_test.cc"],
|
||||
data = [
|
||||
"//tensorflow/lite:testdata/add.bin",
|
||||
"//tensorflow/lite:testdata/add_quantized.bin",
|
||||
],
|
||||
deps = [
|
||||
":c_api",
|
||||
"//tensorflow/lite/c:common",
|
||||
"//tensorflow/lite/testing:util",
|
||||
"@com_google_googletest//:gtest",
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
name = "c_api_experimental_test",
|
||||
size = "small",
|
||||
srcs = ["c_api_experimental_test.cc"],
|
||||
data = ["//tensorflow/lite:testdata/add.bin"],
|
||||
deps = [
|
||||
":c_api",
|
||||
":c_api_experimental",
|
||||
"//tensorflow/lite:kernel_api",
|
||||
"//tensorflow/lite/testing:util",
|
||||
"@com_google_googletest//:gtest",
|
||||
],
|
||||
)
|
1
tensorflow/lite/experimental/c/README.md
Normal file
1
tensorflow/lite/experimental/c/README.md
Normal file
@ -0,0 +1 @@
|
||||
The C API has been migrated to [lite/c](../../c/README.md).
|
@ -1,673 +0,0 @@
|
||||
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
==============================================================================*/
|
||||
|
||||
// This file defines common C types and APIs for implementing operations,
|
||||
// delegates and other constructs in TensorFlow Lite. The actual operations and
|
||||
// delegates can be defined using C++, but the interface between the interpreter
|
||||
// and the operations are C.
|
||||
//
|
||||
// Summary of abstractions
|
||||
// TF_LITE_ENSURE - Self-sufficient error checking
|
||||
// TfLiteStatus - Status reporting
|
||||
// TfLiteIntArray - stores tensor shapes (dims),
|
||||
// TfLiteContext - allows an op to access the tensors
|
||||
// TfLiteTensor - tensor (a multidimensional array)
|
||||
// TfLiteNode - a single node or operation
|
||||
// TfLiteRegistration - the implementation of a conceptual operation.
|
||||
// TfLiteDelegate - allows delegation of nodes to alternative backends.
|
||||
//
|
||||
// Some abstractions in this file are created and managed by Interpreter.
|
||||
|
||||
#ifndef TENSORFLOW_LITE_C_COMMON_H_
|
||||
#define TENSORFLOW_LITE_C_COMMON_H_
|
||||
|
||||
#include <stdbool.h>
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif // __cplusplus
|
||||
|
||||
typedef enum { kTfLiteOk = 0, kTfLiteError = 1 } TfLiteStatus;
|
||||
|
||||
// The list of external context types known to TF Lite. This list exists solely
|
||||
// to avoid conflicts and to ensure ops can share the external contexts they
|
||||
// need. Access to the external contexts is controled by one of the
|
||||
// corresponding support files.
|
||||
typedef enum {
|
||||
kTfLiteEigenContext = 0, // include eigen_support.h to use.
|
||||
kTfLiteGemmLowpContext = 1, // include gemm_support.h to use.
|
||||
kTfLiteEdgeTpuContext = 2, // Placeholder for Edge TPU support.
|
||||
kTfLiteCpuBackendContext = 3, // include cpu_backend_support.h to use.
|
||||
kTfLiteMaxExternalContexts = 4
|
||||
} TfLiteExternalContextType;
|
||||
|
||||
// Forward declare so dependent structs and methods can reference these types
|
||||
// prior to the struct definitions.
|
||||
struct TfLiteContext;
|
||||
struct TfLiteDelegate;
|
||||
struct TfLiteRegistration;
|
||||
|
||||
// An external context is a collection of information unrelated to the TF Lite
|
||||
// framework, but useful to a subset of the ops. TF Lite knows very little
|
||||
// about the actual contexts, but it keeps a list of them, and is able to
|
||||
// refresh them if configurations like the number of recommended threads
|
||||
// change.
|
||||
typedef struct {
|
||||
TfLiteExternalContextType type;
|
||||
TfLiteStatus (*Refresh)(struct TfLiteContext* context);
|
||||
} TfLiteExternalContext;
|
||||
|
||||
#define kTfLiteOptionalTensor (-1)
|
||||
|
||||
// Fixed size list of integers. Used for dimensions and inputs/outputs tensor
|
||||
// indices
|
||||
typedef struct {
|
||||
int size;
|
||||
// gcc 6.1+ have a bug where flexible members aren't properly handled
|
||||
// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
|
||||
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
|
||||
__GNUC_MINOR__ >= 1
|
||||
int data[0];
|
||||
#else
|
||||
int data[];
|
||||
#endif
|
||||
} TfLiteIntArray;
|
||||
|
||||
// Given the size (number of elements) in a TfLiteIntArray, calculate its size
|
||||
// in bytes.
|
||||
int TfLiteIntArrayGetSizeInBytes(int size);
|
||||
|
||||
// Create a array of a given `size` (uninitialized entries).
|
||||
// This returns a pointer, that you must free using TfLiteIntArrayFree().
|
||||
TfLiteIntArray* TfLiteIntArrayCreate(int size);
|
||||
|
||||
// Check if two intarrays are equal. Returns 1 if they are equal, 0 otherwise.
|
||||
int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b);
|
||||
|
||||
// Check if an intarray equals an array. Returns 1 if equals, 0 otherwise.
|
||||
int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size,
|
||||
const int b_data[]);
|
||||
|
||||
// Create a copy of an array passed as `src`.
|
||||
// You are expected to free memory with TfLiteIntArrayFree
|
||||
TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src);
|
||||
|
||||
// Free memory of array `a`.
|
||||
void TfLiteIntArrayFree(TfLiteIntArray* a);
|
||||
|
||||
// Fixed size list of floats. Used for per-channel quantization.
|
||||
typedef struct {
|
||||
int size;
|
||||
// gcc 6.1+ have a bug where flexible members aren't properly handled
|
||||
// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
|
||||
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
|
||||
__GNUC_MINOR__ >= 1
|
||||
float data[0];
|
||||
#else
|
||||
float data[];
|
||||
#endif
|
||||
} TfLiteFloatArray;
|
||||
|
||||
// Given the size (number of elements) in a TfLiteFloatArray, calculate its size
|
||||
// in bytes.
|
||||
int TfLiteFloatArrayGetSizeInBytes(int size);
|
||||
|
||||
// Create a array of a given `size` (uninitialized entries).
|
||||
// This returns a pointer, that you must free using TfLiteFloatArrayFree().
|
||||
TfLiteFloatArray* TfLiteFloatArrayCreate(int size);
|
||||
|
||||
// Free memory of array `a`.
|
||||
void TfLiteFloatArrayFree(TfLiteFloatArray* a);
|
||||
|
||||
// Since we must not depend on any libraries, define a minimal subset of
|
||||
// error macros while avoiding names that have pre-conceived meanings like
|
||||
// assert and check.
|
||||
|
||||
// Check whether value is true, and if not return kTfLiteError from
|
||||
// the current function (and report the error string msg).
|
||||
#define TF_LITE_ENSURE_MSG(context, value, msg) \
|
||||
do { \
|
||||
if (!(value)) { \
|
||||
(context)->ReportError((context), __FILE__ " " msg); \
|
||||
return kTfLiteError; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
// Check whether the value `a` is true, and if not return kTfLiteError from
|
||||
// the current function, while also reporting the location of the error.
|
||||
#define TF_LITE_ENSURE(context, a) \
|
||||
do { \
|
||||
if (!(a)) { \
|
||||
(context)->ReportError((context), "%s:%d %s was not true.", __FILE__, \
|
||||
__LINE__, #a); \
|
||||
return kTfLiteError; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define TF_LITE_ENSURE_STATUS(a) \
|
||||
do { \
|
||||
if ((a) != kTfLiteOk) { \
|
||||
return kTfLiteError; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
// Check whether the value `a == b` is true, and if not return kTfLiteError from
|
||||
// the current function, while also reporting the location of the error.
|
||||
// `a` and `b` may be evaluated more than once, so no side effects or
|
||||
// extremely expensive computations should be done.
|
||||
#define TF_LITE_ENSURE_EQ(context, a, b) \
|
||||
do { \
|
||||
if ((a) != (b)) { \
|
||||
(context)->ReportError((context), "%s:%d %s != %s (%d != %d)", __FILE__, \
|
||||
__LINE__, #a, #b, (a), (b)); \
|
||||
return kTfLiteError; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define TF_LITE_ENSURE_TYPES_EQ(context, a, b) \
|
||||
do { \
|
||||
if ((a) != (b)) { \
|
||||
(context)->ReportError((context), "%s:%d %s != %s (%s != %s)", __FILE__, \
|
||||
__LINE__, #a, #b, TfLiteTypeGetName(a), \
|
||||
TfLiteTypeGetName(b)); \
|
||||
return kTfLiteError; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define TF_LITE_ENSURE_OK(context, status) \
|
||||
do { \
|
||||
if ((status) != kTfLiteOk) { \
|
||||
return kTfLiteError; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
// Single-precision complex data type compatible with the C99 definition.
|
||||
typedef struct {
|
||||
float re, im; // real and imaginary parts, respectively.
|
||||
} TfLiteComplex64;
|
||||
|
||||
// Half precision data type compatible with the C99 definition.
|
||||
typedef struct {
|
||||
uint16_t data;
|
||||
} TfLiteFloat16;
|
||||
|
||||
// Types supported by tensor
|
||||
typedef enum {
|
||||
kTfLiteNoType = 0,
|
||||
kTfLiteFloat32 = 1,
|
||||
kTfLiteInt32 = 2,
|
||||
kTfLiteUInt8 = 3,
|
||||
kTfLiteInt64 = 4,
|
||||
kTfLiteString = 5,
|
||||
kTfLiteBool = 6,
|
||||
kTfLiteInt16 = 7,
|
||||
kTfLiteComplex64 = 8,
|
||||
kTfLiteInt8 = 9,
|
||||
kTfLiteFloat16 = 10,
|
||||
} TfLiteType;
|
||||
|
||||
// Return the name of a given type, for error reporting purposes.
|
||||
const char* TfLiteTypeGetName(TfLiteType type);
|
||||
|
||||
// SupportedQuantizationTypes.
|
||||
typedef enum {
|
||||
// No quantization.
|
||||
kTfLiteNoQuantization = 0,
|
||||
// Affine quantization (with support for per-channel quantization).
|
||||
// Corresponds to TfLiteAffineQuantization.
|
||||
kTfLiteAffineQuantization = 1,
|
||||
} TfLiteQuantizationType;
|
||||
|
||||
// Structure specifying the quantization used by the tensor, if-any.
|
||||
typedef struct {
|
||||
// The type of quantization held by params.
|
||||
TfLiteQuantizationType type;
|
||||
// Holds a reference to one of the quantization param structures specified
|
||||
// below.
|
||||
void* params;
|
||||
} TfLiteQuantization;
|
||||
|
||||
// Legacy. Will be deprecated in favor of TfLiteAffineQuantization.
|
||||
// If per-layer quantization is specified this field will still be populated in
|
||||
// addition to TfLiteAffineQuantization.
|
||||
// Parameters for asymmetric quantization. Quantized values can be converted
|
||||
// back to float using:
|
||||
// real_value = scale * (quantized_value - zero_point)
|
||||
typedef struct {
|
||||
float scale;
|
||||
int32_t zero_point;
|
||||
} TfLiteQuantizationParams;
|
||||
|
||||
// Parameters for asymmetric quantization across a dimension (i.e per output
|
||||
// channel quantization).
|
||||
// quantized_dimension specifies which dimension the scales and zero_points
|
||||
// correspond to.
|
||||
// For a particular value in quantized_dimension, quantized values can be
|
||||
// converted back to float using:
|
||||
// real_value = scale * (quantized_value - zero_point)
|
||||
typedef struct {
|
||||
TfLiteFloatArray* scale;
|
||||
TfLiteIntArray* zero_point;
|
||||
int32_t quantized_dimension;
|
||||
} TfLiteAffineQuantization;
|
||||
|
||||
/* A union of pointers that points to memory for a given tensor. */
|
||||
typedef union {
|
||||
/* Do not access these members directly, if possible, use
|
||||
* GetTensorData<TYPE>(tensor) instead, otherwise only access .data, as other
|
||||
* members are deprecated. */
|
||||
int32_t* i32;
|
||||
int64_t* i64;
|
||||
float* f;
|
||||
TfLiteFloat16* f16;
|
||||
char* raw;
|
||||
const char* raw_const;
|
||||
uint8_t* uint8;
|
||||
bool* b;
|
||||
int16_t* i16;
|
||||
TfLiteComplex64* c64;
|
||||
int8_t* int8;
|
||||
/* Only use this member. */
|
||||
void* data;
|
||||
} TfLitePtrUnion;
|
||||
|
||||
// Memory allocation strategies. kTfLiteMmapRo is for read-only memory-mapped
|
||||
// data (or data externally allocated). kTfLiteArenaRw is arena allocated
|
||||
// data. kTfLiteDynamic is for tensors that are allocated during evaluation.
|
||||
typedef enum {
|
||||
kTfLiteMemNone = 0,
|
||||
kTfLiteMmapRo,
|
||||
kTfLiteArenaRw,
|
||||
kTfLiteArenaRwPersistent,
|
||||
kTfLiteDynamic,
|
||||
} TfLiteAllocationType;
|
||||
|
||||
// The delegates should use zero or positive integers to represent handles.
|
||||
// -1 is reserved from unallocated status.
|
||||
typedef int TfLiteBufferHandle;
|
||||
enum {
|
||||
kTfLiteNullBufferHandle = -1,
|
||||
};
|
||||
|
||||
// A tensor in the interpreter system which is a wrapper around a buffer of
// data including a dimensionality (or NULL if not currently defined).
typedef struct {
  // The data type specification for data stored in `data`. This affects
  // what member of `data` union should be used.
  TfLiteType type;
  // A union of data pointers. The appropriate type should be used for a typed
  // tensor based on `type`.
  TfLitePtrUnion data;
  // A pointer to a structure representing the dimensionality interpretation
  // that the buffer should have. NOTE: the product of elements of `dims`
  // and the element datatype size should be equal to `bytes` below.
  TfLiteIntArray* dims;
  // Quantization information.
  TfLiteQuantizationParams params;
  // How memory is mapped
  //  kTfLiteMmapRo: Memory mapped read only.
  //  i.e. weights
  //  kTfLiteArenaRw: Arena allocated read write memory
  //  (i.e. temporaries, outputs).
  TfLiteAllocationType allocation_type;
  // The number of bytes required to store the data of this Tensor. I.e.
  // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if
  // type is kTfLiteFloat32 and dims = {3, 2} then
  // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24.
  size_t bytes;

  // An opaque pointer to a tflite::MMapAllocation
  const void* allocation;

  // Null-terminated name of this tensor.
  const char* name;

  // The delegate which knows how to handle `buffer_handle`.
  // WARNING: This is an experimental interface that is subject to change.
  struct TfLiteDelegate* delegate;

  // An integer buffer handle that can be handled by `delegate`.
  // The value is valid only when delegate is not null.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteBufferHandle buffer_handle;

  // If the delegate uses its own buffer (e.g. GPU memory), the delegate is
  // responsible to set data_is_stale to true.
  // `delegate->CopyFromBufferHandle` can be called to copy the data from
  // delegate buffer.
  // WARNING: This is an experimental interface that is subject to change.
  bool data_is_stale;

  // True if the tensor is a variable.
  bool is_variable;

  // Quantization information. Replaces params field above.
  TfLiteQuantization quantization;
} TfLiteTensor;
|
||||
|
||||
// Free data memory of tensor `t`.
void TfLiteTensorDataFree(TfLiteTensor* t);

// Free quantization data.
void TfLiteQuantizationFree(TfLiteQuantization* quantization);

// Free memory of tensor `t` (both its data buffer and quantization info).
void TfLiteTensorFree(TfLiteTensor* t);

// Set all of a tensor's fields (and free any previously allocated data).
void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
                       TfLiteQuantizationParams quantization, char* buffer,
                       size_t size, TfLiteAllocationType allocation_type,
                       const void* allocation, bool is_variable,
                       TfLiteTensor* tensor);

// Resize the allocated data of a (dynamic) tensor. Tensors with allocation
// types other than kTfLiteDynamic will be ignored.
void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);
|
||||
|
||||
// A structure representing an instance of a node.
// This structure only exhibits the inputs, outputs and user defined data, not
// other features like the type.
typedef struct {
  // Inputs to this node expressed as indices into the simulator's tensors.
  TfLiteIntArray* inputs;

  // Outputs to this node expressed as indices into the simulator's tensors.
  TfLiteIntArray* outputs;

  // Intermediate tensors to this node expressed as indices into the
  // simulator's tensors.
  TfLiteIntArray* intermediates;

  // Temporary tensors used during the computations. This usually contains no
  // tensors, but ops are allowed to change that if they need scratch space of
  // any sort.
  TfLiteIntArray* temporaries;

  // Opaque data provided by the node implementer through `Registration.init`.
  void* user_data;

  // Opaque data provided to the node if the node is a builtin. This is usually
  // a structure defined in builtin_op_data.h
  void* builtin_data;

  // Custom initial data. This is the opaque data provided in the flatbuffer.
  // WARNING: This is an experimental interface that is subject to change.
  const void* custom_initial_data;
  int custom_initial_data_size;

  // The pointer to the delegate. This is non-null only when the node is
  // created by calling `interpreter.ModifyGraphWithDelegate`.
  // WARNING: This is an experimental interface that is subject to change.
  struct TfLiteDelegate* delegate;
} TfLiteNode;
|
||||
|
||||
// The runtime-provided context handed to every op kernel and delegate. It
// exposes the tensors of the graph plus function pointers back into the
// interpreter (resize, error reporting, node replacement, ...).
typedef struct TfLiteContext {
  // Number of tensors in the context.
  size_t tensors_size;

  // The execution plan contains a list of the node indices in execution
  // order. execution_plan->size is the current number of nodes. And,
  // execution_plan->data[0] is the first node that needs to be run.
  // TfLiteDelegates can traverse the current execution plan by iterating
  // through each member of this array and using GetNodeAndRegistration() to
  // access details about a node. i.e.
  // TfLiteIntArray* execution_plan;
  // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &execution_plan));
  // for (int exec_index = 0; exec_index < execution_plan->size; exec_index++) {
  //    int node_index = execution_plan->data[exec_index];
  //    TfLiteNode* node;
  //    TfLiteRegistration* reg;
  //    context->GetNodeAndRegistration(context, node_index, &node, &reg);
  // }
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*GetExecutionPlan)(struct TfLiteContext* context,
                                   TfLiteIntArray** execution_plan);

  // An array of tensors in the interpreter context (of length `tensors_size`)
  TfLiteTensor* tensors;

  // opaque full context ptr (an opaque c++ data structure)
  void* impl_;

  // Request memory pointer be resized. Updates dimensions on the tensor.
  // NOTE: ResizeTensor takes ownership of newSize.
  TfLiteStatus (*ResizeTensor)(struct TfLiteContext*, TfLiteTensor* tensor,
                               TfLiteIntArray* new_size);
  // Request that an error be reported with format string msg.
  void (*ReportError)(struct TfLiteContext*, const char* msg, ...);

  // Add `tensors_to_add` tensors, preserving pre-existing Tensor entries. If
  // non-null, the value pointed to by `first_new_tensor_index` will be set to
  // the index of the first new tensor.
  TfLiteStatus (*AddTensors)(struct TfLiteContext*, int tensors_to_add,
                             int* first_new_tensor_index);

  // Get a Tensor node by node_index.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*GetNodeAndRegistration)(
      struct TfLiteContext*, int node_index, TfLiteNode** node,
      struct TfLiteRegistration** registration);

  // Replace ops with one or more stub delegate operations. This function
  // does not take ownership of `nodes_to_replace`.
  TfLiteStatus (*ReplaceNodeSubsetsWithDelegateKernels)(
      struct TfLiteContext*, struct TfLiteRegistration registration,
      const TfLiteIntArray* nodes_to_replace, struct TfLiteDelegate* delegate);

  // Number of threads that are recommended to subsystems like gemmlowp and
  // eigen.
  int recommended_num_threads;

  // Access external contexts by type.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteExternalContext* (*GetExternalContext)(struct TfLiteContext*,
                                               TfLiteExternalContextType);
  // Set the value of an external context. Does not take ownership of the
  // pointer.
  // WARNING: This is an experimental interface that is subject to change.
  void (*SetExternalContext)(struct TfLiteContext*, TfLiteExternalContextType,
                             TfLiteExternalContext*);

  // Flag for allowing float16 precision for FP32 calculation.
  // default: false.
  // WARNING: This is an experimental API and subject to change.
  bool allow_fp32_relax_to_fp16;

  // Pointer to the op-level profiler, if set; nullptr otherwise.
  void* profiler;

  // Allocate memory for op data. This method should only be used in `Init`
  // method and the allocated memory will be available until `Free` method is
  // called.
  // On TFL, it allocates memory from heap using malloc, but for micro, this
  // will be allocating from the allocator.
  // WARNING: This is an experimental interface that is subject to change.
  void* (*AllocateOpData)(struct TfLiteContext* ctx, size_t size);

  // Deallocate memory holding op data. This method should only be used inside
  // `Free` method. Caller needs to make sure that `buffer` is allocated by
  // `AllocateOpData` method.
  // On TFL, it will free the buffer, and for micro, this method is a no-op.
  // WARNING: This is an experimental interface that is subject to change.
  void (*DeallocateOpData)(struct TfLiteContext* ctx, void* buffer);

  // Allocate a temporary tensor to the node. This method also makes a copy of
  // the shape array internally so the shape array could be deallocated right
  // afterwards.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*AllocateTemporaryTensor)(struct TfLiteContext* ctx,
                                          TfLiteNode* node, int dims,
                                          int* shape, TfLiteType data_type,
                                          TfLiteAllocationType allocation_type,
                                          int* new_tensor_index);

  // Deallocate all temporary tensors associated to the node (including
  // kTfLiteArenaRwPersistent persistent tensors). It also deallocates
  // all the shape tensors.
  // WARNING: This is an experimental interface that is subject to change.
  void (*DeallocateAllTemporaryTensors)(struct TfLiteContext* ctx,
                                        TfLiteNode* node);

  // Resize the memory pointer of the `tensor`. This method behaves the same as
  // `ResizeTensor`, except that it makes a copy of the shape array internally
  // so the shape array could be deallocated right afterwards.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteStatus (*ResizeTensorExplicit)(struct TfLiteContext* ctx,
                                       TfLiteTensor* tensor, int dims,
                                       const int* shape);
} TfLiteContext;
|
||||
|
||||
// The registration (vtable) for a single op kernel: lifecycle callbacks plus
// identification metadata (builtin code / custom name / version).
typedef struct TfLiteRegistration {
  // Initializes the op from serialized data.
  // If a built-in op:
  //   `buffer` is the op's params data (TfLiteLSTMParams*).
  //   `length` is zero.
  // If custom op:
  //   `buffer` is the op's `custom_options`.
  //   `length` is the size of the buffer.
  //
  // Returns a type-punned (i.e. void*) opaque data (e.g. a primitive pointer
  // or an instance of a struct).
  //
  // The returned pointer will be stored with the node in the `user_data` field,
  // accessible within prepare and invoke functions below.
  // NOTE: if the data is already in the desired format, simply implement this
  // function to return `nullptr` and implement the free function to be a no-op.
  void* (*init)(TfLiteContext* context, const char* buffer, size_t length);

  // The pointer `buffer` is the data previously returned by an init invocation.
  void (*free)(TfLiteContext* context, void* buffer);

  // prepare is called when the inputs this node depends on have been resized.
  // context->ResizeTensor() can be called to request output tensors to be
  // resized.
  //
  // Returns kTfLiteOk on success.
  TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node);

  // Execute the node (should read node->inputs and output to node->outputs).
  // Returns kTfLiteOk on success.
  TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node);

  // profiling_string is called during summarization of profiling information
  // in order to group executions together. Providing a value here will cause a
  // given op to appear multiple times in the profiling report. This is
  // particularly useful for custom ops that can perform significantly
  // different calculations depending on their `user-data`.
  const char* (*profiling_string)(const TfLiteContext* context,
                                  const TfLiteNode* node);

  // Builtin codes. If this kernel refers to a builtin this is the code
  // of the builtin. This is so we can do marshaling to other frameworks like
  // NN API.
  // Note: It is the responsibility of the registration binder to set this
  // properly.
  int32_t builtin_code;

  // Custom op name. If the op is a builtin, this will be null.
  // Note: It is the responsibility of the registration binder to set this
  // properly.
  // WARNING: This is an experimental interface that is subject to change.
  const char* custom_name;

  // The version of the op.
  // Note: It is the responsibility of the registration binder to set this
  // properly.
  int version;
} TfLiteRegistration;
|
||||
|
||||
// The flags used in `TfLiteDelegate`. Note that this is a bitmask, so the
// values should be 1, 2, 4, 8, ...etc.
typedef enum {
  kTfLiteDelegateFlagsNone = 0,
  // The flag is set if the delegate can handle dynamic sized tensors.
  // For example, the output shape of a `Resize` op with non-constant shape
  // can only be inferred when the op is invoked.
  // In this case, the Delegate is responsible for calling
  // `SetTensorToDynamic` to mark the tensor as a dynamic tensor, and calling
  // `ResizeTensor` when invoking the op.
  //
  // If the delegate isn't capable of handling dynamic tensors, this flag
  // needs to be left unset (i.e. the delegate must not claim it).
  kTfLiteDelegateFlagsAllowDynamicTensors = 1
} TfLiteDelegateFlags;
|
||||
|
||||
// WARNING: This is an experimental interface that is subject to change.
typedef struct TfLiteDelegate {
  // Data that delegate needs to identify itself. This data is owned by the
  // delegate. The delegate is owned in the user code, so the delegate is
  // responsible for doing this when it is destroyed.
  void* data_;

  // Invoked by ModifyGraphWithDelegate. This prepare is called, giving the
  // delegate a view of the current graph through TfLiteContext*. It typically
  // will look at the nodes and call ReplaceNodeSubsetsWithDelegateKernels()
  // to ask the TensorFlow lite runtime to create macro-nodes to represent
  // delegated subgraphs of the original graph.
  TfLiteStatus (*Prepare)(TfLiteContext* context,
                          struct TfLiteDelegate* delegate);

  // Copy the data from delegate buffer handle into raw memory of the given
  // 'tensor'. This cannot be null. The delegate is allowed to allocate the raw
  // bytes as long as it follows the rules for kTfLiteDynamic tensors.
  TfLiteStatus (*CopyFromBufferHandle)(TfLiteContext* context,
                                       struct TfLiteDelegate* delegate,
                                       TfLiteBufferHandle buffer_handle,
                                       TfLiteTensor* tensor);

  // Copy the data from raw memory of the given 'tensor' to delegate buffer
  // handle. This can be null if the delegate doesn't use its own buffer.
  TfLiteStatus (*CopyToBufferHandle)(TfLiteContext* context,
                                     struct TfLiteDelegate* delegate,
                                     TfLiteBufferHandle buffer_handle,
                                     TfLiteTensor* tensor);

  // Free the Delegate Buffer Handle. Note: This only frees the handle, but
  // this doesn't release the underlying resource (e.g. textures). The
  // resources are either owned by application layer or the delegate.
  // This can be null if the delegate doesn't use its own buffer.
  void (*FreeBufferHandle)(TfLiteContext* context,
                           struct TfLiteDelegate* delegate,
                           TfLiteBufferHandle* handle);

  // Bitmask flags. See the comments in `TfLiteDelegateFlags`.
  int64_t flags;
} TfLiteDelegate;
|
||||
|
||||
// Build a 'null' delegate, with all the fields properly set to their default
|
||||
// values.
|
||||
TfLiteDelegate TfLiteDelegateCreate();
|
||||
|
||||
// WARNING: This is an experimental interface that is subject to change.
//
// Currently, TfLiteDelegateParams has to be allocated in a way that it's
// trivially destructable. It will be stored as `builtin_data` field in
// `TfLiteNode` of the delegate node.
//
// See also the `CreateDelegateParams` function in `interpreter.cc` details.
typedef struct {
  // The delegate this set of parameters belongs to.
  TfLiteDelegate* delegate;
  // Node indices (into the interpreter's tensors/nodes) to be replaced.
  TfLiteIntArray* nodes_to_replace;
  // Input tensor indices of the replaced subgraph.
  TfLiteIntArray* input_tensors;
  // Output tensor indices of the replaced subgraph.
  TfLiteIntArray* output_tensors;
} TfLiteDelegateParams;
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif // __cplusplus
|
||||
#endif // TENSORFLOW_LITE_C_COMMON_H_
|
@ -6,21 +6,18 @@ Unity by way of a C# `Interpreter` wrapper.
|
||||
|
||||
Note that the native TF Lite plugin(s) *must* be built before using the Unity
|
||||
Plugin, and placed in Assets/TensorFlowLite/SDK/Plugins/. For the editor (note
|
||||
that this has only been tested on Linux; the syntax may differ on Mac/Windows):
|
||||
that the generated shared library name and suffix are platform-dependent):
|
||||
|
||||
```sh
|
||||
bazel build -c opt --cxxopt=--std=c++11 \
|
||||
//tensorflow/lite/experimental/c:libtensorflowlite_c.so
|
||||
bazel build -c opt --cxxopt=--std=c++11 //tensorflow/lite/c:tensorflowlite_c
|
||||
```
|
||||
|
||||
and for Android (replace `android_arm` with `android_arm64` for 64-bit):
|
||||
|
||||
```sh
|
||||
bazel build -c opt --cxxopt=--std=c++11 --config=android_arm \
|
||||
//tensorflow/lite/experimental/c:libtensorflowlite_c.so
|
||||
//tensorflow/lite/c:tensorflowlite_c
|
||||
```
|
||||
|
||||
If you encounter issues with native plugin discovery on Mac ("Darwin")
|
||||
platforms, try renaming `libtensorflowlite_c.so` to `tensorflowlite_c.bundle`.
|
||||
Similarly, on Windows you'll likely need to rename `libtensorflowlite_c.so` to
|
||||
`tensorflowlite_c.dll`.
|
||||
platforms, try renaming `libtensorflowlite_c.dylib` to `tensorflowlite_c.bundle`.
|
||||
|
@ -5,23 +5,20 @@ load("//tensorflow/lite/experimental/ios:ios.bzl", "TFL_MINIMUM_OS_VERSION")
|
||||
load("@build_bazel_rules_apple//apple:ios.bzl", "ios_static_framework")
|
||||
|
||||
package(
|
||||
default_visibility = ["//tensorflow/lite/experimental/c:experimental"],
|
||||
default_visibility = ["//tensorflow/lite/c:experimental"],
|
||||
licenses = ["notice"], # Apache 2.0
|
||||
)
|
||||
|
||||
TFL_LIBRARY_HDRS = [
|
||||
"//tensorflow/lite/delegates/gpu:metal_delegate.h",
|
||||
"//tensorflow/lite/experimental/c:c_api.h",
|
||||
]
|
||||
|
||||
TFL_FRAMEWORK_HDRS = TFL_LIBRARY_HDRS + [
|
||||
"//tensorflow/lite/experimental/c:c_api_types.h",
|
||||
"//tensorflow/lite/c:c_api.h",
|
||||
"//tensorflow/lite/c:common.h",
|
||||
]
|
||||
|
||||
# bazel build -c opt --config=ios_fat //tensorflow/lite/experimental/ios:TensorFlowLiteC_framework
|
||||
ios_static_framework(
|
||||
name = "TensorFlowLiteC_framework",
|
||||
hdrs = TFL_FRAMEWORK_HDRS,
|
||||
hdrs = TFL_LIBRARY_HDRS,
|
||||
bundle_name = "TensorFlowLiteC",
|
||||
minimum_os_version = TFL_MINIMUM_OS_VERSION,
|
||||
deps = [
|
||||
@ -32,7 +29,7 @@ ios_static_framework(
|
||||
# bazel build -c opt --config=ios --ios_multi_cpus=armv7,arm64,x86_64 //tensorflow/lite/experimental/ios:TensorFlowLiteCWithSelectTfOps_framework
|
||||
ios_static_framework(
|
||||
name = "TensorFlowLiteCWithSelectTfOps_framework",
|
||||
hdrs = TFL_FRAMEWORK_HDRS,
|
||||
hdrs = TFL_LIBRARY_HDRS,
|
||||
bundle_name = "TensorFlowLiteC",
|
||||
minimum_os_version = TFL_MINIMUM_OS_VERSION,
|
||||
deps = [
|
||||
@ -68,8 +65,8 @@ cc_library(
|
||||
hdrs = TFL_LIBRARY_HDRS,
|
||||
tags = ["nobuilder"],
|
||||
deps = [
|
||||
"//tensorflow/lite/c:c_api",
|
||||
"//tensorflow/lite/delegates/gpu:metal_delegate",
|
||||
"//tensorflow/lite/experimental/c:c_api",
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -44,7 +44,7 @@ RELEASE_COPTS = [
|
||||
# Warns if an @selector() expression is encountered with a method name that hasn't been defined yet.
|
||||
"-Wundeclared-selector",
|
||||
# Turn off warnings for headers not part of TensorFlow Lite Objective-C API.
|
||||
"--system-header-prefix=tensorflow/lite/experimental/c/",
|
||||
"--system-header-prefix=tensorflow/lite/c/",
|
||||
]
|
||||
|
||||
# Compiler flags for building test libraries.
|
||||
@ -63,7 +63,7 @@ objc_library(
|
||||
tags = TFL_DEFAULT_TAGS,
|
||||
visibility = ios_visibility_whitelist(),
|
||||
deps = [
|
||||
"//tensorflow/lite/experimental/c:c_api",
|
||||
"//tensorflow/lite/c:c_api",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
@ -1,7 +1,7 @@
|
||||
{
|
||||
"sourceFilters" : [
|
||||
"tensorflow/lite",
|
||||
"tensorflow/lite/experimental/c",
|
||||
"tensorflow/lite/c",
|
||||
"tensorflow/lite/experimental/objc",
|
||||
"tensorflow/lite/experimental/objc/apis",
|
||||
"tensorflow/lite/experimental/objc/apps/TestApp/TestApp",
|
||||
|
@ -25,7 +25,7 @@ Pod::Spec.new do |s|
|
||||
s.source_files = [
|
||||
objc_dir + '{apis,sources}/*.{h,m,mm}',
|
||||
tfl_dir + 'experimental/c/c_api.h',
|
||||
tfl_dir + 'experimental/c/c_api_types.h',
|
||||
tfl_dir + 'experimental/c/common.h',
|
||||
]
|
||||
s.module_map = objc_dir + 'apis/framework.modulemap'
|
||||
s.dependency 'TensorFlowLiteC', "~> #{s.version}"
|
||||
|
@ -25,7 +25,7 @@ Pod::Spec.new do |s|
|
||||
s.source_files = [
|
||||
objc_dir + '{apis,sources}/*.{h,m,mm}',
|
||||
tfl_dir + 'experimental/c/c_api.h',
|
||||
tfl_dir + 'experimental/c/c_api_types.h',
|
||||
tfl_dir + 'experimental/c/common.h',
|
||||
]
|
||||
s.module_map = objc_dir + 'apis/framework.modulemap'
|
||||
s.dependency 'TensorFlowLiteC', "#{s.version}"
|
||||
|
@ -20,7 +20,7 @@
|
||||
#import "tensorflow/lite/experimental/objc/apis/TFLInterpreterOptions.h"
|
||||
#import "tensorflow/lite/experimental/objc/apis/TFLTensor.h"
|
||||
|
||||
#include "tensorflow/lite/experimental/c/c_api.h"
|
||||
#include "tensorflow/lite/c/c_api.h"
|
||||
|
||||
NS_ASSUME_NONNULL_BEGIN
|
||||
|
||||
|
@ -1,6 +1,6 @@
|
||||
{
|
||||
"sourceFilters" : [
|
||||
"tensorflow/lite/experimental/c",
|
||||
"tensorflow/lite/c",
|
||||
"tensorflow/lite/experimental/swift",
|
||||
"tensorflow/lite/experimental/swift/Sources",
|
||||
"tensorflow/lite/experimental/swift/TestApp/TestApp",
|
||||
|
Loading…
Reference in New Issue
Block a user