Implement a telemetry library for delegates to report their settings/status for A@S

PiperOrigin-RevId: 346165056
Change-Id: I31f3b14e9986d0d89d1d568f1c565bcfc6cf2783
This commit is contained in:
Sachin Joglekar 2020-12-07 13:18:54 -08:00 committed by TensorFlower Gardener
parent 07bb67303b
commit d6e53e76f2
11 changed files with 1707 additions and 19 deletions

View File

@ -260,7 +260,7 @@ cc_library(
"//tensorflow/lite/c:common",
"//tensorflow/lite/core/api",
"//tensorflow/lite/core/api:verifier",
"//tensorflow/lite/delegates:status",
"//tensorflow/lite/delegates:telemetry",
"//tensorflow/lite/experimental/resource",
"//tensorflow/lite/kernels/internal:compatibility",
"//tensorflow/lite/profiling:platform_profiler",
@ -347,7 +347,7 @@ cc_library(
"//tensorflow/lite/c:common",
"//tensorflow/lite/core/api",
"//tensorflow/lite/core/api:verifier",
"//tensorflow/lite/delegates:status",
"//tensorflow/lite/delegates:telemetry",
"//tensorflow/lite/experimental/resource",
"//tensorflow/lite/kernels/internal:compatibility",
"//tensorflow/lite/profiling:platform_profiler",

View File

@ -181,12 +181,12 @@ class ScopedRuntimeInstrumentationProfile : public ScopedProfile {
_profile_, __COUNTER__)((profiler), (tag), (node_index))
#define TFLITE_ADD_RUNTIME_INSTRUMENTATION_EVENT( \
profiler, tag, delegate_status, interpreter_status) \
profiler, tag, event_metadata1, event_metadata2) \
do { \
if (!profiler) { \
if (profiler) { \
const auto handle = profiler->BeginEvent( \
tag, Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT, \
delegate_status, interpreter_status); \
event_metadata1, event_metadata2); \
profiler->EndEvent(handle); \
} \
} while (false);

View File

@ -22,12 +22,31 @@ package(
)
cc_library(
name = "status",
hdrs = ["status.h"],
name = "telemetry",
srcs = ["telemetry.cc"],
hdrs = ["telemetry.h"],
compatible_with = get_compatible_with_portable(),
copts = tflite_copts(),
deps = [
"//tensorflow/lite/c:common",
"//tensorflow/lite/core/api",
"//tensorflow/lite/experimental/acceleration/configuration:configuration_fbs",
],
)
cc_test(
name = "telemetry_test",
srcs = ["telemetry_test.cc"],
linkopts = tflite_linkopts(),
linkstatic = 1,
deps = [
":telemetry",
"//tensorflow/lite/c:common",
"//tensorflow/lite/core/api",
"//tensorflow/lite/experimental/acceleration/configuration:configuration_fbs",
"//tensorflow/lite/profiling:profile_buffer",
"@com_google_googletest//:gtest_main",
"@flatbuffers",
],
)

View File

@ -0,0 +1,49 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/delegates/telemetry.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/profiler.h"
namespace tflite {
namespace delegates {
// TODO(b/153131797): Add an IFTTT here once we have a profiler to interpret
// these events, so that the two components don't go out of sync.
// Emits one GENERAL_RUNTIME_INSTRUMENTATION_EVENT on the interpreter's
// Profiler, carrying the delegate pointer and a pointer to its flatbuffer
// `settings` in the two int64 metadata slots. A cooperating profiler (see
// telemetry_test.cc) casts them back to inspect the configuration.
// Always returns kTfLiteOk; if context->profiler is null the event is a no-op.
TfLiteStatus ReportDelegateSettings(TfLiteContext* context,
                                    TfLiteDelegate* delegate,
                                    const TFLiteSettings& settings) {
  auto* runtime_profiler = reinterpret_cast<Profiler*>(context->profiler);
  // Smuggle the two pointers through the profiler's integer metadata fields.
  const auto delegate_as_metadata = reinterpret_cast<int64_t>(delegate);
  const auto settings_as_metadata = reinterpret_cast<int64_t>(&settings);
  TFLITE_ADD_RUNTIME_INSTRUMENTATION_EVENT(runtime_profiler,
                                           kDelegateSettingsTag,
                                           delegate_as_metadata,
                                           settings_as_metadata);
  return kTfLiteOk;
}
// Emits one GENERAL_RUNTIME_INSTRUMENTATION_EVENT on the interpreter's
// Profiler with the namespaced 64-bit delegate status in metadata slot 1 and
// kTfLiteOk in slot 2. Always returns kTfLiteOk; a null context->profiler
// makes the event a no-op.
TfLiteStatus ReportDelegateStatus(TfLiteContext* context,
                                  TfLiteDelegate* delegate,
                                  const DelegateStatus& status) {
  auto* runtime_profiler = reinterpret_cast<Profiler*>(context->profiler);
  TFLITE_ADD_RUNTIME_INSTRUMENTATION_EVENT(
      runtime_profiler, kDelegateStatusTag, status.full_status(),
      static_cast<int64_t>(kTfLiteOk));
  return kTfLiteOk;
}
} // namespace delegates
} // namespace tflite

View File

@ -19,19 +19,24 @@ limitations under the License.
#include <limits>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/experimental/acceleration/configuration/configuration_generated.h"
// This file defines data structures to represent detailed TFLite delegate
// status, e.g. NNAPI delegate application failure because of a driver issue
// etc. Such status is ONLY to be used for internal APIs.
// Note, we simply use TfLiteStatus to represent high-level status while
// delegate-specific status codes are defined with DelegateStatus.
// This file implements utilities for delegate telemetry. These enable
// representation and reporting of hardware-specific configurations, status
// codes, etc.
// These APIs are for internal use *only*, and should be modified with care to
// avoid incompatibilities between delegates & runtime.
// WARNING: This is an experimental feature that is subject to change.
namespace tflite {
namespace delegates {
// Defines the source of the code where it is generated from. We list all TFLite
// delegates that're officially implemented and available as of April, 2020
// (i.e. w/ 'TFLITE_' prefix to imply this).
// Used to identify specific events for tflite::Profiler.
constexpr char kDelegateSettingsTag[] = "delegate_settings";
constexpr char kDelegateStatusTag[] = "delegate_status";
// Defines the delegate or hardware-specific 'namespace' that a status code
// belongs to. For example, GPU delegate errors might be belong to TFLITE_GPU,
// while OpenCL-specific ones might be TFLITE_GPU_CL.
enum class DelegateStatusSource {
NONE = 0,
TFLITE_GPU = 1,
@ -42,8 +47,16 @@ enum class DelegateStatusSource {
MAX_NUM_SOURCES = std::numeric_limits<int32_t>::max(),
};
// Defines the detailed status that combines a DelegateStatusSource and a
// status int32_t code.
// DelegateStatus defines a namespaced status with a combination of
// DelegateStatusSource & the corresponding fine-grained 32-bit code. Used to
// convert to/from a 64-bit representation as follows:
//
// delegates::DelegateStatus status(
// delegates::DelegateStatusSource::TFLITE_NNAPI,
// ANEURALNETWORKS_OP_FAILED);
// int64_t code = status.full_status();
//
// auto parsed_status = delegates::DelegateStatus(code);
class DelegateStatus {
public:
DelegateStatus() : DelegateStatus(DelegateStatusSource::NONE, 0) {}
@ -77,6 +90,20 @@ class DelegateStatus {
int32_t code_;
};
// Used by delegates to report their configuration/settings to TFLite.
// Calling this method adds a new GENERAL_RUNTIME_INSTRUMENTATION_EVENT to
// the runtime Profiler.
TfLiteStatus ReportDelegateSettings(TfLiteContext* context,
TfLiteDelegate* delegate,
const TFLiteSettings& settings);
// Used by delegates to report their status to the TFLite runtime.
// Calling this method adds a new GENERAL_RUNTIME_INSTRUMENTATION_EVENT to
// the runtime Profiler.
TfLiteStatus ReportDelegateStatus(TfLiteContext* context,
TfLiteDelegate* delegate,
const DelegateStatus& status);
} // namespace delegates
} // namespace tflite

View File

@ -0,0 +1,141 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/delegates/telemetry.h"
#include <cstdint>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/experimental/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/profiling/profile_buffer.h"
namespace tflite {
namespace delegates {
namespace {
// Arbitrary fixture values shared by the tests below.
constexpr int32_t kDummyCode = 2;
constexpr bool kDummyGpuPrecisionLossAllowed = true;
constexpr tflite::Delegate kDummyDelegate = tflite::Delegate_GPU;
constexpr DelegateStatusSource kDummySource =
DelegateStatusSource::TFLITE_NNAPI;
// Round-trips a DelegateStatus through its 64-bit wire representation and
// checks that the source, the code, and the full value all survive.
TEST(TelemetryTest, StatusConversion) {
  DelegateStatus original(kDummySource, kDummyCode);
  int64_t wire_format = original.full_status();
  DelegateStatus round_tripped(wire_format);
  EXPECT_EQ(kDummyCode, round_tripped.code());
  EXPECT_EQ(kDummySource, round_tripped.source());
  EXPECT_EQ(wire_format, round_tripped.full_status());
}
// Dummy profiler to test delegate reporting.
// Mock Profiler that verifies the telemetry reporting APIs from inside
// BeginEvent. It recognizes the two telemetry tags, decodes the metadata the
// library packed into each event, and EXPECTs it to match the kDummy*
// fixture values. Any event it does not recognize fails the test
// (EXPECT_NE(-1, event_handle) below).
class DelegateProfiler : public Profiler {
public:
DelegateProfiler() {}
~DelegateProfiler() override = default;
// Validates one instrumentation event. Returns the 1-based index of the
// recorded event; -1 (reported via EXPECT) for unrecognized events.
uint32_t BeginEvent(const char* tag, EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) override {
int event_handle = -1;
if (event_type ==
Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT &&
std::string(tag) == kDelegateSettingsTag) {
event_buffer_.emplace_back();
event_handle = event_buffer_.size();
// event_metadata1 is a pointer to a TfLiteDelegate.
EXPECT_NE(event_metadata1, 0);
auto* delegate = reinterpret_cast<TfLiteDelegate*>(event_metadata1);
EXPECT_EQ(delegate->flags, kTfLiteDelegateFlagsNone);
// event_metadata2 is a pointer to TFLiteSettings.
EXPECT_NE(event_metadata2, 0);
auto* settings = reinterpret_cast<TFLiteSettings*>(event_metadata2);
EXPECT_EQ(settings->delegate(), kDummyDelegate);
EXPECT_EQ(settings->gpu_settings()->is_precision_loss_allowed(),
kDummyGpuPrecisionLossAllowed);
} else if (event_type ==
Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT &&
std::string(tag) == kDelegateStatusTag) {
event_buffer_.emplace_back();
event_handle = event_buffer_.size();
// For status events, metadata1 carries the packed DelegateStatus and
// metadata2 carries the TfLiteStatus.
EXPECT_EQ(event_metadata2, static_cast<int64_t>(kTfLiteOk));
DelegateStatus reported_status(event_metadata1);
EXPECT_EQ(reported_status.source(), kDummySource);
EXPECT_EQ(reported_status.code(), kDummyCode);
}
// Fails if neither telemetry tag matched.
EXPECT_NE(-1, event_handle);
return event_handle;
}
// The library ends each event immediately after beginning it, so the handle
// must always refer to the most recently recorded event.
void EndEvent(uint32_t event_handle) override {
EXPECT_EQ(event_handle, event_buffer_.size());
}
int NumRecordedEvents() { return event_buffer_.size(); }
private:
std::vector<profiling::ProfileEvent> event_buffer_;
};
// Each call to ReportDelegateStatus should record exactly one
// instrumentation event on the context's profiler.
TEST(TelemetryTest, DelegateStatusReport) {
  DelegateProfiler profiler;
  TfLiteDelegate delegate = TfLiteDelegateCreate();
  TfLiteContext context;
  context.profiler = &profiler;
  DelegateStatus status(kDummySource, kDummyCode);
  for (int report = 0; report < 2; ++report) {
    EXPECT_EQ(ReportDelegateStatus(&context, &delegate, status), kTfLiteOk);
  }
  EXPECT_EQ(profiler.NumRecordedEvents(), 2);
}
// Verifies that ReportDelegateSettings hands the delegate and settings
// pointers to the profiler (which validates them in BeginEvent), and that
// follow-up status reports keep accumulating events on the same profiler.
TEST(TelemetryTest, DelegateSettingsReport) {
  DelegateProfiler profiler;
  TfLiteDelegate delegate = TfLiteDelegateCreate();
  TfLiteContext context;
  context.profiler = &profiler;
  // Build a minimal TFLiteSettings flatbuffer with only GPU settings set.
  // GetTemporaryPointer lets us read the table before the builder finishes.
  flatbuffers::FlatBufferBuilder flatbuffer_builder;
  flatbuffers::Offset<tflite::GPUSettings> gpu_settings =
      tflite::CreateGPUSettings(
          flatbuffer_builder,
          // Fixed: was the malformed `/**is_precision_loss_allowed**/`;
          // the `/*name=*/` form matches the arguments below and is
          // verifiable by clang-tidy's argument-comment check.
          /*is_precision_loss_allowed=*/kDummyGpuPrecisionLossAllowed);
  auto* tflite_settings_ptr = flatbuffers::GetTemporaryPointer(
      flatbuffer_builder,
      CreateTFLiteSettings(flatbuffer_builder, kDummyDelegate,
                           /*nnapi_settings=*/0,
                           /*gpu_settings=*/gpu_settings));
  EXPECT_EQ(ReportDelegateSettings(&context, &delegate, *tflite_settings_ptr),
            kTfLiteOk);
  EXPECT_EQ(profiler.NumRecordedEvents(), 1);
  // Also report status to simulate typical use-case.
  DelegateStatus status(kDummySource, kDummyCode);
  EXPECT_EQ(ReportDelegateStatus(&context, &delegate, status), kTfLiteOk);
  EXPECT_EQ(ReportDelegateStatus(&context, &delegate, status), kTfLiteOk);
  EXPECT_EQ(profiler.NumRecordedEvents(), 3);
}
} // namespace
} // namespace delegates
} // namespace tflite

View File

@ -14,6 +14,7 @@
# ==============================================================================
load("@flatbuffers//:build_defs.bzl", "flatbuffer_cc_library", "flatbuffer_java_library", "flatc_path")
load("//tensorflow:tensorflow.bzl", "get_compatible_with_portable")
package(
default_visibility = [
@ -32,6 +33,7 @@ genrule(
$(location {}) --proto -o $(@D) $(location :configuration.proto)
perl -p -i -e 's/tflite.proto/tflite/' $(@D)/configuration.fbs
""".format(flatc_path),
compatible_with = get_compatible_with_portable(),
tools = [
flatc_path,
],
@ -68,6 +70,7 @@ java_lite_proto_library(
flatbuffer_cc_library(
name = "configuration_fbs",
srcs = [":configuration.fbs"],
compatible_with = get_compatible_with_portable(),
)
flatbuffer_java_library(

File diff suppressed because it is too large Load Diff

View File

@ -24,7 +24,7 @@ limitations under the License.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/delegates/status.h"
#include "tensorflow/lite/delegates/telemetry.h"
#include "tensorflow/lite/graph_info.h"
#include "tensorflow/lite/memory_planner.h"
#include "tensorflow/lite/minimal_logging.h"

View File

@ -345,14 +345,18 @@ all: $(LIB_PATH) $(MINIMAL_BINARY) $(BENCHMARK_BINARY) $(BENCHMARK_PERF_OPTIONS
# The target that's compiled for micro-controllers
micro: $(LIB_PATH)
# Hack for generating schema file bypassing flatbuffer parsing
# Hack for generating schema files bypassing flatbuffer parsing
tensorflow/lite/schema/schema_generated.h:
@cp -u tensorflow/lite/schema/schema_generated.h.oss tensorflow/lite/schema/schema_generated.h
@cp -u tensorflow/lite/experimental/acceleration/configuration/configuration_generated.h.oss tensorflow/lite/experimental/acceleration/configuration/configuration_generated.h
# Gathers together all the objects we've compiled into a single '.a' archive.
$(LIB_PATH): tensorflow/lite/schema/schema_generated.h $(LIB_OBJS)
@mkdir -p $(dir $@)
$(AR) $(ARFLAGS) $(LIB_PATH) $(LIB_OBJS)
$(LIB_PATH): tensorflow/lite/experimental/acceleration/configuration/configuration_generated.h $(LIB_OBJS)
@mkdir -p $(dir $@)
$(AR) $(ARFLAGS) $(LIB_PATH) $(LIB_OBJS)
lib: $(LIB_PATH)

View File

@ -18,6 +18,7 @@ tensorflow/lite/core/shims/cc/model_builder.h
tensorflow/lite/delegates/gpu/cl/compiled_program_cache_generated.h
tensorflow/lite/delegates/gpu/cl/serialization_generated.h
tensorflow/lite/delegates/gpu/common/task/serialization_base_generated.h
tensorflow/lite/experimental/acceleration/configuration/configuration_generated.h
tensorflow/lite/micro/build_def.bzl
tensorflow/lite/schema/schema_generated.h
tensorflow/python/autograph/core/config.py