Allow TFLite profiling without a build flag

Introduce a simple profiler interface that lets clients inject an
arbitrary profiler implementation at runtime, without requiring
special build flags.

PiperOrigin-RevId: 247044534
Jared Duke authored 2019-05-07 10:26:36 -07:00, committed by TensorFlower Gardener
parent c1b4525808
commit 75bd1d5c92
16 changed files with 311 additions and 214 deletions
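
As an editor's sketch of what this enables (LoggingProfiler and its body are illustrative, not part of the change): a client implements the new tflite::Profiler interface in ordinary application code, built without any TFLITE_PROFILING_ENABLED define, and attaches it to the runtime; the attachment itself is sketched after the interpreter.h hunk further down.

#include <cstdint>
#include <cstdio>
#include "tensorflow/lite/core/api/profiler.h"

// Hypothetical client-side profiler; only the tflite::Profiler interface it
// implements comes from this commit.
class LoggingProfiler : public tflite::Profiler {
 public:
  uint32_t BeginEvent(const char* tag, EventType /*event_type*/,
                      uint32_t event_metadata) override {
    std::printf("begin #%u: %s (metadata=%u)\n",
                static_cast<unsigned>(next_handle_), tag,
                static_cast<unsigned>(event_metadata));
    return next_handle_++;
  }
  void EndEvent(uint32_t event_handle) override {
    std::printf("end #%u\n", static_cast<unsigned>(event_handle));
  }

 private:
  uint32_t next_handle_ = 1;
};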

View File

@@ -17,6 +17,7 @@ cc_library(
"error_reporter.h",
"flatbuffer_conversions.h",
"op_resolver.h",
"profiler.h",
],
copts = tflite_copts(),
deps = [

View File

@@ -0,0 +1,85 @@
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_CORE_API_PROFILER_H_
#define TENSORFLOW_LITE_CORE_API_PROFILER_H_
#include <cstdint>
namespace tflite {
// A simple utility for enabling profiled event tracing in TensorFlow Lite.
class Profiler {
public:
enum class EventType {
// Default event type; the metadata field has no special significance.
DEFAULT = 0,
// The event is an operator invocation, and the event_metadata field is the
// index of the operator node.
OPERATOR_INVOKE_EVENT = 1
};
virtual ~Profiler() {}
// Signals the beginning of an event, returning a handle to the profile event.
virtual uint32_t BeginEvent(const char* tag, EventType event_type,
uint32_t event_metadata) = 0;
// Signals an end to the specified profile event.
virtual void EndEvent(uint32_t event_handle) = 0;
};
// Adds a profile event to `profiler` that begins with the construction
// of the object and ends when the object goes out of scope.
// The lifetime of tag should be at least the lifetime of `profiler`.
// `profiler` may be null, in which case nothing is profiled.
class ScopedProfile {
public:
ScopedProfile(Profiler* profiler, const char* tag,
Profiler::EventType event_type = Profiler::EventType::DEFAULT,
uint32_t event_metadata = 0)
: profiler_(profiler), event_handle_(0) {
if (profiler) {
event_handle_ = profiler_->BeginEvent(tag, event_type, event_metadata);
}
}
~ScopedProfile() {
if (profiler_) {
profiler_->EndEvent(event_handle_);
}
}
private:
Profiler* const profiler_;
uint32_t event_handle_;
};
class ScopedOperatorProfile : public ScopedProfile {
public:
ScopedOperatorProfile(Profiler* profiler, const char* tag, int node_index)
: ScopedProfile(profiler, tag, Profiler::EventType::OPERATOR_INVOKE_EVENT,
static_cast<uint32_t>(node_index)) {}
};
} // namespace tflite
#define TFLITE_VARNAME_UNIQ(name, ctr) name##ctr
#define TFLITE_SCOPED_TAGGED_OPERATOR_PROFILE(profiler, tag, node_index) \
tflite::ScopedOperatorProfile TFLITE_VARNAME_UNIQ(_profile_, __COUNTER__)( \
(profiler), (tag), (node_index))
#define TFLITE_SCOPED_OPERATOR_PROFILE(profiler, node_index) \
TFLITE_SCOPED_TAGGED_OPERATOR_PROFILE((profiler), "OpInvoke", (node_index))
#endif // TENSORFLOW_LITE_CORE_API_PROFILER_H_
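
The RAII helpers and macros above are meant to be dropped into code that may or may not have a profiler attached; Subgraph::Invoke below uses the macro form. A minimal sketch, with RunNode and its body invented for illustration:

#include "tensorflow/lite/core/api/profiler.h"

void RunNode(tflite::Profiler* profiler, int node_index) {
  // Expands to a uniquely named tflite::ScopedOperatorProfile whose
  // constructor calls BeginEvent("OpInvoke", OPERATOR_INVOKE_EVENT,
  // node_index) and whose destructor calls EndEvent().
  TFLITE_SCOPED_OPERATOR_PROFILE(profiler, node_index);
  {
    // A finer-grained event with an explicit tag. A null `profiler` is safe:
    // nothing is recorded.
    tflite::ScopedProfile prepare(profiler, "PrepareInputs");
    // ... prepare inputs ...
  }
  // ... evaluate the node ...
}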

View File

@@ -700,7 +700,7 @@ TfLiteStatus Subgraph::Invoke() {
TfLiteNode& node = nodes_and_registration_[node_index].first;
const TfLiteRegistration& registration =
nodes_and_registration_[node_index].second;
SCOPED_OPERATOR_PROFILE(profiler_, node_index);
TFLITE_SCOPED_OPERATOR_PROFILE(profiler_, node_index);
// TODO(ycling): This is an extra loop through inputs to check if the data
// need to be copied from Delegate buffer to raw memory, which is often not

View File

@@ -20,9 +20,9 @@ limitations under the License.
#include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include "tensorflow/lite/memory_planner.h"
#include "tensorflow/lite/profiling/profiler.h"
#include "tensorflow/lite/util.h"
namespace tflite {
@@ -276,12 +276,12 @@ class Subgraph {
// WARNING: This is an experimental API and subject to change.
TfLiteStatus ResetVariableTensors();
void SetProfiler(profiling::Profiler* profiler) {
void SetProfiler(Profiler* profiler) {
profiler_ = profiler;
context_->profiler = profiler;
}
profiling::Profiler* GetProfiler() { return profiler_; }
Profiler* GetProfiler() { return profiler_; }
// Returns a pointer to vector of subgraphs.
// WARNING: This is an experimental API and subject to change.
@@ -527,7 +527,7 @@ class Subgraph {
TfLiteExternalContext** external_contexts_;
// Profiler for this interpreter instance.
profiling::Profiler* profiler_ = nullptr;
Profiler* profiler_ = nullptr;
// A pointer to vector of subgraphs. The vector is owned by the interpreter.
std::vector<std::unique_ptr<Subgraph>>* subgraphs_ = nullptr;

View File

@@ -154,11 +154,11 @@ cc_library(
":delegate_data",
":util",
"@flatbuffers",
"//tensorflow/lite/core/api",
"//tensorflow/lite/c:c_api_internal",
"//tensorflow/lite:kernel_api",
"//tensorflow/lite:string",
"//tensorflow/lite/kernels:kernel_util",
"//tensorflow/lite/profiling:profiler",
] + select({
# TODO(b/111881878): The android_tensorflow_lib target pulls in the full
# set of core TensorFlow kernels. We may want to revisit this dependency

View File

@@ -24,10 +24,10 @@ limitations under the License.
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/delegates/flex/delegate_data.h"
#include "tensorflow/lite/delegates/flex/util.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/profiling/profiler.h"
#include "tensorflow/lite/string.h"
// Note: this is part of TF Lite's Flex delegation code which is to be
@@ -529,8 +529,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
// Execute the TensorFlow Ops sequentially.
for (auto& node_data : op_data->nodes) {
SCOPED_TAGGED_OPERATOR_PROFILE(
reinterpret_cast<profiling::Profiler*>(context->profiler),
TFLITE_SCOPED_TAGGED_OPERATOR_PROFILE(
reinterpret_cast<Profiler*>(context->profiler),
node_data->name().c_str(), node_data->index());
auto status = ExecuteFlexOp(context, buffer_map, node_data.get());

View File

@@ -256,11 +256,11 @@ TfLiteStatus Interpreter::GetBufferHandle(int tensor_index,
return kTfLiteOk;
}
void Interpreter::SetProfiler(profiling::Profiler* profiler) {
void Interpreter::SetProfiler(Profiler* profiler) {
for (auto& subgraph : subgraphs_) subgraph->SetProfiler(profiler);
}
profiling::Profiler* Interpreter::GetProfiler() {
Profiler* Interpreter::GetProfiler() {
return primary_subgraph().GetProfiler();
}

View File

@@ -402,9 +402,14 @@ class Interpreter {
TfLiteBufferHandle* buffer_handle,
TfLiteDelegate** delegate);
void SetProfiler(profiling::Profiler* profiler);
// Sets the profiler used for tracing execution. The caller retains ownership
// of the profiler and must ensure it remains valid while in use.
// WARNING: This is an experimental API and subject to change.
void SetProfiler(Profiler* profiler);
profiling::Profiler* GetProfiler();
// Gets the profiler used for op tracing.
// WARNING: This is an experimental API and subject to change.
Profiler* GetProfiler();
// The default capacity of `tensors_` vector.
static constexpr int kTensorsReservedCapacity = 128;
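
To make the ownership comment above concrete, an editor's sketch (RunWithProfiling is illustrative) of attaching a caller-owned profiler around an invocation:

#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/profiling/buffered_profiler.h"

// The interpreter stores only the raw pointer, so `profiler` must stay alive
// for every Invoke() that runs while it is installed.
void RunWithProfiling(tflite::Interpreter* interpreter) {
  tflite::profiling::BufferedProfiler profiler;  // caller-owned
  interpreter->SetProfiler(&profiler);
  profiler.StartProfiling();
  const TfLiteStatus status = interpreter->Invoke();
  profiler.StopProfiling();
  // Detach before `profiler` goes out of scope so the interpreter is not left
  // holding a dangling pointer.
  interpreter->SetProfiler(nullptr);
  (void)status;  // check against kTfLiteOk in real code
}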

View File

@@ -10,16 +10,21 @@ common_copts = [
cc_library(
name = "profiler",
hdrs = ["profiler.h"],
hdrs = [
"buffered_profiler.h",
"noop_profiler.h",
"profiler.h",
],
copts = common_copts,
deps = [":profile_buffer"],
deps = [
":profile_buffer",
"//tensorflow/lite/core/api",
],
)
cc_test(
name = "profiler_test",
srcs = ["profiler_test.cc"],
copts = ["-DTFLITE_PROFILING_ENABLED"],
defines = ["TFLITE_PROFILING_ENABLED"],
deps = [
":profiler",
"//tensorflow/lite/testing:util",
@@ -31,7 +36,10 @@ cc_library(
name = "profile_buffer",
hdrs = ["profile_buffer.h"],
copts = common_copts,
deps = [":time"],
deps = [
":time",
"//tensorflow/lite/core/api",
],
)
cc_library(
@@ -58,7 +66,7 @@ cc_library(
hdrs = ["profile_summarizer.h"],
copts = common_copts,
deps = [
":profiler",
":profile_buffer",
"//tensorflow/core:stats_calculator_portable",
"//tensorflow/lite:framework",
"//tensorflow/lite/schema:schema_fbs",
@@ -83,8 +91,6 @@ cc_test(
cc_test(
name = "profile_buffer_test",
srcs = ["profile_buffer_test.cc"],
copts = ["-DTFLITE_PROFILING_ENABLED"],
defines = ["TFLITE_PROFILING_ENABLED"],
deps = [
":profile_buffer",
"//tensorflow/lite/testing:util",

View File

@@ -0,0 +1,108 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_PROFILING_BUFFERED_PROFILER_H_
#define TENSORFLOW_LITE_PROFILING_BUFFERED_PROFILER_H_
#include <vector>
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/profiling/profile_buffer.h"
namespace tflite {
namespace profiling {
// Controls whether profiling is enabled or disabled and collects profiles.
// TFLite is used on platforms that don't have posix threads, so the profiler is
// kept as simple as possible. It is designed to be used only on a single
// thread.
//
// Profiles are collected using Scoped*Profile objects that begin and end a
// profile event.
// Example usage is shown below:
//
// Say the Worker class has a DoWork method, and we are interested in
// profiling the overall execution time of DoWork and the time spent in the
// Task1 and Task2 functions.
//
// class Worker {
// public:
// void DoWork() {
// ScopedProfile(&controller, "DoWork");
// Task1();
// Task2();
// .....
// }
//
// void Task1() {
// ScopedProfile(&controller, "Task1");
// ....
// }
//
// void Task2() {
// ScopedProfile(&controller, "Task2");
// }
//
// Profiler profiler;
// }
//
// We instrument the functions that need to be profiled.
//
// Profiles can be collected by enabling profiling and then retrieving the
// profile events.
//
// void ProfileWorker() {
// Worker worker;
// worker.profiler.EnableProfiling();
// worker.DoWork();
// worker.profiler.DisableProfiling();
// // Profiling is complete, extract profiles.
// auto profile_events = worker.profiler.GetProfiles();
// }
//
//
class BufferedProfiler : public tflite::Profiler {
public:
BufferedProfiler() : buffer_(1024, false) {}
uint32_t BeginEvent(const char* tag, EventType event_type,
uint32_t event_metadata) override {
return buffer_.BeginEvent(tag, event_type, event_metadata);
}
void EndEvent(uint32_t event_handle) override {
buffer_.EndEvent(event_handle);
}
void StartProfiling() { buffer_.SetEnabled(true); }
void StopProfiling() { buffer_.SetEnabled(false); }
void Reset() { buffer_.Reset(); }
std::vector<const ProfileEvent*> GetProfileEvents() {
std::vector<const ProfileEvent*> profile_events;
profile_events.reserve(buffer_.Size());
for (size_t i = 0; i < buffer_.Size(); i++) {
profile_events.push_back(buffer_.At(i));
}
return profile_events;
}
private:
ProfileBuffer* GetProfileBuffer() { return &buffer_; }
ProfileBuffer buffer_;
};
} // namespace profiling
} // namespace tflite
#endif // TENSORFLOW_LITE_PROFILING_BUFFERED_PROFILER_H_
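
A compilable version of the collection flow sketched in the header comment, where DoWork stands in for instrumented client code:

#include <cstdio>
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/profiling/buffered_profiler.h"

void DoWork(tflite::Profiler* profiler) {
  tflite::ScopedProfile profile(profiler, "DoWork");
  // ... work to be measured ...
}

int main() {
  tflite::profiling::BufferedProfiler profiler;
  profiler.StartProfiling();
  DoWork(&profiler);
  profiler.StopProfiling();
  for (const auto* event : profiler.GetProfileEvents()) {
    std::printf("%s: %.3f ms\n", event->tag,
                (event->end_timestamp_us - event->begin_timestamp_us) / 1e3);
  }
  return 0;
}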

View File

@@ -0,0 +1,43 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_PROFILING_NOOP_PROFILER_H_
#define TENSORFLOW_LITE_PROFILING_NOOP_PROFILER_H_
#include <vector>
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/profiling/profile_buffer.h"
namespace tflite {
namespace profiling {
// A noop version of profiler when profiling is disabled.
class NoopProfiler : public tflite::Profiler {
public:
NoopProfiler() {}
uint32_t BeginEvent(const char*, EventType, uint32_t) override { return 0; }
void EndEvent(uint32_t) override {}
void StartProfiling() {}
void StopProfiling() {}
void Reset() {}
std::vector<const ProfileEvent*> GetProfileEvents() { return {}; }
};
} // namespace profiling
} // namespace tflite
#endif // TENSORFLOW_LITE_PROFILING_NOOP_PROFILER_H_

View File

@@ -15,27 +15,27 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_PROFILING_PROFILE_BUFFER_H_
#define TENSORFLOW_LITE_PROFILING_PROFILE_BUFFER_H_
#include <sys/time.h>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/profiling/time.h"
namespace tflite {
namespace profiling {
constexpr uint32_t kInvalidEventHandle = static_cast<uint32_t>(~0) - 1;
// A profiling event.
struct ProfileEvent {
// Describes the type of event.
// The event_metadata field may contain additional data for interpreting
// the event.
enum class EventType {
// Default event type, the metadata field has no special significance.
DEFAULT = 0,
// The event is an operator invocation and the event_metadata field is the
// index of operator node.
OPERATOR_INVOKE_EVENT = 1
};
using EventType = tflite::Profiler::EventType;
// Label of the event. This usually describes the event.
const char* tag;
@@ -49,17 +49,6 @@ struct ProfileEvent {
// Extra data describing the details of the event.
uint32_t event_metadata;
};
} // namespace profiling
} // namespace tflite
#ifdef TFLITE_PROFILING_ENABLED
#include <sys/time.h>
#include <vector>
namespace tflite {
namespace profiling {
constexpr uint32_t kInvalidEventHandle = static_cast<uint32_t>(~0) - 1;
// A ring buffer of profile events.
// This class is not thread safe.
@@ -128,7 +117,7 @@ class ProfileBuffer {
// Returns the profile event at the given index. If the index is invalid a
// nullptr is returned. The return event may get overwritten if more events
// are added to buffer.
const struct ProfileEvent* const At(size_t index) const {
const struct ProfileEvent* At(size_t index) const {
size_t size = Size();
if (index >= size) {
return nullptr;
@@ -145,7 +134,8 @@ class ProfileBuffer {
uint32_t current_index_;
std::vector<ProfileEvent> event_buffer_;
};
} // namespace profiling
} // namespace tflite
#endif // TFLITE_PROFILING_ENABLED
#endif // TENSORFLOW_LITE_PROFILING_PROFILE_BUFFER_H_

View File

@@ -18,9 +18,9 @@ limitations under the License.
#include <vector>
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/profiling/profiler.h"
#include "tensorflow/core/util/stats_calculator.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/profiling/profile_buffer.h"
namespace tflite {
namespace profiling {

View File

@@ -33,7 +33,6 @@ namespace {
const char* kOpName = "SimpleOpEval";
#ifdef TFLITE_PROFILING_ENABLED
TfLiteStatus SimpleOpEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input1 = tflite::GetInput(context, node, /*index=*/0);
const TfLiteTensor* input2 = tflite::GetInput(context, node, /*index=*/1);
@@ -69,7 +68,6 @@ TfLiteRegistration* RegisterSimpleOpWithProfilingDetails() {
1};
return &registration;
}
#endif
class SimpleOpModel : public SingleOpModel {
public:
@@ -101,9 +99,8 @@ TEST(ProfileSummarizerTest, Empty) {
EXPECT_GT(output.size(), 0);
}
#ifdef TFLITE_PROFILING_ENABLED
TEST(ProfileSummarizerTest, Interpreter) {
Profiler profiler;
BufferedProfiler profiler;
SimpleOpModel m;
m.Init(RegisterSimpleOp);
auto interpreter = m.GetInterpreter();
@@ -124,7 +121,7 @@ TEST(ProfileSummarizerTest, Interpreter) {
}
TEST(ProfileSummarizerTest, InterpreterPlusProfilingDetails) {
Profiler profiler;
BufferedProfiler profiler;
SimpleOpModel m;
m.Init(RegisterSimpleOpWithProfilingDetails);
auto interpreter = m.GetInterpreter();
@@ -145,8 +142,6 @@ TEST(ProfileSummarizerTest, InterpreterPlusProfilingDetails) {
<< output;
}
#endif
} // namespace
} // namespace profiling
} // namespace tflite

View File

@@ -15,168 +15,23 @@ limitations under the License.
#ifndef TENSORFLOW_LITE_PROFILING_PROFILER_H_
#define TENSORFLOW_LITE_PROFILING_PROFILER_H_
#include <vector>
#include "tensorflow/lite/profiling/buffered_profiler.h"
#include "tensorflow/lite/profiling/noop_profiler.h"
#include "tensorflow/lite/profiling/profile_buffer.h"
namespace tflite {
namespace profiling {
// TODO(b/131688504): Remove this and use runtime flags for profiler selection.
#ifdef TFLITE_PROFILING_ENABLED
namespace tflite {
namespace profiling {
class ScopedProfile;
class ScopedOperatorProfile;
// Controls whether profiling is enabled or disabled and collects profiles.
// TFLite is used on platforms that don't have posix threads, so the profiler is
// kept as simple as possible. It is designed to be used only on a single
// thread.
//
// Profiles are collected using Scoped*Profile objects that begin and end a
// profile event.
// An example usage is shown in the example below:
//
// Say Worker class has a DoWork method and we are interested in profiling
// the overall execution time for DoWork and time spent in Task1 and Task2
// functions.
//
// class Worker {
// public:
// void DoWork() {
// ScopedProfile(&controller, "DoWork");
// Task1();
// Task2();
// .....
// }
//
// void Task1() {
// ScopedProfile(&controller, "Task1");
// ....
// }
//
// void Task2() {
// ScopedProfile(&controller, "Task2");
// }
//
// Profiler profiler;
// }
//
// We instrument the functions that need to be profiled.
//
// Profile can be collected by enable profiling and then getting profile
// events.
//
// void ProfileWorker() {
// Worker worker;
// worker.profiler.EnableProfiling();
// worker.DoWork();
// worker.profiler.DisableProfiling();
// // Profiling is complete, extract profiles.
// auto profile_events = worker.profiler.GetProfiles();
// }
//
//
class Profiler {
public:
Profiler() : buffer_(1024, false) {}
void StartProfiling() { buffer_.SetEnabled(true); }
void StopProfiling() { buffer_.SetEnabled(false); }
void Reset() { buffer_.Reset(); }
std::vector<const ProfileEvent*> GetProfileEvents() {
std::vector<const ProfileEvent*> profile_events;
profile_events.reserve(buffer_.Size());
for (size_t i = 0; i < buffer_.Size(); i++) {
profile_events.push_back(buffer_.At(i));
}
return profile_events;
}
private:
friend class ScopedProfile;
friend class ScopedOperatorProfile;
ProfileBuffer* GetProfileBuffer() { return &buffer_; }
ProfileBuffer buffer_;
};
class ScopedProfile {
public:
// Adds a profile event to profile that begins with the construction
// of object and ends when the object goes out of scope.
// The lifetime of tag should be at least the lifetime of profiler.
ScopedProfile(Profiler* profiler, const char* tag)
: buffer_(nullptr), event_handle_(0) {
if (profiler) {
buffer_ = profiler->GetProfileBuffer();
event_handle_ =
buffer_->BeginEvent(tag, ProfileEvent::EventType::DEFAULT, 0);
}
}
~ScopedProfile() {
if (buffer_) {
buffer_->EndEvent(event_handle_);
}
}
private:
ProfileBuffer* buffer_;
int32_t event_handle_;
};
class ScopedOperatorProfile {
public:
// Adds a profile event to profile that begins with the construction
// of object and ends when the object goes out of scope.
// The lifetime of tag should be at least the lifetime of profiler.
ScopedOperatorProfile(Profiler* profiler, const char* tag, int node_index)
: buffer_(nullptr), event_handle_(0) {
if (profiler) {
buffer_ = profiler->GetProfileBuffer();
event_handle_ = buffer_->BeginEvent(
tag, ProfileEvent::EventType::OPERATOR_INVOKE_EVENT, node_index);
}
}
~ScopedOperatorProfile() {
if (buffer_) {
buffer_->EndEvent(event_handle_);
}
}
private:
ProfileBuffer* buffer_;
int32_t event_handle_;
};
} // namespace profiling
} // namespace tflite
#define VARNAME_UNIQ(name, ctr) name##ctr
#define SCOPED_TAGGED_OPERATOR_PROFILE(profiler, tag, node_index) \
tflite::profiling::ScopedOperatorProfile VARNAME_UNIQ( \
_profile_, __COUNTER__)((profiler), (tag), (node_index))
#define SCOPED_OPERATOR_PROFILE(profiler, node_index) \
SCOPED_TAGGED_OPERATOR_PROFILE((profiler), "OpInvoke", (node_index))
using Profiler = BufferedProfiler;
#else
namespace tflite {
namespace profiling {
// A noop version of profiler when profiling is disabled.
class Profiler {
public:
Profiler() {}
void StartProfiling() {}
void StopProfiling() {}
void Reset() {}
std::vector<const ProfileEvent*> GetProfileEvents() { return {}; }
};
} // namespace profiling
} // namespace tflite
#define SCOPED_TAGGED_OPERATOR_PROFILE(profiler, tag, node_index)
#define SCOPED_OPERATOR_PROFILE(profiler, node_index)
using Profiler = NoopProfiler;
#endif // TFLITE_PROFILING_ENABLED
} // namespace profiling
} // namespace tflite
#define SCOPED_TAGGED_OPERATOR_PROFILE TFLITE_SCOPED_TAGGED_OPERATOR_PROFILE
#define SCOPED_OPERATOR_PROFILE TFLITE_SCOPED_OPERATOR_PROFILE
#endif // TENSORFLOW_LITE_PROFILING_PROFILER_H_
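
For existing call sites, this compatibility header keeps the old spellings working in both build modes; a sketch of what that means in practice (LegacyInstrumented is illustrative):

#include "tensorflow/lite/profiling/profiler.h"

// Builds with and without -DTFLITE_PROFILING_ENABLED: the alias makes
// profiling::Profiler either BufferedProfiler or NoopProfiler, and the legacy
// macros now forward to the TFLITE_-prefixed ones instead of compiling away.
void LegacyInstrumented(tflite::profiling::Profiler* profiler) {
  SCOPED_OPERATOR_PROFILE(profiler, /*node_index=*/0);
}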

View File

@@ -31,17 +31,17 @@ double GetDurationOfEventMs(const ProfileEvent* event) {
return (event->end_timestamp_us - event->begin_timestamp_us) / 1e3;
}
void SleepForQuarterSecond(Profiler* profiler) {
void SleepForQuarterSecond(tflite::Profiler* profiler) {
ScopedProfile profile(profiler, "SleepForQuarter");
std::this_thread::sleep_for(std::chrono::milliseconds(250));
}
void ChildFunction(Profiler* profiler) {
void ChildFunction(tflite::Profiler* profiler) {
ScopedProfile profile(profiler, "Child");
SleepForQuarterSecond(profiler);
}
void ParentFunction(Profiler* profiler) {
void ParentFunction(tflite::Profiler* profiler) {
ScopedProfile profile(profiler, "Parent");
for (int i = 0; i < 2; i++) {
ChildFunction(profiler);
@@ -49,14 +49,14 @@ void ParentFunction(Profiler* profiler) {
}
TEST(ProfilerTest, NoProfilesAreCollectedWhenDisabled) {
Profiler profiler;
BufferedProfiler profiler;
ParentFunction(&profiler);
auto profile_events = profiler.GetProfileEvents();
EXPECT_EQ(0, profile_events.size());
}
TEST(ProfilingTest, ProfilesAreCollected) {
Profiler profiler;
BufferedProfiler profiler;
profiler.StartProfiling();
ParentFunction(&profiler);
profiler.StopProfiling();
@@ -101,7 +101,7 @@ TEST(ProfilingTest, NullProfiler) {
}
TEST(ProfilingTest, ScopedProfile) {
Profiler profiler;
BufferedProfiler profiler;
profiler.StartProfiling();
{ SCOPED_OPERATOR_PROFILE(&profiler, 1); }
profiler.StopProfiling();
@@ -109,6 +109,15 @@ TEST(ProfilingTest, ScopedProfile) {
EXPECT_EQ(1, profile_events.size());
}
TEST(ProfilingTest, NoopProfiler) {
NoopProfiler profiler;
profiler.StartProfiling();
{ SCOPED_OPERATOR_PROFILE(&profiler, 1); }
profiler.StopProfiling();
auto profile_events = profiler.GetProfileEvents();
EXPECT_EQ(0, profile_events.size());
}
} // namespace
} // namespace profiling
} // namespace tflite