Remove ProfilerContext (no longer used)

PiperOrigin-RevId: 259983256
This commit is contained in:
A. Unique TensorFlower 2019-07-25 11:11:53 -07:00 committed by TensorFlower Gardener
parent 625f9aad07
commit 4024cedbc1
22 changed files with 71 additions and 154 deletions

View File

@ -32,9 +32,7 @@ void TFE_OpConsumeInput(TFE_Op* op, TFE_TensorHandle* h, TF_Status* status) {
op->operation.ConsumeInput(h->handle);
}
TFE_Profiler* TFE_NewProfiler(TFE_ProfilerContext* ctx) {
return new TFE_Profiler(ctx);
}
TFE_Profiler* TFE_NewProfiler() { return new TFE_Profiler(); }
bool TFE_ProfilerIsOk(TFE_Profiler* profiler) {
return profiler->profiler->Status().ok();
@ -55,23 +53,10 @@ void TFE_ProfilerSerializeToString(TFE_Profiler* profiler, TF_Buffer* buf,
};
}
TFE_ProfilerContext* TFE_NewProfilerContext() {
return new TFE_ProfilerContext;
}
void TFE_ProfilerContextSetEagerContext(TFE_ProfilerContext* profiler_context,
TFE_Context* eager_context) {
profiler_context->profiler_context.eager_context = eager_context->context;
}
void TFE_DeleteProfilerContext(TFE_ProfilerContext* profiler_context) {
delete profiler_context;
}
void TFE_StartProfilerServer(TFE_ProfilerContext* context, int port) {
// Release child thread intentionally. The child thread can be terminate by
void TFE_StartProfilerServer(int port) {
// Release child thread intentionally. The child thread can be terminated by
// terminating the main thread.
tensorflow::StartProfilerServer(&context->profiler_context, port).release();
tensorflow::StartProfilerServer(port).release();
}
void TFE_ContextEnableGraphCollection(TFE_Context* ctx) {

View File

@ -25,8 +25,6 @@ extern "C" {
TF_CAPI_EXPORT extern void TFE_OpConsumeInput(TFE_Op* op, TFE_TensorHandle* h,
TF_Status* status);
typedef struct TFE_ProfilerContext TFE_ProfilerContext;
// A profiler which will start profiling when creating the object and will stop
// when the object is destroyed. It will profile all operations run under the
// given TFE_Context. Multiple instances of it can be created, but at most one
@ -34,7 +32,7 @@ typedef struct TFE_ProfilerContext TFE_ProfilerContext;
// Thread-safety: TFE_Profiler is thread-safe.
typedef struct TFE_Profiler TFE_Profiler;
TF_CAPI_EXPORT extern TFE_Profiler* TFE_NewProfiler(TFE_ProfilerContext* ctx);
TF_CAPI_EXPORT extern TFE_Profiler* TFE_NewProfiler();
TF_CAPI_EXPORT extern bool TFE_ProfilerIsOk(TFE_Profiler* profiler);
TF_CAPI_EXPORT extern void TFE_DeleteProfiler(TFE_Profiler* profiler);
@ -44,27 +42,14 @@ TF_CAPI_EXPORT extern void TFE_ProfilerSerializeToString(TFE_Profiler* profiler,
TF_Buffer* buf,
TF_Status* status);
// Return a new profiler context object.
TF_CAPI_EXPORT extern TFE_ProfilerContext* TFE_NewProfilerContext(void);
// Set the eager context in TFE_ProfilerServerOptions
TF_CAPI_EXPORT extern void TFE_ProfilerContextSetEagerContext(
TFE_ProfilerContext* profiler_context, TFE_Context* eager_context);
// Destroy a profiler context object.
TF_CAPI_EXPORT extern void TFE_DeleteProfilerContext(
TFE_ProfilerContext* profiler_context);
// Start a profiler grpc server which listens to specified port. It will start
// the server on its own thread. It can be shutdown by terminating tensorflow.
// It can be used in both Eager mode and graph mode. Creating multiple profiler
// server is allowed. The service defined in
// tensorflow/contrib/tpu/profiler/tpu_profiler.proto. Please use
// tensorflow/contrib/tpu/profiler/capture_tpu_profile to capture tracable
// file following
// https://cloud.google.com/tpu/docs/cloud-tpu-tools#capture_trace.
TF_CAPI_EXPORT extern void TFE_StartProfilerServer(TFE_ProfilerContext* context,
int port);
// tensorflow/contrib/tpu/profiler/capture_tpu_profile to capture trace file
// following https://cloud.google.com/tpu/docs/cloud-tpu-tools#capture_trace.
TF_CAPI_EXPORT extern void TFE_StartProfilerServer(int port);
// Enables only graph collection in RunMetadata on the functions executed from
// this context.

View File

@ -43,12 +43,9 @@ void ExecuteWithProfiling(bool async) {
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(async));
TFE_Context* ctx = TFE_NewContext(opts, status);
TFE_ProfilerContext* profiler_context = TFE_NewProfilerContext();
TFE_ProfilerContextSetEagerContext(profiler_context, ctx);
TFE_Profiler* profiler = TFE_NewProfiler(profiler_context);
TFE_Profiler* profiler = TFE_NewProfiler();
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_DeleteProfilerContext(profiler_context);
TFE_TensorHandle* m = TestMatrixTensorHandle();
TFE_Op* matmul = MatMulOp(ctx, m, m);
@ -110,27 +107,14 @@ TEST(CAPI, ExecuteWithTracing) { ExecuteWithProfiling(false); }
TEST(CAPI, ExecuteWithTracingAsync) { ExecuteWithProfiling(true); }
TEST(CAPI, MultipleProfilerSession) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_ContextOptionsSetAsync(opts, static_cast<unsigned char>(false));
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_ProfilerContext* profiler_context = TFE_NewProfilerContext();
TFE_ProfilerContextSetEagerContext(profiler_context, ctx);
TFE_Profiler* profiler1 = TFE_NewProfiler(profiler_context);
TFE_Profiler* profiler1 = TFE_NewProfiler();
EXPECT_TRUE(TFE_ProfilerIsOk(profiler1));
TFE_Profiler* profiler2 = TFE_NewProfiler(profiler_context);
TFE_Profiler* profiler2 = TFE_NewProfiler();
EXPECT_FALSE(TFE_ProfilerIsOk(profiler2));
TFE_DeleteProfiler(profiler1);
TFE_DeleteProfiler(profiler2);
TFE_DeleteProfilerContext(profiler_context);
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
}
TEST(CAPI, MonitoringCounter0) {

View File

@ -130,14 +130,8 @@ struct TFE_Op {
std::unique_ptr<TFE_OpInferenceContext> inference_ctx;
};
struct TFE_ProfilerContext {
tensorflow::ProfilerContext profiler_context;
};
struct TFE_Profiler {
explicit TFE_Profiler(TFE_ProfilerContext* ctx) {
profiler = tensorflow::ProfilerSession::Create(&ctx->profiler_context);
}
explicit TFE_Profiler() { profiler = tensorflow::ProfilerSession::Create(); }
std::unique_ptr<tensorflow::ProfilerSession> profiler;
};

View File

@ -590,7 +590,7 @@ Status DirectSession::RunInternal(
std::unique_ptr<ProfilerSession> profiler_session;
if (run_options.trace_level() >= RunOptions::HARDWARE_TRACE) {
profiler_session = ProfilerSession::Create(/*ProfilerContext*/ nullptr);
profiler_session = ProfilerSession::Create();
}
if (run_options.inter_op_thread_pool() < -1 ||

View File

@ -196,8 +196,7 @@ void Worker::DoRunGraph(CallOptions* opts, RunGraphRequestWrapper* request,
ProfilerSession* profiler_session = nullptr;
if (collector && request->exec_opts().record_timeline()) {
// If timeline was requested, assume we want hardware level tracing.
profiler_session =
ProfilerSession::Create(/*ProfilerContext*/ nullptr).release();
profiler_session = ProfilerSession::Create().release();
}
CancellationManager* cm = new CancellationManager;
opts->SetCancelCallback([this, cm, step_id]() {

View File

@ -653,8 +653,7 @@ Status DeviceTracer::CollectData(RunMetadata* run_metadata) {
} // namespace
// Not in anonymous namespace for testing purposes.
std::unique_ptr<profiler::ProfilerInterface> CreateDeviceTracer(
const ProfilerContext*) {
std::unique_ptr<profiler::ProfilerInterface> CreateDeviceTracer() {
auto status = cuInit(0);
if (status != CUDA_SUCCESS) {
LogIfError(ToStatus(status));

View File

@ -39,15 +39,12 @@ limitations under the License.
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
struct ProfilerContext;
#if GOOGLE_CUDA
std::unique_ptr<profiler::ProfilerInterface> CreateDeviceTracer(
const ProfilerContext*);
std::unique_ptr<profiler::ProfilerInterface> CreateDeviceTracer();
#else
// We don't have device tracer for non-cuda case.
std::unique_ptr<profiler::ProfilerInterface> CreateDeviceTracer(
const ProfilerContext*) {
std::unique_ptr<profiler::ProfilerInterface> CreateDeviceTracer() {
return nullptr;
}
#endif
@ -111,21 +108,21 @@ class DeviceTracerTest : public ::testing::Test {
};
TEST_F(DeviceTracerTest, StartStop) {
auto tracer = CreateDeviceTracer(nullptr);
auto tracer = CreateDeviceTracer();
if (!tracer) return;
TF_EXPECT_OK(tracer->Start());
TF_EXPECT_OK(tracer->Stop());
}
TEST_F(DeviceTracerTest, StopBeforeStart) {
auto tracer = CreateDeviceTracer(nullptr);
auto tracer = CreateDeviceTracer();
if (!tracer) return;
TF_EXPECT_OK(tracer->Stop());
TF_EXPECT_OK(tracer->Stop());
}
TEST_F(DeviceTracerTest, CollectBeforeStart) {
auto tracer = CreateDeviceTracer(nullptr);
auto tracer = CreateDeviceTracer();
if (!tracer) return;
RunMetadata run_metadata;
TF_EXPECT_OK(tracer->CollectData(&run_metadata));
@ -133,7 +130,7 @@ TEST_F(DeviceTracerTest, CollectBeforeStart) {
}
TEST_F(DeviceTracerTest, CollectBeforeStop) {
auto tracer = CreateDeviceTracer(nullptr);
auto tracer = CreateDeviceTracer();
if (!tracer) return;
TF_EXPECT_OK(tracer->Start());
RunMetadata run_metadata;
@ -143,8 +140,8 @@ TEST_F(DeviceTracerTest, CollectBeforeStop) {
}
TEST_F(DeviceTracerTest, StartTwoTracers) {
auto tracer1 = CreateDeviceTracer(nullptr);
auto tracer2 = CreateDeviceTracer(nullptr);
auto tracer1 = CreateDeviceTracer();
auto tracer2 = CreateDeviceTracer();
if (!tracer1 || !tracer2) return;
TF_EXPECT_OK(tracer1->Start());
@ -157,7 +154,7 @@ TEST_F(DeviceTracerTest, StartTwoTracers) {
TEST_F(DeviceTracerTest, RunWithTracer) {
// On non-GPU platforms, we may not support DeviceTracer.
auto tracer = CreateDeviceTracer(nullptr);
auto tracer = CreateDeviceTracer();
if (!tracer) return;
Initialize({3, 2, -1, 0});
@ -184,7 +181,7 @@ TEST_F(DeviceTracerTest, RunWithTracer) {
}
TEST_F(DeviceTracerTest, TraceToStepStatsCollector) {
auto tracer = CreateDeviceTracer(nullptr);
auto tracer = CreateDeviceTracer();
if (!tracer) return;
Initialize({3, 2, -1, 0});

View File

@ -12,11 +12,11 @@ limitations under the License.
#ifndef TENSORFLOW_CORE_PLATFORM_GRPC_SERVICES_H_
#define TENSORFLOW_CORE_PLATFORM_GRPC_SERVICES_H_
#include "tensorflow/core/platform/platform.h"
#include "tensorflow/core/profiler/profiler_analysis.grpc.pb.h"
#include "tensorflow/core/profiler/profiler_service.grpc.pb.h"
#if !defined(PLATFORM_GOOGLE)
namespace tensorflow {
namespace grpc {

View File

@ -141,7 +141,7 @@ Status HostTracer::CollectData(RunMetadata* run_metadata) {
} // namespace
// Not in anonymous namespace for testing purposes.
std::unique_ptr<ProfilerInterface> CreateHostTracer(const ProfilerContext*) {
std::unique_ptr<ProfilerInterface> CreateHostTracer() {
int host_trace_level = 2;
return absl::make_unique<HostTracer>(host_trace_level);
}

View File

@ -28,7 +28,8 @@ limitations under the License.
namespace tensorflow {
namespace profiler {
namespace cpu {
std::unique_ptr<ProfilerInterface> CreateHostTracer(const ProfilerContext*);
std::unique_ptr<ProfilerInterface> CreateHostTracer();
namespace {
@ -80,7 +81,7 @@ inline ::testing::PolymorphicMatcher<NodeStatsMatcher> EqualsNodeStats(
TEST(HostTracerTest, CollectsTraceMeEvents) {
uint32 thread_id = Env::Default()->GetCurrentThreadId();
auto tracer = CreateHostTracer(nullptr);
auto tracer = CreateHostTracer();
TF_ASSERT_OK(tracer->Start());
{ TraceMe traceme("hello"); }

View File

@ -34,11 +34,10 @@ void RegisterProfilerFactory(ProfilerFactory factory) {
}
void CreateProfilers(
const ProfilerContext* context,
std::vector<std::unique_ptr<profiler::ProfilerInterface>>* result) {
absl::MutexLock lock(GetMutex());
for (auto factory : *GetFactories()) {
if (auto profiler = factory(context)) {
if (auto profiler = factory()) {
result->push_back(std::move(profiler));
}
}

View File

@ -15,15 +15,13 @@ limitations under the License.
#ifndef TENSORFLOW_CORE_PROFILER_INTERNAL_PROFILER_INTERFACE_H_
#define TENSORFLOW_CORE_PROFILER_INTERNAL_PROFILER_INTERFACE_H_
#include <memory>
#include <vector>
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
class EagerContext;
struct ProfilerContext {
EagerContext* eager_context = nullptr;
};
namespace profiler {
// Interface for tensorflow profiler plugins.
@ -50,13 +48,11 @@ class ProfilerInterface {
} // namespace profiler
using ProfilerFactory =
std::unique_ptr<profiler::ProfilerInterface> (*)(const ProfilerContext*);
using ProfilerFactory = std::unique_ptr<profiler::ProfilerInterface> (*)();
void RegisterProfilerFactory(ProfilerFactory factory);
void CreateProfilers(
const ProfilerContext* context,
std::vector<std::unique_ptr<profiler::ProfilerInterface>>* result);
} // namespace tensorflow

View File

@ -130,9 +130,8 @@ void ConvertRunMetadataToTraceEvent(RunMetadata* run_metadata,
}
} // namespace
/*static*/ std::unique_ptr<ProfilerSession> ProfilerSession::Create(
ProfilerContext* const context) {
return absl::WrapUnique(new ProfilerSession(context));
/*static*/ std::unique_ptr<ProfilerSession> ProfilerSession::Create() {
return absl::WrapUnique(new ProfilerSession());
}
Status ProfilerSession::Status() {
@ -173,7 +172,7 @@ Status ProfilerSession::SerializeToString(string* content) {
return Status::OK();
}
ProfilerSession::ProfilerSession(ProfilerContext* const context)
ProfilerSession::ProfilerSession()
: active_(!session_active.exchange(true)),
start_time_micros_(Env::Default()->NowNanos() / EnvTime::kMicrosToNanos) {
if (!active_) {
@ -184,7 +183,7 @@ ProfilerSession::ProfilerSession(ProfilerContext* const context)
LOG(INFO) << "Profiler session started.";
CreateProfilers(context, &profilers_);
CreateProfilers(&profilers_);
status_ = Status::OK();
for (auto& profiler : profilers_) {

View File

@ -32,8 +32,7 @@ namespace tensorflow {
class ProfilerSession {
public:
// Creates a ProfilerSession and starts profiling.
static std::unique_ptr<ProfilerSession> Create(
ProfilerContext* const context);
static std::unique_ptr<ProfilerSession> Create();
// Deletes an existing Profiler and enables starting a new one.
~ProfilerSession();
@ -45,9 +44,9 @@ class ProfilerSession {
private:
// Constructs an instance of the class and starts profiling
explicit ProfilerSession(ProfilerContext* const context);
ProfilerSession();
// Profiler is neither copyable or movable.
// ProfilerSession is neither copyable nor movable.
ProfilerSession(const ProfilerSession&) = delete;
ProfilerSession& operator=(const ProfilerSession&) = delete;

View File

@ -14,7 +14,7 @@ tf_cuda_library(
"//tensorflow:grpc++",
"//tensorflow/core:framework",
"//tensorflow/core:grpc_services",
"//tensorflow/core/common_runtime/eager:context",
"//tensorflow/core:lib",
"//tensorflow/core/profiler:protos_all_cc",
"//tensorflow/core/profiler/lib:profiler_lib",
"//tensorflow/core/profiler/lib:profiler_session",
@ -32,7 +32,7 @@ tf_cuda_library(
"//tensorflow:grpc++",
"//tensorflow/core:framework",
"//tensorflow/core:grpc_services",
"//tensorflow/core/common_runtime/eager:context",
"//tensorflow/core:lib",
"//tensorflow/core/profiler:protos_all_cc",
"//tensorflow/core/profiler/lib:profiler_lib",
"//tensorflow/core/profiler/lib:profiler_session",

View File

@ -14,27 +14,26 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/core/profiler/rpc/profiler_server.h"
#include <memory>
#include <utility>
#include "grpcpp/grpcpp.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/grpc_services.h"
#include "tensorflow/core/profiler/lib/profiler_session.h"
#include "tensorflow/core/profiler/rpc/profiler_service_impl.h"
#include "tensorflow/core/util/ptr_util.h"
namespace tensorflow {
std::unique_ptr<Thread> StartProfilerServer(
ProfilerContext* const profiler_context, int32 port) {
Env* env = profiler_context->eager_context != nullptr
? profiler_context->eager_context->TFEnv()
: Env::Default();
// Starting the server in the child thread may be delay and user may already
// delete the profiler context at that point. So we need to make a copy.
ProfilerContext ctx = *profiler_context;
return WrapUnique(env->StartThread({}, "profiler server", [ctx, port]() {
std::unique_ptr<Thread> StartProfilerServer(int32 port) {
Env* env = Env::Default();
return WrapUnique(env->StartThread({}, "profiler server", [port]() {
string server_address = strings::StrCat("0.0.0.0:", port);
std::unique_ptr<grpc::ProfilerService::Service> service =
CreateProfilerService(ctx);
CreateProfilerService();
::grpc::ServerBuilder builder;
builder.AddListeningPort(server_address,
::grpc::InsecureServerCredentials());

View File

@ -15,11 +15,16 @@ limitations under the License.
#ifndef TENSORFLOW_CORE_PROFILER_RPC_PROFILER_SERVER_H_
#define TENSORFLOW_CORE_PROFILER_RPC_PROFILER_SERVER_H_
#include "tensorflow/core/profiler/lib/profiler_session.h"
#include <memory>
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
class Thread;
std::unique_ptr<Thread> StartProfilerServer(
ProfilerContext* const profiler_context, int32 port);
std::unique_ptr<Thread> StartProfilerServer(int32 port);
} // namespace tensorflow
#endif // TENSORFLOW_CORE_PROFILER_RPC_PROFILER_SERVER_H_

View File

@ -14,8 +14,9 @@ limitations under the License.
==============================================================================*/
#include "tensorflow/core/profiler/rpc/profiler_service_impl.h"
#include "grpcpp/support/status.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/grpc_services.h"
#include "tensorflow/core/profiler/lib/profiler_session.h"
#include "tensorflow/core/util/ptr_util.h"
@ -25,10 +26,6 @@ namespace {
class ProfilerServiceImpl : public grpc::ProfilerService::Service {
public:
explicit ProfilerServiceImpl(const ProfilerContext& profiler_context)
: profiler_context_(profiler_context) {}
~ProfilerServiceImpl() override {}
::grpc::Status Monitor(::grpc::ServerContext* ctx, const MonitorRequest* req,
MonitorResponse* response) override {
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "unimplemented.");
@ -37,16 +34,13 @@ class ProfilerServiceImpl : public grpc::ProfilerService::Service {
::grpc::Status Profile(::grpc::ServerContext* ctx, const ProfileRequest* req,
ProfileResponse* response) override {
LOG(INFO) << "Received a profile request.";
std::unique_ptr<ProfilerSession> profiler =
ProfilerSession::Create(&profiler_context_);
std::unique_ptr<ProfilerSession> profiler = ProfilerSession::Create();
if (!profiler->Status().ok()) {
return ::grpc::Status(::grpc::StatusCode::INTERNAL,
profiler->Status().error_message());
}
Env* env = profiler_context_.eager_context != nullptr
? profiler_context_.eager_context->TFEnv()
: Env::Default();
Env* env = Env::Default();
for (size_t i = 0; i < req->duration_ms(); ++i) {
env->SleepForMicroseconds(1000);
if (ctx->IsCancelled()) {
@ -61,15 +55,11 @@ class ProfilerServiceImpl : public grpc::ProfilerService::Service {
return ::grpc::Status::OK;
}
private:
ProfilerContext profiler_context_;
};
} // namespace
std::unique_ptr<grpc::ProfilerService::Service> CreateProfilerService(
const ProfilerContext& profiler_context) {
return MakeUnique<ProfilerServiceImpl>(profiler_context);
std::unique_ptr<grpc::ProfilerService::Service> CreateProfilerService() {
return MakeUnique<ProfilerServiceImpl>();
}
} // namespace tensorflow

View File

@ -18,14 +18,13 @@ limitations under the License.
#include "grpcpp/grpcpp.h"
#include "grpcpp/server_context.h"
#include "grpcpp/support/status.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/platform/grpc_services.h"
#include "tensorflow/core/profiler/lib/profiler_session.h"
namespace tensorflow {
std::unique_ptr<grpc::ProfilerService::Service> CreateProfilerService(
const ProfilerContext& profiler_context);
std::unique_ptr<grpc::ProfilerService::Service> CreateProfilerService();
} // namespace tensorflow
#endif // TENSORFLOW_CORE_PROFILER_RPC_PROFILER_SERVICE_IMPL_H_

View File

@ -71,14 +71,9 @@ def start():
with _profiler_lock:
if _profiler is not None:
raise ProfilerAlreadyRunningError('Another profiler is running.')
profiler_context = pywrap_tensorflow.TFE_NewProfilerContext()
if context.default_execution_mode == context.EAGER_MODE:
context.ensure_initialized()
pywrap_tensorflow.TFE_ProfilerContextSetEagerContext(
profiler_context,
context.context()._handle) # pylint: disable=protected-access
_profiler = pywrap_tensorflow.TFE_NewProfiler(profiler_context)
pywrap_tensorflow.TFE_DeleteProfilerContext(profiler_context)
_profiler = pywrap_tensorflow.TFE_NewProfiler()
if not pywrap_tensorflow.TFE_ProfilerIsOk(_profiler):
logging.warning('Another profiler session is running which is probably '
'created by profiler server. Please avoid using profiler '
@ -161,14 +156,9 @@ def start_profiler_server(port):
Args:
port: port profiler server listens to.
"""
profiler_context = pywrap_tensorflow.TFE_NewProfilerContext()
if context.default_execution_mode == context.EAGER_MODE:
context.ensure_initialized()
pywrap_tensorflow.TFE_ProfilerContextSetEagerContext(
profiler_context,
context.context()._handle) # pylint: disable=protected-access
pywrap_tensorflow.TFE_StartProfilerServer(profiler_context, port)
pywrap_tensorflow.TFE_DeleteProfilerContext(profiler_context)
pywrap_tensorflow.TFE_StartProfilerServer(port)
class Profiler(object):

View File

@ -49,9 +49,6 @@ limitations under the License.
%rename("%s") TFE_ProfilerIsOk;
%rename("%s") TFE_DeleteProfiler;
%rename("%s") TFE_ProfilerSerializeToString;
%rename("%s") TFE_NewProfilerContext;
%rename("%s") TFE_ProfilerContextSetEagerContext;
%rename("%s") TFE_DeleteProfilerContext;
%rename("%s") TFE_StartProfilerServer;
%rename("%s") TFE_ProfilerClientStartTracing;
%rename("%s") TFE_ProfilerClientMonitor;