diff --git a/tensorflow/c/c_api_experimental.cc b/tensorflow/c/c_api_experimental.cc
index e623f30b98c..e9e6d470c68 100644
--- a/tensorflow/c/c_api_experimental.cc
+++ b/tensorflow/c/c_api_experimental.cc
@@ -325,205 +325,6 @@ TF_Buffer* TFE_GetServerDef(const char* text_proto, TF_Status* status) {
   return ret;
 }
 
-TFE_Context* TFE_CreateContextFromSession(TF_Session* session,
-                                          TF_Status* status) {
-  auto* opts = TFE_NewContextOptions();
-
-  // Reduce GPU memory allocation, and set appropriate config options for TFE
-  // context.
-  auto* config = TF_CreateConfig(
-      /*xla*/ false, /* gpu_memory_allow_growth */ true, /* num_cpu_devices */
-      10);
-  TFE_ContextOptionsSetConfig(opts, config->data, config->length, status);
-  if (!status->status.ok()) {
-    CHECK(!config);
-    TFE_DeleteContextOptions(opts);
-    return nullptr;
-  }
-
-  auto* ctx = TFE_NewContextFromSession(opts, session, status);
-  TF_DeleteBuffer(config);
-  TFE_DeleteContextOptions(opts);
-  return ctx;
-}
-
-// TODO: retrieve the device string via TFE_ContextListDevices()
-static const char DEFAULT_CPU_DEVICE[] =
-    "/job:localhost/replica:0/task:0/device:CPU:0";
-
-static TFE_TensorHandle* createTFEQueue(TFE_Context* ctx, TF_DataType inputType,
-                                        int tensor_id, TF_Status* status) {
-  std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> queueOp(
-      TFE_NewOp(ctx, "FIFOQueueV2", status), TFE_DeleteOp);
-  TFE_OpSetDevice(queueOp.get(), DEFAULT_CPU_DEVICE, status);
-  if (!status->status.ok()) return nullptr;
-  // TODO: use NAMED_TENSOR_QUEUE_CAPACITY in S4TF compiler.
-  TFE_OpSetAttrInt(queueOp.get(), "capacity", 1);
-  TFE_OpSetAttrTypeList(queueOp.get(), "component_types", &inputType, 1);
-  auto shared_name = tensorflow::strings::StrCat("fifo_queue_", tensor_id);
-  TFE_OpSetAttrString(queueOp.get(), "shared_name", shared_name.data(),
-                      shared_name.size());
-  TFE_OpSetAttrString(queueOp.get(), "container", "", 0);
-
-  // TODO: consider making this an unknown shape.
-  const int64_t* dims_ptr = nullptr;
-  int num_dims = 0;
-  TFE_OpSetAttrShapeList(queueOp.get(), "shapes", &dims_ptr, &num_dims,
-                         /*num_values*/ 0, status);
-  if (!status->status.ok()) return nullptr;
-
-  int num_retvals = 1;
-  TFE_TensorHandle* queue = nullptr;
-  TFE_Execute(queueOp.get(), &queue, &num_retvals, status);
-  if (!status->status.ok()) return nullptr;
-  CHECK_EQ(num_retvals, 1);
-
-  return queue;
-}
-
-static void createTFEEnqueue(TFE_Context* ctx, TF_DataType inputType,
-                             TFE_TensorHandle* queue, TFE_TensorHandle* tensor,
-                             TF_Status* status) {
-  TFE_Op* op = TFE_NewOp(ctx, "QueueEnqueueV2", status);
-  if (!status->status.ok()) return;
-  std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op_deleter(op, TFE_DeleteOp);
-  TFE_OpSetDevice(op, DEFAULT_CPU_DEVICE, status);
-  if (!status->status.ok()) return;
-  TFE_OpAddInput(op, queue, status);
-  if (!status->status.ok()) return;
-  TFE_OpAddInput(op, tensor, status);
-  if (!status->status.ok()) return;
-  TFE_OpSetAttrTypeList(op, "Tcomponents", &inputType, 1);
-  TFE_OpSetAttrInt(op, "timeout_ms", -1);
-
-  int num_retvals = 0;
-  TFE_Execute(op, nullptr /*retvals*/, &num_retvals, status);
-  if (!status->status.ok()) return;
-  CHECK_EQ(num_retvals, 0);
-}
-
-static TFE_TensorHandle* createTFEDequeue(TFE_Context* ctx,
-                                          TF_DataType inputType,
-                                          TFE_TensorHandle* queue,
-                                          TF_Status* status) {
-  TFE_Op* op = TFE_NewOp(ctx, "QueueDequeueV2", status);
-  if (!status->status.ok()) return nullptr;
-  std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op_deleter(op, TFE_DeleteOp);
-  TFE_OpSetDevice(op, DEFAULT_CPU_DEVICE, status);
-  if (!status->status.ok()) return nullptr;
-
-  TFE_OpAddInput(op, queue, status);
-  if (!status->status.ok()) return nullptr;
-  TFE_OpSetAttrTypeList(op, "component_types", &inputType, 1);
-  TFE_OpSetAttrInt(op, "timeout_ms", -1);
-  TFE_TensorHandle* ret;
-  int num_retvals = 1;
-  TFE_Execute(op, &ret, &num_retvals, status);
-  if (!status->status.ok()) return nullptr;
-  CHECK_EQ(num_retvals, 1);
-  return ret;
-}
-
-TFE_TensorHandle* TFE_DequeueNamedTensor(TF_Session* session, int tensor_id,
-                                         TF_DataType inputType,
-                                         TF_Status* status) {
-  assert(session);
-  VLOG(1) << "Dequeuing data tensor with id " << tensor_id;
-
-  auto ctx = TFE_CreateContextFromSession(session, status);
-  if (!status->status.ok()) return nullptr;
-  std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> ctx_deleter(
-      ctx, TFE_DeleteContext);
-
-  TFE_TensorHandle* queue = createTFEQueue(ctx, inputType, tensor_id, status);
-  if (!status->status.ok()) return nullptr;
-  std::unique_ptr<TFE_TensorHandle, decltype(&TFE_DeleteTensorHandle)>
-      queue_deleter(queue, TFE_DeleteTensorHandle);
-
-  auto* ret = createTFEDequeue(ctx, inputType, queue, status);
-  return ret;
-}
-
-TFE_TensorHandle* TFE_DequeueNamedTensorFromCtx(TFE_Context* ctx, int tensor_id,
-                                                TF_DataType inputType,
-                                                TF_Status* status) {
-  TFE_TensorHandle* queue = createTFEQueue(ctx, inputType, tensor_id, status);
-  if (!status->status.ok()) return nullptr;
-  std::unique_ptr<TFE_TensorHandle, decltype(&TFE_DeleteTensorHandle)>
-      queue_deleter(queue, TFE_DeleteTensorHandle);
-
-  auto* ret = createTFEDequeue(ctx, inputType, queue, status);
-
-  return ret;
-}
-
-void TFE_EnqueueNamedTensor(TF_Session* session, int tensor_id,
-                            TFE_TensorHandle* tensor, TF_Status* status) {
-  assert(session);
-  VLOG(1) << "Enqueuing data tensor with id " << tensor_id;
-
-  auto ctx = TFE_CreateContextFromSession(session, status);
-  if (!status->status.ok()) return;
-  std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> ctx_deleter(
-      ctx, TFE_DeleteContext);
-
-  TF_DataType inputType = TFE_TensorHandleDataType(tensor);
-  TFE_TensorHandle* queue = createTFEQueue(ctx, inputType, tensor_id, status);
-  if (!status->status.ok()) return;
-  std::unique_ptr<TFE_TensorHandle, decltype(&TFE_DeleteTensorHandle)>
-      queue_deleter(queue, TFE_DeleteTensorHandle);
-
-  createTFEEnqueue(ctx, inputType, queue, tensor, status);
-}
-
-void TFE_EnqueueNamedTensorFromCtx(TFE_Context* ctx, int tensor_id,
-                                   TFE_TensorHandle* tensor,
-                                   TF_Status* status) {
-  VLOG(1) << "Enqueuing data tensor with id " << tensor_id;
-
-  TF_DataType inputType = TFE_TensorHandleDataType(tensor);
-  TFE_TensorHandle* queue = createTFEQueue(ctx, inputType, tensor_id, status);
-  if (!status->status.ok()) return;
-  std::unique_ptr<TFE_TensorHandle, decltype(&TFE_DeleteTensorHandle)>
-      queue_deleter(queue, TFE_DeleteTensorHandle);
-
-  createTFEEnqueue(ctx, inputType, queue, tensor, status);
-}
-
-void TFE_EnqueueVariantTensor(TF_Session* session, int tensor_id,
-                              TFE_TensorHandle* tensor, TF_Status* status) {
-  VLOG(1) << "Enqueuing variant tensor with id " << tensor_id;
-
-  auto ctx = TFE_CreateContextFromSession(session, status);
-  if (!status->status.ok()) return;
-  std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> ctx_deleter(
-      ctx, TFE_DeleteContext);
-
-  TFE_TensorHandle* queue = createTFEQueue(ctx, TF_VARIANT, tensor_id, status);
-  if (!status->status.ok()) return;
-  std::unique_ptr<TFE_TensorHandle, decltype(&TFE_DeleteTensorHandle)>
-      queue_deleter(queue, TFE_DeleteTensorHandle);
-
-  createTFEEnqueue(ctx, TF_VARIANT, queue, tensor, status);
-}
-
-TFE_TensorHandle* TFE_DequeueVariantTensor(TF_Session* session, int tensor_id,
-                                           TF_Status* status) {
-  VLOG(1) << "Dequeuing variant tensor with id " << tensor_id;
-
-  auto ctx = TFE_CreateContextFromSession(session, status);
-  if (!status->status.ok()) return nullptr;
-  std::unique_ptr<TFE_Context, decltype(&TFE_DeleteContext)> ctx_deleter(
-      ctx, TFE_DeleteContext);
-
-  TFE_TensorHandle* queue = createTFEQueue(ctx, TF_VARIANT, tensor_id, status);
-  if (!status->status.ok()) return nullptr;
-  std::unique_ptr<TFE_TensorHandle, decltype(&TFE_DeleteTensorHandle)>
-      queue_deleter(queue, TFE_DeleteTensorHandle);
-
-  return createTFEDequeue(ctx, TF_VARIANT, queue, status);
-}
-
 void TF_MakeInternalErrorStatus(TF_Status* status, const char* errMsg) {
   status->status = tensorflow::errors::Internal(errMsg);
 }
@@ -622,10 +423,9 @@ void TF_AttrBuilderSetType(TF_AttrBuilder* builder, const char* attr_name,
 void TF_AttrBuilderSetTypeList(TF_AttrBuilder* builder, const char* attr_name,
                                const TF_DataType* values, int num_values) {
   auto iter = builder->attr_names.insert(attr_name).first;
-  builder->Set(
-      (*iter).c_str(),
-      tensorflow::gtl::ArraySlice<const tensorflow::DataType>(
-          reinterpret_cast<const tensorflow::DataType*>(values), num_values));
+  builder->Set(*iter, tensorflow::gtl::ArraySlice<const tensorflow::DataType>(
+                          reinterpret_cast<const tensorflow::DataType*>(values),
+                          num_values));
 }
 
 void TF_AttrBuilderCheckCanRunOnDevice(TF_AttrBuilder* builder,
diff --git a/tensorflow/c/c_api_experimental.h b/tensorflow/c/c_api_experimental.h
index 551a45d92c4..d0ffbf125fb 100644
--- a/tensorflow/c/c_api_experimental.h
+++ b/tensorflow/c/c_api_experimental.h
@@ -146,48 +146,6 @@ TF_CAPI_EXPORT extern void TF_EnqueueNamedTensor(TF_Session* session,
 // Create a serialized tensorflow.ServerDef proto.
 TF_Buffer* TFE_GetServerDef(const char* text_proto, TF_Status* status);
 
-// TODO: remove this API in favor of the next one.
-TF_CAPI_EXPORT extern TFE_Context* TFE_NewContextFromSession(
-    const TFE_ContextOptions* opts, TF_Session* sess, TF_Status* status);
-
-// Creates from `session` a new eager context to run a graph function or
-// sends/recvs, so that these concurrent TFE executions can share (via
-// `session` and its associated device mgr) the same set of fifo queue resource
-// ops, used for host<->TF tensor transfers. This way the sends/recvs calls and
-// graph function execution can access the same fifo queue resource handles
-// (associated with devices managed by the device manager, which can be obtained
-// from `session`).
-//
-// TODO: Remove this function once we migrate away from using session.
-TF_CAPI_EXPORT extern TFE_Context* TFE_CreateContextFromSession(
-    TF_Session* session, TF_Status* status);
-
-// TODO: Retire this API in favor of the next one.
-TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_DequeueNamedTensor(
-    TF_Session* session, int tensor_id, TF_DataType inputType,
-    TF_Status* status);
-
-TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_DequeueNamedTensorFromCtx(
-    TFE_Context* ctx, int tensor_id, TF_DataType inputType, TF_Status* status);
-
-TF_CAPI_EXPORT extern void TFE_EnqueueNamedTensor(TF_Session* session,
-                                                  int tensor_id,
-                                                  TFE_TensorHandle* tensor,
-                                                  TF_Status* status);
-
-TF_CAPI_EXPORT extern void TFE_EnqueueNamedTensorFromCtx(
-    TFE_Context* ctx, int tensor_id, TFE_TensorHandle* tensor,
-    TF_Status* status);
-
-// TODO: consider folding the 2 APIs below into the ones above.
-TF_CAPI_EXPORT extern void TFE_EnqueueVariantTensor(TF_Session* session,
-                                                    int tensor_id,
-                                                    TFE_TensorHandle* tensor,
-                                                    TF_Status* status);
-
-TF_CAPI_EXPORT extern TFE_TensorHandle* TFE_DequeueVariantTensor(
-    TF_Session* session, int tensor_id, TF_Status* status);
-
 TF_CAPI_EXPORT extern void TF_MakeInternalErrorStatus(TF_Status* status,
                                                       const char* errMsg);
diff --git a/tensorflow/c/eager/c_api.cc b/tensorflow/c/eager/c_api.cc
index f5535c80d30..912cd184b77 100644
--- a/tensorflow/c/eager/c_api.cc
+++ b/tensorflow/c/eager/c_api.cc
@@ -727,24 +727,6 @@ TFE_Context* TFE_NewContext(const TFE_ContextOptions* opts, TF_Status* status) {
       tensorflow::GetDefaultCustomKernelCreator()));
 }
 
-TFE_Context* TFE_NewContextFromSession(const TFE_ContextOptions* opts,
-                                       TF_Session* sess, TF_Status* status) {
-  const tensorflow::DeviceMgr* device_mgr = nullptr;
-  status->status = sess->session->LocalDeviceManager(&device_mgr);
-  if (!status->status.ok()) return nullptr;
-  tensorflow::Rendezvous* r =
-      new tensorflow::IntraProcessRendezvous(device_mgr);
-
-  return tensorflow::wrap(new tensorflow::EagerContext(
-      opts->session_options.options,
-      static_cast<tensorflow::ContextDevicePlacementPolicy>(
-          opts->device_placement_policy),
-      static_cast<tensorflow::ContextMirroringPolicy>(opts->mirroring_policy),
-      opts->async, opts->lazy_remote_inputs_copy, device_mgr,
-      /*device_mgr_owned*/ false, r,
-      tensorflow::GetDefaultCustomKernelCreator()));
-}
-
 void TFE_DeleteContext(TFE_Context* ctx) {
   if (ctx == nullptr) {
     return;
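Reviewer note (not part of the patch): with TFE_NewContextFromSession and TFE_CreateContextFromSession removed, a caller that still needs an eager context has to build a standalone one through the remaining TFE_NewContext API. The sketch below is illustrative only: it reuses the same config values the removed helper passed (no XLA, GPU memory growth enabled, 10 CPU devices), but unlike the removed helper it does not share the session's device manager or rendezvous, so resources created under a TF_Session are not visible to it. The CreateStandaloneEagerContext name is made up for this example.

#include "tensorflow/c/c_api_experimental.h"  // TF_CreateConfig
#include "tensorflow/c/eager/c_api.h"

// Illustrative replacement path: build an eager context directly instead of
// deriving one from a TF_Session. On failure the error is left in `status`
// and nullptr is returned; otherwise the caller owns the returned context
// and must release it with TFE_DeleteContext.
static TFE_Context* CreateStandaloneEagerContext(TF_Status* status) {
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  // Same knobs the removed TFE_CreateContextFromSession helper used.
  TF_Buffer* config = TF_CreateConfig(
      /*enable_xla_compilation=*/0, /*gpu_memory_allow_growth=*/1,
      /*num_cpu_devices=*/10);
  TFE_ContextOptionsSetConfig(opts, config->data, config->length, status);
  TFE_Context* ctx = nullptr;
  if (TF_GetCode(status) == TF_OK) {
    ctx = TFE_NewContext(opts, status);
  }
  TF_DeleteBuffer(config);
  TFE_DeleteContextOptions(opts);
  return ctx;
}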