diff --git a/tensorflow/c/eager/c_api_unified_experimental.cc b/tensorflow/c/eager/c_api_unified_experimental.cc index 9c472551bc6..ce2052640ff 100644 --- a/tensorflow/c/eager/c_api_unified_experimental.cc +++ b/tensorflow/c/eager/c_api_unified_experimental.cc @@ -38,96 +38,159 @@ typedef void (*ExecuteOperation)(TF_AbstractOp* op, int num_inputs, TF_AbstractTensor* const* inputs, TF_OutputList* o, TF_ExecutionContext* ctx, TF_Status* s); + struct TF_ExecutionContext { - explicit TF_ExecutionContext() {} - absl::variant<TFE_Context*, TF_GraphContext*> ctx; - ExecuteOperation execution_callback; -}; + // Needed to implement our own version of RTTI since dynamic_cast is not + // supported in mobile builds. + enum ExecutionContextKind { GraphContext, EagerContext }; + explicit TF_ExecutionContext(ExecutionContextKind kind) : k(kind) {} + ExecutionContextKind getKind() const { return k; } -struct TF_AbstractTensor { - absl::variant<TFE_TensorHandle*, TF_GraphTensor*> t; -}; + virtual void ExecuteOperation(TF_AbstractOp* op, int num_inputs, + TF_AbstractTensor* const* inputs, + TF_OutputList* o, TF_Status* s) = 0; + virtual TF_AbstractOp* CreateOperation() = 0; + virtual void RegisterFunction(TF_AbstractFunction* func, TF_Status* s) = 0; + virtual ~TF_ExecutionContext() {} -struct TF_AbstractOp { - string op_type; - string op_name; + private: + const ExecutionContextKind k; }; -TF_ExecutionContext* TF_NewExecutionContext() { - return new TF_ExecutionContext(); -} - void TF_DeleteExecutionContext(TF_ExecutionContext* c) { delete c; } -TF_AbstractOp* TF_NewAbstractOp() { - TF_AbstractOp* op = new TF_AbstractOp; - return op; +template <typename T, typename S> +T* dynamic_cast_helper(S source) { + if (source->getKind() != T::kKind) { + return nullptr; + } + return tensorflow::down_cast<T*>(source); } -void TF_DeleteAbstractOp(TF_AbstractOp* op) { delete op; } - -TF_AbstractTensor* TF_NewAbstractTensor() { - TF_AbstractTensor* t = new TF_AbstractTensor; - return t; -} - -void TF_DeleteAbstractTensor(TF_AbstractTensor* t) { delete t; } - -struct TF_GraphContext { - TF_Graph* graph; - // TODO(srbs): Handle captures. -}; - -TF_GraphContext* TF_NewGraphContext(TF_Graph* g) { - auto ctx = new TF_GraphContext; - ctx->graph = g; - return ctx; -} - -void TF_DeleteGraphContext(TF_GraphContext* ctx) { delete ctx; } +class TF_GraphContext; +class TF_EagerContext; struct TF_GraphTensor { TF_Output output; TF_GraphContext* ctx; }; -TF_GraphTensor* TF_NewGraphTensor(TF_GraphContext* ctx, TF_Output output, - TF_Status* s) { - TF_GraphTensor* t = new TF_GraphTensor; - t->output = output; - t->ctx = ctx; - return t; -} -TF_Output TF_GraphTensorToOutput(const TF_GraphTensor* const t, TF_Status* s) { - return t->output; -} -void TF_DeleteGraphTensor(TF_GraphTensor* t) { delete t; } -void TF_AbstractTensorSetEagerTensor(TF_AbstractTensor* at, TFE_TensorHandle* t, - TF_Status* s) { - at->t = t; -} -TFE_TensorHandle* TF_AbstractTensorGetEagerTensor(TF_AbstractTensor* at, - TF_Status* s) { - if (!absl::holds_alternative<TFE_TensorHandle*>(at->t)) { - string msg = absl::StrCat("Not an eager tensor handle.", - reinterpret_cast<uintptr_t>(at)); - TF_SetStatus(s, TF_INVALID_ARGUMENT, msg.c_str()); - return nullptr; + +struct TF_AbstractTensor { + absl::variant<TFE_TensorHandle*, TF_GraphTensor*> t; + + ~TF_AbstractTensor() { + if (absl::holds_alternative<TFE_TensorHandle*>(t)) { + TFE_DeleteTensorHandle(absl::get<TFE_TensorHandle*>(t)); + } else if (absl::holds_alternative<TF_GraphTensor*>(t)) { + delete absl::get<TF_GraphTensor*>(t); + } } - return absl::get<TFE_TensorHandle*>(at->t); +}; + +struct TF_AbstractOp { + // Needed to implement our own version of RTTI since dynamic_cast is not + // supported in mobile builds.
+ enum AbstractOpKind { GraphOp, EagerOp }; + explicit TF_AbstractOp(AbstractOpKind kind) : k(kind) {} + AbstractOpKind getKind() const { return k; } + virtual void SetOpType(const char* const op_type, TF_Status* s) = 0; + virtual void SetOpName(const char* const op_name, TF_Status* s) = 0; + virtual void SetAttrType(const char* const attr_name, TF_DataType value, + TF_Status* s) = 0; + virtual ~TF_AbstractOp() {} + + private: + const AbstractOpKind k; +}; + +TF_AbstractOp* TF_NewAbstractOp(TF_ExecutionContext* c) { + return c->CreateOperation(); } -void TF_AbstractTensorSetGraphTensor(TF_AbstractTensor* at, TF_GraphTensor* t, - TF_Status* s) { - at->t = t; -} -TF_GraphTensor* TF_AbstractTensorGetGraphTensor(TF_AbstractTensor* at, - TF_Status* s) { - if (!absl::holds_alternative(at->t)) { - string msg = absl::StrCat("Not an graph tensor handle."); - TF_SetStatus(s, TF_INVALID_ARGUMENT, msg.c_str()); - return nullptr; + +void TF_DeleteAbstractOp(TF_AbstractOp* op) { delete op; } + +class TF_GraphOp : public TF_AbstractOp { + public: + explicit TF_GraphOp(TF_Graph* g) : TF_AbstractOp(kKind), g_(g) {} + void SetOpType(const char* const op_type, TF_Status* s) override { + if (op_) { + TF_SetStatus( + s, TF_FAILED_PRECONDITION, + absl::StrCat("SetOpType called on already built op.").c_str()); + return; + } + if (op_name_ != nullptr) { + op_.reset(TF_NewOperation(g_, op_type, op_name_)); + op_name_ = nullptr; + } else { + op_type_ = op_type; + } } - return absl::get(at->t); -} + void SetOpName(const char* const op_name, TF_Status* s) override { + if (op_) { + TF_SetStatus( + s, TF_FAILED_PRECONDITION, + absl::StrCat("SetOpName called on already built op.").c_str()); + return; + } + if (op_type_ != nullptr) { + op_.reset(TF_NewOperation(g_, op_type_, op_name)); + op_type_ = nullptr; + } else { + op_name_ = op_name; + } + } + void SetAttrType(const char* const attr_name, TF_DataType value, + TF_Status* s) override { + if (!op_) { + TF_SetStatus( + s, TF_FAILED_PRECONDITION, + "op_type and op_name must be specified before specifying attrs."); + return; + } + TF_SetAttrType(op_.get(), attr_name, value); + } + ~TF_GraphOp() override {} + + static constexpr AbstractOpKind kKind = GraphOp; + + private: + friend class TF_GraphContext; // For access to op_. + TF_Graph* g_; + std::unique_ptr op_; + // Hold `op_type` and `op_name` till both are available since we need both + // to build a graph operation. + const char* op_type_ = nullptr; + const char* op_name_ = nullptr; +}; + +class TF_EagerOp : public TF_AbstractOp { + public: + explicit TF_EagerOp(TFE_Context* ctx) : TF_AbstractOp(kKind), ctx_(ctx) {} + void SetOpType(const char* const op_type, TF_Status* s) override { + op_ = TFE_NewOp(ctx_, op_type, s); + } + void SetOpName(const char* const op_name, TF_Status* s) override { + // Name is ignored in eager mode. + } + void SetAttrType(const char* const attr_name, TF_DataType value, + TF_Status* s) override { + if (op_ == nullptr) { + TF_SetStatus(s, TF_FAILED_PRECONDITION, + "op_type must be specified before specifying attrs."); + return; + } + TFE_OpSetAttrType(op_, attr_name, value); + } + + ~TF_EagerOp() override { TFE_DeleteOp(op_); } + static constexpr AbstractOpKind kKind = EagerOp; + + private: + friend class TF_EagerContext; // For access to op_. 
+ TFE_Op* op_ = nullptr; + TFE_Context* ctx_; +}; bool IsEagerTensor(const TF_AbstractTensor* const t) { return absl::holds_alternative<TFE_TensorHandle*>(t->t); @@ -138,6 +201,221 @@ struct TF_OutputList { int expected_num_outputs = -1; }; +struct TF_AbstractFunction { + TF_Function* func = nullptr; + + ~TF_AbstractFunction() { TF_DeleteFunction(func); } +}; + +class TF_EagerContext : public TF_ExecutionContext { + public: + TF_EagerContext() : TF_ExecutionContext(kKind) {} + + void Build(TFE_ContextOptions* options, TF_Status* status) { + eager_ctx_ = TFE_NewContext(options, status); + } + + TF_AbstractOp* CreateOperation() override { + // TODO(srbs): Should the lifetime of this op be tied to the context. + return new TF_EagerOp(eager_ctx_); + } + + void ExecuteOperation(TF_AbstractOp* op, int num_inputs, + TF_AbstractTensor* const* inputs, TF_OutputList* o, + TF_Status* s) override { + auto* eager_op = dynamic_cast_helper<TF_EagerOp>(op); + if (eager_op == nullptr) { + TF_SetStatus(s, TF_INVALID_ARGUMENT, + "Unable to cast TF_AbstractOp to TF_EagerOp."); + return; + } + auto* tfe_op = eager_op->op_; + if (TF_GetCode(s) != TF_OK) return; + for (int i = 0; i < num_inputs; ++i) { + if (!IsEagerTensor(inputs[i])) { + TF_SetStatus(s, TF_INVALID_ARGUMENT, "Not an eager tensor."); + return; + } + TFE_OpAddInput(tfe_op, absl::get<TFE_TensorHandle*>(inputs[i]->t), s); + if (TF_GetCode(s) != TF_OK) return; + } + if (o->expected_num_outputs == -1) { + string msg = + "The number of outputs must be provided in eager mode. Use " + "TF_OutputListSetNumOutputs."; + TF_SetStatus(s, TF_INVALID_ARGUMENT, msg.c_str()); + return; + } + tensorflow::gtl::InlinedVector<TFE_TensorHandle*, 2> retvals; + int num_retvals = o->expected_num_outputs; + retvals.resize(num_retvals); + TFE_Execute(tfe_op, retvals.data(), &num_retvals, s); + if (TF_GetCode(s) != TF_OK) { + return; + } + o->outputs.clear(); + o->outputs.reserve(num_retvals); + for (int i = 0; i < num_retvals; ++i) { + auto* t = new TF_AbstractTensor(); + t->t = retvals[i]; + o->outputs.push_back(t); + } + } + + void RegisterFunction(TF_AbstractFunction* func, TF_Status* s) override { + TFE_ContextAddFunction(eager_ctx_, func->func, s); + } + + ~TF_EagerContext() override { TFE_DeleteContext(eager_ctx_); } + + static constexpr ExecutionContextKind kKind = EagerContext; + + private: + friend TFE_Context* TF_ExecutionContextGetTFEContext( + TF_ExecutionContext* ctx); + TFE_Context* eager_ctx_; +}; + +void TF_DeleteAbstractTensor(TF_AbstractTensor* t) { delete t; } + +TF_GraphContext* GetGraphContext(TF_AbstractTensor const* t) { + return absl::get<TF_GraphTensor*>(t->t)->ctx; +} + +class TF_GraphContext : public TF_ExecutionContext { + public: + TF_GraphContext() + : TF_ExecutionContext(kKind), graph_(new TF_Graph(), TF_DeleteGraph) {} + + TF_AbstractOp* CreateOperation() override { + // TODO(srbs): Should the lifetime of this op be tied to the context.
+ return new TF_GraphOp(graph_.get()); + } + + void ExecuteOperation(TF_AbstractOp* op, int num_inputs, + TF_AbstractTensor* const* inputs, TF_OutputList* o, + TF_Status* s) override { + auto* graph_op = dynamic_cast_helper<TF_GraphOp>(op); + if (graph_op == nullptr) { + TF_SetStatus(s, TF_INVALID_ARGUMENT, + "Unable to cast TF_AbstractOp to TF_GraphOp."); + return; + } + auto* tf_opdesc = graph_op->op_.release(); + for (int i = 0; i < num_inputs; ++i) { + auto* input = inputs[i]; + if (IsEagerTensor(input)) { + TF_SetStatus(s, TF_INVALID_ARGUMENT, + "Capturing eager tensors is not supported yet."); + return; + } else { + if (GetGraphContext(input) != this) { + TF_SetStatus( + s, TF_INVALID_ARGUMENT, + "Capturing tensors from other graphs is not supported yet."); + return; + } + TF_AddInput(tf_opdesc, absl::get<TF_GraphTensor*>(input->t)->output); + } + } + auto* operation = TF_FinishOperation(tf_opdesc, s); + // TF_FinishOperation deletes `tf_opdesc` so clear its reference. + graph_op->op_ = nullptr; + if (TF_GetCode(s) != TF_OK) return; + int num_outputs = TF_OperationNumOutputs(operation); + o->outputs.clear(); + o->outputs.reserve(num_outputs); + for (int i = 0; i < num_outputs; ++i) { + auto* t = new TF_AbstractTensor; + TF_GraphTensor* graph_t = new TF_GraphTensor; + graph_t->ctx = this; + graph_t->output = {operation, i}; + t->t = graph_t; + o->outputs.push_back(t); + } + } + + TF_Function* ToFunction(const char* fn_name, int num_inputs, + const TF_AbstractTensor* inputs, int num_outputs, + const TF_AbstractTensor* outputs, + TF_Status* status) const { + std::vector<TF_Output> graph_inputs; + graph_inputs.resize(num_inputs); + std::vector<TF_Output> graph_outputs; + graph_outputs.resize(num_outputs); + for (int i = 0; i < num_inputs; i++) { + graph_inputs[i] = absl::get<TF_GraphTensor*>(inputs[i].t)->output; + } + for (int i = 0; i < num_outputs; i++) { + graph_outputs[i] = absl::get<TF_GraphTensor*>(outputs[i].t)->output; + } + + return TF_GraphToFunction(graph_.get(), fn_name, 0, -1, nullptr, + graph_inputs.size(), graph_inputs.data(), + graph_outputs.size(), graph_outputs.data(), + nullptr, nullptr, fn_name, status); + } + + void RegisterFunction(TF_AbstractFunction* func, TF_Status* s) override { + TF_SetStatus(s, TF_UNIMPLEMENTED, + "Registering graph functions has not been implemented yet."); + } + + ~TF_GraphContext() override {} + + static constexpr ExecutionContextKind kKind = GraphContext; + + private: + std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)> graph_; +}; + +struct TF_GraphContextOptions {}; +struct TF_EagerContextOptions { + explicit TF_EagerContextOptions(TFE_ContextOptions* options) + : options(options) {} + TFE_ContextOptions* options; // Not owned.
+}; + +struct TF_ExecutionContextOptions { + absl::variant<TF_GraphContextOptions*, TF_EagerContextOptions*> options; + ~TF_ExecutionContextOptions() { + if (absl::holds_alternative<TF_GraphContextOptions*>(options)) { + delete absl::get<TF_GraphContextOptions*>(options); + } else if (absl::holds_alternative<TF_EagerContextOptions*>(options)) { + delete absl::get<TF_EagerContextOptions*>(options); + } + } +}; + +TF_ExecutionContextOptions* TF_NewGraphContextOptions() { + auto* options = new TF_ExecutionContextOptions(); + options->options = new TF_GraphContextOptions(); + return options; +} + +void TF_DeleteExecutionContextOptions(TF_ExecutionContextOptions* options) { + delete options; +} + +TF_ExecutionContextOptions* TF_NewEagerContextOptions( + TFE_ContextOptions* tfe_options) { + auto* options = new TF_ExecutionContextOptions(); + options->options = new TF_EagerContextOptions(tfe_options); + return options; +} + +TF_ExecutionContext* TF_NewExecutionContext(TF_ExecutionContextOptions* options, + TF_Status* s) { + if (absl::holds_alternative<TF_EagerContextOptions*>(options->options)) { + auto* ctx = new TF_EagerContext(); + ctx->Build(absl::get<TF_EagerContextOptions*>(options->options)->options, + s); + return ctx; + } else { + return new TF_GraphContext(); + } +} + TF_OutputList* TF_NewOutputList() { return new TF_OutputList; } void TF_DeleteOutputList(TF_OutputList* o) { delete o; } void TF_OutputListSetNumOutputs(TF_OutputList* o, int num_outputs, @@ -149,113 +427,74 @@ TF_AbstractTensor* TF_OutputListGet(TF_OutputList* o, int i) { return o->outputs[i]; } -void ExecuteOperationEager(TF_AbstractOp* op, int num_inputs, - TF_AbstractTensor* const* inputs, TF_OutputList* o, - TF_ExecutionContext* ctx, TF_Status* s) { - auto* tfe_op = - TFE_NewOp(absl::get<TFE_Context*>(ctx->ctx), op->op_type.c_str(), s); - if (TF_GetCode(s) != TF_OK) return; - for (int i = 0; i < num_inputs; ++i) { - if (!IsEagerTensor(inputs[i])) { - TF_SetStatus(s, TF_INVALID_ARGUMENT, "Not an eager tensor."); - return; - } - TFE_OpAddInput(tfe_op, absl::get<TFE_TensorHandle*>(inputs[i]->t), s); - if (TF_GetCode(s) != TF_OK) return; - } - if (o->expected_num_outputs == -1) { - string msg = - "The number of outputs must be provided in eager mode.
Use " - "TF_OutputListSetNumOutputs."; - TF_SetStatus(s, TF_INVALID_ARGUMENT, msg.c_str()); - return; - } - tensorflow::gtl::InlinedVector<TFE_TensorHandle*, 2> retvals; - int num_retvals = o->expected_num_outputs; - retvals.resize(num_retvals); - TFE_Execute(tfe_op, retvals.data(), &num_retvals, s); - TFE_DeleteOp(tfe_op); - if (TF_GetCode(s) != TF_OK) { - return; - } - o->outputs.clear(); - o->outputs.reserve(num_retvals); - for (int i = 0; i < num_retvals; ++i) { - auto* t = TF_NewAbstractTensor(); - t->t = retvals[i]; - o->outputs.push_back(t); - } -} - -TF_GraphContext* GetGraphContext(TF_AbstractTensor const* t) { - return absl::get<TF_GraphTensor*>(t->t)->ctx; -} - -void ExecuteOperationGraph(TF_AbstractOp* op, int num_inputs, - TF_AbstractTensor* const* inputs, TF_OutputList* o, - TF_ExecutionContext* ctx, TF_Status* s) { - TF_GraphContext* graph_ctx = absl::get<TF_GraphContext*>(ctx->ctx); - TF_Graph* g = graph_ctx->graph; - auto* tf_opdesc = - TF_NewOperation(g, op->op_type.c_str(), op->op_name.c_str()); - for (int i = 0; i < num_inputs; ++i) { - auto* input = inputs[i]; - if (IsEagerTensor(input)) { - TF_SetStatus(s, TF_INVALID_ARGUMENT, - "Capturing eager tensors is not supported yet."); - return; - } else { - if (GetGraphContext(input) != graph_ctx) { - TF_SetStatus( - s, TF_INVALID_ARGUMENT, - "Capturing tensors from other graphs is not supported yet."); - return; - } - TF_AddInput(tf_opdesc, absl::get<TF_GraphTensor*>(input->t)->output); - } - } - auto* operation = TF_FinishOperation(tf_opdesc, s); - if (TF_GetCode(s) != TF_OK) return; - int num_outputs = TF_OperationNumOutputs(operation); - o->outputs.clear(); - o->outputs.reserve(num_outputs); - for (int i = 0; i < num_outputs; ++i) { - auto* t = TF_NewAbstractTensor(); - TF_GraphTensor* output_t = TF_NewGraphTensor(graph_ctx, {operation, i}, s); - if (TF_GetCode(s) != TF_OK) { - return; - } - t->t = output_t; - o->outputs.push_back(t); - } -} - -void TF_ExecutionContextSetEagerContext(TF_ExecutionContext* context, - TFE_Context* eager_context, - TF_Status* s) { - context->ctx = eager_context; - context->execution_callback = &ExecuteOperationEager; -} - -void TF_ExecutionContextSetGraphContext(TF_ExecutionContext* context, - TF_GraphContext* graph_context, - TF_Status* s) { - context->ctx = graph_context; - context->execution_callback = &ExecuteOperationGraph; -} - void TF_AbstractOpSetOpType(TF_AbstractOp* op, const char* const op_type, TF_Status* s) { - op->op_type = op_type; + op->SetOpType(op_type, s); } void TF_AbstractOpSetOpName(TF_AbstractOp* op, const char* const op_name, TF_Status* s) { - op->op_name = op_name; + op->SetOpName(op_name, s); +} + +void TF_AbstractOpSetAttrType(TF_AbstractOp* op, const char* const attr_name, + TF_DataType value, TF_Status* s) { + op->SetAttrType(attr_name, value, s); } void TF_ExecuteOperation(TF_AbstractOp* op, int num_inputs, TF_AbstractTensor* const* inputs, TF_OutputList* o, TF_ExecutionContext* ctx, TF_Status* s) { - ctx->execution_callback(op, num_inputs, inputs, o, ctx, s); + ctx->ExecuteOperation(op, num_inputs, inputs, o, s); +} + +TF_AbstractFunction* TF_ExecutionContextToFunction( + const TF_ExecutionContext* fn_body, const char* fn_name, int num_inputs, + const TF_AbstractTensor* inputs, int num_outputs, + const TF_AbstractTensor* outputs, TF_Status* status) { + auto* graph_ctx = dynamic_cast_helper<const TF_GraphContext>(fn_body); + if (graph_ctx == nullptr) { + TF_SetStatus(status, TF_INVALID_ARGUMENT, + "fn_body is not a TF_GraphContext."); + return nullptr; + } + TF_AbstractFunction* func = new TF_AbstractFunction; + func->func = graph_ctx->ToFunction(fn_name,
num_inputs, inputs, num_outputs, + outputs, status); + return func; +} + +void TF_DeleteAbstractFunction(TF_AbstractFunction* func) { delete func; } + +void TF_ExecutionContextRegisterFunction(TF_ExecutionContext* ctx, + TF_AbstractFunction* func, + TF_Status* s) { + ctx->RegisterFunction(func, s); +} + +// Temporary APIs till we figure out how to create scalar valued Eager +// tensors and how to get value out of eager abstract tensors. +TF_AbstractTensor* TF_NewAbstractTensor() { + TF_AbstractTensor* t = new TF_AbstractTensor; + return t; +} + +void TF_AbstractTensorSetEagerTensor(TF_AbstractTensor* at, TFE_TensorHandle* t, + TF_Status* s) { + at->t = t; +} + +TFE_TensorHandle* TF_AbstractTensorGetEagerTensor(TF_AbstractTensor* at, + TF_Status* s) { + if (!absl::holds_alternative<TFE_TensorHandle*>(at->t)) { + string msg = absl::StrCat("Not an eager tensor handle.", + reinterpret_cast<uintptr_t>(at)); + TF_SetStatus(s, TF_INVALID_ARGUMENT, msg.c_str()); + return nullptr; + } + return absl::get<TFE_TensorHandle*>(at->t); +} + +TFE_Context* TF_ExecutionContextGetTFEContext(TF_ExecutionContext* ctx) { + return dynamic_cast_helper<TF_EagerContext>(ctx)->eager_ctx_; } diff --git a/tensorflow/c/eager/c_api_unified_experimental.h b/tensorflow/c/eager/c_api_unified_experimental.h index 6346ceaf26e..291adbeef37 100644 --- a/tensorflow/c/eager/c_api_unified_experimental.h +++ b/tensorflow/c/eager/c_api_unified_experimental.h @@ -15,8 +15,8 @@ limitations under the License. #ifndef TENSORFLOW_C_EAGER_C_API_UNIFIED_EXPERIMENTAL_H_ #define TENSORFLOW_C_EAGER_C_API_UNIFIED_EXPERIMENTAL_H_ -#include "tensorflow/c/c_api.h" #include "tensorflow/c/eager/c_api.h" +#include "tensorflow/c/tf_status.h" #ifdef __cplusplus extern "C" { @@ -41,32 +41,19 @@ typedef struct TF_AbstractTensor TF_AbstractTensor; // could contain the op type and other attributes. typedef struct TF_AbstractOp TF_AbstractOp; -TF_ExecutionContext* TF_NewExecutionContext(); +// `TF_ExecutionContextOptions` define what type of `TF_ExecutionContext` is +// created. It can be used to pass context specific params. +typedef struct TF_ExecutionContextOptions TF_ExecutionContextOptions; +void TF_DeleteExecutionContextOptions(TF_ExecutionContextOptions*); + +TF_ExecutionContext* TF_NewExecutionContext(TF_ExecutionContextOptions*, + TF_Status* s); void TF_DeleteExecutionContext(TF_ExecutionContext*); -TF_AbstractOp* TF_NewAbstractOp(); +TF_AbstractOp* TF_NewAbstractOp(TF_ExecutionContext* ctx); void TF_DeleteAbstractOp(TF_AbstractOp*); -TF_AbstractTensor* TF_NewAbstractTensor(); void TF_DeleteAbstractTensor(TF_AbstractTensor*); - -// ----------------------------------------------------------------------------- -// APIs for Eager and graph modes -// ----------------------------------------------------------------------------- - -// Keeps track of the current graph and other state e.g. captures etc. -typedef struct TF_GraphContext TF_GraphContext; -TF_GraphContext* TF_NewGraphContext(TF_Graph*); -void TF_DeleteGraphContext(TF_GraphContext*); - -// `eager_context` must outlive `context`. -void TF_ExecutionContextSetEagerContext(TF_ExecutionContext* context, - TFE_Context* eager_context, TF_Status*); -// `graph_context` must outlive `context`. -void TF_ExecutionContextSetGraphContext(TF_ExecutionContext* context, - TF_GraphContext* graph_context, - TF_Status*); - // TODO(srbs): Add APIs for specifying attrs etc. // `op_type` must outlive `op`.
void TF_AbstractOpSetOpType(TF_AbstractOp* op, const char* const op_type, @@ -74,25 +61,9 @@ void TF_AbstractOpSetOpType(TF_AbstractOp* op, const char* const op_type, // `op_name` must outlive `op`. void TF_AbstractOpSetOpName(TF_AbstractOp* op, const char* const op_name, TF_Status* s); - -// Wrapper for TF_Output but contains a pointer to TF_GraphContext as well. -typedef struct TF_GraphTensor TF_GraphTensor; -TF_GraphTensor* TF_NewGraphTensor(TF_GraphContext* c, TF_Output t, - TF_Status* s); -TF_Output TF_GraphTensorToOutput(const TF_GraphTensor* const t, TF_Status* s); -void TF_DeleteGraphTensor(TF_GraphTensor* t); - -// `t` must outlive `at`. -void TF_AbstractTensorSetEagerTensor(TF_AbstractTensor* at, TFE_TensorHandle* t, - TF_Status* s); -TFE_TensorHandle* TF_AbstractTensorGetEagerTensor(TF_AbstractTensor* at, - TF_Status* s); - -// `t` must outlive `at`. -void TF_AbstractTensorSetGraphTensor(TF_AbstractTensor* at, TF_GraphTensor* t, - TF_Status* s); -TF_GraphTensor* TF_AbstractTensorGetGraphTensor(TF_AbstractTensor* at, - TF_Status* s); +// `attr_name` must outlive `op`. +void TF_AbstractOpSetAttrType(TF_AbstractOp* op, const char* const attr_name, + TF_DataType value, TF_Status* s); // TF_OutputList just lets us not specify the number of outputs of an operation // beforehand. This forces a memory allocation in the runtime, which is bad, but @@ -104,6 +75,17 @@ void TF_OutputListSetNumOutputs(TF_OutputList* o, int, TF_Status*); int TF_OutputListNumOutputs(TF_OutputList* o); TF_AbstractTensor* TF_OutputListGet(TF_OutputList* o, int i); +// Stores a function representation that can be used for execution or for +// setting functional attributes of other composite ops e.g. control flow. +typedef struct TF_AbstractFunction TF_AbstractFunction; +TF_AbstractFunction* TF_ExecutionContextToFunction( + const TF_ExecutionContext* fn_body, const char* fn_name, int num_inputs, + const TF_AbstractTensor* inputs, int num_outputs, + const TF_AbstractTensor* outputs, TF_Status* status); +void TF_DeleteAbstractFunction(TF_AbstractFunction*); +void TF_ExecutionContextRegisterFunction(TF_ExecutionContext*, + TF_AbstractFunction*, TF_Status*); + // TF_ExecuteOperation will, if in eager mode, execute, if in graph mode, maybe // capture some inputs and then add a node in the graph, and after // execution/node creation it'll go and record things that happened in any tape @@ -112,6 +94,23 @@ void TF_ExecuteOperation(TF_AbstractOp* op, int num_inputs, TF_AbstractTensor* const* inputs, TF_OutputList* o, TF_ExecutionContext* ctx, TF_Status* s); +// ----------------------------------------------------------------------------- +// APIs specific to Eager and graph modes +// ----------------------------------------------------------------------------- + +TF_ExecutionContextOptions* TF_NewGraphContextOptions(); +TF_ExecutionContextOptions* TF_NewEagerContextOptions(TFE_ContextOptions*); + +// Temporary APIs till we figure out how to create scalar valued Eager +// tensors and how to get value out of eager abstract tensors. +TF_AbstractTensor* TF_NewAbstractTensor(); +void TF_AbstractTensorSetEagerTensor( + TF_AbstractTensor* at, TFE_TensorHandle* t, + TF_Status* s); // `at` takes ownership of `t`. 
+TFE_TensorHandle* TF_AbstractTensorGetEagerTensor(TF_AbstractTensor* at, + TF_Status* s); +TFE_Context* TF_ExecutionContextGetTFEContext(TF_ExecutionContext*); + #ifdef __cplusplus } /* end extern "C" */ #endif diff --git a/tensorflow/c/eager/c_api_unified_experimental_test.cc b/tensorflow/c/eager/c_api_unified_experimental_test.cc index 104ede9ebbd..fca82966a2a 100644 --- a/tensorflow/c/eager/c_api_unified_experimental_test.cc +++ b/tensorflow/c/eager/c_api_unified_experimental_test.cc @@ -33,26 +33,25 @@ namespace tensorflow { namespace { TEST(UnifedCAPI, TestBasicEager) { - TF_ExecutionContext* ctx = TF_NewExecutionContext(); std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); TFE_ContextOptions* opts = TFE_NewContextOptions(); - TFE_Context* eager_ctx = TFE_NewContext(opts, status.get()); + TF_ExecutionContextOptions* options = TF_NewEagerContextOptions(opts); + TF_ExecutionContext* ctx = TF_NewExecutionContext(options, status.get()); ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); TFE_DeleteContextOptions(opts); - // Enter the eager context. - TF_ExecutionContextSetEagerContext(ctx, eager_ctx, status.get()); ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); // Build an abstract input tensor. + TFE_Context* eager_ctx = TF_ExecutionContextGetTFEContext(ctx); TFE_TensorHandle* t = TestScalarTensorHandle(eager_ctx, 2.0f); TF_AbstractTensor* at = TF_NewAbstractTensor(); TF_AbstractTensorSetEagerTensor(at, t, status.get()); ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); // Build an abstract operation. - auto* op = TF_NewAbstractOp(); + auto* op = TF_NewAbstractOp(ctx); TF_AbstractOpSetOpType(op, "Add", status.get()); ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); @@ -69,7 +68,6 @@ TEST(UnifedCAPI, TestBasicEager) { // Clean up operation and inputs. TF_DeleteAbstractOp(op); TF_DeleteAbstractTensor(at); - TFE_DeleteTensorHandle(t); // Verify the results. ASSERT_EQ(1, TF_OutputListNumOutputs(o)); @@ -83,100 +81,98 @@ TEST(UnifedCAPI, TestBasicEager) { TF_DeleteTensor(result_tensor); TF_DeleteAbstractTensor(result); - TFE_DeleteTensorHandle(result_t); TF_DeleteOutputList(o); - TFE_DeleteContext(eager_ctx); TF_DeleteExecutionContext(ctx); + TF_DeleteExecutionContextOptions(options); } TEST(UnifedCAPI, TestBasicGraph) { - TF_ExecutionContext* ctx = TF_NewExecutionContext(); std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( TF_NewStatus(), TF_DeleteStatus); - - // Enter a graph context. - TF_Graph* g = TF_NewGraph(); - TF_GraphContext* graph_context = TF_NewGraphContext(g); - TF_ExecutionContextSetGraphContext(ctx, graph_context, status.get()); + TF_ExecutionContextOptions* options = TF_NewGraphContextOptions(); + TF_ExecutionContext* graph_ctx = + TF_NewExecutionContext(options, status.get()); ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); // Add a placeholder to the graph.
- auto* placeholder_op = TF_NewOperation(g, "Placeholder", "Placeholder"); - TF_SetAttrType(placeholder_op, "dtype", TF_FLOAT); - auto* operation = TF_FinishOperation(placeholder_op, status.get()); + auto* placeholder_op = TF_NewAbstractOp(graph_ctx); + TF_AbstractOpSetOpType(placeholder_op, "Placeholder", status.get()); ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); - TF_Output placeholder_t = {operation, 0}; - TF_GraphTensor* graph_t = - TF_NewGraphTensor(graph_context, placeholder_t, status.get()); + TF_AbstractOpSetOpName(placeholder_op, "my_ph", status.get()); ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); - TF_AbstractTensor* t = TF_NewAbstractTensor(); - TF_AbstractTensorSetGraphTensor(t, graph_t, status.get()); - ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); - - // Build an abstract operation. - auto* op = TF_NewAbstractOp(); - TF_AbstractOpSetOpType(op, "Add", status.get()); - ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); - TF_AbstractOpSetOpName(op, "my_add", status.get()); + TF_AbstractOpSetAttrType(placeholder_op, "dtype", TF_FLOAT, status.get()); ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); // Build inputs and outputs. - TF_AbstractTensor* inputs[2] = {t, t}; - TF_OutputList* o = TF_NewOutputList(); + TF_OutputList* placeholder_outputs = TF_NewOutputList(); // Execute. - TF_ExecuteOperation(op, 2, inputs, o, ctx, status.get()); + TF_ExecuteOperation(placeholder_op, 0, nullptr, placeholder_outputs, + graph_ctx, status.get()); ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + ASSERT_EQ(1, TF_OutputListNumOutputs(placeholder_outputs)); + TF_AbstractTensor* placeholder_t = TF_OutputListGet(placeholder_outputs, 0); + + // Delete placeholder op. + TF_DeleteAbstractOp(placeholder_op); + + // Build an abstract operation. + auto* add_op = TF_NewAbstractOp(graph_ctx); + TF_AbstractOpSetOpType(add_op, "Add", status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + TF_AbstractOpSetOpName(add_op, "my_add", status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + + // Build inputs and outputs. + TF_AbstractTensor* inputs[2] = {placeholder_t, placeholder_t}; + TF_OutputList* add_outputs = TF_NewOutputList(); + + // Execute. + TF_ExecuteOperation(add_op, 2, inputs, add_outputs, graph_ctx, status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + TF_AbstractTensor* output_t = TF_OutputListGet(add_outputs, 0); // Clean up operation and inputs. 
- TF_DeleteAbstractOp(op); - TF_DeleteAbstractTensor(t); - TF_DeleteGraphTensor(graph_t); + TF_DeleteAbstractOp(add_op); - TF_AbstractTensor* result = TF_OutputListGet(o, 0); - TF_GraphTensor* result_graph_tensor = - TF_AbstractTensorGetGraphTensor(result, status.get()); - TF_DeleteAbstractTensor(result); - ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); - TF_Output result_output = - TF_GraphTensorToOutput(result_graph_tensor, status.get()); - TF_DeleteGraphTensor(result_graph_tensor); - ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); string fn_name = "double"; - TF_Function* f = TF_GraphToFunction( - g, fn_name.c_str(), 0, -1, nullptr, 1, &placeholder_t, 1, &result_output, - nullptr, nullptr, fn_name.c_str(), status.get()); - ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + TF_AbstractFunction* func = TF_ExecutionContextToFunction( + graph_ctx, fn_name.c_str(), 1, placeholder_t, 1, output_t, status.get()); + TF_DeleteAbstractTensor(placeholder_t); + TF_DeleteAbstractTensor(output_t); - // Build an eager context to run the function. + // Build eager context. TFE_ContextOptions* opts = TFE_NewContextOptions(); - TFE_Context* eager_ctx = TFE_NewContext(opts, status.get()); + TF_ExecutionContextOptions* eager_ctx_options = + TF_NewEagerContextOptions(opts); + TF_ExecutionContext* eager_execution_ctx = + TF_NewExecutionContext(eager_ctx_options, status.get()); ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); TFE_DeleteContextOptions(opts); - // Build the abstract op to run the function. - TFE_ContextAddFunction(eager_ctx, f, status.get()); + TF_ExecutionContextRegisterFunction(eager_execution_ctx, func, status.get()); ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); - TF_AbstractOp* fn_op = TF_NewAbstractOp(); + // Build the abstract op to run the function. + TF_AbstractOp* fn_op = TF_NewAbstractOp(eager_execution_ctx); TF_AbstractOpSetOpType(fn_op, fn_name.c_str(), status.get()); ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); // Build an abstract input tensor. - TFE_TensorHandle* input_eager = TestScalarTensorHandle(eager_ctx, 2.0f); TF_AbstractTensor* input_t = TF_NewAbstractTensor(); + TFE_Context* eager_ctx = + TF_ExecutionContextGetTFEContext(eager_execution_ctx); + TFE_TensorHandle* input_eager = TestScalarTensorHandle(eager_ctx, 2.0f); TF_AbstractTensorSetEagerTensor(input_t, input_eager, status.get()); ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); - // Enter the eager context. 
- TF_ExecutionContextSetEagerContext(ctx, eager_ctx, status.get()); + TF_OutputListSetNumOutputs(add_outputs, 1, status.get()); ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); - TF_OutputListSetNumOutputs(o, 1, status.get()); - ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); - TF_ExecuteOperation(fn_op, 1, &input_t, o, ctx, status.get()); + TF_ExecuteOperation(fn_op, 1, &input_t, add_outputs, eager_execution_ctx, + status.get()); ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); - ASSERT_EQ(1, TF_OutputListNumOutputs(o)); - TF_AbstractTensor* final_result = TF_OutputListGet(o, 0); + ASSERT_EQ(1, TF_OutputListNumOutputs(add_outputs)); + TF_AbstractTensor* final_result = TF_OutputListGet(add_outputs, 0); TFE_TensorHandle* final = TF_AbstractTensorGetEagerTensor(final_result, status.get()); ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); @@ -185,19 +181,201 @@ TEST(UnifedCAPI, TestBasicGraph) { float* f_value = static_cast<float*>(TF_TensorData(f_t)); ASSERT_EQ(*f_value, 4.0); - TF_DeleteOutputList(o); + TF_DeleteOutputList(add_outputs); + TF_DeleteOutputList(placeholder_outputs); TF_DeleteAbstractOp(fn_op); TF_DeleteAbstractTensor(input_t); - TFE_DeleteTensorHandle(input_eager); TF_DeleteAbstractTensor(final_result); - TFE_DeleteTensorHandle(final); TF_DeleteTensor(f_t); - TF_DeleteFunction(f); + TF_DeleteAbstractFunction(func); + + TF_DeleteExecutionContext(graph_ctx); + TF_DeleteExecutionContext(eager_execution_ctx); + TF_DeleteExecutionContextOptions(eager_ctx_options); + TF_DeleteExecutionContextOptions(options); +} + +TEST(UnifedCAPI, TF_ExecutionContextToFunctionWithEagerContextRaises) { + std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( + TF_NewStatus(), TF_DeleteStatus); + TFE_ContextOptions* opts = TFE_NewContextOptions(); + TF_ExecutionContextOptions* options = TF_NewEagerContextOptions(opts); + TF_ExecutionContext* ctx = TF_NewExecutionContext(options, status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + TFE_DeleteContextOptions(opts); + + TF_AbstractFunction* func = TF_ExecutionContextToFunction( + ctx, nullptr, 0, nullptr, 0, nullptr, status.get()); + ASSERT_EQ(nullptr, func); + ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get())); - TF_DeleteGraphContext(graph_context); - TF_DeleteGraph(g); - TFE_DeleteContext(eager_ctx); TF_DeleteExecutionContext(ctx); + TF_DeleteExecutionContextOptions(options); +} + +TEST(UnifedCAPI, TF_CallingSetOpTypeAfterFinishingOpBuildingRaises) { + std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( + TF_NewStatus(), TF_DeleteStatus); + TF_ExecutionContextOptions* options = TF_NewGraphContextOptions(); + TF_ExecutionContext* graph_ctx = + TF_NewExecutionContext(options, status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + + // Add a placeholder to the graph. + auto* placeholder_op = TF_NewAbstractOp(graph_ctx); + TF_AbstractOpSetOpType(placeholder_op, "Placeholder", status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + TF_AbstractOpSetOpName(placeholder_op, "my_ph", status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + + // This should fail.
+ TF_AbstractOpSetOpType(placeholder_op, "Placeholder", status.get()); + ASSERT_EQ(TF_FAILED_PRECONDITION, TF_GetCode(status.get())); + + TF_DeleteAbstractOp(placeholder_op); + TF_DeleteExecutionContext(graph_ctx); + TF_DeleteExecutionContextOptions(options); +} + +TEST(UnifedCAPI, TF_CallingSetOpNameAfterFinishingOpBuildingRaises) { + std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( + TF_NewStatus(), TF_DeleteStatus); + TF_ExecutionContextOptions* options = TF_NewGraphContextOptions(); + TF_ExecutionContext* graph_ctx = + TF_NewExecutionContext(options, status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + + // Add a placeholder to the graph. + auto* placeholder_op = TF_NewAbstractOp(graph_ctx); + TF_AbstractOpSetOpType(placeholder_op, "Placeholder", status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + TF_AbstractOpSetOpName(placeholder_op, "my_ph", status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + + // This should fail. + TF_AbstractOpSetOpName(placeholder_op, "my_ph", status.get()); + ASSERT_EQ(TF_FAILED_PRECONDITION, TF_GetCode(status.get())); + + TF_DeleteAbstractOp(placeholder_op); + TF_DeleteExecutionContext(graph_ctx); + TF_DeleteExecutionContextOptions(options); +} + +TEST(UnifedCAPI, TestExecutingEagerOpInGraphModeRaises) { + // Build an Eager context. + std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( + TF_NewStatus(), TF_DeleteStatus); + TFE_ContextOptions* opts = TFE_NewContextOptions(); + TF_ExecutionContextOptions* options = TF_NewEagerContextOptions(opts); + TF_ExecutionContext* ctx = TF_NewExecutionContext(options, status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + TFE_DeleteContextOptions(opts); + + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + + // Build an Eager operation. + auto* op = TF_NewAbstractOp(ctx); + TF_AbstractOpSetOpType(op, "Add", status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + + // Build an abstract input tensor. + TFE_Context* eager_ctx = TF_ExecutionContextGetTFEContext(ctx); + TFE_TensorHandle* t = TestScalarTensorHandle(eager_ctx, 2.0f); + TF_AbstractTensor* at = TF_NewAbstractTensor(); + TF_AbstractTensorSetEagerTensor(at, t, status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + + // Build inputs and outputs. + TF_AbstractTensor* inputs[2] = {at, at}; + TF_OutputList* o = TF_NewOutputList(); + TF_OutputListSetNumOutputs(o, 1, status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + + // Build a Graph context. + TF_ExecutionContextOptions* graph_options = TF_NewGraphContextOptions(); + TF_ExecutionContext* graph_ctx = + TF_NewExecutionContext(graph_options, status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + + // Execute eager op using graph context. + TF_ExecuteOperation(op, 2, inputs, o, graph_ctx, status.get()); + ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get())); + + // Clean up operation and inputs.
+ TF_DeleteAbstractOp(op); + TF_DeleteAbstractTensor(at); + + TF_DeleteOutputList(o); + TF_DeleteExecutionContext(ctx); + TF_DeleteExecutionContextOptions(options); + TF_DeleteExecutionContext(graph_ctx); + TF_DeleteExecutionContextOptions(graph_options); +} + +TEST(UnifedCAPI, TestExecutingGraphOpInEagerModeRaises) { + std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status( + TF_NewStatus(), TF_DeleteStatus); + TF_ExecutionContextOptions* options = TF_NewGraphContextOptions(); + TF_ExecutionContext* graph_ctx = + TF_NewExecutionContext(options, status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + + // Add a placeholder to the graph. + auto* placeholder_op = TF_NewAbstractOp(graph_ctx); + TF_AbstractOpSetOpType(placeholder_op, "Placeholder", status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + TF_AbstractOpSetOpName(placeholder_op, "my_ph", status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + TF_AbstractOpSetAttrType(placeholder_op, "dtype", TF_FLOAT, status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + + // Build inputs and outputs. + TF_OutputList* placeholder_outputs = TF_NewOutputList(); + + // Execute. + TF_ExecuteOperation(placeholder_op, 0, nullptr, placeholder_outputs, + graph_ctx, status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + ASSERT_EQ(1, TF_OutputListNumOutputs(placeholder_outputs)); + TF_AbstractTensor* placeholder_t = TF_OutputListGet(placeholder_outputs, 0); + + // Delete placeholder op. + TF_DeleteAbstractOp(placeholder_op); + + // Build an abstract operation. + auto* add_op = TF_NewAbstractOp(graph_ctx); + TF_AbstractOpSetOpType(add_op, "Add", status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + TF_AbstractOpSetOpName(add_op, "my_add", status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + + // Build inputs and outputs. + TF_AbstractTensor* inputs[2] = {placeholder_t, placeholder_t}; + TF_OutputList* add_outputs = TF_NewOutputList(); + + // Build eager context. + TFE_ContextOptions* opts = TFE_NewContextOptions(); + TF_ExecutionContextOptions* eager_ctx_options = + TF_NewEagerContextOptions(opts); + TF_ExecutionContext* eager_execution_ctx = + TF_NewExecutionContext(eager_ctx_options, status.get()); + ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); + TFE_DeleteContextOptions(opts); + + // Execute. + TF_ExecuteOperation(add_op, 2, inputs, add_outputs, eager_execution_ctx, + status.get()); + ASSERT_EQ(TF_INVALID_ARGUMENT, TF_GetCode(status.get())); + + // Clean up operation and inputs. + TF_DeleteAbstractTensor(placeholder_t); + TF_DeleteAbstractOp(add_op); + TF_DeleteOutputList(add_outputs); + TF_DeleteOutputList(placeholder_outputs); + TF_DeleteExecutionContext(graph_ctx); + TF_DeleteExecutionContext(eager_execution_ctx); + TF_DeleteExecutionContextOptions(eager_ctx_options); + TF_DeleteExecutionContextOptions(options); } } // namespace
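Reviewer note (not part of the diff): for orientation, here is a condensed sketch of the end-to-end flow the new API enables, pieced together from TestBasicGraph above: trace ops into a graph context, package the trace as a TF_AbstractFunction, then register and call it from an eager context. It assumes the test file's includes and its TestScalarTensorHandle() helper; the variable names are illustrative, and status checks and cleanup are elided, so treat it as a sketch rather than a drop-in test.

// Sketch only: condensed from TestBasicGraph; status checks and cleanup elided.
TF_Status* s = TF_NewStatus();

// 1. Trace a function body in a graph context.
TF_ExecutionContextOptions* g_opts = TF_NewGraphContextOptions();
TF_ExecutionContext* graph_ctx = TF_NewExecutionContext(g_opts, s);

TF_AbstractOp* ph = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(ph, "Placeholder", s);  // type and name must both be set
TF_AbstractOpSetOpName(ph, "x", s);            // before any attributes.
TF_AbstractOpSetAttrType(ph, "dtype", TF_FLOAT, s);
TF_OutputList* ph_outs = TF_NewOutputList();
TF_ExecuteOperation(ph, 0, nullptr, ph_outs, graph_ctx, s);  // adds the node
TF_AbstractTensor* x = TF_OutputListGet(ph_outs, 0);
TF_DeleteAbstractOp(ph);

TF_AbstractOp* add = TF_NewAbstractOp(graph_ctx);
TF_AbstractOpSetOpType(add, "Add", s);
TF_AbstractOpSetOpName(add, "my_add", s);
TF_AbstractTensor* add_inputs[2] = {x, x};
TF_OutputList* add_outs = TF_NewOutputList();
TF_ExecuteOperation(add, 2, add_inputs, add_outs, graph_ctx, s);
TF_AbstractTensor* y = TF_OutputListGet(add_outs, 0);
TF_DeleteAbstractOp(add);

// 2. Package the traced graph as a function named "double".
TF_AbstractFunction* fn =
    TF_ExecutionContextToFunction(graph_ctx, "double", 1, x, 1, y, s);

// 3. Register the function in an eager context and call it.
TFE_ContextOptions* opts = TFE_NewContextOptions();
TF_ExecutionContextOptions* e_opts = TF_NewEagerContextOptions(opts);
TF_ExecutionContext* eager_exec_ctx = TF_NewExecutionContext(e_opts, s);
TFE_DeleteContextOptions(opts);
TF_ExecutionContextRegisterFunction(eager_exec_ctx, fn, s);

TF_AbstractOp* call = TF_NewAbstractOp(eager_exec_ctx);
TF_AbstractOpSetOpType(call, "double", s);  // op type is the function name
TF_AbstractTensor* arg = TF_NewAbstractTensor();
TF_AbstractTensorSetEagerTensor(
    arg, TestScalarTensorHandle(TF_ExecutionContextGetTFEContext(eager_exec_ctx), 2.0f), s);
TF_OutputList* call_outs = TF_NewOutputList();
TF_OutputListSetNumOutputs(call_outs, 1, s);  // required in eager mode
TF_ExecuteOperation(call, 1, &arg, call_outs, eager_exec_ctx, s);
// TF_OutputListGet(call_outs, 0) now wraps an eager TFE_TensorHandle holding 4.0f.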