diff --git a/tensorflow/BUILD b/tensorflow/BUILD index 005acff27f7..55406a5686a 100644 --- a/tensorflow/BUILD +++ b/tensorflow/BUILD @@ -644,7 +644,6 @@ tf_cc_shared_object( "//tensorflow/core:lib_internal_impl", "//tensorflow/core/profiler:profiler_impl", "//tensorflow/stream_executor:stream_executor_impl", - "//tensorflow/c:c_core_api_no_xla", "//tensorflow:tf_framework_version_script.lds", ] + tf_additional_binary_deps(), ) diff --git a/tensorflow/c/BUILD b/tensorflow/c/BUILD index 248bb826c28..c5574793b74 100644 --- a/tensorflow/c/BUILD +++ b/tensorflow/c/BUILD @@ -23,7 +23,6 @@ filegroup( srcs = [ "c_api.h", "c_api_experimental.h", - "c_core_api.h", "tf_attrtype.h", "tf_datatype.h", "tf_file_statistics.h", @@ -74,7 +73,6 @@ tf_cuda_library( hdrs = [ "c_api.h", "c_api_internal.h", - "c_core_api.h", "tf_datatype.h", "tf_tensor.h", ], @@ -118,41 +116,10 @@ cc_library( visibility = ["//visibility:public"], ) -tf_cuda_library( - name = "c_core_api", - hdrs = [ - "c_core_api.h", - "tf_attrtype.h", - "tf_datatype.h", - "tf_file_statistics.h", - "tf_status.h", - "tf_tensor.h", - ], - copts = tf_copts(), - visibility = [ - "//visibility:public", - ], - deps = [ - ":c_core_api_no_xla", - ":c_api_internal", - ":tf_attrtype", - ":tf_status_internal", - ":tf_file_statistics", - ":tf_tensor_internal", - ] + select({ - "//tensorflow:with_xla_support": [ - "//tensorflow/compiler/tf2xla:xla_compiler", - "//tensorflow/compiler/jit", - ], - "//conditions:default": [], - }), -) - tf_cuda_library( name = "c_api", hdrs = [ "c_api.h", - "c_core_api.h", "tf_attrtype.h", "tf_datatype.h", "tf_file_statistics.h", @@ -162,7 +129,6 @@ tf_cuda_library( copts = tf_copts(), visibility = ["//visibility:public"], deps = [ - ":c_core_api", ":c_api_no_xla", ":c_api_internal", ":tf_attrtype", @@ -178,48 +144,11 @@ tf_cuda_library( }), ) -tf_cuda_library( - name = "c_core_api_no_xla", - srcs = [ - "c_api_function.cc", - "c_core_api.cc", - ], - hdrs = [ - "c_core_api.h", - ], - copts = tf_copts(), - visibility = ["//tensorflow:__subpackages__"], - deps = [ - ":c_api_internal", - ":tf_attrtype", - ":tf_datatype", - ":tf_status_internal", - ] + select({ - "//tensorflow:android": [ - "//tensorflow/core:android_tensorflow_lib_lite", - ], - "//conditions:default": [ - ":tf_status", - ":tf_tensor", - "@com_google_absl//absl/strings", - "//tensorflow/cc/saved_model:loader_lite", - "//tensorflow/core:core_cpu", - "//tensorflow/core:core_cpu_internal", - "//tensorflow/core:framework", - "//tensorflow/core:op_gen_lib", - "//tensorflow/core:protos_all_cc", - "//tensorflow/core:lib", - "//tensorflow/core:lib_internal", - "//tensorflow/core/distributed_runtime:server_lib", - ], - }), - alwayslink = 1, -) - tf_cuda_library( name = "c_api_no_xla", srcs = [ "c_api.cc", + "c_api_function.cc", ], hdrs = [ "c_api.h", @@ -230,7 +159,6 @@ tf_cuda_library( "//third_party/llvm/llvm-project:__subpackages__", ], deps = [ - ":c_core_api_no_xla", ":c_api_internal", ":tf_attrtype", ":tf_datatype", @@ -256,6 +184,8 @@ tf_cuda_library( "//tensorflow/core:protos_all_cc", "//tensorflow/core:lib", "//tensorflow/core:lib_internal", + "//tensorflow/core/distributed_runtime:server_lib", + "//tensorflow/core/kernels:logging_ops", ], }), alwayslink = 1, diff --git a/tensorflow/c/c_api.cc b/tensorflow/c/c_api.cc index 3a110e4c9f2..bc1fbd3fcf5 100644 --- a/tensorflow/c/c_api.cc +++ b/tensorflow/c/c_api.cc @@ -29,6 +29,9 @@ limitations under the License. 
#include "tensorflow/cc/framework/ops.h" #include "tensorflow/cc/framework/scope_internal.h" #include "tensorflow/cc/ops/while_loop.h" +#include "tensorflow/cc/saved_model/loader.h" +#include "tensorflow/core/distributed_runtime/server_lib.h" +#include "tensorflow/core/framework/logging.h" #include "tensorflow/core/framework/op_gen_lib.h" #endif // !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD) #include "tensorflow/c/c_api_internal.h" @@ -96,14 +99,566 @@ using tensorflow::TensorBuffer; using tensorflow::TensorId; using tensorflow::TensorShape; using tensorflow::TensorShapeProto; -using tensorflow::ToTensorId; using tensorflow::VersionDef; using tensorflow::errors::FailedPrecondition; using tensorflow::errors::InvalidArgument; using tensorflow::gtl::ArraySlice; using tensorflow::strings::StrCat; +extern "C" { + +// -------------------------------------------------------------------------- +const char* TF_Version() { return TF_VERSION_STRING; } + +// -------------------------------------------------------------------------- + +// -------------------------------------------------------------------------- +TF_SessionOptions* TF_NewSessionOptions() { return new TF_SessionOptions; } +void TF_DeleteSessionOptions(TF_SessionOptions* opt) { delete opt; } + +void TF_SetTarget(TF_SessionOptions* options, const char* target) { + options->options.target = target; +} + +void TF_SetConfig(TF_SessionOptions* options, const void* proto, + size_t proto_len, TF_Status* status) { + if (!options->options.config.ParseFromArray(proto, proto_len)) { + status->status = InvalidArgument("Unparseable ConfigProto"); + } +} +// -------------------------------------------------------------------------- +TF_Buffer* TF_NewBuffer() { return new TF_Buffer{nullptr, 0, nullptr}; } + +TF_Buffer* TF_NewBufferFromString(const void* proto, size_t proto_len) { + void* copy = tensorflow::port::Malloc(proto_len); + memcpy(copy, proto, proto_len); + + TF_Buffer* buf = new TF_Buffer; + buf->data = copy; + buf->length = proto_len; + buf->data_deallocator = [](void* data, size_t length) { + tensorflow::port::Free(data); + }; + return buf; +} + +void TF_DeleteBuffer(TF_Buffer* buffer) { + if (buffer == nullptr) return; + if (buffer->data_deallocator != nullptr) { + (*buffer->data_deallocator)(const_cast(buffer->data), + buffer->length); + } + delete buffer; +} + +TF_Buffer TF_GetBuffer(TF_Buffer* buffer) { return *buffer; } + +// -------------------------------------------------------------------------- + +TF_DeprecatedSession* TF_NewDeprecatedSession(const TF_SessionOptions* opt, + TF_Status* status) { + Session* session; + status->status = NewSession(opt->options, &session); + if (status->status.ok()) { + return new TF_DeprecatedSession({session}); + } else { + DCHECK_EQ(nullptr, session); + return nullptr; + } +} + +void TF_CloseDeprecatedSession(TF_DeprecatedSession* s, TF_Status* status) { + status->status = s->session->Close(); +} + +void TF_DeleteDeprecatedSession(TF_DeprecatedSession* s, TF_Status* status) { + status->status = Status::OK(); + if (s == nullptr) return; + delete s->session; + delete s; +} + +void TF_ExtendGraph(TF_DeprecatedSession* s, const void* proto, + size_t proto_len, TF_Status* status) { + GraphDef g; + if (!tensorflow::ParseProtoUnlimited(&g, proto, proto_len)) { + status->status = InvalidArgument("Invalid GraphDef"); + return; + } + status->status = s->session->Extend(g); +} + +} // end extern "C" + +// Reset helper for converting character arrays to string vectors. 
+static void TF_Reset_Helper(const TF_SessionOptions* opt,
+                            const char** containers, int ncontainers,
+                            TF_Status* status) {
+  std::vector<string> container_names(ncontainers);
+  for (int i = 0; i < ncontainers; ++i) {
+    container_names[i] = containers[i];
+  }
+
+  status->status = Reset(opt->options, container_names);
+}
+
+extern "C" {
+
+void TF_Reset(const TF_SessionOptions* opt, const char** containers,
+              int ncontainers, TF_Status* status) {
+  TF_Reset_Helper(opt, containers, ncontainers, status);
+}
+
+}  // end extern "C"
+
+namespace tensorflow {
+
+Status MessageToBuffer(const tensorflow::protobuf::MessageLite& in,
+                       TF_Buffer* out) {
+  if (out->data != nullptr) {
+    return InvalidArgument("Passing non-empty TF_Buffer is invalid.");
+  }
+  const size_t proto_size = in.ByteSizeLong();
+  void* buf = port::Malloc(proto_size);
+  if (buf == nullptr) {
+    return tensorflow::errors::ResourceExhausted(
+        "Failed to allocate memory to serialize message of type '",
+        in.GetTypeName(), "' and size ", proto_size);
+  }
+  if (!in.SerializeWithCachedSizesToArray(static_cast<uint8*>(buf))) {
+    port::Free(buf);
+    return InvalidArgument("Unable to serialize ", in.GetTypeName(),
+                           " protocol buffer, perhaps the serialized size (",
+                           proto_size, " bytes) is too large?");
+  }
+  out->data = buf;
+  out->length = proto_size;
+  out->data_deallocator = [](void* data, size_t length) { port::Free(data); };
+  return Status::OK();
+}
+
+void RecordMutation(TF_Graph* graph, const TF_Operation& op,
+                    const char* mutation_type) {
+  // If any session has already run this node_id, mark this session as
+  // unrunnable.
+  for (auto it : graph->sessions) {
+    mutex_lock session_lock(it.first->mu);
+    if (it.first->last_num_graph_nodes > op.node.id()) {
+      it.second = strings::StrCat(
+          "Operation '", op.node.DebugString(), "' was changed by ",
+          mutation_type,
+          " after it was run by a session. This mutation will have no effect, "
+          "and will trigger an error in the future. Either don't modify "
+          "nodes after running them or create a new session.");
+    }
+  }
+}
+
 namespace {
+
+// Helper method that creates a shape handle for a shape described by dims.
+tensorflow::shape_inference::ShapeHandle ShapeHandleFromDims(
+    tensorflow::shape_inference::InferenceContext* ic, int num_dims,
+    const int64_t* dims) {
+  if (num_dims != -1) {
+    std::vector<tensorflow::shape_inference::DimensionHandle> dim_vec;
+    dim_vec.reserve(num_dims);
+    for (int i = 0; i < num_dims; ++i) {
+      dim_vec.push_back(ic->MakeDim(dims[i]));
+    }
+    return ic->MakeShape(dim_vec);
+  } else {
+    return ic->UnknownShape();
+  }
+}
+
+}  // namespace
+
+void TF_GraphSetOutputHandleShapesAndTypes(TF_Graph* graph, TF_Output output,
+                                           int num_shapes_and_types,
+                                           const int64_t** shapes,
+                                           const int* ranks,
+                                           const TF_DataType* types,
+                                           TF_Status* status) {
+  Node* node = &output.oper->node;
+
+  mutex_lock l(graph->mu);
+  tensorflow::shape_inference::InferenceContext* ic =
+      graph->refiner.GetContext(node);
+  if (ic == nullptr) {
+    status->status =
+        InvalidArgument("Node ", node->name(), " was not found in the graph");
+    return;
+  }
+
+  auto shape_and_type_vec =
+      std::vector<tensorflow::shape_inference::ShapeAndType>(
+          num_shapes_and_types);
+  for (int i = 0; i < num_shapes_and_types; ++i) {
+    tensorflow::shape_inference::ShapeHandle shape_handle =
+        ShapeHandleFromDims(ic, ranks[i], shapes[i]);
+    shape_and_type_vec[i] = tensorflow::shape_inference::ShapeAndType(
+        shape_handle, static_cast<DataType>(types[i]));
+  }
+
+  ic->set_output_handle_shapes_and_types(output.index, shape_and_type_vec);
+}
+
+// Helpers for loading a TensorFlow plugin (a .so file).
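// Illustrative sketch (not part of this change): how a client drives the
// plugin-loading entry points implemented later in this file;
// "my_custom_ops.so" is a placeholder path.
#include "tensorflow/c/c_api.h"

void ExampleLoadCustomOpPlugin() {
  TF_Status* status = TF_NewStatus();
  TF_Library* lib = TF_LoadLibrary("my_custom_ops.so", status);
  if (TF_GetCode(status) == TF_OK) {
    // op_list wraps a serialized OpList proto describing the ops the plugin
    // registered; the bytes stay owned by the library handle.
    TF_Buffer op_list = TF_GetOpList(lib);
    (void)op_list;
    TF_DeleteLibraryHandle(lib);
  }
  TF_DeleteStatus(status);
}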
+Status LoadLibrary(const char* library_filename, void** result, + const void** buf, size_t* len); + +// TODO(josh11b,mrry): Change Session to be able to use a Graph* +// directly, instead of requiring us to serialize to a GraphDef and +// call Session::Extend(). +bool ExtendSessionGraphHelper(TF_Session* session, TF_Status* status) { + if (session->graph != nullptr) { + // Take the graph lock before the session lock to avoid deadlock. This is + // safe since session->graph does not change. + session->graph->mu.lock(); + mutex_lock session_lock(session->mu); + const Graph& graph = session->graph->graph; + + const string& mutation_warning = session->graph->sessions[session]; + if (!mutation_warning.empty()) { + // TODO(b/74949947): turn this back into an error status + LOG(WARNING) << mutation_warning; + session->graph->sessions[session].clear(); + } + + const auto num_nodes = graph.num_node_ids(); + if (session->last_num_graph_nodes < num_nodes) { + // TODO(nolivia): check this on a subset of the graph instead of all of + // it. + status->status = graph::ValidateGraphHasNoCycle(session->graph->graph); + if (!status->status.ok()) { + session->graph->mu.unlock(); + return false; + } + + GraphDef graph_def; + *graph_def.mutable_versions() = graph.versions(); + // Fill graph_def with nodes with ids in the range + // [session->last_num_graph_nodes, num_nodes), that is the nodes + // added since the last TF_SessionRun() call. + for (auto id = session->last_num_graph_nodes; id < num_nodes; ++id) { + Node* const node = graph.FindNodeId(id); + if (node != nullptr && node->IsOp()) { + NodeDef* const node_def = graph_def.add_node(); + *node_def = node->def(); + } + } + *graph_def.mutable_library() = graph.flib_def().ToProto(); + session->graph->mu.unlock(); + status->status = session->session->Extend(std::move(graph_def)); + if (!status->status.ok()) { + // Contract is we always delete input_values[i]. + return false; + } + // Note: session->session is not modified if Extend() fails, so + // we only set last_num_graph_nodes if it succeeds. + session->last_num_graph_nodes = num_nodes; + } else { + session->graph->mu.unlock(); + } + } + return true; +} + +} // namespace tensorflow + +static void TF_Run_Setup(int noutputs, TF_Tensor** c_outputs, + TF_Status* status) { + status->status = Status::OK(); + for (int i = 0; i < noutputs; ++i) { + c_outputs[i] = nullptr; + } +} + +static bool TF_Run_Inputs(TF_Tensor* const* c_inputs, + std::vector>* input_pairs, + TF_Status* status) { + const int ninputs = input_pairs->size(); + for (int i = 0; i < ninputs; ++i) { + status->status = TF_TensorToTensor(c_inputs[i], &(*input_pairs)[i].second); + if (!status->status.ok()) return false; + } + return true; +} + +// Create an empty tensor of type 'dtype'. 'shape' can be arbitrary, but has to +// result in a zero-sized tensor. 
+static TF_Tensor* EmptyTensor(TF_DataType dtype, + const tensorflow::TensorShape& shape) { + static char empty; + tensorflow::int64 nelems = 1; + std::vector dims; + for (int i = 0; i < shape.dims(); ++i) { + dims.push_back(shape.dim_size(i)); + nelems *= shape.dim_size(i); + } + CHECK_EQ(nelems, 0); + static_assert(sizeof(int64_t) == sizeof(tensorflow::int64), + "64-bit int types should match in size"); + return TF_NewTensor( + dtype, reinterpret_cast(dims.data()), shape.dims(), + reinterpret_cast(&empty), 0, [](void*, size_t, void*) {}, nullptr); +} + +static void TF_Run_Helper( + Session* session, const char* handle, const TF_Buffer* run_options, + // Input tensors + const std::vector>& input_pairs, + // Output tensors + const std::vector& output_tensor_names, TF_Tensor** c_outputs, + // Target nodes + const std::vector& target_oper_names, TF_Buffer* run_metadata, + TF_Status* status) { + const int noutputs = output_tensor_names.size(); + std::vector outputs(noutputs); + Status result; + + if (handle == nullptr) { + RunOptions run_options_proto; + if (run_options != nullptr && !run_options_proto.ParseFromArray( + run_options->data, run_options->length)) { + status->status = InvalidArgument("Unparseable RunOptions proto"); + return; + } + if (run_metadata != nullptr && run_metadata->data != nullptr) { + status->status = + InvalidArgument("Passing non-empty run_metadata is invalid."); + return; + } + + RunMetadata run_metadata_proto; + result = session->Run(run_options_proto, input_pairs, output_tensor_names, + target_oper_names, &outputs, &run_metadata_proto); + + // Serialize back to upstream client, who now owns the new buffer + if (run_metadata != nullptr) { + status->status = MessageToBuffer(run_metadata_proto, run_metadata); + if (!status->status.ok()) return; + } + } else { + // NOTE(zongheng): PRun does not support RunOptions yet. 
+ result = session->PRun(handle, input_pairs, output_tensor_names, &outputs); + } + if (!result.ok()) { + status->status = result; + return; + } + + // Store results in c_outputs[] + for (int i = 0; i < noutputs; ++i) { + const Tensor& src = outputs[i]; + if (!src.IsInitialized() || src.NumElements() == 0) { + c_outputs[i] = + EmptyTensor(static_cast(src.dtype()), src.shape()); + continue; + } + c_outputs[i] = TF_TensorFromTensor(src, &status->status); + if (!status->status.ok()) return; + } +} + +extern "C" { + +void TF_Run(TF_DeprecatedSession* s, const TF_Buffer* run_options, + // Input tensors + const char** c_input_names, TF_Tensor** c_inputs, int ninputs, + // Output tensors + const char** c_output_names, TF_Tensor** c_outputs, int noutputs, + // Target nodes + const char** c_target_oper_names, int ntargets, + TF_Buffer* run_metadata, TF_Status* status) { + TF_Run_Setup(noutputs, c_outputs, status); + std::vector> input_pairs(ninputs); + if (!TF_Run_Inputs(c_inputs, &input_pairs, status)) return; + for (int i = 0; i < ninputs; ++i) { + input_pairs[i].first = c_input_names[i]; + } + std::vector output_names(noutputs); + for (int i = 0; i < noutputs; ++i) { + output_names[i] = c_output_names[i]; + } + std::vector target_oper_names(ntargets); + for (int i = 0; i < ntargets; ++i) { + target_oper_names[i] = c_target_oper_names[i]; + } + TF_Run_Helper(s->session, nullptr, run_options, input_pairs, output_names, + c_outputs, target_oper_names, run_metadata, status); +} + +void TF_PRunSetup(TF_DeprecatedSession* s, + // Input names + const char** c_input_names, int ninputs, + // Output names + const char** c_output_names, int noutputs, + // Target nodes + const char** c_target_oper_names, int ntargets, + const char** handle, TF_Status* status) { + *handle = nullptr; + + std::vector input_names(ninputs); + std::vector output_names(noutputs); + std::vector target_oper_names(ntargets); + for (int i = 0; i < ninputs; ++i) { + input_names[i] = c_input_names[i]; + } + for (int i = 0; i < noutputs; ++i) { + output_names[i] = c_output_names[i]; + } + for (int i = 0; i < ntargets; ++i) { + target_oper_names[i] = c_target_oper_names[i]; + } + string new_handle; + status->status = s->session->PRunSetup(input_names, output_names, + target_oper_names, &new_handle); + if (status->status.ok()) { + char* buf = new char[new_handle.size() + 1]; + memcpy(buf, new_handle.c_str(), new_handle.size() + 1); + *handle = buf; + } +} + +void TF_PRun(TF_DeprecatedSession* s, const char* handle, + // Input tensors + const char** c_input_names, TF_Tensor** c_inputs, int ninputs, + // Output tensors + const char** c_output_names, TF_Tensor** c_outputs, int noutputs, + // Target nodes + const char** c_target_oper_names, int ntargets, + TF_Status* status) { + TF_Run_Setup(noutputs, c_outputs, status); + std::vector> input_pairs(ninputs); + if (!TF_Run_Inputs(c_inputs, &input_pairs, status)) return; + for (int i = 0; i < ninputs; ++i) { + input_pairs[i].first = c_input_names[i]; + } + + std::vector output_names(noutputs); + for (int i = 0; i < noutputs; ++i) { + output_names[i] = c_output_names[i]; + } + std::vector target_oper_names(ntargets); + for (int i = 0; i < ntargets; ++i) { + target_oper_names[i] = c_target_oper_names[i]; + } + TF_Run_Helper(s->session, handle, nullptr, input_pairs, output_names, + c_outputs, target_oper_names, nullptr, status); +} + +TF_Library* TF_LoadLibrary(const char* library_filename, TF_Status* status) { + TF_Library* lib_handle = new TF_Library; + status->status = tensorflow::LoadLibrary( + 
library_filename, &lib_handle->lib_handle, &lib_handle->op_list.data, + &lib_handle->op_list.length); + if (!status->status.ok()) { + delete lib_handle; + return nullptr; + } + return lib_handle; +} + +TF_Buffer TF_GetOpList(TF_Library* lib_handle) { return lib_handle->op_list; } + +void TF_DeleteLibraryHandle(TF_Library* lib_handle) { + if (lib_handle == nullptr) return; + tensorflow::port::Free(const_cast(lib_handle->op_list.data)); + delete lib_handle; +} + +TF_Buffer* TF_GetAllOpList() { + std::vector op_defs; + tensorflow::OpRegistry::Global()->GetRegisteredOps(&op_defs); + tensorflow::OpList op_list; + for (const auto& op : op_defs) { + *(op_list.add_op()) = op; + } + TF_Buffer* ret = TF_NewBuffer(); + TF_CHECK_OK(MessageToBuffer(op_list, ret)); + return ret; +} + +// -------------------------------------------------------------------------- +// ListDevices & SessionListDevices API + +void TF_DeleteDeviceList(TF_DeviceList* list) { delete list; } + +TF_DeviceList* TF_SessionListDevices(TF_Session* session, TF_Status* status) { + TF_DeviceList* response = new TF_DeviceList; + status->status = session->session->ListDevices(&response->response); + return response; +} + +TF_DeviceList* TF_DeprecatedSessionListDevices(TF_DeprecatedSession* session, + TF_Status* status) { + TF_DeviceList* response = new TF_DeviceList; + status->status = session->session->ListDevices(&response->response); + return response; +} + +int TF_DeviceListCount(const TF_DeviceList* list) { + return list->response.size(); +} + +#define TF_DEVICELIST_METHOD(return_type, method_name, accessor, err_val) \ + return_type method_name(const TF_DeviceList* list, const int index, \ + TF_Status* status) { \ + if (list == nullptr) { \ + status->status = InvalidArgument("list is null!"); \ + return err_val; \ + } \ + if (index < 0 || index >= list->response.size()) { \ + status->status = InvalidArgument("index out of bounds"); \ + return err_val; \ + } \ + status->status = Status::OK(); \ + return list->response[index].accessor; \ + } + +TF_DEVICELIST_METHOD(const char*, TF_DeviceListName, name().c_str(), nullptr); +TF_DEVICELIST_METHOD(const char*, TF_DeviceListType, device_type().c_str(), + nullptr); +TF_DEVICELIST_METHOD(int64_t, TF_DeviceListMemoryBytes, memory_limit(), -1); +TF_DEVICELIST_METHOD(uint64_t, TF_DeviceListIncarnation, incarnation(), 0); + +#undef TF_DEVICELIST_METHOD + +} // end extern "C" + +// -------------------------------------------------------------------------- +// New Graph and Session API + +// Helper functions ----------------------------------------------------------- + +namespace { + +TF_Operation* ToOperation(Node* node) { + return static_cast(static_cast(node)); +} + +string OutputName(const TF_Output& output) { + return StrCat(output.oper->node.name(), ":", output.index); +} + +const tensorflow::AttrValue* GetAttrValue(TF_Operation* oper, + const char* attr_name, + TF_Status* status) { + const tensorflow::AttrValue* attr = oper->node.attrs().Find(attr_name); + if (attr == nullptr) { + status->status = InvalidArgument("Operation '", oper->node.name(), + "' has no attr named '", attr_name, "'."); + } + return attr; +} + +TensorId ToTensorId(const TF_Output& output) { + return TensorId(output.oper->node.name(), output.index); +} + #if !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD) std::vector OutputsFromTFOutputs(TF_Output* tf_outputs, int n) { @@ -126,8 +681,1134 @@ void TFOutputsFromOutputs(const std::vector& outputs, } // namespace +// Shape functions 
----------------------------------------------------------- + +void TF_GraphSetTensorShape(TF_Graph* graph, TF_Output output, + const int64_t* dims, const int num_dims, + TF_Status* status) { + Node* node = &output.oper->node; + + mutex_lock l(graph->mu); + tensorflow::shape_inference::InferenceContext* ic = + graph->refiner.GetContext(node); + if (ic == nullptr) { + status->status = + InvalidArgument("Node ", node->name(), " was not found in the graph"); + return; + } + tensorflow::shape_inference::ShapeHandle new_shape = + tensorflow::ShapeHandleFromDims(ic, num_dims, dims); + status->status = graph->refiner.SetShape(node, output.index, new_shape); +} + +int TF_GraphGetTensorNumDims(TF_Graph* graph, TF_Output output, + TF_Status* status) { + Node* node = &output.oper->node; + + mutex_lock l(graph->mu); + tensorflow::shape_inference::InferenceContext* ic = + graph->refiner.GetContext(node); + if (ic == nullptr) { + status->status = + InvalidArgument("Node ", node->name(), " was not found in the graph"); + return -1; + } + + tensorflow::shape_inference::ShapeHandle shape = ic->output(output.index); + + // Unknown rank means the number of dimensions is -1. + if (!ic->RankKnown(shape)) { + return -1; + } + + return ic->Rank(shape); +} + +void TF_GraphGetTensorShape(TF_Graph* graph, TF_Output output, int64_t* dims, + int num_dims, TF_Status* status) { + Node* node = &output.oper->node; + + mutex_lock l(graph->mu); + tensorflow::shape_inference::InferenceContext* ic = + graph->refiner.GetContext(node); + if (ic == nullptr) { + status->status = + InvalidArgument("Node ", node->name(), " was not found in the graph"); + return; + } + + tensorflow::shape_inference::ShapeHandle shape = ic->output(output.index); + + int rank = -1; + if (ic->RankKnown(shape)) { + rank = ic->Rank(shape); + } + + if (num_dims != rank) { + status->status = InvalidArgument("Expected rank is ", num_dims, + " but actual rank is ", rank); + return; + } + + if (num_dims == 0) { + // Output shape is a scalar. + return; + } + + // Rank is greater than 0, so fill in the values, if known, and + // -1 for unknown values. 
+ for (int i = 0; i < num_dims; ++i) { + auto dim = ic->Dim(shape, i); + tensorflow::int64 value = -1; + if (ic->ValueKnown(dim)) { + value = ic->Value(dim); + } + dims[i] = value; + } +} + +// TF_OperationDescription functions ------------------------------------------ + extern "C" { +static TF_OperationDescription* TF_NewOperationLocked(TF_Graph* graph, + const char* op_type, + const char* oper_name) + TF_EXCLUSIVE_LOCKS_REQUIRED(graph->mu) { + return new TF_OperationDescription(graph, op_type, oper_name); +} + +TF_OperationDescription* TF_NewOperation(TF_Graph* graph, const char* op_type, + const char* oper_name) { + mutex_lock l(graph->mu); + return TF_NewOperationLocked(graph, op_type, oper_name); +} + +void TF_SetDevice(TF_OperationDescription* desc, const char* device) { + desc->node_builder.Device(device); +} + +void TF_AddInput(TF_OperationDescription* desc, TF_Output input) { + desc->node_builder.Input(&input.oper->node, input.index); +} + +void TF_AddInputList(TF_OperationDescription* desc, const TF_Output* inputs, + int num_inputs) { + std::vector input_list; + input_list.reserve(num_inputs); + for (int i = 0; i < num_inputs; ++i) { + input_list.emplace_back(&inputs[i].oper->node, inputs[i].index); + } + desc->node_builder.Input(input_list); +} + +void TF_AddControlInput(TF_OperationDescription* desc, TF_Operation* input) { + desc->node_builder.ControlInput(&input->node); +} + +void TF_ColocateWith(TF_OperationDescription* desc, TF_Operation* op) { + desc->colocation_constraints.emplace( + StrCat(tensorflow::kColocationGroupPrefix, op->node.name())); +} + +void TF_SetAttrString(TF_OperationDescription* desc, const char* attr_name, + const void* value, size_t length) { + tensorflow::StringPiece s(static_cast(value), length); + desc->node_builder.Attr(attr_name, s); +} + +void TF_SetAttrStringList(TF_OperationDescription* desc, const char* attr_name, + const void* const* values, const size_t* lengths, + int num_values) { + if (strcmp(attr_name, tensorflow::kColocationAttrName) == 0) { + desc->colocation_constraints.clear(); + for (int i = 0; i < num_values; ++i) { + desc->colocation_constraints.emplace(static_cast(values[i]), + lengths[i]); + } + } else { + std::vector v; + v.reserve(num_values); + for (int i = 0; i < num_values; ++i) { + v.emplace_back(static_cast(values[i]), lengths[i]); + } + desc->node_builder.Attr(attr_name, v); + } +} + +void TF_SetAttrInt(TF_OperationDescription* desc, const char* attr_name, + int64_t value) { + static_assert(sizeof(int64_t) == sizeof(tensorflow::int64), + "64-bit int types should match in size"); + desc->node_builder.Attr(attr_name, static_cast(value)); +} + +void TF_SetAttrIntList(TF_OperationDescription* desc, const char* attr_name, + const int64_t* values, int num_values) { + static_assert(sizeof(int64_t) == sizeof(tensorflow::int64), + "64-bit int types should match in size"); + desc->node_builder.Attr( + attr_name, + ArraySlice( + reinterpret_cast(values), num_values)); +} + +void TF_SetAttrFloat(TF_OperationDescription* desc, const char* attr_name, + float value) { + desc->node_builder.Attr(attr_name, value); +} + +void TF_SetAttrFloatList(TF_OperationDescription* desc, const char* attr_name, + const float* values, int num_values) { + desc->node_builder.Attr(attr_name, + ArraySlice(values, num_values)); +} + +void TF_SetAttrBool(TF_OperationDescription* desc, const char* attr_name, + unsigned char value) { + desc->node_builder.Attr(attr_name, static_cast(value)); +} + +void TF_SetAttrBoolList(TF_OperationDescription* desc, const 
char* attr_name, + const unsigned char* values, int num_values) { + std::unique_ptr b(new bool[num_values]); + for (int i = 0; i < num_values; ++i) { + b[i] = values[i]; + } + desc->node_builder.Attr(attr_name, + ArraySlice(b.get(), num_values)); +} + +void TF_SetAttrType(TF_OperationDescription* desc, const char* attr_name, + TF_DataType value) { + desc->node_builder.Attr(attr_name, static_cast(value)); +} + +void TF_SetAttrTypeList(TF_OperationDescription* desc, const char* attr_name, + const TF_DataType* values, int num_values) { + desc->node_builder.Attr( + attr_name, ArraySlice( + reinterpret_cast(values), num_values)); +} + +void TF_SetAttrPlaceholder(TF_OperationDescription* desc, const char* attr_name, + const char* placeholder) { + tensorflow::AttrValue attr_value; + attr_value.set_placeholder(placeholder); + desc->node_builder.Attr(attr_name, attr_value); +} + +void TF_SetAttrFuncName(TF_OperationDescription* desc, const char* attr_name, + const char* value, size_t length) { + tensorflow::NameAttrList func_name; + func_name.set_name(string(value, value + length)); + desc->node_builder.Attr(attr_name, func_name); +} + +void TF_SetAttrShape(TF_OperationDescription* desc, const char* attr_name, + const int64_t* dims, int num_dims) { + PartialTensorShape shape; + if (num_dims >= 0) { + static_assert(sizeof(int64_t) == sizeof(tensorflow::int64), + "64-bit int types should match in size"); + shape = PartialTensorShape(ArraySlice( + reinterpret_cast(dims), num_dims)); + } + desc->node_builder.Attr(attr_name, shape); +} + +void TF_SetAttrShapeList(TF_OperationDescription* desc, const char* attr_name, + const int64_t* const* dims, const int* num_dims, + int num_shapes) { + std::vector shapes; + shapes.reserve(num_shapes); + for (int i = 0; i < num_shapes; ++i) { + if (num_dims[i] < 0) { + shapes.emplace_back(); + } else { + static_assert(sizeof(int64_t) == sizeof(tensorflow::int64), + "64-bit int types should match in size"); + shapes.emplace_back(ArraySlice( + reinterpret_cast(dims[i]), num_dims[i])); + } + } + desc->node_builder.Attr(attr_name, shapes); +} + +void TF_SetAttrTensorShapeProto(TF_OperationDescription* desc, + const char* attr_name, const void* proto, + size_t proto_len, TF_Status* status) { + // shape.ParseFromArray takes an int as length, this function takes size_t, + // make sure there is no information loss. 
+ if (proto_len > std::numeric_limits::max()) { + status->status = InvalidArgument( + "proto_len (", proto_len, + " bytes) is too large to be parsed by the protocol buffer library"); + return; + } + TensorShapeProto shape; + if (shape.ParseFromArray(proto, static_cast(proto_len))) { + desc->node_builder.Attr(attr_name, shape); + status->status = Status::OK(); + } else { + status->status = InvalidArgument("Unparseable TensorShapeProto"); + } +} + +void TF_SetAttrTensorShapeProtoList(TF_OperationDescription* desc, + const char* attr_name, + const void* const* protos, + const size_t* proto_lens, int num_shapes, + TF_Status* status) { + std::vector shapes; + shapes.resize(num_shapes); + for (int i = 0; i < num_shapes; ++i) { + if (proto_lens[i] > std::numeric_limits::max()) { + status->status = InvalidArgument( + "length of element ", i, " in the list (", proto_lens[i], + " bytes) is too large to be parsed by the protocol buffer library"); + return; + } + if (!shapes[i].ParseFromArray(protos[i], static_cast(proto_lens[i]))) { + status->status = + InvalidArgument("Unparseable TensorShapeProto at index ", i); + return; + } + } + desc->node_builder.Attr(attr_name, shapes); + status->status = Status::OK(); +} + +void TF_SetAttrTensor(TF_OperationDescription* desc, const char* attr_name, + TF_Tensor* value, TF_Status* status) { + Tensor t; + status->status = TF_TensorToTensor(value, &t); + if (status->status.ok()) desc->node_builder.Attr(attr_name, t); +} + +void TF_SetAttrTensorList(TF_OperationDescription* desc, const char* attr_name, + TF_Tensor* const* values, int num_values, + TF_Status* status) { + status->status = Status::OK(); + std::vector t; + t.reserve(num_values); + + for (int i = 0; i < num_values && status->status.ok(); ++i) { + Tensor v; + status->status = TF_TensorToTensor(values[i], &v); + t.emplace_back(v); + } + + if (status->status.ok()) desc->node_builder.Attr(attr_name, t); +} + +void TF_SetAttrValueProto(TF_OperationDescription* desc, const char* attr_name, + const void* proto, size_t proto_len, + TF_Status* status) { + tensorflow::AttrValue attr_value; + if (!attr_value.ParseFromArray(proto, proto_len)) { + status->status = InvalidArgument("Unparseable AttrValue proto"); + return; + } + + if (strcmp(attr_name, tensorflow::kColocationAttrName) == 0) { + if (attr_value.value_case() != tensorflow::AttrValue::kList && + attr_value.value_case() != tensorflow::AttrValue::VALUE_NOT_SET) { + status->status = + InvalidArgument("Expected \"list\" field for \"", + tensorflow::kColocationAttrName, "\" attribute"); + return; + } + desc->colocation_constraints.clear(); + for (const string& location : attr_value.list().s()) { + desc->colocation_constraints.insert(location); + } + } else { + desc->node_builder.Attr(attr_name, std::move(attr_value)); + } + + status->status = Status::OK(); +} + +static TF_Operation* TF_FinishOperationLocked(TF_OperationDescription* desc, + TF_Status* status) + TF_EXCLUSIVE_LOCKS_REQUIRED(desc->graph->mu) { + Node* ret = nullptr; + + if (desc->graph->name_map.count(desc->node_builder.node_name())) { + status->status = InvalidArgument("Duplicate node name in graph: '", + desc->node_builder.node_name(), "'"); + } else { + if (!desc->colocation_constraints.empty()) { + desc->node_builder.Attr( + tensorflow::kColocationAttrName, + std::vector(desc->colocation_constraints.begin(), + desc->colocation_constraints.end())); + } + status->status = desc->node_builder.Finalize(&desc->graph->graph, &ret, + /*consume=*/true); + + if (status->status.ok()) { + // Run shape 
inference function for newly added node. + status->status = desc->graph->refiner.AddNode(ret); + } + if (status->status.ok()) { + // Add the node to the name-to-node mapping. + desc->graph->name_map[ret->name()] = ret; + } else if (ret != nullptr) { + desc->graph->graph.RemoveNode(ret); + ret = nullptr; + } + } + + delete desc; + + return ToOperation(ret); +} + +TF_Operation* TF_FinishOperation(TF_OperationDescription* desc, + TF_Status* status) { + mutex_lock l(desc->graph->mu); + return TF_FinishOperationLocked(desc, status); +} + +// TF_Operation functions +// ---------------------------------------------------------- + +const char* TF_OperationName(TF_Operation* oper) { + return oper->node.name().c_str(); +} + +const char* TF_OperationOpType(TF_Operation* oper) { + return oper->node.type_string().c_str(); +} + +const char* TF_OperationDevice(TF_Operation* oper) { + return oper->node.requested_device().c_str(); +} + +int TF_OperationNumOutputs(TF_Operation* oper) { + return oper->node.num_outputs(); +} + +TF_DataType TF_OperationOutputType(TF_Output oper_out) { + return static_cast( + oper_out.oper->node.output_type(oper_out.index)); +} + +int TF_OperationOutputListLength(TF_Operation* oper, const char* arg_name, + TF_Status* status) { + NameRangeMap name_ranges; + status->status = + NameRangesForNode(oper->node, oper->node.op_def(), nullptr, &name_ranges); + if (!status->status.ok()) return -1; + auto iter = name_ranges.find(arg_name); + if (iter == name_ranges.end()) { + status->status = InvalidArgument("Output arg '", arg_name, "' not found"); + return -1; + } + return iter->second.second - iter->second.first; +} + +int TF_OperationNumInputs(TF_Operation* oper) { + return oper->node.num_inputs(); +} + +TF_DataType TF_OperationInputType(TF_Input oper_in) { + return static_cast(oper_in.oper->node.input_type(oper_in.index)); +} + +int TF_OperationInputListLength(TF_Operation* oper, const char* arg_name, + TF_Status* status) { + NameRangeMap name_ranges; + status->status = + NameRangesForNode(oper->node, oper->node.op_def(), &name_ranges, nullptr); + if (!status->status.ok()) return -1; + auto iter = name_ranges.find(arg_name); + if (iter == name_ranges.end()) { + status->status = InvalidArgument("Input arg '", arg_name, "' not found"); + return -1; + } + return iter->second.second - iter->second.first; +} + +TF_Output TF_OperationInput(TF_Input oper_in) { + const tensorflow::Edge* edge; + Status s = oper_in.oper->node.input_edge(oper_in.index, &edge); + if (!s.ok()) { + return {nullptr, -1}; + } + + return {ToOperation(edge->src()), edge->src_output()}; +} + +void TF_OperationAllInputs(TF_Operation* oper, TF_Output* inputs, + int max_inputs) { + for (auto* edge : oper->node.in_edges()) { + if (edge->dst_input() >= 0 && edge->dst_input() < max_inputs) { + inputs[edge->dst_input()] = {ToOperation(edge->src()), + edge->src_output()}; + } + } +} + +int TF_OperationOutputNumConsumers(TF_Output oper_out) { + int count = 0; + for (const auto* edge : oper_out.oper->node.out_edges()) { + if (edge->src_output() == oper_out.index) { + ++count; + } + } + return count; +} + +int TF_OperationOutputConsumers(TF_Output oper_out, TF_Input* consumers, + int max_consumers) { + int count = 0; + for (const auto* edge : oper_out.oper->node.out_edges()) { + if (edge->src_output() == oper_out.index) { + if (count < max_consumers) { + consumers[count] = {ToOperation(edge->dst()), edge->dst_input()}; + } + ++count; + } + } + return count; +} + +int TF_OperationNumControlInputs(TF_Operation* oper) { + int count = 
0; + for (const auto* edge : oper->node.in_edges()) { + if (edge->IsControlEdge() && !edge->src()->IsSource()) { + ++count; + } + } + return count; +} + +int TF_OperationGetControlInputs(TF_Operation* oper, + TF_Operation** control_inputs, + int max_control_inputs) { + int count = 0; + for (const auto* edge : oper->node.in_edges()) { + if (edge->IsControlEdge() && !edge->src()->IsSource()) { + if (count < max_control_inputs) { + control_inputs[count] = ToOperation(edge->src()); + } + ++count; + } + } + return count; +} + +int TF_OperationNumControlOutputs(TF_Operation* oper) { + int count = 0; + for (const auto* edge : oper->node.out_edges()) { + if (edge->IsControlEdge() && !edge->dst()->IsSink()) { + ++count; + } + } + return count; +} + +int TF_OperationGetControlOutputs(TF_Operation* oper, + TF_Operation** control_outputs, + int max_control_outputs) { + int count = 0; + for (const auto* edge : oper->node.out_edges()) { + if (edge->IsControlEdge() && !edge->dst()->IsSink()) { + if (count < max_control_outputs) { + control_outputs[count] = ToOperation(edge->dst()); + } + ++count; + } + } + return count; +} + +TF_AttrMetadata TF_OperationGetAttrMetadata(TF_Operation* oper, + const char* attr_name, + TF_Status* status) { + TF_AttrMetadata metadata; + const auto* attr = GetAttrValue(oper, attr_name, status); + if (!status->status.ok()) return metadata; + switch (attr->value_case()) { +#define SINGLE_CASE(kK, attr_type, size_expr) \ + case tensorflow::AttrValue::kK: \ + metadata.is_list = 0; \ + metadata.list_size = -1; \ + metadata.type = attr_type; \ + metadata.total_size = size_expr; \ + break; + + SINGLE_CASE(kS, TF_ATTR_STRING, attr->s().length()); + SINGLE_CASE(kI, TF_ATTR_INT, -1); + SINGLE_CASE(kF, TF_ATTR_FLOAT, -1); + SINGLE_CASE(kB, TF_ATTR_BOOL, -1); + SINGLE_CASE(kType, TF_ATTR_TYPE, -1); + SINGLE_CASE(kShape, TF_ATTR_SHAPE, + attr->shape().unknown_rank() ? -1 : attr->shape().dim_size()); + SINGLE_CASE(kTensor, TF_ATTR_TENSOR, -1); +#undef SINGLE_CASE + + case tensorflow::AttrValue::kList: + metadata.is_list = 1; + metadata.list_size = 0; + metadata.total_size = -1; +#define LIST_CASE(field, attr_type, ...) \ + if (attr->list().field##_size() > 0) { \ + metadata.type = attr_type; \ + metadata.list_size = attr->list().field##_size(); \ + __VA_ARGS__; \ + break; \ + } + + LIST_CASE( + s, TF_ATTR_STRING, metadata.total_size = 0; + for (int i = 0; i < attr->list().s_size(); + ++i) { metadata.total_size += attr->list().s(i).size(); }); + LIST_CASE(i, TF_ATTR_INT); + LIST_CASE(f, TF_ATTR_FLOAT); + LIST_CASE(b, TF_ATTR_BOOL); + LIST_CASE(type, TF_ATTR_TYPE); + LIST_CASE( + shape, TF_ATTR_SHAPE, metadata.total_size = 0; + for (int i = 0; i < attr->list().shape_size(); ++i) { + const auto& s = attr->list().shape(i); + metadata.total_size += s.unknown_rank() ? 0 : s.dim_size(); + }); + LIST_CASE(tensor, TF_ATTR_TENSOR); + LIST_CASE(tensor, TF_ATTR_FUNC); +#undef LIST_CASE + // All lists empty, determine the type from the OpDef. 
+ if (metadata.list_size == 0) { + for (int i = 0; i < oper->node.op_def().attr_size(); ++i) { + const auto& a = oper->node.op_def().attr(i); + if (a.name() != attr_name) continue; + const string& typestr = a.type(); + if (typestr == "list(string)") { + metadata.type = TF_ATTR_STRING; + } else if (typestr == "list(int)") { + metadata.type = TF_ATTR_INT; + } else if (typestr == "list(float)") { + metadata.type = TF_ATTR_FLOAT; + } else if (typestr == "list(bool)") { + metadata.type = TF_ATTR_BOOL; + } else if (typestr == "list(type)") { + metadata.type = TF_ATTR_TYPE; + } else if (typestr == "list(shape)") { + metadata.type = TF_ATTR_SHAPE; + } else if (typestr == "list(tensor)") { + metadata.type = TF_ATTR_TENSOR; + } else if (typestr == "list(func)") { + metadata.type = TF_ATTR_FUNC; + } else { + status->status = InvalidArgument( + "Attribute '", attr_name, + "' has an empty value of an unrecognized type '", typestr, "'"); + return metadata; + } + } + } + break; + + case tensorflow::AttrValue::kPlaceholder: + metadata.is_list = 0; + metadata.list_size = -1; + metadata.type = TF_ATTR_PLACEHOLDER; + metadata.total_size = -1; + break; + + case tensorflow::AttrValue::kFunc: + metadata.is_list = 0; + metadata.list_size = -1; + metadata.type = TF_ATTR_FUNC; + metadata.total_size = -1; + break; + + case tensorflow::AttrValue::VALUE_NOT_SET: + status->status = + InvalidArgument("Attribute '", attr_name, "' has no value set"); + break; + } + return metadata; +} + +void TF_OperationGetAttrString(TF_Operation* oper, const char* attr_name, + void* value, size_t max_length, + TF_Status* status) { + const auto* attr = GetAttrValue(oper, attr_name, status); + if (!status->status.ok()) return; + if (attr->value_case() != tensorflow::AttrValue::kS) { + status->status = + InvalidArgument("Attribute '", attr_name, "' is not a string"); + return; + } + if (max_length <= 0) { + return; + } + const auto& s = attr->s(); + std::memcpy(value, s.data(), std::min(s.length(), max_length)); +} + +void TF_OperationGetAttrStringList(TF_Operation* oper, const char* attr_name, + void** values, size_t* lengths, + int max_values, void* storage, + size_t storage_size, TF_Status* status) { + const auto* attr = GetAttrValue(oper, attr_name, status); + if (!status->status.ok()) return; + if (attr->value_case() != tensorflow::AttrValue::kList) { + status->status = + InvalidArgument("Value for '", attr_name, "' is not a list"); + return; + } + const auto len = std::min(max_values, attr->list().s_size()); + char* p = static_cast(storage); + for (int i = 0; i < len; ++i) { + const string& s = attr->list().s(i); + values[i] = p; + lengths[i] = s.size(); + if ((p + s.size()) > (static_cast(storage) + storage_size)) { + status->status = InvalidArgument( + "Not enough storage to hold the requested list of strings"); + return; + } + memcpy(values[i], s.data(), s.size()); + p += s.size(); + } +} + +#define DEFINE_GETATTR(func, c_type, cpp_type, list_field) \ + void func(TF_Operation* oper, const char* attr_name, c_type* value, \ + TF_Status* status) { \ + cpp_type v; \ + status->status = \ + tensorflow::GetNodeAttr(oper->node.attrs(), attr_name, &v); \ + *value = static_cast(v); \ + } \ + void func##List(TF_Operation* oper, const char* attr_name, c_type* values, \ + int max_values, TF_Status* status) { \ + const auto* attr = GetAttrValue(oper, attr_name, status); \ + if (!status->status.ok()) return; \ + if (attr->value_case() != tensorflow::AttrValue::kList) { \ + status->status = \ + InvalidArgument("Value for '", attr_name, "' is not 
a list."); \ + return; \ + } \ + const auto len = std::min(max_values, attr->list().list_field##_size()); \ + for (int i = 0; i < len; ++i) { \ + values[i] = static_cast(attr->list().list_field(i)); \ + } \ + } +DEFINE_GETATTR(TF_OperationGetAttrInt, int64_t, tensorflow::int64, i); +DEFINE_GETATTR(TF_OperationGetAttrFloat, float, float, f); +DEFINE_GETATTR(TF_OperationGetAttrBool, unsigned char, bool, b); +DEFINE_GETATTR(TF_OperationGetAttrType, TF_DataType, DataType, type); +#undef DEFINE_GETATTR + +void TF_OperationGetAttrShape(TF_Operation* oper, const char* attr_name, + int64_t* value, int num_dims, TF_Status* status) { + PartialTensorShape shape; + status->status = + tensorflow::GetNodeAttr(oper->node.attrs(), attr_name, &shape); + if (!status->status.ok()) return; + auto len = std::min(shape.dims(), num_dims); + for (int i = 0; i < len; ++i) { + value[i] = shape.dim_size(i); + } +} + +void TF_OperationGetAttrShapeList(TF_Operation* oper, const char* attr_name, + int64_t** dims, int* num_dims, int num_shapes, + int64_t* storage, int storage_size, + TF_Status* status) { + std::vector shapes; + status->status = + tensorflow::GetNodeAttr(oper->node.attrs(), attr_name, &shapes); + if (!status->status.ok()) return; + auto len = std::min(static_cast(shapes.size()), num_shapes); + int64_t* p = storage; + int storage_left = storage_size; + for (int i = 0; i < len; ++i) { + // shapes[i].dims() == -1 for shapes with an unknown rank. + int64_t n = shapes[i].dims(); + num_dims[i] = n; + dims[i] = p; + if (n < 0) { + continue; + } + if (storage_left < n) { + status->status = InvalidArgument( + "Not enough storage to hold the requested list of shapes"); + return; + } + storage_left -= n; + for (int j = 0; j < n; ++j, ++p) { + *p = shapes[i].dim_size(j); + } + } +} + +void TF_OperationGetAttrTensorShapeProto(TF_Operation* oper, + const char* attr_name, + TF_Buffer* value, TF_Status* status) { + const auto* attr = GetAttrValue(oper, attr_name, status); + if (!status->status.ok()) return; + if (attr->value_case() != tensorflow::AttrValue::kShape) { + status->status = + InvalidArgument("Value for '", attr_name, "' is not a shape."); + return; + } + status->status = MessageToBuffer(attr->shape(), value); +} + +void TF_OperationGetAttrTensorShapeProtoList(TF_Operation* oper, + const char* attr_name, + TF_Buffer** values, int max_values, + TF_Status* status) { + const auto* attr = GetAttrValue(oper, attr_name, status); + if (!status->status.ok()) return; + if (attr->value_case() != tensorflow::AttrValue::kList) { + status->status = + InvalidArgument("Value for '", attr_name, "' is not a list"); + return; + } + const auto len = std::min(max_values, attr->list().shape_size()); + for (int i = 0; i < len; ++i) { + values[i] = TF_NewBuffer(); + status->status = MessageToBuffer(attr->list().shape(i), values[i]); + if (!status->status.ok()) { + // Delete everything allocated to far, the operation has failed. 
+ for (int j = 0; j <= i; ++j) { + TF_DeleteBuffer(values[j]); + } + return; + } + } +} + +void TF_OperationGetAttrTensor(TF_Operation* oper, const char* attr_name, + TF_Tensor** value, TF_Status* status) { + *value = nullptr; + Tensor t; + status->status = tensorflow::GetNodeAttr(oper->node.attrs(), attr_name, &t); + if (!status->status.ok()) return; + *value = TF_TensorFromTensor(t, &status->status); +} + +void TF_OperationGetAttrTensorList(TF_Operation* oper, const char* attr_name, + TF_Tensor** values, int max_values, + TF_Status* status) { + std::vector ts; + status->status = tensorflow::GetNodeAttr(oper->node.attrs(), attr_name, &ts); + if (!status->status.ok()) return; + const auto len = std::min(max_values, static_cast(ts.size())); + for (int i = 0; i < len; ++i) { + values[i] = TF_TensorFromTensor(ts[i], &status->status); + } +} + +void TF_OperationGetAttrValueProto(TF_Operation* oper, const char* attr_name, + TF_Buffer* output_attr_value, + TF_Status* status) { + const auto* attr = GetAttrValue(oper, attr_name, status); + if (!status->status.ok()) return; + status->status = MessageToBuffer(*attr, output_attr_value); +} + +void TF_OperationToNodeDef(TF_Operation* oper, TF_Buffer* output_node_def, + TF_Status* status) { + status->status = MessageToBuffer(oper->node.def(), output_node_def); +} + +// TF_Graph functions --------------------------------------------------------- + +TF_Graph::TF_Graph() + : graph(tensorflow::OpRegistry::Global()), + refiner(graph.versions().producer(), graph.op_registry()), + delete_requested(false), + parent(nullptr), + parent_inputs(nullptr) { + // Tell the shape refiner to also run shape inference on functions. + refiner.set_function_library_for_shape_inference(&graph.flib_def()); +} + +TF_Graph* TF_NewGraph() { return new TF_Graph; } + +void TF_DeleteGraph(TF_Graph* g) { + if (g == nullptr) return; + g->mu.lock(); + g->delete_requested = true; + const bool del = g->sessions.empty(); + g->mu.unlock(); + if (del) delete g; +} + +TF_Operation* TF_GraphOperationByName(TF_Graph* graph, const char* oper_name) { + mutex_lock l(graph->mu); + auto iter = graph->name_map.find(oper_name); + if (iter == graph->name_map.end()) { + return nullptr; + } else { + return ToOperation(iter->second); + } +} + +TF_Operation* TF_GraphNextOperation(TF_Graph* graph, size_t* pos) { + if (*pos == 0) { + // Advance past the first sentinel nodes in every graph (the source & sink). + *pos += 2; + } else { + // Advance to the next node. + *pos += 1; + } + + mutex_lock l(graph->mu); + while (*pos < static_cast(graph->graph.num_node_ids())) { + Node* node = graph->graph.FindNodeId(*pos); + // FindNodeId() returns nullptr for nodes that have been deleted. + // We aren't currently allowing nodes to be deleted, but it is safer + // to still check. + if (node != nullptr) return ToOperation(node); + *pos += 1; + } + + // No more nodes. 
+ return nullptr; +} + +void TF_GraphToGraphDef(TF_Graph* graph, TF_Buffer* output_graph_def, + TF_Status* status) { + GraphDef def; + { + mutex_lock l(graph->mu); + graph->graph.ToGraphDef(&def); + } + status->status = MessageToBuffer(def, output_graph_def); +} + +void TF_GraphGetOpDef(TF_Graph* graph, const char* op_name, + TF_Buffer* output_op_def, TF_Status* status) { + const OpDef* op_def; + { + mutex_lock l(graph->mu); + status->status = graph->graph.op_registry()->LookUpOpDef(op_name, &op_def); + if (!status->status.ok()) return; + } + status->status = MessageToBuffer(*op_def, output_op_def); +} + +void TF_GraphVersions(TF_Graph* graph, TF_Buffer* output_version_def, + TF_Status* status) { + VersionDef versions; + { + mutex_lock l(graph->mu); + versions = graph->graph.versions(); + } + status->status = MessageToBuffer(versions, output_version_def); +} + +TF_ImportGraphDefOptions* TF_NewImportGraphDefOptions() { + return new TF_ImportGraphDefOptions; +} +void TF_DeleteImportGraphDefOptions(TF_ImportGraphDefOptions* opts) { + delete opts; +} +void TF_ImportGraphDefOptionsSetPrefix(TF_ImportGraphDefOptions* opts, + const char* prefix) { + opts->opts.prefix = prefix; +} +void TF_ImportGraphDefOptionsSetDefaultDevice(TF_ImportGraphDefOptions* opts, + const char* device) { + opts->opts.default_device = device; +} + +void TF_ImportGraphDefOptionsSetUniquifyNames(TF_ImportGraphDefOptions* opts, + unsigned char uniquify_names) { + opts->opts.uniquify_names = uniquify_names; +} + +void TF_ImportGraphDefOptionsSetUniquifyPrefix(TF_ImportGraphDefOptions* opts, + unsigned char uniquify_prefix) { + opts->opts.uniquify_prefix = uniquify_prefix; +} + +void TF_ImportGraphDefOptionsAddInputMapping(TF_ImportGraphDefOptions* opts, + const char* src_name, + int src_index, TF_Output dst) { + opts->tensor_id_data.push_back(src_name); + const string& src_name_str = opts->tensor_id_data.back(); + // We don't need to store dst's name in tensor_id_data, since `dst` must + // outlive the ImportGraphDef call. 
+ opts->opts.input_map[TensorId(src_name_str, src_index)] = ToTensorId(dst); +} + +void TF_ImportGraphDefOptionsRemapControlDependency( + TF_ImportGraphDefOptions* opts, const char* src_name, TF_Operation* dst) { + opts->opts.input_map[TensorId(src_name, tensorflow::Graph::kControlSlot)] = + TensorId(dst->node.name(), tensorflow::Graph::kControlSlot); +} + +extern void TF_ImportGraphDefOptionsAddControlDependency( + TF_ImportGraphDefOptions* opts, TF_Operation* oper) { + opts->opts.control_dependencies.push_back(oper->node.name()); +} + +void TF_ImportGraphDefOptionsAddReturnOutput(TF_ImportGraphDefOptions* opts, + const char* oper_name, int index) { + opts->tensor_id_data.push_back(oper_name); + const string& oper_name_str = opts->tensor_id_data.back(); + opts->opts.return_tensors.emplace_back(oper_name_str, index); +} + +int TF_ImportGraphDefOptionsNumReturnOutputs( + const TF_ImportGraphDefOptions* opts) { + return opts->opts.return_tensors.size(); +} + +void TF_ImportGraphDefOptionsAddReturnOperation(TF_ImportGraphDefOptions* opts, + const char* oper_name) { + opts->opts.return_nodes.push_back(oper_name); +} + +int TF_ImportGraphDefOptionsNumReturnOperations( + const TF_ImportGraphDefOptions* opts) { + return opts->opts.return_nodes.size(); +} + +void TF_ImportGraphDefResultsReturnOutputs(TF_ImportGraphDefResults* results, + int* num_outputs, + TF_Output** outputs) { + *num_outputs = results->return_tensors.size(); + *outputs = results->return_tensors.data(); +} + +void TF_ImportGraphDefResultsReturnOperations(TF_ImportGraphDefResults* results, + int* num_opers, + TF_Operation*** opers) { + *num_opers = results->return_nodes.size(); + *opers = results->return_nodes.data(); +} + +void TF_ImportGraphDefResultsMissingUnusedInputMappings( + TF_ImportGraphDefResults* results, int* num_missing_unused_input_mappings, + const char*** src_names, int** src_indexes) { + *num_missing_unused_input_mappings = results->missing_unused_key_names.size(); + *src_names = results->missing_unused_key_names.data(); + *src_indexes = results->missing_unused_key_indexes.data(); +} + +void TF_DeleteImportGraphDefResults(TF_ImportGraphDefResults* results) { + delete results; +} + +static void GraphImportGraphDefLocked(TF_Graph* graph, const GraphDef& def, + const TF_ImportGraphDefOptions* opts, + TF_ImportGraphDefResults* tf_results, + TF_Status* status) + TF_EXCLUSIVE_LOCKS_REQUIRED(graph->mu) { + const int last_node_id = graph->graph.num_node_ids(); + tensorflow::ImportGraphDefResults results; + status->status = tensorflow::ImportGraphDef(opts->opts, def, &graph->graph, + &graph->refiner, &results); + if (!status->status.ok()) return; + + // Add new nodes to name_map + for (int i = last_node_id; i < graph->graph.num_node_ids(); ++i) { + auto* node = graph->graph.FindNodeId(i); + if (node != nullptr) graph->name_map[node->name()] = node; + } + + // Populate return_tensors + DCHECK(tf_results->return_tensors.empty()); + tf_results->return_tensors.resize(results.return_tensors.size()); + for (int i = 0; i < results.return_tensors.size(); ++i) { + tf_results->return_tensors[i].oper = + ToOperation(results.return_tensors[i].first); + tf_results->return_tensors[i].index = results.return_tensors[i].second; + } + + // Populate return_nodes + DCHECK(tf_results->return_nodes.empty()); + tf_results->return_nodes.resize(results.return_nodes.size()); + for (int i = 0; i < results.return_nodes.size(); ++i) { + tf_results->return_nodes[i] = ToOperation(results.return_nodes[i]); + } + + // Populate missing unused map keys + 
DCHECK(tf_results->missing_unused_key_names.empty()); + DCHECK(tf_results->missing_unused_key_indexes.empty()); + DCHECK(tf_results->missing_unused_key_names_data.empty()); + + size_t size = results.missing_unused_input_map_keys.size(); + tf_results->missing_unused_key_names.resize(size); + tf_results->missing_unused_key_indexes.resize(size); + + for (int i = 0; i < size; ++i) { + TensorId id = results.missing_unused_input_map_keys[i]; + tf_results->missing_unused_key_names_data.emplace_back(id.first); + tf_results->missing_unused_key_names[i] = + tf_results->missing_unused_key_names_data.back().c_str(); + tf_results->missing_unused_key_indexes[i] = id.second; + } +} + +TF_ImportGraphDefResults* TF_GraphImportGraphDefWithResults( + TF_Graph* graph, const TF_Buffer* graph_def, + const TF_ImportGraphDefOptions* options, TF_Status* status) { + GraphDef def; + if (!tensorflow::ParseProtoUnlimited(&def, graph_def->data, + graph_def->length)) { + status->status = InvalidArgument("Invalid GraphDef"); + return nullptr; + } + auto results = new TF_ImportGraphDefResults(); + mutex_lock l(graph->mu); + GraphImportGraphDefLocked(graph, def, options, results, status); + if (!status->status.ok()) { + delete results; + return nullptr; + } + return results; +} + +void TF_GraphImportGraphDefWithReturnOutputs( + TF_Graph* graph, const TF_Buffer* graph_def, + const TF_ImportGraphDefOptions* options, TF_Output* return_outputs, + int num_return_outputs, TF_Status* status) { + if (num_return_outputs != options->opts.return_tensors.size()) { + status->status = InvalidArgument("Expected 'num_return_outputs' to be ", + options->opts.return_tensors.size(), + ", got ", num_return_outputs); + return; + } + if (num_return_outputs > 0 && return_outputs == nullptr) { + status->status = InvalidArgument( + "'return_outputs' must be preallocated to length ", num_return_outputs); + return; + } + GraphDef def; + if (!tensorflow::ParseProtoUnlimited(&def, graph_def->data, + graph_def->length)) { + status->status = InvalidArgument("Invalid GraphDef"); + return; + } + TF_ImportGraphDefResults results; + mutex_lock l(graph->mu); + GraphImportGraphDefLocked(graph, def, options, &results, status); + DCHECK_EQ(results.return_tensors.size(), num_return_outputs); + memcpy(return_outputs, results.return_tensors.data(), + num_return_outputs * sizeof(TF_Output)); +} + +void TF_GraphImportGraphDef(TF_Graph* graph, const TF_Buffer* graph_def, + const TF_ImportGraphDefOptions* options, + TF_Status* status) { + TF_ImportGraphDefResults* results = + TF_GraphImportGraphDefWithResults(graph, graph_def, options, status); + TF_DeleteImportGraphDefResults(results); +} + // While loop functions ------------------------------------------------------- namespace { @@ -480,4 +2161,404 @@ void TF_AddGradientsWithPrefix(TF_Graph* g, const char* prefix, TF_Output* y, #endif // defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) } +// TF_Session functions ---------------------------------------------- + +TF_Session::TF_Session(tensorflow::Session* s, TF_Graph* g) + : session(s), graph(g), last_num_graph_nodes(0), extend_before_run(true) {} + +TF_Session* TF_NewSession(TF_Graph* graph, const TF_SessionOptions* opt, + TF_Status* status) { + Session* session; + status->status = NewSession(opt->options, &session); + if (status->status.ok()) { + TF_Session* new_session = new TF_Session(session, graph); + if (graph != nullptr) { + mutex_lock l(graph->mu); + graph->sessions[new_session] = ""; + } + return new_session; + } else { + DCHECK_EQ(nullptr, session); 
+ return nullptr; + } +} + +TF_Session* TF_LoadSessionFromSavedModel( + const TF_SessionOptions* session_options, const TF_Buffer* run_options, + const char* export_dir, const char* const* tags, int tags_len, + TF_Graph* graph, TF_Buffer* meta_graph_def, TF_Status* status) { +// TODO(sjr): Remove the IS_MOBILE_PLATFORM guard. This will require ensuring +// that the tensorflow/cc/saved_model:loader build target is mobile friendly. +#if defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) + status->status = tensorflow::errors::Unimplemented( + "Loading a SavedModel is not supported on mobile. File a bug at " + "https://github.com/tensorflow/tensorflow/issues if this feature is " + "important to you"); + return nullptr; +#else + mutex_lock l(graph->mu); + if (!graph->name_map.empty()) { + status->status = InvalidArgument("Graph is non-empty."); + return nullptr; + } + + RunOptions run_options_proto; + if (run_options != nullptr && !run_options_proto.ParseFromArray( + run_options->data, run_options->length)) { + status->status = InvalidArgument("Unparseable RunOptions proto"); + return nullptr; + } + + std::unordered_set tag_set; + for (int i = 0; i < tags_len; i++) { + tag_set.insert(string(tags[i])); + } + + tensorflow::SavedModelBundle bundle; + status->status = + tensorflow::LoadSavedModel(session_options->options, run_options_proto, + export_dir, tag_set, &bundle); + if (!status->status.ok()) return nullptr; + + // Create a TF_Graph from the MetaGraphDef. This is safe as long as Session + // extends using GraphDefs. The Graph instance is different, but equivalent + // to the one used to create the session. + // + // TODO(jhseu): When Session is modified to take Graphs instead of + // GraphDefs, return the Graph generated in LoadSavedModel(). + TF_ImportGraphDefOptions* import_opts = TF_NewImportGraphDefOptions(); + TF_ImportGraphDefResults results; + GraphImportGraphDefLocked(graph, bundle.meta_graph_def.graph_def(), + import_opts, &results, status); + TF_DeleteImportGraphDefOptions(import_opts); + if (!status->status.ok()) return nullptr; + + if (meta_graph_def != nullptr) { + status->status = MessageToBuffer(bundle.meta_graph_def, meta_graph_def); + if (!status->status.ok()) return nullptr; + } + + TF_Session* session = new TF_Session(bundle.session.release(), graph); + + graph->sessions[session] = ""; + session->last_num_graph_nodes = graph->graph.num_node_ids(); + return session; +#endif // defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) +} + +void TF_CloseSession(TF_Session* s, TF_Status* status) { + status->status = s->session->Close(); +} + +void TF_DeleteSession(TF_Session* s, TF_Status* status) { + status->status = Status::OK(); + if (s == nullptr) return; + TF_Graph* const graph = s->graph; + if (graph != nullptr) { + graph->mu.lock(); + graph->sessions.erase(s); + const bool del = graph->delete_requested && graph->sessions.empty(); + graph->mu.unlock(); + if (del) delete graph; + } + delete s->session; + delete s; +} + +void TF_SessionRun(TF_Session* session, const TF_Buffer* run_options, + const TF_Output* inputs, TF_Tensor* const* input_values, + int ninputs, const TF_Output* outputs, + TF_Tensor** output_values, int noutputs, + const TF_Operation* const* target_opers, int ntargets, + TF_Buffer* run_metadata, TF_Status* status) { + // TODO(josh11b,mrry): Change Session to be able to use a Graph* + // directly, instead of requiring us to serialize to a GraphDef and + // call Session::Extend(). 
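For reference, a minimal usage sketch of the SavedModel loading path implemented above, written against the C API only. The export directory, the "serve" tag, and the error handling are illustrative assumptions, not part of this change; the status helpers come from tf_status.h.

#include <stdio.h>
#include "tensorflow/c/c_api.h"

/* Load a SavedModel into `graph` (which must be a fresh, empty graph) and
   return a session that is ready to run. Returns NULL on failure. */
TF_Session* load_saved_model(const char* export_dir, TF_Graph* graph,
                             TF_Status* status) {
  TF_SessionOptions* opts = TF_NewSessionOptions();
  const char* tags[] = {"serve"};  /* conventional serving tag (assumption) */
  /* run_options and meta_graph_def may both be NULL when not needed. */
  TF_Session* session = TF_LoadSessionFromSavedModel(
      opts, /*run_options=*/NULL, export_dir, tags, /*tags_len=*/1, graph,
      /*meta_graph_def=*/NULL, status);
  TF_DeleteSessionOptions(opts);
  if (TF_GetCode(status) != TF_OK) {
    fprintf(stderr, "LoadSavedModel failed: %s\n", TF_Message(status));
    return NULL;
  }
  return session;
}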
+  if (session->extend_before_run &&
+      !ExtendSessionGraphHelper(session, status)) {
+    return;
+  }
+
+  TF_Run_Setup(noutputs, output_values, status);
+
+  // Convert from TF_Output and TF_Tensor to a string and Tensor.
+  std::vector<std::pair<string, Tensor>> input_pairs(ninputs);
+  if (!TF_Run_Inputs(input_values, &input_pairs, status)) return;
+  for (int i = 0; i < ninputs; ++i) {
+    input_pairs[i].first = OutputName(inputs[i]);
+  }
+
+  // Convert from TF_Output to string names.
+  std::vector<string> output_names(noutputs);
+  for (int i = 0; i < noutputs; ++i) {
+    output_names[i] = OutputName(outputs[i]);
+  }
+
+  // Convert from TF_Operation* to string names.
+  std::vector<string> target_names(ntargets);
+  for (int i = 0; i < ntargets; ++i) {
+    target_names[i] = target_opers[i]->node.name();
+  }
+
+  // Actually run.
+  TF_Run_Helper(session->session, nullptr, run_options, input_pairs,
+                output_names, output_values, target_names, run_metadata,
+                status);
+}
+
+void TF_SessionPRunSetup(TF_Session* session, const TF_Output* inputs,
+                         int ninputs, const TF_Output* outputs, int noutputs,
+                         const TF_Operation* const* target_opers, int ntargets,
+                         const char** handle, TF_Status* status) {
+  *handle = nullptr;
+
+  if (session->extend_before_run &&
+      !ExtendSessionGraphHelper(session, status)) {
+    return;
+  }
+
+  std::vector<string> input_names(ninputs);
+  for (int i = 0; i < ninputs; ++i) {
+    input_names[i] = OutputName(inputs[i]);
+  }
+
+  std::vector<string> output_names(noutputs);
+  for (int i = 0; i < noutputs; ++i) {
+    output_names[i] = OutputName(outputs[i]);
+  }
+
+  std::vector<string> target_names(ntargets);
+  for (int i = 0; i < ntargets; ++i) {
+    target_names[i] = target_opers[i]->node.name();
+  }
+
+  string new_handle;
+  status->status = session->session->PRunSetup(input_names, output_names,
+                                               target_names, &new_handle);
+  if (status->status.ok()) {
+    char* buf = new char[new_handle.size() + 1];
+    memcpy(buf, new_handle.c_str(), new_handle.size() + 1);
+    *handle = buf;
+  }
+}
+
+void TF_DeletePRunHandle(const char* handle) {
+  delete[] handle;
+  // TODO(suharshs): Free up any resources held by the partial run state.
+}
+
+void TF_SessionPRun(TF_Session* session, const char* handle,
+                    const TF_Output* inputs, TF_Tensor* const* input_values,
+                    int ninputs, const TF_Output* outputs,
+                    TF_Tensor** output_values, int noutputs,
+                    const TF_Operation* const* target_opers, int ntargets,
+                    TF_Status* status) {
+  // TODO(josh11b,mrry): Change Session to be able to use a Graph*
+  // directly, instead of requiring us to serialize to a GraphDef and
+  // call Session::Extend().
+  if (session->extend_before_run &&
+      !ExtendSessionGraphHelper(session, status)) {
+    return;
+  }
+
+  TF_Run_Setup(noutputs, output_values, status);
+
+  // Convert from TF_Output and TF_Tensor to a string and Tensor.
+  std::vector<std::pair<string, Tensor>> input_pairs(ninputs);
+  if (!TF_Run_Inputs(input_values, &input_pairs, status)) return;
+  for (int i = 0; i < ninputs; ++i) {
+    input_pairs[i].first = OutputName(inputs[i]);
+  }
+
+  // Convert from TF_Output to string names.
+  std::vector<string> output_names(noutputs);
+  for (int i = 0; i < noutputs; ++i) {
+    output_names[i] = OutputName(outputs[i]);
+  }
+
+  // Convert from TF_Operation* to string names.
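As a usage sketch for TF_SessionRun above: feed a scalar float into an operation named "input" and fetch "output". The operation names, the scalar shape, and the tensor/status helpers (tf_tensor.h, tf_status.h) are assumptions for illustration, not part of this change.

#include <stdio.h>
#include <string.h>
#include "tensorflow/c/c_api.h"

/* Run one step of `session`, whose graph is assumed to contain operations
   named "input" and "output". Returns 0 on success. */
int run_once(TF_Session* session, TF_Graph* graph, TF_Status* status) {
  TF_Output in = {TF_GraphOperationByName(graph, "input"), 0};
  TF_Output out = {TF_GraphOperationByName(graph, "output"), 0};
  if (in.oper == NULL || out.oper == NULL) return -1;

  /* Build a scalar float32 feed tensor and copy the value in. */
  float value = 1.0f;
  TF_Tensor* feed =
      TF_AllocateTensor(TF_FLOAT, /*dims=*/NULL, /*num_dims=*/0, sizeof(float));
  memcpy(TF_TensorData(feed), &value, sizeof(value));

  TF_Tensor* fetch = NULL;
  TF_SessionRun(session, /*run_options=*/NULL,
                &in, &feed, /*ninputs=*/1,
                &out, &fetch, /*noutputs=*/1,
                /*target_opers=*/NULL, /*ntargets=*/0,
                /*run_metadata=*/NULL, status);
  TF_DeleteTensor(feed);
  if (TF_GetCode(status) != TF_OK) {
    fprintf(stderr, "TF_SessionRun: %s\n", TF_Message(status));
    return -1;
  }
  printf("output = %f\n", *(float*)TF_TensorData(fetch));
  TF_DeleteTensor(fetch);
  return 0;
}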
+ std::vector target_names(ntargets); + for (int i = 0; i < ntargets; ++i) { + target_names[i] = target_opers[i]->node.name(); + } + + TF_Run_Helper(session->session, handle, nullptr, input_pairs, output_names, + output_values, target_names, nullptr, status); +} + +unsigned char TF_TryEvaluateConstant(TF_Graph* graph, TF_Output output, + TF_Tensor** result, TF_Status* status) { + *result = nullptr; + mutex_lock l(graph->mu); + OutputTensor tensor(&output.oper->node, output.index); + bool evaluated; + Tensor result_tensor; + status->status = EvaluateConstantTensor( + tensor, graph->refiner, *graph->graph.op_registry(), + graph->graph.versions().producer(), &evaluated, &result_tensor); + if (evaluated) { + DCHECK(status->status.ok()); + *result = TF_TensorFromTensor(result_tensor, &status->status); + if (!status->status.ok()) evaluated = false; + } + return evaluated; +} + +TF_ApiDefMap* TF_NewApiDefMap(TF_Buffer* op_list_buffer, TF_Status* status) { + tensorflow::OpList op_list; + if (!op_list.ParseFromArray(op_list_buffer->data, op_list_buffer->length)) { + status->status = InvalidArgument("Unparseable OpList"); + return nullptr; + } + status->status = Status::OK(); + return new TF_ApiDefMap(op_list); +} + +void TF_DeleteApiDefMap(TF_ApiDefMap* apimap) { delete apimap; } + +void TF_ApiDefMapPut(TF_ApiDefMap* api_def_map, const char* text, + size_t text_len, TF_Status* status) { +#if defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) + status->status = tensorflow::errors::Unimplemented( + "ApiDefMap is not supported on mobile."); +#else + mutex_lock l(api_def_map->lock); + if (api_def_map->update_docs_called) { + status->status = FailedPrecondition( + "TF_ApiDefMapPut cannot be called after TF_ApiDefMapGet has been " + "called."); + return; + } + string api_def_text(text, text_len); + status->status = api_def_map->api_def_map.LoadApiDef(api_def_text); +#endif // defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) +} + +TF_Buffer* TF_ApiDefMapGet(TF_ApiDefMap* api_def_map, const char* name, + size_t name_len, TF_Status* status) { +#if defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) + status->status = tensorflow::errors::Unimplemented( + "ApiDefMap is not supported on mobile."); + return nullptr; +#else + mutex_lock l(api_def_map->lock); + if (!api_def_map->update_docs_called) { + api_def_map->api_def_map.UpdateDocs(); + api_def_map->update_docs_called = true; + } + string name_str(name, name_len); + const auto* api_def = api_def_map->api_def_map.GetApiDef(name_str); + if (api_def == nullptr) { + return nullptr; + } + + TF_Buffer* ret = TF_NewBuffer(); + status->status = MessageToBuffer(*api_def, ret); + if (!status->status.ok()) { + TF_DeleteBuffer(ret); + return nullptr; + } + return ret; +#endif // defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) +} + +TF_Buffer* TF_GetAllRegisteredKernels(TF_Status* status) { + tensorflow::KernelList kernel_list = tensorflow::GetAllRegisteredKernels(); + TF_Buffer* ret = TF_NewBuffer(); + status->status = MessageToBuffer(kernel_list, ret); + if (!status->status.ok()) { + TF_DeleteBuffer(ret); + return nullptr; + } + return ret; +} + +TF_Buffer* TF_GetRegisteredKernelsForOp(const char* name, TF_Status* status) { + tensorflow::KernelList kernel_list = + tensorflow::GetRegisteredKernelsForOp(name); + TF_Buffer* ret = TF_NewBuffer(); + status->status = MessageToBuffer(kernel_list, ret); + if (!status->status.ok()) { + TF_DeleteBuffer(ret); + return nullptr; + } + return ret; +} + +// TF_Server functions 
---------------------------------------------- + +#if !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD) +TF_Server::TF_Server(std::unique_ptr server) + : target(server->target()), server(std::move(server)) {} +#endif // !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD) + +TF_Server* TF_NewServer(const void* proto, size_t proto_len, + TF_Status* status) { +#if defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) + status->status = tensorflow::errors::Unimplemented( + "Server functionality is not supported on mobile"); + return nullptr; +#else + tensorflow::ServerDef server_def; + if (!server_def.ParseFromArray(proto, static_cast(proto_len))) { + status->status = InvalidArgument( + "Could not parse provided bytes into a ServerDef protocol buffer"); + return nullptr; + } + + std::unique_ptr out_server; + status->status = tensorflow::NewServer(server_def, &out_server); + if (!status->status.ok()) return nullptr; + + return new TF_Server(std::move(out_server)); +#endif // defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) +} + +void TF_ServerStart(TF_Server* server, TF_Status* status) { +#if defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) + status->status = tensorflow::errors::Unimplemented( + "Server functionality is not supported on mobile"); +#else + status->status = server->server->Start(); +#endif // defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) +} + +void TF_ServerStop(TF_Server* server, TF_Status* status) { +#if defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) + status->status = tensorflow::errors::Unimplemented( + "Server functionality is not supported on mobile"); +#else + status->status = server->server->Stop(); +#endif // defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) +} + +void TF_ServerJoin(TF_Server* server, TF_Status* status) { +#if defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) + status->status = tensorflow::errors::Unimplemented( + "Server functionality is not supported on mobile"); +#else + status->status = server->server->Join(); +#endif // defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) +} + +const char* TF_ServerTarget(TF_Server* server) { +#if defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) + return nullptr; +#else + return server->target.c_str(); +#endif +} + +void TF_DeleteServer(TF_Server* server) { +#if !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD) + delete server; +#endif // !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD) +} + +void TF_RegisterLogListener(void (*listener)(const char*)) { +#if !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD) + tensorflow::logging::RegisterListener(listener); +#endif // !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD) +} + } // end extern "C" diff --git a/tensorflow/c/c_api.h b/tensorflow/c/c_api.h index f9942239eec..0c413f6ebae 100644 --- a/tensorflow/c/c_api.h +++ b/tensorflow/c/c_api.h @@ -19,21 +19,884 @@ limitations under the License. #include #include -#include "tensorflow/c/c_core_api.h" #include "tensorflow/c/tf_attrtype.h" #include "tensorflow/c/tf_datatype.h" #include "tensorflow/c/tf_status.h" #include "tensorflow/c/tf_tensor.h" // -------------------------------------------------------------------------- -// Non-core C API for TensorFlow. +// C API for TensorFlow. // -// This file contains the non-core C API for TensorFlow. Most of the -// API documentation and functionality resides in c_core_api.h. +// The API leans towards simplicity and uniformity instead of convenience +// since most usage will be by language specific wrappers. 
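A minimal sketch of the server lifecycle implemented above. Producing the serialized tensorflow.ServerDef bytes is outside the C API and assumed to happen elsewhere; registering the listener is optional and only affects messages emitted by logging ops.

#include <stdio.h>
#include "tensorflow/c/c_api.h"

static void log_to_stderr(const char* msg) { fprintf(stderr, "[tf] %s", msg); }

/* Start an in-process server from a serialized ServerDef and block in Join.
   Returns 0 if the server shut down cleanly. */
int serve(const void* server_def, size_t server_def_len) {
  TF_Status* status = TF_NewStatus();
  TF_RegisterLogListener(log_to_stderr);
  TF_Server* server = TF_NewServer(server_def, server_def_len, status);
  if (TF_GetCode(status) != TF_OK) {
    fprintf(stderr, "TF_NewServer: %s\n", TF_Message(status));
    TF_DeleteStatus(status);
    return -1;
  }
  TF_ServerStart(server, status);
  if (TF_GetCode(status) == TF_OK) {
    printf("serving at %s\n", TF_ServerTarget(server));
    TF_ServerJoin(server, status);  /* blocks until the server stops */
  }
  int ok = TF_GetCode(status) == TF_OK;
  TF_DeleteServer(server);
  TF_DeleteStatus(status);
  return ok ? 0 : -1;
}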
+// +// Conventions: +// * We use the prefix TF_ for everything in the API. +// * Objects are always passed around as pointers to opaque structs +// and these structs are allocated/deallocated via the API. +// * TF_Status holds error information. It is an object type +// and therefore is passed around as a pointer to an opaque +// struct as mentioned above. +// * Every call that has a TF_Status* argument clears it on success +// and fills it with error info on failure. +// * unsigned char is used for booleans (instead of the 'bool' type). +// In C++ bool is a keyword while in C99 bool is a macro defined +// in stdbool.h. It is possible for the two to be inconsistent. +// For example, neither the C99 nor the C++11 standard force a byte +// size on the bool type, so the macro defined in stdbool.h could +// be inconsistent with the bool keyword in C++. Thus, the use +// of stdbool.h is avoided and unsigned char is used instead. +// * size_t is used to represent byte sizes of objects that are +// materialized in the address space of the calling process. +// * int is used as an index into arrays. +// * Deletion functions are safe to call on nullptr. +// +// Questions left to address: +// * Might at some point need a way for callers to provide their own Env. +// * Maybe add TF_TensorShape that encapsulates dimension info. +// +// Design decisions made: +// * Backing store for tensor memory has an associated deallocation +// function. This deallocation function will point to client code +// for tensors populated by the client. So the client can do things +// like shadowing a numpy array. +// * We do not provide TF_OK since it is not strictly necessary and we +// are not optimizing for convenience. +// * We make assumption that one session has one graph. This should be +// fine since we have the ability to run sub-graphs. +// * We could allow NULL for some arguments (e.g., NULL options arg). +// However since convenience is not a primary goal, we don't do this. +// * Devices are not in this API. Instead, they are created/used internally +// and the API just provides high level controls over the number of +// devices of each type. + +// Macro to control visibility of exported symbols in the shared library (.so, +// .dylib, .dll). +// This duplicates the TF_EXPORT macro definition in +// tensorflow/core/platform/macros.h in order to keep this .h file independent +// of any other includes. +#ifdef SWIG +#define TF_CAPI_EXPORT +#else +#if defined(_WIN32) +#ifdef TF_COMPILE_LIBRARY +#define TF_CAPI_EXPORT __declspec(dllexport) +#else +#define TF_CAPI_EXPORT __declspec(dllimport) +#endif // TF_COMPILE_LIBRARY +#else +#define TF_CAPI_EXPORT __attribute__((visibility("default"))) +#endif // _WIN32 +#endif // SWIG + #ifdef __cplusplus extern "C" { #endif +// -------------------------------------------------------------------------- +// TF_Version returns a string describing version information of the +// TensorFlow library. TensorFlow using semantic versioning. +TF_CAPI_EXPORT extern const char* TF_Version(void); + +// -------------------------------------------------------------------------- +// TF_Buffer holds a pointer to a block of data and its associated length. +// Typically, the data consists of a serialized protocol buffer, but other data +// may also be held in a buffer. +// +// By default, TF_Buffer itself does not do any memory management of the +// pointed-to block. 
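Two short sketches of the ownership rules described here: TF_NewBufferFromString copies the bytes and installs its own deallocator, while a manually filled TF_Buffer only frees its block if data_deallocator is set. The helper names are illustrative.

#include <stdlib.h>
#include "tensorflow/c/c_api.h"

/* Wrap read-only proto bytes: the buffer owns a private copy. */
TF_Buffer* wrap_proto(const void* bytes, size_t len) {
  return TF_NewBufferFromString(bytes, len);  /* free with TF_DeleteBuffer */
}

/* Adopt an existing malloc'd block without copying; TF_DeleteBuffer will
   call free_block to release it. */
static void free_block(void* data, size_t length) { (void)length; free(data); }

TF_Buffer* adopt_block(void* heap_block, size_t len) {
  TF_Buffer* buf = TF_NewBuffer();
  buf->data = heap_block;
  buf->length = len;
  buf->data_deallocator = free_block;
  return buf;
}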
If need be, users of this struct should specify how to +// deallocate the block by setting the `data_deallocator` function pointer. +typedef struct TF_Buffer { + const void* data; + size_t length; + void (*data_deallocator)(void* data, size_t length); +} TF_Buffer; + +// Makes a copy of the input and sets an appropriate deallocator. Useful for +// passing in read-only, input protobufs. +TF_CAPI_EXPORT extern TF_Buffer* TF_NewBufferFromString(const void* proto, + size_t proto_len); + +// Useful for passing *out* a protobuf. +TF_CAPI_EXPORT extern TF_Buffer* TF_NewBuffer(void); + +TF_CAPI_EXPORT extern void TF_DeleteBuffer(TF_Buffer*); + +TF_CAPI_EXPORT extern TF_Buffer TF_GetBuffer(TF_Buffer* buffer); + +// -------------------------------------------------------------------------- +// TF_SessionOptions holds options that can be passed during session creation. +typedef struct TF_SessionOptions TF_SessionOptions; + +// Return a new options object. +TF_CAPI_EXPORT extern TF_SessionOptions* TF_NewSessionOptions(void); + +// Set the target in TF_SessionOptions.options. +// target can be empty, a single entry, or a comma separated list of entries. +// Each entry is in one of the following formats : +// "local" +// ip:port +// host:port +TF_CAPI_EXPORT extern void TF_SetTarget(TF_SessionOptions* options, + const char* target); + +// Set the config in TF_SessionOptions.options. +// config should be a serialized tensorflow.ConfigProto proto. +// If config was not parsed successfully as a ConfigProto, record the +// error information in *status. +TF_CAPI_EXPORT extern void TF_SetConfig(TF_SessionOptions* options, + const void* proto, size_t proto_len, + TF_Status* status); + +// Destroy an options object. +TF_CAPI_EXPORT extern void TF_DeleteSessionOptions(TF_SessionOptions*); + +// TODO(jeff,sanjay): +// - export functions to set Config fields + +// -------------------------------------------------------------------------- +// The new graph construction API, still under development. + +// Represents a computation graph. Graphs may be shared between sessions. +// Graphs are thread-safe when used as directed below. +typedef struct TF_Graph TF_Graph; + +// Return a new graph object. +TF_CAPI_EXPORT extern TF_Graph* TF_NewGraph(void); + +// Destroy an options object. Graph will be deleted once no more +// TFSession's are referencing it. +TF_CAPI_EXPORT extern void TF_DeleteGraph(TF_Graph*); + +// Operation being built. The underlying graph must outlive this. +typedef struct TF_OperationDescription TF_OperationDescription; + +// Operation that has been added to the graph. Valid until the graph is +// deleted -- in particular adding a new operation to the graph does not +// invalidate old TF_Operation* pointers. +typedef struct TF_Operation TF_Operation; + +// Represents a specific input of an operation. +typedef struct TF_Input { + TF_Operation* oper; + int index; // The index of the input within oper. +} TF_Input; + +// Represents a specific output of an operation. +typedef struct TF_Output { + TF_Operation* oper; + int index; // The index of the output within oper. +} TF_Output; + +// TF_Function is a grouping of operations with defined inputs and outputs. +// Once created and added to graphs, functions can be invoked by creating an +// operation whose operation type matches the function name. +typedef struct TF_Function TF_Function; + +// Function definition options. 
TODO(iga): Define and implement +typedef struct TF_FunctionOptions TF_FunctionOptions; + +// Sets the shape of the Tensor referenced by `output` in `graph` to +// the shape described by `dims` and `num_dims`. +// +// If the number of dimensions is unknown, `num_dims` must be set to +// -1 and `dims` can be null. If a dimension is unknown, the +// corresponding entry in the `dims` array must be -1. +// +// This does not overwrite the existing shape associated with `output`, +// but merges the input shape with the existing shape. For example, +// setting a shape of [-1, 2] with an existing shape [2, -1] would set +// a final shape of [2, 2] based on shape merging semantics. +// +// Returns an error into `status` if: +// * `output` is not in `graph`. +// * An invalid shape is being set (e.g., the shape being set +// is incompatible with the existing shape). +TF_CAPI_EXPORT extern void TF_GraphSetTensorShape(TF_Graph* graph, + TF_Output output, + const int64_t* dims, + const int num_dims, + TF_Status* status); + +// Returns the number of dimensions of the Tensor referenced by `output` +// in `graph`. +// +// If the number of dimensions in the shape is unknown, returns -1. +// +// Returns an error into `status` if: +// * `output` is not in `graph`. +TF_CAPI_EXPORT extern int TF_GraphGetTensorNumDims(TF_Graph* graph, + TF_Output output, + TF_Status* status); + +// Returns the shape of the Tensor referenced by `output` in `graph` +// into `dims`. `dims` must be an array large enough to hold `num_dims` +// entries (e.g., the return value of TF_GraphGetTensorNumDims). +// +// If the number of dimensions in the shape is unknown or the shape is +// a scalar, `dims` will remain untouched. Otherwise, each element of +// `dims` will be set corresponding to the size of the dimension. An +// unknown dimension is represented by `-1`. +// +// Returns an error into `status` if: +// * `output` is not in `graph`. +// * `num_dims` does not match the actual number of dimensions. +TF_CAPI_EXPORT extern void TF_GraphGetTensorShape(TF_Graph* graph, + TF_Output output, + int64_t* dims, int num_dims, + TF_Status* status); + +// Operation will only be added to *graph when TF_FinishOperation() is +// called (assuming TF_FinishOperation() does not return an error). +// *graph must not be deleted until after TF_FinishOperation() is +// called. +TF_CAPI_EXPORT extern TF_OperationDescription* TF_NewOperation( + TF_Graph* graph, const char* op_type, const char* oper_name); + +// Specify the device for `desc`. Defaults to empty, meaning unconstrained. +TF_CAPI_EXPORT extern void TF_SetDevice(TF_OperationDescription* desc, + const char* device); + +// The calls to TF_AddInput and TF_AddInputList must match (in number, +// order, and type) the op declaration. For example, the "Concat" op +// has registration: +// REGISTER_OP("Concat") +// .Input("concat_dim: int32") +// .Input("values: N * T") +// .Output("output: T") +// .Attr("N: int >= 2") +// .Attr("T: type"); +// that defines two inputs, "concat_dim" and "values" (in that order). 
+// You must use TF_AddInput() for the first input (since it takes a +// single tensor), and TF_AddInputList() for the second input (since +// it takes a list, even if you were to pass a list with a single +// tensor), as in: +// TF_OperationDescription* desc = TF_NewOperation(graph, "Concat", "c"); +// TF_Output concat_dim_input = {...}; +// TF_AddInput(desc, concat_dim_input); +// TF_Output values_inputs[5] = {{...}, ..., {...}}; +// TF_AddInputList(desc, values_inputs, 5); + +// For inputs that take a single tensor. +TF_CAPI_EXPORT extern void TF_AddInput(TF_OperationDescription* desc, + TF_Output input); + +// For inputs that take a list of tensors. +// inputs must point to TF_Output[num_inputs]. +TF_CAPI_EXPORT extern void TF_AddInputList(TF_OperationDescription* desc, + const TF_Output* inputs, + int num_inputs); + +// Call once per control input to `desc`. +TF_CAPI_EXPORT extern void TF_AddControlInput(TF_OperationDescription* desc, + TF_Operation* input); + +// Request that `desc` be co-located on the device where `op` +// is placed. +// +// Use of this is discouraged since the implementation of device placement is +// subject to change. Primarily intended for internal libraries +TF_CAPI_EXPORT extern void TF_ColocateWith(TF_OperationDescription* desc, + TF_Operation* op); + +// Call some TF_SetAttr*() function for every attr that is not +// inferred from an input and doesn't have a default value you wish to +// keep. + +// `value` must point to a string of length `length` bytes. +TF_CAPI_EXPORT extern void TF_SetAttrString(TF_OperationDescription* desc, + const char* attr_name, + const void* value, size_t length); +// `values` and `lengths` each must have lengths `num_values`. +// `values[i]` must point to a string of length `lengths[i]` bytes. +TF_CAPI_EXPORT extern void TF_SetAttrStringList(TF_OperationDescription* desc, + const char* attr_name, + const void* const* values, + const size_t* lengths, + int num_values); +TF_CAPI_EXPORT extern void TF_SetAttrInt(TF_OperationDescription* desc, + const char* attr_name, int64_t value); +TF_CAPI_EXPORT extern void TF_SetAttrIntList(TF_OperationDescription* desc, + const char* attr_name, + const int64_t* values, + int num_values); +TF_CAPI_EXPORT extern void TF_SetAttrFloat(TF_OperationDescription* desc, + const char* attr_name, float value); +TF_CAPI_EXPORT extern void TF_SetAttrFloatList(TF_OperationDescription* desc, + const char* attr_name, + const float* values, + int num_values); +TF_CAPI_EXPORT extern void TF_SetAttrBool(TF_OperationDescription* desc, + const char* attr_name, + unsigned char value); +TF_CAPI_EXPORT extern void TF_SetAttrBoolList(TF_OperationDescription* desc, + const char* attr_name, + const unsigned char* values, + int num_values); +TF_CAPI_EXPORT extern void TF_SetAttrType(TF_OperationDescription* desc, + const char* attr_name, + TF_DataType value); +TF_CAPI_EXPORT extern void TF_SetAttrTypeList(TF_OperationDescription* desc, + const char* attr_name, + const TF_DataType* values, + int num_values); +TF_CAPI_EXPORT extern void TF_SetAttrPlaceholder(TF_OperationDescription* desc, + const char* attr_name, + const char* placeholder); + +// Set a 'func' attribute to the specified name. +// `value` must point to a string of length `length` bytes. +TF_CAPI_EXPORT extern void TF_SetAttrFuncName(TF_OperationDescription* desc, + const char* attr_name, + const char* value, size_t length); + +// Set `num_dims` to -1 to represent "unknown rank". Otherwise, +// `dims` points to an array of length `num_dims`. 
`dims[i]` must be +// >= -1, with -1 meaning "unknown dimension". +TF_CAPI_EXPORT extern void TF_SetAttrShape(TF_OperationDescription* desc, + const char* attr_name, + const int64_t* dims, int num_dims); +// `dims` and `num_dims` must point to arrays of length `num_shapes`. +// Set `num_dims[i]` to -1 to represent "unknown rank". Otherwise, +// `dims[i]` points to an array of length `num_dims[i]`. `dims[i][j]` +// must be >= -1, with -1 meaning "unknown dimension". +TF_CAPI_EXPORT extern void TF_SetAttrShapeList(TF_OperationDescription* desc, + const char* attr_name, + const int64_t* const* dims, + const int* num_dims, + int num_shapes); +// `proto` must point to an array of `proto_len` bytes representing a +// binary-serialized TensorShapeProto. +TF_CAPI_EXPORT extern void TF_SetAttrTensorShapeProto( + TF_OperationDescription* desc, const char* attr_name, const void* proto, + size_t proto_len, TF_Status* status); +// `protos` and `proto_lens` must point to arrays of length `num_shapes`. +// `protos[i]` must point to an array of `proto_lens[i]` bytes +// representing a binary-serialized TensorShapeProto. +TF_CAPI_EXPORT extern void TF_SetAttrTensorShapeProtoList( + TF_OperationDescription* desc, const char* attr_name, + const void* const* protos, const size_t* proto_lens, int num_shapes, + TF_Status* status); + +TF_CAPI_EXPORT extern void TF_SetAttrTensor(TF_OperationDescription* desc, + const char* attr_name, + TF_Tensor* value, + TF_Status* status); +TF_CAPI_EXPORT extern void TF_SetAttrTensorList(TF_OperationDescription* desc, + const char* attr_name, + TF_Tensor* const* values, + int num_values, + TF_Status* status); + +// `proto` should point to a sequence of bytes of length `proto_len` +// representing a binary serialization of an AttrValue protocol +// buffer. +TF_CAPI_EXPORT extern void TF_SetAttrValueProto(TF_OperationDescription* desc, + const char* attr_name, + const void* proto, + size_t proto_len, + TF_Status* status); + +// If this function succeeds: +// * *status is set to an OK value, +// * a TF_Operation is added to the graph, +// * a non-null value pointing to the added operation is returned -- +// this value is valid until the underlying graph is deleted. +// Otherwise: +// * *status is set to a non-OK value, +// * the graph is not modified, +// * a null value is returned. +// In either case, it deletes `desc`. +TF_CAPI_EXPORT extern TF_Operation* TF_FinishOperation( + TF_OperationDescription* desc, TF_Status* status); + +// TF_Operation functions. Operations are immutable once created, so +// these are all query functions. 
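A short sketch of the builder-then-query flow documented above: create a float32 Placeholder with a partially known shape, finish it, and read back its basic properties. The operation name, dtype, and shape are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>
#include "tensorflow/c/c_api.h"

/* Add a Placeholder named "input" of shape [-1, 784] to `graph`. */
TF_Operation* add_placeholder(TF_Graph* graph, TF_Status* status) {
  TF_OperationDescription* desc =
      TF_NewOperation(graph, "Placeholder", "input");
  TF_SetAttrType(desc, "dtype", TF_FLOAT);
  const int64_t dims[2] = {-1, 784};  /* -1 means "unknown dimension" */
  TF_SetAttrShape(desc, "shape", dims, 2);
  TF_Operation* op = TF_FinishOperation(desc, status);  /* consumes desc */
  if (TF_GetCode(status) != TF_OK) return NULL;

  /* Query functions take no status; operations are immutable once created. */
  printf("%s (%s), %d output(s)\n", TF_OperationName(op),
         TF_OperationOpType(op), TF_OperationNumOutputs(op));
  return op;
}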
+ +TF_CAPI_EXPORT extern const char* TF_OperationName(TF_Operation* oper); +TF_CAPI_EXPORT extern const char* TF_OperationOpType(TF_Operation* oper); +TF_CAPI_EXPORT extern const char* TF_OperationDevice(TF_Operation* oper); + +TF_CAPI_EXPORT extern int TF_OperationNumOutputs(TF_Operation* oper); +TF_CAPI_EXPORT extern TF_DataType TF_OperationOutputType(TF_Output oper_out); +TF_CAPI_EXPORT extern int TF_OperationOutputListLength(TF_Operation* oper, + const char* arg_name, + TF_Status* status); + +TF_CAPI_EXPORT extern int TF_OperationNumInputs(TF_Operation* oper); +TF_CAPI_EXPORT extern TF_DataType TF_OperationInputType(TF_Input oper_in); +TF_CAPI_EXPORT extern int TF_OperationInputListLength(TF_Operation* oper, + const char* arg_name, + TF_Status* status); + +// In this code: +// TF_Output producer = TF_OperationInput(consumer); +// There is an edge from producer.oper's output (given by +// producer.index) to consumer.oper's input (given by consumer.index). +TF_CAPI_EXPORT extern TF_Output TF_OperationInput(TF_Input oper_in); + +// Get list of all inputs of a specific operation. `inputs` must point to +// an array of length at least `max_inputs` (ideally set to +// TF_OperationNumInputs(oper)). Beware that a concurrent +// modification of the graph can increase the number of inputs of +// an operation. +TF_CAPI_EXPORT extern void TF_OperationAllInputs(TF_Operation* oper, + TF_Output* inputs, + int max_inputs); + +// Get the number of current consumers of a specific output of an +// operation. Note that this number can change when new operations +// are added to the graph. +TF_CAPI_EXPORT extern int TF_OperationOutputNumConsumers(TF_Output oper_out); + +// Get list of all current consumers of a specific output of an +// operation. `consumers` must point to an array of length at least +// `max_consumers` (ideally set to +// TF_OperationOutputNumConsumers(oper_out)). Beware that a concurrent +// modification of the graph can increase the number of consumers of +// an operation. Returns the number of output consumers (should match +// TF_OperationOutputNumConsumers(oper_out)). +TF_CAPI_EXPORT extern int TF_OperationOutputConsumers(TF_Output oper_out, + TF_Input* consumers, + int max_consumers); + +// Get the number of control inputs to an operation. +TF_CAPI_EXPORT extern int TF_OperationNumControlInputs(TF_Operation* oper); + +// Get list of all control inputs to an operation. `control_inputs` must +// point to an array of length `max_control_inputs` (ideally set to +// TF_OperationNumControlInputs(oper)). Returns the number of control +// inputs (should match TF_OperationNumControlInputs(oper)). +TF_CAPI_EXPORT extern int TF_OperationGetControlInputs( + TF_Operation* oper, TF_Operation** control_inputs, int max_control_inputs); + +// Get the number of operations that have `*oper` as a control input. +// Note that this number can change when new operations are added to +// the graph. +TF_CAPI_EXPORT extern int TF_OperationNumControlOutputs(TF_Operation* oper); + +// Get the list of operations that have `*oper` as a control input. +// `control_outputs` must point to an array of length at least +// `max_control_outputs` (ideally set to +// TF_OperationNumControlOutputs(oper)). Beware that a concurrent +// modification of the graph can increase the number of control +// outputs. Returns the number of control outputs (should match +// TF_OperationNumControlOutputs(oper)). 
+TF_CAPI_EXPORT extern int TF_OperationGetControlOutputs( + TF_Operation* oper, TF_Operation** control_outputs, + int max_control_outputs); + +// TF_AttrMetadata describes the value of an attribute on an operation. +typedef struct TF_AttrMetadata { + // A boolean: 1 if the attribute value is a list, 0 otherwise. + unsigned char is_list; + + // Length of the list if is_list is true. Undefined otherwise. + int64_t list_size; + + // Type of elements of the list if is_list != 0. + // Type of the single value stored in the attribute if is_list == 0. + TF_AttrType type; + + // Total size the attribute value. + // The units of total_size depend on is_list and type. + // (1) If type == TF_ATTR_STRING and is_list == 0 + // then total_size is the byte size of the string + // valued attribute. + // (2) If type == TF_ATTR_STRING and is_list == 1 + // then total_size is the cumulative byte size + // of all the strings in the list. + // (3) If type == TF_ATTR_SHAPE and is_list == 0 + // then total_size is the number of dimensions + // of the shape valued attribute, or -1 + // if its rank is unknown. + // (4) If type == TF_ATTR_SHAPE and is_list == 1 + // then total_size is the cumulative number + // of dimensions of all shapes in the list. + // (5) Otherwise, total_size is undefined. + int64_t total_size; +} TF_AttrMetadata; + +// Returns metadata about the value of the attribute `attr_name` of `oper`. +TF_CAPI_EXPORT extern TF_AttrMetadata TF_OperationGetAttrMetadata( + TF_Operation* oper, const char* attr_name, TF_Status* status); + +// Fills in `value` with the value of the attribute `attr_name`. `value` must +// point to an array of length at least `max_length` (ideally set to +// TF_AttrMetadata.total_size from TF_OperationGetAttrMetadata(oper, +// attr_name)). +TF_CAPI_EXPORT extern void TF_OperationGetAttrString(TF_Operation* oper, + const char* attr_name, + void* value, + size_t max_length, + TF_Status* status); + +// Get the list of strings in the value of the attribute `attr_name`. Fills in +// `values` and `lengths`, each of which must point to an array of length at +// least `max_values`. +// +// The elements of values will point to addresses in `storage` which must be at +// least `storage_size` bytes in length. Ideally, max_values would be set to +// TF_AttrMetadata.list_size and `storage` would be at least +// TF_AttrMetadata.total_size, obtained from TF_OperationGetAttrMetadata(oper, +// attr_name). +// +// Fails if storage_size is too small to hold the requested number of strings. +TF_CAPI_EXPORT extern void TF_OperationGetAttrStringList( + TF_Operation* oper, const char* attr_name, void** values, size_t* lengths, + int max_values, void* storage, size_t storage_size, TF_Status* status); + +TF_CAPI_EXPORT extern void TF_OperationGetAttrInt(TF_Operation* oper, + const char* attr_name, + int64_t* value, + TF_Status* status); + +// Fills in `values` with the value of the attribute `attr_name` of `oper`. +// `values` must point to an array of length at least `max_values` (ideally set +// TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper, +// attr_name)). +TF_CAPI_EXPORT extern void TF_OperationGetAttrIntList(TF_Operation* oper, + const char* attr_name, + int64_t* values, + int max_values, + TF_Status* status); + +TF_CAPI_EXPORT extern void TF_OperationGetAttrFloat(TF_Operation* oper, + const char* attr_name, + float* value, + TF_Status* status); + +// Fills in `values` with the value of the attribute `attr_name` of `oper`. 
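A sketch of how TF_AttrMetadata can be consulted before calling a typed getter; the "dtype" attribute of a Placeholder operation is used as the example and is an assumption.

#include <stdio.h>
#include "tensorflow/c/c_api.h"

void print_dtype_attr(TF_Operation* op, TF_Status* status) {
  TF_AttrMetadata meta = TF_OperationGetAttrMetadata(op, "dtype", status);
  if (TF_GetCode(status) != TF_OK) return;
  if (!meta.is_list && meta.type == TF_ATTR_TYPE) {
    TF_DataType dtype;
    TF_OperationGetAttrType(op, "dtype", &dtype, status);
    if (TF_GetCode(status) == TF_OK) printf("dtype = %d\n", (int)dtype);
  }
}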
+// `values` must point to an array of length at least `max_values` (ideally set +// to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper, +// attr_name)). +TF_CAPI_EXPORT extern void TF_OperationGetAttrFloatList(TF_Operation* oper, + const char* attr_name, + float* values, + int max_values, + TF_Status* status); + +TF_CAPI_EXPORT extern void TF_OperationGetAttrBool(TF_Operation* oper, + const char* attr_name, + unsigned char* value, + TF_Status* status); + +// Fills in `values` with the value of the attribute `attr_name` of `oper`. +// `values` must point to an array of length at least `max_values` (ideally set +// to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper, +// attr_name)). +TF_CAPI_EXPORT extern void TF_OperationGetAttrBoolList(TF_Operation* oper, + const char* attr_name, + unsigned char* values, + int max_values, + TF_Status* status); + +TF_CAPI_EXPORT extern void TF_OperationGetAttrType(TF_Operation* oper, + const char* attr_name, + TF_DataType* value, + TF_Status* status); + +// Fills in `values` with the value of the attribute `attr_name` of `oper`. +// `values` must point to an array of length at least `max_values` (ideally set +// to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper, +// attr_name)). +TF_CAPI_EXPORT extern void TF_OperationGetAttrTypeList(TF_Operation* oper, + const char* attr_name, + TF_DataType* values, + int max_values, + TF_Status* status); + +// Fills in `value` with the value of the attribute `attr_name` of `oper`. +// `values` must point to an array of length at least `num_dims` (ideally set to +// TF_Attr_Meta.size from TF_OperationGetAttrMetadata(oper, attr_name)). +TF_CAPI_EXPORT extern void TF_OperationGetAttrShape(TF_Operation* oper, + const char* attr_name, + int64_t* value, + int num_dims, + TF_Status* status); + +// Fills in `dims` with the list of shapes in the attribute `attr_name` of +// `oper` and `num_dims` with the corresponding number of dimensions. On return, +// for every i where `num_dims[i]` > 0, `dims[i]` will be an array of +// `num_dims[i]` elements. A value of -1 for `num_dims[i]` indicates that the +// i-th shape in the list is unknown. +// +// The elements of `dims` will point to addresses in `storage` which must be +// large enough to hold at least `storage_size` int64_ts. Ideally, `num_shapes` +// would be set to TF_AttrMetadata.list_size and `storage_size` would be set to +// TF_AttrMetadata.total_size from TF_OperationGetAttrMetadata(oper, +// attr_name). +// +// Fails if storage_size is insufficient to hold the requested shapes. +TF_CAPI_EXPORT extern void TF_OperationGetAttrShapeList( + TF_Operation* oper, const char* attr_name, int64_t** dims, int* num_dims, + int num_shapes, int64_t* storage, int storage_size, TF_Status* status); + +// Sets `value` to the binary-serialized TensorShapeProto of the value of +// `attr_name` attribute of `oper`'. +TF_CAPI_EXPORT extern void TF_OperationGetAttrTensorShapeProto( + TF_Operation* oper, const char* attr_name, TF_Buffer* value, + TF_Status* status); + +// Fills in `values` with binary-serialized TensorShapeProto values of the +// attribute `attr_name` of `oper`. `values` must point to an array of length at +// least `num_values` (ideally set to TF_AttrMetadata.list_size from +// TF_OperationGetAttrMetadata(oper, attr_name)). 
+TF_CAPI_EXPORT extern void TF_OperationGetAttrTensorShapeProtoList( + TF_Operation* oper, const char* attr_name, TF_Buffer** values, + int max_values, TF_Status* status); + +// Gets the TF_Tensor valued attribute of `attr_name` of `oper`. +// +// Allocates a new TF_Tensor which the caller is expected to take +// ownership of (and can deallocate using TF_DeleteTensor). +TF_CAPI_EXPORT extern void TF_OperationGetAttrTensor(TF_Operation* oper, + const char* attr_name, + TF_Tensor** value, + TF_Status* status); + +// Fills in `values` with the TF_Tensor values of the attribute `attr_name` of +// `oper`. `values` must point to an array of TF_Tensor* of length at least +// `max_values` (ideally set to TF_AttrMetadata.list_size from +// TF_OperationGetAttrMetadata(oper, attr_name)). +// +// The caller takes ownership of all the non-null TF_Tensor* entries in `values` +// (which can be deleted using TF_DeleteTensor(values[i])). +TF_CAPI_EXPORT extern void TF_OperationGetAttrTensorList(TF_Operation* oper, + const char* attr_name, + TF_Tensor** values, + int max_values, + TF_Status* status); + +// Sets `output_attr_value` to the binary-serialized AttrValue proto +// representation of the value of the `attr_name` attr of `oper`. +TF_CAPI_EXPORT extern void TF_OperationGetAttrValueProto( + TF_Operation* oper, const char* attr_name, TF_Buffer* output_attr_value, + TF_Status* status); + +// Returns the operation in the graph with `oper_name`. Returns nullptr if +// no operation found. +TF_CAPI_EXPORT extern TF_Operation* TF_GraphOperationByName( + TF_Graph* graph, const char* oper_name); + +// Iterate through the operations of a graph. To use: +// size_t pos = 0; +// TF_Operation* oper; +// while ((oper = TF_GraphNextOperation(graph, &pos)) != nullptr) { +// DoSomethingWithOperation(oper); +// } +TF_CAPI_EXPORT extern TF_Operation* TF_GraphNextOperation(TF_Graph* graph, + size_t* pos); + +// Write out a serialized representation of `graph` (as a GraphDef protocol +// message) to `output_graph_def` (allocated by TF_NewBuffer()). +// `output_graph_def`'s underlying buffer will be freed when TF_DeleteBuffer() +// is called. +// +// May fail on very large graphs in the future. +TF_CAPI_EXPORT extern void TF_GraphToGraphDef(TF_Graph* graph, + TF_Buffer* output_graph_def, + TF_Status* status); + +// Returns the serialized OpDef proto with name `op_name`, or a bad status if no +// such op exists. This can return OpDefs of functions copied into the graph. +TF_CAPI_EXPORT extern void TF_GraphGetOpDef(TF_Graph* graph, + const char* op_name, + TF_Buffer* output_op_def, + TF_Status* status); + +// Returns the serialized VersionDef proto for this graph. +TF_CAPI_EXPORT extern void TF_GraphVersions(TF_Graph* graph, + TF_Buffer* output_version_def, + TF_Status* status); + +// TF_ImportGraphDefOptions holds options that can be passed to +// TF_GraphImportGraphDef. +typedef struct TF_ImportGraphDefOptions TF_ImportGraphDefOptions; + +TF_CAPI_EXPORT extern TF_ImportGraphDefOptions* TF_NewImportGraphDefOptions( + void); +TF_CAPI_EXPORT extern void TF_DeleteImportGraphDefOptions( + TF_ImportGraphDefOptions* opts); + +// Set the prefix to be prepended to the names of nodes in `graph_def` that will +// be imported into `graph`. `prefix` is copied and has no lifetime +// requirements. +TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsSetPrefix( + TF_ImportGraphDefOptions* opts, const char* prefix); + +// Set the execution device for nodes in `graph_def`. 
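For reference, a sketch combining the iteration pattern shown above with graph serialization; the caller owns the returned buffer. All calls are declared in this header.

#include <stdio.h>
#include "tensorflow/c/c_api.h"

/* List every operation in `graph`, then serialize it to a GraphDef buffer.
   Returns NULL on failure; otherwise the caller frees with TF_DeleteBuffer. */
TF_Buffer* dump_graph(TF_Graph* graph, TF_Status* status) {
  size_t pos = 0;
  TF_Operation* oper;
  while ((oper = TF_GraphNextOperation(graph, &pos)) != NULL) {
    printf("%s: %s\n", TF_OperationName(oper), TF_OperationOpType(oper));
  }
  TF_Buffer* graph_def = TF_NewBuffer();
  TF_GraphToGraphDef(graph, graph_def, status);
  if (TF_GetCode(status) != TF_OK) {
    TF_DeleteBuffer(graph_def);
    return NULL;
  }
  return graph_def;
}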
+// Only applies to nodes where a device was not already explicitly specified. +// `device` is copied and has no lifetime requirements. +TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsSetDefaultDevice( + TF_ImportGraphDefOptions* opts, const char* device); + +// Set whether to uniquify imported operation names. If true, imported operation +// names will be modified if their name already exists in the graph. If false, +// conflicting names will be treated as an error. Note that this option has no +// effect if a prefix is set, since the prefix will guarantee all names are +// unique. Defaults to false. +TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsSetUniquifyNames( + TF_ImportGraphDefOptions* opts, unsigned char uniquify_names); + +// If true, the specified prefix will be modified if it already exists as an +// operation name or prefix in the graph. If false, a conflicting prefix will be +// treated as an error. This option has no effect if no prefix is specified. +TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsSetUniquifyPrefix( + TF_ImportGraphDefOptions* opts, unsigned char uniquify_prefix); + +// Set any imported nodes with input `src_name:src_index` to have that input +// replaced with `dst`. `src_name` refers to a node in the graph to be imported, +// `dst` references a node already existing in the graph being imported into. +// `src_name` is copied and has no lifetime requirements. +TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsAddInputMapping( + TF_ImportGraphDefOptions* opts, const char* src_name, int src_index, + TF_Output dst); + +// Set any imported nodes with control input `src_name` to have that input +// replaced with `dst`. `src_name` refers to a node in the graph to be imported, +// `dst` references an operation already existing in the graph being imported +// into. `src_name` is copied and has no lifetime requirements. +TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsRemapControlDependency( + TF_ImportGraphDefOptions* opts, const char* src_name, TF_Operation* dst); + +// Cause the imported graph to have a control dependency on `oper`. `oper` +// should exist in the graph being imported into. +TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsAddControlDependency( + TF_ImportGraphDefOptions* opts, TF_Operation* oper); + +// Add an output in `graph_def` to be returned via the `return_outputs` output +// parameter of TF_GraphImportGraphDef(). If the output is remapped via an input +// mapping, the corresponding existing tensor in `graph` will be returned. +// `oper_name` is copied and has no lifetime requirements. +TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsAddReturnOutput( + TF_ImportGraphDefOptions* opts, const char* oper_name, int index); + +// Returns the number of return outputs added via +// TF_ImportGraphDefOptionsAddReturnOutput(). +TF_CAPI_EXPORT extern int TF_ImportGraphDefOptionsNumReturnOutputs( + const TF_ImportGraphDefOptions* opts); + +// Add an operation in `graph_def` to be returned via the `return_opers` output +// parameter of TF_GraphImportGraphDef(). `oper_name` is copied and has no +// lifetime requirements. +TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsAddReturnOperation( + TF_ImportGraphDefOptions* opts, const char* oper_name); + +// Returns the number of return operations added via +// TF_ImportGraphDefOptionsAddReturnOperation(). 
+TF_CAPI_EXPORT extern int TF_ImportGraphDefOptionsNumReturnOperations( + const TF_ImportGraphDefOptions* opts); + +// TF_ImportGraphDefResults holds results that are generated by +// TF_GraphImportGraphDefWithResults(). +typedef struct TF_ImportGraphDefResults TF_ImportGraphDefResults; + +// Fetches the return outputs requested via +// TF_ImportGraphDefOptionsAddReturnOutput(). The number of fetched outputs is +// returned in `num_outputs`. The array of return outputs is returned in +// `outputs`. `*outputs` is owned by and has the lifetime of `results`. +TF_CAPI_EXPORT extern void TF_ImportGraphDefResultsReturnOutputs( + TF_ImportGraphDefResults* results, int* num_outputs, TF_Output** outputs); + +// Fetches the return operations requested via +// TF_ImportGraphDefOptionsAddReturnOperation(). The number of fetched +// operations is returned in `num_opers`. The array of return operations is +// returned in `opers`. `*opers` is owned by and has the lifetime of `results`. +TF_CAPI_EXPORT extern void TF_ImportGraphDefResultsReturnOperations( + TF_ImportGraphDefResults* results, int* num_opers, TF_Operation*** opers); + +// Fetches any input mappings requested via +// TF_ImportGraphDefOptionsAddInputMapping() that didn't appear in the GraphDef +// and weren't used as input to any node in the imported graph def. The number +// of fetched mappings is returned in `num_missing_unused_input_mappings`. The +// array of each mapping's source node name is returned in `src_names`, and the +// array of each mapping's source index is returned in `src_indexes`. +// +// `*src_names`, `*src_indexes`, and the memory backing each string in +// `src_names` are owned by and have the lifetime of `results`. +TF_CAPI_EXPORT extern void TF_ImportGraphDefResultsMissingUnusedInputMappings( + TF_ImportGraphDefResults* results, int* num_missing_unused_input_mappings, + const char*** src_names, int** src_indexes); + +// Deletes a results object returned by TF_GraphImportGraphDefWithResults(). +TF_CAPI_EXPORT extern void TF_DeleteImportGraphDefResults( + TF_ImportGraphDefResults* results); + +// Import the graph serialized in `graph_def` into `graph`. Returns nullptr and +// a bad status on error. Otherwise, returns a populated +// TF_ImportGraphDefResults instance. The returned instance must be deleted via +// TF_DeleteImportGraphDefResults(). +TF_CAPI_EXPORT extern TF_ImportGraphDefResults* +TF_GraphImportGraphDefWithResults(TF_Graph* graph, const TF_Buffer* graph_def, + const TF_ImportGraphDefOptions* options, + TF_Status* status); + +// Import the graph serialized in `graph_def` into `graph`. +// Convenience function for when only return outputs are needed. +// +// `num_return_outputs` must be the number of return outputs added (i.e. the +// result of TF_ImportGraphDefOptionsNumReturnOutputs()). If +// `num_return_outputs` is non-zero, `return_outputs` must be of length +// `num_return_outputs`. Otherwise it can be null. +TF_CAPI_EXPORT extern void TF_GraphImportGraphDefWithReturnOutputs( + TF_Graph* graph, const TF_Buffer* graph_def, + const TF_ImportGraphDefOptions* options, TF_Output* return_outputs, + int num_return_outputs, TF_Status* status); + +// Import the graph serialized in `graph_def` into `graph`. +// Convenience function for when no results are needed. +TF_CAPI_EXPORT extern void TF_GraphImportGraphDef( + TF_Graph* graph, const TF_Buffer* graph_def, + const TF_ImportGraphDefOptions* options, TF_Status* status); + +// Adds a copy of function `func` and optionally its gradient function `grad` +// to `g`. 
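A usage sketch for the import entry points declared here: import a serialized GraphDef under a prefix and fetch one tensor back through the return-outputs mechanism. The operation name "logits" is an assumption.

#include "tensorflow/c/c_api.h"

/* Import `graph_def` into `graph`, prefixing node names with "imported", and
   return the imported tensor "logits:0" in *logits. Returns 0 on success. */
int import_with_output(TF_Graph* graph, const TF_Buffer* graph_def,
                       TF_Output* logits, TF_Status* status) {
  TF_ImportGraphDefOptions* opts = TF_NewImportGraphDefOptions();
  TF_ImportGraphDefOptionsSetPrefix(opts, "imported");
  TF_ImportGraphDefOptionsAddReturnOutput(opts, "logits", 0);
  /* return_outputs must be as long as
     TF_ImportGraphDefOptionsNumReturnOutputs(opts); here that is 1. */
  TF_GraphImportGraphDefWithReturnOutputs(graph, graph_def, opts, logits,
                                          /*num_return_outputs=*/1, status);
  TF_DeleteImportGraphDefOptions(opts);
  return TF_GetCode(status) == TF_OK ? 0 : -1;
}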
Once `func`/`grad` is added to `g`, it can be called by creating +// an operation using the function's name. +// Any changes to `func`/`grad` (including deleting it) done after this method +// returns, won't affect the copy of `func`/`grad` in `g`. +// If `func` or `grad` are already in `g`, TF_GraphCopyFunction has no +// effect on them, but can establish the function->gradient relationship +// between them if `func` does not already have a gradient. If `func` already +// has a gradient different from `grad`, an error is returned. +// +// `func` must not be null. +// If `grad` is null and `func` is not in `g`, `func` is added without a +// gradient. +// If `grad` is null and `func` is in `g`, TF_GraphCopyFunction is a noop. +// `grad` must have appropriate signature as described in the doc of +// GradientDef in tensorflow/core/framework/function.proto. +// +// If successful, status is set to OK and `func` and `grad` are added to `g`. +// Otherwise, status is set to the encountered error and `g` is unmodified. +TF_CAPI_EXPORT extern void TF_GraphCopyFunction(TF_Graph* g, + const TF_Function* func, + const TF_Function* grad, + TF_Status* status); + +// Returns the number of TF_Functions registered in `g`. +TF_CAPI_EXPORT extern int TF_GraphNumFunctions(TF_Graph* g); + +// Fills in `funcs` with the TF_Function* registered in `g`. +// `funcs` must point to an array of TF_Function* of length at least +// `max_func`. In usual usage, max_func should be set to the result of +// TF_GraphNumFunctions(g). In this case, all the functions registered in +// `g` will be returned. Else, an unspecified subset. +// +// If successful, returns the number of TF_Function* successfully set in +// `funcs` and sets status to OK. The caller takes ownership of +// all the returned TF_Functions. They must be deleted with TF_DeleteFunction. +// On error, returns 0, sets status to the encountered error, and the contents +// of funcs will be undefined. +TF_CAPI_EXPORT extern int TF_GraphGetFunctions(TF_Graph* g, TF_Function** funcs, + int max_func, TF_Status* status); + +// Note: The following function may fail on very large protos in the future. + +TF_CAPI_EXPORT extern void TF_OperationToNodeDef(TF_Operation* oper, + TF_Buffer* output_node_def, + TF_Status* status); + typedef struct TF_WhileParams { // The number of inputs to the while loop, i.e. the number of loop variables. // This is the size of cond_inputs, body_inputs, and body_outputs. @@ -149,6 +1012,558 @@ TF_CAPI_EXPORT void TF_AddGradientsWithPrefix(TF_Graph* g, const char* prefix, TF_Output* dx, TF_Status* status, TF_Output* dy); +// Create a TF_Function from a TF_Graph +// +// Params: +// fn_body - the graph whose operations (or subset of whose operations) will be +// converted to TF_Function. +// fn_name - the name of the new TF_Function. Should match the operation +// name (OpDef.name) regexp [A-Z][A-Za-z0-9_.\\-/]*. +// If `append_hash_to_fn_name` is false, `fn_name` must be distinct +// from other function and operation names (at least those +// registered in graphs where this function will be used). +// append_hash_to_fn_name - Must be 0 or 1. If set to 1, the actual name +// of the function will be `fn_name` appended with +// '_'. +// If set to 0, the function's name will be `fn_name`. +// num_opers - `num_opers` contains the number of elements in the `opers` array +// or a special value of -1 meaning that no array is given. 
+// The distinction between an empty array of operations and no +// array of operations is necessary to distinguish the case of +// creating a function with no body (e.g. identity or permutation) +// and the case of creating a function whose body contains all +// the nodes in the graph (except for the automatic skipping, see +// below). +// opers - Array of operations to become the body of the function or null. +// - If no array is given (`num_opers` = -1), all the +// operations in `fn_body` will become part of the function +// except operations referenced in `inputs`. These operations +// must have a single output (these operations are typically +// placeholders created for the sole purpose of representing +// an input. We can relax this constraint if there are +// compelling use cases). +// - If an array is given (`num_opers` >= 0), all operations +// in it will become part of the function. In particular, no +// automatic skipping of dummy input operations is performed. +// ninputs - number of elements in `inputs` array +// inputs - array of TF_Outputs that specify the inputs to the function. +// If `ninputs` is zero (the function takes no inputs), `inputs` +// can be null. The names used for function inputs are normalized +// names of the operations (usually placeholders) pointed to by +// `inputs`. These operation names should start with a letter. +// Normalization will convert all letters to lowercase and +// non-alphanumeric characters to '_' to make resulting names match +// the "[a-z][a-z0-9_]*" pattern for operation argument names. +// `inputs` cannot contain the same tensor twice. +// noutputs - number of elements in `outputs` array +// outputs - array of TF_Outputs that specify the outputs of the function. +// If `noutputs` is zero (the function returns no outputs), `outputs` +// can be null. `outputs` can contain the same tensor more than once. +// output_names - The names of the function's outputs. `output_names` array +// must either have the same length as `outputs` +// (i.e. `noutputs`) or be null. In the former case, +// the names should match the regular expression for ArgDef +// names - "[a-z][a-z0-9_]*". In the latter case, +// names for outputs will be generated automatically. +// opts - various options for the function, e.g. XLA's inlining control. +// description - optional human-readable description of this function. +// status - Set to OK on success and an appropriate error on failure. +// +// Note that when the same TF_Output is listed as both an input and an output, +// the corresponding function's output will equal to this input, +// instead of the original node's output. +// +// Callers must also satisfy the following constraints: +// - `inputs` cannot refer to TF_Outputs within a control flow context. For +// example, one cannot use the output of "switch" node as input. +// - `inputs` and `outputs` cannot have reference types. Reference types are +// not exposed through C API and are being replaced with Resources. We support +// reference types inside function's body to support legacy code. Do not +// use them in new code. +// - Every node in the function's body must have all of its inputs (including +// control inputs). In other words, for every node in the body, each input +// must be either listed in `inputs` or must come from another node in +// the body. In particular, it is an error to have a control edge going from +// a node outside of the body into a node in the body. 
This applies to control +// edges going from nodes referenced in `inputs` to nodes in the body when +// the former nodes are not in the body (automatically skipped or not +// included in explicitly specified body). +// +// Returns: +// On success, a newly created TF_Function instance. It must be deleted by +// calling TF_DeleteFunction. +// +// On failure, null. +TF_CAPI_EXPORT extern TF_Function* TF_GraphToFunction( + const TF_Graph* fn_body, const char* fn_name, + unsigned char append_hash_to_fn_name, int num_opers, + const TF_Operation* const* opers, int ninputs, const TF_Output* inputs, + int noutputs, const TF_Output* outputs, const char* const* output_names, + const TF_FunctionOptions* opts, const char* description, TF_Status* status); + +// Similar to TF_GraphToFunction but allows specifying control outputs of the +// function. +// +// The arguments of TF_GraphToFunction have the same meaning, but the new +// arguments are as follows: +// +// ncontrol_outputs: Number of control outputs of the function. +// control_outputs: vector of TF_Operation objects to be marked as control +// outputs of the function. Operations marked as control outputs are +// guaranteed to execute. +// control_output_names: Optional. If not nullptr, vector of strings, one +// per control output, with their names to be added to the function's +// OpDef. +TF_CAPI_EXPORT extern TF_Function* TF_GraphToFunctionWithControlOutputs( + const TF_Graph* fn_body, const char* fn_name, + unsigned char append_hash_to_fn_name, int num_opers, + const TF_Operation* const* opers, int ninputs, const TF_Output* inputs, + int noutputs, const TF_Output* outputs, const char* const* output_names, + int ncontrol_outputs, const TF_Operation* const* control_outputs, + const char* const* control_output_names, const TF_FunctionOptions* opts, + const char* description, TF_Status* status); + +// Returns the name of the graph function. +// The return value points to memory that is only usable until the next +// mutation to *func. +TF_CAPI_EXPORT extern const char* TF_FunctionName(TF_Function* func); + +// Write out a serialized representation of `func` (as a FunctionDef protocol +// message) to `output_func_def` (allocated by TF_NewBuffer()). +// `output_func_def`'s underlying buffer will be freed when TF_DeleteBuffer() +// is called. +// +// May fail on very large graphs in the future. +TF_CAPI_EXPORT extern void TF_FunctionToFunctionDef(TF_Function* func, + TF_Buffer* output_func_def, + TF_Status* status); + +// Construct and return the function whose FunctionDef representation is +// serialized in `proto`. `proto_len` must equal the number of bytes +// pointed to by `proto`. +// Returns: +// On success, a newly created TF_Function instance. It must be deleted by +// calling TF_DeleteFunction. +// +// On failure, null. +TF_CAPI_EXPORT extern TF_Function* TF_FunctionImportFunctionDef( + const void* proto, size_t proto_len, TF_Status* status); + +// Sets function attribute named `attr_name` to value stored in `proto`. +// If this attribute is already set to another value, it is overridden. +// `proto` should point to a sequence of bytes of length `proto_len` +// representing a binary serialization of an AttrValue protocol +// buffer. +TF_CAPI_EXPORT extern void TF_FunctionSetAttrValueProto(TF_Function* func, + const char* attr_name, + const void* proto, + size_t proto_len, + TF_Status* status); + +// Sets `output_attr_value` to the binary-serialized AttrValue proto +// representation of the value of the `attr_name` attr of `func`. 
+// If the `attr_name` attribute is not present, status is set to an error. +TF_CAPI_EXPORT extern void TF_FunctionGetAttrValueProto( + TF_Function* func, const char* attr_name, TF_Buffer* output_attr_value, + TF_Status* status); + +// Frees the memory used by the `func` struct. +// TF_DeleteFunction is a noop if `func` is null. +// Deleting a function does not remove it from any graphs it was copied to. +TF_CAPI_EXPORT extern void TF_DeleteFunction(TF_Function* func); + +// Attempts to evaluate `output`. This will only be possible if `output` doesn't +// depend on any graph inputs (this function is safe to call if this isn't the +// case though). +// +// If the evaluation is successful, this function returns true and `output`'s +// value is returned in `result`. Otherwise returns false. An error status is +// returned if something is wrong with the graph or input. Note that this may +// return false even if no error status is set. +TF_CAPI_EXPORT extern unsigned char TF_TryEvaluateConstant(TF_Graph* graph, + TF_Output output, + TF_Tensor** result, + TF_Status* status); + +// TODO(josh11b): Register OpDef, available to all operations added +// to this graph. + +// -------------------------------------------------------------------------- +// API for driving Graph execution. + +typedef struct TF_Session TF_Session; + +// Return a new execution session with the associated graph, or NULL on +// error. Does not take ownership of any input parameters. +// +// *`graph` must be a valid graph (not deleted or nullptr). `graph` will be +// kept alive for the lifetime of the returned TF_Session. New nodes can still +// be added to `graph` after this call. +TF_CAPI_EXPORT extern TF_Session* TF_NewSession(TF_Graph* graph, + const TF_SessionOptions* opts, + TF_Status* status); + +// This function creates a new TF_Session (which is returned on success) using +// `session_options`, and then initializes state (restoring tensors and other +// assets) using `run_options`. +// +// Any NULL and non-NULL value combinations for (`run_options`, `meta_graph_def`) +// are valid. +// +// - `export_dir` must be set to the path of the exported SavedModel. +// - `tags` must include the set of tags used to identify one MetaGraphDef in +// the SavedModel. +// - `graph` must be a graph newly allocated with TF_NewGraph(). +// +// If successful, populates `graph` with the contents of the Graph and +// `meta_graph_def` with the MetaGraphDef of the loaded model. +TF_CAPI_EXPORT extern TF_Session* TF_LoadSessionFromSavedModel( + const TF_SessionOptions* session_options, const TF_Buffer* run_options, + const char* export_dir, const char* const* tags, int tags_len, + TF_Graph* graph, TF_Buffer* meta_graph_def, TF_Status* status); + +// Close a session. +// +// Contacts any other processes associated with the session, if applicable. +// May not be called after TF_DeleteSession(). +TF_CAPI_EXPORT extern void TF_CloseSession(TF_Session*, TF_Status* status); + +// Destroy a session object. +// +// Even if error information is recorded in *status, this call discards all +// local resources associated with the session. The session may not be used +// during or after this call (and the session drops its reference to the +// corresponding graph). +TF_CAPI_EXPORT extern void TF_DeleteSession(TF_Session*, TF_Status* status); + +// Run the graph associated with the session starting with the supplied inputs +// (inputs[0,ninputs-1] with corresponding values in input_values[0,ninputs-1]).
+// +// Any NULL and non-NULL value combinations for (`run_options`, +// `run_metadata`) are valid. +// +// - `run_options` may be NULL, in which case it will be ignored; or +// non-NULL, in which case it must point to a `TF_Buffer` containing the +// serialized representation of a `RunOptions` protocol buffer. +// - `run_metadata` may be NULL, in which case it will be ignored; or +// non-NULL, in which case it must point to an empty, freshly allocated +// `TF_Buffer` that may be updated to contain the serialized representation +// of a `RunMetadata` protocol buffer. +// +// The caller retains ownership of `input_values` (which can be deleted using +// TF_DeleteTensor). The caller also retains ownership of `run_options` and/or +// `run_metadata` (when not NULL) and should manually call TF_DeleteBuffer on +// them. +// +// On success, the tensors corresponding to outputs[0,noutputs-1] are placed in +// output_values[]. Ownership of the elements of output_values[] is transferred +// to the caller, which must eventually call TF_DeleteTensor on them. +// +// On failure, output_values[] contains NULLs. +TF_CAPI_EXPORT extern void TF_SessionRun( + TF_Session* session, + // RunOptions + const TF_Buffer* run_options, + // Input tensors + const TF_Output* inputs, TF_Tensor* const* input_values, int ninputs, + // Output tensors + const TF_Output* outputs, TF_Tensor** output_values, int noutputs, + // Target operations + const TF_Operation* const* target_opers, int ntargets, + // RunMetadata + TF_Buffer* run_metadata, + // Output status + TF_Status*); + +// Set up the graph with the intended feeds (inputs) and fetches (outputs) for a +// sequence of partial run calls. +// +// On success, returns a handle that is used for subsequent PRun calls. The +// handle should be deleted with TF_DeletePRunHandle when it is no longer +// needed. +// +// On failure, out_status contains a tensorflow::Status with an error +// message. *handle is set to nullptr. +TF_CAPI_EXPORT extern void TF_SessionPRunSetup( + TF_Session*, + // Input names + const TF_Output* inputs, int ninputs, + // Output names + const TF_Output* outputs, int noutputs, + // Target operations + const TF_Operation* const* target_opers, int ntargets, + // Output handle + const char** handle, + // Output status + TF_Status*); + +// Continue to run the graph with additional feeds and fetches. The +// execution state is uniquely identified by the handle. +TF_CAPI_EXPORT extern void TF_SessionPRun( + TF_Session*, const char* handle, + // Input tensors + const TF_Output* inputs, TF_Tensor* const* input_values, int ninputs, + // Output tensors + const TF_Output* outputs, TF_Tensor** output_values, int noutputs, + // Target operations + const TF_Operation* const* target_opers, int ntargets, + // Output status + TF_Status*); + +// Deletes a handle allocated by TF_SessionPRunSetup. +// Once called, no more calls to TF_SessionPRun should be made. +TF_CAPI_EXPORT extern void TF_DeletePRunHandle(const char* handle); + +// -------------------------------------------------------------------------- +// The deprecated session API. Please switch to the above instead of +// TF_ExtendGraph(). This deprecated API can be removed at any time without +// notice. 
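+//
+// For comparison with the deprecated calls below, a rough sketch of the
+// recommended TF_Session / TF_SessionRun flow documented above. This is
+// illustrative only and not part of the original header: error checks after
+// each call are omitted, and the operation name "c" and the constant value
+// are assumptions made for this example.
+//
+//   #include <stdio.h>
+//   #include "tensorflow/c/c_api.h"
+//
+//   int main(void) {
+//     TF_Status* s = TF_NewStatus();
+//     TF_Graph* graph = TF_NewGraph();
+//
+//     // Add a scalar float Const operation named "c" to the graph.
+//     TF_OperationDescription* desc = TF_NewOperation(graph, "Const", "c");
+//     TF_Tensor* value = TF_AllocateTensor(TF_FLOAT, NULL, 0, sizeof(float));
+//     *(float*)TF_TensorData(value) = 42.0f;
+//     TF_SetAttrTensor(desc, "value", value, s);
+//     TF_SetAttrType(desc, "dtype", TF_FLOAT);
+//     TF_Operation* c = TF_FinishOperation(desc, s);
+//
+//     // Create a session over the graph and fetch the output c:0.
+//     TF_SessionOptions* opts = TF_NewSessionOptions();
+//     TF_Session* session = TF_NewSession(graph, opts, s);
+//     TF_Output fetch = {c, 0};
+//     TF_Tensor* out = NULL;
+//     TF_SessionRun(session, /*run_options=*/NULL,
+//                   /*inputs=*/NULL, /*input_values=*/NULL, 0,
+//                   /*outputs=*/&fetch, &out, 1,
+//                   /*target_opers=*/NULL, 0,
+//                   /*run_metadata=*/NULL, s);
+//     if (TF_GetCode(s) == TF_OK) {
+//       printf("c = %f\n", *(float*)TF_TensorData(out));
+//     }
+//
+//     // The caller owns the fetched tensor and all created handles.
+//     TF_DeleteTensor(out);
+//     TF_DeleteTensor(value);
+//     TF_CloseSession(session, s);
+//     TF_DeleteSession(session, s);
+//     TF_DeleteSessionOptions(opts);
+//     TF_DeleteGraph(graph);
+//     TF_DeleteStatus(s);
+//     return 0;
+//   }
+//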
+ +typedef struct TF_DeprecatedSession TF_DeprecatedSession; + +TF_CAPI_EXPORT extern TF_DeprecatedSession* TF_NewDeprecatedSession( + const TF_SessionOptions*, TF_Status* status); +TF_CAPI_EXPORT extern void TF_CloseDeprecatedSession(TF_DeprecatedSession*, + TF_Status* status); +TF_CAPI_EXPORT extern void TF_DeleteDeprecatedSession(TF_DeprecatedSession*, + TF_Status* status); +TF_CAPI_EXPORT extern void TF_Reset(const TF_SessionOptions* opt, + const char** containers, int ncontainers, + TF_Status* status); +// Treat the bytes proto[0,proto_len-1] as a serialized GraphDef and +// add the nodes in that GraphDef to the graph for the session. +// +// Prefer use of TF_Session and TF_GraphImportGraphDef over this. +TF_CAPI_EXPORT extern void TF_ExtendGraph(TF_DeprecatedSession*, + const void* proto, size_t proto_len, + TF_Status*); + +// See TF_SessionRun() above. +TF_CAPI_EXPORT extern void TF_Run(TF_DeprecatedSession*, + const TF_Buffer* run_options, + const char** input_names, TF_Tensor** inputs, + int ninputs, const char** output_names, + TF_Tensor** outputs, int noutputs, + const char** target_oper_names, int ntargets, + TF_Buffer* run_metadata, TF_Status*); + +// See TF_SessionPRunSetup() above. +TF_CAPI_EXPORT extern void TF_PRunSetup(TF_DeprecatedSession*, + const char** input_names, int ninputs, + const char** output_names, int noutputs, + const char** target_oper_names, + int ntargets, const char** handle, + TF_Status*); + +// See TF_SessionPRun above. +TF_CAPI_EXPORT extern void TF_PRun(TF_DeprecatedSession*, const char* handle, + const char** input_names, TF_Tensor** inputs, + int ninputs, const char** output_names, + TF_Tensor** outputs, int noutputs, + const char** target_oper_names, int ntargets, + TF_Status*); + +typedef struct TF_DeviceList TF_DeviceList; + +// Lists all devices in a TF_Session. +// +// Caller takes ownership of the returned TF_DeviceList* which must eventually +// be freed with a call to TF_DeleteDeviceList. +TF_CAPI_EXPORT extern TF_DeviceList* TF_SessionListDevices(TF_Session* session, + TF_Status* status); + +// Lists all devices in a TF_Session. +// +// Caller takes ownership of the returned TF_DeviceList* which must eventually +// be freed with a call to TF_DeleteDeviceList. +TF_CAPI_EXPORT extern TF_DeviceList* TF_DeprecatedSessionListDevices( + TF_DeprecatedSession* session, TF_Status* status); + +// Deallocates the device list. +TF_CAPI_EXPORT extern void TF_DeleteDeviceList(TF_DeviceList* list); + +// Counts the number of elements in the device list. +TF_CAPI_EXPORT extern int TF_DeviceListCount(const TF_DeviceList* list); + +// Retrieves the full name of the device (e.g. /job:worker/replica:0/...) +// The return value will be a pointer to a null terminated string. The caller +// must not modify or delete the string. It will be deallocated upon a call to +// TF_DeleteDeviceList. +// +// If index is out of bounds, an error code will be set in the status object, +// and a null pointer will be returned. +TF_CAPI_EXPORT extern const char* TF_DeviceListName(const TF_DeviceList* list, + int index, + TF_Status* status); + +// Retrieves the type of the device at the given index. +// +// The caller must not modify or delete the string. It will be deallocated upon +// a call to TF_DeleteDeviceList. +// +// If index is out of bounds, an error code will be set in the status object, +// and a null pointer will be returned. 
+TF_CAPI_EXPORT extern const char* TF_DeviceListType(const TF_DeviceList* list, + int index, + TF_Status* status); + +// Retrieve the amount of memory associated with a given device. +// +// If index is out of bounds, an error code will be set in the status object, +// and -1 will be returned. +TF_CAPI_EXPORT extern int64_t TF_DeviceListMemoryBytes( + const TF_DeviceList* list, int index, TF_Status* status); + +// Retrieve the incarnation number of a given device. +// +// If index is out of bounds, an error code will be set in the status object, +// and 0 will be returned. +TF_CAPI_EXPORT extern uint64_t TF_DeviceListIncarnation( + const TF_DeviceList* list, int index, TF_Status* status); + +// -------------------------------------------------------------------------- +// Load plugins containing custom ops and kernels + +// TF_Library holds information about dynamically loaded TensorFlow plugins. +typedef struct TF_Library TF_Library; + +// Load the library specified by library_filename and register the ops and +// kernels present in that library. +// +// Pass "library_filename" to a platform-specific mechanism for dynamically +// loading a library. The rules for determining the exact location of the +// library are platform-specific and are not documented here. +// +// On success, place OK in status and return the newly created library handle. +// The caller owns the library handle. +// +// On failure, place an error status in status and return NULL. +TF_CAPI_EXPORT extern TF_Library* TF_LoadLibrary(const char* library_filename, + TF_Status* status); + +// Get the OpList of OpDefs defined in the library pointed by lib_handle. +// +// Returns a TF_Buffer. The memory pointed to by the result is owned by +// lib_handle. The data in the buffer will be the serialized OpList proto for +// ops defined in the library. +TF_CAPI_EXPORT extern TF_Buffer TF_GetOpList(TF_Library* lib_handle); + +// Frees the memory associated with the library handle. +// Does NOT unload the library. +TF_CAPI_EXPORT extern void TF_DeleteLibraryHandle(TF_Library* lib_handle); + +// Get the OpList of all OpDefs defined in this address space. +// Returns a TF_Buffer, ownership of which is transferred to the caller +// (and can be freed using TF_DeleteBuffer). +// +// The data in the buffer will be the serialized OpList proto for ops registered +// in this address space. +TF_CAPI_EXPORT extern TF_Buffer* TF_GetAllOpList(void); + +// TF_ApiDefMap encapsulates a collection of API definitions for an operation. +// +// This object maps the name of a TensorFlow operation to a description of the +// API to generate for it, as defined by the ApiDef protocol buffer ( +// https://www.tensorflow.org/code/tensorflow/core/framework/api_def.proto) +// +// The ApiDef messages are typically used to generate convenience wrapper +// functions for TensorFlow operations in various language bindings. +typedef struct TF_ApiDefMap TF_ApiDefMap; + +// Creates a new TF_ApiDefMap instance. +// +// Params: +// op_list_buffer - TF_Buffer instance containing serialized OpList +// protocol buffer. (See +// https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto +// for the OpList proto definition). +// status - Set to OK on success and an appropriate error on failure. +TF_CAPI_EXPORT extern TF_ApiDefMap* TF_NewApiDefMap(TF_Buffer* op_list_buffer, + TF_Status* status); + +// Deallocates a TF_ApiDefMap. +TF_CAPI_EXPORT extern void TF_DeleteApiDefMap(TF_ApiDefMap* apimap); + +// Add ApiDefs to the map. 
+// +// `text` corresponds to a text representation of an ApiDefs protocol message. +// (https://www.tensorflow.org/code/tensorflow/core/framework/api_def.proto). +// +// The provided ApiDefs will be merged with existing ones in the map, with +// precedence given to the newly added version in case of conflicts with +// previous calls to TF_ApiDefMapPut. +TF_CAPI_EXPORT extern void TF_ApiDefMapPut(TF_ApiDefMap* api_def_map, + const char* text, size_t text_len, + TF_Status* status); + +// Returns a serialized ApiDef protocol buffer for the TensorFlow operation +// named `name`. +TF_CAPI_EXPORT extern TF_Buffer* TF_ApiDefMapGet(TF_ApiDefMap* api_def_map, + const char* name, + size_t name_len, + TF_Status* status); + +// -------------------------------------------------------------------------- +// Kernel definition information. + +// Returns a serialized KernelList protocol buffer containing KernelDefs for all +// registered kernels. +TF_CAPI_EXPORT extern TF_Buffer* TF_GetAllRegisteredKernels(TF_Status* status); + +// Returns a serialized KernelList protocol buffer containing KernelDefs for all +// kernels registered for the operation named `name`. +TF_CAPI_EXPORT extern TF_Buffer* TF_GetRegisteredKernelsForOp( + const char* name, TF_Status* status); + +// -------------------------------------------------------------------------- +// In-process TensorFlow server functionality, for use in distributed training. +// A Server instance encapsulates a set of devices and a Session target that +// can participate in distributed training. A server belongs to a cluster +// (specified by a ClusterSpec), and corresponds to a particular task in a +// named job. The server can communicate with any other server in the same +// cluster. + +// In-process TensorFlow server. +typedef struct TF_Server TF_Server; + +// Creates a new in-process TensorFlow server configured using a serialized +// ServerDef protocol buffer provided via `proto` and `proto_len`. +// +// The server will not serve any requests until TF_ServerStart is invoked. +// The server will stop serving requests once TF_ServerStop or +// TF_DeleteServer is invoked. +TF_CAPI_EXPORT extern TF_Server* TF_NewServer(const void* proto, + size_t proto_len, + TF_Status* status); + +// Starts an in-process TensorFlow server. +TF_CAPI_EXPORT extern void TF_ServerStart(TF_Server* server, TF_Status* status); + +// Stops an in-process TensorFlow server. +TF_CAPI_EXPORT extern void TF_ServerStop(TF_Server* server, TF_Status* status); + +// Blocks until the server has been successfully stopped (via TF_ServerStop or +// TF_ServerClose). +TF_CAPI_EXPORT extern void TF_ServerJoin(TF_Server* server, TF_Status* status); + +// Returns the target string that can be provided to TF_SetTarget() to connect +// a TF_Session to `server`. +// +// The returned string is valid only until TF_DeleteServer is invoked. +TF_CAPI_EXPORT extern const char* TF_ServerTarget(TF_Server* server); + +// Destroy an in-process TensorFlow server, frees memory. If server is running +// it will be stopped and joined. +TF_CAPI_EXPORT extern void TF_DeleteServer(TF_Server* server); + +// Register a listener method that processes printed messages. +// +// If any listeners are registered, the print operator will call all listeners +// with the printed messages and immediately return without writing to the +// logs. 
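+//
+// Illustrative sketch only (not part of the original header): a hypothetical
+// listener that forwards printed messages to stderr could be registered as
+// follows (assumes <stdio.h>):
+//
+//   static void MyPrintListener(const char* msg) { fputs(msg, stderr); }
+//   ...
+//   TF_RegisterLogListener(MyPrintListener);
+//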
+TF_CAPI_EXPORT extern void TF_RegisterLogListener( + void (*listener)(const char*)); + #ifdef __cplusplus } /* end extern "C" */ #endif diff --git a/tensorflow/c/c_api_internal.h b/tensorflow/c/c_api_internal.h index 11fb7705625..32880378c2b 100644 --- a/tensorflow/c/c_api_internal.h +++ b/tensorflow/c/c_api_internal.h @@ -16,14 +16,14 @@ limitations under the License. #ifndef TENSORFLOW_C_C_API_INTERNAL_H_ #define TENSORFLOW_C_C_API_INTERNAL_H_ +#include "tensorflow/c/c_api.h" + #include #include #include #include #include -#include "tensorflow/c/c_core_api.h" - // clang-format off // Required for IS_MOBILE_PLATFORM #include "tensorflow/core/platform/platform.h" @@ -217,10 +217,6 @@ bool ExtendSessionGraphHelper(TF_Session* session, TF_Status* status) std::string getTF_OutputDebugString(TF_Output node); -TF_Operation* ToOperation(Node* node); - -TensorId ToTensorId(const TF_Output& output); - } // end namespace tensorflow #endif // TENSORFLOW_C_C_API_INTERNAL_H_ diff --git a/tensorflow/c/c_core_api.cc b/tensorflow/c/c_core_api.cc deleted file mode 100644 index 67daaef08ac..00000000000 --- a/tensorflow/c/c_core_api.cc +++ /dev/null @@ -1,2193 +0,0 @@ -/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow/c/c_api.h" - -#include -#include -#include -#include - -#include "absl/strings/match.h" -// Required for IS_MOBILE_PLATFORM -#include "tensorflow/core/platform/platform.h" // NOLINT - -#if !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD) -#include "tensorflow/cc/saved_model/loader.h" -#include "tensorflow/core/distributed_runtime/server_lib.h" -#include "tensorflow/core/framework/logging.h" -#include "tensorflow/core/framework/op_gen_lib.h" -#endif // !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD) -#include "tensorflow/c/c_api_internal.h" -#include "tensorflow/c/tf_status_internal.h" -#include "tensorflow/c/tf_tensor.h" -#include "tensorflow/core/common_runtime/device_mgr.h" -#include "tensorflow/core/common_runtime/eval_const_tensor.h" -#include "tensorflow/core/common_runtime/shape_refiner.h" -#include "tensorflow/core/framework/allocation_description.pb.h" -#include "tensorflow/core/framework/kernel_def.pb.h" -#include "tensorflow/core/framework/log_memory.h" -#include "tensorflow/core/framework/node_def_util.h" -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/partial_tensor_shape.h" -#include "tensorflow/core/framework/tensor.h" -#include "tensorflow/core/framework/tensor.pb.h" // NOLINT -#include "tensorflow/core/framework/tensor_shape.h" -#include "tensorflow/core/framework/tensor_shape.pb.h" -#include "tensorflow/core/framework/types.h" -#include "tensorflow/core/framework/versions.pb.h" -#include "tensorflow/core/graph/graph.h" -#include "tensorflow/core/graph/graph_constructor.h" -#include "tensorflow/core/graph/node_builder.h" -#include "tensorflow/core/graph/validate.h" -#include 
"tensorflow/core/lib/core/coding.h" -#include "tensorflow/core/lib/core/errors.h" -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow/core/lib/core/stringpiece.h" -#include "tensorflow/core/lib/gtl/array_slice.h" -#include "tensorflow/core/lib/strings/str_util.h" -#include "tensorflow/core/lib/strings/strcat.h" -#include "tensorflow/core/platform/mem.h" -#include "tensorflow/core/platform/mutex.h" -#include "tensorflow/core/platform/protobuf.h" -#include "tensorflow/core/platform/thread_annotations.h" -#include "tensorflow/core/platform/types.h" -#include "tensorflow/core/public/session.h" -#include "tensorflow/core/public/version.h" - -// The implementation below is at the top level instead of the -// brain namespace because we are defining 'extern "C"' functions. -using tensorflow::AllocationDescription; -using tensorflow::DataType; -using tensorflow::ExtendSessionGraphHelper; -using tensorflow::Graph; -using tensorflow::GraphDef; -using tensorflow::mutex_lock; -using tensorflow::NameRangeMap; -using tensorflow::NameRangesForNode; -using tensorflow::NewSession; -using tensorflow::Node; -using tensorflow::NodeBuilder; -using tensorflow::NodeDef; -using tensorflow::OpDef; -using tensorflow::OpRegistry; -using tensorflow::OutputTensor; -using tensorflow::PartialTensorShape; -using tensorflow::RunMetadata; -using tensorflow::RunOptions; -using tensorflow::Session; -using tensorflow::Status; -using tensorflow::string; -using tensorflow::Tensor; -using tensorflow::TensorBuffer; -using tensorflow::TensorId; -using tensorflow::TensorShape; -using tensorflow::TensorShapeProto; -using tensorflow::ToTensorId; -using tensorflow::VersionDef; -using tensorflow::errors::FailedPrecondition; -using tensorflow::errors::InvalidArgument; -using tensorflow::gtl::ArraySlice; -using tensorflow::strings::StrCat; - -extern "C" { - -// -------------------------------------------------------------------------- -const char* TF_Version() { return TF_VERSION_STRING; } - -// -------------------------------------------------------------------------- - -// -------------------------------------------------------------------------- -TF_SessionOptions* TF_NewSessionOptions() { return new TF_SessionOptions; } -void TF_DeleteSessionOptions(TF_SessionOptions* opt) { delete opt; } - -void TF_SetTarget(TF_SessionOptions* options, const char* target) { - options->options.target = target; -} - -void TF_SetConfig(TF_SessionOptions* options, const void* proto, - size_t proto_len, TF_Status* status) { - if (!options->options.config.ParseFromArray(proto, proto_len)) { - status->status = InvalidArgument("Unparseable ConfigProto"); - } -} -// -------------------------------------------------------------------------- -TF_Buffer* TF_NewBuffer() { return new TF_Buffer{nullptr, 0, nullptr}; } - -TF_Buffer* TF_NewBufferFromString(const void* proto, size_t proto_len) { - void* copy = tensorflow::port::Malloc(proto_len); - memcpy(copy, proto, proto_len); - - TF_Buffer* buf = new TF_Buffer; - buf->data = copy; - buf->length = proto_len; - buf->data_deallocator = [](void* data, size_t length) { - tensorflow::port::Free(data); - }; - return buf; -} - -void TF_DeleteBuffer(TF_Buffer* buffer) { - if (buffer == nullptr) return; - if (buffer->data_deallocator != nullptr) { - (*buffer->data_deallocator)(const_cast(buffer->data), - buffer->length); - } - delete buffer; -} - -TF_Buffer TF_GetBuffer(TF_Buffer* buffer) { return *buffer; } - -// -------------------------------------------------------------------------- - 
-TF_DeprecatedSession* TF_NewDeprecatedSession(const TF_SessionOptions* opt, - TF_Status* status) { - Session* session; - status->status = NewSession(opt->options, &session); - if (status->status.ok()) { - return new TF_DeprecatedSession({session}); - } else { - DCHECK_EQ(nullptr, session); - return nullptr; - } -} - -void TF_CloseDeprecatedSession(TF_DeprecatedSession* s, TF_Status* status) { - status->status = s->session->Close(); -} - -void TF_DeleteDeprecatedSession(TF_DeprecatedSession* s, TF_Status* status) { - status->status = Status::OK(); - if (s == nullptr) return; - delete s->session; - delete s; -} - -void TF_ExtendGraph(TF_DeprecatedSession* s, const void* proto, - size_t proto_len, TF_Status* status) { - GraphDef g; - if (!tensorflow::ParseProtoUnlimited(&g, proto, proto_len)) { - status->status = InvalidArgument("Invalid GraphDef"); - return; - } - status->status = s->session->Extend(g); -} - -} // end extern "C" - -// Reset helper for converting character arrays to string vectors. -static void TF_Reset_Helper(const TF_SessionOptions* opt, - const char** containers, int ncontainers, - TF_Status* status) { - std::vector container_names(ncontainers); - for (int i = 0; i < ncontainers; ++i) { - container_names[i] = containers[i]; - } - - status->status = Reset(opt->options, container_names); -} - -extern "C" { - -void TF_Reset(const TF_SessionOptions* opt, const char** containers, - int ncontainers, TF_Status* status) { - TF_Reset_Helper(opt, containers, ncontainers, status); -} - -} // end extern "C" - -namespace tensorflow { - - -Status MessageToBuffer(const tensorflow::protobuf::MessageLite& in, - TF_Buffer* out) { - if (out->data != nullptr) { - return InvalidArgument("Passing non-empty TF_Buffer is invalid."); - } - const size_t proto_size = in.ByteSizeLong(); - void* buf = port::Malloc(proto_size); - if (buf == nullptr) { - return tensorflow::errors::ResourceExhausted( - "Failed to allocate memory to serialize message of type '", - in.GetTypeName(), "' and size ", proto_size); - } - if (!in.SerializeWithCachedSizesToArray(static_cast(buf))) { - port::Free(buf); - return InvalidArgument("Unable to serialize ", in.GetTypeName(), - " protocol buffer, perhaps the serialized size (", - proto_size, " bytes) is too large?"); - } - out->data = buf; - out->length = proto_size; - out->data_deallocator = [](void* data, size_t length) { port::Free(data); }; - return Status::OK(); -} - -void RecordMutation(TF_Graph* graph, const TF_Operation& op, - const char* mutation_type) { - // If any session has already run this node_id, mark this session as - // unrunnable. - for (auto it : graph->sessions) { - mutex_lock session_lock(it.first->mu); - if (it.first->last_num_graph_nodes > op.node.id()) { - it.second = strings::StrCat( - "Operation '", op.node.DebugString(), "' was changed by ", - mutation_type, - " after it was run by a session. This mutation will have no effect, " - "and will trigger an error in the future. Either don't modify " - "nodes after running them or create a new session."); - } - } -} - -namespace { - -// Helper method that creates a shape handle for a shape described by dims. 
-tensorflow::shape_inference::ShapeHandle ShapeHandleFromDims( - tensorflow::shape_inference::InferenceContext* ic, int num_dims, - const int64_t* dims) { - if (num_dims != -1) { - std::vector dim_vec; - dim_vec.reserve(num_dims); - for (int i = 0; i < num_dims; ++i) { - dim_vec.push_back(ic->MakeDim(dims[i])); - } - return ic->MakeShape(dim_vec); - } else { - return ic->UnknownShape(); - } -} - -} // namespace - -void TF_GraphSetOutputHandleShapesAndTypes(TF_Graph* graph, TF_Output output, - int num_shapes_and_types, - const int64_t** shapes, - const int* ranks, - const TF_DataType* types, - TF_Status* status) { - Node* node = &output.oper->node; - - mutex_lock l(graph->mu); - tensorflow::shape_inference::InferenceContext* ic = - graph->refiner.GetContext(node); - if (ic == nullptr) { - status->status = - InvalidArgument("Node ", node->name(), " was not found in the graph"); - return; - } - - auto shape_and_type_vec = - std::vector( - num_shapes_and_types); - for (int i = 0; i < num_shapes_and_types; ++i) { - tensorflow::shape_inference::ShapeHandle shape_handle = - ShapeHandleFromDims(ic, ranks[i], shapes[i]); - shape_and_type_vec[i] = tensorflow::shape_inference::ShapeAndType( - shape_handle, static_cast(types[i])); - } - - ic->set_output_handle_shapes_and_types(output.index, shape_and_type_vec); -} - -// Helpers for loading a TensorFlow plugin (a .so file). -Status LoadLibrary(const char* library_filename, void** result, - const void** buf, size_t* len); - -// TODO(josh11b,mrry): Change Session to be able to use a Graph* -// directly, instead of requiring us to serialize to a GraphDef and -// call Session::Extend(). -bool ExtendSessionGraphHelper(TF_Session* session, TF_Status* status) { - if (session->graph != nullptr) { - // Take the graph lock before the session lock to avoid deadlock. This is - // safe since session->graph does not change. - session->graph->mu.lock(); - mutex_lock session_lock(session->mu); - const Graph& graph = session->graph->graph; - - const string& mutation_warning = session->graph->sessions[session]; - if (!mutation_warning.empty()) { - // TODO(b/74949947): turn this back into an error status - LOG(WARNING) << mutation_warning; - session->graph->sessions[session].clear(); - } - - const auto num_nodes = graph.num_node_ids(); - if (session->last_num_graph_nodes < num_nodes) { - // TODO(nolivia): check this on a subset of the graph instead of all of - // it. - status->status = graph::ValidateGraphHasNoCycle(session->graph->graph); - if (!status->status.ok()) { - session->graph->mu.unlock(); - return false; - } - - GraphDef graph_def; - *graph_def.mutable_versions() = graph.versions(); - // Fill graph_def with nodes with ids in the range - // [session->last_num_graph_nodes, num_nodes), that is the nodes - // added since the last TF_SessionRun() call. - for (auto id = session->last_num_graph_nodes; id < num_nodes; ++id) { - Node* const node = graph.FindNodeId(id); - if (node != nullptr && node->IsOp()) { - NodeDef* const node_def = graph_def.add_node(); - *node_def = node->def(); - } - } - *graph_def.mutable_library() = graph.flib_def().ToProto(); - session->graph->mu.unlock(); - status->status = session->session->Extend(std::move(graph_def)); - if (!status->status.ok()) { - // Contract is we always delete input_values[i]. - return false; - } - // Note: session->session is not modified if Extend() fails, so - // we only set last_num_graph_nodes if it succeeds. 
- session->last_num_graph_nodes = num_nodes; - } else { - session->graph->mu.unlock(); - } - } - return true; -} - -} // namespace tensorflow - -static void TF_Run_Setup(int noutputs, TF_Tensor** c_outputs, - TF_Status* status) { - status->status = Status::OK(); - for (int i = 0; i < noutputs; ++i) { - c_outputs[i] = nullptr; - } -} - -static bool TF_Run_Inputs(TF_Tensor* const* c_inputs, - std::vector>* input_pairs, - TF_Status* status) { - const int ninputs = input_pairs->size(); - for (int i = 0; i < ninputs; ++i) { - status->status = TF_TensorToTensor(c_inputs[i], &(*input_pairs)[i].second); - if (!status->status.ok()) return false; - } - return true; -} - -// Create an empty tensor of type 'dtype'. 'shape' can be arbitrary, but has to -// result in a zero-sized tensor. -static TF_Tensor* EmptyTensor(TF_DataType dtype, - const tensorflow::TensorShape& shape) { - static char empty; - tensorflow::int64 nelems = 1; - std::vector dims; - for (int i = 0; i < shape.dims(); ++i) { - dims.push_back(shape.dim_size(i)); - nelems *= shape.dim_size(i); - } - CHECK_EQ(nelems, 0); - static_assert(sizeof(int64_t) == sizeof(tensorflow::int64), - "64-bit int types should match in size"); - return TF_NewTensor( - dtype, reinterpret_cast(dims.data()), shape.dims(), - reinterpret_cast(&empty), 0, [](void*, size_t, void*) {}, nullptr); -} - -static void TF_Run_Helper( - Session* session, const char* handle, const TF_Buffer* run_options, - // Input tensors - const std::vector>& input_pairs, - // Output tensors - const std::vector& output_tensor_names, TF_Tensor** c_outputs, - // Target nodes - const std::vector& target_oper_names, TF_Buffer* run_metadata, - TF_Status* status) { - const int noutputs = output_tensor_names.size(); - std::vector outputs(noutputs); - Status result; - - if (handle == nullptr) { - RunOptions run_options_proto; - if (run_options != nullptr && !run_options_proto.ParseFromArray( - run_options->data, run_options->length)) { - status->status = InvalidArgument("Unparseable RunOptions proto"); - return; - } - if (run_metadata != nullptr && run_metadata->data != nullptr) { - status->status = - InvalidArgument("Passing non-empty run_metadata is invalid."); - return; - } - - RunMetadata run_metadata_proto; - result = session->Run(run_options_proto, input_pairs, output_tensor_names, - target_oper_names, &outputs, &run_metadata_proto); - - // Serialize back to upstream client, who now owns the new buffer - if (run_metadata != nullptr) { - status->status = MessageToBuffer(run_metadata_proto, run_metadata); - if (!status->status.ok()) return; - } - } else { - // NOTE(zongheng): PRun does not support RunOptions yet. 
- result = session->PRun(handle, input_pairs, output_tensor_names, &outputs); - } - if (!result.ok()) { - status->status = result; - return; - } - - // Store results in c_outputs[] - for (int i = 0; i < noutputs; ++i) { - const Tensor& src = outputs[i]; - if (!src.IsInitialized() || src.NumElements() == 0) { - c_outputs[i] = - EmptyTensor(static_cast(src.dtype()), src.shape()); - continue; - } - c_outputs[i] = TF_TensorFromTensor(src, &status->status); - if (!status->status.ok()) return; - } -} - -extern "C" { - -void TF_Run(TF_DeprecatedSession* s, const TF_Buffer* run_options, - // Input tensors - const char** c_input_names, TF_Tensor** c_inputs, int ninputs, - // Output tensors - const char** c_output_names, TF_Tensor** c_outputs, int noutputs, - // Target nodes - const char** c_target_oper_names, int ntargets, - TF_Buffer* run_metadata, TF_Status* status) { - TF_Run_Setup(noutputs, c_outputs, status); - std::vector> input_pairs(ninputs); - if (!TF_Run_Inputs(c_inputs, &input_pairs, status)) return; - for (int i = 0; i < ninputs; ++i) { - input_pairs[i].first = c_input_names[i]; - } - std::vector output_names(noutputs); - for (int i = 0; i < noutputs; ++i) { - output_names[i] = c_output_names[i]; - } - std::vector target_oper_names(ntargets); - for (int i = 0; i < ntargets; ++i) { - target_oper_names[i] = c_target_oper_names[i]; - } - TF_Run_Helper(s->session, nullptr, run_options, input_pairs, output_names, - c_outputs, target_oper_names, run_metadata, status); -} - -void TF_PRunSetup(TF_DeprecatedSession* s, - // Input names - const char** c_input_names, int ninputs, - // Output names - const char** c_output_names, int noutputs, - // Target nodes - const char** c_target_oper_names, int ntargets, - const char** handle, TF_Status* status) { - *handle = nullptr; - - std::vector input_names(ninputs); - std::vector output_names(noutputs); - std::vector target_oper_names(ntargets); - for (int i = 0; i < ninputs; ++i) { - input_names[i] = c_input_names[i]; - } - for (int i = 0; i < noutputs; ++i) { - output_names[i] = c_output_names[i]; - } - for (int i = 0; i < ntargets; ++i) { - target_oper_names[i] = c_target_oper_names[i]; - } - string new_handle; - status->status = s->session->PRunSetup(input_names, output_names, - target_oper_names, &new_handle); - if (status->status.ok()) { - char* buf = new char[new_handle.size() + 1]; - memcpy(buf, new_handle.c_str(), new_handle.size() + 1); - *handle = buf; - } -} - -void TF_PRun(TF_DeprecatedSession* s, const char* handle, - // Input tensors - const char** c_input_names, TF_Tensor** c_inputs, int ninputs, - // Output tensors - const char** c_output_names, TF_Tensor** c_outputs, int noutputs, - // Target nodes - const char** c_target_oper_names, int ntargets, - TF_Status* status) { - TF_Run_Setup(noutputs, c_outputs, status); - std::vector> input_pairs(ninputs); - if (!TF_Run_Inputs(c_inputs, &input_pairs, status)) return; - for (int i = 0; i < ninputs; ++i) { - input_pairs[i].first = c_input_names[i]; - } - - std::vector output_names(noutputs); - for (int i = 0; i < noutputs; ++i) { - output_names[i] = c_output_names[i]; - } - std::vector target_oper_names(ntargets); - for (int i = 0; i < ntargets; ++i) { - target_oper_names[i] = c_target_oper_names[i]; - } - TF_Run_Helper(s->session, handle, nullptr, input_pairs, output_names, - c_outputs, target_oper_names, nullptr, status); -} - -TF_Library* TF_LoadLibrary(const char* library_filename, TF_Status* status) { - TF_Library* lib_handle = new TF_Library; - status->status = tensorflow::LoadLibrary( - 
library_filename, &lib_handle->lib_handle, &lib_handle->op_list.data, - &lib_handle->op_list.length); - if (!status->status.ok()) { - delete lib_handle; - return nullptr; - } - return lib_handle; -} - -TF_Buffer TF_GetOpList(TF_Library* lib_handle) { return lib_handle->op_list; } - -void TF_DeleteLibraryHandle(TF_Library* lib_handle) { - if (lib_handle == nullptr) return; - tensorflow::port::Free(const_cast(lib_handle->op_list.data)); - delete lib_handle; -} - -TF_Buffer* TF_GetAllOpList() { - std::vector op_defs; - tensorflow::OpRegistry::Global()->GetRegisteredOps(&op_defs); - tensorflow::OpList op_list; - for (const auto& op : op_defs) { - *(op_list.add_op()) = op; - } - TF_Buffer* ret = TF_NewBuffer(); - TF_CHECK_OK(MessageToBuffer(op_list, ret)); - return ret; -} - -// -------------------------------------------------------------------------- -// ListDevices & SessionListDevices API - -void TF_DeleteDeviceList(TF_DeviceList* list) { delete list; } - -TF_DeviceList* TF_SessionListDevices(TF_Session* session, TF_Status* status) { - TF_DeviceList* response = new TF_DeviceList; - status->status = session->session->ListDevices(&response->response); - return response; -} - -TF_DeviceList* TF_DeprecatedSessionListDevices(TF_DeprecatedSession* session, - TF_Status* status) { - TF_DeviceList* response = new TF_DeviceList; - status->status = session->session->ListDevices(&response->response); - return response; -} - -int TF_DeviceListCount(const TF_DeviceList* list) { - return list->response.size(); -} - -#define TF_DEVICELIST_METHOD(return_type, method_name, accessor, err_val) \ - return_type method_name(const TF_DeviceList* list, const int index, \ - TF_Status* status) { \ - if (list == nullptr) { \ - status->status = InvalidArgument("list is null!"); \ - return err_val; \ - } \ - if (index < 0 || index >= list->response.size()) { \ - status->status = InvalidArgument("index out of bounds"); \ - return err_val; \ - } \ - status->status = Status::OK(); \ - return list->response[index].accessor; \ - } - -TF_DEVICELIST_METHOD(const char*, TF_DeviceListName, name().c_str(), nullptr); -TF_DEVICELIST_METHOD(const char*, TF_DeviceListType, device_type().c_str(), - nullptr); -TF_DEVICELIST_METHOD(int64_t, TF_DeviceListMemoryBytes, memory_limit(), -1); -TF_DEVICELIST_METHOD(uint64_t, TF_DeviceListIncarnation, incarnation(), 0); - -#undef TF_DEVICELIST_METHOD - -} // end extern "C" - -// -------------------------------------------------------------------------- -// New Graph and Session API - -// Helper functions ----------------------------------------------------------- - -namespace tensorflow { - -TF_Operation* ToOperation(Node* node) { - return static_cast(static_cast(node)); -} - -TensorId ToTensorId(const TF_Output& output) { - return TensorId(output.oper->node.name(), output.index); -} - -} // namespace tensorflow - -namespace { - -string OutputName(const TF_Output& output) { - return StrCat(output.oper->node.name(), ":", output.index); -} - -const tensorflow::AttrValue* GetAttrValue(TF_Operation* oper, - const char* attr_name, - TF_Status* status) { - const tensorflow::AttrValue* attr = oper->node.attrs().Find(attr_name); - if (attr == nullptr) { - status->status = InvalidArgument("Operation '", oper->node.name(), - "' has no attr named '", attr_name, "'."); - } - return attr; -} - -} // namespace - -// Shape functions ----------------------------------------------------------- - -void TF_GraphSetTensorShape(TF_Graph* graph, TF_Output output, - const int64_t* dims, const int num_dims, - 
TF_Status* status) { - Node* node = &output.oper->node; - - mutex_lock l(graph->mu); - tensorflow::shape_inference::InferenceContext* ic = - graph->refiner.GetContext(node); - if (ic == nullptr) { - status->status = - InvalidArgument("Node ", node->name(), " was not found in the graph"); - return; - } - tensorflow::shape_inference::ShapeHandle new_shape = - tensorflow::ShapeHandleFromDims(ic, num_dims, dims); - status->status = graph->refiner.SetShape(node, output.index, new_shape); -} - -int TF_GraphGetTensorNumDims(TF_Graph* graph, TF_Output output, - TF_Status* status) { - Node* node = &output.oper->node; - - mutex_lock l(graph->mu); - tensorflow::shape_inference::InferenceContext* ic = - graph->refiner.GetContext(node); - if (ic == nullptr) { - status->status = - InvalidArgument("Node ", node->name(), " was not found in the graph"); - return -1; - } - - tensorflow::shape_inference::ShapeHandle shape = ic->output(output.index); - - // Unknown rank means the number of dimensions is -1. - if (!ic->RankKnown(shape)) { - return -1; - } - - return ic->Rank(shape); -} - -void TF_GraphGetTensorShape(TF_Graph* graph, TF_Output output, int64_t* dims, - int num_dims, TF_Status* status) { - Node* node = &output.oper->node; - - mutex_lock l(graph->mu); - tensorflow::shape_inference::InferenceContext* ic = - graph->refiner.GetContext(node); - if (ic == nullptr) { - status->status = - InvalidArgument("Node ", node->name(), " was not found in the graph"); - return; - } - - tensorflow::shape_inference::ShapeHandle shape = ic->output(output.index); - - int rank = -1; - if (ic->RankKnown(shape)) { - rank = ic->Rank(shape); - } - - if (num_dims != rank) { - status->status = InvalidArgument("Expected rank is ", num_dims, - " but actual rank is ", rank); - return; - } - - if (num_dims == 0) { - // Output shape is a scalar. - return; - } - - // Rank is greater than 0, so fill in the values, if known, and - // -1 for unknown values. 
- for (int i = 0; i < num_dims; ++i) { - auto dim = ic->Dim(shape, i); - tensorflow::int64 value = -1; - if (ic->ValueKnown(dim)) { - value = ic->Value(dim); - } - dims[i] = value; - } -} - -// TF_OperationDescription functions ------------------------------------------ - -extern "C" { - -static TF_OperationDescription* TF_NewOperationLocked(TF_Graph* graph, - const char* op_type, - const char* oper_name) - TF_EXCLUSIVE_LOCKS_REQUIRED(graph->mu) { - return new TF_OperationDescription(graph, op_type, oper_name); -} - -TF_OperationDescription* TF_NewOperation(TF_Graph* graph, const char* op_type, - const char* oper_name) { - mutex_lock l(graph->mu); - return TF_NewOperationLocked(graph, op_type, oper_name); -} - -void TF_SetDevice(TF_OperationDescription* desc, const char* device) { - desc->node_builder.Device(device); -} - -void TF_AddInput(TF_OperationDescription* desc, TF_Output input) { - desc->node_builder.Input(&input.oper->node, input.index); -} - -void TF_AddInputList(TF_OperationDescription* desc, const TF_Output* inputs, - int num_inputs) { - std::vector input_list; - input_list.reserve(num_inputs); - for (int i = 0; i < num_inputs; ++i) { - input_list.emplace_back(&inputs[i].oper->node, inputs[i].index); - } - desc->node_builder.Input(input_list); -} - -void TF_AddControlInput(TF_OperationDescription* desc, TF_Operation* input) { - desc->node_builder.ControlInput(&input->node); -} - -void TF_ColocateWith(TF_OperationDescription* desc, TF_Operation* op) { - desc->colocation_constraints.emplace( - StrCat(tensorflow::kColocationGroupPrefix, op->node.name())); -} - -void TF_SetAttrString(TF_OperationDescription* desc, const char* attr_name, - const void* value, size_t length) { - tensorflow::StringPiece s(static_cast(value), length); - desc->node_builder.Attr(attr_name, s); -} - -void TF_SetAttrStringList(TF_OperationDescription* desc, const char* attr_name, - const void* const* values, const size_t* lengths, - int num_values) { - if (strcmp(attr_name, tensorflow::kColocationAttrName) == 0) { - desc->colocation_constraints.clear(); - for (int i = 0; i < num_values; ++i) { - desc->colocation_constraints.emplace(static_cast(values[i]), - lengths[i]); - } - } else { - std::vector v; - v.reserve(num_values); - for (int i = 0; i < num_values; ++i) { - v.emplace_back(static_cast(values[i]), lengths[i]); - } - desc->node_builder.Attr(attr_name, v); - } -} - -void TF_SetAttrInt(TF_OperationDescription* desc, const char* attr_name, - int64_t value) { - static_assert(sizeof(int64_t) == sizeof(tensorflow::int64), - "64-bit int types should match in size"); - desc->node_builder.Attr(attr_name, static_cast(value)); -} - -void TF_SetAttrIntList(TF_OperationDescription* desc, const char* attr_name, - const int64_t* values, int num_values) { - static_assert(sizeof(int64_t) == sizeof(tensorflow::int64), - "64-bit int types should match in size"); - desc->node_builder.Attr( - attr_name, - ArraySlice( - reinterpret_cast(values), num_values)); -} - -void TF_SetAttrFloat(TF_OperationDescription* desc, const char* attr_name, - float value) { - desc->node_builder.Attr(attr_name, value); -} - -void TF_SetAttrFloatList(TF_OperationDescription* desc, const char* attr_name, - const float* values, int num_values) { - desc->node_builder.Attr(attr_name, - ArraySlice(values, num_values)); -} - -void TF_SetAttrBool(TF_OperationDescription* desc, const char* attr_name, - unsigned char value) { - desc->node_builder.Attr(attr_name, static_cast(value)); -} - -void TF_SetAttrBoolList(TF_OperationDescription* desc, const 
char* attr_name, - const unsigned char* values, int num_values) { - std::unique_ptr b(new bool[num_values]); - for (int i = 0; i < num_values; ++i) { - b[i] = values[i]; - } - desc->node_builder.Attr(attr_name, - ArraySlice(b.get(), num_values)); -} - -void TF_SetAttrType(TF_OperationDescription* desc, const char* attr_name, - TF_DataType value) { - desc->node_builder.Attr(attr_name, static_cast(value)); -} - -void TF_SetAttrTypeList(TF_OperationDescription* desc, const char* attr_name, - const TF_DataType* values, int num_values) { - desc->node_builder.Attr( - attr_name, ArraySlice( - reinterpret_cast(values), num_values)); -} - -void TF_SetAttrPlaceholder(TF_OperationDescription* desc, const char* attr_name, - const char* placeholder) { - tensorflow::AttrValue attr_value; - attr_value.set_placeholder(placeholder); - desc->node_builder.Attr(attr_name, attr_value); -} - -void TF_SetAttrFuncName(TF_OperationDescription* desc, const char* attr_name, - const char* value, size_t length) { - tensorflow::NameAttrList func_name; - func_name.set_name(string(value, value + length)); - desc->node_builder.Attr(attr_name, func_name); -} - -void TF_SetAttrShape(TF_OperationDescription* desc, const char* attr_name, - const int64_t* dims, int num_dims) { - PartialTensorShape shape; - if (num_dims >= 0) { - static_assert(sizeof(int64_t) == sizeof(tensorflow::int64), - "64-bit int types should match in size"); - shape = PartialTensorShape(ArraySlice( - reinterpret_cast(dims), num_dims)); - } - desc->node_builder.Attr(attr_name, shape); -} - -void TF_SetAttrShapeList(TF_OperationDescription* desc, const char* attr_name, - const int64_t* const* dims, const int* num_dims, - int num_shapes) { - std::vector shapes; - shapes.reserve(num_shapes); - for (int i = 0; i < num_shapes; ++i) { - if (num_dims[i] < 0) { - shapes.emplace_back(); - } else { - static_assert(sizeof(int64_t) == sizeof(tensorflow::int64), - "64-bit int types should match in size"); - shapes.emplace_back(ArraySlice( - reinterpret_cast(dims[i]), num_dims[i])); - } - } - desc->node_builder.Attr(attr_name, shapes); -} - -void TF_SetAttrTensorShapeProto(TF_OperationDescription* desc, - const char* attr_name, const void* proto, - size_t proto_len, TF_Status* status) { - // shape.ParseFromArray takes an int as length, this function takes size_t, - // make sure there is no information loss. 
- if (proto_len > std::numeric_limits::max()) { - status->status = InvalidArgument( - "proto_len (", proto_len, - " bytes) is too large to be parsed by the protocol buffer library"); - return; - } - TensorShapeProto shape; - if (shape.ParseFromArray(proto, static_cast(proto_len))) { - desc->node_builder.Attr(attr_name, shape); - status->status = Status::OK(); - } else { - status->status = InvalidArgument("Unparseable TensorShapeProto"); - } -} - -void TF_SetAttrTensorShapeProtoList(TF_OperationDescription* desc, - const char* attr_name, - const void* const* protos, - const size_t* proto_lens, int num_shapes, - TF_Status* status) { - std::vector shapes; - shapes.resize(num_shapes); - for (int i = 0; i < num_shapes; ++i) { - if (proto_lens[i] > std::numeric_limits::max()) { - status->status = InvalidArgument( - "length of element ", i, " in the list (", proto_lens[i], - " bytes) is too large to be parsed by the protocol buffer library"); - return; - } - if (!shapes[i].ParseFromArray(protos[i], static_cast(proto_lens[i]))) { - status->status = - InvalidArgument("Unparseable TensorShapeProto at index ", i); - return; - } - } - desc->node_builder.Attr(attr_name, shapes); - status->status = Status::OK(); -} - -void TF_SetAttrTensor(TF_OperationDescription* desc, const char* attr_name, - TF_Tensor* value, TF_Status* status) { - Tensor t; - status->status = TF_TensorToTensor(value, &t); - if (status->status.ok()) desc->node_builder.Attr(attr_name, t); -} - -void TF_SetAttrTensorList(TF_OperationDescription* desc, const char* attr_name, - TF_Tensor* const* values, int num_values, - TF_Status* status) { - status->status = Status::OK(); - std::vector t; - t.reserve(num_values); - - for (int i = 0; i < num_values && status->status.ok(); ++i) { - Tensor v; - status->status = TF_TensorToTensor(values[i], &v); - t.emplace_back(v); - } - - if (status->status.ok()) desc->node_builder.Attr(attr_name, t); -} - -void TF_SetAttrValueProto(TF_OperationDescription* desc, const char* attr_name, - const void* proto, size_t proto_len, - TF_Status* status) { - tensorflow::AttrValue attr_value; - if (!attr_value.ParseFromArray(proto, proto_len)) { - status->status = InvalidArgument("Unparseable AttrValue proto"); - return; - } - - if (strcmp(attr_name, tensorflow::kColocationAttrName) == 0) { - if (attr_value.value_case() != tensorflow::AttrValue::kList && - attr_value.value_case() != tensorflow::AttrValue::VALUE_NOT_SET) { - status->status = - InvalidArgument("Expected \"list\" field for \"", - tensorflow::kColocationAttrName, "\" attribute"); - return; - } - desc->colocation_constraints.clear(); - for (const string& location : attr_value.list().s()) { - desc->colocation_constraints.insert(location); - } - } else { - desc->node_builder.Attr(attr_name, std::move(attr_value)); - } - - status->status = Status::OK(); -} - -static TF_Operation* TF_FinishOperationLocked(TF_OperationDescription* desc, - TF_Status* status) - TF_EXCLUSIVE_LOCKS_REQUIRED(desc->graph->mu) { - Node* ret = nullptr; - - if (desc->graph->name_map.count(desc->node_builder.node_name())) { - status->status = InvalidArgument("Duplicate node name in graph: '", - desc->node_builder.node_name(), "'"); - } else { - if (!desc->colocation_constraints.empty()) { - desc->node_builder.Attr( - tensorflow::kColocationAttrName, - std::vector(desc->colocation_constraints.begin(), - desc->colocation_constraints.end())); - } - status->status = desc->node_builder.Finalize(&desc->graph->graph, &ret, - /*consume=*/true); - - if (status->status.ok()) { - // Run shape 
inference function for newly added node. - status->status = desc->graph->refiner.AddNode(ret); - } - if (status->status.ok()) { - // Add the node to the name-to-node mapping. - desc->graph->name_map[ret->name()] = ret; - } else if (ret != nullptr) { - desc->graph->graph.RemoveNode(ret); - ret = nullptr; - } - } - - delete desc; - - return ToOperation(ret); -} - -TF_Operation* TF_FinishOperation(TF_OperationDescription* desc, - TF_Status* status) { - mutex_lock l(desc->graph->mu); - return TF_FinishOperationLocked(desc, status); -} - -// TF_Operation functions -// ---------------------------------------------------------- - -const char* TF_OperationName(TF_Operation* oper) { - return oper->node.name().c_str(); -} - -const char* TF_OperationOpType(TF_Operation* oper) { - return oper->node.type_string().c_str(); -} - -const char* TF_OperationDevice(TF_Operation* oper) { - return oper->node.requested_device().c_str(); -} - -int TF_OperationNumOutputs(TF_Operation* oper) { - return oper->node.num_outputs(); -} - -TF_DataType TF_OperationOutputType(TF_Output oper_out) { - return static_cast( - oper_out.oper->node.output_type(oper_out.index)); -} - -int TF_OperationOutputListLength(TF_Operation* oper, const char* arg_name, - TF_Status* status) { - NameRangeMap name_ranges; - status->status = - NameRangesForNode(oper->node, oper->node.op_def(), nullptr, &name_ranges); - if (!status->status.ok()) return -1; - auto iter = name_ranges.find(arg_name); - if (iter == name_ranges.end()) { - status->status = InvalidArgument("Output arg '", arg_name, "' not found"); - return -1; - } - return iter->second.second - iter->second.first; -} - -int TF_OperationNumInputs(TF_Operation* oper) { - return oper->node.num_inputs(); -} - -TF_DataType TF_OperationInputType(TF_Input oper_in) { - return static_cast(oper_in.oper->node.input_type(oper_in.index)); -} - -int TF_OperationInputListLength(TF_Operation* oper, const char* arg_name, - TF_Status* status) { - NameRangeMap name_ranges; - status->status = - NameRangesForNode(oper->node, oper->node.op_def(), &name_ranges, nullptr); - if (!status->status.ok()) return -1; - auto iter = name_ranges.find(arg_name); - if (iter == name_ranges.end()) { - status->status = InvalidArgument("Input arg '", arg_name, "' not found"); - return -1; - } - return iter->second.second - iter->second.first; -} - -TF_Output TF_OperationInput(TF_Input oper_in) { - const tensorflow::Edge* edge; - Status s = oper_in.oper->node.input_edge(oper_in.index, &edge); - if (!s.ok()) { - return {nullptr, -1}; - } - - return {ToOperation(edge->src()), edge->src_output()}; -} - -void TF_OperationAllInputs(TF_Operation* oper, TF_Output* inputs, - int max_inputs) { - for (auto* edge : oper->node.in_edges()) { - if (edge->dst_input() >= 0 && edge->dst_input() < max_inputs) { - inputs[edge->dst_input()] = {ToOperation(edge->src()), - edge->src_output()}; - } - } -} - -int TF_OperationOutputNumConsumers(TF_Output oper_out) { - int count = 0; - for (const auto* edge : oper_out.oper->node.out_edges()) { - if (edge->src_output() == oper_out.index) { - ++count; - } - } - return count; -} - -int TF_OperationOutputConsumers(TF_Output oper_out, TF_Input* consumers, - int max_consumers) { - int count = 0; - for (const auto* edge : oper_out.oper->node.out_edges()) { - if (edge->src_output() == oper_out.index) { - if (count < max_consumers) { - consumers[count] = {ToOperation(edge->dst()), edge->dst_input()}; - } - ++count; - } - } - return count; -} - -int TF_OperationNumControlInputs(TF_Operation* oper) { - int count = 
0; - for (const auto* edge : oper->node.in_edges()) { - if (edge->IsControlEdge() && !edge->src()->IsSource()) { - ++count; - } - } - return count; -} - -int TF_OperationGetControlInputs(TF_Operation* oper, - TF_Operation** control_inputs, - int max_control_inputs) { - int count = 0; - for (const auto* edge : oper->node.in_edges()) { - if (edge->IsControlEdge() && !edge->src()->IsSource()) { - if (count < max_control_inputs) { - control_inputs[count] = ToOperation(edge->src()); - } - ++count; - } - } - return count; -} - -int TF_OperationNumControlOutputs(TF_Operation* oper) { - int count = 0; - for (const auto* edge : oper->node.out_edges()) { - if (edge->IsControlEdge() && !edge->dst()->IsSink()) { - ++count; - } - } - return count; -} - -int TF_OperationGetControlOutputs(TF_Operation* oper, - TF_Operation** control_outputs, - int max_control_outputs) { - int count = 0; - for (const auto* edge : oper->node.out_edges()) { - if (edge->IsControlEdge() && !edge->dst()->IsSink()) { - if (count < max_control_outputs) { - control_outputs[count] = ToOperation(edge->dst()); - } - ++count; - } - } - return count; -} - -TF_AttrMetadata TF_OperationGetAttrMetadata(TF_Operation* oper, - const char* attr_name, - TF_Status* status) { - TF_AttrMetadata metadata; - const auto* attr = GetAttrValue(oper, attr_name, status); - if (!status->status.ok()) return metadata; - switch (attr->value_case()) { -#define SINGLE_CASE(kK, attr_type, size_expr) \ - case tensorflow::AttrValue::kK: \ - metadata.is_list = 0; \ - metadata.list_size = -1; \ - metadata.type = attr_type; \ - metadata.total_size = size_expr; \ - break; - - SINGLE_CASE(kS, TF_ATTR_STRING, attr->s().length()); - SINGLE_CASE(kI, TF_ATTR_INT, -1); - SINGLE_CASE(kF, TF_ATTR_FLOAT, -1); - SINGLE_CASE(kB, TF_ATTR_BOOL, -1); - SINGLE_CASE(kType, TF_ATTR_TYPE, -1); - SINGLE_CASE(kShape, TF_ATTR_SHAPE, - attr->shape().unknown_rank() ? -1 : attr->shape().dim_size()); - SINGLE_CASE(kTensor, TF_ATTR_TENSOR, -1); -#undef SINGLE_CASE - - case tensorflow::AttrValue::kList: - metadata.is_list = 1; - metadata.list_size = 0; - metadata.total_size = -1; -#define LIST_CASE(field, attr_type, ...) \ - if (attr->list().field##_size() > 0) { \ - metadata.type = attr_type; \ - metadata.list_size = attr->list().field##_size(); \ - __VA_ARGS__; \ - break; \ - } - - LIST_CASE( - s, TF_ATTR_STRING, metadata.total_size = 0; - for (int i = 0; i < attr->list().s_size(); - ++i) { metadata.total_size += attr->list().s(i).size(); }); - LIST_CASE(i, TF_ATTR_INT); - LIST_CASE(f, TF_ATTR_FLOAT); - LIST_CASE(b, TF_ATTR_BOOL); - LIST_CASE(type, TF_ATTR_TYPE); - LIST_CASE( - shape, TF_ATTR_SHAPE, metadata.total_size = 0; - for (int i = 0; i < attr->list().shape_size(); ++i) { - const auto& s = attr->list().shape(i); - metadata.total_size += s.unknown_rank() ? 0 : s.dim_size(); - }); - LIST_CASE(tensor, TF_ATTR_TENSOR); - LIST_CASE(tensor, TF_ATTR_FUNC); -#undef LIST_CASE - // All lists empty, determine the type from the OpDef. 
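For reference, a minimal caller-side sketch (not part of the patch) of the count-then-fill pattern that the consumer accessors above expect: query the count first, then pass a buffer of at least that size. `PrintConsumers` is a hypothetical helper and assumes `op` already belongs to a graph.

#include <stdio.h>
#include <stdlib.h>

#include "tensorflow/c/c_api.h"

/* Print every consumer of output 0 of `op`. Count first, then fill a
 * buffer of that size; clamp in case the graph grew in between. */
static void PrintConsumers(TF_Operation* op) {
  TF_Output out = {op, 0};
  int n = TF_OperationOutputNumConsumers(out);
  if (n <= 0) return;
  TF_Input* consumers = (TF_Input*)malloc(n * sizeof(TF_Input));
  int filled = TF_OperationOutputConsumers(out, consumers, n);
  if (filled > n) filled = n;
  for (int i = 0; i < filled; ++i) {
    printf("%s:0 is consumed by %s at input %d\n", TF_OperationName(op),
           TF_OperationName(consumers[i].oper), consumers[i].index);
  }
  free(consumers);
}

Clamping to `n` guards against the documented case where concurrent graph edits add consumers between the two calls.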
- if (metadata.list_size == 0) { - for (int i = 0; i < oper->node.op_def().attr_size(); ++i) { - const auto& a = oper->node.op_def().attr(i); - if (a.name() != attr_name) continue; - const string& typestr = a.type(); - if (typestr == "list(string)") { - metadata.type = TF_ATTR_STRING; - } else if (typestr == "list(int)") { - metadata.type = TF_ATTR_INT; - } else if (typestr == "list(float)") { - metadata.type = TF_ATTR_FLOAT; - } else if (typestr == "list(bool)") { - metadata.type = TF_ATTR_BOOL; - } else if (typestr == "list(type)") { - metadata.type = TF_ATTR_TYPE; - } else if (typestr == "list(shape)") { - metadata.type = TF_ATTR_SHAPE; - } else if (typestr == "list(tensor)") { - metadata.type = TF_ATTR_TENSOR; - } else if (typestr == "list(func)") { - metadata.type = TF_ATTR_FUNC; - } else { - status->status = InvalidArgument( - "Attribute '", attr_name, - "' has an empty value of an unrecognized type '", typestr, "'"); - return metadata; - } - } - } - break; - - case tensorflow::AttrValue::kPlaceholder: - metadata.is_list = 0; - metadata.list_size = -1; - metadata.type = TF_ATTR_PLACEHOLDER; - metadata.total_size = -1; - break; - - case tensorflow::AttrValue::kFunc: - metadata.is_list = 0; - metadata.list_size = -1; - metadata.type = TF_ATTR_FUNC; - metadata.total_size = -1; - break; - - case tensorflow::AttrValue::VALUE_NOT_SET: - status->status = - InvalidArgument("Attribute '", attr_name, "' has no value set"); - break; - } - return metadata; -} - -void TF_OperationGetAttrString(TF_Operation* oper, const char* attr_name, - void* value, size_t max_length, - TF_Status* status) { - const auto* attr = GetAttrValue(oper, attr_name, status); - if (!status->status.ok()) return; - if (attr->value_case() != tensorflow::AttrValue::kS) { - status->status = - InvalidArgument("Attribute '", attr_name, "' is not a string"); - return; - } - if (max_length <= 0) { - return; - } - const auto& s = attr->s(); - std::memcpy(value, s.data(), std::min(s.length(), max_length)); -} - -void TF_OperationGetAttrStringList(TF_Operation* oper, const char* attr_name, - void** values, size_t* lengths, - int max_values, void* storage, - size_t storage_size, TF_Status* status) { - const auto* attr = GetAttrValue(oper, attr_name, status); - if (!status->status.ok()) return; - if (attr->value_case() != tensorflow::AttrValue::kList) { - status->status = - InvalidArgument("Value for '", attr_name, "' is not a list"); - return; - } - const auto len = std::min(max_values, attr->list().s_size()); - char* p = static_cast(storage); - for (int i = 0; i < len; ++i) { - const string& s = attr->list().s(i); - values[i] = p; - lengths[i] = s.size(); - if ((p + s.size()) > (static_cast(storage) + storage_size)) { - status->status = InvalidArgument( - "Not enough storage to hold the requested list of strings"); - return; - } - memcpy(values[i], s.data(), s.size()); - p += s.size(); - } -} - -#define DEFINE_GETATTR(func, c_type, cpp_type, list_field) \ - void func(TF_Operation* oper, const char* attr_name, c_type* value, \ - TF_Status* status) { \ - cpp_type v; \ - status->status = \ - tensorflow::GetNodeAttr(oper->node.attrs(), attr_name, &v); \ - *value = static_cast(v); \ - } \ - void func##List(TF_Operation* oper, const char* attr_name, c_type* values, \ - int max_values, TF_Status* status) { \ - const auto* attr = GetAttrValue(oper, attr_name, status); \ - if (!status->status.ok()) return; \ - if (attr->value_case() != tensorflow::AttrValue::kList) { \ - status->status = \ - InvalidArgument("Value for '", attr_name, "' is not 
a list."); \ - return; \ - } \ - const auto len = std::min(max_values, attr->list().list_field##_size()); \ - for (int i = 0; i < len; ++i) { \ - values[i] = static_cast(attr->list().list_field(i)); \ - } \ - } -DEFINE_GETATTR(TF_OperationGetAttrInt, int64_t, tensorflow::int64, i); -DEFINE_GETATTR(TF_OperationGetAttrFloat, float, float, f); -DEFINE_GETATTR(TF_OperationGetAttrBool, unsigned char, bool, b); -DEFINE_GETATTR(TF_OperationGetAttrType, TF_DataType, DataType, type); -#undef DEFINE_GETATTR - -void TF_OperationGetAttrShape(TF_Operation* oper, const char* attr_name, - int64_t* value, int num_dims, TF_Status* status) { - PartialTensorShape shape; - status->status = - tensorflow::GetNodeAttr(oper->node.attrs(), attr_name, &shape); - if (!status->status.ok()) return; - auto len = std::min(shape.dims(), num_dims); - for (int i = 0; i < len; ++i) { - value[i] = shape.dim_size(i); - } -} - -void TF_OperationGetAttrShapeList(TF_Operation* oper, const char* attr_name, - int64_t** dims, int* num_dims, int num_shapes, - int64_t* storage, int storage_size, - TF_Status* status) { - std::vector shapes; - status->status = - tensorflow::GetNodeAttr(oper->node.attrs(), attr_name, &shapes); - if (!status->status.ok()) return; - auto len = std::min(static_cast(shapes.size()), num_shapes); - int64_t* p = storage; - int storage_left = storage_size; - for (int i = 0; i < len; ++i) { - // shapes[i].dims() == -1 for shapes with an unknown rank. - int64_t n = shapes[i].dims(); - num_dims[i] = n; - dims[i] = p; - if (n < 0) { - continue; - } - if (storage_left < n) { - status->status = InvalidArgument( - "Not enough storage to hold the requested list of shapes"); - return; - } - storage_left -= n; - for (int j = 0; j < n; ++j, ++p) { - *p = shapes[i].dim_size(j); - } - } -} - -void TF_OperationGetAttrTensorShapeProto(TF_Operation* oper, - const char* attr_name, - TF_Buffer* value, TF_Status* status) { - const auto* attr = GetAttrValue(oper, attr_name, status); - if (!status->status.ok()) return; - if (attr->value_case() != tensorflow::AttrValue::kShape) { - status->status = - InvalidArgument("Value for '", attr_name, "' is not a shape."); - return; - } - status->status = MessageToBuffer(attr->shape(), value); -} - -void TF_OperationGetAttrTensorShapeProtoList(TF_Operation* oper, - const char* attr_name, - TF_Buffer** values, int max_values, - TF_Status* status) { - const auto* attr = GetAttrValue(oper, attr_name, status); - if (!status->status.ok()) return; - if (attr->value_case() != tensorflow::AttrValue::kList) { - status->status = - InvalidArgument("Value for '", attr_name, "' is not a list"); - return; - } - const auto len = std::min(max_values, attr->list().shape_size()); - for (int i = 0; i < len; ++i) { - values[i] = TF_NewBuffer(); - status->status = MessageToBuffer(attr->list().shape(i), values[i]); - if (!status->status.ok()) { - // Delete everything allocated to far, the operation has failed. 
- for (int j = 0; j <= i; ++j) { - TF_DeleteBuffer(values[j]); - } - return; - } - } -} - -void TF_OperationGetAttrTensor(TF_Operation* oper, const char* attr_name, - TF_Tensor** value, TF_Status* status) { - *value = nullptr; - Tensor t; - status->status = tensorflow::GetNodeAttr(oper->node.attrs(), attr_name, &t); - if (!status->status.ok()) return; - *value = TF_TensorFromTensor(t, &status->status); -} - -void TF_OperationGetAttrTensorList(TF_Operation* oper, const char* attr_name, - TF_Tensor** values, int max_values, - TF_Status* status) { - std::vector ts; - status->status = tensorflow::GetNodeAttr(oper->node.attrs(), attr_name, &ts); - if (!status->status.ok()) return; - const auto len = std::min(max_values, static_cast(ts.size())); - for (int i = 0; i < len; ++i) { - values[i] = TF_TensorFromTensor(ts[i], &status->status); - } -} - -void TF_OperationGetAttrValueProto(TF_Operation* oper, const char* attr_name, - TF_Buffer* output_attr_value, - TF_Status* status) { - const auto* attr = GetAttrValue(oper, attr_name, status); - if (!status->status.ok()) return; - status->status = MessageToBuffer(*attr, output_attr_value); -} - -void TF_OperationToNodeDef(TF_Operation* oper, TF_Buffer* output_node_def, - TF_Status* status) { - status->status = MessageToBuffer(oper->node.def(), output_node_def); -} - -// TF_Graph functions --------------------------------------------------------- - -TF_Graph::TF_Graph() - : graph(tensorflow::OpRegistry::Global()), - refiner(graph.versions().producer(), graph.op_registry()), - delete_requested(false), - parent(nullptr), - parent_inputs(nullptr) { - // Tell the shape refiner to also run shape inference on functions. - refiner.set_function_library_for_shape_inference(&graph.flib_def()); -} - -TF_Graph* TF_NewGraph() { return new TF_Graph; } - -void TF_DeleteGraph(TF_Graph* g) { - if (g == nullptr) return; - g->mu.lock(); - g->delete_requested = true; - const bool del = g->sessions.empty(); - g->mu.unlock(); - if (del) delete g; -} - -TF_Operation* TF_GraphOperationByName(TF_Graph* graph, const char* oper_name) { - mutex_lock l(graph->mu); - auto iter = graph->name_map.find(oper_name); - if (iter == graph->name_map.end()) { - return nullptr; - } else { - return ToOperation(iter->second); - } -} - -TF_Operation* TF_GraphNextOperation(TF_Graph* graph, size_t* pos) { - if (*pos == 0) { - // Advance past the first sentinel nodes in every graph (the source & sink). - *pos += 2; - } else { - // Advance to the next node. - *pos += 1; - } - - mutex_lock l(graph->mu); - while (*pos < static_cast(graph->graph.num_node_ids())) { - Node* node = graph->graph.FindNodeId(*pos); - // FindNodeId() returns nullptr for nodes that have been deleted. - // We aren't currently allowing nodes to be deleted, but it is safer - // to still check. - if (node != nullptr) return ToOperation(node); - *pos += 1; - } - - // No more nodes. 
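A small usage sketch (not part of the patch) of the iteration protocol implemented above: `pos` starts at zero and TF_GraphNextOperation returns NULL once the node ids are exhausted. `DumpGraph` is a hypothetical helper.

#include <stdio.h>

#include "tensorflow/c/c_api.h"

/* Visit every operation in `graph` in the order produced by
 * TF_GraphNextOperation and print its name and registered op type. */
static void DumpGraph(TF_Graph* graph) {
  size_t pos = 0;
  TF_Operation* op;
  while ((op = TF_GraphNextOperation(graph, &pos)) != NULL) {
    printf("%s (%s)\n", TF_OperationName(op), TF_OperationOpType(op));
  }
}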
- return nullptr; -} - -void TF_GraphToGraphDef(TF_Graph* graph, TF_Buffer* output_graph_def, - TF_Status* status) { - GraphDef def; - { - mutex_lock l(graph->mu); - graph->graph.ToGraphDef(&def); - } - status->status = MessageToBuffer(def, output_graph_def); -} - -void TF_GraphGetOpDef(TF_Graph* graph, const char* op_name, - TF_Buffer* output_op_def, TF_Status* status) { - const OpDef* op_def; - { - mutex_lock l(graph->mu); - status->status = graph->graph.op_registry()->LookUpOpDef(op_name, &op_def); - if (!status->status.ok()) return; - } - status->status = MessageToBuffer(*op_def, output_op_def); -} - -void TF_GraphVersions(TF_Graph* graph, TF_Buffer* output_version_def, - TF_Status* status) { - VersionDef versions; - { - mutex_lock l(graph->mu); - versions = graph->graph.versions(); - } - status->status = MessageToBuffer(versions, output_version_def); -} - -TF_ImportGraphDefOptions* TF_NewImportGraphDefOptions() { - return new TF_ImportGraphDefOptions; -} -void TF_DeleteImportGraphDefOptions(TF_ImportGraphDefOptions* opts) { - delete opts; -} -void TF_ImportGraphDefOptionsSetPrefix(TF_ImportGraphDefOptions* opts, - const char* prefix) { - opts->opts.prefix = prefix; -} -void TF_ImportGraphDefOptionsSetDefaultDevice(TF_ImportGraphDefOptions* opts, - const char* device) { - opts->opts.default_device = device; -} - -void TF_ImportGraphDefOptionsSetUniquifyNames(TF_ImportGraphDefOptions* opts, - unsigned char uniquify_names) { - opts->opts.uniquify_names = uniquify_names; -} - -void TF_ImportGraphDefOptionsSetUniquifyPrefix(TF_ImportGraphDefOptions* opts, - unsigned char uniquify_prefix) { - opts->opts.uniquify_prefix = uniquify_prefix; -} - -void TF_ImportGraphDefOptionsAddInputMapping(TF_ImportGraphDefOptions* opts, - const char* src_name, - int src_index, TF_Output dst) { - opts->tensor_id_data.push_back(src_name); - const string& src_name_str = opts->tensor_id_data.back(); - // We don't need to store dst's name in tensor_id_data, since `dst` must - // outlive the ImportGraphDef call. 
- opts->opts.input_map[TensorId(src_name_str, src_index)] = ToTensorId(dst); -} - -void TF_ImportGraphDefOptionsRemapControlDependency( - TF_ImportGraphDefOptions* opts, const char* src_name, TF_Operation* dst) { - opts->opts.input_map[TensorId(src_name, tensorflow::Graph::kControlSlot)] = - TensorId(dst->node.name(), tensorflow::Graph::kControlSlot); -} - -extern void TF_ImportGraphDefOptionsAddControlDependency( - TF_ImportGraphDefOptions* opts, TF_Operation* oper) { - opts->opts.control_dependencies.push_back(oper->node.name()); -} - -void TF_ImportGraphDefOptionsAddReturnOutput(TF_ImportGraphDefOptions* opts, - const char* oper_name, int index) { - opts->tensor_id_data.push_back(oper_name); - const string& oper_name_str = opts->tensor_id_data.back(); - opts->opts.return_tensors.emplace_back(oper_name_str, index); -} - -int TF_ImportGraphDefOptionsNumReturnOutputs( - const TF_ImportGraphDefOptions* opts) { - return opts->opts.return_tensors.size(); -} - -void TF_ImportGraphDefOptionsAddReturnOperation(TF_ImportGraphDefOptions* opts, - const char* oper_name) { - opts->opts.return_nodes.push_back(oper_name); -} - -int TF_ImportGraphDefOptionsNumReturnOperations( - const TF_ImportGraphDefOptions* opts) { - return opts->opts.return_nodes.size(); -} - -void TF_ImportGraphDefResultsReturnOutputs(TF_ImportGraphDefResults* results, - int* num_outputs, - TF_Output** outputs) { - *num_outputs = results->return_tensors.size(); - *outputs = results->return_tensors.data(); -} - -void TF_ImportGraphDefResultsReturnOperations(TF_ImportGraphDefResults* results, - int* num_opers, - TF_Operation*** opers) { - *num_opers = results->return_nodes.size(); - *opers = results->return_nodes.data(); -} - -void TF_ImportGraphDefResultsMissingUnusedInputMappings( - TF_ImportGraphDefResults* results, int* num_missing_unused_input_mappings, - const char*** src_names, int** src_indexes) { - *num_missing_unused_input_mappings = results->missing_unused_key_names.size(); - *src_names = results->missing_unused_key_names.data(); - *src_indexes = results->missing_unused_key_indexes.data(); -} - -void TF_DeleteImportGraphDefResults(TF_ImportGraphDefResults* results) { - delete results; -} - -static void GraphImportGraphDefLocked(TF_Graph* graph, const GraphDef& def, - const TF_ImportGraphDefOptions* opts, - TF_ImportGraphDefResults* tf_results, - TF_Status* status) - TF_EXCLUSIVE_LOCKS_REQUIRED(graph->mu) { - const int last_node_id = graph->graph.num_node_ids(); - tensorflow::ImportGraphDefResults results; - status->status = tensorflow::ImportGraphDef(opts->opts, def, &graph->graph, - &graph->refiner, &results); - if (!status->status.ok()) return; - - // Add new nodes to name_map - for (int i = last_node_id; i < graph->graph.num_node_ids(); ++i) { - auto* node = graph->graph.FindNodeId(i); - if (node != nullptr) graph->name_map[node->name()] = node; - } - - // Populate return_tensors - DCHECK(tf_results->return_tensors.empty()); - tf_results->return_tensors.resize(results.return_tensors.size()); - for (int i = 0; i < results.return_tensors.size(); ++i) { - tf_results->return_tensors[i].oper = - ToOperation(results.return_tensors[i].first); - tf_results->return_tensors[i].index = results.return_tensors[i].second; - } - - // Populate return_nodes - DCHECK(tf_results->return_nodes.empty()); - tf_results->return_nodes.resize(results.return_nodes.size()); - for (int i = 0; i < results.return_nodes.size(); ++i) { - tf_results->return_nodes[i] = ToOperation(results.return_nodes[i]); - } - - // Populate missing unused map keys - 
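To make the import-options plumbing above concrete, a hedged sketch (not part of the patch) that imports a serialized GraphDef under a prefix and asks for one tensor back; the node name "x" and the helper name are assumptions, not anything defined in this patch.

#include <stddef.h>

#include "tensorflow/c/c_api.h"

/* Import `graph_def` into `graph` under the prefix "imported" and return
 * the imported tensor "x:0" (a placeholder name for this sketch).
 * On failure the returned TF_Output has a NULL `oper`. */
static TF_Output ImportWithPrefix(TF_Graph* graph, const TF_Buffer* graph_def,
                                  TF_Status* status) {
  TF_Output result = {NULL, -1};
  TF_ImportGraphDefOptions* opts = TF_NewImportGraphDefOptions();
  TF_ImportGraphDefOptionsSetPrefix(opts, "imported");
  TF_ImportGraphDefOptionsAddReturnOutput(opts, "x", 0);
  TF_ImportGraphDefResults* results =
      TF_GraphImportGraphDefWithResults(graph, graph_def, opts, status);
  TF_DeleteImportGraphDefOptions(opts);
  if (TF_GetCode(status) == TF_OK) {
    int num_outputs = 0;
    TF_Output* outputs = NULL;
    TF_ImportGraphDefResultsReturnOutputs(results, &num_outputs, &outputs);
    if (num_outputs > 0) result = outputs[0];  /* copied by value */
    TF_DeleteImportGraphDefResults(results);
  }
  return result;
}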
DCHECK(tf_results->missing_unused_key_names.empty()); - DCHECK(tf_results->missing_unused_key_indexes.empty()); - DCHECK(tf_results->missing_unused_key_names_data.empty()); - - size_t size = results.missing_unused_input_map_keys.size(); - tf_results->missing_unused_key_names.resize(size); - tf_results->missing_unused_key_indexes.resize(size); - - for (int i = 0; i < size; ++i) { - TensorId id = results.missing_unused_input_map_keys[i]; - tf_results->missing_unused_key_names_data.emplace_back(id.first); - tf_results->missing_unused_key_names[i] = - tf_results->missing_unused_key_names_data.back().c_str(); - tf_results->missing_unused_key_indexes[i] = id.second; - } -} - -TF_ImportGraphDefResults* TF_GraphImportGraphDefWithResults( - TF_Graph* graph, const TF_Buffer* graph_def, - const TF_ImportGraphDefOptions* options, TF_Status* status) { - GraphDef def; - if (!tensorflow::ParseProtoUnlimited(&def, graph_def->data, - graph_def->length)) { - status->status = InvalidArgument("Invalid GraphDef"); - return nullptr; - } - auto results = new TF_ImportGraphDefResults(); - mutex_lock l(graph->mu); - GraphImportGraphDefLocked(graph, def, options, results, status); - if (!status->status.ok()) { - delete results; - return nullptr; - } - return results; -} - -void TF_GraphImportGraphDefWithReturnOutputs( - TF_Graph* graph, const TF_Buffer* graph_def, - const TF_ImportGraphDefOptions* options, TF_Output* return_outputs, - int num_return_outputs, TF_Status* status) { - if (num_return_outputs != options->opts.return_tensors.size()) { - status->status = InvalidArgument("Expected 'num_return_outputs' to be ", - options->opts.return_tensors.size(), - ", got ", num_return_outputs); - return; - } - if (num_return_outputs > 0 && return_outputs == nullptr) { - status->status = InvalidArgument( - "'return_outputs' must be preallocated to length ", num_return_outputs); - return; - } - GraphDef def; - if (!tensorflow::ParseProtoUnlimited(&def, graph_def->data, - graph_def->length)) { - status->status = InvalidArgument("Invalid GraphDef"); - return; - } - TF_ImportGraphDefResults results; - mutex_lock l(graph->mu); - GraphImportGraphDefLocked(graph, def, options, &results, status); - DCHECK_EQ(results.return_tensors.size(), num_return_outputs); - memcpy(return_outputs, results.return_tensors.data(), - num_return_outputs * sizeof(TF_Output)); -} - -void TF_GraphImportGraphDef(TF_Graph* graph, const TF_Buffer* graph_def, - const TF_ImportGraphDefOptions* options, - TF_Status* status) { - TF_ImportGraphDefResults* results = - TF_GraphImportGraphDefWithResults(graph, graph_def, options, status); - TF_DeleteImportGraphDefResults(results); -} - -// TF_Session functions ---------------------------------------------- - -TF_Session::TF_Session(tensorflow::Session* s, TF_Graph* g) - : session(s), graph(g), last_num_graph_nodes(0), extend_before_run(true) {} - -TF_Session* TF_NewSession(TF_Graph* graph, const TF_SessionOptions* opt, - TF_Status* status) { - Session* session; - status->status = NewSession(opt->options, &session); - if (status->status.ok()) { - TF_Session* new_session = new TF_Session(session, graph); - if (graph != nullptr) { - mutex_lock l(graph->mu); - graph->sessions[new_session] = ""; - } - return new_session; - } else { - DCHECK_EQ(nullptr, session); - return nullptr; - } -} - -TF_Session* TF_LoadSessionFromSavedModel( - const TF_SessionOptions* session_options, const TF_Buffer* run_options, - const char* export_dir, const char* const* tags, int tags_len, - TF_Graph* graph, TF_Buffer* meta_graph_def, 
TF_Status* status) { -// TODO(sjr): Remove the IS_MOBILE_PLATFORM guard. This will require ensuring -// that the tensorflow/cc/saved_model:loader build target is mobile friendly. -#if defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) - status->status = tensorflow::errors::Unimplemented( - "Loading a SavedModel is not supported on mobile. File a bug at " - "https://github.com/tensorflow/tensorflow/issues if this feature is " - "important to you"); - return nullptr; -#else - mutex_lock l(graph->mu); - if (!graph->name_map.empty()) { - status->status = InvalidArgument("Graph is non-empty."); - return nullptr; - } - - RunOptions run_options_proto; - if (run_options != nullptr && !run_options_proto.ParseFromArray( - run_options->data, run_options->length)) { - status->status = InvalidArgument("Unparseable RunOptions proto"); - return nullptr; - } - - std::unordered_set tag_set; - for (int i = 0; i < tags_len; i++) { - tag_set.insert(string(tags[i])); - } - - tensorflow::SavedModelBundle bundle; - status->status = - tensorflow::LoadSavedModel(session_options->options, run_options_proto, - export_dir, tag_set, &bundle); - if (!status->status.ok()) return nullptr; - - // Create a TF_Graph from the MetaGraphDef. This is safe as long as Session - // extends using GraphDefs. The Graph instance is different, but equivalent - // to the one used to create the session. - // - // TODO(jhseu): When Session is modified to take Graphs instead of - // GraphDefs, return the Graph generated in LoadSavedModel(). - TF_ImportGraphDefOptions* import_opts = TF_NewImportGraphDefOptions(); - TF_ImportGraphDefResults results; - GraphImportGraphDefLocked(graph, bundle.meta_graph_def.graph_def(), - import_opts, &results, status); - TF_DeleteImportGraphDefOptions(import_opts); - if (!status->status.ok()) return nullptr; - - if (meta_graph_def != nullptr) { - status->status = MessageToBuffer(bundle.meta_graph_def, meta_graph_def); - if (!status->status.ok()) return nullptr; - } - - TF_Session* session = new TF_Session(bundle.session.release(), graph); - - graph->sessions[session] = ""; - session->last_num_graph_nodes = graph->graph.num_node_ids(); - return session; -#endif // defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) -} - -void TF_CloseSession(TF_Session* s, TF_Status* status) { - status->status = s->session->Close(); -} - -void TF_DeleteSession(TF_Session* s, TF_Status* status) { - status->status = Status::OK(); - if (s == nullptr) return; - TF_Graph* const graph = s->graph; - if (graph != nullptr) { - graph->mu.lock(); - graph->sessions.erase(s); - const bool del = graph->delete_requested && graph->sessions.empty(); - graph->mu.unlock(); - if (del) delete graph; - } - delete s->session; - delete s; -} - -void TF_SessionRun(TF_Session* session, const TF_Buffer* run_options, - const TF_Output* inputs, TF_Tensor* const* input_values, - int ninputs, const TF_Output* outputs, - TF_Tensor** output_values, int noutputs, - const TF_Operation* const* target_opers, int ntargets, - TF_Buffer* run_metadata, TF_Status* status) { - // TODO(josh11b,mrry): Change Session to be able to use a Graph* - // directly, instead of requiring us to serialize to a GraphDef and - // call Session::Extend(). - if (session->extend_before_run && - !ExtendSessionGraphHelper(session, status)) { - return; - } - - TF_Run_Setup(noutputs, output_values, status); - - // Convert from TF_Output and TF_Tensor to a string and Tensor. 
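A minimal sketch (not part of the patch) of driving TF_LoadSessionFromSavedModel from C; the export directory and the "serve" tag are placeholders, and passing NULL skips both RunOptions and the MetaGraphDef output, which the implementation above allows.

#include <stddef.h>

#include "tensorflow/c/c_api.h"

/* Load a SavedModel exported with the "serve" tag. The directory is a
 * placeholder; NULL skips RunOptions and the MetaGraphDef output. */
static TF_Session* LoadServingModel(TF_Graph* graph, TF_Status* status) {
  const char* const tags[] = {"serve"};
  TF_SessionOptions* opts = TF_NewSessionOptions();
  TF_Session* session = TF_LoadSessionFromSavedModel(
      opts, /*run_options=*/NULL, "/tmp/saved_model_dir", tags, 1, graph,
      /*meta_graph_def=*/NULL, status);
  TF_DeleteSessionOptions(opts);
  return session;  /* NULL on failure; `status` holds the error. */
}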
-  std::vector<std::pair<string, Tensor>> input_pairs(ninputs);
-  if (!TF_Run_Inputs(input_values, &input_pairs, status)) return;
-  for (int i = 0; i < ninputs; ++i) {
-    input_pairs[i].first = OutputName(inputs[i]);
-  }
-
-  // Convert from TF_Output to string names.
-  std::vector<string> output_names(noutputs);
-  for (int i = 0; i < noutputs; ++i) {
-    output_names[i] = OutputName(outputs[i]);
-  }
-
-  // Convert from TF_Operation* to string names.
-  std::vector<string> target_names(ntargets);
-  for (int i = 0; i < ntargets; ++i) {
-    target_names[i] = target_opers[i]->node.name();
-  }
-
-  // Actually run.
-  TF_Run_Helper(session->session, nullptr, run_options, input_pairs,
-                output_names, output_values, target_names, run_metadata,
-                status);
-}
-
-void TF_SessionPRunSetup(TF_Session* session, const TF_Output* inputs,
-                         int ninputs, const TF_Output* outputs, int noutputs,
-                         const TF_Operation* const* target_opers, int ntargets,
-                         const char** handle, TF_Status* status) {
-  *handle = nullptr;
-
-  if (session->extend_before_run &&
-      !ExtendSessionGraphHelper(session, status)) {
-    return;
-  }
-
-  std::vector<string> input_names(ninputs);
-  for (int i = 0; i < ninputs; ++i) {
-    input_names[i] = OutputName(inputs[i]);
-  }
-
-  std::vector<string> output_names(noutputs);
-  for (int i = 0; i < noutputs; ++i) {
-    output_names[i] = OutputName(outputs[i]);
-  }
-
-  std::vector<string> target_names(ntargets);
-  for (int i = 0; i < ntargets; ++i) {
-    target_names[i] = target_opers[i]->node.name();
-  }
-
-  string new_handle;
-  status->status = session->session->PRunSetup(input_names, output_names,
-                                               target_names, &new_handle);
-  if (status->status.ok()) {
-    char* buf = new char[new_handle.size() + 1];
-    memcpy(buf, new_handle.c_str(), new_handle.size() + 1);
-    *handle = buf;
-  }
-}
-
-void TF_DeletePRunHandle(const char* handle) {
-  delete[] handle;
-  // TODO(suharshs): Free up any resources held by the partial run state.
-}
-
-void TF_SessionPRun(TF_Session* session, const char* handle,
-                    const TF_Output* inputs, TF_Tensor* const* input_values,
-                    int ninputs, const TF_Output* outputs,
-                    TF_Tensor** output_values, int noutputs,
-                    const TF_Operation* const* target_opers, int ntargets,
-                    TF_Status* status) {
-  // TODO(josh11b,mrry): Change Session to be able to use a Graph*
-  // directly, instead of requiring us to serialize to a GraphDef and
-  // call Session::Extend().
-  if (session->extend_before_run &&
-      !ExtendSessionGraphHelper(session, status)) {
-    return;
-  }
-
-  TF_Run_Setup(noutputs, output_values, status);
-
-  // Convert from TF_Output and TF_Tensor to a string and Tensor.
-  std::vector<std::pair<string, Tensor>> input_pairs(ninputs);
-  if (!TF_Run_Inputs(input_values, &input_pairs, status)) return;
-  for (int i = 0; i < ninputs; ++i) {
-    input_pairs[i].first = OutputName(inputs[i]);
-  }
-
-  // Convert from TF_Output to string names.
-  std::vector<string> output_names(noutputs);
-  for (int i = 0; i < noutputs; ++i) {
-    output_names[i] = OutputName(outputs[i]);
-  }
-
-  // Convert from TF_Operation* to string names.
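For completeness, a hedged sketch (not part of the patch) of a single TF_SessionRun step with one feed and one fetch; the operation names "x" and "y" are placeholders and `input_tensor` is assumed to be created elsewhere.

#include <stddef.h>

#include "tensorflow/c/c_api.h"

/* Feed `input_tensor` into "x:0", fetch "y:0", and return the fetched
 * tensor (owned by the caller). The names are placeholders for whatever
 * the graph actually contains. */
static TF_Tensor* RunOnce(TF_Session* session, TF_Graph* graph,
                          TF_Tensor* input_tensor, TF_Status* status) {
  TF_Output feed = {TF_GraphOperationByName(graph, "x"), 0};
  TF_Output fetch = {TF_GraphOperationByName(graph, "y"), 0};
  TF_Tensor* output = NULL;
  if (feed.oper == NULL || fetch.oper == NULL) return NULL;
  TF_SessionRun(session, /*run_options=*/NULL,
                /*inputs=*/&feed, &input_tensor, 1,
                /*outputs=*/&fetch, &output, 1,
                /*target_opers=*/NULL, 0,
                /*run_metadata=*/NULL, status);
  return output;  /* Delete with TF_DeleteTensor when done. */
}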
- std::vector target_names(ntargets); - for (int i = 0; i < ntargets; ++i) { - target_names[i] = target_opers[i]->node.name(); - } - - TF_Run_Helper(session->session, handle, nullptr, input_pairs, output_names, - output_values, target_names, nullptr, status); -} - -unsigned char TF_TryEvaluateConstant(TF_Graph* graph, TF_Output output, - TF_Tensor** result, TF_Status* status) { - *result = nullptr; - mutex_lock l(graph->mu); - OutputTensor tensor(&output.oper->node, output.index); - bool evaluated; - Tensor result_tensor; - status->status = EvaluateConstantTensor( - tensor, graph->refiner, *graph->graph.op_registry(), - graph->graph.versions().producer(), &evaluated, &result_tensor); - if (evaluated) { - DCHECK(status->status.ok()); - *result = TF_TensorFromTensor(result_tensor, &status->status); - if (!status->status.ok()) evaluated = false; - } - return evaluated; -} - -TF_ApiDefMap* TF_NewApiDefMap(TF_Buffer* op_list_buffer, TF_Status* status) { - tensorflow::OpList op_list; - if (!op_list.ParseFromArray(op_list_buffer->data, op_list_buffer->length)) { - status->status = InvalidArgument("Unparseable OpList"); - return nullptr; - } - status->status = Status::OK(); - return new TF_ApiDefMap(op_list); -} - -void TF_DeleteApiDefMap(TF_ApiDefMap* apimap) { delete apimap; } - -void TF_ApiDefMapPut(TF_ApiDefMap* api_def_map, const char* text, - size_t text_len, TF_Status* status) { -#if defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) - status->status = tensorflow::errors::Unimplemented( - "ApiDefMap is not supported on mobile."); -#else - mutex_lock l(api_def_map->lock); - if (api_def_map->update_docs_called) { - status->status = FailedPrecondition( - "TF_ApiDefMapPut cannot be called after TF_ApiDefMapGet has been " - "called."); - return; - } - string api_def_text(text, text_len); - status->status = api_def_map->api_def_map.LoadApiDef(api_def_text); -#endif // defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) -} - -TF_Buffer* TF_ApiDefMapGet(TF_ApiDefMap* api_def_map, const char* name, - size_t name_len, TF_Status* status) { -#if defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) - status->status = tensorflow::errors::Unimplemented( - "ApiDefMap is not supported on mobile."); - return nullptr; -#else - mutex_lock l(api_def_map->lock); - if (!api_def_map->update_docs_called) { - api_def_map->api_def_map.UpdateDocs(); - api_def_map->update_docs_called = true; - } - string name_str(name, name_len); - const auto* api_def = api_def_map->api_def_map.GetApiDef(name_str); - if (api_def == nullptr) { - return nullptr; - } - - TF_Buffer* ret = TF_NewBuffer(); - status->status = MessageToBuffer(*api_def, ret); - if (!status->status.ok()) { - TF_DeleteBuffer(ret); - return nullptr; - } - return ret; -#endif // defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) -} - -TF_Buffer* TF_GetAllRegisteredKernels(TF_Status* status) { - tensorflow::KernelList kernel_list = tensorflow::GetAllRegisteredKernels(); - TF_Buffer* ret = TF_NewBuffer(); - status->status = MessageToBuffer(kernel_list, ret); - if (!status->status.ok()) { - TF_DeleteBuffer(ret); - return nullptr; - } - return ret; -} - -TF_Buffer* TF_GetRegisteredKernelsForOp(const char* name, TF_Status* status) { - tensorflow::KernelList kernel_list = - tensorflow::GetRegisteredKernelsForOp(name); - TF_Buffer* ret = TF_NewBuffer(); - status->status = MessageToBuffer(kernel_list, ret); - if (!status->status.ok()) { - TF_DeleteBuffer(ret); - return nullptr; - } - return ret; -} - -// TF_Server functions 
---------------------------------------------- - -#if !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD) -TF_Server::TF_Server(std::unique_ptr server) - : target(server->target()), server(std::move(server)) {} -#endif // !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD) - -TF_Server* TF_NewServer(const void* proto, size_t proto_len, - TF_Status* status) { -#if defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) - status->status = tensorflow::errors::Unimplemented( - "Server functionality is not supported on mobile"); - return nullptr; -#else - tensorflow::ServerDef server_def; - if (!server_def.ParseFromArray(proto, static_cast(proto_len))) { - status->status = InvalidArgument( - "Could not parse provided bytes into a ServerDef protocol buffer"); - return nullptr; - } - - std::unique_ptr out_server; - status->status = tensorflow::NewServer(server_def, &out_server); - if (!status->status.ok()) return nullptr; - - return new TF_Server(std::move(out_server)); -#endif // defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) -} - -void TF_ServerStart(TF_Server* server, TF_Status* status) { -#if defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) - status->status = tensorflow::errors::Unimplemented( - "Server functionality is not supported on mobile"); -#else - status->status = server->server->Start(); -#endif // defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) -} - -void TF_ServerStop(TF_Server* server, TF_Status* status) { -#if defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) - status->status = tensorflow::errors::Unimplemented( - "Server functionality is not supported on mobile"); -#else - status->status = server->server->Stop(); -#endif // defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) -} - -void TF_ServerJoin(TF_Server* server, TF_Status* status) { -#if defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) - status->status = tensorflow::errors::Unimplemented( - "Server functionality is not supported on mobile"); -#else - status->status = server->server->Join(); -#endif // defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) -} - -const char* TF_ServerTarget(TF_Server* server) { -#if defined(IS_MOBILE_PLATFORM) || defined(IS_SLIM_BUILD) - return nullptr; -#else - return server->target.c_str(); -#endif -} - -void TF_DeleteServer(TF_Server* server) { -#if !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD) - delete server; -#endif // !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD) -} - -void TF_RegisterLogListener(void (*listener)(const char*)) { -#if !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD) - tensorflow::logging::RegisterListener(listener); -#endif // !defined(IS_MOBILE_PLATFORM) && !defined(IS_SLIM_BUILD) -} - -} // end extern "C" diff --git a/tensorflow/c/c_core_api.h b/tensorflow/c/c_core_api.h deleted file mode 100644 index d3b5447b717..00000000000 --- a/tensorflow/c/c_core_api.h +++ /dev/null @@ -1,1456 +0,0 @@ -/* Copyright 2015 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TENSORFLOW_C_C_CORE_API_H_ -#define TENSORFLOW_C_C_CORE_API_H_ - -#include -#include - -#include "tensorflow/c/tf_attrtype.h" -#include "tensorflow/c/tf_datatype.h" -#include "tensorflow/c/tf_status.h" -#include "tensorflow/c/tf_tensor.h" - -// -------------------------------------------------------------------------- -// C API for TensorFlow. -// -// The API leans towards simplicity and uniformity instead of convenience -// since most usage will be by language specific wrappers. -// -// Conventions: -// * We use the prefix TF_ for everything in the API. -// * Objects are always passed around as pointers to opaque structs -// and these structs are allocated/deallocated via the API. -// * TF_Status holds error information. It is an object type -// and therefore is passed around as a pointer to an opaque -// struct as mentioned above. -// * Every call that has a TF_Status* argument clears it on success -// and fills it with error info on failure. -// * unsigned char is used for booleans (instead of the 'bool' type). -// In C++ bool is a keyword while in C99 bool is a macro defined -// in stdbool.h. It is possible for the two to be inconsistent. -// For example, neither the C99 nor the C++11 standard force a byte -// size on the bool type, so the macro defined in stdbool.h could -// be inconsistent with the bool keyword in C++. Thus, the use -// of stdbool.h is avoided and unsigned char is used instead. -// * size_t is used to represent byte sizes of objects that are -// materialized in the address space of the calling process. -// * int is used as an index into arrays. -// * Deletion functions are safe to call on nullptr. -// -// Questions left to address: -// * Might at some point need a way for callers to provide their own Env. -// * Maybe add TF_TensorShape that encapsulates dimension info. -// -// Design decisions made: -// * Backing store for tensor memory has an associated deallocation -// function. This deallocation function will point to client code -// for tensors populated by the client. So the client can do things -// like shadowing a numpy array. -// * We do not provide TF_OK since it is not strictly necessary and we -// are not optimizing for convenience. -// * We make assumption that one session has one graph. This should be -// fine since we have the ability to run sub-graphs. -// * We could allow NULL for some arguments (e.g., NULL options arg). -// However since convenience is not a primary goal, we don't do this. -// * Devices are not in this API. Instead, they are created/used internally -// and the API just provides high level controls over the number of -// devices of each type. - -// Macro to control visibility of exported symbols in the shared library (.so, -// .dylib, .dll). -// This duplicates the TF_EXPORT macro definition in -// tensorflow/core/platform/macros.h in order to keep this .h file independent -// of any other includes. -#ifdef SWIG -#define TF_CAPI_EXPORT -#else -#if defined(_WIN32) -#ifdef TF_COMPILE_LIBRARY -#define TF_CAPI_EXPORT __declspec(dllexport) -#else -#define TF_CAPI_EXPORT __declspec(dllimport) -#endif // TF_COMPILE_LIBRARY -#else -#define TF_CAPI_EXPORT __attribute__((visibility("default"))) -#endif // _WIN32 -#endif // SWIG - -#ifdef __cplusplus -extern "C" { -#endif - -// -------------------------------------------------------------------------- -// TF_Version returns a string describing version information of the -// TensorFlow library. 
TensorFlow using semantic versioning. -TF_CAPI_EXPORT extern const char* TF_Version(void); - -// -------------------------------------------------------------------------- -// TF_Buffer holds a pointer to a block of data and its associated length. -// Typically, the data consists of a serialized protocol buffer, but other data -// may also be held in a buffer. -// -// By default, TF_Buffer itself does not do any memory management of the -// pointed-to block. If need be, users of this struct should specify how to -// deallocate the block by setting the `data_deallocator` function pointer. -typedef struct TF_Buffer { - const void* data; - size_t length; - void (*data_deallocator)(void* data, size_t length); -} TF_Buffer; - -// Makes a copy of the input and sets an appropriate deallocator. Useful for -// passing in read-only, input protobufs. -TF_CAPI_EXPORT extern TF_Buffer* TF_NewBufferFromString(const void* proto, - size_t proto_len); - -// Useful for passing *out* a protobuf. -TF_CAPI_EXPORT extern TF_Buffer* TF_NewBuffer(void); - -TF_CAPI_EXPORT extern void TF_DeleteBuffer(TF_Buffer*); - -TF_CAPI_EXPORT extern TF_Buffer TF_GetBuffer(TF_Buffer* buffer); - -// -------------------------------------------------------------------------- -// TF_SessionOptions holds options that can be passed during session creation. -typedef struct TF_SessionOptions TF_SessionOptions; - -// Return a new options object. -TF_CAPI_EXPORT extern TF_SessionOptions* TF_NewSessionOptions(void); - -// Set the target in TF_SessionOptions.options. -// target can be empty, a single entry, or a comma separated list of entries. -// Each entry is in one of the following formats : -// "local" -// ip:port -// host:port -TF_CAPI_EXPORT extern void TF_SetTarget(TF_SessionOptions* options, - const char* target); - -// Set the config in TF_SessionOptions.options. -// config should be a serialized tensorflow.ConfigProto proto. -// If config was not parsed successfully as a ConfigProto, record the -// error information in *status. -TF_CAPI_EXPORT extern void TF_SetConfig(TF_SessionOptions* options, - const void* proto, size_t proto_len, - TF_Status* status); - -// Destroy an options object. -TF_CAPI_EXPORT extern void TF_DeleteSessionOptions(TF_SessionOptions*); - -// TODO(jeff,sanjay): -// - export functions to set Config fields - -// -------------------------------------------------------------------------- -// The new graph construction API, still under development. - -// Represents a computation graph. Graphs may be shared between sessions. -// Graphs are thread-safe when used as directed below. -typedef struct TF_Graph TF_Graph; - -// Return a new graph object. -TF_CAPI_EXPORT extern TF_Graph* TF_NewGraph(void); - -// Destroy an options object. Graph will be deleted once no more -// TFSession's are referencing it. -TF_CAPI_EXPORT extern void TF_DeleteGraph(TF_Graph*); - -// Operation being built. The underlying graph must outlive this. -typedef struct TF_OperationDescription TF_OperationDescription; - -// Operation that has been added to the graph. Valid until the graph is -// deleted -- in particular adding a new operation to the graph does not -// invalidate old TF_Operation* pointers. -typedef struct TF_Operation TF_Operation; - -// Represents a specific input of an operation. -typedef struct TF_Input { - TF_Operation* oper; - int index; // The index of the input within oper. -} TF_Input; - -// Represents a specific output of an operation. 
-typedef struct TF_Output { - TF_Operation* oper; - int index; // The index of the output within oper. -} TF_Output; - -// TF_Function is a grouping of operations with defined inputs and outputs. -// Once created and added to graphs, functions can be invoked by creating an -// operation whose operation type matches the function name. -typedef struct TF_Function TF_Function; - -// Function definition options. TODO(iga): Define and implement -typedef struct TF_FunctionOptions TF_FunctionOptions; - -// Sets the shape of the Tensor referenced by `output` in `graph` to -// the shape described by `dims` and `num_dims`. -// -// If the number of dimensions is unknown, `num_dims` must be set to -// -1 and `dims` can be null. If a dimension is unknown, the -// corresponding entry in the `dims` array must be -1. -// -// This does not overwrite the existing shape associated with `output`, -// but merges the input shape with the existing shape. For example, -// setting a shape of [-1, 2] with an existing shape [2, -1] would set -// a final shape of [2, 2] based on shape merging semantics. -// -// Returns an error into `status` if: -// * `output` is not in `graph`. -// * An invalid shape is being set (e.g., the shape being set -// is incompatible with the existing shape). -TF_CAPI_EXPORT extern void TF_GraphSetTensorShape(TF_Graph* graph, - TF_Output output, - const int64_t* dims, - const int num_dims, - TF_Status* status); - -// Returns the number of dimensions of the Tensor referenced by `output` -// in `graph`. -// -// If the number of dimensions in the shape is unknown, returns -1. -// -// Returns an error into `status` if: -// * `output` is not in `graph`. -TF_CAPI_EXPORT extern int TF_GraphGetTensorNumDims(TF_Graph* graph, - TF_Output output, - TF_Status* status); - -// Returns the shape of the Tensor referenced by `output` in `graph` -// into `dims`. `dims` must be an array large enough to hold `num_dims` -// entries (e.g., the return value of TF_GraphGetTensorNumDims). -// -// If the number of dimensions in the shape is unknown or the shape is -// a scalar, `dims` will remain untouched. Otherwise, each element of -// `dims` will be set corresponding to the size of the dimension. An -// unknown dimension is represented by `-1`. -// -// Returns an error into `status` if: -// * `output` is not in `graph`. -// * `num_dims` does not match the actual number of dimensions. -TF_CAPI_EXPORT extern void TF_GraphGetTensorShape(TF_Graph* graph, - TF_Output output, - int64_t* dims, int num_dims, - TF_Status* status); - -// Operation will only be added to *graph when TF_FinishOperation() is -// called (assuming TF_FinishOperation() does not return an error). -// *graph must not be deleted until after TF_FinishOperation() is -// called. -TF_CAPI_EXPORT extern TF_OperationDescription* TF_NewOperation( - TF_Graph* graph, const char* op_type, const char* oper_name); - -// Specify the device for `desc`. Defaults to empty, meaning unconstrained. -TF_CAPI_EXPORT extern void TF_SetDevice(TF_OperationDescription* desc, - const char* device); - -// The calls to TF_AddInput and TF_AddInputList must match (in number, -// order, and type) the op declaration. For example, the "Concat" op -// has registration: -// REGISTER_OP("Concat") -// .Input("concat_dim: int32") -// .Input("values: N * T") -// .Output("output: T") -// .Attr("N: int >= 2") -// .Attr("T: type"); -// that defines two inputs, "concat_dim" and "values" (in that order). 
-// You must use TF_AddInput() for the first input (since it takes a -// single tensor), and TF_AddInputList() for the second input (since -// it takes a list, even if you were to pass a list with a single -// tensor), as in: -// TF_OperationDescription* desc = TF_NewOperation(graph, "Concat", "c"); -// TF_Output concat_dim_input = {...}; -// TF_AddInput(desc, concat_dim_input); -// TF_Output values_inputs[5] = {{...}, ..., {...}}; -// TF_AddInputList(desc, values_inputs, 5); - -// For inputs that take a single tensor. -TF_CAPI_EXPORT extern void TF_AddInput(TF_OperationDescription* desc, - TF_Output input); - -// For inputs that take a list of tensors. -// inputs must point to TF_Output[num_inputs]. -TF_CAPI_EXPORT extern void TF_AddInputList(TF_OperationDescription* desc, - const TF_Output* inputs, - int num_inputs); - -// Call once per control input to `desc`. -TF_CAPI_EXPORT extern void TF_AddControlInput(TF_OperationDescription* desc, - TF_Operation* input); - -// Request that `desc` be co-located on the device where `op` -// is placed. -// -// Use of this is discouraged since the implementation of device placement is -// subject to change. Primarily intended for internal libraries -TF_CAPI_EXPORT extern void TF_ColocateWith(TF_OperationDescription* desc, - TF_Operation* op); - -// Call some TF_SetAttr*() function for every attr that is not -// inferred from an input and doesn't have a default value you wish to -// keep. - -// `value` must point to a string of length `length` bytes. -TF_CAPI_EXPORT extern void TF_SetAttrString(TF_OperationDescription* desc, - const char* attr_name, - const void* value, size_t length); -// `values` and `lengths` each must have lengths `num_values`. -// `values[i]` must point to a string of length `lengths[i]` bytes. -TF_CAPI_EXPORT extern void TF_SetAttrStringList(TF_OperationDescription* desc, - const char* attr_name, - const void* const* values, - const size_t* lengths, - int num_values); -TF_CAPI_EXPORT extern void TF_SetAttrInt(TF_OperationDescription* desc, - const char* attr_name, int64_t value); -TF_CAPI_EXPORT extern void TF_SetAttrIntList(TF_OperationDescription* desc, - const char* attr_name, - const int64_t* values, - int num_values); -TF_CAPI_EXPORT extern void TF_SetAttrFloat(TF_OperationDescription* desc, - const char* attr_name, float value); -TF_CAPI_EXPORT extern void TF_SetAttrFloatList(TF_OperationDescription* desc, - const char* attr_name, - const float* values, - int num_values); -TF_CAPI_EXPORT extern void TF_SetAttrBool(TF_OperationDescription* desc, - const char* attr_name, - unsigned char value); -TF_CAPI_EXPORT extern void TF_SetAttrBoolList(TF_OperationDescription* desc, - const char* attr_name, - const unsigned char* values, - int num_values); -TF_CAPI_EXPORT extern void TF_SetAttrType(TF_OperationDescription* desc, - const char* attr_name, - TF_DataType value); -TF_CAPI_EXPORT extern void TF_SetAttrTypeList(TF_OperationDescription* desc, - const char* attr_name, - const TF_DataType* values, - int num_values); -TF_CAPI_EXPORT extern void TF_SetAttrPlaceholder(TF_OperationDescription* desc, - const char* attr_name, - const char* placeholder); - -// Set a 'func' attribute to the specified name. -// `value` must point to a string of length `length` bytes. -TF_CAPI_EXPORT extern void TF_SetAttrFuncName(TF_OperationDescription* desc, - const char* attr_name, - const char* value, size_t length); - -// Set `num_dims` to -1 to represent "unknown rank". Otherwise, -// `dims` points to an array of length `num_dims`. 
`dims[i]` must be -// >= -1, with -1 meaning "unknown dimension". -TF_CAPI_EXPORT extern void TF_SetAttrShape(TF_OperationDescription* desc, - const char* attr_name, - const int64_t* dims, int num_dims); -// `dims` and `num_dims` must point to arrays of length `num_shapes`. -// Set `num_dims[i]` to -1 to represent "unknown rank". Otherwise, -// `dims[i]` points to an array of length `num_dims[i]`. `dims[i][j]` -// must be >= -1, with -1 meaning "unknown dimension". -TF_CAPI_EXPORT extern void TF_SetAttrShapeList(TF_OperationDescription* desc, - const char* attr_name, - const int64_t* const* dims, - const int* num_dims, - int num_shapes); -// `proto` must point to an array of `proto_len` bytes representing a -// binary-serialized TensorShapeProto. -TF_CAPI_EXPORT extern void TF_SetAttrTensorShapeProto( - TF_OperationDescription* desc, const char* attr_name, const void* proto, - size_t proto_len, TF_Status* status); -// `protos` and `proto_lens` must point to arrays of length `num_shapes`. -// `protos[i]` must point to an array of `proto_lens[i]` bytes -// representing a binary-serialized TensorShapeProto. -TF_CAPI_EXPORT extern void TF_SetAttrTensorShapeProtoList( - TF_OperationDescription* desc, const char* attr_name, - const void* const* protos, const size_t* proto_lens, int num_shapes, - TF_Status* status); - -TF_CAPI_EXPORT extern void TF_SetAttrTensor(TF_OperationDescription* desc, - const char* attr_name, - TF_Tensor* value, - TF_Status* status); -TF_CAPI_EXPORT extern void TF_SetAttrTensorList(TF_OperationDescription* desc, - const char* attr_name, - TF_Tensor* const* values, - int num_values, - TF_Status* status); - -// `proto` should point to a sequence of bytes of length `proto_len` -// representing a binary serialization of an AttrValue protocol -// buffer. -TF_CAPI_EXPORT extern void TF_SetAttrValueProto(TF_OperationDescription* desc, - const char* attr_name, - const void* proto, - size_t proto_len, - TF_Status* status); - -// If this function succeeds: -// * *status is set to an OK value, -// * a TF_Operation is added to the graph, -// * a non-null value pointing to the added operation is returned -- -// this value is valid until the underlying graph is deleted. -// Otherwise: -// * *status is set to a non-OK value, -// * the graph is not modified, -// * a null value is returned. -// In either case, it deletes `desc`. -TF_CAPI_EXPORT extern TF_Operation* TF_FinishOperation( - TF_OperationDescription* desc, TF_Status* status); - -// TF_Operation functions. Operations are immutable once created, so -// these are all query functions. 
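Before the query functions below, a minimal sketch (not part of the patch) of the construction workflow described above: TF_NewOperation, then inputs and attributes, then TF_FinishOperation. The op names come from the standard op registry; the helper itself is hypothetical.

#include <stddef.h>

#include "tensorflow/c/c_api.h"

/* Add a float Placeholder "x" and an Identity "y" that consumes it.
 * Placeholder needs its "dtype" attribute set; Identity's "T" attribute
 * is inferred from the input, so no TF_SetAttr*() call is needed. */
static TF_Operation* AddIdentityOnPlaceholder(TF_Graph* graph,
                                              TF_Status* status) {
  TF_OperationDescription* desc = TF_NewOperation(graph, "Placeholder", "x");
  TF_SetAttrType(desc, "dtype", TF_FLOAT);
  TF_Operation* x = TF_FinishOperation(desc, status);
  if (TF_GetCode(status) != TF_OK) return NULL;

  desc = TF_NewOperation(graph, "Identity", "y");
  TF_Output x0 = {x, 0};
  TF_AddInput(desc, x0);
  return TF_FinishOperation(desc, status);
}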
- -TF_CAPI_EXPORT extern const char* TF_OperationName(TF_Operation* oper); -TF_CAPI_EXPORT extern const char* TF_OperationOpType(TF_Operation* oper); -TF_CAPI_EXPORT extern const char* TF_OperationDevice(TF_Operation* oper); - -TF_CAPI_EXPORT extern int TF_OperationNumOutputs(TF_Operation* oper); -TF_CAPI_EXPORT extern TF_DataType TF_OperationOutputType(TF_Output oper_out); -TF_CAPI_EXPORT extern int TF_OperationOutputListLength(TF_Operation* oper, - const char* arg_name, - TF_Status* status); - -TF_CAPI_EXPORT extern int TF_OperationNumInputs(TF_Operation* oper); -TF_CAPI_EXPORT extern TF_DataType TF_OperationInputType(TF_Input oper_in); -TF_CAPI_EXPORT extern int TF_OperationInputListLength(TF_Operation* oper, - const char* arg_name, - TF_Status* status); - -// In this code: -// TF_Output producer = TF_OperationInput(consumer); -// There is an edge from producer.oper's output (given by -// producer.index) to consumer.oper's input (given by consumer.index). -TF_CAPI_EXPORT extern TF_Output TF_OperationInput(TF_Input oper_in); - -// Get list of all inputs of a specific operation. `inputs` must point to -// an array of length at least `max_inputs` (ideally set to -// TF_OperationNumInputs(oper)). Beware that a concurrent -// modification of the graph can increase the number of inputs of -// an operation. -TF_CAPI_EXPORT extern void TF_OperationAllInputs(TF_Operation* oper, - TF_Output* inputs, - int max_inputs); - -// Get the number of current consumers of a specific output of an -// operation. Note that this number can change when new operations -// are added to the graph. -TF_CAPI_EXPORT extern int TF_OperationOutputNumConsumers(TF_Output oper_out); - -// Get list of all current consumers of a specific output of an -// operation. `consumers` must point to an array of length at least -// `max_consumers` (ideally set to -// TF_OperationOutputNumConsumers(oper_out)). Beware that a concurrent -// modification of the graph can increase the number of consumers of -// an operation. Returns the number of output consumers (should match -// TF_OperationOutputNumConsumers(oper_out)). -TF_CAPI_EXPORT extern int TF_OperationOutputConsumers(TF_Output oper_out, - TF_Input* consumers, - int max_consumers); - -// Get the number of control inputs to an operation. -TF_CAPI_EXPORT extern int TF_OperationNumControlInputs(TF_Operation* oper); - -// Get list of all control inputs to an operation. `control_inputs` must -// point to an array of length `max_control_inputs` (ideally set to -// TF_OperationNumControlInputs(oper)). Returns the number of control -// inputs (should match TF_OperationNumControlInputs(oper)). -TF_CAPI_EXPORT extern int TF_OperationGetControlInputs( - TF_Operation* oper, TF_Operation** control_inputs, int max_control_inputs); - -// Get the number of operations that have `*oper` as a control input. -// Note that this number can change when new operations are added to -// the graph. -TF_CAPI_EXPORT extern int TF_OperationNumControlOutputs(TF_Operation* oper); - -// Get the list of operations that have `*oper` as a control input. -// `control_outputs` must point to an array of length at least -// `max_control_outputs` (ideally set to -// TF_OperationNumControlOutputs(oper)). Beware that a concurrent -// modification of the graph can increase the number of control -// outputs. Returns the number of control outputs (should match -// TF_OperationNumControlOutputs(oper)). 
-TF_CAPI_EXPORT extern int TF_OperationGetControlOutputs( - TF_Operation* oper, TF_Operation** control_outputs, - int max_control_outputs); - -// TF_AttrMetadata describes the value of an attribute on an operation. -typedef struct TF_AttrMetadata { - // A boolean: 1 if the attribute value is a list, 0 otherwise. - unsigned char is_list; - - // Length of the list if is_list is true. Undefined otherwise. - int64_t list_size; - - // Type of elements of the list if is_list != 0. - // Type of the single value stored in the attribute if is_list == 0. - TF_AttrType type; - - // Total size the attribute value. - // The units of total_size depend on is_list and type. - // (1) If type == TF_ATTR_STRING and is_list == 0 - // then total_size is the byte size of the string - // valued attribute. - // (2) If type == TF_ATTR_STRING and is_list == 1 - // then total_size is the cumulative byte size - // of all the strings in the list. - // (3) If type == TF_ATTR_SHAPE and is_list == 0 - // then total_size is the number of dimensions - // of the shape valued attribute, or -1 - // if its rank is unknown. - // (4) If type == TF_ATTR_SHAPE and is_list == 1 - // then total_size is the cumulative number - // of dimensions of all shapes in the list. - // (5) Otherwise, total_size is undefined. - int64_t total_size; -} TF_AttrMetadata; - -// Returns metadata about the value of the attribute `attr_name` of `oper`. -TF_CAPI_EXPORT extern TF_AttrMetadata TF_OperationGetAttrMetadata( - TF_Operation* oper, const char* attr_name, TF_Status* status); - -// Fills in `value` with the value of the attribute `attr_name`. `value` must -// point to an array of length at least `max_length` (ideally set to -// TF_AttrMetadata.total_size from TF_OperationGetAttrMetadata(oper, -// attr_name)). -TF_CAPI_EXPORT extern void TF_OperationGetAttrString(TF_Operation* oper, - const char* attr_name, - void* value, - size_t max_length, - TF_Status* status); - -// Get the list of strings in the value of the attribute `attr_name`. Fills in -// `values` and `lengths`, each of which must point to an array of length at -// least `max_values`. -// -// The elements of values will point to addresses in `storage` which must be at -// least `storage_size` bytes in length. Ideally, max_values would be set to -// TF_AttrMetadata.list_size and `storage` would be at least -// TF_AttrMetadata.total_size, obtained from TF_OperationGetAttrMetadata(oper, -// attr_name). -// -// Fails if storage_size is too small to hold the requested number of strings. -TF_CAPI_EXPORT extern void TF_OperationGetAttrStringList( - TF_Operation* oper, const char* attr_name, void** values, size_t* lengths, - int max_values, void* storage, size_t storage_size, TF_Status* status); - -TF_CAPI_EXPORT extern void TF_OperationGetAttrInt(TF_Operation* oper, - const char* attr_name, - int64_t* value, - TF_Status* status); - -// Fills in `values` with the value of the attribute `attr_name` of `oper`. -// `values` must point to an array of length at least `max_values` (ideally set -// TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper, -// attr_name)). -TF_CAPI_EXPORT extern void TF_OperationGetAttrIntList(TF_Operation* oper, - const char* attr_name, - int64_t* values, - int max_values, - TF_Status* status); - -TF_CAPI_EXPORT extern void TF_OperationGetAttrFloat(TF_Operation* oper, - const char* attr_name, - float* value, - TF_Status* status); - -// Fills in `values` with the value of the attribute `attr_name` of `oper`. 
-// `values` must point to an array of length at least `max_values` (ideally set -// to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper, -// attr_name)). -TF_CAPI_EXPORT extern void TF_OperationGetAttrFloatList(TF_Operation* oper, - const char* attr_name, - float* values, - int max_values, - TF_Status* status); - -TF_CAPI_EXPORT extern void TF_OperationGetAttrBool(TF_Operation* oper, - const char* attr_name, - unsigned char* value, - TF_Status* status); - -// Fills in `values` with the value of the attribute `attr_name` of `oper`. -// `values` must point to an array of length at least `max_values` (ideally set -// to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper, -// attr_name)). -TF_CAPI_EXPORT extern void TF_OperationGetAttrBoolList(TF_Operation* oper, - const char* attr_name, - unsigned char* values, - int max_values, - TF_Status* status); - -TF_CAPI_EXPORT extern void TF_OperationGetAttrType(TF_Operation* oper, - const char* attr_name, - TF_DataType* value, - TF_Status* status); - -// Fills in `values` with the value of the attribute `attr_name` of `oper`. -// `values` must point to an array of length at least `max_values` (ideally set -// to TF_AttrMetadata.list_size from TF_OperationGetAttrMetadata(oper, -// attr_name)). -TF_CAPI_EXPORT extern void TF_OperationGetAttrTypeList(TF_Operation* oper, - const char* attr_name, - TF_DataType* values, - int max_values, - TF_Status* status); - -// Fills in `value` with the value of the attribute `attr_name` of `oper`. -// `values` must point to an array of length at least `num_dims` (ideally set to -// TF_Attr_Meta.size from TF_OperationGetAttrMetadata(oper, attr_name)). -TF_CAPI_EXPORT extern void TF_OperationGetAttrShape(TF_Operation* oper, - const char* attr_name, - int64_t* value, - int num_dims, - TF_Status* status); - -// Fills in `dims` with the list of shapes in the attribute `attr_name` of -// `oper` and `num_dims` with the corresponding number of dimensions. On return, -// for every i where `num_dims[i]` > 0, `dims[i]` will be an array of -// `num_dims[i]` elements. A value of -1 for `num_dims[i]` indicates that the -// i-th shape in the list is unknown. -// -// The elements of `dims` will point to addresses in `storage` which must be -// large enough to hold at least `storage_size` int64_ts. Ideally, `num_shapes` -// would be set to TF_AttrMetadata.list_size and `storage_size` would be set to -// TF_AttrMetadata.total_size from TF_OperationGetAttrMetadata(oper, -// attr_name). -// -// Fails if storage_size is insufficient to hold the requested shapes. -TF_CAPI_EXPORT extern void TF_OperationGetAttrShapeList( - TF_Operation* oper, const char* attr_name, int64_t** dims, int* num_dims, - int num_shapes, int64_t* storage, int storage_size, TF_Status* status); - -// Sets `value` to the binary-serialized TensorShapeProto of the value of -// `attr_name` attribute of `oper`'. -TF_CAPI_EXPORT extern void TF_OperationGetAttrTensorShapeProto( - TF_Operation* oper, const char* attr_name, TF_Buffer* value, - TF_Status* status); - -// Fills in `values` with binary-serialized TensorShapeProto values of the -// attribute `attr_name` of `oper`. `values` must point to an array of length at -// least `num_values` (ideally set to TF_AttrMetadata.list_size from -// TF_OperationGetAttrMetadata(oper, attr_name)). 
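As a hedged illustration of the attribute getters above (not part of this patch), TF_AttrMetadata is typically used to size the output buffers before calling a getter. `attr_name` below stands in for whatever string-valued attribute the operation actually carries:

#include <stdio.h>
#include <stdlib.h>

#include "tensorflow/c/c_api.h"

/* Read a string-valued attribute of `op`, sizing the buffer from metadata. */
static void PrintStringAttr(TF_Operation* op, const char* attr_name) {
  TF_Status* status = TF_NewStatus();
  TF_AttrMetadata meta = TF_OperationGetAttrMetadata(op, attr_name, status);
  if (TF_GetCode(status) == TF_OK && !meta.is_list &&
      meta.type == TF_ATTR_STRING) {
    char* value = (char*)malloc(meta.total_size + 1);
    TF_OperationGetAttrString(op, attr_name, value, meta.total_size, status);
    if (TF_GetCode(status) == TF_OK) {
      value[meta.total_size] = '\0';
      printf("%s = %s\n", attr_name, value);
    }
    free(value);
  }
  TF_DeleteStatus(status);
}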
-TF_CAPI_EXPORT extern void TF_OperationGetAttrTensorShapeProtoList( - TF_Operation* oper, const char* attr_name, TF_Buffer** values, - int max_values, TF_Status* status); - -// Gets the TF_Tensor valued attribute of `attr_name` of `oper`. -// -// Allocates a new TF_Tensor which the caller is expected to take -// ownership of (and can deallocate using TF_DeleteTensor). -TF_CAPI_EXPORT extern void TF_OperationGetAttrTensor(TF_Operation* oper, - const char* attr_name, - TF_Tensor** value, - TF_Status* status); - -// Fills in `values` with the TF_Tensor values of the attribute `attr_name` of -// `oper`. `values` must point to an array of TF_Tensor* of length at least -// `max_values` (ideally set to TF_AttrMetadata.list_size from -// TF_OperationGetAttrMetadata(oper, attr_name)). -// -// The caller takes ownership of all the non-null TF_Tensor* entries in `values` -// (which can be deleted using TF_DeleteTensor(values[i])). -TF_CAPI_EXPORT extern void TF_OperationGetAttrTensorList(TF_Operation* oper, - const char* attr_name, - TF_Tensor** values, - int max_values, - TF_Status* status); - -// Sets `output_attr_value` to the binary-serialized AttrValue proto -// representation of the value of the `attr_name` attr of `oper`. -TF_CAPI_EXPORT extern void TF_OperationGetAttrValueProto( - TF_Operation* oper, const char* attr_name, TF_Buffer* output_attr_value, - TF_Status* status); - -// Returns the operation in the graph with `oper_name`. Returns nullptr if -// no operation found. -TF_CAPI_EXPORT extern TF_Operation* TF_GraphOperationByName( - TF_Graph* graph, const char* oper_name); - -// Iterate through the operations of a graph. To use: -// size_t pos = 0; -// TF_Operation* oper; -// while ((oper = TF_GraphNextOperation(graph, &pos)) != nullptr) { -// DoSomethingWithOperation(oper); -// } -TF_CAPI_EXPORT extern TF_Operation* TF_GraphNextOperation(TF_Graph* graph, - size_t* pos); - -// Write out a serialized representation of `graph` (as a GraphDef protocol -// message) to `output_graph_def` (allocated by TF_NewBuffer()). -// `output_graph_def`'s underlying buffer will be freed when TF_DeleteBuffer() -// is called. -// -// May fail on very large graphs in the future. -TF_CAPI_EXPORT extern void TF_GraphToGraphDef(TF_Graph* graph, - TF_Buffer* output_graph_def, - TF_Status* status); - -// Returns the serialized OpDef proto with name `op_name`, or a bad status if no -// such op exists. This can return OpDefs of functions copied into the graph. -TF_CAPI_EXPORT extern void TF_GraphGetOpDef(TF_Graph* graph, - const char* op_name, - TF_Buffer* output_op_def, - TF_Status* status); - -// Returns the serialized VersionDef proto for this graph. -TF_CAPI_EXPORT extern void TF_GraphVersions(TF_Graph* graph, - TF_Buffer* output_version_def, - TF_Status* status); - -// TF_ImportGraphDefOptions holds options that can be passed to -// TF_GraphImportGraphDef. -typedef struct TF_ImportGraphDefOptions TF_ImportGraphDefOptions; - -TF_CAPI_EXPORT extern TF_ImportGraphDefOptions* TF_NewImportGraphDefOptions( - void); -TF_CAPI_EXPORT extern void TF_DeleteImportGraphDefOptions( - TF_ImportGraphDefOptions* opts); - -// Set the prefix to be prepended to the names of nodes in `graph_def` that will -// be imported into `graph`. `prefix` is copied and has no lifetime -// requirements. -TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsSetPrefix( - TF_ImportGraphDefOptions* opts, const char* prefix); - -// Set the execution device for nodes in `graph_def`. 
-// Only applies to nodes where a device was not already explicitly specified. -// `device` is copied and has no lifetime requirements. -TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsSetDefaultDevice( - TF_ImportGraphDefOptions* opts, const char* device); - -// Set whether to uniquify imported operation names. If true, imported operation -// names will be modified if their name already exists in the graph. If false, -// conflicting names will be treated as an error. Note that this option has no -// effect if a prefix is set, since the prefix will guarantee all names are -// unique. Defaults to false. -TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsSetUniquifyNames( - TF_ImportGraphDefOptions* opts, unsigned char uniquify_names); - -// If true, the specified prefix will be modified if it already exists as an -// operation name or prefix in the graph. If false, a conflicting prefix will be -// treated as an error. This option has no effect if no prefix is specified. -TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsSetUniquifyPrefix( - TF_ImportGraphDefOptions* opts, unsigned char uniquify_prefix); - -// Set any imported nodes with input `src_name:src_index` to have that input -// replaced with `dst`. `src_name` refers to a node in the graph to be imported, -// `dst` references a node already existing in the graph being imported into. -// `src_name` is copied and has no lifetime requirements. -TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsAddInputMapping( - TF_ImportGraphDefOptions* opts, const char* src_name, int src_index, - TF_Output dst); - -// Set any imported nodes with control input `src_name` to have that input -// replaced with `dst`. `src_name` refers to a node in the graph to be imported, -// `dst` references an operation already existing in the graph being imported -// into. `src_name` is copied and has no lifetime requirements. -TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsRemapControlDependency( - TF_ImportGraphDefOptions* opts, const char* src_name, TF_Operation* dst); - -// Cause the imported graph to have a control dependency on `oper`. `oper` -// should exist in the graph being imported into. -TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsAddControlDependency( - TF_ImportGraphDefOptions* opts, TF_Operation* oper); - -// Add an output in `graph_def` to be returned via the `return_outputs` output -// parameter of TF_GraphImportGraphDef(). If the output is remapped via an input -// mapping, the corresponding existing tensor in `graph` will be returned. -// `oper_name` is copied and has no lifetime requirements. -TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsAddReturnOutput( - TF_ImportGraphDefOptions* opts, const char* oper_name, int index); - -// Returns the number of return outputs added via -// TF_ImportGraphDefOptionsAddReturnOutput(). -TF_CAPI_EXPORT extern int TF_ImportGraphDefOptionsNumReturnOutputs( - const TF_ImportGraphDefOptions* opts); - -// Add an operation in `graph_def` to be returned via the `return_opers` output -// parameter of TF_GraphImportGraphDef(). `oper_name` is copied and has no -// lifetime requirements. -TF_CAPI_EXPORT extern void TF_ImportGraphDefOptionsAddReturnOperation( - TF_ImportGraphDefOptions* opts, const char* oper_name); - -// Returns the number of return operations added via -// TF_ImportGraphDefOptionsAddReturnOperation(). 
-TF_CAPI_EXPORT extern int TF_ImportGraphDefOptionsNumReturnOperations( - const TF_ImportGraphDefOptions* opts); - -// TF_ImportGraphDefResults holds results that are generated by -// TF_GraphImportGraphDefWithResults(). -typedef struct TF_ImportGraphDefResults TF_ImportGraphDefResults; - -// Fetches the return outputs requested via -// TF_ImportGraphDefOptionsAddReturnOutput(). The number of fetched outputs is -// returned in `num_outputs`. The array of return outputs is returned in -// `outputs`. `*outputs` is owned by and has the lifetime of `results`. -TF_CAPI_EXPORT extern void TF_ImportGraphDefResultsReturnOutputs( - TF_ImportGraphDefResults* results, int* num_outputs, TF_Output** outputs); - -// Fetches the return operations requested via -// TF_ImportGraphDefOptionsAddReturnOperation(). The number of fetched -// operations is returned in `num_opers`. The array of return operations is -// returned in `opers`. `*opers` is owned by and has the lifetime of `results`. -TF_CAPI_EXPORT extern void TF_ImportGraphDefResultsReturnOperations( - TF_ImportGraphDefResults* results, int* num_opers, TF_Operation*** opers); - -// Fetches any input mappings requested via -// TF_ImportGraphDefOptionsAddInputMapping() that didn't appear in the GraphDef -// and weren't used as input to any node in the imported graph def. The number -// of fetched mappings is returned in `num_missing_unused_input_mappings`. The -// array of each mapping's source node name is returned in `src_names`, and the -// array of each mapping's source index is returned in `src_indexes`. -// -// `*src_names`, `*src_indexes`, and the memory backing each string in -// `src_names` are owned by and have the lifetime of `results`. -TF_CAPI_EXPORT extern void TF_ImportGraphDefResultsMissingUnusedInputMappings( - TF_ImportGraphDefResults* results, int* num_missing_unused_input_mappings, - const char*** src_names, int** src_indexes); - -// Deletes a results object returned by TF_GraphImportGraphDefWithResults(). -TF_CAPI_EXPORT extern void TF_DeleteImportGraphDefResults( - TF_ImportGraphDefResults* results); - -// Import the graph serialized in `graph_def` into `graph`. Returns nullptr and -// a bad status on error. Otherwise, returns a populated -// TF_ImportGraphDefResults instance. The returned instance must be deleted via -// TF_DeleteImportGraphDefResults(). -TF_CAPI_EXPORT extern TF_ImportGraphDefResults* -TF_GraphImportGraphDefWithResults(TF_Graph* graph, const TF_Buffer* graph_def, - const TF_ImportGraphDefOptions* options, - TF_Status* status); - -// Import the graph serialized in `graph_def` into `graph`. -// Convenience function for when only return outputs are needed. -// -// `num_return_outputs` must be the number of return outputs added (i.e. the -// result of TF_ImportGraphDefOptionsNumReturnOutputs()). If -// `num_return_outputs` is non-zero, `return_outputs` must be of length -// `num_return_outputs`. Otherwise it can be null. -TF_CAPI_EXPORT extern void TF_GraphImportGraphDefWithReturnOutputs( - TF_Graph* graph, const TF_Buffer* graph_def, - const TF_ImportGraphDefOptions* options, TF_Output* return_outputs, - int num_return_outputs, TF_Status* status); - -// Import the graph serialized in `graph_def` into `graph`. -// Convenience function for when no results are needed. -TF_CAPI_EXPORT extern void TF_GraphImportGraphDef( - TF_Graph* graph, const TF_Buffer* graph_def, - const TF_ImportGraphDefOptions* options, TF_Status* status); - -// Adds a copy of function `func` and optionally its gradient function `grad` -// to `g`. 
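An illustrative sketch of the GraphDef export/import path declared above (not part of this patch). The node name "some_op" is hypothetical, and `src`/`dst` are assumed to be valid graphs:

#include "tensorflow/c/c_api.h"

/* Copy `src` into `dst` under the prefix "imported" and fetch the imported
 * tensor corresponding to "some_op:0". */
static void ImportWithPrefix(TF_Graph* src, TF_Graph* dst) {
  TF_Status* status = TF_NewStatus();
  TF_Buffer* graph_def = TF_NewBuffer();
  TF_GraphToGraphDef(src, graph_def, status);

  if (TF_GetCode(status) == TF_OK) {
    TF_ImportGraphDefOptions* opts = TF_NewImportGraphDefOptions();
    TF_ImportGraphDefOptionsSetPrefix(opts, "imported");
    TF_ImportGraphDefOptionsAddReturnOutput(opts, "some_op", 0);

    TF_ImportGraphDefResults* results =
        TF_GraphImportGraphDefWithResults(dst, graph_def, opts, status);
    if (TF_GetCode(status) == TF_OK) {
      int num_outputs = 0;
      TF_Output* outputs = NULL;
      TF_ImportGraphDefResultsReturnOutputs(results, &num_outputs, &outputs);
      /* outputs[0] now refers to "imported/some_op:0" in `dst`. */
      TF_DeleteImportGraphDefResults(results);
    }
    TF_DeleteImportGraphDefOptions(opts);
  }

  TF_DeleteBuffer(graph_def);
  TF_DeleteStatus(status);
}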
Once `func`/`grad` is added to `g`, it can be called by creating -// an operation using the function's name. -// Any changes to `func`/`grad` (including deleting it) done after this method -// returns, won't affect the copy of `func`/`grad` in `g`. -// If `func` or `grad` are already in `g`, TF_GraphCopyFunction has no -// effect on them, but can establish the function->gradient relationship -// between them if `func` does not already have a gradient. If `func` already -// has a gradient different from `grad`, an error is returned. -// -// `func` must not be null. -// If `grad` is null and `func` is not in `g`, `func` is added without a -// gradient. -// If `grad` is null and `func` is in `g`, TF_GraphCopyFunction is a noop. -// `grad` must have appropriate signature as described in the doc of -// GradientDef in tensorflow/core/framework/function.proto. -// -// If successful, status is set to OK and `func` and `grad` are added to `g`. -// Otherwise, status is set to the encountered error and `g` is unmodified. -TF_CAPI_EXPORT extern void TF_GraphCopyFunction(TF_Graph* g, - const TF_Function* func, - const TF_Function* grad, - TF_Status* status); - -// Returns the number of TF_Functions registered in `g`. -TF_CAPI_EXPORT extern int TF_GraphNumFunctions(TF_Graph* g); - -// Fills in `funcs` with the TF_Function* registered in `g`. -// `funcs` must point to an array of TF_Function* of length at least -// `max_func`. In usual usage, max_func should be set to the result of -// TF_GraphNumFunctions(g). In this case, all the functions registered in -// `g` will be returned. Else, an unspecified subset. -// -// If successful, returns the number of TF_Function* successfully set in -// `funcs` and sets status to OK. The caller takes ownership of -// all the returned TF_Functions. They must be deleted with TF_DeleteFunction. -// On error, returns 0, sets status to the encountered error, and the contents -// of funcs will be undefined. -TF_CAPI_EXPORT extern int TF_GraphGetFunctions(TF_Graph* g, TF_Function** funcs, - int max_func, TF_Status* status); - -// Note: The following function may fail on very large protos in the future. - -TF_CAPI_EXPORT extern void TF_OperationToNodeDef(TF_Operation* oper, - TF_Buffer* output_node_def, - TF_Status* status); - -// Create a TF_Function from a TF_Graph -// -// Params: -// fn_body - the graph whose operations (or subset of whose operations) will be -// converted to TF_Function. -// fn_name - the name of the new TF_Function. Should match the operation -// name (OpDef.name) regexp [A-Z][A-Za-z0-9_.\\-/]*. -// If `append_hash_to_fn_name` is false, `fn_name` must be distinct -// from other function and operation names (at least those -// registered in graphs where this function will be used). -// append_hash_to_fn_name - Must be 0 or 1. If set to 1, the actual name -// of the function will be `fn_name` appended with -// '_'. -// If set to 0, the function's name will be `fn_name`. -// num_opers - `num_opers` contains the number of elements in the `opers` array -// or a special value of -1 meaning that no array is given. -// The distinction between an empty array of operations and no -// array of operations is necessary to distinguish the case of -// creating a function with no body (e.g. identity or permutation) -// and the case of creating a function whose body contains all -// the nodes in the graph (except for the automatic skipping, see -// below). -// opers - Array of operations to become the body of the function or null. 
-// - If no array is given (`num_opers` = -1), all the -// operations in `fn_body` will become part of the function -// except operations referenced in `inputs`. These operations -// must have a single output (these operations are typically -// placeholders created for the sole purpose of representing -// an input. We can relax this constraint if there are -// compelling use cases). -// - If an array is given (`num_opers` >= 0), all operations -// in it will become part of the function. In particular, no -// automatic skipping of dummy input operations is performed. -// ninputs - number of elements in `inputs` array -// inputs - array of TF_Outputs that specify the inputs to the function. -// If `ninputs` is zero (the function takes no inputs), `inputs` -// can be null. The names used for function inputs are normalized -// names of the operations (usually placeholders) pointed to by -// `inputs`. These operation names should start with a letter. -// Normalization will convert all letters to lowercase and -// non-alphanumeric characters to '_' to make resulting names match -// the "[a-z][a-z0-9_]*" pattern for operation argument names. -// `inputs` cannot contain the same tensor twice. -// noutputs - number of elements in `outputs` array -// outputs - array of TF_Outputs that specify the outputs of the function. -// If `noutputs` is zero (the function returns no outputs), `outputs` -// can be null. `outputs` can contain the same tensor more than once. -// output_names - The names of the function's outputs. `output_names` array -// must either have the same length as `outputs` -// (i.e. `noutputs`) or be null. In the former case, -// the names should match the regular expression for ArgDef -// names - "[a-z][a-z0-9_]*". In the latter case, -// names for outputs will be generated automatically. -// opts - various options for the function, e.g. XLA's inlining control. -// description - optional human-readable description of this function. -// status - Set to OK on success and an appropriate error on failure. -// -// Note that when the same TF_Output is listed as both an input and an output, -// the corresponding function's output will equal to this input, -// instead of the original node's output. -// -// Callers must also satisfy the following constraints: -// - `inputs` cannot refer to TF_Outputs within a control flow context. For -// example, one cannot use the output of "switch" node as input. -// - `inputs` and `outputs` cannot have reference types. Reference types are -// not exposed through C API and are being replaced with Resources. We support -// reference types inside function's body to support legacy code. Do not -// use them in new code. -// - Every node in the function's body must have all of its inputs (including -// control inputs). In other words, for every node in the body, each input -// must be either listed in `inputs` or must come from another node in -// the body. In particular, it is an error to have a control edge going from -// a node outside of the body into a node in the body. This applies to control -// edges going from nodes referenced in `inputs` to nodes in the body when -// the former nodes are not in the body (automatically skipped or not -// included in explicitly specified body). -// -// Returns: -// On success, a newly created TF_Function instance. It must be deleted by -// calling TF_DeleteFunction. -// -// On failure, null. 
-TF_CAPI_EXPORT extern TF_Function* TF_GraphToFunction( - const TF_Graph* fn_body, const char* fn_name, - unsigned char append_hash_to_fn_name, int num_opers, - const TF_Operation* const* opers, int ninputs, const TF_Output* inputs, - int noutputs, const TF_Output* outputs, const char* const* output_names, - const TF_FunctionOptions* opts, const char* description, TF_Status* status); - -// Similar to TF_GraphToFunction but allows specifying control outputs of the -// function. -// -// The arguments of TF_GraphToFunction have the same meaning, but the new -// arguments are as follows: -// -// ncontrol_outputs: Number of control outputs of the function. -// control_outputs: vector of TF_Operation objects to be marked as control -// outputs of the function. Operations marked as control outputs are -// guaranteed to execute. -// control_output_names: Optional. If not nullptr, vector of strings, one -// per control output, with their names to be added to the function's -// OpDef. -TF_CAPI_EXPORT extern TF_Function* TF_GraphToFunctionWithControlOutputs( - const TF_Graph* fn_body, const char* fn_name, - unsigned char append_hash_to_fn_name, int num_opers, - const TF_Operation* const* opers, int ninputs, const TF_Output* inputs, - int noutputs, const TF_Output* outputs, const char* const* output_names, - int ncontrol_outputs, const TF_Operation* const* control_outputs, - const char* const* control_output_names, const TF_FunctionOptions* opts, - const char* description, TF_Status* status); - -// Returns the name of the graph function. -// The return value points to memory that is only usable until the next -// mutation to *func. -TF_CAPI_EXPORT extern const char* TF_FunctionName(TF_Function* func); - -// Write out a serialized representation of `func` (as a FunctionDef protocol -// message) to `output_func_def` (allocated by TF_NewBuffer()). -// `output_func_def`'s underlying buffer will be freed when TF_DeleteBuffer() -// is called. -// -// May fail on very large graphs in the future. -TF_CAPI_EXPORT extern void TF_FunctionToFunctionDef(TF_Function* func, - TF_Buffer* output_func_def, - TF_Status* status); - -// Construct and return the function whose FunctionDef representation is -// serialized in `proto`. `proto_len` must equal the number of bytes -// pointed to by `proto`. -// Returns: -// On success, a newly created TF_Function instance. It must be deleted by -// calling TF_DeleteFunction. -// -// On failure, null. -TF_CAPI_EXPORT extern TF_Function* TF_FunctionImportFunctionDef( - const void* proto, size_t proto_len, TF_Status* status); - -// Sets function attribute named `attr_name` to value stored in `proto`. -// If this attribute is already set to another value, it is overridden. -// `proto` should point to a sequence of bytes of length `proto_len` -// representing a binary serialization of an AttrValue protocol -// buffer. -TF_CAPI_EXPORT extern void TF_FunctionSetAttrValueProto(TF_Function* func, - const char* attr_name, - const void* proto, - size_t proto_len, - TF_Status* status); - -// Sets `output_attr_value` to the binary-serialized AttrValue proto -// representation of the value of the `attr_name` attr of `func`. -// If `attr_name` attribute is not present, status is set to an error. -TF_CAPI_EXPORT extern void TF_FunctionGetAttrValueProto( - TF_Function* func, const char* attr_name, TF_Buffer* output_attr_value, - TF_Status* status); - -// Frees the memory used by the `func` struct. -// TF_DeleteFunction is a noop if `func` is null. 
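A sketch of wrapping part of a graph as a TF_Function with the declaration above (not part of this patch). `input` and `output` are assumed to be valid TF_Outputs in `fn_body`, for example a Placeholder and the result the function should return; the name "my_fn" is arbitrary:

#include "tensorflow/c/c_api.h"

static TF_Function* MakeFunction(TF_Graph* fn_body, TF_Output input,
                                 TF_Output output, TF_Status* status) {
  const char* output_names[1] = {"out"};
  TF_Function* func = TF_GraphToFunction(
      fn_body, "my_fn", /*append_hash_to_fn_name=*/0,
      /*num_opers=*/-1, /*opers=*/NULL,
      /*ninputs=*/1, &input,
      /*noutputs=*/1, &output, output_names,
      /*opts=*/NULL, "example function", status);
  return func;  /* NULL on failure; caller must TF_DeleteFunction on success. */
}

If needed, the resulting function could then be made callable from another graph with TF_GraphCopyFunction(other_graph, func, NULL, status).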
-// Deleting a function does not remove it from any graphs it was copied to. -TF_CAPI_EXPORT extern void TF_DeleteFunction(TF_Function* func); - -// Attempts to evaluate `output`. This will only be possible if `output` doesn't -// depend on any graph inputs (this function is safe to call if this isn't the -// case though). -// -// If the evaluation is successful, this function returns true and `output`s -// value is returned in `result`. Otherwise returns false. An error status is -// returned if something is wrong with the graph or input. Note that this may -// return false even if no error status is set. -TF_CAPI_EXPORT extern unsigned char TF_TryEvaluateConstant(TF_Graph* graph, - TF_Output output, - TF_Tensor** result, - TF_Status* status); - -// TODO(josh11b): Register OpDef, available to all operations added -// to this graph. - -// -------------------------------------------------------------------------- -// API for driving Graph execution. - -typedef struct TF_Session TF_Session; - -// Return a new execution session with the associated graph, or NULL on -// error. Does not take ownership of any input parameters. -// -// *`graph` must be a valid graph (not deleted or nullptr). `graph` will be be -// kept alive for the lifetime of the returned TF_Session. New nodes can still -// be added to `graph` after this call. -TF_CAPI_EXPORT extern TF_Session* TF_NewSession(TF_Graph* graph, - const TF_SessionOptions* opts, - TF_Status* status); - -// This function creates a new TF_Session (which is created on success) using -// `session_options`, and then initializes state (restoring tensors and other -// assets) using `run_options`. -// -// Any NULL and non-NULL value combinations for (`run_options, `meta_graph_def`) -// are valid. -// -// - `export_dir` must be set to the path of the exported SavedModel. -// - `tags` must include the set of tags used to identify one MetaGraphDef in -// the SavedModel. -// - `graph` must be a graph newly allocated with TF_NewGraph(). -// -// If successful, populates `graph` with the contents of the Graph and -// `meta_graph_def` with the MetaGraphDef of the loaded model. -TF_CAPI_EXPORT extern TF_Session* TF_LoadSessionFromSavedModel( - const TF_SessionOptions* session_options, const TF_Buffer* run_options, - const char* export_dir, const char* const* tags, int tags_len, - TF_Graph* graph, TF_Buffer* meta_graph_def, TF_Status* status); - -// Close a session. -// -// Contacts any other processes associated with the session, if applicable. -// May not be called after TF_DeleteSession(). -TF_CAPI_EXPORT extern void TF_CloseSession(TF_Session*, TF_Status* status); - -// Destroy a session object. -// -// Even if error information is recorded in *status, this call discards all -// local resources associated with the session. The session may not be used -// during or after this call (and the session drops its reference to the -// corresponding graph). -TF_CAPI_EXPORT extern void TF_DeleteSession(TF_Session*, TF_Status* status); - -// Run the graph associated with the session starting with the supplied inputs -// (inputs[0,ninputs-1] with corresponding values in input_values[0,ninputs-1]). -// -// Any NULL and non-NULL value combinations for (`run_options`, -// `run_metadata`) are valid. -// -// - `run_options` may be NULL, in which case it will be ignored; or -// non-NULL, in which case it must point to a `TF_Buffer` containing the -// serialized representation of a `RunOptions` protocol buffer. 
-// - `run_metadata` may be NULL, in which case it will be ignored; or -// non-NULL, in which case it must point to an empty, freshly allocated -// `TF_Buffer` that may be updated to contain the serialized representation -// of a `RunMetadata` protocol buffer. -// -// The caller retains ownership of `input_values` (which can be deleted using -// TF_DeleteTensor). The caller also retains ownership of `run_options` and/or -// `run_metadata` (when not NULL) and should manually call TF_DeleteBuffer on -// them. -// -// On success, the tensors corresponding to outputs[0,noutputs-1] are placed in -// output_values[]. Ownership of the elements of output_values[] is transferred -// to the caller, which must eventually call TF_DeleteTensor on them. -// -// On failure, output_values[] contains NULLs. -TF_CAPI_EXPORT extern void TF_SessionRun( - TF_Session* session, - // RunOptions - const TF_Buffer* run_options, - // Input tensors - const TF_Output* inputs, TF_Tensor* const* input_values, int ninputs, - // Output tensors - const TF_Output* outputs, TF_Tensor** output_values, int noutputs, - // Target operations - const TF_Operation* const* target_opers, int ntargets, - // RunMetadata - TF_Buffer* run_metadata, - // Output status - TF_Status*); - -// Set up the graph with the intended feeds (inputs) and fetches (outputs) for a -// sequence of partial run calls. -// -// On success, returns a handle that is used for subsequent PRun calls. The -// handle should be deleted with TF_DeletePRunHandle when it is no longer -// needed. -// -// On failure, out_status contains a tensorflow::Status with an error -// message. *handle is set to nullptr. -TF_CAPI_EXPORT extern void TF_SessionPRunSetup( - TF_Session*, - // Input names - const TF_Output* inputs, int ninputs, - // Output names - const TF_Output* outputs, int noutputs, - // Target operations - const TF_Operation* const* target_opers, int ntargets, - // Output handle - const char** handle, - // Output status - TF_Status*); - -// Continue to run the graph with additional feeds and fetches. The -// execution state is uniquely identified by the handle. -TF_CAPI_EXPORT extern void TF_SessionPRun( - TF_Session*, const char* handle, - // Input tensors - const TF_Output* inputs, TF_Tensor* const* input_values, int ninputs, - // Output tensors - const TF_Output* outputs, TF_Tensor** output_values, int noutputs, - // Target operations - const TF_Operation* const* target_opers, int ntargets, - // Output status - TF_Status*); - -// Deletes a handle allocated by TF_SessionPRunSetup. -// Once called, no more calls to TF_SessionPRun should be made. -TF_CAPI_EXPORT extern void TF_DeletePRunHandle(const char* handle); - -// -------------------------------------------------------------------------- -// The deprecated session API. Please switch to the above instead of -// TF_ExtendGraph(). This deprecated API can be removed at any time without -// notice. 
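A sketch of the TF_SessionRun calling convention documented above (not part of this patch). `feed`, `fetch`, and `feed_value` are assumed to be supplied by the caller; ownership of the returned tensor passes to the caller:

#include "tensorflow/c/c_api.h"

/* Create a session on `graph`, run one fetch, and tear the session down. */
static TF_Tensor* RunOnce(TF_Graph* graph, TF_Output feed,
                          TF_Tensor* feed_value, TF_Output fetch,
                          TF_Status* status) {
  TF_SessionOptions* opts = TF_NewSessionOptions();
  TF_Session* session = TF_NewSession(graph, opts, status);
  TF_DeleteSessionOptions(opts);
  if (TF_GetCode(status) != TF_OK) return NULL;

  TF_Tensor* result = NULL;
  TF_SessionRun(session, /*run_options=*/NULL,
                &feed, &feed_value, 1,   /* inputs */
                &fetch, &result, 1,      /* outputs */
                NULL, 0,                 /* target operations */
                /*run_metadata=*/NULL, status);

  TF_CloseSession(session, status);
  TF_DeleteSession(session, status);
  return result;  /* caller deletes with TF_DeleteTensor */
}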
- -typedef struct TF_DeprecatedSession TF_DeprecatedSession; - -TF_CAPI_EXPORT extern TF_DeprecatedSession* TF_NewDeprecatedSession( - const TF_SessionOptions*, TF_Status* status); -TF_CAPI_EXPORT extern void TF_CloseDeprecatedSession(TF_DeprecatedSession*, - TF_Status* status); -TF_CAPI_EXPORT extern void TF_DeleteDeprecatedSession(TF_DeprecatedSession*, - TF_Status* status); -TF_CAPI_EXPORT extern void TF_Reset(const TF_SessionOptions* opt, - const char** containers, int ncontainers, - TF_Status* status); -// Treat the bytes proto[0,proto_len-1] as a serialized GraphDef and -// add the nodes in that GraphDef to the graph for the session. -// -// Prefer use of TF_Session and TF_GraphImportGraphDef over this. -TF_CAPI_EXPORT extern void TF_ExtendGraph(TF_DeprecatedSession*, - const void* proto, size_t proto_len, - TF_Status*); - -// See TF_SessionRun() above. -TF_CAPI_EXPORT extern void TF_Run(TF_DeprecatedSession*, - const TF_Buffer* run_options, - const char** input_names, TF_Tensor** inputs, - int ninputs, const char** output_names, - TF_Tensor** outputs, int noutputs, - const char** target_oper_names, int ntargets, - TF_Buffer* run_metadata, TF_Status*); - -// See TF_SessionPRunSetup() above. -TF_CAPI_EXPORT extern void TF_PRunSetup(TF_DeprecatedSession*, - const char** input_names, int ninputs, - const char** output_names, int noutputs, - const char** target_oper_names, - int ntargets, const char** handle, - TF_Status*); - -// See TF_SessionPRun above. -TF_CAPI_EXPORT extern void TF_PRun(TF_DeprecatedSession*, const char* handle, - const char** input_names, TF_Tensor** inputs, - int ninputs, const char** output_names, - TF_Tensor** outputs, int noutputs, - const char** target_oper_names, int ntargets, - TF_Status*); - -typedef struct TF_DeviceList TF_DeviceList; - -// Lists all devices in a TF_Session. -// -// Caller takes ownership of the returned TF_DeviceList* which must eventually -// be freed with a call to TF_DeleteDeviceList. -TF_CAPI_EXPORT extern TF_DeviceList* TF_SessionListDevices(TF_Session* session, - TF_Status* status); - -// Lists all devices in a TF_Session. -// -// Caller takes ownership of the returned TF_DeviceList* which must eventually -// be freed with a call to TF_DeleteDeviceList. -TF_CAPI_EXPORT extern TF_DeviceList* TF_DeprecatedSessionListDevices( - TF_DeprecatedSession* session, TF_Status* status); - -// Deallocates the device list. -TF_CAPI_EXPORT extern void TF_DeleteDeviceList(TF_DeviceList* list); - -// Counts the number of elements in the device list. -TF_CAPI_EXPORT extern int TF_DeviceListCount(const TF_DeviceList* list); - -// Retrieves the full name of the device (e.g. /job:worker/replica:0/...) -// The return value will be a pointer to a null terminated string. The caller -// must not modify or delete the string. It will be deallocated upon a call to -// TF_DeleteDeviceList. -// -// If index is out of bounds, an error code will be set in the status object, -// and a null pointer will be returned. -TF_CAPI_EXPORT extern const char* TF_DeviceListName(const TF_DeviceList* list, - int index, - TF_Status* status); - -// Retrieves the type of the device at the given index. -// -// The caller must not modify or delete the string. It will be deallocated upon -// a call to TF_DeleteDeviceList. -// -// If index is out of bounds, an error code will be set in the status object, -// and a null pointer will be returned. 
-TF_CAPI_EXPORT extern const char* TF_DeviceListType(const TF_DeviceList* list, - int index, - TF_Status* status); - -// Retrieve the amount of memory associated with a given device. -// -// If index is out of bounds, an error code will be set in the status object, -// and -1 will be returned. -TF_CAPI_EXPORT extern int64_t TF_DeviceListMemoryBytes( - const TF_DeviceList* list, int index, TF_Status* status); - -// Retrieve the incarnation number of a given device. -// -// If index is out of bounds, an error code will be set in the status object, -// and 0 will be returned. -TF_CAPI_EXPORT extern uint64_t TF_DeviceListIncarnation( - const TF_DeviceList* list, int index, TF_Status* status); - -// -------------------------------------------------------------------------- -// Load plugins containing custom ops and kernels - -// TF_Library holds information about dynamically loaded TensorFlow plugins. -typedef struct TF_Library TF_Library; - -// Load the library specified by library_filename and register the ops and -// kernels present in that library. -// -// Pass "library_filename" to a platform-specific mechanism for dynamically -// loading a library. The rules for determining the exact location of the -// library are platform-specific and are not documented here. -// -// On success, place OK in status and return the newly created library handle. -// The caller owns the library handle. -// -// On failure, place an error status in status and return NULL. -TF_CAPI_EXPORT extern TF_Library* TF_LoadLibrary(const char* library_filename, - TF_Status* status); - -// Get the OpList of OpDefs defined in the library pointed by lib_handle. -// -// Returns a TF_Buffer. The memory pointed to by the result is owned by -// lib_handle. The data in the buffer will be the serialized OpList proto for -// ops defined in the library. -TF_CAPI_EXPORT extern TF_Buffer TF_GetOpList(TF_Library* lib_handle); - -// Frees the memory associated with the library handle. -// Does NOT unload the library. -TF_CAPI_EXPORT extern void TF_DeleteLibraryHandle(TF_Library* lib_handle); - -// Get the OpList of all OpDefs defined in this address space. -// Returns a TF_Buffer, ownership of which is transferred to the caller -// (and can be freed using TF_DeleteBuffer). -// -// The data in the buffer will be the serialized OpList proto for ops registered -// in this address space. -TF_CAPI_EXPORT extern TF_Buffer* TF_GetAllOpList(void); - -// TF_ApiDefMap encapsulates a collection of API definitions for an operation. -// -// This object maps the name of a TensorFlow operation to a description of the -// API to generate for it, as defined by the ApiDef protocol buffer ( -// https://www.tensorflow.org/code/tensorflow/core/framework/api_def.proto) -// -// The ApiDef messages are typically used to generate convenience wrapper -// functions for TensorFlow operations in various language bindings. -typedef struct TF_ApiDefMap TF_ApiDefMap; - -// Creates a new TF_ApiDefMap instance. -// -// Params: -// op_list_buffer - TF_Buffer instance containing serialized OpList -// protocol buffer. (See -// https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto -// for the OpList proto definition). -// status - Set to OK on success and an appropriate error on failure. -TF_CAPI_EXPORT extern TF_ApiDefMap* TF_NewApiDefMap(TF_Buffer* op_list_buffer, - TF_Status* status); - -// Deallocates a TF_ApiDefMap. -TF_CAPI_EXPORT extern void TF_DeleteApiDefMap(TF_ApiDefMap* apimap); - -// Add ApiDefs to the map. 
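An illustrative device-listing sketch using the functions above (not part of this patch); `session` is assumed to be an open TF_Session*:

#include <stdint.h>
#include <stdio.h>

#include "tensorflow/c/c_api.h"

static void PrintDevices(TF_Session* session, TF_Status* status) {
  TF_DeviceList* devices = TF_SessionListDevices(session, status);
  if (TF_GetCode(status) != TF_OK) return;
  int count = TF_DeviceListCount(devices);
  for (int i = 0; i < count; ++i) {
    const char* name = TF_DeviceListName(devices, i, status);
    const char* type = TF_DeviceListType(devices, i, status);
    int64_t bytes = TF_DeviceListMemoryBytes(devices, i, status);
    printf("%s (%s): %lld bytes\n", name, type, (long long)bytes);
  }
  TF_DeleteDeviceList(devices);
}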
-// -// `text` corresponds to a text representation of an ApiDefs protocol message. -// (https://www.tensorflow.org/code/tensorflow/core/framework/api_def.proto). -// -// The provided ApiDefs will be merged with existing ones in the map, with -// precedence given to the newly added version in case of conflicts with -// previous calls to TF_ApiDefMapPut. -TF_CAPI_EXPORT extern void TF_ApiDefMapPut(TF_ApiDefMap* api_def_map, - const char* text, size_t text_len, - TF_Status* status); - -// Returns a serialized ApiDef protocol buffer for the TensorFlow operation -// named `name`. -TF_CAPI_EXPORT extern TF_Buffer* TF_ApiDefMapGet(TF_ApiDefMap* api_def_map, - const char* name, - size_t name_len, - TF_Status* status); - -// -------------------------------------------------------------------------- -// Kernel definition information. - -// Returns a serialized KernelList protocol buffer containing KernelDefs for all -// registered kernels. -TF_CAPI_EXPORT extern TF_Buffer* TF_GetAllRegisteredKernels(TF_Status* status); - -// Returns a serialized KernelList protocol buffer containing KernelDefs for all -// kernels registered for the operation named `name`. -TF_CAPI_EXPORT extern TF_Buffer* TF_GetRegisteredKernelsForOp( - const char* name, TF_Status* status); - -// -------------------------------------------------------------------------- -// In-process TensorFlow server functionality, for use in distributed training. -// A Server instance encapsulates a set of devices and a Session target that -// can participate in distributed training. A server belongs to a cluster -// (specified by a ClusterSpec), and corresponds to a particular task in a -// named job. The server can communicate with any other server in the same -// cluster. - -// In-process TensorFlow server. -typedef struct TF_Server TF_Server; - -// Creates a new in-process TensorFlow server configured using a serialized -// ServerDef protocol buffer provided via `proto` and `proto_len`. -// -// The server will not serve any requests until TF_ServerStart is invoked. -// The server will stop serving requests once TF_ServerStop or -// TF_DeleteServer is invoked. -TF_CAPI_EXPORT extern TF_Server* TF_NewServer(const void* proto, - size_t proto_len, - TF_Status* status); - -// Starts an in-process TensorFlow server. -TF_CAPI_EXPORT extern void TF_ServerStart(TF_Server* server, TF_Status* status); - -// Stops an in-process TensorFlow server. -TF_CAPI_EXPORT extern void TF_ServerStop(TF_Server* server, TF_Status* status); - -// Blocks until the server has been successfully stopped (via TF_ServerStop or -// TF_ServerClose). -TF_CAPI_EXPORT extern void TF_ServerJoin(TF_Server* server, TF_Status* status); - -// Returns the target string that can be provided to TF_SetTarget() to connect -// a TF_Session to `server`. -// -// The returned string is valid only until TF_DeleteServer is invoked. -TF_CAPI_EXPORT extern const char* TF_ServerTarget(TF_Server* server); - -// Destroy an in-process TensorFlow server, frees memory. If server is running -// it will be stopped and joined. -TF_CAPI_EXPORT extern void TF_DeleteServer(TF_Server* server); - -// Register a listener method that processes printed messages. -// -// If any listeners are registered, the print operator will call all listeners -// with the printed messages and immediately return without writing to the -// logs. 
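A sketch of the in-process server API above (not part of this patch). `server_def` and `server_def_len` are assumed to be a binary-serialized tensorflow.ServerDef produced elsewhere:

#include <stdio.h>

#include "tensorflow/c/c_api.h"

static void RunServer(const void* server_def, size_t server_def_len) {
  TF_Status* status = TF_NewStatus();
  TF_Server* server = TF_NewServer(server_def, server_def_len, status);
  if (TF_GetCode(status) == TF_OK) {
    TF_ServerStart(server, status);
    printf("serving at %s\n", TF_ServerTarget(server));
    TF_ServerJoin(server, status);  /* blocks until the server is stopped */
    TF_DeleteServer(server);
  }
  TF_DeleteStatus(status);
}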
-TF_CAPI_EXPORT extern void TF_RegisterLogListener( - void (*listener)(const char*)); - -#ifdef __cplusplus -} /* end extern "C" */ -#endif - -#endif // TENSORFLOW_C_C_CORE_API_H_ diff --git a/tensorflow/c/eager/BUILD b/tensorflow/c/eager/BUILD index 2ec1f442780..c25cb264ce7 100644 --- a/tensorflow/c/eager/BUILD +++ b/tensorflow/c/eager/BUILD @@ -42,7 +42,7 @@ tf_cuda_library( "//conditions:default": [ "@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/container:fixed_array", - "//tensorflow/c:c_core_api", + "//tensorflow/c:c_api", "//tensorflow/c:c_api_internal", "//tensorflow/c:tf_tensor_internal", "//tensorflow/core:core_cpu", diff --git a/tensorflow/c/eager/c_api.cc b/tensorflow/c/eager/c_api.cc index 67324a441f9..96dc288f213 100644 --- a/tensorflow/c/eager/c_api.cc +++ b/tensorflow/c/eager/c_api.cc @@ -28,7 +28,7 @@ limitations under the License. #include "absl/algorithm/container.h" #include "absl/memory/memory.h" -#include "tensorflow/c/c_core_api.h" +#include "tensorflow/c/c_api.h" #include "tensorflow/c/c_api_internal.h" #include "tensorflow/c/eager/tensor_handle_interface.h" #include "tensorflow/c/tf_tensor_internal.h" diff --git a/tensorflow/c/eager/c_api.h b/tensorflow/c/eager/c_api.h index b951f45d0e1..070b3a9bb60 100644 --- a/tensorflow/c/eager/c_api.h +++ b/tensorflow/c/eager/c_api.h @@ -20,7 +20,7 @@ limitations under the License. // WARNING: Unlike tensorflow/c/c_api.h, the API here is not guaranteed to be // stable and can change without notice. -#include "tensorflow/c/c_core_api.h" +#include "tensorflow/c/c_api.h" // Macro to control visibility of exported symbols in the shared library (.so, // .dylib, .dll). diff --git a/tensorflow/tools/pip_package/BUILD b/tensorflow/tools/pip_package/BUILD index f69f79eed7a..c38d7b84a74 100644 --- a/tensorflow/tools/pip_package/BUILD +++ b/tensorflow/tools/pip_package/BUILD @@ -36,7 +36,6 @@ transitive_hdrs( "//tensorflow/cc/saved_model:loader", "//tensorflow/cc/saved_model:reader", "//tensorflow/cc/saved_model:bundle_v2", - "//tensorflow/c:c_core_api_no_xla", # WARNING: None of the C/C++ code under python/ has any API guarantees, and TF team # reserves the right to change APIs and other header-level interfaces. If your custom # op uses these headers, it may break when users upgrade their version of tensorflow. diff --git a/tensorflow/tools/pip_package/setup.py b/tensorflow/tools/pip_package/setup.py index 64a4469e0da..4dfe616263b 100644 --- a/tensorflow/tools/pip_package/setup.py +++ b/tensorflow/tools/pip_package/setup.py @@ -246,7 +246,6 @@ headers = ( list(find_files('*.proto', 'tensorflow/compiler')) + list(find_files('*.proto', 'tensorflow/core')) + list(find_files('*.proto', 'tensorflow/python')) + - list(find_files('*.h', 'tensorflow/c')) + list(find_files('*.h', 'tensorflow/cc')) + list(find_files('*.h', 'tensorflow/compiler')) + list(find_files('*.h', 'tensorflow/core')) +
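With c_core_api.h removed, callers that previously included it switch to the umbrella header, as the eager API changes in this patch do. A trivial before/after sketch for client code:

/* Before this change: */
/* #include "tensorflow/c/c_core_api.h" */

/* After this change, the same declarations come from the umbrella header: */
#include "tensorflow/c/c_api.h"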