fix the comment and change build dependency

This commit is contained in:
Zhoulong Jiang 2020-10-15 12:44:55 +00:00
parent b8a26f2d74
commit 554a5fbd72
4 changed files with 9 additions and 23 deletions

View File

@ -499,7 +499,6 @@ tf_cuda_library(
":tf_status",
":tf_status_helper",
":tf_tensor_internal",
"//tensorflow/c/experimental/stream_executor:stream_executor",
] + select({
"//tensorflow:android": [
":c_api_internal",
@ -510,6 +509,7 @@ tf_cuda_library(
":tf_tensor",
"//tensorflow/core:framework",
"//tensorflow/core:framework_lite",
"//tensorflow/c/experimental/stream_executor:stream_executor",
],
}),
)

View File

@ -1449,6 +1449,7 @@ typedef struct TF_Library TF_Library;
// On failure, place an error status in status and return NULL.
TF_CAPI_EXPORT extern TF_Library* TF_LoadLibrary(const char* library_filename,
TF_Status* status);
// Get the OpList of OpDefs defined in the library pointed by lib_handle.
//
// Returns a TF_Buffer. The memory pointed to by the result is owned by
@ -1460,24 +1461,6 @@ TF_CAPI_EXPORT extern TF_Buffer TF_GetOpList(TF_Library* lib_handle);
// Does NOT unload the library.
TF_CAPI_EXPORT extern void TF_DeleteLibraryHandle(TF_Library* lib_handle);
// Load the library specified by library_filename and register the pluggable
// device and related kernels present in that library.
//
// Pass "library_filename" to a platform-specific mechanism for dynamically
// loading a library. The rules for determining the exact location of the
// library are platform-specific and are not documented here.
//
// On success, place OK in status and return the newly created library handle.
// The caller owns the library handle.
//
// On failure, place an error status in status and return NULL.
TF_CAPI_EXPORT extern TF_Library* TF_LoadPluggableDeviceLibrary(
const char* library_filename, TF_Status* status);
// Frees the memory associated with the library handle.
// Does NOT unload the library.
TF_CAPI_EXPORT extern void TF_DeletePluggableDeviceLibraryHandle(
TF_Library* lib_handle);
// Get the OpList of all OpDefs defined in this address space.
// Returns a TF_Buffer, ownership of which is transferred to the caller
// (and can be freed using TF_DeleteBuffer).

View File

@ -1467,7 +1467,6 @@ TEST(CAPI, DeletingNullPointerIsSafe) {
EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TF_DeleteDeviceList(nullptr);
TF_DeleteLibraryHandle(nullptr);
TF_DeletePluggableDeviceLibraryHandle(nullptr);
TF_DeleteApiDefMap(nullptr);
TF_DeleteStatus(status);

View File

@ -67,7 +67,8 @@ typedef struct TF_OpKernelConstruction TF_OpKernelConstruction;
typedef struct TF_OpKernelContext TF_OpKernelContext;
// TF_InitKernel to do op/kernel registration.
// Plugin needs to implement this function to register all kernels.
// Plugin should either implement TF_InitKernel to register kernels or use
// static registration. This function should register all kernels in a plugin.
void TF_InitKernel();
// Allocates a new kernel builder and returns a pointer to it.
@ -134,8 +135,11 @@ TF_CAPI_EXPORT extern void TF_DeleteKernelBuilder(TF_KernelBuilder* builder);
// OpKernelContext routines
// TF_GetStream returns the SP_Stream available in ctx
// This function is only for pluggable device
// it will return nullptr in all other cases.
// This function returns a stream only for devices registered using the
// StreamExecutor C API
// (tensorflow/c/experimental/stream_executor/stream_executor.h). It will return
// nullptr in all other cases. Experimental: this function doesn't have
// compatibility guarantees and is subject to change at any time.
TF_CAPI_EXPORT extern SP_Stream TF_GetStream(TF_OpKernelContext* ctx);
// TF_NumInputs returns the number of inputs available in ctx.