Skip TFE_ContextAsyncWait for tfrt. In the current TF-TFRT integration, all ops are executed synchronously. We will revisit this later.

PiperOrigin-RevId: 311777624
Change-Id: I3a27805dcce53ccf572f3c500d6fd0a532b286b2
Xiao Yu 2020-05-15 12:12:51 -07:00 committed by TensorFlower Gardener
parent 0b59eaf0bf
commit d968853cc6
3 changed files with 6 additions and 3 deletions


@@ -899,9 +899,7 @@ TF_CAPI_EXPORT extern void TFE_ContextAsyncWait(TFE_Context* ctx,
 #if defined(IS_MOBILE_PLATFORM)
   status->status = tensorflow::Status::OK();
 #else  // !defined(IS_MOBILE_PLATFORM)
-  tensorflow::EagerContext* context =
-      tensorflow::ContextFromInterface(tensorflow::unwrap(ctx));
-  status->status = context->SyncExecutors();
+  status->status = tensorflow::unwrap(ctx)->AsyncWait();
 #endif  // !IS_MOBILE_PLATFORM
 }
 
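The entry point above now routes through the context interface rather than assuming an EagerContext. As an illustration only (not part of this change), a C-API caller might exercise it roughly as follows; this assumes a build that links the TensorFlow C eager API, and the actual op execution is elided.

#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/tf_status.h"

int main() {
  TF_Status* status = TF_NewStatus();
  TFE_ContextOptions* opts = TFE_NewContextOptions();
  TFE_ContextOptionsSetAsync(opts, 1);  // run ops asynchronously
  TFE_Context* ctx = TFE_NewContext(opts, status);
  // ... enqueue ops with TFE_Execute ...
  // Block until all pending nodes are finished; errors surface in `status`.
  TFE_ContextAsyncWait(ctx, status);
  int ok = TF_GetCode(status) == TF_OK;
  TFE_DeleteContext(ctx);
  TFE_DeleteContextOptions(opts);
  TF_DeleteStatus(status);
  return ok ? 0 : 1;
}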


@@ -101,6 +101,9 @@ class AbstractContextInterface {
   // Destroy the step resource container for a training step.
   virtual void EndStep() = 0;
 
+  // Block until all pending nodes are finished.
+  virtual Status AsyncWait() = 0;
+
  protected:
   virtual ~AbstractContextInterface() {}
 };
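Per the commit message, the current TF-TFRT integration executes all ops synchronously, so a TFRT-backed context can satisfy the new pure-virtual trivially. The sketch below is a self-contained illustration of that idea only; Status, AbstractContextInterface, and SynchronousContext here are stripped-down stand-ins, not the actual TFRT context implementation.

#include <iostream>

// Stand-in for tensorflow::Status, reduced to the OK case used here.
struct Status {
  static Status OK() { return Status(); }
  bool ok() const { return true; }
};

// Mirrors only the slice of AbstractContextInterface added above.
class AbstractContextInterface {
 public:
  // Block until all pending nodes are finished.
  virtual Status AsyncWait() = 0;

 protected:
  virtual ~AbstractContextInterface() {}
};

// Hypothetical context for a runtime that runs every op synchronously:
// nothing is ever pending, so waiting completes immediately.
class SynchronousContext : public AbstractContextInterface {
 public:
  Status AsyncWait() override { return Status::OK(); }
};

int main() {
  SynchronousContext ctx;
  std::cout << (ctx.AsyncWait().ok() ? "no pending work" : "error") << "\n";
  return 0;
}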


@@ -295,6 +295,8 @@ class EagerContext : public AbstractContextInterface, public core::RefCounted {
   // errors, and the error message will be combined from all executors.
   Status SyncExecutors();
 
+  Status AsyncWait() override { return SyncExecutors(); }
+
   core::RefCountPtr<KernelAndDevice> GetCachedKernel(Fprint128 cache_key);
   void AddKernelToCache(Fprint128 cache_key, KernelAndDevice* kernel);
 
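SyncExecutors() itself is outside this diff; its comment above says errors from all executors are combined into one message. The sketch below only illustrates that combining pattern with hypothetical Executor and CombineStatuses stand-ins; it is not the real EagerContext logic.

#include <iostream>
#include <string>
#include <vector>

// Minimal stand-ins; the real types live in the tensorflow namespace.
struct Status {
  bool ok;
  std::string message;
};

struct Executor {
  Status status_after_drain{true, ""};
  Status WaitForAllPendingNodes() const { return status_after_drain; }
};

// Illustrative only: wait on every executor and merge any error messages,
// mirroring the behavior the SyncExecutors() comment describes.
Status CombineStatuses(const std::vector<Executor>& executors) {
  Status combined{true, ""};
  for (const Executor& e : executors) {
    Status s = e.WaitForAllPendingNodes();
    if (!s.ok) {
      combined.ok = false;
      combined.message += s.message + "\n";
    }
  }
  return combined;
}

int main() {
  std::vector<Executor> executors(2);
  executors[1].status_after_drain = {false, "executor 1 failed"};
  Status s = CombineStatuses(executors);
  std::cout << (s.ok ? std::string("ok") : s.message) << "\n";
  return 0;
}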