Do not tie the eager step container to GradientTape creation/deletion.
I believe the only use that cared about the step container was TensorArray/Stack, which we no longer use in tf.function. Deleting the step container is causing memory issues for TPU compile kernels running asynchronously; those kernels don't care about the container in eager mode but reference it to fix Session-related issues. Alternatively, we could reference-count the step container, but if we can simply start removing the concept from eager, that would be even better. It is marked as an internal-only API for kernels, and tying it to GradientTape lifetime seems a bit questionable anyway. PiperOrigin-RevId: 342886741 Change-Id: I4303c77afdf8d414568e3d9c4cc3b3fbf317b7b3
This commit is contained in:
parent
7afd763939
commit
05d8bfb1bb
@ -848,10 +848,6 @@ class GradientTape(object):
|
||||
self._watch_accessed_variables = watch_accessed_variables
|
||||
self._watched_variables = ()
|
||||
self._recording = False
|
||||
self._created_eagerly = context.executing_eagerly()
|
||||
if self._created_eagerly:
|
||||
context.ensure_initialized()
|
||||
context.context().start_step()
|
||||
|
||||
def __enter__(self):
|
||||
"""Enters a context inside which operations are recorded on this tape."""
|
||||
@ -882,15 +878,6 @@ class GradientTape(object):
|
||||
tape.pop_tape(self._tape)
|
||||
self._recording = False
|
||||
|
||||
def __del__(self):
  """Closes the eager step container opened by a tape created eagerly.

  Swallows errors that can occur during interpreter shutdown, when the
  `context` module or its attributes may already have been torn down.
  """
  if not self._created_eagerly:
    return
  try:
    context.context().end_step()
  except (AttributeError, TypeError):
    # Best-effort cleanup: modules/globals may be gone at shutdown.
    pass
|
||||
|
||||
def watch(self, tensor):
|
||||
"""Ensures that `tensor` is being traced by this tape.
|
||||
|
||||
|
@ -1742,12 +1742,6 @@ class Context(object):
|
||||
"""Returns a stack of context switches."""
|
||||
return self._context_switches
|
||||
|
||||
def start_step(self):
  """Opens a new eager step container for this context (internal kernel API)."""
  handle = self._handle
  pywrap_tfe.TFE_ContextStartStep(handle)
|
||||
|
||||
def end_step(self):
  """Closes the eager step container for this context (internal kernel API)."""
  handle = self._handle
  pywrap_tfe.TFE_ContextEndStep(handle)
|
||||
|
||||
|
||||
class _EagerDeviceContext(object):
|
||||
"""Context-manager forcing placement of ops and Tensors on a device."""
|
||||
|
@ -1030,12 +1030,6 @@ PYBIND11_MODULE(_pywrap_tfe, m) {
|
||||
// Python bindings for eager-context and gradient-tape helpers.
m.def("TFE_Py_SetEagerContext", [](const py::handle& obj) {
  return tensorflow::PyoOrThrow(TFE_Py_SetEagerContext(obj.ptr()));
});
// Open the eager step container on the given context handle.
m.def("TFE_ContextStartStep", [](py::handle& ctx) {
  TFE_ContextStartStep(tensorflow::InputTFE_Context(ctx.ptr()));
});
// Close the eager step container on the given context handle.
m.def("TFE_ContextEndStep", [](py::handle& ctx) {
  TFE_ContextEndStep(tensorflow::InputTFE_Context(ctx.ptr()));
});
m.def("TFE_Py_RegisterVSpace", [](const py::handle& obj) {
  return tensorflow::PyoOrThrow(TFE_Py_RegisterVSpace(obj.ptr()));
});
|
||||
|
Loading…
Reference in New Issue
Block a user