Remove some stale forward compatibility dates

PiperOrigin-RevId: 283092973
Change-Id: Ia708a9c04a032e1222c7d56ad4936e263424fbdd

commit 14a0c12dc2
parent 427c708efa
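The cleanup targets branches guarded by `compat.forward_compatible(year, month, day)` (imported as `fwd_compat` in the touched files). That helper returns False until the given horizon date has passed and True afterwards, so once the date is safely in the past the gate always takes the new branch and both the check and the legacy fallback can be deleted. Below is a minimal sketch of that pattern; `new_behavior` and `old_behavior` are hypothetical stand-ins, not code from these files.

from tensorflow.python.compat import compat as fwd_compat

def new_behavior():   # hypothetical stand-in for the updated code path
  return "new"

def old_behavior():   # hypothetical stand-in for the legacy fallback
  return "old"

# Before cleanup: both branches are kept while the compatibility window is open.
if fwd_compat.forward_compatible(2019, 11, 16):
  result = new_behavior()
else:
  result = old_behavior()

# Once the horizon date has passed, forward_compatible(...) is always True,
# so the gate and the legacy branch can simply be deleted:
result = new_behavior()

The hunks below apply exactly this simplification to the call sites and drop the now-unused imports.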
@@ -34,7 +34,6 @@ from tensorflow.core.framework import attr_value_pb2
 from tensorflow.core.framework import function_pb2
 from tensorflow.python import pywrap_tensorflow
 from tensorflow.python import _pywrap_utils
-from tensorflow.python.compat import compat as fwd_compat
 from tensorflow.python.eager import backprop
 from tensorflow.python.eager import backprop_util
 from tensorflow.python.eager import context
@@ -1030,18 +1029,8 @@ class _TapeGradientFunctions(object):
     with ops.get_default_graph()._override_gradient_function(  # pylint: disable=protected-access
         {"PartitionedCall": gradient_function,
          "StatefulPartitionedCall": gradient_function}):
-      # Previously, we relyed on "_gradient_op_type" attribute to restore a
-      # function gradient in function_deserialization.py, So add a dummy
-      # value "PartitionedCallUnused" for the forward compatibility.
-      if fwd_compat.forward_compatible(2019, 11, 16):
-        forward_outputs = forward_function.call(context.context(),
-                                                forward_inputs)
-      else:
-        with ops.get_default_graph().gradient_override_map(
-            {"PartitionedCall": "PartitionedCallUnused",
-             "StatefulPartitionedCall": "PartitionedCallUnused"}):
-          forward_outputs = forward_function.call(context.context(),
-                                                  forward_inputs)
+      forward_outputs = forward_function.call(context.context(),
+                                              forward_inputs)
     py_backward, _ = self._wrap_backward_function(
         self._func_graph, backward_function, forward_outputs)
     # We will never request backward tape gradients for this operation
@@ -1703,16 +1692,7 @@ class ConcreteFunction(object):
       with ops.get_default_graph()._override_gradient_function(  # pylint: disable=protected-access
           {"PartitionedCall": self._get_gradient_function(),
            "StatefulPartitionedCall": self._get_gradient_function()}):
-        # Previously, we relyed on "_gradient_op_type" attribute to restore a
-        # function gradient in function_deserialization.py. So add a dummy
-        # value "PartitionedCallUnused" for the forward compatibility.
-        if fwd_compat.forward_compatible(2019, 11, 16):
-          flat_outputs = forward_function.call(ctx, args_with_tangents)
-        else:
-          with ops.get_default_graph().gradient_override_map(
-              {"PartitionedCall": "PartitionedCallUnused",
-               "StatefulPartitionedCall": "PartitionedCallUnused"}):
-            flat_outputs = forward_function.call(ctx, args_with_tangents)
+        flat_outputs = forward_function.call(ctx, args_with_tangents)
     forward_backward.record(flat_outputs)
     return self._build_call_outputs(flat_outputs)
 
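The two hunks above drop the pre-2019-11-16 fallback that wrapped the forward call in `Graph.gradient_override_map`, remapping `PartitionedCall`/`StatefulPartitionedCall` to a dummy `PartitionedCallUnused` gradient; only the `_override_gradient_function` path survives. For readers unfamiliar with the override map: it swaps the registered gradient of an op type, by name, for ops created inside the context. The sketch below illustrates that mechanism with a hypothetical `CustomSquareGrad` registration; it is not code from this commit.

import tensorflow as tf

@tf.RegisterGradient("CustomSquareGrad")
def _custom_square_grad(op, grad):
  # Same math as the stock Square gradient: d(x**2)/dx = 2 * x.
  return 2.0 * op.inputs[0] * grad

g = tf.Graph()
with g.as_default():
  x = tf.constant(3.0)
  # Ops of type "Square" created in this block use the gradient registered
  # under "CustomSquareGrad" instead of the default one.
  with g.gradient_override_map({"Square": "CustomSquareGrad"}):
    y = tf.square(x)
  (dy_dx,) = tf.gradients(y, x)   # -> 6.0 when evaluated in a session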
@@ -204,20 +204,14 @@ def _SumGrad(op, grad):
 
   input_shape = array_ops.shape(op.inputs[0])
 
-  if compat.forward_compatible(2019, 10, 23):
-    if not op.get_attr("keep_dims"):
-      with ops.colocate_with(input_shape):
-        # TODO(apassos) remove this once device placement for eager ops makes
-        # more sense.
-        output_shape_kept_dims = math_ops.reduced_shape(input_shape,
-                                                        op.inputs[1])
-        grad = array_ops.reshape(grad, output_shape_kept_dims)
-      return [array_ops.broadcast_to(grad, input_shape), None]
-  with ops.colocate_with(input_shape):
-    output_shape_kept_dims = math_ops.reduced_shape(input_shape, op.inputs[1])
-    tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims)
-    grad = array_ops.reshape(grad, output_shape_kept_dims)
-  return [array_ops.tile(grad, tile_scaling), None]
+  if not op.get_attr("keep_dims"):
+    with ops.colocate_with(input_shape):
+      # TODO(apassos) remove this once device placement for eager ops makes
+      # more sense.
+      output_shape_kept_dims = math_ops.reduced_shape(input_shape,
+                                                      op.inputs[1])
+      grad = array_ops.reshape(grad, output_shape_kept_dims)
+  return [array_ops.broadcast_to(grad, input_shape), None]
 
 
 def _MinOrMaxGrad(op, grad):
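With the 2019-10-23 gate expired, `_SumGrad` keeps only the `broadcast_to` formulation: the incoming gradient is reshaped to the kept-dims shape when `keep_dims` is false and then broadcast back to the input shape, replacing the older `reshape` + `tile` pair. A quick eager check of the resulting behavior, using only the public `tf.reduce_sum`/`tf.GradientTape` API (illustrative, not code from this commit):

import tensorflow as tf

x = tf.constant([[1., 2.], [3., 4.]])
with tf.GradientTape() as tape:
  tape.watch(x)
  y = tf.reduce_sum(x, axis=1)      # shape [2], keepdims=False
dy_dx = tape.gradient(y, x)
# The incoming gradient (ones, shape [2]) is reshaped to [2, 1] and
# broadcast back to the input shape [2, 2]:
print(dy_dx)                        # [[1., 1.], [1., 1.]]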
@@ -75,7 +75,6 @@ import six
 from six.moves import builtins
 from six.moves import xrange  # pylint: disable=redefined-builtin
 
-from tensorflow.python.compat import compat as fwd_compat
 from tensorflow.python.eager import context
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
@@ -1364,10 +1363,7 @@ def tensor_equals(self, other):
   g = getattr(self, "graph", None)
   if (ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions() and
       (g is None or g._building_function)):  # pylint: disable=protected-access
-    if fwd_compat.forward_compatible(2019, 9, 25):
-      return gen_math_ops.equal(self, other, incompatible_shape_error=False)
-    else:
-      return gen_math_ops.equal(self, other)
+    return gen_math_ops.equal(self, other, incompatible_shape_error=False)
   else:
     # In legacy graph mode, tensor equality is object equality
     return self is other
@@ -1378,10 +1374,7 @@ def tensor_not_equals(self, other):
   if other is None:
     return True
   if ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions():
-    if fwd_compat.forward_compatible(2019, 9, 25):
-      return gen_math_ops.not_equal(self, other, incompatible_shape_error=False)
-    else:
-      return gen_math_ops.not_equal(self, other)
+    return gen_math_ops.not_equal(self, other, incompatible_shape_error=False)
   else:
     # In legacy graph mode, tensor equality is object equality
     return self is not other
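After removing the 2019-09-25 gate, `tensor_equals` and `tensor_not_equals` always pass `incompatible_shape_error=False` to the kernels that back the element-wise `==`/`!=` operators on TF2 tensors. A small eager check of the user-visible behavior (illustrative, not code from this commit):

import tensorflow as tf

a = tf.constant([1, 2, 3])
b = tf.constant([1, 0, 3])

# In TF2 eager mode, == and != on tensors are element-wise comparisons.
print(a == b)   # tf.Tensor([ True False  True], shape=(3,), dtype=bool)
print(a != b)   # tf.Tensor([False  True False], shape=(3,), dtype=bool)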