Unexpose LossScaleGradientTape.

It doesn't support DistributionStrategy and will be re-exposed once it does. I tried to fix this in , but only made the issue worse. The problem is that when gradients are taken with respect to variables (which is the common case), the tape crashes with a very long error message if a DistributionStrategy is being used. The unit tests only took gradients with respect to constants, on the assumption that there would be no functional difference between taking gradients with respect to variables and constants.
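
For context, a minimal illustrative sketch (not part of this change) of the two cases, using a plain tf.GradientTape in TF 2.x; the variable and constant names are made up for the example:

import tensorflow as tf

v = tf.Variable(2.0)   # gradient w.r.t. a variable: the common case, and the one
c = tf.constant(2.0)   # that breaks under DistributionStrategy; the tests used constants

with tf.GradientTape() as tape:
  tape.watch(c)        # constants must be watched explicitly; variables are watched automatically
  loss = v * c
grads = tape.gradient(loss, [v, c])   # -> [2.0, 2.0]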

PiperOrigin-RevId: 285059221
Change-Id: I9ffc5d68f092f9ff3ea634b9523b67ff2bbc4bd7
Reed Wanderman-Milne 2019-12-11 14:15:13 -08:00 committed by TensorFlower Gardener
parent 7efb44a087
commit c373e83e34
3 changed files with 1 addition and 44 deletions


@@ -25,10 +25,9 @@ from tensorflow.python.ops import math_ops
 from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
 from tensorflow.python.training.experimental import loss_scale as loss_scale_module
 from tensorflow.python.util import nest
-from tensorflow.python.util.tf_export import tf_export
-@tf_export("mixed_precision.experimental.LossScaleGradientTape", v1=[])
+# TODO(reedwm): Expose this. Currently it doesn't work with DistributionStrategy
 class LossScaleGradientTape(backprop.GradientTape):
   """A gradient tape that scales losses and unscales resulting gradients.


@@ -1,38 +0,0 @@
-path: "tensorflow.mixed_precision.experimental.LossScaleGradientTape"
-tf_class {
-  is_instance: "<class \'tensorflow.python.training.experimental.loss_scaling_gradient_tape.LossScaleGradientTape\'>"
-  is_instance: "<class \'tensorflow.python.eager.backprop.GradientTape\'>"
-  is_instance: "<type \'object\'>"
-  member_method {
-    name: "__init__"
-    argspec: "args=[\'self\', \'loss_scale\', \'persistent\', \'watch_accessed_variables\'], varargs=None, keywords=None, defaults=[\'False\', \'True\'], "
-  }
-  member_method {
-    name: "batch_jacobian"
-    argspec: "args=[\'self\', \'target\', \'source\', \'unconnected_gradients\', \'parallel_iterations\', \'experimental_use_pfor\'], varargs=None, keywords=None, defaults=[\'UnconnectedGradients.NONE\', \'None\', \'True\'], "
-  }
-  member_method {
-    name: "gradient"
-    argspec: "args=[\'self\', \'target\', \'sources\', \'output_gradients\', \'unconnected_gradients\'], varargs=None, keywords=None, defaults=[\'None\', \'UnconnectedGradients.NONE\'], "
-  }
-  member_method {
-    name: "jacobian"
-    argspec: "args=[\'self\', \'target\', \'sources\', \'unconnected_gradients\', \'parallel_iterations\', \'experimental_use_pfor\'], varargs=None, keywords=None, defaults=[\'UnconnectedGradients.NONE\', \'None\', \'True\'], "
-  }
-  member_method {
-    name: "reset"
-    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
-  }
-  member_method {
-    name: "stop_recording"
-    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
-  }
-  member_method {
-    name: "watch"
-    argspec: "args=[\'self\', \'tensor\'], varargs=None, keywords=None, defaults=None"
-  }
-  member_method {
-    name: "watched_variables"
-    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
-  }
-}


@@ -12,8 +12,4 @@ tf_module {
     name: "LossScale"
     mtype: "<type \'type\'>"
   }
-  member {
-    name: "LossScaleGradientTape"
-    mtype: "<type \'type\'>"
-  }
 }
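
The net effect on the public v2 API surface, as a minimal check assuming a TensorFlow build that includes this revision:

import tensorflow as tf

# LossScale is still exposed; LossScaleGradientTape no longer is.
print(hasattr(tf.mixed_precision.experimental, "LossScale"))              # True
print(hasattr(tf.mixed_precision.experimental, "LossScaleGradientTape"))  # False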