Rename LossScale classes.

tf.train.experimental.LossScale has been renamed to tf.mixed_precision.experimental.LossScale, and similarly for FixedLossScale and DynamicLossScale.

I kept the old names, but deprecated them. I will remove them for TensorFlow 2.2.

PiperOrigin-RevId: 272339726
This commit is contained in:
Reed Wanderman-Milne 2019-10-01 18:18:43 -07:00 committed by TensorFlower Gardener
parent 496cc4a74c
commit e5ebfd4540
15 changed files with 230 additions and 23 deletions

View File

@@ -54,10 +54,10 @@ class Policy(object):
not match the computation dtype, variables will be automatically casted to the
computation dtype to avoid type errors.
Policies also have a `tf.train.experimental.LossScale` instance, which is used
by `tf.keras.Model`s to perform loss scaling. Loss scaling is only done by
Models in `Model.fit` and `Model.train_on_batch`. Layers which are not Models
ignore the loss scale.
Policies also have a `tf.mixed_precision.experimental.LossScale` instance,
which is used by `tf.keras.Model`s to perform loss scaling. Loss scaling
is only done by Models in `Model.fit` and `Model.train_on_batch`. Layers which
are not Models ignore the loss scale.
Policies are constructed by passing a string to the constructor, e.g.
`tf.keras.mixed_precision.experimental.Policy('float32')`. The string
@@ -167,11 +167,11 @@ class Policy(object):
precision training.
* 'infer' (deprecated): Infer the compute and variable dtype from the
input dtype.
loss_scale: A `tf.train.experimental.LossScale`, or a value convertible to
one such as "dynamic". Defaults to using no loss scaling unless `name`
is "mixed_float16", in which case this defaults to "dynamic". Only
`tf.keras.Model`s, not layers, use the loss scale, and it is only used
during `Model.fit` or `Model.train_on_batch`.
loss_scale: A `tf.mixed_precision.experimental.LossScale`, or a value
convertible to one such as "dynamic". Defaults to using no loss scaling
unless `name` is "mixed_float16", in which case this defaults to
"dynamic". Only `tf.keras.Model`s, not layers, use the loss scale, and
it is only used during `Model.fit` or `Model.train_on_batch`.
"""
if isinstance(name, dtypes.DType):
@@ -317,7 +317,7 @@ class Policy(object):
"""Returns the loss scale of this Policy.
Returns:
A `tf.train.experimental.LossScale`, or None.
A `tf.mixed_precision.experimental.LossScale`, or None.
"""
return self._loss_scale

View File

@@ -49,6 +49,8 @@ TENSORFLOW_API_INIT_FILES_V1 = [
"manip/__init__.py",
"math/__init__.py",
"metrics/__init__.py",
"mixed_precision/__init__.py",
"mixed_precision/experimental/__init__.py",
"mlir/__init__.py",
"mlir/experimental/__init__.py",
"nest/__init__.py",

View File

@@ -31,12 +31,15 @@ from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
@six.add_metaclass(abc.ABCMeta)
@tf_export('train.experimental.LossScale')
@deprecation.deprecated_endpoints('train.experimental.LossScale')
@tf_export('mixed_precision.experimental.LossScale',
'train.experimental.LossScale')
class LossScale(trackable.Trackable):
"""Loss scale base class.
@@ -186,7 +189,9 @@ def get_loss_scale_weights(loss_scale):
return loss_scale._weights.values() # pylint: disable=protected-access
@tf_export('train.experimental.FixedLossScale')
@deprecation.deprecated_endpoints('train.experimental.FixedLossScale')
@tf_export('mixed_precision.experimental.FixedLossScale',
'train.experimental.FixedLossScale')
class FixedLossScale(LossScale):
"""Loss scale with a fixed value.
@@ -266,7 +271,9 @@ def _assign_if_finite(var, value):
control_flow_ops.no_op)
@tf_export('train.experimental.DynamicLossScale')
@deprecation.deprecated_endpoints('train.experimental.DynamicLossScale')
@tf_export('mixed_precision.experimental.DynamicLossScale',
'train.experimental.DynamicLossScale')
class DynamicLossScale(LossScale):
"""Loss scale that dynamically adjusts itself.

View File

@@ -31,9 +31,9 @@ class LossScaleGradientTape(backprop.GradientTape):
"""A gradient tape that scales losses and unscales resulting gradients.
Operates as a normal gradient tape, but takes in a
`tf.train.experimental.LossScale` object. Losses are scaled up by some amount
before the gradients are calculated and the resulting gradients are scaled
down by the same amount.
`tf.mixed_precision.experimental.LossScale` object. Losses are scaled up by
some amount before the gradients are calculated and the resulting gradients
are scaled down by the same amount.
This has no net mathematical effect, but can be used to prevent vanishing
gradients, for example in the case of mixed precision training.
@@ -48,7 +48,7 @@ class LossScaleGradientTape(backprop.GradientTape):
Usage:
```
opt = tf.keras.optimizers.SGD(1.0)
model_loss_scale = tf.train.experimental.DynamicLossScale()
model_loss_scale = tf.mixed_precision.experimental.DynamicLossScale()
for step in training_steps:
with LossScaleGradientTape(model_loss_scale) as tape:
@@ -69,10 +69,10 @@ class LossScaleGradientTape(backprop.GradientTape):
"""Creates a new LossScaleGradientTape.
Args:
loss_scale: `tf.train.experimental.LossScale` object that
loss_scale: `tf.mixed_precision.experimental.LossScale` object that
manages what quantity to scale by. This is typically either a
FixedLossScale object with a constant scalar or a
`tf.train.experimental.DynamicLossScale` object that will
`tf.mixed_precision.experimental.DynamicLossScale` object that will
adjust the scalar appropriately if any non-finite gradients are
encountered.
persistent: Boolean controlling whether a persistent gradient tape is

View File

@@ -190,7 +190,7 @@ def enable_mixed_precision_graph_rewrite(opt, loss_scale='dynamic'):
Args:
opt: An instance of a `tf.keras.optimizers.Optimizer`.
loss_scale: Either an int/float, the string `"dynamic"`, or an instance of a
`tf.train.experimental.LossScale`. The loss scale to use. It is
`tf.mixed_precision.experimental.LossScale`. The loss scale to use. It is
recommended to keep this as its default value of `"dynamic"`, which will
adjust the scaling automatically to prevent `Inf` or `NaN` values.
@@ -317,9 +317,9 @@ def enable_mixed_precision_graph_rewrite_v1(opt, loss_scale='dynamic'):
opt: An instance of a `tf.keras.optimizers.Optimizer` or a
`tf.train.Optimizer`.
loss_scale: Either an int/float, the string `"dynamic"`, or an instance of
a `tf.train.experimental.LossScale`. The loss scale to use. It is
recommended to keep this as its default value of `"dynamic"`, which will
adjust the scaling automatically to prevent `Inf` or `NaN` values.
a `tf.mixed_precision.experimental.LossScale`. The loss scale to use. It
is recommended to keep this as its default value of `"dynamic"`, which
will adjust the scaling automatically to prevent `Inf` or `NaN` values.
Returns:
A version of `opt` that will use loss scaling to prevent underflow.

View File

@@ -0,0 +1,35 @@
path: "tensorflow.mixed_precision.experimental.DynamicLossScale"
tf_class {
is_instance: "<class \'tensorflow.python.training.experimental.loss_scale.DynamicLossScale\'>"
is_instance: "<class \'tensorflow.python.training.experimental.loss_scale.LossScale\'>"
is_instance: "<class \'tensorflow.python.training.tracking.base.Trackable\'>"
is_instance: "<type \'object\'>"
member {
name: "increment_period"
mtype: "<type \'property\'>"
}
member {
name: "initial_loss_scale"
mtype: "<type \'property\'>"
}
member {
name: "multiplier"
mtype: "<type \'property\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'initial_loss_scale\', \'increment_period\', \'multiplier\'], varargs=None, keywords=None, defaults=[\'32768\', \'2000\', \'2.0\'], "
}
member_method {
name: "from_config"
argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_config"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "update"
argspec: "args=[\'self\', \'grads\'], varargs=None, keywords=None, defaults=None"
}
}

View File

@@ -0,0 +1,23 @@
path: "tensorflow.mixed_precision.experimental.FixedLossScale"
tf_class {
is_instance: "<class \'tensorflow.python.training.experimental.loss_scale.FixedLossScale\'>"
is_instance: "<class \'tensorflow.python.training.experimental.loss_scale.LossScale\'>"
is_instance: "<class \'tensorflow.python.training.tracking.base.Trackable\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
argspec: "args=[\'self\', \'loss_scale_value\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "from_config"
argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_config"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "update"
argspec: "args=[\'self\', \'grads\'], varargs=None, keywords=None, defaults=None"
}
}

View File

@@ -0,0 +1,22 @@
path: "tensorflow.mixed_precision.experimental.LossScale"
tf_class {
is_instance: "<class \'tensorflow.python.training.experimental.loss_scale.LossScale\'>"
is_instance: "<class \'tensorflow.python.training.tracking.base.Trackable\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "from_config"
argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_config"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "update"
argspec: "args=[\'self\', \'grads\'], varargs=None, keywords=None, defaults=None"
}
}

View File

@@ -0,0 +1,15 @@
path: "tensorflow.mixed_precision.experimental"
tf_module {
member {
name: "DynamicLossScale"
mtype: "<type \'type\'>"
}
member {
name: "FixedLossScale"
mtype: "<type \'type\'>"
}
member {
name: "LossScale"
mtype: "<type \'type\'>"
}
}

View File

@@ -0,0 +1,7 @@
path: "tensorflow.mixed_precision"
tf_module {
member {
name: "experimental"
mtype: "<type \'module\'>"
}
}

View File

@@ -508,6 +508,10 @@ tf_module {
name: "metrics"
mtype: "<type \'module\'>"
}
member {
name: "mixed_precision"
mtype: "<type \'module\'>"
}
member {
name: "mlir"
mtype: "<type \'module\'>"

View File

@@ -0,0 +1,35 @@
path: "tensorflow.mixed_precision.experimental.DynamicLossScale"
tf_class {
is_instance: "<class \'tensorflow.python.training.experimental.loss_scale.DynamicLossScale\'>"
is_instance: "<class \'tensorflow.python.training.experimental.loss_scale.LossScale\'>"
is_instance: "<class \'tensorflow.python.training.tracking.base.Trackable\'>"
is_instance: "<type \'object\'>"
member {
name: "increment_period"
mtype: "<type \'property\'>"
}
member {
name: "initial_loss_scale"
mtype: "<type \'property\'>"
}
member {
name: "multiplier"
mtype: "<type \'property\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'initial_loss_scale\', \'increment_period\', \'multiplier\'], varargs=None, keywords=None, defaults=[\'32768\', \'2000\', \'2.0\'], "
}
member_method {
name: "from_config"
argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_config"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "update"
argspec: "args=[\'self\', \'grads\'], varargs=None, keywords=None, defaults=None"
}
}

View File

@@ -0,0 +1,23 @@
path: "tensorflow.mixed_precision.experimental.FixedLossScale"
tf_class {
is_instance: "<class \'tensorflow.python.training.experimental.loss_scale.FixedLossScale\'>"
is_instance: "<class \'tensorflow.python.training.experimental.loss_scale.LossScale\'>"
is_instance: "<class \'tensorflow.python.training.tracking.base.Trackable\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
argspec: "args=[\'self\', \'loss_scale_value\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "from_config"
argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_config"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "update"
argspec: "args=[\'self\', \'grads\'], varargs=None, keywords=None, defaults=None"
}
}

View File

@@ -0,0 +1,22 @@
path: "tensorflow.mixed_precision.experimental.LossScale"
tf_class {
is_instance: "<class \'tensorflow.python.training.experimental.loss_scale.LossScale\'>"
is_instance: "<class \'tensorflow.python.training.tracking.base.Trackable\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "from_config"
argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_config"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "update"
argspec: "args=[\'self\', \'grads\'], varargs=None, keywords=None, defaults=None"
}
}

View File

@@ -1,5 +1,17 @@
path: "tensorflow.mixed_precision.experimental"
tf_module {
member {
name: "DynamicLossScale"
mtype: "<type \'type\'>"
}
member {
name: "FixedLossScale"
mtype: "<type \'type\'>"
}
member {
name: "LossScale"
mtype: "<type \'type\'>"
}
member {
name: "LossScaleGradientTape"
mtype: "<type \'type\'>"