diff --git a/tensorflow/python/keras/mixed_precision/experimental/policy.py b/tensorflow/python/keras/mixed_precision/experimental/policy.py index e016adbfdda..96f09be8095 100644 --- a/tensorflow/python/keras/mixed_precision/experimental/policy.py +++ b/tensorflow/python/keras/mixed_precision/experimental/policy.py @@ -54,10 +54,10 @@ class Policy(object): not match the computation dtype, variables will be automatically casted to the computation dtype to avoid type errors. - Policies also have a `tf.train.experimental.LossScale` instance, which is used - by `tf.keras.Model`s to performance loss scaling. Loss scaling is only done by - Models in `Model.fit` and `Model.train_on_batch`. Layers which are not Models - ignore the loss scale. + Policies also have a `tf.mixed_precision.experimental.LossScale` instance, + which is used by `tf.keras.Model`s to perform loss scaling. Loss scaling + is only done by Models in `Model.fit` and `Model.train_on_batch`. Layers which + are not Models ignore the loss scale. Policies are constructed by passing a string to the constructor, e.g. `tf.keras.mixed_precision.experimental.Policy('float32')`. The string @@ -167,11 +167,11 @@ class Policy(object): precision training. * 'infer' (deprecated): Infer the compute and variable dtype from the input dtype. - loss_scale: A `tf.train.experimental.LossScale`, or a value convertible to - one such as "dynamic". Defaults to using no loss scaling unless `name` - is "mixed_float16", in which case this defaults to "dynamic". Only - `tf.keras.Model`s, not layers, use the loss scale, and it is only used - during `Model.fit` or `Model.train_on_batch`. + loss_scale: A `tf.mixed_precision.experimental.LossScale`, or a value + convertible to one such as "dynamic". Defaults to using no loss scaling + unless `name` is "mixed_float16", in which case this defaults to + "dynamic". Only `tf.keras.Model`s, not layers, use the loss scale, and + it is only used during `Model.fit` or `Model.train_on_batch`. 
""" if isinstance(name, dtypes.DType): @@ -317,7 +317,7 @@ class Policy(object): """Returns the loss scale of this Policy. Returns: - A `tf.train.experimental.LossScale`, or None. + A `tf.mixed_precision.experimental.LossScale`, or None. """ return self._loss_scale diff --git a/tensorflow/python/tools/api/generator/api_init_files_v1.bzl b/tensorflow/python/tools/api/generator/api_init_files_v1.bzl index dfd1f12c8f2..89d48625b9b 100644 --- a/tensorflow/python/tools/api/generator/api_init_files_v1.bzl +++ b/tensorflow/python/tools/api/generator/api_init_files_v1.bzl @@ -49,6 +49,8 @@ TENSORFLOW_API_INIT_FILES_V1 = [ "manip/__init__.py", "math/__init__.py", "metrics/__init__.py", + "mixed_precision/__init__.py", + "mixed_precision/experimental/__init__.py", "mlir/__init__.py", "mlir/experimental/__init__.py", "nest/__init__.py", diff --git a/tensorflow/python/training/experimental/loss_scale.py b/tensorflow/python/training/experimental/loss_scale.py index 46f52f0a955..5845c86d029 100644 --- a/tensorflow/python/training/experimental/loss_scale.py +++ b/tensorflow/python/training/experimental/loss_scale.py @@ -31,12 +31,15 @@ from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables from tensorflow.python.training.tracking import base as trackable from tensorflow.python.ops import variable_scope +from tensorflow.python.util import deprecation from tensorflow.python.util import nest from tensorflow.python.util.tf_export import tf_export @six.add_metaclass(abc.ABCMeta) -@tf_export('train.experimental.LossScale') +@deprecation.deprecated_endpoints('train.experimental.LossScale') +@tf_export('mixed_precision.experimental.LossScale', + 'train.experimental.LossScale') class LossScale(trackable.Trackable): """Loss scale base class. 
@@ -186,7 +189,9 @@ def get_loss_scale_weights(loss_scale): return loss_scale._weights.values() # pylint: disable=protected-access -@tf_export('train.experimental.FixedLossScale') +@deprecation.deprecated_endpoints('train.experimental.FixedLossScale') +@tf_export('mixed_precision.experimental.FixedLossScale', + 'train.experimental.FixedLossScale') class FixedLossScale(LossScale): """Loss scale with a fixed value. @@ -266,7 +271,9 @@ def _assign_if_finite(var, value): control_flow_ops.no_op) -@tf_export('train.experimental.DynamicLossScale') +@deprecation.deprecated_endpoints('train.experimental.DynamicLossScale') +@tf_export('mixed_precision.experimental.DynamicLossScale', + 'train.experimental.DynamicLossScale') class DynamicLossScale(LossScale): """Loss scale that dynamically adjusts itself. diff --git a/tensorflow/python/training/experimental/loss_scaling_gradient_tape.py b/tensorflow/python/training/experimental/loss_scaling_gradient_tape.py index 53e4926e407..4b75a74bc3b 100644 --- a/tensorflow/python/training/experimental/loss_scaling_gradient_tape.py +++ b/tensorflow/python/training/experimental/loss_scaling_gradient_tape.py @@ -31,9 +31,9 @@ class LossScaleGradientTape(backprop.GradientTape): """A gradient tape that scales losses and unscales resulting gradients. Operates as a normal gradient tape, but takes in a - `tf.train.experimental.LossScale` object. Losses are scaled up by some amount - before the gradients are calculated and the resulting gradients are scaled - down by the same amount. + `tf.mixed_precision.experimental.LossScale` object. Losses are scaled up by + some amount before the gradients are calculated and the resulting gradients + are scaled down by the same amount. This has no net mathematical effect, but can be used to prevent vanishing gradients, for example in the case of mixed precision training. 
@@ -48,7 +48,7 @@ class LossScaleGradientTape(backprop.GradientTape): Usage: ``` opt = tf.keras.optimizers.SGD(1.0) - model_loss_scale = tf.train.experimental.DynamicLossScale() + model_loss_scale = tf.mixed_precision.experimental.DynamicLossScale() for step in training_steps: with LossScaleGradientTape(model_loss_scale) as tape: @@ -69,10 +69,10 @@ class LossScaleGradientTape(backprop.GradientTape): """Creates a new LossScaleGradientTape. Args: - loss_scale: `tf.train.experimental.LossScale` object that + loss_scale: `tf.mixed_precision.experimental.LossScale` object that manages what quantity to scale by. This is typically either a FixedLossScale object with a constant scalar or a - `tf.train.experimental.DynamicLossScale` object that will + `tf.mixed_precision.experimental.DynamicLossScale` object that will adjust the scalar appropriately if any non-finite gradients are encountered. persistent: Boolean controlling whether a persistent gradient tape is diff --git a/tensorflow/python/training/experimental/mixed_precision.py b/tensorflow/python/training/experimental/mixed_precision.py index f2cc6c31339..29e4ec725b8 100644 --- a/tensorflow/python/training/experimental/mixed_precision.py +++ b/tensorflow/python/training/experimental/mixed_precision.py @@ -190,7 +190,7 @@ def enable_mixed_precision_graph_rewrite(opt, loss_scale='dynamic'): Args: opt: An instance of a `tf.keras.optimizers.Optimizer`. loss_scale: Either an int/float, the string `"dynamic"`, or an instance of a - `tf.train.experimental.LossScale`. The loss scale to use. It is + `tf.mixed_precision.experimental.LossScale`. The loss scale to use. It is recommended to keep this as its default value of `"dynamic"`, which will adjust the scaling automatically to prevent `Inf` or `NaN` values. @@ -317,9 +317,9 @@ def enable_mixed_precision_graph_rewrite_v1(opt, loss_scale='dynamic'): opt: An instance of a `tf.keras.optimizers.Optimizer` or a `tf.train.Optimizer`. 
loss_scale: Either an int/float, the string `"dynamic"`, or an instance of - a `tf.train.experimental.LossScale`. The loss scale to use. It is - recommended to keep this as its default value of `"dynamic"`, which will - adjust the scaling automatically to prevent `Inf` or `NaN` values. + a `tf.mixed_precision.experimental.LossScale`. The loss scale to use. It + is recommended to keep this as its default value of `"dynamic"`, which + will adjust the scaling automatically to prevent `Inf` or `NaN` values. Returns: A version of `opt` that will use loss scaling to prevent underflow. diff --git a/tensorflow/tools/api/golden/v1/tensorflow.mixed_precision.experimental.-dynamic-loss-scale.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.mixed_precision.experimental.-dynamic-loss-scale.pbtxt new file mode 100644 index 00000000000..0d61b5a3495 --- /dev/null +++ b/tensorflow/tools/api/golden/v1/tensorflow.mixed_precision.experimental.-dynamic-loss-scale.pbtxt @@ -0,0 +1,35 @@ +path: "tensorflow.mixed_precision.experimental.DynamicLossScale" +tf_class { + is_instance: "<class \'tensorflow.python.training.experimental.loss_scale.DynamicLossScale\'>" + is_instance: "<class \'tensorflow.python.training.experimental.loss_scale.LossScale\'>" + is_instance: "<class \'tensorflow.python.training.tracking.base.Trackable\'>" + is_instance: "<type \'object\'>" + member { + name: "increment_period" + mtype: "<type \'property\'>" + } + member { + name: "initial_loss_scale" + mtype: "<type \'property\'>" + } + member { + name: "multiplier" + mtype: "<type \'property\'>" + } + member_method { + name: "__init__" + argspec: "args=[\'self\', \'initial_loss_scale\', \'increment_period\', \'multiplier\'], varargs=None, keywords=None, defaults=[\'32768\', \'2000\', \'2.0\'], " + } + member_method { + name: "from_config" + argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_config" + argspec: "args=[\'self\'], varargs=None, 
keywords=None, defaults=None" + } + member_method { + name: "update" + argspec: "args=[\'self\', \'grads\'], varargs=None, keywords=None, defaults=None" + } +} diff --git a/tensorflow/tools/api/golden/v1/tensorflow.mixed_precision.experimental.-fixed-loss-scale.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.mixed_precision.experimental.-fixed-loss-scale.pbtxt new file mode 100644 index 00000000000..dd34ddd4e22 --- /dev/null +++ b/tensorflow/tools/api/golden/v1/tensorflow.mixed_precision.experimental.-fixed-loss-scale.pbtxt @@ -0,0 +1,23 @@ +path: "tensorflow.mixed_precision.experimental.FixedLossScale" +tf_class { + is_instance: "<class \'tensorflow.python.training.experimental.loss_scale.FixedLossScale\'>" + is_instance: "<class \'tensorflow.python.training.experimental.loss_scale.LossScale\'>" + is_instance: "<class \'tensorflow.python.training.tracking.base.Trackable\'>" + is_instance: "<type \'object\'>" + member_method { + name: "__init__" + argspec: "args=[\'self\', \'loss_scale_value\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "from_config" + argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_config" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "update" + argspec: "args=[\'self\', \'grads\'], varargs=None, keywords=None, defaults=None" + } +} diff --git a/tensorflow/tools/api/golden/v1/tensorflow.mixed_precision.experimental.-loss-scale.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.mixed_precision.experimental.-loss-scale.pbtxt new file mode 100644 index 00000000000..b77e8dd8960 --- /dev/null +++ b/tensorflow/tools/api/golden/v1/tensorflow.mixed_precision.experimental.-loss-scale.pbtxt @@ -0,0 +1,22 @@ +path: "tensorflow.mixed_precision.experimental.LossScale" +tf_class { + is_instance: "<class \'tensorflow.python.training.experimental.loss_scale.LossScale\'>" + is_instance: "<class 
\'tensorflow.python.training.tracking.base.Trackable\'>" + is_instance: "<type \'object\'>" + member_method { + name: "__init__" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "from_config" + argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_config" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "update" + argspec: "args=[\'self\', \'grads\'], varargs=None, keywords=None, defaults=None" + } +} diff --git a/tensorflow/tools/api/golden/v1/tensorflow.mixed_precision.experimental.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.mixed_precision.experimental.pbtxt new file mode 100644 index 00000000000..61700226fbb --- /dev/null +++ b/tensorflow/tools/api/golden/v1/tensorflow.mixed_precision.experimental.pbtxt @@ -0,0 +1,15 @@ +path: "tensorflow.mixed_precision.experimental" +tf_module { + member { + name: "DynamicLossScale" + mtype: "<type \'type\'>" + } + member { + name: "FixedLossScale" + mtype: "<type \'type\'>" + } + member { + name: "LossScale" + mtype: "<type \'type\'>" + } +} diff --git a/tensorflow/tools/api/golden/v1/tensorflow.mixed_precision.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.mixed_precision.pbtxt new file mode 100644 index 00000000000..475c4a2ccde --- /dev/null +++ b/tensorflow/tools/api/golden/v1/tensorflow.mixed_precision.pbtxt @@ -0,0 +1,7 @@ +path: "tensorflow.mixed_precision" +tf_module { + member { + name: "experimental" + mtype: "<type \'module\'>" + } +} diff --git a/tensorflow/tools/api/golden/v1/tensorflow.pbtxt b/tensorflow/tools/api/golden/v1/tensorflow.pbtxt index 94218f5e560..f7261a7834e 100644 --- a/tensorflow/tools/api/golden/v1/tensorflow.pbtxt +++ b/tensorflow/tools/api/golden/v1/tensorflow.pbtxt @@ -508,6 +508,10 @@ tf_module { name: "metrics" mtype: "<type \'module\'>" } + member { + name: "mixed_precision" + mtype: "<type \'module\'>" + } member 
{ name: "mlir" mtype: "<type \'module\'>" diff --git a/tensorflow/tools/api/golden/v2/tensorflow.mixed_precision.experimental.-dynamic-loss-scale.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.mixed_precision.experimental.-dynamic-loss-scale.pbtxt new file mode 100644 index 00000000000..0d61b5a3495 --- /dev/null +++ b/tensorflow/tools/api/golden/v2/tensorflow.mixed_precision.experimental.-dynamic-loss-scale.pbtxt @@ -0,0 +1,35 @@ +path: "tensorflow.mixed_precision.experimental.DynamicLossScale" +tf_class { + is_instance: "<class \'tensorflow.python.training.experimental.loss_scale.DynamicLossScale\'>" + is_instance: "<class \'tensorflow.python.training.experimental.loss_scale.LossScale\'>" + is_instance: "<class \'tensorflow.python.training.tracking.base.Trackable\'>" + is_instance: "<type \'object\'>" + member { + name: "increment_period" + mtype: "<type \'property\'>" + } + member { + name: "initial_loss_scale" + mtype: "<type \'property\'>" + } + member { + name: "multiplier" + mtype: "<type \'property\'>" + } + member_method { + name: "__init__" + argspec: "args=[\'self\', \'initial_loss_scale\', \'increment_period\', \'multiplier\'], varargs=None, keywords=None, defaults=[\'32768\', \'2000\', \'2.0\'], " + } + member_method { + name: "from_config" + argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_config" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "update" + argspec: "args=[\'self\', \'grads\'], varargs=None, keywords=None, defaults=None" + } +} diff --git a/tensorflow/tools/api/golden/v2/tensorflow.mixed_precision.experimental.-fixed-loss-scale.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.mixed_precision.experimental.-fixed-loss-scale.pbtxt new file mode 100644 index 00000000000..dd34ddd4e22 --- /dev/null +++ b/tensorflow/tools/api/golden/v2/tensorflow.mixed_precision.experimental.-fixed-loss-scale.pbtxt @@ -0,0 +1,23 @@ 
+path: "tensorflow.mixed_precision.experimental.FixedLossScale" +tf_class { + is_instance: "<class \'tensorflow.python.training.experimental.loss_scale.FixedLossScale\'>" + is_instance: "<class \'tensorflow.python.training.experimental.loss_scale.LossScale\'>" + is_instance: "<class \'tensorflow.python.training.tracking.base.Trackable\'>" + is_instance: "<type \'object\'>" + member_method { + name: "__init__" + argspec: "args=[\'self\', \'loss_scale_value\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "from_config" + argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_config" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "update" + argspec: "args=[\'self\', \'grads\'], varargs=None, keywords=None, defaults=None" + } +} diff --git a/tensorflow/tools/api/golden/v2/tensorflow.mixed_precision.experimental.-loss-scale.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.mixed_precision.experimental.-loss-scale.pbtxt new file mode 100644 index 00000000000..b77e8dd8960 --- /dev/null +++ b/tensorflow/tools/api/golden/v2/tensorflow.mixed_precision.experimental.-loss-scale.pbtxt @@ -0,0 +1,22 @@ +path: "tensorflow.mixed_precision.experimental.LossScale" +tf_class { + is_instance: "<class \'tensorflow.python.training.experimental.loss_scale.LossScale\'>" + is_instance: "<class \'tensorflow.python.training.tracking.base.Trackable\'>" + is_instance: "<type \'object\'>" + member_method { + name: "__init__" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "from_config" + argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "get_config" + argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None" + } + member_method { + name: "update" + argspec: "args=[\'self\', \'grads\'], varargs=None, keywords=None, 
defaults=None" + } +} diff --git a/tensorflow/tools/api/golden/v2/tensorflow.mixed_precision.experimental.pbtxt b/tensorflow/tools/api/golden/v2/tensorflow.mixed_precision.experimental.pbtxt index 30414f7f9ea..5abfdcd109d 100644 --- a/tensorflow/tools/api/golden/v2/tensorflow.mixed_precision.experimental.pbtxt +++ b/tensorflow/tools/api/golden/v2/tensorflow.mixed_precision.experimental.pbtxt @@ -1,5 +1,17 @@ path: "tensorflow.mixed_precision.experimental" tf_module { + member { + name: "DynamicLossScale" + mtype: "<type \'type\'>" + } + member { + name: "FixedLossScale" + mtype: "<type \'type\'>" + } + member { + name: "LossScale" + mtype: "<type \'type\'>" + } member { name: "LossScaleGradientTape" mtype: "<type \'type\'>"