From 9242d0c50952ba79627d457254961f7935f29b21 Mon Sep 17 00:00:00 2001
From: Scott Wegner
Date: Thu, 7 May 2020 16:41:51 -0700
Subject: [PATCH] Fix documentation compatibility tag formatting.

PiperOrigin-RevId: 310461418
Change-Id: I824ac86b0519d7429bd68189fbf088c30484a681
---
 tensorflow/python/framework/ops.py           | 20 ++++++++++++--------
 .../python/keras/optimizer_v2/rmsprop.py     | 13 ++++++++-----
 tensorflow/python/training/adam.py           | 13 ++++++++-----
 3 files changed, 28 insertions(+), 18 deletions(-)

diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py
index f43663d5396..9b8f7cf4fde 100644
--- a/tensorflow/python/framework/ops.py
+++ b/tensorflow/python/framework/ops.py
@@ -6261,10 +6261,12 @@ def add_to_collection(name, value):
   Args:
     name: The key for the collection. For example, the `GraphKeys` class
       contains many standard names for collections.
-    value: The value to add to the collection. @compatibility(eager)
-      Collections are only supported in eager when variables are created inside
-      an EagerVariableStore (e.g. as part of a layer or template).
-      @end_compatibility
+    value: The value to add to the collection.
+
+  @compatibility(eager)
+  Collections are only supported in eager when variables are created inside
+  an EagerVariableStore (e.g. as part of a layer or template).
+  @end_compatibility
   """
   get_default_graph().add_to_collection(name, value)
 
@@ -6279,10 +6281,12 @@ def add_to_collections(names, value):
   Args:
     names: The key for the collections. The `GraphKeys` class contains many
      standard names for collections.
-    value: The value to add to the collections. @compatibility(eager)
-      Collections are only supported in eager when variables are created inside
-      an EagerVariableStore (e.g. as part of a layer or template).
-      @end_compatibility
+    value: The value to add to the collections.
+
+  @compatibility(eager)
+  Collections are only supported in eager when variables are created inside
+  an EagerVariableStore (e.g. as part of a layer or template).
+  @end_compatibility
   """
   get_default_graph().add_to_collections(names, value)
 
diff --git a/tensorflow/python/keras/optimizer_v2/rmsprop.py b/tensorflow/python/keras/optimizer_v2/rmsprop.py
index 5de5e59b385..d1deaf34f45 100644
--- a/tensorflow/python/keras/optimizer_v2/rmsprop.py
+++ b/tensorflow/python/keras/optimizer_v2/rmsprop.py
@@ -121,16 +121,19 @@ class RMSprop(optimizer_v2.OptimizerV2):
         Setting this to `True` may help with training, but is slightly more
         expensive in terms of computation and memory. Defaults to `False`.
       name: Optional name prefix for the operations created when applying
-        gradients. Defaults to "RMSprop". @compatibility(eager) When eager
-        execution is enabled, `learning_rate`, `decay`, `momentum`, and
-        `epsilon` can each be a callable that takes no arguments and returns the
-        actual value to use. This can be useful for changing these values across
-        different invocations of optimizer functions. @end_compatibility
+        gradients. Defaults to "RMSprop".
       **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
         `decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
         gradients by value, `decay` is included for backward compatibility to
         allow time inverse decay of learning rate. `lr` is included for backward
         compatibility, recommended to use `learning_rate` instead.
+
+    @compatibility(eager)
+    When eager execution is enabled, `learning_rate`, `decay`, `momentum`, and
+    `epsilon` can each be a callable that takes no arguments and returns the
+    actual value to use. This can be useful for changing these values across
+    different invocations of optimizer functions.
+    @end_compatibility
     """
     super(RMSprop, self).__init__(name, **kwargs)
     self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
diff --git a/tensorflow/python/training/adam.py b/tensorflow/python/training/adam.py
index 615ac587c21..93bacbdc0bb 100644
--- a/tensorflow/python/training/adam.py
+++ b/tensorflow/python/training/adam.py
@@ -92,11 +92,14 @@ class AdamOptimizer(optimizer.Optimizer):
         Section 2.1), not the epsilon in Algorithm 1 of the paper.
       use_locking: If True use locks for update operations.
       name: Optional name for the operations created when applying gradients.
-        Defaults to "Adam". @compatibility(eager) When eager execution is
-        enabled, `learning_rate`, `beta1`, `beta2`, and `epsilon` can each be a
-        callable that takes no arguments and returns the actual value to use.
-        This can be useful for changing these values across different
-        invocations of optimizer functions. @end_compatibility
+        Defaults to "Adam".
+
+    @compatibility(eager)
+    When eager execution is enabled, `learning_rate`, `beta1`, `beta2`, and
+    `epsilon` can each be a callable that takes no arguments and returns the
+    actual value to use. This can be useful for changing these values across
+    different invocations of optimizer functions.
+    @end_compatibility
     """
     super(AdamOptimizer, self).__init__(use_locking, name)
     self._lr = learning_rate
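
For context, the @compatibility(eager) blocks reformatted above describe the
following behavior: with eager execution enabled, hyperparameters such as
`learning_rate` may be passed as zero-argument callables that return the value
to use at each invocation. A minimal sketch, not part of this patch; it assumes
TF 2.x (eager by default), and the helper name `lr_value` and the constant
0.001 are illustrative only:

import tensorflow as tf

def lr_value():
  # Zero-argument callable: the optimizer calls it whenever it reads the
  # learning rate, so the returned value can change between optimizer steps.
  return 0.001

# Per the rmsprop.py docstring above; the adam.py docstring describes the
# same callable mechanism for tf.compat.v1.train.AdamOptimizer in eager mode.
opt = tf.keras.optimizers.RMSprop(learning_rate=lr_value)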