Deprecate tf.train.* classes in TF 2.0 API in favor of tf.keras.optimizers.* ones.

PiperOrigin-RevId: 223171873
Mihai Maruseac 2018-11-28 08:29:03 -08:00 committed by TensorFlower Gardener
parent 192d588eaf
commit 36a445c353
22 changed files with 20 additions and 559 deletions
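
The classes touched below keep working under tf.compat.v1; the intended 2.0 replacements live in tf.keras.optimizers. A rough correspondence, sketched as an assumption from the Keras optimizer set rather than anything stated in this commit:

# Hypothetical v1 -> TF 2.0 mapping (assumption; AdagradDAOptimizer and
# ProximalAdagradOptimizer have no direct Keras counterpart and stay under
# tf.compat.v1.train):
V1_TO_KERAS = {
    "tf.train.AdadeltaOptimizer": "tf.keras.optimizers.Adadelta",
    "tf.train.AdagradOptimizer": "tf.keras.optimizers.Adagrad",
    "tf.train.AdamOptimizer": "tf.keras.optimizers.Adam",
    "tf.train.FtrlOptimizer": "tf.keras.optimizers.Ftrl",
    "tf.train.GradientDescentOptimizer": "tf.keras.optimizers.SGD",
    "tf.train.MomentumOptimizer": "tf.keras.optimizers.SGD",  # pass momentum=...
    "tf.train.RMSPropOptimizer": "tf.keras.optimizers.RMSprop",
    "tf.train.Optimizer": "tf.keras.optimizers.Optimizer",
}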


@@ -25,7 +25,7 @@ from tensorflow.python.training import training_ops
 from tensorflow.python.util.tf_export import tf_export
-@tf_export("train.AdadeltaOptimizer")
+@tf_export(v1=["train.AdadeltaOptimizer"])
 class AdadeltaOptimizer(optimizer.Optimizer):
   """Optimizer that implements the Adadelta algorithm.


@@ -28,7 +28,7 @@ from tensorflow.python.training import training_ops
 from tensorflow.python.util.tf_export import tf_export
-@tf_export("train.AdagradOptimizer")
+@tf_export(v1=["train.AdagradOptimizer"])
 class AdagradOptimizer(optimizer.Optimizer):
   """Optimizer that implements the Adagrad algorithm.


@@ -26,7 +26,7 @@ from tensorflow.python.training import training_ops
 from tensorflow.python.util.tf_export import tf_export
-@tf_export("train.AdagradDAOptimizer")
+@tf_export(v1=["train.AdagradDAOptimizer"])
 class AdagradDAOptimizer(optimizer.Optimizer):
   """Adagrad Dual Averaging algorithm for sparse linear models.


@@ -29,7 +29,7 @@ from tensorflow.python.training import training_ops
 from tensorflow.python.util.tf_export import tf_export
-@tf_export("train.AdamOptimizer")
+@tf_export(v1=["train.AdamOptimizer"])
 class AdamOptimizer(optimizer.Optimizer):
   """Optimizer that implements the Adam algorithm.


@@ -25,7 +25,7 @@ from tensorflow.python.training import training_ops
 from tensorflow.python.util.tf_export import tf_export
-@tf_export("train.FtrlOptimizer")
+@tf_export(v1=["train.FtrlOptimizer"])
 class FtrlOptimizer(optimizer.Optimizer):
   """Optimizer that implements the FTRL algorithm.


@@ -26,7 +26,7 @@ from tensorflow.python.training import training_ops
 from tensorflow.python.util.tf_export import tf_export
-@tf_export("train.GradientDescentOptimizer")
+@tf_export(v1=["train.GradientDescentOptimizer"])
 class GradientDescentOptimizer(optimizer.Optimizer):
   """Optimizer that implements the gradient descent algorithm.
   """


@@ -25,7 +25,7 @@ from tensorflow.python.training import training_ops
 from tensorflow.python.util.tf_export import tf_export
-@tf_export("train.MomentumOptimizer")
+@tf_export(v1=["train.MomentumOptimizer"])
 class MomentumOptimizer(optimizer.Optimizer):
   """Optimizer that implements the Momentum algorithm.


@@ -214,7 +214,7 @@ def _get_processor(v):
   raise NotImplementedError("Trying to optimize unsupported type ", v)
-@tf_export("train.Optimizer")
+@tf_export(v1=["train.Optimizer"])
 class Optimizer(
     # Optimizers inherit from CheckpointableBase rather than Checkpointable
    # since they do most of their dependency management themselves (slot


@@ -26,7 +26,7 @@ from tensorflow.python.training import training_ops
 from tensorflow.python.util.tf_export import tf_export
-@tf_export("train.ProximalAdagradOptimizer")
+@tf_export(v1=["train.ProximalAdagradOptimizer"])
 class ProximalAdagradOptimizer(optimizer.Optimizer):
   # pylint: disable=line-too-long
   """Optimizer that implements the Proximal Adagrad algorithm.


@@ -50,7 +50,7 @@ from tensorflow.python.training import training_ops
 from tensorflow.python.util.tf_export import tf_export
-@tf_export("train.RMSPropOptimizer")
+@tf_export(v1=["train.RMSPropOptimizer"])
 class RMSPropOptimizer(optimizer.Optimizer):
   """Optimizer that implements the RMSProp algorithm.


@@ -1,51 +0,0 @@
path: "tensorflow.train.AdadeltaOptimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.adadelta.AdadeltaOptimizer\'>"
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
mtype: "<type \'int\'>"
}
member {
name: "GATE_NONE"
mtype: "<type \'int\'>"
}
member {
name: "GATE_OP"
mtype: "<type \'int\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'learning_rate\', \'rho\', \'epsilon\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'0.001\', \'0.95\', \'1e-08\', \'False\', \'Adadelta\'], "
}
member_method {
name: "apply_gradients"
argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "compute_gradients"
argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
}
member_method {
name: "get_name"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_slot"
argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_slot_names"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "minimize"
argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
}
member_method {
name: "variables"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
}
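
The argspecs above record the v1 training interface. A short graph-mode sketch, assuming a TF 1.x runtime (variable names are illustrative):

import tensorflow as tf  # assuming a TF 1.x graph-mode runtime

x = tf.Variable(3.0)
loss = tf.square(x)

opt = tf.train.AdadeltaOptimizer(learning_rate=0.001, rho=0.95, epsilon=1e-08)
grads_and_vars = opt.compute_gradients(loss)    # per the argspec above
train_op = opt.apply_gradients(grads_and_vars)  # or simply opt.minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)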


@@ -1,51 +0,0 @@
path: "tensorflow.train.AdagradDAOptimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.adagrad_da.AdagradDAOptimizer\'>"
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
mtype: "<type \'int\'>"
}
member {
name: "GATE_NONE"
mtype: "<type \'int\'>"
}
member {
name: "GATE_OP"
mtype: "<type \'int\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'learning_rate\', \'global_step\', \'initial_gradient_squared_accumulator_value\', \'l1_regularization_strength\', \'l2_regularization_strength\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'0.1\', \'0.0\', \'0.0\', \'False\', \'AdagradDA\'], "
}
member_method {
name: "apply_gradients"
argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "compute_gradients"
argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
}
member_method {
name: "get_name"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_slot"
argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_slot_names"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "minimize"
argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
}
member_method {
name: "variables"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
}


@@ -1,51 +0,0 @@
path: "tensorflow.train.AdagradOptimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.adagrad.AdagradOptimizer\'>"
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
mtype: "<type \'int\'>"
}
member {
name: "GATE_NONE"
mtype: "<type \'int\'>"
}
member {
name: "GATE_OP"
mtype: "<type \'int\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'learning_rate\', \'initial_accumulator_value\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'0.1\', \'False\', \'Adagrad\'], "
}
member_method {
name: "apply_gradients"
argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "compute_gradients"
argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
}
member_method {
name: "get_name"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_slot"
argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_slot_names"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "minimize"
argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
}
member_method {
name: "variables"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
}


@@ -1,51 +0,0 @@
path: "tensorflow.train.AdamOptimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.adam.AdamOptimizer\'>"
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
mtype: "<type \'int\'>"
}
member {
name: "GATE_NONE"
mtype: "<type \'int\'>"
}
member {
name: "GATE_OP"
mtype: "<type \'int\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'learning_rate\', \'beta1\', \'beta2\', \'epsilon\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'0.001\', \'0.9\', \'0.999\', \'1e-08\', \'False\', \'Adam\'], "
}
member_method {
name: "apply_gradients"
argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "compute_gradients"
argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
}
member_method {
name: "get_name"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_slot"
argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_slot_names"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "minimize"
argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
}
member_method {
name: "variables"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
}
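
Note the constructor argspec: the v1 class spells its moment-decay arguments beta1/beta2, while the Keras replacement uses beta_1/beta_2. A hedged side-by-side, assuming a TF 2.0 build:

import tensorflow as tf  # assuming a TF 2.0 build

# v1 spelling, now reachable only via compat.v1 (defaults per the argspec above):
v1_opt = tf.compat.v1.train.AdamOptimizer(
    learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08)

# 2.0 replacement; note the renamed keyword arguments:
v2_opt = tf.keras.optimizers.Adam(
    learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)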


@@ -1,51 +0,0 @@
path: "tensorflow.train.FtrlOptimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.ftrl.FtrlOptimizer\'>"
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
mtype: "<type \'int\'>"
}
member {
name: "GATE_NONE"
mtype: "<type \'int\'>"
}
member {
name: "GATE_OP"
mtype: "<type \'int\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'learning_rate\', \'learning_rate_power\', \'initial_accumulator_value\', \'l1_regularization_strength\', \'l2_regularization_strength\', \'use_locking\', \'name\', \'accum_name\', \'linear_name\', \'l2_shrinkage_regularization_strength\'], varargs=None, keywords=None, defaults=[\'-0.5\', \'0.1\', \'0.0\', \'0.0\', \'False\', \'Ftrl\', \'None\', \'None\', \'0.0\'], "
}
member_method {
name: "apply_gradients"
argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "compute_gradients"
argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
}
member_method {
name: "get_name"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_slot"
argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_slot_names"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "minimize"
argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
}
member_method {
name: "variables"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
}


@@ -1,51 +0,0 @@
path: "tensorflow.train.GradientDescentOptimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.gradient_descent.GradientDescentOptimizer\'>"
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
mtype: "<type \'int\'>"
}
member {
name: "GATE_NONE"
mtype: "<type \'int\'>"
}
member {
name: "GATE_OP"
mtype: "<type \'int\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'learning_rate\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'False\', \'GradientDescent\'], "
}
member_method {
name: "apply_gradients"
argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "compute_gradients"
argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
}
member_method {
name: "get_name"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_slot"
argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_slot_names"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "minimize"
argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
}
member_method {
name: "variables"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
}


@@ -1,51 +0,0 @@
path: "tensorflow.train.MomentumOptimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.momentum.MomentumOptimizer\'>"
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
mtype: "<type \'int\'>"
}
member {
name: "GATE_NONE"
mtype: "<type \'int\'>"
}
member {
name: "GATE_OP"
mtype: "<type \'int\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'learning_rate\', \'momentum\', \'use_locking\', \'name\', \'use_nesterov\'], varargs=None, keywords=None, defaults=[\'False\', \'Momentum\', \'False\'], "
}
member_method {
name: "apply_gradients"
argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "compute_gradients"
argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
}
member_method {
name: "get_name"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_slot"
argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_slot_names"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "minimize"
argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
}
member_method {
name: "variables"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
}
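
get_slot and get_slot_names, listed in every one of these golden files, expose an optimizer's auxiliary accumulators. For MomentumOptimizer the single slot is named "momentum" (an assumption recalled from the v1 implementation, not stated in this file):

import tensorflow as tf  # assuming a TF 1.x graph-mode runtime

w = tf.Variable(1.0)
loss = tf.square(w)

opt = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.9)
train_op = opt.minimize(loss)        # creates one slot variable per trainable var

print(opt.get_slot_names())          # ['momentum'] (assumed slot name)
accum = opt.get_slot(w, "momentum")  # the accumulator paired with w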


@@ -1,50 +0,0 @@
path: "tensorflow.train.Optimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
mtype: "<type \'int\'>"
}
member {
name: "GATE_NONE"
mtype: "<type \'int\'>"
}
member {
name: "GATE_OP"
mtype: "<type \'int\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "apply_gradients"
argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "compute_gradients"
argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
}
member_method {
name: "get_name"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_slot"
argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_slot_names"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "minimize"
argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
}
member_method {
name: "variables"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
}


@@ -1,51 +0,0 @@
path: "tensorflow.train.ProximalAdagradOptimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.proximal_adagrad.ProximalAdagradOptimizer\'>"
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
mtype: "<type \'int\'>"
}
member {
name: "GATE_NONE"
mtype: "<type \'int\'>"
}
member {
name: "GATE_OP"
mtype: "<type \'int\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'learning_rate\', \'initial_accumulator_value\', \'l1_regularization_strength\', \'l2_regularization_strength\', \'use_locking\', \'name\'], varargs=None, keywords=None, defaults=[\'0.1\', \'0.0\', \'0.0\', \'False\', \'ProximalAdagrad\'], "
}
member_method {
name: "apply_gradients"
argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "compute_gradients"
argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
}
member_method {
name: "get_name"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_slot"
argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_slot_names"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "minimize"
argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
}
member_method {
name: "variables"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
}


@@ -1,51 +0,0 @@
path: "tensorflow.train.RMSPropOptimizer"
tf_class {
is_instance: "<class \'tensorflow.python.training.rmsprop.RMSPropOptimizer\'>"
is_instance: "<class \'tensorflow.python.training.optimizer.Optimizer\'>"
is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
is_instance: "<type \'object\'>"
member {
name: "GATE_GRAPH"
mtype: "<type \'int\'>"
}
member {
name: "GATE_NONE"
mtype: "<type \'int\'>"
}
member {
name: "GATE_OP"
mtype: "<type \'int\'>"
}
member_method {
name: "__init__"
argspec: "args=[\'self\', \'learning_rate\', \'decay\', \'momentum\', \'epsilon\', \'use_locking\', \'centered\', \'name\'], varargs=None, keywords=None, defaults=[\'0.9\', \'0.0\', \'1e-10\', \'False\', \'False\', \'RMSProp\'], "
}
member_method {
name: "apply_gradients"
argspec: "args=[\'self\', \'grads_and_vars\', \'global_step\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "compute_gradients"
argspec: "args=[\'self\', \'loss\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'1\', \'None\', \'False\', \'None\'], "
}
member_method {
name: "get_name"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_slot"
argspec: "args=[\'self\', \'var\', \'name\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "get_slot_names"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "minimize"
argspec: "args=[\'self\', \'loss\', \'global_step\', \'var_list\', \'gate_gradients\', \'aggregation_method\', \'colocate_gradients_with_ops\', \'name\', \'grad_loss\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'1\', \'None\', \'False\', \'None\', \'None\'], "
}
member_method {
name: "variables"
argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
}
}


@@ -1,21 +1,5 @@
 path: "tensorflow.train"
 tf_module {
-  member {
-    name: "AdadeltaOptimizer"
-    mtype: "<type \'type\'>"
-  }
-  member {
-    name: "AdagradDAOptimizer"
-    mtype: "<type \'type\'>"
-  }
-  member {
-    name: "AdagradOptimizer"
-    mtype: "<type \'type\'>"
-  }
-  member {
-    name: "AdamOptimizer"
-    mtype: "<type \'type\'>"
-  }
   member {
     name: "BytesList"
     mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
@@ -88,18 +72,10 @@ tf_module {
     name: "FloatList"
     mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
   }
-  member {
-    name: "FtrlOptimizer"
-    mtype: "<type \'type\'>"
-  }
   member {
     name: "GlobalStepWaiterHook"
     mtype: "<type \'type\'>"
   }
-  member {
-    name: "GradientDescentOptimizer"
-    mtype: "<type \'type\'>"
-  }
   member {
     name: "Int64List"
     mtype: "<class \'google.protobuf.pyext.cpp_message.GeneratedProtocolMessageType\'>"
@@ -112,10 +88,6 @@ tf_module {
     name: "LoggingTensorHook"
     mtype: "<type \'type\'>"
   }
-  member {
-    name: "MomentumOptimizer"
-    mtype: "<type \'type\'>"
-  }
   member {
     name: "MonitoredSession"
     mtype: "<type \'type\'>"
@@ -128,22 +100,10 @@ tf_module {
     name: "NanTensorHook"
     mtype: "<type \'type\'>"
   }
-  member {
-    name: "Optimizer"
-    mtype: "<type \'type\'>"
-  }
-  member {
-    name: "ProximalAdagradOptimizer"
-    mtype: "<type \'type\'>"
-  }
   member {
     name: "ProximalGradientDescentOptimizer"
     mtype: "<type \'type\'>"
   }
-  member {
-    name: "RMSPropOptimizer"
-    mtype: "<type \'type\'>"
-  }
   member {
     name: "Scaffold"
     mtype: "<type \'type\'>"


@@ -581,10 +581,20 @@ renames = {
     'tf.to_int64': 'tf.compat.v1.to_int64',
     'tf.trace': 'tf.linalg.trace',
     'tf.train.LooperThread': 'tf.compat.v1.train.LooperThread',
+    'tf.train.AdadeltaOptimizer': 'tf.compat.v1.train.AdadeltaOptimizer',
+    'tf.train.AdagradDAOptimizer': 'tf.compat.v1.train.AdagradDAOptimizer',
+    'tf.train.AdagradOptimizer': 'tf.compat.v1.train.AdagradOptimizer',
+    'tf.train.AdamOptimizer': 'tf.compat.v1.train.AdamOptimizer',
+    'tf.train.FtrlOptimizer': 'tf.compat.v1.train.FtrlOptimizer',
+    'tf.train.GradientDescentOptimizer': 'tf.compat.v1.train.GradientDescentOptimizer',
+    'tf.train.MomentumOptimizer': 'tf.compat.v1.train.MomentumOptimizer',
     'tf.train.MonitoredTrainingSession': 'tf.compat.v1.train.MonitoredTrainingSession',
     'tf.train.NewCheckpointReader': 'tf.compat.v1.train.NewCheckpointReader',
+    'tf.train.Optimizer': 'tf.compat.v1.train.Optimizer',
     'tf.train.ProfilerHook': 'tf.compat.v1.train.ProfilerHook',
+    'tf.train.ProximalAdagradOptimizer': 'tf.compat.v1.train.ProximalAdagradOptimizer',
     'tf.train.QueueRunner': 'tf.compat.v1.train.QueueRunner',
+    'tf.train.RMSPropOptimizer': 'tf.compat.v1.train.RMSPropOptimizer',
     'tf.train.Saver': 'tf.compat.v1.train.Saver',
     'tf.train.SaverDef': 'tf.compat.v1.train.SaverDef',
     'tf.train.SyncReplicasOptimizer': 'tf.compat.v1.train.SyncReplicasOptimizer',
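
These entries drive the tf_upgrade_v2 conversion script (typically invoked as tf_upgrade_v2 --infile old.py --outfile new.py), which rewrites the deprecated names to their compat.v1 spellings rather than attempting a Keras migration. The effect on user code, with hypothetical file contents:

# before the upgrade script runs:
opt = tf.train.AdamOptimizer(learning_rate=0.001)

# after the rename above is applied:
opt = tf.compat.v1.train.AdamOptimizer(learning_rate=0.001)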