Rolls back exposure of the optimizer_v2 classes in TF 1.x until the Keras-model Estimator backward-incompatibility issues can be sorted out (namely, the use of the global step in Estimators, and the fact that assigning to `iterations` does not work in optimizer_v2).
PiperOrigin-RevId: 225458875
parent 80ecf7a7c2
commit bc99c3db7f
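
For context, a minimal sketch of the second incompatibility in plain Python (illustrative names only, not code from this commit): the v1 Keras Optimizer keeps `iterations` as an ordinary attribute that the Estimator integration can rebind to the global step, whereas OptimizerV2 exposes `iterations` as a property, so the same assignment fails.

    # Hedged sketch, not code from this commit: why assigning to
    # `iterations` breaks with optimizer_v2.
    class V1Optimizer(object):
      def __init__(self):
        # v1: `iterations` is a plain attribute (a backend variable in the
        # real class), so Estimator glue can rebind it to the global step.
        self.iterations = 0

    class V2Optimizer(object):
      @property
      def iterations(self):
        # v2 (at the time of this rollback): a read-only property, so the
        # same rebinding raises AttributeError.
        return 0

    v1 = V1Optimizer()
    v1.iterations = 'global_step'      # fine in v1

    v2 = V2Optimizer()
    try:
      v2.iterations = 'global_step'    # what the Estimator path attempts
    except AttributeError as err:
      print('rejected by optimizer_v2:', err)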
@@ -33,7 +33,6 @@ from tensorflow.python.framework import test_util
 from tensorflow.python.keras import optimizers
 from tensorflow.python.keras.engine import saving
 from tensorflow.python.keras.engine import training
-from tensorflow.python.keras.optimizer_v2 import rmsprop
 from tensorflow.python.lib.io import file_io
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import random_ops
@@ -334,6 +333,7 @@ class TestWeightSavingAndLoading(test.TestCase, parameterized.TestCase):
 
 class TestWholeModelSaving(test.TestCase):
 
+  @test_util.run_v1_only('b/120994067')
   def test_sequential_model_saving(self):
     if h5py is None:
       self.skipTest('h5py required to run this test')
@@ -345,7 +345,7 @@ class TestWholeModelSaving(test.TestCase):
     model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))
     model.compile(
         loss=keras.losses.MSE,
-        optimizer=rmsprop.RMSprop(lr=0.0001),
+        optimizer=keras.optimizers.RMSprop(lr=0.0001),
         metrics=[
             keras.metrics.categorical_accuracy,
             keras.metrics.CategoricalAccuracy()
@@ -30,6 +30,7 @@ from tensorflow.python.keras import keras_parameterized
 from tensorflow.python.keras import testing_utils
 from tensorflow.python.keras.engine import input_layer as input_layer_lib
 from tensorflow.python.keras.engine import network as network_lib
+from tensorflow.python.keras.optimizer_v2 import gradient_descent
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import math_ops
 from tensorflow.python.ops import state_ops
@@ -859,7 +860,7 @@ class TopologyConstructionTest(keras_parameterized.TestCase):
     x = np.ones((100, 2))
     y = np.ones((100, 2))
     model.compile(
-        optimizer='sgd',
+        optimizer=gradient_descent.SGD(),
         loss='mse',
         run_eagerly=testing_utils.should_run_eagerly())
     loss = model.train_on_batch(x, y)
@@ -908,7 +909,7 @@ class TopologyConstructionTest(keras_parameterized.TestCase):
     model.add(keras.layers.Dense(3))
     model.compile(
         loss='mse',
-        optimizer='sgd',
+        optimizer=gradient_descent.SGD(),
         metrics=['acc'],
         run_eagerly=testing_utils.should_run_eagerly())
 
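The two hunks above swap the `'sgd'` string for an explicit `gradient_descent.SGD()`: after the rollback the string shortcut resolves to the v1 optimizer in TF 1.x, so these tests construct the v2 class directly to keep exercising it. A hedged sketch of the distinction (assumes the post-rollback resolution described in this commit):

    # Hedged sketch; assumes the post-rollback string resolution.
    from tensorflow.python.keras import optimizers
    from tensorflow.python.keras.optimizer_v2 import gradient_descent

    opt_from_string = optimizers.get('sgd')  # v1 SGD in TF 1.x after this change
    opt_explicit = gradient_descent.SGD()    # v2 class, imported directly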
@@ -31,7 +31,6 @@ from tensorflow.python.framework import test_util
 from tensorflow.python.keras import backend as K
 from tensorflow.python.keras import metrics
 from tensorflow.python.keras import models
-from tensorflow.python.keras import optimizers
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import random_ops
 from tensorflow.python.ops import resource_variable_ops
@@ -482,7 +481,7 @@ class TestCloneAndBuildModel(test.TestCase):
     self.assert_optimizer_iterations_increases(adam.AdamOptimizer(0.01))
 
   def test_replace_keras_optimizer_iterations_variable(self):
-    self.assert_optimizer_iterations_increases(optimizers.Adam())
+    self.assert_optimizer_iterations_increases('adam')
 
   def test_clone_and_build_sequential_model_without_inputs_defined(self):
     with self.cached_session():
@@ -25,7 +25,7 @@ from tensorflow.python.training import training_ops
 from tensorflow.python.util.tf_export import tf_export
 
 
-@tf_export('keras.optimizers.Adadelta')
+@tf_export('keras.optimizers.Adadelta', v1=[])
 class Adadelta(optimizer_v2.OptimizerV2):
   r"""Optimizer that implements the Adadelta algorithm.
 
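This decorator change repeats across all the optimizer_v2 modules below. A hedged sketch of the dual-export pattern, with a mock decorator so the snippet runs without TensorFlow (the real one lives in tensorflow.python.util.tf_export): `v1=[]` keeps the 2.x endpoint but registers no 1.x endpoints, hiding the class from the TF 1.x API, while `tf_export(v1=[...])` in keras/optimizers.py (further down) registers the legacy class for 1.x only.

    # Hedged sketch of the dual-export pattern (mock decorator; illustrative).
    def tf_export(*v2_names, v1=None):
      # Records which API names a class gets in the 1.x and 2.x namespaces;
      # by default a symbol is exported under the same names in both.
      def wrap(cls):
        cls.v2_endpoints = list(v2_names)
        cls.v1_endpoints = list(v2_names) if v1 is None else list(v1)
        return cls
      return wrap

    @tf_export('keras.optimizers.Adadelta', v1=[])  # 2.x only after this commit
    class AdadeltaV2(object):
      pass

    @tf_export(v1=['keras.optimizers.Adadelta'])    # 1.x only (keras/optimizers.py)
    class AdadeltaV1(object):
      pass

    print(AdadeltaV2.v1_endpoints)  # [] -> absent from the TF 1.x API
    print(AdadeltaV1.v1_endpoints)  # ['keras.optimizers.Adadelta']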
@@ -30,7 +30,7 @@ from tensorflow.python.ops import state_ops
 from tensorflow.python.util.tf_export import tf_export
 
 
-@tf_export('keras.optimizers.Adagrad')
+@tf_export('keras.optimizers.Adagrad', v1=[])
 class Adagrad(optimizer_v2.OptimizerV2):
   r"""Optimizer that implements the Adagrad algorithm.
 
@@ -27,7 +27,7 @@ from tensorflow.python.training import training_ops
 from tensorflow.python.util.tf_export import tf_export
 
 
-@tf_export('keras.optimizers.Adam')
+@tf_export('keras.optimizers.Adam', v1=[])
 class Adam(optimizer_v2.OptimizerV2):
   """Optimizer that implements the Adam algorithm.
 
@@ -28,7 +28,7 @@ from tensorflow.python.training import training_ops
 from tensorflow.python.util.tf_export import tf_export
 
 
-@tf_export('keras.optimizers.Adamax')
+@tf_export('keras.optimizers.Adamax', v1=[])
 class Adamax(adam.Adam):
   """Optimizer that implements the Adamax algorithm.
 
@@ -24,7 +24,7 @@ from tensorflow.python.training import training_ops
 from tensorflow.python.util.tf_export import tf_export
 
 
-@tf_export('keras.optimizers.Ftrl')
+@tf_export('keras.optimizers.Ftrl', v1=[])
 class Ftrl(optimizer_v2.OptimizerV2):
   """Optimizer that implements the FTRL algorithm.
 
@@ -24,7 +24,7 @@ from tensorflow.python.training import training_ops
 from tensorflow.python.util.tf_export import tf_export
 
 
-@tf_export("keras.optimizers.SGD")
+@tf_export("keras.optimizers.SGD", v1=[])
 class SGD(optimizer_v2.OptimizerV2):
   """Stochastic gradient descent and momentum optimizer.
 
@@ -67,7 +67,7 @@ def _deduplicate_indexed_slices(values, indices):
 
 
 @six.add_metaclass(abc.ABCMeta)
-@tf_export("keras.optimizers.Optimizer")
+@tf_export("keras.optimizers.Optimizer", v1=[])
 class OptimizerV2(checkpointable.CheckpointableBase):
   """Updated base class for optimizers.
 
@@ -23,7 +23,7 @@ from tensorflow.python.training import training_ops
 from tensorflow.python.util.tf_export import tf_export
 
 
-@tf_export("keras.optimizers.RMSprop")
+@tf_export("keras.optimizers.RMSprop", v1=[])
 class RMSprop(optimizer_v2.OptimizerV2):
   r"""Optimizer that implements the RMSprop algorithm.
 
@@ -45,6 +45,7 @@ from tensorflow.python.training.checkpointable import base as checkpointable
 from tensorflow.python.util.tf_export import tf_export
 
 
+@tf_export(v1=['keras.optimizers.Optimizer'])
 class Optimizer(object):
   """Abstract optimizer base class.
 
@@ -158,6 +159,7 @@ class Optimizer(object):
     return cls(**config)
 
 
+@tf_export(v1=['keras.optimizers.SGD'])
 class SGD(Optimizer):
   """Stochastic gradient descent optimizer.
 
@@ -222,6 +224,7 @@ class SGD(Optimizer):
     return dict(list(base_config.items()) + list(config.items()))
 
 
+@tf_export(v1=['keras.optimizers.RMSprop'])
 class RMSprop(Optimizer):
   """RMSProp optimizer.
 
@@ -288,6 +291,7 @@ class RMSprop(Optimizer):
     return dict(list(base_config.items()) + list(config.items()))
 
 
+@tf_export(v1=['keras.optimizers.Adagrad'])
 class Adagrad(Optimizer):
   """Adagrad optimizer.
 
@@ -354,6 +358,7 @@ class Adagrad(Optimizer):
     return dict(list(base_config.items()) + list(config.items()))
 
 
+@tf_export(v1=['keras.optimizers.Adadelta'])
 class Adadelta(Optimizer):
   """Adadelta optimizer.
 
@@ -437,6 +442,7 @@ class Adadelta(Optimizer):
     return dict(list(base_config.items()) + list(config.items()))
 
 
+@tf_export(v1=['keras.optimizers.Adam'])
 class Adam(Optimizer):
   """Adam optimizer.
 
@@ -533,6 +539,7 @@ class Adam(Optimizer):
     return dict(list(base_config.items()) + list(config.items()))
 
 
+@tf_export(v1=['keras.optimizers.Adamax'])
 class Adamax(Optimizer):
   """Adamax optimizer from Adam paper's Section 7.
 
@@ -799,17 +806,27 @@ def deserialize(config, custom_objects=None):
   Returns:
       A Keras Optimizer instance.
   """
-  all_classes = {
-      'adadelta': adadelta_v2.Adadelta,
-      'adagrad': adagrad_v2.Adagrad,
-      'adam': adam_v2.Adam,
-      'adamax': adamax_v2.Adamax,
-      'nadam': nadam_v2.Nadam,
-      'rmsprop': rmsprop_v2.RMSprop,
-      'sgd': gradient_descent_v2.SGD
-  }
-  if not tf2.enabled():
-    all_classes['nadam'] = Nadam
+  if tf2.enabled():
+    all_classes = {
+        'adadelta': adadelta_v2.Adadelta,
+        'adagrad': adagrad_v2.Adagrad,
+        'adam': adam_v2.Adam,
+        'adamax': adamax_v2.Adamax,
+        'nadam': nadam_v2.Nadam,
+        'rmsprop': rmsprop_v2.RMSprop,
+        'sgd': gradient_descent_v2.SGD
+    }
+  else:
+    all_classes = {
+        'adadelta': Adadelta,
+        'adagrad': Adagrad,
+        'adam': Adam,
+        'adamax': Adamax,
+        'nadam': Nadam,
+        'rmsprop': RMSprop,
+        'sgd': SGD,
+        'tfoptimizer': TFOptimizer
+    }
 
   # Make deserialization case-insensitive for built-in optimizers.
   if config['class_name'].lower() in all_classes:
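A hedged usage sketch of the gated lookup above: the same serialized config now deserializes to a different class depending on whether TF 2.x behaviour is enabled (module path as in this file; the resulting types are assumptions based on the table).

    # Hedged sketch: 'adam' resolves through the tf2-gated table above.
    from tensorflow.python.keras import optimizers

    opt = optimizers.deserialize({'class_name': 'adam', 'config': {}})
    # tf2.enabled() -> adam_v2.Adam; otherwise -> the v1 Keras Adam
    print(type(opt))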
@@ -1,36 +1,15 @@
 path: "tensorflow.keras.optimizers.Adadelta"
 tf_class {
-  is_instance: "<class \'tensorflow.python.keras.optimizer_v2.adadelta.Adadelta\'>"
-  is_instance: "<class \'tensorflow.python.keras.optimizer_v2.optimizer_v2.OptimizerV2\'>"
-  is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+  is_instance: "<class \'tensorflow.python.keras.optimizers.Adadelta\'>"
+  is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
   is_instance: "<type \'object\'>"
-  member {
-    name: "iterations"
-    mtype: "<type \'property\'>"
-  }
-  member {
-    name: "weights"
-    mtype: "<type \'property\'>"
-  }
   member_method {
     name: "__init__"
-    argspec: "args=[\'self\', \'learning_rate\', \'rho\', \'epsilon\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'0.001\', \'0.95\', \'1e-07\', \'Adadelta\'], "
-  }
-  member_method {
-    name: "add_slot"
-    argspec: "args=[\'self\', \'var\', \'slot_name\', \'initializer\'], varargs=None, keywords=None, defaults=[\'zeros\'], "
-  }
-  member_method {
-    name: "add_weight"
-    argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'trainable\', \'synchronization\', \'aggregation\'], varargs=None, keywords=None, defaults=[\'None\', \'zeros\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
-  }
-  member_method {
-    name: "apply_gradients"
-    argspec: "args=[\'self\', \'grads_and_vars\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+    argspec: "args=[\'self\', \'lr\', \'rho\', \'epsilon\', \'decay\'], varargs=None, keywords=kwargs, defaults=[\'1.0\', \'0.95\', \'None\', \'0.0\'], "
   }
   member_method {
     name: "from_config"
-    argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+    argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
   }
   member_method {
     name: "get_config"
@@ -40,14 +19,6 @@ tf_class {
     name: "get_gradients"
     argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
   }
-  member_method {
-    name: "get_slot"
-    argspec: "args=[\'self\', \'var\', \'slot_name\'], varargs=None, keywords=None, defaults=None"
-  }
-  member_method {
-    name: "get_slot_names"
-    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
-  }
   member_method {
     name: "get_updates"
     argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
@@ -56,16 +27,8 @@ tf_class {
     name: "get_weights"
     argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
   }
-  member_method {
-    name: "minimize"
-    argspec: "args=[\'self\', \'loss\', \'var_list\', \'grad_loss\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
-  }
   member_method {
     name: "set_weights"
     argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
   }
-  member_method {
-    name: "variables"
-    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
-  }
 }
@@ -1,36 +1,15 @@
 path: "tensorflow.keras.optimizers.Adagrad"
 tf_class {
-  is_instance: "<class \'tensorflow.python.keras.optimizer_v2.adagrad.Adagrad\'>"
-  is_instance: "<class \'tensorflow.python.keras.optimizer_v2.optimizer_v2.OptimizerV2\'>"
-  is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+  is_instance: "<class \'tensorflow.python.keras.optimizers.Adagrad\'>"
+  is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
   is_instance: "<type \'object\'>"
-  member {
-    name: "iterations"
-    mtype: "<type \'property\'>"
-  }
-  member {
-    name: "weights"
-    mtype: "<type \'property\'>"
-  }
   member_method {
     name: "__init__"
-    argspec: "args=[\'self\', \'learning_rate\', \'initial_accumulator_value\', \'epsilon\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'0.001\', \'0.1\', \'1e-07\', \'Adagrad\'], "
-  }
-  member_method {
-    name: "add_slot"
-    argspec: "args=[\'self\', \'var\', \'slot_name\', \'initializer\'], varargs=None, keywords=None, defaults=[\'zeros\'], "
-  }
-  member_method {
-    name: "add_weight"
-    argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'trainable\', \'synchronization\', \'aggregation\'], varargs=None, keywords=None, defaults=[\'None\', \'zeros\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
-  }
-  member_method {
-    name: "apply_gradients"
-    argspec: "args=[\'self\', \'grads_and_vars\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+    argspec: "args=[\'self\', \'lr\', \'epsilon\', \'decay\'], varargs=None, keywords=kwargs, defaults=[\'0.01\', \'None\', \'0.0\'], "
   }
   member_method {
     name: "from_config"
-    argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+    argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
   }
   member_method {
     name: "get_config"
@@ -40,14 +19,6 @@ tf_class {
     name: "get_gradients"
     argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
   }
-  member_method {
-    name: "get_slot"
-    argspec: "args=[\'self\', \'var\', \'slot_name\'], varargs=None, keywords=None, defaults=None"
-  }
-  member_method {
-    name: "get_slot_names"
-    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
-  }
   member_method {
     name: "get_updates"
     argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
@@ -56,16 +27,8 @@ tf_class {
     name: "get_weights"
     argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
   }
-  member_method {
-    name: "minimize"
-    argspec: "args=[\'self\', \'loss\', \'var_list\', \'grad_loss\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
-  }
   member_method {
     name: "set_weights"
     argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
   }
-  member_method {
-    name: "variables"
-    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
-  }
 }
@@ -1,36 +1,15 @@
 path: "tensorflow.keras.optimizers.Adam"
 tf_class {
-  is_instance: "<class \'tensorflow.python.keras.optimizer_v2.adam.Adam\'>"
-  is_instance: "<class \'tensorflow.python.keras.optimizer_v2.optimizer_v2.OptimizerV2\'>"
-  is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+  is_instance: "<class \'tensorflow.python.keras.optimizers.Adam\'>"
+  is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
   is_instance: "<type \'object\'>"
-  member {
-    name: "iterations"
-    mtype: "<type \'property\'>"
-  }
-  member {
-    name: "weights"
-    mtype: "<type \'property\'>"
-  }
   member_method {
     name: "__init__"
-    argspec: "args=[\'self\', \'learning_rate\', \'beta_1\', \'beta_2\', \'epsilon\', \'amsgrad\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'0.001\', \'0.9\', \'0.999\', \'1e-07\', \'False\', \'Adam\'], "
-  }
-  member_method {
-    name: "add_slot"
-    argspec: "args=[\'self\', \'var\', \'slot_name\', \'initializer\'], varargs=None, keywords=None, defaults=[\'zeros\'], "
-  }
-  member_method {
-    name: "add_weight"
-    argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'trainable\', \'synchronization\', \'aggregation\'], varargs=None, keywords=None, defaults=[\'None\', \'zeros\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
-  }
-  member_method {
-    name: "apply_gradients"
-    argspec: "args=[\'self\', \'grads_and_vars\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+    argspec: "args=[\'self\', \'lr\', \'beta_1\', \'beta_2\', \'epsilon\', \'decay\', \'amsgrad\'], varargs=None, keywords=kwargs, defaults=[\'0.001\', \'0.9\', \'0.999\', \'None\', \'0.0\', \'False\'], "
   }
   member_method {
     name: "from_config"
-    argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+    argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
   }
   member_method {
     name: "get_config"
@@ -40,14 +19,6 @@ tf_class {
     name: "get_gradients"
     argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
   }
-  member_method {
-    name: "get_slot"
-    argspec: "args=[\'self\', \'var\', \'slot_name\'], varargs=None, keywords=None, defaults=None"
-  }
-  member_method {
-    name: "get_slot_names"
-    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
-  }
   member_method {
     name: "get_updates"
     argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
@@ -56,16 +27,8 @@ tf_class {
     name: "get_weights"
     argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
   }
-  member_method {
-    name: "minimize"
-    argspec: "args=[\'self\', \'loss\', \'var_list\', \'grad_loss\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
-  }
   member_method {
     name: "set_weights"
     argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
   }
-  member_method {
-    name: "variables"
-    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
-  }
 }
@@ -1,37 +1,15 @@
 path: "tensorflow.keras.optimizers.Adamax"
 tf_class {
-  is_instance: "<class \'tensorflow.python.keras.optimizer_v2.adamax.Adamax\'>"
-  is_instance: "<class \'tensorflow.python.keras.optimizer_v2.adam.Adam\'>"
-  is_instance: "<class \'tensorflow.python.keras.optimizer_v2.optimizer_v2.OptimizerV2\'>"
-  is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+  is_instance: "<class \'tensorflow.python.keras.optimizers.Adamax\'>"
+  is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
   is_instance: "<type \'object\'>"
-  member {
-    name: "iterations"
-    mtype: "<type \'property\'>"
-  }
-  member {
-    name: "weights"
-    mtype: "<type \'property\'>"
-  }
   member_method {
     name: "__init__"
-    argspec: "args=[\'self\', \'learning_rate\', \'beta_1\', \'beta_2\', \'epsilon\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'0.001\', \'0.9\', \'0.999\', \'1e-07\', \'Adamax\'], "
-  }
-  member_method {
-    name: "add_slot"
-    argspec: "args=[\'self\', \'var\', \'slot_name\', \'initializer\'], varargs=None, keywords=None, defaults=[\'zeros\'], "
-  }
-  member_method {
-    name: "add_weight"
-    argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'trainable\', \'synchronization\', \'aggregation\'], varargs=None, keywords=None, defaults=[\'None\', \'zeros\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
-  }
-  member_method {
-    name: "apply_gradients"
-    argspec: "args=[\'self\', \'grads_and_vars\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+    argspec: "args=[\'self\', \'lr\', \'beta_1\', \'beta_2\', \'epsilon\', \'decay\'], varargs=None, keywords=kwargs, defaults=[\'0.002\', \'0.9\', \'0.999\', \'None\', \'0.0\'], "
   }
   member_method {
     name: "from_config"
-    argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+    argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
   }
   member_method {
     name: "get_config"
@@ -41,14 +19,6 @@ tf_class {
     name: "get_gradients"
     argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
   }
-  member_method {
-    name: "get_slot"
-    argspec: "args=[\'self\', \'var\', \'slot_name\'], varargs=None, keywords=None, defaults=None"
-  }
-  member_method {
-    name: "get_slot_names"
-    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
-  }
   member_method {
     name: "get_updates"
     argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
@@ -57,16 +27,8 @@ tf_class {
     name: "get_weights"
     argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
   }
-  member_method {
-    name: "minimize"
-    argspec: "args=[\'self\', \'loss\', \'var_list\', \'grad_loss\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
-  }
   member_method {
     name: "set_weights"
     argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
   }
-  member_method {
-    name: "variables"
-    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
-  }
 }
@@ -1,35 +1,14 @@
 path: "tensorflow.keras.optimizers.Optimizer"
 tf_class {
-  is_instance: "<class \'tensorflow.python.keras.optimizer_v2.optimizer_v2.OptimizerV2\'>"
-  is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+  is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
   is_instance: "<type \'object\'>"
-  member {
-    name: "iterations"
-    mtype: "<type \'property\'>"
-  }
-  member {
-    name: "weights"
-    mtype: "<type \'property\'>"
-  }
   member_method {
     name: "__init__"
-    argspec: "args=[\'self\', \'name\'], varargs=None, keywords=kwargs, defaults=None"
-  }
-  member_method {
-    name: "add_slot"
-    argspec: "args=[\'self\', \'var\', \'slot_name\', \'initializer\'], varargs=None, keywords=None, defaults=[\'zeros\'], "
-  }
-  member_method {
-    name: "add_weight"
-    argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'trainable\', \'synchronization\', \'aggregation\'], varargs=None, keywords=None, defaults=[\'None\', \'zeros\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
-  }
-  member_method {
-    name: "apply_gradients"
-    argspec: "args=[\'self\', \'grads_and_vars\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+    argspec: "args=[\'self\'], varargs=None, keywords=kwargs, defaults=None"
   }
   member_method {
     name: "from_config"
-    argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+    argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
   }
   member_method {
     name: "get_config"
@@ -39,14 +18,6 @@ tf_class {
     name: "get_gradients"
     argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
   }
-  member_method {
-    name: "get_slot"
-    argspec: "args=[\'self\', \'var\', \'slot_name\'], varargs=None, keywords=None, defaults=None"
-  }
-  member_method {
-    name: "get_slot_names"
-    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
-  }
   member_method {
     name: "get_updates"
     argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
@@ -55,16 +26,8 @@ tf_class {
     name: "get_weights"
     argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
   }
-  member_method {
-    name: "minimize"
-    argspec: "args=[\'self\', \'loss\', \'var_list\', \'grad_loss\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
-  }
   member_method {
     name: "set_weights"
     argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
   }
-  member_method {
-    name: "variables"
-    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
-  }
 }
@@ -1,36 +1,15 @@
 path: "tensorflow.keras.optimizers.RMSprop"
 tf_class {
-  is_instance: "<class \'tensorflow.python.keras.optimizer_v2.rmsprop.RMSprop\'>"
-  is_instance: "<class \'tensorflow.python.keras.optimizer_v2.optimizer_v2.OptimizerV2\'>"
-  is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+  is_instance: "<class \'tensorflow.python.keras.optimizers.RMSprop\'>"
+  is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
   is_instance: "<type \'object\'>"
-  member {
-    name: "iterations"
-    mtype: "<type \'property\'>"
-  }
-  member {
-    name: "weights"
-    mtype: "<type \'property\'>"
-  }
   member_method {
     name: "__init__"
-    argspec: "args=[\'self\', \'learning_rate\', \'rho\', \'momentum\', \'epsilon\', \'centered\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'0.001\', \'0.9\', \'0.0\', \'1e-07\', \'False\', \'RMSprop\'], "
-  }
-  member_method {
-    name: "add_slot"
-    argspec: "args=[\'self\', \'var\', \'slot_name\', \'initializer\'], varargs=None, keywords=None, defaults=[\'zeros\'], "
-  }
-  member_method {
-    name: "add_weight"
-    argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'trainable\', \'synchronization\', \'aggregation\'], varargs=None, keywords=None, defaults=[\'None\', \'zeros\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
-  }
-  member_method {
-    name: "apply_gradients"
-    argspec: "args=[\'self\', \'grads_and_vars\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+    argspec: "args=[\'self\', \'lr\', \'rho\', \'epsilon\', \'decay\'], varargs=None, keywords=kwargs, defaults=[\'0.001\', \'0.9\', \'None\', \'0.0\'], "
   }
   member_method {
     name: "from_config"
-    argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+    argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
   }
   member_method {
     name: "get_config"
@@ -40,14 +19,6 @@ tf_class {
     name: "get_gradients"
     argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
  }
-  member_method {
-    name: "get_slot"
-    argspec: "args=[\'self\', \'var\', \'slot_name\'], varargs=None, keywords=None, defaults=None"
-  }
-  member_method {
-    name: "get_slot_names"
-    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
-  }
   member_method {
     name: "get_updates"
     argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
@@ -56,16 +27,8 @@ tf_class {
     name: "get_weights"
     argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
   }
-  member_method {
-    name: "minimize"
-    argspec: "args=[\'self\', \'loss\', \'var_list\', \'grad_loss\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
-  }
   member_method {
     name: "set_weights"
     argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
   }
-  member_method {
-    name: "variables"
-    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
-  }
 }
@@ -1,36 +1,15 @@
 path: "tensorflow.keras.optimizers.SGD"
 tf_class {
-  is_instance: "<class \'tensorflow.python.keras.optimizer_v2.gradient_descent.SGD\'>"
-  is_instance: "<class \'tensorflow.python.keras.optimizer_v2.optimizer_v2.OptimizerV2\'>"
-  is_instance: "<class \'tensorflow.python.training.checkpointable.base.CheckpointableBase\'>"
+  is_instance: "<class \'tensorflow.python.keras.optimizers.SGD\'>"
+  is_instance: "<class \'tensorflow.python.keras.optimizers.Optimizer\'>"
   is_instance: "<type \'object\'>"
-  member {
-    name: "iterations"
-    mtype: "<type \'property\'>"
-  }
-  member {
-    name: "weights"
-    mtype: "<type \'property\'>"
-  }
   member_method {
     name: "__init__"
-    argspec: "args=[\'self\', \'learning_rate\', \'momentum\', \'nesterov\', \'name\'], varargs=None, keywords=kwargs, defaults=[\'0.001\', \'0.0\', \'False\', \'SGD\'], "
-  }
-  member_method {
-    name: "add_slot"
-    argspec: "args=[\'self\', \'var\', \'slot_name\', \'initializer\'], varargs=None, keywords=None, defaults=[\'zeros\'], "
-  }
-  member_method {
-    name: "add_weight"
-    argspec: "args=[\'self\', \'name\', \'shape\', \'dtype\', \'initializer\', \'trainable\', \'synchronization\', \'aggregation\'], varargs=None, keywords=None, defaults=[\'None\', \'zeros\', \'None\', \'VariableSynchronization.AUTO\', \'VariableAggregation.NONE\'], "
-  }
-  member_method {
-    name: "apply_gradients"
-    argspec: "args=[\'self\', \'grads_and_vars\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+    argspec: "args=[\'self\', \'lr\', \'momentum\', \'decay\', \'nesterov\'], varargs=None, keywords=kwargs, defaults=[\'0.01\', \'0.0\', \'0.0\', \'False\'], "
   }
   member_method {
     name: "from_config"
-    argspec: "args=[\'cls\', \'config\', \'custom_objects\'], varargs=None, keywords=None, defaults=[\'None\'], "
+    argspec: "args=[\'cls\', \'config\'], varargs=None, keywords=None, defaults=None"
   }
   member_method {
     name: "get_config"
@@ -40,14 +19,6 @@ tf_class {
     name: "get_gradients"
     argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
   }
-  member_method {
-    name: "get_slot"
-    argspec: "args=[\'self\', \'var\', \'slot_name\'], varargs=None, keywords=None, defaults=None"
-  }
-  member_method {
-    name: "get_slot_names"
-    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
-  }
   member_method {
     name: "get_updates"
     argspec: "args=[\'self\', \'loss\', \'params\'], varargs=None, keywords=None, defaults=None"
@@ -56,16 +27,8 @@ tf_class {
     name: "get_weights"
     argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
   }
-  member_method {
-    name: "minimize"
-    argspec: "args=[\'self\', \'loss\', \'var_list\', \'grad_loss\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
-  }
   member_method {
     name: "set_weights"
     argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
   }
-  member_method {
-    name: "variables"
-    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
-  }
 }