Add sdca ops to tf.train.
This adds:

* tf.train.sdca_optimizer
* tf.train.sdca_fprint
* tf.train.sdca_shrink_l1

which were previously documented in tf.sdca prior to 1.0. In 1.0 they were absent from tf.sdca, so this does not break compatibility. The module tf.sdca is removed.

Change: 153176548
commit ba4bfd9208
parent 69c064d3fe
Changed paths:
  tensorflow/contrib/linear_optimizer/python
  tensorflow/python
  tensorflow/tools/api/golden
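After this change the three ops are callable directly from tf.train. Below is a minimal usage sketch, not part of the commit itself: it assumes a TensorFlow 1.x graph/session environment, and the input strings simply mirror the test updated in the first hunk below.

import tensorflow as tf

# Hash string example ids into int64 fingerprints using the op that is
# now exported as tf.train.sdca_fprint (argspec: input, name=None).
example_ids = tf.constant(['abc', 'very looooooong string', 'def'])
fingerprints = tf.train.sdca_fprint(example_ids)

with tf.Session() as sess:
  # Each input string maps to a pair of int64 fingerprint values,
  # so the result has shape [3, 2].
  print(sess.run(fingerprints))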
@@ -1058,7 +1058,7 @@ class SdcaFprintTest(SdcaModelTest):
   def testFprint(self):
     with self._single_threaded_test_session():
       in_data = constant_op.constant(['abc', 'very looooooong string', 'def'])
-      out_data = gen_sdca_ops._sdca_fprint(in_data)
+      out_data = gen_sdca_ops.sdca_fprint(in_data)
       self.assertAllEqual([[4143508125394299908, -6879828354153669051],
                            [5849691694103072671, -4874542629849009556],
                            [603227410218889250, 8762207001949257490]],
@@ -307,7 +307,7 @@ class SdcaModel(object):
         sparse_features_values.append(sf.feature_values)

       # pylint: disable=protected-access
-      example_ids_hashed = gen_sdca_ops._sdca_fprint(
+      example_ids_hashed = gen_sdca_ops.sdca_fprint(
           internal_convert_to_tensor(self._examples['example_ids']))
       # pylint: enable=protected-access
       example_state_data = self._hashtable.lookup(example_ids_hashed)
@@ -328,7 +328,7 @@ class SdcaModel(object):
         sparse_weights.append(array_ops.gather(w, sparse_indices[-1]))

       # pylint: disable=protected-access
-      esu, sfw, dfw = gen_sdca_ops._sdca_optimizer(
+      esu, sfw, dfw = gen_sdca_ops.sdca_optimizer(
           sparse_example_indices,
           sparse_feature_indices,
           sparse_features_values,
@@ -390,7 +390,7 @@ class SdcaModel(object):
       with ops.device(var.device):
         # pylint: disable=protected-access
         update_ops.append(
-            gen_sdca_ops._sdca_shrink_l1(
+            gen_sdca_ops.sdca_shrink_l1(
                 self._convert_n_to_tensor(
                     [var], as_ref=True),
                 l1=self._symmetric_l1_regularization(),
@@ -60,7 +60,6 @@ py_library(
         ":nn",
         ":platform",
         ":script_ops",
-        ":sdca_ops",
         ":session_ops",
         ":sets",
         ":sparse_ops",
@@ -2270,6 +2269,7 @@ py_library(
         ":random_ops",
         ":resource_variable_ops",
         ":resources",
+        ":sdca_ops",
         ":sparse_ops",
         ":state_ops",
         ":string_ops",
@@ -80,7 +80,6 @@ from tensorflow.python.layers import layers
 from tensorflow.python.ops import image_ops as image
 from tensorflow.python.ops import metrics
 from tensorflow.python.ops import nn
-from tensorflow.python.ops import sdca_ops as sdca
 from tensorflow.python.ops import sets
 from tensorflow.python.ops import spectral_ops as spectral
 from tensorflow.python.ops.losses import losses
@@ -225,7 +224,6 @@ _allowed_symbols.extend([
     'python_io',
     'resource_loader',
     'saved_model',
-    'sdca',
     'sets',
     'spectral',
     'summary',
@@ -301,9 +301,6 @@ PyFunc
 PyFuncStateless

 # sdca_ops
-SdcaFprint
-SdcaOptimizer
-SdcaShrinkL1

 # state_ops
 Variable
@@ -95,10 +95,14 @@ from __future__ import print_function
 import sys as _sys

 from tensorflow.python.ops import io_ops as _io_ops
+from tensorflow.python.ops import sdca_ops as _sdca_ops
 from tensorflow.python.ops import state_ops as _state_ops
 from tensorflow.python.util.all_util import remove_undocumented

 # pylint: disable=g-bad-import-order,unused-import
+from tensorflow.python.ops.sdca_ops import sdca_optimizer
+from tensorflow.python.ops.sdca_ops import sdca_fprint
+from tensorflow.python.ops.sdca_ops import sdca_shrink_l1
 from tensorflow.python.training.adadelta import AdadeltaOptimizer
 from tensorflow.python.training.adagrad import AdagradOptimizer
 from tensorflow.python.training.adagrad_da import AdagradDAOptimizer
@@ -224,4 +228,4 @@ _allowed_symbols = [
 # * Input methods in tf.train are documented in io_ops.
 # * Saver methods in tf.train are documented in state_ops.
 remove_undocumented(__name__, _allowed_symbols,
-                    [_sys.modules[__name__], _io_ops, _state_ops])
+                    [_sys.modules[__name__], _io_ops, _sdca_ops, _state_ops])
@@ -420,10 +420,6 @@ tf_module {
     name: "saved_model"
     mtype: "<type \'module\'>"
   }
-  member {
-    name: "sdca"
-    mtype: "<type \'module\'>"
-  }
   member {
     name: "sets"
     mtype: "<type \'module\'>"
@@ -1,3 +0,0 @@
-path: "tensorflow.sdca"
-tf_module {
-}
@@ -360,6 +360,18 @@ tf_module {
     name: "replica_device_setter"
     argspec: "args=[\'ps_tasks\', \'ps_device\', \'worker_device\', \'merge_devices\', \'cluster\', \'ps_ops\', \'ps_strategy\'], varargs=None, keywords=None, defaults=[\'0\', \'/job:ps\', \'/job:worker\', \'True\', \'None\', \'None\', \'None\'], "
   }
+  member_method {
+    name: "sdca_fprint"
+    argspec: "args=[\'input\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+  }
+  member_method {
+    name: "sdca_optimizer"
+    argspec: "args=[\'sparse_example_indices\', \'sparse_feature_indices\', \'sparse_feature_values\', \'dense_features\', \'example_weights\', \'example_labels\', \'sparse_indices\', \'sparse_weights\', \'dense_weights\', \'example_state_data\', \'loss_type\', \'l1\', \'l2\', \'num_loss_partitions\', \'num_inner_iterations\', \'adaptative\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
+  }
+  member_method {
+    name: "sdca_shrink_l1"
+    argspec: "args=[\'weights\', \'l1\', \'l2\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+  }
   member_method {
     name: "shuffle_batch"
     argspec: "args=[\'tensors\', \'batch_size\', \'capacity\', \'min_after_dequeue\', \'num_threads\', \'seed\', \'enqueue_many\', \'shapes\', \'allow_smaller_final_batch\', \'shared_name\', \'name\'], varargs=None, keywords=None, defaults=[\'1\', \'None\', \'False\', \'None\', \'False\', \'None\', \'None\'], "
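The golden-file argspecs above pin down the exported signatures. As a further hedged sketch (again assuming a TensorFlow 1.x graph/session environment; the variable contents and the l1/l2 strengths are illustrative only), tf.train.sdca_shrink_l1 can be applied to a list of weight variables:

import tensorflow as tf

# Proximal L1 shrinkage applied in place to a list of weight variables,
# following args=['weights', 'l1', 'l2', 'name'] from the golden file.
# The initial values and regularization strengths here are made up.
weights = [tf.Variable([0.5, -0.3, 0.0, 1.2], dtype=tf.float32)]
shrink = tf.train.sdca_shrink_l1(weights, l1=0.1, l2=1.0)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(shrink)              # updates the variables in place
  print(sess.run(weights[0]))   # shrunken weight values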