From daa030ed68f19d98633d8ef6e45bb15419242a3b Mon Sep 17 00:00:00 2001
From: Martin Wicke
Date: Sun, 29 Jan 2017 17:50:23 -0800
Subject: [PATCH] Seal contrib interfaces (as much as feasible).

If you were using a symbol which is now hidden, it should be added to the
_allowed_symbols list in the appropriate __init__.py file (a runnable sketch
of the sealing pattern follows the patch).

Change: 145943844
---
 tensorflow/contrib/__init__.py | 4 +
 tensorflow/contrib/bayesflow/__init__.py | 10 +
 tensorflow/contrib/copy_graph/__init__.py | 5 +
 tensorflow/contrib/crf/__init__.py | 4 +
 tensorflow/contrib/cudnn_rnn/__init__.py | 3 +
 tensorflow/contrib/deprecated/__init__.py | 7 +
 tensorflow/contrib/distributions/__init__.py | 9 +
 tensorflow/contrib/graph_editor/__init__.py | 4 +
 tensorflow/contrib/graph_editor/select.py | 3 +-
 tensorflow/contrib/image/__init__.py | 9 +-
 tensorflow/contrib/input_pipeline/BUILD | 2 +-
 tensorflow/contrib/input_pipeline/__init__.py | 5 +-
 tensorflow/contrib/integrate/__init__.py | 5 +-
 tensorflow/contrib/layers/__init__.py | 32 +-
 .../layers/python/layers/feature_column.py | 6 +-
 .../python/layers/feature_column_test.py | 7 +-
 tensorflow/contrib/learn/__init__.py | 25 +-
 .../contrib/learn/python/learn/__init__.py | 2 -
 .../python/learn/basic_session_run_hooks.py | 5 +-
 .../learn/python/learn/estimators/__init__.py | 1 +
 .../python/learn/estimators/estimator_test.py | 19 +-
 .../learn/python/learn/learn_io/numpy_io.py | 2 +-
 .../tests/dataframe/feeding_functions_test.py | 2 +-
 .../dataframe/feeding_queue_runner_test.py | 2 +-
 .../tests/dataframe/reader_source_test.py | 3 +-
 tensorflow/contrib/linalg/__init__.py | 3 +
 .../python/ops/linear_operator_test_util.py | 2 +-
 .../contrib/linear_optimizer/__init__.py | 5 +
 .../ops/sharded_mutable_dense_hashtable.py | 8 +-
 tensorflow/contrib/lookup/__init__.py | 7 +
 tensorflow/contrib/lookup/lookup_ops_test.py | 464 +++++++++---------
 tensorflow/contrib/losses/__init__.py | 6 +-
 tensorflow/contrib/metrics/__init__.py | 7 +-
 .../metrics/python/ops/histogram_ops.py | 2 +-
 .../contrib/metrics/python/ops/metric_ops.py | 2 +-
 .../metrics/python/ops/metric_ops_test.py | 2 +-
 tensorflow/contrib/nn/__init__.py | 10 +-
 tensorflow/contrib/opt/__init__.py | 9 +
 tensorflow/contrib/rnn/__init__.py | 7 +-
 tensorflow/contrib/seq2seq/__init__.py | 18 +-
 .../contrib/stat_summarizer/__init__.py | 7 +
 tensorflow/contrib/tfprof/__init__.py | 1 -
 tensorflow/contrib/util/__init__.py | 7 +-
 tensorflow/contrib/util/loader.py | 5 +-
 tensorflow/python/framework/docs.py | 53 +-
 .../python/framework/gen_docs_combined.py | 7 +
 46 files changed, 500 insertions(+), 308 deletions(-)

diff --git a/tensorflow/contrib/__init__.py b/tensorflow/contrib/__init__.py
index b0022423109..9404b7a1463 100644
--- a/tensorflow/contrib/__init__.py
+++ b/tensorflow/contrib/__init__.py
@@ -58,3 +58,7 @@ from tensorflow.contrib import training
 from tensorflow.contrib import util
 from tensorflow.contrib.ndlstm import python as ndlstm
 from tensorflow.contrib.specs import python as specs
+
+del absolute_import
+del division
+del print_function
diff --git a/tensorflow/contrib/bayesflow/__init__.py b/tensorflow/contrib/bayesflow/__init__.py
index 53dac356750..baa5748eb62 100644
--- a/tensorflow/contrib/bayesflow/__init__.py
+++ b/tensorflow/contrib/bayesflow/__init__.py
@@ -30,3 +30,13 @@ from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor
 from tensorflow.contrib.bayesflow.python.ops import stochastic_variables
 from tensorflow.contrib.bayesflow.python.ops import variational_inference
 # pylint: enable=unused-import,line-too-long
+
+from tensorflow.python.util.all_util import remove_undocumented + + +_allowed_symbols = ['entropy', 'monte_carlo', + 'special_math', 'stochastic_gradient_estimators', + 'stochastic_graph', 'stochastic_tensor', + 'stochastic_variables', 'variational_inference'] + +remove_undocumented(__name__, _allowed_symbols) diff --git a/tensorflow/contrib/copy_graph/__init__.py b/tensorflow/contrib/copy_graph/__init__.py index 4e9f34ca8c1..96dc0d7df2d 100644 --- a/tensorflow/contrib/copy_graph/__init__.py +++ b/tensorflow/contrib/copy_graph/__init__.py @@ -20,4 +20,9 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function +from tensorflow.contrib.copy_graph.python.util import copy_elements from tensorflow.contrib.copy_graph.python.util.copy_elements import * + +from tensorflow.python.util.all_util import remove_undocumented + +remove_undocumented(__name__, doc_string_modules=[copy_elements]) diff --git a/tensorflow/contrib/crf/__init__.py b/tensorflow/contrib/crf/__init__.py index 195e8cd7171..7f7818c845d 100644 --- a/tensorflow/contrib/crf/__init__.py +++ b/tensorflow/contrib/crf/__init__.py @@ -37,3 +37,7 @@ from tensorflow.contrib.crf.python.ops.crf import crf_sequence_score from tensorflow.contrib.crf.python.ops.crf import crf_unary_score from tensorflow.contrib.crf.python.ops.crf import CrfForwardRnnCell from tensorflow.contrib.crf.python.ops.crf import viterbi_decode + +from tensorflow.python.util.all_util import remove_undocumented + +remove_undocumented(__name__) diff --git a/tensorflow/contrib/cudnn_rnn/__init__.py b/tensorflow/contrib/cudnn_rnn/__init__.py index b7ac5e7146f..7a8224fa5eb 100644 --- a/tensorflow/contrib/cudnn_rnn/__init__.py +++ b/tensorflow/contrib/cudnn_rnn/__init__.py @@ -23,3 +23,6 @@ from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import CudnnLSTM from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import CudnnRNNRelu from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import CudnnRNNTanh from tensorflow.contrib.cudnn_rnn.python.ops.cudnn_rnn_ops import RNNParamsSaveable + +from tensorflow.python.util.all_util import remove_undocumented +remove_undocumented(__name__) diff --git a/tensorflow/contrib/deprecated/__init__.py b/tensorflow/contrib/deprecated/__init__.py index 2c94882cd75..befb8e6198b 100644 --- a/tensorflow/contrib/deprecated/__init__.py +++ b/tensorflow/contrib/deprecated/__init__.py @@ -95,3 +95,10 @@ from tensorflow.python.ops.logging_ops import merge_all_summaries from tensorflow.python.ops.logging_ops import merge_summary from tensorflow.python.ops.logging_ops import scalar_summary # pylint: enable=unused-import,line-too-long + +from tensorflow.python.util.all_util import remove_undocumented +_allowed_symbols = ['audio_summary', 'histogram_summary', + 'image_summary', 'merge_all_summaries', + 'merge_summary', 'scalar_summary'] + +remove_undocumented(__name__, _allowed_symbols) diff --git a/tensorflow/contrib/distributions/__init__.py b/tensorflow/contrib/distributions/__init__.py index 01896c52440..f822f723eb0 100644 --- a/tensorflow/contrib/distributions/__init__.py +++ b/tensorflow/contrib/distributions/__init__.py @@ -134,3 +134,12 @@ from tensorflow.contrib.distributions.python.ops.uniform import * from tensorflow.contrib.distributions.python.ops.wishart import * # pylint: enable=unused-import,wildcard-import,line-too-long,g-importing-member + +from tensorflow.python.util.all_util import remove_undocumented + +_allowed_symbols = ['bijector', + 'ConditionalDistribution', + 
'ConditionalTransformedDistribution', + 'FULLY_REPARAMETERIZED', 'NOT_REPARAMETERIZED'] + +remove_undocumented(__name__, _allowed_symbols) diff --git a/tensorflow/contrib/graph_editor/__init__.py b/tensorflow/contrib/graph_editor/__init__.py index 04a4cbb8198..47905cc9927 100644 --- a/tensorflow/contrib/graph_editor/__init__.py +++ b/tensorflow/contrib/graph_editor/__init__.py @@ -132,3 +132,7 @@ from tensorflow.contrib.graph_editor import util as _util ph = _util.make_placeholder_from_dtype_and_shape sgv = _subgraph.make_view sgv_scope = _subgraph.make_view_from_scope + +del absolute_import +del division +del print_function diff --git a/tensorflow/contrib/graph_editor/select.py b/tensorflow/contrib/graph_editor/select.py index 2401a3a1578..706c4091189 100644 --- a/tensorflow/contrib/graph_editor/select.py +++ b/tensorflow/contrib/graph_editor/select.py @@ -12,8 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Various ways of selecting operations and tensors in a graph. -""" +"""Various ways of selecting operations and tensors in a graph.""" from __future__ import absolute_import from __future__ import division diff --git a/tensorflow/contrib/image/__init__.py b/tensorflow/contrib/image/__init__.py index 03ca71e3f56..9d3ea508683 100644 --- a/tensorflow/contrib/image/__init__.py +++ b/tensorflow/contrib/image/__init__.py @@ -21,8 +21,8 @@ transforms (including rotation) are supported. ## Image `Ops` -@@ rotate -@@ transform +@@rotate +@@transform """ from __future__ import absolute_import from __future__ import division @@ -31,3 +31,8 @@ from __future__ import print_function # pylint: disable=line-too-long from tensorflow.contrib.image.python.ops.image_ops import rotate from tensorflow.contrib.image.python.ops.image_ops import transform + +from tensorflow.python.util.all_util import remove_undocumented + + +remove_undocumented(__name__) diff --git a/tensorflow/contrib/input_pipeline/BUILD b/tensorflow/contrib/input_pipeline/BUILD index 8eb8201f08f..a6bca863899 100644 --- a/tensorflow/contrib/input_pipeline/BUILD +++ b/tensorflow/contrib/input_pipeline/BUILD @@ -52,7 +52,7 @@ tf_kernel_library( py_library( name = "input_pipeline_py", - srcs = glob(["python/ops/*.py"]), + srcs = glob(["python/ops/*.py"]) + ["__init__.py"], data = [":python/ops/_input_pipeline_ops.so"], srcs_version = "PY2AND3", deps = [ diff --git a/tensorflow/contrib/input_pipeline/__init__.py b/tensorflow/contrib/input_pipeline/__init__.py index d1219883c95..02fd4135bfa 100644 --- a/tensorflow/contrib/input_pipeline/__init__.py +++ b/tensorflow/contrib/input_pipeline/__init__.py @@ -15,11 +15,12 @@ """Ops and modules related to input_pipeline. 
@@obtain_next - """ - from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.input_pipeline.python.ops.input_pipeline_ops import obtain_next + +from tensorflow.python.util.all_util import remove_undocumented +remove_undocumented(__name__) diff --git a/tensorflow/contrib/integrate/__init__.py b/tensorflow/contrib/integrate/__init__.py index 953dc6c55ae..c951efd3d9f 100644 --- a/tensorflow/contrib/integrate/__init__.py +++ b/tensorflow/contrib/integrate/__init__.py @@ -59,6 +59,7 @@ from __future__ import print_function # pylint: disable=wildcard-import from tensorflow.contrib.integrate.python.ops.odes import * -from tensorflow.python.util.all_util import make_all +from tensorflow.python.util.all_util import remove_undocumented -__all__ = make_all(__name__) + +remove_undocumented(__name__) diff --git a/tensorflow/contrib/layers/__init__.py b/tensorflow/contrib/layers/__init__.py index b7832be73fc..c563b29de90 100644 --- a/tensorflow/contrib/layers/__init__.py +++ b/tensorflow/contrib/layers/__init__.py @@ -23,17 +23,27 @@ common machine learning algorithms. @@avg_pool2d @@batch_norm @@convolution2d +@@conv2d_in_plane @@convolution2d_in_plane +@@conv2d_transpose @@convolution2d_transpose +@@dropout @@flatten @@fully_connected @@layer_norm +@@linear @@max_pool2d @@one_hot_encoding +@@relu +@@relu6 @@repeat @@safe_embedding_lookup_sparse +@@separable_conv2d @@separable_convolution2d +@@softmax +@@stack @@unit_norm +@@embed_sequence Aliases for fully_connected which set a default activation function are available: `relu`, `relu6` and `linear`. @@ -95,6 +105,7 @@ Feature columns provide a mechanism to map data to a model. @@input_from_feature_columns @@joint_weighted_sum_from_feature_columns @@make_place_holder_tensors_for_base_features +@@multi_class_target @@one_hot_column @@parse_feature_columns_from_examples @@parse_feature_columns_from_sequence_examples @@ -105,6 +116,8 @@ Feature columns provide a mechanism to map data to a model. @@sparse_column_with_keys @@weighted_sparse_column @@weighted_sum_from_feature_columns +@@infer_real_valued_columns +@@sequence_input_from_feature_columns """ @@ -112,16 +125,21 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function -import sys - # pylint: disable=unused-import,wildcard-import from tensorflow.contrib.layers.python.layers import * -from tensorflow.contrib.layers.python.ops import sparse_ops -from tensorflow.python.util.all_util import make_all # pylint: enable=unused-import,wildcard-import +from tensorflow.python.util.all_util import remove_undocumented -# Note: `stack` operation is available, just excluded from the document above -# due to collision with tf.stack. 
+_allowed_symbols = ['bias_add', + 'conv2d', + 'feature_column', + 'legacy_fully_connected', + 'legacy_linear', + 'legacy_relu', + 'OPTIMIZER_CLS_NAMES', + 'regression_target', + 'SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY', + 'summaries'] -__all__ = make_all(__name__) +remove_undocumented(__name__, _allowed_symbols) diff --git a/tensorflow/contrib/layers/python/layers/feature_column.py b/tensorflow/contrib/layers/python/layers/feature_column.py index ba6e70bfa2c..0db53f9af98 100644 --- a/tensorflow/contrib/layers/python/layers/feature_column.py +++ b/tensorflow/contrib/layers/python/layers/feature_column.py @@ -122,11 +122,11 @@ import math import six +from tensorflow.contrib import lookup from tensorflow.contrib.layers.python.layers import layers from tensorflow.contrib.layers.python.ops import bucketization_op from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op from tensorflow.contrib.layers.python.ops import sparse_ops as contrib_sparse_ops -from tensorflow.contrib.lookup import lookup_ops as contrib_lookup_ops from tensorflow.python.framework import dtypes from tensorflow.python.framework import sparse_tensor as sparse_tensor_py from tensorflow.python.ops import array_ops @@ -587,7 +587,7 @@ class _SparseColumnKeys(_SparseColumn): """Handles sparse column to id conversion.""" input_tensor = self._get_input_sparse_tensor(columns_to_tensors) - table = contrib_lookup_ops.string_to_index_table_from_tensor( + table = lookup.string_to_index_table_from_tensor( mapping=list(self.lookup_config.keys), default_value=self.lookup_config.default_value, name="lookup") @@ -662,7 +662,7 @@ class _SparseColumnVocabulary(_SparseColumn): else: sparse_string_tensor = st - table = contrib_lookup_ops.string_to_index_table_from_file( + table = lookup.string_to_index_table_from_file( vocabulary_file=self.lookup_config.vocabulary_file, num_oov_buckets=self.lookup_config.num_oov_buckets, vocab_size=self.lookup_config.vocab_size, diff --git a/tensorflow/contrib/layers/python/layers/feature_column_test.py b/tensorflow/contrib/layers/python/layers/feature_column_test.py index d166069bd6e..a3b2c98c807 100644 --- a/tensorflow/contrib/layers/python/layers/feature_column_test.py +++ b/tensorflow/contrib/layers/python/layers/feature_column_test.py @@ -23,15 +23,18 @@ import os import sys import tempfile -# TODO: #6568 Remove this hack that makes dlopen() not crash. +# pylint: disable=g-bad-todo +# TODO(#6568): Remove this hack that makes dlopen() not crash. +# pylint: enable=g-bad-todo +# pylint: disable=g-import-not-at-top if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"): import ctypes sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL) import numpy as np +from tensorflow.contrib.layers.python.layers import feature_column as fc from tensorflow.contrib.layers.python.layers import feature_column_ops -import tensorflow.contrib.layers.python.layers.feature_column as fc from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib diff --git a/tensorflow/contrib/learn/__init__.py b/tensorflow/contrib/learn/__init__.py index 811b89e8845..2cc38fbbec7 100644 --- a/tensorflow/contrib/learn/__init__.py +++ b/tensorflow/contrib/learn/__init__.py @@ -24,13 +24,24 @@ Train and evaluate TensorFlow models. 
@@Estimator @@Trainable @@Evaluable +@@KMeansClustering @@ModeKeys +@@ModelFnOps +@@MetricSpec +@@PredictionKey @@DNNClassifier @@DNNRegressor +@@DNNLinearCombinedRegressor +@@DNNLinearCombinedClassifier @@LinearClassifier @@LinearRegressor @@LogisticRegressor +## Distributed training utilities +@@Experiment +@@ExportStrategy +@@TaskType + ## Graph actions Perform various training, evaluation, and inference actions on a graph. @@ -58,6 +69,10 @@ Queue and read batched input data. @@read_batch_features @@read_batch_record_features +Export utilities + +@@build_parsing_serving_input_fn +@@ProblemType """ from __future__ import absolute_import @@ -67,7 +82,11 @@ from __future__ import print_function # pylint: disable=wildcard-import from tensorflow.contrib.learn.python.learn import * # pylint: enable=wildcard-import -from tensorflow.python.util.all_util import make_all -__all__ = make_all(__name__) -__all__.append('datasets') +from tensorflow.python.util.all_util import remove_undocumented + +_allowed_symbols = ['datasets', 'head', 'io', 'models', + 'monitors', 'NotFittedError', 'ops', 'preprocessing', + 'utils', 'graph_actions'] + +remove_undocumented(__name__, _allowed_symbols) diff --git a/tensorflow/contrib/learn/python/learn/__init__.py b/tensorflow/contrib/learn/python/learn/__init__.py index d7b9aaffd4c..3c6d05dc0dc 100644 --- a/tensorflow/contrib/learn/python/learn/__init__.py +++ b/tensorflow/contrib/learn/python/learn/__init__.py @@ -19,8 +19,6 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function -import numpy as np - # pylint: disable=wildcard-import from tensorflow.contrib.learn.python.learn import basic_session_run_hooks from tensorflow.contrib.learn.python.learn import datasets diff --git a/tensorflow/contrib/learn/python/learn/basic_session_run_hooks.py b/tensorflow/contrib/learn/python/learn/basic_session_run_hooks.py index d239201efe2..2284ec46e97 100644 --- a/tensorflow/contrib/learn/python/learn/basic_session_run_hooks.py +++ b/tensorflow/contrib/learn/python/learn/basic_session_run_hooks.py @@ -12,10 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Some common SessionRunHook classes. 
- -@@ -""" +"""Some common SessionRunHook classes.""" from __future__ import absolute_import from __future__ import division diff --git a/tensorflow/contrib/learn/python/learn/estimators/__init__.py b/tensorflow/contrib/learn/python/learn/estimators/__init__.py index 04be0ac7fa0..2c1c0e6dd5b 100644 --- a/tensorflow/contrib/learn/python/learn/estimators/__init__.py +++ b/tensorflow/contrib/learn/python/learn/estimators/__init__.py @@ -306,6 +306,7 @@ from __future__ import division from __future__ import print_function from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError +from tensorflow.contrib.learn.python.learn.estimators.constants import ProblemType from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier diff --git a/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py b/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py index 349072976c3..c4e49fa4078 100644 --- a/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py +++ b/tensorflow/contrib/learn/python/learn/estimators/estimator_test.py @@ -25,7 +25,10 @@ import os import sys import tempfile -# TODO: #6568 Remove this hack that makes dlopen() not crash. +# pylint: disable=g-bad-todo +# TODO(#6568): Remove this hack that makes dlopen() not crash. +# pylint: enable=g-bad-todo +# pylint: disable=g-import-not-at-top if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'): import ctypes sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL) @@ -35,6 +38,7 @@ import six from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.contrib import learn +from tensorflow.contrib import lookup from tensorflow.contrib.framework.python.ops import variables from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib from tensorflow.contrib.layers.python.layers import optimizers @@ -48,7 +52,6 @@ from tensorflow.contrib.learn.python.learn.estimators import linear from tensorflow.contrib.learn.python.learn.estimators import model_fn from tensorflow.contrib.learn.python.learn.estimators import run_config from tensorflow.contrib.learn.python.learn.utils import input_fn_utils -from tensorflow.contrib.lookup import lookup_ops from tensorflow.contrib.metrics.python.ops import metric_ops from tensorflow.contrib.testing.python.framework import util_test from tensorflow.python.client import session as session_lib @@ -221,8 +224,8 @@ def _build_estimator_for_export_tests(tmpdir): vocab_file = gfile.GFile(vocab_file_name, mode='w') vocab_file.write(VOCAB_FILE_CONTENT) vocab_file.close() - hashtable = lookup_ops.HashTable( - lookup_ops.TextFileStringTableInitializer(vocab_file_name), 'x') + hashtable = lookup.HashTable( + lookup.TextFileStringTableInitializer(vocab_file_name), 'x') features['bogus_lookup'] = hashtable.lookup( math_ops.to_int64(features['feature'])) @@ -878,8 +881,8 @@ class ReplicaDeviceSetterTest(test.TestCase): with ops.device(estimator._get_replica_device_setter(config)): default_val = constant_op.constant([-1, -1], dtypes.int64) - table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64, - default_val) + table = lookup.MutableHashTable(dtypes.string, dtypes.int64, + default_val) input_string = constant_op.constant(['brain', 'salad', 'tank']) output = table.lookup(input_string) 
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device) @@ -889,8 +892,8 @@ class ReplicaDeviceSetterTest(test.TestCase): with ops.device( estimator._get_replica_device_setter(run_config.RunConfig())): default_val = constant_op.constant([-1, -1], dtypes.int64) - table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64, - default_val) + table = lookup.MutableHashTable(dtypes.string, dtypes.int64, + default_val) input_string = constant_op.constant(['brain', 'salad', 'tank']) output = table.lookup(input_string) self.assertDeviceEqual('', table._table_ref.device) diff --git a/tensorflow/contrib/learn/python/learn/learn_io/numpy_io.py b/tensorflow/contrib/learn/python/learn/learn_io/numpy_io.py index 0b27e2ae0f9..69610018390 100644 --- a/tensorflow/contrib/learn/python/learn/learn_io/numpy_io.py +++ b/tensorflow/contrib/learn/python/learn/learn_io/numpy_io.py @@ -97,7 +97,7 @@ def numpy_input_fn(x, shape_dict_of_x = {k: x[k].shape for k in x.keys()} shape_of_y = None if y is None else y.shape raise ValueError('Length of tensors in x and y is mismatched. All ' - 'elementson x and y must have the same length.\n' + 'elements in x and y must have the same length.\n' 'Shapes in x: {}\n' 'Shape for y: {}\n'.format(shape_dict_of_x, shape_of_y)) diff --git a/tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_functions_test.py b/tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_functions_test.py index a58a185f5cf..2b6a300193b 100644 --- a/tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_functions_test.py +++ b/tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_functions_test.py @@ -27,7 +27,7 @@ if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"): import numpy as np -import tensorflow.contrib.learn.python.learn.dataframe.queues.feeding_functions as ff +from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff from tensorflow.python.platform import test # pylint: disable=g-import-not-at-top diff --git a/tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py b/tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py index a4c19147b6d..125a3e13d52 100644 --- a/tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py +++ b/tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py @@ -27,7 +27,7 @@ if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"): import numpy as np -import tensorflow.contrib.learn.python.learn.dataframe.queues.feeding_functions as ff +from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff from tensorflow.python.client import session from tensorflow.python.framework import ops from tensorflow.python.platform import test diff --git a/tensorflow/contrib/learn/python/learn/tests/dataframe/reader_source_test.py b/tensorflow/contrib/learn/python/learn/tests/dataframe/reader_source_test.py index 94eae51a99e..74f6bfd5c69 100644 --- a/tensorflow/contrib/learn/python/learn/tests/dataframe/reader_source_test.py +++ b/tensorflow/contrib/learn/python/learn/tests/dataframe/reader_source_test.py @@ -24,7 +24,8 @@ if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"): import ctypes sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL) -import tensorflow.contrib.learn.python.learn.dataframe.transforms.reader_source as rs +# pylint: disable=g-import-not-at-top +from 
tensorflow.contrib.learn.python.learn.dataframe.transforms import reader_source as rs from tensorflow.python.ops import io_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test diff --git a/tensorflow/contrib/linalg/__init__.py b/tensorflow/contrib/linalg/__init__.py index f4e1c6d7197..3fe0c5f761b 100644 --- a/tensorflow/contrib/linalg/__init__.py +++ b/tensorflow/contrib/linalg/__init__.py @@ -54,3 +54,6 @@ from tensorflow.contrib.linalg.python.ops.linear_operator_matrix import * from tensorflow.contrib.linalg.python.ops.linear_operator_tril import * # pylint: enable=unused-import,wildcard-import,line-too-long,g-importing-member + +from tensorflow.python.util.all_util import remove_undocumented +remove_undocumented(__name__) diff --git a/tensorflow/contrib/linalg/python/ops/linear_operator_test_util.py b/tensorflow/contrib/linalg/python/ops/linear_operator_test_util.py index 5de9bb5d775..18dec3dd9ef 100644 --- a/tensorflow/contrib/linalg/python/ops/linear_operator_test_util.py +++ b/tensorflow/contrib/linalg/python/ops/linear_operator_test_util.py @@ -22,7 +22,7 @@ import abc import numpy as np import six -from tensorflow.contrib.framework import tensor_util as contrib_tensor_util +from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed diff --git a/tensorflow/contrib/linear_optimizer/__init__.py b/tensorflow/contrib/linear_optimizer/__init__.py index 83bd8b5fcf0..d447487b4a5 100644 --- a/tensorflow/contrib/linear_optimizer/__init__.py +++ b/tensorflow/contrib/linear_optimizer/__init__.py @@ -17,6 +17,8 @@ ## This package provides optimizers to train linear models. @@SdcaModel +@@SparseFeatureColumn +@@SDCAOptimizer """ from __future__ import absolute_import from __future__ import division @@ -25,3 +27,6 @@ from __future__ import print_function from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel from tensorflow.contrib.linear_optimizer.python.ops.sparse_feature_column import SparseFeatureColumn from tensorflow.contrib.linear_optimizer.python.sdca_optimizer import SDCAOptimizer + +from tensorflow.python.util.all_util import remove_undocumented +remove_undocumented(__name__) diff --git a/tensorflow/contrib/linear_optimizer/python/ops/sharded_mutable_dense_hashtable.py b/tensorflow/contrib/linear_optimizer/python/ops/sharded_mutable_dense_hashtable.py index 494dfb6c990..7e214905b13 100644 --- a/tensorflow/contrib/linear_optimizer/python/ops/sharded_mutable_dense_hashtable.py +++ b/tensorflow/contrib/linear_optimizer/python/ops/sharded_mutable_dense_hashtable.py @@ -20,7 +20,7 @@ from __future__ import print_function from six.moves import range -from tensorflow.contrib.lookup import lookup_ops +from tensorflow.contrib import lookup from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape @@ -30,7 +30,7 @@ from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import math_ops -class ShardedMutableDenseHashTable(lookup_ops.LookupInterface): +class ShardedMutableDenseHashTable(lookup.LookupInterface): """A sharded version of MutableDenseHashTable. It is designed to be interface compatible with LookupInterface and @@ -41,7 +41,7 @@ class ShardedMutableDenseHashTable(lookup_ops.LookupInterface): internally. 
The shard is computed via the modulo operation on the key. """ - # TODO(andreasst): consider moving this to lookup_ops + # TODO(andreasst): consider moving this to lookup module def __init__(self, key_dtype, @@ -56,7 +56,7 @@ class ShardedMutableDenseHashTable(lookup_ops.LookupInterface): table_shards = [] for i in range(num_shards): table_shards.append( - lookup_ops.MutableDenseHashTable( + lookup.MutableDenseHashTable( key_dtype=key_dtype, value_dtype=value_dtype, default_value=default_value, diff --git a/tensorflow/contrib/lookup/__init__.py b/tensorflow/contrib/lookup/__init__.py index fadf42000fe..e743832e807 100644 --- a/tensorflow/contrib/lookup/__init__.py +++ b/tensorflow/contrib/lookup/__init__.py @@ -25,6 +25,7 @@ @@IdTableWithHashBuckets @@HashTable @@MutableHashTable +@@MutableDenseHashTable @@TableInitializerBase @@KeyValueTensorInitializer @@TextFileIndex @@ -32,6 +33,9 @@ @@TextFileIdTableInitializer @@TextFileStringTableInitializer +@@HasherSpec +@@StrongHashSpec +@@FastHashSpec """ from __future__ import absolute_import @@ -41,3 +45,6 @@ from __future__ import print_function # pylint: disable=unused-import,wildcard-import from tensorflow.contrib.lookup.lookup_ops import * # pylint: enable=unused-import,wildcard-import + +from tensorflow.python.util.all_util import remove_undocumented +remove_undocumented(__name__) diff --git a/tensorflow/contrib/lookup/lookup_ops_test.py b/tensorflow/contrib/lookup/lookup_ops_test.py index 15c318b6ef7..b46db38770b 100644 --- a/tensorflow/contrib/lookup/lookup_ops_test.py +++ b/tensorflow/contrib/lookup/lookup_ops_test.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== -"""Tests for tf.contrib.lookup.lookup_ops.""" +"""Tests for tf.contrib.lookup.lookup.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function @@ -22,7 +22,7 @@ import tempfile import numpy as np import six -from tensorflow.contrib.lookup import lookup_ops +from tensorflow.contrib import lookup from tensorflow.python.client import session from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes @@ -45,8 +45,8 @@ class HashTableOpTest(test.TestCase): default_val = -1 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) - table = lookup_ops.HashTable( - lookup_ops.KeyValueTensorInitializer(keys, values), default_val) + table = lookup.HashTable( + lookup.KeyValueTensorInitializer(keys, values), default_val) table.init.run() self.assertAllEqual(3, table.size().eval()) @@ -63,8 +63,8 @@ class HashTableOpTest(test.TestCase): default_val = -1 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) - table = lookup_ops.HashTable( - lookup_ops.KeyValueTensorInitializer(keys, values), default_val) + table = lookup.HashTable( + lookup.KeyValueTensorInitializer(keys, values), default_val) table.init.run() self.assertAllEqual(3, table.size().eval()) @@ -81,8 +81,8 @@ class HashTableOpTest(test.TestCase): default_val = -1 keys = ["brain", "salad", "surgery"] values = [0, 1, 2] - table = lookup_ops.HashTable( - lookup_ops.KeyValueTensorInitializer( + table = lookup.HashTable( + lookup.KeyValueTensorInitializer( keys, values, value_dtype=dtypes.int64), default_val) table.init.run() @@ -100,8 +100,8 @@ class 
HashTableOpTest(test.TestCase): default_val = -1 keys = np.array(["brain", "salad", "surgery"], dtype=np.str) values = np.array([0, 1, 2], dtype=np.int64) - table = lookup_ops.HashTable( - lookup_ops.KeyValueTensorInitializer(keys, values), default_val) + table = lookup.HashTable( + lookup.KeyValueTensorInitializer(keys, values), default_val) table.init.run() self.assertAllEqual(3, table.size().eval()) @@ -118,12 +118,12 @@ class HashTableOpTest(test.TestCase): keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) - table1 = lookup_ops.HashTable( - lookup_ops.KeyValueTensorInitializer(keys, values), default_val) - table2 = lookup_ops.HashTable( - lookup_ops.KeyValueTensorInitializer(keys, values), default_val) - table3 = lookup_ops.HashTable( - lookup_ops.KeyValueTensorInitializer(keys, values), default_val) + table1 = lookup.HashTable( + lookup.KeyValueTensorInitializer(keys, values), default_val) + table2 = lookup.HashTable( + lookup.KeyValueTensorInitializer(keys, values), default_val) + table3 = lookup.HashTable( + lookup.KeyValueTensorInitializer(keys, values), default_val) data_flow_ops.tables_initializer().run() self.assertAllEqual(3, table1.size().eval()) @@ -145,8 +145,8 @@ class HashTableOpTest(test.TestCase): default_val = constant_op.constant(-1, dtypes.int64) keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) - table = lookup_ops.HashTable( - lookup_ops.KeyValueTensorInitializer(keys, values), default_val) + table = lookup.HashTable( + lookup.KeyValueTensorInitializer(keys, values), default_val) table.init.run() input_string = constant_op.constant(["brain", "salad", "tank"]) @@ -160,8 +160,8 @@ class HashTableOpTest(test.TestCase): default_val = constant_op.constant(-1, dtypes.int64) keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) - table = lookup_ops.HashTable( - lookup_ops.KeyValueTensorInitializer(keys, values), default_val) + table = lookup.HashTable( + lookup.KeyValueTensorInitializer(keys, values), default_val) table.init.run() sp_indices = [[0, 0], [0, 1], [1, 0]] @@ -183,8 +183,8 @@ class HashTableOpTest(test.TestCase): default_val = -1 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) - table = lookup_ops.HashTable( - lookup_ops.KeyValueTensorInitializer(keys, values), default_val) + table = lookup.HashTable( + lookup.KeyValueTensorInitializer(keys, values), default_val) table.init.run() input_string = constant_op.constant([1, 2, 3], dtypes.int64) @@ -192,22 +192,22 @@ class HashTableOpTest(test.TestCase): table.lookup(input_string) with self.assertRaises(TypeError): - lookup_ops.HashTable( - lookup_ops.KeyValueTensorInitializer(keys, values), "UNK") + lookup.HashTable( + lookup.KeyValueTensorInitializer(keys, values), "UNK") def testDTypes(self): with self.test_session(): default_val = -1 with self.assertRaises(TypeError): - lookup_ops.HashTable( - lookup_ops.KeyValueTensorInitializer(["a"], [1], [dtypes.string], - dtypes.int64), default_val) + lookup.HashTable( + lookup.KeyValueTensorInitializer(["a"], [1], [dtypes.string], + dtypes.int64), default_val) def testNotInitialized(self): with self.test_session(): default_val = -1 - table = lookup_ops.HashTable( - lookup_ops.KeyValueTensorInitializer( + table = lookup.HashTable( + lookup.KeyValueTensorInitializer( ["a"], [1], value_dtype=dtypes.int64), default_val) @@ 
-222,8 +222,8 @@ class HashTableOpTest(test.TestCase): default_val = -1 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) - table = lookup_ops.HashTable( - lookup_ops.KeyValueTensorInitializer(keys, values), default_val) + table = lookup.HashTable( + lookup.KeyValueTensorInitializer(keys, values), default_val) table.init.run() with self.assertRaisesOpError("Table already initialized"): @@ -236,8 +236,8 @@ class HashTableOpTest(test.TestCase): values = constant_op.constant([0, 1, 2, 3, 4], dtypes.int64) with self.assertRaises(ValueError): - lookup_ops.HashTable( - lookup_ops.KeyValueTensorInitializer(keys, values), default_val) + lookup.HashTable( + lookup.KeyValueTensorInitializer(keys, values), default_val) def testMultipleSessions(self): # Start a server @@ -252,8 +252,8 @@ class HashTableOpTest(test.TestCase): default_val = -1 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) - table = lookup_ops.HashTable( - lookup_ops.KeyValueTensorInitializer(keys, values), + table = lookup.HashTable( + lookup.KeyValueTensorInitializer(keys, values), default_val, name="t1") @@ -276,8 +276,8 @@ class MutableHashTableOpTest(test.TestCase): default_val = -1 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) - table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64, - default_val) + table = lookup.MutableHashTable(dtypes.string, dtypes.int64, + default_val) self.assertAllEqual(0, table.size().eval()) table.insert(keys, values).run() @@ -311,7 +311,7 @@ class MutableHashTableOpTest(test.TestCase): default_val = -1 keys = constant_op.constant(["b", "c", "d"], dtypes.string) values = constant_op.constant([0, 1, 2], dtypes.int64) - table = lookup_ops.MutableHashTable( + table = lookup.MutableHashTable( dtypes.string, dtypes.int64, default_val, name="t1", checkpoint=True) save = saver.Saver() @@ -333,7 +333,7 @@ class MutableHashTableOpTest(test.TestCase): v0 = variables.Variable(-1.0, name="v0") v1 = variables.Variable(-1.0, name="v1") default_val = -1 - table = lookup_ops.MutableHashTable( + table = lookup.MutableHashTable( dtypes.string, dtypes.int64, default_val, name="t1", checkpoint=True) table.insert( constant_op.constant(["a", "c"], dtypes.string), @@ -365,7 +365,7 @@ class MutableHashTableOpTest(test.TestCase): session1 = session.Session(server.target) session2 = session.Session(server.target) - table = lookup_ops.MutableHashTable( + table = lookup.MutableHashTable( dtypes.int64, dtypes.string, "-", name="t1") # Populate the table in the first session @@ -392,8 +392,8 @@ class MutableHashTableOpTest(test.TestCase): default_val = constant_op.constant([-1, -1], dtypes.int64) keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([[0, 1], [2, 3], [4, 5]], dtypes.int64) - table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64, - default_val) + table = lookup.MutableHashTable(dtypes.string, dtypes.int64, + default_val) self.assertAllEqual(0, table.size().eval()) table.insert(keys, values).run() @@ -420,8 +420,8 @@ class MutableHashTableOpTest(test.TestCase): default_val = constant_op.constant([-1, -1], dtypes.int64) keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([[0, 1], [2, 3], [4, 5]], dtypes.int64) - table1 = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64, - default_val) + table1 = 
lookup.MutableHashTable(dtypes.string, dtypes.int64, + default_val) self.assertAllEqual(0, table1.size().eval()) table1.insert(keys, values).run() self.assertAllEqual(3, table1.size().eval()) @@ -436,8 +436,8 @@ class MutableHashTableOpTest(test.TestCase): self.assertAllEqual(6, exported_values.eval().size) # Populate a second table from the exported data - table2 = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64, - default_val) + table2 = lookup.MutableHashTable(dtypes.string, dtypes.int64, + default_val) self.assertAllEqual(0, table2.size().eval()) table2.insert(exported_keys, exported_values).run() self.assertAllEqual(3, table2.size().eval()) @@ -450,8 +450,8 @@ class MutableHashTableOpTest(test.TestCase): with self.test_session(): default_val = constant_op.constant([-1, -1], dtypes.int64) keys = constant_op.constant(["brain", "salad", "surgery"]) - table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64, - default_val) + table = lookup.MutableHashTable(dtypes.string, dtypes.int64, + default_val) # Shape [6] instead of [3, 2] values = constant_op.constant([0, 1, 2, 3, 4, 5], dtypes.int64) @@ -481,8 +481,8 @@ class MutableHashTableOpTest(test.TestCase): def testMutableHashTableInvalidDefaultValue(self): with self.test_session(): default_val = constant_op.constant([[-1, -1]], dtypes.int64) - table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64, - default_val) + table = lookup.MutableHashTable(dtypes.string, dtypes.int64, + default_val) with self.assertRaisesOpError("Default value must be a vector"): self.assertAllEqual(0, table.size().eval()) @@ -491,8 +491,8 @@ class MutableHashTableOpTest(test.TestCase): default_val = -1 keys = constant_op.constant(["brain", "salad", "surgery", "brain"]) values = constant_op.constant([0, 1, 2, 3], dtypes.int64) - table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64, - default_val) + table = lookup.MutableHashTable(dtypes.string, dtypes.int64, + default_val) self.assertAllEqual(0, table.size().eval()) table.insert(keys, values).run() @@ -509,8 +509,8 @@ class MutableHashTableOpTest(test.TestCase): default_val = -1 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) - table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64, - default_val) + table = lookup.MutableHashTable(dtypes.string, dtypes.int64, + default_val) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) @@ -528,8 +528,8 @@ class MutableHashTableOpTest(test.TestCase): default_val = -1 keys = constant_op.constant([["brain", "salad"], ["surgery", "tank"]]) values = constant_op.constant([[0, 1], [2, 3]], dtypes.int64) - table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64, - default_val) + table = lookup.MutableHashTable(dtypes.string, dtypes.int64, + default_val) table.insert(keys, values).run() self.assertAllEqual(4, table.size().eval()) @@ -546,8 +546,8 @@ class MutableHashTableOpTest(test.TestCase): keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([[0, 1, 2], [2, 3, 4], [4, 5, 6]], dtypes.int64) - table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64, - default_val) + table = lookup.MutableHashTable(dtypes.string, dtypes.int64, + default_val) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) @@ -567,12 +567,12 @@ class MutableHashTableOpTest(test.TestCase): keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) - 
table1 = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64, - default_val) - table2 = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64, - default_val) - table3 = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64, - default_val) + table1 = lookup.MutableHashTable(dtypes.string, dtypes.int64, + default_val) + table2 = lookup.MutableHashTable(dtypes.string, dtypes.int64, + default_val) + table3 = lookup.MutableHashTable(dtypes.string, dtypes.int64, + default_val) table1.insert(keys, values).run() table2.insert(keys, values).run() table3.insert(keys, values).run() @@ -596,8 +596,8 @@ class MutableHashTableOpTest(test.TestCase): default_val = constant_op.constant(-1, dtypes.int64) keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) - table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64, - default_val) + table = lookup.MutableHashTable(dtypes.string, dtypes.int64, + default_val) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) @@ -613,8 +613,8 @@ class MutableHashTableOpTest(test.TestCase): default_val = -1 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) - table = lookup_ops.MutableHashTable(dtypes.string, dtypes.int64, - default_val) + table = lookup.MutableHashTable(dtypes.string, dtypes.int64, + default_val) # insert with keys of the wrong type with self.assertRaises(TypeError): @@ -636,15 +636,15 @@ class MutableHashTableOpTest(test.TestCase): # default value of the wrong type with self.assertRaises(TypeError): - lookup_ops.MutableHashTable(dtypes.string, dtypes.int64, "UNK") + lookup.MutableHashTable(dtypes.string, dtypes.int64, "UNK") def testMutableHashTableStringFloat(self): with self.test_session(): default_val = -1.5 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1.1, 2.2], dtypes.float32) - table = lookup_ops.MutableHashTable(dtypes.string, dtypes.float32, - default_val) + table = lookup.MutableHashTable(dtypes.string, dtypes.float32, + default_val) self.assertAllEqual(0, table.size().eval()) table.insert(keys, values).run() @@ -661,8 +661,8 @@ class MutableHashTableOpTest(test.TestCase): default_val = "n/a" keys = constant_op.constant([0, 1, 2], dtypes.int64) values = constant_op.constant(["brain", "salad", "surgery"]) - table = lookup_ops.MutableHashTable(dtypes.int64, dtypes.string, - default_val) + table = lookup.MutableHashTable(dtypes.int64, dtypes.string, + default_val) self.assertAllEqual(0, table.size().eval()) table.insert(keys, values).run() @@ -681,7 +681,7 @@ class MutableDenseHashTableOpTest(test.TestCase): with self.test_session(): keys = constant_op.constant([11, 12, 13], dtypes.int64) values = constant_op.constant([0, 1, 2], dtypes.int64) - table = lookup_ops.MutableDenseHashTable( + table = lookup.MutableDenseHashTable( dtypes.int64, dtypes.int64, default_value=-1, empty_key=0) self.assertAllEqual(0, table.size().eval()) @@ -699,7 +699,7 @@ class MutableDenseHashTableOpTest(test.TestCase): with self.test_session(): keys = constant_op.constant([11, 12, 13], dtypes.int64) values = constant_op.constant([0, 1, 2], dtypes.int64) - table = lookup_ops.MutableDenseHashTable( + table = lookup.MutableDenseHashTable( dtypes.int64, dtypes.int64, default_value=-1, empty_key=0) table.insert(keys, values).run() @@ -716,7 +716,7 @@ class MutableDenseHashTableOpTest(test.TestCase): keys = constant_op.constant(["a", "b", "c"], dtypes.string) values = 
constant_op.constant([0.0, 1.1, 2.2], dtypes.float32) default_value = constant_op.constant(-1.5, dtypes.float32) - table = lookup_ops.MutableDenseHashTable( + table = lookup.MutableDenseHashTable( dtypes.string, dtypes.float32, default_value=default_value, @@ -739,7 +739,7 @@ class MutableDenseHashTableOpTest(test.TestCase): keys = constant_op.constant([11, 12, 13], dtypes.int64) values = constant_op.constant([0.0, 1.1, 2.2], float_dtype) default_value = constant_op.constant(-1.5, float_dtype) - table = lookup_ops.MutableDenseHashTable( + table = lookup.MutableDenseHashTable( dtypes.int64, float_dtype, default_value=default_value, empty_key=0) self.assertAllEqual(0, table.size().eval()) @@ -759,7 +759,7 @@ class MutableDenseHashTableOpTest(test.TestCase): values = constant_op.constant([[0, 1, 2, 3], [3, 4, 5, 6], [6, 7, 8, 9]], dtypes.int64) default_value = constant_op.constant([-1, -2, -3, -4], dtypes.int64) - table = lookup_ops.MutableDenseHashTable( + table = lookup.MutableDenseHashTable( dtypes.int64, dtypes.int64, default_value=default_value, @@ -791,7 +791,7 @@ class MutableDenseHashTableOpTest(test.TestCase): values = constant_op.constant([10, 11, 12], dtypes.int64) empty_key = constant_op.constant([0, 3], dtypes.int64) default_value = constant_op.constant(-1, dtypes.int64) - table = lookup_ops.MutableDenseHashTable( + table = lookup.MutableDenseHashTable( dtypes.int64, dtypes.int64, default_value=default_value, @@ -820,7 +820,7 @@ class MutableDenseHashTableOpTest(test.TestCase): with self.test_session(): keys = constant_op.constant([11, 12, 13], dtypes.int64) values = constant_op.constant([0, 1, 2], dtypes.int64) - table = lookup_ops.MutableDenseHashTable( + table = lookup.MutableDenseHashTable( dtypes.int64, dtypes.int64, default_value=-1, @@ -848,7 +848,7 @@ class MutableDenseHashTableOpTest(test.TestCase): with self.test_session(): keys = constant_op.constant([11, 12, 13], dtypes.int64) values = constant_op.constant([1, 2, 3], dtypes.int64) - table = lookup_ops.MutableDenseHashTable( + table = lookup.MutableDenseHashTable( dtypes.int64, dtypes.int64, default_value=-1, @@ -885,7 +885,7 @@ class MutableDenseHashTableOpTest(test.TestCase): empty_key = 0 keys = constant_op.constant([11, 12, 13], dtypes.int64) values = constant_op.constant([0, 1, 2], dtypes.int64) - table = lookup_ops.MutableDenseHashTable( + table = lookup.MutableDenseHashTable( dtypes.int64, dtypes.int64, default_value=default_value, @@ -906,7 +906,7 @@ class MutableDenseHashTableOpTest(test.TestCase): self.assertEqual(save_path, val) with self.test_session(graph=ops.Graph()) as sess: - table = lookup_ops.MutableDenseHashTable( + table = lookup.MutableDenseHashTable( dtypes.int64, dtypes.int64, default_value=default_value, @@ -941,7 +941,7 @@ class MutableDenseHashTableOpTest(test.TestCase): default_value = constant_op.constant([-1, -2], dtypes.int64) keys = constant_op.constant([[11, 12], [11, 14], [13, 14]], dtypes.int64) values = constant_op.constant([[0, 1], [2, 3], [4, 5]], dtypes.int64) - table = lookup_ops.MutableDenseHashTable( + table = lookup.MutableDenseHashTable( dtypes.int64, dtypes.int64, default_value=default_value, @@ -964,7 +964,7 @@ class MutableDenseHashTableOpTest(test.TestCase): with self.test_session(graph=ops.Graph()) as sess: empty_key = constant_op.constant([11, 13], dtypes.int64) default_value = constant_op.constant([-1, -2], dtypes.int64) - table = lookup_ops.MutableDenseHashTable( + table = lookup.MutableDenseHashTable( dtypes.int64, dtypes.int64, default_value=default_value, @@ -1001,7 
+1001,7 @@ class MutableDenseHashTableOpTest(test.TestCase): default_value = constant_op.constant(-1, dtypes.int64) keys = constant_op.constant([[11, 12], [11, 14], [13, 14]], dtypes.int64) values = constant_op.constant([0, 1, 2], dtypes.int64) - table = lookup_ops.MutableDenseHashTable( + table = lookup.MutableDenseHashTable( dtypes.int64, dtypes.int64, default_value=default_value, @@ -1024,7 +1024,7 @@ class MutableDenseHashTableOpTest(test.TestCase): with self.test_session(graph=ops.Graph()) as sess: empty_key = constant_op.constant([11, 13], dtypes.int64) default_value = constant_op.constant(-1, dtypes.int64) - table = lookup_ops.MutableDenseHashTable( + table = lookup.MutableDenseHashTable( dtypes.int64, dtypes.int64, default_value=default_value, @@ -1057,7 +1057,7 @@ class MutableDenseHashTableOpTest(test.TestCase): # The values are chosen to make sure collisions occur when using GCC STL keys = constant_op.constant([11, 12, 13, 19, 20, 21], dtypes.int64) values = constant_op.constant([51, 52, 53, 54, 55, 56], dtypes.int64) - table = lookup_ops.MutableDenseHashTable( + table = lookup.MutableDenseHashTable( dtypes.int64, dtypes.int64, default_value=-1, @@ -1080,7 +1080,7 @@ class MutableDenseHashTableOpTest(test.TestCase): with self.test_session(): keys = constant_op.constant([11, 0, 13], dtypes.int64) values = constant_op.constant([0, 1, 2], dtypes.int64) - table = lookup_ops.MutableDenseHashTable( + table = lookup.MutableDenseHashTable( dtypes.int64, dtypes.int64, default_value=-1, empty_key=12) self.assertAllEqual(0, table.size().eval()) @@ -1096,7 +1096,7 @@ class MutableDenseHashTableOpTest(test.TestCase): def testErrors(self): with self.test_session(): - table = lookup_ops.MutableDenseHashTable( + table = lookup.MutableDenseHashTable( dtypes.int64, dtypes.int64, default_value=-1, empty_key=0) # Inserting the empty key returns an error @@ -1121,7 +1121,7 @@ class MutableDenseHashTableOpTest(test.TestCase): "Expected key shape"): table.insert(keys, values).run() - table2 = lookup_ops.MutableDenseHashTable( + table2 = lookup.MutableDenseHashTable( dtypes.int64, dtypes.int64, default_value=-1, @@ -1143,7 +1143,7 @@ class StringToIndexTableFromFile(test.TestCase): def test_string_to_index_table_from_file(self): vocabulary_file = self._createVocabFile("f2i_vocab1.txt") with self.test_session(): - table = lookup_ops.string_to_index_table_from_file( + table = lookup.string_to_index_table_from_file( vocabulary_file=vocabulary_file, num_oov_buckets=1) ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"])) @@ -1155,7 +1155,7 @@ class StringToIndexTableFromFile(test.TestCase): default_value = -42 vocabulary_file = self._createVocabFile("f2i_vocab2.txt") with self.test_session(): - table = lookup_ops.string_to_index_table_from_file( + table = lookup.string_to_index_table_from_file( vocabulary_file=vocabulary_file, default_value=default_value) ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"])) @@ -1166,7 +1166,7 @@ class StringToIndexTableFromFile(test.TestCase): def test_string_to_index_table_from_file_with_oov_buckets(self): vocabulary_file = self._createVocabFile("f2i_vocab3.txt") with self.test_session(): - table = lookup_ops.string_to_index_table_from_file( + table = lookup.string_to_index_table_from_file( vocabulary_file=vocabulary_file, num_oov_buckets=1000) ids = table.lookup( constant_op.constant(["salad", "surgery", "tarkus", "toccata"])) @@ -1184,13 +1184,13 @@ class StringToIndexTableFromFile(test.TestCase): def 
test_string_to_index_table_from_file_with_only_oov_buckets(self): self.assertRaises( ValueError, - lookup_ops.string_to_index_table_from_file, + lookup.string_to_index_table_from_file, vocabulary_file=None) def test_string_to_index_table_from_file_with_vocab_size_too_small(self): vocabulary_file = self._createVocabFile("f2i_vocab5.txt") with self.test_session(): - table = lookup_ops.string_to_index_table_from_file( + table = lookup.string_to_index_table_from_file( vocabulary_file=vocabulary_file, vocab_size=2) ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"])) @@ -1202,7 +1202,7 @@ class StringToIndexTableFromFile(test.TestCase): def test_string_to_index_table_from_file_with_vocab_size_too_large(self): vocabulary_file = self._createVocabFile("f2i_vocab6.txt") with self.test_session(): - table = lookup_ops.string_to_index_table_from_file( + table = lookup.string_to_index_table_from_file( vocabulary_file=vocabulary_file, vocab_size=4) self.assertRaisesRegexp(errors_impl.InvalidArgumentError, "Invalid vocab_size", table.init.run) @@ -1212,12 +1212,12 @@ class StringToIndexTableFromFile(test.TestCase): self.assertRaises( ValueError, - lookup_ops.string_to_index_table_from_file, + lookup.string_to_index_table_from_file, vocabulary_file=vocabulary_file, vocab_size=0) with self.test_session(): - table = lookup_ops.string_to_index_table_from_file( + table = lookup.string_to_index_table_from_file( vocabulary_file=vocabulary_file, vocab_size=3) ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"])) @@ -1230,17 +1230,17 @@ class StringToIndexTableFromFile(test.TestCase): vocabulary_file = self._createVocabFile("invalid_hasher.txt") with self.test_session(): with self.assertRaises(TypeError): - lookup_ops.string_to_index_table_from_file( + lookup.string_to_index_table_from_file( vocabulary_file=vocabulary_file, vocab_size=3, num_oov_buckets=1, hasher_spec=1) - table = lookup_ops.string_to_index_table_from_file( + table = lookup.string_to_index_table_from_file( vocabulary_file=vocabulary_file, vocab_size=3, num_oov_buckets=1, - hasher_spec=lookup_ops.HasherSpec("my-awesome-hash", None)) + hasher_spec=lookup.HasherSpec("my-awesome-hash", None)) self.assertRaises(ValueError, table.lookup, constant_op.constant(["salad", "surgery", "tarkus"])) @@ -1250,7 +1250,7 @@ class StringToIndexTableFromTensor(test.TestCase): def test_string_to_index_table_from_tensor_with_tensor_init(self): with self.test_session(): - table = lookup_ops.string_to_index_table_from_tensor( + table = lookup.string_to_index_table_from_tensor( mapping=["brain", "salad", "surgery"], num_oov_buckets=1) ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"])) @@ -1261,7 +1261,7 @@ class StringToIndexTableFromTensor(test.TestCase): def test_string_to_index_table_from_tensor_with_default_value(self): default_value = -42 with self.test_session(): - table = lookup_ops.string_to_index_table_from_tensor( + table = lookup.string_to_index_table_from_tensor( mapping=["brain", "salad", "surgery"], default_value=default_value) ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"])) @@ -1272,21 +1272,21 @@ class StringToIndexTableFromTensor(test.TestCase): def test_string_to_index_table_from_tensor_with_only_oov_buckets(self): with self.test_session(): with self.assertRaises(ValueError): - lookup_ops.string_to_index_table_from_tensor( + lookup.string_to_index_table_from_tensor( mapping=None, num_oov_buckets=1) def test_string_to_index_table_from_tensor_with_invalid_hashers(self): 
with self.test_session():
       with self.assertRaises(TypeError):
-        lookup_ops.string_to_index_table_from_tensor(
+        lookup.string_to_index_table_from_tensor(
            mapping=["brain", "salad", "surgery"],
            num_oov_buckets=1,
            hasher_spec=1)

-      table = lookup_ops.string_to_index_table_from_tensor(
+      table = lookup.string_to_index_table_from_tensor(
           mapping=["brain", "salad", "surgery"],
           num_oov_buckets=1,
-          hasher_spec=lookup_ops.HasherSpec("my-awesome-hash", None))
+          hasher_spec=lookup.HasherSpec("my-awesome-hash", None))

       self.assertRaises(ValueError, table.lookup,
                         constant_op.constant(["salad", "surgery", "tarkus"]))
@@ -1298,7 +1298,7 @@ class StringToIndexTest(test.TestCase):
     with self.test_session():
       mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
       feats = constant_op.constant(["salad", "surgery", "tarkus"])
-      indices = lookup_ops.string_to_index(feats, mapping=mapping_strings)
+      indices = lookup.string_to_index(feats, mapping=mapping_strings)

       self.assertRaises(errors_impl.OpError, indices.eval)
       data_flow_ops.tables_initializer().run()
@@ -1309,7 +1309,7 @@ class StringToIndexTest(test.TestCase):
     with self.test_session():
       mapping_strings = constant_op.constant(["hello", "hello"])
       feats = constant_op.constant(["hello", "hola"])
-      indices = lookup_ops.string_to_index(feats, mapping=mapping_strings)
+      _ = lookup.string_to_index(feats, mapping=mapping_strings)

       self.assertRaises(errors_impl.OpError,
                         data_flow_ops.tables_initializer().run)
@@ -1319,7 +1319,7 @@ class StringToIndexTest(test.TestCase):
     with self.test_session():
       mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
       feats = constant_op.constant(["salad", "surgery", "tarkus"])
-      indices = lookup_ops.string_to_index(
+      indices = lookup.string_to_index(
           feats, mapping=mapping_strings, default_value=default_value)

       self.assertRaises(errors_impl.OpError, indices.eval)
@@ -1338,7 +1338,7 @@ class IndexToStringTableFromFileTest(test.TestCase):
   def test_index_to_string_table(self):
     vocabulary_file = self._createVocabFile("i2f_vocab1.txt")
     with self.test_session():
-      table = lookup_ops.index_to_string_table_from_file(
+      table = lookup.index_to_string_table_from_file(
           vocabulary_file=vocabulary_file)
       features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))
       self.assertRaises(errors_impl.OpError, features.eval)
@@ -1350,7 +1350,7 @@ class IndexToStringTableFromFileTest(test.TestCase):
     default_value = b"NONE"
     vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
     with self.test_session():
-      table = lookup_ops.index_to_string_table_from_file(
+      table = lookup.index_to_string_table_from_file(
           vocabulary_file=vocabulary_file, default_value=default_value)
       features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
       self.assertRaises(errors_impl.OpError, features.eval)
@@ -1362,7 +1362,7 @@ class IndexToStringTableFromFileTest(test.TestCase):
     default_value = b"NONE"
     vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
     with self.test_session():
-      table = lookup_ops.index_to_string_table_from_file(
+      table = lookup.index_to_string_table_from_file(
           vocabulary_file=vocabulary_file,
           vocab_size=2,
           default_value=default_value)
@@ -1375,7 +1375,7 @@ class IndexToStringTableFromFileTest(test.TestCase):
   def test_index_to_string_table_with_vocab_size_too_large(self):
     vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
     with self.test_session():
-      table = lookup_ops.index_to_string_table_from_file(
+      table = lookup.index_to_string_table_from_file(
           vocabulary_file=vocabulary_file, vocab_size=4)
       features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
@@ -1387,7 +1387,7 @@ class IndexToStringTableFromFileTest(test.TestCase):
   def test_index_to_string_table_with_vocab_size(self):
     vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
     with self.test_session():
-      table = lookup_ops.index_to_string_table_from_file(
+      table = lookup.index_to_string_table_from_file(
           vocabulary_file=vocabulary_file, vocab_size=3)

       features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
@@ -1401,7 +1401,7 @@ class IndexToStringTableFromTensorTest(test.TestCase):
   def test_index_to_string_table_from_tensor(self):
     with self.test_session():
       mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
-      table = lookup_ops.index_to_string_table_from_tensor(
+      table = lookup.index_to_string_table_from_tensor(
           mapping=mapping_strings)

       indices = constant_op.constant([0, 1, 2, 3], dtypes.int64)
@@ -1415,7 +1415,7 @@ class IndexToStringTableFromTensorTest(test.TestCase):
   def test_duplicate_entries(self):
     with self.test_session():
       mapping_strings = constant_op.constant(["hello", "hello"])
-      table = lookup_ops.index_to_string_table_from_tensor(
+      table = lookup.index_to_string_table_from_tensor(
           mapping=mapping_strings)
       indices = constant_op.constant([0, 1, 4], dtypes.int64)
       features = table.lookup(indices)
@@ -1426,7 +1426,7 @@ class IndexToStringTableFromTensorTest(test.TestCase):
     default_value = b"NONE"
     with self.test_session():
       mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
-      table = lookup_ops.index_to_string_table_from_tensor(
+      table = lookup.index_to_string_table_from_tensor(
           mapping=mapping_strings, default_value=default_value)
       indices = constant_op.constant([1, 2, 4], dtypes.int64)
       features = table.lookup(indices)
@@ -1443,7 +1443,7 @@ class IndexToStringTest(test.TestCase):
     with self.test_session():
       mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
       indices = constant_op.constant([0, 1, 2, 3], dtypes.int64)
-      feats = lookup_ops.index_to_string(indices, mapping=mapping_strings)
+      feats = lookup.index_to_string(indices, mapping=mapping_strings)

       self.assertRaises(errors_impl.OpError, feats.eval)
       data_flow_ops.tables_initializer().run()
@@ -1455,7 +1455,7 @@ class IndexToStringTest(test.TestCase):
     with self.test_session():
       mapping_strings = constant_op.constant(["hello", "hello"])
       indices = constant_op.constant([0, 1, 4], dtypes.int64)
-      feats = lookup_ops.index_to_string(indices, mapping=mapping_strings)
+      feats = lookup.index_to_string(indices, mapping=mapping_strings)
       data_flow_ops.tables_initializer().run()
       self.assertAllEqual((b"hello", b"hello", b"UNK"), feats.eval())
@@ -1467,7 +1467,7 @@ class IndexToStringTest(test.TestCase):
     with self.test_session():
       mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
       indices = constant_op.constant([1, 2, 4], dtypes.int64)
-      feats = lookup_ops.index_to_string(
+      feats = lookup.index_to_string(
           indices, mapping=mapping_strings, default_value=default_value)
       self.assertRaises(errors_impl.OpError, feats.eval)
@@ -1488,11 +1488,11 @@ class InitializeTableFromFileOpTest(test.TestCase):
     with self.test_session():
       default_value = -1
-      table = lookup_ops.HashTable(
-          lookup_ops.TextFileInitializer(vocabulary_file, dtypes.string,
-                                         lookup_ops.TextFileIndex.WHOLE_LINE,
-                                         dtypes.int64,
-                                         lookup_ops.TextFileIndex.LINE_NUMBER),
+      table = lookup.HashTable(
+          lookup.TextFileInitializer(vocabulary_file, dtypes.string,
+                                     lookup.TextFileIndex.WHOLE_LINE,
+                                     dtypes.int64,
+                                     lookup.TextFileIndex.LINE_NUMBER),
          default_value)

       table.init.run()
@@ -1507,11 +1507,11 @@ class InitializeTableFromFileOpTest(test.TestCase):
     with self.test_session():
       default_value = "UNK"
-      key_index = lookup_ops.TextFileIndex.LINE_NUMBER
-      value_index = lookup_ops.TextFileIndex.WHOLE_LINE
-      table = lookup_ops.HashTable(
-          lookup_ops.TextFileInitializer(vocabulary_file, dtypes.int64,
-                                         key_index, dtypes.string, value_index),
+      key_index = lookup.TextFileIndex.LINE_NUMBER
+      value_index = lookup.TextFileIndex.WHOLE_LINE
+      table = lookup.HashTable(
+          lookup.TextFileInitializer(vocabulary_file, dtypes.int64,
+                                     key_index, dtypes.string, value_index),
          default_value)

       table.init.run()
@@ -1531,9 +1531,9 @@ class InitializeTableFromFileOpTest(test.TestCase):
       key_index = 1
       value_index = 2

-      table = lookup_ops.HashTable(
-          lookup_ops.TextFileInitializer(vocabulary_file, dtypes.string,
-                                         key_index, dtypes.int64, value_index),
+      table = lookup.HashTable(
+          lookup.TextFileInitializer(vocabulary_file, dtypes.string,
+                                     key_index, dtypes.int64, value_index),
          default_value)

       table.init.run()
@@ -1552,9 +1552,9 @@ class InitializeTableFromFileOpTest(test.TestCase):
       default_value = -1
       key_index = 2
       value_index = 1
-      table = lookup_ops.HashTable(
-          lookup_ops.TextFileInitializer(vocabulary_file, dtypes.string,
-                                         key_index, dtypes.int64, value_index),
+      table = lookup.HashTable(
+          lookup.TextFileInitializer(vocabulary_file, dtypes.string,
+                                     key_index, dtypes.int64, value_index),
          default_value)
       with self.assertRaisesOpError("is not a valid"):
         table.init.run()
@@ -1564,24 +1564,24 @@ class InitializeTableFromFileOpTest(test.TestCase):
     with self.test_session():
       default_value = "UNK"
-      key_index = lookup_ops.TextFileIndex.WHOLE_LINE
-      value_index = lookup_ops.TextFileIndex.LINE_NUMBER
+      key_index = lookup.TextFileIndex.WHOLE_LINE
+      value_index = lookup.TextFileIndex.LINE_NUMBER

       with self.assertRaises(ValueError):
-        lookup_ops.HashTable(
-            lookup_ops.TextFileInitializer(vocabulary_file, dtypes.int64,
-                                           key_index, dtypes.string,
-                                           value_index), default_value)
+        lookup.HashTable(
+            lookup.TextFileInitializer(vocabulary_file, dtypes.int64,
+                                       key_index, dtypes.string,
+                                       value_index), default_value)

   def testInvalidIndex(self):
     vocabulary_file = self._createVocabFile("one_column_4.txt")
     with self.test_session():
       default_value = -1
       key_index = 1  # second column of the line
-      value_index = lookup_ops.TextFileIndex.LINE_NUMBER
-      table = lookup_ops.HashTable(
-          lookup_ops.TextFileInitializer(vocabulary_file, dtypes.string,
-                                         key_index, dtypes.int64, value_index),
+      value_index = lookup.TextFileIndex.LINE_NUMBER
+      table = lookup.HashTable(
+          lookup.TextFileInitializer(vocabulary_file, dtypes.string,
+                                     key_index, dtypes.int64, value_index),
          default_value)

       with self.assertRaisesOpError("Invalid number of columns"):
@@ -1593,25 +1593,25 @@ class InitializeTableFromFileOpTest(test.TestCase):
     with self.test_session() as sess:
       shared_name = "shared-one-columm"
       default_value = -1
-      table1 = lookup_ops.HashTable(
-          lookup_ops.TextFileInitializer(vocabulary_file, dtypes.string,
-                                         lookup_ops.TextFileIndex.WHOLE_LINE,
-                                         dtypes.int64,
-                                         lookup_ops.TextFileIndex.LINE_NUMBER),
+      table1 = lookup.HashTable(
+          lookup.TextFileInitializer(vocabulary_file, dtypes.string,
+                                     lookup.TextFileIndex.WHOLE_LINE,
+                                     dtypes.int64,
+                                     lookup.TextFileIndex.LINE_NUMBER),
          default_value,
          shared_name=shared_name)
-      table2 = lookup_ops.HashTable(
-          lookup_ops.TextFileInitializer(vocabulary_file, dtypes.string,
-                                         lookup_ops.TextFileIndex.WHOLE_LINE,
-                                         dtypes.int64,
-                                         lookup_ops.TextFileIndex.LINE_NUMBER),
+      table2 = lookup.HashTable(
+          lookup.TextFileInitializer(vocabulary_file, dtypes.string,
+                                     lookup.TextFileIndex.WHOLE_LINE,
+                                     dtypes.int64,
+                                     lookup.TextFileIndex.LINE_NUMBER),
          default_value,
          shared_name=shared_name)
-      table3 = lookup_ops.HashTable(
-          lookup_ops.TextFileInitializer(vocabulary_file, dtypes.string,
-                                         lookup_ops.TextFileIndex.WHOLE_LINE,
-                                         dtypes.int64,
-                                         lookup_ops.TextFileIndex.LINE_NUMBER),
+      table3 = lookup.HashTable(
+          lookup.TextFileInitializer(vocabulary_file, dtypes.string,
+                                     lookup.TextFileIndex.WHOLE_LINE,
+                                     dtypes.int64,
+                                     lookup.TextFileIndex.LINE_NUMBER),
          default_value,
          shared_name=shared_name)
@@ -1632,10 +1632,10 @@ class InitializeTableFromFileOpTest(test.TestCase):
     with self.test_session():
       default_value = -1
       with self.assertRaises(ValueError):
-        lookup_ops.HashTable(
-            lookup_ops.TextFileInitializer(
-                "", dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
-                dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER),
+        lookup.HashTable(
+            lookup.TextFileInitializer(
+                "", dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
+                dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
            default_value)

   def testInitializeWithVocabSize(self):
@@ -1643,13 +1643,13 @@ class InitializeTableFromFileOpTest(test.TestCase):
       default_value = -1
       vocab_size = 3
       vocabulary_file1 = self._createVocabFile("one_column6.txt")
-      table1 = lookup_ops.HashTable(
-          lookup_ops.TextFileInitializer(
+      table1 = lookup.HashTable(
+          lookup.TextFileInitializer(
              vocabulary_file1,
              dtypes.string,
-             lookup_ops.TextFileIndex.WHOLE_LINE,
+             lookup.TextFileIndex.WHOLE_LINE,
              dtypes.int64,
-             lookup_ops.TextFileIndex.LINE_NUMBER,
+             lookup.TextFileIndex.LINE_NUMBER,
              vocab_size=vocab_size),
          default_value)
@@ -1659,13 +1659,13 @@ class InitializeTableFromFileOpTest(test.TestCase):
       vocabulary_file2 = self._createVocabFile("one_column7.txt")
       vocab_size = 5
-      table2 = lookup_ops.HashTable(
-          lookup_ops.TextFileInitializer(
+      table2 = lookup.HashTable(
+          lookup.TextFileInitializer(
              vocabulary_file2,
              dtypes.string,
-             lookup_ops.TextFileIndex.WHOLE_LINE,
+             lookup.TextFileIndex.WHOLE_LINE,
              dtypes.int64,
-             lookup_ops.TextFileIndex.LINE_NUMBER,
+             lookup.TextFileIndex.LINE_NUMBER,
              vocab_size=vocab_size),
          default_value)
       with self.assertRaisesOpError("Invalid vocab_size"):
@@ -1673,13 +1673,13 @@ class InitializeTableFromFileOpTest(test.TestCase):

       vocab_size = 1
       vocabulary_file3 = self._createVocabFile("one_column3.txt")
-      table3 = lookup_ops.HashTable(
-          lookup_ops.TextFileInitializer(
+      table3 = lookup.HashTable(
+          lookup.TextFileInitializer(
              vocabulary_file3,
              dtypes.string,
-             lookup_ops.TextFileIndex.WHOLE_LINE,
+             lookup.TextFileIndex.WHOLE_LINE,
              dtypes.int64,
-             lookup_ops.TextFileIndex.LINE_NUMBER,
+             lookup.TextFileIndex.LINE_NUMBER,
              vocab_size=vocab_size),
          default_value)
@@ -1692,11 +1692,11 @@ class InitializeTableFromFileOpTest(test.TestCase):
     with self.test_session():
       default_value = -1
-      table = lookup_ops.HashTable(
-          lookup_ops.TextFileInitializer("old_file.txt", dtypes.string,
-                                         lookup_ops.TextFileIndex.WHOLE_LINE,
-                                         dtypes.int64,
-                                         lookup_ops.TextFileIndex.LINE_NUMBER),
+      table = lookup.HashTable(
+          lookup.TextFileInitializer("old_file.txt", dtypes.string,
+                                     lookup.TextFileIndex.WHOLE_LINE,
+                                     dtypes.int64,
+                                     lookup.TextFileIndex.LINE_NUMBER),
          default_value)

       # Initialize with non existing file (old_file.txt) should fail.
@@ -1723,19 +1723,19 @@ class InitializeTableFromFileOpTest(test.TestCase):
       # Invalid data type
       other_type = constant_op.constant(1)
       with self.assertRaises(ValueError):
-        lookup_ops.HashTable(
-            lookup_ops.TextFileInitializer(
-                other_type, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
-                dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER),
+        lookup.HashTable(
+            lookup.TextFileInitializer(
+                other_type, dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
+                dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
            default_value)

       # Non-scalar filename
       filenames = constant_op.constant([vocabulary_file, vocabulary_file])
       with self.assertRaises(ValueError):
-        lookup_ops.HashTable(
-            lookup_ops.TextFileInitializer(
-                filenames, dtypes.string, lookup_ops.TextFileIndex.WHOLE_LINE,
-                dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER),
+        lookup.HashTable(
+            lookup.TextFileInitializer(
+                filenames, dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
+                dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
            default_value)

   def testIdToStringTable(self):
@@ -1743,8 +1743,8 @@ class InitializeTableFromFileOpTest(test.TestCase):
     with self.test_session():
       default_value = "UNK"
       vocab_size = 3
-      table = lookup_ops.HashTable(
-          lookup_ops.TextFileStringTableInitializer(
+      table = lookup.HashTable(
+          lookup.TextFileStringTableInitializer(
              vocab_file, vocab_size=vocab_size),
          default_value)
@@ -1761,8 +1761,8 @@ class InitializeTableFromFileOpTest(test.TestCase):
     with self.test_session():
       default_value = -1
       vocab_size = 3
-      table = lookup_ops.HashTable(
-          lookup_ops.TextFileIdTableInitializer(
+      table = lookup.HashTable(
+          lookup.TextFileIdTableInitializer(
              vocab_file, vocab_size=vocab_size), default_value)

       table.init.run()
@@ -1788,9 +1788,9 @@ class IdTableWithHashBucketsTest(test.TestCase):
       default_value = -1
       vocab_size = 3
       oov_buckets = 1
-      table = lookup_ops.IdTableWithHashBuckets(
-          lookup_ops.HashTable(
-              lookup_ops.TextFileIdTableInitializer(
+      table = lookup.IdTableWithHashBuckets(
+          lookup.HashTable(
+              lookup.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size), default_value),
          oov_buckets)
@@ -1809,7 +1809,7 @@ class IdTableWithHashBucketsTest(test.TestCase):

       # Set a table that only uses hash buckets, for each input value returns
       # an id calculated by fingerprint("input") mod oov_buckets.
-      table = lookup_ops.IdTableWithHashBuckets(None, oov_buckets)
+      table = lookup.IdTableWithHashBuckets(None, oov_buckets)
       table.init.run()

       input_string = constant_op.constant(["brain", "salad", "surgery"])
@@ -1831,20 +1831,20 @@ class IdTableWithHashBucketsTest(test.TestCase):
       vocab_size = 3
       oov_buckets = 3

-      vocab_table = lookup_ops.HashTable(
-          lookup_ops.TextFileIdTableInitializer(
+      vocab_table = lookup.HashTable(
+          lookup.TextFileIdTableInitializer(
              vocab_file, vocab_size=vocab_size), default_value)
-      table1 = lookup_ops.IdTableWithHashBuckets(
+      table1 = lookup.IdTableWithHashBuckets(
          vocab_table,
          oov_buckets,
-          hasher_spec=lookup_ops.FastHashSpec,
+          hasher_spec=lookup.FastHashSpec,
          name="table1")

-      table2 = lookup_ops.IdTableWithHashBuckets(
+      table2 = lookup.IdTableWithHashBuckets(
          vocab_table,
          oov_buckets,
-          hasher_spec=lookup_ops.StrongHashSpec((1, 2)),
+          hasher_spec=lookup.StrongHashSpec((1, 2)),
          name="table2")

       data_flow_ops.tables_initializer().run()
@@ -1872,9 +1872,9 @@ class IdTableWithHashBucketsTest(test.TestCase):
       default_value = -1
       vocab_size = 3
       oov_buckets = 1
-      table1 = lookup_ops.IdTableWithHashBuckets(
-          lookup_ops.HashTable(
-              lookup_ops.TextFileIdTableInitializer(
+      table1 = lookup.IdTableWithHashBuckets(
+          lookup.HashTable(
+              lookup.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size),
              default_value,
              shared_name=shared_name),
@@ -1897,9 +1897,9 @@ class IdTableWithHashBucketsTest(test.TestCase):

       # Underlying lookup table already initialized in previous session.
       # No need to call table2.init.run()
-      table2 = lookup_ops.IdTableWithHashBuckets(
-          lookup_ops.HashTable(
-              lookup_ops.TextFileIdTableInitializer(
+      table2 = lookup.IdTableWithHashBuckets(
+          lookup.HashTable(
+              lookup.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size),
              default_value,
              shared_name=shared_name),
@@ -1918,17 +1918,17 @@ class IdTableWithHashBucketsTest(test.TestCase):
       default_value1 = -1
       vocab_size = 3
       oov_buckets = 0
-      table1 = lookup_ops.IdTableWithHashBuckets(
-          lookup_ops.HashTable(
-              lookup_ops.TextFileIdTableInitializer(
+      table1 = lookup.IdTableWithHashBuckets(
+          lookup.HashTable(
+              lookup.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size), default_value1),
          oov_buckets)

       default_value2 = -2
-      table2 = lookup_ops.IdTableWithHashBuckets(
-          lookup_ops.HashTable(
-              lookup_ops.TextFileIdTableInitializer(
+      table2 = lookup.IdTableWithHashBuckets(
+          lookup.HashTable(
+              lookup.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size), default_value2),
          oov_buckets)
@@ -1959,9 +1959,9 @@ class IdTableWithHashBucketsTest(test.TestCase):
                      dtypes.string),
        constant_op.constant(input_shape, dtypes.int64))

-    table = lookup_ops.IdTableWithHashBuckets(
-        lookup_ops.HashTable(
-            lookup_ops.TextFileIdTableInitializer(
+    table = lookup.IdTableWithHashBuckets(
+        lookup.HashTable(
+            lookup.TextFileIdTableInitializer(
                vocab_file, vocab_size=3),
            -1),
        1)
@@ -1984,19 +1984,19 @@ class IdTableWithHashBucketsTest(test.TestCase):
       default_value = -1
       vocab_size = 3
       oov_buckets = 1
-      lookup_table = lookup_ops.HashTable(
-          lookup_ops.TextFileIdTableInitializer(
+      lookup_table = lookup.HashTable(
+          lookup.TextFileIdTableInitializer(
              vocab_file, vocab_size=vocab_size), default_value)

       with self.assertRaises(TypeError):
-        lookup_ops.IdTableWithHashBuckets(
+        lookup.IdTableWithHashBuckets(
            lookup_table, oov_buckets, hasher_spec=1)

-      table = lookup_ops.IdTableWithHashBuckets(
+      table = lookup.IdTableWithHashBuckets(
          lookup_table,
          oov_buckets,
-          hasher_spec=lookup_ops.HasherSpec("my-awesome-hash", None))
+          hasher_spec=lookup.HasherSpec("my-awesome-hash", None))

       input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
@@ -2004,22 +2004,22 @@ class IdTableWithHashBucketsTest(test.TestCase):
         table.lookup(input_string)

       with self.assertRaises(ValueError):
-        table = lookup_ops.IdTableWithHashBuckets(
+        table = lookup.IdTableWithHashBuckets(
            lookup_table,
            oov_buckets,
-            hasher_spec=lookup_ops.StrongHashSpec([]))
+            hasher_spec=lookup.StrongHashSpec([]))

       with self.assertRaises(ValueError):
-        table = lookup_ops.IdTableWithHashBuckets(
+        table = lookup.IdTableWithHashBuckets(
            lookup_table,
            oov_buckets,
-            hasher_spec=lookup_ops.StrongHashSpec([1, 2, 3]))
+            hasher_spec=lookup.StrongHashSpec([1, 2, 3]))

       with self.assertRaises(TypeError):
-        table = lookup_ops.IdTableWithHashBuckets(
+        table = lookup.IdTableWithHashBuckets(
            lookup_table,
            oov_buckets,
-            hasher_spec=lookup_ops.StrongHashSpec([None, 2]))
+            hasher_spec=lookup.StrongHashSpec([None, 2]))


 if __name__ == "__main__":
diff --git a/tensorflow/contrib/losses/__init__.py b/tensorflow/contrib/losses/__init__.py
index 14a4d531529..a405e11c22b 100644
--- a/tensorflow/contrib/losses/__init__.py
+++ b/tensorflow/contrib/losses/__init__.py
@@ -19,8 +19,10 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import sys
-
 # pylint: disable=unused-import,wildcard-import
+from tensorflow.contrib.losses.python import losses
 from tensorflow.contrib.losses.python.losses import *
 # pylint: enable=unused-import,wildcard-import
+
+from tensorflow.python.util.all_util import remove_undocumented
+remove_undocumented(__name__, doc_string_modules=[losses])
diff --git a/tensorflow/contrib/metrics/__init__.py b/tensorflow/contrib/metrics/__init__.py
index aaa1b62d5f7..b5ad8fb8b5d 100644
--- a/tensorflow/contrib/metrics/__init__.py
+++ b/tensorflow/contrib/metrics/__init__.py
@@ -109,6 +109,7 @@ labels and predictions tensors and results in a weighted average of the metric.
 @@streaming_mean_iou
 @@streaming_mean_relative_error
 @@streaming_mean_squared_error
+@@streaming_mean_tensor
 @@streaming_root_mean_squared_error
 @@streaming_covariance
 @@streaming_pearson_correlation
@@ -137,6 +138,8 @@ labels and predictions tensors and results in a weighted average of the metric.
 @@aggregate_metrics
 @@aggregate_metric_map

+@@confusion_matrix
+
 ## Set `Ops`

 @@set_difference
@@ -193,7 +196,7 @@ from tensorflow.contrib.metrics.python.ops.set_ops import set_difference
 from tensorflow.contrib.metrics.python.ops.set_ops import set_intersection
 from tensorflow.contrib.metrics.python.ops.set_ops import set_size
 from tensorflow.contrib.metrics.python.ops.set_ops import set_union
-from tensorflow.python.util.all_util import make_all
 # pylint: enable=unused-import,line-too-long

-__all__ = make_all(__name__)
+from tensorflow.python.util.all_util import remove_undocumented
+remove_undocumented(__name__)
diff --git a/tensorflow/contrib/metrics/python/ops/histogram_ops.py b/tensorflow/contrib/metrics/python/ops/histogram_ops.py
index 68d9bb5b7a4..d3d74d28a3a 100644
--- a/tensorflow/contrib/metrics/python/ops/histogram_ops.py
+++ b/tensorflow/contrib/metrics/python/ops/histogram_ops.py
@@ -23,7 +23,7 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-from tensorflow.contrib.framework import tensor_util
+from tensorflow.contrib.framework.python.framework import tensor_util
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
diff --git a/tensorflow/contrib/metrics/python/ops/metric_ops.py b/tensorflow/contrib/metrics/python/ops/metric_ops.py
index 7ac337732a7..0e07c1f47ac 100644
--- a/tensorflow/contrib/metrics/python/ops/metric_ops.py
+++ b/tensorflow/contrib/metrics/python/ops/metric_ops.py
@@ -23,7 +23,7 @@ from __future__ import division
 from __future__ import print_function

 from tensorflow.contrib.framework import deprecated
-from tensorflow.contrib.framework import tensor_util
+from tensorflow.contrib.framework.python.framework import tensor_util
 from tensorflow.contrib.framework.python.ops import variables as contrib_variables
 from tensorflow.contrib.metrics.python.ops import set_ops
 from tensorflow.python.framework import dtypes
diff --git a/tensorflow/contrib/metrics/python/ops/metric_ops_test.py b/tensorflow/contrib/metrics/python/ops/metric_ops_test.py
index 4fb244e3d44..af6b365a2a8 100644
--- a/tensorflow/contrib/metrics/python/ops/metric_ops_test.py
+++ b/tensorflow/contrib/metrics/python/ops/metric_ops_test.py
@@ -4593,7 +4593,7 @@ class StreamingConcatTest(test.TestCase):
     self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

   def testNextArraySize(self):
-    next_array_size = metrics.python.ops.metric_ops._next_array_size
+    next_array_size = metric_ops._next_array_size  # pylint: disable=protected-access
     with self.test_session():
       self.assertEqual(next_array_size(2, growth_factor=2).eval(), 2)
       self.assertEqual(next_array_size(3, growth_factor=2).eval(), 4)
diff --git a/tensorflow/contrib/nn/__init__.py b/tensorflow/contrib/nn/__init__.py
index c2fe913b595..73757a6696e 100644
--- a/tensorflow/contrib/nn/__init__.py
+++ b/tensorflow/contrib/nn/__init__.py
@@ -12,7 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Module for deprecated ops in tf.nn."""
+"""Module for deprecated ops in tf.nn.
+
+@@deprecated_flipped_softmax_cross_entropy_with_logits
+@@deprecated_flipped_sparse_softmax_cross_entropy_with_logits
+@@deprecated_flipped_sigmoid_cross_entropy_with_logits
+"""

 from __future__ import absolute_import
 from __future__ import division
@@ -21,3 +26,6 @@ from __future__ import print_function
 # pylint: disable=unused-import,wildcard-import
 from tensorflow.contrib.nn.python.ops.cross_entropy import *
 # pylint: enable=unused-import,wildcard-import
+
+from tensorflow.python.util.all_util import remove_undocumented
+remove_undocumented(__name__)
diff --git a/tensorflow/contrib/opt/__init__.py b/tensorflow/contrib/opt/__init__.py
index ec54c9b3c98..8ef90095965 100644
--- a/tensorflow/contrib/opt/__init__.py
+++ b/tensorflow/contrib/opt/__init__.py
@@ -23,3 +23,12 @@ from tensorflow.contrib.opt.python.training.external_optimizer import *
 from tensorflow.contrib.opt.python.training.moving_average_optimizer import *
 from tensorflow.contrib.opt.python.training.variable_clipping_optimizer import *
 # pylint: enable=wildcard-import
+
+from tensorflow.python.util.all_util import remove_undocumented
+
+_allowed_symbols = ['ExternalOptimizerInterface',
+                    'MovingAverageOptimizer',
+                    'ScipyOptimizerInterface',
+                    'VariableClippingOptimizer']
+
+remove_undocumented(__name__, _allowed_symbols)
diff --git a/tensorflow/contrib/rnn/__init__.py b/tensorflow/contrib/rnn/__init__.py
index 0966fc0cf0d..56c394c028d 100644
--- a/tensorflow/contrib/rnn/__init__.py
+++ b/tensorflow/contrib/rnn/__init__.py
@@ -24,6 +24,7 @@
 @@BasicLSTMCell
 @@GRUCell
 @@LSTMCell
+@@LayerNormBasicLSTMCell

 ## Classes storing split `RNNCell` state

@@ -32,6 +33,7 @@
 ## RNN Cell wrappers (RNNCells that wrap other RNNCells)

 @@MultiRNNCell
+@@LSTMBlockWrapper
 @@DropoutWrapper
 @@EmbeddingWrapper
 @@InputProjectionWrapper
@@ -86,10 +88,13 @@ from tensorflow.contrib.rnn.python.ops.core_rnn_cell import MultiRNNCell
 from tensorflow.contrib.rnn.python.ops.core_rnn_cell import OutputProjectionWrapper
 from tensorflow.contrib.rnn.python.ops.core_rnn_cell import RNNCell

-# pylint: disable=unused-import,wildcard-import, line-too-long
+# pylint: disable=unused-import,wildcard-import,line-too-long
 from tensorflow.contrib.rnn.python.ops.fused_rnn_cell import *
 from tensorflow.contrib.rnn.python.ops.gru_ops import *
 from tensorflow.contrib.rnn.python.ops.lstm_ops import *
 from tensorflow.contrib.rnn.python.ops.rnn import *
 from tensorflow.contrib.rnn.python.ops.rnn_cell import *
 # pylint: enable=unused-import,wildcard-import,line-too-long
+
+from tensorflow.python.util.all_util import remove_undocumented
+remove_undocumented(__name__, ['core_rnn_cell'])
diff --git a/tensorflow/contrib/seq2seq/__init__.py b/tensorflow/contrib/seq2seq/__init__.py
index a7e272984b9..f8b35a1cbb3 100644
--- a/tensorflow/contrib/seq2seq/__init__.py
+++ b/tensorflow/contrib/seq2seq/__init__.py
@@ -19,13 +19,23 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import sys
-
-# pylint: disable=unused-import,line-too-long
+# pylint: disable=unused-import,wildcard-import,line-too-long
 from tensorflow.contrib.seq2seq.python.ops.attention_decoder_fn import attention_decoder_fn_inference
 from tensorflow.contrib.seq2seq.python.ops.attention_decoder_fn import attention_decoder_fn_train
 from tensorflow.contrib.seq2seq.python.ops.attention_decoder_fn import prepare_attention
 from tensorflow.contrib.seq2seq.python.ops.decoder_fn import *
 from tensorflow.contrib.seq2seq.python.ops.loss import *
 from tensorflow.contrib.seq2seq.python.ops.seq2seq import *
-# pylint: enable=unused-import,line-too-long
+# pylint: enable=unused-import,wildcard-import,line-too-long
+
+from tensorflow.python.util.all_util import remove_undocumented
+
+_allowed_symbols = ["attention_decoder_fn_inference",
+                    "attention_decoder_fn_train",
+                    "dynamic_rnn_decoder",
+                    "prepare_attention",
+                    "sequence_loss",
+                    "simple_decoder_fn_train",
+                    "simple_decoder_fn_inference"]
+
+remove_undocumented(__name__, _allowed_symbols)
diff --git a/tensorflow/contrib/stat_summarizer/__init__.py b/tensorflow/contrib/stat_summarizer/__init__.py
index 32feb7edb97..53d5548863a 100644
--- a/tensorflow/contrib/stat_summarizer/__init__.py
+++ b/tensorflow/contrib/stat_summarizer/__init__.py
@@ -25,3 +25,10 @@ from __future__ import print_function
 from tensorflow.python.pywrap_tensorflow import DeleteStatSummarizer
 from tensorflow.python.pywrap_tensorflow import NewStatSummarizer
 from tensorflow.python.pywrap_tensorflow import StatSummarizer
+
+from tensorflow.python.util.all_util import remove_undocumented
+
+_allowed_symbols = ['DeleteStatSummarizer', 'NewStatSummarizer',
+                    'StatSummarizer']
+
+remove_undocumented(__name__, _allowed_symbols)
diff --git a/tensorflow/contrib/tfprof/__init__.py b/tensorflow/contrib/tfprof/__init__.py
index 129dad2726c..f3952f6cb5c 100644
--- a/tensorflow/contrib/tfprof/__init__.py
+++ b/tensorflow/contrib/tfprof/__init__.py
@@ -19,4 +19,3 @@ from __future__ import print_function

 from tensorflow.contrib.tfprof.python.tools.tfprof import model_analyzer
 from tensorflow.contrib.tfprof.python.tools.tfprof import tfprof_logger
-from tensorflow.python.util.all_util import make_all
diff --git a/tensorflow/contrib/util/__init__.py b/tensorflow/contrib/util/__init__.py
index cdaafef1f7d..45efdc20c80 100644
--- a/tensorflow/contrib/util/__init__.py
+++ b/tensorflow/contrib/util/__init__.py
@@ -35,7 +35,6 @@ from tensorflow.python.framework.meta_graph import stripped_op_list_for_graph
 from tensorflow.python.framework.tensor_util import constant_value
 from tensorflow.python.framework.tensor_util import make_tensor_proto
 from tensorflow.python.framework.tensor_util import MakeNdarray as make_ndarray
-from tensorflow.python.util.all_util import make_all
-
-
-__all__ = make_all(__name__)
+# pylint: disable=unused-import
+from tensorflow.python.util.all_util import remove_undocumented
+remove_undocumented(__name__)
diff --git a/tensorflow/contrib/util/loader.py b/tensorflow/contrib/util/loader.py
index 6f690f414a4..95657217a00 100644
--- a/tensorflow/contrib/util/loader.py
+++ b/tensorflow/contrib/util/loader.py
@@ -12,7 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Utilities for loading op libraries."""
+"""Utilities for loading op libraries.
+
+@@load_op_library
+"""
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
diff --git a/tensorflow/python/framework/docs.py b/tensorflow/python/framework/docs.py
index 94e658d8c09..4ae0046117b 100644
--- a/tensorflow/python/framework/docs.py
+++ b/tensorflow/python/framework/docs.py
@@ -291,10 +291,43 @@ class Library(Document):

   def _generate_signature_for_function(self, func):
     """Given a function, returns a string representing its args."""
     args_list = []
-    argspec = inspect.getargspec(func)
+    if isinstance(func, functools.partial):
+      argspec = inspect.getargspec(func.func)
+      # Remove the args from the original function that have been used up.
+      first_default_arg = (
+          len(argspec.args or []) - len(argspec.defaults or []))
+      partial_args = len(func.args)
+      if argspec.args:
+        argspec_args = list(argspec.args[partial_args:])
+      else:
+        argspec_args = []
+      if argspec.defaults:
+        argspec_defaults = list(argspec.defaults[
+            max(0, partial_args-first_default_arg):])
+      else:
+        argspec_defaults = []
+      first_default_arg = max(0, first_default_arg - partial_args)
+      for kwarg in func.keywords:
+        if kwarg in argspec_args:
+          i = argspec_args.index(kwarg)
+          argspec_args.pop(i)
+          if i >= first_default_arg:
+            argspec_defaults.pop(i-first_default_arg)
+          else:
+            first_default_arg -= 1
+      argspec_varargs = None
+      argspec_keywords = None
+
+    else:
+      argspec = inspect.getargspec(func)
+      argspec_args = argspec.args
+      argspec_defaults = argspec.defaults
+      argspec_varargs = argspec.varargs
+      argspec_keywords = argspec.keywords
+
     first_arg_with_default = (
-        len(argspec.args or []) - len(argspec.defaults or []))
-    for arg in argspec.args[:first_arg_with_default]:
+        len(argspec_args or []) - len(argspec_defaults or []))
+    for arg in argspec_args[:first_arg_with_default]:
       if arg == "self":
         # Python documentation typically skips `self` when printing method
         # signatures.
@@ -306,16 +339,16 @@ class Library(Document):
     # TODO(aselle): This workaround is brittle on TestCase.__call__
     # so we need to wrap this in a try/catch
     # We should do something better.
-      if argspec.varargs == "args" and argspec.keywords == "kwds":
+      if argspec_varargs == "args" and argspec_keywords == "kwds":
         try:
           original_func = func.__closure__[0].cell_contents
           return self._generate_signature_for_function(original_func)
         except TypeError:
           pass

-    if argspec.defaults:
+    if argspec_defaults:
       for arg, default in zip(
-          argspec.args[first_arg_with_default:], argspec.defaults):
+          argspec_args[first_arg_with_default:], argspec_defaults):
         if callable(default):
           if hasattr(default, "__name__"):
             args_list.append("%s=%s" % (arg, default.__name__))
@@ -326,10 +359,10 @@ class Library(Document):
             args_list.append("%s=%s()" % (arg, default.__class__.__name__))
         else:
           args_list.append("%s=%r" % (arg, default))
-    if argspec.varargs:
-      args_list.append("*" + argspec.varargs)
-    if argspec.keywords:
-      args_list.append("**" + argspec.keywords)
+    if argspec_varargs:
+      args_list.append("*" + argspec_varargs)
+    if argspec_keywords:
+      args_list.append("**" + argspec_keywords)
     return "(" + ", ".join(args_list) + ")"

   def _remove_docstring_indent(self, docstring):
diff --git a/tensorflow/python/framework/gen_docs_combined.py b/tensorflow/python/framework/gen_docs_combined.py
index 7c387e1da44..fb0a4dda3d2 100644
--- a/tensorflow/python/framework/gen_docs_combined.py
+++ b/tensorflow/python/framework/gen_docs_combined.py
@@ -261,11 +261,18 @@ _hidden_symbols = ["Event", "LogMessage", "Summary", "SessionLog", "xrange",
 # TODO(wicke): Remove contrib.layers.relu* after shortnames are
 # disabled. These conflict with tf.nn.relu*
 EXCLUDE = frozenset(["tf.contrib.learn.monitors.NanLossDuringTrainingError",
+                     "tf.contrib.layers.dropout",
+                     "tf.contrib.layers.bias_add",
+                     "tf.contrib.layers.conv2d",
+                     "tf.contrib.layers.conv2d_transpose",
+                     "tf.contrib.layers.separable_conv2d",
+                     "tf.contrib.layers.softmax",
                      "tf.contrib.layers.relu", "tf.contrib.layers.relu6",
                      "tf.contrib.framework.assert_global_step",
                      "tf.contrib.framework.get_global_step",
                      "tf.contrib.learn.NanLossDuringTrainingError",
                      "tf.contrib.layers.stack",
+                     "tf.contrib.layers.ProblemType",
                      "tf.confusion_matrix"])
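
A note on the sealing pattern that recurs throughout this change: every contrib `__init__.py` now ends by calling `remove_undocumented(__name__, ...)`, which strips from a module every public attribute that is neither listed in `_allowed_symbols` nor referenced as `@@name` in the docstring of the module (or of the extra `doc_string_modules`). The sketch below is illustrative only; it is not the implementation in `tensorflow/python/util/all_util.py`, and `_seal_module` is a hypothetical name. It shows the behavior the `__init__.py` changes above rely on:

# Minimal sketch of the sealing semantics, assuming only the behavior
# visible in the diffs above; NOT the real all_util.remove_undocumented.
import sys

def _seal_module(module_name, allowed_symbols=None, doc_string_modules=None):
  """Delete public attributes that are neither allowed nor @@-documented."""
  module = sys.modules[module_name]
  documented = set(allowed_symbols or [])
  for doc_module in [module] + list(doc_string_modules or []):
    # A line starting with "@@foo" in a docstring marks `foo` as documented.
    for line in (doc_module.__doc__ or "").splitlines():
      token = line.strip()
      if token.startswith("@@"):
        documented.add(token[2:])
  for name in list(vars(module)):
    if not name.startswith("_") and name not in documented:
      delattr(module, name)

Under these semantics, a symbol that is neither `@@`-documented nor in `_allowed_symbols` disappears from the module namespace at import time, which is why previously reachable helpers now have to be allowlisted explicitly.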
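The docs.py hunk above teaches `_generate_signature_for_function` to render signatures for `functools.partial` objects: it inspects the wrapped function, drops the positional arguments the partial has already consumed, and removes keyword-bound arguments together with their defaults. Below is a self-contained sketch of that arithmetic using the same Python-2-era `inspect.getargspec` API as the patched file; `f` and `p` are hypothetical examples, not names from the patch:

import functools
import inspect

def f(a, b, c=1, d=2):
  return a, b, c, d

p = functools.partial(f, 10, d=4)  # binds a=10 positionally and d by keyword

spec = inspect.getargspec(p.func)  # getargspec cannot inspect `p` directly
first_default = len(spec.args or []) - len(spec.defaults or [])
n_partial = len(p.args)
# Drop the positionally consumed args and any defaults they used up.
args = list(spec.args[n_partial:]) if spec.args else []
defaults = (list(spec.defaults[max(0, n_partial - first_default):])
            if spec.defaults else [])
first_default = max(0, first_default - n_partial)
# Drop keyword-bound args along with their defaults.
for kwarg in p.keywords:
  if kwarg in args:
    i = args.index(kwarg)
    args.pop(i)
    if i >= first_default:
      defaults.pop(i - first_default)
    else:
      first_default -= 1

assert args == ["b", "c"] and defaults == [1]  # rendered as "(b, c=1)"

Note that for partials the hunk deliberately reports no `*args`/`**kwargs` (the `argspec_varargs = None` and `argspec_keywords = None` lines), so only the remaining named parameters appear in the generated documentation.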