Fixed deprecated access of collections members.
Changed the access of the abstract base classes to use a collections_abc compatibility stub.

* This prevents deprecation warnings in Python 3.7, and runtime errors in Python 3.8+, where the aliases are slated for removal.
* The compatibility stub bridges the gap back to Python 3.3, where collections.abc was factored out of collections.
parent 2cb25a86b2
commit 9febcf369a
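For reference — the stub this change adds (the full hunk appears in the tensorflow/python/util/compat.py section below) is the standard try/except import fallback:

```python
try:
  # This import only works on python 3.3 and above.
  import collections.abc as collections_abc  # pylint: disable=unused-import
except ImportError:
  import collections as collections_abc  # pylint: disable=unused-import
```

Call sites then write `isinstance(x, collections_abc.Iterable)`, which resolves to `collections.abc.Iterable` on Python 3.3+ and to the legacy `collections` module on Python 2.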
tensorflow/
  contrib/
    cudnn_rnn/python/kernel_tests
    factorization/python/ops
    graph_editor
    labeled_tensor/python/ops
    layers/python/layers
    learn/python/learn/estimators
    tensor_forest/hybrid/python
  python/
    client
    data
    distribute
    feature_column
    framework
    keras/
      callbacks.py
      engine/
        training.py
        training_arrays.py
        training_distributed.py
        training_eager.py
        training_generator.py
        training_utils.py
      keras_parameterized.py
      kernel_tests
    ops
    saved_model
    tpu
    training
    util
@@ -18,7 +18,6 @@ from __future__ import division
 from __future__ import print_function

 import argparse
-import collections
 import functools
 import itertools
 import os
@@ -59,6 +58,7 @@ from tensorflow.python.training import momentum
 from tensorflow.python.training import rmsprop
 from tensorflow.python.training import saver as saver_lib
 from tensorflow.python.training.tracking import util as trackable_utils
+from tensorflow.python.util.compat import collections_abc


 CUDNN_LSTM = cudnn_rnn_ops.CUDNN_LSTM
@@ -1131,7 +1131,7 @@ class CudnnRNNTestTraining(test_util.TensorFlowTestCase):
     return numeric_grad.reshape(x_shape)

   def _GetShape(self, sess, inputs):
-    if not isinstance(inputs, collections.Iterable):
+    if not isinstance(inputs, collections_abc.Iterable):
       return sess.run(array_ops.shape(inputs))
     else:
       return sess.run([array_ops.shape(x) for x in inputs])

@@ -18,7 +18,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
 import numbers

 from six.moves import xrange  # pylint: disable=redefined-builtin
@@ -42,6 +41,7 @@ from tensorflow.python.ops import state_ops
 from tensorflow.python.ops import variable_scope
 from tensorflow.python.ops import variables
 from tensorflow.python.platform import resource_loader
+from tensorflow.python.util.compat import collections_abc

 _factorization_ops = loader.load_op_library(
     resource_loader.get_path_to_datafile("_factorization_ops.so"))
@@ -388,7 +388,7 @@ class WALSModel(object):
       return None

     init_mode = "list"
-    if isinstance(wt_init, collections.Iterable):
+    if isinstance(wt_init, collections_abc.Iterable):
       if num_shards == 1 and len(wt_init) == num_wts:
         wt_init = [wt_init]
       assert len(wt_init) == num_shards

@@ -19,11 +19,11 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
 import re
 from six import iteritems
 from tensorflow.python.framework import ops as tf_ops
 from tensorflow.python.ops import array_ops as tf_array_ops
+from tensorflow.python.util.compat import collections_abc

 __all__ = [
     "make_list_of_op",
@@ -157,7 +157,7 @@ def transform_tree(tree, fn, iterable_type=tuple):
     res = tree.__new__(type(tree),
                        (transform_tree(child, fn) for child in tree))
     return res
-  elif isinstance(tree, collections.Sequence):
+  elif isinstance(tree, collections_abc.Sequence):
     res = tree.__new__(type(tree))
     res.__init__(transform_tree(child, fn) for child in tree)
     return res

@@ -21,11 +21,11 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
 import functools
 import re

 from tensorflow.python.util import tf_inspect
+from tensorflow.python.util.compat import collections_abc

 # used for register_type_abbreviation and _type_repr below.
 _TYPE_ABBREVIATIONS = {}
@@ -114,7 +114,7 @@ class Sequence(_SingleArgumentType):
   """

   def __instancecheck__(self, instance):
-    return (isinstance(instance, collections.Sequence) and
+    return (isinstance(instance, collections_abc.Sequence) and
             all(isinstance(x, self._type) for x in instance))


@@ -130,9 +130,9 @@ class Collection(_SingleArgumentType):
   """

   def __instancecheck__(self, instance):
-    return (isinstance(instance, collections.Iterable) and
-            isinstance(instance, collections.Sized) and
-            isinstance(instance, collections.Container) and
+    return (isinstance(instance, collections_abc.Iterable) and
+            isinstance(instance, collections_abc.Sized) and
+            isinstance(instance, collections_abc.Container) and
             all(isinstance(x, self._type) for x in instance))


@@ -157,7 +157,7 @@ class Mapping(_TwoArgumentType):

   def __instancecheck__(self, instance):
     key_type, value_type = self._types  # pylint: disable=unbalanced-tuple-unpacking
-    return (isinstance(instance, collections.Mapping) and
+    return (isinstance(instance, collections_abc.Mapping) and
             all(isinstance(k, key_type) for k in instance.keys()) and
             all(isinstance(k, value_type) for k in instance.values()))

@@ -41,11 +41,12 @@ from tensorflow.python.framework import ops
 from tensorflow.python.framework import tensor_shape
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import math_ops
+from tensorflow.python.util.compat import collections_abc

 # pylint: disable=invalid-name

 # Types coercible to Axis.labels
-# We use this instead of collections.Sequence to exclude strings.
+# We use this instead of collections_abc.Sequence to exclude strings.
 LabelsLike = tc.Union(np.ndarray, range, list, tuple)

 # Types coercible to a tf.compat.v1.Dimension
@@ -195,7 +196,7 @@ def as_axis(axis_data):
   return axis


-class Axes(collections.Mapping):
+class Axes(collections_abc.Mapping):
   """Axis names and indices for a tensor.

   It is an ordered mapping, with keys given by axis name and values given
@@ -719,7 +720,7 @@ def transpose(labeled_tensor, axis_order=None, name=None):
 @tc.accepts(LabeledTensorLike,
             tc.Collection(
                 tc.Union(string_types,
-                         tc.Tuple(string_types, collections.Hashable))),
+                         tc.Tuple(string_types, collections_abc.Hashable))),
             tc.Optional(string_types))
 def expand_dims(labeled_tensor, axes, name=None):
   """Insert dimensions of size 1.
@@ -1055,7 +1056,7 @@ def align(labeled_tensor_0, labeled_tensor_1, name=None):


 @tc.returns(types.FunctionType)
-@tc.accepts(string_types, collections.Callable)
+@tc.accepts(string_types, collections_abc.Callable)
 def define_unary_op(op_name, elementwise_function):
   """Define a unary operation for labeled tensors.

@@ -1124,7 +1125,7 @@ sigmoid = define_unary_op('sigmoid', math_ops.sigmoid)


 @tc.returns(types.FunctionType)
-@tc.accepts(string_types, collections.Callable)
+@tc.accepts(string_types, collections_abc.Callable)
 def define_binary_op(op_name, elementwise_function):
   """Define a binary operation that broadcasts labeled tensors.

@@ -17,7 +17,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
 import types

 import numpy as np
@@ -34,6 +33,7 @@ from tensorflow.python.ops import math_ops
 from tensorflow.python.ops import numerics
 from tensorflow.python.ops import random_ops
 from tensorflow.python.training import input  # pylint: disable=redefined-builtin
+from tensorflow.python.util.compat import collections_abc


 @tc.returns(core.LabeledTensor)
@@ -52,7 +52,7 @@ def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None):
 @tc.returns(core.LabeledTensor)
 @tc.accepts(core.LabeledTensorLike,
             tc.Mapping(string_types,
-                       tc.Union(slice, collections.Hashable, list)),
+                       tc.Union(slice, collections_abc.Hashable, list)),
             tc.Optional(string_types))
 def select(labeled_tensor, selection, name=None):
   """Slice out a subset of the tensor.
@@ -111,8 +111,8 @@ def select(labeled_tensor, selection, name=None):
       slices[axis_name] = slice(start, stop)

     # Needs to be after checking for slices, since slice objects claim to be
-    # instances of collections.Hashable but hash() on them fails.
-    elif isinstance(value, collections.Hashable):
+    # instances of collections_abc.Hashable but hash() on them fails.
+    elif isinstance(value, collections_abc.Hashable):
       slices[axis_name] = axis.index(value)

     elif isinstance(value, list):
@@ -400,7 +400,7 @@ def rename_axis(labeled_tensor, existing_name, new_name, name=None):


 @tc.returns(tc.List(core.LabeledTensor))
-@tc.accepts(string_types, collections.Callable, int, bool,
+@tc.accepts(string_types, collections_abc.Callable, int, bool,
             tc.Collection(core.LabeledTensorLike), bool,
             tc.Optional(string_types))
 def _batch_helper(default_name,
@@ -606,7 +606,7 @@ def random_crop(labeled_tensor, shape_map, seed=None, name=None):

 # TODO(shoyer): Allow the user to select the axis over which to map.
 @tc.returns(core.LabeledTensor)
-@tc.accepts(collections.Callable, core.LabeledTensorLike,
+@tc.accepts(collections_abc.Callable, core.LabeledTensorLike,
             tc.Optional(string_types))
 def map_fn(fn, labeled_tensor, name=None):
   """Map on the list of tensors unpacked from labeled_tensor.
@@ -661,7 +661,7 @@ def map_fn(fn, labeled_tensor, name=None):


 @tc.returns(core.LabeledTensor)
-@tc.accepts(collections.Callable, core.LabeledTensorLike,
+@tc.accepts(collections_abc.Callable, core.LabeledTensorLike,
             core.LabeledTensorLike, tc.Optional(string_types))
 def foldl(fn, labeled_tensor, initial_value, name=None):
   """Left fold on the list of tensors unpacked from labeled_tensor.
@@ -754,7 +754,7 @@ def squeeze(labeled_tensor, axis_names=None, name=None):

 # pylint: disable=invalid-name
 ReduceAxis = tc.Union(string_types,
-                      tc.Tuple(string_types, collections.Hashable))
+                      tc.Tuple(string_types, collections_abc.Hashable))
 ReduceAxes = tc.Optional(tc.Union(ReduceAxis, tc.Collection(ReduceAxis)))
 # pylint: enable=invalid-name

@@ -876,7 +876,7 @@ def matmul(a, b, name=None):


 @tc.returns(types.FunctionType)
-@tc.accepts(string_types, collections.Callable)
+@tc.accepts(string_types, collections_abc.Callable)
 def define_reduce_op(op_name, reduce_fn):
   """Define a reduction op for labeled tensors.

@@ -155,6 +155,7 @@ from tensorflow.python.ops import variables
 from tensorflow.python.platform import tf_logging as logging
 from tensorflow.python.util import deprecation
 from tensorflow.python.util import nest
+from tensorflow.python.util.compat import collections_abc

 # Imports the core `InputLayer` symbol in contrib during development.
 InputLayer = fc_core.InputLayer  # pylint: disable=invalid-name
@@ -1403,7 +1404,7 @@ def shared_embedding_columns(sparse_id_columns,
       least one element of `sparse_id_columns` is not a `SparseColumn` or a
       `WeightedSparseColumn`.
   """
-  if (not isinstance(sparse_id_columns, collections.Sequence) or
+  if (not isinstance(sparse_id_columns, collections_abc.Sequence) or
       isinstance(sparse_id_columns, six.string_types)):
     raise TypeError(
         "sparse_id_columns must be a non-string sequence (ex: list or tuple) "

@@ -19,12 +19,13 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

 import collections
 import os

 import numpy as np
 import six

+from tensorflow.python.util.compat import collections_abc


 def _pprint(d):
   return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()])
@@ -55,7 +56,7 @@ class _BaseEstimator(object):
     for key in param_names:
       value = getattr(self, key, None)

-      if isinstance(value, collections.Callable):
+      if isinstance(value, collections_abc.Callable):
         continue

       # XXX: should we rather test if instance of estimator?

@@ -17,8 +17,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
-
 from tensorflow.contrib import layers
 from tensorflow.contrib.framework.python.ops import variables as framework_variables

@@ -29,6 +27,7 @@ from tensorflow.python.ops import nn_ops
 from tensorflow.python.ops import variables

 from tensorflow.python.training import adagrad
+from tensorflow.python.util.compat import collections_abc


 class HybridModel(object):
@@ -66,7 +65,7 @@ class HybridModel(object):

     # If this is a collection of layers, return the mean of their inference
     # results.
-    if isinstance(layer, collections.Iterable):
+    if isinstance(layer, collections_abc.Iterable):
       return math_ops.reduce_mean(
           array_ops.stack([l.inference_graph(data) for l in layer]), 0)
     # If this is a single layer, return its inference result.

@@ -18,7 +18,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
 import functools
 import re
 import threading
@@ -41,6 +40,7 @@ from tensorflow.python.training.experimental import mixed_precision_global_state
 from tensorflow.python.util import compat
 from tensorflow.python.util import nest
 from tensorflow.python.util.tf_export import tf_export
+from tensorflow.python.util.compat import collections_abc


 class SessionInterface(object):
@@ -259,7 +259,7 @@ class _FetchMapper(object):
     elif isinstance(fetch, (list, tuple)):
       # NOTE(touts): This is also the code path for namedtuples.
       return _ListFetchMapper(fetch)
-    elif isinstance(fetch, collections.Mapping):
+    elif isinstance(fetch, collections_abc.Mapping):
       return _DictFetchMapper(fetch)
     elif _is_attrs_instance(fetch):
       return _AttrsFetchMapper(fetch)

@@ -17,14 +17,13 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
-
 from tensorflow.python.compat import compat
 from tensorflow.python.data.ops import dataset_ops
 from tensorflow.python.data.util import nest
 from tensorflow.python.data.util import structure
 from tensorflow.python.framework import ops
 from tensorflow.python.ops import gen_experimental_dataset_ops
+from tensorflow.python.util.compat import collections_abc
 from tensorflow.python.util.tf_export import tf_export


@@ -53,7 +52,7 @@ class _ScanDataset(dataset_ops.UnaryDataset):
             input_dataset.element_spec),
         add_to_graph=False)
     if not (
-        isinstance(wrapped_func.output_types, collections.Sequence) and
+        isinstance(wrapped_func.output_types, collections_abc.Sequence) and
         len(wrapped_func.output_types) == 2):
       raise TypeError("The scan function must return a pair comprising the "
                       "new state and the output value.")

@@ -35,12 +35,11 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections as _collections
-
 import six as _six

 from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
 from tensorflow.python.framework import sparse_tensor as _sparse_tensor
+from tensorflow.python.util.compat import collections_abc as _collections_abc


 def _sorted(dict_):
@@ -71,7 +70,7 @@ def _sequence_like(instance, args):
     return type(instance)((key, result[key]) for key in instance)
   elif (isinstance(instance, tuple) and
         hasattr(instance, "_fields") and
-        isinstance(instance._fields, _collections.Sequence) and
+        isinstance(instance._fields, _collections_abc.Sequence) and
         all(isinstance(f, _six.string_types) for f in instance._fields)):
     # This is a namedtuple
     return type(instance)(*args)

@@ -18,7 +18,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
 import contextlib
 import copy
 import json
@@ -50,6 +49,7 @@ from tensorflow.python.platform import tf_logging as logging
 from tensorflow.python.training import coordinator
 from tensorflow.python.training import server_lib
 from tensorflow.python.util import nest
+from tensorflow.python.util.compat import collections_abc


 original_run_std_server = dc._run_std_server  # pylint: disable=protected-access
@@ -353,7 +353,7 @@ class MultiWorkerTestBase(test.TestCase):
     self.assertEqual(self._result, len(threads))


-class MockOsEnv(collections.Mapping):
+class MockOsEnv(collections_abc.Mapping):
   """A class that allows per-thread TF_CONFIG."""

   def __init__(self, *args):

@@ -166,6 +166,7 @@ from tensorflow.python.platform import tf_logging as logging
 from tensorflow.python.training import checkpoint_utils
 from tensorflow.python.util import nest
 from tensorflow.python.util.tf_export import tf_export
+from tensorflow.python.util.compat import collections_abc


 def _internal_input_layer(features,
@@ -2245,7 +2246,7 @@ def _normalize_feature_columns(feature_columns):
   if isinstance(feature_columns, _FeatureColumn):
     feature_columns = [feature_columns]

-  if isinstance(feature_columns, collections.Iterator):
+  if isinstance(feature_columns, collections_abc.Iterator):
     feature_columns = list(feature_columns)

   if isinstance(feature_columns, dict):

@@ -169,6 +169,7 @@ from tensorflow.python.util import deprecation
 from tensorflow.python.util import nest
 from tensorflow.python.util.tf_export import keras_export
 from tensorflow.python.util.tf_export import tf_export
+from tensorflow.python.util.compat import collections_abc


 _FEATURE_COLUMN_DEPRECATION_DATE = None
@@ -2740,7 +2741,7 @@ def _normalize_feature_columns(feature_columns):
   if isinstance(feature_columns, FeatureColumn):
     feature_columns = [feature_columns]

-  if isinstance(feature_columns, collections.Iterator):
+  if isinstance(feature_columns, collections_abc.Iterator):
     feature_columns = list(feature_columns)

   if isinstance(feature_columns, dict):

@@ -65,6 +65,7 @@ from tensorflow.python.util import lock_util
 from tensorflow.python.util import memory
 from tensorflow.python.util import tf_contextlib
 from tensorflow.python.util import tf_stack
+from tensorflow.python.util.compat import collections_abc
 from tensorflow.python.util.deprecation import deprecated_args
 from tensorflow.python.util.tf_export import tf_export

@@ -1234,7 +1235,7 @@ def internal_convert_n_to_tensor(values,
     RuntimeError: If a registered conversion function returns an invalid
       value.
   """
-  if not isinstance(values, collections.Sequence):
+  if not isinstance(values, collections_abc.Sequence):
     raise TypeError("values must be a sequence.")
   ret = []
   if ctx is None:
@@ -1371,7 +1372,7 @@ def internal_convert_n_to_tensor_or_composite(values,
     RuntimeError: If a registered conversion function returns an invalid
       value.
   """
-  if not isinstance(values, collections.Sequence):
+  if not isinstance(values, collections_abc.Sequence):
     raise TypeError("values must be a sequence.")
   ret = []
   for i, value in enumerate(values):

@@ -19,7 +19,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
 from collections import OrderedDict
 import contextlib
 import functools
@@ -83,6 +82,7 @@ from tensorflow.python.util import tf_decorator
 from tensorflow.python.util import tf_inspect
 from tensorflow.python.util.protobuf import compare
 from tensorflow.python.util.tf_export import tf_export
+from tensorflow.python.util.compat import collections_abc


 # If the below import is made available through the BUILD rule, then this
@@ -2301,8 +2301,8 @@ class TensorFlowTestCase(googletest.TestCase):
       a = a._asdict()
     if hasattr(b, "_asdict"):
       b = b._asdict()
-    a_is_dict = isinstance(a, collections.Mapping)
-    if a_is_dict != isinstance(b, collections.Mapping):
+    a_is_dict = isinstance(a, collections_abc.Mapping)
+    if a_is_dict != isinstance(b, collections_abc.Mapping):
       raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
                        (path_str, path_str, msg))
     if a_is_dict:

@@ -47,6 +47,7 @@ from tensorflow.python.ops import summary_ops_v2
 from tensorflow.python.platform import tf_logging as logging
 from tensorflow.python.training import checkpoint_management
 from tensorflow.python.util.tf_export import keras_export
+from tensorflow.python.util.compat import collections_abc

 try:
   import requests
@@ -1902,7 +1903,7 @@ class CSVLogger(Callback):
       is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
       if isinstance(k, six.string_types):
         return k
-      elif isinstance(k, collections.Iterable) and not is_zero_dim_ndarray:
+      elif isinstance(k, collections_abc.Iterable) and not is_zero_dim_ndarray:
         return '"[%s]"' % (', '.join(map(str, k)))
       else:
         return k

@@ -63,6 +63,7 @@ from tensorflow.python.util import nest
 from tensorflow.python.util import serialization
 from tensorflow.python.util import tf_inspect
 from tensorflow.python.util.tf_export import keras_export
+from tensorflow.python.util.compat import collections_abc

 try:
   from scipy.sparse import issparse  # pylint: disable=g-import-not-at-top
@@ -603,9 +604,9 @@ class Model(network.Network):
           or a dataset iterator, and 'validation_steps' is None, validation
           will run until the `validation_data` dataset is exhausted.
         validation_freq: Only relevant if validation data is provided. Integer
-          or `collections.Container` instance (e.g. list, tuple, etc.). If an
-          integer, specifies how many training epochs to run before a new
-          validation run is performed, e.g. `validation_freq=2` runs
+          or `collections_abc.Container` instance (e.g. list, tuple, etc.).
+          If an integer, specifies how many training epochs to run before a
+          new validation run is performed, e.g. `validation_freq=2` runs
           validation every 2 epochs. If a Container, specifies the epochs on
           which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
           validation at the end of the 1st, 2nd, and 10th epochs.
@@ -1048,7 +1049,7 @@ class Model(network.Network):
     # at this point.
     if self.run_eagerly or self._distribution_strategy:
       inputs = training_utils.cast_if_floating_dtype(inputs)
-      if isinstance(inputs, collections.Sequence):
+      if isinstance(inputs, collections_abc.Sequence):
        # Unwrap lists with only one input, as we do when training on batch
        if len(inputs) == 1:
          inputs = inputs[0]
@@ -1126,9 +1127,9 @@ class Model(network.Network):
           Optional for `Sequence`: if unspecified, will use
           the `len(validation_data)` as a number of steps.
         validation_freq: Only relevant if validation data is provided. Integer
-          or `collections.Container` instance (e.g. list, tuple, etc.). If an
-          integer, specifies how many training epochs to run before a new
-          validation run is performed, e.g. `validation_freq=2` runs
+          or `collections_abc.Container` instance (e.g. list, tuple, etc.).
+          If an integer, specifies how many training epochs to run before a
+          new validation run is performed, e.g. `validation_freq=2` runs
           validation every 2 epochs. If a Container, specifies the epochs on
           which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
           validation at the end of the 1st, 2nd, and 10th epochs.

@@ -90,9 +90,10 @@ def model_iteration(model,
         declaring one epoch finished and starting the next epoch. Ignored with
         the default value of `None`.
       validation_steps: Number of steps to run validation for (only if doing
-        validation from data tensors). Ignored with the default value of `None`.
+        validation from data tensors). Ignored with the default value of
+        `None`.
       validation_freq: Only relevant if validation data is provided. Integer or
-        `collections.Container` instance (e.g. list, tuple, etc.). If an
+        `collections_abc.Container` instance (e.g. list, tuple, etc.). If an
         integer, specifies how many training epochs to run before a new
         validation run is performed, e.g. `validation_freq=2` runs
         validation every 2 epochs. If a Container, specifies the epochs on
@@ -100,8 +101,8 @@ def model_iteration(model,
         validation at the end of the 1st, 2nd, and 10th epochs.
       mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.
       validation_in_fit: if true, then this method is invoked from within
-        training iteration (for validation). In the case where `val_inputs` is a
-        dataset, this flag indicates that its iterator and feed values are
+        training iteration (for validation). In the case where `val_inputs` is
+        a dataset, this flag indicates that its iterator and feed values are
         already created so should properly reuse resources.
       prepared_feed_values_from_dataset: if True, `inputs` is a list of feed
         tensors returned from `_prepare_feed_values` call on the validation

@@ -149,7 +149,7 @@ def experimental_tpu_fit_loop(model,
       (only if doing validation from data tensors).
       Ignored with the default value of `None`.
     validation_freq: Only relevant if validation data is provided. Integer or
-      `collections.Container` instance (e.g. list, tuple, etc.). If an
+      `collections.abc.Container` instance (e.g. list, tuple, etc.). If an
       integer, specifies how many training epochs to run before a new
       validation run is performed, e.g. `validation_freq=2` runs
       validation every 2 epochs. If a Container, specifies the epochs on

@@ -19,8 +19,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
-
 import numpy as np

 from tensorflow.python.eager.backprop import GradientTape
@@ -34,6 +32,7 @@ from tensorflow.python.ops import math_ops
 from tensorflow.python.ops.losses import util as tf_losses_utils
 from tensorflow.python.platform import tf_logging as logging
 from tensorflow.python.util import nest
+from tensorflow.python.util.compat import collections_abc


 def _eager_loss_fn(outputs, targets, loss_fn, output_name):
@@ -275,7 +274,7 @@ def train_on_batch(model,
   Returns:
       total loss and the loss associated with each output.
   """
-  if isinstance(inputs, collections.Sequence):
+  if isinstance(inputs, collections_abc.Sequence):
     if len(inputs) and tensor_util.is_tensor(inputs[0]):
       inputs = training_utils.cast_if_floating_to_model_input_dtypes(inputs,
                                                                      model)
@@ -334,7 +333,7 @@ def test_on_batch(model,
   Returns:
       total loss, loss and metrics associated with each output.
   """
-  if isinstance(inputs, collections.Sequence):
+  if isinstance(inputs, collections_abc.Sequence):
     if len(inputs) and tensor_util.is_tensor(inputs[0]):
       inputs = training_utils.cast_if_floating_to_model_input_dtypes(inputs,
                                                                      model)

@@ -80,7 +80,7 @@ def model_iteration(model,
       validation_steps: Total number of steps (batches of samples) before
         declaring validation finished.
       validation_freq: Only relevant if validation data is provided. Integer or
-        `collections.Container` instance (e.g. list, tuple, etc.). If an
+        `collections.abc.Container` instance (e.g. list, tuple, etc.). If an
         integer, specifies how many training epochs to run before a new
         validation run is performed, e.g. `validation_freq=2` runs
         validation every 2 epochs. If a Container, specifies the epochs on

@@ -53,6 +53,7 @@ from tensorflow.python.ops import math_ops
 from tensorflow.python.ops.losses import util as tf_losses_utils
 from tensorflow.python.platform import tf_logging as logging
 from tensorflow.python.util import nest
+from tensorflow.python.util.compat import collections_abc


 @six.add_metaclass(abc.ABCMeta)
@@ -1025,7 +1026,7 @@ def get_loss_function(loss):
     return loss

   # Deserialize loss configuration, if needed.
-  if isinstance(loss, collections.Mapping):
+  if isinstance(loss, collections_abc.Mapping):
     loss = losses.get(loss)

   # Custom callable class.
@@ -1245,7 +1246,7 @@ def prepare_loss_functions(loss, output_names):
       ValueError: If loss is a dict with keys not in model output names,
           or if loss is a list with len not equal to model outputs.
   """
-  if isinstance(loss, collections.Mapping):
+  if isinstance(loss, collections_abc.Mapping):
     generic_utils.check_for_unexpected_keys('loss', loss, output_names)
     loss_functions = []
     for name in output_names:
@@ -1257,7 +1258,7 @@ def prepare_loss_functions(loss, output_names):
       loss_functions.append(get_loss_function(loss.get(name, None)))
   elif isinstance(loss, six.string_types):
     loss_functions = [get_loss_function(loss) for _ in output_names]
-  elif isinstance(loss, collections.Sequence):
+  elif isinstance(loss, collections_abc.Sequence):
     if len(loss) != len(output_names):
       raise ValueError('When passing a list as loss, it should have one entry '
                        'per model outputs. The model has {} outputs, but you '
@@ -1747,9 +1748,9 @@ def should_run_validation(validation_freq, epoch):
       raise ValueError('`validation_freq` can not be less than 1.')
     return one_indexed_epoch % validation_freq == 0

-  if not isinstance(validation_freq, collections.Container):
+  if not isinstance(validation_freq, collections_abc.Container):
     raise ValueError('`validation_freq` must be an Integer or '
-                     '`collections.Container` (e.g. list, tuple, etc.)')
+                     '`collections_abc.Container` (e.g. list, tuple, etc.)')
   return one_indexed_epoch in validation_freq

@@ -18,7 +18,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
 import functools
 import itertools
 import unittest
@@ -31,6 +30,7 @@ from tensorflow.python.eager import context
 from tensorflow.python.keras import testing_utils
 from tensorflow.python.platform import test
 from tensorflow.python.util import nest
+from tensorflow.python.util.compat import collections_abc


 class TestCase(test.TestCase, parameterized.TestCase):
@@ -312,7 +312,7 @@ def _test_or_class_decorator(test_or_class, single_method_decorator):
     The decorated result.
   """
   def _decorate_test_or_class(obj):
-    if isinstance(obj, collections.Iterable):
+    if isinstance(obj, collections_abc.Iterable):
       return itertools.chain.from_iterable(
           single_method_decorator(method) for method in obj)
     if isinstance(obj, type):

@@ -18,7 +18,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
 import math

 import numpy as np
@@ -33,6 +32,7 @@ from tensorflow.python.ops import nn_ops
 import tensorflow.python.ops.nn_grad  # pylint: disable=unused-import
 from tensorflow.python.platform import test
 from tensorflow.python.framework import test_util
+from tensorflow.python.util.compat import collections_abc


 def GetTestConfigs():
@@ -79,7 +79,7 @@ class Conv3DTest(test.TestCase):
       t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtype)
       t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtype)

-      if isinstance(stride, collections.Iterable):
+      if isinstance(stride, collections_abc.Iterable):
         strides = [1] + list(stride) + [1]
       else:
         strides = [1, stride, stride, stride, 1]
@@ -137,7 +137,7 @@ class Conv3DTest(test.TestCase):
     with self.cached_session(use_gpu=use_gpu):
       t1 = constant_op.constant(x1, shape=tensor_in_sizes)
       t2 = constant_op.constant(x2, shape=filter_in_sizes)
-      if isinstance(stride, collections.Iterable):
+      if isinstance(stride, collections_abc.Iterable):
         strides = list(stride)
       else:
         strides = [stride, stride, stride]
@@ -377,7 +377,7 @@ class Conv3DTest(test.TestCase):
         filter_planes, filter_rows, filter_cols, in_depth, out_depth
     ]

-    if isinstance(stride, collections.Iterable):
+    if isinstance(stride, collections_abc.Iterable):
       strides = [1] + list(stride) + [1]
     else:
       strides = [1, stride, stride, stride, 1]

@@ -18,7 +18,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
 import os
 import time

@@ -47,6 +46,7 @@ from tensorflow.python.ops import variables
 import tensorflow.python.ops.nn_grad  # pylint: disable=unused-import
 from tensorflow.python.platform import test
 from tensorflow.python.platform import tf_logging
+from tensorflow.python.util.compat import collections_abc


 def GetShrunkInceptionShapes(shrink=10):
@@ -266,7 +266,7 @@ class Conv2DTest(test.TestCase):
     with test_util.device(use_gpu):
       t1 = constant_op.constant(x1, shape=tensor_in_sizes)
       t2 = constant_op.constant(x2, shape=filter_in_sizes)
-      if isinstance(stride, collections.Iterable):
+      if isinstance(stride, collections_abc.Iterable):
        strides = list(stride)
      else:
        strides = [stride, stride]

@@ -18,8 +18,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
-
 import six

 from tensorflow.python.framework import constant_op
@@ -31,6 +29,7 @@ from tensorflow.python.ops import gen_nn_ops
 from tensorflow.python.ops import math_ops
 from tensorflow.python.util import deprecation
 from tensorflow.python.util import dispatch
+from tensorflow.python.util.compat import collections_abc
 from tensorflow.python.util.tf_export import tf_export


@@ -49,14 +48,14 @@ def clip_by_value(t, clip_value_min, clip_value_max,
   correct results.

   For example:

   ```python
   A = tf.constant([[1, 20, 13], [3, 21, 13]])
   B = tf.clip_by_value(A, clip_value_min=0, clip_value_max=3) # [[1, 3, 3],[3, 3, 3]]
-  C = tf.clip_by_value(A, clip_value_min=0., clip_value_max=3.) # throws `TypeError`
+  C = tf.clip_by_value(A, clip_value_min=0., clip_value_max=3.)  # throws `TypeError`
       as input and clip_values are of different dtype
   ```

   Args:
     t: A `Tensor` or `IndexedSlices`.
     clip_value_min: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
@@ -71,8 +70,8 @@ def clip_by_value(t, clip_value_min, clip_value_max,
   Raises:
     ValueError: If the clip tensors would trigger array broadcasting
       that would make the returned tensor larger than the input.
-    TypeError: If dtype of the input is `int32` and dtype of
-      the `clip_value_min' or `clip_value_max` is `float32`
+    TypeError: If dtype of the input is `int32` and dtype of
+      the `clip_value_min' or `clip_value_max` is `float32`
   """
   with ops.name_scope(name, "clip_by_value",
                       [t, clip_value_min, clip_value_max]) as name:
@@ -208,7 +207,7 @@ def global_norm(t_list, name=None):
   Raises:
     TypeError: If `t_list` is not a sequence.
   """
-  if (not isinstance(t_list, collections.Sequence)
+  if (not isinstance(t_list, collections_abc.Sequence)
       or isinstance(t_list, six.string_types)):
     raise TypeError("t_list should be a sequence")
   t_list = list(t_list)
@@ -282,7 +281,7 @@ def clip_by_global_norm(t_list, clip_norm, use_norm=None, name=None):
   Raises:
     TypeError: If `t_list` is not a sequence.
   """
-  if (not isinstance(t_list, collections.Sequence)
+  if (not isinstance(t_list, collections_abc.Sequence)
       or isinstance(t_list, six.string_types)):
     raise TypeError("t_list should be a sequence")
   t_list = list(t_list)

@@ -18,7 +18,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
 import hashlib
 import threading

@@ -41,6 +40,7 @@ from tensorflow.python.ops import resource_variable_ops
 # pylint: disable=wildcard-import
 from tensorflow.python.ops.gen_data_flow_ops import *
 from tensorflow.python.util import deprecation
+from tensorflow.python.util.compat import collections_abc
 from tensorflow.python.util.tf_export import tf_export

 # pylint: enable=wildcard-import
@@ -64,7 +64,7 @@ def _as_shape_list(shapes,
   """Convert shapes to a list of tuples of int (or None)."""
   del dtypes
   if unknown_dim_allowed:
-    if (not isinstance(shapes, collections.Sequence) or not shapes or
+    if (not isinstance(shapes, collections_abc.Sequence) or not shapes or
         any(shape is None or isinstance(shape, int) for shape in shapes)):
       raise ValueError(
           "When providing partial shapes, a list of shapes must be provided.")

@@ -43,6 +43,7 @@ from tensorflow.python.ops import resource_variable_ops
 from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
 from tensorflow.python.platform import tf_logging as logging
 from tensorflow.python.util import compat
+from tensorflow.python.util.compat import collections_abc
 from tensorflow.python.util.tf_export import tf_export


@@ -728,7 +729,7 @@ def _HasAnyNotNoneGrads(grads, op):
   for out_grad in out_grads:
     if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
       return True
-    if out_grad and isinstance(out_grad, collections.Sequence):
+    if out_grad and isinstance(out_grad, collections_abc.Sequence):
       if any(g is not None for g in out_grad):
         return True
   return False
@@ -953,7 +954,7 @@ def _AggregatedGrads(grads,
       assert control_flow_util.IsLoopSwitch(op)
       continue
     # Grads have to be Tensors or IndexedSlices
-    if (isinstance(out_grad, collections.Sequence) and not all(
+    if (isinstance(out_grad, collections_abc.Sequence) and not all(
         isinstance(g, (ops.Tensor, ops.IndexedSlices))
         for g in out_grad
         if g is not None

@@ -42,6 +42,7 @@ from tensorflow.python.ops.gen_nn_ops import *
 # pylint: enable=wildcard-import
 from tensorflow.python.platform import tf_logging as logging
 from tensorflow.python.util import deprecation
+from tensorflow.python.util.compat import collections_abc
 from tensorflow.python.util.deprecation import deprecated_args
 from tensorflow.python.util.deprecation import deprecated_argument_lookup

@@ -57,7 +58,7 @@ def _get_sequence(value, n, channel_index, name):
   """Formats a value input for gen_nn_ops."""
   if value is None:
     value = [1]
-  elif not isinstance(value, collections.Sized):
+  elif not isinstance(value, collections_abc.Sized):
     value = [value]

   current_n = len(value)
@@ -2742,7 +2743,7 @@ def relu6(features, name=None):
 def leaky_relu(features, alpha=0.2, name=None):
   """Compute the Leaky ReLU activation function.

-  Source: [Rectifier Nonlinearities Improve Neural Network Acoustic Models.
+  Source: [Rectifier Nonlinearities Improve Neural Network Acoustic Models.
   AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013](https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf).

   Args:

@@ -23,7 +23,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
 import numbers

 import numpy as np
@@ -46,6 +45,7 @@ from tensorflow.python.util import compat
 from tensorflow.python.util import deprecation
 from tensorflow.python.util import dispatch
 from tensorflow.python.util import tf_inspect
+from tensorflow.python.util.compat import collections_abc
 from tensorflow.python.util.tf_export import get_canonical_name_for_symbol
 from tensorflow.python.util.tf_export import tf_export

@@ -1658,10 +1658,10 @@ def sparse_merge_impl(sp_ids,
                       type(vocab_size))
     vocab_size = [vocab_size]
   else:
-    if not isinstance(sp_ids, collections.Iterable):
+    if not isinstance(sp_ids, collections_abc.Iterable):
       raise TypeError("sp_ids has to be a SparseTensor or list thereof. "
                       "Found %s" % type(sp_ids))
-    if not isinstance(vocab_size, collections.Iterable):
+    if not isinstance(vocab_size, collections_abc.Iterable):
       raise TypeError("vocab_size has to be a list of Tensors or Python ints. "
                       "Found %s" % type(vocab_size))
     for dim in vocab_size:

@@ -19,7 +19,7 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
+from tensorflow.python.util.compat import collections_abc


 class KerasModeKeys(object):
@@ -65,7 +65,7 @@ def is_train(mode):
   return mode in [KerasModeKeys.TRAIN, EstimatorModeKeys.TRAIN]


-class ModeKeyMap(collections.Mapping):
+class ModeKeyMap(collections_abc.Mapping):
   """Map using ModeKeys as keys.

   This class creates an immutable mapping from modes to values. For example,

@@ -39,6 +39,7 @@ from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import tensor_shape
 from tensorflow.python.framework import tensor_spec
 from tensorflow.python.util import compat
+from tensorflow.python.util.compat import collections_abc


 class NotEncodableError(Exception):
@@ -153,7 +154,7 @@ def _is_named_tuple(instance):
   if not isinstance(instance, tuple):
     return False
   return (hasattr(instance, "_fields") and
-          isinstance(instance._fields, collections.Sequence) and
+          isinstance(instance._fields, collections_abc.Sequence) and
           all(isinstance(f, six.string_types) for f in instance._fields))

@@ -18,8 +18,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
-
 from tensorflow.python.eager import def_function
 from tensorflow.python.eager import function as defun
 from tensorflow.python.framework import ops
@@ -29,6 +27,7 @@ from tensorflow.python.saved_model import signature_constants
 from tensorflow.python.training.tracking import base
 from tensorflow.python.util import compat
 from tensorflow.python.util import nest
+from tensorflow.python.util.compat import collections_abc


 DEFAULT_SIGNATURE_ATTR = "_default_save_signature"
@@ -87,7 +86,7 @@ def canonicalize_signatures(signatures):
   """Converts `signatures` into a dictionary of concrete functions."""
   if signatures is None:
     return {}
-  if not isinstance(signatures, collections.Mapping):
+  if not isinstance(signatures, collections_abc.Mapping):
     signatures = {
         signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signatures}
   concrete_signatures = {}
@@ -146,7 +145,7 @@ def _is_flat(sequence):

 def _normalize_outputs(outputs, function_name, signature_key):
   """Construct an output dictionary from unnormalized function outputs."""
-  if isinstance(outputs, collections.Mapping):
+  if isinstance(outputs, collections_abc.Mapping):
     for key, value in outputs.items():
       if not isinstance(value, ops.Tensor):
         raise ValueError(
@@ -158,7 +157,7 @@ def _normalize_outputs(outputs, function_name, signature_key):
     return outputs
   else:
     original_outputs = outputs
-    if not isinstance(outputs, collections.Sequence):
+    if not isinstance(outputs, collections_abc.Sequence):
       outputs = [outputs]
     if not _is_flat(outputs):
       raise ValueError(
@@ -180,7 +179,7 @@ def _normalize_outputs(outputs, function_name, signature_key):
 # saved if they contain a _SignatureMap. A ".signatures" attribute containing
 # any other type (e.g. a regular dict) will raise an exception asking the user
 # to first "del obj.signatures" if they want it overwritten.
-class _SignatureMap(collections.Mapping, base.Trackable):
+class _SignatureMap(collections_abc.Mapping, base.Trackable):
   """A collection of SavedModel signatures."""

   def __init__(self):
@@ -234,7 +233,7 @@ def create_signature_map(signatures):
     # be more problematic in case future export changes violated these
     # assertions.
     assert isinstance(func, defun.ConcreteFunction)
-    assert isinstance(func.structured_outputs, collections.Mapping)
+    assert isinstance(func.structured_outputs, collections_abc.Mapping)
     # pylint: disable=protected-access
     if len(func._arg_keywords) == 1:
       assert 1 == func._num_positional_args

@@ -19,7 +19,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
 from six.moves import xrange  # pylint: disable=redefined-builtin

 from tensorflow.core.framework import attr_value_pb2
@@ -41,6 +40,7 @@ from tensorflow.python.tpu import tpu_function
 from tensorflow.python.tpu.ops import tpu_ops
 from tensorflow.python.util import compat
 from tensorflow.python.util import nest
+from tensorflow.python.util.compat import collections_abc
 from tensorflow.python.util.tf_export import tf_export

 ops.NotDifferentiable("TPUReplicatedInput")
@@ -1022,7 +1022,7 @@ def _postprocess_flat_outputs(outputs):
   if outputs is None:
     outputs = tuple()
   # If the computation only returned one value, makes it a tuple.
-  if not isinstance(outputs, collections.Sequence):
+  if not isinstance(outputs, collections_abc.Sequence):
     outputs = (outputs,)

   # Append `no_op` here so that fetching any return value of this function

@@ -24,8 +24,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
-
 from six.moves import xrange  # pylint: disable=redefined-builtin

 from tensorflow.python.eager import context
@@ -46,6 +44,7 @@ from tensorflow.python.ops import variable_scope as vs
 from tensorflow.python.summary import summary
 from tensorflow.python.training import queue_runner
 from tensorflow.python.util import deprecation
+from tensorflow.python.util.compat import collections_abc
 from tensorflow.python.util.tf_export import tf_export


@@ -600,7 +599,7 @@ def _store_sparse_tensors_join(tensor_list_list, enqueue_many, keep_input):

 def _restore_sparse_tensors(stored_list, sparse_info_list):
   """Restore SparseTensors after dequeue in batch, batch_join, etc."""
-  received_sequence = isinstance(stored_list, collections.Sequence)
+  received_sequence = isinstance(stored_list, collections_abc.Sequence)
   if not received_sequence:
     stored_list = (stored_list,)
   tensors = [

@@ -35,6 +35,7 @@ from tensorflow.python.ops import variables
 from tensorflow.python.saved_model import revived_types
 from tensorflow.python.training.tracking import base
 from tensorflow.python.training.tracking import layer_utils
+from tensorflow.python.util.compat import collections_abc


 class NoDependency(object):
@@ -249,7 +250,7 @@ class TrackableDataStructure(base.Trackable):
     return self is other


-class List(TrackableDataStructure, collections.Sequence):
+class List(TrackableDataStructure, collections_abc.Sequence):
   """An append-only sequence type which is trackable.

   Maintains checkpoint dependencies on its contents (which must also be
@@ -371,7 +372,7 @@ class List(TrackableDataStructure, collections.Sequence):
 # TODO(tomhennigan) Update to collections.UserList?
 # TODO(allenl): Try switching this to wrapt.ObjectProxy again when we drop
 # Python 3.4 support (may still be tricky).
-class ListWrapper(List, collections.MutableSequence,
+class ListWrapper(List, collections_abc.MutableSequence,
                   # Shadowed, but there for isinstance checks.
                   list):
   """Wraps the built-in `list` to support restore-on-create for variables.
@@ -579,7 +580,7 @@ class ListWrapper(List, collections.MutableSequence,
     }


-class Mapping(TrackableDataStructure, collections.Mapping):
+class Mapping(TrackableDataStructure, collections_abc.Mapping):
   """An append-only trackable mapping data structure with string keys.

   Maintains checkpoint dependencies on its contents (which must also be

@@ -38,6 +38,12 @@ import six as _six

 from tensorflow.python.util.tf_export import tf_export

+try:
+  # This import only works on python 3.3 and above.
+  import collections.abc as collections_abc  # pylint: disable=unused-import
+except ImportError:
+  import collections as collections_abc  # pylint: disable=unused-import
+

 def as_bytes(bytes_or_text, encoding='utf-8'):
   """Converts `bytearray`, `bytes`, or unicode python input types to `bytes`.

@@ -34,12 +34,11 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections as _collections
-
 import six as _six

 from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
 from tensorflow.python.util.tf_export import tf_export
+from tensorflow.python.util.compat import collections_abc as _collections_abc


 _SHALLOW_TREE_HAS_INVALID_KEYS = (
@@ -170,7 +169,7 @@ def _yield_sorted_items(iterable):
   Yields:
     The iterable's (key, value) pairs, in order of sorted keys.
   """
-  if isinstance(iterable, _collections.Mapping):
+  if isinstance(iterable, _collections_abc.Mapping):
     # Iterate through dictionaries in a deterministic order by sorting the
     # keys. Notice this means that we ignore the original order of `OrderedDict`
     # instances. This is intentional, to avoid potential bugs caused by mixing
@@ -205,14 +204,14 @@ is_sequence_or_composite = _pywrap_tensorflow.IsSequenceOrComposite

 @tf_export("nest.is_nested")
 def is_nested(seq):
-  """Returns true if its input is a collections.Sequence (except strings).
+  """Returns true if its input is a collections.abc.Sequence (except strings).

   Args:
     seq: an input sequence.

   Returns:
-    True if the sequence is a not a string and is a collections.Sequence or a
-    dict.
+    True if the sequence is a not a string and is a collections.abc.Sequence
+    or a dict.
   """
   return is_sequence(seq)

@@ -344,7 +343,7 @@ def flatten_dict_items(dictionary):
     ValueError: If any key and value do not have the same structure layout, or
       if keys are not unique.
   """
-  if not isinstance(dictionary, (dict, _collections.Mapping)):
+  if not isinstance(dictionary, (dict, _collections_abc.Mapping)):
     raise TypeError("input must be a dictionary")
   flat_dictionary = {}
   for i, v in _six.iteritems(dictionary):
@@ -714,8 +713,8 @@ def assert_shallow_structure(shallow_tree,
       (_is_type_spec(shallow_tree) or _is_type_spec(input_tree))):
     pass  # Compatibility will be checked below.

-  elif not (isinstance(shallow_tree, _collections.Mapping)
-            and isinstance(input_tree, _collections.Mapping)):
+  elif not (isinstance(shallow_tree, _collections_abc.Mapping)
+            and isinstance(input_tree, _collections_abc.Mapping)):
     raise TypeError(_STRUCTURES_HAVE_MISMATCHING_TYPES.format(
         input_type=type(input_tree),
         shallow_type=type(shallow_tree)))
@@ -753,7 +752,7 @@ def assert_shallow_structure(shallow_tree,
           _INPUT_TREE_SMALLER_THAN_SHALLOW_TREE.format(
               input_size=len(input_tree), shallow_size=len(shallow_tree)))

-    if isinstance(shallow_tree, _collections.Mapping):
+    if isinstance(shallow_tree, _collections_abc.Mapping):
       absent_keys = set(shallow_tree) - set(input_tree)
       if absent_keys:
         raise ValueError(_SHALLOW_TREE_HAS_INVALID_KEYS
@@ -1315,5 +1314,5 @@ def flatten_with_tuple_paths(structure, expand_composites=False):
           flatten(structure, expand_composites=expand_composites)))


-_pywrap_tensorflow.RegisterType("Mapping", _collections.Mapping)
-_pywrap_tensorflow.RegisterType("Sequence", _collections.Sequence)
+_pywrap_tensorflow.RegisterType("Mapping", _collections_abc.Mapping)
+_pywrap_tensorflow.RegisterType("Sequence", _collections_abc.Sequence)

@@ -32,6 +32,7 @@ from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import math_ops
 from tensorflow.python.platform import test
 from tensorflow.python.util import nest
+from tensorflow.python.util.compat import collections_abc

 try:
   import attr  # pylint:disable=g-import-not-at-top
@@ -39,7 +40,7 @@ except ImportError:
   attr = None


-class _CustomMapping(collections.Mapping):
+class _CustomMapping(collections_abc.Mapping):

   def __init__(self, *args, **kwargs):
     self._wrapped = dict(*args, **kwargs)

@@ -17,9 +17,10 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

 import collections
 import weakref

+from tensorflow.python.util.compat import collections_abc


 class _ObjectIdentityWrapper(object):
   """Wraps an object, mapping __eq__ on wrapper to "is" on wrapped.
@@ -58,7 +59,7 @@ class _WeakObjectIdentityWrapper(_ObjectIdentityWrapper):
     return self._wrapped()


-class ObjectIdentityDictionary(collections.MutableMapping):
+class ObjectIdentityDictionary(collections_abc.MutableMapping):
   """A mutable mapping data structure which compares using "is".

   This is necessary because we have trackable objects (_ListWrapper) which
@@ -109,7 +110,7 @@ class ObjectIdentityWeakKeyDictionary(ObjectIdentityDictionary):
     yield unwrapped


-class ObjectIdentitySet(collections.MutableSet):
+class ObjectIdentitySet(collections_abc.MutableSet):
   """Like the built-in set, but compares objects with "is"."""

   def __init__(self, *args):

@@ -62,7 +62,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
 import difflib

 import six
@@ -72,6 +71,8 @@ from google.protobuf import descriptor_pool
 from google.protobuf import message
 from google.protobuf import text_format

+from ..compat import collections_abc
+

 def assertProtoEqual(self, a, b, check_initialized=True,  # pylint: disable=invalid-name
                      normalize_numbers=False, msg=None):
@@ -186,7 +187,7 @@ def NormalizeNumberFields(pb):


 def _IsMap(value):
-  return isinstance(value, collections.Mapping)
+  return isinstance(value, collections_abc.Mapping)


 def _IsRepeatedContainer(value):

@@ -18,11 +18,10 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

-import collections
-
 import numpy as np

 from tensorflow.python.framework import tensor_shape
+from tensorflow.python.util.compat import collections_abc


 def get_json_type(obj):
@@ -63,7 +62,7 @@ def get_json_type(obj):
   if isinstance(obj, tensor_shape.TensorShape):
     return obj.as_list()

-  if isinstance(obj, collections.Mapping):
+  if isinstance(obj, collections_abc.Mapping):
     return dict(obj)

   raise TypeError('Not JSON Serializable:', obj)
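A quick sanity check (an editor's sketch, not part of the commit): promoting `DeprecationWarning` to an error makes any remaining deprecated alias access fail loudly on Python 3.7-3.9:

```python
import warnings

# Make the Python 3.7-3.9 deprecation warning a hard error so any remaining
# `collections.Iterable`-style access surfaces immediately when the code runs.
warnings.simplefilter("error", DeprecationWarning)

import collections
isinstance([], collections.Iterable)  # warning-as-error on 3.7-3.9; AttributeError on 3.10+
```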