Internal symbol name change.
PiperOrigin-RevId: 350215627
Change-Id: I05b5f5bab417e9e23e646d68526974270c44482a

commit 8232f94fa3 (parent b596693a77)
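The diff below renames the internal helper `tensor_util.is_tensor` to `tensor_util.is_tf_type` across TensorFlow call sites and keeps `is_tensor` as a deprecated alias, so the exported `tf.is_tensor` API is unchanged; callers simply switch from `tensor_util.is_tensor(x)` to `tensor_util.is_tf_type(x)`. A minimal, self-contained sketch of the rename-with-alias pattern (the `FakeTensor` class and the body of the check are illustrative stand-ins, not the real `tensor_util` module):

```python
# Sketch of the rename-with-alias pattern; not the actual tensor_util module.
class FakeTensor:
  """Stand-in for a TF-native type (tf.Tensor, tf.SparseTensor, ...)."""
  is_tensor_like = True


def is_tf_type(x):
  """New canonical name: True if `x` is a TF-native type."""
  return getattr(x, "is_tensor_like", False)


# Deprecated alias so existing callers of the old name keep working.
is_tensor = is_tf_type

assert is_tf_type(FakeTensor()) and is_tensor(FakeTensor())
assert not is_tf_type(42)
```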
@@ -46,7 +46,7 @@ class ControlFlowTestBase(converter_testing.TestCase):

   def assertValuesEqual(self, actual, expected):
     values = nest.map_structure(
-        lambda x: self.evaluate(x) if tensor_util.is_tensor(x) else x,
+        lambda x: self.evaluate(x) if tensor_util.is_tf_type(x) else x,
         actual)
     self.assertAllEqual(values, expected)
@@ -103,7 +103,7 @@ class FunctionScope(object):
         return None

       def _mark_return_if_tensor(t):
-        if tensor_util.is_tensor(t):
+        if tensor_util.is_tf_type(t):
           return self.autodeps_scope.mark_as_return(t)
         return t
@ -32,7 +32,7 @@ def _validate_list_constructor(elements, element_dtype, element_shape):
|
||||
"""Validates the inputs of tensor_list."""
|
||||
if element_dtype is not None and element_shape is not None:
|
||||
return
|
||||
if tensor_util.is_tensor(elements):
|
||||
if tensor_util.is_tf_type(elements):
|
||||
return
|
||||
if isinstance(elements, (list, tuple)):
|
||||
if elements:
|
||||
@ -49,7 +49,7 @@ def _validate_list_constructor(elements, element_dtype, element_shape):
|
||||
|
||||
def match_staging_level(value, like_value):
|
||||
"""Casts a value to be staged at the same level as another."""
|
||||
if tensor_util.is_tensor(like_value):
|
||||
if tensor_util.is_tf_type(like_value):
|
||||
return constant_op.constant(value)
|
||||
return value
|
||||
|
||||
|
@ -35,7 +35,7 @@ class SpecialFunctionsTest(test.TestCase):
|
||||
tensor_one = special_functions.match_staging_level(1, some_tensor)
|
||||
python_one = special_functions.match_staging_level(1, 1)
|
||||
with self.cached_session() as sess:
|
||||
self.assertTrue(tensor_util.is_tensor(tensor_one))
|
||||
self.assertTrue(tensor_util.is_tf_type(tensor_one))
|
||||
self.assertAllEqual(self.evaluate(tensor_one), 1)
|
||||
self.assertEqual(python_one, 1)
|
||||
|
||||
@ -104,7 +104,7 @@ class SpecialFunctionsTest(test.TestCase):
|
||||
l = list_ops.tensor_list_from_tensor(
|
||||
t, element_shape=constant_op.constant([], dtype=dtypes.int32))
|
||||
self.assertTrue(
|
||||
tensor_util.is_tensor(
|
||||
tensor_util.is_tf_type(
|
||||
special_functions.stack(l, element_dtype=dtypes.float32)))
|
||||
|
||||
|
||||
|
@ -223,8 +223,8 @@ def _verify_single_loop_var(
|
||||
if isinstance(exit_, (bool, int, float, str, np.ndarray)):
|
||||
exit_ = ops.convert_to_tensor_v2(exit_)
|
||||
|
||||
if (not tensor_util.is_tensor(entry) or
|
||||
not tensor_util.is_tensor(exit_)):
|
||||
if (not tensor_util.is_tf_type(entry) or
|
||||
not tensor_util.is_tf_type(exit_)):
|
||||
return
|
||||
|
||||
# TODO(mdan): Properly account for CompositeTensors.
|
||||
@ -322,8 +322,8 @@ def verify_single_cond_var(name, body_var, orelse_var):
|
||||
if isinstance(orelse_var, (bool, int, float, str, np.ndarray)):
|
||||
orelse_var = ops.convert_to_tensor_v2(orelse_var)
|
||||
|
||||
if (not tensor_util.is_tensor(body_var) or
|
||||
not tensor_util.is_tensor(orelse_var)):
|
||||
if (not tensor_util.is_tf_type(body_var) or
|
||||
not tensor_util.is_tf_type(orelse_var)):
|
||||
return
|
||||
|
||||
# TODO(mdan): Properly account for CompositeTensors.
|
||||
@ -407,7 +407,7 @@ def for_stmt(iter_, extra_test, body, get_state, set_state, symbol_names, opts):
|
||||
get_state.
|
||||
opts: Optional dict of extra loop parameters.
|
||||
"""
|
||||
if tensor_util.is_tensor(iter_):
|
||||
if tensor_util.is_tf_type(iter_):
|
||||
if tensors.is_range_tensor(iter_):
|
||||
_tf_range_for_stmt(iter_, extra_test, body, get_state, set_state,
|
||||
symbol_names, opts)
|
||||
@ -974,7 +974,7 @@ def _placeholder_value(like, original=None):
|
||||
return original
|
||||
if isinstance(like, (int, float, bool)):
|
||||
return type(like)(0)
|
||||
if tensor_util.is_tensor(like):
|
||||
if tensor_util.is_tf_type(like):
|
||||
return array_ops.zeros(like.shape, like.dtype)
|
||||
elif isinstance(like, (list, tuple, dict)):
|
||||
return nest.map_structure(_placeholder_value, like)
|
||||
@ -1115,7 +1115,7 @@ def _tf_while_stmt(test, body, get_state, set_state, symbol_names, opts):
|
||||
])
|
||||
]):
|
||||
final_loop_vars = nest.map_structure(
|
||||
lambda v: (array_ops.identity(v) if tensor_util.is_tensor(v) else v),
|
||||
lambda v: (array_ops.identity(v) if tensor_util.is_tf_type(v) else v),
|
||||
final_loop_vars[1:],
|
||||
)
|
||||
|
||||
|
@ -144,8 +144,8 @@ def _verify_single_loop_var(
|
||||
if isinstance(exit_, (bool, int, float, str)):
|
||||
exit_ = ops.convert_to_tensor_v2(exit_)
|
||||
|
||||
if (not tensor_util.is_tensor(entry) or
|
||||
not tensor_util.is_tensor(exit_)):
|
||||
if (not tensor_util.is_tf_type(entry) or
|
||||
not tensor_util.is_tf_type(exit_)):
|
||||
return
|
||||
|
||||
# TODO(mdan): Properly account for CompositeTensors.
|
||||
@ -228,8 +228,8 @@ def _verify_single_cond_var(name, body_var, orelse_var):
|
||||
if isinstance(orelse_var, (bool, int, float, str)):
|
||||
orelse_var = ops.convert_to_tensor_v2(orelse_var)
|
||||
|
||||
if (not tensor_util.is_tensor(body_var) or
|
||||
not tensor_util.is_tensor(orelse_var)):
|
||||
if (not tensor_util.is_tf_type(body_var) or
|
||||
not tensor_util.is_tf_type(orelse_var)):
|
||||
return
|
||||
|
||||
# TODO(mdan): Properly account for CompositeTensors.
|
||||
@ -325,7 +325,7 @@ def for_stmt(iter_,
|
||||
Returns:
|
||||
Tuple containing the final state.
|
||||
"""
|
||||
if tensor_util.is_tensor(iter_):
|
||||
if tensor_util.is_tf_type(iter_):
|
||||
if tensors.is_range_tensor(iter_):
|
||||
return _tf_range_for_stmt(iter_, extra_test, body, get_state, set_state,
|
||||
init_vars, basic_symbol_names,
|
||||
|
@ -106,7 +106,7 @@ def tf_tensor_array_new(elements, element_dtype=None, element_shape=None):
|
||||
|
||||
def tf_tensor_list_new(elements, element_dtype=None, element_shape=None):
|
||||
"""Overload of new_list that stages a Tensor list creation."""
|
||||
if tensor_util.is_tensor(elements):
|
||||
if tensor_util.is_tf_type(elements):
|
||||
if element_shape is not None:
|
||||
raise ValueError(
|
||||
'element shape may not be specified when creating list from tensor')
|
||||
@ -188,7 +188,7 @@ def list_append(list_, x):
|
||||
"""
|
||||
if isinstance(list_, tensor_array_ops.TensorArray):
|
||||
return _tf_tensorarray_append(list_, x)
|
||||
elif tensor_util.is_tensor(list_):
|
||||
elif tensor_util.is_tf_type(list_):
|
||||
if list_.dtype == dtypes.variant:
|
||||
return _tf_tensor_list_append(list_, x)
|
||||
else:
|
||||
@ -258,7 +258,7 @@ def list_pop(list_, i, opts):
|
||||
|
||||
if isinstance(list_, tensor_array_ops.TensorArray):
|
||||
raise ValueError('TensorArray does not support item removal')
|
||||
elif tensor_util.is_tensor(list_):
|
||||
elif tensor_util.is_tf_type(list_):
|
||||
if list_.dtype == dtypes.variant:
|
||||
return _tf_tensor_list_pop(list_, i, opts)
|
||||
else:
|
||||
@ -322,7 +322,7 @@ def list_stack(list_, opts):
|
||||
|
||||
if isinstance(list_, tensor_array_ops.TensorArray):
|
||||
return _tf_tensorarray_stack(list_)
|
||||
elif tensor_util.is_tensor(list_):
|
||||
elif tensor_util.is_tf_type(list_):
|
||||
if list_.dtype == dtypes.variant:
|
||||
return _tf_tensor_list_stack(list_, opts)
|
||||
else:
|
||||
|
@ -53,7 +53,7 @@ def assert_stmt(expression1, expression2):
|
||||
if args or keywords:
|
||||
raise ValueError('{} may not have any arguments'.format(expression2))
|
||||
|
||||
if tensor_util.is_tensor(expression1):
|
||||
if tensor_util.is_tf_type(expression1):
|
||||
return _tf_assert_stmt(expression1, expression2)
|
||||
else:
|
||||
return _py_assert_stmt(expression1, expression2)
|
||||
|
@ -25,7 +25,7 @@ from tensorflow.python.ops import gen_math_ops
|
||||
|
||||
def not_(a):
|
||||
"""Functional form of "not"."""
|
||||
if tensor_util.is_tensor(a):
|
||||
if tensor_util.is_tf_type(a):
|
||||
return _tf_not(a)
|
||||
return _py_not(a)
|
||||
|
||||
@ -43,7 +43,7 @@ def _py_not(a):
|
||||
def and_(a, b):
|
||||
"""Functional form of "and". Uses lazy evaluation semantics."""
|
||||
a_val = a()
|
||||
if tensor_util.is_tensor(a_val):
|
||||
if tensor_util.is_tf_type(a_val):
|
||||
return _tf_lazy_and(a_val, b)
|
||||
return _py_lazy_and(a_val, b)
|
||||
|
||||
@ -62,7 +62,7 @@ def _py_lazy_and(cond, b):
|
||||
def or_(a, b):
|
||||
"""Functional form of "or". Uses lazy evaluation semantics."""
|
||||
a_val = a()
|
||||
if tensor_util.is_tensor(a_val):
|
||||
if tensor_util.is_tf_type(a_val):
|
||||
return _tf_lazy_or(a_val, b)
|
||||
return _py_lazy_or(a_val, b)
|
||||
|
||||
@ -80,7 +80,7 @@ def _py_lazy_or(cond, b):
|
||||
|
||||
def eq(a, b):
|
||||
"""Functional form of "equal"."""
|
||||
if tensor_util.is_tensor(a) or tensor_util.is_tensor(b):
|
||||
if tensor_util.is_tf_type(a) or tensor_util.is_tf_type(b):
|
||||
return _tf_equal(a, b)
|
||||
return _py_equal(a, b)
|
||||
|
||||
|
@ -177,7 +177,7 @@ def super_in_original_context(f, args, caller_fn_scope):
|
||||
|
||||
|
||||
def abs_(x):
|
||||
if tensor_util.is_tensor(x):
|
||||
if tensor_util.is_tf_type(x):
|
||||
return _tf_abs(x)
|
||||
if isinstance(x, dataset_ops.DatasetV2):
|
||||
return _tf_dataset_abs(x)
|
||||
@ -202,7 +202,7 @@ def _py_abs(x):
|
||||
|
||||
|
||||
def float_(x=0):
|
||||
if tensor_util.is_tensor(x):
|
||||
if tensor_util.is_tf_type(x):
|
||||
return _tf_float(x)
|
||||
return _py_float(x)
|
||||
|
||||
@ -219,7 +219,7 @@ def _py_float(x):
|
||||
|
||||
|
||||
def int_(x=0, base=UNSPECIFIED):
|
||||
if tensor_util.is_tensor(x):
|
||||
if tensor_util.is_tf_type(x):
|
||||
return _tf_int(x, base)
|
||||
return _py_int(x, base)
|
||||
|
||||
@ -245,7 +245,7 @@ def len_(s):
|
||||
return _tf_tensor_array_len(s)
|
||||
elif tensors.is_tensor_list(s):
|
||||
return _tf_tensor_list_len(s)
|
||||
elif tensor_util.is_tensor(s):
|
||||
elif tensor_util.is_tf_type(s):
|
||||
return _tf_tensor_len(s)
|
||||
if isinstance(s, dataset_ops.DatasetV2):
|
||||
return _tf_dataset_len(s)
|
||||
@ -326,7 +326,7 @@ def print_(*objects, **kwargs):
|
||||
raise ValueError('invalid keyword arguments: {}'.format(unknown_kwargs))
|
||||
|
||||
# TODO(mdan): Use next.flatten(objects) instead?
|
||||
if any(tensor_util.is_tensor(o) for o in objects):
|
||||
if any(tensor_util.is_tf_type(o) for o in objects):
|
||||
# TODO(mdan): use tf.print instead.
|
||||
return _tf_py_func_print(objects, kwargs)
|
||||
else:
|
||||
@ -346,7 +346,7 @@ def _tf_py_func_print(objects, kwargs):
|
||||
override_kwargs['flush'] = True
|
||||
|
||||
def print_wrapper(*vals):
|
||||
vals = tuple(v.numpy() if tensor_util.is_tensor(v) else v for v in vals)
|
||||
vals = tuple(v.numpy() if tensor_util.is_tf_type(v) else v for v in vals)
|
||||
if not six.PY2:
|
||||
# TensorFlow doesn't seem to generate Unicode when passing strings to
|
||||
# py_func. This causes the print to add a "b'" wrapper to the output,
|
||||
@ -360,7 +360,7 @@ def _tf_py_func_print(objects, kwargs):
|
||||
|
||||
|
||||
def range_(start_or_stop, stop=UNSPECIFIED, step=UNSPECIFIED):
|
||||
if any(tensor_util.is_tensor(s) for s in (start_or_stop, stop, step)):
|
||||
if any(tensor_util.is_tf_type(s) for s in (start_or_stop, stop, step)):
|
||||
return _tf_range(start_or_stop, stop, step)
|
||||
return _py_range(start_or_stop, stop, step)
|
||||
|
||||
@ -596,7 +596,7 @@ def _py_all(iterable):
|
||||
|
||||
|
||||
def sorted_(iterable, key=UNSPECIFIED, reverse=UNSPECIFIED):
|
||||
if tensor_util.is_tensor(iterable):
|
||||
if tensor_util.is_tf_type(iterable):
|
||||
return _tf_sorted(iterable, key, reverse)
|
||||
return _py_sorted(iterable, key, reverse)
|
||||
|
||||
|
@ -56,7 +56,7 @@ def get_item(target, i, opts):
|
||||
|
||||
if isinstance(target, tensor_array_ops.TensorArray):
|
||||
return _tf_tensorarray_get_item(target, i)
|
||||
elif tensor_util.is_tensor(target):
|
||||
elif tensor_util.is_tf_type(target):
|
||||
if target.dtype == dtypes.variant:
|
||||
return _tf_tensor_list_get_item(target, i, opts)
|
||||
elif target.dtype == dtypes.string and target.shape.ndims == 0:
|
||||
@ -116,7 +116,7 @@ def set_item(target, i, x):
|
||||
"""
|
||||
if isinstance(target, tensor_array_ops.TensorArray):
|
||||
return _tf_tensorarray_set_item(target, i, x)
|
||||
elif tensor_util.is_tensor(target):
|
||||
elif tensor_util.is_tf_type(target):
|
||||
if target.dtype == dtypes.variant:
|
||||
return _tf_tensor_list_set_item(target, i, x)
|
||||
else:
|
||||
|
@ -70,7 +70,7 @@ def wrap_py_func(f, return_dtypes, args, kwargs=None, use_dummy_return=False):
|
||||
# Of the positional arguments, only grab the tensor ones to be passed through
|
||||
# the py_func.
|
||||
n_args = len(args)
|
||||
arg_is_tensor = tuple(map(tensor_util.is_tensor, args))
|
||||
arg_is_tensor = tuple(map(tensor_util.is_tf_type, args))
|
||||
for i in range(n_args):
|
||||
if arg_is_tensor[i]:
|
||||
tensor_args_idx[i] = len(tensor_args)
|
||||
@ -90,7 +90,7 @@ def wrap_py_func(f, return_dtypes, args, kwargs=None, use_dummy_return=False):
|
||||
# kwarg_keys = ('a', 'b')
|
||||
if kwargs:
|
||||
kwarg_keys = tuple(kwargs.keys())
|
||||
kwarg_is_tensor = {k: tensor_util.is_tensor(kwargs[k]) for k in kwarg_keys}
|
||||
kwarg_is_tensor = {k: tensor_util.is_tf_type(kwargs[k]) for k in kwarg_keys}
|
||||
for k in kwarg_keys:
|
||||
if kwarg_is_tensor[k]:
|
||||
tensor_args_idx[k] = len(tensor_args)
|
||||
|
@ -31,7 +31,7 @@ from tensorflow.python.ops import tensor_array_ops
|
||||
|
||||
def is_dense_tensor(t):
|
||||
# TODO(mdan): Resolve this inconsistency.
|
||||
return (tensor_util.is_tensor(t) and
|
||||
return (tensor_util.is_tf_type(t) and
|
||||
not isinstance(t, sparse_tensor.SparseTensor))
|
||||
|
||||
|
||||
@ -44,10 +44,10 @@ def is_tensor_list(t):
|
||||
# With TF lacking support for templated types, this is unfortunately the
|
||||
# closest we can get right now. A dedicated op ought to be possible to
|
||||
# construct.
|
||||
return (tensor_util.is_tensor(t) and t.dtype == dtypes.variant and
|
||||
return (tensor_util.is_tf_type(t) and t.dtype == dtypes.variant and
|
||||
not t.shape.ndims)
|
||||
|
||||
|
||||
def is_range_tensor(t):
|
||||
"""Returns True if a tensor is the result of a tf.range op. Best effort."""
|
||||
return tensor_util.is_tensor(t) and hasattr(t, 'op') and t.op.type == 'Range'
|
||||
return tensor_util.is_tf_type(t) and hasattr(t, 'op') and t.op.type == 'Range'
|
||||
|
@ -514,7 +514,7 @@ def make_csv_dataset_v2(
|
||||
if column_defaults is not None:
|
||||
column_defaults = [
|
||||
constant_op.constant([], dtype=x)
|
||||
if not tensor_util.is_tensor(x) and x in _ACCEPTABLE_CSV_TYPES else x
|
||||
if not tensor_util.is_tf_type(x) and x in _ACCEPTABLE_CSV_TYPES else x
|
||||
for x in column_defaults
|
||||
]
|
||||
else:
|
||||
@ -760,7 +760,7 @@ class CsvDatasetV2(dataset_ops.DatasetSource):
|
||||
argument_dtype=dtypes.string)
|
||||
record_defaults = [
|
||||
constant_op.constant([], dtype=x)
|
||||
if not tensor_util.is_tensor(x) and x in _ACCEPTABLE_CSV_TYPES else x
|
||||
if not tensor_util.is_tf_type(x) and x in _ACCEPTABLE_CSV_TYPES else x
|
||||
for x in record_defaults
|
||||
]
|
||||
self._record_defaults = ops.convert_n_to_tensor(
|
||||
|
@ -89,7 +89,7 @@ def reduce_non_distributed_value(
|
||||
# be a single value. We also handle the case when `value` is a single value
|
||||
# and equal to 0.
|
||||
# TODO:(b/138823479): handle the tensor value properly.
|
||||
if not tensor_util.is_tensor(value) and value == 0:
|
||||
if not tensor_util.is_tf_type(value) and value == 0:
|
||||
return 0
|
||||
# If there is only a single value and the reduce op is MEAN,
|
||||
# that value should be on all destinations.
|
||||
|
@ -2878,7 +2878,7 @@ class ReplicaContextBase(object):
|
||||
self._thread_context = distribution_strategy_context._InReplicaThreadMode( # pylint: disable=protected-access
|
||||
self)
|
||||
if not (replica_id_in_sync_group is None or
|
||||
tensor_util.is_tensor(replica_id_in_sync_group) or
|
||||
tensor_util.is_tf_type(replica_id_in_sync_group) or
|
||||
isinstance(replica_id_in_sync_group, int)):
|
||||
raise ValueError(
|
||||
"replica_id_in_sync_group can only be an integer, a Tensor or None.")
|
||||
@ -2981,7 +2981,7 @@ class ReplicaContextBase(object):
|
||||
# error. Making the tensor at call time to ensure it is the same graph where
|
||||
# it's used. However to be compatible with tpu.replicate(),
|
||||
# self._replica_id_in_sync_group can also be a Tensor.
|
||||
if tensor_util.is_tensor(self._replica_id_in_sync_group):
|
||||
if tensor_util.is_tf_type(self._replica_id_in_sync_group):
|
||||
return self._replica_id_in_sync_group
|
||||
return constant_op.constant(
|
||||
self._replica_id_in_sync_group,
|
||||
|
@ -178,7 +178,7 @@ def update_regroup(extended, updates, group):
|
||||
# If values is just ops, the grouping is enough. Everything in values
|
||||
# should have the same type, since we expect every replica to be performing
|
||||
# the same computation.
|
||||
if not all(tensor_util.is_tensor(v) for v in values):
|
||||
if not all(tensor_util.is_tf_type(v) for v in values):
|
||||
return g
|
||||
|
||||
# Otherwise we need tensors with the same values as `values`, but
|
||||
|
@ -1969,10 +1969,10 @@ def _get_batched_dataset_attributes(d):
|
||||
drop_remainder = d._drop_remainder_t
|
||||
# pylint: enable=protected-access
|
||||
|
||||
if tensor_util.is_tensor(batch_size):
|
||||
if tensor_util.is_tf_type(batch_size):
|
||||
batch_size = tensor_util.constant_value(batch_size)
|
||||
|
||||
if tensor_util.is_tensor(drop_remainder):
|
||||
if tensor_util.is_tf_type(drop_remainder):
|
||||
drop_remainder = tensor_util.constant_value(drop_remainder)
|
||||
|
||||
return batch_size, drop_remainder
|
||||
|
@ -1087,7 +1087,7 @@ class TPUExtended(distribute_lib.StrategyExtendedV1):
|
||||
|
||||
def _reduce_to(self, reduce_op, value, destinations, options):
|
||||
if (isinstance(value, values.DistributedValues) or
|
||||
tensor_util.is_tensor(value)
|
||||
tensor_util.is_tf_type(value)
|
||||
) and tpu_values.enclosing_tpu_context() is not None:
|
||||
if reduce_op == reduce_util.ReduceOp.MEAN:
|
||||
# TODO(jhseu): Revisit once we support model-parallelism.
|
||||
@ -1341,7 +1341,7 @@ class TPUExtended(distribute_lib.StrategyExtendedV1):
|
||||
maximum_shapes = []
|
||||
flattened_list = nest.flatten(replicate_inputs[0])
|
||||
for input_tensor in flattened_list:
|
||||
if tensor_util.is_tensor(input_tensor):
|
||||
if tensor_util.is_tf_type(input_tensor):
|
||||
rank = input_tensor.shape.rank
|
||||
else:
|
||||
rank = np.ndim(input_tensor)
|
||||
|
@ -678,7 +678,7 @@ def _zeros(shape, dtype):
|
||||
|
||||
device = ctx.device_name
|
||||
|
||||
if tensor_util.is_tensor(shape):
|
||||
if tensor_util.is_tf_type(shape):
|
||||
shape_key = shape.ref()
|
||||
else:
|
||||
shape_key = shape
|
||||
|
@ -51,7 +51,7 @@ def _DTypeFromTensor(tensor):
|
||||
|
||||
def IsTrainable(tensor_or_dtype):
|
||||
"""Determines whether a tensor or dtype supports infinitesimal changes."""
|
||||
if tensor_util.is_tensor(tensor_or_dtype):
|
||||
if tensor_util.is_tf_type(tensor_or_dtype):
|
||||
dtype = _DTypeFromTensor(tensor_or_dtype)
|
||||
else:
|
||||
dtype = tensor_or_dtype
|
||||
|
@ -306,7 +306,7 @@ class WrappedFunction(function.ConcreteFunction):
|
||||
elif isinstance(fetch, meta_graph_pb2.TensorInfo):
|
||||
tensor_infos.append(fetch)
|
||||
decoded = _get_element_from_tensor_info(fetch, self._func_graph)
|
||||
if (tensor_util.is_tensor(decoded) or
|
||||
if (tensor_util.is_tf_type(decoded) or
|
||||
isinstance(decoded, composite_tensor.CompositeTensor)):
|
||||
tensor_fetches.append(decoded)
|
||||
else:
|
||||
@ -349,7 +349,7 @@ class WrappedFunction(function.ConcreteFunction):
|
||||
for ti in tensor_infos:
|
||||
if ti.WhichOneof("encoding") == "name": # Dense tensors only
|
||||
t = pruned_graph.as_graph_element(ti.name)
|
||||
if tensor_util.is_tensor(t):
|
||||
if tensor_util.is_tf_type(t):
|
||||
t.set_shape(tensor_shape.TensorShape(ti.tensor_shape))
|
||||
# pylint: disable=protected-access
|
||||
for f in self.graph._functions.values():
|
||||
|
@ -3560,12 +3560,12 @@ class CustomConvertToCompositeTensorTest(test_util.TensorFlowTestCase):
|
||||
"""Tests that a user can register a CompositeTensor converter."""
|
||||
x = _MyTuple((1, [2., 3.], [[4, 5], [6, 7]]))
|
||||
y = ops.convert_to_tensor_or_composite(x)
|
||||
self.assertFalse(tensor_util.is_tensor(y))
|
||||
self.assertFalse(tensor_util.is_tf_type(y))
|
||||
self.assertIsInstance(y, _TupleTensor)
|
||||
self.assertLen(y, len(x))
|
||||
for x_, y_ in zip(x, y):
|
||||
self.assertIsInstance(y_, ops.Tensor)
|
||||
self.assertTrue(tensor_util.is_tensor(y_))
|
||||
self.assertTrue(tensor_util.is_tf_type(y_))
|
||||
self.assertAllEqual(x_, tensor_util.constant_value(y_))
|
||||
|
||||
|
||||
|
@@ -1025,10 +1025,10 @@ def constant_value_as_shape(tensor):  # pylint: disable=invalid-name

 # TODO(mdan): Deprecate in favor of more static-friendly types.
 @tf_export("is_tensor")
-def is_tensor(x):  # pylint: disable=invalid-name
+def is_tf_type(x):  # pylint: disable=invalid-name
   """Checks whether `x` is a TF-native type that can be passed to many TF ops.

-  Use is_tensor to differentiate types that can ingested by TensorFlow ops
+  Use `is_tensor` to differentiate types that can ingested by TensorFlow ops
   without any conversion (e.g., `tf.Tensor`, `tf.SparseTensor`, and
   `tf.RaggedTensor`) from types that need to be converted into tensors before
   they are ingested (e.g., numpy `ndarray` and Python scalars).
@@ -1038,23 +1038,29 @@ def is_tensor(x):  # pylint: disable=invalid-name
   ```python
   if not tf.is_tensor(t):
     t = tf.convert_to_tensor(t)
-  return t.dtype
+  return t.shape, t.dtype
   ```

   we check to make sure that `t` is a tensor (and convert it if not) before
-  accessing its `shape` and `dtype`.
+  accessing its `shape` and `dtype`. (But note that not all TensorFlow native
+  types have shapes or dtypes; `tf.data.Dataset` is an example of a TensorFlow
+  native type that has neither shape nor dtype.)

   Args:
     x: A python object to check.

   Returns:
-    `True` if `x` is a tensor or "tensor-like", `False` if not.
+    `True` if `x` is a TensorFlow-native type.
   """
   return (isinstance(x, internal.NativeObject) or
           isinstance(x, core.Tensor) or
           getattr(x, "is_tensor_like", False))


+# Deprecated alias for tensor_util.is_tf_type.
+is_tensor = is_tf_type
+
+
 def shape_tensor(shape):  # pylint: disable=invalid-name
   """Convert to an int32 or int64 tensor, defaulting to int32 if empty."""
   dtype = None
@@ -778,32 +778,32 @@ class IsTensorTest(test.TestCase):

   def testConstantTensor(self):
     np_val = np.random.rand(3).astype(np.int32)
     tf_val = constant_op.constant(np_val)
-    self.assertFalse(tensor_util.is_tensor(np_val))
-    self.assertTrue(tensor_util.is_tensor(tf_val))
+    self.assertFalse(tensor_util.is_tf_type(np_val))
+    self.assertTrue(tensor_util.is_tf_type(tf_val))

   def testRaggedTensor(self):
     rt = ragged_factory_ops.constant([[1, 2], [3]])
     rt_value = self.evaluate(rt)
-    self.assertTrue(tensor_util.is_tensor(rt))
-    self.assertFalse(tensor_util.is_tensor(rt_value))
+    self.assertTrue(tensor_util.is_tf_type(rt))
+    self.assertFalse(tensor_util.is_tf_type(rt_value))

   def testSparseTensor(self):
     st = sparse_tensor.SparseTensor([[1, 2]], [3], [10, 10])
     st_value = self.evaluate(st)
-    self.assertTrue(tensor_util.is_tensor(st))
-    self.assertFalse(tensor_util.is_tensor(st_value))
+    self.assertTrue(tensor_util.is_tf_type(st))
+    self.assertFalse(tensor_util.is_tf_type(st_value))

   def testIndexedSlices(self):
     x = indexed_slices.IndexedSlices(
         constant_op.constant([1, 2, 3]), constant_op.constant([10, 20, 30]))
     x_value = indexed_slices.IndexedSlicesValue(
         np.array([1, 2, 3]), np.array([10, 20, 30]), np.array([100]))
-    self.assertTrue(tensor_util.is_tensor(x))
-    self.assertFalse(tensor_util.is_tensor(x_value))
+    self.assertTrue(tensor_util.is_tf_type(x))
+    self.assertFalse(tensor_util.is_tf_type(x_value))

   def testVariable(self):
     v = variables.Variable([1, 2, 3])
-    self.assertTrue(tensor_util.is_tensor(v))
+    self.assertTrue(tensor_util.is_tf_type(v))


 class ConstantValueTest(test.TestCase):
@@ -2555,7 +2555,7 @@ class TensorFlowTestCase(googletest.TestCase):

   def _GetNdArray(self, a):
     # If a is tensor-like then convert it to ndarray
-    if tensor_util.is_tensor(a):
+    if tensor_util.is_tf_type(a):
       if isinstance(a, ops._EagerTensorBase):
         a = a.numpy()
       else:
@ -3713,7 +3713,7 @@ def get_value(x):
|
||||
Returns:
|
||||
A Numpy array.
|
||||
"""
|
||||
if not tensor_util.is_tensor(x):
|
||||
if not tensor_util.is_tf_type(x):
|
||||
return x
|
||||
if context.executing_eagerly() or isinstance(x, ops.EagerTensor):
|
||||
return x.numpy()
|
||||
@ -4032,7 +4032,7 @@ class GraphExecutionFunction(object):
|
||||
if value is None:
|
||||
continue
|
||||
|
||||
if tensor_util.is_tensor(value):
|
||||
if tensor_util.is_tf_type(value):
|
||||
# Case: feeding symbolic tensor.
|
||||
feed_symbols.append(tensor)
|
||||
symbol_vals.append(value)
|
||||
@ -4719,7 +4719,7 @@ def in_train_phase(x, alt, training=None):
|
||||
training = learning_phase()
|
||||
|
||||
# TODO(b/138862903): Handle the case when training is tensor.
|
||||
if not tensor_util.is_tensor(training):
|
||||
if not tensor_util.is_tf_type(training):
|
||||
if training == 1 or training is True:
|
||||
if callable(x):
|
||||
return x()
|
||||
@ -6553,7 +6553,7 @@ def cast_variables_to_tensor(tensors):
|
||||
|
||||
|
||||
def _is_symbolic_tensor(x):
|
||||
return tensor_util.is_tensor(x) and not isinstance(x, ops.EagerTensor)
|
||||
return tensor_util.is_tf_type(x) and not isinstance(x, ops.EagerTensor)
|
||||
|
||||
|
||||
def convert_inputs_if_ragged(inputs):
|
||||
|
@ -346,7 +346,7 @@ def validate_per_replica_inputs(distribution_strategy, x):
|
||||
# At this point x should contain only tensors.
|
||||
x_values = distribution_strategy.unwrap(x)
|
||||
for value in x_values:
|
||||
if not tensor_util.is_tensor(value):
|
||||
if not tensor_util.is_tf_type(value):
|
||||
raise ValueError('Dataset input to the model should be tensors instead '
|
||||
'they are of type {}'.format(type(value)))
|
||||
|
||||
|
@ -1074,7 +1074,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
|
||||
training_value = backend.learning_phase()
|
||||
# Force the training_value to be bool type which matches to the contract
|
||||
# for layer/model call args.
|
||||
if tensor_util.is_tensor(training_value):
|
||||
if tensor_util.is_tf_type(training_value):
|
||||
training_value = math_ops.cast(training_value, dtypes.bool)
|
||||
else:
|
||||
training_value = bool(training_value)
|
||||
@ -1209,7 +1209,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
|
||||
# Ensure value is a `bool` or `tf.bool`.
|
||||
if isinstance(training_mode, bool):
|
||||
pass
|
||||
elif tensor_util.is_tensor(training_mode):
|
||||
elif tensor_util.is_tf_type(training_mode):
|
||||
training_mode = math_ops.cast(training_mode, dtypes.bool)
|
||||
else:
|
||||
training_mode = bool(training_mode)
|
||||
@ -1551,7 +1551,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
|
||||
loss = loss()
|
||||
if loss is None:
|
||||
return None # Will be filtered out when computing the .losses property
|
||||
if not tensor_util.is_tensor(loss):
|
||||
if not tensor_util.is_tf_type(loss):
|
||||
loss = ops.convert_to_tensor_v2_with_dispatch(
|
||||
loss, dtype=backend.floatx())
|
||||
loss._unconditional_loss = True # pylint: disable=protected-access
|
||||
@ -1568,7 +1568,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
|
||||
continue
|
||||
if loss is None:
|
||||
continue
|
||||
if not tensor_util.is_tensor(loss) and not isinstance(
|
||||
if not tensor_util.is_tf_type(loss) and not isinstance(
|
||||
loss, keras_tensor.KerasTensor):
|
||||
loss = ops.convert_to_tensor_v2_with_dispatch(
|
||||
loss, dtype=backend.floatx())
|
||||
@ -1577,7 +1577,7 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
|
||||
isinstance(loss, keras_tensor.KerasTensor)) and
|
||||
not base_layer_utils.is_in_tf_function()):
|
||||
symbolic_losses.append(loss)
|
||||
elif tensor_util.is_tensor(loss):
|
||||
elif tensor_util.is_tf_type(loss):
|
||||
eager_losses.append(loss)
|
||||
|
||||
self._callable_losses.extend(callable_losses)
|
||||
|
@ -706,7 +706,7 @@ def mark_as_return(outputs, acd):
|
||||
|
||||
def _mark_as_return(tensor):
|
||||
"""Marks `tensor` as the return value for automatic control deps."""
|
||||
if not tensor_util.is_tensor(tensor):
|
||||
if not tensor_util.is_tf_type(tensor):
|
||||
return tensor
|
||||
|
||||
# pylint: disable=protected-access
|
||||
|
@ -735,7 +735,7 @@ class Layer(base_layer.Layer):
|
||||
if self._expects_training_arg and training_value is not None:
|
||||
# Force the training_value to be bool type which matches to the contract
|
||||
# for layer/model call args.
|
||||
if tensor_util.is_tensor(training_value):
|
||||
if tensor_util.is_tf_type(training_value):
|
||||
training_value = math_ops.cast(training_value, dtypes.bool)
|
||||
else:
|
||||
training_value = bool(training_value)
|
||||
@ -1035,7 +1035,7 @@ class Layer(base_layer.Layer):
|
||||
loss = loss()
|
||||
if loss is None:
|
||||
return None # Will be filtered out when computing the .losses property
|
||||
if not tensor_util.is_tensor(loss):
|
||||
if not tensor_util.is_tf_type(loss):
|
||||
loss = ops.convert_to_tensor_v2_with_dispatch(
|
||||
loss, dtype=backend.floatx())
|
||||
loss._unconditional_loss = (inputs is None) # pylint: disable=protected-access
|
||||
@ -1051,7 +1051,7 @@ class Layer(base_layer.Layer):
|
||||
continue
|
||||
if loss is None:
|
||||
continue
|
||||
if not tensor_util.is_tensor(loss):
|
||||
if not tensor_util.is_tf_type(loss):
|
||||
loss = ops.convert_to_tensor_v2_with_dispatch(
|
||||
loss, dtype=backend.floatx())
|
||||
# TF Functions should take the eager path.
|
||||
|
@ -78,7 +78,7 @@ class Node(object):
|
||||
self._flat_arguments = nest.flatten((self.call_args, self.call_kwargs))
|
||||
# Used to avoid expensive `nest` operations in the most common case.
|
||||
self._single_positional_tensor_passed = (not self.call_kwargs and len(
|
||||
self.call_args) == 1 and tensor_util.is_tensor(self.call_args[0]))
|
||||
self.call_args) == 1 and tensor_util.is_tf_type(self.call_args[0]))
|
||||
|
||||
if not keras_tensor.keras_tensors_enabled():
|
||||
# Create TensorFlowOpLayers if needed.
|
||||
|
@ -44,7 +44,7 @@ class PartialBatchPaddingHandler(object):
|
||||
|
||||
def _find_any_tensor(batch_features):
|
||||
tensors = [
|
||||
x for x in nest.flatten(batch_features) if tensor_util.is_tensor(x)
|
||||
x for x in nest.flatten(batch_features) if tensor_util.is_tf_type(x)
|
||||
]
|
||||
if not tensors:
|
||||
raise ValueError('Cannot find any Tensor in features dict.')
|
||||
|
@ -363,7 +363,7 @@ class Sequential(functional.Functional):
|
||||
def call(self, inputs, training=None, mask=None): # pylint: disable=redefined-outer-name
|
||||
# If applicable, update the static input shape of the model.
|
||||
if not self._has_explicit_input_shape:
|
||||
if not tensor_util.is_tensor(inputs) and not isinstance(
|
||||
if not tensor_util.is_tf_type(inputs) and not isinstance(
|
||||
inputs, np_arrays.ndarray):
|
||||
# This is a Sequential with mutiple inputs. This is technically an
|
||||
# invalid use case of Sequential, but we tolerate it for backwards
|
||||
|
@ -48,7 +48,7 @@ def slice_arrays(arrays, indices, contiguous=True):
|
||||
if not isinstance(arrays, list):
|
||||
converted_to_list = True
|
||||
arrays = [arrays]
|
||||
if any(tensor_util.is_tensor(x) for x in arrays):
|
||||
if any(tensor_util.is_tf_type(x) for x in arrays):
|
||||
if not contiguous:
|
||||
entries = [[x[i:i + 1] for i in indices] for x in arrays]
|
||||
slices = [array_ops.concat(x, axis=0) for x in entries]
|
||||
|
@ -531,7 +531,7 @@ def standardize_single_array(x, expected_shape=None):
|
||||
|
||||
if (x.shape is not None and len(x.shape) == 1 and
|
||||
(expected_shape is None or len(expected_shape) != 1)):
|
||||
if tensor_util.is_tensor(x):
|
||||
if tensor_util.is_tf_type(x):
|
||||
x = array_ops.expand_dims(x, axis=1)
|
||||
else:
|
||||
x = np.expand_dims(x, 1)
|
||||
@ -644,7 +644,7 @@ def standardize_input_data(data,
|
||||
if shapes:
|
||||
for i in range(len(names)):
|
||||
if shapes[i] is not None:
|
||||
if tensor_util.is_tensor(data[i]):
|
||||
if tensor_util.is_tf_type(data[i]):
|
||||
tensorshape = data[i].shape
|
||||
if not tensorshape:
|
||||
continue
|
||||
@ -742,7 +742,7 @@ def check_array_lengths(inputs, targets, weights=None):
|
||||
"""
|
||||
|
||||
def is_tensor_or_composite_tensor(x):
|
||||
return tensor_util.is_tensor(x) or is_composite_or_composite_value(x)
|
||||
return tensor_util.is_tf_type(x) or is_composite_or_composite_value(x)
|
||||
|
||||
def set_of_lengths(x):
|
||||
# Returns a set with the variation between
|
||||
@ -805,7 +805,7 @@ def check_loss_and_target_compatibility(targets, loss_fns, output_shapes):
|
||||
key_loss_classes = (losses.MeanSquaredError, losses.BinaryCrossentropy,
|
||||
losses.CategoricalCrossentropy)
|
||||
for y, loss, shape in zip(targets, loss_fns, output_shapes):
|
||||
if y is None or loss is None or tensor_util.is_tensor(y):
|
||||
if y is None or loss is None or tensor_util.is_tf_type(y):
|
||||
continue
|
||||
if losses.is_categorical_crossentropy(loss):
|
||||
if y.shape[-1] == 1:
|
||||
@ -1007,7 +1007,7 @@ def standardize_weights(y,
|
||||
'Expected sample_weight with rank '
|
||||
'less than or equal to ' + str(len(y.shape)))
|
||||
|
||||
if (not tensor_util.is_tensor(sample_weight) and
|
||||
if (not tensor_util.is_tf_type(sample_weight) and
|
||||
y.shape[:sample_weight.ndim] != sample_weight.shape):
|
||||
raise ValueError('Found a sample_weight array with shape ' +
|
||||
str(sample_weight.shape) + ' for an input with shape ' +
|
||||
@ -1021,7 +1021,7 @@ def standardize_weights(y,
|
||||
raise ValueError('`class_weight` not supported for '
|
||||
'3+ dimensional targets.')
|
||||
|
||||
if tensor_util.is_tensor(y):
|
||||
if tensor_util.is_tf_type(y):
|
||||
# Few classes are expected, so densifying is reasonable.
|
||||
keys = np.array(sorted(class_weight.keys()))
|
||||
values = np.array([class_weight[i] for i in keys])
|
||||
@ -1085,14 +1085,14 @@ def has_tensors(ls):
|
||||
# which would then require a steps_per_epoch argument.
|
||||
if isinstance(ls, (list, tuple)):
|
||||
return any(
|
||||
tensor_util.is_tensor(v) and
|
||||
tensor_util.is_tf_type(v) and
|
||||
not isinstance(v, ragged_tensor.RaggedTensor) for v in ls)
|
||||
if isinstance(ls, dict):
|
||||
return any(
|
||||
tensor_util.is_tensor(v) and
|
||||
tensor_util.is_tf_type(v) and
|
||||
not isinstance(v, ragged_tensor.RaggedTensor)
|
||||
for _, v in six.iteritems(ls))
|
||||
return tensor_util.is_tensor(ls) and not isinstance(
|
||||
return tensor_util.is_tf_type(ls) and not isinstance(
|
||||
ls, ragged_tensor.RaggedTensor)
|
||||
|
||||
|
||||
@ -1271,7 +1271,7 @@ def validate_input_types(inp, orig_inp, allow_dict=True, field_name='inputs'):
|
||||
"""Helper function to validate either inputs or targets."""
|
||||
if isinstance(inp, (list, tuple)):
|
||||
if not all(isinstance(v, np.ndarray) or
|
||||
tensor_util.is_tensor(v) for v in inp):
|
||||
tensor_util.is_tf_type(v) for v in inp):
|
||||
raise ValueError(
|
||||
'Please provide as model inputs either a single array or a list of '
|
||||
'arrays. You passed: {}={}'.format(field_name, str(orig_inp)))
|
||||
@ -1279,7 +1279,7 @@ def validate_input_types(inp, orig_inp, allow_dict=True, field_name='inputs'):
|
||||
if not allow_dict:
|
||||
raise ValueError(
|
||||
'You cannot pass a dictionary as model {}.'.format(field_name))
|
||||
elif not isinstance(inp, np.ndarray) and not tensor_util.is_tensor(inp):
|
||||
elif not isinstance(inp, np.ndarray) and not tensor_util.is_tf_type(inp):
|
||||
raise ValueError(
|
||||
'Please provide as model inputs either a single array or a list of '
|
||||
'arrays. You passed: {}={}'.format(field_name, orig_inp))
|
||||
@ -1371,7 +1371,7 @@ def cast_if_floating_dtype_and_mismatch(targets, outputs):
|
||||
Returns:
|
||||
Targets in appropriate datatype.
|
||||
"""
|
||||
if tensor_util.is_tensor(targets):
|
||||
if tensor_util.is_tf_type(targets):
|
||||
# There is one target, so output[0] should be the only output.
|
||||
return cast_single_tensor(targets, dtype=outputs[0].dtype)
|
||||
new_targets = []
|
||||
|
@ -52,10 +52,10 @@ class ModelInputsTest(test.TestCase):
|
||||
model_inputs = training_utils_v1.ModelInputs(a)
|
||||
self.assertEqual(['input_1'], model_inputs.get_input_names())
|
||||
vals = model_inputs.get_symbolic_inputs()
|
||||
self.assertTrue(tensor_util.is_tensor(vals))
|
||||
self.assertTrue(tensor_util.is_tf_type(vals))
|
||||
vals = model_inputs.get_symbolic_inputs(return_single_as_list=True)
|
||||
self.assertEqual(1, len(vals))
|
||||
self.assertTrue(tensor_util.is_tensor(vals[0]))
|
||||
self.assertTrue(tensor_util.is_tf_type(vals[0]))
|
||||
self.assertEqual(backend.floatx(), vals[0].dtype)
|
||||
|
||||
def test_single_thing_eager(self):
|
||||
@ -87,8 +87,8 @@ class ModelInputsTest(test.TestCase):
|
||||
model_inputs = training_utils_v1.ModelInputs(a)
|
||||
self.assertEqual(['input_1', 'input_2'], model_inputs.get_input_names())
|
||||
vals = model_inputs.get_symbolic_inputs()
|
||||
self.assertTrue(tensor_util.is_tensor(vals[0]))
|
||||
self.assertTrue(tensor_util.is_tensor(vals[1]))
|
||||
self.assertTrue(tensor_util.is_tf_type(vals[0]))
|
||||
self.assertTrue(tensor_util.is_tf_type(vals[1]))
|
||||
|
||||
def test_list_eager(self):
|
||||
if not context.executing_eagerly():
|
||||
@ -113,8 +113,8 @@ class ModelInputsTest(test.TestCase):
|
||||
model_inputs = training_utils_v1.ModelInputs(a)
|
||||
self.assertEqual(['a', 'b'], model_inputs.get_input_names())
|
||||
vals = model_inputs.get_symbolic_inputs()
|
||||
self.assertTrue(tensor_util.is_tensor(vals['a']))
|
||||
self.assertTrue(tensor_util.is_tensor(vals['b']))
|
||||
self.assertTrue(tensor_util.is_tf_type(vals['a']))
|
||||
self.assertTrue(tensor_util.is_tf_type(vals['b']))
|
||||
|
||||
def test_dict_eager(self):
|
||||
if not context.executing_eagerly():
|
||||
|
@ -1446,7 +1446,7 @@ class Model(training_lib.Model):
|
||||
for name in self.output_names:
|
||||
tmp_target_tensors.append(target_tensors.get(name, None))
|
||||
target_tensors = tmp_target_tensors
|
||||
elif tensor_util.is_tensor(target_tensors):
|
||||
elif tensor_util.is_tf_type(target_tensors):
|
||||
target_tensors = [target_tensors]
|
||||
else:
|
||||
raise TypeError('Expected `target_tensors` to be a list or tuple or '
|
||||
@ -2548,8 +2548,8 @@ class Model(training_lib.Model):
|
||||
all_inputs.append(target)
|
||||
# Type check that all inputs are *either* value *or* symbolic.
|
||||
# TODO(fchollet): this check could be removed in Eager mode?
|
||||
if any(tensor_util.is_tensor(v) for v in all_inputs):
|
||||
if not all(tensor_util.is_tensor(v) for v in all_inputs):
|
||||
if any(tensor_util.is_tf_type(v) for v in all_inputs):
|
||||
if not all(tensor_util.is_tf_type(v) for v in all_inputs):
|
||||
raise ValueError('Do not pass inputs that mix Numpy arrays and '
|
||||
'TensorFlow tensors. '
|
||||
'You passed: x=' + str(orig_inputs) +
|
||||
@ -2634,7 +2634,7 @@ class Model(training_lib.Model):
|
||||
raise ValueError('Model inputs are already set.')
|
||||
|
||||
if self.__class__.__name__ == 'Sequential' and not self.built:
|
||||
if tensor_util.is_tensor(inputs):
|
||||
if tensor_util.is_tf_type(inputs):
|
||||
input_shape = (None,) + tuple(inputs.shape.as_list()[1:])
|
||||
elif isinstance(inputs, tensor_shape.TensorShape):
|
||||
input_shape = (None,) + tuple(inputs.as_list()[1:])
|
||||
@ -3142,7 +3142,7 @@ class _TrainingTarget(object):
|
||||
|
||||
|
||||
def _is_symbolic_tensor(x):
|
||||
return tensor_util.is_tensor(x)
|
||||
return tensor_util.is_tf_type(x)
|
||||
|
||||
|
||||
def _convert_scipy_sparse_tensor(value, expected_input):
|
||||
|
@ -286,7 +286,7 @@ class RNNCell(base_layer.Layer):
|
||||
# Validate the given batch_size and dtype against inputs if provided.
|
||||
inputs = ops.convert_to_tensor_v2_with_dispatch(inputs, name="inputs")
|
||||
if batch_size is not None:
|
||||
if tensor_util.is_tensor(batch_size):
|
||||
if tensor_util.is_tf_type(batch_size):
|
||||
static_batch_size = tensor_util.constant_value(
|
||||
batch_size, partial=True)
|
||||
else:
|
||||
|
@ -161,7 +161,7 @@ class Hashing(base_preprocessing_layer.PreprocessingLayer):
|
||||
if isinstance(inputs, (tuple, list)):
|
||||
# If any of them is tensor or ndarray, then treat as list
|
||||
if any(
|
||||
tensor_util.is_tensor(inp) or isinstance(inp, np.ndarray)
|
||||
tensor_util.is_tf_type(inp) or isinstance(inp, np.ndarray)
|
||||
for inp in inputs):
|
||||
return [self._preprocess_single_input(inp) for inp in inputs]
|
||||
return self._preprocess_single_input(inputs)
|
||||
|
@ -248,7 +248,7 @@ class LossFunctionWrapper(Loss):
|
||||
Returns:
|
||||
Loss values per sample.
|
||||
"""
|
||||
if tensor_util.is_tensor(y_pred) and tensor_util.is_tensor(y_true):
|
||||
if tensor_util.is_tf_type(y_pred) and tensor_util.is_tf_type(y_true):
|
||||
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)
|
||||
ag_fn = autograph.tf_convert(self.fn, ag_ctx.control_status_ctx())
|
||||
return ag_fn(y_true, y_pred, **self._fn_kwargs)
|
||||
|
@ -1061,7 +1061,7 @@ class OptimizerV2(trackable.Trackable):
|
||||
return learning_rate_schedule.serialize(value)
|
||||
if callable(value):
|
||||
return value()
|
||||
if tensor_util.is_tensor(value):
|
||||
if tensor_util.is_tf_type(value):
|
||||
return backend.get_value(value)
|
||||
return value
|
||||
|
||||
|
@ -84,7 +84,7 @@ def get_reachable_from_inputs(inputs, targets=None):
|
||||
except AttributeError:
|
||||
# Variables can be created in an Eager context.
|
||||
outputs = []
|
||||
elif tensor_util.is_tensor(x):
|
||||
elif tensor_util.is_tf_type(x):
|
||||
outputs = x.consumers()
|
||||
else:
|
||||
raise TypeError('Expected Operation, Variable, or Tensor, got ' + str(x))
|
||||
@ -391,7 +391,7 @@ def is_ragged(tensor):
|
||||
|
||||
|
||||
def is_tensor_or_variable(x):
|
||||
return tensor_util.is_tensor(x) or isinstance(x, variables.Variable)
|
||||
return tensor_util.is_tf_type(x) or isinstance(x, variables.Variable)
|
||||
|
||||
|
||||
def assert_no_legacy_layers(layers):
|
||||
|
@ -2292,7 +2292,7 @@ def transpose(a, perm=None, name="transpose", conjugate=False):
|
||||
A transposed `Tensor`.
|
||||
"""
|
||||
with ops.name_scope(name, "transpose", [a]) as name:
|
||||
if not tensor_util.is_tensor(a):
|
||||
if not tensor_util.is_tf_type(a):
|
||||
a = ops.convert_to_tensor(a, name="a")
|
||||
|
||||
if conjugate and a.dtype.is_complex:
|
||||
@ -3058,7 +3058,7 @@ def zeros_like_v2(
|
||||
def zeros_like_impl(tensor, dtype, name, optimize=True):
|
||||
"""Internal implementation for the v1/v2 zeros_like API calls."""
|
||||
with ops.name_scope(name, "zeros_like", [tensor]) as name:
|
||||
if not tensor_util.is_tensor(tensor):
|
||||
if not tensor_util.is_tf_type(tensor):
|
||||
tensor = ops.convert_to_tensor(tensor, name="tensor")
|
||||
tensor_shape = tensor.shape
|
||||
tensor_dtype = tensor.dtype
|
||||
@ -3506,7 +3506,7 @@ def pad(tensor, paddings, mode="CONSTANT", name=None, constant_values=0): # pyl
|
||||
if mode == "CONSTANT":
|
||||
# TODO(rjryan): Once the forward compatibility period (3 weeks) have passed
|
||||
# remove the "Pad" fallback here.
|
||||
if not tensor_util.is_tensor(constant_values) and constant_values == 0:
|
||||
if not tensor_util.is_tf_type(constant_values) and constant_values == 0:
|
||||
result = gen_array_ops.pad(tensor, paddings, name=name)
|
||||
else:
|
||||
result = gen_array_ops.pad_v2(
|
||||
|
@ -76,7 +76,7 @@ def cond_v2(pred, true_fn, false_fn, name="cond"):
|
||||
# graphs. Propagate that behavior here.
|
||||
add_control_dependencies = ops.get_default_graph()._add_control_dependencies
|
||||
pred = ops.convert_to_tensor(pred)
|
||||
if (tensor_util.is_tensor(pred) and
|
||||
if (tensor_util.is_tf_type(pred) and
|
||||
(pred.shape.dims is None or pred.shape.dims)):
|
||||
pred = array_ops.squeeze_v2(pred)
|
||||
|
||||
|
@ -3058,7 +3058,7 @@ def tuple(tensors, name=None, control_inputs=None): # pylint: disable=redefined
|
||||
return tensors
|
||||
with ops.name_scope(name, "tuple", tensors) as name:
|
||||
tensors = [
|
||||
t if (isinstance(t, ops.Operation) or tensor_util.is_tensor(t) or
|
||||
t if (isinstance(t, ops.Operation) or tensor_util.is_tf_type(t) or
|
||||
t is None) else ops.convert_to_tensor(t) for t in tensors
|
||||
]
|
||||
gating_ops = [
|
||||
@ -3081,7 +3081,7 @@ def tuple(tensors, name=None, control_inputs=None): # pylint: disable=redefined
|
||||
gate = group(*gating_ops)
|
||||
tpl = []
|
||||
for t in tensors:
|
||||
if tensor_util.is_tensor(t):
|
||||
if tensor_util.is_tf_type(t):
|
||||
tpl.append(with_dependencies([gate], t))
|
||||
elif isinstance(t, ops.Operation):
|
||||
with ops.control_dependencies([gate]):
|
||||
|
@ -584,7 +584,7 @@ class Bijector(object):
|
||||
self._name = camel_to_snake(type(self).__name__.lstrip("_"))
|
||||
|
||||
for i, t in enumerate(self._graph_parents):
|
||||
if t is None or not tensor_util.is_tensor(t):
|
||||
if t is None or not tensor_util.is_tf_type(t):
|
||||
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
|
||||
|
||||
@property
|
||||
|
@ -462,7 +462,7 @@ class Distribution(_BaseDistribution):
|
||||
"""
|
||||
graph_parents = [] if graph_parents is None else graph_parents
|
||||
for i, t in enumerate(graph_parents):
|
||||
if t is None or not tensor_util.is_tensor(t):
|
||||
if t is None or not tensor_util.is_tf_type(t):
|
||||
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
|
||||
if not name or name[-1] != "/": # `name` is not a name scope
|
||||
non_unique_name = name or type(self).__name__
|
||||
|
@ -1153,7 +1153,7 @@ class LinearOperator(module.Module):
|
||||
graph_parents = [] if graph_parents is None else graph_parents
|
||||
for i, t in enumerate(graph_parents):
|
||||
if t is None or not (linear_operator_util.is_ref(t) or
|
||||
tensor_util.is_tensor(t)):
|
||||
tensor_util.is_tf_type(t)):
|
||||
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
|
||||
self._graph_parents = graph_parents
|
||||
|
||||
|
@ -914,7 +914,7 @@ def random_positive_definite_matrix(shape,
|
||||
`Tensor` with desired shape and dtype.
|
||||
"""
|
||||
dtype = dtypes.as_dtype(dtype)
|
||||
if not tensor_util.is_tensor(shape):
|
||||
if not tensor_util.is_tf_type(shape):
|
||||
shape = tensor_shape.TensorShape(shape)
|
||||
# Matrix must be square.
|
||||
shape.dims[-1].assert_is_compatible_with(shape.dims[-2])
|
||||
|
@ -67,7 +67,7 @@ def _set_handle_data(list_handle, element_shape, element_dtype):
|
||||
# TODO(b/169968286): It would be better if we had a consistent story for
|
||||
# creating handle data from eager operations (shared with VarHandleOp).
|
||||
if isinstance(list_handle, ops.EagerTensor):
|
||||
if tensor_util.is_tensor(element_shape):
|
||||
if tensor_util.is_tf_type(element_shape):
|
||||
element_shape = tensor_shape.TensorShape(None)
|
||||
elif not isinstance(element_shape, tensor_shape.TensorShape):
|
||||
element_shape = tensor_shape.TensorShape(element_shape)
|
||||
|
@ -294,7 +294,7 @@ def print_v2(*inputs, **kwargs):
|
||||
"File needs to be in the form of 'file://<filepath>'.")
|
||||
|
||||
# If we are only printing a single string scalar, there is no need to format
|
||||
if (len(inputs) == 1 and tensor_util.is_tensor(inputs[0]) and
|
||||
if (len(inputs) == 1 and tensor_util.is_tf_type(inputs[0]) and
|
||||
(not isinstance(inputs[0], sparse_tensor.SparseTensor)) and
|
||||
(inputs[0].shape.ndims == 0) and (inputs[0].dtype == dtypes.string)):
|
||||
formatted_string = inputs[0]
|
||||
@ -318,7 +318,7 @@ def print_v2(*inputs, **kwargs):
|
||||
else:
|
||||
inputs_ordered_dicts_sorted.append(input_)
|
||||
tensor_free_structure = nest.map_structure(
|
||||
lambda x: "" if tensor_util.is_tensor(x) else x,
|
||||
lambda x: "" if tensor_util.is_tf_type(x) else x,
|
||||
inputs_ordered_dicts_sorted)
|
||||
|
||||
tensor_free_template = " ".join(
|
||||
@ -338,7 +338,7 @@ def print_v2(*inputs, **kwargs):
|
||||
placeholders.append(
|
||||
"SparseTensor(indices={}, values={}, shape={})".format(
|
||||
placeholder, placeholder, placeholder))
|
||||
elif tensor_util.is_tensor(x):
|
||||
elif tensor_util.is_tf_type(x):
|
||||
tensors.append(x)
|
||||
placeholders.append(placeholder)
|
||||
else:
|
||||
|
@ -187,7 +187,7 @@ def _SumGrad(op, grad):
|
||||
|
||||
# Compute and cache `output_shape_kept_dims` and `tile_scaling`.
|
||||
def EvaluateAsTuple(t):
|
||||
if tensor_util.is_tensor(t):
|
||||
if tensor_util.is_tf_type(t):
|
||||
value = c_api.TF_TryEvaluateConstant_wrapper(
|
||||
t.graph._c_graph, t._as_tf_output()) # pylint: disable=protected-access
|
||||
assert value is not None
|
||||
|
@ -464,8 +464,8 @@ def divide(x, y, name=None):
|
||||
return DivideDelegateWithName(x, name) / y
|
||||
else:
|
||||
# We do conversion here to make sure at least x is a tensor.
|
||||
if not tensor_util.is_tensor(x):
|
||||
dtype = y.dtype.base_dtype if tensor_util.is_tensor(y) else None
|
||||
if not tensor_util.is_tf_type(x):
|
||||
dtype = y.dtype.base_dtype if tensor_util.is_tf_type(y) else None
|
||||
x = ops.convert_to_tensor(x, dtype=dtype)
|
||||
return x / y
|
||||
|
||||
|
@ -1067,10 +1067,10 @@ def convolution_internal(
|
||||
estimated from `filters.shape`.
|
||||
"""
|
||||
if (not isinstance(filters, variables_lib.Variable) and
|
||||
not tensor_util.is_tensor(filters)):
|
||||
not tensor_util.is_tf_type(filters)):
|
||||
with ops.name_scope("convolution_internal", None, [filters, input]):
|
||||
filters = ops.convert_to_tensor(filters, name='filters')
|
||||
if (not isinstance(input, ops.Tensor) and not tensor_util.is_tensor(input)):
|
||||
if (not isinstance(input, ops.Tensor) and not tensor_util.is_tf_type(input)):
|
||||
with ops.name_scope("convolution_internal", None, [filters, input]):
|
||||
input = ops.convert_to_tensor(input, name="input")
|
||||
|
||||
@ -3280,7 +3280,7 @@ def conv_transpose(input, # pylint: disable=redefined-builtin
|
||||
"""
|
||||
with ops.name_scope(name, "conv_transpose",
|
||||
[input, filter, output_shape]) as name:
|
||||
if tensor_util.is_tensor(output_shape):
|
||||
if tensor_util.is_tf_type(output_shape):
|
||||
n = output_shape.shape[0] - 2
|
||||
elif isinstance(output_shape, collections_abc.Sized):
|
||||
n = len(output_shape) - 2
|
||||
@ -5157,7 +5157,7 @@ def dropout_v2(x, rate, noise_shape=None, seed=None, name=None):
|
||||
return x
|
||||
|
||||
is_executing_eagerly = context.executing_eagerly()
|
||||
if not tensor_util.is_tensor(rate):
|
||||
if not tensor_util.is_tf_type(rate):
|
||||
if is_rate_number:
|
||||
keep_prob = 1 - rate
|
||||
scale = 1 / keep_prob
|
||||
|
@ -825,7 +825,7 @@ def ngrams(data,
|
||||
|
||||
def string_format(template, inputs, placeholder="{}", summarize=3, name=None):
|
||||
"""Version of tf.strings.format that handles RaggedTensors."""
|
||||
if tensor_util.is_tensor(inputs) or ragged_tensor.is_ragged(inputs):
|
||||
if tensor_util.is_tf_type(inputs) or ragged_tensor.is_ragged(inputs):
|
||||
inputs = [inputs]
|
||||
|
||||
split_template = template.split(placeholder)
|
||||
|
@ -161,7 +161,7 @@ def string_format(template, inputs, placeholder="{}", summarize=3, name=None):
|
||||
"""
|
||||
# If there is only one tensor to format, we will automatically wrap it in a
|
||||
# list to simplify the user experience
|
||||
if tensor_util.is_tensor(inputs):
|
||||
if tensor_util.is_tf_type(inputs):
|
||||
inputs = [inputs]
|
||||
if template.count(placeholder) != len(inputs):
|
||||
raise ValueError("%s placeholder(s) in template does not match %s tensor(s)"
|
||||
|
@ -1118,7 +1118,7 @@ def _check_create_file_writer_args(inside_function, **kwargs):
|
||||
ValueError: if the arguments are graph tensors.
|
||||
"""
|
||||
for arg_name, arg in kwargs.items():
|
||||
if not isinstance(arg, ops.EagerTensor) and tensor_util.is_tensor(arg):
|
||||
if not isinstance(arg, ops.EagerTensor) and tensor_util.is_tf_type(arg):
|
||||
if inside_function:
|
||||
raise ValueError(
|
||||
"Invalid graph Tensor argument \"%s=%s\" to create_file_writer() "
|
||||
|
@ -130,7 +130,7 @@ def while_loop(cond,
|
||||
# `orig_loop_vars` and `args`, converts flows in `args` to TensorArrays
|
||||
# and packs it into the structure of `orig_loop_vars`.
|
||||
pred = cond(*_pack_sequence_as(orig_loop_vars, args))
|
||||
if (tensor_util.is_tensor(pred) and
|
||||
if (tensor_util.is_tf_type(pred) and
|
||||
(pred.shape.dims is None or pred.shape.dims)):
|
||||
pred = array_ops.squeeze_v2(pred)
|
||||
|
||||
|
@ -388,7 +388,7 @@ class Loader(object):
|
||||
return obj.handle
|
||||
elif isinstance(obj, tracking.Asset):
|
||||
return obj.asset_path
|
||||
elif tensor_util.is_tensor(obj):
|
||||
elif tensor_util.is_tf_type(obj):
|
||||
return obj
|
||||
elif isinstance(obj, tracking.CapturableResource):
|
||||
# Note: this executes restored functions in the CapturableResource.
|
||||
|
@ -343,7 +343,7 @@ class _SupervisedOutput(ExportOutput):
|
||||
raise ValueError(
|
||||
'{} output value must be a Tensor; got {}.'.format(
|
||||
key, metric_val))
|
||||
if not (tensor_util.is_tensor(metric_op) or
|
||||
if not (tensor_util.is_tf_type(metric_op) or
|
||||
isinstance(metric_op, ops.Operation)):
|
||||
raise ValueError(
|
||||
'{} update_op must be a Tensor or Operation; got {}.'.format(
|
||||
|
@ -358,7 +358,7 @@ class _SaveableView(object):
|
||||
"\n".join(concrete_function.graph.saving_errors)).format(
|
||||
name=concrete_function.name))
|
||||
for capture in concrete_function.captured_inputs:
|
||||
if (tensor_util.is_tensor(capture) and
|
||||
if (tensor_util.is_tf_type(capture) and
|
||||
capture.dtype not in _UNCOPIABLE_DTYPES and
|
||||
capture not in self.captured_tensor_node_ids):
|
||||
if hasattr(capture, "_cached_variable"):
|
||||
|
@ -335,12 +335,12 @@ def keras_layer_tracepoint(layer, checkpoint_name):
|
||||
"""
|
||||
try:
|
||||
outputs = layer.output
|
||||
if tensor_util.is_tensor(outputs):
|
||||
if tensor_util.is_tf_type(outputs):
|
||||
trace_tensor(outputs, '%s' % (checkpoint_name))
|
||||
else:
|
||||
idx = 0
|
||||
for output_tensor in outputs:
|
||||
if tensor_util.is_tensor(outputs):
|
||||
if tensor_util.is_tf_type(outputs):
|
||||
trace_tensor(output_tensor, '%s_%d' % (checkpoint_name, idx))
|
||||
idx += 1
|
||||
except AttributeError:
|
||||
|
@ -450,7 +450,7 @@ class RestoredSaveableObject(saveable_object.SaveableObject):
|
||||
self.save_function = save_function
|
||||
self.restore_function = restore_function
|
||||
|
||||
if tensor_util.is_tensor(name):
|
||||
if tensor_util.is_tf_type(name):
|
||||
name_tensor = name
|
||||
else:
|
||||
with ops.init_scope():
|
||||
|
@@ -1587,7 +1587,7 @@ class CheckpointV1(tracking.AutoTrackable):
       The full path to the checkpoint (i.e. `file_prefix`).
     """
     output = self._saver.save(file_prefix=file_prefix, session=session)
-    if tensor_util.is_tensor(output):
+    if tensor_util.is_tf_type(output):
       if context.executing_eagerly():
         return compat.as_str(output.numpy())
       else:
@@ -2009,7 +2009,7 @@ class Checkpoint(tracking.AutoTrackable):
     """
     options = options or checkpoint_options.CheckpointOptions()
     output = self._saver.save(file_prefix=file_prefix, options=options)
-    if tensor_util.is_tensor(output):
+    if tensor_util.is_tf_type(output):
       if context.executing_eagerly():
         return compat.as_str(output.numpy())
       else:
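For reference, a small hedged example of the behavior described by the updated docstring and `IsTensorTest` expectations above (illustrative only; assumes a TensorFlow build that includes this change; the public entry point remains `tf.is_tensor`):

```python
# Illustrative check mirroring the updated IsTensorTest expectations.
import numpy as np
import tensorflow as tf

print(tf.is_tensor(tf.constant([1, 2, 3])))   # True: tf.Tensor is TF-native
print(tf.is_tensor(tf.Variable([1.0])))       # True: Variables are TF-native
print(tf.is_tensor(np.array([1, 2, 3])))      # False: ndarray needs conversion
```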