Rename internal_convert_to_tensor for performance

Calling ops.internal_convert_to_tensor is more efficient than calling
ops.convert_to_tensor because it skips the deprecated_argument_lookup and
incurs less Python function-call overhead. We therefore swap the two
functions' names so that most code paths are optimized.

PiperOrigin-RevId: 274321742
Gaurav Jain 2019-10-12 01:24:25 -07:00 committed by TensorFlower Gardener
parent cb9473b76a
commit c3973c78f0
11 changed files with 47 additions and 51 deletions
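
For context, the shape of the rename is summarized in the minimal, self-contained sketch below (simplified signatures, conversion logic elided; not the actual TensorFlow implementation): the fast former internal_convert_to_tensor takes over the convert_to_tensor name, the old exported v1 wrapper keeps the deprecated-argument handling under the new name convert_to_tensor_v1, and a module-level alias keeps unmigrated internal_convert_to_tensor call sites working.

def convert_to_tensor(value, dtype=None, name=None, as_ref=False,
                      preferred_dtype=None, dtype_hint=None, ctx=None):
  """Formerly internal_convert_to_tensor: the fast implementation.

  Internal call sites call this directly and avoid both the extra Python
  frame and the deprecated-argument lookup of the old public wrapper.
  """
  preferred_dtype = preferred_dtype or dtype_hint
  # ... dispatch through registered conversion functions elided ...
  return value

def convert_to_tensor_v1(value, dtype=None, name=None,
                         preferred_dtype=None, dtype_hint=None):
  """Formerly the exported v1 convert_to_tensor wrapper.

  Only this exported symbol still pays for resolving the deprecated
  preferred_dtype/dtype_hint argument pair before delegating.
  """
  preferred_dtype = preferred_dtype or dtype_hint  # stand-in for deprecated_argument_lookup
  return convert_to_tensor(value, dtype=dtype, name=name,
                           preferred_dtype=preferred_dtype)

# Backwards-compatibility alias so call sites that have not yet been
# migrated keep working while they move to the new name.
internal_convert_to_tensor = convert_to_tensor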

View File

@ -1162,7 +1162,7 @@ class MirroredVariable(DistributedVariable, Mirrored):
# Try to avoid assignments to and other mutations of MirroredVariable
# state except through a DistributionStrategy.extended.update() call.
assert not as_ref
-return ops.internal_convert_to_tensor(
+return ops.convert_to_tensor(
self.get(), dtype=dtype, name=name, as_ref=as_ref)
def _clone_with_new_values(self, new_values):
@ -1180,7 +1180,7 @@ ops.register_tensor_conversion_function(MirroredVariable,
_tensor_conversion_mirrored)
def _tensor_conversion_mirrored_val(value, dtype=None, name=None, as_ref=False):
-return ops.internal_convert_to_tensor(
+return ops.convert_to_tensor(
value.get(), dtype=dtype, name=name, as_ref=as_ref)
ops.register_tensor_conversion_function(Mirrored,
@ -1362,7 +1362,7 @@ class SyncOnReadVariable(DistributedVariable):
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
"""Converts a variable to a tensor."""
-return ops.internal_convert_to_tensor(
+return ops.convert_to_tensor(
self.get(), dtype=dtype, name=name, as_ref=as_ref)
def _clone_with_new_values(self, new_values):
@ -1768,8 +1768,7 @@ class AggregatingVariable(variables_lib.Variable):
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
def _tensor_conversion_aggregate(var, dtype=None, name=None, as_ref=False):
-return ops.internal_convert_to_tensor(
-var.get(), dtype=dtype, name=name, as_ref=as_ref)
+return ops.convert_to_tensor(var.get(), dtype=dtype, name=name, as_ref=as_ref)
ops.register_tensor_conversion_function(

View File

@ -795,11 +795,11 @@ class SyncOnReadVariablePropertiesTest(test.TestCase):
with context.graph_mode():
_, replica_local = _make_replica_local(
variable_scope.VariableAggregation.SUM)
-converted = ops.internal_convert_to_tensor(replica_local, as_ref=False)
+converted = ops.convert_to_tensor(replica_local, as_ref=False)
self.assertIsInstance(converted, ops.Tensor)
self.assertEqual(converted.dtype, replica_local.dtype)
-converted = ops.internal_convert_to_tensor(replica_local, as_ref=True)
+converted = ops.convert_to_tensor(replica_local, as_ref=True)
# Resources variable are converted to tensors as well when as_ref is True.
self.assertIsInstance(converted, ops.Tensor)
self.assertEqual(converted.dtype, replica_local.dtype)

View File

@ -253,19 +253,18 @@ def args_to_matching_eager(l, ctx, default_dtype=None):
dtype = t.dtype
break
-internal_convert_to_tensor = ops.internal_convert_to_tensor
if dtype is None:
# Infer a dtype based on the first value, and use that dtype for the
# remaining values.
ret = []
for t in l:
ret.append(
-internal_convert_to_tensor(
+ops.convert_to_tensor(
t, dtype, preferred_dtype=default_dtype, ctx=ctx))
if dtype is None:
dtype = ret[-1].dtype
else:
-ret = [internal_convert_to_tensor(t, dtype, ctx=ctx) for t in l]
+ret = [ops.convert_to_tensor(t, dtype, ctx=ctx) for t in l]
# TODO(slebedev): consider removing this as it leaks a Keras concept.
# pylint: disable=protected-access
@ -280,7 +279,7 @@ def args_to_matching_eager(l, ctx, default_dtype=None):
def convert_to_mixed_eager_tensors(values, ctx):
-v = [ops.internal_convert_to_tensor(t, ctx=ctx) for t in values]
+v = [ops.convert_to_tensor(t, ctx=ctx) for t in values]
types = [t._datatype_enum() for t in v] # pylint: disable=protected-access
return types, v
@ -309,15 +308,15 @@ def args_to_mixed_eager_tensors(lists, ctx):
break
if dtype is None:
# Convert the first one and use its dtype.
-lists_ret[0].append(ops.internal_convert_to_tensor(lists[0][i], ctx=ctx))
+lists_ret[0].append(ops.convert_to_tensor(lists[0][i], ctx=ctx))
dtype = lists_ret[0][i].dtype
for j in range(1, len(lists)):
lists_ret[j].append(
-ops.internal_convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx))
+ops.convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx))
else:
# Convert everything to the found dtype.
for j in range(len(lists)):
lists_ret[j].append(
-ops.internal_convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx))
+ops.convert_to_tensor(lists[j][i], dtype=dtype, ctx=ctx))
types.append(dtype.as_datatype_enum)
return types, lists_ret

View File

@ -296,8 +296,7 @@ def internal_convert_to_tensor_or_indexed_slices(value,
ValueError: If `dtype` does not match the element type of `value`.
"""
if isinstance(value, ops.EagerTensor) and not context.executing_eagerly():
-return ops.internal_convert_to_tensor(
-value, dtype=dtype, name=name, as_ref=as_ref)
+return ops.convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref)
elif isinstance(value, _TensorLike):
if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value.dtype):
raise ValueError(
@ -305,8 +304,7 @@ def internal_convert_to_tensor_or_indexed_slices(value,
(dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
return value
else:
-return ops.internal_convert_to_tensor(
-value, dtype=dtype, name=name, as_ref=as_ref)
+return ops.convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref)
def internal_convert_n_to_tensor_or_indexed_slices(values,

View File

@ -418,7 +418,7 @@ def _apply_op_helper(op_type_name, name=None, **keywords): # pylint: disable=in
observed_types = []
for value in values:
try:
-converted_value = ops.internal_convert_to_tensor(
+converted_value = ops.convert_to_tensor(
value, as_ref=input_arg.is_ref)
observed_types.append(converted_value.dtype.base_dtype.name)
except (TypeError, ValueError):
@ -460,7 +460,7 @@ def _apply_op_helper(op_type_name, name=None, **keywords): # pylint: disable=in
default_dtype = default_type_attr_map[input_arg.type_attr]
try:
-values = ops.internal_convert_to_tensor(
+values = ops.convert_to_tensor(
values,
name=input_arg.name,
dtype=dtype,
@ -478,7 +478,7 @@ def _apply_op_helper(op_type_name, name=None, **keywords): # pylint: disable=in
except ValueError:
# What type does convert_to_tensor think it has?
try:
-observed = ops.internal_convert_to_tensor(
+observed = ops.convert_to_tensor(
values, as_ref=input_arg.is_ref).dtype.name
except ValueError as err:
raise ValueError(

View File

@ -1146,11 +1146,11 @@ register_dense_tensor_like_type(Tensor)
@tf_export(v1=["convert_to_tensor"])
-def convert_to_tensor(value,
-dtype=None,
-name=None,
-preferred_dtype=None,
-dtype_hint=None):
+def convert_to_tensor_v1(value,
+dtype=None,
+name=None,
+preferred_dtype=None,
+dtype_hint=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
@ -1255,7 +1255,7 @@ def convert_to_tensor_v2(value, dtype=None, dtype_hint=None, name=None):
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
"""
-return internal_convert_to_tensor(
+return convert_to_tensor(
value=value,
dtype=dtype,
name=name,
@ -1267,14 +1267,14 @@ def _error_prefix(name):
return "" if name is None else "%s: " % name
-def internal_convert_to_tensor(value,
-dtype=None,
-name=None,
-as_ref=False,
-preferred_dtype=None,
-dtype_hint=None,
-ctx=None,
-accepted_result_types=(Tensor,)):
+def convert_to_tensor(value,
+dtype=None,
+name=None,
+as_ref=False,
+preferred_dtype=None,
+dtype_hint=None,
+ctx=None,
+accepted_result_types=(Tensor,)):
"""Implementation of the public convert_to_tensor."""
# TODO(b/142518781): Fix all call-sites and remove redundant arg
preferred_dtype = preferred_dtype or dtype_hint
@ -1339,6 +1339,9 @@ def internal_convert_to_tensor(value,
(_error_prefix(name), value, type(value)))
+internal_convert_to_tensor = convert_to_tensor
def internal_convert_n_to_tensor(values,
dtype=None,
name=None,
@ -1377,7 +1380,7 @@ def internal_convert_n_to_tensor(values,
for i, value in enumerate(values):
n = None if name is None else "%s_%d" % (name, i)
ret.append(
-internal_convert_to_tensor(
+convert_to_tensor(
value,
dtype=dtype,
name=n,
@ -1472,7 +1475,7 @@ def internal_convert_to_tensor_or_composite(value,
(dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
return value
else:
-return internal_convert_to_tensor(
+return convert_to_tensor(
value,
dtype=dtype,
name=name,

View File

@ -126,7 +126,7 @@ class SparseTensor(_TensorLike, composite_tensor.CompositeTensor):
indices, name="indices", dtype=dtypes.int64)
# TODO(touts): Consider adding mutable_values() when 'values'
# is a VariableOp and updating users of SparseTensor.
values = ops.internal_convert_to_tensor(values, name="values")
values = ops.convert_to_tensor(values, name="values")
dense_shape = ops.convert_to_tensor(
dense_shape, name="dense_shape", dtype=dtypes.int64)
self._indices = indices
@ -429,7 +429,7 @@ def convert_to_tensor_or_sparse_tensor(value, dtype=None, name=None):
raise RuntimeError("Sparse dtype: requested = %s, actual = %s" %
(dtype.name, value.dtype.name))
return value
-return ops.internal_convert_to_tensor(value, dtype=dtype, name=name)
+return ops.convert_to_tensor(value, dtype=dtype, name=name)
def is_sparse(x):

View File

@ -113,17 +113,15 @@ class AutoCastVariable(variables.Variable):
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
"""Converts this variable to a tensor."""
if not self._should_cast():
-return ops.internal_convert_to_tensor(self._variable, dtype, name,
-as_ref)
+return ops.convert_to_tensor(self._variable, dtype, name, as_ref)
# TODO(reedwm): Support as_ref?
assert not as_ref
if dtype is not None and not dtype.is_compatible_with(self.dtype):
raise ValueError(
'Incompatible type conversion requested to type {!r} for variable '
'of type {!r}'.format(dtype.name, self.dtype.name))
-val = ops.internal_convert_to_tensor(self._variable,
-self._variable.dtype, name,
-as_ref=False)
+val = ops.convert_to_tensor(
+self._variable, self._variable.dtype, name, as_ref=False)
return math_ops.cast(val, self.dtype)
def _should_act_as_resource_variable(self):

View File

@ -868,7 +868,7 @@ def partitioned_call(args,
# The generated binding returns an empty list for functions that don't
# return any Tensors, hence the need to use `create_op` directly.
-args = [ops.internal_convert_to_tensor(x) for x in args]
+args = [ops.convert_to_tensor(x) for x in args]
tin_attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(
type=[x.dtype.as_datatype_enum for x in args]))

View File

@ -197,7 +197,7 @@ def saveable_objects_for_op(op, name):
raise ValueError("Can only save/restore ResourceVariables when "
"executing eagerly, got type: %s." % type(op))
-variable = ops.internal_convert_to_tensor(op, as_ref=True)
+variable = ops.convert_to_tensor(op, as_ref=True)
if not _tensor_comes_from_variable(variable):
raise TypeError("names_to_saveables must be a dict mapping string "
"names to Tensors/Variables. Not a variable: %s" %
@ -287,7 +287,7 @@ def op_list_to_dict(op_list, convert_variable_to_tensor=True):
if isinstance(var, resource_variable_ops.BaseResourceVariable):
var = var._graph_element # pylint: disable=protected-access
else:
-var = ops.internal_convert_to_tensor(var, as_ref=True)
+var = ops.convert_to_tensor(var, as_ref=True)
if not _tensor_comes_from_variable(var):
raise TypeError("Variable to save is not a Variable: %s" % var)
if var.op.type == "ReadVariableOp":

View File

@ -321,8 +321,8 @@ class Asset(base.Trackable):
# initialization graph, since it is transient and should not end up in a
# serialized function body.
with ops.init_scope(), ops.device("CPU"):
-self._path = ops.internal_convert_to_tensor(path, dtype=dtypes.string,
-name="asset_path")
+self._path = ops.convert_to_tensor(
+path, dtype=dtypes.string, name="asset_path")
@property
def asset_path(self):
@ -426,5 +426,4 @@ def cached_per_instance(f):
ops.register_tensor_conversion_function(
-Asset,
-lambda asset, **kw: ops.internal_convert_to_tensor(asset.asset_path, **kw))
+Asset, lambda asset, **kw: ops.convert_to_tensor(asset.asset_path, **kw))