Update keras to use the public TF API for convert_to_tensor()

PiperOrigin-RevId: 328871461
Change-Id: Ic44876d5988b86fc434617d3cbd7a4ce3e7cfcd6
Authored by Scott Zhu on 2020-08-27 20:49:25 -07:00; committed by TensorFlower Gardener
parent 9b28aa9e2e
commit 15ffb9d4fb
52 changed files with 238 additions and 197 deletions
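The entire diff applies one mechanical substitution, so here is a minimal sketch of it as an aid to skimming. It assumes TF 2.x internals: `ops` in these files is taken to be `tensorflow.python.framework.ops`, and the statement that the `_with_dispatch` variant backs the public endpoint follows from the commit title rather than from the hunks below.

# A minimal sketch of the call-site migration, assuming TF 2.x internals.
# Before: call sites used the internal converter directly, which skips
# TF's operator-dispatch machinery:
#
#   from tensorflow.python.framework import ops
#   tensor = ops.convert_to_tensor_v2(x, dtype=dtype)
#
# After: the dispatch-aware wrapper behind the public API:
#
#   tensor = ops.convert_to_tensor_v2_with_dispatch(x, dtype=dtype)
#
# From user code, the equivalent public call is simply:
import numpy as np
import tensorflow as tf

tensor = tf.convert_to_tensor(np.ones((2, 2)), dtype=tf.float32)
print(tensor.shape, tensor.dtype)  # (2, 2) <dtype: 'float32'>

The practical effect is that values routed through these call sites now pass through the same dispatch hooks as `tf.convert_to_tensor`, so types TF's dispatch system knows about convert consistently with the public API.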


@@ -840,7 +840,7 @@ def _to_tensor(x, dtype):
Returns:
A tensor.
"""
return ops.convert_to_tensor_v2(x, dtype=dtype)
return ops.convert_to_tensor_v2_with_dispatch(x, dtype=dtype)
@keras_export('keras.backend.is_sparse')
@@ -4797,8 +4797,8 @@ def categorical_crossentropy(target, output, from_logits=False, axis=-1):
[0. 0. 0.]
"""
target = ops.convert_to_tensor_v2(target)
output = ops.convert_to_tensor_v2(output)
target = ops.convert_to_tensor_v2_with_dispatch(target)
output = ops.convert_to_tensor_v2_with_dispatch(output)
target.shape.assert_is_compatible_with(output.shape)
if from_logits:
@@ -4847,8 +4847,8 @@ def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
Raises:
ValueError: if `axis` is neither -1 nor one of the axes of `output`.
"""
target = ops.convert_to_tensor_v2(target)
output = ops.convert_to_tensor_v2(output)
target = ops.convert_to_tensor_v2_with_dispatch(target)
output = ops.convert_to_tensor_v2_with_dispatch(output)
if (not from_logits and
not isinstance(output, (ops.EagerTensor, variables_module.Variable)) and
@@ -4925,8 +4925,8 @@ def binary_crossentropy(target, output, from_logits=False):
Returns:
A tensor.
"""
target = ops.convert_to_tensor_v2(target)
output = ops.convert_to_tensor_v2(output)
target = ops.convert_to_tensor_v2_with_dispatch(target)
output = ops.convert_to_tensor_v2_with_dispatch(output)
if from_logits:
return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)


@@ -491,7 +491,7 @@ class BackendLinearAlgebraTest(test.TestCase, parameterized.TestCase):
input_shape_b=(4, 7))
def test_relu(self):
x = ops.convert_to_tensor_v2([[-4, 0], [2, 7]], 'float32')
x = ops.convert_to_tensor_v2_with_dispatch([[-4, 0], [2, 7]], 'float32')
# standard relu
relu_op = backend.relu(x)
@@ -1310,7 +1310,7 @@ class BackendNNOpsTest(test.TestCase, parameterized.TestCase):
inputs = backend.variable(input_val)
initial_states = [
backend.variable(init_state_val),
ops.convert_to_tensor_v2(
ops.convert_to_tensor_v2_with_dispatch(
np.concatenate([init_state_val, init_state_val], axis=-1))
]
mask = backend.variable(np_mask)
@@ -1617,9 +1617,11 @@ class BackendCrossEntropyLossesTest(test.TestCase, parameterized.TestCase):
p = backend.placeholder()
o = backend.categorical_crossentropy(t, p)
t_val = ops.convert_to_tensor_v2([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
p_val = ops.convert_to_tensor_v2([[.9, .05, .05], [.05, .89, .06],
[.05, .01, .94]])
t_val = ops.convert_to_tensor_v2_with_dispatch([[1., 0., 0.], [0., 1., 0.],
[0., 0., 1.]])
p_val = ops.convert_to_tensor_v2_with_dispatch([[.9, .05, .05],
[.05, .89, .06],
[.05, .01, .94]])
f = backend.function([t, p], o)
result = f([t_val, p_val])
@@ -1633,7 +1635,8 @@ class BackendCrossEntropyLossesTest(test.TestCase, parameterized.TestCase):
self.assertArrayNear(result, [.105, .065, .111], 1e-3)
# from logits
p_val = ops.convert_to_tensor_v2([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
p_val = ops.convert_to_tensor_v2_with_dispatch([[8., 1., 1.], [0., 9., 1.],
[2., 3., 5.]])
o = backend.categorical_crossentropy(t, p, from_logits=True)
f = backend.function([t, p], o)
@@ -1685,9 +1688,10 @@ class BackendCrossEntropyLossesTest(test.TestCase, parameterized.TestCase):
p = backend.placeholder()
o = backend.sparse_categorical_crossentropy(t, p)
t_val = ops.convert_to_tensor_v2([0, 1, 2])
p_val = ops.convert_to_tensor_v2([[.9, .05, .05], [.05, .89, .06],
[.05, .01, .94]])
t_val = ops.convert_to_tensor_v2_with_dispatch([0, 1, 2])
p_val = ops.convert_to_tensor_v2_with_dispatch([[.9, .05, .05],
[.05, .89, .06],
[.05, .01, .94]])
f = backend.function([t, p], o)
result = f([t_val, p_val])
@@ -1703,7 +1707,8 @@ class BackendCrossEntropyLossesTest(test.TestCase, parameterized.TestCase):
_ = f([t_val, p_val])
# from logits
p_val = ops.convert_to_tensor_v2([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
p_val = ops.convert_to_tensor_v2_with_dispatch([[8., 1., 1.], [0., 9., 1.],
[2., 3., 5.]])
o = backend.sparse_categorical_crossentropy(t, p, from_logits=True)
f = backend.function([t, p], o)
@@ -2124,9 +2129,10 @@ class ControlOpsTests(test.TestCase):
self.assertEqual(backend.eval(tensor), [9.0])
def test_unequal_rank(self):
x = ops.convert_to_tensor_v2(
x = ops.convert_to_tensor_v2_with_dispatch(
np.array([[1, 2, 3], [4, 5, 6]]), dtype='float32')
y = ops.convert_to_tensor_v2(np.array([1, 2, 3]), dtype='float32')
y = ops.convert_to_tensor_v2_with_dispatch(
np.array([1, 2, 3]), dtype='float32')
def true_func():
return x


@@ -935,7 +935,7 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
verbose=0)
with context.eager_mode():
tensor = ops.convert_to_tensor(1.)
tensor = ops.convert_to_tensor_v2_with_dispatch(1.)
def mock_numpy():
raise RuntimeError(
@@ -975,7 +975,7 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
verbose=2)
with context.eager_mode():
tensor = ops.convert_to_tensor(1.)
tensor = ops.convert_to_tensor_v2_with_dispatch(1.)
def mock_numpy():
raise RuntimeError(
@@ -2193,7 +2193,7 @@ class TestTensorBoardV2(keras_parameterized.TestCase):
steps=100,
verbose=0)
tensor = ops.convert_to_tensor(1.)
tensor = ops.convert_to_tensor_v2_with_dispatch(1.)
def mock_numpy():
raise RuntimeError(


@@ -56,8 +56,8 @@ class OptimizerTest(test.TestCase, parameterized.TestCase):
@def_function.function
def optimize():
grads = values.PerReplica([
ops.convert_to_tensor([1., 1.]),
ops.convert_to_tensor([2., 2.]),
ops.convert_to_tensor_v2_with_dispatch([1., 1.]),
ops.convert_to_tensor_v2_with_dispatch([2., 2.]),
])
def step_fn(grads):
@@ -85,7 +85,7 @@ class OptimizerTest(test.TestCase, parameterized.TestCase):
@def_function.function
def optimize():
grads = ops.convert_to_tensor([1., 1.])
grads = ops.convert_to_tensor_v2_with_dispatch([1., 1.])
def step_fn(grads):
optimizer.apply_gradients(
@@ -107,7 +107,7 @@ class OptimizerTest(test.TestCase, parameterized.TestCase):
v = variables.Variable([0., 0.])
optimizer = gradient_descent.SGD(0.1)
grads = ops.convert_to_tensor([1., 1.])
grads = ops.convert_to_tensor_v2_with_dispatch([1., 1.])
def step_fn(grads):
with self.assertRaises(NotImplementedError):


@@ -1006,10 +1006,10 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
np_arrays.ndarray, np.ndarray, float, int)) for x in input_list):
def _convert_non_tensor(x):
# Don't call `ops.convert_to_tensor_v2` on all `inputs` because
# Don't call `ops.convert_to_tensor` on all `inputs` because
# `SparseTensors` can't be converted to `Tensor`.
if isinstance(x, (np_arrays.ndarray, np.ndarray, float, int)):
return ops.convert_to_tensor_v2(x)
return ops.convert_to_tensor_v2_with_dispatch(x)
return x
inputs = nest.map_structure(_convert_non_tensor, inputs)
@@ -1518,7 +1518,8 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
if loss is None:
return None # Will be filtered out when computing the .losses property
if not tensor_util.is_tensor(loss):
loss = ops.convert_to_tensor_v2(loss, dtype=backend.floatx())
loss = ops.convert_to_tensor_v2_with_dispatch(
loss, dtype=backend.floatx())
loss._unconditional_loss = True # pylint: disable=protected-access
return loss
@@ -1535,7 +1536,8 @@ class Layer(module.Module, version_utils.LayerVersionSelector):
continue
if not tensor_util.is_tensor(loss) and not isinstance(
loss, keras_tensor.KerasTensor):
loss = ops.convert_to_tensor_v2(loss, dtype=backend.floatx())
loss = ops.convert_to_tensor_v2_with_dispatch(
loss, dtype=backend.floatx())
# TF Functions should take the eager path.
if ((tf_utils.is_symbolic_tensor(loss) or
isinstance(loss, keras_tensor.KerasTensor)) and
@@ -3267,7 +3269,7 @@ def _in_functional_construction_mode(layer, inputs, args, kwargs, input_list):
def _convert_numpy_or_python_types(x):
if isinstance(x, (np_arrays.ndarray, np.ndarray, float, int)):
return ops.convert_to_tensor_v2(x)
return ops.convert_to_tensor_v2_with_dispatch(x)
return x


@@ -1135,7 +1135,7 @@ class NameScopingTest(keras_parameterized.TestCase):
self.assertEqual(sublayer.active_name_scope, 'MyName2/Sublayer')
def test_name_scope_tf_tensor(self):
x = ops.convert_to_tensor_v2(np.ones((10, 10)))
x = ops.convert_to_tensor_v2_with_dispatch(np.ones((10, 10)))
layer = layers.Dense(
10, activation=layers.ReLU(name='MyAct'), name='MyName3')
layer(x)


@@ -690,10 +690,10 @@ class Layer(base_layer.Layer):
# Accept NumPy and scalar inputs by converting to Tensors.
if any(isinstance(x, (np.ndarray, float, int)) for x in input_list):
def _convert_non_tensor(x):
# Don't call `ops.convert_to_tensor_v2` on all `inputs` because
# Don't call `ops.convert_to_tensor` on all `inputs` because
# `SparseTensors` can't be converted to `Tensor`.
if isinstance(x, (np.ndarray, float, int)):
return ops.convert_to_tensor_v2(x)
return ops.convert_to_tensor_v2_with_dispatch(x)
return x
inputs = nest.map_structure(_convert_non_tensor, inputs)
input_list = nest.flatten(inputs)
@@ -1053,7 +1053,8 @@ class Layer(base_layer.Layer):
if loss is None:
return None # Will be filtered out when computing the .losses property
if not tensor_util.is_tensor(loss):
loss = ops.convert_to_tensor_v2(loss, dtype=backend.floatx())
loss = ops.convert_to_tensor_v2_with_dispatch(
loss, dtype=backend.floatx())
loss._unconditional_loss = (inputs is None) # pylint: disable=protected-access
return loss
@@ -1068,7 +1069,8 @@ class Layer(base_layer.Layer):
if loss is None:
continue
if not tensor_util.is_tensor(loss):
loss = ops.convert_to_tensor_v2(loss, dtype=backend.floatx())
loss = ops.convert_to_tensor_v2_with_dispatch(
loss, dtype=backend.floatx())
# TF Functions should take the eager path.
if (tf_utils.is_symbolic_tensor(loss) and
not base_layer_utils.is_in_tf_function()):
@@ -1229,7 +1231,7 @@ class Layer(base_layer.Layer):
elif hasattr(x, 'op'):
update = x.op
else:
update = ops.convert_to_tensor_v2(x)
update = ops.convert_to_tensor_v2_with_dispatch(x)
reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, [update])
update._unconditional_update = update not in reachable


@@ -149,7 +149,7 @@ class CombinerPreprocessingLayer(PreprocessingLayer):
else:
accumulator = self._combiner.restore(self._restore_updates())
if isinstance(data, (list, tuple)):
data = ops.convert_to_tensor_v2(data)
data = ops.convert_to_tensor_v2_with_dispatch(data)
if not isinstance(data,
(dataset_ops.DatasetV2,
np.ndarray,


@@ -53,7 +53,7 @@ class LossesContainerTest(keras_parameterized.TestCase):
y_t = [array_ops.ones((10, 1)), array_ops.zeros((10, 1))]
y_p = [array_ops.ones((10, 1)), array_ops.ones((10, 1))]
sw = ops.convert_to_tensor_v2([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
sw = ops.convert_to_tensor_v2_with_dispatch([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
total_loss = loss_container(y_t, y_p, sample_weight=sw)
@@ -86,7 +86,7 @@ class LossesContainerTest(keras_parameterized.TestCase):
y_t = {'out1': array_ops.ones((10, 1)), 'out2': array_ops.zeros((10, 1))}
y_p = {'out1': array_ops.ones((10, 1)), 'out2': array_ops.ones((10, 1))}
sw = ops.convert_to_tensor_v2([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
sw = ops.convert_to_tensor_v2_with_dispatch([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
total_loss = loss_container(y_t, y_p, sample_weight=sw)
@@ -112,7 +112,7 @@ class LossesContainerTest(keras_parameterized.TestCase):
y_t = [array_ops.ones((10, 1)), array_ops.zeros((10, 1))]
y_p = [array_ops.ones((10, 1)), array_ops.ones((10, 1))]
sw = ops.convert_to_tensor_v2([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
sw = ops.convert_to_tensor_v2_with_dispatch([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
total_loss = loss_container(y_t, y_p, sample_weight=sw)
@@ -135,7 +135,7 @@ class LossesContainerTest(keras_parameterized.TestCase):
y_t = {'out1': array_ops.ones((10, 1)), 'out2': array_ops.zeros((10, 1))}
y_p = {'out1': array_ops.ones((10, 1)), 'out2': array_ops.ones((10, 1))}
sw = ops.convert_to_tensor_v2([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
sw = ops.convert_to_tensor_v2_with_dispatch([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
total_loss = loss_container(y_t, y_p, sample_weight=sw)
@@ -170,7 +170,7 @@ class LossesContainerTest(keras_parameterized.TestCase):
array_ops.zeros((10, 1))],
'a': array_ops.ones((10, 1))
}
sw = ops.convert_to_tensor_v2([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
sw = ops.convert_to_tensor_v2_with_dispatch([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
total_loss = loss_container(y_t, y_p, sample_weight=sw)
self.assertEqual(total_loss.numpy(), 0.75)
@@ -193,7 +193,7 @@ class LossesContainerTest(keras_parameterized.TestCase):
y_t = [array_ops.ones((10, 1)), array_ops.zeros((10, 1))]
y_p = [array_ops.ones((10, 1)), array_ops.ones((10, 1))]
sw = ops.convert_to_tensor_v2([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
sw = ops.convert_to_tensor_v2_with_dispatch([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
total_loss = loss_container(y_t, y_p, sample_weight=sw)
self.assertEqual(total_loss.numpy(), 0.5)
@@ -220,13 +220,13 @@ class LossesContainerTest(keras_parameterized.TestCase):
})
y_p = {
'output1': ops.convert_to_tensor([[0], [1], [2]]),
'output2': ops.convert_to_tensor([[3], [4], [5]]),
'output3': ops.convert_to_tensor([[6], [7], [8]])
'output1': ops.convert_to_tensor_v2_with_dispatch([[0], [1], [2]]),
'output2': ops.convert_to_tensor_v2_with_dispatch([[3], [4], [5]]),
'output3': ops.convert_to_tensor_v2_with_dispatch([[6], [7], [8]])
}
y_t = {
'output1': ops.convert_to_tensor([[1], [2], [3]]),
'output3': ops.convert_to_tensor([[4], [5], [6]])
'output1': ops.convert_to_tensor_v2_with_dispatch([[1], [2], [3]]),
'output3': ops.convert_to_tensor_v2_with_dispatch([[4], [5], [6]])
}
total_loss = loss_container(y_t, y_p)
@@ -372,7 +372,7 @@ class MetricsContainerTest(keras_parameterized.TestCase):
y_t = [array_ops.ones((10, 1)), array_ops.zeros((10, 1))]
y_p = [array_ops.ones((10, 1)), 2 * array_ops.ones((10, 1))]
sw = ops.convert_to_tensor_v2([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
sw = ops.convert_to_tensor_v2_with_dispatch([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
metric_container.update_state(y_t, y_p, sample_weight=sw)
self.assertLen(metric_container.metrics, 6)
@@ -415,7 +415,7 @@ class MetricsContainerTest(keras_parameterized.TestCase):
y_t = {'out1': array_ops.ones((10, 1)), 'out2': array_ops.zeros((10, 1))}
y_p = {'out1': array_ops.ones((10, 1)), 'out2': 2 * array_ops.ones((10, 1))}
sw = ops.convert_to_tensor_v2([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
sw = ops.convert_to_tensor_v2_with_dispatch([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
metric_container.update_state(y_t, y_p, sample_weight=sw)
mse_metric = metric_container.metrics[0]
@@ -440,7 +440,7 @@ class MetricsContainerTest(keras_parameterized.TestCase):
y_t = [array_ops.ones((10, 1)), array_ops.zeros((10, 1))]
y_p = [array_ops.ones((10, 1)), array_ops.ones((10, 1))]
sw = ops.convert_to_tensor_v2([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
sw = ops.convert_to_tensor_v2_with_dispatch([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
metric_container.update_state(y_t, y_p, sample_weight=sw)
self.assertLen(metric_container.metrics, 1)
@@ -457,7 +457,7 @@ class MetricsContainerTest(keras_parameterized.TestCase):
y_t = {'out1': array_ops.ones((10, 1)), 'out2': array_ops.zeros((10, 1))}
y_p = {'out1': array_ops.ones((10, 1)), 'out2': array_ops.ones((10, 1))}
sw = ops.convert_to_tensor_v2([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
sw = ops.convert_to_tensor_v2_with_dispatch([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
metric_container.update_state(y_t, y_p, sample_weight=sw)
self.assertLen(metric_container.metrics, 1)
@@ -487,7 +487,7 @@ class MetricsContainerTest(keras_parameterized.TestCase):
array_ops.zeros((10, 1))],
'a': array_ops.ones((10, 1))
}
sw = ops.convert_to_tensor_v2([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
sw = ops.convert_to_tensor_v2_with_dispatch([0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
metric_container.update_state(y_t, y_p, sample_weight=sw)
self.assertLen(metric_container.metrics, 3)
@@ -548,9 +548,9 @@ class MetricsContainerTest(keras_parameterized.TestCase):
metric_container = compile_utils.MetricsContainer(
metrics=['mae'], weighted_metrics=['mae'])
y_t = ops.convert_to_tensor_v2([[0], [3], [0]])
y_p = ops.convert_to_tensor_v2([[0], [0], [0]])
sw = ops.convert_to_tensor_v2([[1], [0], [1]])
y_t = ops.convert_to_tensor_v2_with_dispatch([[0], [3], [0]])
y_p = ops.convert_to_tensor_v2_with_dispatch([[0], [0], [0]])
sw = ops.convert_to_tensor_v2_with_dispatch([[1], [0], [1]])
metric_container.update_state(y_t, y_p, sample_weight=sw)
self.assertLen(metric_container.metrics, 2)
@@ -566,8 +566,8 @@ class MetricsContainerTest(keras_parameterized.TestCase):
def test_broadcast_metrics_to_dict(self):
metric_container = compile_utils.MetricsContainer(metrics=['mae'])
y_p = {'output': ops.convert_to_tensor([[0], [1], [2]])}
y_t = {'output': ops.convert_to_tensor([[1], [2], [3]])}
y_p = {'output': ops.convert_to_tensor_v2_with_dispatch([[0], [1], [2]])}
y_t = {'output': ops.convert_to_tensor_v2_with_dispatch([[1], [2], [3]])}
metric_container.update_state(y_t, y_p)
mae_metric = metric_container.metrics[0]
@@ -578,8 +578,8 @@ class MetricsContainerTest(keras_parameterized.TestCase):
metric_container = compile_utils.MetricsContainer(
metrics=['mae'], output_names=['output'])
y_p = ops.convert_to_tensor([[0], [1], [2]])
y_t = {'output': ops.convert_to_tensor([[1], [2], [3]])}
y_p = ops.convert_to_tensor_v2_with_dispatch([[0], [1], [2]])
y_t = {'output': ops.convert_to_tensor_v2_with_dispatch([[1], [2], [3]])}
metric_container.update_state(y_t, y_p)
mae_metric = metric_container.metrics[0]
@@ -595,13 +595,13 @@ class MetricsContainerTest(keras_parameterized.TestCase):
})
y_p = {
'output1': ops.convert_to_tensor([[0], [1], [2]]),
'output2': ops.convert_to_tensor([[3], [4], [5]]),
'output3': ops.convert_to_tensor([[6], [7], [8]])
'output1': ops.convert_to_tensor_v2_with_dispatch([[0], [1], [2]]),
'output2': ops.convert_to_tensor_v2_with_dispatch([[3], [4], [5]]),
'output3': ops.convert_to_tensor_v2_with_dispatch([[6], [7], [8]])
}
y_t = {
'output1': ops.convert_to_tensor([[1], [2], [3]]),
'output3': ops.convert_to_tensor([[4], [5], [6]])
'output1': ops.convert_to_tensor_v2_with_dispatch([[1], [2], [3]]),
'output3': ops.convert_to_tensor_v2_with_dispatch([[4], [5], [6]])
}
metric_container.update_state(y_t, y_p)


@@ -1006,7 +1006,7 @@ def _process_tensorlike(inputs):
dtype = None
if issubclass(x.dtype.type, np.floating):
dtype = backend.floatx()
return ops.convert_to_tensor(x, dtype=dtype)
return ops.convert_to_tensor_v2_with_dispatch(x, dtype=dtype)
elif scipy_sparse and scipy_sparse.issparse(x):
return _scipy_sparse_to_sparse_tensor(x)
return x
@@ -1281,7 +1281,7 @@ def _make_class_weight_map_fn(class_weight):
"than the number of classes, found {}").format(class_weight)
raise ValueError(error_msg)
class_weight_tensor = ops.convert_to_tensor_v2(
class_weight_tensor = ops.convert_to_tensor_v2_with_dispatch(
[class_weight[int(c)] for c in class_ids])
def _class_weights_map_fn(*data):


@@ -446,7 +446,7 @@ class GenericArrayLikeDataAdapterTest(DataAdapterTestBase):
def test_training(self):
# First verify that DummyArrayLike can't be converted to a Tensor
with self.assertRaises(TypeError):
ops.convert_to_tensor_v2(self.arraylike_input)
ops.convert_to_tensor_v2_with_dispatch(self.arraylike_input)
# Then train on the array like.
# It should not be converted to a tensor directly (which would force it into
@@ -914,7 +914,7 @@ class DataHandlerTest(keras_parameterized.TestCase):
def generator():
for _ in range(2):
for step in range(3):
yield (ops.convert_to_tensor_v2([step]),)
yield (ops.convert_to_tensor_v2_with_dispatch([step]),)
data_handler = data_adapter.DataHandler(
generator(), epochs=2, steps_per_epoch=3)
@@ -1007,20 +1007,20 @@ class TestValidationSplit(keras_parameterized.TestCase):
y = np.array([0, 2, 4, 6, 8])
sw = np.array([0, 4, 8, 12, 16])
else:
x = ops.convert_to_tensor_v2([0, 1, 2, 3, 4])
y = ops.convert_to_tensor_v2([0, 2, 4, 6, 8])
sw = ops.convert_to_tensor_v2([0, 4, 8, 12, 16])
x = ops.convert_to_tensor_v2_with_dispatch([0, 1, 2, 3, 4])
y = ops.convert_to_tensor_v2_with_dispatch([0, 2, 4, 6, 8])
sw = ops.convert_to_tensor_v2_with_dispatch([0, 4, 8, 12, 16])
(train_x, train_y, train_sw), (val_x, val_y, val_sw) = (
data_adapter.train_validation_split((x, y, sw), validation_split=0.2))
if use_numpy:
train_x = ops.convert_to_tensor_v2(train_x)
train_y = ops.convert_to_tensor_v2(train_y)
train_sw = ops.convert_to_tensor_v2(train_sw)
val_x = ops.convert_to_tensor_v2(val_x)
val_y = ops.convert_to_tensor_v2(val_y)
val_sw = ops.convert_to_tensor_v2(val_sw)
train_x = ops.convert_to_tensor_v2_with_dispatch(train_x)
train_y = ops.convert_to_tensor_v2_with_dispatch(train_y)
train_sw = ops.convert_to_tensor_v2_with_dispatch(train_sw)
val_x = ops.convert_to_tensor_v2_with_dispatch(val_x)
val_y = ops.convert_to_tensor_v2_with_dispatch(val_y)
val_sw = ops.convert_to_tensor_v2_with_dispatch(val_sw)
self.assertEqual(train_x.numpy().tolist(), [0, 1, 2, 3])
self.assertEqual(train_y.numpy().tolist(), [0, 2, 4, 6])


@@ -121,7 +121,7 @@ def _model_loss(model,
if any(
isinstance(input_t, (np.ndarray, float, int))
for input_t in nest.flatten(inputs)):
inputs = nest.map_structure(ops.convert_to_tensor_v2, inputs)
inputs = nest.map_structure(ops.convert_to_tensor_v2_with_dispatch, inputs)
outs = model(inputs, **kwargs)
outs = nest.flatten(outs)
@@ -131,7 +131,8 @@ def _model_loss(model,
# TODO(sallymatson/psv): check if we should do same mismatch fix for weights
if sample_weights:
sample_weights = [
training_utils.cast_if_floating_dtype(ops.convert_to_tensor_v2(val))
training_utils.cast_if_floating_dtype(
ops.convert_to_tensor_v2_with_dispatch(val))
if val is not None else None for val in sample_weights
]


@@ -1009,7 +1009,7 @@ def standardize_weights(y,
class_sample_weight = math_ops.cast(class_sample_weight, K.floatx())
if sample_weight is not None:
sample_weight = math_ops.cast(
ops.convert_to_tensor_v2(sample_weight), K.floatx())
ops.convert_to_tensor_v2_with_dispatch(sample_weight), K.floatx())
else:
y_classes = y
if len(y.shape) == 2:
@@ -1365,7 +1365,7 @@ def check_steps_argument(input_data, steps, steps_name):
def cast_single_tensor(x, dtype=None):
if isinstance(x, np.ndarray):
x = ops.convert_to_tensor_v2(x)
x = ops.convert_to_tensor_v2_with_dispatch(x)
dtype = dtype or K.floatx()
if x.dtype.is_floating:
return math_ops.cast(x, dtype=dtype)
@@ -1391,7 +1391,7 @@ def cast_if_floating_dtype_and_mismatch(targets, outputs):
new_targets = []
for target, out in zip(targets, outputs):
if isinstance(target, np.ndarray):
target = ops.convert_to_tensor_v2(target)
target = ops.convert_to_tensor_v2_with_dispatch(target)
if target.dtype != out.dtype:
new_targets.append(cast_single_tensor(target, dtype=out.dtype))
else:


@@ -201,7 +201,7 @@ class Dropout(Layer):
noise_shape = []
for i, value in enumerate(self.noise_shape):
noise_shape.append(concrete_inputs_shape[i] if value is None else value)
return ops.convert_to_tensor_v2(noise_shape)
return ops.convert_to_tensor_v2_with_dispatch(noise_shape)
def call(self, inputs, training=None):
if training is None:


@@ -504,14 +504,14 @@ class CoreLayersTest(keras_parameterized.TestCase):
keras.layers.Dense, kwargs={'units': 3}, input_shape=(3, 4, 5, 2))
def test_dense_dtype(self):
inputs = ops.convert_to_tensor_v2(
inputs = ops.convert_to_tensor_v2_with_dispatch(
np.random.randint(low=0, high=7, size=(2, 2)))
layer = keras.layers.Dense(5, dtype='float32')
outputs = layer(inputs)
self.assertEqual(outputs.dtype, 'float32')
def test_dense_with_policy(self):
inputs = ops.convert_to_tensor_v2(
inputs = ops.convert_to_tensor_v2_with_dispatch(
np.random.randint(low=0, high=7, size=(2, 2)))
layer = keras.layers.Dense(5, dtype=policy.Policy('mixed_float16'))
outputs = layer(inputs)


@@ -180,7 +180,7 @@ class BaseDenseAttention(Layer):
q_mask = mask[0]
if q_mask is None:
return None
return ops.convert_to_tensor_v2(q_mask)
return ops.convert_to_tensor_v2_with_dispatch(q_mask)
return None
def _validate_call_args(self, inputs, mask):


@@ -218,7 +218,7 @@ class RandomFourierFeatures(base_layer.Layer):
super(RandomFourierFeatures, self).build(input_shape)
def call(self, inputs):
inputs = ops.convert_to_tensor_v2(inputs, dtype=self.dtype)
inputs = ops.convert_to_tensor_v2_with_dispatch(inputs, dtype=self.dtype)
inputs = gen_math_ops.cast(inputs, dtypes.float32)
kernel = (1.0 / self.kernel_scale) * self.unscaled_kernel
outputs = gen_math_ops.mat_mul(inputs, kernel)


@@ -282,7 +282,7 @@ class RNNCell(base_layer.Layer):
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
if inputs is not None:
# Validate the given batch_size and dtype against inputs if provided.
inputs = ops.convert_to_tensor(inputs, name="inputs")
inputs = ops.convert_to_tensor_v2_with_dispatch(inputs, name="inputs")
if batch_size is not None:
if tensor_util.is_tensor(batch_size):
static_batch_size = tensor_util.constant_value(


@@ -116,7 +116,7 @@ class DropoutWrapperBase(object):
with ops.name_scope_v2("DropoutWrapperInit"):
def tensor_and_const_value(v):
tensor_value = ops.convert_to_tensor(v)
tensor_value = ops.convert_to_tensor_v2_with_dispatch(v)
const_value = tensor_util.constant_value(tensor_value)
return (tensor_value, const_value)


@@ -490,7 +490,8 @@ class BatchNormalizationBase(Layer):
def _assign_moving_average(self, variable, value, momentum, inputs_size):
with K.name_scope('AssignMovingAvg') as scope:
with ops.colocate_with(variable):
decay = ops.convert_to_tensor_v2(1.0 - momentum, name='decay')
decay = ops.convert_to_tensor_v2_with_dispatch(
1.0 - momentum, name='decay')
if decay.dtype != variable.dtype.base_dtype:
decay = math_ops.cast(decay, variable.dtype.base_dtype)
update_delta = (variable - math_ops.cast(value, variable.dtype)) * decay
@@ -595,7 +596,7 @@ class BatchNormalizationBase(Layer):
lambda: self.momentum,
lambda: 1.0)
else:
momentum = ops.convert_to_tensor_v2(self.momentum)
momentum = ops.convert_to_tensor_v2_with_dispatch(self.momentum)
def mean_update():
"""Update self.moving_mean with the most recent data point."""
@@ -797,10 +798,11 @@ class BatchNormalizationBase(Layer):
moving_variance = self.moving_variance
mean = control_flow_util.smart_cond(
training, lambda: mean, lambda: ops.convert_to_tensor_v2(moving_mean))
training, lambda: mean,
lambda: ops.convert_to_tensor_v2_with_dispatch(moving_mean))
variance = control_flow_util.smart_cond(
training, lambda: variance,
lambda: ops.convert_to_tensor_v2(moving_variance))
lambda: ops.convert_to_tensor_v2_with_dispatch(moving_variance))
if self.virtual_batch_size is not None:
# This isn't strictly correct since in ghost batch norm, you are


@@ -143,7 +143,7 @@ class CategoryCrossing(base_preprocessing_layer.PreprocessingLayer):
def _preprocess_input(self, inp):
if isinstance(inp, (list, tuple, np.ndarray)):
inp = ops.convert_to_tensor(inp)
inp = ops.convert_to_tensor_v2_with_dispatch(inp)
if inp.shape.rank == 1:
inp = array_ops.expand_dims(inp, axis=-1)
return inp


@@ -269,7 +269,7 @@ class CategoryEncoding(base_preprocessing_layer.CombinerPreprocessingLayer):
def call(self, inputs, count_weights=None):
if isinstance(inputs, (list, np.ndarray)):
inputs = ops.convert_to_tensor_v2(inputs)
inputs = ops.convert_to_tensor_v2_with_dispatch(inputs)
if inputs.shape.rank == 1:
inputs = array_ops.expand_dims(inputs, 1)


@@ -154,7 +154,7 @@ class Hashing(base_preprocessing_layer.PreprocessingLayer):
def _preprocess_single_input(self, inp):
if isinstance(inp, (list, tuple, np.ndarray)):
inp = ops.convert_to_tensor(inp)
inp = ops.convert_to_tensor_v2_with_dispatch(inp)
return inp
def _preprocess_inputs(self, inputs):


@@ -681,7 +681,7 @@ def transform(images,
if output_shape_value is not None:
output_shape = output_shape_value
output_shape = ops.convert_to_tensor_v2(
output_shape = ops.convert_to_tensor_v2_with_dispatch(
output_shape, dtypes.int32, name='output_shape')
if not output_shape.get_shape().is_compatible_with([2]):


@@ -145,7 +145,7 @@ class Normalization(base_preprocessing_layer.CombinerPreprocessingLayer):
super(Normalization, self).build(input_shape)
def call(self, inputs):
inputs = ops.convert_to_tensor_v2(inputs)
inputs = ops.convert_to_tensor_v2_with_dispatch(inputs)
if inputs.shape.rank == 1:
inputs = array_ops.expand_dims(inputs, 1)
# If the inputs are not floats, cast them to floats. This avoids issues


@@ -62,8 +62,10 @@ class TableHandler(object):
raise RuntimeError("Size mismatch between values and key arrays. "
"Keys had size %s, values had size %s." %
(len(keys), len(values)))
keys = ops.convert_to_tensor(keys, dtype=self.table._key_dtype) # pylint: disable=protected-access
values = ops.convert_to_tensor(values, dtype=self.table._value_dtype) # pylint: disable=protected-access
keys = ops.convert_to_tensor_v2_with_dispatch(
keys, dtype=self.table._key_dtype) # pylint: disable=protected-access
values = ops.convert_to_tensor_v2_with_dispatch(
values, dtype=self.table._value_dtype) # pylint: disable=protected-access
if values.shape.ndims != 1:
raise ValueError("`values` must be 1-dimensional, got an input with "
" %s dimensions." % values.shape.ndims)


@@ -367,7 +367,7 @@ class TextVectorization(base_preprocessing_layer.CombinerPreprocessingLayer):
# on an implicit call to `build` in the base layer's `adapt`, since
# preprocessing changes the input shape.
if isinstance(data, (list, tuple, np.ndarray)):
data = ops.convert_to_tensor(data)
data = ops.convert_to_tensor_v2_with_dispatch(data)
if isinstance(data, ops.Tensor):
if data.shape.rank == 1:
@@ -566,7 +566,7 @@ class TextVectorization(base_preprocessing_layer.CombinerPreprocessingLayer):
def call(self, inputs):
if isinstance(inputs, (list, tuple, np.ndarray)):
inputs = ops.convert_to_tensor(inputs)
inputs = ops.convert_to_tensor_v2_with_dispatch(inputs)
self._called = True
inputs = self._preprocess(inputs)


@@ -40,8 +40,10 @@ class RNNCellWrapperTest(test.TestCase, parameterized.TestCase):
def testResidualWrapper(self):
wrapper_type = rnn_cell_wrapper_v2.ResidualWrapper
x = ops.convert_to_tensor_v2(np.array([[1., 1., 1.]]), dtype="float32")
m = ops.convert_to_tensor_v2(np.array([[0.1, 0.1, 0.1]]), dtype="float32")
x = ops.convert_to_tensor_v2_with_dispatch(
np.array([[1., 1., 1.]]), dtype="float32")
m = ops.convert_to_tensor_v2_with_dispatch(
np.array([[0.1, 0.1, 0.1]]), dtype="float32")
base_cell = rnn_cell_impl.GRUCell(
3, kernel_initializer=init_ops.constant_initializer(0.5),
bias_initializer=init_ops.constant_initializer(0.5))
@@ -62,9 +64,10 @@ class RNNCellWrapperTest(test.TestCase, parameterized.TestCase):
def testResidualWrapperWithSlice(self):
wrapper_type = rnn_cell_wrapper_v2.ResidualWrapper
x = ops.convert_to_tensor_v2(
x = ops.convert_to_tensor_v2_with_dispatch(
np.array([[1., 1., 1., 1., 1.]]), dtype="float32")
m = ops.convert_to_tensor_v2(np.array([[0.1, 0.1, 0.1]]), dtype="float32")
m = ops.convert_to_tensor_v2_with_dispatch(
np.array([[0.1, 0.1, 0.1]]), dtype="float32")
base_cell = rnn_cell_impl.GRUCell(
3, kernel_initializer=init_ops.constant_initializer(0.5),
bias_initializer=init_ops.constant_initializer(0.5))
@@ -116,7 +119,8 @@ class RNNCellWrapperTest(test.TestCase, parameterized.TestCase):
base_cell = layers.SimpleRNNCell(1, name="basic_rnn_cell")
rnn_cell = wrapper(base_cell)
rnn_layer = layers.RNN(rnn_cell)
inputs = ops.convert_to_tensor_v2([[[1]]], dtype=dtypes.float32)
inputs = ops.convert_to_tensor_v2_with_dispatch([[[1]]],
dtype=dtypes.float32)
rnn_layer(inputs)
wrapper_name = generic_utils.to_snake_case(wrapper.__name__)
@@ -140,8 +144,8 @@ class RNNCellWrapperTest(test.TestCase, parameterized.TestCase):
base_cell = rnn_cell_impl.MultiRNNCell(
[rnn_cell_impl.BasicRNNCell(1) for _ in range(2)])
rnn_cell = wrapper(base_cell)
inputs = ops.convert_to_tensor_v2([[1]], dtype=dtypes.float32)
state = ops.convert_to_tensor_v2([[1]], dtype=dtypes.float32)
inputs = ops.convert_to_tensor_v2_with_dispatch([[1]], dtype=dtypes.float32)
state = ops.convert_to_tensor_v2_with_dispatch([[1]], dtype=dtypes.float32)
_ = rnn_cell(inputs, [state, state])
weights = base_cell._cells[0].weights
self.assertLen(weights, expected_len=2)


@@ -37,7 +37,7 @@ class SubclassedLayersTest(keras_parameterized.TestCase):
class BuildConstantLayer(keras.layers.Layer):
def build(self, input_shape):
self.b = ops.convert_to_tensor_v2(2.0)
self.b = ops.convert_to_tensor_v2_with_dispatch(2.0)
def call(self, inputs):
return self.b * inputs
@@ -46,7 +46,7 @@ class SubclassedLayersTest(keras_parameterized.TestCase):
model = testing_utils.get_model_from_layers(
[layer, keras.layers.Dense(1)], input_shape=(1,))
x = ops.convert_to_tensor_v2([[3.0]])
x = ops.convert_to_tensor_v2_with_dispatch([[3.0]])
self.assertEqual(
tf_utils.is_symbolic_tensor(model(x)), not context.executing_eagerly())
self.assertEqual(
@@ -58,10 +58,10 @@ class SubclassedLayersTest(keras_parameterized.TestCase):
class BuildDerivedConstantLayer(keras.layers.Layer):
def build(self, input_shape):
a = ops.convert_to_tensor_v2(1.0)
a = ops.convert_to_tensor_v2_with_dispatch(1.0)
b = 2.0 * a
self.variable = variables.Variable(b)
self.constant = ops.convert_to_tensor_v2(self.variable)
self.constant = ops.convert_to_tensor_v2_with_dispatch(self.variable)
def call(self, inputs):
return self.variable * self.constant * inputs
@@ -70,7 +70,7 @@ class SubclassedLayersTest(keras_parameterized.TestCase):
model = testing_utils.get_model_from_layers(
[layer, keras.layers.Dense(1)], input_shape=(1,))
x = ops.convert_to_tensor_v2([[3.0]])
x = ops.convert_to_tensor_v2_with_dispatch([[3.0]])
self.assertEqual(
tf_utils.is_symbolic_tensor(model(x)), not context.executing_eagerly())
self.assertEqual(


@@ -637,7 +637,7 @@ class AutoLambdaTest(keras_parameterized.TestCase):
self.assertAllEqual(model(ones), 3.0 * ones)
def test_numerical_correctness_simple(self):
x = ops.convert_to_tensor_v2([[-1., 0., -2., 1.]])
x = ops.convert_to_tensor_v2_with_dispatch([[-1., 0., -2., 1.]])
inputs = keras.Input(shape=(4,))
outputs = gen_nn_ops.relu(inputs)
model = keras.Model(inputs, outputs)
@@ -645,7 +645,7 @@ class AutoLambdaTest(keras_parameterized.TestCase):
self.assertAllClose(y, [[0., 0., 0., 1.]])
def test_numerical_correctness_with_attrs(self):
x = ops.convert_to_tensor_v2([[1.5, 1.5], [2.5, 3.5]])
x = ops.convert_to_tensor_v2_with_dispatch([[1.5, 1.5], [2.5, 3.5]])
inputs = keras.Input(shape=(2,))
outputs = math_ops.reduce_mean(inputs, axis=1)
model = keras.Model(inputs, outputs)
@@ -653,7 +653,7 @@ class AutoLambdaTest(keras_parameterized.TestCase):
self.assertAllClose(y, [1.5, 3.])
def test_numerical_correctness_serialization(self):
x = ops.convert_to_tensor_v2([[-1., 0., -2., 1.]])
x = ops.convert_to_tensor_v2_with_dispatch([[-1., 0., -2., 1.]])
inputs = keras.Input(shape=(4,))
outputs = gen_nn_ops.relu(inputs)
model1 = keras.Model(inputs, outputs)


@@ -1189,7 +1189,7 @@ def mean_squared_error(y_true, y_pred):
Returns:
Mean squared error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor_v2(y_pred)
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.mean(math_ops.squared_difference(y_pred, y_true), axis=-1)
@@ -1222,7 +1222,7 @@ def mean_absolute_error(y_true, y_pred):
Returns:
Mean absolute error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor_v2(y_pred)
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.mean(math_ops.abs(y_pred - y_true), axis=-1)
@@ -1257,7 +1257,7 @@ def mean_absolute_percentage_error(y_true, y_pred):
Returns:
Mean absolute percentage error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor_v2(y_pred)
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
diff = math_ops.abs(
(y_true - y_pred) / K.maximum(math_ops.abs(y_true), K.epsilon()))
@@ -1296,7 +1296,7 @@ def mean_squared_logarithmic_error(y_true, y_pred):
Returns:
Mean squared logarithmic error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor_v2(y_pred)
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
first_log = math_ops.log(K.maximum(y_pred, K.epsilon()) + 1.)
second_log = math_ops.log(K.maximum(y_true, K.epsilon()) + 1.)
@@ -1344,7 +1344,7 @@ def squared_hinge(y_true, y_pred):
Returns:
Squared hinge loss values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor_v2(y_pred)
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
y_true = _maybe_convert_labels(y_true)
return K.mean(
@@ -1377,7 +1377,7 @@ def hinge(y_true, y_pred):
Returns:
Hinge loss values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor_v2(y_pred)
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
y_true = _maybe_convert_labels(y_true)
return K.mean(math_ops.maximum(1. - y_true * y_pred, 0.), axis=-1)
@@ -1409,7 +1409,7 @@ def categorical_hinge(y_true, y_pred):
Returns:
Categorical hinge loss values.
"""
y_pred = ops.convert_to_tensor_v2(y_pred)
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
pos = math_ops.reduce_sum(y_true * y_pred, axis=-1)
neg = math_ops.reduce_max((1. - y_true) * y_pred, axis=-1)
@@ -1444,7 +1444,7 @@ def huber(y_true, y_pred, delta=1.0):
delta = math_ops.cast(delta, dtype=K.floatx())
error = math_ops.subtract(y_pred, y_true)
abs_error = math_ops.abs(error)
half = ops.convert_to_tensor_v2(0.5, dtype=abs_error.dtype)
half = ops.convert_to_tensor_v2_with_dispatch(0.5, dtype=abs_error.dtype)
return K.mean(
array_ops.where_v2(
abs_error <= delta, half * math_ops.pow(error, 2),
@@ -1481,7 +1481,7 @@ def log_cosh(y_true, y_pred):
Returns:
Logcosh error values. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor_v2(y_pred)
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
def _logcosh(x):
@@ -1518,9 +1518,10 @@ def categorical_crossentropy(y_true,
Returns:
Categorical crossentropy loss value.
"""
y_pred = ops.convert_to_tensor_v2(y_pred)
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
label_smoothing = ops.convert_to_tensor_v2(label_smoothing, dtype=K.floatx())
label_smoothing = ops.convert_to_tensor_v2_with_dispatch(
label_smoothing, dtype=K.floatx())
def _smooth_labels():
num_classes = math_ops.cast(array_ops.shape(y_true)[-1], y_pred.dtype)
@@ -1557,7 +1558,7 @@ def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):
Returns:
Sparse categorical crossentropy loss value.
"""
y_pred = ops.convert_to_tensor_v2(y_pred)
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.sparse_categorical_crossentropy(
y_true, y_pred, from_logits=from_logits, axis=axis)
@@ -1588,9 +1589,10 @@ def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0):
Returns:
Binary crossentropy loss value. shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor_v2(y_pred)
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
label_smoothing = ops.convert_to_tensor_v2(label_smoothing, dtype=K.floatx())
label_smoothing = ops.convert_to_tensor_v2_with_dispatch(
label_smoothing, dtype=K.floatx())
def _smooth_labels():
return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
@@ -1638,7 +1640,7 @@ def kl_divergence(y_true, y_pred):
Raises:
TypeError: If `y_true` cannot be cast to the `y_pred.dtype`.
"""
y_pred = ops.convert_to_tensor_v2(y_pred)
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
y_true = K.clip(y_true, K.epsilon(), 1)
y_pred = K.clip(y_pred, K.epsilon(), 1)
@@ -1674,7 +1676,7 @@ def poisson(y_true, y_pred):
Raises:
InvalidArgumentError: If `y_true` and `y_pred` have incompatible shapes.
"""
y_pred = ops.convert_to_tensor_v2(y_pred)
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
return K.mean(y_pred - y_true * math_ops.log(y_pred + K.epsilon()), axis=-1)


@@ -95,16 +95,19 @@ class KerasLossesTest(test.TestCase, parameterized.TestCase):
p = backend.placeholder()
o = losses.categorical_crossentropy(t, p)
t_val = ops.convert_to_tensor_v2([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
p_val = ops.convert_to_tensor_v2([[.9, .05, .05], [.05, .89, .06],
[.05, .01, .94]])
t_val = ops.convert_to_tensor_v2_with_dispatch([[1., 0., 0.], [0., 1., 0.],
[0., 0., 1.]])
p_val = ops.convert_to_tensor_v2_with_dispatch([[.9, .05, .05],
[.05, .89, .06],
[.05, .01, .94]])
f = backend.function([t, p], o)
result = f([t_val, p_val])
self.assertArrayNear(result, [.105, .116, .062], 1e-3)
# from logits
p_val = ops.convert_to_tensor_v2([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
p_val = ops.convert_to_tensor_v2_with_dispatch([[8., 1., 1.], [0., 9., 1.],
[2., 3., 5.]])
o = losses.categorical_crossentropy(t, p, from_logits=True)
f = backend.function([t, p], o)
@@ -133,16 +136,18 @@ class KerasLossesTest(test.TestCase, parameterized.TestCase):
p = backend.placeholder()
o = losses.sparse_categorical_crossentropy(t, p)
t_val = ops.convert_to_tensor_v2([0, 1, 2])
p_val = ops.convert_to_tensor_v2([[.9, .05, .05], [.05, .89, .06],
[.05, .01, .94]])
t_val = ops.convert_to_tensor_v2_with_dispatch([0, 1, 2])
p_val = ops.convert_to_tensor_v2_with_dispatch([[.9, .05, .05],
[.05, .89, .06],
[.05, .01, .94]])
f = backend.function([t, p], o)
result = f([t_val, p_val])
self.assertArrayNear(result, [.105, .116, .062], 1e-3)
# from logits
p_val = ops.convert_to_tensor_v2([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
p_val = ops.convert_to_tensor_v2_with_dispatch([[8., 1., 1.], [0., 9., 1.],
[2., 3., 5.]])
o = losses.sparse_categorical_crossentropy(t, p, from_logits=True)
f = backend.function([t, p], o)


@@ -963,7 +963,7 @@ class _ConfusionMatrixConditionCount(Metric):
result = self.accumulator[0]
else:
result = self.accumulator
return ops.convert_to_tensor_v2(result)
return ops.convert_to_tensor_v2_with_dispatch(result)
def reset_states(self):
num_thresholds = len(to_list(self.thresholds))
@@ -3239,7 +3239,7 @@ def binary_accuracy(y_true, y_pred, threshold=0.5):
Returns:
Binary accuracy values. shape = `[batch_size, d0, .. dN-1]`
"""
y_pred = ops.convert_to_tensor_v2(y_pred)
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
threshold = math_ops.cast(threshold, y_pred.dtype)
y_pred = math_ops.cast(y_pred > threshold, y_pred.dtype)
return K.mean(math_ops.equal(y_true, y_pred), axis=-1)
@@ -3297,8 +3297,8 @@ def sparse_categorical_accuracy(y_true, y_pred):
Returns:
Sparse categorical accuracy values.
"""
y_pred = ops.convert_to_tensor_v2(y_pred)
y_true = ops.convert_to_tensor_v2(y_true)
y_pred = ops.convert_to_tensor_v2_with_dispatch(y_pred)
y_true = ops.convert_to_tensor_v2_with_dispatch(y_true)
y_pred_rank = y_pred.shape.ndims
y_true_rank = y_true.shape.ndims
# If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
@@ -3364,8 +3364,8 @@ def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
Returns:
Sparse top K categorical accuracy value.
"""
y_pred_rank = ops.convert_to_tensor_v2(y_pred).shape.ndims
y_true_rank = ops.convert_to_tensor_v2(y_true).shape.ndims
y_pred_rank = ops.convert_to_tensor_v2_with_dispatch(y_pred).shape.ndims
y_true_rank = ops.convert_to_tensor_v2_with_dispatch(y_true).shape.ndims
# Flatten y_pred to (batch_size, num_samples) and y_true to (num_samples,)
if (y_true_rank is not None) and (y_pred_rank is not None):
if y_pred_rank > 2:


@@ -71,7 +71,7 @@ class KerasSumTest(test.TestCase, parameterized.TestCase):
self.assertEqual(self.evaluate(m.total), 100)
# check update_state() and result() + state accumulation + tensor input
update_op = m.update_state(ops.convert_to_tensor_v2([1, 5]))
update_op = m.update_state(ops.convert_to_tensor_v2_with_dispatch([1, 5]))
self.evaluate(update_op)
self.assertAlmostEqual(self.evaluate(m.result()), 106)
self.assertEqual(self.evaluate(m.total), 106) # 100 + 1 + 5


@@ -126,7 +126,7 @@ class AutoCastVariable(variables.Variable, core.Tensor):
raise ValueError(
'Incompatible type conversion requested to type {!r} for variable '
'of type {!r}'.format(dtype.name, self.dtype.name))
val = ops.convert_to_tensor_v2(
val = ops.convert_to_tensor_v2_with_dispatch(
self._variable, dtype=self._variable.dtype, name=name)
return math_ops.cast(val, self.dtype)


@@ -124,10 +124,10 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
def testGetScaledLoss(self):
opt = gradient_descent.SGD(2.0)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=2.)
loss = ops.convert_to_tensor_v2(5.)
loss = ops.convert_to_tensor_v2_with_dispatch(5.)
self.assertEqual(10., self.evaluate(opt.get_scaled_loss(loss)))
self.assertEqual(10., self.evaluate(opt.get_scaled_loss(lambda: loss)()))
loss = ops.convert_to_tensor_v2(5., dtype='float16')
loss = ops.convert_to_tensor_v2_with_dispatch(5., dtype='float16')
self.assertEqual(10., self.evaluate(opt.get_scaled_loss(loss)))
self.assertEqual(10., self.evaluate(opt.get_scaled_loss(lambda: loss)()))
@@ -135,8 +135,8 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
opt = gradient_descent.SGD(2.0)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=2)
scaled_grads = [
ops.convert_to_tensor_v2(3.), None,
ops.convert_to_tensor_v2(-4., dtype='float16')
ops.convert_to_tensor_v2_with_dispatch(3.), None,
ops.convert_to_tensor_v2_with_dispatch(-4., dtype='float16')
]
grads = opt.get_unscaled_gradients(scaled_grads)
grads = [self.evaluate(g) if g is not None else g for g in grads]
@@ -146,9 +146,10 @@ class LossScaleOptimizerTest(test.TestCase, parameterized.TestCase):
opt = gradient_descent.SGD(2.0)
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale=2)
sparse_scaled_grad = ops.IndexedSlices(
ops.convert_to_tensor_v2([[4., 2.], [8., 5.]]),
ops.convert_to_tensor_v2([1, 3], dtype='int32'),
dense_shape=ops.convert_to_tensor_v2([5, 2], dtype='int32'))
ops.convert_to_tensor_v2_with_dispatch([[4., 2.], [8., 5.]]),
ops.convert_to_tensor_v2_with_dispatch([1, 3], dtype='int32'),
dense_shape=ops.convert_to_tensor_v2_with_dispatch([5, 2],
dtype='int32'))
sparse_grad = opt.get_unscaled_gradients([sparse_scaled_grad])[0]
self.assertIsInstance(sparse_grad, ops.IndexedSlices)
self.assertAllEqual([[2., 1.], [4., 2.5]],


@@ -55,7 +55,7 @@ def create_identity_with_grad_check_fn(expected_gradient, expected_dtype=None):
if expected_dtype:
assert dx.dtype == expected_dtype, (
'dx.dtype should be %s but is: %s' % (expected_dtype, dx.dtype))
expected_tensor = ops.convert_to_tensor_v2(
expected_tensor = ops.convert_to_tensor_v2_with_dispatch(
expected_gradient, dtype=dx.dtype, name='expected_gradient')
# Control dependency is to ensure input is available. It's possible the
# dataset will throw a StopIteration to indicate there is no more data, in


@@ -101,7 +101,8 @@ class Adadelta(optimizer_v2.OptimizerV2):
super(Adadelta, self)._prepare_local(var_device, var_dtype, apply_state)
apply_state[(var_device, var_dtype)].update(
dict(
epsilon=ops.convert_to_tensor_v2(self.epsilon, var_dtype),
epsilon=ops.convert_to_tensor_v2_with_dispatch(
self.epsilon, var_dtype),
rho=array_ops.identity(self._get_hyper('rho', var_dtype))))
def set_weights(self, weights):


@@ -87,7 +87,8 @@ class Adagrad(optimizer_v2.OptimizerV2):
super(Adagrad, self)._prepare_local(var_device, var_dtype, apply_state)
apply_state[(var_device, var_dtype)].update(
dict(
epsilon=ops.convert_to_tensor_v2(self.epsilon, var_dtype),
epsilon=ops.convert_to_tensor_v2_with_dispatch(
self.epsilon, var_dtype),
neg_lr_t=-apply_state[(var_device, var_dtype)]['lr_t'],
zero=array_ops.zeros((), dtype=dtypes.int64)))


@@ -144,7 +144,8 @@ class Adam(optimizer_v2.OptimizerV2):
apply_state[(var_device, var_dtype)].update(
dict(
lr=lr,
epsilon=ops.convert_to_tensor_v2(self.epsilon, var_dtype),
epsilon=ops.convert_to_tensor_v2_with_dispatch(
self.epsilon, var_dtype),
beta_1_t=beta_1_t,
beta_1_power=beta_1_power,
one_minus_beta_1_t=1 - beta_1_t,
@@ -396,7 +397,8 @@ class NonFusedAdam(optimizer_v2.OptimizerV2):
apply_state[(var_device, var_dtype)].update(
dict(
lr=lr,
epsilon=ops.convert_to_tensor_v2(self.epsilon, var_dtype),
epsilon=ops.convert_to_tensor_v2_with_dispatch(
self.epsilon, var_dtype),
beta_1_t=beta_1_t,
beta_1_power=beta_1_power,
one_minus_beta_1_t=1 - beta_1_t,


@@ -122,7 +122,8 @@ class Adamax(optimizer_v2.OptimizerV2):
apply_state[(var_device, var_dtype)].update(
dict(
neg_scaled_lr=-lr_t / (1 - beta_1_power),
epsilon=ops.convert_to_tensor_v2(self.epsilon, var_dtype),
epsilon=ops.convert_to_tensor_v2_with_dispatch(
self.epsilon, var_dtype),
beta_1_t=beta_1_t,
beta_1_power=beta_1_power,
one_minus_beta_1_t=1 - beta_1_t,


@@ -143,7 +143,7 @@ class ExponentialDecay(LearningRateSchedule):
def __call__(self, step):
with ops.name_scope_v2(self.name or "ExponentialDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2(
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
@@ -237,11 +237,11 @@ class PiecewiseConstantDecay(LearningRateSchedule):
def __call__(self, step):
with ops.name_scope_v2(self.name or "PiecewiseConstant"):
boundaries = nest.map_structure(ops.convert_to_tensor_v2,
boundaries = nest.map_structure(ops.convert_to_tensor_v2_with_dispatch,
nest.flatten(self.boundaries))
values = nest.map_structure(ops.convert_to_tensor_v2,
values = nest.map_structure(ops.convert_to_tensor_v2_with_dispatch,
nest.flatten(self.values))
x_recomp = ops.convert_to_tensor_v2(step)
x_recomp = ops.convert_to_tensor_v2_with_dispatch(step)
for i, b in enumerate(boundaries):
if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
# We cast the boundaries to have the same type as the step
@@ -374,7 +374,7 @@ class PolynomialDecay(LearningRateSchedule):
def __call__(self, step):
with ops.name_scope_v2(self.name or "PolynomialDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2(
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
end_learning_rate = math_ops.cast(self.end_learning_rate, dtype)
@@ -494,7 +494,7 @@ class InverseTimeDecay(LearningRateSchedule):
def __call__(self, step):
with ops.name_scope_v2(self.name or "InverseTimeDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2(
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
@@ -588,7 +588,7 @@ class CosineDecay(LearningRateSchedule):
def __call__(self, step):
with ops.name_scope_v2(self.name or "CosineDecay"):
initial_learning_rate = ops.convert_to_tensor_v2(
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
@@ -687,7 +687,7 @@ class CosineDecayRestarts(LearningRateSchedule):
def __call__(self, step):
with ops.name_scope_v2(self.name or "SGDRDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2(
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
first_decay_steps = math_ops.cast(self.first_decay_steps, dtype)
@@ -824,7 +824,7 @@ class LinearCosineDecay(LearningRateSchedule):
def __call__(self, step):
with ops.name_scope_v2(self.name or "LinearCosineDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2(
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
@@ -950,7 +950,7 @@ class NoisyLinearCosineDecay(LearningRateSchedule):
def __call__(self, step):
with ops.name_scope_v2(self.name or "NoisyLinearCosineDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2(
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)


@@ -148,10 +148,11 @@ def piecewise_constant(x, boundaries, values, name=None):
the learning rate value across different invocations of optimizer functions.
@end_compatibility
"""
boundaries = nest.map_structure(ops.convert_to_tensor_v2,
boundaries = nest.map_structure(ops.convert_to_tensor_v2_with_dispatch,
nest.flatten(boundaries))
values = nest.map_structure(ops.convert_to_tensor_v2, nest.flatten(values))
x_recomp = ops.convert_to_tensor(x)
values = nest.map_structure(ops.convert_to_tensor_v2_with_dispatch,
nest.flatten(values))
x_recomp = ops.convert_to_tensor_v2_with_dispatch(x)
# Avoid explicit conversion to x's dtype. This could result in faulty
# comparisons, for example if floats are converted to integers.
for i, b in enumerate(boundaries):


@@ -122,7 +122,7 @@ class Nadam(optimizer_v2.OptimizerV2):
apply_state[(var_device, var_dtype)] = dict(
lr_t=lr_t,
neg_lr_t=-lr_t,
epsilon=ops.convert_to_tensor_v2(self.epsilon, var_dtype),
epsilon=ops.convert_to_tensor_v2_with_dispatch(self.epsilon, var_dtype),
beta_1_t=beta_1_t,
beta_2_t=beta_2_t,
m_t=m_t,


@@ -237,7 +237,7 @@ class OptimizerTest(test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def testComputeGradientsWithTensors(self):
with testing_utils.use_gpu():
x = ops.convert_to_tensor_v2(1.0)
x = ops.convert_to_tensor_v2_with_dispatch(1.0)
def f():
return x * x


@@ -167,7 +167,8 @@ class RMSprop(optimizer_v2.OptimizerV2):
apply_state[(var_device, var_dtype)].update(
dict(
neg_lr_t=-apply_state[(var_device, var_dtype)]["lr_t"],
epsilon=ops.convert_to_tensor_v2(self.epsilon, var_dtype),
epsilon=ops.convert_to_tensor_v2_with_dispatch(
self.epsilon, var_dtype),
rho=rho,
momentum=array_ops.identity(self._get_hyper("momentum", var_dtype)),
one_minus_rho=1. - rho))


@@ -111,7 +111,7 @@ def smart_resize(x, size, interpolation='bilinear'):
if len(size) != 2:
raise ValueError('Expected `size` to be a tuple of 2 integers, '
'but got: %s' % (size,))
img = ops.convert_to_tensor(x)
img = ops.convert_to_tensor_v2_with_dispatch(x)
if img.shape.rank is not None:
if img.shape.rank != 3:
raise ValueError(


@@ -507,7 +507,7 @@ class TestModelSavingAndLoadingV2(keras_parameterized.TestCase):
self.assertAllClose(
model.predict(input_arr),
loaded.signatures['predict'](ops.convert_to_tensor_v2(
loaded.signatures['predict'](ops.convert_to_tensor_v2_with_dispatch(
input_arr.astype('float32')))['predictions'])
feature = {
@@ -517,7 +517,7 @@ class TestModelSavingAndLoadingV2(keras_parameterized.TestCase):
example = example_pb2.Example(
features=feature_pb2.Features(feature=feature))
outputs = loaded.signatures['parse_and_predict'](
ops.convert_to_tensor_v2([example.SerializeToString()]))
ops.convert_to_tensor_v2_with_dispatch([example.SerializeToString()]))
self.assertAllClose(model.predict(input_arr), outputs['predictions'])
self.assertAllClose(model.layers[0](input_arr), outputs['layer_1_outputs'])


@@ -428,7 +428,7 @@ class ModelSubclassingTest(keras_parameterized.TestCase):
def call(self, inputs):
return inputs + self.b + self.c
x = ops.convert_to_tensor_v2(np.ones((10, 10), 'float32'))
x = ops.convert_to_tensor_v2_with_dispatch(np.ones((10, 10), 'float32'))
model = MyModel()
model(x)
self.assertEqual(1, len(model.trainable_weights))
@@ -444,7 +444,7 @@ class ModelSubclassingTest(keras_parameterized.TestCase):
def call(self, inputs):
return inputs + self.b + self.c
x = ops.convert_to_tensor_v2(np.ones((10, 10), 'float32'))
x = ops.convert_to_tensor_v2_with_dispatch(np.ones((10, 10), 'float32'))
model = MyModelCustomBuild()
model(x)
self.assertEqual(1, len(model.trainable_weights))
@@ -467,7 +467,7 @@ class ModelSubclassingTest(keras_parameterized.TestCase):
self.add_update(self.c.assign(inputs[1, :]))
return inputs + self.b + self.c
x = ops.convert_to_tensor_v2(np.ones((10, 10), 'float32'))
x = ops.convert_to_tensor_v2_with_dispatch(np.ones((10, 10), 'float32'))
model = MyModel()
model(x)


@@ -253,11 +253,11 @@ def compute_weighted_loss(losses,
ops.get_default_graph()._last_loss_reduction = reduction # pylint: disable=protected-access
if not isinstance(losses, keras_tensor.KerasTensor):
losses = ops.convert_to_tensor_v2(losses)
losses = ops.convert_to_tensor_v2_with_dispatch(losses)
input_dtype = losses.dtype
if not isinstance(sample_weight, keras_tensor.KerasTensor):
sample_weight = ops.convert_to_tensor_v2(sample_weight)
sample_weight = ops.convert_to_tensor_v2_with_dispatch(sample_weight)
# TODO(psv): Handle casting here in a better way, eg. if losses is float64
# we do not want to lose precision.


@@ -311,7 +311,8 @@ def update_confusion_matrix_variables(variables_to_update,
y_true = math_ops.cast(y_true, dtype=variable_dtype)
y_pred = math_ops.cast(y_pred, dtype=variable_dtype)
thresholds = ops.convert_to_tensor_v2(thresholds, dtype=variable_dtype)
thresholds = ops.convert_to_tensor_v2_with_dispatch(
thresholds, dtype=variable_dtype)
num_thresholds = thresholds.shape[0]
if multi_label:
one_thresh = math_ops.equal(


@@ -44,14 +44,17 @@ class TestIsSymbolicTensor(test.TestCase, parameterized.TestCase):
self.assertFalse(tf_utils.is_symbolic_tensor(
variables.Variable(name='blah', initial_value=0.)))
self.assertFalse(
tf_utils.is_symbolic_tensor(ops.convert_to_tensor_v2(0.)))
tf_utils.is_symbolic_tensor(
ops.convert_to_tensor_v2_with_dispatch(0.)))
self.assertFalse(tf_utils.is_symbolic_tensor(
sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])))
else:
self.assertTrue(tf_utils.is_symbolic_tensor(
variables.Variable(name='blah', initial_value=0.)))
self.assertTrue(tf_utils.is_symbolic_tensor(ops.convert_to_tensor_v2(0.)))
self.assertTrue(
tf_utils.is_symbolic_tensor(
ops.convert_to_tensor_v2_with_dispatch(0.)))
self.assertTrue(tf_utils.is_symbolic_tensor(
sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])))
@@ -61,7 +64,7 @@ class TestIsSymbolicTensor(test.TestCase, parameterized.TestCase):
class CustomClass(object):
def value(self):
return ops.convert_to_tensor_v2(42.)
return ops.convert_to_tensor_v2_with_dispatch(42.)
ops.register_tensor_conversion_function(
CustomClass, lambda value, **_: value.value())
@@ -72,7 +75,8 @@ class TestIsSymbolicTensor(test.TestCase, parameterized.TestCase):
self.assertFalse(tf_utils.is_symbolic_tensor(
variables.Variable(name='blah', initial_value=0.)))
self.assertFalse(
tf_utils.is_symbolic_tensor(ops.convert_to_tensor_v2(0.)))
tf_utils.is_symbolic_tensor(
ops.convert_to_tensor_v2_with_dispatch(0.)))
self.assertFalse(tf_utils.is_symbolic_tensor(
sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])))
@@ -80,7 +84,9 @@ class TestIsSymbolicTensor(test.TestCase, parameterized.TestCase):
else:
self.assertTrue(tf_utils.is_symbolic_tensor(
variables.Variable(name='blah', initial_value=0.)))
self.assertTrue(tf_utils.is_symbolic_tensor(ops.convert_to_tensor_v2(0.)))
self.assertTrue(
tf_utils.is_symbolic_tensor(
ops.convert_to_tensor_v2_with_dispatch(0.)))
self.assertTrue(tf_utils.is_symbolic_tensor(
sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])))
@@ -95,7 +101,7 @@ class TestIsSymbolicTensor(test.TestCase, parameterized.TestCase):
def __init__(self, input_):
self._input = input_
self.value = ops.convert_to_tensor_v2([[42.]])
self.value = ops.convert_to_tensor_v2_with_dispatch([[42.]])
@property
def dtype(self):
@@ -110,7 +116,7 @@ class TestIsSymbolicTensor(test.TestCase, parameterized.TestCase):
def __init__(self, fn, **kwargs):
def _fn(*fargs, **fkwargs):
d = fn(*fargs, **fkwargs)
x = ops.convert_to_tensor_v2(d)
x = ops.convert_to_tensor_v2_with_dispatch(d)
d.shape = x.shape
d.get_shape = x.get_shape
return d, x
@@ -138,7 +144,7 @@ class TestIsSymbolicTensor(test.TestCase, parameterized.TestCase):
model = keras.Model(model.inputs, model(model.outputs))
# Now we instantiate the model and verify we have a `Foo` object, not a
# `Tensor`.
y = model(ops.convert_to_tensor_v2([[7.]]))
y = model(ops.convert_to_tensor_v2_with_dispatch([[7.]]))
self.assertIsInstance(y, Foo)
# Confirm that (custom) loss sees `Foo` instance, not Tensor.
obtained_prediction_box = [None]