Remove some usages of ops.EagerTensor from Keras; specifically, the no-op usages (v1 Keras code never runs when there are EagerTensors, so v1-only code never needs to check for them).

PiperOrigin-RevId: 334210496
Change-Id: I0b2e070dcf38e3ba349241a5fb8941c47e7ee6ff
Tomer Kaftan 2020-09-28 12:12:14 -07:00 committed by TensorFlower Gardener
parent e31695e95f
commit 381dadbf32
5 changed files with 11 additions and 39 deletions
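One way to see why the isinstance changes below preserve behavior: in TF2, ops.EagerTensor is a subclass of ops.Tensor, so a check against ops.Tensor already matches eager tensors, and eager tensors convert to NumPy directly via .numpy(). A minimal sketch of that point (not part of this commit, written against the public tf API):

import numpy as np
import tensorflow as tf

# Under eager execution, tf.constant returns an EagerTensor.
x = tf.constant([[1.0, 2.0], [3.0, 4.0]])

# EagerTensor subclasses tf.Tensor, so a check against (EagerTensor, Tensor)
# is equivalent to checking tf.Tensor alone.
assert isinstance(x, tf.Tensor)

# No dedicated EagerTensor branch is needed to get a NumPy array either.
assert isinstance(x.numpy(), np.ndarray)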


@@ -789,7 +789,6 @@ class SymbolicSupportTest(keras_parameterized.TestCase):
with ops.Graph().as_default():
x1 = array_ops.ones((3, 3))
x2 = array_ops.ones((3, 3))
- self.assertIsInstance(x2, ops.EagerTensor)
with self.assertRaisesRegex(TypeError, 'Graph tensors'):
math_ops.matmul(x1, x2)


@@ -271,7 +271,7 @@ def convert_to_list(values, sparse_default_value=None):
values, default_value=sparse_default_value)
values = K.get_value(dense_tensor)
- if isinstance(values, (ops.EagerTensor, ops.Tensor)):
+ if isinstance(values, ops.Tensor):
values = K.get_value(values)
# We may get passed a ndarray or the code above may give us a ndarray.


@@ -153,10 +153,6 @@ def model_iteration(model,
use_steps = is_dataset or steps_per_epoch is not None
do_validation = val_inputs is not None
- # Convert Eager Tensors to NumPy arrays to support batching/shuffling.
- inputs, targets, sample_weights = training_utils_v1. \
-     convert_eager_tensors_to_numpy((inputs, targets, sample_weights))
# Prepare input data.
inputs = input_iterator or inputs
if validation_in_fit and prepared_feed_values_from_dataset:


@@ -186,9 +186,6 @@ class ConcatAggregator(Aggregator):
else:
self.results = np.concatenate(self.results, axis=0)
- if isinstance(self.results, ops.EagerTensor):
-   self.results = self.results._numpy()  # pylint: disable=protected-access
_COPY_THREADS = 4
_COPY_POOL = None
@@ -250,8 +247,6 @@ class SliceAggregator(Aggregator):
# initialization is effectively instantaneous.
shape = (self.num_samples,) + batch_element.shape[1:]
dtype = batch_element.dtype
- if isinstance(batch_element, ops.EagerTensor):
-   dtype = dtype.as_numpy_dtype
self.results = np.empty(shape=shape, dtype=dtype)
@@ -328,7 +323,7 @@ class OutputsAggregator(Aggregator):
# or a composite tensor's Value object. In either case, we can't
# allocate an array to hold the object - we'll handle it later.
self.results.append(ConcatAggregator(self.batch_size))
- elif isinstance(batch_element, (np.ndarray, ops.EagerTensor)):
+ elif isinstance(batch_element, np.ndarray):
self.results.append(
(ConcatAggregator(self.batch_size) if self.use_steps else
SliceAggregator(self.num_samples, self.batch_size)))
@@ -866,12 +861,13 @@ def standardize_weights(y,
'you should pass a 2D sample_weight array.')
else:
if sample_weight is not None and len(sample_weight.shape) != 1:
- raise ValueError('Found a sample_weight array with shape {}. In order to '
-                  'use timestep-wise sample weights, you should specify '
-                  'sample_weight_mode="temporal" in compile(); found "{}" '
-                  'instead. If you just mean to use sample-wise weights, '
-                  'make sure your sample_weight array is 1D.'
-                  .format(sample_weight.shape, sample_weight_mode))
+ raise ValueError(
+     'Found a sample_weight array with shape {}. In order to '
+     'use timestep-wise sample weights, you should specify '
+     'sample_weight_mode="temporal" in compile(); found "{}" '
+     'instead. If you just mean to use sample-wise weights, '
+     'make sure your sample_weight array is 1D.'.format(
+         sample_weight.shape, sample_weight_mode))
if sample_weight is not None:
if len(sample_weight.shape) > len(y.shape):
@@ -1628,7 +1624,7 @@ class ModelInputs(object):
if v.ndim == 1:
v = np.expand_dims(v, 1)
- if isinstance(v, (np.ndarray, ops.EagerTensor)):
+ if isinstance(v, np.ndarray):
# We fix the placeholder shape except the batch size.
# This is suboptimal, but it is the best we can do with the info
# we have. The user should call `model._set_inputs(placeholders)`
@@ -1675,25 +1671,6 @@ def generic_output_names(outputs_list):
return ['output_%d' % (i + 1) for i in range(len(outputs_list))]
- def convert_eager_tensors_to_numpy(structure):
-   """Convert every EagerTensor in `structure` to NumPy.
-   Arguments:
-     structure: An arbitrary structure of elements to be converted to NumPy
-       arrays.
-   Returns:
-     An identical structure with EagerTensors converted to NumPy arrays.
-   """
-   def _convert(element):
-     if isinstance(element, ops.EagerTensor):
-       return element.numpy()
-     return element
-   return nest.map_structure(_convert, structure)
def should_run_validation(validation_freq, epoch):
"""Checks if validation should be run this epoch.


@@ -3141,7 +3141,7 @@ class _TrainingTarget(object):
def _is_symbolic_tensor(x):
- return tensor_util.is_tensor(x) and not isinstance(x, ops.EagerTensor)
+ return tensor_util.is_tensor(x)
def _convert_scipy_sparse_tensor(value, expected_input):