Renames the run_distributed flag to experimental_run_tf_function and updates the tests generated by the keras_all_modes decorator to reflect the new name.
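For illustration, a minimal before/after sketch of the rename at a compile() call site (a hedged example; `model`, `optimizer`, and `loss` stand for any Keras model and its compile arguments, as in the diffs below):

    # Before: the execution-path toggle was exposed as `run_distributed`.
    model.compile(optimizer, loss, metrics=['mae'], run_distributed=False)

    # After: the same toggle is spelled `experimental_run_tf_function`.
    model.compile(
        optimizer,
        loss,
        metrics=['mae'],
        experimental_run_tf_function=False)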

PiperOrigin-RevId: 260767064
Author: Pavithra Vijay
Date: 2019-07-30 12:06:33 -07:00
Committed by: TensorFlower Gardener
Parent: 8b0f14320b
Commit: c7933ce9f3
62 changed files with 1089 additions and 849 deletions
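The test changes below follow two recurring patterns; a hedged sketch of both (names such as `combinations`, `strategies_minus_tpu`, and `testing_utils` are taken from the diffs, while `test_example` and the compile arguments are illustrative placeholders):

    # Pattern 1: test combinations are parameterized over the renamed flag,
    # so each test method receives `experimental_run_tf_function` directly.
    @combinations.generate(
        combinations.combine(
            distribution=strategies_minus_tpu,
            mode=['graph', 'eager'],
            experimental_run_tf_function=[True, False]))
    def test_example(self, distribution, experimental_run_tf_function):
      ...

    # Pattern 2: tests generated by the keras_all_modes decorator now query
    # testing_utils.should_run_tf_function() (formerly should_run_distributed())
    # and pass the result through compile().
    model.compile(
        optimizer,
        loss,
        run_eagerly=testing_utils.should_run_eagerly(),
        experimental_run_tf_function=testing_utils.should_run_tf_function())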


@ -374,7 +374,7 @@ class TestDistributionStrategyWithNumpyArrays(test.TestCase,
loss,
metrics=metrics,
distribute=distribution,
run_distributed=False)
experimental_run_tf_function=False)
inputs = np.zeros((64, 3), dtype=np.float32)
targets = np.zeros((64, 4), dtype=np.float32)
@ -405,7 +405,10 @@ class TestDistributionStrategyWithNumpyArrays(test.TestCase,
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(
optimizer, loss, distribute=distribution, run_distributed=False)
optimizer,
loss,
distribute=distribution,
experimental_run_tf_function=False)
input_a_np = np.asarray(np.random.random((64, 3)), dtype=np.float32)
input_b_np = np.asarray(np.random.random((64, 5)), dtype=np.float32)
@ -439,7 +442,10 @@ class TestDistributionStrategyWithNumpyArrays(test.TestCase,
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(
optimizer, loss, distribute=distribution, run_distributed=False)
optimizer,
loss,
distribute=distribution,
experimental_run_tf_function=False)
inputs = np.zeros((20, 3), np.float32)
targets = np.zeros((20, 4), np.float32)
@ -456,7 +462,10 @@ class TestDistributionStrategyWithNumpyArrays(test.TestCase,
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(
optimizer, loss, distribute=distribution, run_distributed=False)
optimizer,
loss,
distribute=distribution,
experimental_run_tf_function=False)
# We take 6 input samples with each input having a dimension of 3 or 5.
input_a_np = np.asarray(np.random.random((6, 3)), dtype=np.float32)
@ -491,7 +500,7 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
loss,
metrics=metrics,
distribute=distribution,
run_distributed=False)
experimental_run_tf_function=False)
dataset = get_dataset(distribution)
@ -511,7 +520,7 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()],
distribute=distribution,
run_distributed=False)
experimental_run_tf_function=False)
interleaved_model = get_model()
interleaved_model.set_weights(user_controlled_model.get_weights())
@ -520,7 +529,7 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()],
distribute=distribution,
run_distributed=False)
experimental_run_tf_function=False)
dataset = get_dataset(distribution)
@ -566,7 +575,7 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
loss,
metrics=metrics,
distribute=distribution,
run_distributed=False)
experimental_run_tf_function=False)
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 5))
@ -603,7 +612,7 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
loss,
metrics=metrics,
distribute=distribution,
run_distributed=False)
experimental_run_tf_function=False)
dataset = get_dataset(distribution)
@ -618,7 +627,10 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
loss = 'mse'
model.compile(
optimizer(), loss, distribute=distribution, run_distributed=False)
optimizer(),
loss,
distribute=distribution,
experimental_run_tf_function=False)
dataset = get_dataset(distribution)
@ -632,7 +644,10 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(
optimizer, loss, distribute=distribution, run_distributed=False)
optimizer,
loss,
distribute=distribution,
experimental_run_tf_function=False)
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
@ -661,7 +676,10 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(
optimizer, loss, distribute=distribution, run_distributed=False)
optimizer,
loss,
distribute=distribution,
experimental_run_tf_function=False)
# Wrong input shape
inputs = np.zeros((10, 5), dtype=np.float32)
@ -689,7 +707,10 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(
optimizer, loss, distribute=distribution, run_distributed=False)
optimizer,
loss,
distribute=distribution,
experimental_run_tf_function=False)
# User forgets to batch the dataset
inputs = np.zeros((10, 3), dtype=np.float32)
@ -726,7 +747,7 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
loss,
metrics=metrics,
distribute=distribution,
run_distributed=False)
experimental_run_tf_function=False)
batch_size = 8
if isinstance(distribution, mirrored_strategy.CoreMirroredStrategy):
@ -762,7 +783,10 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
optimizer = gradient_descent_keras.SGD(0.01)
loss = 'mse'
model.compile(
optimizer, loss, distribute=distribution, run_distributed=False)
optimizer,
loss,
distribute=distribution,
experimental_run_tf_function=False)
dataset = get_dataset(distribution)
@ -801,7 +825,7 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
loss,
metrics=metrics,
distribute=distribution,
run_distributed=False)
experimental_run_tf_function=False)
dataset = get_dataset(distribution)
@ -861,7 +885,7 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
loss,
metrics=metrics,
distribute=distribution,
run_distributed=False)
experimental_run_tf_function=False)
dataset = get_dataset(distribution)
@ -905,7 +929,7 @@ class TestDistributionStrategyWithLossMasking(test.TestCase,
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
distribute=distribution,
run_distributed=False)
experimental_run_tf_function=False)
y = np.array([[[1], [1]], [[1], [1]]])
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
@ -928,7 +952,7 @@ class TestDistributionStrategyWithNormalizationLayer(
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
distribute=distribution,
run_distributed=False)
experimental_run_tf_function=False)
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
@ -974,7 +998,7 @@ class TestDistributionStrategyCorrectness(test.TestCase,
optimizer=gradient_descent.GradientDescentOptimizer(0.5),
metrics=[keras.metrics.BinaryAccuracy()],
distribute=distribution,
run_distributed=False)
experimental_run_tf_function=False)
batch_size = 64
if not distributed_training_utils.global_batch_size_supported(
@ -1001,7 +1025,7 @@ class TestDistributionStrategyCorrectness(test.TestCase,
metrics=['accuracy', keras.metrics.BinaryAccuracy()],
optimizer=gradient_descent.GradientDescentOptimizer(0.001),
distribute=distribution,
run_distributed=False)
experimental_run_tf_function=False)
# verify correctness of stateful and stateless metrics.
x = np.ones((100, 4)).astype('float32')
@ -1078,7 +1102,7 @@ class TestDistributionStrategyCorrectness(test.TestCase,
optimizer=gradient_descent_keras.SGD(0.5),
metrics=['mse'],
distribute=with_distribution,
run_distributed=False)
experimental_run_tf_function=False)
training_inputs, eval_inputs, predict_inputs = (
get_correctness_test_inputs(use_numpy, use_validation_data,


@ -150,7 +150,7 @@ class AttentionMechanismTest(test.TestCase, parameterized.TestCase):
y = np.random.randn(self.batch, self.timestep)
model = keras.models.Model([inputs, query, state], score)
# TODO(b/138592586): Run with single-execution-path
model.compile("rmsprop", "mse", run_distributed=False)
model.compile("rmsprop", "mse", experimental_run_tf_function=False)
model.fit([x, self.query, self.state], (y, y))
y_ref = model.predict_on_batch([x_test, self.query, self.state])


@ -34,28 +34,29 @@ class KerasExperimentalSaveLoadTest(test_base.TestSavedModelBase):
saved_model.export_saved_model(model, saved_dir)
def _load_and_run_model(self, distribution, saved_dir, predict_dataset,
output_name, run_distributed):
output_name, experimental_run_tf_function):
restored_keras_model = saved_model.load_from_saved_model(saved_dir)
restored_keras_model._run_distributed = run_distributed
restored_keras_model._experimental_run_tf_function = (
experimental_run_tf_function)
return restored_keras_model.predict(
predict_dataset, steps=test_base.PREDICT_STEPS)
@combinations.generate(test_base.simple_models_with_strategies())
def test_save_no_strategy_restore_strategy(self, model_and_input,
distribution, run_distributed):
self.run_test_save_no_strategy_restore_strategy(model_and_input,
distribution,
run_distributed)
distribution,
experimental_run_tf_function):
self.run_test_save_no_strategy_restore_strategy(
model_and_input, distribution, experimental_run_tf_function)
@combinations.generate(
combinations.times(test_base.simple_models_with_strategies(),
combinations.combine(save_in_scope=[True, False])))
def test_save_strategy_restore_no_strategy(self, model_and_input,
distribution, save_in_scope,
run_distributed):
self.run_test_save_strategy_restore_no_strategy(model_and_input,
distribution, save_in_scope,
run_distributed)
experimental_run_tf_function):
self.run_test_save_strategy_restore_no_strategy(
model_and_input, distribution, save_in_scope,
experimental_run_tf_function)
@combinations.generate(
combinations.times(test_base.simple_models_with_strategy_pairs(),
@ -63,11 +64,13 @@ class KerasExperimentalSaveLoadTest(test_base.TestSavedModelBase):
def test_save_strategy_restore_strategy(self, model_and_input,
distribution_for_saving,
distribution_for_restoring,
save_in_scope, run_distributed):
save_in_scope,
experimental_run_tf_function):
self.run_test_save_strategy_restore_strategy(model_and_input,
distribution_for_saving,
distribution_for_restoring,
save_in_scope, run_distributed)
save_in_scope,
experimental_run_tf_function)
if __name__ == '__main__':


@ -34,31 +34,32 @@ class KerasSaveLoadTest(test_base.TestSavedModelBase):
model.save(saved_dir, save_format='tf')
def _load_and_run_model(self, distribution, saved_dir, predict_dataset,
output_name, run_distributed):
output_name, experimental_run_tf_function):
restored_keras_model = save.load_model(saved_dir)
restored_keras_model._run_distributed = run_distributed
restored_keras_model._experimental_run_tf_function = (
experimental_run_tf_function)
return restored_keras_model.predict(
predict_dataset, steps=test_base.PREDICT_STEPS)
@combinations.generate(test_base.simple_models_with_strategies())
def test_save_no_strategy_restore_strategy(self, model_and_input,
distribution, run_distributed):
self.run_test_save_no_strategy_restore_strategy(model_and_input,
distribution,
run_distributed)
distribution,
experimental_run_tf_function):
self.run_test_save_no_strategy_restore_strategy(
model_and_input, distribution, experimental_run_tf_function)
@combinations.generate(
combinations.times(test_base.simple_models_with_strategies(),
combinations.combine(save_in_scope=[True, False])))
def test_save_strategy_restore_no_strategy(self, model_and_input,
distribution, save_in_scope,
run_distributed):
experimental_run_tf_function):
if save_in_scope:
self.skipTest(('b/134703272 - Saving model in tf.distribute.Strategy ',
'scope is not supported.'))
self.run_test_save_strategy_restore_no_strategy(model_and_input,
distribution, save_in_scope,
run_distributed)
self.run_test_save_strategy_restore_no_strategy(
model_and_input, distribution, save_in_scope,
experimental_run_tf_function)
@combinations.generate(
combinations.times(test_base.simple_models_with_strategy_pairs(),
@ -66,14 +67,16 @@ class KerasSaveLoadTest(test_base.TestSavedModelBase):
def test_save_strategy_restore_strategy(self, model_and_input,
distribution_for_saving,
distribution_for_restoring,
save_in_scope, run_distributed):
save_in_scope,
experimental_run_tf_function):
if save_in_scope:
self.skipTest(('b/134703272 - Saving model in tf.distribute.Strategy ',
'scope is not supported.'))
self.run_test_save_strategy_restore_strategy(model_and_input,
distribution_for_saving,
distribution_for_restoring,
save_in_scope, run_distributed)
save_in_scope,
experimental_run_tf_function)
if __name__ == '__main__':


@ -49,13 +49,14 @@ class SimpleFunctionalModel(model_collection_base.ModelAndInput):
model = keras.Model(inputs=x, outputs=y)
optimizer = gradient_descent.SGD(learning_rate=0.001)
run_distributed = kwargs.pop('run_distributed', None)
assert run_distributed is not None
experimental_run_tf_function = kwargs.pop('experimental_run_tf_function',
None)
assert experimental_run_tf_function is not None
model.compile(
loss='mse',
metrics=['mae'],
optimizer=optimizer,
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
return model, output_name
@ -77,13 +78,14 @@ class SimpleSequentialModel(model_collection_base.ModelAndInput):
5, dtype=dtypes.float32, name=output_name, input_dim=3)
model.add(y)
optimizer = gradient_descent.SGD(learning_rate=0.001)
run_distributed = kwargs.pop('run_distributed', None)
assert run_distributed is not None
experimental_run_tf_function = kwargs.pop('experimental_run_tf_function',
None)
assert experimental_run_tf_function is not None
model.compile(
loss='mse',
metrics=['mae'],
optimizer=optimizer,
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
return model, output_name
@ -112,14 +114,15 @@ class SimpleSubclassModel(model_collection_base.ModelAndInput):
def get_model(self, **kwargs):
model = _SimpleModel()
optimizer = gradient_descent.SGD(learning_rate=0.001)
run_distributed = kwargs.pop('run_distributed', None)
assert run_distributed is not None
experimental_run_tf_function = kwargs.pop('experimental_run_tf_function',
None)
assert experimental_run_tf_function is not None
model.compile(
loss='mse',
metrics=['mae'],
cloning=False,
optimizer=optimizer,
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
return model, model.output_name


@ -42,30 +42,30 @@ class SavedModelSaveAndLoadTest(test_base.TestSavedModelBase):
keras_saved_model.export_saved_model(model, saved_dir, serving_only=True)
def _load_and_run_model(self, distribution, saved_dir, predict_dataset,
output_name, run_distributed):
output_name, experimental_run_tf_function):
return test_base.load_and_run_with_saved_model_api(distribution, saved_dir,
predict_dataset,
output_name)
@combinations.generate(test_base.simple_models_with_strategies())
def test_save_no_strategy_restore_strategy(self, model_and_input,
distribution, run_distributed):
self.run_test_save_no_strategy_restore_strategy(model_and_input,
distribution,
run_distributed)
distribution,
experimental_run_tf_function):
self.run_test_save_no_strategy_restore_strategy(
model_and_input, distribution, experimental_run_tf_function)
@combinations.generate(
combinations.times(test_base.simple_models_with_strategies(),
combinations.combine(save_in_scope=[True, False])))
def test_save_strategy_restore_no_strategy(self, model_and_input,
distribution, save_in_scope,
run_distributed):
experimental_run_tf_function):
if save_in_scope:
self.skipTest(('Saving model within tf.distribute.Strategy scope is not ',
'supported.'))
self.run_test_save_strategy_restore_no_strategy(model_and_input,
distribution, save_in_scope,
run_distributed)
self.run_test_save_strategy_restore_no_strategy(
model_and_input, distribution, save_in_scope,
experimental_run_tf_function)
@combinations.generate(
combinations.times(test_base.simple_models_with_strategy_pairs(),
@ -73,14 +73,16 @@ class SavedModelSaveAndLoadTest(test_base.TestSavedModelBase):
def test_save_strategy_restore_strategy(self, model_and_input,
distribution_for_saving,
distribution_for_restoring,
save_in_scope, run_distributed):
save_in_scope,
experimental_run_tf_function):
if save_in_scope:
self.skipTest(('Saving model within tf.distribute.Strategy scope is not ',
'supported.'))
self.run_test_save_strategy_restore_strategy(model_and_input,
distribution_for_saving,
distribution_for_restoring,
save_in_scope, run_distributed)
save_in_scope,
experimental_run_tf_function)
if __name__ == '__main__':


@ -34,30 +34,30 @@ class SavedModelSaveAndLoadTest(test_base.TestSavedModelBase):
saved_model.save(model, saved_dir)
def _load_and_run_model(self, distribution, saved_dir, predict_dataset,
output_name, run_distributed):
output_name, experimental_run_tf_function):
return test_base.load_and_run_with_saved_model_api(distribution, saved_dir,
predict_dataset,
output_name)
@combinations.generate(test_base.simple_models_with_strategies())
def test_save_no_strategy_restore_strategy(self, model_and_input,
distribution, run_distributed):
self.run_test_save_no_strategy_restore_strategy(model_and_input,
distribution,
run_distributed)
distribution,
experimental_run_tf_function):
self.run_test_save_no_strategy_restore_strategy(
model_and_input, distribution, experimental_run_tf_function)
@combinations.generate(
combinations.times(test_base.simple_models_with_strategies(),
combinations.combine(save_in_scope=[True, False])))
def test_save_strategy_restore_no_strategy(self, model_and_input,
distribution, save_in_scope,
run_distributed):
experimental_run_tf_function):
if save_in_scope:
self.skipTest(('Saving model within tf.distribute.Strategy scope is not ',
'supported.'))
self.run_test_save_strategy_restore_no_strategy(model_and_input,
distribution, save_in_scope,
run_distributed)
self.run_test_save_strategy_restore_no_strategy(
model_and_input, distribution, save_in_scope,
experimental_run_tf_function)
@combinations.generate(
combinations.times(test_base.simple_models_with_strategy_pairs(),
@ -65,14 +65,16 @@ class SavedModelSaveAndLoadTest(test_base.TestSavedModelBase):
def test_save_strategy_restore_strategy(self, model_and_input,
distribution_for_saving,
distribution_for_restoring,
save_in_scope, run_distributed):
save_in_scope,
experimental_run_tf_function):
if save_in_scope:
self.skipTest(('Saving model within tf.distribute.Strategy scope is not ',
'supported.'))
self.run_test_save_strategy_restore_strategy(model_and_input,
distribution_for_saving,
distribution_for_restoring,
save_in_scope, run_distributed)
save_in_scope,
experimental_run_tf_function)
if __name__ == '__main__':


@ -63,7 +63,7 @@ def simple_models_with_strategies():
model_and_input=simple_models,
distribution=strategies_minus_tpu,
mode=['eager'],
run_distributed=[True, False])
experimental_run_tf_function=[True, False])
def simple_models_with_strategy_pairs():
@ -72,7 +72,7 @@ def simple_models_with_strategy_pairs():
distribution_for_saving=strategies_minus_tpu,
distribution_for_restoring=strategies_minus_tpu,
mode=['eager'],
run_distributed=[True, False])
experimental_run_tf_function=[True, False])
def load_and_run_with_saved_model_api(distribution, saved_dir, predict_dataset,
@ -118,7 +118,7 @@ class TestSavedModelBase(test.TestCase, parameterized.TestCase):
raise NotImplementedError('must be implemented in descendants')
def _load_and_run_model(self, distribution, saved_dir, predict_dataset,
output_name, run_distributed):
output_name, experimental_run_tf_function):
"""Load the model and run 1 step of predict with it.
This method must be implemented by the subclasses.
@ -131,7 +131,8 @@ class TestSavedModelBase(test.TestCase, parameterized.TestCase):
cross_replica context.
output_name: the string representing the name of the output layer of the
model.
run_distributed: Whether to use the v2 execution path for models.
experimental_run_tf_function: Whether to use the single execution path
for models.
"""
raise NotImplementedError('must be implemented in descendants')
@ -152,13 +153,14 @@ class TestSavedModelBase(test.TestCase, parameterized.TestCase):
return predict_dataset
def run_test_save_no_strategy_restore_strategy(self, model_and_input,
distribution, run_distributed):
distribution,
experimental_run_tf_function):
"""Save a model without DS, and restore it with DS."""
saved_dir = os.path.join(self.get_temp_dir(), '0')
model, output_name = model_and_input.get_model(
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
x_train, y_train, x_predict = model_and_input.get_data()
batch_size = model_and_input.get_batch_size()
@ -174,20 +176,20 @@ class TestSavedModelBase(test.TestCase, parameterized.TestCase):
saved_dir=saved_dir,
predict_dataset=predict_dataset,
output_name=output_name,
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
self.assertAllClose(result_before_save, result_after_save, atol=_TOLERANCE)
def run_test_save_strategy_restore_no_strategy(self, model_and_input,
distribution, save_in_scope,
run_distributed):
experimental_run_tf_function):
"""Save a model with DS, and restore it without DS."""
saved_dir = os.path.join(self.get_temp_dir(), '1')
with distribution.scope():
model, output_name = model_and_input.get_model(
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
x_train, y_train, x_predict = model_and_input.get_data()
batch_size = model_and_input.get_batch_size()
@ -206,21 +208,22 @@ class TestSavedModelBase(test.TestCase, parameterized.TestCase):
saved_dir=saved_dir,
predict_dataset=predict_dataset,
output_name=output_name,
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
self.assertAllClose(result_before_save, load_result, atol=_TOLERANCE)
def run_test_save_strategy_restore_strategy(self, model_and_input,
distribution_for_saving,
distribution_for_restoring,
save_in_scope, run_distributed):
save_in_scope,
experimental_run_tf_function):
"""Save a model with DS, and restore it with potentially different DS."""
saved_dir = os.path.join(self.get_temp_dir(), '2')
with distribution_for_saving.scope():
model, output_name = model_and_input.get_model(
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
x_train, y_train, x_predict = model_and_input.get_data()
batch_size = model_and_input.get_batch_size()
@ -241,6 +244,6 @@ class TestSavedModelBase(test.TestCase, parameterized.TestCase):
saved_dir=saved_dir,
predict_dataset=predict_dataset,
output_name=output_name,
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
self.assertAllClose(result_before_save, load_result, atol=_TOLERANCE)


@ -135,7 +135,7 @@ class CallbackCountsTest(keras_parameterized.TestCase):
adam.AdamOptimizer(0.001),
'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model
@parameterized.named_parameters(('with_numpy', _get_numpy()),
@ -238,7 +238,7 @@ class KerasCallbacksTest(keras_parameterized.TestCase):
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model
@keras_parameterized.run_with_all_model_types
@ -1292,7 +1292,7 @@ class TestTensorBoardV2(keras_parameterized.TestCase):
opt,
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model
def test_TensorBoard_default_logdir(self):
@ -1526,7 +1526,7 @@ class TestTensorBoardV2NonParameterizedTest(keras_parameterized.TestCase):
opt,
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model
def fitModelAndAssertKerasModelWritten(self, model):


@ -249,10 +249,10 @@ def all_strategy_combinations_plus_run_distributed():
return (combinations.combine(
distribution=strategies_minus_tpu,
mode=['graph', 'eager'],
run_distributed=[True, False]) + combinations.combine(
experimental_run_tf_function=[True, False]) + combinations.combine(
distribution=tpu_strategies,
mode=['graph', 'eager'],
run_distributed=[False]))
experimental_run_tf_function=[False]))
def all_strategy_minus_default_and_tpu_combinations():
@ -285,11 +285,11 @@ def strategy_and_optimizer_combinations():
strategy_combinations.gradient_descent_optimizer_keras_v2_fn,
strategy_combinations.rmsprop_optimizer_keras_v2_fn
],
run_distributed=[True, False]))
experimental_run_tf_function=[True, False]))
tpu_strategies_graph = combinations.combine(
distribution=tpu_strategies,
mode=['graph'],
run_distributed=[True],
experimental_run_tf_function=[True],
optimizer=[
strategy_combinations.adagrad_optimizer_v1_fn,
strategy_combinations.adam_optimizer_v1_fn,
@ -303,7 +303,7 @@ def strategy_and_optimizer_combinations():
tpu_strategies_eager = combinations.combine(
distribution=tpu_strategies,
mode=['eager'],
run_distributed=[False],
experimental_run_tf_function=[False],
optimizer=[
strategy_combinations.adagrad_optimizer_keras_v2_fn,
strategy_combinations.adam_optimizer_keras_v2_fn,
@ -430,7 +430,8 @@ class TestDistributionStrategyWithNumpyArrays(test.TestCase,
distribution, input_64_samples, steps=10, batch_size=13)
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def test_calling_model_with_numpy_arrays(self, distribution, run_distributed):
def test_calling_model_with_numpy_arrays(self, distribution,
experimental_run_tf_function):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
@ -439,7 +440,10 @@ class TestDistributionStrategyWithNumpyArrays(test.TestCase,
loss = 'mse'
metrics = ['mae']
model.compile(
optimizer, loss, metrics=metrics, run_distributed=run_distributed)
optimizer,
loss,
metrics=metrics,
experimental_run_tf_function=experimental_run_tf_function)
inputs = np.zeros((64, 3), dtype=np.float32)
targets = np.zeros((64, 4), dtype=np.float32)
@ -463,14 +467,17 @@ class TestDistributionStrategyWithNumpyArrays(test.TestCase,
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def test_calling_model_with_nested_numpy_arrays(self, distribution,
run_distributed):
experimental_run_tf_function):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(learning_rate=0.001)
model = multi_input_output_model()
loss = 'mse'
model.compile(optimizer, loss, run_distributed=run_distributed)
model.compile(
optimizer,
loss,
experimental_run_tf_function=experimental_run_tf_function)
input_a_np = np.asarray(np.random.random((64, 3)), dtype=np.float32)
input_b_np = np.asarray(np.random.random((64, 5)), dtype=np.float32)
@ -495,13 +502,17 @@ class TestDistributionStrategyWithNumpyArrays(test.TestCase,
combinations.combine(
distribution=strategies_minus_tpu,
mode=['graph', 'eager'],
run_distributed=[True, False]))
def test_numpy_with_sample_weights(self, distribution, run_distributed):
experimental_run_tf_function=[True, False]))
def test_numpy_with_sample_weights(self, distribution,
experimental_run_tf_function):
with self.cached_session(), distribution.scope():
model = get_sample_weights_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, run_distributed=run_distributed)
model.compile(
optimizer,
loss,
experimental_run_tf_function=experimental_run_tf_function)
inputs = np.array([[0], [1], [2], [3]], np.float32)
targets = np.array([[2], [4], [6], [8]], np.float32)
@ -532,14 +543,18 @@ class TestDistributionStrategyWithNumpyArrays(test.TestCase,
self.assertAllClose(result, 13.5)
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def test_flatten_predict_outputs(self, distribution, run_distributed):
def test_flatten_predict_outputs(self, distribution,
experimental_run_tf_function):
with self.cached_session():
with distribution.scope():
model = multi_input_output_model()
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, run_distributed=run_distributed)
model.compile(
optimizer,
loss,
experimental_run_tf_function=experimental_run_tf_function)
# We take 6 input samples with each input having a dimension of 3 or 5.
input_a_np = np.asarray(np.random.random((6, 3)), dtype=np.float32)
@ -600,9 +615,11 @@ class TestDistributionStrategyWithNumpyArrays(test.TestCase,
rtol=1e-5)
@combinations.generate(
combinations.times(tpu_strategy_combinations_graph_only(),
combinations.combine(run_distributed=[True, False])))
def test_predict_with_partial_batch(self, distribution, run_distributed):
combinations.times(
tpu_strategy_combinations_graph_only(),
combinations.combine(experimental_run_tf_function=[True, False])))
def test_predict_with_partial_batch(self, distribution,
experimental_run_tf_function):
with self.cached_session():
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
@ -610,7 +627,9 @@ class TestDistributionStrategyWithNumpyArrays(test.TestCase,
with distribution.scope():
model_with_ds_strategy = get_model()
model_with_ds_strategy.compile(
optimizer, loss, run_distributed=run_distributed)
optimizer,
loss,
experimental_run_tf_function=experimental_run_tf_function)
cpu_model = get_model()
cpu_model.compile(optimizer, loss)
@ -661,10 +680,11 @@ class TestDistributionStrategyWithNumpyArrays(test.TestCase,
model.evaluate(inputs, steps=1)
@combinations.generate(
combinations.times(tpu_strategy_combinations_graph_only(),
combinations.combine(run_distributed=[True, False])))
combinations.times(
tpu_strategy_combinations_graph_only(),
combinations.combine(experimental_run_tf_function=[True, False])))
def test_predict_multi_output_model_with_partial_batch(
self, distribution, run_distributed):
self, distribution, experimental_run_tf_function):
with self.cached_session():
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
@ -672,7 +692,9 @@ class TestDistributionStrategyWithNumpyArrays(test.TestCase,
with distribution.scope():
model_with_ds_strategy = simple_multi_inputs_multi_outputs_model()
model_with_ds_strategy.compile(
optimizer, loss, run_distributed=run_distributed)
optimizer,
loss,
experimental_run_tf_function=experimental_run_tf_function)
cpu_model = simple_multi_inputs_multi_outputs_model()
cpu_model.compile(optimizer, loss)
@ -699,7 +721,8 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
parameterized.TestCase):
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def test_calling_model_on_same_dataset(self, distribution, run_distributed):
def test_calling_model_on_same_dataset(self, distribution,
experimental_run_tf_function):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
@ -708,7 +731,10 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(
optimizer, loss, metrics=metrics, run_distributed=run_distributed)
optimizer,
loss,
metrics=metrics,
experimental_run_tf_function=experimental_run_tf_function)
dataset = get_dataset(distribution)
@ -730,8 +756,8 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
model.predict(get_predict_dataset(distribution), steps=2)
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def test_model_interleaved_eval_same_as_direct_eval(self, distribution,
run_distributed):
def test_model_interleaved_eval_same_as_direct_eval(
self, distribution, experimental_run_tf_function):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
@ -740,7 +766,7 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
optimizer_fn(0.001),
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()],
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
interleaved_model = get_model()
interleaved_model.set_weights(user_controlled_model.get_weights())
@ -748,7 +774,7 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
optimizer_fn(0.001),
loss='mse',
metrics=['mae', keras.metrics.CategoricalAccuracy()],
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
dataset = get_dataset(distribution)
@ -784,7 +810,7 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def test_fit_with_tuple_and_dict_dataset_inputs(self, distribution,
run_distributed):
experimental_run_tf_function):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
@ -793,7 +819,10 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(
optimizer, loss, metrics=metrics, run_distributed=run_distributed)
optimizer,
loss,
metrics=metrics,
experimental_run_tf_function=experimental_run_tf_function)
input_a_np = np.random.random((10, 3)).astype('float32')
input_b_np = np.random.random((10, 5)).astype('float32')
@ -820,7 +849,7 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def test_fit_with_dictionary_in_the_dataset_b135161171(
self, distribution, run_distributed):
self, distribution, experimental_run_tf_function):
def custom_loss(predict, label, weight):
bce = keras.losses.binary_crossentropy(label, predict)
@ -839,7 +868,9 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
inputs=[input_img, input_lbl, input_weight],
outputs=[predict, my_loss])
model.add_loss(model.get_layer('my_loss').output)
model.compile(optimizer='adam', run_distributed=run_distributed)
model.compile(
optimizer='adam',
experimental_run_tf_function=experimental_run_tf_function)
def map_fn(img, lbl, weight):
inputs = {'img': img, 'lbl': lbl, 'weight': weight}
@ -857,7 +888,7 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def test_fit_eval_and_predict_methods_on_dataset_without_steps(
self, distribution, run_distributed):
self, distribution, experimental_run_tf_function):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
@ -866,7 +897,10 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(
optimizer, loss, metrics=metrics, run_distributed=run_distributed)
optimizer,
loss,
metrics=metrics,
experimental_run_tf_function=experimental_run_tf_function)
inputs = np.zeros((1000, 3), dtype=np.float32)
targets = np.zeros((1000, 4), dtype=np.float32)
@ -890,10 +924,11 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
predict_with_numpy, predict_with_ds, atol=1e-4, rtol=1e-4)
@combinations.generate(
combinations.times(strategy_minus_tpu_combinations(),
combinations.combine(run_distributed=[True, False])))
combinations.times(
strategy_minus_tpu_combinations(),
combinations.combine(experimental_run_tf_function=[True, False])))
def test_on_dataset_with_unknown_cardinality_without_steps(
self, distribution, run_distributed, mode):
self, distribution, experimental_run_tf_function, mode):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
@ -902,7 +937,10 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(
optimizer, loss, metrics=metrics, run_distributed=run_distributed)
optimizer,
loss,
metrics=metrics,
experimental_run_tf_function=experimental_run_tf_function)
inputs = np.zeros((1000, 3), dtype=np.float32)
targets = np.zeros((1000, 4), dtype=np.float32)
@ -943,10 +981,11 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
rtol=1e-4)
@combinations.generate(
combinations.times(tpu_strategy_combinations(),
combinations.combine(run_distributed=[True, False])))
combinations.times(
tpu_strategy_combinations(),
combinations.combine(experimental_run_tf_function=[True, False])))
def test_on_dataset_with_unknown_cardinality(self, distribution,
run_distributed):
experimental_run_tf_function):
with self.cached_session():
with distribution.scope():
model = get_model()
@ -956,7 +995,7 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
gradient_descent.GradientDescentOptimizer(0.001),
loss,
metrics=metrics,
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
inputs = np.zeros((1000, 3), dtype=np.float32)
targets = np.zeros((1000, 4), dtype=np.float32)
@ -988,8 +1027,8 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
model.fit(dataset, epochs=1)
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def test_fit_eval_and_predict_methods_on_dataset(self, distribution,
run_distributed):
def test_fit_eval_and_predict_methods_on_dataset(
self, distribution, experimental_run_tf_function):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
@ -998,7 +1037,10 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(
optimizer, loss, metrics=metrics, run_distributed=run_distributed)
optimizer,
loss,
metrics=metrics,
experimental_run_tf_function=experimental_run_tf_function)
dataset = get_dataset(distribution)
@ -1008,14 +1050,17 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
@combinations.generate(strategy_and_optimizer_combinations())
def test_fit_eval_and_predict_with_optimizer(self, distribution, optimizer,
run_distributed):
experimental_run_tf_function):
with self.cached_session():
with distribution.scope():
model = get_model()
loss = 'mse'
model.compile(optimizer(), loss, run_distributed=run_distributed)
model.compile(
optimizer(),
loss,
experimental_run_tf_function=experimental_run_tf_function)
dataset = get_dataset(distribution)
@ -1030,8 +1075,9 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
strategy_combinations.one_device_strategy
],
mode=['graph', 'eager'],
run_distributed=[True, False]))
def test_dataset_wrong_input_shape(self, distribution, run_distributed, mode):
experimental_run_tf_function=[True, False]))
def test_dataset_wrong_input_shape(self, distribution,
experimental_run_tf_function, mode):
if mode == 'graph':
self.skipTest(
'TODO(b/120943676, b/120957836): Re-enable for graph once the '
@ -1042,7 +1088,10 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
optimizer = optimizer_fn(learning_rate=0.001)
model = get_model()
loss = 'mse'
model.compile(optimizer, loss, run_distributed=run_distributed)
model.compile(
optimizer,
loss,
experimental_run_tf_function=experimental_run_tf_function)
# Wrong input shape
inputs = np.zeros((10, 5), dtype=np.float32)
@ -1060,16 +1109,19 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu
],
mode=['graph', 'eager'],
run_distributed=[True, False]))
def test_dataset_external_batch_input_validation(self, distribution,
run_distributed):
experimental_run_tf_function=[True, False]))
def test_dataset_external_batch_input_validation(
self, distribution, experimental_run_tf_function):
with self.cached_session():
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(learning_rate=0.001)
model = get_model()
loss = 'mse'
model.compile(optimizer, loss, run_distributed=run_distributed)
model.compile(
optimizer,
loss,
experimental_run_tf_function=experimental_run_tf_function)
# Batching is done outside tf.data's `batch`
inputs = np.zeros((100, 10, 3), dtype=np.float32)
@ -1086,8 +1138,9 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
strategy_combinations.mirrored_strategy_with_two_gpus
],
mode=['graph', 'eager'],
run_distributed=[True, False]))
def test_learning_phase_value(self, distribution, run_distributed):
experimental_run_tf_function=[True, False]))
def test_learning_phase_value(self, distribution,
experimental_run_tf_function):
# TODO(anjalisridhar): Modify this test to use Lambdas since we can compare
# meaningful values. Currently we don't pass the learning phase if the
# Lambda layer uses the learning phase.
@ -1104,7 +1157,10 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
loss = 'mse'
metrics = ['acc']
model.compile(
optimizer, loss, metrics=metrics, run_distributed=run_distributed)
optimizer,
loss,
metrics=metrics,
experimental_run_tf_function=experimental_run_tf_function)
batch_size = 8
if isinstance(distribution, mirrored_strategy.MirroredStrategy):
@ -1134,13 +1190,17 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
self.assertArrayNear(output, ref_output, 1e-1)
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def testOptimizerWithCallbacks(self, distribution, run_distributed):
def testOptimizerWithCallbacks(self, distribution,
experimental_run_tf_function):
with self.cached_session():
with distribution.scope():
model = get_model()
optimizer = gradient_descent_keras.SGD(0.01)
loss = 'mse'
model.compile(optimizer, loss, run_distributed=run_distributed)
model.compile(
optimizer,
loss,
experimental_run_tf_function=experimental_run_tf_function)
dataset = get_dataset(distribution)
@ -1197,10 +1257,11 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
rtol=1e-5)
@combinations.generate(
combinations.times(tpu_strategy_combinations_graph_only(),
combinations.combine(run_distributed=[True, False])))
def test_predict_with_dataset_with_partial_batch(self, distribution,
run_distributed):
combinations.times(
tpu_strategy_combinations_graph_only(),
combinations.combine(experimental_run_tf_function=[True, False])))
def test_predict_with_dataset_with_partial_batch(
self, distribution, experimental_run_tf_function):
with self.cached_session():
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
@ -1208,7 +1269,9 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
with distribution.scope():
model_with_ds_strategy = get_model()
model_with_ds_strategy.compile(
optimizer, loss, run_distributed=run_distributed)
optimizer,
loss,
experimental_run_tf_function=experimental_run_tf_function)
cpu_model = get_model()
cpu_model.compile(optimizer, loss)
@ -1228,10 +1291,11 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
rtol=1e-5)
@combinations.generate(
combinations.times(tpu_strategy_combinations_graph_only(),
combinations.combine(run_distributed=[True, False])))
combinations.times(
tpu_strategy_combinations_graph_only(),
combinations.combine(experimental_run_tf_function=[True, False])))
def test_predict_multi_output_model_with_dataset_with_partial_batch(
self, distribution, run_distributed):
self, distribution, experimental_run_tf_function):
with self.cached_session():
optimizer = gradient_descent.GradientDescentOptimizer(0.001)
loss = 'mse'
@ -1239,7 +1303,9 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
with distribution.scope():
model_with_ds_strategy = simple_multi_inputs_multi_outputs_model()
model_with_ds_strategy.compile(
optimizer, loss, run_distributed=run_distributed)
optimizer,
loss,
experimental_run_tf_function=experimental_run_tf_function)
cpu_model = simple_multi_inputs_multi_outputs_model()
cpu_model.compile(optimizer, loss)
@ -1320,13 +1386,17 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
combinations.combine(
distribution=strategies_minus_tpu,
mode=['graph', 'eager'],
run_distributed=[True, False]))
def test_dataset_with_sample_weights(self, distribution, run_distributed):
experimental_run_tf_function=[True, False]))
def test_dataset_with_sample_weights(self, distribution,
experimental_run_tf_function):
with self.cached_session(), distribution.scope():
model = get_sample_weights_model()
optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
model.compile(optimizer, loss, run_distributed=run_distributed)
model.compile(
optimizer,
loss,
experimental_run_tf_function=experimental_run_tf_function)
inputs = np.array([[0], [1], [2], [3]], np.float32)
targets = np.array([[2], [4], [6], [8]], np.float32)
@ -1379,8 +1449,8 @@ class TestRegularizerLoss(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.times(
strategy_combinations.all_strategy_combinations_minus_default(),
combinations.combine(run_distributed=[True, False])))
def test_regularizer_loss(self, distribution, run_distributed):
combinations.combine(experimental_run_tf_function=[True, False])))
def test_regularizer_loss(self, distribution, experimental_run_tf_function):
batch_size = 2
if not distributed_training_utils.global_batch_size_supported(distribution):
batch_size //= distribution.num_replicas_in_sync
@ -1402,7 +1472,7 @@ class TestRegularizerLoss(test.TestCase, parameterized.TestCase):
model.compile(
opt,
loss=TestRegularizerLoss.loss_fn,
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
model.fit(
x=np.array([[1.], [1.]], dtype=np.float32),
y=np.array([[1.], [1.]], dtype=np.float32),
@ -1415,14 +1485,17 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
parameterized.TestCase):
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def test_distribution_strategy_on_sequential_model(self, distribution,
run_distributed):
def test_distribution_strategy_on_sequential_model(
self, distribution, experimental_run_tf_function):
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(learning_rate=0.001)
model = simple_sequential_model()
loss = 'mse'
model.compile(optimizer, loss, run_distributed=run_distributed)
model.compile(
optimizer,
loss,
experimental_run_tf_function=experimental_run_tf_function)
inputs = np.zeros((20, 10), np.float32)
targets = np.zeros((20, 2), np.float32)
@ -1432,14 +1505,17 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
model.evaluate(inputs, targets, batch_size=10)
@combinations.generate(all_strategy_combinations_plus_run_distributed())
def test_distribution_strategy_on_functional_model(self, distribution,
run_distributed):
def test_distribution_strategy_on_functional_model(
self, distribution, experimental_run_tf_function):
with distribution.scope():
optimizer_fn = gradient_descent_keras.SGD
optimizer = optimizer_fn(learning_rate=0.001)
model = get_model()
loss = 'mse'
model.compile(optimizer, loss, run_distributed=run_distributed)
model.compile(
optimizer,
loss,
experimental_run_tf_function=experimental_run_tf_function)
inputs = np.zeros((64, 3), dtype=np.float32)
targets = np.zeros((64, 4), dtype=np.float32)
@ -1449,10 +1525,11 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
model.evaluate(inputs, targets)
@combinations.generate(
combinations.times(all_strategy_combinations_minus_default(),
combinations.combine(run_distributed=[True, False])))
combinations.times(
all_strategy_combinations_minus_default(),
combinations.combine(experimental_run_tf_function=[True, False])))
def test_distribution_strategy_one_dimensional(self, distribution,
run_distributed):
experimental_run_tf_function):
with distribution.scope():
inp = keras.layers.Input(shape=(10,))
out = keras.layers.Dense(3, activation='softmax')(inp)
@ -1461,7 +1538,7 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
optimizer='rmsprop',
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'],
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
x = np.random.random((64, 10)).astype('float32')
y = np.random.randint(3, size=64)
@ -1475,14 +1552,14 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
strategy_combinations.mirrored_strategy_with_two_gpus
],
mode=['graph', 'eager'],
run_distributed=[True, False],
experimental_run_tf_function=[True, False],
reduction=[
loss_reduction.ReductionV2.AUTO,
loss_reduction.ReductionV2.SUM_OVER_BATCH_SIZE,
loss_reduction.ReductionV2.SUM
]))
def test_distribution_strategy_with_loss_reduction_types(
self, distribution, run_distributed, reduction):
self, distribution, experimental_run_tf_function, reduction):
np.random.seed(_RANDOM_SEED)
def _get_model():
@ -1508,22 +1585,23 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
ds_model.compile(
'sgd',
loss=keras.losses.MeanSquaredError(reduction=reduction),
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
ds_history = ds_model.fit(
dataset, steps_per_epoch=2, epochs=1, shuffle=False)
self.assertArrayNear(history.history['loss'], ds_history.history['loss'],
1e-5)
@combinations.generate(
combinations.times(all_strategy_combinations_minus_default(),
combinations.combine(run_distributed=[True, False])))
combinations.times(
all_strategy_combinations_minus_default(),
combinations.combine(experimental_run_tf_function=[True, False])))
def test_distribution_strategy_with_symbolic_add_loss(
self, mode, distribution, run_distributed):
self, mode, distribution, experimental_run_tf_function):
# TODO(b/123533246): Enable the test for TPU once bug is fixed
if (isinstance(distribution,
(tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)) and
mode == 'graph' and not run_distributed):
mode == 'graph' and not experimental_run_tf_function):
self.skipTest('TPU Strategy in graph mode fails with this test.')
def _make_model_with_add_loss():
@ -1544,7 +1622,8 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
with distribution.scope():
ds_model = _make_model_with_add_loss()
ds_model.compile('sgd', run_distributed=run_distributed)
ds_model.compile(
'sgd', experimental_run_tf_function=experimental_run_tf_function)
ds_history = ds_model.fit(x, epochs=1)
self.assertAllClose(history.history, ds_history.history)
@ -1582,10 +1661,11 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
self.assertAllClose(history.history, ds_history.history)
@combinations.generate(
combinations.times(all_strategy_minus_default_and_tpu_combinations(),
combinations.combine(run_distributed=[True, False])))
combinations.times(
all_strategy_minus_default_and_tpu_combinations(),
combinations.combine(experimental_run_tf_function=[True, False])))
def test_distribution_strategy_with_add_metric_in_call(
self, distribution, run_distributed):
self, distribution, experimental_run_tf_function):
class Bias(keras.layers.Layer):
@ -1618,7 +1698,10 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
with distribution.scope():
ds_model = _make_model_with_add_metric()
self.assertLen(ds_model.metrics, 1)
ds_model.compile('sgd', 'mse', run_distributed=run_distributed)
ds_model.compile(
'sgd',
'mse',
experimental_run_tf_function=experimental_run_tf_function)
ds_history = ds_model.fit(
x, y, validation_data=(x, y), validation_steps=2, epochs=2)
self.assertLen(ds_model.metrics, 1)
@ -1634,9 +1717,9 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
mode=['eager'],
run_distributed=[False]))
def test_distribution_strategy_with_add_metric_object(self, distribution,
run_distributed):
experimental_run_tf_function=[False]))
def test_distribution_strategy_with_add_metric_object(
self, distribution, experimental_run_tf_function):
class Bias(keras.layers.Layer):
@ -1669,7 +1752,10 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
with distribution.scope():
ds_model = _make_model_with_add_metric_object()
self.assertLen(ds_model.metrics, 1)
ds_model.compile('sgd', 'mse', run_distributed=run_distributed)
ds_model.compile(
'sgd',
'mse',
experimental_run_tf_function=experimental_run_tf_function)
ds_history = ds_model.fit(
x, y, validation_data=(x, y), validation_steps=2, epochs=2)
self.assertLen(ds_model.metrics, 1)
@ -1678,10 +1764,11 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
@combinations.generate(
# TODO(phillypham): Why does validation_steps > 1 not work on TPUs?
combinations.times(all_strategy_minus_default_and_tpu_combinations(),
combinations.combine(run_distributed=[True, False])))
combinations.times(
all_strategy_minus_default_and_tpu_combinations(),
combinations.combine(experimental_run_tf_function=[True, False])))
def test_distribution_strategy_with_add_metric_outside_call(
self, distribution, run_distributed):
self, distribution, experimental_run_tf_function):
def _make_model_with_add_metric():
inputs = keras.Input((10,))
@ -1705,7 +1792,10 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
with distribution.scope():
ds_model = _make_model_with_add_metric()
self.assertLen(ds_model.metrics, 1)
ds_model.compile('sgd', 'mse', run_distributed=run_distributed)
ds_model.compile(
'sgd',
'mse',
experimental_run_tf_function=experimental_run_tf_function)
ds_history = ds_model.fit(
x, y, validation_data=(x, y), validation_steps=2, epochs=2)
self.assertLen(ds_model.metrics, 1)
@ -1716,8 +1806,9 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
combinations.combine(
distribution=strategies_minus_tpu,
mode=['eager'],
run_distributed=[True]))
def test_sparse_tensor_outputs(self, distribution, run_distributed):
experimental_run_tf_function=[True]))
def test_sparse_tensor_outputs(self, distribution,
experimental_run_tf_function):
class ToSparse(keras.layers.Layer):
"""Create a sparse tensor based on a given dense tensor."""
@ -1729,7 +1820,7 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
return sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
model = keras.Sequential([ToSparse()])
model._run_distributed = run_distributed
model._experimental_run_tf_function = experimental_run_tf_function
# Define some input data with additional padding.
input_data = np.array([[1, 0, 0], [2, 3, 0]])
@ -1747,8 +1838,9 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
combinations.combine(
distribution=strategies_minus_tpu,
mode=['eager'],
run_distributed=[True]))
def test_ragged_tensor_outputs(self, distribution, run_distributed):
experimental_run_tf_function=[True]))
def test_ragged_tensor_outputs(self, distribution,
experimental_run_tf_function):
class ToRagged(keras.layers.Layer):
"""Create a ragged tensor based on a given dense tensor."""
@ -1763,7 +1855,7 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
inputs, padding=self._padding, ragged_rank=self._ragged_rank)
model = keras.Sequential([ToRagged(padding=0)])
model._run_distributed = run_distributed
model._experimental_run_tf_function = experimental_run_tf_function
# Define some input data with additional padding.
input_data = np.array([[1, 0, 0], [2, 3, 0]])


@ -64,7 +64,8 @@ def graph_mode_test_configuration():
def all_strategy_and_input_config_combinations():
return (combinations.times(
combinations.combine(
distribution=all_strategies, run_distributed=[True, False]),
distribution=all_strategies,
experimental_run_tf_function=[True, False]),
eager_mode_test_configuration() + graph_mode_test_configuration()))
@ -97,10 +98,11 @@ def test_combinations_for_embedding_model():
return (combinations.times(
combinations.combine(
distribution=strategies_for_embedding_models(),
run_distributed=[True, False]),
experimental_run_tf_function=[True, False]),
(graph_mode_test_configuration())) + combinations.times(
combinations.combine(
distribution=eager_mode_strategies, run_distributed=[False]),
distribution=eager_mode_strategies,
experimental_run_tf_function=[False]),
(eager_mode_test_configuration())))
@ -244,13 +246,13 @@ def get_correctness_test_inputs(use_numpy, use_validation_data,
def fit_eval_and_predict(initial_weights,
input_fn,
model_fn,
run_distributed=None,
experimental_run_tf_function=None,
distribution=None,
is_stateful_model=False):
"""Generates results for fit/predict/evaluate for given model."""
training_inputs, eval_inputs, predict_inputs = input_fn()
model = model_fn(
run_distributed=run_distributed,
experimental_run_tf_function=experimental_run_tf_function,
initial_weights=initial_weights,
distribution=distribution,
input_shapes=get_shapes(training_inputs['x']))
@ -418,28 +420,31 @@ class TestDistributionStrategyCorrectnessBase(test.TestCase,
def get_model(self,
distribution=None,
run_distributed=None,
experimental_run_tf_function=None,
input_shapes=None):
raise NotImplementedError
def skip_unsupported_test_configuration(self, distribution, run_distributed):
if should_skip_tpu_with_eager(distribution) and run_distributed:
self.skipTest(
'TPUStrategy does not support eager mode with run_distributed.')
def skip_unsupported_test_configuration(self, distribution,
experimental_run_tf_function):
if should_skip_tpu_with_eager(
distribution) and experimental_run_tf_function:
self.skipTest('TPUStrategy does not support eager mode with '
'experimental_run_tf_function.')
return
def run_correctness_test(self,
distribution,
use_numpy,
use_validation_data,
run_distributed=None,
experimental_run_tf_function=None,
with_batch_norm=False,
is_stateful_model=False,
partial_last_batch=None,
training_epochs=2):
with self.cached_session():
self.set_up_test_config(use_numpy, use_validation_data, with_batch_norm)
self.skip_unsupported_test_configuration(distribution, run_distributed)
self.skip_unsupported_test_configuration(distribution,
experimental_run_tf_function)
if partial_last_batch == 'eval':
x_train, y_train, x_eval, y_eval, x_predict = (
@ -456,7 +461,8 @@ class TestDistributionStrategyCorrectnessBase(test.TestCase,
# This is used to initialize the model for both the distribution and
# non-distribution run.
model = self.get_model(
run_distributed=run_distributed, input_shapes=get_shapes(x_train))
experimental_run_tf_function=experimental_run_tf_function,
input_shapes=get_shapes(x_train))
initial_weights = model.get_weights()
ds_input_fn = functools.partial(
@ -487,14 +493,14 @@ class TestDistributionStrategyCorrectnessBase(test.TestCase,
initial_weights,
input_fn=ds_input_fn,
model_fn=self.get_model,
run_distributed=run_distributed,
experimental_run_tf_function=experimental_run_tf_function,
distribution=distribution,
is_stateful_model=is_stateful_model)
results_without_ds = fit_eval_and_predict(
initial_weights,
input_fn=nods_input_fn,
model_fn=self.get_model,
run_distributed=run_distributed,
experimental_run_tf_function=experimental_run_tf_function,
distribution=None,
is_stateful_model=is_stateful_model)
@ -534,14 +540,18 @@ class TestDistributionStrategyCorrectnessBase(test.TestCase,
training_input = kwargs
return training_input, None, None
def run_dynamic_lr_test(self, distribution, run_distributed=None):
def run_dynamic_lr_test(self,
distribution,
experimental_run_tf_function=None):
with self.cached_session():
self.set_up_test_config()
self.skip_unsupported_test_configuration(distribution, run_distributed)
self.skip_unsupported_test_configuration(distribution,
experimental_run_tf_function)
x_train, y_train, _ = self.get_data()
model = self.get_model(
run_distributed=run_distributed, input_shapes=get_shapes(x_train))
experimental_run_tf_function=experimental_run_tf_function,
input_shapes=get_shapes(x_train))
initial_weights = model.get_weights()
update_freq = None
@ -582,13 +592,13 @@ class TestDistributionStrategyCorrectnessBase(test.TestCase,
initial_weights,
input_fn=ds_input_fn,
model_fn=self.get_model,
run_distributed=run_distributed,
experimental_run_tf_function=experimental_run_tf_function,
distribution=distribution)
results_without_ds = fit_eval_and_predict(
initial_weights,
input_fn=nods_input_fn,
model_fn=self.get_model,
run_distributed=run_distributed,
experimental_run_tf_function=experimental_run_tf_function,
distribution=None)
compare_results(
results_with_ds, results_without_ds, distribution, testcase=self)
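The harness above boils down to training twin models from shared initial weights, once under the strategy and once without, and comparing the results. A strategy-free sketch of that pattern (the model and data here are illustrative, not the test-base API):

import numpy as np
import tensorflow as tf

def build_model():
  model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(3,))])
  model.compile(tf.keras.optimizers.SGD(0.05), 'mse')
  return model

x = np.ones((8, 3), np.float32)
y = np.ones((8, 1), np.float32)

reference = build_model()
initial_weights = reference.get_weights()  # shared starting point
hist_a = reference.fit(x, y, batch_size=4, epochs=2, verbose=0)

candidate = build_model()
candidate.set_weights(initial_weights)
hist_b = candidate.fit(x, y, batch_size=4, epochs=2, verbose=0)

np.testing.assert_allclose(hist_a.history['loss'], hist_b.history['loss'],
                           rtol=1e-5)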


@ -34,14 +34,14 @@ def all_strategy_combinations_with_eager_and_graph_modes():
return (combinations.combine(
distribution=keras_correctness_test_base.all_strategies,
mode=['graph', 'eager'],
run_distributed=[True, False]))
experimental_run_tf_function=[True, False]))
def all_strategy_combinations_with_graph_mode():
return (combinations.combine(
distribution=keras_correctness_test_base.all_strategies,
mode=['graph'],
run_distributed=[True, False]))
experimental_run_tf_function=[True, False]))
def is_default_strategy(strategy):
@ -53,7 +53,7 @@ class TestDistributionStrategyDnnCorrectness(
keras_correctness_test_base.TestDistributionStrategyCorrectnessBase):
def get_model(self,
run_distributed,
experimental_run_tf_function,
initial_weights=None,
distribution=None,
input_shapes=None):
@ -76,7 +76,7 @@ class TestDistributionStrategyDnnCorrectness(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent_keras.SGD(0.05),
metrics=['mse'],
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
return model
def get_data(self):
@ -104,9 +104,9 @@ class TestDistributionStrategyDnnCorrectness(
@combinations.generate(
keras_correctness_test_base.all_strategy_and_input_config_combinations())
def test_dnn_correctness(self, distribution, use_numpy, use_validation_data,
run_distributed):
experimental_run_tf_function):
self.run_correctness_test(distribution, use_numpy, use_validation_data,
run_distributed)
experimental_run_tf_function)
@combinations.generate(
keras_correctness_test_base.test_combinations_with_tpu_strategies())
@ -131,14 +131,18 @@ class TestDistributionStrategyDnnCorrectness(
training_epochs=1)
@combinations.generate(all_strategy_combinations_with_graph_mode())
def test_dnn_with_dynamic_learning_rate(self, distribution, run_distributed):
self.run_dynamic_lr_test(distribution, run_distributed)
def test_dnn_with_dynamic_learning_rate(self, distribution,
experimental_run_tf_function):
self.run_dynamic_lr_test(distribution, experimental_run_tf_function)
class TestDistributionStrategyDnnMetricCorrectness(
keras_correctness_test_base.TestDistributionStrategyCorrectnessBase):
def get_model(self, run_distributed, distribution=None, input_shapes=None):
def get_model(self,
experimental_run_tf_function,
distribution=None,
input_shapes=None):
with distribution.scope():
model = keras.Sequential()
model.add(
@ -147,16 +151,19 @@ class TestDistributionStrategyDnnMetricCorrectness(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent_keras.SGD(0.05),
metrics=[keras.metrics.BinaryAccuracy()],
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
return model
def run_metric_correctness_test(self, distribution, run_distributed):
def run_metric_correctness_test(self, distribution,
experimental_run_tf_function):
with self.cached_session():
self.set_up_test_config()
self.skip_unsupported_test_configuration(distribution, run_distributed)
self.skip_unsupported_test_configuration(distribution,
experimental_run_tf_function)
x_train, y_train, _ = self.get_data()
model = self.get_model(run_distributed, distribution=distribution)
model = self.get_model(
experimental_run_tf_function, distribution=distribution)
batch_size = 64
batch_size = (
@ -169,14 +176,18 @@ class TestDistributionStrategyDnnMetricCorrectness(
self.assertEqual(history.history['binary_accuracy'], [1.0, 1.0])
@combinations.generate(all_strategy_combinations_with_eager_and_graph_modes())
def test_simple_dnn_metric_correctness(self, distribution, run_distributed):
self.run_metric_correctness_test(distribution, run_distributed)
def test_simple_dnn_metric_correctness(self, distribution,
experimental_run_tf_function):
self.run_metric_correctness_test(distribution, experimental_run_tf_function)
class TestDistributionStrategyDnnMetricEvalCorrectness(
keras_correctness_test_base.TestDistributionStrategyCorrectnessBase):
def get_model(self, run_distributed, distribution=None, input_shapes=None):
def get_model(self,
experimental_run_tf_function,
distribution=None,
input_shapes=None):
with distribution.scope():
model = keras.Sequential()
model.add(
@ -189,15 +200,18 @@ class TestDistributionStrategyDnnMetricEvalCorrectness(
loss='mae',
metrics=['accuracy', keras.metrics.BinaryAccuracy()],
optimizer=gradient_descent.GradientDescentOptimizer(0.001),
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
return model
def run_eval_metrics_correctness_test(self, distribution, run_distributed):
def run_eval_metrics_correctness_test(self, distribution,
experimental_run_tf_function):
with self.cached_session():
self.set_up_test_config()
self.skip_unsupported_test_configuration(distribution, run_distributed)
self.skip_unsupported_test_configuration(distribution,
experimental_run_tf_function)
model = self.get_model(run_distributed, distribution=distribution)
model = self.get_model(
experimental_run_tf_function, distribution=distribution)
# Verify correctness of stateful and stateless metrics.
x = np.ones((100, 4)).astype('float32')
@ -217,8 +231,9 @@ class TestDistributionStrategyDnnMetricEvalCorrectness(
@combinations.generate(all_strategy_combinations_with_eager_and_graph_modes())
def test_identity_model_metric_eval_correctness(self, distribution,
run_distributed):
self.run_eval_metrics_correctness_test(distribution, run_distributed)
experimental_run_tf_function):
self.run_eval_metrics_correctness_test(distribution,
experimental_run_tf_function)
class SubclassedModel(keras.Model):
@ -249,7 +264,7 @@ class TestDistributionStrategyDnnCorrectnessWithSubclassedModel(
TestDistributionStrategyDnnCorrectness):
def get_model(self,
run_distributed,
experimental_run_tf_function,
initial_weights=None,
distribution=None,
input_shapes=None):
@ -260,23 +275,23 @@ class TestDistributionStrategyDnnCorrectnessWithSubclassedModel(
loss=keras.losses.mean_squared_error,
optimizer=gradient_descent_keras.SGD(0.05),
metrics=['mse'],
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
return model
@combinations.generate(
keras_correctness_test_base.all_strategy_and_input_config_combinations())
def test_dnn_correctness(self, distribution, use_numpy, use_validation_data,
run_distributed):
experimental_run_tf_function):
if (context.executing_eagerly()) or is_default_strategy(distribution):
self.run_correctness_test(distribution, use_numpy, use_validation_data,
run_distributed)
experimental_run_tf_function)
elif K.is_tpu_strategy(distribution) and not context.executing_eagerly():
with self.assertRaisesRegexp(
ValueError,
'Expected `model` argument to be a functional `Model` instance, '
'but got a subclass model instead.'):
self.run_correctness_test(distribution, use_numpy, use_validation_data,
run_distributed)
experimental_run_tf_function)
else:
with self.assertRaisesRegexp(
ValueError,
@ -284,27 +299,28 @@ class TestDistributionStrategyDnnCorrectnessWithSubclassedModel(
'`Sequential` model that is created without `input_shape`/'
'`input_dim` set in its first layer or a subclassed model.'):
self.run_correctness_test(distribution, use_numpy, use_validation_data,
run_distributed)
experimental_run_tf_function)
@combinations.generate(all_strategy_combinations_with_graph_mode())
def test_dnn_with_dynamic_learning_rate(self, distribution, run_distributed):
if ((not run_distributed and context.executing_eagerly() and
def test_dnn_with_dynamic_learning_rate(self, distribution,
experimental_run_tf_function):
if ((not experimental_run_tf_function and context.executing_eagerly() and
not K.is_tpu_strategy(distribution)) or
is_default_strategy(distribution)):
self.run_dynamic_lr_test(distribution, run_distributed)
self.run_dynamic_lr_test(distribution, experimental_run_tf_function)
elif K.is_tpu_strategy(distribution):
with self.assertRaisesRegexp(
ValueError,
'Expected `model` argument to be a functional `Model` instance, '
'but got a subclass model instead.'):
self.run_dynamic_lr_test(distribution, run_distributed)
self.run_dynamic_lr_test(distribution, experimental_run_tf_function)
else:
with self.assertRaisesRegexp(
ValueError,
'We currently do not support distribution strategy with a '
'`Sequential` model that is created without `input_shape`/'
'`input_dim` set in its first layer or a subclassed model.'):
self.run_dynamic_lr_test(distribution, run_distributed)
self.run_dynamic_lr_test(distribution, experimental_run_tf_function)
@combinations.generate(
keras_correctness_test_base.test_combinations_with_tpu_strategies())


@ -33,7 +33,7 @@ class DistributionStrategyEmbeddingModelCorrectnessTest(
max_words=10,
initial_weights=None,
distribution=None,
run_distributed=None,
experimental_run_tf_function=None,
input_shapes=None):
del input_shapes
with keras_correctness_test_base.MaybeDistributionScope(distribution):
@ -54,27 +54,27 @@ class DistributionStrategyEmbeddingModelCorrectnessTest(
optimizer=gradient_descent_keras.SGD(learning_rate=0.1),
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'],
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
return model
@combinations.generate(
keras_correctness_test_base.test_combinations_for_embedding_model())
def test_embedding_model_correctness(self, distribution, use_numpy,
use_validation_data, run_distributed):
use_validation_data,
experimental_run_tf_function):
self.use_distributed_dense = False
self.run_correctness_test(distribution, use_numpy, use_validation_data,
run_distributed)
experimental_run_tf_function)
@combinations.generate(
keras_correctness_test_base.test_combinations_for_embedding_model())
def test_embedding_time_distributed_model_correctness(self, distribution,
use_numpy,
use_validation_data,
run_distributed):
def test_embedding_time_distributed_model_correctness(
self, distribution, use_numpy, use_validation_data,
experimental_run_tf_function):
self.use_distributed_dense = True
self.run_correctness_test(distribution, use_numpy, use_validation_data,
run_distributed)
experimental_run_tf_function)
class DistributionStrategySiameseEmbeddingModelCorrectnessTest(
@ -85,7 +85,7 @@ class DistributionStrategySiameseEmbeddingModelCorrectnessTest(
max_words=10,
initial_weights=None,
distribution=None,
run_distributed=None,
experimental_run_tf_function=None,
input_shapes=None):
del input_shapes
with keras_correctness_test_base.MaybeDistributionScope(distribution):
@ -119,7 +119,7 @@ class DistributionStrategySiameseEmbeddingModelCorrectnessTest(
model.compile(
optimizer=gradient_descent_keras.SGD(learning_rate=0.1),
loss='mse',
run_distributed=run_distributed,
experimental_run_tf_function=experimental_run_tf_function,
metrics=['mse'])
return model
@ -157,9 +157,9 @@ class DistributionStrategySiameseEmbeddingModelCorrectnessTest(
keras_correctness_test_base.test_combinations_for_embedding_model())
def test_siamese_embedding_model_correctness(self, distribution, use_numpy,
use_validation_data,
run_distributed):
experimental_run_tf_function):
self.run_correctness_test(distribution, use_numpy, use_validation_data,
run_distributed)
experimental_run_tf_function)
if __name__ == '__main__':


@ -31,7 +31,7 @@ class DistributionStrategyCnnCorrectnessTest(
def get_model(self,
initial_weights=None,
distribution=None,
run_distributed=None,
experimental_run_tf_function=None,
input_shapes=None):
del input_shapes
with keras_correctness_test_base.MaybeDistributionScope(distribution):
@ -58,7 +58,7 @@ class DistributionStrategyCnnCorrectnessTest(
optimizer=gradient_descent.SGD(learning_rate=0.1),
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'],
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
return model
@ -93,22 +93,22 @@ class DistributionStrategyCnnCorrectnessTest(
@combinations.generate(
keras_correctness_test_base.all_strategy_and_input_config_combinations())
def test_cnn_correctness(self, distribution, use_numpy, use_validation_data,
run_distributed):
experimental_run_tf_function):
self.run_correctness_test(distribution, use_numpy, use_validation_data,
run_distributed)
experimental_run_tf_function)
@combinations.generate(
keras_correctness_test_base.all_strategy_and_input_config_combinations())
def test_cnn_with_batch_norm_correctness(self, distribution, use_numpy,
use_validation_data,
run_distributed):
experimental_run_tf_function):
self.skipTest('Flakily times out, b/134670856')
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
with_batch_norm=True,
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
@combinations.generate(
keras_correctness_test_base.test_combinations_with_tpu_strategies() +


@ -37,7 +37,7 @@ class DistributionStrategyLstmModelCorrectnessTest(
max_words=10,
initial_weights=None,
distribution=None,
run_distributed=None,
experimental_run_tf_function=None,
input_shapes=None):
del input_shapes
@ -67,15 +67,16 @@ class DistributionStrategyLstmModelCorrectnessTest(
optimizer=optimizer_fn(learning_rate=0.1),
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'],
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
return model
@combinations.generate(
keras_correctness_test_base.test_combinations_for_embedding_model())
def test_lstm_model_correctness(self, distribution, use_numpy,
use_validation_data, run_distributed):
use_validation_data,
experimental_run_tf_function):
self.run_correctness_test(distribution, use_numpy, use_validation_data,
run_distributed)
experimental_run_tf_function)
if __name__ == '__main__':


@ -108,9 +108,9 @@ class MirroredStrategyOptimizerV2Test(test.TestCase, parameterized.TestCase):
strategy_combinations.central_storage_strategy_with_two_gpus,
],
mode=['graph', 'eager'],
run_distributed=[True, False]))
experimental_run_tf_function=[True, False]))
def testOptimizerWithKerasModelAndNumpyArrays(self, distribution,
run_distributed):
experimental_run_tf_function):
self.skipTest('b/130309197')
with self.cached_session():
with distribution.scope():
@ -119,7 +119,10 @@ class MirroredStrategyOptimizerV2Test(test.TestCase, parameterized.TestCase):
loss = 'mse'
metrics = ['mae']
model.compile(
optimizer, loss, metrics=metrics, run_distributed=run_distributed)
optimizer,
loss,
metrics=metrics,
experimental_run_tf_function=experimental_run_tf_function)
inputs = np.zeros((64, 3), dtype=np.float32)
targets = np.zeros((64, 4), dtype=np.float32)
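For orientation, the renamed flag in end-to-end use. The kwarg is only accepted by builds containing this change (it was experimental and later removed), so treat this as a sketch of the era's API rather than current Keras:

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(3,))])
model.compile(tf.keras.optimizers.SGD(0.2), 'mse', metrics=['mae'],
              experimental_run_tf_function=True)  # opt into the v2 tf.function path
model.fit(np.zeros((64, 3), np.float32), np.zeros((64, 4), np.float32),
          batch_size=32, epochs=1, verbose=0)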


@ -41,7 +41,7 @@ def test_combinations_for_stateful_embedding_model():
mode='graph',
use_numpy=False,
use_validation_data=False,
run_distributed=[True, False]))
experimental_run_tf_function=[True, False]))
class DistributionStrategyStatefulLstmModelCorrectnessTest(
@ -52,7 +52,7 @@ class DistributionStrategyStatefulLstmModelCorrectnessTest(
max_words=10,
initial_weights=None,
distribution=None,
run_distributed=None,
experimental_run_tf_function=None,
input_shapes=None):
del input_shapes
batch_size = keras_correctness_test_base._GLOBAL_BATCH_SIZE
@ -86,20 +86,22 @@ class DistributionStrategyStatefulLstmModelCorrectnessTest(
# doesn't work and enable for DistributionStrategy more generally.
@combinations.generate(test_combinations_for_stateful_embedding_model())
def disabled_test_stateful_lstm_model_correctness(
self, distribution, use_numpy, use_validation_data, run_distributed):
self, distribution, use_numpy, use_validation_data,
experimental_run_tf_function):
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
is_stateful_model=True,
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
@combinations.generate(
combinations.times(
keras_correctness_test_base.test_combinations_with_tpu_strategies(),
combinations.combine(run_distributed=[True, False])))
combinations.combine(experimental_run_tf_function=[True, False])))
def test_incorrectly_use_multiple_cores_for_stateful_lstm_model(
self, distribution, use_numpy, use_validation_data, run_distributed):
self, distribution, use_numpy, use_validation_data,
experimental_run_tf_function):
with self.assertRaisesRegexp(
ValueError,
'Single core must be used for computation on stateful models. Consider '
@ -109,7 +111,7 @@ class DistributionStrategyStatefulLstmModelCorrectnessTest(
use_numpy,
use_validation_data,
is_stateful_model=True,
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
if __name__ == '__main__':


@ -72,16 +72,17 @@ class TestDistributionStrategyWithCallbacks(test.TestCase,
parameterized.TestCase):
@combinations.generate(
combinations.times(keras_test_lib.all_strategy_combinations(),
combinations.combine(run_distributed=[True, False])))
def test_callbacks_in_fit(self, distribution, run_distributed):
combinations.times(
keras_test_lib.all_strategy_combinations(),
combinations.combine(experimental_run_tf_function=[True, False])))
def test_callbacks_in_fit(self, distribution, experimental_run_tf_function):
with distribution.scope():
model = keras_test_lib.get_model()
model.compile(
optimizer='sgd',
loss='mse',
metrics=['mae'],
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
dataset = keras_test_lib.get_dataset(distribution)
counter = Counter()
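The Counter these callback tests instantiate is defined elsewhere in the file; a plausible minimal version (a hypothetical reconstruction, assuming only the standard tf.keras callback hooks) counts how often each hook fires:

import collections
import numpy as np
import tensorflow as tf

class Counter(tf.keras.callbacks.Callback):
  """Counts callback invocations by hook name."""

  def __init__(self):
    super().__init__()
    self.method_counts = collections.Counter()

  def on_epoch_begin(self, epoch, logs=None):
    self.method_counts['on_epoch_begin'] += 1

  def on_train_batch_end(self, batch, logs=None):
    self.method_counts['on_train_batch_end'] += 1

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(2,))])
model.compile('sgd', 'mse')
counter = Counter()
model.fit(np.ones((8, 2)), np.ones((8, 1)), batch_size=4, epochs=2,
          callbacks=[counter], verbose=0)
print(dict(counter.method_counts))
# {'on_epoch_begin': 2, 'on_train_batch_end': 4}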
@ -127,16 +128,17 @@ class TestDistributionStrategyWithCallbacks(test.TestCase,
})
@combinations.generate(
combinations.times(keras_test_lib.all_strategy_combinations(),
combinations.combine(run_distributed=[True, False])))
def test_callbacks_in_eval(self, distribution, run_distributed):
combinations.times(
keras_test_lib.all_strategy_combinations(),
combinations.combine(experimental_run_tf_function=[True, False])))
def test_callbacks_in_eval(self, distribution, experimental_run_tf_function):
with distribution.scope():
model = keras_test_lib.get_model()
model.compile(
optimizer='sgd',
loss='mse',
metrics=['mae'],
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
dataset = keras_test_lib.get_dataset(distribution)
counter = Counter()
@ -152,16 +154,18 @@ class TestDistributionStrategyWithCallbacks(test.TestCase,
})
@combinations.generate(
combinations.times(keras_test_lib.all_strategy_combinations(),
combinations.combine(run_distributed=[True, False])))
def test_callbacks_in_predict(self, distribution, run_distributed):
combinations.times(
keras_test_lib.all_strategy_combinations(),
combinations.combine(experimental_run_tf_function=[True, False])))
def test_callbacks_in_predict(self, distribution,
experimental_run_tf_function):
with distribution.scope():
model = keras_test_lib.get_model()
model.compile(
optimizer='sgd',
loss='mse',
metrics=['mae'],
run_distributed=run_distributed)
experimental_run_tf_function=experimental_run_tf_function)
dataset = keras_test_lib.get_dataset(distribution)
counter = Counter()
@ -238,8 +242,9 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager'],
run_distributed=[True, False]))
def test_unsupported_features(self, distribution, run_distributed, mode):
experimental_run_tf_function=[True, False]))
def test_unsupported_features(self, distribution,
experimental_run_tf_function, mode):
with self.cached_session():
with distribution.scope():
model = keras_test_lib.get_model()
@ -247,11 +252,14 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
loss = 'mse'
metrics = ['mae']
model.compile(
optimizer, loss, metrics=metrics, run_distributed=run_distributed)
optimizer,
loss,
metrics=metrics,
experimental_run_tf_function=experimental_run_tf_function)
dataset = keras_test_lib.get_dataset(distribution)
if run_distributed and mode == 'eager':
if experimental_run_tf_function and mode == 'eager':
exception_error_message = (
'`validation_split` argument is not supported when data adapter'
' is.+')
@ -308,9 +316,9 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager'],
run_distributed=[True, False]))
experimental_run_tf_function=[True, False]))
def test_calling_with_unsupported_predefined_callbacks(
self, distribution, run_distributed):
self, distribution, experimental_run_tf_function):
with self.cached_session():
with distribution.scope():
model = keras_test_lib.get_model()
@ -318,7 +326,10 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
loss = 'mse'
metrics = ['mae']
model.compile(
optimizer, loss, metrics=metrics, run_distributed=run_distributed)
optimizer,
loss,
metrics=metrics,
experimental_run_tf_function=experimental_run_tf_function)
dataset = keras_test_lib.get_dataset(distribution)
@ -349,22 +360,27 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
combinations.combine(
distribution=[strategy_combinations.one_device_strategy],
mode=['eager'],
run_distributed=[True, False]))
experimental_run_tf_function=[True, False]))
def test_distribution_strategy_with_run_eagerly(self, distribution,
run_distributed):
experimental_run_tf_function):
with distribution.scope():
x = keras.layers.Input(shape=(1,))
y = keras.layers.Dense(1, kernel_initializer='ones')(x)
model = keras.models.Model(x, y)
if run_distributed:
model.compile('sgd', run_eagerly=True, run_distributed=run_distributed)
if experimental_run_tf_function:
model.compile(
'sgd',
run_eagerly=True,
experimental_run_tf_function=experimental_run_tf_function)
else:
err_msg = ('We currently do not support enabling `run_eagerly` with '
'distribution strategy.')
with self.assertRaisesRegex(ValueError, err_msg):
model.compile(
'sgd', run_eagerly=True, run_distributed=run_distributed)
'sgd',
run_eagerly=True,
experimental_run_tf_function=experimental_run_tf_function)
@combinations.generate(
combinations.combine(
@ -373,9 +389,9 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
strategy_combinations.one_device_strategy,
],
mode=['graph', 'eager'],
run_distributed=[True, False]))
def test_distribution_strategy_on_subclassed_model(self, distribution,
run_distributed):
experimental_run_tf_function=[True, False]))
def test_distribution_strategy_on_subclassed_model(
self, distribution, experimental_run_tf_function):
with distribution.scope():
class _SimpleMLP(keras.Model):
@ -395,9 +411,11 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
'We currently do not support distribution strategy with a '
'`Sequential` model that is created without `input_shape`/'
'`input_dim` set in its first layer or a subclassed model.'):
model.compile('sgd', run_distributed=run_distributed)
model.compile(
'sgd', experimental_run_tf_function=experimental_run_tf_function)
else:
model.compile('sgd', run_distributed=run_distributed)
model.compile(
'sgd', experimental_run_tf_function=experimental_run_tf_function)
@combinations.generate(
combinations.combine(
@ -406,16 +424,17 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
strategy_combinations.one_device_strategy,
],
mode=['graph', 'eager'],
run_distributed=[True, False]))
experimental_run_tf_function=[True, False]))
def test_distribution_strategy_on_deferred_sequential_model(
self, distribution, run_distributed):
self, distribution, experimental_run_tf_function):
with distribution.scope():
model = keras.models.Sequential()
model.add(keras.layers.Dense(16, activation='relu'))
model.add(keras.layers.Dense(3, activation='softmax'))
if context.executing_eagerly():
model.compile('sgd', run_distributed=run_distributed)
model.compile(
'sgd', experimental_run_tf_function=experimental_run_tf_function)
else:
with self.assertRaisesRegexp(
ValueError,
@ -423,7 +442,8 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
'`Sequential` model that is created without '
'`input_shape`/`input_dim` set in its first layer or '
'a subclassed model.'):
model.compile('sgd', run_distributed=run_distributed)
model.compile(
'sgd', experimental_run_tf_function=experimental_run_tf_function)
@combinations.generate(
keras_test_lib.all_strategy_combinations_minus_default())
@ -449,10 +469,10 @@ class TestDistributionStrategyWithLossMasking(test.TestCase,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager'],
run_distributed=[True, False],
experimental_run_tf_function=[True, False],
optimizer=strategy_combinations.gradient_descent_optimizer_keras_v2_fn
))
def test_masking(self, distribution, run_distributed, optimizer):
def test_masking(self, distribution, experimental_run_tf_function, optimizer):
with self.cached_session():
np.random.seed(1337)
x = np.array([[[1], [1]], [[0], [0]]])
@ -463,7 +483,9 @@ class TestDistributionStrategyWithLossMasking(test.TestCase,
keras.layers.TimeDistributed(
keras.layers.Dense(1, kernel_initializer='one')))
model.compile(
loss='mse', optimizer=optimizer(), run_distributed=run_distributed)
loss='mse',
optimizer=optimizer(),
experimental_run_tf_function=experimental_run_tf_function)
y = np.array([[[1], [1]], [[1], [1]]])
dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
dataset = dataset.repeat(100)
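The masking test pins the expected loss at zero: with a ones-initialized Dense the unmasked sample is predicted exactly, and the all-zero sample is masked out of the loss. A strategy-free sketch of the same setup (the Masking layer is assumed, since the hunk elides the model's first layer):

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Masking(mask_value=0, input_shape=(2, 1)),
    tf.keras.layers.TimeDistributed(
        tf.keras.layers.Dense(1, kernel_initializer='ones')),
])
model.compile('sgd', 'mse')
x = np.array([[[1.], [1.]], [[0.], [0.]]])  # second sample is all padding
y = np.array([[[1.], [1.]], [[1.], [1.]]])
print(model.evaluate(x, y, verbose=0))  # 0.0 -- masked steps contribute nothing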
@ -480,11 +502,11 @@ class TestDistributionStrategyWithNormalizationLayer(test.TestCase,
keras_test_lib.all_strategy_combinations(),
combinations.combine(
fused=[True, False],
run_distributed=[True, False],
experimental_run_tf_function=[True, False],
optimizer=strategy_combinations
.gradient_descent_optimizer_keras_v2_fn)))
def test_batchnorm_correctness(self, distribution, fused, optimizer,
run_distributed):
experimental_run_tf_function):
with self.cached_session():
with distribution.scope():
model = keras.models.Sequential()
@ -496,7 +518,9 @@ class TestDistributionStrategyWithNormalizationLayer(test.TestCase,
), momentum=0.8, fused=fused)
model.add(norm)
model.compile(
loss='mse', optimizer=optimizer(), run_distributed=run_distributed)
loss='mse',
optimizer=optimizer(),
experimental_run_tf_function=experimental_run_tf_function)
# Centered on 5.0, standard deviation 10.0 (np.random.normal's scale).
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10, 20, 30))
@ -525,21 +549,28 @@ class TestDistributionStrategySaveLoadWeights(test.TestCase,
combinations.times(
keras_test_lib.all_strategy_combinations_minus_default(),
combinations.combine(
run_distributed=[True, False],
experimental_run_tf_function=[True, False],
optimizer=strategy_combinations.rmsprop_optimizer_keras_v2_fn)))
def test_save_load_h5(self, distribution, optimizer, run_distributed):
def test_save_load_h5(self, distribution, optimizer,
experimental_run_tf_function):
with self.cached_session():
dataset = keras_test_lib.get_dataset(distribution)
with distribution.scope():
model = keras_test_lib.get_model()
model.compile(optimizer(), 'mse', run_distributed=run_distributed)
model.compile(
optimizer(),
'mse',
experimental_run_tf_function=experimental_run_tf_function)
model.fit(dataset, epochs=1, steps_per_epoch=1)
weights_file = tempfile.mktemp('.h5')
model.save_weights(weights_file)
model_2 = keras_test_lib.get_model()
model_2.compile(optimizer(), 'mse', run_distributed=run_distributed)
model_2.compile(
optimizer(),
'mse',
experimental_run_tf_function=experimental_run_tf_function)
model_2.load_weights(weights_file)
model_2.predict(
keras_test_lib.get_predict_dataset(distribution), steps=2)
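Stripped of the distribution strategy, the round trip these tests exercise is just save_weights/load_weights between two identically built models. A minimal sketch (uses the HDF5 path, so it assumes h5py is installed):

import os
import tempfile

import numpy as np
import tensorflow as tf

def make_model():
  return tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(3,))])

x = np.random.random((4, 3)).astype(np.float32)
first = make_model()
weights_file = os.path.join(tempfile.mkdtemp(), 'weights.h5')
first.save_weights(weights_file)

second = make_model()  # fresh random init...
second.load_weights(weights_file)  # ...overwritten by the saved weights
np.testing.assert_allclose(first.predict(x), second.predict(x))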
@ -549,9 +580,10 @@ class TestDistributionStrategySaveLoadWeights(test.TestCase,
combinations.times(
keras_test_lib.all_strategy_combinations_minus_default(),
combinations.combine(
run_distributed=[True, False],
experimental_run_tf_function=[True, False],
optimizer=strategy_combinations.rmsprop_optimizer_keras_v2_fn)))
def test_save_load_trackable(self, distribution, optimizer, run_distributed):
def test_save_load_trackable(self, distribution, optimizer,
experimental_run_tf_function):
# TODO(b/123533246): Enable the test for TPU once bug is fixed
if (isinstance(distribution,
(tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)) and
@ -561,14 +593,20 @@ class TestDistributionStrategySaveLoadWeights(test.TestCase,
dataset = keras_test_lib.get_dataset(distribution)
with distribution.scope():
model = keras_test_lib.get_model()
model.compile(optimizer(), 'mse', run_distributed=run_distributed)
model.compile(
optimizer(),
'mse',
experimental_run_tf_function=experimental_run_tf_function)
model.fit(dataset, epochs=1, steps_per_epoch=1)
weights_file = tempfile.mktemp()
model.save_weights(weights_file)
model_2 = keras_test_lib.get_model()
model_2.compile(optimizer(), 'mse', run_distributed=run_distributed)
model_2.compile(
optimizer(),
'mse',
experimental_run_tf_function=experimental_run_tf_function)
model_2.load_weights(weights_file)
model_2.predict(
keras_test_lib.get_predict_dataset(distribution), steps=2)
@ -580,8 +618,9 @@ class TestDistributionStrategyValidation(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.times(
keras_test_lib.all_strategy_combinations_minus_default(),
combinations.combine(run_distributed=[True, False])))
def test_layer_outside_scope(self, distribution, run_distributed):
combinations.combine(experimental_run_tf_function=[True, False])))
def test_layer_outside_scope(self, distribution,
experimental_run_tf_function):
with self.cached_session():
with self.assertRaisesRegexp(
ValueError, 'was not created in the distribution strategy'):
@ -593,13 +632,17 @@ class TestDistributionStrategyValidation(test.TestCase, parameterized.TestCase):
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(
optimizer, loss, metrics=metrics, run_distributed=run_distributed)
optimizer,
loss,
metrics=metrics,
experimental_run_tf_function=experimental_run_tf_function)
@combinations.generate(
combinations.times(
keras_test_lib.all_strategy_combinations_minus_default(),
combinations.combine(run_distributed=[True, False])))
def test_model_outside_scope(self, distribution, run_distributed):
combinations.combine(experimental_run_tf_function=[True, False])))
def test_model_outside_scope(self, distribution,
experimental_run_tf_function):
with self.cached_session():
with self.assertRaisesRegexp(
ValueError, 'was not created in the distribution strategy'):
@ -611,7 +654,10 @@ class TestDistributionStrategyValidation(test.TestCase, parameterized.TestCase):
loss = 'mse'
metrics = ['mae', keras.metrics.CategoricalAccuracy()]
model.compile(
optimizer, loss, metrics=metrics, run_distributed=run_distributed)
optimizer,
loss,
metrics=metrics,
experimental_run_tf_function=experimental_run_tf_function)
class TestDistributionStrategyWithStaticShapes(test.TestCase,


@ -221,7 +221,7 @@ class BaseLayerTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(loss, 2 * 3)
@ -313,7 +313,8 @@ class BaseLayerTest(keras_parameterized.TestCase):
def get_learning_phase_value():
model = keras.models.Sequential([LearningPhaseLayer(input_shape=(1,))])
model._run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = (
testing_utils.should_run_tf_function())
return np.sum(model(np.ones((1, 1))))
self.assertEqual(get_learning_phase_value(), 0)
@ -334,7 +335,7 @@ class BaseLayerTest(keras_parameterized.TestCase):
@keras_parameterized.run_all_keras_modes
def test_learning_phase_freezing_for_layers_in_predict(self):
if not (testing_utils.should_run_eagerly() or
testing_utils.should_run_distributed()):
testing_utils.should_run_tf_function()):
self.skipTest('Predict fails to override the outer learning phase in '
              'the FuncGraph path.')
@ -348,7 +349,8 @@ class BaseLayerTest(keras_parameterized.TestCase):
def get_learning_phase_value():
model = keras.models.Sequential([LearningPhaseLayer(input_shape=(1,))])
model._run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = (
testing_utils.should_run_tf_function())
return np.sum(model.predict(np.ones((1, 1))))
self.assertEqual(get_learning_phase_value(), 0)
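LearningPhaseLayer is defined earlier in this file; a hypothetical reconstruction makes the assertion above concrete: the layer reports 1 in training and 0 in inference, so predict() must observe 0 (assumes TF 2.x tf.keras):

import numpy as np
import tensorflow as tf

class LearningPhaseSketch(tf.keras.layers.Layer):

  def call(self, inputs, training=None):
    if training is None:
      training = tf.keras.backend.learning_phase()
    return tf.keras.backend.in_train_phase(
        lambda: tf.ones_like(inputs), lambda: tf.zeros_like(inputs),
        training=training)

model = tf.keras.Sequential([LearningPhaseSketch(input_shape=(1,))])
print(np.sum(model.predict(np.ones((1, 1)))))  # 0.0 -- predict is inference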
@ -447,7 +449,7 @@ class BaseLayerTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x, y = np.ones((10, 10)), np.ones((10, 10))
# Checks that variables get initialized.
model.fit(x, y, batch_size=2, epochs=2)
@ -494,7 +496,7 @@ class BaseLayerTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.random.random((3, 10))
out = model.predict(inputs)
self.assertAllClose(model.layers[-1].get_weights()[0], kernel_value)
@ -916,7 +918,7 @@ class AutographControlFlowTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(train_loss, 0.)
test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))
@ -941,7 +943,7 @@ class AutographControlFlowTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(train_loss, 2 * 3)
test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))
@ -966,7 +968,7 @@ class AutographControlFlowTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
_, train_metric = model.train_on_batch(np.ones((2, 3)),
np.ones((2, 3)))
self.assertEqual(train_metric, 2 * 3)
@ -998,7 +1000,7 @@ class AutographControlFlowTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(keras.backend.get_value(layer.counter), 1.)
@ -1032,7 +1034,7 @@ class AutographControlFlowTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(keras.backend.get_value(layer.counter), 6.)
else:
@ -1068,7 +1070,7 @@ class AutographControlFlowTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(loss, 2 * 3)
else:
@ -1082,7 +1084,7 @@ class AutographControlFlowTest(keras_parameterized.TestCase):
1, kernel_regularizer=keras.regularizers.l2(1e-4), input_shape=(1,))
])
model._run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
def assert_graph(t):
if not context.executing_eagerly():
@ -1125,7 +1127,7 @@ class AutographControlFlowTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
history = model.fit(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(history.history['sum'][-1], 2 * 3)
else:
@ -1154,7 +1156,7 @@ class AutographControlFlowTest(keras_parameterized.TestCase):
loss='mse',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
@ -1188,7 +1190,7 @@ class AutographControlFlowTest(keras_parameterized.TestCase):
loss='mse',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones(shape=(10, 3, 4))
y = np.ones(shape=(10, 3, 2))


@ -160,7 +160,7 @@ class PreprocessingLayerTest(keras_parameterized.TestCase):
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
layer.set_total(15)
@ -177,7 +177,7 @@ class PreprocessingLayerTest(keras_parameterized.TestCase):
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))
@ -190,7 +190,7 @@ class PreprocessingLayerTest(keras_parameterized.TestCase):
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
layer.adapt(input_dataset)
@ -211,7 +211,7 @@ class PreprocessingLayerTest(keras_parameterized.TestCase):
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))
@ -223,7 +223,7 @@ class PreprocessingLayerTest(keras_parameterized.TestCase):
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
combiner = layer._combiner
updates = combiner.extract(combiner.compute(input_dataset))
@ -243,7 +243,7 @@ class PreprocessingLayerTest(keras_parameterized.TestCase):
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))
@ -257,7 +257,7 @@ class PreprocessingLayerTest(keras_parameterized.TestCase):
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
layer.adapt(input_dataset)
@ -275,7 +275,7 @@ class PreprocessingLayerTest(keras_parameterized.TestCase):
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
self.assertAllEqual([[16], [17], [18]], model.predict([1., 2., 3.]))
@ -293,7 +293,7 @@ class PreprocessingLayerTest(keras_parameterized.TestCase):
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
combiner = layer._combiner
updates = combiner.extract(combiner.compute(input_dataset))
@ -312,7 +312,8 @@ class PreprocessingLayerTest(keras_parameterized.TestCase):
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = (
testing_utils.should_run_tf_function())
return (model, layer)
input_dataset = np.array([1, 2, 3, 4, 5])
@ -338,7 +339,8 @@ class PreprocessingLayerTest(keras_parameterized.TestCase):
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = (
testing_utils.should_run_tf_function())
return (model, layer)
input_dataset = np.array([1, 2, 3, 4, 5])
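The set_total/adapt layer these preprocessing tests build is defined elsewhere; a self-contained stand-in (hypothetical, not the real combiner-based preprocessing-layer API) shows the expected arithmetic: adapting over [1..5] stores 15, so [1, 2, 3] maps to [16, 17, 18]:

import numpy as np
import tensorflow as tf

class AddTotal(tf.keras.layers.Layer):
  """Adds a statistic learned from data via adapt() to every input."""

  def __init__(self):
    super().__init__()
    self.total = tf.Variable(0., trainable=False)

  def adapt(self, data):
    self.total.assign(float(np.sum(data)))

  def call(self, inputs):
    return inputs + self.total

layer = AddTotal()
layer.adapt(np.array([1., 2., 3., 4., 5.]))      # total = 15
print(layer(tf.constant([1., 2., 3.])).numpy())  # [16. 17. 18.]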


@ -70,7 +70,7 @@ class SimpleBiasTest(keras_parameterized.TestCase):
keras.optimizer_v2.gradient_descent.SGD(0.1),
'mae',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model
def test_simple_bias_fit(self):
@ -109,7 +109,7 @@ class MultipleInputTest(keras_parameterized.TestCase):
keras.optimizer_v2.gradient_descent.SGD(0.1),
'mae',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model
@parameterized.named_parameters(('subclassed', True), ('functional', False))


@ -60,7 +60,7 @@ class FeatureColumnsIntegrationTest(keras_parameterized.TestCase):
loss='categorical_crossentropy',
metrics=['accuracy'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = {'a': np.random.random((10, 1))}
y = np.random.randint(20, size=(10, 1))
@ -83,7 +83,7 @@ class FeatureColumnsIntegrationTest(keras_parameterized.TestCase):
loss='categorical_crossentropy',
metrics=['accuracy'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
y = np.random.randint(20, size=(100, 1))
y = keras.utils.to_categorical(y, num_classes=20)
@ -147,7 +147,7 @@ class FeatureColumnsIntegrationTest(keras_parameterized.TestCase):
loss='categorical_crossentropy',
metrics=['accuracy'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = {'a': np.random.random((10, 1)), 'b': np.random.random((10, 1))}
y = np.random.randint(20, size=(10, 1))
@ -169,7 +169,7 @@ class FeatureColumnsIntegrationTest(keras_parameterized.TestCase):
loss='categorical_crossentropy',
metrics=['accuracy'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
y = np.random.randint(20, size=(100, 1))
y = keras.utils.to_categorical(y, num_classes=20)


@ -810,7 +810,7 @@ class NetworkConstructionTest(keras_parameterized.TestCase):
output = a(b(a(b(x))))
m = keras.models.Model(x, output)
m.run_eagerly = testing_utils.should_run_eagerly()
m._run_distributed = testing_utils.should_run_distributed()
m._experimental_run_tf_function = testing_utils.should_run_tf_function()
output_val = m.predict(x_val)
@ -838,7 +838,7 @@ class NetworkConstructionTest(keras_parameterized.TestCase):
m = keras.models.Model(inputs=input_layer, outputs=output)
m.run_eagerly = testing_utils.should_run_eagerly()
m._run_distributed = testing_utils.should_run_distributed()
m._experimental_run_tf_function = testing_utils.should_run_tf_function()
x_val = np.random.random((10, 16, 9, 3))
output_val = m.predict(x_val)
@ -868,7 +868,7 @@ class NetworkConstructionTest(keras_parameterized.TestCase):
optimizer='sgd',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
loss = model.train_on_batch(x, y)
self.assertEqual(loss, 0) # In inference mode, output is equal to input.
@ -888,8 +888,7 @@ class NetworkConstructionTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed()
)
experimental_run_tf_function=testing_utils.should_run_tf_function())
history = model.fit(
x=[np.ones((10, 5, 10)), np.zeros((10, 5))],
y=np.zeros((10, 100)),
@ -919,7 +918,7 @@ class NetworkConstructionTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
history = model.fit(
x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))],
y=10 * np.ones((10, 10)),
@ -945,7 +944,7 @@ class NetworkConstructionTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
history = model.fit(
x=[3 * np.ones((10, 10)), 7 * np.ones((10, 10))],
y=10 * np.ones((10, 10)),
@ -981,7 +980,7 @@ class NetworkConstructionTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
history = model.fit(
x=[np.ones((10, 10)), 2 * np.ones((10, 10)), 3 * np.ones((10, 10))],
y=15 * np.ones((10, 10)),
@ -1006,13 +1005,14 @@ class NetworkConstructionTest(keras_parameterized.TestCase):
o = keras.layers.add(o)
model = keras.Model(i, o)
model.run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
i2 = keras.layers.Input(shape=(3, 2, 1))
o2 = model(i2)
model2 = keras.Model(i2, o2)
model2.run_eagerly = testing_utils.should_run_eagerly()
model2._run_distributed = testing_utils.should_run_distributed()
model2._experimental_run_tf_function = testing_utils.should_run_tf_function(
)
x = np.random.random((4, 3, 2, 1))
out = model2.predict(x)
@ -1031,7 +1031,7 @@ class NetworkConstructionTest(keras_parameterized.TestCase):
optimizer='sgd',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
json_str = model.to_json()
keras.models.model_from_json(json_str)
@ -1331,7 +1331,7 @@ class DefaultShapeInferenceBehaviorTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model_input = np.random.randint(
low=1, high=5, size=(10, 3, 4)).astype('float32')
@ -1516,14 +1516,14 @@ class AddLossTest(keras_parameterized.TestCase):
model.compile(
'sgd',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, batch_size=2, epochs=1)
model2 = model.from_config(model.get_config())
model2.compile(
'sgd',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model2.set_weights(initial_weights)
model2.fit(x, batch_size=2, epochs=1)
@ -1548,7 +1548,7 @@ class AddLossTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, y, batch_size=2, epochs=1)
model2 = model.from_config(model.get_config())
@ -1556,7 +1556,7 @@ class AddLossTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model2.set_weights(initial_weights)
model2.fit(x, y, batch_size=2, epochs=1)


@ -78,7 +78,7 @@ class TestSequential(keras_parameterized.TestCase):
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.random.random((batch_size, input_dim))
y = np.random.random((batch_size, num_classes))
model.fit(x, y, epochs=1)
@ -89,7 +89,7 @@ class TestSequential(keras_parameterized.TestCase):
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
y = np.random.random((batch_size, num_hidden))
model.fit(x, y, epochs=1)
@ -118,7 +118,7 @@ class TestSequential(keras_parameterized.TestCase):
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(len(model.layers), 2)
with self.assertRaisesRegexp(
ValueError, 'Weights for model .* have not yet been created'):
@ -146,7 +146,7 @@ class TestSequential(keras_parameterized.TestCase):
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(len(model.layers), 2)
with self.assertRaisesRegexp(
ValueError, 'Weights for model .* have not yet been created'):
@ -295,7 +295,7 @@ class TestSequential(keras_parameterized.TestCase):
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertFalse(model.built)
x = np.random.random((batch_size, input_dim))
@ -344,7 +344,7 @@ class TestSequential(keras_parameterized.TestCase):
'rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(len(model.outputs), 0)
model.train_on_batch(np.zeros((1, 2)), np.zeros((1, 5)))
self.assertEqual(len(model.outputs), 1)
@ -359,7 +359,7 @@ class TestSequential(keras_parameterized.TestCase):
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.random.random((2, 6))
y = np.random.random((2, 5))
model.fit(x, y, epochs=1)
@ -390,7 +390,7 @@ class TestSequential(keras_parameterized.TestCase):
keras.layers.Lambda(lambda x: x[0])
])
seq.run_eagerly = testing_utils.should_run_eagerly()
seq._run_distributed = testing_utils.should_run_distributed()
seq._experimental_run_tf_function = testing_utils.should_run_tf_function()
preds = seq.predict([['tensorflow eager']])
self.assertEqual(preds.shape, (1,))
@ -470,7 +470,7 @@ class TestSequentialEagerIntegration(keras_parameterized.TestCase):
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.random.random((2, 6))
y = np.random.random((2, 5))
@ -484,7 +484,7 @@ class TestSequentialEagerIntegration(keras_parameterized.TestCase):
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.build((None, 6))
@ -503,7 +503,7 @@ class TestSequentialEagerIntegration(keras_parameterized.TestCase):
weighted_metrics=['mae'],
loss='categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = {'dense_input': np.random.random((10, 1))}
y = np.random.randint(num_classes, size=(10, 1))


@ -155,7 +155,7 @@ class Model(network.Network):
self._compile_distribution = False
self._run_eagerly = None
self._run_distributed = False
self._experimental_run_tf_function = False
def get_weights(self):
"""Retrieves the weights of the model.
@ -244,20 +244,21 @@ class Model(network.Network):
`optimizer`, `loss`, `metrics` or `sample_weight_mode`.
"""
self._run_eagerly = kwargs.pop('run_eagerly', None)
self._run_distributed = kwargs.pop('run_distributed', False)
self._experimental_run_tf_function = kwargs.pop(
'experimental_run_tf_function', False)
if ((sample_weight_mode is not None)
or (target_tensors is not None)
or (weighted_metrics is not None)
or not context.executing_eagerly()):
# Fall back out of things that aren't supported with v2 loops
self._run_distributed = False
self._experimental_run_tf_function = False
self._compile_time_distribution_strategy = (
distribution_strategy_context.get_strategy())
if distribute is not None:
if tf2.enabled() or self._run_distributed:
if tf2.enabled() or self._experimental_run_tf_function:
raise ValueError(
'Distribute argument in compile is not available in TF 2.0, please '
'create the model under the distribution strategy scope.')
@ -275,7 +276,7 @@ class Model(network.Network):
self._distribution_strategy = (
distribution_strategy_context.get_strategy())
if not self._run_distributed:
if not self._experimental_run_tf_function:
self._validate_compile_param_for_distribution_strategy(self.run_eagerly,
sample_weight_mode,
target_tensors,
@ -491,8 +492,7 @@ class Model(network.Network):
'`iter(dataset)`.')
# Experimental training loop with default DS path.
if (context.executing_eagerly()
and self._run_distributed
if (context.executing_eagerly() and self._experimental_run_tf_function
# TODO(scottzhu): Finish getting sequences working with the v2 loops.
and not isinstance(inputs, (data_utils.Sequence))
and not distributed_training_utils.is_tpu_strategy(
@ -963,7 +963,7 @@ class Model(network.Network):
"""
self._assert_compile_was_called()
self._check_call_args('train_on_batch')
if self._run_distributed:
if self._experimental_run_tf_function:
outputs = training_v2_utils.train_on_batch(
self, x, y=y, sample_weight=sample_weight,
class_weight=class_weight, reset_metrics=reset_metrics)
@ -1056,7 +1056,7 @@ class Model(network.Network):
"""
self._assert_compile_was_called()
self._check_call_args('test_on_batch')
if self._run_distributed:
if self._experimental_run_tf_function:
outputs = training_v2_utils.test_on_batch(
self, x, y=y, sample_weight=sample_weight,
reset_metrics=reset_metrics)
@ -1119,7 +1119,7 @@ class Model(network.Network):
expectations of the model.
"""
self._check_call_args('predict_on_batch')
if self._run_distributed:
if self._experimental_run_tf_function:
return training_v2_utils.predict_on_batch(self, x)
if (self._distribution_strategy and
@ -2608,7 +2608,7 @@ class Model(network.Network):
target_tensors=target_tensors,
sample_weight_mode=self.sample_weight_mode,
run_eagerly=self.run_eagerly,
run_distributed=self._run_distributed)
experimental_run_tf_function=self._experimental_run_tf_function)
# TODO(omalleyt): Consider changing to a more descriptive function name.
def _set_inputs(self, inputs, outputs=None, training=None):
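Read together, these training.py hunks make the renamed attribute a single dispatch switch: compile pops the flag (defaulting to False, and forcing it back off when sample_weight_mode, target_tensors, weighted_metrics, or graph execution rule out the v2 loops), and each *_on_batch method branches on it into training_v2_utils. A runnable toy stand-in for that control flow (not TensorFlow code; names and return values are placeholders):

class ToyModel(object):
  """Toy illustration of the flag-gated dispatch introduced above."""

  def __init__(self):
    self._run_eagerly = None
    self._experimental_run_tf_function = False

  def compile(self, optimizer, loss=None, sample_weight_mode=None, **kwargs):
    self._run_eagerly = kwargs.pop('run_eagerly', None)
    self._experimental_run_tf_function = kwargs.pop(
        'experimental_run_tf_function', False)
    if sample_weight_mode is not None:
      # Features unsupported by the v2 loops force the flag back off.
      self._experimental_run_tf_function = False

  def train_on_batch(self, x, y=None):
    if self._experimental_run_tf_function:
      # Stands in for training_v2_utils.train_on_batch(self, x, y=y, ...).
      return 'v2 function path'
    return 'legacy path'


model = ToyModel()
model.compile('rmsprop', loss='mse', experimental_run_tf_function=True)
assert model.train_on_batch(x=None) == 'v2 function path'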

View File

@ -52,10 +52,10 @@ class TestTrainingWithDataset(keras_parameterized.TestCase):
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_calling_model_on_same_dataset(self):
if ((not testing_utils.should_run_eagerly())
and testing_utils.get_model_type() == 'subclass'
and context.executing_eagerly()
and (not testing_utils.should_run_distributed())):
if ((not testing_utils.should_run_eagerly()) and
testing_utils.get_model_type() == 'subclass' and
context.executing_eagerly() and
(not testing_utils.should_run_tf_function())):
self.skipTest('b/120673224')
model = testing_utils.get_small_mlp(1, 4, input_dim=3)
@ -67,7 +67,7 @@ class TestTrainingWithDataset(keras_parameterized.TestCase):
loss,
metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
@ -93,7 +93,7 @@ class TestTrainingWithDataset(keras_parameterized.TestCase):
loss,
metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
@ -175,7 +175,7 @@ class TestTrainingWithDataset(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
input_a_np = np.random.random((10, 3)).astype(dtype=np.float32)
input_b_np = np.random.random((10, 3)).astype(dtype=np.float32)
@ -232,7 +232,7 @@ class TestTrainingWithDataset(keras_parameterized.TestCase):
loss,
metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
@ -279,7 +279,7 @@ class TestTrainingWithDataset(keras_parameterized.TestCase):
optimizer,
loss='sparse_categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.zeros((10, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=10, dtype=np.int32)
@ -304,7 +304,7 @@ class TestTrainingWithDataset(keras_parameterized.TestCase):
'rmsprop',
loss='mae',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.zeros((40, 2), dtype=np.float32)
inputs[10:20, :] = 2
@ -375,7 +375,7 @@ class TestTrainingWithDataset(keras_parameterized.TestCase):
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=100, dtype=np.int32)
@ -399,7 +399,7 @@ class TestTrainingWithDataset(keras_parameterized.TestCase):
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=100, dtype=np.int32)
@ -439,7 +439,7 @@ class TestTrainingWithDataset(keras_parameterized.TestCase):
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=100, dtype=np.int32)
@ -476,7 +476,7 @@ class TestTrainingWithDataset(keras_parameterized.TestCase):
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.zeros((100, 3), dtype=np.float32)
targets = np.random.randint(0, 4, size=100, dtype=np.int32)
@ -542,7 +542,7 @@ class TestMetricsWithDatasets(keras_parameterized.TestCase):
metrics=['accuracy', metrics_module.BinaryAccuracy()],
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
np.random.seed(123)
x = np.random.randint(10, size=(100, 4)).astype(np.float32)

View File

@ -88,7 +88,7 @@ class TrainingTest(keras_parameterized.TestCase):
metrics=metrics,
loss_weights=loss_weights,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed(),
experimental_run_tf_function=testing_utils.should_run_tf_function(),
sample_weight_mode=None)
input_a = array_ops.zeros(shape=(10, 3))
@ -159,7 +159,7 @@ class TrainingTest(keras_parameterized.TestCase):
loss,
metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = array_ops.zeros(shape=(10, 3))
targets = array_ops.zeros(shape=(10, 4))
@ -244,7 +244,7 @@ class CorrectnessTest(keras_parameterized.TestCase):
loss='sparse_categorical_crossentropy',
optimizer=rmsprop.RMSprop(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones((100, 4))
np.random.seed(123)
y = np.random.randint(0, 1, size=(100, 1))
@ -265,7 +265,7 @@ class CorrectnessTest(keras_parameterized.TestCase):
loss='sparse_categorical_crossentropy',
optimizer=rmsprop.RMSprop(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones((100, 4), dtype=np.float32)
np.random.seed(123)
y = np.random.randint(0, 1, size=(100, 1))

View File

@ -152,7 +152,7 @@ class TestGeneratorMethods(ForkRobustTestCase):
optimizer=rmsprop.RMSprop(1e-3),
metrics=['mae', metrics_module.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self._sleep_at_end = True
model.evaluate_generator(custom_generator(),
@ -180,7 +180,7 @@ class TestGeneratorMethods(ForkRobustTestCase):
model = testing_utils.get_small_mlp(
num_hidden=3, num_classes=4, input_dim=2)
model.run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
self._sleep_at_end = True
model.predict_generator(custom_generator(),
@ -221,7 +221,7 @@ class TestGeneratorMethods(ForkRobustTestCase):
optimizer=rmsprop.RMSprop(1e-3),
metrics=['mae', metrics_module.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit_generator(custom_generator(mode=3),
steps_per_epoch=5,
@ -259,7 +259,7 @@ class TestGeneratorMethods(ForkRobustTestCase):
loss='mse',
optimizer=rmsprop.RMSprop(1e-3),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
err_msg = 'Output of generator should be a tuple of 1 or 2 or 3 elements'
with self.assertRaisesRegex(ValueError, err_msg):
@ -305,7 +305,7 @@ class TestGeneratorMethods(ForkRobustTestCase):
rmsprop.RMSprop(0.001),
'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(
ones_generator(),
steps_per_epoch=2,

View File

@ -70,7 +70,7 @@ class CompileTest(keras_parameterized.TestCase):
optimizer='adam',
loss=loss,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(model.loss, loss)
loss = losses.get(loss)
@ -120,7 +120,7 @@ class CompileTest(keras_parameterized.TestCase):
optimizer='adam',
loss=loss,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(model.loss_functions[0].fn, losses.mean_squared_error)
self.assertEqual(model.loss_functions[1].fn, losses.mean_absolute_error)
self.assertAllEqual(model._loss_weights_list, [1., 1.])
@ -131,7 +131,7 @@ class CompileTest(keras_parameterized.TestCase):
optimizer='adam',
loss=loss,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(model.loss_functions[0].fn, losses.mean_absolute_error)
self.assertEqual(model.loss_functions[1].fn, losses.mean_squared_error)
self.assertAllEqual(model._loss_weights_list, [1., 1.])
@ -145,7 +145,7 @@ class CompileTest(keras_parameterized.TestCase):
loss='mse',
loss_weights=loss_weights,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertAllEqual(model._loss_weights_list, [1., 2.])
def test_compile_with_multi_output_and_loss_weights_dict(self):
@ -183,7 +183,7 @@ class CompileTest(keras_parameterized.TestCase):
optimizer='adam',
loss=['mse', 'mae'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
@keras_parameterized.run_all_keras_modes
def test_compile_with_incorrect_loss_key(self):
@ -197,7 +197,7 @@ class CompileTest(keras_parameterized.TestCase):
optimizer='adam',
loss={'unknown_output': 'mse'},
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
@keras_parameterized.run_all_keras_modes
def test_compile_with_incorrect_loss_weights_size(self):
@ -210,7 +210,7 @@ class CompileTest(keras_parameterized.TestCase):
loss='mse',
loss_weights=[1., 2.],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
@keras_parameterized.run_all_keras_modes
def test_compile_with_incorrect_loss_weights_key(self):
@ -225,7 +225,7 @@ class CompileTest(keras_parameterized.TestCase):
loss='mse',
loss_weights={'unknown_output': 1.},
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
@keras_parameterized.run_all_keras_modes
def test_compile_with_incorrect_sample_weight_mode(self):
@ -240,7 +240,7 @@ class CompileTest(keras_parameterized.TestCase):
loss='mse',
sample_weight_mode={'unknown': 'temporal'},
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
class TrainingTest(keras_parameterized.TestCase):
@ -262,7 +262,7 @@ class TrainingTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
hist = model.fit(x=np.array([0.]), y=np.array([0.]))
self.assertAllClose(hist.history['loss'][0], 10000)
@ -281,7 +281,7 @@ class TrainingTest(keras_parameterized.TestCase):
'sgd',
loss='mae',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.ones((40, 2), dtype=np.float32)
targets = np.ones((40, 1), dtype=np.float32)
@ -315,7 +315,7 @@ class TrainingTest(keras_parameterized.TestCase):
'sgd',
loss='mae',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.ones((40, 2), dtype=np.float32)
targets = np.ones((40, 1), dtype=np.float32)
@ -362,7 +362,7 @@ class TrainingTest(keras_parameterized.TestCase):
'sgd',
loss='mae',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.ones((40, 2), dtype=np.float32)
targets = np.ones((40, 1), dtype=np.float32)
@ -402,7 +402,7 @@ class TrainingTest(keras_parameterized.TestCase):
metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
loss_weights=loss_weights,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
@ -515,7 +515,7 @@ class TrainingTest(keras_parameterized.TestCase):
loss,
metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
@ -536,7 +536,7 @@ class TrainingTest(keras_parameterized.TestCase):
metrics=metrics,
loss_weights=loss_weights,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(
[input_a_np, input_b_np], [output_d_np, output_e_np],
epochs=1,
@ -576,7 +576,7 @@ class TrainingTest(keras_parameterized.TestCase):
optimizer,
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
# This will work
model.fit([input_a_np], output_d_np, epochs=1)
# TODO(gsundeep) Test only works in eager, file ticket
@ -588,7 +588,7 @@ class TrainingTest(keras_parameterized.TestCase):
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 4))
if testing_utils.should_run_distributed():
if testing_utils.should_run_tf_function():
model.fit(np.ndarray.tolist(input_a_np),
np.ndarray.tolist(input_b_np),
epochs=2,
@ -623,7 +623,7 @@ class TrainingTest(keras_parameterized.TestCase):
loss_weights=loss_weights,
sample_weight_mode=None,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
@ -709,7 +709,7 @@ class TrainingTest(keras_parameterized.TestCase):
optimizer,
'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, y, batch_size=2, epochs=5)
loss[reg] = model.evaluate(x, y)
self.assertLess(loss[None], loss['l2'])
@ -730,7 +730,7 @@ class TrainingTest(keras_parameterized.TestCase):
optimizer,
'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
loss = model.test_on_batch(x, y)
self.assertAlmostEqual(0.01, loss, places=4)
@ -748,7 +748,7 @@ class TrainingTest(keras_parameterized.TestCase):
optimizer,
'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones((10, 10), 'float32')
y = np.ones((10, 1), 'float32')
@ -816,7 +816,7 @@ class TrainingTest(keras_parameterized.TestCase):
'mse',
metrics=['mae', metrics_module.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(test_inputs, test_outputs,
epochs=1, batch_size=2, validation_split=0.5)
model.evaluate(test_inputs, test_outputs, batch_size=2)
@ -838,7 +838,7 @@ class TrainingTest(keras_parameterized.TestCase):
optimizer=keras.optimizers.Adam(lr=0.0001),
metrics=['accuracy'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
@keras_parameterized.run_all_keras_modes
def test_that_trainable_disables_updates(self):
@ -857,7 +857,7 @@ class TrainingTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
assert not model.updates
x1 = model.predict(val_a)
@ -870,7 +870,7 @@ class TrainingTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
assert model.updates
model.train_on_batch(val_a, val_out)
@ -882,7 +882,7 @@ class TrainingTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
assert not model.updates
x1 = model.predict(val_a)
@ -997,7 +997,7 @@ class TrainingTest(keras_parameterized.TestCase):
RMSPropOptimizer(learning_rate=0.001),
loss='sparse_categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
# Test with Numpy data
x_train = np.random.random((10, 3, 4))
y_train = np.random.randint(0, 5, size=(10, 3))
@ -1041,7 +1041,7 @@ class TrainingTest(keras_parameterized.TestCase):
RMSPropOptimizer(learning_rate=0.001),
loss='binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
with test.mock.patch.object(sys, 'stdout', mock_stdout):
model.fit(
np.ones((10, 10), 'float32'), np.ones((10, 1), 'float32'), epochs=10)
@ -1230,7 +1230,7 @@ class TrainingTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
class ValCounter(keras.callbacks.Callback):
@ -1259,7 +1259,7 @@ class TrainingTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
with self.assertRaisesRegexp(
ValueError, '`validation_steps` should not be specified if '
@ -1290,7 +1290,7 @@ class TrainingTest(keras_parameterized.TestCase):
keras.optimizer_v2.gradient_descent.SGD(0.025),
loss=keras.losses.MeanAbsoluteError(),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.array([[0.], [1.], [2.]])
y = np.array([[0.5], [2.], [3.5]])
@ -1316,7 +1316,7 @@ class TrainingTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
self.assertEqual(loss, 2 * 3)
@ -1397,7 +1397,7 @@ class TrainingTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones((10, 10), dtype=dtype)
y = np.ones((10, 10), dtype=dtype)
@ -1430,11 +1430,11 @@ class TrainingTest(keras_parameterized.TestCase):
loss='mse',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, x, epochs=1)
if (testing_utils.should_run_eagerly() or
testing_utils.should_run_distributed()):
testing_utils.should_run_tf_function()):
expected_training_arg = True
else:
expected_training_arg = keras.backend.symbolic_learning_phase()
@ -1515,7 +1515,7 @@ class TestExceptionsAndWarnings(keras_parameterized.TestCase):
optimizer,
loss=None,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
@keras_parameterized.run_all_keras_modes
def test_compile_warning_for_loss_missing_output(self):
@ -1537,7 +1537,7 @@ class TestExceptionsAndWarnings(keras_parameterized.TestCase):
'dense_1': metrics_module.CategoricalAccuracy(),
},
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
msg = ('Output dense_1 missing from loss dictionary. We assume this '
'was done on purpose. The fit and evaluate APIs will not be '
'expecting any data to be passed to dense_1.')
@ -1553,10 +1553,11 @@ class TestExceptionsAndWarnings(keras_parameterized.TestCase):
'sgd',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
err_msg = 'When passing input data as arrays, do not specify'
if testing_utils.should_run_eagerly() and not model._run_distributed:
if testing_utils.should_run_eagerly(
) and not model._experimental_run_tf_function:
with self.assertRaisesRegex(ValueError, err_msg):
model.fit(x=np.zeros((100, 1)), y=np.ones((100, 1)), steps_per_epoch=4)
@ -1598,7 +1599,7 @@ class LossWeightingTest(keras_parameterized.TestCase):
weighted_metrics=['mae', metrics_module.CategoricalAccuracy()],
optimizer=RMSPropOptimizer(learning_rate=learning_rate),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
@ -1667,7 +1668,7 @@ class LossWeightingTest(keras_parameterized.TestCase):
weighted_metrics=['mae', metrics_module.CategoricalAccuracy()],
loss='categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
np.random.seed(43)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
@ -1779,7 +1780,7 @@ class LossWeightingTest(keras_parameterized.TestCase):
weighted_metrics=['mae', metrics_module.CategoricalAccuracy()],
sample_weight_mode='temporal',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(
temporal_x_train,
@ -1827,7 +1828,7 @@ class LossWeightingTest(keras_parameterized.TestCase):
optimizer='adam',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.random.random((10, 3))
y = np.random.random((10, 2))
@ -1866,7 +1867,7 @@ class LossWeightingTest(keras_parameterized.TestCase):
optimizer,
loss='binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=train_samples,
@ -1888,7 +1889,7 @@ class LossWeightingTest(keras_parameterized.TestCase):
loss='binary_crossentropy',
sample_weight_mode=[],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
# Build multi-output model
x = keras.Input((3,))
@ -1899,7 +1900,7 @@ class LossWeightingTest(keras_parameterized.TestCase):
optimizer,
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x_np = np.random.random((10, 3))
y_np = np.random.random((10, 4))
w_np = np.random.random((10,))
@ -1951,7 +1952,7 @@ class LossWeightingTest(keras_parameterized.TestCase):
loss='mse',
sample_weight_mode=[None],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, y, epochs=1, batch_size=10)
# sample_weight_mode is a list and mode value is `temporal`
@ -1960,7 +1961,7 @@ class LossWeightingTest(keras_parameterized.TestCase):
loss='mse',
sample_weight_mode=['temporal'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, y, epochs=1, batch_size=10)
# sample_weight_mode is a dict and mode value is None
@ -1969,7 +1970,7 @@ class LossWeightingTest(keras_parameterized.TestCase):
loss='mse',
sample_weight_mode={'time_distributed': None},
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, y, epochs=1, batch_size=10)
# sample_weight_mode is a dict and mode value is `temporal`
@ -1978,7 +1979,7 @@ class LossWeightingTest(keras_parameterized.TestCase):
loss='mse',
sample_weight_mode={'time_distributed': 'temporal'},
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, y, epochs=1, batch_size=10)
# sample_weight_mode is not a list/dict and mode value is None
@ -1987,7 +1988,7 @@ class LossWeightingTest(keras_parameterized.TestCase):
loss='mse',
sample_weight_mode=None,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, y, epochs=1, batch_size=10)
# sample_weight_mode is not a list/dict and mode value is `temporal`
@ -1996,7 +1997,7 @@ class LossWeightingTest(keras_parameterized.TestCase):
loss='mse',
sample_weight_mode='temporal',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, y, epochs=1, batch_size=10)
def test_sample_weight_tensor(self):
@ -2076,7 +2077,7 @@ class MaskingTest(keras_parameterized.TestCase):
loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model
@keras_parameterized.run_with_all_model_types
@ -2122,7 +2123,7 @@ class MaskingTest(keras_parameterized.TestCase):
loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
y = np.random.random((5, 3))
model.train_on_batch(x, y)
@ -2141,7 +2142,7 @@ class TestDynamicTrainability(keras_parameterized.TestCase):
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.trainable = True
model.train_on_batch(x, y)
self.assertRaises(Warning)
@ -2157,7 +2158,7 @@ class TestDynamicTrainability(keras_parameterized.TestCase):
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
out = model.predict(x)
model.train_on_batch(x, y)
out_2 = model.predict(x)
@ -2171,7 +2172,7 @@ class TestDynamicTrainability(keras_parameterized.TestCase):
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
out = model.predict(x)
model.train_on_batch(x, y)
out_2 = model.predict(x)
@ -2289,7 +2290,7 @@ class TestDynamicTrainability(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs2 = keras.Input(10)
outputs2 = shared_layer(inputs2)
@ -2299,7 +2300,7 @@ class TestDynamicTrainability(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x, y = np.ones((10, 10)), np.ones((10, 10))
@ -2333,7 +2334,7 @@ class TestTrainingWithDataTensors(keras_parameterized.TestCase):
loss,
metrics=['mae', metrics_module.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = keras.backend.zeros(shape=(10, 3))
targets = keras.backend.zeros(shape=(10, 4))
@ -2387,7 +2388,7 @@ class TestTrainingWithDataTensors(keras_parameterized.TestCase):
metrics=['mae', metrics_module.CategoricalAccuracy()],
loss_weights=loss_weights,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
input_a_tf = keras.backend.zeros(shape=(10, 3))
input_b_tf = keras.backend.zeros(shape=(10, 3))
@ -2919,7 +2920,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
loss='mae',
metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
mse_metric = 'mse' if tf2.enabled() else 'mean_squared_error'
reference_metric_names = [
@ -2952,7 +2953,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
metrics=[acc_obj],
optimizer=RMSPropOptimizer(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x_train = np.random.random((100, 4))
y_train = np.random.random((100, 1))
@ -2986,7 +2987,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
metrics=[keras.metrics.MeanSquaredError()],
weighted_metrics=[keras.metrics.MeanSquaredError()],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
# list of list of metrics.
model.compile(
@ -3003,7 +3004,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
keras.metrics.Accuracy()]
],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
# dict of metrics.
model.compile(
@ -3026,7 +3027,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
],
},
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
@keras_parameterized.run_all_keras_modes
def test_invalid_metrics(self):
@ -3044,7 +3045,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
loss='categorical_crossentropy',
metrics=metrics_module.CategoricalAccuracy(),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inp = keras.layers.Input(shape=(1,))
x = keras.layers.Dense(3, activation='relu')(inp)
@ -3069,7 +3070,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
'output_3': 'mse',
},
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
with self.assertRaisesRegex(
ValueError,
@ -3083,7 +3084,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
'output_3': 'mse',
},
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
@keras_parameterized.run_all_keras_modes
def test_metrics_masking(self):
@ -3101,7 +3102,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
loss='mse',
weighted_metrics=['accuracy'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
# verify that masking is applied.
x = np.array([[[1], [1]], [[1], [1]], [[0], [0]]])
@ -3138,7 +3139,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
'sgd',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.ones(shape=(10, 1))
targets = np.ones(shape=(10, 1))
@ -3181,7 +3182,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
loss='mse',
optimizer=RMSPropOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
@ -3224,7 +3225,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
loss='mse',
optimizer=RMSPropOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
@ -3283,7 +3284,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
loss='mse',
metrics=[metrics_module.Accuracy('metric_4')],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
# Verify that the metrics added using `compile` and `add_metric` API are
# included
@ -3311,7 +3312,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
optimizer=RMSPropOptimizer(0.01),
metrics=[metrics_module.Accuracy('acc')],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y))
@ -3342,7 +3343,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
loss='mse',
optimizer=RMSPropOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
@ -3385,7 +3386,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
loss='mse',
optimizer=RMSPropOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, y, epochs=2, batch_size=5, validation_data=(x, y))
@keras_parameterized.run_all_keras_modes
@ -3408,7 +3409,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
loss='mse',
optimizer=RMSPropOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
@ -3436,7 +3437,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
loss='mse',
optimizer=RMSPropOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones(shape=(10, 1))
y = np.ones(shape=(10, 2))
@ -3474,7 +3475,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
optimizer=keras.optimizer_v2.gradient_descent.SGD(0.1),
metrics=[metrics_module.MeanAbsoluteError(name='mae_3')],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.array([[0.], [1.], [2.]])
y = np.array([[0.5], [2.], [3.5]])
@ -3511,7 +3512,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
loss='mse',
metrics=[metrics_module.Accuracy('acc')],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual([m.name for m in inner_model.metrics],
['acc', 'mean', 'mean1'])
@ -3527,7 +3528,7 @@ class TestTrainingWithMetrics(keras_parameterized.TestCase):
loss='mse',
metrics=[metrics_module.Accuracy('acc2')],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual([m.name for m in outer_model.metrics],
['acc2', 'mean', 'mean1', 'mean2'])
@ -3611,7 +3612,7 @@ class TestAutoUpdates(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, y, batch_size=2, epochs=1)
self.assertEqual(self.evaluate(layer.counter), 5)
@ -3625,7 +3626,7 @@ class TestAutoUpdates(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, y, batch_size=2, epochs=1)
self.assertEqual(self.evaluate(layer.counter), 5)
layer.trainable = False
@ -3633,7 +3634,7 @@ class TestAutoUpdates(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, y, batch_size=2, epochs=1)
self.assertEqual(self.evaluate(layer.counter), 5)
@ -3647,7 +3648,7 @@ class TestAutoUpdates(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, y, batch_size=2, epochs=1)
self.assertEqual(self.evaluate(layer.counter), 5)
@ -3681,7 +3682,7 @@ class TestAutoUpdates(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x, y = np.ones((10, 10)), np.ones((10, 1))
model.fit(x, y, batch_size=2, epochs=1)
self.assertAllEqual(self.evaluate(bn.moving_mean), np.zeros((10,)))

View File

@ -74,7 +74,7 @@ class VectorClassificationIntegrationTest(keras_parameterized.TestCase):
optimizer=keras.optimizer_v2.adam.Adam(0.005),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
history = model.fit(x_train, y_train, epochs=10, batch_size=10,
validation_data=(x_train, y_train),
verbose=2)
@ -111,7 +111,7 @@ class VectorClassificationIntegrationTest(keras_parameterized.TestCase):
optimizer=keras.optimizer_v2.adam.Adam(0.005),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
if not testing_utils.should_run_eagerly():
self.assertEqual(len(model.get_losses_for(None)), 2)
self.assertEqual(len(model.get_updates_for(x)), 2)
@ -152,7 +152,7 @@ class SequentialIntegrationTest(KerasIntegrationTest):
optimizer=keras.optimizer_v2.adam.Adam(0.005),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x_train, y_train, epochs=1, batch_size=10,
validation_data=(x_train, y_train),
verbose=2)
@ -175,7 +175,7 @@ class SequentialIntegrationTest(KerasIntegrationTest):
optimizer=keras.optimizer_v2.adam.Adam(0.005),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
history = model.fit(x_train, y_train, epochs=10, batch_size=10,
validation_data=(x_train, y_train),
verbose=2)
@ -212,7 +212,7 @@ class TimeseriesClassificationIntegrationTest(keras_parameterized.TestCase):
optimizer=keras.optimizer_v2.adam.Adam(0.005),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
history = model.fit(x_train, y_train, epochs=15, batch_size=10,
validation_data=(x_train, y_train),
verbose=2)
@ -242,7 +242,7 @@ class TimeseriesClassificationIntegrationTest(keras_parameterized.TestCase):
optimizer=keras.optimizer_v2.adam.Adam(0.005),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
history = model.fit(x_train, y_train, epochs=15, batch_size=10,
validation_data=(x_train, y_train),
verbose=2)
@ -281,7 +281,7 @@ class ImageClassificationIntegrationTest(keras_parameterized.TestCase):
optimizer=keras.optimizer_v2.adam.Adam(0.005),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
history = model.fit(x_train, y_train, epochs=10, batch_size=10,
validation_data=(x_train, y_train),
verbose=2)
@ -326,7 +326,7 @@ class ActivationV2IntegrationTest(keras_parameterized.TestCase):
optimizer=keras.optimizer_v2.adam.Adam(0.005),
metrics=['accuracy'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x_train, y_train, epochs=2, batch_size=10,
validation_data=(x_train, y_train),
verbose=2)

View File

@ -204,9 +204,10 @@ def run_all_keras_modes(test_or_class=None,
optimizer = RMSPropOptimizer(learning_rate=0.001)
loss = 'mse'
metrics = ['mae']
model.compile(optimizer, loss, metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
model.compile(
optimizer, loss, metrics=metrics,
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.zeros((10, 3))
targets = np.zeros((10, 4))
@ -243,12 +244,11 @@ def run_all_keras_modes(test_or_class=None,
a target dependency.
"""
params = [('_v2_function', 'v2_function'),
('_v2_distributed', 'v2_distributed')]
params = [('_v2_function', 'v2_function'), ('_v2_funcgraph', 'v2_funcgraph')]
if not always_skip_eager:
params.append(('_v2_eager', 'v2_eager'))
if not (always_skip_v1 or tf2.enabled()):
params.append(('_v1_graph', 'v1_graph'))
params.append(('_v1_session', 'v1_session'))
def single_method_decorator(f):
"""Decorator that constructs the test cases."""
@ -258,14 +258,14 @@ def run_all_keras_modes(test_or_class=None,
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
"""A run of a single test case w/ specified run mode."""
if run_mode == 'v1_graph':
_v1_graph_test(f, self, config, *args, **kwargs)
elif run_mode == 'v2_function':
if run_mode == 'v1_session':
_v1_session_test(f, self, config, *args, **kwargs)
elif run_mode == 'v2_funcgraph':
_v2_graph_functions_test(f, self, *args, **kwargs)
elif run_mode == 'v2_eager':
_v2_eager_test(f, self, *args, **kwargs)
elif run_mode == 'v2_distributed':
_v2_distributed_test(f, self, *args, **kwargs)
elif run_mode == 'v2_function':
_v2_function_test(f, self, *args, **kwargs)
else:
raise ValueError('Unknown run mode %s' % run_mode)
@ -274,9 +274,9 @@ def run_all_keras_modes(test_or_class=None,
return _test_or_class_decorator(test_or_class, single_method_decorator)
def _v1_graph_test(f, test_or_class, config, *args, **kwargs):
def _v1_session_test(f, test_or_class, config, *args, **kwargs):
with context.graph_mode(), testing_utils.run_eagerly_scope(False):
with testing_utils.run_distributed_scope(False):
with testing_utils.experimental_run_tf_function_scope(False):
with test_or_class.test_session(use_gpu=True, config=config):
f(test_or_class, *args, **kwargs)
@ -284,21 +284,21 @@ def _v1_graph_test(f, test_or_class, config, *args, **kwargs):
def _v2_graph_functions_test(f, test_or_class, *args, **kwargs):
with context.eager_mode():
with testing_utils.run_eagerly_scope(False):
with testing_utils.run_distributed_scope(False):
with testing_utils.experimental_run_tf_function_scope(False):
f(test_or_class, *args, **kwargs)
def _v2_eager_test(f, test_or_class, *args, **kwargs):
with context.eager_mode():
with testing_utils.run_eagerly_scope(True):
with testing_utils.run_distributed_scope(False):
with testing_utils.experimental_run_tf_function_scope(True):
f(test_or_class, *args, **kwargs)
def _v2_distributed_test(f, test_or_class, *args, **kwargs):
def _v2_function_test(f, test_or_class, *args, **kwargs):
with context.eager_mode():
with testing_utils.run_eagerly_scope(False):
with testing_utils.run_distributed_scope(True):
with testing_utils.experimental_run_tf_function_scope(True):
f(test_or_class, *args, **kwargs)
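After the rename, the decorator's four parameterizations map cleanly onto the scopes the helpers above install. A summary of that matrix (the MODE_SCOPES name is illustrative only; the triples match the (mode, run_eagerly, run_tf_function) tuples asserted in the parameterized tests below):

# Sketch: how each generated test suffix configures the two scopes.
#   suffix -> (context, run_eagerly_scope, experimental_run_tf_function_scope)
MODE_SCOPES = {
    '_v1_session': ('graph', False, False),
    '_v2_funcgraph': ('eager', False, False),
    '_v2_function': ('eager', False, True),
    '_v2_eager': ('eager', True, True),
}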

View File

@ -210,21 +210,21 @@ class KerasParameterizedTest(keras_parameterized.TestCase):
def testBody(self):
mode = "eager" if context.executing_eagerly() else "graph"
should_run_eagerly = testing_utils.should_run_eagerly()
should_run_distributed = testing_utils.should_run_distributed()
l.append((mode, should_run_eagerly, should_run_distributed))
should_run_tf_function = testing_utils.should_run_tf_function()
l.append((mode, should_run_eagerly, should_run_tf_function))
e = ExampleTest()
if not tf2.enabled():
e.testBody_v1_graph()
e.testBody_v1_session()
e.testBody_v2_eager()
e.testBody_v2_funcgraph()
e.testBody_v2_function()
e.testBody_v2_distributed()
if not tf2.enabled():
self.assertLen(l, 4)
self.assertAllEqual(l, [
("graph", False, False),
("eager", True, False),
("eager", True, True),
("eager", False, False),
("eager", False, True),
])
@ -236,7 +236,7 @@ class KerasParameterizedTest(keras_parameterized.TestCase):
else:
self.assertLen(l, 3)
self.assertAllEqual(l, [
("eager", True, False),
("eager", True, True),
("eager", False, False),
("eager", False, True),
])
@ -262,27 +262,27 @@ class KerasParameterizedTest(keras_parameterized.TestCase):
mode = "eager" if context.executing_eagerly() else "graph"
with_brackets = "with_brackets" if with_brackets else "without_brackets"
should_run_eagerly = testing_utils.should_run_eagerly()
should_run_distributed = testing_utils.should_run_distributed()
l.append((with_brackets, mode, should_run_eagerly,
should_run_distributed))
should_run_tf_function = testing_utils.should_run_tf_function()
l.append(
(with_brackets, mode, should_run_eagerly, should_run_tf_function))
e = ExampleTest()
if not tf2.enabled():
e.testBody_0_v1_graph()
e.testBody_1_v1_graph()
e.testBody_0_v1_session()
e.testBody_1_v1_session()
e.testBody_0_v2_eager()
e.testBody_0_v2_funcgraph()
e.testBody_0_v2_function()
e.testBody_0_v2_distributed()
e.testBody_1_v2_eager()
e.testBody_1_v2_funcgraph()
e.testBody_1_v2_function()
e.testBody_1_v2_distributed()
expected_combinations = {
("with_brackets", "eager", True, False),
("with_brackets", "eager", True, True),
("with_brackets", "eager", False, False),
("with_brackets", "eager", False, True),
("without_brackets", "eager", True, False),
("without_brackets", "eager", True, True),
("without_brackets", "eager", False, False),
("without_brackets", "eager", False, True),
}
@ -314,25 +314,26 @@ class KerasParameterizedTest(keras_parameterized.TestCase):
def testBody(self):
mode = "eager" if context.executing_eagerly() else "graph"
should_run_eagerly = testing_utils.should_run_eagerly()
should_run_distributed = testing_utils.should_run_distributed()
l.append((mode, should_run_eagerly, should_run_distributed))
should_run_tf_function = testing_utils.should_run_tf_function()
l.append((mode, should_run_eagerly, should_run_tf_function))
e = ExampleTest()
if hasattr(e, "testBody_v1_graph"):
e.testBody_v1_graph()
if hasattr(e, "testBody_v1_session"):
e.testBody_v1_session()
if hasattr(e, "testBody_v2_eager"):
e.testBody_v2_eager()
if hasattr(e, "testBody_v2_funcgraph"):
e.testBody_v2_funcgraph()
if hasattr(e, "testBody_v2_function"):
e.testBody_v2_function()
if hasattr(e, "testBody_v2_distributed"):
e.testBody_v2_distributed()
self.assertLen(l, 3)
self.assertEqual(set(l), {
("eager", True, False),
("eager", False, False),
("eager", False, True),
})
self.assertEqual(
set(l), {
("eager", True, True),
("eager", False, False),
("eager", False, True),
})
def test_run_all_keras_modes_with_all_model_types(self):
l = []
@ -347,34 +348,34 @@ class KerasParameterizedTest(keras_parameterized.TestCase):
def testBody(self):
mode = "eager" if context.executing_eagerly() else "graph"
should_run_eagerly = testing_utils.should_run_eagerly()
should_run_distributed = testing_utils.should_run_distributed()
l.append((mode, should_run_eagerly, should_run_distributed,
should_run_tf_function = testing_utils.should_run_tf_function()
l.append((mode, should_run_eagerly, should_run_tf_function,
testing_utils.get_model_type()))
e = ExampleTest()
e.testBody_v2_eager_functional()
e.testBody_v2_funcgraph_functional()
e.testBody_v2_function_functional()
e.testBody_v2_distributed_functional()
e.testBody_v2_eager_sequential()
e.testBody_v2_funcgraph_sequential()
e.testBody_v2_function_sequential()
e.testBody_v2_distributed_sequential()
e.testBody_v2_eager_subclass()
e.testBody_v2_funcgraph_subclass()
e.testBody_v2_function_subclass()
e.testBody_v2_distributed_subclass()
if not tf2.enabled():
e.testBody_v1_graph_functional()
e.testBody_v1_graph_sequential()
e.testBody_v1_graph_subclass()
e.testBody_v1_session_functional()
e.testBody_v1_session_sequential()
e.testBody_v1_session_subclass()
expected_combinations = {
("eager", True, False, "functional"),
("eager", True, True, "functional"),
("eager", False, False, "functional"),
("eager", False, True, "functional"),
("eager", True, False, "sequential"),
("eager", True, True, "sequential"),
("eager", False, False, "sequential"),
("eager", False, True, "sequential"),
("eager", True, False, "subclass"),
("eager", True, True, "subclass"),
("eager", False, False, "subclass"),
("eager", False, True, "subclass"),
}
@ -408,34 +409,34 @@ class KerasParameterizedTest(keras_parameterized.TestCase):
def testBody(self):
mode = "eager" if context.executing_eagerly() else "graph"
should_run_eagerly = testing_utils.should_run_eagerly()
should_run_distributed = testing_utils.should_run_distributed()
l.append((mode, should_run_eagerly, should_run_distributed,
should_run_tf_function = testing_utils.should_run_tf_function()
l.append((mode, should_run_eagerly, should_run_tf_function,
testing_utils.get_model_type()))
e = ExampleTest()
e.testBody_functional_v2_eager()
e.testBody_functional_v2_funcgraph()
e.testBody_functional_v2_function()
e.testBody_functional_v2_distributed()
e.testBody_sequential_v2_eager()
e.testBody_sequential_v2_funcgraph()
e.testBody_sequential_v2_function()
e.testBody_sequential_v2_distributed()
e.testBody_subclass_v2_eager()
e.testBody_subclass_v2_funcgraph()
e.testBody_subclass_v2_function()
e.testBody_subclass_v2_distributed()
if not tf2.enabled():
e.testBody_functional_v1_graph()
e.testBody_sequential_v1_graph()
e.testBody_subclass_v1_graph()
e.testBody_functional_v1_session()
e.testBody_sequential_v1_session()
e.testBody_subclass_v1_session()
expected_combinations = {
("eager", True, False, "functional"),
("eager", True, True, "functional"),
("eager", False, False, "functional"),
("eager", False, True, "functional"),
("eager", True, False, "sequential"),
("eager", True, True, "sequential"),
("eager", False, False, "sequential"),
("eager", False, True, "sequential"),
("eager", True, False, "subclass"),
("eager", True, True, "subclass"),
("eager", False, False, "subclass"),
("eager", False, True, "subclass"),
}
@ -471,34 +472,34 @@ class KerasParameterizedTest(keras_parameterized.TestCase):
def testBody(self, arg):
mode = "eager" if context.executing_eagerly() else "graph"
should_run_eagerly = testing_utils.should_run_eagerly()
should_run_distributed = testing_utils.should_run_distributed()
l.append((mode, should_run_eagerly, should_run_distributed,
should_run_tf_function = testing_utils.should_run_tf_function()
l.append((mode, should_run_eagerly, should_run_tf_function,
testing_utils.get_model_type()))
e = ExampleTest()
e.testBody_arg_v2_eager_functional()
e.testBody_arg_v2_funcgraph_functional()
e.testBody_arg_v2_function_functional()
e.testBody_arg_v2_distributed_functional()
e.testBody_arg_v2_eager_sequential()
e.testBody_arg_v2_funcgraph_sequential()
e.testBody_arg_v2_function_sequential()
e.testBody_arg_v2_distributed_sequential()
e.testBody_arg_v2_eager_subclass()
e.testBody_arg_v2_funcgraph_subclass()
e.testBody_arg_v2_function_subclass()
e.testBody_arg_v2_distributed_subclass()
if not tf2.enabled():
e.testBody_arg_v1_graph_functional()
e.testBody_arg_v1_graph_sequential()
e.testBody_arg_v1_graph_subclass()
e.testBody_arg_v1_session_functional()
e.testBody_arg_v1_session_sequential()
e.testBody_arg_v1_session_subclass()
expected_combinations = {
("eager", True, False, "functional"),
("eager", True, True, "functional"),
("eager", False, False, "functional"),
("eager", False, True, "functional"),
("eager", True, False, "sequential"),
("eager", True, True, "sequential"),
("eager", False, False, "sequential"),
("eager", False, True, "sequential"),
("eager", True, False, "subclass"),
("eager", True, True, "subclass"),
("eager", False, False, "subclass"),
("eager", False, True, "subclass"),
}
@ -534,34 +535,34 @@ class KerasParameterizedTest(keras_parameterized.TestCase):
def testBody(self, arg):
mode = "eager" if context.executing_eagerly() else "graph"
should_run_eagerly = testing_utils.should_run_eagerly()
should_run_distributed = testing_utils.should_run_distributed()
l.append((mode, should_run_eagerly, should_run_distributed,
should_run_tf_function = testing_utils.should_run_tf_function()
l.append((mode, should_run_eagerly, should_run_tf_function,
testing_utils.get_model_type()))
e = ExampleTest()
e.testBody_arg_v2_eager_functional()
e.testBody_arg_v2_funcgraph_functional()
e.testBody_arg_v2_function_functional()
e.testBody_arg_v2_distributed_functional()
e.testBody_arg_v2_eager_sequential()
e.testBody_arg_v2_funcgraph_sequential()
e.testBody_arg_v2_function_sequential()
e.testBody_arg_v2_distributed_sequential()
e.testBody_arg_v2_eager_subclass()
e.testBody_arg_v2_funcgraph_subclass()
e.testBody_arg_v2_function_subclass()
e.testBody_arg_v2_distributed_subclass()
if not tf2.enabled():
e.testBody_arg_v1_graph_functional()
e.testBody_arg_v1_graph_sequential()
e.testBody_arg_v1_graph_subclass()
e.testBody_arg_v1_session_functional()
e.testBody_arg_v1_session_sequential()
e.testBody_arg_v1_session_subclass()
expected_combinations = {
("eager", True, False, "functional"),
("eager", True, True, "functional"),
("eager", False, False, "functional"),
("eager", False, True, "functional"),
("eager", True, False, "sequential"),
("eager", True, True, "sequential"),
("eager", False, False, "sequential"),
("eager", False, True, "sequential"),
("eager", True, False, "subclass"),
("eager", True, True, "subclass"),
("eager", False, False, "subclass"),
("eager", False, True, "subclass"),
}

View File

@ -98,7 +98,7 @@ class AdvancedActivationsTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(np.ones((10, 10)), np.ones((10, 1)), batch_size=2)

View File

@ -289,7 +289,7 @@ class TestStatefulLambda(keras_parameterized.TestCase):
keras.optimizer_v2.gradient_descent.SGD(0.1),
'mae',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x, y = np.ones((10, 10), 'float32'), 2 * np.ones((10, 10), 'float32')
model.fit(x, y, batch_size=2, epochs=2, validation_data=(x, y))
self.assertLen(model.trainable_weights, 1)

View File

@ -87,7 +87,7 @@ class CuDNNTest(keras_parameterized.TestCase):
self.assertEqual(len(state), num_states)
model = keras.models.Model(inputs, state[0])
model.run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
inputs = np.random.random((num_samples, timesteps, input_size))
state = model.predict(inputs)
@ -146,7 +146,7 @@ class CuDNNTest(keras_parameterized.TestCase):
loss='categorical_crossentropy',
optimizer=RMSprop(learning_rate=0.001),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.random.random((num_samples, timesteps, input_size))
initial_state = [

View File

@ -80,7 +80,7 @@ class EmbeddingTest(keras_parameterized.TestCase):
layer.set_weights([np.array([[1, 1], [2, 2]])])
model.run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
outputs = model.predict(np.array([[0, 1, 0]], dtype='int32'))
self.assertAllClose(outputs, [[[1, 1], [2, 2], [1, 1]]])

View File

@ -55,7 +55,7 @@ class GRULayerTest(keras_parameterized.TestCase):
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
@ -106,7 +106,7 @@ class GRULayerTest(keras_parameterized.TestCase):
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
gru_model.fit(x_train, y_train)
gru_model.predict(x_train)
@ -122,7 +122,7 @@ class GRULayerTest(keras_parameterized.TestCase):
loss='categorical_crossentropy',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_statefulness_GRU(self):
@ -147,7 +147,7 @@ class GRULayerTest(keras_parameterized.TestCase):
optimizer='sgd',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))

View File

@ -445,7 +445,7 @@ class GRUV2Test(keras_parameterized.TestCase):
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
@ -518,7 +518,7 @@ class GRUV2Test(keras_parameterized.TestCase):
optimizer='adam',
loss='sparse_categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, y, epochs=1, shuffle=False)
@test_util.run_v2_only
@ -593,9 +593,10 @@ class GRUGraphRewriteTest(keras_parameterized.TestCase):
num_classes=self.output_shape)
y_train = keras.utils.to_categorical(y_train, self.output_shape)
model.compile(optimizer='sgd',
loss=['categorical_crossentropy', None],
run_distributed=testing_utils.should_run_distributed())
model.compile(
optimizer='sgd',
loss=['categorical_crossentropy', None],
experimental_run_tf_function=testing_utils.should_run_tf_function())
existing_loss = 0
for _ in range(self.epoch):
@ -650,10 +651,11 @@ class GRUGraphRewriteTest(keras_parameterized.TestCase):
num_classes=self.output_shape)
y_train = keras.utils.to_categorical(y_train, self.output_shape)
model.compile(optimizer='sgd',
loss=['categorical_crossentropy', None],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
model.compile(
optimizer='sgd',
loss=['categorical_crossentropy', None],
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x_train, y_train)

View File

@ -71,7 +71,7 @@ class LSTMLayerTest(keras_parameterized.TestCase):
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
@ -132,7 +132,7 @@ class LSTMLayerTest(keras_parameterized.TestCase):
loss='categorical_crossentropy',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_masking_with_stacking_LSTM(self):
@ -147,7 +147,7 @@ class LSTMLayerTest(keras_parameterized.TestCase):
loss='categorical_crossentropy',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_LSTM(self):
@ -179,7 +179,7 @@ class LSTMLayerTest(keras_parameterized.TestCase):
loss='categorical_crossentropy',
optimizer=adam.AdamOptimizer(),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
@ -207,7 +207,7 @@ class LSTMLayerTest(keras_parameterized.TestCase):
loss='categorical_crossentropy',
optimizer=adam.AdamOptimizer(),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.random.random((num_samples, timesteps, embedding_dim))
targets = np.random.random((num_samples, units))
@ -260,7 +260,7 @@ class LSTMLayerTest(keras_parameterized.TestCase):
loss='categorical_crossentropy',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
@ -324,7 +324,7 @@ class LSTMLayerTest(keras_parameterized.TestCase):
loss='categorical_crossentropy',
optimizer=adam.AdamOptimizer(),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
main_inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
@ -374,7 +374,7 @@ class LSTMLayerTest(keras_parameterized.TestCase):
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))

View File

@ -609,7 +609,7 @@ class LSTMV2Test(keras_parameterized.TestCase):
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
@ -682,7 +682,7 @@ class LSTMV2Test(keras_parameterized.TestCase):
optimizer='adam',
loss='sparse_categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, y, epochs=1, shuffle=False)
def test_dropout_LSTM(self):
@ -767,10 +767,11 @@ class LSTMGraphRewriteTest(keras_parameterized.TestCase):
num_classes=self.output_shape)
y_train = keras.utils.to_categorical(y_train, self.output_shape)
model.compile(optimizer='sgd',
loss=['categorical_crossentropy', None],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
model.compile(
optimizer='sgd',
loss=['categorical_crossentropy', None],
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
existing_loss = 0
for _ in range(self.epoch):
@ -825,10 +826,11 @@ class LSTMGraphRewriteTest(keras_parameterized.TestCase):
num_classes=self.output_shape)
y_train = keras.utils.to_categorical(y_train, self.output_shape)
model.compile(optimizer='sgd',
loss=['categorical_crossentropy', None],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
model.compile(
optimizer='sgd',
loss=['categorical_crossentropy', None],
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x_train, y_train)

View File

@ -41,7 +41,7 @@ class MergeLayersTest(keras_parameterized.TestCase):
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2, i3], o)
model.run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
@ -75,7 +75,7 @@ class MergeLayersTest(keras_parameterized.TestCase):
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
@ -109,7 +109,7 @@ class MergeLayersTest(keras_parameterized.TestCase):
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2, i3], o)
model.run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
@ -125,7 +125,7 @@ class MergeLayersTest(keras_parameterized.TestCase):
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
@ -140,7 +140,7 @@ class MergeLayersTest(keras_parameterized.TestCase):
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
@ -155,7 +155,7 @@ class MergeLayersTest(keras_parameterized.TestCase):
self.assertListEqual(o.shape.as_list(), [None, 4, 5])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
@ -171,7 +171,7 @@ class MergeLayersTest(keras_parameterized.TestCase):
self.assertListEqual(o.shape.as_list(), [None, 8, 5])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
x1 = np.random.random((2, 4, 5))
x2 = np.random.random((2, 4, 5))
@ -203,7 +203,7 @@ class MergeLayersTest(keras_parameterized.TestCase):
self.assertListEqual(o.shape.as_list(), [None, 1])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
_ = keras.layers.Dot(axes=1).get_config()
x1 = np.random.random((2, 4))
@ -220,7 +220,7 @@ class MergeLayersTest(keras_parameterized.TestCase):
self.assertListEqual(o.shape.as_list(), [None, 1])
model = keras.models.Model([i1, i2], o)
model.run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
out = model.predict([x1, x2])
self.assertEqual(out.shape, (2, 1))
self.assertAllClose(out, expected, atol=1e-4)

View File

@ -104,7 +104,7 @@ class BatchNormalizationTest(keras_parameterized.TestCase):
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))
@ -126,7 +126,7 @@ class BatchNormalizationTest(keras_parameterized.TestCase):
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3))
@ -175,7 +175,7 @@ class BatchNormalizationTest(keras_parameterized.TestCase):
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(np.random.random((100, 3)), np.random.random((100, 3)))
test_data = np.random.random((10, 3))
@ -187,7 +187,7 @@ class BatchNormalizationTest(keras_parameterized.TestCase):
'rmsprop',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
train_loss = model.train_on_batch(test_data, test_targets)
self.assertAlmostEqual(test_loss, train_loss)
@ -366,7 +366,7 @@ def _run_batchnorm_correctness_test(layer, dtype='float32', fused=False):
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
# centered on 5.0, variance 10.0
x = (np.random.normal(loc=5.0, scale=10.0, size=(1000, 2, 2, 2))
@ -498,10 +498,11 @@ def _run_layernorm_correctness_test(layer, dtype='float32'):
model = keras.models.Sequential()
norm = layer(input_shape=(2, 2, 2))
model.add(norm)
model.compile(loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
model.compile(
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
# centered on 5.0, variance 10.0
x = (np.random.normal(loc=5.0, scale=10.0, size=(1000, 2, 2, 2))
@ -573,7 +574,7 @@ class LayerNormalizationTest(keras_parameterized.TestCase):
loss='mse',
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
# centered on 5.0, variance 10.0
x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3))

View File

@ -171,7 +171,7 @@ class NormalizationTest(keras_parameterized.TestCase,
output = layer(input_data)
model = keras.Model(input_data, output)
model._run_eagerly = testing_utils.should_run_eagerly()
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
output_data = model.predict(test_data)
self.assertAllClose(expected, output_data)

View File

@ -83,7 +83,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
@ -97,7 +97,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_minimal_rnn_cell_non_layer_multiple_states(self):
@ -128,7 +128,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
@ -144,7 +144,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_minimal_rnn_cell_layer(self):
@ -187,7 +187,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test basic case serialization.
@ -214,7 +214,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacked RNN serialization.
@ -271,7 +271,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer="rmsprop",
loss="mse",
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
@ -285,7 +285,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_rnn_with_time_major(self):
@ -314,7 +314,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(
np.zeros((batch, time_step, embedding_dim)),
np.zeros((batch, time_step, units)))
@ -335,7 +335,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(
np.zeros((batch, time_step, embedding_dim)),
np.zeros((batch, time_step, cell_units[-1])))
@ -353,7 +353,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(
np.zeros((batch, time_step, embedding_dim)),
np.zeros((batch, time_step, units)))
@ -368,7 +368,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(
np.zeros((batch, time_step, embedding_dim)),
np.zeros((batch, time_step, units)))
@ -403,7 +403,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 3))],
np.zeros((6, 32))
@ -444,7 +444,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 3))],
np.zeros((6, 32))
@ -461,7 +461,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 3))],
np.zeros((6, 32))
@ -494,7 +494,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
@ -508,7 +508,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_rnn_cell_with_constants_layer_passing_initial_state(self):
@ -524,7 +524,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 32)), np.zeros((6, 3))],
np.zeros((6, 32))
@ -574,7 +574,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
@ -591,7 +591,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
def test_stacked_rnn_attributes(self):
@ -693,7 +693,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
@ -718,7 +718,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
# Test stacked RNN serialization.
x_np = np.random.random((6, 5, 5))
@ -749,7 +749,7 @@ class RNNTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x_np = np.random.random((6, 5, 5))
y_np = np.random.random((6, 3))
model.train_on_batch(x_np, y_np)
@ -774,7 +774,7 @@ class RNNTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x_np = np.random.random((6, 5, 5))
y_np = np.random.random((6, 3))
model.train_on_batch(x_np, y_np)
@ -852,7 +852,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, y, epochs=1, batch_size=1)
# check whether the model variables are present in the
@ -888,7 +888,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(
np.zeros((batch, time_step, input_a, input_b)),
np.zeros((batch, unit_a, unit_b)))
@ -907,7 +907,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(
np.zeros((batch, time_step, input_a, input_b)),
np.zeros((batch, unit_a * 4, unit_b * 4)))
@ -933,7 +933,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch([
np.zeros((batch, time_step, input_a, input_b)),
np.zeros((batch, unit_a, unit_b))
@ -972,7 +972,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(
np.zeros((batch, time_step, input_size)),
np.zeros((batch, input_size)))
@ -1030,7 +1030,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(
[np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))],
[np.zeros((batch, o1)), np.zeros((batch, o2, o3))])
@ -1054,7 +1054,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(
[np.zeros((batch, t, i1)),
np.zeros((batch, t, i2, i3))],
@ -1085,7 +1085,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(
[np.zeros((batch, t, i1)),
np.zeros((batch, t, i2, i3))],
@ -1112,7 +1112,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(
[np.zeros((batch, t, i1)),
np.zeros((batch, t, i2, i3))],
@ -1148,7 +1148,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(
[np.zeros((batch, t, i1)),
np.zeros((batch, t, i2, i3)),
@ -1182,7 +1182,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(
[np.zeros((batch, t, i1)),
np.zeros((batch, t, i2, i3)),
@ -1260,7 +1260,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
# last time step masked
x_np = np.array([[[1.], [2.], [0.]]])
@ -1287,7 +1287,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
np_x = np.ones((6, 5, 5))
result_1 = model.predict(np_x)
@ -1312,7 +1312,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
np_x = np.ones((6, 1, 5))
result = model.predict(np_x)
@ -1368,7 +1368,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(
np.zeros((batch, timesteps, input_dim)),
np.zeros((batch, output_dim)))
@ -1419,7 +1419,7 @@ class RNNTest(keras_parameterized.TestCase):
optimizer='rmsprop',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch(
np.zeros((batch, timesteps, input_dim)),
np.zeros((batch, output_dim)))

View File

@ -61,7 +61,7 @@ class RNNV2Test(keras_parameterized.TestCase):
optimizer='adam',
loss='sparse_categorical_crossentropy',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, y, epochs=1, shuffle=False)
@parameterized.parameters([rnn_v2.LSTM, rnn_v2.GRU])

View File

@ -159,7 +159,7 @@ class SimpleRNNLayerTest(keras_parameterized.TestCase):
optimizer=gradient_descent.GradientDescentOptimizer(0.01),
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))

View File

@ -188,7 +188,7 @@ class AutoLambdaTest(keras_parameterized.TestCase):
adam.Adam(0.001),
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
np_inputs = nest.map_structure(
lambda x: np.ones((10,) + tuple(x.shape[1:]), 'float32'), model.inputs)
@ -203,7 +203,7 @@ class AutoLambdaTest(keras_parameterized.TestCase):
adam.Adam(0.001),
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
new_model.fit(np_inputs, np_outputs, batch_size=2)
new_model(np_inputs) # Test calling the new model directly on inputs.
# Assert that metrics are preserved and in the right order.

View File

@ -90,7 +90,7 @@ class TestMetricsCorrectnessMultiIO(keras_parameterized.TestCase):
metrics.MeanSquaredError(name='mean_squared_error_2')
],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model
def setUp(self):
@ -429,7 +429,7 @@ class TestMetricsCorrectnessSingleIO(keras_parameterized.TestCase):
metrics.MeanSquaredError(name='mean_squared_error_2')
],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model
def _custom_generator(self, sample_weight=None):
@ -640,7 +640,7 @@ class TestOutputLossMetrics(keras_parameterized.TestCase):
optimizer='rmsprop',
loss=loss,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model
def setUp(self):

View File

@ -1968,7 +1968,7 @@ def _get_model(compile_metrics):
metrics=compile_metrics,
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model

View File

@ -368,13 +368,13 @@ class KerasModelTest(keras_parameterized.TestCase):
}, {
'testcase_name': 'norun_distributed',
'strategy_fn': create_mirrored_strategy,
'run_distributed': False
'experimental_run_tf_function': False
})
def test_model(self,
strategy_fn,
use_operator=False,
use_regularizer=False,
run_distributed=True):
experimental_run_tf_function=True):
if not self._is_strategy_supported(strategy_fn, check_model_type=True):
return
regularizer = IdentityRegularizer() if use_regularizer else None
@ -410,7 +410,7 @@ class KerasModelTest(keras_parameterized.TestCase):
opt,
loss=loss_fn,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones((2, 1))
y = np.ones((2, 1))
@ -435,9 +435,11 @@ class KerasModelTest(keras_parameterized.TestCase):
}, {
'testcase_name': 'norun_distributed',
'strategy_fn': create_mirrored_strategy,
'run_distributed': False,
'experimental_run_tf_function': False,
})
def test_fixed_loss_scaling(self, strategy_fn, run_distributed=True):
def test_fixed_loss_scaling(self,
strategy_fn,
experimental_run_tf_function=True):
# Note: We do not test mixed precision in this method, only loss scaling.
if not self._is_strategy_supported(strategy_fn):
return
@ -467,7 +469,7 @@ class KerasModelTest(keras_parameterized.TestCase):
opt,
loss=loss_fn,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(backend.eval(layer.v), 1)
x = np.ones((batch_size, 1))
@ -549,7 +551,7 @@ class KerasModelTest(keras_parameterized.TestCase):
opt,
loss=loss_fn,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones((2, 1))
y = np.ones((2, 1))
@ -574,9 +576,11 @@ class KerasModelTest(keras_parameterized.TestCase):
}, {
'testcase_name': 'norun_distributed',
'strategy_fn': create_mirrored_strategy,
'run_distributed': False,
'experimental_run_tf_function': False,
})
def test_dynamic_loss_scaling(self, strategy_fn, run_distributed=True):
def test_dynamic_loss_scaling(self,
strategy_fn,
experimental_run_tf_function=True):
if not self._is_strategy_supported(strategy_fn):
return
strategy = strategy_fn()
@ -616,7 +620,7 @@ class KerasModelTest(keras_parameterized.TestCase):
opt,
loss=loss_fn,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(backend.eval(layer.v), 1)
x = np.ones((batch_size, 1))
@ -727,7 +731,7 @@ class KerasModelTest(keras_parameterized.TestCase):
optimizer=opt,
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2)
weights_file = os.path.join(self.get_temp_dir(), 'weights')
@ -767,7 +771,7 @@ class KerasModelTest(keras_parameterized.TestCase):
optimizer=opt,
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
# Run for 3 steps (6 examples with a batch size of 2)
model.fit(np.zeros((6, 2)), np.zeros((6, 2)), batch_size=2)
self.assertEqual(backend.get_value(loss_scale()), 2)
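The loss-scaling tests above toggle the renamed flag through parameterized test cases rather than through the decorator. A stripped-down sketch of that pattern (the 'base' case name is a stand-in; create_mirrored_strategy and the test body come from the diff context):

    @parameterized.named_parameters(
        {'testcase_name': 'base',
         'strategy_fn': create_mirrored_strategy},
        {'testcase_name': 'norun_distributed',
         'strategy_fn': create_mirrored_strategy,
         'experimental_run_tf_function': False})
    def test_fixed_loss_scaling(self,
                                strategy_fn,
                                experimental_run_tf_function=True):
      ...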

View File

@ -242,7 +242,7 @@ class ModelSubclassingTest(keras_parameterized.TestCase):
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(np.ones((10, 10)), np.ones((10, 1)), batch_size=2, epochs=2)
self.assertLen(model.layers, 2)
self.assertLen(model.trainable_variables, 4)
@ -615,7 +615,7 @@ class ModelSubclassCompiledTest(keras_parameterized.TestCase):
optimizer='rmsprop',
metrics=['acc', keras.metrics.CategoricalAccuracy()],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
@ -636,7 +636,7 @@ class ModelSubclassCompiledTest(keras_parameterized.TestCase):
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
@ -657,7 +657,7 @@ class ModelSubclassCompiledTest(keras_parameterized.TestCase):
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones((num_samples, input_dim), dtype=np.float32)
y = np.zeros((num_samples, num_classes), dtype=np.float32)
@ -690,7 +690,7 @@ class ModelSubclassCompiledTest(keras_parameterized.TestCase):
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch([x1, x2], [y1, y2])
self.assertEqual(model.built, True)
@ -724,7 +724,7 @@ class ModelSubclassCompiledTest(keras_parameterized.TestCase):
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
y_ref = model.predict(x)
model.train_on_batch(x, y)
@ -758,7 +758,7 @@ class ModelSubclassCompiledTest(keras_parameterized.TestCase):
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
loss = model.train_on_batch(x, y)
self.assertGreater(loss, 0.1)
@ -780,7 +780,7 @@ class ModelSubclassCompiledTest(keras_parameterized.TestCase):
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
model.fit({'input_1': x1, 'input_2': x2},
{'output_1': y1, 'output_2': y2},
@ -793,7 +793,7 @@ class ModelSubclassCompiledTest(keras_parameterized.TestCase):
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.train_on_batch([x1, x2], [y1, y2])
model.train_on_batch({'input_1': x1, 'input_2': x2},
{'output_1': y1, 'output_2': y2})
@ -815,7 +815,7 @@ class ModelSubclassCompiledTest(keras_parameterized.TestCase):
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.evaluate([x1, x2], [y1, y2])
model.test_on_batch([x1, x2], [y1, y2])
@ -840,7 +840,7 @@ class ModelSubclassCompiledTest(keras_parameterized.TestCase):
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
y_ref_1, y_ref_2 = model.predict([x1, x2])
@ -880,7 +880,7 @@ class ModelSubclassCompiledTest(keras_parameterized.TestCase):
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
@ -905,7 +905,7 @@ class ModelSubclassCompiledTest(keras_parameterized.TestCase):
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
@ -930,7 +930,7 @@ class ModelSubclassCompiledTest(keras_parameterized.TestCase):
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
@ -966,7 +966,7 @@ class ModelSubclassCompiledTest(keras_parameterized.TestCase):
optimizer='rmsprop',
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
@ -1006,7 +1006,7 @@ class ModelSubclassCompiledTest(keras_parameterized.TestCase):
loss='mse',
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
loss = model.train_on_batch(x, y)
self.assertGreater(loss, 0.1)
@ -1029,7 +1029,7 @@ class ModelSubclassCompiledTest(keras_parameterized.TestCase):
model.compile(
optimizer='rmsprop',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x)
model.evaluate(x)

View File

@ -177,7 +177,7 @@ class TestModelCloning(keras_parameterized.TestCase):
testing_utils.get_v2_optimizer('rmsprop'),
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
new_model.train_on_batch([val_a, val_b], val_out)
# On top of new tensors
@ -190,7 +190,7 @@ class TestModelCloning(keras_parameterized.TestCase):
testing_utils.get_v2_optimizer('rmsprop'),
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
new_model.train_on_batch([val_a, val_b], val_out)
# On top of new, non-Keras tensors
@ -205,7 +205,7 @@ class TestModelCloning(keras_parameterized.TestCase):
testing_utils.get_v2_optimizer('rmsprop'),
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
new_model.train_on_batch(None, val_out)
@keras_parameterized.run_all_keras_modes
@ -232,7 +232,7 @@ class TestModelCloning(keras_parameterized.TestCase):
loss='mse',
optimizer=testing_utils.get_v2_optimizer('adam'),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
y = np.array([[[1], [1]], [[1], [1]]])
loss = model.train_on_batch(x, y)
self.assertEqual(float(loss), 0.)
@ -297,7 +297,7 @@ class CheckpointingTests(keras_parameterized.TestCase):
optimizer=opt,
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(
x=np.array([[1., 2., 3., 4.]]),
@ -327,7 +327,7 @@ class TestModelBackend(keras_parameterized.TestCase):
testing_utils.get_v2_optimizer('rmsprop'),
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
keras.backend.set_floatx(floatx)
@ -357,7 +357,7 @@ class TestCloneAndBuildModel(keras_parameterized.TestCase):
testing_utils.get_v2_optimizer('rmsprop'),
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
new_model.train_on_batch(inp, out)
# Create new tensors for inputs and targets
@ -374,7 +374,7 @@ class TestCloneAndBuildModel(keras_parameterized.TestCase):
testing_utils.get_v2_optimizer('rmsprop'),
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
new_model.train_on_batch(inp, out)
def _assert_same_compile_params(self, model):
@ -428,7 +428,7 @@ class TestCloneAndBuildModel(keras_parameterized.TestCase):
'mse',
metrics=['acc', metrics.categorical_accuracy],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self._clone_and_build_test_helper(model, testing_utils.get_model_type())
@ -440,7 +440,7 @@ class TestCloneAndBuildModel(keras_parameterized.TestCase):
'mse',
metrics=['acc', metrics.categorical_accuracy],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self._clone_and_build_test_helper(model, 'sequential')
inp = np.random.random((10, 4))
@ -455,7 +455,7 @@ class TestCloneAndBuildModel(keras_parameterized.TestCase):
'mse',
metrics=['acc', metrics.categorical_accuracy],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
global_step = keras.backend.variable(123, dtype=dtypes.int64)
clone_model = models.clone_and_build_model(

View File

@ -612,12 +612,13 @@ class OptimizerTest(test.TestCase):
@keras_parameterized.run_all_keras_modes
class OptimizersCompatibilityTest(keras_parameterized.TestCase):
# After run_distributed is turned on, optimizer v1 can no longer work in
# eager mode, skipping the test if so.
# After experimental_run_tf_function is turned on, optimizer v1 can no longer
# work in eager mode, so the test is skipped in that case.
def _testOptimizersCompatibility(self, opt_v1, opt_v2, test_weights=True):
if testing_utils.should_run_distributed() or context.executing_eagerly():
self.skipTest('v1 optimizer does not run in run_distributed mode or '
'eager mode')
if testing_utils.should_run_tf_function() or context.executing_eagerly():
self.skipTest(
'v1 optimizer does not run in experimental_run_tf_function mode or '
'eager mode')
np.random.seed(1331)
with self.cached_session():
train_samples = 20
@ -638,7 +639,7 @@ class OptimizersCompatibilityTest(keras_parameterized.TestCase):
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model_v1.fit(x, y, batch_size=5, epochs=1)
model_v2 = testing_utils.get_small_sequential_mlp(
@ -649,7 +650,7 @@ class OptimizersCompatibilityTest(keras_parameterized.TestCase):
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model_v2._make_train_function()
if test_weights:
opt_v2.set_weights(opt_v1.get_weights())
@ -702,9 +703,10 @@ class OptimizersCompatibilityTest(keras_parameterized.TestCase):
self._testOptimizersCompatibility(opt_v1, opt_v2, False)
def testNumericEquivalenceForNesterovMomentum(self):
if testing_utils.should_run_distributed() or context.executing_eagerly():
self.skipTest('v1 optimizer does not run in run_distributed mode or '
'eager mode')
if testing_utils.should_run_tf_function() or context.executing_eagerly():
self.skipTest(
'v1 optimizer does not run in experimental_run_tf_function mode or '
'eager mode')
np.random.seed(1331)
with self.cached_session():
train_samples = 20
@ -737,19 +739,19 @@ class OptimizersCompatibilityTest(keras_parameterized.TestCase):
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model_k_v2.compile(
opt_k_v2,
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model_tf.compile(
opt_tf,
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False)
hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False)
@ -762,9 +764,10 @@ class OptimizersCompatibilityTest(keras_parameterized.TestCase):
self.assertAllClose(hist_k_v1.history['loss'], hist_k_v2.history['loss'])
def testNumericEquivalenceForAmsgrad(self):
if testing_utils.should_run_distributed() or context.executing_eagerly():
self.skipTest('v1 optimizer does not run in run_distributed mode or '
'eager mode')
if testing_utils.should_run_tf_function() or context.executing_eagerly():
self.skipTest(
'v1 optimizer does not run in experimental_run_tf_function mode or '
'eager mode')
np.random.seed(1331)
with self.cached_session():
train_samples = 20
@ -792,13 +795,13 @@ class OptimizersCompatibilityTest(keras_parameterized.TestCase):
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model_k_v2.compile(
opt_k_v2,
loss='categorical_crossentropy',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
hist_k_v1 = model_k_v1.fit(x, y, batch_size=5, epochs=10, shuffle=False)
hist_k_v2 = model_k_v2.fit(x, y, batch_size=5, epochs=10, shuffle=False)

View File

@ -44,12 +44,13 @@ def _get_model(input_dim, num_hidden, output_dim):
@keras_parameterized.run_all_keras_modes
class KerasOptimizersTest(keras_parameterized.TestCase):
# After run_distributed is turned on, optimizer v1 can no longer work in
# eager mode, skipping the test if so.
# After experimental_run_tf_function is turned on, optimizer v1 can no longer
# work in eager mode, so skip the test in that case.
def _test_optimizer(self, optimizer, target=0.75):
if testing_utils.should_run_distributed() or context.executing_eagerly():
self.skipTest('v1 optimizer does not run in run_distributed mode or '
'eager mode')
if testing_utils.should_run_tf_function() or context.executing_eagerly():
self.skipTest(
'v1 optimizer does not run in experimental_run_tf_function mode or '
'eager mode')
np.random.seed(1337)
(x_train, y_train), _ = testing_utils.get_test_data(
train_samples=1000, test_samples=200, input_shape=(10,), num_classes=2)
@ -60,7 +61,7 @@ class KerasOptimizersTest(keras_parameterized.TestCase):
optimizer=optimizer,
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
np.testing.assert_equal(
keras.backend.get_value(model.optimizer.iterations), 0)
history = model.fit(x_train, y_train, epochs=2, batch_size=16, verbose=0)
@ -98,7 +99,7 @@ class KerasOptimizersTest(keras_parameterized.TestCase):
optimizer=optimizer,
metrics=['accuracy'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
np.testing.assert_equal(
keras.backend.get_value(model.optimizer.iterations),
126) # Using same optimizer from before
@ -164,18 +165,20 @@ class KerasOptimizersTest(keras_parameterized.TestCase):
keras.optimizers.SGD(lr=0.01, momentum=0.9, clipvalue=0.5))
def test_tf_optimizer(self):
if testing_utils.should_run_distributed() or context.executing_eagerly():
self.skipTest('v1 optimizer does not run in run_distributed mode or '
'eager mode')
if testing_utils.should_run_tf_function() or context.executing_eagerly():
self.skipTest(
'v1 optimizer does not run in experimental_run_tf_function mode or '
'eager mode')
optimizer = keras.optimizers.TFOptimizer(AdamOptimizer(0.01))
model = keras.models.Sequential()
model.add(keras.layers.Dense(
2, input_shape=(3,), kernel_constraint=keras.constraints.MaxNorm(1)))
# This is possible
model.compile(loss='mean_squared_error',
optimizer=optimizer,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
model.compile(
loss='mean_squared_error',
optimizer=optimizer,
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
keras.backend.track_tf_optimizer(optimizer)
model.fit(np.random.random((5, 3)),
np.random.random((5, 2)),
@ -191,9 +194,10 @@ class KerasOptimizersTest(keras_parameterized.TestCase):
optimizer.from_config(None)
def test_optimizer_garbage_collection(self):
if testing_utils.should_run_distributed() or context.executing_eagerly():
self.skipTest('v1 optimizer does not run in run_distributed mode or '
'eager mode')
if testing_utils.should_run_tf_function() or context.executing_eagerly():
self.skipTest(
'v1 optimizer does not run in experimental_run_tf_function mode or '
'eager mode')
graph = ops.Graph()
with graph.as_default():
optimizer = keras.optimizers.TFOptimizer(AdamOptimizer(0.01))
@ -207,9 +211,10 @@ class KerasOptimizersTest(keras_parameterized.TestCase):
self.assertIs(optimizer_weak(), None)
def test_tf_optimizer_iterations(self):
if testing_utils.should_run_distributed() or context.executing_eagerly():
self.skipTest('v1 optimizer does not run in run_distributed mode or '
'eager mode')
if testing_utils.should_run_tf_function() or context.executing_eagerly():
self.skipTest(
'v1 optimizer does not run in experimental_run_tf_function mode or '
'eager mode')
with self.cached_session():
optimizer = keras.optimizers.TFOptimizer(AdamOptimizer(0.01))
model = keras.models.Sequential()
@ -219,7 +224,7 @@ class KerasOptimizersTest(keras_parameterized.TestCase):
loss='mean_squared_error',
optimizer=optimizer,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
keras.backend.track_tf_optimizer(optimizer)
self.assertEqual(keras.backend.get_value(model.optimizer.iterations), 0)

View File

@ -49,7 +49,7 @@ class WideDeepModelTest(keras_parameterized.TestCase):
loss='mse',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
wide_deep_model.fit(inputs, output, epochs=5)
self.assertTrue(wide_deep_model.built)
@ -70,7 +70,7 @@ class WideDeepModelTest(keras_parameterized.TestCase):
loss='mse',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.evaluate(variables.global_variables_initializer())
wide_deep_model.fit(inputs, output, epochs=1)
self.assertAllClose(
@ -91,7 +91,7 @@ class WideDeepModelTest(keras_parameterized.TestCase):
loss='mse',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
wide_deep_model.fit(inputs, output, epochs=5)
def test_wide_deep_model_with_single_optimizer(self):
@ -107,7 +107,7 @@ class WideDeepModelTest(keras_parameterized.TestCase):
loss='mse',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
wide_deep_model.fit(inputs, output, epochs=5)
self.assertTrue(wide_deep_model.built)
@ -132,7 +132,7 @@ class WideDeepModelTest(keras_parameterized.TestCase):
loss='mse',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit([linear_input_np, dnn_input_np, input_b_np], output_np, epochs=5)
def test_wide_deep_model_with_sub_model_trained(self):
@ -150,13 +150,13 @@ class WideDeepModelTest(keras_parameterized.TestCase):
loss='mse',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
dnn_model.compile(
optimizer='adam',
loss='mse',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
linear_model.fit(linear_inp, output, epochs=50)
dnn_model.fit(dnn_inp, output, epochs=50)
wide_deep_model.compile(
@ -164,7 +164,7 @@ class WideDeepModelTest(keras_parameterized.TestCase):
loss='mse',
metrics=[],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
wide_deep_model.fit(inputs, output, epochs=50)

View File

@ -79,7 +79,7 @@ class KerasRegularizersTest(keras_parameterized.TestCase,
loss='categorical_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(len(model.losses), 1)
model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0)
@ -97,7 +97,7 @@ class KerasRegularizersTest(keras_parameterized.TestCase,
loss='categorical_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(len(model.losses), 1 if context.executing_eagerly() else 1)
model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0)
@ -113,7 +113,7 @@ class KerasRegularizersTest(keras_parameterized.TestCase,
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, y, batch_size=5, epochs=1)
def test_custom_regularizer_saving(self):
@ -144,7 +144,7 @@ class KerasRegularizersTest(keras_parameterized.TestCase,
loss='categorical_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(len(model.losses), 5)
@keras_parameterized.run_all_keras_modes
@ -167,7 +167,7 @@ class KerasRegularizersTest(keras_parameterized.TestCase,
loss='categorical_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(len(model.losses), 6)
@keras_parameterized.run_all_keras_modes
@ -195,7 +195,7 @@ class KerasRegularizersTest(keras_parameterized.TestCase,
loss='categorical_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(len(model.losses), 14)

View File

@ -67,7 +67,7 @@ class TestModelSavingandLoading(parameterized.TestCase, test.TestCase):
metrics=[keras.metrics.categorical_accuracy],
sample_weight_mode='temporal',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
@ -111,7 +111,7 @@ class TestModelSavingandLoading(parameterized.TestCase, test.TestCase):
optimizer=rmsprop.RMSprop(lr=0.0001),
metrics=[keras.metrics.categorical_accuracy],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
@ -169,7 +169,7 @@ class TestModelSavingandLoading(parameterized.TestCase, test.TestCase):
optimizer=training_module.RMSPropOptimizer(0.1),
metrics=['acc'],
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
y = loaded_model.predict(x)
self.assertAllClose(ref_y, y, atol=1e-05)

View File

@ -89,7 +89,7 @@ class TraceModelCallTest(keras_parameterized.TestCase):
optimizer='sgd',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x=np.random.random((8, 5)),
y=np.random.random((8, 3)), epochs=2)
@ -130,7 +130,7 @@ class TraceModelCallTest(keras_parameterized.TestCase):
optimizer='sgd',
loss='mse',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x=[np.random.random((8, input_dim)).astype(np.float32),
np.random.random((8, input_dim)).astype(np.float32)],
y=[np.random.random((8, num_classes)).astype(np.float32),
@ -310,7 +310,7 @@ class ExtractModelMetricsTest(keras_parameterized.TestCase):
],
optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.01),
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
extract_metrics = saving_utils.extract_model_metrics(model)
self.assertEqual(set(model_metric_names), set(model.metrics_names))
self.assertEqual(set(extract_metric_names), set(extract_metrics.keys()))

View File

@ -64,7 +64,7 @@ def get_compiled_multi_io_model_temporal(sample_weight_mode):
weighted_metrics=[metrics.MeanAbsoluteError(name='mae_2')],
sample_weight_mode=sample_weight_mode,
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
return model

View File

@ -261,7 +261,7 @@ def layer_test(layer_cls, kwargs=None, input_shape=None, input_dtype=None,
_thread_local_data = threading.local()
_thread_local_data.model_type = None
_thread_local_data.run_eagerly = None
_thread_local_data.run_distributed = None
_thread_local_data.experimental_run_tf_function = None
@tf_contextlib.contextmanager
@ -318,7 +318,7 @@ def should_run_eagerly():
@tf_contextlib.contextmanager
def run_distributed_scope(value):
def experimental_run_tf_function_scope(value):
"""Provides a scope within which we compile models to run with distribution.
The boolean gets restored to its original value upon exiting the scope.
@ -330,23 +330,25 @@ def run_distributed_scope(value):
Yields:
The provided value.
"""
previous_value = _thread_local_data.run_distributed
previous_value = _thread_local_data.experimental_run_tf_function
try:
_thread_local_data.run_distributed = value
_thread_local_data.experimental_run_tf_function = value
yield value
finally:
# Restore model type to initial value.
_thread_local_data.run_distributed = previous_value
_thread_local_data.experimental_run_tf_function = previous_value
def should_run_distributed():
def should_run_tf_function():
"""Returns whether the models we are testing should be run distributed."""
if _thread_local_data.run_distributed is None:
raise ValueError('Cannot call `should_run_distributed()` outside of a '
'`run_distributed_scope()` or `run_all_keras_modes` '
'decorator.')
if _thread_local_data.experimental_run_tf_function is None:
raise ValueError(
'Cannot call `should_run_tf_function()` outside of a '
'`experimental_run_tf_function_scope()` or `run_all_keras_modes` '
'decorator.')
return _thread_local_data.run_distributed and context.executing_eagerly()
return (_thread_local_data.experimental_run_tf_function and
context.executing_eagerly())
def get_model_type():
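The scope above is how run_all_keras_modes threads the flag through to each
generated test: should_run_tf_function() is only meaningful inside it. A
minimal usage sketch, assuming the testing_utils module shown here (the model
and data are illustrative, not part of the patch):

import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras import testing_utils

# should_run_tf_function() raises outside the scope; inside, it returns the
# scope value ANDed with whether we are executing eagerly.
with testing_utils.experimental_run_tf_function_scope(True):
  model = keras.models.Sequential(
      [keras.layers.Dense(1, input_shape=(3,))])
  model.compile(
      'sgd', 'mse',
      experimental_run_tf_function=testing_utils.should_run_tf_function())
  model.fit(np.zeros((4, 3)), np.zeros((4, 1)), verbose=0)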

View File

@ -158,10 +158,13 @@ def get_test_mode_kwargs():
# Certain things weren't supported correctly in the old path, therefore
# with these changes, some tests now only pass in the single code path in V2.
if run_eagerly or context.executing_eagerly():
run_distributed = True
experimental_run_tf_function = True
else:
run_distributed = testing_utils.should_run_distributed()
return {"run_eagerly": run_eagerly, "run_distributed": run_distributed}
experimental_run_tf_function = testing_utils.should_run_tf_function()
return {
"run_eagerly": run_eagerly,
"experimental_run_tf_function": experimental_run_tf_function
}
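A hedged sketch of how a test body might consume these kwargs inside a
run_all_keras_modes test method (the MLP shape and loss are illustrative):

# Both flags flow straight into compile(), so the eager/tf.function pairing
# always follows the rule above.
model = testing_utils.get_small_sequential_mlp(
    num_hidden=10, num_classes=2, input_dim=4)
model.compile('sgd', 'mse', **get_test_mode_kwargs())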
@keras_parameterized.run_with_all_model_types
@ -220,7 +223,7 @@ class CompositeTensorOutputTest(keras_parameterized.TestCase):
# converts the ragged tensor back to a dense tensor.
layers = [ToRagged(padding=0)]
model = testing_utils.get_model_from_layers(layers, input_shape=(None,))
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
model._run_eagerly = testing_utils.should_run_eagerly()
# Define some input data with additional padding.
@ -235,7 +238,7 @@ class CompositeTensorOutputTest(keras_parameterized.TestCase):
# converts the ragged tensor back to a dense tensor.
layers = [ToRagged(padding=0)]
model = testing_utils.get_model_from_layers(layers, input_shape=(None,))
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
model._run_eagerly = testing_utils.should_run_eagerly()
# Define some input data with additional padding.
@ -250,7 +253,7 @@ class CompositeTensorOutputTest(keras_parameterized.TestCase):
# converts the ragged tensor back to a dense tensor.
layers = [ToSparse()]
model = testing_utils.get_model_from_layers(layers, input_shape=(None,))
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
model._run_eagerly = testing_utils.should_run_eagerly()
# Define some input data with additional padding.
@ -270,7 +273,7 @@ class CompositeTensorOutputTest(keras_parameterized.TestCase):
# converts the ragged tensor back to a dense tensor.
layers = [ToSparse()]
model = testing_utils.get_model_from_layers(layers, input_shape=(None,))
model._run_distributed = testing_utils.should_run_distributed()
model._experimental_run_tf_function = testing_utils.should_run_tf_function()
model._run_eagerly = testing_utils.should_run_eagerly()
# Define some input data with additional padding.
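ToRagged and ToSparse are test helper layers defined elsewhere in this file;
for orientation, a hedged reconstruction of what a ToRagged-style layer does
(the exact definition is an assumption, not part of the patch):

from tensorflow.python import keras
from tensorflow.python.ops.ragged import ragged_tensor

class ToRagged(keras.layers.Layer):
  """Converts a padded dense tensor into a RaggedTensor (sketch)."""

  def __init__(self, padding, **kwargs):
    super(ToRagged, self).__init__(**kwargs)
    self._padding = padding

  def call(self, inputs):
    # Rows are trimmed wherever trailing values equal `padding`.
    return ragged_tensor.RaggedTensor.from_tensor(
        inputs, padding=self._padding)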
@ -407,7 +410,7 @@ class ScipySparseTensorInputTest(keras_parameterized.TestCase,
optimizer="sgd",
loss="mse",
metrics=["accuracy"],
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
input_data = scipy.sparse.coo_matrix(([1, 2, 3], ([0, 1, 1], [0, 0, 1])),
shape=[2, 3])
@ -469,7 +472,7 @@ class ScipySparseTensorInputTest(keras_parameterized.TestCase,
optimizer="sgd",
loss="mse",
metrics=["accuracy"],
run_distributed=testing_utils.should_run_distributed())
experimental_run_tf_function=testing_utils.should_run_tf_function())
input_data = {
input_name:

View File

@ -84,9 +84,11 @@ class TestIOUtils(keras_parameterized.TestCase):
model = keras.models.Sequential()
model.add(keras.layers.Dense(64, input_shape=(10,), activation='relu'))
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly(),
run_distributed=testing_utils.should_run_distributed())
model.compile(
loss='binary_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
# Note: you have to use shuffle='batch' or False with HDF5Matrix
model.fit(x_train, y_train, batch_size=32, shuffle='batch', verbose=False)
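The shuffle='batch' constraint exists because HDF5Matrix reads slices lazily
from disk instead of materializing the arrays. A minimal sketch of how inputs
like x_train and y_train above could be built, assuming hypothetical file and
dataset names:

import h5py
import numpy as np
from tensorflow.python.keras.utils.io_utils import HDF5Matrix

# Write a tiny HDF5 file so the sketch is self-contained; shapes match the
# Dense(64, input_shape=(10,)) -> Dense(1) model compiled above.
with h5py.File('data.h5', 'w') as f:
  f.create_dataset('x_train', data=np.random.random((64, 10)))
  f.create_dataset('y_train', data=np.random.randint(0, 2, (64, 1)))

# HDF5Matrix wraps a dataset without loading it into memory, so fit() can
# only shuffle whole batch-sized chunks ('batch') or not shuffle at all.
x_train = HDF5Matrix('data.h5', 'x_train')
y_train = HDF5Matrix('data.h5', 'y_train')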