Fix a few more failing tests in OSS Keras.
PiperOrigin-RevId: 336967487 Change-Id: I496078cee0aa946c636b35df3f3037f9f7aef556
parent 8572e0189a
commit c290421e1d
@@ -831,29 +831,30 @@ class TestWholeModelSaving(keras_parameterized.TestCase):
     saved_model_dir = self._save_model_dir()
     save_format = testing_utils.get_save_format()
 
-    model = _make_model()
-    model.compile(
-        loss=keras.losses.SparseCategoricalCrossentropy(),
-        optimizer=optimizers.gradient_descent_v2.SGD(),
-        metrics=[keras.metrics.SparseCategoricalCrossentropy()])
-    x = np.random.normal(size=(32, 4))
-    y = np.random.randint(0, 3, size=32)
-    model.train_on_batch(x, y)
-    evaluation_results = model.evaluate(x, y)
-    # Save and reload model.
-    model.save(saved_model_dir, save_format=save_format)
-    del model  # Prevent misuse.
-    loaded_model = keras.models.load_model(saved_model_dir)
-    loaded_model_eval_results = loaded_model.evaluate(x, y)
-    # Assert all evaluation results are the same.
-    self.assertAllClose(evaluation_results, loaded_model_eval_results, 1e-9)
-    # Check correctness of the loss calculation.
-    self.assertAllGreater(evaluation_results, 0.)
-    evaluation_results = dict(
-        zip(loaded_model.metrics_names, evaluation_results))
-    self.assertNear(
-        evaluation_results['sparse_categorical_crossentropy'] +
-        evaluation_results['custom_loss'], evaluation_results['loss'], 1e-6)
+    with self.cached_session():
+      model = _make_model()
+      model.compile(
+          loss=keras.losses.SparseCategoricalCrossentropy(),
+          optimizer=optimizers.gradient_descent_v2.SGD(),
+          metrics=[keras.metrics.SparseCategoricalCrossentropy()])
+      x = np.random.normal(size=(32, 4))
+      y = np.random.randint(0, 3, size=32)
+      model.train_on_batch(x, y)
+      evaluation_results = model.evaluate(x, y)
+      # Save and reload model.
+      model.save(saved_model_dir, save_format=save_format)
+      del model  # Prevent misuse.
+      loaded_model = keras.models.load_model(saved_model_dir)
+      loaded_model_eval_results = loaded_model.evaluate(x, y)
+      # Assert all evaluation results are the same.
+      self.assertAllClose(evaluation_results, loaded_model_eval_results, 1e-9)
+      # Check correctness of the loss calculation.
+      self.assertAllGreater(evaluation_results, 0.)
+      evaluation_results = dict(
+          zip(loaded_model.metrics_names, evaluation_results))
+      self.assertNear(
+          evaluation_results['sparse_categorical_crossentropy'] +
+          evaluation_results['custom_loss'], evaluation_results['loss'], 1e-6)
 
   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def test_save_uncompiled_model_with_optimizer(self):
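Note (not part of the diff): the change above is purely mechanical. The test body is re-indented under `with self.cached_session():` so that, when the combinations decorator runs the test in 'graph' mode, evaluating tensors has a session to run in; under eager execution the context manager is effectively a no-op. Below is a minimal standalone sketch of the same save/reload round-trip using only public TF 2.x APIs; the class and method names are hypothetical and not from this commit.

import numpy as np
import tensorflow as tf


class SaveReloadSketchTest(tf.test.TestCase):  # hypothetical name, not from this commit

  def test_save_and_reload_evaluate_the_same(self):
    with self.cached_session():  # supplies a session in graph mode; harmless under eager
      model = tf.keras.Sequential([tf.keras.layers.Dense(3, input_shape=(4,))])
      model.compile(loss='mse', optimizer='sgd')
      x = np.random.normal(size=(32, 4)).astype('float32')
      y = np.random.normal(size=(32, 3)).astype('float32')
      model.train_on_batch(x, y)
      before = model.evaluate(x, y)

      save_dir = self.get_temp_dir()
      model.save(save_dir)  # SavedModel format when given a directory path
      reloaded = tf.keras.models.load_model(save_dir)
      # The reloaded model should evaluate to the same loss on the same batch.
      self.assertAllClose(before, reloaded.evaluate(x, y), 1e-9)


if __name__ == '__main__':
  tf.test.main()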
@@ -33,6 +33,7 @@ from tensorflow.python.keras import losses
 from tensorflow.python.keras import testing_utils
 from tensorflow.python.keras.engine import sequential
 from tensorflow.python.keras.feature_column import dense_features
+from tensorflow.python.keras.feature_column import sequence_feature_column as ksfc
 from tensorflow.python.keras.layers import core
 from tensorflow.python.keras.saving import model_config
 from tensorflow.python.keras.saving import save
@@ -190,7 +191,7 @@ class TestSaveModel(test.TestCase, parameterized.TestCase):
             shape=(None, 1), sparse=True, name='b', dtype='string')
     }
 
-    fc_layer, _ = feature_column_lib.SequenceFeatures(cols)(input_layers)
+    fc_layer, _ = ksfc.SequenceFeatures(cols)(input_layers)
     # TODO(tibell): Figure out the right dtype and apply masking.
     # sequence_length_mask = array_ops.sequence_mask(sequence_length)
     # x = keras.layers.GRU(32)(fc_layer, mask=sequence_length_mask)
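Note (not part of the diff): together with the new import above, this hunk makes the test build its feature layer from the Keras-side copy of SequenceFeatures (`ksfc`) rather than the one re-exported through `feature_column_lib`. The public TF 2.x alias for the same layer is `tf.keras.experimental.SequenceFeatures`. A rough construction sketch with that public alias, using a single hypothetical numeric sequence column in place of the test's string/indicator columns:

import tensorflow as tf

cols = [tf.feature_column.sequence_numeric_column('a')]
input_layers = {
    'a': tf.keras.Input(shape=(None, 1), sparse=True, name='a', dtype='float32')
}
# SequenceFeatures returns the dense sequence input plus per-example lengths.
fc_layer, sequence_length = tf.keras.experimental.SequenceFeatures(cols)(input_layers)
x = tf.keras.layers.GRU(32)(fc_layer)
output = tf.keras.layers.Dense(10)(x)
model = tf.keras.models.Model(input_layers, output)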
@@ -965,33 +965,34 @@ class MetricTest(test.TestCase, parameterized.TestCase):
                                  num_tensor_args,
                                  shape=(1, 5),
                                  test_sample_weight=True):
-    tf_save.save(metric, save_dir)
-    loaded = keras_load.load(save_dir)
-    self.evaluate([v.initializer for v in loaded.variables])
-    self.assertEqual(metric.name, loaded.name)
-    self.assertEqual(metric.dtype, loaded.dtype)
-
-    inputs = self.generate_inputs(num_tensor_args, shape)
-    actual = self.evaluate(metric(*inputs))
-    self.assertAllClose(actual, loaded(*inputs))
-    self.assertAllClose(metric.variables, loaded.variables)
-
-    # Test with separate calls to update state and result.
-    inputs = self.generate_inputs(num_tensor_args, shape)
-    self.evaluate(metric.update_state(*inputs))
-    self.evaluate(loaded.update_state(*inputs))
-    actual = self.evaluate(metric.result())
-    self.assertAllClose(actual, loaded.result())
-
-    if test_sample_weight:
-      # Test with sample weights input.
-      inputs = self.generate_inputs(num_tensor_args, shape)
-      sample_weight = self.generate_inputs(1, [])[0]
-      inputs.append(sample_weight)
-
-      actual = self.evaluate(metric(*inputs))
-      self.assertAllClose(actual, loaded(*inputs))
-    return loaded
+    with self.cached_session():
+      tf_save.save(metric, save_dir)
+      loaded = keras_load.load(save_dir)
+      self.evaluate([v.initializer for v in loaded.variables])
+      self.assertEqual(metric.name, loaded.name)
+      self.assertEqual(metric.dtype, loaded.dtype)
+
+      inputs = self.generate_inputs(num_tensor_args, shape)
+      actual = self.evaluate(metric(*inputs))
+      self.assertAllClose(actual, loaded(*inputs))
+      self.assertAllClose(metric.variables, loaded.variables)
+
+      # Test with separate calls to update state and result.
+      inputs = self.generate_inputs(num_tensor_args, shape)
+      self.evaluate(metric.update_state(*inputs))
+      self.evaluate(loaded.update_state(*inputs))
+      actual = self.evaluate(metric.result())
+      self.assertAllClose(actual, loaded.result())
+
+      if test_sample_weight:
+        # Test with sample weights input.
+        inputs = self.generate_inputs(num_tensor_args, shape)
+        sample_weight = self.generate_inputs(1, [])[0]
+        inputs.append(sample_weight)
+
+        actual = self.evaluate(metric(*inputs))
+        self.assertAllClose(actual, loaded(*inputs))
+      return loaded
 
   @parameterized.named_parameters([
       ('mean', keras.metrics.Mean, 1, (1, 5)),
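Note (not part of the diff): `tf_save.save` and `keras_load.load` are TensorFlow-internal entry points for the SavedModel round-trip of a metric; the only structural change in this hunk is, again, the `cached_session()` wrapper, so that `self.evaluate(...)` of variable initializers, update ops, and results has a session in graph mode. A small sketch of that evaluate-under-cached-session pattern with a plain Keras metric, using public APIs only and a hypothetical test class name:

import tensorflow as tf


class MetricEvalSketchTest(tf.test.TestCase):  # hypothetical name, not from this commit

  def test_metric_update_and_result(self):
    with self.cached_session():  # needed for graph-mode evaluation; no-op under eager
      metric = tf.keras.metrics.Mean()
      # Initializers matter only in graph mode; eager variables are created initialized.
      self.evaluate([v.initializer for v in metric.variables])
      self.evaluate(metric.update_state([1.0, 3.0]))
      self.assertAllClose(self.evaluate(metric.result()), 2.0)


if __name__ == '__main__':
  tf.test.main()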