Add v1 to the names of Keras files that are legacy-training-loop-specific.
PiperOrigin-RevId: 331851473
Change-Id: I18dfafc0e30627bb328ce945d822d8be80672e3e
parent 67548eff59
commit e8384561f1
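For reference, the four legacy-training-loop modules renamed by this commit, and the import paths updated call sites switch to, collected from the hunks below (paths verbatim from the diff):

# Old modules, removed from the py_library srcs:
#   training_arrays.py, training_distributed.py,
#   training_eager.py, training_generator.py
# New v1-suffixed imports used at every updated call site:
from tensorflow.python.keras.engine import training_arrays_v1
from tensorflow.python.keras.engine import training_distributed_v1
from tensorflow.python.keras.engine import training_eager_v1
from tensorflow.python.keras.engine import training_generator_v1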
@@ -33,10 +33,10 @@ py_library(
         "saving.py",
         "sequential.py",
         "training.py",
-        "training_arrays.py",
-        "training_distributed.py",
-        "training_eager.py",
-        "training_generator.py",
+        "training_arrays_v1.py",
+        "training_distributed_v1.py",
+        "training_eager_v1.py",
+        "training_generator_v1.py",
         "training_utils.py",
         "training_v1.py",
     ],
@@ -31,7 +31,7 @@ from tensorflow.python.framework import errors
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import sparse_tensor
 from tensorflow.python.keras import backend as K
-from tensorflow.python.keras.engine import training_generator
+from tensorflow.python.keras.engine import training_generator_v1
 from tensorflow.python.keras.engine.base_layer import Layer
 from tensorflow.python.keras.utils import tf_utils
 from tensorflow.python.ops import sparse_ops
@@ -175,7 +175,7 @@ class CombinerPreprocessingLayer(PreprocessingLayer):
       next_data = self._get_dataset_iterator(
           dataset_ops.Dataset.from_tensor_slices(data).batch(512))
     else:
-      generator, _ = training_generator.convert_to_generator_like(
+      generator, _ = training_generator_v1.convert_to_generator_like(
           data, batch_size=512)
       # If the data is not a dataset, we can iterate over it using next(foo);
       # here, we wrap that into a callable.
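The comment in the hunk above describes iterating non-dataset data with next(foo) wrapped into a callable. A minimal standalone sketch of that pattern (plain Python; as_callable and the sample batches are illustrative stand-ins, not TF API):

# Wrap iteration over a generator into a zero-argument callable,
# as the comment above describes. Names here are hypothetical.
def as_callable(generator):
  return lambda: next(generator)

batches = iter([[1, 2], [3, 4]])  # stand-in for the batched input data
next_data = as_callable(batches)
print(next_data())  # [1, 2]
print(next_data())  # [3, 4]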
@@ -33,7 +33,7 @@ from tensorflow.python.keras import backend as K
 from tensorflow.python.keras import callbacks as cbks
 from tensorflow.python.keras.distribute import distributed_training_utils as dist_utils
 from tensorflow.python.keras.engine import partial_batch_padding_handler as padding_util
-from tensorflow.python.keras.engine import training_arrays
+from tensorflow.python.keras.engine import training_arrays_v1
 from tensorflow.python.keras.engine import training_utils
 from tensorflow.python.keras.utils.generic_utils import Progbar
 from tensorflow.python.keras.utils.mode_keys import ModeKeys
@@ -669,7 +669,7 @@ class DistributionSingleWorkerTrainingLoop(training_utils.TrainingLoop):
         validation_steps=validation_steps,
         validation_freq=validation_freq)

-    return training_arrays.fit_loop(
+    return training_arrays_v1.fit_loop(
         model,
         dataset,
         batch_size=batch_size,
@@ -717,7 +717,7 @@ class DistributionSingleWorkerTrainingLoop(training_utils.TrainingLoop):
       return experimental_tpu_test_loop(
           model, dataset, verbose=verbose, steps=steps, callbacks=callbacks)

-    return training_arrays.test_loop(
+    return training_arrays_v1.test_loop(
         model,
         inputs=dataset,
         batch_size=batch_size,
@@ -751,7 +751,7 @@ class DistributionSingleWorkerTrainingLoop(training_utils.TrainingLoop):
     if not context.executing_eagerly():
       return experimental_tpu_predict_loop(
           model, dataset, verbose=verbose, steps=steps, callbacks=callbacks)
-    return training_arrays.predict_loop(
+    return training_arrays_v1.predict_loop(
         model,
         dataset,
         batch_size=batch_size,
@@ -34,7 +34,7 @@ from tensorflow.python.keras import metrics as metrics_module
 from tensorflow.python.keras import testing_utils
 from tensorflow.python.keras.engine import input_layer
 from tensorflow.python.keras.engine import training
-from tensorflow.python.keras.engine import training_generator
+from tensorflow.python.keras.engine import training_generator_v1
 from tensorflow.python.keras.optimizer_v2 import rmsprop
 from tensorflow.python.keras.utils import data_utils
 from tensorflow.python.platform import test
@@ -527,7 +527,7 @@ class TestConvertToGeneratorLike(test.TestCase, parameterized.TestCase):
         isinstance(data, (dataset_ops.DatasetV2, iterator_ops.Iterator))):
       return

-    generator, steps = training_generator.convert_to_generator_like(
+    generator, steps = training_generator_v1.convert_to_generator_like(
         data, batch_size=2, steps_per_epoch=expected_batches)
     self.assertEqual(steps, expected_batches)
@@ -45,10 +45,10 @@ from tensorflow.python.keras import optimizers
 from tensorflow.python.keras.distribute import distributed_training_utils
 from tensorflow.python.keras.engine import base_layer
 from tensorflow.python.keras.engine import training as training_lib
-from tensorflow.python.keras.engine import training_arrays
-from tensorflow.python.keras.engine import training_distributed
-from tensorflow.python.keras.engine import training_eager
-from tensorflow.python.keras.engine import training_generator
+from tensorflow.python.keras.engine import training_arrays_v1
+from tensorflow.python.keras.engine import training_distributed_v1
+from tensorflow.python.keras.engine import training_eager_v1
+from tensorflow.python.keras.engine import training_generator_v1
 from tensorflow.python.keras.engine import training_utils
 from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer
 from tensorflow.python.keras.optimizer_v2 import optimizer_v2
@@ -582,25 +582,25 @@ class Model(training_lib.Model):
     # Case 1: distribution strategy.
     if self._distribution_strategy:
       if self._in_multi_worker_mode():
-        return training_distributed.DistributionMultiWorkerTrainingLoop(
-            training_distributed.DistributionSingleWorkerTrainingLoop())
+        return training_distributed_v1.DistributionMultiWorkerTrainingLoop(
+            training_distributed_v1.DistributionSingleWorkerTrainingLoop())
       else:
-        return training_distributed.DistributionSingleWorkerTrainingLoop()
+        return training_distributed_v1.DistributionSingleWorkerTrainingLoop()

     # Case 2: generator-like. Input is Python generator, or Sequence object,
     # or a non-distributed Dataset or iterator in eager execution.
     if data_utils.is_generator_or_sequence(inputs):
-      return training_generator.GeneratorOrSequenceTrainingLoop()
+      return training_generator_v1.GeneratorOrSequenceTrainingLoop()
     if training_utils.is_eager_dataset_or_iterator(inputs):
-      return training_generator.EagerDatasetOrIteratorTrainingLoop()
+      return training_generator_v1.EagerDatasetOrIteratorTrainingLoop()

     # Case 3: Symbolic tensors or Numpy array-like.
     # This includes Datasets and iterators in graph mode (since they
     # generate symbolic tensors).
     if self.run_eagerly:
-      return training_generator.GeneratorLikeTrainingLoop()
+      return training_generator_v1.GeneratorLikeTrainingLoop()
     else:
-      return training_arrays.ArrayLikeTrainingLoop()
+      return training_arrays_v1.ArrayLikeTrainingLoop()

   def fit(self,
           x=None,
@@ -1062,7 +1062,7 @@ class Model(training_lib.Model):
     # for each replica by `self._distribution_strategy` and the same code path
     # as Eager is expected to be taken.
     if self.run_eagerly or self._distribution_strategy:
-      output_dict = training_eager.train_on_batch(
+      output_dict = training_eager_v1.train_on_batch(
           self,
           x,
           y,
@@ -1141,7 +1141,7 @@ class Model(training_lib.Model):
     # If `self._distribution_strategy` is True, then we are in a replica context
     # at this point.
     if self.run_eagerly or self._distribution_strategy:
-      output_dict = training_eager.test_on_batch(
+      output_dict = training_eager_v1.test_on_batch(
          self,
          x,
          y,
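Every module touched here lives under tensorflow.python.keras.engine, so the rename should be invisible to user code that sticks to the public API; presumably fit and train_on_batch on a compiled model behave exactly as before. A minimal smoke test under that assumption (requires only TensorFlow and NumPy):

import numpy as np
import tensorflow as tf

# Public API only; none of the renamed internal modules are imported directly.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="rmsprop", loss="mse")
model.fit(np.ones((8, 4)), np.ones((8, 1)), batch_size=2, epochs=1)
model.train_on_batch(np.ones((2, 4)), np.ones((2, 1)))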