Add v1 to the names of Keras files that are legacy-training-loop-specific.

PiperOrigin-RevId: 331851473
Change-Id: I18dfafc0e30627bb328ce945d822d8be80672e3e
Authored by Tomer Kaftan on 2020-09-15 14:11:21 -07:00; committed by TensorFlower Gardener
parent 67548eff59
commit e8384561f1
8 changed files with 25 additions and 25 deletions

View File

@@ -33,10 +33,10 @@ py_library(
         "saving.py",
         "sequential.py",
         "training.py",
-        "training_arrays.py",
-        "training_distributed.py",
-        "training_eager.py",
-        "training_generator.py",
+        "training_arrays_v1.py",
+        "training_distributed_v1.py",
+        "training_eager_v1.py",
+        "training_generator_v1.py",
         "training_utils.py",
         "training_v1.py",
     ],

View File

@@ -31,7 +31,7 @@ from tensorflow.python.framework import errors
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import sparse_tensor
 from tensorflow.python.keras import backend as K
-from tensorflow.python.keras.engine import training_generator
+from tensorflow.python.keras.engine import training_generator_v1
 from tensorflow.python.keras.engine.base_layer import Layer
 from tensorflow.python.keras.utils import tf_utils
 from tensorflow.python.ops import sparse_ops
@@ -175,7 +175,7 @@ class CombinerPreprocessingLayer(PreprocessingLayer):
       next_data = self._get_dataset_iterator(
           dataset_ops.Dataset.from_tensor_slices(data).batch(512))
     else:
-      generator, _ = training_generator.convert_to_generator_like(
+      generator, _ = training_generator_v1.convert_to_generator_like(
           data, batch_size=512)
       # If the data is not a dataset, we can iterate over it using next(foo);
       # here, we wrap that into a callable.

View File

@@ -33,7 +33,7 @@ from tensorflow.python.keras import backend as K
 from tensorflow.python.keras import callbacks as cbks
 from tensorflow.python.keras.distribute import distributed_training_utils as dist_utils
 from tensorflow.python.keras.engine import partial_batch_padding_handler as padding_util
-from tensorflow.python.keras.engine import training_arrays
+from tensorflow.python.keras.engine import training_arrays_v1
 from tensorflow.python.keras.engine import training_utils
 from tensorflow.python.keras.utils.generic_utils import Progbar
 from tensorflow.python.keras.utils.mode_keys import ModeKeys
@@ -669,7 +669,7 @@ class DistributionSingleWorkerTrainingLoop(training_utils.TrainingLoop):
         validation_steps=validation_steps,
         validation_freq=validation_freq)
-    return training_arrays.fit_loop(
+    return training_arrays_v1.fit_loop(
         model,
         dataset,
         batch_size=batch_size,
@@ -717,7 +717,7 @@ class DistributionSingleWorkerTrainingLoop(training_utils.TrainingLoop):
       return experimental_tpu_test_loop(
          model, dataset, verbose=verbose, steps=steps, callbacks=callbacks)
-    return training_arrays.test_loop(
+    return training_arrays_v1.test_loop(
        model,
        inputs=dataset,
        batch_size=batch_size,
@@ -751,7 +751,7 @@ class DistributionSingleWorkerTrainingLoop(training_utils.TrainingLoop):
     if not context.executing_eagerly():
       return experimental_tpu_predict_loop(
          model, dataset, verbose=verbose, steps=steps, callbacks=callbacks)
-    return training_arrays.predict_loop(
+    return training_arrays_v1.predict_loop(
        model,
        dataset,
        batch_size=batch_size,

View File

@@ -34,7 +34,7 @@ from tensorflow.python.keras import metrics as metrics_module
 from tensorflow.python.keras import testing_utils
 from tensorflow.python.keras.engine import input_layer
 from tensorflow.python.keras.engine import training
-from tensorflow.python.keras.engine import training_generator
+from tensorflow.python.keras.engine import training_generator_v1
 from tensorflow.python.keras.optimizer_v2 import rmsprop
 from tensorflow.python.keras.utils import data_utils
 from tensorflow.python.platform import test
@@ -527,7 +527,7 @@ class TestConvertToGeneratorLike(test.TestCase, parameterized.TestCase):
         isinstance(data, (dataset_ops.DatasetV2, iterator_ops.Iterator))):
       return
-    generator, steps = training_generator.convert_to_generator_like(
+    generator, steps = training_generator_v1.convert_to_generator_like(
        data, batch_size=2, steps_per_epoch=expected_batches)
     self.assertEqual(steps, expected_batches)

View File

@@ -45,10 +45,10 @@ from tensorflow.python.keras import optimizers
 from tensorflow.python.keras.distribute import distributed_training_utils
 from tensorflow.python.keras.engine import base_layer
 from tensorflow.python.keras.engine import training as training_lib
-from tensorflow.python.keras.engine import training_arrays
-from tensorflow.python.keras.engine import training_distributed
-from tensorflow.python.keras.engine import training_eager
-from tensorflow.python.keras.engine import training_generator
+from tensorflow.python.keras.engine import training_arrays_v1
+from tensorflow.python.keras.engine import training_distributed_v1
+from tensorflow.python.keras.engine import training_eager_v1
+from tensorflow.python.keras.engine import training_generator_v1
 from tensorflow.python.keras.engine import training_utils
 from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer
 from tensorflow.python.keras.optimizer_v2 import optimizer_v2
@@ -582,25 +582,25 @@ class Model(training_lib.Model):
     # Case 1: distribution strategy.
     if self._distribution_strategy:
       if self._in_multi_worker_mode():
-        return training_distributed.DistributionMultiWorkerTrainingLoop(
-            training_distributed.DistributionSingleWorkerTrainingLoop())
+        return training_distributed_v1.DistributionMultiWorkerTrainingLoop(
+            training_distributed_v1.DistributionSingleWorkerTrainingLoop())
       else:
-        return training_distributed.DistributionSingleWorkerTrainingLoop()
+        return training_distributed_v1.DistributionSingleWorkerTrainingLoop()

     # Case 2: generator-like. Input is Python generator, or Sequence object,
     # or a non-distributed Dataset or iterator in eager execution.
     if data_utils.is_generator_or_sequence(inputs):
-      return training_generator.GeneratorOrSequenceTrainingLoop()
+      return training_generator_v1.GeneratorOrSequenceTrainingLoop()
     if training_utils.is_eager_dataset_or_iterator(inputs):
-      return training_generator.EagerDatasetOrIteratorTrainingLoop()
+      return training_generator_v1.EagerDatasetOrIteratorTrainingLoop()

     # Case 3: Symbolic tensors or Numpy array-like.
     # This includes Datasets and iterators in graph mode (since they
     # generate symbolic tensors).
     if self.run_eagerly:
-      return training_generator.GeneratorLikeTrainingLoop()
+      return training_generator_v1.GeneratorLikeTrainingLoop()
     else:
-      return training_arrays.ArrayLikeTrainingLoop()
+      return training_arrays_v1.ArrayLikeTrainingLoop()

   def fit(self,
           x=None,
@@ -1062,7 +1062,7 @@ class Model(training_lib.Model):
     # for each replica by `self._distribution_strategy` and the same code path
     # as Eager is expected to be taken.
     if self.run_eagerly or self._distribution_strategy:
-      output_dict = training_eager.train_on_batch(
+      output_dict = training_eager_v1.train_on_batch(
          self,
          x,
          y,
@@ -1141,7 +1141,7 @@ class Model(training_lib.Model):
     # If `self._distribution_strategy` is True, then we are in a replica context
     # at this point.
     if self.run_eagerly or self._distribution_strategy:
-      output_dict = training_eager.test_on_batch(
+      output_dict = training_eager_v1.test_on_batch(
          self,
          x,
          y,
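For reference, a minimal sketch (not part of this commit) of what the rename means at the import sites touched above, assuming a TensorFlow source tree that includes this change: code driving the legacy (v1) training loops now imports the *_v1 module names, while training.py and training_utils.py keep their existing names.

# Illustrative sketch only; module paths are taken from the hunks above and
# assume a TensorFlow checkout at or after this revision.

# Old imports (pre-rename):
# from tensorflow.python.keras.engine import training_arrays
# from tensorflow.python.keras.engine import training_distributed
# from tensorflow.python.keras.engine import training_eager
# from tensorflow.python.keras.engine import training_generator

# New imports (post-rename):
from tensorflow.python.keras.engine import training_arrays_v1
from tensorflow.python.keras.engine import training_distributed_v1
from tensorflow.python.keras.engine import training_eager_v1
from tensorflow.python.keras.engine import training_generator_v1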