Branch contrib/distribute/python/combinations.py to python/distribute/combinations.py and python/distribute/strategy_combinations.py with replaced dependencies. Branch keras_test.

PiperOrigin-RevId: 239438725
This commit is contained in:
Rick Chao 2019-03-20 11:27:27 -07:00 committed by TensorFlower Gardener
parent 41e7b3ca0a
commit 41dac366fb
36 changed files with 763 additions and 620 deletions

View File

@ -19,12 +19,13 @@ py_library(
name = "distribute_test_lib_pip",
visibility = ["//tensorflow:internal"],
deps = [
":combinations",
":keras_correctness_test_lib",
":keras_test_lib",
":multi_worker_test_base",
":single_loss_example",
":strategy_test_lib",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
],
)
@ -33,8 +34,8 @@ distribute_py_test(
srcs = ["values_test.py"],
main = "values_test.py",
deps = [
":combinations",
":mirrored_strategy",
":parameter_server_strategy",
"//tensorflow/core:protos_all_py",
"//tensorflow/python:array_ops",
"//tensorflow/python:constant_op",
@ -42,7 +43,9 @@ distribute_py_test(
"//tensorflow/python:framework_test_lib",
"//tensorflow/python:training",
"//tensorflow/python:variable_scope",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:device_util",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/python/distribute:values",
"//tensorflow/python/eager:context",
"//tensorflow/python/eager:test",
@ -55,7 +58,8 @@ cuda_py_test(
name = "input_lib_test",
srcs = ["input_lib_test.py"],
additional_deps = [
":combinations",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
":mirrored_strategy",
":multi_worker_test_base",
"@absl_py//absl/testing:parameterized",
@ -95,7 +99,8 @@ cuda_py_test(
name = "parameter_server_strategy_test",
srcs = ["parameter_server_strategy_test.py"],
additional_deps = [
":combinations",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
":multi_worker_test_base",
":parameter_server_strategy",
":strategy_test_lib",
@ -137,7 +142,8 @@ cuda_py_test(
srcs = ["one_device_strategy_test.py"],
additional_deps = [
":strategy_test_lib",
":combinations",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/python/eager:test",
],
)
@ -182,42 +188,13 @@ py_library(
],
)
py_library(
name = "combinations",
srcs = ["combinations.py"],
srcs_version = "PY2AND3",
deps = [
":mirrored_strategy",
":one_device_strategy",
":parameter_server_strategy",
":tpu_strategy",
"//tensorflow/contrib/cluster_resolver:cluster_resolver_pip",
"//tensorflow/contrib/optimizer_v2:training",
"//tensorflow/python:framework_ops",
"//tensorflow/python:training",
"//tensorflow/python:util",
"//tensorflow/python/distribute:distribute_lib",
"//tensorflow/python/eager:context",
"//tensorflow/python/keras/optimizer_v2",
"@absl_py//absl/testing:parameterized",
],
)
py_test(
name = "combinations_test",
srcs = ["combinations_test.py"],
deps = [
":combinations",
"//tensorflow/python/eager:test",
],
)
# TODO(priyag): Rename this test to mirrored_strategy_test
cuda_py_test(
name = "mirrored_strategy_multigpu_test",
srcs = ["mirrored_strategy_multigpu_test.py"],
additional_deps = [
":combinations",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
":mirrored_strategy",
":multi_worker_test_base",
":strategy_test_lib",
@ -260,7 +237,8 @@ cuda_py_test(
srcs = ["keras_multi_worker_test.py"],
additional_deps = [
"//tensorflow/contrib/distribute/python:collective_all_reduce_strategy",
"//tensorflow/contrib/distribute/python:combinations",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/contrib/distribute/python:mirrored_strategy",
"//tensorflow/contrib/distribute/python:multi_worker_test_base",
"//tensorflow/contrib/distribute/python:parameter_server_strategy",
@ -307,7 +285,8 @@ cuda_py_test(
srcs = ["collective_all_reduce_strategy_test.py"],
additional_deps = [
":collective_all_reduce_strategy",
":combinations",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
":multi_worker_test_base",
":strategy_test_lib",
"@absl_py//absl/testing:parameterized",
@ -340,7 +319,6 @@ distribute_py_test(
"multi_and_single_gpu",
],
deps = [
":combinations",
":mirrored_strategy",
":single_loss_example",
"//tensorflow/contrib/tpu:tpu_lib",
@ -350,6 +328,8 @@ distribute_py_test(
"//tensorflow/python:variable_scope",
"//tensorflow/python:variables",
"//tensorflow/python/data/ops:dataset_ops",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/python/eager:context",
"//tensorflow/python/eager:test",
"//tensorflow/python/ops/losses",
@ -362,7 +342,8 @@ cuda_py_test(
name = "moving_averages_test",
srcs = ["moving_averages_test.py"],
additional_deps = [
":combinations",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"@absl_py//absl/testing:parameterized",
"//tensorflow/python/eager:test",
"//tensorflow/python:array_ops",
@ -377,14 +358,17 @@ cuda_py_test(
name = "optimizer_v2_test",
srcs = ["optimizer_v2_test.py"],
additional_deps = [
":combinations",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
":single_loss_example",
":mirrored_strategy",
"@absl_py//absl/testing:parameterized",
"//third_party/py/numpy",
"//tensorflow/python:control_flow_ops",
"//tensorflow/python:variables",
"//tensorflow/python/eager:context",
"//tensorflow/python/eager:test",
"//tensorflow/contrib/optimizer_v2:training",
],
tags = [
"multi_and_single_gpu",
@ -395,7 +379,8 @@ cuda_py_test(
name = "estimator_integration_test",
srcs = ["estimator_integration_test.py"],
additional_deps = [
":combinations",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"@absl_py//absl/testing:parameterized",
"//third_party/py/numpy",
"//tensorflow/contrib/optimizer_v2:training",
@ -433,7 +418,8 @@ cuda_py_test(
srcs = ["estimator_training_test.py"],
additional_deps = [
":collective_all_reduce_strategy",
":combinations",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
":mirrored_strategy",
":multi_worker_test_base",
":parameter_server_strategy",
@ -482,10 +468,11 @@ distribute_py_test(
"multi_and_single_gpu",
],
deps = [
":combinations",
":single_loss_example",
"//tensorflow/contrib/tpu:tpu_lib",
"//tensorflow/python:variables",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/python/eager:context",
"//tensorflow/python/eager:test",
"//third_party/py/numpy",
@ -507,7 +494,8 @@ cuda_py_test(
name = "monitor_test",
srcs = ["monitor_test.py"],
additional_deps = [
":combinations",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
":monitor",
":single_loss_example",
"@absl_py//absl/testing:parameterized",
@ -527,7 +515,8 @@ cuda_py_test(
name = "cross_device_utils_test",
srcs = ["cross_device_utils_test.py"],
additional_deps = [
":combinations",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"@absl_py//absl/testing:parameterized",
"//tensorflow/python:constant_op",
"//tensorflow/python:framework_ops",
@ -543,7 +532,8 @@ cuda_py_test(
srcs = ["cross_device_ops_test.py"],
additional_deps = [
":collective_all_reduce_strategy",
":combinations",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
":multi_worker_test_base",
":mirrored_strategy",
"@absl_py//absl/testing:parameterized",
@ -565,15 +555,16 @@ py_library(
name = "keras_test_lib",
srcs = [
"keras_backward_compat_test.py",
"keras_test.py",
"keras_utils_test.py",
],
deps = [
":combinations",
":parameter_server_strategy",
"//tensorflow/contrib/distribute/python:mirrored_strategy",
"//tensorflow/contrib/distribute/python:tpu_strategy",
"//tensorflow/python:client_testlib",
"//tensorflow/python:training",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/python/eager:test",
"//tensorflow/python/estimator:estimator_py",
"//tensorflow/python/keras",
@ -582,23 +573,6 @@ py_library(
],
)
distribute_py_test(
name = "keras_test",
srcs = ["keras_test.py"],
full_precision = True,
main = "keras_test.py",
shard_count = 32,
tags = [
"multi_and_single_gpu",
"no_oss", # TODO(b/117919883)
"no_windows_gpu",
"notsan",
],
deps = [
":keras_test_lib",
],
)
distribute_py_test(
name = "keras_utils_test",
srcs = ["keras_utils_test.py"],
@ -611,8 +585,8 @@ distribute_py_test(
"notsan",
],
deps = [
":keras_test",
":keras_test_lib",
"//tensorflow/python/keras:distribute_strategy_test",
],
)
@ -644,11 +618,12 @@ py_library(
"keras_stateful_lstm_model_correctness_test.py",
],
deps = [
":combinations",
"//tensorflow/contrib/distribute/python:mirrored_strategy",
"//tensorflow/contrib/distribute/python:tpu_strategy",
"//tensorflow/python:client_testlib",
"//tensorflow/python:training",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/python/eager:test",
"//tensorflow/python/estimator:estimator_py",
"//tensorflow/python/keras",
@ -761,11 +736,13 @@ distribute_py_test(
"multi_and_single_gpu",
],
deps = [
":combinations",
":tpu_strategy",
"//tensorflow/python:math_ops",
"//tensorflow/python:metrics",
"//tensorflow/python:variables",
"//tensorflow/python/data/ops:dataset_ops",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/python/eager:test",
"@absl_py//absl/testing:parameterized",
],
@ -776,7 +753,8 @@ cuda_py_test(
size = "medium",
srcs = ["warm_starting_util_test.py"],
additional_deps = [
":combinations",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework_ops",
"//tensorflow/python:training",
@ -793,7 +771,8 @@ cuda_py_test(
size = "medium",
srcs = ["checkpoint_utils_test.py"],
additional_deps = [
":combinations",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/python:client_testlib",
"//tensorflow/python:framework_ops",
"//tensorflow/python:training",

View File

@ -28,7 +28,8 @@ from __future__ import print_function
import os
from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
@ -62,15 +63,16 @@ class CheckpointUtilsWithDistributionStrategyTest(
v1, v2 = _create_checkpoints(session, checkpoint_dir)
return checkpoint_dir, v1, v2
@combinations.generate(combinations.combine(
distribution=[combinations.default_strategy,
combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus],
in_replica_mode=[True, False],
mode=["graph"]))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
in_replica_mode=[True, False],
mode=["graph"]))
def testInitFromCheckpoint(self, distribution, in_replica_mode):
checkpoint_dir, v1_value, v2_value = self._get_test_object()
@ -98,11 +100,10 @@ class CheckpointUtilsWithDistributionStrategyTest(
@combinations.generate(
combinations.combine(
distribution=[
combinations.default_strategy, combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus
strategy_combinations.default_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
in_replica_mode=[True, False],
mode=["graph"]))

View File

@ -22,7 +22,6 @@ from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import collective_all_reduce_strategy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.contrib.distribute.python import strategy_test_lib
from tensorflow.core.protobuf import config_pb2
@ -30,6 +29,7 @@ from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import collective_all_reduce_strategy as core_collective_all_reduce_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import distribute_lib

View File

@ -24,14 +24,15 @@ from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import collective_all_reduce_strategy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import test
@ -246,9 +247,8 @@ class SingleWorkerCrossDeviceOpsTest(CrossDeviceOpsTestBase):
# strategy.
reduction_to_one_combinations = combinations.combine(
cross_device_ops=[
combinations.NamedObject(
"DefaultReductionToOneDevice",
cross_device_ops_lib.ReductionToOneDevice()),
combinations.NamedObject("DefaultReductionToOneDevice",
cross_device_ops_lib.ReductionToOneDevice()),
combinations.NamedObject(
"ReductionToCPUDeviceCrossDeviceOps",
cross_device_ops_lib.ReductionToOneDevice(
@ -259,11 +259,9 @@ class SingleWorkerCrossDeviceOpsTest(CrossDeviceOpsTestBase):
accumulation_fn=math_ops.accumulate_n)),
],
distribution=[
combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
mode=["graph", "eager"])
allreduce_combinations = combinations.combine(
@ -285,8 +283,7 @@ class SingleWorkerCrossDeviceOpsTest(CrossDeviceOpsTestBase):
"hierarchical_copy", 0, 100, 10))
],
distribution=[
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_two_gpus
strategy_combinations.mirrored_strategy_with_two_gpus,
],
mode=["graph", "eager"])

View File

@ -20,7 +20,7 @@ from __future__ import print_function
from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import values as value_lib

View File

@ -22,10 +22,10 @@ import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.optimizer_v2 import adagrad
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import test
from tensorflow.python.estimator import run_config
from tensorflow.python.estimator import training
@ -60,11 +60,9 @@ class DNNLinearCombinedClassifierIntegrationTest(test.TestCase,
combinations.combine(
mode=['graph'],
distribution=[
combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
use_train_and_evaluate=[True, False]))
def test_complete_flow_with_mode(self, distribution, use_train_and_evaluate):

View File

@ -28,12 +28,12 @@ from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import collective_all_reduce_strategy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.contrib.distribute.python import parameter_server_strategy
from tensorflow.contrib.optimizer_v2 import adagrad
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import estimator_training as dc_training

View File

@ -19,10 +19,9 @@ from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import values

View File

@ -19,13 +19,13 @@ from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import test
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
@ -290,16 +290,16 @@ def get_correctness_test_inputs(use_numpy, use_validation_data,
strategies_minus_tpu = [
combinations.default_strategy,
combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus]
strategy_combinations.default_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
]
tpu_strategies = [
combinations.tpu_strategy, # steps_per_run=2
combinations.tpu_strategy_one_step]
strategy_combinations.tpu_strategy, # steps_per_run=2
strategy_combinations.tpu_strategy_one_step
]
def strategy_minus_tpu_combinations():
@ -322,14 +322,14 @@ def strategy_and_optimizer_combinations():
return combinations.times(
all_strategy_combinations(),
combinations.combine(optimizer=[
combinations.adagrad_optimizer_v1_fn,
combinations.adagrad_optimizer_keras_v2_fn,
combinations.adam_optimizer_v1_fn,
combinations.adam_optimizer_keras_v2_fn,
combinations.gradient_descent_optimizer_v1_fn,
combinations.gradient_descent_optimizer_keras_v2_fn,
combinations.rmsprop_optimizer_v1_fn,
combinations.rmsprop_optimizer_keras_v2_fn
strategy_combinations.adagrad_optimizer_v1_fn,
strategy_combinations.adagrad_optimizer_keras_v2_fn,
strategy_combinations.adam_optimizer_v1_fn,
strategy_combinations.adam_optimizer_keras_v2_fn,
strategy_combinations.gradient_descent_optimizer_v1_fn,
strategy_combinations.gradient_descent_optimizer_keras_v2_fn,
strategy_combinations.rmsprop_optimizer_v1_fn,
strategy_combinations.rmsprop_optimizer_keras_v2_fn
]))
@ -532,11 +532,12 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
# as clone_model's input_tensors argument only seems to accept list and not
# tuples or dict.
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_fit_with_tuple_and_dict_dataset_inputs(self, distribution):
with self.cached_session():
model = multi_input_output_model()
@ -617,11 +618,12 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
# TODO(b/120943676, b/120957836): Re-enable once the validation code is
# restored.
def DISABLED_test_dataset_wrong_input_shape(self, distribution):
@ -643,9 +645,12 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine(
distribution=[combinations.mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu
],
mode=['graph', 'eager']))
# TODO(b/120943676, b/120957836): Re-enable once the validation code is
# restored.
def DISABLED_test_dataset_no_batch_input_validation(self, distribution):
@ -665,9 +670,10 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
with self.assertRaisesRegexp(ValueError, 'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine(
distribution=[combinations.tpu_strategy_one_step],
mode=['graph']))
@combinations.generate(
combinations.combine(
distribution=[strategy_combinations.tpu_strategy_one_step],
mode=['graph']))
def test_dataset_input_shape_fully_defined(self, distribution):
with self.cached_session():
model = get_model()
@ -684,13 +690,13 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
with self.assertRaisesRegexp(ValueError, 'requires fully defined shapes'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus],
mode=['graph', 'eager']))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
mode=['graph', 'eager']))
def test_learning_phase_value(self, distribution):
# TODO(anjalisridhar): Modify this test to use Lambdas since we can compare
# meaningful values. Currently we don't pass the learning phase if the
@ -761,11 +767,12 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
@test_util.run_v1_only('model.compile(..distribute=..) only works in TF v1')
class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_unsupported_features(self, distribution):
with self.cached_session():
model = get_model()
@ -815,11 +822,12 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
'`steps` argument'):
model.predict(dataset, verbose=0)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_calling_with_unsupported_predefined_callbacks(self, distribution):
with self.cached_session():
model = get_model()
@ -852,11 +860,12 @@ class TestDistributionStrategyWithLossMasking(test.TestCase,
# TODO(priyag): Enable all strategies for this test. Currently it does not
# work for TPU due to some invalid datatype.
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_masking(self, distribution):
with self.cached_session():
np.random.seed(1337)

View File

@ -21,13 +21,13 @@ import functools
from absl.testing import parameterized
import numpy as np
import six
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import random_seed
@ -42,14 +42,12 @@ _GLOBAL_BATCH_SIZE = 64
all_strategies = [
combinations.default_strategy,
combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus,
combinations.tpu_strategy, # steps_per_run=2
combinations.tpu_strategy_one_step,
strategy_combinations.default_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
strategy_combinations.tpu_strategy, # steps_per_run=2
strategy_combinations.tpu_strategy_one_step,
]
@ -92,8 +90,10 @@ def test_combinations_for_embedding_model():
def test_combinations_with_tpu_strategies():
tpu_strategies = [combinations.tpu_strategy,
combinations.tpu_strategy_one_step]
tpu_strategies = [
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_one_step
]
return (
combinations.times(

View File

@ -18,11 +18,10 @@ from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import keras_correctness_test_base
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.eager import test
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras
from tensorflow.python.training import gradient_descent

View File

@ -18,10 +18,9 @@ from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import keras_correctness_test_base
from tensorflow.python import keras
from tensorflow.python.distribute import combinations
from tensorflow.python.eager import test
from tensorflow.python.training import gradient_descent

View File

@ -18,10 +18,9 @@ from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import keras_correctness_test_base
from tensorflow.python import keras
from tensorflow.python.distribute import combinations
from tensorflow.python.eager import test
from tensorflow.python.keras.optimizer_v2 import gradient_descent

View File

@ -18,10 +18,9 @@ from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import keras_correctness_test_base
from tensorflow.python import keras
from tensorflow.python.distribute import combinations
from tensorflow.python.eager import test
from tensorflow.python.training import gradient_descent

View File

@ -32,12 +32,12 @@ from absl.testing import parameterized
# pylint: disable=g-direct-tensorflow-import
from tensorflow.contrib.distribute.python import collective_all_reduce_strategy as collective_strategy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import multi_worker_test_base as test_base
from tensorflow.contrib.distribute.python import parameter_server_strategy
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.eager import context

View File

@ -20,9 +20,9 @@ from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import parameter_server_strategy
from tensorflow.python import keras
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
@ -36,6 +36,16 @@ from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# TODO(rchao): Merge parameter_server_strategy_with_two_gpus into
# third_party/tensorflow/python/distribute/strategy_combinations.py
# pylint: disable=g-long-lambda
parameter_server_strategy_with_two_gpus = combinations.NamedDistribution(
'ParameterServer2GPUs',
lambda: parameter_server_strategy.ParameterServerStrategy(
num_gpus_per_worker=2),
required_gpus=2)
def get_model():
x = keras.layers.Input(shape=(3,), name='input')
y = keras.layers.Dense(4, name='dense')(x)
@ -48,9 +58,7 @@ class MirroredStrategyOptimizerV2Test(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=[
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus,
combinations.parameter_server_strategy_with_two_gpus,
parameter_server_strategy_with_two_gpus,
],
mode=['graph', 'eager']))
def testKerasOptimizerWithUnequalInput(self, distribution):
@ -106,8 +114,7 @@ class MirroredStrategyOptimizerV2Test(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=[
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.parameter_server_strategy_with_two_gpus,
parameter_server_strategy_with_two_gpus,
],
mode=['graph', 'eager']))
def testOptimizerWithKerasModelAndNumpyArrays(self, distribution):

View File

@ -18,10 +18,10 @@ from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import keras_correctness_test_base
from tensorflow.python import keras
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import test
from tensorflow.python.training import gradient_descent
@ -29,8 +29,10 @@ from tensorflow.python.training import gradient_descent
def strategies_for_stateful_embedding_model():
"""Returns TPUStrategy with single core device assignment."""
return [combinations.tpu_strategy_one_core,
combinations.tpu_strategy_one_step_one_core]
return [
strategy_combinations.tpu_strategy_one_core,
strategy_combinations.tpu_strategy_one_step_one_core
]
def test_combinations_for_stateful_embedding_model():

View File

@ -22,16 +22,16 @@ import collections
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import keras_test as keras_test_lib
from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import values
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import distribute_strategy_test as keras_test_lib
from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.keras.optimizer_v2 import rmsprop as rms_prop_keras
from tensorflow.python.ops import math_ops
@ -166,8 +166,7 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_validating_dataset_input_tensors_with_shape_mismatch(
@ -192,8 +191,7 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_validating_dataset_input_tensors_with_dtype_mismatch(
@ -218,8 +216,7 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_unsupported_features(self, distribution):
@ -281,8 +278,7 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_calling_with_unsupported_predefined_callbacks(self, distribution):
@ -321,7 +317,8 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=[combinations.one_device_strategy], mode=['graph']))
distribution=[strategy_combinations.one_device_strategy],
mode=['graph']))
def test_distribution_strategy_with_add_metric_add_loss(self, distribution):
with distribution.scope():
x = keras.layers.Input(shape=(1,))
@ -347,7 +344,8 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=[combinations.one_device_strategy], mode=['eager']))
distribution=[strategy_combinations.one_device_strategy],
mode=['eager']))
def test_distribution_strategy_with_run_eagerly(self, distribution):
with distribution.scope():
x = keras.layers.Input(shape=(1,))
@ -364,8 +362,7 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_distribution_strategy_on_subclassed_model(self, distribution):
@ -393,8 +390,7 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_distribution_strategy_on_deferred_sequential_model(
@ -421,8 +417,7 @@ class TestDistributionStrategyWithLossMasking(test.TestCase,
@combinations.generate(
combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=['graph', 'eager']))
def test_masking(self, distribution):

View File

@ -18,10 +18,10 @@ from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import test
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
@ -74,19 +74,22 @@ def _regression_dataset_fn():
def all_combinations():
return combinations.combine(
distribution=[combinations.default_strategy,
combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus],
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
mode=["graph"])
def tpu_combinations():
return combinations.combine(distribution=[combinations.tpu_strategy_one_step,
combinations.tpu_strategy],
mode=["graph"])
return combinations.combine(
distribution=[
strategy_combinations.tpu_strategy_one_step,
strategy_combinations.tpu_strategy
],
mode=["graph"])
# TODO(josh11b): Test metrics.recall_at_top_k, metrics.average_precision_at_k,

View File

@ -20,12 +20,12 @@ from __future__ import print_function
from absl.testing import parameterized
import numpy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python.single_loss_example import batchnorm_example
from tensorflow.contrib.distribute.python.single_loss_example import minimize_loss_example
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
@ -48,12 +48,12 @@ class MinimizeLossStepTest(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.times(
combinations.distributions_and_v1_optimizers(),
strategy_combinations.distributions_and_v1_optimizers(),
combinations.combine(mode=["graph"], use_callable_loss=[True, False])
+ combinations.combine(mode=["eager"], use_callable_loss=[True])) +
combinations.combine(
distribution=[combinations.tpu_strategy],
optimizer_fn=combinations.optimizers_v1,
distribution=[strategy_combinations.tpu_strategy],
optimizer_fn=strategy_combinations.optimizers_v1,
mode=["graph"],
use_callable_loss=[True, False]))
def testTrainNetwork(self, distribution, optimizer_fn, use_callable_loss):
@ -90,7 +90,7 @@ class MinimizeLossStepTest(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.times(
combinations.distributions_and_v1_optimizers(),
strategy_combinations.distributions_and_v1_optimizers(),
combinations.combine(mode=["graph"], use_callable_loss=[True, False])
+ combinations.combine(mode=["eager"], use_callable_loss=[True])))
def testTrainNetworkByCallForEachReplica(self, distribution, optimizer_fn,
@ -124,13 +124,11 @@ class MinimizeLossStepTest(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.times(
combinations.distributions_and_v1_optimizers() +
combinations.distributions_and_v2_optimizers(),
combinations.combine(mode=["graph", "eager"])) +
combinations.combine(
distribution=[combinations.tpu_strategy],
optimizer_fn=combinations.optimizers_v1+combinations.optimizers_v2,
mode=["graph"]))
strategy_combinations.distributions_and_v1_optimizers(),
combinations.combine(mode=["graph", "eager"])) + combinations.combine(
distribution=[strategy_combinations.tpu_strategy],
optimizer_fn=strategy_combinations.optimizers_v1,
mode=["graph"]))
def testOptimizerInsideModelFn(self, distribution, optimizer_fn):
created_variables = []
trainable_variables = []
@ -195,15 +193,15 @@ class MinimizeLossStepTest(test.TestCase, parameterized.TestCase):
combinations.times(
combinations.combine(momentum=[0.8, 0.9, 0.99], renorm=[False, True]),
combinations.times(
combinations.distributions_and_v1_optimizers(),
strategy_combinations.distributions_and_v1_optimizers(),
combinations.combine(
mode=["graph", "eager"],
# TODO(isaprykin): Allow False here. Currently subsequent
# replicas will re-execute UPDATE_OPS of previous replicas.
update_ops_in_cross_replica_mode=[True])) +
combinations.combine(
distribution=[combinations.tpu_strategy],
optimizer_fn=combinations.optimizers_v1,
distribution=[strategy_combinations.tpu_strategy],
optimizer_fn=strategy_combinations.optimizers_v1,
mode=["graph"],
update_ops_in_cross_replica_mode=[False])))
def testTrainNetworkWithBatchNorm(self, distribution, optimizer_fn, momentum,
@ -262,8 +260,7 @@ class MinimizeLossStepTest(test.TestCase, parameterized.TestCase):
combinations.times(
combinations.combine(
optimizer_fn=[
combinations.gradient_descent_optimizer_v1_fn,
combinations.gradient_descent_optimizer_v2_fn
strategy_combinations.gradient_descent_optimizer_v1_fn,
],
loss_reduction=[
losses_impl.Reduction.SUM, losses_impl.Reduction.MEAN,
@ -271,19 +268,16 @@ class MinimizeLossStepTest(test.TestCase, parameterized.TestCase):
losses_impl.Reduction.SUM_OVER_NONZERO_WEIGHTS
]),
combinations.times(
combinations.combine(
distribution=[
combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus
]),
combinations.combine(distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
]),
combinations.combine(
mode=["graph"], use_callable_loss=[True, False]) +
combinations.combine(mode=["eager"], use_callable_loss=[True])) +
combinations.combine(
distribution=[combinations.tpu_strategy],
distribution=[strategy_combinations.tpu_strategy],
mode=["graph"],
use_callable_loss=[True, False])))
def testMeanVsSum(self, distribution, optimizer_fn, loss_reduction,
@ -361,14 +355,13 @@ class MinimizeLossStepTest(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.times(
combinations.distributions_and_v1_optimizers(),
strategy_combinations.distributions_and_v1_optimizers(),
combinations.combine(mode=["graph", "eager"]),
combinations.combine(is_tpu=[False])) +
combinations.combine(
distribution=[combinations.tpu_strategy],
optimizer_fn=combinations.optimizers_v1,
mode=["graph"],
is_tpu=[True]))
combinations.combine(is_tpu=[False])) + combinations.combine(
distribution=[strategy_combinations.tpu_strategy],
optimizer_fn=strategy_combinations.optimizers_v1,
mode=["graph"],
is_tpu=[True]))
def testRunStepsWithOutputContext(self, distribution, optimizer_fn, is_tpu):
with distribution.scope():
def dataset_fn():

View File

@ -23,17 +23,17 @@ import sys
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.contrib.distribute.python import strategy_test_lib
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import values
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
@ -62,13 +62,13 @@ from tensorflow.python.training import server_lib
GPU_TEST = "test_gpu" in sys.argv[0]
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus],
mode=["graph", "eager"]))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
mode=["graph", "eager"]))
class MirroredTwoDeviceDistributionTest(
strategy_test_lib.DistributionTestBase,
strategy_test_lib.TwoDeviceDistributionTestBase,
@ -171,10 +171,9 @@ class MirroredTwoDeviceDistributionTest(
def one_device_combinations():
return combinations.combine(
distribution=[
combinations.mirrored_strategy_with_one_cpu,
combinations.mirrored_strategy_with_one_gpu,
combinations.core_mirrored_strategy_with_one_cpu,
combinations.core_mirrored_strategy_with_one_gpu],
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_one_gpu,
],
mode=["graph", "eager"])
@ -221,10 +220,12 @@ class MirroredOneDeviceDistributionTest(
class MirroredStrategyVariableCreatorStackTest(
test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(
distribution=[combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph"]))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=["graph"]))
def testCreatorStacksAreThreadLocal(self, distribution):
def model_fn():
replica_id_str = str(self.evaluate(_replica_id()))
@ -253,11 +254,13 @@ class MirroredStrategyVariableCreatorStackTest(
expected = ("main_thread:thread_0", "main_thread:thread_1")
self.assertEqual(expected, result)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"]))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=["graph", "eager"]))
class MirroredStrategyCallForEachReplicaTest(test.TestCase):
def testExecutingEagerlyOutsideFunction(self, distribution):
@ -304,11 +307,13 @@ class MirroredStrategyCallForEachReplicaTest(test.TestCase):
RuntimeError, "`merge_call` called while defining a new graph."):
distribution.extended.call_for_each_replica(model_fn)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"]))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=["graph", "eager"]))
class MirroredStrategyVariableCreationTest(test.TestCase):
# TODO(priyag): Modify more tests to use this helper and check more
@ -794,11 +799,12 @@ class MirroredStrategyVariableCreationTest(test.TestCase):
self.assertIs(distribution, sync_on_read.distribute_strategy)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph"]))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=["graph"]))
class MirroredStrategyNameScopeTest(test.TestCase):
# NOTE(priyag): Names and name scopes are ignored in eager, hence we are not
# testing this in eager mode.
@ -965,11 +971,12 @@ class MirroredThreeDeviceDistributionTest(
self.assertEqual("foo:0", result.name)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"]))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=["graph", "eager"]))
class MirroredVariableUpdateTest(test.TestCase):
# The following tests check assign, assign_add and assign_sub on Mirrored
# variables in replica and cross replica context.
@ -1184,11 +1191,12 @@ class MirroredVariableUpdateTest(test.TestCase):
self.assertEqual(4.0, self.evaluate(mirrored_var))
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"]))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=["graph", "eager"]))
class MirroredAndSyncOnReadVariableInitializerTest(test.TestCase):
def testAssignMirroredVarInitializer(self, distribution):
@ -1227,11 +1235,12 @@ class MirroredAndSyncOnReadVariableInitializerTest(test.TestCase):
self.assertTrue(self.evaluate(sync_on_read_var.is_initialized()))
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"]))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=["graph", "eager"]))
class SyncOnReadVariableAssignTest(test.TestCase):
def testAssignReplicaLocalVarSumAggregation(self, distribution):
@ -1317,11 +1326,12 @@ class MiniModel(keras_training.Model):
return self.fc(inputs)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"]))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=["graph", "eager"]))
class MirroredStrategyDefunTest(test.TestCase):
def _call_and_check(self, distribution, model_fn, inputs, expected_result,

View File

@ -20,12 +20,12 @@ from __future__ import print_function
from absl.testing import parameterized
import numpy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import monitor as monitor_lib
from tensorflow.contrib.distribute.python.single_loss_example import single_loss_example
from tensorflow.python.client import session
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import one_device_strategy
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import ops
@ -36,8 +36,9 @@ class MonitorTest(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.times(
combinations.distributions_and_v1_optimizers(),
combinations.combine(mode=combinations.graph_and_eager_modes)))
strategy_combinations.distributions_and_v1_optimizers(),
combinations.combine(
mode=strategy_combinations.graph_and_eager_modes)))
def testTrainNetwork(self, distribution, optimizer_fn):
with distribution.scope():
single_loss_step, layer = single_loss_example(optimizer_fn, distribution)

View File

@ -20,7 +20,8 @@ from __future__ import print_function
from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
@ -30,10 +31,11 @@ from tensorflow.python.training import moving_averages
all_combinations = combinations.combine(
distribution=[combinations.default_strategy,
combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=["graph"])

View File

@ -17,19 +17,21 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import strategy_test_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context
from tensorflow.python.eager import test
@combinations.generate(combinations.combine(
distribution=[
combinations.one_device_strategy,
combinations.one_device_strategy_gpu],
mode=["eager", "graph"]))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.one_device_strategy_gpu
],
mode=["eager", "graph"]))
class OneDeviceStrategyTest(
strategy_test_lib.DistributionTestBase,
strategy_test_lib.OneDeviceDistributionTestBase):

View File

@ -20,20 +20,53 @@ from __future__ import print_function
from absl.testing import parameterized
import numpy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy as mirrored_lib
from tensorflow.contrib.distribute.python.single_loss_example import minimize_loss_example
from tensorflow.contrib.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.contrib.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
mirrored_strategy_with_gpu_and_cpu = combinations.NamedDistribution(
"MirroredCPUAndGPU",
lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/cpu:0"]),
required_gpus=1)
mirrored_strategy_with_two_gpus = combinations.NamedDistribution(
"Mirrored2GPUs",
lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/gpu:1"]),
required_gpus=2)
# pylint: disable=g-long-lambda
gradient_descent_optimizer_v2_fn = combinations.NamedObject(
"GradientDescentV2", lambda: gradient_descent_v2.GradientDescentOptimizer(
0.2))
adagrad_optimizer_v2_fn = combinations.NamedObject(
"AdagradV2", lambda: adagrad_v2.AdagradOptimizer(0.001))
optimizers_v2 = [gradient_descent_optimizer_v2_fn, adagrad_optimizer_v2_fn]
def distributions_and_v2_optimizers():
"""DistributionStrategies and V2 Optimizers."""
return combinations.combine(
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
optimizer_fn=optimizers_v2)
class MinimizeLossOptimizerV2Test(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.times(
combinations.distributions_and_v2_optimizers(),
distributions_and_v2_optimizers(),
combinations.combine(mode=["graph"], use_callable_loss=[True, False])
+ combinations.combine(mode=["eager"], use_callable_loss=[True])))
def testTrainNetwork(self, distribution, optimizer_fn,

View File

@ -21,13 +21,12 @@ from __future__ import print_function
import copy
import threading
from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.contrib.distribute.python import parameter_server_strategy
from tensorflow.contrib.distribute.python import strategy_test_lib
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribution_strategy_context as ds_context

View File

@ -20,9 +20,9 @@ from __future__ import print_function
from absl.testing import parameterized
import numpy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python.single_loss_example import single_loss_example
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.ops import variables
@ -32,14 +32,14 @@ class SingleLossStepTest(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.times(
combinations.distributions_and_v1_optimizers(),
combinations.combine(mode=combinations.graph_and_eager_modes),
combinations.combine(is_tpu=[False])) +
combinations.combine(
distribution=[combinations.tpu_strategy],
optimizer_fn=combinations.optimizers_v1,
mode=["graph"],
is_tpu=[True]))
strategy_combinations.distributions_and_v1_optimizers(),
combinations.combine(
mode=strategy_combinations.graph_and_eager_modes),
combinations.combine(is_tpu=[False])) + combinations.combine(
distribution=[strategy_combinations.tpu_strategy],
optimizer_fn=strategy_combinations.optimizers_v1,
mode=["graph"],
is_tpu=[True]))
def testTrainNetwork(self, distribution, optimizer_fn, is_tpu):
with distribution.scope():
single_loss_step, layer = single_loss_example(

View File

@ -20,10 +20,11 @@ from __future__ import print_function
import os
from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import parameter_server_strategy
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import values
from tensorflow.python.eager import context
from tensorflow.python.eager import test
@ -38,6 +39,16 @@ from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.training import saver as saver_lib
# TODO(rchao): Merge parameter_server_strategy_with_two_gpus into
# third_party/tensorflow/python/distribute/strategy_combinations.py
# pylint: disable=g-long-lambda
parameter_server_strategy_with_two_gpus = combinations.NamedDistribution(
"ParameterServer2GPUs",
lambda: parameter_server_strategy.ParameterServerStrategy(
num_gpus_per_worker=2),
required_gpus=2)
class DistributedValuesTest(test.TestCase):
def testGetEager(self):
@ -495,11 +506,12 @@ class MirroredVariableTest(test.TestCase, parameterized.TestCase):
save_path = self._save_normal()
self._restore_mirrored(save_path)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_one_gpu,
combinations.core_mirrored_strategy_with_one_gpu],
mode=["graph"]))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_one_gpu,
],
mode=["graph"]))
def testFetchAMirroredVariable(self, distribution):
with self.session(graph=ops.Graph()) as sess, distribution.scope():
with ops.device("/device:GPU:0"):
@ -511,14 +523,14 @@ class MirroredVariableTest(test.TestCase, parameterized.TestCase):
sess.run(variables_lib.global_variables_initializer())
sess.run({"complicated": mirrored})
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_one_cpu,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.tpu_strategy,
],
mode=["graph", "eager"]))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
],
mode=["graph", "eager"]))
def testAssignOutOfScope_mirrored(self, distribution):
with distribution.scope():
mirrored = variables_lib.Variable(1.)
@ -529,9 +541,10 @@ class MirroredVariableTest(test.TestCase, parameterized.TestCase):
for component in mirrored.values:
self.assertEqual(self.evaluate(component.read_value()), 3.)
@combinations.generate(combinations.combine(
distribution=[combinations.parameter_server_strategy_with_two_gpus],
mode=["graph", "eager"]))
@combinations.generate(
combinations.combine(
distribution=[parameter_server_strategy_with_two_gpus],
mode=["graph", "eager"]))
def testAssignOutOfScope_aggregating(self, distribution):
with distribution.scope():
aggregating = variables_lib.Variable(1.)
@ -600,11 +613,12 @@ class SyncOnReadVariablePropertiesTest(test.TestCase):
self.assertEqual(converted.dtype, replica_local.dtype)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"]))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=["graph", "eager"]))
class SyncOnReadVariableTest(test.TestCase, parameterized.TestCase):
def _assign_replica_local(self, devices, v, new):

View File

@ -28,7 +28,8 @@ from __future__ import print_function
import os
from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
@ -40,16 +41,17 @@ from tensorflow.python.training import warm_starting_util as ws_util
class WarmStartingUtilWithDistributionStrategyTest(
test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(
distribution=[combinations.default_strategy,
combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus],
save_with_distribution=[True, False],
restore_with_distribution=[True, False],
mode=["graph"]))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
save_with_distribution=[True, False],
restore_with_distribution=[True, False],
mode=["graph"]))
def testWarmStart(self, distribution, save_with_distribution,
restore_with_distribution):

View File

@ -496,3 +496,45 @@ py_library(
"@six_archive//:six",
],
)
# Test-parameter combination machinery (`combine`, `generate`,
# NamedDistribution/NamedObject). Strategy instances live in
# :strategy_combinations to keep this target dependency-light.
py_library(
    name = "combinations",
    srcs = ["combinations.py"],
    srcs_version = "PY2AND3",
    deps = [
        "//tensorflow/python:framework_ops",
        "//tensorflow/python:util",
        "//tensorflow/python/eager:context",
        "@absl_py//absl/testing:parameterized",
    ],
)
# Named strategy and optimizer instances for use with :combinations.
py_library(
    name = "strategy_combinations",
    srcs = ["strategy_combinations.py"],
    srcs_version = "PY2AND3",
    deps = [
        ":combinations",
        ":distribute_lib",
        ":mirrored_strategy",
        ":one_device_strategy",
        ":parameter_server_strategy",
        ":tpu_strategy",
        "//tensorflow/python:framework_ops",
        "//tensorflow/python:training",
        "//tensorflow/python/distribute/cluster_resolver:cluster_resolver_lib",
        "//tensorflow/python/eager:context",
        "//tensorflow/python/keras/optimizer_v2",
        "@absl_py//absl/testing:parameterized",
    ],
)
# Unit tests for the :combinations library.
py_test(
    name = "combinations_test",
    srcs = ["combinations_test.py"],
    deps = [
        ":combinations",
        "//tensorflow/python/eager:test",
        "//tensorflow/python/keras:backend",
    ],
)

View File

@ -43,29 +43,12 @@ from collections import OrderedDict
import sys
import types
import unittest
from absl.testing import parameterized
import six
from tensorflow.contrib.distribute.python import mirrored_strategy as mirrored_lib
from tensorflow.contrib.distribute.python import parameter_server_strategy
from tensorflow.contrib.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.contrib.optimizer_v2 import adam as adam_v2
from tensorflow.contrib.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import one_device_strategy as one_device_lib
from tensorflow.python.distribute import tpu_strategy as tpu_lib
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_keras_v2
from tensorflow.python.keras.optimizer_v2 import adam as adam_keras_v2
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_keras_v2
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.training import adagrad
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
from tensorflow.python.util import tf_inspect
@ -73,6 +56,8 @@ GPU_TEST = "test_gpu" in sys.argv[0]
TPU_TEST = "test_tpu" in sys.argv[0]
# TODO(rchao): Rename `distribution` parameter to `strategy` or
# `distribute_strategy`
def generate(combinations):
"""A decorator for generating test cases of a test method or a test class.
@ -325,143 +310,3 @@ class NamedDistribution(object):
@property
def required_tpu(self):
return self._required_tpu
def _get_tpu_strategy_creator(steps_per_run, use_single_core=False, **kwargs):
  """Returns a no-arg factory that builds a `TPUStrategy`.

  Args:
    steps_per_run: forwarded to the `TPUStrategy` constructor.
    use_single_core: if True, restrict the device assignment to a single core.
    **kwargs: extra keyword arguments forwarded to `TPUStrategy`.

  Returns:
    A zero-argument callable that initializes the TPU system and returns a
    configured `TPUStrategy` instance.
  """

  def _create_tpu_strategy():
    resolver = tpu_cluster_resolver.TPUClusterResolver("")
    topology = tpu_strategy_util.initialize_tpu_system(resolver)
    if use_single_core:
      device_assignment = device_assignment_lib.DeviceAssignment(
          topology,
          core_assignment=device_assignment_lib.SINGLE_CORE_ASSIGNMENT)
    else:
      device_assignment = None
    return tpu_lib.TPUStrategy(
        resolver,
        steps_per_run=steps_per_run,
        device_assignment=device_assignment,
        **kwargs)

  return _create_tpu_strategy
# pylint: disable=g-long-lambda
# The strategy that is picked up when no explicit strategy scope is active.
default_strategy = NamedDistribution(
    "Default",
    distribution_strategy_context._get_default_strategy,  # pylint: disable=protected-access
    required_gpus=None)

# Single-device strategies.
one_device_strategy = NamedDistribution(
    "OneDeviceCPU",
    lambda: one_device_lib.OneDeviceStrategy("/cpu:0"),
    required_gpus=None)
one_device_strategy_gpu = NamedDistribution(
    "OneDeviceGPU",
    lambda: one_device_lib.OneDeviceStrategy("/gpu:0"),
    required_gpus=1)

# TPU strategies with varying steps_per_run / core assignment.
tpu_strategy = NamedDistribution(
    "TPU",
    _get_tpu_strategy_creator(steps_per_run=2),
    required_tpu=True)
tpu_strategy_one_step = NamedDistribution(
    "TPUOneStep",
    _get_tpu_strategy_creator(steps_per_run=1),
    required_tpu=True)
tpu_strategy_one_core = NamedDistribution(
    "TPUOneCore",
    _get_tpu_strategy_creator(steps_per_run=2, use_single_core=True),
    required_tpu=True)
tpu_strategy_one_step_one_core = NamedDistribution(
    "TPUOneStepOneCore",
    _get_tpu_strategy_creator(steps_per_run=1, use_single_core=True),
    required_tpu=True)

# Contrib MirroredStrategy over various device sets.
mirrored_strategy_with_one_cpu = NamedDistribution(
    "Mirrored1CPU",
    lambda: mirrored_lib.MirroredStrategy(["/cpu:0"]))
mirrored_strategy_with_one_gpu = NamedDistribution(
    "Mirrored1GPU",
    lambda: mirrored_lib.MirroredStrategy(["/gpu:0"]),
    required_gpus=1)
mirrored_strategy_with_gpu_and_cpu = NamedDistribution(
    "MirroredCPUAndGPU",
    lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/cpu:0"]),
    required_gpus=1)
mirrored_strategy_with_two_gpus = NamedDistribution(
    "Mirrored2GPUs",
    lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/gpu:1"]),
    required_gpus=2)

# CoreMirroredStrategy variants over the same device sets.
core_mirrored_strategy_with_one_cpu = NamedDistribution(
    "CoreMirrored1CPU",
    lambda: mirrored_lib.CoreMirroredStrategy(["/cpu:0"]))
core_mirrored_strategy_with_one_gpu = NamedDistribution(
    "CoreMirrored1GPU",
    lambda: mirrored_lib.CoreMirroredStrategy(["/gpu:0"]),
    required_gpus=1)
core_mirrored_strategy_with_gpu_and_cpu = NamedDistribution(
    "CoreMirroredCPUAndGPU",
    lambda: mirrored_lib.CoreMirroredStrategy(["/gpu:0", "/cpu:0"]),
    required_gpus=1)
core_mirrored_strategy_with_two_gpus = NamedDistribution(
    "CoreMirrored2GPUs",
    lambda: mirrored_lib.CoreMirroredStrategy(["/gpu:0", "/gpu:1"]),
    required_gpus=2)

# Parameter-server strategy, two GPUs per worker.
parameter_server_strategy_with_two_gpus = NamedDistribution(
    "ParameterServer2GPUs",
    lambda: parameter_server_strategy.ParameterServerStrategy(
        num_gpus_per_worker=2),
    required_gpus=2)
# TF v1 (tf.train) optimizers wrapped as NamedObjects.
gradient_descent_optimizer_v1_fn = NamedObject(
    "GradientDescentV1", lambda: gradient_descent.GradientDescentOptimizer(0.2))
adagrad_optimizer_v1_fn = NamedObject(
    "AdagradV1", lambda: adagrad.AdagradOptimizer(0.001))
adam_optimizer_v1_fn = NamedObject(
    "AdamV1", lambda: adam.AdamOptimizer(0.001, epsilon=1))
rmsprop_optimizer_v1_fn = NamedObject(
    "RmsPropV1", lambda: rmsprop.RMSPropOptimizer(0.001))

# Default v1 optimizer set used by distributions_and_v1_optimizers().
optimizers_v1 = [gradient_descent_optimizer_v1_fn, adagrad_optimizer_v1_fn]

# contrib optimizer_v2 optimizers wrapped as NamedObjects.
gradient_descent_optimizer_v2_fn = NamedObject(
    "GradientDescentV2",
    lambda: gradient_descent_v2.GradientDescentOptimizer(0.2))
adagrad_optimizer_v2_fn = NamedObject(
    "AdagradV2", lambda: adagrad_v2.AdagradOptimizer(0.001))
adam_optimizer_v2_fn = NamedObject(
    "AdamV2", lambda: adam_v2.AdamOptimizer(0.001, epsilon=1.0))

# Default v2 optimizer set used by distributions_and_v2_optimizers().
optimizers_v2 = [gradient_descent_optimizer_v2_fn, adagrad_optimizer_v2_fn]

# Keras optimizer_v2 optimizers wrapped as NamedObjects.
gradient_descent_optimizer_keras_v2_fn = NamedObject(
    "GradientDescentKerasV2", lambda: gradient_descent_keras_v2.SGD(0.2))
adagrad_optimizer_keras_v2_fn = NamedObject(
    "AdagradKerasV2", lambda: adagrad_keras_v2.Adagrad(0.001))
adam_optimizer_keras_v2_fn = NamedObject(
    "AdamKerasV2", lambda: adam_keras_v2.Adam(0.001, epsilon=1.0))
rmsprop_optimizer_keras_v2_fn = NamedObject(
    "RmsPropKerasV2", lambda: rmsprop_keras_v2.RMSprop(0.001))

# Both execution modes accepted by combine(mode=...).
graph_and_eager_modes = ["graph", "eager"]
def distributions_and_v1_optimizers():
  """A common set of combination with DistributionStrategies and Optimizers."""
  strategies = [
      one_device_strategy,
      mirrored_strategy_with_gpu_and_cpu,
      mirrored_strategy_with_two_gpus,
      core_mirrored_strategy_with_gpu_and_cpu,
      core_mirrored_strategy_with_two_gpus,
  ]
  return combine(distribution=strategies, optimizer_fn=optimizers_v1)
def distributions_and_v2_optimizers():
  """DistributionStrategies and V2 Optimizers."""
  strategies = [
      one_device_strategy,
      mirrored_strategy_with_gpu_and_cpu,
      mirrored_strategy_with_two_gpus,
      core_mirrored_strategy_with_gpu_and_cpu,
      core_mirrored_strategy_with_two_gpus,
  ]
  return combine(distribution=strategies, optimizer_fn=optimizers_v2)

View File

@ -19,9 +19,10 @@ from __future__ import division
from __future__ import print_function
from collections import OrderedDict
from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations
from tensorflow.python.distribute import combinations
from tensorflow.python.eager import test
@ -70,8 +71,7 @@ class TestingCombinationsTest(test.TestCase):
}, {
"b": 3
}],
combinations.combine(a=[1, 2]) +
combinations.combine(b=[2, 3]))
combinations.combine(a=[1, 2]) + combinations.combine(b=[2, 3]))
def test_times(self):
c1 = combinations.combine(mode=["graph"], loss=["callable", "tensor"])

View File

@ -0,0 +1,165 @@
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Strategy and optimizer combinations for combinations.combine()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy as mirrored_lib
from tensorflow.python.distribute import one_device_strategy as one_device_lib
from tensorflow.python.distribute import tpu_strategy as tpu_lib
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_keras_v2
from tensorflow.python.keras.optimizer_v2 import adam as adam_keras_v2
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_keras_v2
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.training import adagrad
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
# pylint: disable=missing-docstring
def _get_tpu_strategy_creator(steps_per_run, use_single_core=False, **kwargs):
  """Builds a zero-arg callable that constructs a `TPUStrategy`.

  Args:
    steps_per_run: forwarded to the `TPUStrategy` constructor.
    use_single_core: if True, use a single-core device assignment.
    **kwargs: extra keyword arguments forwarded to `TPUStrategy`.

  Returns:
    A callable taking no arguments that initializes the TPU system and
    returns a new `TPUStrategy`.
  """

  def _create_tpu_strategy():
    resolver = tpu_cluster_resolver.TPUClusterResolver("")
    topology = tpu_strategy_util.initialize_tpu_system(resolver)
    assignment = (
        device_assignment_lib.DeviceAssignment(
            topology,
            core_assignment=device_assignment_lib.SINGLE_CORE_ASSIGNMENT)
        if use_single_core else None)
    return tpu_lib.TPUStrategy(
        resolver,
        steps_per_run=steps_per_run,
        device_assignment=assignment,
        **kwargs)

  return _create_tpu_strategy
# pylint: disable=g-long-lambda
# The strategy that is picked up when no explicit strategy scope is active.
default_strategy = combinations.NamedDistribution(
    "Default",
    distribution_strategy_context._get_default_strategy,  # pylint: disable=protected-access
    required_gpus=None)

# Single-device strategies.
one_device_strategy = combinations.NamedDistribution(
    "OneDeviceCPU",
    lambda: one_device_lib.OneDeviceStrategy("/cpu:0"),
    required_gpus=None)
one_device_strategy_gpu = combinations.NamedDistribution(
    "OneDeviceGPU",
    lambda: one_device_lib.OneDeviceStrategy("/gpu:0"),
    required_gpus=1)

# TPU strategies with varying steps_per_run / core assignment.
tpu_strategy = combinations.NamedDistribution(
    "TPU",
    _get_tpu_strategy_creator(steps_per_run=2),
    required_tpu=True)
tpu_strategy_one_step = combinations.NamedDistribution(
    "TPUOneStep",
    _get_tpu_strategy_creator(steps_per_run=1),
    required_tpu=True)
tpu_strategy_one_core = combinations.NamedDistribution(
    "TPUOneCore",
    _get_tpu_strategy_creator(steps_per_run=2, use_single_core=True),
    required_tpu=True)
tpu_strategy_one_step_one_core = combinations.NamedDistribution(
    "TPUOneStepOneCore",
    _get_tpu_strategy_creator(steps_per_run=1, use_single_core=True),
    required_tpu=True)

# MirroredStrategy over various device sets.
mirrored_strategy_with_one_cpu = combinations.NamedDistribution(
    "Mirrored1CPU",
    lambda: mirrored_lib.MirroredStrategy(["/cpu:0"]))
mirrored_strategy_with_one_gpu = combinations.NamedDistribution(
    "Mirrored1GPU",
    lambda: mirrored_lib.MirroredStrategy(["/gpu:0"]),
    required_gpus=1)
mirrored_strategy_with_gpu_and_cpu = combinations.NamedDistribution(
    "MirroredCPUAndGPU",
    lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/cpu:0"]),
    required_gpus=1)
mirrored_strategy_with_two_gpus = combinations.NamedDistribution(
    "Mirrored2GPUs",
    lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/gpu:1"]),
    required_gpus=2)

# TF v1 (tf.train) optimizers wrapped as NamedObjects.
gradient_descent_optimizer_v1_fn = combinations.NamedObject(
    "GradientDescentV1", lambda: gradient_descent.GradientDescentOptimizer(0.2))
adagrad_optimizer_v1_fn = combinations.NamedObject(
    "AdagradV1", lambda: adagrad.AdagradOptimizer(0.001))
adam_optimizer_v1_fn = combinations.NamedObject(
    "AdamV1", lambda: adam.AdamOptimizer(0.001, epsilon=1))
rmsprop_optimizer_v1_fn = combinations.NamedObject(
    "RmsPropV1", lambda: rmsprop.RMSPropOptimizer(0.001))

# Default v1 optimizer set used by distributions_and_v1_optimizers().
optimizers_v1 = [gradient_descent_optimizer_v1_fn, adagrad_optimizer_v1_fn]

# Keras optimizer_v2 optimizers wrapped as NamedObjects.
gradient_descent_optimizer_keras_v2_fn = combinations.NamedObject(
    "GradientDescentKerasV2", lambda: gradient_descent_keras_v2.SGD(0.2))
adagrad_optimizer_keras_v2_fn = combinations.NamedObject(
    "AdagradKerasV2", lambda: adagrad_keras_v2.Adagrad(0.001))
adam_optimizer_keras_v2_fn = combinations.NamedObject(
    "AdamKerasV2", lambda: adam_keras_v2.Adam(0.001, epsilon=1.0))
rmsprop_optimizer_keras_v2_fn = combinations.NamedObject(
    "RmsPropKerasV2", lambda: rmsprop_keras_v2.RMSprop(0.001))

# Both execution modes accepted by combinations.combine(mode=...).
graph_and_eager_modes = ["graph", "eager"]
def distributions_and_v1_optimizers():
  """A common set of combination with DistributionStrategies and Optimizers."""
  strategies = [
      one_device_strategy,
      mirrored_strategy_with_gpu_and_cpu,
      mirrored_strategy_with_two_gpus,
  ]
  return combinations.combine(
      distribution=strategies, optimizer_fn=optimizers_v1)
# All named strategies that do not require a TPU.
strategies_minus_tpu = [
    default_strategy,
    one_device_strategy,
    one_device_strategy_gpu,
    mirrored_strategy_with_gpu_and_cpu,
    mirrored_strategy_with_two_gpus,
]

# TPU-only strategies.
tpu_strategies = [
    tpu_strategy,  # steps_per_run=2
    tpu_strategy_one_step,
]
def strategy_minus_tpu_combinations():
  """Combinations of every non-TPU strategy in graph and eager mode."""
  modes = ["graph", "eager"]
  return combinations.combine(distribution=strategies_minus_tpu, mode=modes)
def tpu_strategy_combinations():
  """Combinations of the TPU strategies, graph mode only."""
  graph_only = ["graph"]
  return combinations.combine(distribution=tpu_strategies, mode=graph_only)
def all_strategy_combinations():
  """Non-TPU (graph+eager) plus TPU (graph-only) strategy combinations."""
  non_tpu = strategy_minus_tpu_combinations()
  return non_tpu + tpu_strategy_combinations()
def all_strategy_minus_default_and_tpu_combinations():
  """Graph+eager combinations over all non-default, non-TPU strategies."""
  non_default_strategies = [
      one_device_strategy,
      one_device_strategy_gpu,
      mirrored_strategy_with_gpu_and_cpu,
      mirrored_strategy_with_two_gpus,
  ]
  return combinations.combine(
      distribution=non_default_strategies, mode=["graph", "eager"])
def all_strategy_combinations_minus_default():
  """Every strategy combination except the default strategy."""
  minus_default_and_tpu = all_strategy_minus_default_and_tpu_combinations()
  return minus_default_and_tpu + tpu_strategy_combinations()

View File

@ -2,6 +2,7 @@
# Contains the Keras API (internal TensorFlow version).
load("//tensorflow:tensorflow.bzl", "tf_py_test")
load("//tensorflow:tensorflow.bzl", "cuda_py_test")
load("//tensorflow/core:platform/default/distribute.bzl", "distribute_py_test")
licenses(["notice"]) # Apache 2.0
@ -1404,3 +1405,40 @@ tf_py_test(
],
tags = ["notsan"],
)
# Library form of distribute_strategy_test so the test sources can be
# depended on by other targets.
py_library(
    name = "distribute_strategy_test_lib",
    srcs = [
        "distribute_strategy_test.py",
    ],
    deps = [
        "//tensorflow/python:client_testlib",
        "//tensorflow/python:training",
        "//tensorflow/python/distribute:combinations",
        "//tensorflow/python/distribute:mirrored_strategy",
        "//tensorflow/python/distribute:strategy_combinations",
        "//tensorflow/python/distribute:tpu_strategy",
        "//tensorflow/python/eager:test",
        "//tensorflow/python/estimator:estimator_py",
        "//tensorflow/python/keras",
        "//third_party/py/numpy",
        "@absl_py//absl/testing:parameterized",
    ],
)
# Sharded distribute test target wrapping :distribute_strategy_test_lib.
distribute_py_test(
    name = "distribute_strategy_test",
    srcs = ["distribute_strategy_test.py"],
    full_precision = True,
    main = "distribute_strategy_test.py",
    shard_count = 32,
    tags = [
        "multi_and_single_gpu",
        "no_oss",  # TODO(b/117919883): Fix python error.
        "no_windows_gpu",
        "notsan",
    ],
    deps = [
        ":distribute_strategy_test_lib",
    ],
)

View File

@ -20,13 +20,13 @@ from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.python import keras
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.eager import test
from tensorflow.python.estimator import keras as keras_lib
from tensorflow.python.estimator import run_config as run_config_lib
@ -261,17 +261,17 @@ def multi_input_output_model():
# TODO(josh11b): Add combinations.one_device_strategy_gpu once it works with
# TestDistributionStrategyWithCallbacks.test_callbacks_in_predict.
strategies_minus_tpu = [
combinations.default_strategy,
combinations.one_device_strategy,
combinations.one_device_strategy_gpu,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus]
strategy_combinations.default_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.one_device_strategy_gpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus
]
tpu_strategies = [
combinations.tpu_strategy, # steps_per_run=2
combinations.tpu_strategy_one_step]
strategy_combinations.tpu_strategy, # steps_per_run=2
strategy_combinations.tpu_strategy_one_step
]
def strategy_minus_tpu_combinations():
@ -291,12 +291,11 @@ def all_strategy_combinations():
def all_strategy_minus_default_and_tpu_combinations():
return combinations.combine(
distribution=[
combinations.one_device_strategy,
combinations.one_device_strategy_gpu,
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus],
strategy_combinations.one_device_strategy,
strategy_combinations.one_device_strategy_gpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
mode=['graph', 'eager'])
@ -309,14 +308,14 @@ def strategy_and_optimizer_combinations():
return combinations.times(
all_strategy_combinations(),
combinations.combine(optimizer=[
combinations.adagrad_optimizer_v1_fn,
combinations.adagrad_optimizer_keras_v2_fn,
combinations.adam_optimizer_v1_fn,
combinations.adam_optimizer_keras_v2_fn,
combinations.gradient_descent_optimizer_v1_fn,
combinations.gradient_descent_optimizer_keras_v2_fn,
combinations.rmsprop_optimizer_v1_fn,
combinations.rmsprop_optimizer_keras_v2_fn
strategy_combinations.adagrad_optimizer_v1_fn,
strategy_combinations.adagrad_optimizer_keras_v2_fn,
strategy_combinations.adam_optimizer_v1_fn,
strategy_combinations.adam_optimizer_keras_v2_fn,
strategy_combinations.gradient_descent_optimizer_v1_fn,
strategy_combinations.gradient_descent_optimizer_keras_v2_fn,
strategy_combinations.rmsprop_optimizer_v1_fn,
strategy_combinations.rmsprop_optimizer_keras_v2_fn
]))
@ -337,13 +336,13 @@ class TestEstimatorDistributionStrategy(test_util.TensorFlowTestCase,
if os.path.isdir(self._base_dir):
gfile.DeleteRecursively(self._base_dir)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus],
mode=['graph']))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus
],
mode=['graph']))
def test_train_functional_with_distribution_strategy(self, distribution):
keras_model = simple_functional_model()
keras_model.compile(
@ -367,13 +366,13 @@ class TestEstimatorDistributionStrategy(test_util.TensorFlowTestCase,
writer_cache.FileWriterCache.clear()
gfile.DeleteRecursively(self._config.model_dir)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus],
mode=['graph']))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus
],
mode=['graph']))
def test_train_sequential_with_distribution_strategy(self, distribution):
keras_model = simple_sequential_model()
keras_model.compile(
@ -396,11 +395,12 @@ class TestEstimatorDistributionStrategy(test_util.TensorFlowTestCase,
writer_cache.FileWriterCache.clear()
gfile.DeleteRecursively(self._config.model_dir)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph']))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu
],
mode=['graph']))
def test_multi_inputs_multi_outputs_with_input_fn_as_dict(self, distribution):
train_data, test_data = get_multi_inputs_multi_outputs_data()
@ -448,11 +448,12 @@ class TestEstimatorDistributionStrategy(test_util.TensorFlowTestCase,
eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)
self.assertLess(eval_results['loss'], baseline_eval_results['loss'])
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph']))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu
],
mode=['graph']))
def test_keras_optimizer_with_distribution_strategy(self, distribution):
keras_model = simple_sequential_model()
keras_model.compile(
@ -671,8 +672,8 @@ class TestDistributionStrategyWithNumpyArrays(test.TestCase,
# with batch_size
model.predict(inputs, batch_size=8)
@combinations.generate(combinations.combine(
distribution=strategies_minus_tpu, mode=['graph']))
@combinations.generate(
combinations.combine(distribution=strategies_minus_tpu, mode=['graph']))
def test_numpy_with_sample_weights(self, distribution):
with self.cached_session():
with distribution.scope():
@ -847,11 +848,12 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
# as clone_model's input_tensors argument only seems to accept list and not
# tuples or dict.
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu
],
mode=['graph', 'eager']))
def test_fit_with_tuple_and_dict_dataset_inputs(self, distribution):
with self.cached_session():
with distribution.scope():
@ -1024,11 +1026,12 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu
],
mode=['graph', 'eager']))
# TODO(b/120943676, b/120957836): Re-enable once the validation code is
# restored.
def DISABLED_test_dataset_wrong_input_shape(self, distribution):
@ -1050,9 +1053,12 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine(
distribution=[combinations.mirrored_strategy_with_gpu_and_cpu],
mode=['graph', 'eager']))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu
],
mode=['graph', 'eager']))
# TODO(b/120943676, b/120957836): Re-enable once the validation code is
# restored.
def DISABLED_test_dataset_no_batch_input_validation(self, distribution):
@ -1072,9 +1078,10 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
with self.assertRaisesRegexp(ValueError, 'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine(
distribution=[combinations.tpu_strategy_one_step],
mode=['graph']))
@combinations.generate(
combinations.combine(
distribution=[strategy_combinations.tpu_strategy_one_step],
mode=['graph']))
def test_dataset_input_shape_fully_defined(self, distribution):
with self.cached_session():
with distribution.scope():
@ -1091,13 +1098,13 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
with self.assertRaisesRegexp(ValueError, 'requires fully defined shapes'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus],
mode=['graph', 'eager']))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus
],
mode=['graph', 'eager']))
def test_learning_phase_value(self, distribution):
# TODO(anjalisridhar): Modify this test to use Lambdas since we can compare
# meaningful values. Currently we don't pass the learning phase if the
@ -1116,8 +1123,8 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
model.compile(optimizer, loss, metrics=metrics)
batch_size = 8
if isinstance(distribution, mirrored_strategy.CoreMirroredStrategy):
# CoreMirroredStrategy uses global batch size.
if isinstance(distribution, mirrored_strategy.MirroredStrategy):
# MirroredStrategy uses global batch size.
batch_size = 8 * distribution.num_replicas_in_sync
inputs = np.ones((10, 1), dtype=np.float32)
@ -1239,7 +1246,8 @@ class TestRegularizerLoss(test.TestCase, parameterized.TestCase):
def loss_fn(_, y_pred):
return math_ops.reduce_mean(y_pred)
@combinations.generate(all_strategy_combinations_minus_default())
@combinations.generate(
strategy_combinations.all_strategy_combinations_minus_default())
def test_regularizer_loss(self, distribution):
batch_size = 2
if not distributed_training_utils.global_batch_size_supported(distribution):
@ -1301,7 +1309,8 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
model.predict(inputs, steps=1)
model.evaluate(inputs, targets, steps=1)
@combinations.generate(all_strategy_combinations_minus_default())
@combinations.generate(
strategy_combinations.all_strategy_combinations_minus_default())
def test_distribution_strategy_one_dimensional(self, distribution):
with distribution.scope():
inp = keras.layers.Input(shape=(10,))

View File

@ -77,7 +77,9 @@ COMMON_PIP_DEPS = [
"//tensorflow/python/data/kernel_tests:filter_test_base",
"//tensorflow/python/data/kernel_tests:test_base",
"//tensorflow/python/debug:debug_pip",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/eager:eager_pip",
"//tensorflow/python/keras:distribute_strategy_test_lib",
"//tensorflow/python/keras/mixed_precision/experimental:test_util",
"//tensorflow/python/kernel_tests/random:util",
"//tensorflow/python/kernel_tests/signal:test_util",