Branch contrib/distribute/python/combinations.py to python/distribute/combinations.py and python/distribute/strategy_combinations.py with replaced dependencies. Branch keras_test.

PiperOrigin-RevId: 239438725
This commit is contained in:
Rick Chao 2019-03-20 11:27:27 -07:00 committed by TensorFlower Gardener
parent 41e7b3ca0a
commit 41dac366fb
36 changed files with 763 additions and 620 deletions

View File

@ -19,12 +19,13 @@ py_library(
name = "distribute_test_lib_pip", name = "distribute_test_lib_pip",
visibility = ["//tensorflow:internal"], visibility = ["//tensorflow:internal"],
deps = [ deps = [
":combinations",
":keras_correctness_test_lib", ":keras_correctness_test_lib",
":keras_test_lib", ":keras_test_lib",
":multi_worker_test_base", ":multi_worker_test_base",
":single_loss_example", ":single_loss_example",
":strategy_test_lib", ":strategy_test_lib",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
], ],
) )
@ -33,8 +34,8 @@ distribute_py_test(
srcs = ["values_test.py"], srcs = ["values_test.py"],
main = "values_test.py", main = "values_test.py",
deps = [ deps = [
":combinations",
":mirrored_strategy", ":mirrored_strategy",
":parameter_server_strategy",
"//tensorflow/core:protos_all_py", "//tensorflow/core:protos_all_py",
"//tensorflow/python:array_ops", "//tensorflow/python:array_ops",
"//tensorflow/python:constant_op", "//tensorflow/python:constant_op",
@ -42,7 +43,9 @@ distribute_py_test(
"//tensorflow/python:framework_test_lib", "//tensorflow/python:framework_test_lib",
"//tensorflow/python:training", "//tensorflow/python:training",
"//tensorflow/python:variable_scope", "//tensorflow/python:variable_scope",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:device_util", "//tensorflow/python/distribute:device_util",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/python/distribute:values", "//tensorflow/python/distribute:values",
"//tensorflow/python/eager:context", "//tensorflow/python/eager:context",
"//tensorflow/python/eager:test", "//tensorflow/python/eager:test",
@ -55,7 +58,8 @@ cuda_py_test(
name = "input_lib_test", name = "input_lib_test",
srcs = ["input_lib_test.py"], srcs = ["input_lib_test.py"],
additional_deps = [ additional_deps = [
":combinations", "//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
":mirrored_strategy", ":mirrored_strategy",
":multi_worker_test_base", ":multi_worker_test_base",
"@absl_py//absl/testing:parameterized", "@absl_py//absl/testing:parameterized",
@ -95,7 +99,8 @@ cuda_py_test(
name = "parameter_server_strategy_test", name = "parameter_server_strategy_test",
srcs = ["parameter_server_strategy_test.py"], srcs = ["parameter_server_strategy_test.py"],
additional_deps = [ additional_deps = [
":combinations", "//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
":multi_worker_test_base", ":multi_worker_test_base",
":parameter_server_strategy", ":parameter_server_strategy",
":strategy_test_lib", ":strategy_test_lib",
@ -137,7 +142,8 @@ cuda_py_test(
srcs = ["one_device_strategy_test.py"], srcs = ["one_device_strategy_test.py"],
additional_deps = [ additional_deps = [
":strategy_test_lib", ":strategy_test_lib",
":combinations", "//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/python/eager:test", "//tensorflow/python/eager:test",
], ],
) )
@ -182,42 +188,13 @@ py_library(
], ],
) )
py_library(
name = "combinations",
srcs = ["combinations.py"],
srcs_version = "PY2AND3",
deps = [
":mirrored_strategy",
":one_device_strategy",
":parameter_server_strategy",
":tpu_strategy",
"//tensorflow/contrib/cluster_resolver:cluster_resolver_pip",
"//tensorflow/contrib/optimizer_v2:training",
"//tensorflow/python:framework_ops",
"//tensorflow/python:training",
"//tensorflow/python:util",
"//tensorflow/python/distribute:distribute_lib",
"//tensorflow/python/eager:context",
"//tensorflow/python/keras/optimizer_v2",
"@absl_py//absl/testing:parameterized",
],
)
py_test(
name = "combinations_test",
srcs = ["combinations_test.py"],
deps = [
":combinations",
"//tensorflow/python/eager:test",
],
)
# TODO(priyag): Rename this test to mirrored_strategy_test # TODO(priyag): Rename this test to mirrored_strategy_test
cuda_py_test( cuda_py_test(
name = "mirrored_strategy_multigpu_test", name = "mirrored_strategy_multigpu_test",
srcs = ["mirrored_strategy_multigpu_test.py"], srcs = ["mirrored_strategy_multigpu_test.py"],
additional_deps = [ additional_deps = [
":combinations", "//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
":mirrored_strategy", ":mirrored_strategy",
":multi_worker_test_base", ":multi_worker_test_base",
":strategy_test_lib", ":strategy_test_lib",
@ -260,7 +237,8 @@ cuda_py_test(
srcs = ["keras_multi_worker_test.py"], srcs = ["keras_multi_worker_test.py"],
additional_deps = [ additional_deps = [
"//tensorflow/contrib/distribute/python:collective_all_reduce_strategy", "//tensorflow/contrib/distribute/python:collective_all_reduce_strategy",
"//tensorflow/contrib/distribute/python:combinations", "//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/contrib/distribute/python:mirrored_strategy", "//tensorflow/contrib/distribute/python:mirrored_strategy",
"//tensorflow/contrib/distribute/python:multi_worker_test_base", "//tensorflow/contrib/distribute/python:multi_worker_test_base",
"//tensorflow/contrib/distribute/python:parameter_server_strategy", "//tensorflow/contrib/distribute/python:parameter_server_strategy",
@ -307,7 +285,8 @@ cuda_py_test(
srcs = ["collective_all_reduce_strategy_test.py"], srcs = ["collective_all_reduce_strategy_test.py"],
additional_deps = [ additional_deps = [
":collective_all_reduce_strategy", ":collective_all_reduce_strategy",
":combinations", "//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
":multi_worker_test_base", ":multi_worker_test_base",
":strategy_test_lib", ":strategy_test_lib",
"@absl_py//absl/testing:parameterized", "@absl_py//absl/testing:parameterized",
@ -340,7 +319,6 @@ distribute_py_test(
"multi_and_single_gpu", "multi_and_single_gpu",
], ],
deps = [ deps = [
":combinations",
":mirrored_strategy", ":mirrored_strategy",
":single_loss_example", ":single_loss_example",
"//tensorflow/contrib/tpu:tpu_lib", "//tensorflow/contrib/tpu:tpu_lib",
@ -350,6 +328,8 @@ distribute_py_test(
"//tensorflow/python:variable_scope", "//tensorflow/python:variable_scope",
"//tensorflow/python:variables", "//tensorflow/python:variables",
"//tensorflow/python/data/ops:dataset_ops", "//tensorflow/python/data/ops:dataset_ops",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/python/eager:context", "//tensorflow/python/eager:context",
"//tensorflow/python/eager:test", "//tensorflow/python/eager:test",
"//tensorflow/python/ops/losses", "//tensorflow/python/ops/losses",
@ -362,7 +342,8 @@ cuda_py_test(
name = "moving_averages_test", name = "moving_averages_test",
srcs = ["moving_averages_test.py"], srcs = ["moving_averages_test.py"],
additional_deps = [ additional_deps = [
":combinations", "//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"@absl_py//absl/testing:parameterized", "@absl_py//absl/testing:parameterized",
"//tensorflow/python/eager:test", "//tensorflow/python/eager:test",
"//tensorflow/python:array_ops", "//tensorflow/python:array_ops",
@ -377,14 +358,17 @@ cuda_py_test(
name = "optimizer_v2_test", name = "optimizer_v2_test",
srcs = ["optimizer_v2_test.py"], srcs = ["optimizer_v2_test.py"],
additional_deps = [ additional_deps = [
":combinations", "//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
":single_loss_example", ":single_loss_example",
":mirrored_strategy",
"@absl_py//absl/testing:parameterized", "@absl_py//absl/testing:parameterized",
"//third_party/py/numpy", "//third_party/py/numpy",
"//tensorflow/python:control_flow_ops", "//tensorflow/python:control_flow_ops",
"//tensorflow/python:variables", "//tensorflow/python:variables",
"//tensorflow/python/eager:context", "//tensorflow/python/eager:context",
"//tensorflow/python/eager:test", "//tensorflow/python/eager:test",
"//tensorflow/contrib/optimizer_v2:training",
], ],
tags = [ tags = [
"multi_and_single_gpu", "multi_and_single_gpu",
@ -395,7 +379,8 @@ cuda_py_test(
name = "estimator_integration_test", name = "estimator_integration_test",
srcs = ["estimator_integration_test.py"], srcs = ["estimator_integration_test.py"],
additional_deps = [ additional_deps = [
":combinations", "//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"@absl_py//absl/testing:parameterized", "@absl_py//absl/testing:parameterized",
"//third_party/py/numpy", "//third_party/py/numpy",
"//tensorflow/contrib/optimizer_v2:training", "//tensorflow/contrib/optimizer_v2:training",
@ -433,7 +418,8 @@ cuda_py_test(
srcs = ["estimator_training_test.py"], srcs = ["estimator_training_test.py"],
additional_deps = [ additional_deps = [
":collective_all_reduce_strategy", ":collective_all_reduce_strategy",
":combinations", "//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
":mirrored_strategy", ":mirrored_strategy",
":multi_worker_test_base", ":multi_worker_test_base",
":parameter_server_strategy", ":parameter_server_strategy",
@ -482,10 +468,11 @@ distribute_py_test(
"multi_and_single_gpu", "multi_and_single_gpu",
], ],
deps = [ deps = [
":combinations",
":single_loss_example", ":single_loss_example",
"//tensorflow/contrib/tpu:tpu_lib", "//tensorflow/contrib/tpu:tpu_lib",
"//tensorflow/python:variables", "//tensorflow/python:variables",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/python/eager:context", "//tensorflow/python/eager:context",
"//tensorflow/python/eager:test", "//tensorflow/python/eager:test",
"//third_party/py/numpy", "//third_party/py/numpy",
@ -507,7 +494,8 @@ cuda_py_test(
name = "monitor_test", name = "monitor_test",
srcs = ["monitor_test.py"], srcs = ["monitor_test.py"],
additional_deps = [ additional_deps = [
":combinations", "//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
":monitor", ":monitor",
":single_loss_example", ":single_loss_example",
"@absl_py//absl/testing:parameterized", "@absl_py//absl/testing:parameterized",
@ -527,7 +515,8 @@ cuda_py_test(
name = "cross_device_utils_test", name = "cross_device_utils_test",
srcs = ["cross_device_utils_test.py"], srcs = ["cross_device_utils_test.py"],
additional_deps = [ additional_deps = [
":combinations", "//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"@absl_py//absl/testing:parameterized", "@absl_py//absl/testing:parameterized",
"//tensorflow/python:constant_op", "//tensorflow/python:constant_op",
"//tensorflow/python:framework_ops", "//tensorflow/python:framework_ops",
@ -543,7 +532,8 @@ cuda_py_test(
srcs = ["cross_device_ops_test.py"], srcs = ["cross_device_ops_test.py"],
additional_deps = [ additional_deps = [
":collective_all_reduce_strategy", ":collective_all_reduce_strategy",
":combinations", "//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
":multi_worker_test_base", ":multi_worker_test_base",
":mirrored_strategy", ":mirrored_strategy",
"@absl_py//absl/testing:parameterized", "@absl_py//absl/testing:parameterized",
@ -565,15 +555,16 @@ py_library(
name = "keras_test_lib", name = "keras_test_lib",
srcs = [ srcs = [
"keras_backward_compat_test.py", "keras_backward_compat_test.py",
"keras_test.py",
"keras_utils_test.py", "keras_utils_test.py",
], ],
deps = [ deps = [
":combinations", ":parameter_server_strategy",
"//tensorflow/contrib/distribute/python:mirrored_strategy", "//tensorflow/contrib/distribute/python:mirrored_strategy",
"//tensorflow/contrib/distribute/python:tpu_strategy", "//tensorflow/contrib/distribute/python:tpu_strategy",
"//tensorflow/python:client_testlib", "//tensorflow/python:client_testlib",
"//tensorflow/python:training", "//tensorflow/python:training",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/python/eager:test", "//tensorflow/python/eager:test",
"//tensorflow/python/estimator:estimator_py", "//tensorflow/python/estimator:estimator_py",
"//tensorflow/python/keras", "//tensorflow/python/keras",
@ -582,23 +573,6 @@ py_library(
], ],
) )
distribute_py_test(
name = "keras_test",
srcs = ["keras_test.py"],
full_precision = True,
main = "keras_test.py",
shard_count = 32,
tags = [
"multi_and_single_gpu",
"no_oss", # TODO(b/117919883)
"no_windows_gpu",
"notsan",
],
deps = [
":keras_test_lib",
],
)
distribute_py_test( distribute_py_test(
name = "keras_utils_test", name = "keras_utils_test",
srcs = ["keras_utils_test.py"], srcs = ["keras_utils_test.py"],
@ -611,8 +585,8 @@ distribute_py_test(
"notsan", "notsan",
], ],
deps = [ deps = [
":keras_test",
":keras_test_lib", ":keras_test_lib",
"//tensorflow/python/keras:distribute_strategy_test",
], ],
) )
@ -644,11 +618,12 @@ py_library(
"keras_stateful_lstm_model_correctness_test.py", "keras_stateful_lstm_model_correctness_test.py",
], ],
deps = [ deps = [
":combinations",
"//tensorflow/contrib/distribute/python:mirrored_strategy", "//tensorflow/contrib/distribute/python:mirrored_strategy",
"//tensorflow/contrib/distribute/python:tpu_strategy", "//tensorflow/contrib/distribute/python:tpu_strategy",
"//tensorflow/python:client_testlib", "//tensorflow/python:client_testlib",
"//tensorflow/python:training", "//tensorflow/python:training",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/python/eager:test", "//tensorflow/python/eager:test",
"//tensorflow/python/estimator:estimator_py", "//tensorflow/python/estimator:estimator_py",
"//tensorflow/python/keras", "//tensorflow/python/keras",
@ -761,11 +736,13 @@ distribute_py_test(
"multi_and_single_gpu", "multi_and_single_gpu",
], ],
deps = [ deps = [
":combinations", ":tpu_strategy",
"//tensorflow/python:math_ops", "//tensorflow/python:math_ops",
"//tensorflow/python:metrics", "//tensorflow/python:metrics",
"//tensorflow/python:variables", "//tensorflow/python:variables",
"//tensorflow/python/data/ops:dataset_ops", "//tensorflow/python/data/ops:dataset_ops",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/python/eager:test", "//tensorflow/python/eager:test",
"@absl_py//absl/testing:parameterized", "@absl_py//absl/testing:parameterized",
], ],
@ -776,7 +753,8 @@ cuda_py_test(
size = "medium", size = "medium",
srcs = ["warm_starting_util_test.py"], srcs = ["warm_starting_util_test.py"],
additional_deps = [ additional_deps = [
":combinations", "//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/python:client_testlib", "//tensorflow/python:client_testlib",
"//tensorflow/python:framework_ops", "//tensorflow/python:framework_ops",
"//tensorflow/python:training", "//tensorflow/python:training",
@ -793,7 +771,8 @@ cuda_py_test(
size = "medium", size = "medium",
srcs = ["checkpoint_utils_test.py"], srcs = ["checkpoint_utils_test.py"],
additional_deps = [ additional_deps = [
":combinations", "//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/python:client_testlib", "//tensorflow/python:client_testlib",
"//tensorflow/python:framework_ops", "//tensorflow/python:framework_ops",
"//tensorflow/python:training", "//tensorflow/python:training",

View File

@ -28,7 +28,8 @@ from __future__ import print_function
import os import os
from absl.testing import parameterized from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.framework import ops from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables from tensorflow.python.ops import variables
@ -62,13 +63,14 @@ class CheckpointUtilsWithDistributionStrategyTest(
v1, v2 = _create_checkpoints(session, checkpoint_dir) v1, v2 = _create_checkpoints(session, checkpoint_dir)
return checkpoint_dir, v1, v2 return checkpoint_dir, v1, v2
@combinations.generate(combinations.combine( @combinations.generate(
distribution=[combinations.default_strategy, combinations.combine(
combinations.one_device_strategy, distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.default_strategy,
combinations.mirrored_strategy_with_two_gpus, strategy_combinations.one_device_strategy,
combinations.core_mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus], strategy_combinations.mirrored_strategy_with_two_gpus,
],
in_replica_mode=[True, False], in_replica_mode=[True, False],
mode=["graph"])) mode=["graph"]))
def testInitFromCheckpoint(self, distribution, in_replica_mode): def testInitFromCheckpoint(self, distribution, in_replica_mode):
@ -98,11 +100,10 @@ class CheckpointUtilsWithDistributionStrategyTest(
@combinations.generate( @combinations.generate(
combinations.combine( combinations.combine(
distribution=[ distribution=[
combinations.default_strategy, combinations.one_device_strategy, strategy_combinations.default_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.one_device_strategy,
combinations.mirrored_strategy_with_two_gpus, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_two_gpus
], ],
in_replica_mode=[True, False], in_replica_mode=[True, False],
mode=["graph"])) mode=["graph"]))

View File

@ -22,7 +22,6 @@ from absl.testing import parameterized
import numpy as np import numpy as np
from tensorflow.contrib.distribute.python import collective_all_reduce_strategy from tensorflow.contrib.distribute.python import collective_all_reduce_strategy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import multi_worker_test_base from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.contrib.distribute.python import strategy_test_lib from tensorflow.contrib.distribute.python import strategy_test_lib
from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import config_pb2
@ -30,6 +29,7 @@ from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import keras from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import collective_all_reduce_strategy as core_collective_all_reduce_strategy from tensorflow.python.distribute import collective_all_reduce_strategy as core_collective_all_reduce_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import distribute_lib from tensorflow.python.distribute import distribute_lib

View File

@ -24,14 +24,15 @@ from absl.testing import parameterized
import numpy as np import numpy as np
from tensorflow.contrib.distribute.python import collective_all_reduce_strategy from tensorflow.contrib.distribute.python import collective_all_reduce_strategy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import multi_worker_test_base from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import config_pb2
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import reduce_util from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import values as value_lib from tensorflow.python.distribute import values as value_lib
from tensorflow.python.eager import context from tensorflow.python.eager import context
from tensorflow.python.eager import test from tensorflow.python.eager import test
@ -246,8 +247,7 @@ class SingleWorkerCrossDeviceOpsTest(CrossDeviceOpsTestBase):
# strategy. # strategy.
reduction_to_one_combinations = combinations.combine( reduction_to_one_combinations = combinations.combine(
cross_device_ops=[ cross_device_ops=[
combinations.NamedObject( combinations.NamedObject("DefaultReductionToOneDevice",
"DefaultReductionToOneDevice",
cross_device_ops_lib.ReductionToOneDevice()), cross_device_ops_lib.ReductionToOneDevice()),
combinations.NamedObject( combinations.NamedObject(
"ReductionToCPUDeviceCrossDeviceOps", "ReductionToCPUDeviceCrossDeviceOps",
@ -259,11 +259,9 @@ class SingleWorkerCrossDeviceOpsTest(CrossDeviceOpsTestBase):
accumulation_fn=math_ops.accumulate_n)), accumulation_fn=math_ops.accumulate_n)),
], ],
distribution=[ distribution=[
combinations.one_device_strategy, strategy_combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus, strategy_combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus
], ],
mode=["graph", "eager"]) mode=["graph", "eager"])
allreduce_combinations = combinations.combine( allreduce_combinations = combinations.combine(
@ -285,8 +283,7 @@ class SingleWorkerCrossDeviceOpsTest(CrossDeviceOpsTestBase):
"hierarchical_copy", 0, 100, 10)) "hierarchical_copy", 0, 100, 10))
], ],
distribution=[ distribution=[
combinations.mirrored_strategy_with_two_gpus, strategy_combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_two_gpus
], ],
mode=["graph", "eager"]) mode=["graph", "eager"])

View File

@ -20,7 +20,7 @@ from __future__ import print_function
from absl.testing import parameterized from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_utils from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import values as value_lib from tensorflow.python.distribute import values as value_lib

View File

@ -22,10 +22,10 @@ import shutil
import tempfile import tempfile
from absl.testing import parameterized from absl.testing import parameterized
import numpy as np import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.optimizer_v2 import adagrad from tensorflow.contrib.optimizer_v2 import adagrad
from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import test from tensorflow.python.eager import test
from tensorflow.python.estimator import run_config from tensorflow.python.estimator import run_config
from tensorflow.python.estimator import training from tensorflow.python.estimator import training
@ -60,11 +60,9 @@ class DNNLinearCombinedClassifierIntegrationTest(test.TestCase,
combinations.combine( combinations.combine(
mode=['graph'], mode=['graph'],
distribution=[ distribution=[
combinations.one_device_strategy, strategy_combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus, strategy_combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus
], ],
use_train_and_evaluate=[True, False])) use_train_and_evaluate=[True, False]))
def test_complete_flow_with_mode(self, distribution, use_train_and_evaluate): def test_complete_flow_with_mode(self, distribution, use_train_and_evaluate):

View File

@ -28,12 +28,12 @@ from absl.testing import parameterized
import numpy as np import numpy as np
from tensorflow.contrib.distribute.python import collective_all_reduce_strategy from tensorflow.contrib.distribute.python import collective_all_reduce_strategy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import multi_worker_test_base from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.contrib.distribute.python import parameter_server_strategy from tensorflow.contrib.distribute.python import parameter_server_strategy
from tensorflow.contrib.optimizer_v2 import adagrad from tensorflow.contrib.optimizer_v2 import adagrad
from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import distribute_coordinator as dc from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import estimator_training as dc_training from tensorflow.python.distribute import estimator_training as dc_training

View File

@ -19,10 +19,9 @@ from __future__ import division
from __future__ import print_function from __future__ import print_function
from absl.testing import parameterized from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import multi_worker_test_base from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_lib from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import input_lib from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import values from tensorflow.python.distribute import values

View File

@ -19,13 +19,13 @@ from __future__ import print_function
from absl.testing import parameterized from absl.testing import parameterized
import numpy as np import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import tpu_strategy from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.python import keras from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_lib from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import test from tensorflow.python.eager import test
from tensorflow.python.framework import random_seed from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util from tensorflow.python.framework import test_util
@ -290,16 +290,16 @@ def get_correctness_test_inputs(use_numpy, use_validation_data,
strategies_minus_tpu = [ strategies_minus_tpu = [
combinations.default_strategy, strategy_combinations.default_strategy,
combinations.one_device_strategy, strategy_combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus, strategy_combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu, ]
combinations.core_mirrored_strategy_with_two_gpus]
tpu_strategies = [ tpu_strategies = [
combinations.tpu_strategy, # steps_per_run=2 strategy_combinations.tpu_strategy, # steps_per_run=2
combinations.tpu_strategy_one_step] strategy_combinations.tpu_strategy_one_step
]
def strategy_minus_tpu_combinations(): def strategy_minus_tpu_combinations():
@ -322,14 +322,14 @@ def strategy_and_optimizer_combinations():
return combinations.times( return combinations.times(
all_strategy_combinations(), all_strategy_combinations(),
combinations.combine(optimizer=[ combinations.combine(optimizer=[
combinations.adagrad_optimizer_v1_fn, strategy_combinations.adagrad_optimizer_v1_fn,
combinations.adagrad_optimizer_keras_v2_fn, strategy_combinations.adagrad_optimizer_keras_v2_fn,
combinations.adam_optimizer_v1_fn, strategy_combinations.adam_optimizer_v1_fn,
combinations.adam_optimizer_keras_v2_fn, strategy_combinations.adam_optimizer_keras_v2_fn,
combinations.gradient_descent_optimizer_v1_fn, strategy_combinations.gradient_descent_optimizer_v1_fn,
combinations.gradient_descent_optimizer_keras_v2_fn, strategy_combinations.gradient_descent_optimizer_keras_v2_fn,
combinations.rmsprop_optimizer_v1_fn, strategy_combinations.rmsprop_optimizer_v1_fn,
combinations.rmsprop_optimizer_keras_v2_fn strategy_combinations.rmsprop_optimizer_keras_v2_fn
])) ]))
@ -532,10 +532,11 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
# as clone_model's input_tensors argument only seems to accept list and not # as clone_model's input_tensors argument only seems to accept list and not
# tuples or dict. # tuples or dict.
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu], ],
mode=['graph', 'eager'])) mode=['graph', 'eager']))
def test_fit_with_tuple_and_dict_dataset_inputs(self, distribution): def test_fit_with_tuple_and_dict_dataset_inputs(self, distribution):
with self.cached_session(): with self.cached_session():
@ -617,10 +618,11 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
model.evaluate(dataset, steps=2, verbose=1) model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2) model.predict(dataset, steps=2)
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu], ],
mode=['graph', 'eager'])) mode=['graph', 'eager']))
# TODO(b/120943676, b/120957836): Re-enable once the validation code is # TODO(b/120943676, b/120957836): Re-enable once the validation code is
# restored. # restored.
@ -643,8 +645,11 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
'expected input to have shape'): 'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine( @combinations.generate(
distribution=[combinations.mirrored_strategy_with_gpu_and_cpu], combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu
],
mode=['graph', 'eager'])) mode=['graph', 'eager']))
# TODO(b/120943676, b/120957836): Re-enable once the validation code is # TODO(b/120943676, b/120957836): Re-enable once the validation code is
# restored. # restored.
@ -665,8 +670,9 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
with self.assertRaisesRegexp(ValueError, 'expected input to have shape'): with self.assertRaisesRegexp(ValueError, 'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine( @combinations.generate(
distribution=[combinations.tpu_strategy_one_step], combinations.combine(
distribution=[strategy_combinations.tpu_strategy_one_step],
mode=['graph'])) mode=['graph']))
def test_dataset_input_shape_fully_defined(self, distribution): def test_dataset_input_shape_fully_defined(self, distribution):
with self.cached_session(): with self.cached_session():
@ -684,12 +690,12 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
with self.assertRaisesRegexp(ValueError, 'requires fully defined shapes'): with self.assertRaisesRegexp(ValueError, 'requires fully defined shapes'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus, strategy_combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu, ],
combinations.core_mirrored_strategy_with_two_gpus],
mode=['graph', 'eager'])) mode=['graph', 'eager']))
def test_learning_phase_value(self, distribution): def test_learning_phase_value(self, distribution):
# TODO(anjalisridhar): Modify this test to use Lambdas since we can compare # TODO(anjalisridhar): Modify this test to use Lambdas since we can compare
@ -761,10 +767,11 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
@test_util.run_v1_only('model.compile(..distribute=..) only works in TF v1') @test_util.run_v1_only('model.compile(..distribute=..) only works in TF v1')
class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase): class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu], ],
mode=['graph', 'eager'])) mode=['graph', 'eager']))
def test_unsupported_features(self, distribution): def test_unsupported_features(self, distribution):
with self.cached_session(): with self.cached_session():
@ -815,10 +822,11 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
'`steps` argument'): '`steps` argument'):
model.predict(dataset, verbose=0) model.predict(dataset, verbose=0)
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu], ],
mode=['graph', 'eager'])) mode=['graph', 'eager']))
def test_calling_with_unsupported_predefined_callbacks(self, distribution): def test_calling_with_unsupported_predefined_callbacks(self, distribution):
with self.cached_session(): with self.cached_session():
@ -852,10 +860,11 @@ class TestDistributionStrategyWithLossMasking(test.TestCase,
# TODO(priyag): Enable all strategies for this test. Currently it does not # TODO(priyag): Enable all strategies for this test. Currently it does not
# work for TPU due to some invalid datatype. # work for TPU due to some invalid datatype.
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu], ],
mode=['graph', 'eager'])) mode=['graph', 'eager']))
def test_masking(self, distribution): def test_masking(self, distribution):
with self.cached_session(): with self.cached_session():

View File

@ -21,13 +21,13 @@ import functools
from absl.testing import parameterized from absl.testing import parameterized
import numpy as np import numpy as np
import six import six
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import tpu_strategy from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.python import keras from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_lib from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context from tensorflow.python.eager import context
from tensorflow.python.eager import test from tensorflow.python.eager import test
from tensorflow.python.framework import random_seed from tensorflow.python.framework import random_seed
@ -42,14 +42,12 @@ _GLOBAL_BATCH_SIZE = 64
all_strategies = [ all_strategies = [
combinations.default_strategy, strategy_combinations.default_strategy,
combinations.one_device_strategy, strategy_combinations.one_device_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus, strategy_combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu, strategy_combinations.tpu_strategy, # steps_per_run=2
combinations.core_mirrored_strategy_with_two_gpus, strategy_combinations.tpu_strategy_one_step,
combinations.tpu_strategy, # steps_per_run=2
combinations.tpu_strategy_one_step,
] ]
@ -92,8 +90,10 @@ def test_combinations_for_embedding_model():
def test_combinations_with_tpu_strategies(): def test_combinations_with_tpu_strategies():
tpu_strategies = [combinations.tpu_strategy, tpu_strategies = [
combinations.tpu_strategy_one_step] strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_one_step
]
return ( return (
combinations.times( combinations.times(

View File

@ -18,11 +18,10 @@ from __future__ import division
from __future__ import print_function from __future__ import print_function
import numpy as np import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import keras_correctness_test_base from tensorflow.contrib.distribute.python import keras_correctness_test_base
from tensorflow.python import keras from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.eager import test from tensorflow.python.eager import test
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras
from tensorflow.python.training import gradient_descent from tensorflow.python.training import gradient_descent

View File

@ -18,10 +18,9 @@ from __future__ import division
from __future__ import print_function from __future__ import print_function
import numpy as np import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import keras_correctness_test_base from tensorflow.contrib.distribute.python import keras_correctness_test_base
from tensorflow.python import keras from tensorflow.python import keras
from tensorflow.python.distribute import combinations
from tensorflow.python.eager import test from tensorflow.python.eager import test
from tensorflow.python.training import gradient_descent from tensorflow.python.training import gradient_descent

View File

@ -18,10 +18,9 @@ from __future__ import division
from __future__ import print_function from __future__ import print_function
import numpy as np import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import keras_correctness_test_base from tensorflow.contrib.distribute.python import keras_correctness_test_base
from tensorflow.python import keras from tensorflow.python import keras
from tensorflow.python.distribute import combinations
from tensorflow.python.eager import test from tensorflow.python.eager import test
from tensorflow.python.keras.optimizer_v2 import gradient_descent from tensorflow.python.keras.optimizer_v2 import gradient_descent

View File

@ -18,10 +18,9 @@ from __future__ import division
from __future__ import print_function from __future__ import print_function
import numpy as np import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import keras_correctness_test_base from tensorflow.contrib.distribute.python import keras_correctness_test_base
from tensorflow.python import keras from tensorflow.python import keras
from tensorflow.python.distribute import combinations
from tensorflow.python.eager import test from tensorflow.python.eager import test
from tensorflow.python.training import gradient_descent from tensorflow.python.training import gradient_descent

View File

@ -32,12 +32,12 @@ from absl.testing import parameterized
# pylint: disable=g-direct-tensorflow-import # pylint: disable=g-direct-tensorflow-import
from tensorflow.contrib.distribute.python import collective_all_reduce_strategy as collective_strategy from tensorflow.contrib.distribute.python import collective_all_reduce_strategy as collective_strategy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import multi_worker_test_base as test_base from tensorflow.contrib.distribute.python import multi_worker_test_base as test_base
from tensorflow.contrib.distribute.python import parameter_server_strategy from tensorflow.contrib.distribute.python import parameter_server_strategy
from tensorflow.python import keras from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_coordinator as dc from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import distribute_coordinator_context as dc_context from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.eager import context from tensorflow.python.eager import context

View File

@ -20,9 +20,9 @@ from __future__ import print_function
from absl.testing import parameterized from absl.testing import parameterized
import numpy as np import numpy as np
from tensorflow.contrib.distribute.python import parameter_server_strategy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.python import keras from tensorflow.python import keras
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribution_strategy_context as ds_context from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.eager import context from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op from tensorflow.python.framework import constant_op
@ -36,6 +36,16 @@ from tensorflow.python.ops import variables
from tensorflow.python.platform import test from tensorflow.python.platform import test
# TODO(rchao): Merge parameter_server_strategy_with_two_gpus into
# third_party/tensorflow/python/distribute/strategy_combinations.py
# pylint: disable=g-long-lambda
parameter_server_strategy_with_two_gpus = combinations.NamedDistribution(
'ParameterServer2GPUs',
lambda: parameter_server_strategy.ParameterServerStrategy(
num_gpus_per_worker=2),
required_gpus=2)
def get_model(): def get_model():
x = keras.layers.Input(shape=(3,), name='input') x = keras.layers.Input(shape=(3,), name='input')
y = keras.layers.Dense(4, name='dense')(x) y = keras.layers.Dense(4, name='dense')(x)
@ -48,9 +58,7 @@ class MirroredStrategyOptimizerV2Test(test.TestCase, parameterized.TestCase):
@combinations.generate( @combinations.generate(
combinations.combine( combinations.combine(
distribution=[ distribution=[
combinations.core_mirrored_strategy_with_gpu_and_cpu, parameter_server_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_two_gpus,
combinations.parameter_server_strategy_with_two_gpus,
], ],
mode=['graph', 'eager'])) mode=['graph', 'eager']))
def testKerasOptimizerWithUnequalInput(self, distribution): def testKerasOptimizerWithUnequalInput(self, distribution):
@ -106,8 +114,7 @@ class MirroredStrategyOptimizerV2Test(test.TestCase, parameterized.TestCase):
@combinations.generate( @combinations.generate(
combinations.combine( combinations.combine(
distribution=[ distribution=[
combinations.core_mirrored_strategy_with_gpu_and_cpu, parameter_server_strategy_with_two_gpus,
combinations.parameter_server_strategy_with_two_gpus,
], ],
mode=['graph', 'eager'])) mode=['graph', 'eager']))
def testOptimizerWithKerasModelAndNumpyArrays(self, distribution): def testOptimizerWithKerasModelAndNumpyArrays(self, distribution):

View File

@ -18,10 +18,10 @@ from __future__ import division
from __future__ import print_function from __future__ import print_function
import numpy as np import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import keras_correctness_test_base from tensorflow.contrib.distribute.python import keras_correctness_test_base
from tensorflow.python import keras from tensorflow.python import keras
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import test from tensorflow.python.eager import test
from tensorflow.python.training import gradient_descent from tensorflow.python.training import gradient_descent
@ -29,8 +29,10 @@ from tensorflow.python.training import gradient_descent
def strategies_for_stateful_embedding_model(): def strategies_for_stateful_embedding_model():
"""Returns TPUStrategy with single core device assignment.""" """Returns TPUStrategy with single core device assignment."""
return [combinations.tpu_strategy_one_core, return [
combinations.tpu_strategy_one_step_one_core] strategy_combinations.tpu_strategy_one_core,
strategy_combinations.tpu_strategy_one_step_one_core
]
def test_combinations_for_stateful_embedding_model(): def test_combinations_for_stateful_embedding_model():

View File

@ -22,16 +22,16 @@ import collections
import tempfile import tempfile
from absl.testing import parameterized from absl.testing import parameterized
import numpy as np import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import keras_test as keras_test_lib
from tensorflow.contrib.distribute.python import tpu_strategy from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.python import keras from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import values from tensorflow.python.distribute import values
from tensorflow.python.eager import test from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes from tensorflow.python.framework import dtypes
from tensorflow.python.keras import distribute_strategy_test as keras_test_lib
from tensorflow.python.keras.engine import distributed_training_utils from tensorflow.python.keras.engine import distributed_training_utils
from tensorflow.python.keras.optimizer_v2 import rmsprop as rms_prop_keras from tensorflow.python.keras.optimizer_v2 import rmsprop as rms_prop_keras
from tensorflow.python.ops import math_ops from tensorflow.python.ops import math_ops
@ -166,8 +166,7 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate( @combinations.generate(
combinations.combine( combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu
], ],
mode=['graph', 'eager'])) mode=['graph', 'eager']))
def test_validating_dataset_input_tensors_with_shape_mismatch( def test_validating_dataset_input_tensors_with_shape_mismatch(
@ -192,8 +191,7 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate( @combinations.generate(
combinations.combine( combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu
], ],
mode=['graph', 'eager'])) mode=['graph', 'eager']))
def test_validating_dataset_input_tensors_with_dtype_mismatch( def test_validating_dataset_input_tensors_with_dtype_mismatch(
@ -218,8 +216,7 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate( @combinations.generate(
combinations.combine( combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu
], ],
mode=['graph', 'eager'])) mode=['graph', 'eager']))
def test_unsupported_features(self, distribution): def test_unsupported_features(self, distribution):
@ -281,8 +278,7 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate( @combinations.generate(
combinations.combine( combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu
], ],
mode=['graph', 'eager'])) mode=['graph', 'eager']))
def test_calling_with_unsupported_predefined_callbacks(self, distribution): def test_calling_with_unsupported_predefined_callbacks(self, distribution):
@ -321,7 +317,8 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate( @combinations.generate(
combinations.combine( combinations.combine(
distribution=[combinations.one_device_strategy], mode=['graph'])) distribution=[strategy_combinations.one_device_strategy],
mode=['graph']))
def test_distribution_strategy_with_add_metric_add_loss(self, distribution): def test_distribution_strategy_with_add_metric_add_loss(self, distribution):
with distribution.scope(): with distribution.scope():
x = keras.layers.Input(shape=(1,)) x = keras.layers.Input(shape=(1,))
@ -347,7 +344,8 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate( @combinations.generate(
combinations.combine( combinations.combine(
distribution=[combinations.one_device_strategy], mode=['eager'])) distribution=[strategy_combinations.one_device_strategy],
mode=['eager']))
def test_distribution_strategy_with_run_eagerly(self, distribution): def test_distribution_strategy_with_run_eagerly(self, distribution):
with distribution.scope(): with distribution.scope():
x = keras.layers.Input(shape=(1,)) x = keras.layers.Input(shape=(1,))
@ -364,8 +362,7 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate( @combinations.generate(
combinations.combine( combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu
], ],
mode=['graph', 'eager'])) mode=['graph', 'eager']))
def test_distribution_strategy_on_subclassed_model(self, distribution): def test_distribution_strategy_on_subclassed_model(self, distribution):
@ -393,8 +390,7 @@ class TestDistributionStrategyErrorCases(test.TestCase, parameterized.TestCase):
@combinations.generate( @combinations.generate(
combinations.combine( combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu
], ],
mode=['graph', 'eager'])) mode=['graph', 'eager']))
def test_distribution_strategy_on_deferred_sequential_model( def test_distribution_strategy_on_deferred_sequential_model(
@ -421,8 +417,7 @@ class TestDistributionStrategyWithLossMasking(test.TestCase,
@combinations.generate( @combinations.generate(
combinations.combine( combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu
], ],
mode=['graph', 'eager'])) mode=['graph', 'eager']))
def test_masking(self, distribution): def test_masking(self, distribution):

View File

@ -18,10 +18,10 @@ from __future__ import division
from __future__ import print_function from __future__ import print_function
from absl.testing import parameterized from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import tpu_strategy from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import test from tensorflow.python.eager import test
from tensorflow.python.framework import ops from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops from tensorflow.python.ops import math_ops
@ -74,18 +74,21 @@ def _regression_dataset_fn():
def all_combinations(): def all_combinations():
return combinations.combine( return combinations.combine(
distribution=[combinations.default_strategy, distribution=[
combinations.one_device_strategy, strategy_combinations.default_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.one_device_strategy,
combinations.mirrored_strategy_with_two_gpus, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_two_gpus], ],
mode=["graph"]) mode=["graph"])
def tpu_combinations(): def tpu_combinations():
return combinations.combine(distribution=[combinations.tpu_strategy_one_step, return combinations.combine(
combinations.tpu_strategy], distribution=[
strategy_combinations.tpu_strategy_one_step,
strategy_combinations.tpu_strategy
],
mode=["graph"]) mode=["graph"])

View File

@ -20,12 +20,12 @@ from __future__ import print_function
from absl.testing import parameterized from absl.testing import parameterized
import numpy import numpy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python.single_loss_example import batchnorm_example from tensorflow.contrib.distribute.python.single_loss_example import batchnorm_example
from tensorflow.contrib.distribute.python.single_loss_example import minimize_loss_example from tensorflow.contrib.distribute.python.single_loss_example import minimize_loss_example
from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import reduce_util from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context from tensorflow.python.eager import context
from tensorflow.python.eager import test from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op from tensorflow.python.framework import constant_op
@ -48,12 +48,12 @@ class MinimizeLossStepTest(test.TestCase, parameterized.TestCase):
@combinations.generate( @combinations.generate(
combinations.times( combinations.times(
combinations.distributions_and_v1_optimizers(), strategy_combinations.distributions_and_v1_optimizers(),
combinations.combine(mode=["graph"], use_callable_loss=[True, False]) combinations.combine(mode=["graph"], use_callable_loss=[True, False])
+ combinations.combine(mode=["eager"], use_callable_loss=[True])) + + combinations.combine(mode=["eager"], use_callable_loss=[True])) +
combinations.combine( combinations.combine(
distribution=[combinations.tpu_strategy], distribution=[strategy_combinations.tpu_strategy],
optimizer_fn=combinations.optimizers_v1, optimizer_fn=strategy_combinations.optimizers_v1,
mode=["graph"], mode=["graph"],
use_callable_loss=[True, False])) use_callable_loss=[True, False]))
def testTrainNetwork(self, distribution, optimizer_fn, use_callable_loss): def testTrainNetwork(self, distribution, optimizer_fn, use_callable_loss):
@ -90,7 +90,7 @@ class MinimizeLossStepTest(test.TestCase, parameterized.TestCase):
@combinations.generate( @combinations.generate(
combinations.times( combinations.times(
combinations.distributions_and_v1_optimizers(), strategy_combinations.distributions_and_v1_optimizers(),
combinations.combine(mode=["graph"], use_callable_loss=[True, False]) combinations.combine(mode=["graph"], use_callable_loss=[True, False])
+ combinations.combine(mode=["eager"], use_callable_loss=[True]))) + combinations.combine(mode=["eager"], use_callable_loss=[True])))
def testTrainNetworkByCallForEachReplica(self, distribution, optimizer_fn, def testTrainNetworkByCallForEachReplica(self, distribution, optimizer_fn,
@ -124,12 +124,10 @@ class MinimizeLossStepTest(test.TestCase, parameterized.TestCase):
@combinations.generate( @combinations.generate(
combinations.times( combinations.times(
combinations.distributions_and_v1_optimizers() + strategy_combinations.distributions_and_v1_optimizers(),
combinations.distributions_and_v2_optimizers(), combinations.combine(mode=["graph", "eager"])) + combinations.combine(
combinations.combine(mode=["graph", "eager"])) + distribution=[strategy_combinations.tpu_strategy],
combinations.combine( optimizer_fn=strategy_combinations.optimizers_v1,
distribution=[combinations.tpu_strategy],
optimizer_fn=combinations.optimizers_v1+combinations.optimizers_v2,
mode=["graph"])) mode=["graph"]))
def testOptimizerInsideModelFn(self, distribution, optimizer_fn): def testOptimizerInsideModelFn(self, distribution, optimizer_fn):
created_variables = [] created_variables = []
@ -195,15 +193,15 @@ class MinimizeLossStepTest(test.TestCase, parameterized.TestCase):
combinations.times( combinations.times(
combinations.combine(momentum=[0.8, 0.9, 0.99], renorm=[False, True]), combinations.combine(momentum=[0.8, 0.9, 0.99], renorm=[False, True]),
combinations.times( combinations.times(
combinations.distributions_and_v1_optimizers(), strategy_combinations.distributions_and_v1_optimizers(),
combinations.combine( combinations.combine(
mode=["graph", "eager"], mode=["graph", "eager"],
# TODO(isaprykin): Allow False here. Currently subsequent # TODO(isaprykin): Allow False here. Currently subsequent
# replicas will re-execute UPDATE_OPS of previous replicas. # replicas will re-execute UPDATE_OPS of previous replicas.
update_ops_in_cross_replica_mode=[True])) + update_ops_in_cross_replica_mode=[True])) +
combinations.combine( combinations.combine(
distribution=[combinations.tpu_strategy], distribution=[strategy_combinations.tpu_strategy],
optimizer_fn=combinations.optimizers_v1, optimizer_fn=strategy_combinations.optimizers_v1,
mode=["graph"], mode=["graph"],
update_ops_in_cross_replica_mode=[False]))) update_ops_in_cross_replica_mode=[False])))
def testTrainNetworkWithBatchNorm(self, distribution, optimizer_fn, momentum, def testTrainNetworkWithBatchNorm(self, distribution, optimizer_fn, momentum,
@ -262,8 +260,7 @@ class MinimizeLossStepTest(test.TestCase, parameterized.TestCase):
combinations.times( combinations.times(
combinations.combine( combinations.combine(
optimizer_fn=[ optimizer_fn=[
combinations.gradient_descent_optimizer_v1_fn, strategy_combinations.gradient_descent_optimizer_v1_fn,
combinations.gradient_descent_optimizer_v2_fn
], ],
loss_reduction=[ loss_reduction=[
losses_impl.Reduction.SUM, losses_impl.Reduction.MEAN, losses_impl.Reduction.SUM, losses_impl.Reduction.MEAN,
@ -271,19 +268,16 @@ class MinimizeLossStepTest(test.TestCase, parameterized.TestCase):
losses_impl.Reduction.SUM_OVER_NONZERO_WEIGHTS losses_impl.Reduction.SUM_OVER_NONZERO_WEIGHTS
]), ]),
combinations.times( combinations.times(
combinations.combine( combinations.combine(distribution=[
distribution=[ strategy_combinations.one_device_strategy,
combinations.one_device_strategy, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_two_gpus,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus
]), ]),
combinations.combine( combinations.combine(
mode=["graph"], use_callable_loss=[True, False]) + mode=["graph"], use_callable_loss=[True, False]) +
combinations.combine(mode=["eager"], use_callable_loss=[True])) + combinations.combine(mode=["eager"], use_callable_loss=[True])) +
combinations.combine( combinations.combine(
distribution=[combinations.tpu_strategy], distribution=[strategy_combinations.tpu_strategy],
mode=["graph"], mode=["graph"],
use_callable_loss=[True, False]))) use_callable_loss=[True, False])))
def testMeanVsSum(self, distribution, optimizer_fn, loss_reduction, def testMeanVsSum(self, distribution, optimizer_fn, loss_reduction,
@ -361,12 +355,11 @@ class MinimizeLossStepTest(test.TestCase, parameterized.TestCase):
@combinations.generate( @combinations.generate(
combinations.times( combinations.times(
combinations.distributions_and_v1_optimizers(), strategy_combinations.distributions_and_v1_optimizers(),
combinations.combine(mode=["graph", "eager"]), combinations.combine(mode=["graph", "eager"]),
combinations.combine(is_tpu=[False])) + combinations.combine(is_tpu=[False])) + combinations.combine(
combinations.combine( distribution=[strategy_combinations.tpu_strategy],
distribution=[combinations.tpu_strategy], optimizer_fn=strategy_combinations.optimizers_v1,
optimizer_fn=combinations.optimizers_v1,
mode=["graph"], mode=["graph"],
is_tpu=[True])) is_tpu=[True]))
def testRunStepsWithOutputContext(self, distribution, optimizer_fn, is_tpu): def testRunStepsWithOutputContext(self, distribution, optimizer_fn, is_tpu):

View File

@ -23,17 +23,17 @@ import sys
from absl.testing import parameterized from absl.testing import parameterized
import numpy as np import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import multi_worker_test_base from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.contrib.distribute.python import strategy_test_lib from tensorflow.contrib.distribute.python import strategy_test_lib
from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribution_strategy_context as ds_context from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import reduce_util from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import values from tensorflow.python.distribute import values
from tensorflow.python.eager import backprop from tensorflow.python.eager import backprop
from tensorflow.python.eager import context from tensorflow.python.eager import context
@ -62,12 +62,12 @@ from tensorflow.python.training import server_lib
GPU_TEST = "test_gpu" in sys.argv[0] GPU_TEST = "test_gpu" in sys.argv[0]
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus, strategy_combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu, ],
combinations.core_mirrored_strategy_with_two_gpus],
mode=["graph", "eager"])) mode=["graph", "eager"]))
class MirroredTwoDeviceDistributionTest( class MirroredTwoDeviceDistributionTest(
strategy_test_lib.DistributionTestBase, strategy_test_lib.DistributionTestBase,
@ -171,10 +171,9 @@ class MirroredTwoDeviceDistributionTest(
def one_device_combinations(): def one_device_combinations():
return combinations.combine( return combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_one_cpu, strategy_combinations.mirrored_strategy_with_one_cpu,
combinations.mirrored_strategy_with_one_gpu, strategy_combinations.mirrored_strategy_with_one_gpu,
combinations.core_mirrored_strategy_with_one_cpu, ],
combinations.core_mirrored_strategy_with_one_gpu],
mode=["graph", "eager"]) mode=["graph", "eager"])
@ -221,9 +220,11 @@ class MirroredOneDeviceDistributionTest(
class MirroredStrategyVariableCreatorStackTest( class MirroredStrategyVariableCreatorStackTest(
test.TestCase, parameterized.TestCase): test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine( @combinations.generate(
distribution=[combinations.mirrored_strategy_with_gpu_and_cpu, combinations.combine(
combinations.core_mirrored_strategy_with_gpu_and_cpu], distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=["graph"])) mode=["graph"]))
def testCreatorStacksAreThreadLocal(self, distribution): def testCreatorStacksAreThreadLocal(self, distribution):
def model_fn(): def model_fn():
@ -253,10 +254,12 @@ class MirroredStrategyVariableCreatorStackTest(
expected = ("main_thread:thread_0", "main_thread:thread_1") expected = ("main_thread:thread_0", "main_thread:thread_1")
self.assertEqual(expected, result) self.assertEqual(expected, result)
@combinations.generate(combinations.combine(
@combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu], ],
mode=["graph", "eager"])) mode=["graph", "eager"]))
class MirroredStrategyCallForEachReplicaTest(test.TestCase): class MirroredStrategyCallForEachReplicaTest(test.TestCase):
@ -304,10 +307,12 @@ class MirroredStrategyCallForEachReplicaTest(test.TestCase):
RuntimeError, "`merge_call` called while defining a new graph."): RuntimeError, "`merge_call` called while defining a new graph."):
distribution.extended.call_for_each_replica(model_fn) distribution.extended.call_for_each_replica(model_fn)
@combinations.generate(combinations.combine(
@combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu], ],
mode=["graph", "eager"])) mode=["graph", "eager"]))
class MirroredStrategyVariableCreationTest(test.TestCase): class MirroredStrategyVariableCreationTest(test.TestCase):
@ -794,10 +799,11 @@ class MirroredStrategyVariableCreationTest(test.TestCase):
self.assertIs(distribution, sync_on_read.distribute_strategy) self.assertIs(distribution, sync_on_read.distribute_strategy)
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu], ],
mode=["graph"])) mode=["graph"]))
class MirroredStrategyNameScopeTest(test.TestCase): class MirroredStrategyNameScopeTest(test.TestCase):
# NOTE(priyag): Names and name scopes are ignored in eager, hence we are not # NOTE(priyag): Names and name scopes are ignored in eager, hence we are not
@ -965,10 +971,11 @@ class MirroredThreeDeviceDistributionTest(
self.assertEqual("foo:0", result.name) self.assertEqual("foo:0", result.name)
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu], ],
mode=["graph", "eager"])) mode=["graph", "eager"]))
class MirroredVariableUpdateTest(test.TestCase): class MirroredVariableUpdateTest(test.TestCase):
# The following tests check assign, assign_add and assign_sub on Mirrored # The following tests check assign, assign_add and assign_sub on Mirrored
@ -1184,10 +1191,11 @@ class MirroredVariableUpdateTest(test.TestCase):
self.assertEqual(4.0, self.evaluate(mirrored_var)) self.assertEqual(4.0, self.evaluate(mirrored_var))
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu], ],
mode=["graph", "eager"])) mode=["graph", "eager"]))
class MirroredAndSyncOnReadVariableInitializerTest(test.TestCase): class MirroredAndSyncOnReadVariableInitializerTest(test.TestCase):
@ -1227,10 +1235,11 @@ class MirroredAndSyncOnReadVariableInitializerTest(test.TestCase):
self.assertTrue(self.evaluate(sync_on_read_var.is_initialized())) self.assertTrue(self.evaluate(sync_on_read_var.is_initialized()))
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu], ],
mode=["graph", "eager"])) mode=["graph", "eager"]))
class SyncOnReadVariableAssignTest(test.TestCase): class SyncOnReadVariableAssignTest(test.TestCase):
@ -1317,10 +1326,11 @@ class MiniModel(keras_training.Model):
return self.fc(inputs) return self.fc(inputs)
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu], ],
mode=["graph", "eager"])) mode=["graph", "eager"]))
class MirroredStrategyDefunTest(test.TestCase): class MirroredStrategyDefunTest(test.TestCase):

View File

@ -20,12 +20,12 @@ from __future__ import print_function
from absl.testing import parameterized from absl.testing import parameterized
import numpy import numpy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import monitor as monitor_lib from tensorflow.contrib.distribute.python import monitor as monitor_lib
from tensorflow.contrib.distribute.python.single_loss_example import single_loss_example from tensorflow.contrib.distribute.python.single_loss_example import single_loss_example
from tensorflow.python.client import session from tensorflow.python.client import session
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import one_device_strategy from tensorflow.python.distribute import one_device_strategy
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context from tensorflow.python.eager import context
from tensorflow.python.eager import test from tensorflow.python.eager import test
from tensorflow.python.framework import ops from tensorflow.python.framework import ops
@ -36,8 +36,9 @@ class MonitorTest(test.TestCase, parameterized.TestCase):
@combinations.generate( @combinations.generate(
combinations.times( combinations.times(
combinations.distributions_and_v1_optimizers(), strategy_combinations.distributions_and_v1_optimizers(),
combinations.combine(mode=combinations.graph_and_eager_modes))) combinations.combine(
mode=strategy_combinations.graph_and_eager_modes)))
def testTrainNetwork(self, distribution, optimizer_fn): def testTrainNetwork(self, distribution, optimizer_fn):
with distribution.scope(): with distribution.scope():
single_loss_step, layer = single_loss_example(optimizer_fn, distribution) single_loss_step, layer = single_loss_example(optimizer_fn, distribution)

View File

@ -20,7 +20,8 @@ from __future__ import print_function
from absl.testing import parameterized from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import test from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes from tensorflow.python.framework import dtypes
@ -30,10 +31,11 @@ from tensorflow.python.training import moving_averages
all_combinations = combinations.combine( all_combinations = combinations.combine(
distribution=[combinations.default_strategy, distribution=[
combinations.one_device_strategy, strategy_combinations.default_strategy,
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.one_device_strategy,
combinations.core_mirrored_strategy_with_gpu_and_cpu], strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=["graph"]) mode=["graph"])

View File

@ -17,18 +17,20 @@
from __future__ import absolute_import from __future__ import absolute_import
from __future__ import division from __future__ import division
from __future__ import print_function from __future__ import print_function
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import strategy_test_lib from tensorflow.contrib.distribute.python import strategy_test_lib
from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context from tensorflow.python.eager import context
from tensorflow.python.eager import test from tensorflow.python.eager import test
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.one_device_strategy, strategy_combinations.one_device_strategy,
combinations.one_device_strategy_gpu], strategy_combinations.one_device_strategy_gpu
],
mode=["eager", "graph"])) mode=["eager", "graph"]))
class OneDeviceStrategyTest( class OneDeviceStrategyTest(
strategy_test_lib.DistributionTestBase, strategy_test_lib.DistributionTestBase,

View File

@ -20,20 +20,53 @@ from __future__ import print_function
from absl.testing import parameterized from absl.testing import parameterized
import numpy import numpy
from tensorflow.contrib.distribute.python import mirrored_strategy as mirrored_lib
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python.single_loss_example import minimize_loss_example from tensorflow.contrib.distribute.python.single_loss_example import minimize_loss_example
from tensorflow.contrib.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.contrib.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context from tensorflow.python.eager import context
from tensorflow.python.eager import test from tensorflow.python.eager import test
from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables from tensorflow.python.ops import variables
mirrored_strategy_with_gpu_and_cpu = combinations.NamedDistribution(
"MirroredCPUAndGPU",
lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/cpu:0"]),
required_gpus=1)
mirrored_strategy_with_two_gpus = combinations.NamedDistribution(
"Mirrored2GPUs",
lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/gpu:1"]),
required_gpus=2)
# pylint: disable=g-long-lambda
gradient_descent_optimizer_v2_fn = combinations.NamedObject(
"GradientDescentV2", lambda: gradient_descent_v2.GradientDescentOptimizer(
0.2))
adagrad_optimizer_v2_fn = combinations.NamedObject(
"AdagradV2", lambda: adagrad_v2.AdagradOptimizer(0.001))
optimizers_v2 = [gradient_descent_optimizer_v2_fn, adagrad_optimizer_v2_fn]
def distributions_and_v2_optimizers():
"""DistributionStrategies and V2 Optimizers."""
return combinations.combine(
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus,
],
optimizer_fn=optimizers_v2)
class MinimizeLossOptimizerV2Test(test.TestCase, parameterized.TestCase): class MinimizeLossOptimizerV2Test(test.TestCase, parameterized.TestCase):
@combinations.generate( @combinations.generate(
combinations.times( combinations.times(
combinations.distributions_and_v2_optimizers(), distributions_and_v2_optimizers(),
combinations.combine(mode=["graph"], use_callable_loss=[True, False]) combinations.combine(mode=["graph"], use_callable_loss=[True, False])
+ combinations.combine(mode=["eager"], use_callable_loss=[True]))) + combinations.combine(mode=["eager"], use_callable_loss=[True])))
def testTrainNetwork(self, distribution, optimizer_fn, def testTrainNetwork(self, distribution, optimizer_fn,

View File

@ -21,13 +21,12 @@ from __future__ import print_function
import copy import copy
import threading import threading
from absl.testing import parameterized from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import multi_worker_test_base from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.contrib.distribute.python import parameter_server_strategy from tensorflow.contrib.distribute.python import parameter_server_strategy
from tensorflow.contrib.distribute.python import strategy_test_lib from tensorflow.contrib.distribute.python import strategy_test_lib
from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import device_util from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribution_strategy_context as ds_context from tensorflow.python.distribute import distribution_strategy_context as ds_context

View File

@ -20,9 +20,9 @@ from __future__ import print_function
from absl.testing import parameterized from absl.testing import parameterized
import numpy import numpy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python.single_loss_example import single_loss_example from tensorflow.contrib.distribute.python.single_loss_example import single_loss_example
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import context from tensorflow.python.eager import context
from tensorflow.python.eager import test from tensorflow.python.eager import test
from tensorflow.python.ops import variables from tensorflow.python.ops import variables
@ -32,12 +32,12 @@ class SingleLossStepTest(test.TestCase, parameterized.TestCase):
@combinations.generate( @combinations.generate(
combinations.times( combinations.times(
combinations.distributions_and_v1_optimizers(), strategy_combinations.distributions_and_v1_optimizers(),
combinations.combine(mode=combinations.graph_and_eager_modes),
combinations.combine(is_tpu=[False])) +
combinations.combine( combinations.combine(
distribution=[combinations.tpu_strategy], mode=strategy_combinations.graph_and_eager_modes),
optimizer_fn=combinations.optimizers_v1, combinations.combine(is_tpu=[False])) + combinations.combine(
distribution=[strategy_combinations.tpu_strategy],
optimizer_fn=strategy_combinations.optimizers_v1,
mode=["graph"], mode=["graph"],
is_tpu=[True])) is_tpu=[True]))
def testTrainNetwork(self, distribution, optimizer_fn, is_tpu): def testTrainNetwork(self, distribution, optimizer_fn, is_tpu):

View File

@ -20,10 +20,11 @@ from __future__ import print_function
import os import os
from absl.testing import parameterized from absl.testing import parameterized
from tensorflow.contrib.distribute.python import parameter_server_strategy
from tensorflow.contrib.distribute.python import combinations
from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import config_pb2
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import device_util from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import values from tensorflow.python.distribute import values
from tensorflow.python.eager import context from tensorflow.python.eager import context
from tensorflow.python.eager import test from tensorflow.python.eager import test
@ -38,6 +39,16 @@ from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.training import saver as saver_lib from tensorflow.python.training import saver as saver_lib
# TODO(rchao): Merge parameter_server_strategy_with_two_gpus into
# third_party/tensorflow/python/distribute/strategy_combinations.py
# pylint: disable=g-long-lambda
parameter_server_strategy_with_two_gpus = combinations.NamedDistribution(
"ParameterServer2GPUs",
lambda: parameter_server_strategy.ParameterServerStrategy(
num_gpus_per_worker=2),
required_gpus=2)
class DistributedValuesTest(test.TestCase): class DistributedValuesTest(test.TestCase):
def testGetEager(self): def testGetEager(self):
@ -495,10 +506,11 @@ class MirroredVariableTest(test.TestCase, parameterized.TestCase):
save_path = self._save_normal() save_path = self._save_normal()
self._restore_mirrored(save_path) self._restore_mirrored(save_path)
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_one_gpu, strategy_combinations.mirrored_strategy_with_one_gpu,
combinations.core_mirrored_strategy_with_one_gpu], ],
mode=["graph"])) mode=["graph"]))
def testFetchAMirroredVariable(self, distribution): def testFetchAMirroredVariable(self, distribution):
with self.session(graph=ops.Graph()) as sess, distribution.scope(): with self.session(graph=ops.Graph()) as sess, distribution.scope():
@ -511,12 +523,12 @@ class MirroredVariableTest(test.TestCase, parameterized.TestCase):
sess.run(variables_lib.global_variables_initializer()) sess.run(variables_lib.global_variables_initializer())
sess.run({"complicated": mirrored}) sess.run({"complicated": mirrored})
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_one_cpu, strategy_combinations.mirrored_strategy_with_one_cpu,
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu, strategy_combinations.tpu_strategy,
combinations.tpu_strategy,
], ],
mode=["graph", "eager"])) mode=["graph", "eager"]))
def testAssignOutOfScope_mirrored(self, distribution): def testAssignOutOfScope_mirrored(self, distribution):
@ -529,8 +541,9 @@ class MirroredVariableTest(test.TestCase, parameterized.TestCase):
for component in mirrored.values: for component in mirrored.values:
self.assertEqual(self.evaluate(component.read_value()), 3.) self.assertEqual(self.evaluate(component.read_value()), 3.)
@combinations.generate(combinations.combine( @combinations.generate(
distribution=[combinations.parameter_server_strategy_with_two_gpus], combinations.combine(
distribution=[parameter_server_strategy_with_two_gpus],
mode=["graph", "eager"])) mode=["graph", "eager"]))
def testAssignOutOfScope_aggregating(self, distribution): def testAssignOutOfScope_aggregating(self, distribution):
with distribution.scope(): with distribution.scope():
@ -600,10 +613,11 @@ class SyncOnReadVariablePropertiesTest(test.TestCase):
self.assertEqual(converted.dtype, replica_local.dtype) self.assertEqual(converted.dtype, replica_local.dtype)
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu], ],
mode=["graph", "eager"])) mode=["graph", "eager"]))
class SyncOnReadVariableTest(test.TestCase, parameterized.TestCase): class SyncOnReadVariableTest(test.TestCase, parameterized.TestCase):

View File

@ -28,7 +28,8 @@ from __future__ import print_function
import os import os
from absl.testing import parameterized from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.framework import ops from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables from tensorflow.python.ops import variables
@ -40,13 +41,14 @@ from tensorflow.python.training import warm_starting_util as ws_util
class WarmStartingUtilWithDistributionStrategyTest( class WarmStartingUtilWithDistributionStrategyTest(
test.TestCase, parameterized.TestCase): test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine( @combinations.generate(
distribution=[combinations.default_strategy, combinations.combine(
combinations.one_device_strategy, distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.default_strategy,
combinations.mirrored_strategy_with_two_gpus, strategy_combinations.one_device_strategy,
combinations.core_mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus], strategy_combinations.mirrored_strategy_with_two_gpus,
],
save_with_distribution=[True, False], save_with_distribution=[True, False],
restore_with_distribution=[True, False], restore_with_distribution=[True, False],
mode=["graph"])) mode=["graph"]))

View File

@ -496,3 +496,45 @@ py_library(
"@six_archive//:six", "@six_archive//:six",
], ],
) )
py_library(
name = "combinations",
srcs = ["combinations.py"],
srcs_version = "PY2AND3",
deps = [
"//tensorflow/python:framework_ops",
"//tensorflow/python:util",
"//tensorflow/python/eager:context",
"@absl_py//absl/testing:parameterized",
],
)
py_library(
name = "strategy_combinations",
srcs = ["strategy_combinations.py"],
srcs_version = "PY2AND3",
deps = [
":combinations",
":distribute_lib",
":mirrored_strategy",
":one_device_strategy",
":parameter_server_strategy",
":tpu_strategy",
"//tensorflow/python:framework_ops",
"//tensorflow/python:training",
"//tensorflow/python/distribute/cluster_resolver:cluster_resolver_lib",
"//tensorflow/python/eager:context",
"//tensorflow/python/keras/optimizer_v2",
"@absl_py//absl/testing:parameterized",
],
)
py_test(
name = "combinations_test",
srcs = ["combinations_test.py"],
deps = [
":combinations",
"//tensorflow/python/eager:test",
"//tensorflow/python/keras:backend",
],
)

View File

@ -43,29 +43,12 @@ from collections import OrderedDict
import sys import sys
import types import types
import unittest import unittest
from absl.testing import parameterized from absl.testing import parameterized
import six import six
from tensorflow.contrib.distribute.python import mirrored_strategy as mirrored_lib
from tensorflow.contrib.distribute.python import parameter_server_strategy
from tensorflow.contrib.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.contrib.optimizer_v2 import adam as adam_v2
from tensorflow.contrib.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import one_device_strategy as one_device_lib
from tensorflow.python.distribute import tpu_strategy as tpu_lib
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import context from tensorflow.python.eager import context
from tensorflow.python.framework import ops from tensorflow.python.framework import ops
from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_keras_v2
from tensorflow.python.keras.optimizer_v2 import adam as adam_keras_v2
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_keras_v2
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.training import adagrad
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
from tensorflow.python.util import tf_inspect from tensorflow.python.util import tf_inspect
@ -73,6 +56,8 @@ GPU_TEST = "test_gpu" in sys.argv[0]
TPU_TEST = "test_tpu" in sys.argv[0] TPU_TEST = "test_tpu" in sys.argv[0]
# TODO(rchao): Rename `distribution` parameter to `strategy` or
# `distribute_strategy`
def generate(combinations): def generate(combinations):
"""A decorator for generating test cases of a test method or a test class. """A decorator for generating test cases of a test method or a test class.
@ -325,143 +310,3 @@ class NamedDistribution(object):
@property @property
def required_tpu(self): def required_tpu(self):
return self._required_tpu return self._required_tpu
def _get_tpu_strategy_creator(steps_per_run, use_single_core=False, **kwargs):
def _create_tpu_strategy():
resolver = tpu_cluster_resolver.TPUClusterResolver("")
topology = tpu_strategy_util.initialize_tpu_system(resolver)
device_assignment = None
if use_single_core:
device_assignment = device_assignment_lib.DeviceAssignment(
topology, core_assignment=device_assignment_lib.
SINGLE_CORE_ASSIGNMENT)
strategy = tpu_lib.TPUStrategy(resolver, steps_per_run=steps_per_run,
device_assignment=device_assignment,
**kwargs)
return strategy
return _create_tpu_strategy
# pylint: disable=g-long-lambda
default_strategy = NamedDistribution(
"Default",
distribution_strategy_context._get_default_strategy, # pylint: disable=protected-access
required_gpus=None)
one_device_strategy = NamedDistribution(
"OneDeviceCPU", lambda: one_device_lib.OneDeviceStrategy("/cpu:0"),
required_gpus=None)
one_device_strategy_gpu = NamedDistribution(
"OneDeviceGPU", lambda: one_device_lib.OneDeviceStrategy("/gpu:0"),
required_gpus=1)
tpu_strategy = NamedDistribution(
"TPU", _get_tpu_strategy_creator(steps_per_run=2),
required_tpu=True)
tpu_strategy_one_step = NamedDistribution(
"TPUOneStep", _get_tpu_strategy_creator(steps_per_run=1),
required_tpu=True)
tpu_strategy_one_core = NamedDistribution(
"TPUOneCore", _get_tpu_strategy_creator(
steps_per_run=2, use_single_core=True),
required_tpu=True)
tpu_strategy_one_step_one_core = NamedDistribution(
"TPUOneStepOneCore", _get_tpu_strategy_creator(
steps_per_run=1, use_single_core=True),
required_tpu=True)
mirrored_strategy_with_one_cpu = NamedDistribution(
"Mirrored1CPU",
lambda: mirrored_lib.MirroredStrategy(["/cpu:0"]))
mirrored_strategy_with_one_gpu = NamedDistribution(
"Mirrored1GPU",
lambda: mirrored_lib.MirroredStrategy(["/gpu:0"]),
required_gpus=1)
mirrored_strategy_with_gpu_and_cpu = NamedDistribution(
"MirroredCPUAndGPU",
lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/cpu:0"]),
required_gpus=1)
mirrored_strategy_with_two_gpus = NamedDistribution(
"Mirrored2GPUs",
lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/gpu:1"]),
required_gpus=2)
core_mirrored_strategy_with_one_cpu = NamedDistribution(
"CoreMirrored1CPU",
lambda: mirrored_lib.CoreMirroredStrategy(["/cpu:0"]))
core_mirrored_strategy_with_one_gpu = NamedDistribution(
"CoreMirrored1GPU",
lambda: mirrored_lib.CoreMirroredStrategy(["/gpu:0"]),
required_gpus=1)
core_mirrored_strategy_with_gpu_and_cpu = NamedDistribution(
"CoreMirroredCPUAndGPU",
lambda: mirrored_lib.CoreMirroredStrategy(["/gpu:0", "/cpu:0"]),
required_gpus=1)
core_mirrored_strategy_with_two_gpus = NamedDistribution(
"CoreMirrored2GPUs",
lambda: mirrored_lib.CoreMirroredStrategy(["/gpu:0", "/gpu:1"]),
required_gpus=2)
parameter_server_strategy_with_two_gpus = NamedDistribution(
"ParameterServer2GPUs",
lambda: parameter_server_strategy.ParameterServerStrategy(
num_gpus_per_worker=2),
required_gpus=2)
gradient_descent_optimizer_v1_fn = NamedObject(
"GradientDescentV1", lambda: gradient_descent.GradientDescentOptimizer(0.2))
adagrad_optimizer_v1_fn = NamedObject(
"AdagradV1", lambda: adagrad.AdagradOptimizer(0.001))
adam_optimizer_v1_fn = NamedObject("AdamV1",
lambda: adam.AdamOptimizer(0.001, epsilon=1))
rmsprop_optimizer_v1_fn = NamedObject(
"RmsPropV1", lambda: rmsprop.RMSPropOptimizer(0.001))
optimizers_v1 = [gradient_descent_optimizer_v1_fn, adagrad_optimizer_v1_fn]
gradient_descent_optimizer_v2_fn = NamedObject(
"GradientDescentV2",
lambda: gradient_descent_v2.GradientDescentOptimizer(0.2))
adagrad_optimizer_v2_fn = NamedObject(
"AdagradV2", lambda: adagrad_v2.AdagradOptimizer(0.001))
adam_optimizer_v2_fn = NamedObject(
"AdamV2", lambda: adam_v2.AdamOptimizer(0.001, epsilon=1.0))
optimizers_v2 = [gradient_descent_optimizer_v2_fn, adagrad_optimizer_v2_fn]
gradient_descent_optimizer_keras_v2_fn = NamedObject(
"GradientDescentKerasV2",
lambda: gradient_descent_keras_v2.SGD(0.2))
adagrad_optimizer_keras_v2_fn = NamedObject(
"AdagradKerasV2", lambda: adagrad_keras_v2.Adagrad(0.001))
adam_optimizer_keras_v2_fn = NamedObject(
"AdamKerasV2", lambda: adam_keras_v2.Adam(0.001, epsilon=1.0))
rmsprop_optimizer_keras_v2_fn = NamedObject(
"RmsPropKerasV2", lambda: rmsprop_keras_v2.RMSprop(0.001))
graph_and_eager_modes = ["graph", "eager"]
def distributions_and_v1_optimizers():
  """A common set of combinations of DistributionStrategies and V1 Optimizers."""
  strategies = [
      one_device_strategy,
      mirrored_strategy_with_gpu_and_cpu,
      mirrored_strategy_with_two_gpus,
      core_mirrored_strategy_with_gpu_and_cpu,
      core_mirrored_strategy_with_two_gpus,
  ]
  return combine(distribution=strategies, optimizer_fn=optimizers_v1)
def distributions_and_v2_optimizers():
  """A common set of combinations of DistributionStrategies and V2 Optimizers."""
  strategies = [
      one_device_strategy,
      mirrored_strategy_with_gpu_and_cpu,
      mirrored_strategy_with_two_gpus,
      core_mirrored_strategy_with_gpu_and_cpu,
      core_mirrored_strategy_with_two_gpus,
  ]
  return combine(distribution=strategies, optimizer_fn=optimizers_v2)

View File

@ -19,9 +19,10 @@ from __future__ import division
from __future__ import print_function from __future__ import print_function
from collections import OrderedDict from collections import OrderedDict
from absl.testing import parameterized from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations from tensorflow.python.distribute import combinations
from tensorflow.python.eager import test from tensorflow.python.eager import test
@ -70,8 +71,7 @@ class TestingCombinationsTest(test.TestCase):
}, { }, {
"b": 3 "b": 3
}], }],
combinations.combine(a=[1, 2]) + combinations.combine(a=[1, 2]) + combinations.combine(b=[2, 3]))
combinations.combine(b=[2, 3]))
def test_times(self): def test_times(self):
c1 = combinations.combine(mode=["graph"], loss=["callable", "tensor"]) c1 = combinations.combine(mode=["graph"], loss=["callable", "tensor"])

View File

@ -0,0 +1,165 @@
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Strategy and optimizer combinations for combinations.combine()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy as mirrored_lib
from tensorflow.python.distribute import one_device_strategy as one_device_lib
from tensorflow.python.distribute import tpu_strategy as tpu_lib
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_keras_v2
from tensorflow.python.keras.optimizer_v2 import adam as adam_keras_v2
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_keras_v2
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.training import adagrad
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
# pylint: disable=missing-docstring
def _get_tpu_strategy_creator(steps_per_run, use_single_core=False, **kwargs):
def _create_tpu_strategy():
resolver = tpu_cluster_resolver.TPUClusterResolver("")
topology = tpu_strategy_util.initialize_tpu_system(resolver)
device_assignment = None
if use_single_core:
device_assignment = device_assignment_lib.DeviceAssignment(
topology, core_assignment=device_assignment_lib.
SINGLE_CORE_ASSIGNMENT)
strategy = tpu_lib.TPUStrategy(resolver, steps_per_run=steps_per_run,
device_assignment=device_assignment,
**kwargs)
return strategy
return _create_tpu_strategy
# pylint: disable=g-long-lambda
# Named strategy instances for use with combinations.combine(distribution=...).
# `required_gpus` / `required_tpu` record the hardware each combination needs.
default_strategy = combinations.NamedDistribution(
    "Default",
    distribution_strategy_context._get_default_strategy, # pylint: disable=protected-access
    required_gpus=None)
one_device_strategy = combinations.NamedDistribution(
    "OneDeviceCPU",
    lambda: one_device_lib.OneDeviceStrategy("/cpu:0"),
    required_gpus=None)
one_device_strategy_gpu = combinations.NamedDistribution(
    "OneDeviceGPU",
    lambda: one_device_lib.OneDeviceStrategy("/gpu:0"),
    required_gpus=1)
# TPU variants differ in steps_per_run and whether they are pinned to a
# single core (see _get_tpu_strategy_creator above).
tpu_strategy = combinations.NamedDistribution(
    "TPU", _get_tpu_strategy_creator(steps_per_run=2), required_tpu=True)
tpu_strategy_one_step = combinations.NamedDistribution(
    "TPUOneStep", _get_tpu_strategy_creator(steps_per_run=1), required_tpu=True)
tpu_strategy_one_core = combinations.NamedDistribution(
    "TPUOneCore",
    _get_tpu_strategy_creator(steps_per_run=2, use_single_core=True),
    required_tpu=True)
tpu_strategy_one_step_one_core = combinations.NamedDistribution(
    "TPUOneStepOneCore",
    _get_tpu_strategy_creator(steps_per_run=1, use_single_core=True),
    required_tpu=True)
# MirroredStrategy variants over different device sets.
mirrored_strategy_with_one_cpu = combinations.NamedDistribution(
    "Mirrored1CPU", lambda: mirrored_lib.MirroredStrategy(["/cpu:0"]))
mirrored_strategy_with_one_gpu = combinations.NamedDistribution(
    "Mirrored1GPU",
    lambda: mirrored_lib.MirroredStrategy(["/gpu:0"]),
    required_gpus=1)
mirrored_strategy_with_gpu_and_cpu = combinations.NamedDistribution(
    "MirroredCPUAndGPU",
    lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/cpu:0"]),
    required_gpus=1)
mirrored_strategy_with_two_gpus = combinations.NamedDistribution(
    "Mirrored2GPUs",
    lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/gpu:1"]),
    required_gpus=2)
# V1 (tf.train) optimizers wrapped as NamedObjects so they render readably in
# generated test names.
gradient_descent_optimizer_v1_fn = combinations.NamedObject(
    "GradientDescentV1", lambda: gradient_descent.GradientDescentOptimizer(0.2))
adagrad_optimizer_v1_fn = combinations.NamedObject(
    "AdagradV1", lambda: adagrad.AdagradOptimizer(0.001))
adam_optimizer_v1_fn = combinations.NamedObject(
    "AdamV1", lambda: adam.AdamOptimizer(0.001, epsilon=1))
rmsprop_optimizer_v1_fn = combinations.NamedObject(
    "RmsPropV1", lambda: rmsprop.RMSPropOptimizer(0.001))
# Default V1 optimizer subset used by distributions_and_v1_optimizers().
optimizers_v1 = [gradient_descent_optimizer_v1_fn, adagrad_optimizer_v1_fn]
# Keras V2 optimizers (tf.keras.optimizer_v2).
gradient_descent_optimizer_keras_v2_fn = combinations.NamedObject(
    "GradientDescentKerasV2", lambda: gradient_descent_keras_v2.SGD(0.2))
adagrad_optimizer_keras_v2_fn = combinations.NamedObject(
    "AdagradKerasV2", lambda: adagrad_keras_v2.Adagrad(0.001))
adam_optimizer_keras_v2_fn = combinations.NamedObject(
    "AdamKerasV2", lambda: adam_keras_v2.Adam(0.001, epsilon=1.0))
rmsprop_optimizer_keras_v2_fn = combinations.NamedObject(
    "RmsPropKerasV2", lambda: rmsprop_keras_v2.RMSprop(0.001))
# Execution modes accepted by combinations.combine(mode=...).
graph_and_eager_modes = ["graph", "eager"]
def distributions_and_v1_optimizers():
  """A common set of combinations of DistributionStrategies and V1 Optimizers."""
  strategies = [
      one_device_strategy,
      mirrored_strategy_with_gpu_and_cpu,
      mirrored_strategy_with_two_gpus,
  ]
  return combinations.combine(
      distribution=strategies, optimizer_fn=optimizers_v1)
# Strategy subsets used by the combination helpers below.
strategies_minus_tpu = [
    default_strategy, one_device_strategy, one_device_strategy_gpu,
    mirrored_strategy_with_gpu_and_cpu, mirrored_strategy_with_two_gpus
]
tpu_strategies = [
    tpu_strategy,  # steps_per_run=2
    tpu_strategy_one_step
]
def strategy_minus_tpu_combinations():
  """Graph- and eager-mode combinations over every non-TPU strategy."""
  modes = ["graph", "eager"]
  return combinations.combine(distribution=strategies_minus_tpu, mode=modes)
def tpu_strategy_combinations():
  """Graph-mode-only combinations over the TPU strategies."""
  graph_only = ["graph"]
  return combinations.combine(distribution=tpu_strategies, mode=graph_only)
def all_strategy_combinations():
  """All strategies: non-TPU in graph+eager modes, TPU in graph mode."""
  non_tpu = strategy_minus_tpu_combinations()
  tpu = tpu_strategy_combinations()
  return non_tpu + tpu
def all_strategy_minus_default_and_tpu_combinations():
  """Graph- and eager-mode combinations excluding the default and TPU strategies."""
  strategies = [
      one_device_strategy,
      one_device_strategy_gpu,
      mirrored_strategy_with_gpu_and_cpu,
      mirrored_strategy_with_two_gpus,
  ]
  return combinations.combine(
      distribution=strategies, mode=["graph", "eager"])
def all_strategy_combinations_minus_default():
  """All strategy combinations except the default strategy."""
  non_default = all_strategy_minus_default_and_tpu_combinations()
  return non_default + tpu_strategy_combinations()

View File

@ -2,6 +2,7 @@
# Contains the Keras API (internal TensorFlow version). # Contains the Keras API (internal TensorFlow version).
load("//tensorflow:tensorflow.bzl", "tf_py_test") load("//tensorflow:tensorflow.bzl", "tf_py_test")
load("//tensorflow:tensorflow.bzl", "cuda_py_test") load("//tensorflow:tensorflow.bzl", "cuda_py_test")
load("//tensorflow/core:platform/default/distribute.bzl", "distribute_py_test")
licenses(["notice"]) # Apache 2.0 licenses(["notice"]) # Apache 2.0
@ -1404,3 +1405,40 @@ tf_py_test(
], ],
tags = ["notsan"], tags = ["notsan"],
) )
py_library(
name = "distribute_strategy_test_lib",
srcs = [
"distribute_strategy_test.py",
],
deps = [
"//tensorflow/python:client_testlib",
"//tensorflow/python:training",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/distribute:mirrored_strategy",
"//tensorflow/python/distribute:strategy_combinations",
"//tensorflow/python/distribute:tpu_strategy",
"//tensorflow/python/eager:test",
"//tensorflow/python/estimator:estimator_py",
"//tensorflow/python/keras",
"//third_party/py/numpy",
"@absl_py//absl/testing:parameterized",
],
)
distribute_py_test(
name = "distribute_strategy_test",
srcs = ["distribute_strategy_test.py"],
full_precision = True,
main = "distribute_strategy_test.py",
shard_count = 32,
tags = [
"multi_and_single_gpu",
"no_oss", # TODO(b/117919883): Fix python error.
"no_windows_gpu",
"notsan",
],
deps = [
":distribute_strategy_test_lib",
],
)

View File

@ -20,13 +20,13 @@ from __future__ import print_function
import os import os
from absl.testing import parameterized from absl.testing import parameterized
import numpy as np import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import tpu_strategy
from tensorflow.python import keras from tensorflow.python import keras
from tensorflow.python.data.experimental.ops import cardinality from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.eager import test from tensorflow.python.eager import test
from tensorflow.python.estimator import keras as keras_lib from tensorflow.python.estimator import keras as keras_lib
from tensorflow.python.estimator import run_config as run_config_lib from tensorflow.python.estimator import run_config as run_config_lib
@ -261,17 +261,17 @@ def multi_input_output_model():
# TODO(josh11b): Add combinations.one_device_strategy_gpu once it works with # TODO(josh11b): Add combinations.one_device_strategy_gpu once it works with
# TestDistributionStrategyWithCallbacks.test_callbacks_in_predict. # TestDistributionStrategyWithCallbacks.test_callbacks_in_predict.
strategies_minus_tpu = [ strategies_minus_tpu = [
combinations.default_strategy, strategy_combinations.default_strategy,
combinations.one_device_strategy, strategy_combinations.one_device_strategy,
combinations.one_device_strategy_gpu, strategy_combinations.one_device_strategy_gpu,
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus, strategy_combinations.mirrored_strategy_with_two_gpus
combinations.core_mirrored_strategy_with_gpu_and_cpu, ]
combinations.core_mirrored_strategy_with_two_gpus]
tpu_strategies = [ tpu_strategies = [
combinations.tpu_strategy, # steps_per_run=2 strategy_combinations.tpu_strategy, # steps_per_run=2
combinations.tpu_strategy_one_step] strategy_combinations.tpu_strategy_one_step
]
def strategy_minus_tpu_combinations(): def strategy_minus_tpu_combinations():
@ -291,12 +291,11 @@ def all_strategy_combinations():
def all_strategy_minus_default_and_tpu_combinations(): def all_strategy_minus_default_and_tpu_combinations():
return combinations.combine( return combinations.combine(
distribution=[ distribution=[
combinations.one_device_strategy, strategy_combinations.one_device_strategy,
combinations.one_device_strategy_gpu, strategy_combinations.one_device_strategy_gpu,
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus, strategy_combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu, ],
combinations.core_mirrored_strategy_with_two_gpus],
mode=['graph', 'eager']) mode=['graph', 'eager'])
@ -309,14 +308,14 @@ def strategy_and_optimizer_combinations():
return combinations.times( return combinations.times(
all_strategy_combinations(), all_strategy_combinations(),
combinations.combine(optimizer=[ combinations.combine(optimizer=[
combinations.adagrad_optimizer_v1_fn, strategy_combinations.adagrad_optimizer_v1_fn,
combinations.adagrad_optimizer_keras_v2_fn, strategy_combinations.adagrad_optimizer_keras_v2_fn,
combinations.adam_optimizer_v1_fn, strategy_combinations.adam_optimizer_v1_fn,
combinations.adam_optimizer_keras_v2_fn, strategy_combinations.adam_optimizer_keras_v2_fn,
combinations.gradient_descent_optimizer_v1_fn, strategy_combinations.gradient_descent_optimizer_v1_fn,
combinations.gradient_descent_optimizer_keras_v2_fn, strategy_combinations.gradient_descent_optimizer_keras_v2_fn,
combinations.rmsprop_optimizer_v1_fn, strategy_combinations.rmsprop_optimizer_v1_fn,
combinations.rmsprop_optimizer_keras_v2_fn strategy_combinations.rmsprop_optimizer_keras_v2_fn
])) ]))
@ -337,12 +336,12 @@ class TestEstimatorDistributionStrategy(test_util.TensorFlowTestCase,
if os.path.isdir(self._base_dir): if os.path.isdir(self._base_dir):
gfile.DeleteRecursively(self._base_dir) gfile.DeleteRecursively(self._base_dir)
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus, strategy_combinations.mirrored_strategy_with_two_gpus
combinations.core_mirrored_strategy_with_gpu_and_cpu, ],
combinations.core_mirrored_strategy_with_two_gpus],
mode=['graph'])) mode=['graph']))
def test_train_functional_with_distribution_strategy(self, distribution): def test_train_functional_with_distribution_strategy(self, distribution):
keras_model = simple_functional_model() keras_model = simple_functional_model()
@ -367,12 +366,12 @@ class TestEstimatorDistributionStrategy(test_util.TensorFlowTestCase,
writer_cache.FileWriterCache.clear() writer_cache.FileWriterCache.clear()
gfile.DeleteRecursively(self._config.model_dir) gfile.DeleteRecursively(self._config.model_dir)
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus, strategy_combinations.mirrored_strategy_with_two_gpus
combinations.core_mirrored_strategy_with_gpu_and_cpu, ],
combinations.core_mirrored_strategy_with_two_gpus],
mode=['graph'])) mode=['graph']))
def test_train_sequential_with_distribution_strategy(self, distribution): def test_train_sequential_with_distribution_strategy(self, distribution):
keras_model = simple_sequential_model() keras_model = simple_sequential_model()
@ -396,10 +395,11 @@ class TestEstimatorDistributionStrategy(test_util.TensorFlowTestCase,
writer_cache.FileWriterCache.clear() writer_cache.FileWriterCache.clear()
gfile.DeleteRecursively(self._config.model_dir) gfile.DeleteRecursively(self._config.model_dir)
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu
combinations.core_mirrored_strategy_with_gpu_and_cpu], ],
mode=['graph'])) mode=['graph']))
def test_multi_inputs_multi_outputs_with_input_fn_as_dict(self, distribution): def test_multi_inputs_multi_outputs_with_input_fn_as_dict(self, distribution):
train_data, test_data = get_multi_inputs_multi_outputs_data() train_data, test_data = get_multi_inputs_multi_outputs_data()
@ -448,10 +448,11 @@ class TestEstimatorDistributionStrategy(test_util.TensorFlowTestCase,
eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1) eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)
self.assertLess(eval_results['loss'], baseline_eval_results['loss']) self.assertLess(eval_results['loss'], baseline_eval_results['loss'])
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu
combinations.core_mirrored_strategy_with_gpu_and_cpu], ],
mode=['graph'])) mode=['graph']))
def test_keras_optimizer_with_distribution_strategy(self, distribution): def test_keras_optimizer_with_distribution_strategy(self, distribution):
keras_model = simple_sequential_model() keras_model = simple_sequential_model()
@ -671,8 +672,8 @@ class TestDistributionStrategyWithNumpyArrays(test.TestCase,
# with batch_size # with batch_size
model.predict(inputs, batch_size=8) model.predict(inputs, batch_size=8)
@combinations.generate(combinations.combine( @combinations.generate(
distribution=strategies_minus_tpu, mode=['graph'])) combinations.combine(distribution=strategies_minus_tpu, mode=['graph']))
def test_numpy_with_sample_weights(self, distribution): def test_numpy_with_sample_weights(self, distribution):
with self.cached_session(): with self.cached_session():
with distribution.scope(): with distribution.scope():
@ -847,10 +848,11 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
# as clone_model's input_tensors argument only seems to accept list and not # as clone_model's input_tensors argument only seems to accept list and not
# tuples or dict. # tuples or dict.
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu
combinations.core_mirrored_strategy_with_gpu_and_cpu], ],
mode=['graph', 'eager'])) mode=['graph', 'eager']))
def test_fit_with_tuple_and_dict_dataset_inputs(self, distribution): def test_fit_with_tuple_and_dict_dataset_inputs(self, distribution):
with self.cached_session(): with self.cached_session():
@ -1024,10 +1026,11 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
model.evaluate(dataset, steps=2, verbose=1) model.evaluate(dataset, steps=2, verbose=1)
model.predict(dataset, steps=2) model.predict(dataset, steps=2)
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu
combinations.core_mirrored_strategy_with_gpu_and_cpu], ],
mode=['graph', 'eager'])) mode=['graph', 'eager']))
# TODO(b/120943676, b/120957836): Re-enable once the validation code is # TODO(b/120943676, b/120957836): Re-enable once the validation code is
# restored. # restored.
@ -1050,8 +1053,11 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
'expected input to have shape'): 'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine( @combinations.generate(
distribution=[combinations.mirrored_strategy_with_gpu_and_cpu], combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu
],
mode=['graph', 'eager'])) mode=['graph', 'eager']))
# TODO(b/120943676, b/120957836): Re-enable once the validation code is # TODO(b/120943676, b/120957836): Re-enable once the validation code is
# restored. # restored.
@ -1072,8 +1078,9 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
with self.assertRaisesRegexp(ValueError, 'expected input to have shape'): with self.assertRaisesRegexp(ValueError, 'expected input to have shape'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine( @combinations.generate(
distribution=[combinations.tpu_strategy_one_step], combinations.combine(
distribution=[strategy_combinations.tpu_strategy_one_step],
mode=['graph'])) mode=['graph']))
def test_dataset_input_shape_fully_defined(self, distribution): def test_dataset_input_shape_fully_defined(self, distribution):
with self.cached_session(): with self.cached_session():
@ -1091,12 +1098,12 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
with self.assertRaisesRegexp(ValueError, 'requires fully defined shapes'): with self.assertRaisesRegexp(ValueError, 'requires fully defined shapes'):
model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0) model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
@combinations.generate(combinations.combine( @combinations.generate(
combinations.combine(
distribution=[ distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu, strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus, strategy_combinations.mirrored_strategy_with_two_gpus
combinations.core_mirrored_strategy_with_gpu_and_cpu, ],
combinations.core_mirrored_strategy_with_two_gpus],
mode=['graph', 'eager'])) mode=['graph', 'eager']))
def test_learning_phase_value(self, distribution): def test_learning_phase_value(self, distribution):
# TODO(anjalisridhar): Modify this test to use Lambdas since we can compare # TODO(anjalisridhar): Modify this test to use Lambdas since we can compare
@ -1116,8 +1123,8 @@ class TestDistributionStrategyWithDatasets(test.TestCase,
model.compile(optimizer, loss, metrics=metrics) model.compile(optimizer, loss, metrics=metrics)
batch_size = 8 batch_size = 8
if isinstance(distribution, mirrored_strategy.CoreMirroredStrategy): if isinstance(distribution, mirrored_strategy.MirroredStrategy):
# CoreMirroredStrategy uses global batch size. # MirroredStrategy uses global batch size.
batch_size = 8 * distribution.num_replicas_in_sync batch_size = 8 * distribution.num_replicas_in_sync
inputs = np.ones((10, 1), dtype=np.float32) inputs = np.ones((10, 1), dtype=np.float32)
@ -1239,7 +1246,8 @@ class TestRegularizerLoss(test.TestCase, parameterized.TestCase):
def loss_fn(_, y_pred): def loss_fn(_, y_pred):
return math_ops.reduce_mean(y_pred) return math_ops.reduce_mean(y_pred)
@combinations.generate(all_strategy_combinations_minus_default()) @combinations.generate(
strategy_combinations.all_strategy_combinations_minus_default())
def test_regularizer_loss(self, distribution): def test_regularizer_loss(self, distribution):
batch_size = 2 batch_size = 2
if not distributed_training_utils.global_batch_size_supported(distribution): if not distributed_training_utils.global_batch_size_supported(distribution):
@ -1301,7 +1309,8 @@ class TestDistributionStrategyWithKerasModels(test.TestCase,
model.predict(inputs, steps=1) model.predict(inputs, steps=1)
model.evaluate(inputs, targets, steps=1) model.evaluate(inputs, targets, steps=1)
@combinations.generate(all_strategy_combinations_minus_default()) @combinations.generate(
strategy_combinations.all_strategy_combinations_minus_default())
def test_distribution_strategy_one_dimensional(self, distribution): def test_distribution_strategy_one_dimensional(self, distribution):
with distribution.scope(): with distribution.scope():
inp = keras.layers.Input(shape=(10,)) inp = keras.layers.Input(shape=(10,))

View File

@ -77,7 +77,9 @@ COMMON_PIP_DEPS = [
"//tensorflow/python/data/kernel_tests:filter_test_base", "//tensorflow/python/data/kernel_tests:filter_test_base",
"//tensorflow/python/data/kernel_tests:test_base", "//tensorflow/python/data/kernel_tests:test_base",
"//tensorflow/python/debug:debug_pip", "//tensorflow/python/debug:debug_pip",
"//tensorflow/python/distribute:combinations",
"//tensorflow/python/eager:eager_pip", "//tensorflow/python/eager:eager_pip",
"//tensorflow/python/keras:distribute_strategy_test_lib",
"//tensorflow/python/keras/mixed_precision/experimental:test_util", "//tensorflow/python/keras/mixed_precision/experimental:test_util",
"//tensorflow/python/kernel_tests/random:util", "//tensorflow/python/kernel_tests/random:util",
"//tensorflow/python/kernel_tests/signal:test_util", "//tensorflow/python/kernel_tests/signal:test_util",