Export strategy combinations that are used by Keras to tf.__internal__.

PiperOrigin-RevId: 335724503
Change-Id: I15e5adbba764d91a1d81ba60724719d678697e6f
Authored by Scott Zhu on 2020-10-06 14:40:50 -07:00; committed by TensorFlower Gardener
parent 203d2f4beb
commit 9fd6313f37
4 changed files with 1 addition and 132 deletions
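For orientation before the per-file diffs: each strategy name touched below is a NamedDistribution constant defined in tensorflow/python/distribute/strategy_combinations.py, and routing it through tf.__internal__ lets code outside TensorFlow core, such as Keras tests, reference it without importing private modules. A minimal sketch of that access path, assuming a TensorFlow build in which the tf.__internal__.distribute.combinations exports are present:

import tensorflow as tf

# Assumption: this build ships the tf.__internal__ exports discussed in this
# commit; builds without them raise AttributeError on the lookup below.
ds_combinations = tf.__internal__.distribute.combinations

# Each attribute is a NamedDistribution: a readable name paired with a strategy
# factory, not a tf.distribute.Strategy instance itself.
named = ds_combinations.mirrored_strategy_with_one_cpu
print(named)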

View File

@@ -222,7 +222,6 @@ py_library(
"//tensorflow/python/distribute:combinations", # For tf.__internal__ API.
"//tensorflow/python/distribute:distribute_config",
"//tensorflow/python/distribute:estimator_training",
"//tensorflow/python/distribute:strategy_combinations", # For tf.__internal__,
"//tensorflow/python/dlpack",
"//tensorflow/python/eager:def_function",
"//tensorflow/python/eager:monitoring",

View File

@@ -35,9 +35,8 @@ from tensorflow.python.framework import config
from tensorflow.python.platform import flags
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.util.tf_export import tf_export
_TF_INTERNAL_API_PREFIX = "__internal__.distribute.combinations."
FLAGS = flags.FLAGS
_did_connect_to_cluster = False
CollectiveAllReduceExtended = (
@@ -51,7 +50,6 @@ def _get_tpu_strategy_creator(steps_per_run,
**kwargs):
def _create_tpu_strategy():
FLAGS = flags.FLAGS # pylint: disable=invalid-name
global _did_connect_to_cluster
try:
@@ -196,11 +194,6 @@ mirrored_strategy_with_two_gpus = combinations.NamedDistribution(
# Should call set_virtual_cpus_to_at_least(3) in your test's setUp methods.
mirrored_strategy_with_cpu_1_and_2 = combinations.NamedDistribution(
"Mirrored2CPU", lambda: mirrored_lib.MirroredStrategy(["/cpu:1", "/cpu:2"]))
mirrored_strategy_with_cpu_1_and_2.__doc__ = (
"""Mirrored strategy with 2 virtual CPUs.
Should call set_virtual_cpus_to_at_least(3) in the test's setUp methods.
""")
central_storage_strategy_with_two_gpus = combinations.NamedDistribution(
"CentralStorage2GPUs",
lambda: central_storage_strategy.CentralStorageStrategy._from_num_gpus(2), # pylint: disable=protected-access
@@ -359,57 +352,3 @@ def all_strategy_minus_default_and_tpu_combinations():
def all_strategy_combinations_minus_default():
return (all_strategy_minus_default_and_tpu_combinations() +
tpu_strategy_combinations())
tf_export(
_TF_INTERNAL_API_PREFIX + "central_storage_strategy_with_gpu_and_cpu",
v1=[]).export_constant(__name__,
"central_storage_strategy_with_gpu_and_cpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "central_storage_strategy_with_two_gpus",
v1=[]).export_constant(__name__, "central_storage_strategy_with_two_gpus")
tf_export(
_TF_INTERNAL_API_PREFIX + "cloud_tpu_strategy",
v1=[]).export_constant(__name__, "cloud_tpu_strategy")
tf_export(
_TF_INTERNAL_API_PREFIX + "default_strategy",
v1=[]).export_constant(__name__, "default_strategy")
tf_export(
_TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_cpu_1_and_2",
v1=[]).export_constant(__name__, "mirrored_strategy_with_cpu_1_and_2")
tf_export(
_TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_gpu_and_cpu",
v1=[]).export_constant(__name__, "mirrored_strategy_with_gpu_and_cpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_one_cpu",
v1=[]).export_constant(__name__, "mirrored_strategy_with_one_cpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_one_gpu",
v1=[]).export_constant(__name__, "mirrored_strategy_with_one_gpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "mirrored_strategy_with_two_gpus",
v1=[]).export_constant(__name__, "mirrored_strategy_with_two_gpus")
tf_export(
_TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x1_cpu",
v1=[]).export_constant(__name__, "multi_worker_mirrored_2x1_cpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x1_gpu",
v1=[]).export_constant(__name__, "multi_worker_mirrored_2x1_gpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "multi_worker_mirrored_2x2_gpu",
v1=[]).export_constant(__name__, "multi_worker_mirrored_2x2_gpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "one_device_strategy",
v1=[]).export_constant(__name__, "one_device_strategy")
tf_export(
_TF_INTERNAL_API_PREFIX + "one_device_strategy_gpu",
v1=[]).export_constant(__name__, "one_device_strategy_gpu")
tf_export(
_TF_INTERNAL_API_PREFIX + "tpu_strategy",
v1=[]).export_constant(__name__, "tpu_strategy")
tf_export(
_TF_INTERNAL_API_PREFIX + "tpu_strategy_one_core",
v1=[]).export_constant(__name__, "tpu_strategy_one_core")
tf_export(
_TF_INTERNAL_API_PREFIX + "tpu_strategy_packed_var",
v1=[]).export_constant(__name__, "tpu_strategy_packed_var")
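
The block above is the entire export mechanism: tf_export(name, v1=[]).export_constant(module, symbol) publishes an existing module-level constant under the given public name, and v1=[] means no tf.compat.v1 alias is created. A hedged sketch of the same pattern applied to a made-up combination (one_device_strategy_cpu_1 is illustrative, not a real TensorFlow symbol):

from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import one_device_strategy as one_device_lib
from tensorflow.python.util.tf_export import tf_export

_TF_INTERNAL_API_PREFIX = "__internal__.distribute.combinations."

# Illustrative constant only; it does not exist in TensorFlow. NamedDistribution
# pairs a short name with a factory invoked when the combination is used.
one_device_strategy_cpu_1 = combinations.NamedDistribution(
    "OneDeviceCPU1", lambda: one_device_lib.OneDeviceStrategy("/cpu:1"))

# Register the constant under the internal TF2 namespace; v1=[] keeps it out
# of the tf.compat.v1 API.
tf_export(
    _TF_INTERNAL_API_PREFIX + "one_device_strategy_cpu_1",
    v1=[]).export_constant(__name__, "one_device_strategy_cpu_1")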

View File

@@ -55,7 +55,6 @@ from tensorflow.python.util.tf_export import tf_export
# _internal APIs
from tensorflow.python.distribute.combinations import generate
from tensorflow.python.distribute.strategy_combinations import *
from tensorflow.python.framework.combinations import *
from tensorflow.python.framework.composite_tensor import *
from tensorflow.python.framework.test_combinations import *

View File

@@ -1,73 +1,5 @@
path: "tensorflow.__internal__.distribute.combinations"
tf_module {
member {
name: "central_storage_strategy_with_gpu_and_cpu"
mtype: "<class \'tensorflow.python.distribute.combinations.NamedDistribution\'>"
}
member {
name: "central_storage_strategy_with_two_gpus"
mtype: "<class \'tensorflow.python.distribute.combinations.NamedDistribution\'>"
}
member {
name: "cloud_tpu_strategy"
mtype: "<class \'tensorflow.python.distribute.combinations.NamedDistribution\'>"
}
member {
name: "default_strategy"
mtype: "<class \'tensorflow.python.distribute.combinations.NamedDistribution\'>"
}
member {
name: "mirrored_strategy_with_cpu_1_and_2"
mtype: "<class \'tensorflow.python.distribute.combinations.NamedDistribution\'>"
}
member {
name: "mirrored_strategy_with_gpu_and_cpu"
mtype: "<class \'tensorflow.python.distribute.combinations.NamedDistribution\'>"
}
member {
name: "mirrored_strategy_with_one_cpu"
mtype: "<class \'tensorflow.python.distribute.combinations.NamedDistribution\'>"
}
member {
name: "mirrored_strategy_with_one_gpu"
mtype: "<class \'tensorflow.python.distribute.combinations.NamedDistribution\'>"
}
member {
name: "mirrored_strategy_with_two_gpus"
mtype: "<class \'tensorflow.python.distribute.combinations.NamedDistribution\'>"
}
member {
name: "multi_worker_mirrored_2x1_cpu"
mtype: "<class \'tensorflow.python.distribute.combinations.NamedDistribution\'>"
}
member {
name: "multi_worker_mirrored_2x1_gpu"
mtype: "<class \'tensorflow.python.distribute.combinations.NamedDistribution\'>"
}
member {
name: "multi_worker_mirrored_2x2_gpu"
mtype: "<class \'tensorflow.python.distribute.combinations.NamedDistribution\'>"
}
member {
name: "one_device_strategy"
mtype: "<class \'tensorflow.python.distribute.combinations.NamedDistribution\'>"
}
member {
name: "one_device_strategy_gpu"
mtype: "<class \'tensorflow.python.distribute.combinations.NamedDistribution\'>"
}
member {
name: "tpu_strategy"
mtype: "<class \'tensorflow.python.distribute.combinations.NamedDistribution\'>"
}
member {
name: "tpu_strategy_one_core"
mtype: "<class \'tensorflow.python.distribute.combinations.NamedDistribution\'>"
}
member {
name: "tpu_strategy_packed_var"
mtype: "<class \'tensorflow.python.distribute.combinations.NamedDistribution\'>"
}
member_method {
name: "generate"
argspec: "args=[\'combinations\', \'test_combinations\'], varargs=None, keywords=None, defaults=[\'()\'], "
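
The golden .pbtxt above is what pins the public surface: the NamedDistribution constants plus the generate decorator, whose argspec reads as generate(combinations, test_combinations=()). A hedged sketch of how a downstream test would put them together, assuming the companion tf.__internal__.test.combinations.combine export, which is not part of this diff, is also available in the build:

from absl.testing import parameterized
import tensorflow as tf

ds_combinations = tf.__internal__.distribute.combinations
# Assumption: this sibling export exists in the same build; only the
# distribute.combinations half is shown in the diff above.
framework_combinations = tf.__internal__.test.combinations


class SimpleDistributeTest(tf.test.TestCase, parameterized.TestCase):

  @ds_combinations.generate(
      framework_combinations.combine(
          distribution=[
              ds_combinations.default_strategy,
              ds_combinations.one_device_strategy,
          ],
          mode=["eager"]))
  def test_variable_creation(self, distribution):
    # The NamedDistribution is resolved before the test body runs, so
    # `distribution` is the actual tf.distribute.Strategy instance here.
    with distribution.scope():
      v = tf.Variable(1.0)
    self.assertEqual(self.evaluate(v), 1.0)


if __name__ == "__main__":
  tf.test.main()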