Expose MultiWorkerMirroredStrategy and ParameterServerStrategy
PiperOrigin-RevId: 234895866
Parent: 3a83826bd1
Commit: d5d39154db
Changed under: tensorflow/python/distribute, tools/api/generator, tools/api/golden
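
After this change both strategies are constructible from the public tf.distribute.experimental namespace. A minimal usage sketch (illustrative, not part of the commit; the golden files below confirm only the zero-argument constructors, scope(), and num_replicas_in_sync):

import tensorflow as tf

# Both constructors take no arguments, per the golden API files below.
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()

# Variables created under the strategy's scope are managed by the strategy.
with strategy.scope():
  v = tf.Variable(1.0)

print(strategy.num_replicas_in_sync)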
@@ -115,6 +115,7 @@ py_library(
         ":distribute_lib",
         ":mirrored_strategy",
         ":one_device_strategy",
+        "//tensorflow/python/distribute/experimental",
     ],
 )
 
@@ -25,4 +25,6 @@ from tensorflow.python.distribute import distribute_lib
 from tensorflow.python.distribute import distribution_strategy_context
 from tensorflow.python.distribute import mirrored_strategy
 from tensorflow.python.distribute import one_device_strategy
+from tensorflow.python.distribute.experimental import collective_all_reduce_strategy
+from tensorflow.python.distribute.experimental import parameter_server_strategy
 # pylint: enable=unused-import
@@ -39,9 +39,11 @@ from tensorflow.python.framework import ops
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import collective_ops
 from tensorflow.python.platform import tf_logging as logging
+from tensorflow.python.util.tf_export import tf_export
 
 
 # TODO(yuefengz): support in-graph replication.
+@tf_export("distribute.experimental.MultiWorkerMirroredStrategy")
 class CollectiveAllReduceStrategy(distribute_lib.DistributionStrategy):
   """Distribution strategy that uses collective ops for all-reduce.
 
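
The exposure mechanism is the tf_export decorator, which registers an internal class under a public tf.* name; the API generator later materializes those registrations into generated __init__.py files. A simplified sketch of the idea (hypothetical; the real decorator in tensorflow/python/util/tf_export.py also handles v1/v2 names, constants, and deprecation):

# Toy model of a tf_export-style registry, for illustration only.
_EXPORTED_SYMBOLS = {}

def tf_export(*api_names):
  def decorator(obj):
    for name in api_names:
      # Record that `obj` should appear as tf.<name> in the public API.
      _EXPORTED_SYMBOLS[name] = obj
    return obj
  return decorator

@tf_export("distribute.experimental.MultiWorkerMirroredStrategy")
class CollectiveAllReduceStrategy(object):
  """Internal class published under a public name."""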
tensorflow/python/distribute/experimental/BUILD (new file, 19 lines)
@@ -0,0 +1,19 @@
+package(
+    default_visibility = ["//tensorflow:internal"],
+)
+
+licenses(["notice"])  # Apache 2.0
+
+exports_files(["LICENSE"])
+
+py_library(
+    name = "experimental",
+    srcs = [
+        "__init__.py",
+    ],
+    srcs_version = "PY2AND3",
+    deps = [
+        "//tensorflow/python/distribute:collective_all_reduce_strategy",
+        "//tensorflow/python/distribute:parameter_server_strategy",
+    ],
+)
tensorflow/python/distribute/experimental/__init__.py (new file, 24 lines)
@@ -0,0 +1,24 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Experimental Distribution Strategy library."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+# pylint: disable=unused-import
+from tensorflow.python.distribute import collective_all_reduce_strategy
+from tensorflow.python.distribute import parameter_server_strategy
+# pylint: enable=unused-import
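
With the experimental package in place, the public symbol and the internal class are one and the same object, as the is_instance entries in the golden files below record. An illustrative check (assumes a TensorFlow build at this revision):

import tensorflow as tf
from tensorflow.python.distribute import collective_all_reduce_strategy

# The public name is the internal class re-exported via tf_export.
assert (tf.distribute.experimental.MultiWorkerMirroredStrategy is
        collective_all_reduce_strategy.CollectiveAllReduceStrategy)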
@@ -20,11 +20,12 @@ from __future__ import print_function
 
 import copy
 
-from tensorflow.contrib.distribute.python import mirrored_strategy
 
 from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
 from tensorflow.python.distribute import device_util
 from tensorflow.python.distribute import distribute_lib
+from tensorflow.python.distribute import input_lib
+from tensorflow.python.distribute import mirrored_strategy
 from tensorflow.python.distribute import multi_worker_util
 from tensorflow.python.distribute import numpy_dataset
 from tensorflow.python.distribute import values
@@ -39,12 +40,14 @@ from tensorflow.python.ops import variable_scope as vs
 from tensorflow.python.platform import tf_logging as logging
 from tensorflow.python.training import device_setter
 from tensorflow.python.util import nest
+from tensorflow.python.util.tf_export import tf_export
 
 _LOCAL_CPU = "/device:CPU:0"
 _LOCAL_GPU_0 = "/device:GPU:0"
 
 
 # TODO(yuefengz): maybe cache variables on local CPU.
+@tf_export("distribute.experimental.ParameterServerStrategy")
 class ParameterServerStrategy(distribute_lib.DistributionStrategy):
   """A parameter server DistributionStrategy.
 
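
The public constructor takes no arguments; cluster membership is supplied separately. A hypothetical multi-worker setup via configure (argument names come from the golden argspec below; the host addresses are illustrative):

import tensorflow as tf

strategy = tf.distribute.experimental.ParameterServerStrategy()

# Illustrative cluster: one parameter server and two workers.
cluster_spec = {
    "ps": ["ps0.example.com:2222"],
    "worker": ["worker0.example.com:2222", "worker1.example.com:2222"],
}

# Matches args=['self', 'session_config', 'cluster_spec', 'task_type', 'task_id'].
strategy.configure(cluster_spec=cluster_spec, task_type="worker", task_id=0)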
@@ -15,6 +15,7 @@ TENSORFLOW_API_INIT_FILES = [
     "debugging/__init__.py",
     "distribute/__init__.py",
     "distribute/cluster_resolver/__init__.py",
+    "distribute/experimental/__init__.py",
     "dtypes/__init__.py",
     "errors/__init__.py",
     "experimental/__init__.py",
@@ -16,6 +16,7 @@ TENSORFLOW_API_INIT_FILES_V1 = [
    "debugging/__init__.py",
    "distribute/__init__.py",
    "distribute/cluster_resolver/__init__.py",
+    "distribute/experimental/__init__.py",
    "distributions/__init__.py",
    "dtypes/__init__.py",
    "errors/__init__.py",
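
These .bzl lists drive the API generator: every listed path becomes a generated __init__.py whose contents come from tf_export registrations. A toy sketch of that generation step (hypothetical helper; the real logic lives under tools/api/generator):

def generate_init(api_path, exported):
  """Build source text for the generated <api_path>/__init__.py."""
  prefix = api_path.strip("/").replace("/", ".")
  lines = ["# Generated file, do not edit."]
  for dotted_name, internal_name in sorted(exported.items()):
    module, _, symbol = dotted_name.rpartition(".")
    if module == prefix:
      # The real generator emits an import of the internal module
      # recorded by tf_export; here we only show the name mapping.
      lines.append("%s = %s" % (symbol, internal_name))
  return "\n".join(lines)

exported = {
    "distribute.experimental.MultiWorkerMirroredStrategy":
        "CollectiveAllReduceStrategy",
    "distribute.experimental.ParameterServerStrategy":
        "ParameterServerStrategy",
}
print(generate_init("distribute/experimental", exported))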
@@ -0,0 +1,66 @@
+path: "tensorflow.distribute.experimental.MultiWorkerMirroredStrategy"
+tf_class {
+  is_instance: "<class \'tensorflow.python.distribute.collective_all_reduce_strategy.CollectiveAllReduceStrategy\'>"
+  is_instance: "<class \'tensorflow.python.distribute.distribute_lib.DistributionStrategy\'>"
+  is_instance: "<type \'object\'>"
+  member {
+    name: "extended"
+    mtype: "<type \'property\'>"
+  }
+  member {
+    name: "num_replicas_in_sync"
+    mtype: "<type \'property\'>"
+  }
+  member_method {
+    name: "__init__"
+    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "broadcast"
+    argspec: "args=[\'self\', \'tensor\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "colocate_vars_with"
+    argspec: "args=[\'self\', \'colocate_with_variable\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "configure"
+    argspec: "args=[\'self\', \'session_config\', \'cluster_spec\', \'task_type\', \'task_id\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+  }
+  member_method {
+    name: "experimental_make_numpy_iterator"
+    argspec: "args=[\'self\', \'numpy_input\', \'batch_size\', \'num_epochs\', \'shuffle\', \'session\'], varargs=None, keywords=None, defaults=[\'1\', \'1024\', \'None\'], "
+  }
+  member_method {
+    name: "experimental_run"
+    argspec: "args=[\'self\', \'fn\', \'input_iterator\'], varargs=None, keywords=None, defaults=[\'None\'], "
+  }
+  member_method {
+    name: "group"
+    argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+  }
+  member_method {
+    name: "make_dataset_iterator"
+    argspec: "args=[\'self\', \'dataset\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "make_input_fn_iterator"
+    argspec: "args=[\'self\', \'input_fn\', \'replication_mode\'], varargs=None, keywords=None, defaults=[\'InputReplicationMode.PER_WORKER\'], "
+  }
+  member_method {
+    name: "reduce"
+    argspec: "args=[\'self\', \'reduce_op\', \'value\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "scope"
+    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "unwrap"
+    argspec: "args=[\'self\', \'value\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "update_config_proto"
+    argspec: "args=[\'self\', \'config_proto\'], varargs=None, keywords=None, defaults=None"
+  }
+}
@@ -0,0 +1,66 @@
+path: "tensorflow.distribute.experimental.ParameterServerStrategy"
+tf_class {
+  is_instance: "<class \'tensorflow.python.distribute.parameter_server_strategy.ParameterServerStrategy\'>"
+  is_instance: "<class \'tensorflow.python.distribute.distribute_lib.DistributionStrategy\'>"
+  is_instance: "<type \'object\'>"
+  member {
+    name: "extended"
+    mtype: "<type \'property\'>"
+  }
+  member {
+    name: "num_replicas_in_sync"
+    mtype: "<type \'property\'>"
+  }
+  member_method {
+    name: "__init__"
+    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "broadcast"
+    argspec: "args=[\'self\', \'tensor\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "colocate_vars_with"
+    argspec: "args=[\'self\', \'colocate_with_variable\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "configure"
+    argspec: "args=[\'self\', \'session_config\', \'cluster_spec\', \'task_type\', \'task_id\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+  }
+  member_method {
+    name: "experimental_make_numpy_iterator"
+    argspec: "args=[\'self\', \'numpy_input\', \'batch_size\', \'num_epochs\', \'shuffle\', \'session\'], varargs=None, keywords=None, defaults=[\'1\', \'1024\', \'None\'], "
+  }
+  member_method {
+    name: "experimental_run"
+    argspec: "args=[\'self\', \'fn\', \'input_iterator\'], varargs=None, keywords=None, defaults=[\'None\'], "
+  }
+  member_method {
+    name: "group"
+    argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+  }
+  member_method {
+    name: "make_dataset_iterator"
+    argspec: "args=[\'self\', \'dataset\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "make_input_fn_iterator"
+    argspec: "args=[\'self\', \'input_fn\', \'replication_mode\'], varargs=None, keywords=None, defaults=[\'InputReplicationMode.PER_WORKER\'], "
+  }
+  member_method {
+    name: "reduce"
+    argspec: "args=[\'self\', \'reduce_op\', \'value\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "scope"
+    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "unwrap"
+    argspec: "args=[\'self\', \'value\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "update_config_proto"
+    argspec: "args=[\'self\', \'config_proto\'], varargs=None, keywords=None, defaults=None"
+  }
+}
@@ -0,0 +1,11 @@
+path: "tensorflow.distribute.experimental"
+tf_module {
+  member {
+    name: "MultiWorkerMirroredStrategy"
+    mtype: "<type \'type\'>"
+  }
+  member {
+    name: "ParameterServerStrategy"
+    mtype: "<type \'type\'>"
+  }
+}
@@ -56,6 +56,10 @@ tf_module {
     name: "cluster_resolver"
     mtype: "<type \'module\'>"
   }
+  member {
+    name: "experimental"
+    mtype: "<type \'module\'>"
+  }
   member_method {
     name: "get_loss_reduction"
     argspec: "args=[], varargs=None, keywords=None, defaults=None"
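
The golden files above pin the exported method surface (these are the v1 goldens; the v2 copies follow). A sketch exercising make_dataset_iterator, experimental_run, and reduce as listed above (illustrative; graph-mode session handling at this TF version is omitted):

import tensorflow as tf

strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()

dataset = tf.data.Dataset.from_tensor_slices([1.0, 2.0, 3.0, 4.0]).batch(2)
iterator = strategy.make_dataset_iterator(dataset)

def step(inputs):
  # Runs once per replica; results are aggregated below.
  return inputs * 2.0

per_replica = strategy.experimental_run(step, iterator)
total = strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica)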
@@ -0,0 +1,66 @@
+path: "tensorflow.distribute.experimental.MultiWorkerMirroredStrategy"
+tf_class {
+  is_instance: "<class \'tensorflow.python.distribute.collective_all_reduce_strategy.CollectiveAllReduceStrategy\'>"
+  is_instance: "<class \'tensorflow.python.distribute.distribute_lib.DistributionStrategy\'>"
+  is_instance: "<type \'object\'>"
+  member {
+    name: "extended"
+    mtype: "<type \'property\'>"
+  }
+  member {
+    name: "num_replicas_in_sync"
+    mtype: "<type \'property\'>"
+  }
+  member_method {
+    name: "__init__"
+    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "broadcast"
+    argspec: "args=[\'self\', \'tensor\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "colocate_vars_with"
+    argspec: "args=[\'self\', \'colocate_with_variable\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "configure"
+    argspec: "args=[\'self\', \'session_config\', \'cluster_spec\', \'task_type\', \'task_id\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+  }
+  member_method {
+    name: "experimental_make_numpy_iterator"
+    argspec: "args=[\'self\', \'numpy_input\', \'batch_size\', \'num_epochs\', \'shuffle\', \'session\'], varargs=None, keywords=None, defaults=[\'1\', \'1024\', \'None\'], "
+  }
+  member_method {
+    name: "experimental_run"
+    argspec: "args=[\'self\', \'fn\', \'input_iterator\'], varargs=None, keywords=None, defaults=[\'None\'], "
+  }
+  member_method {
+    name: "group"
+    argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+  }
+  member_method {
+    name: "make_dataset_iterator"
+    argspec: "args=[\'self\', \'dataset\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "make_input_fn_iterator"
+    argspec: "args=[\'self\', \'input_fn\', \'replication_mode\'], varargs=None, keywords=None, defaults=[\'InputReplicationMode.PER_WORKER\'], "
+  }
+  member_method {
+    name: "reduce"
+    argspec: "args=[\'self\', \'reduce_op\', \'value\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "scope"
+    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "unwrap"
+    argspec: "args=[\'self\', \'value\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "update_config_proto"
+    argspec: "args=[\'self\', \'config_proto\'], varargs=None, keywords=None, defaults=None"
+  }
+}
@@ -0,0 +1,66 @@
+path: "tensorflow.distribute.experimental.ParameterServerStrategy"
+tf_class {
+  is_instance: "<class \'tensorflow.python.distribute.parameter_server_strategy.ParameterServerStrategy\'>"
+  is_instance: "<class \'tensorflow.python.distribute.distribute_lib.DistributionStrategy\'>"
+  is_instance: "<type \'object\'>"
+  member {
+    name: "extended"
+    mtype: "<type \'property\'>"
+  }
+  member {
+    name: "num_replicas_in_sync"
+    mtype: "<type \'property\'>"
+  }
+  member_method {
+    name: "__init__"
+    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "broadcast"
+    argspec: "args=[\'self\', \'tensor\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "colocate_vars_with"
+    argspec: "args=[\'self\', \'colocate_with_variable\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "configure"
+    argspec: "args=[\'self\', \'session_config\', \'cluster_spec\', \'task_type\', \'task_id\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\'], "
+  }
+  member_method {
+    name: "experimental_make_numpy_iterator"
+    argspec: "args=[\'self\', \'numpy_input\', \'batch_size\', \'num_epochs\', \'shuffle\', \'session\'], varargs=None, keywords=None, defaults=[\'1\', \'1024\', \'None\'], "
+  }
+  member_method {
+    name: "experimental_run"
+    argspec: "args=[\'self\', \'fn\', \'input_iterator\'], varargs=None, keywords=None, defaults=[\'None\'], "
+  }
+  member_method {
+    name: "group"
+    argspec: "args=[\'self\', \'value\', \'name\'], varargs=None, keywords=None, defaults=[\'None\'], "
+  }
+  member_method {
+    name: "make_dataset_iterator"
+    argspec: "args=[\'self\', \'dataset\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "make_input_fn_iterator"
+    argspec: "args=[\'self\', \'input_fn\', \'replication_mode\'], varargs=None, keywords=None, defaults=[\'InputReplicationMode.PER_WORKER\'], "
+  }
+  member_method {
+    name: "reduce"
+    argspec: "args=[\'self\', \'reduce_op\', \'value\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "scope"
+    argspec: "args=[\'self\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "unwrap"
+    argspec: "args=[\'self\', \'value\'], varargs=None, keywords=None, defaults=None"
+  }
+  member_method {
+    name: "update_config_proto"
+    argspec: "args=[\'self\', \'config_proto\'], varargs=None, keywords=None, defaults=None"
+  }
+}
@@ -0,0 +1,11 @@
+path: "tensorflow.distribute.experimental"
+tf_module {
+  member {
+    name: "MultiWorkerMirroredStrategy"
+    mtype: "<type \'type\'>"
+  }
+  member {
+    name: "ParameterServerStrategy"
+    mtype: "<type \'type\'>"
+  }
+}
@@ -56,6 +56,10 @@ tf_module {
     name: "cluster_resolver"
     mtype: "<type \'module\'>"
   }
+  member {
+    name: "experimental"
+    mtype: "<type \'module\'>"
+  }
   member_method {
     name: "get_replica_context"
     argspec: "args=[], varargs=None, keywords=None, defaults=None"
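
This second set of goldens mirrors the first for the v2 API surface, differing only in neighboring members (get_replica_context here versus get_loss_reduction above). A small illustrative use of the replica context inside a per-replica function:

import tensorflow as tf

strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()

def replica_fn():
  ctx = tf.distribute.get_replica_context()
  # Each replica can ask for its own index within the sync group.
  return ctx.replica_id_in_sync_group

# experimental_run invokes fn once per replica; input_iterator is optional.
result = strategy.experimental_run(replica_fn)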