STT-tensorflow/tensorflow/tools/api/golden/v1/tensorflow.distribute.-reduction-to-one-device.pbtxt
Ran Chen fa08cfd489 Add an experimental_hints to batch all reduce
This contains all performance hints to the API. Currently there's only bytes_per_pack, which splits large batches into multiple packs, allowing communication to overlap with computation.

Currently we can only pack if all Tensors in the batch have known shapes.

PiperOrigin-RevId: 297269428
Change-Id: Iaf7d7d3adf7c6cad59aa6079fbcd36b31e92c4b5
2020-02-25 20:32:44 -08:00

35 lines
1.6 KiB
Plaintext

path: "tensorflow.distribute.ReductionToOneDevice"
tf_class {
is_instance: "<class \'tensorflow.python.distribute.cross_device_ops.ReductionToOneDevice\'>"
is_instance: "<class \'tensorflow.python.distribute.cross_device_ops.CrossDeviceOps\'>"
is_instance: "<type \'object\'>"
member_method {
name: "__init__"
argspec: "args=[\'self\', \'reduce_to_device\', \'accumulation_fn\'], varargs=None, keywords=None, defaults=[\'None\', \'None\'], "
}
member_method {
name: "batch_reduce"
argspec: "args=[\'self\', \'reduce_op\', \'value_destination_pairs\', \'experimental_hints\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
name: "batch_reduce_implementation"
argspec: "args=[\'self\', \'reduce_op\', \'value_destination_pairs\', \'experimental_hints\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "broadcast"
argspec: "args=[\'self\', \'tensor\', \'destinations\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "broadcast_implementation"
argspec: "args=[\'self\', \'tensor\', \'destinations\'], varargs=None, keywords=None, defaults=None"
}
member_method {
name: "reduce"
argspec: "args=[\'self\', \'reduce_op\', \'per_replica_value\', \'destinations\', \'experimental_hints\'], varargs=None, keywords=None, defaults=[\'None\'], "
}
member_method {
name: "reduce_implementation"
argspec: "args=[\'self\', \'reduce_op\', \'per_replica_value\', \'destinations\', \'experimental_hints\'], varargs=None, keywords=None, defaults=None"
}
}