Go: Update generated wrapper functions for TensorFlow ops.

PiperOrigin-RevId: 328372956
Change-Id: I763361a219ff3839a97f674120699170ed26b418
A. Unique TensorFlower 2020-08-25 11:47:57 -07:00 committed by TensorFlower Gardener
parent 65da7b87d6
commit 86f9d1fbae


@@ -7748,98 +7748,6 @@ func PriorityQueueV2(scope *Scope, shapes []tf.Shape, optional ...PriorityQueueV
	return op.Output(0)
}

// Does nothing. Serves as a control trigger for scheduling.
//
// Only useful as a placeholder for control edges.
//
// Returns the created operation.
func ControlTrigger(scope *Scope) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ControlTrigger",
	}
	return scope.AddOperation(opspec)
}

// Interleave the values from the `data` tensors into a single tensor.
//
// Builds a merged tensor such that
//
// ```python
// merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
// ```
//
// For example, if each `indices[m]` is scalar or vector, we have
//
// ```python
// # Scalar indices:
// merged[indices[m], ...] = data[m][...]
//
// # Vector indices:
// merged[indices[m][i], ...] = data[m][i, ...]
// ```
//
// Each `data[i].shape` must start with the corresponding `indices[i].shape`,
// and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we
// must have `data[i].shape = indices[i].shape + constant`. In terms of this
// `constant`, the output shape is
//
// merged.shape = [max(indices)] + constant
//
// Values may be merged in parallel, so if an index appears in both `indices[m][i]`
// and `indices[n][j]`, the result may be invalid. This differs from the normal
// DynamicStitch operator, which defines the behavior in that case.
//
// For example:
//
// ```python
// indices[0] = 6
// indices[1] = [4, 1]
// indices[2] = [[5, 2], [0, 3]]
// data[0] = [61, 62]
// data[1] = [[41, 42], [11, 12]]
// data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
// merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
// [51, 52], [61, 62]]
// ```
//
// This method can be used to merge partitions created by `dynamic_partition`,
// as illustrated in the following example:
//
// ```python
// # Apply a function (increment x_i) to elements for which a certain condition
// # applies (x_i != -1 in this example).
// x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
// condition_mask = tf.not_equal(x, tf.constant(-1.))
// partitioned_data = tf.dynamic_partition(
//     x, tf.cast(condition_mask, tf.int32), 2)
// partitioned_data[1] = partitioned_data[1] + 1.0
// condition_indices = tf.dynamic_partition(
//     tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
// x = tf.dynamic_stitch(condition_indices, partitioned_data)
// # Here x = [1.1, -1., 6.2, 5.3, -1., 8.4]; the -1. values remain
// # unchanged.
// ```
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
// </div>
func ParallelDynamicStitch(scope *Scope, indices []tf.Output, data []tf.Output) (merged tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ParallelDynamicStitch",
		Input: []tf.Input{
			tf.OutputList(indices), tf.OutputList(data),
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// Partitions `data` into `num_partitions` tensors using indices from `partitions`.
//
// For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]`
@@ -15307,6 +15215,118 @@ func TensorListLength(scope *Scope, input_handle tf.Output) (length tf.Output) {
	return op.Output(0)
}

// Does nothing. Serves as a control trigger for scheduling.
//
// Only useful as a placeholder for control edges.
//
// Returns the created operation.
func ControlTrigger(scope *Scope) (o *tf.Operation) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ControlTrigger",
	}
	return scope.AddOperation(opspec)
}

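// Illustrative usage sketch (a separate program, not part of the generated
// wrappers): ControlTrigger as a no-op anchor for control edges. The use of
// Scope.WithControlDependencies to attach the control edge is an assumption
// about the typical calling pattern, not something taken from this diff.
package example

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func buildGated() (*tf.Graph, tf.Output, error) {
	s := op.NewScope()
	// The trigger computes nothing; it exists only so other ops can depend on it.
	trigger := op.ControlTrigger(s.SubScope("trigger"))
	// Ops built under the derived scope run only after the trigger has fired.
	gated := s.WithControlDependencies(trigger)
	x := op.Const(gated, int32(42))
	graph, err := s.Finalize()
	return graph, x, err
}
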
// Interleave the values from the `data` tensors into a single tensor.
//
// Builds a merged tensor such that
//
// ```python
// merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
// ```
//
// For example, if each `indices[m]` is scalar or vector, we have
//
// ```python
// # Scalar indices:
// merged[indices[m], ...] = data[m][...]
//
// # Vector indices:
// merged[indices[m][i], ...] = data[m][i, ...]
// ```
//
// Each `data[i].shape` must start with the corresponding `indices[i].shape`,
// and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we
// must have `data[i].shape = indices[i].shape + constant`. In terms of this
// `constant`, the output shape is
//
// merged.shape = [max(indices)] + constant
//
// Values may be merged in parallel, so if an index appears in both `indices[m][i]`
// and `indices[n][j]`, the result may be invalid. This differs from the normal
// DynamicStitch operator, which defines the behavior in that case.
//
// For example:
//
// ```python
// indices[0] = 6
// indices[1] = [4, 1]
// indices[2] = [[5, 2], [0, 3]]
// data[0] = [61, 62]
// data[1] = [[41, 42], [11, 12]]
// data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
// merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
// [51, 52], [61, 62]]
// ```
//
// This method can be used to merge partitions created by `dynamic_partition`,
// as illustrated in the following example:
//
// ```python
// # Apply a function (increment x_i) to elements for which a certain condition
// # applies (x_i != -1 in this example).
// x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
// condition_mask = tf.not_equal(x, tf.constant(-1.))
// partitioned_data = tf.dynamic_partition(
//     x, tf.cast(condition_mask, tf.int32), 2)
// partitioned_data[1] = partitioned_data[1] + 1.0
// condition_indices = tf.dynamic_partition(
//     tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
// x = tf.dynamic_stitch(condition_indices, partitioned_data)
// # Here x = [1.1, -1., 6.2, 5.3, -1., 8.4]; the -1. values remain
// # unchanged.
// ```
//
// <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
// <img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
// </div>
func ParallelDynamicStitch(scope *Scope, indices []tf.Output, data []tf.Output) (merged tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "ParallelDynamicStitch",
		Input: []tf.Input{
			tf.OutputList(indices), tf.OutputList(data),
		},
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

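// Illustrative usage sketch (a separate program, not part of the generated
// wrappers): building and running a small graph with ParallelDynamicStitch.
// The two index sets are disjoint, so the parallel merge is well defined. The
// session plumbing below is a minimal assumption about how callers typically
// drive these wrappers from the standard TensorFlow Go bindings.
package example

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func runParallelStitch() error {
	s := op.NewScope()
	// Two disjoint index/data pairs; merged[i] receives the value whose index is i.
	indices := []tf.Output{
		op.Const(s.SubScope("i0"), []int32{0, 2}),
		op.Const(s.SubScope("i1"), []int32{1, 3}),
	}
	data := []tf.Output{
		op.Const(s.SubScope("d0"), []float32{10, 30}),
		op.Const(s.SubScope("d1"), []float32{20, 40}),
	}
	merged := op.ParallelDynamicStitch(s, indices, data)

	graph, err := s.Finalize()
	if err != nil {
		return err
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		return err
	}
	defer sess.Close()
	out, err := sess.Run(nil, []tf.Output{merged}, nil)
	if err != nil {
		return err
	}
	fmt.Println(out[0].Value()) // [10 20 30 40]
	return nil
}
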
// Returns a Tensor stack of all keys in a tensor map.
//
// input_handle: the input map
// keys: the returned Tensor of all keys in the map
func TensorMapStackKeys(scope *Scope, input_handle tf.Output, key_dtype tf.DataType) (keys tf.Output) {
	if scope.Err() != nil {
		return
	}
	attrs := map[string]interface{}{"key_dtype": key_dtype}
	opspec := tf.OpSpec{
		Type: "TensorMapStackKeys",
		Input: []tf.Input{
			input_handle,
		},
		Attrs: attrs,
	}
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

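// Illustrative sketch (a separate program, not part of the generated wrappers):
// stacking the keys of a tensor map. EmptyTensorMap and TensorMapInsert are
// assumed to be generated alongside TensorMapStackKeys in this revision; their
// exact signatures here are an assumption, not taken from this diff.
package example

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func buildStackKeys() (*tf.Graph, tf.Output, error) {
	s := op.NewScope()
	m := op.EmptyTensorMap(s)                          // assumed wrapper: empty map handle
	k := op.Const(s.SubScope("k"), int64(7))           // scalar int64 key
	v := op.Const(s.SubScope("v"), []float32{1, 2, 3}) // value stored under that key
	m = op.TensorMapInsert(s, m, k, v)                 // assumed wrapper: returns the updated handle
	keys := op.TensorMapStackKeys(s, m, tf.Int64)      // all keys as one int64 tensor
	graph, err := s.Finalize()
	return graph, keys, err
}
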
// Returns whether the given key exists in the map.
//
// input_handle: the input map