Go: Update generated wrapper functions for TensorFlow ops.

PiperOrigin-RevId: 339911119
Change-Id: Iddf765ff3e58ea67c0cdf3b1ffe3065a92e8dcfc
A. Unique TensorFlower 2020-10-30 11:46:19 -07:00 committed by TensorFlower Gardener
parent 1c47355978
commit baf2bfa96f


@@ -29727,6 +29727,21 @@ func RandomPoisson(scope *Scope, shape tf.Output, rate tf.Output, optional ...Ra
return op.Output(0)
}
// Computes the derivative of a Gamma random sample w.r.t. `alpha`.
func RandomGammaGrad(scope *Scope, alpha tf.Output, sample tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
Type: "RandomGammaGrad",
Input: []tf.Input{
alpha, sample,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
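
To make the calling convention concrete, here is a minimal, hypothetical usage sketch for the wrapper above, assuming the standard TensorFlow Go bindings (`tf`) and this generated `op` package are importable as below; the alpha and sample values are illustrative only.

package main

import (
	"fmt"

	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// Illustrative inputs: a Gamma shape parameter and a sample drawn with it.
	alpha := op.Const(s.SubScope("alpha"), []float32{2.0})
	sample := op.Const(s.SubScope("sample"), []float32{1.5})
	grad := op.RandomGammaGrad(s, alpha, sample)

	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	out, err := sess.Run(nil, []tf.Output{grad}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(out[0].Value()) // derivative of the sample w.r.t. alpha
}
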
// Creates a dataset that takes a Bernoulli sample of the contents of another dataset.
//
// There is no transformation in the `tf.data` Python API for creating this dataset.
@@ -29917,6 +29932,364 @@ func ParameterizedTruncatedNormal(scope *Scope, shape tf.Output, means tf.Outpu
return op.Output(0)
}
// FractionalMaxPoolAttr is an optional argument to FractionalMaxPool.
type FractionalMaxPoolAttr func(optionalAttr)
// FractionalMaxPoolPseudoRandom sets the optional pseudo_random attribute to value.
//
// value: When set to True, generates the pooling sequence in a
// pseudorandom fashion; otherwise, in a random fashion. See the paper [Benjamin
// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the
// difference between pseudorandom and random.
// If not specified, defaults to false
func FractionalMaxPoolPseudoRandom(value bool) FractionalMaxPoolAttr {
return func(m optionalAttr) {
m["pseudo_random"] = value
}
}
// FractionalMaxPoolOverlapping sets the optional overlapping attribute to value.
//
// value: When set to True, the values at the boundary of adjacent pooling
// cells are used by both cells when pooling. For example:
//
// `index 0 1 2 3 4`
//
// `value 20 5 16 3 7`
//
// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
// The result would be [20, 16] for fractional max pooling.
// If not specified, defaults to false
func FractionalMaxPoolOverlapping(value bool) FractionalMaxPoolAttr {
return func(m optionalAttr) {
m["overlapping"] = value
}
}
// FractionalMaxPoolDeterministic sets the optional deterministic attribute to value.
//
// value: When set to True, a fixed pooling region will be used when
// iterating over a FractionalMaxPool node in the computation graph. Mainly used
// in unit test to make FractionalMaxPool deterministic.
// If not specified, defaults to false
func FractionalMaxPoolDeterministic(value bool) FractionalMaxPoolAttr {
return func(m optionalAttr) {
m["deterministic"] = value
}
}
// FractionalMaxPoolSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 are set to be non-zero, the random number
// generator is seeded by the given seed. Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func FractionalMaxPoolSeed(value int64) FractionalMaxPoolAttr {
return func(m optionalAttr) {
m["seed"] = value
}
}
// FractionalMaxPoolSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func FractionalMaxPoolSeed2(value int64) FractionalMaxPoolAttr {
return func(m optionalAttr) {
m["seed2"] = value
}
}
// Performs fractional max pooling on the input.
//
// Fractional max pooling is slightly different from regular max pooling. In
// regular max pooling, you downsize an input set by taking the maximum value of
// smaller N x N subsections of the set (often 2x2), reducing the set by a factor
// of N, where N is an integer. Fractional max pooling, as you might expect from
// the word "fractional", means that the overall reduction ratio N does not have
// to be an integer.
//
// The sizes of the pooling regions are generated randomly but are fairly uniform.
// For example, let's look at the height dimension, and the constraints on the
// list of rows that will be pool boundaries.
//
// First we define the following:
//
// 1. input_row_length : the number of rows from the input set
// 2. output_row_length : the number of rows in the output, which will be smaller than the input
// 3. alpha = input_row_length / output_row_length : our reduction ratio
// 4. K = floor(alpha)
// 5. row_pooling_sequence : this is the result list of pool boundary rows
//
// Then, row_pooling_sequence should satisfy:
//
// 1. a[0] = 0 : the first value of the sequence is 0
// 2. a[end] = input_row_length : the last value of the sequence is the input row length
// 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
// 4. length(row_pooling_sequence) = output_row_length+1
//
// For more details on fractional max pooling, see this paper:
// [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
//
// Arguments:
// value: 4-D with shape `[batch, height, width, channels]`.
// pooling_ratio: Pooling ratio for each dimension of `value`; currently only the
// row and col dimensions are supported, and each ratio should be >= 1.0. For
// example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and
// last elements must be 1.0 because pooling on the batch and channels dimensions
// is not allowed. 1.44 and 1.73 are the pooling ratios on the height and width
// dimensions, respectively.
//
// Returns:
// output: output tensor after fractional max pooling.
// row_pooling_sequence: row pooling sequence, needed to calculate gradient.
// col_pooling_sequence: column pooling sequence, needed to calculate gradient.
func FractionalMaxPool(scope *Scope, value tf.Output, pooling_ratio []float32, optional ...FractionalMaxPoolAttr) (output tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{"pooling_ratio": pooling_ratio}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
Type: "FractionalMaxPool",
Input: []tf.Input{
value,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0), op.Output(1), op.Output(2)
}
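
The optional attributes above follow the package's functional-options pattern: each setter returns a closure that writes one attr into the op's attribute map. A hypothetical graph-construction sketch, assuming the generated `op` package plus its hand-written `Placeholder`/`PlaceholderShape` helpers; the shapes, ratios, and seed are illustrative:

s := op.NewScope()
// NHWC input: batch=1, height=10, width=10, channels=1.
input := op.Placeholder(s, tf.Float, op.PlaceholderShape(tf.MakeShape(1, 10, 10, 1)))
// Ratios for [batch, height, width, channels]; batch and channels must stay 1.0.
out, rowSeq, colSeq := op.FractionalMaxPool(s, input,
	[]float32{1.0, 1.44, 1.73, 1.0},
	op.FractionalMaxPoolOverlapping(true),
	op.FractionalMaxPoolSeed(7), // fixed seed for reproducibility
)
_, _, _ = out, rowSeq, colSeq // rowSeq/colSeq are the inputs FractionalMaxPoolGrad needs
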
// Computes the reciprocal of x element-wise.
//
// I.e., \\(y = 1 / x\\).
func Reciprocal(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
Type: "Reciprocal",
Input: []tf.Input{
x,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
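
A quick usage sketch with illustrative values:

s := op.NewScope()
x := op.Const(s, []float32{2, 4, 8})
y := op.Reciprocal(s, x) // evaluates to [0.5 0.25 0.125] when run in a session
_ = y
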
// LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr is an optional argument to LoadTPUEmbeddingAdagradParametersGradAccumDebug.
type LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr func(optionalAttr)
// LoadTPUEmbeddingAdagradParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func LoadTPUEmbeddingAdagradParametersGradAccumDebugTableId(value int64) LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr {
return func(m optionalAttr) {
m["table_id"] = value
}
}
// LoadTPUEmbeddingAdagradParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingAdagradParametersGradAccumDebugTableName(value string) LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr {
return func(m optionalAttr) {
m["table_name"] = value
}
}
// LoadTPUEmbeddingAdagradParametersGradAccumDebugConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingAdagradParametersGradAccumDebugConfig(value string) LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr {
return func(m optionalAttr) {
m["config"] = value
}
}
// Load Adagrad embedding parameters with debug support.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
// parameters: Value of parameters used in the Adagrad optimization algorithm.
// accumulators: Value of accumulators used in the Adagrad optimization algorithm.
// gradient_accumulators: Value of gradient_accumulators used in the Adagrad optimization algorithm.
//
// Returns the created operation.
func LoadTPUEmbeddingAdagradParametersGradAccumDebug(scope *Scope, parameters tf.Output, accumulators tf.Output, gradient_accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
Type: "LoadTPUEmbeddingAdagradParametersGradAccumDebug",
Input: []tf.Input{
parameters, accumulators, gradient_accumulators,
},
Attrs: attrs,
}
return scope.AddOperation(opspec)
}
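
Because this op returns a `*tf.Operation` rather than outputs, it is run as a target. The following is a graph-construction-only sketch; actually executing it assumes a configured TPU embedding system (a preceding ConfigureTPUEmbeddingHost), and the parameter values and table name are hypothetical:

s := op.NewScope()
params := op.Const(s.SubScope("params"), [][]float32{{0.1, 0.2}})
accums := op.Const(s.SubScope("accums"), [][]float32{{0.0, 0.0}})
gradAccums := op.Const(s.SubScope("grad_accums"), [][]float32{{0.0, 0.0}})
load := op.LoadTPUEmbeddingAdagradParametersGradAccumDebug(s,
	params, accums, gradAccums,
	1, // num_shards
	0, // shard_id
	op.LoadTPUEmbeddingAdagradParametersGradAccumDebugTableName("table_0"), // hypothetical table name
)
_ = load // on a TPU host, pass as a target: sess.Run(nil, nil, []*tf.Operation{load})
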
// MapPeekAttr is an optional argument to MapPeek.
type MapPeekAttr func(optionalAttr)
// MapPeekCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapPeekCapacity(value int64) MapPeekAttr {
return func(m optionalAttr) {
m["capacity"] = value
}
}
// MapPeekMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapPeekMemoryLimit(value int64) MapPeekAttr {
return func(m optionalAttr) {
m["memory_limit"] = value
}
}
// MapPeekContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func MapPeekContainer(value string) MapPeekAttr {
return func(m optionalAttr) {
m["container"] = value
}
}
// MapPeekSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func MapPeekSharedName(value string) MapPeekAttr {
return func(m optionalAttr) {
m["shared_name"] = value
}
}
// Op peeks at the values at the specified key. If the
//
// underlying container does not contain this key,
// this op will block until it does.
func MapPeek(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...MapPeekAttr) (values []tf.Output) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{"dtypes": dtypes}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
Type: "MapPeek",
Input: []tf.Input{
key, indices,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
if scope.Err() != nil {
return
}
var idx int
var err error
if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
scope.UpdateErr("MapPeek", err)
return
}
return values
}
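
Peeking only makes sense against a populated map; the companion generated wrapper `MapStage` inserts values under the same `shared_name`. A hypothetical sketch (the shared name and values are illustrative), with the stage op run as a target before the peeked values are fetched so the peek does not block:

s := op.NewScope()
key := op.Const(s.SubScope("key"), int64(1))
indices := op.Const(s.SubScope("indices"), []int32{0})
val := op.Const(s.SubScope("val"), []float32{1, 2, 3})
stage := op.MapStage(s, key, indices, []tf.Output{val}, []tf.DataType{tf.Float},
	op.MapStageSharedName("demo_map")) // hypothetical shared name
peeked := op.MapPeek(s, key, indices, []tf.DataType{tf.Float},
	op.MapPeekSharedName("demo_map"))
_, _ = stage, peeked // run `stage` as a target first, then fetch `peeked[0]`
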
// RetrieveTPUEmbeddingCenteredRMSPropParametersAttr is an optional argument to RetrieveTPUEmbeddingCenteredRMSPropParameters.
type RetrieveTPUEmbeddingCenteredRMSPropParametersAttr func(optionalAttr)
// RetrieveTPUEmbeddingCenteredRMSPropParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func RetrieveTPUEmbeddingCenteredRMSPropParametersTableId(value int64) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr {
return func(m optionalAttr) {
m["table_id"] = value
}
}
// RetrieveTPUEmbeddingCenteredRMSPropParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingCenteredRMSPropParametersTableName(value string) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr {
return func(m optionalAttr) {
m["table_name"] = value
}
}
// RetrieveTPUEmbeddingCenteredRMSPropParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingCenteredRMSPropParametersConfig(value string) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr {
return func(m optionalAttr) {
m["config"] = value
}
}
// Retrieve centered RMSProp embedding parameters.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
// parameters: Parameter parameters updated by the centered RMSProp optimization algorithm.
// ms: Parameter ms updated by the centered RMSProp optimization algorithm.
// mom: Parameter mom updated by the centered RMSProp optimization algorithm.
// mg: Parameter mg updated by the centered RMSProp optimization algorithm.
func RetrieveTPUEmbeddingCenteredRMSPropParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingCenteredRMSPropParametersAttr) (parameters tf.Output, ms tf.Output, mom tf.Output, mg tf.Output) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
Type: "RetrieveTPUEmbeddingCenteredRMSPropParameters",
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}
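
The retrieve side mirrors the load side: no inputs, four outputs, with sharding passed as attrs. Again graph construction only; running it assumes a configured TPU embedding system, and the table name is hypothetical:

s := op.NewScope()
params, ms, mom, mg := op.RetrieveTPUEmbeddingCenteredRMSPropParameters(s,
	1, // num_shards
	0, // shard_id
	op.RetrieveTPUEmbeddingCenteredRMSPropParametersTableName("table_0"), // hypothetical table name
)
_, _, _, _ = params, ms, mom, mg // fetch these to snapshot the table into host tensors
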
// Returns x + y element-wise.
//
// *NOTE*: `RiscAdd` does not support broadcasting.
//
// Given two input tensors, the `tf.risc_add` operation computes the element-wise sum.
//
// Both input and output have a range `(-inf, inf)`.
//
func RiscAdd(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
Type: "RiscAdd",
Input: []tf.Input{
x, y,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
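
RISC ops are experimental and may have no registered kernels in standard builds, so the sketch below is graph construction only; note that both operands must share a shape, since broadcasting is unsupported:

s := op.NewScope()
x := op.Const(s.SubScope("x"), []float32{1, 2, 3})
y := op.Const(s.SubScope("y"), []float32{10, 20, 30}) // same shape as x: no broadcasting
z := op.RiscAdd(s, x, y)
_ = z
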
// QuantizedMatMulAttr is an optional argument to QuantizedMatMul.
type QuantizedMatMulAttr func(optionalAttr)
@@ -38780,21 +39153,6 @@ func RaggedTensorToTensor(scope *Scope, shape tf.Output, values tf.Output, defau
return op.Output(0)
}
// Computes the derivative of a Gamma random sample w.r.t. `alpha`.
func RandomGammaGrad(scope *Scope, alpha tf.Output, sample tf.Output) (output tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
Type: "RandomGammaGrad",
Input: []tf.Input{
alpha, sample,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
// LRNGradAttr is an optional argument to LRNGrad.
type LRNGradAttr func(optionalAttr)
@@ -41410,215 +41768,6 @@ func ResourceApplyAddSign(scope *Scope, var_ tf.Output, m tf.Output, lr tf.Outpu
return scope.AddOperation(opspec)
}
// FractionalMaxPoolAttr is an optional argument to FractionalMaxPool.
type FractionalMaxPoolAttr func(optionalAttr)
// FractionalMaxPoolPseudoRandom sets the optional pseudo_random attribute to value.
//
// value: When set to True, generates the pooling sequence in a
// pseudorandom fashion; otherwise, in a random fashion. See the paper [Benjamin
// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for the
// difference between pseudorandom and random.
// If not specified, defaults to false
func FractionalMaxPoolPseudoRandom(value bool) FractionalMaxPoolAttr {
return func(m optionalAttr) {
m["pseudo_random"] = value
}
}
// FractionalMaxPoolOverlapping sets the optional overlapping attribute to value.
//
// value: When set to True, the values at the boundary of adjacent pooling
// cells are used by both cells when pooling. For example:
//
// `index 0 1 2 3 4`
//
// `value 20 5 16 3 7`
//
// If the pooling sequence is [0, 2, 4], then 16, at index 2, will be used twice.
// The result would be [20, 16] for fractional max pooling.
// If not specified, defaults to false
func FractionalMaxPoolOverlapping(value bool) FractionalMaxPoolAttr {
return func(m optionalAttr) {
m["overlapping"] = value
}
}
// FractionalMaxPoolDeterministic sets the optional deterministic attribute to value.
//
// value: When set to True, a fixed pooling region will be used when
// iterating over a FractionalMaxPool node in the computation graph. Mainly used
// in unit test to make FractionalMaxPool deterministic.
// If not specified, defaults to false
func FractionalMaxPoolDeterministic(value bool) FractionalMaxPoolAttr {
return func(m optionalAttr) {
m["deterministic"] = value
}
}
// FractionalMaxPoolSeed sets the optional seed attribute to value.
//
// value: If either seed or seed2 are set to be non-zero, the random number
// generator is seeded by the given seed. Otherwise, it is seeded by a
// random seed.
// If not specified, defaults to 0
func FractionalMaxPoolSeed(value int64) FractionalMaxPoolAttr {
return func(m optionalAttr) {
m["seed"] = value
}
}
// FractionalMaxPoolSeed2 sets the optional seed2 attribute to value.
//
// value: A second seed to avoid seed collision.
// If not specified, defaults to 0
func FractionalMaxPoolSeed2(value int64) FractionalMaxPoolAttr {
return func(m optionalAttr) {
m["seed2"] = value
}
}
// Performs fractional max pooling on the input.
//
// Fractional max pooling is slightly different from regular max pooling. In
// regular max pooling, you downsize an input set by taking the maximum value of
// smaller N x N subsections of the set (often 2x2), reducing the set by a factor
// of N, where N is an integer. Fractional max pooling, as you might expect from
// the word "fractional", means that the overall reduction ratio N does not have
// to be an integer.
//
// The sizes of the pooling regions are generated randomly but are fairly uniform.
// For example, let's look at the height dimension, and the constraints on the
// list of rows that will be pool boundaries.
//
// First we define the following:
//
// 1. input_row_length : the number of rows from the input set
// 2. output_row_length : the number of rows in the output, which will be smaller than the input
// 3. alpha = input_row_length / output_row_length : our reduction ratio
// 4. K = floor(alpha)
// 5. row_pooling_sequence : this is the result list of pool boundary rows
//
// Then, row_pooling_sequence should satisfy:
//
// 1. a[0] = 0 : the first value of the sequence is 0
// 2. a[end] = input_row_length : the last value of the sequence is the input row length
// 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
// 4. length(row_pooling_sequence) = output_row_length+1
//
// For more details on fractional max pooling, see this paper:
// [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071)
//
// Arguments:
// value: 4-D with shape `[batch, height, width, channels]`.
// pooling_ratio: Pooling ratio for each dimension of `value`; currently only the
// row and col dimensions are supported, and each ratio should be >= 1.0. For
// example, a valid pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and
// last elements must be 1.0 because pooling on the batch and channels dimensions
// is not allowed. 1.44 and 1.73 are the pooling ratios on the height and width
// dimensions, respectively.
//
// Returns:
// output: output tensor after fractional max pooling.
// row_pooling_sequence: row pooling sequence, needed to calculate gradient.
// col_pooling_sequence: column pooling sequence, needed to calculate gradient.
func FractionalMaxPool(scope *Scope, value tf.Output, pooling_ratio []float32, optional ...FractionalMaxPoolAttr) (output tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{"pooling_ratio": pooling_ratio}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
Type: "FractionalMaxPool",
Input: []tf.Input{
value,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0), op.Output(1), op.Output(2)
}
// Computes the reciprocal of x element-wise.
//
// I.e., \\(y = 1 / x\\).
func Reciprocal(scope *Scope, x tf.Output) (y tf.Output) {
if scope.Err() != nil {
return
}
opspec := tf.OpSpec{
Type: "Reciprocal",
Input: []tf.Input{
x,
},
}
op := scope.AddOperation(opspec)
return op.Output(0)
}
// LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr is an optional argument to LoadTPUEmbeddingAdagradParametersGradAccumDebug.
type LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr func(optionalAttr)
// LoadTPUEmbeddingAdagradParametersGradAccumDebugTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func LoadTPUEmbeddingAdagradParametersGradAccumDebugTableId(value int64) LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr {
return func(m optionalAttr) {
m["table_id"] = value
}
}
// LoadTPUEmbeddingAdagradParametersGradAccumDebugTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingAdagradParametersGradAccumDebugTableName(value string) LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr {
return func(m optionalAttr) {
m["table_name"] = value
}
}
// LoadTPUEmbeddingAdagradParametersGradAccumDebugConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func LoadTPUEmbeddingAdagradParametersGradAccumDebugConfig(value string) LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr {
return func(m optionalAttr) {
m["config"] = value
}
}
// Load Adagrad embedding parameters with debug support.
//
// An op that loads optimization parameters into HBM for embedding. Must be
// preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
// embedding table configuration. For example, this op is used to install
// parameters that are loaded from a checkpoint before a training loop is
// executed.
//
// Arguments:
// parameters: Value of parameters used in the Adagrad optimization algorithm.
// accumulators: Value of accumulators used in the Adagrad optimization algorithm.
// gradient_accumulators: Value of gradient_accumulators used in the Adagrad optimization algorithm.
//
// Returns the created operation.
func LoadTPUEmbeddingAdagradParametersGradAccumDebug(scope *Scope, parameters tf.Output, accumulators tf.Output, gradient_accumulators tf.Output, num_shards int64, shard_id int64, optional ...LoadTPUEmbeddingAdagradParametersGradAccumDebugAttr) (o *tf.Operation) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
Type: "LoadTPUEmbeddingAdagradParametersGradAccumDebug",
Input: []tf.Input{
parameters, accumulators, gradient_accumulators,
},
Attrs: attrs,
}
return scope.AddOperation(opspec)
}
// Strip leading and trailing whitespace from the Tensor.
//
// Arguments:
@@ -45714,133 +45863,6 @@ func ParseExample(scope *Scope, serialized tf.Output, names tf.Output, sparse_ke
return sparse_indices, sparse_values, sparse_shapes, dense_values
}
// MapPeekAttr is an optional argument to MapPeek.
type MapPeekAttr func(optionalAttr)
// MapPeekCapacity sets the optional capacity attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapPeekCapacity(value int64) MapPeekAttr {
return func(m optionalAttr) {
m["capacity"] = value
}
}
// MapPeekMemoryLimit sets the optional memory_limit attribute to value.
// If not specified, defaults to 0
//
// REQUIRES: value >= 0
func MapPeekMemoryLimit(value int64) MapPeekAttr {
return func(m optionalAttr) {
m["memory_limit"] = value
}
}
// MapPeekContainer sets the optional container attribute to value.
// If not specified, defaults to ""
func MapPeekContainer(value string) MapPeekAttr {
return func(m optionalAttr) {
m["container"] = value
}
}
// MapPeekSharedName sets the optional shared_name attribute to value.
// If not specified, defaults to ""
func MapPeekSharedName(value string) MapPeekAttr {
return func(m optionalAttr) {
m["shared_name"] = value
}
}
// Op peeks at the values at the specified key. If the
//
// underlying container does not contain this key,
// this op will block until it does.
func MapPeek(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...MapPeekAttr) (values []tf.Output) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{"dtypes": dtypes}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
Type: "MapPeek",
Input: []tf.Input{
key, indices,
},
Attrs: attrs,
}
op := scope.AddOperation(opspec)
if scope.Err() != nil {
return
}
var idx int
var err error
if values, idx, err = makeOutputList(op, idx, "values"); err != nil {
scope.UpdateErr("MapPeek", err)
return
}
return values
}
// RetrieveTPUEmbeddingCenteredRMSPropParametersAttr is an optional argument to RetrieveTPUEmbeddingCenteredRMSPropParameters.
type RetrieveTPUEmbeddingCenteredRMSPropParametersAttr func(optionalAttr)
// RetrieveTPUEmbeddingCenteredRMSPropParametersTableId sets the optional table_id attribute to value.
// If not specified, defaults to -1
func RetrieveTPUEmbeddingCenteredRMSPropParametersTableId(value int64) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr {
return func(m optionalAttr) {
m["table_id"] = value
}
}
// RetrieveTPUEmbeddingCenteredRMSPropParametersTableName sets the optional table_name attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingCenteredRMSPropParametersTableName(value string) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr {
return func(m optionalAttr) {
m["table_name"] = value
}
}
// RetrieveTPUEmbeddingCenteredRMSPropParametersConfig sets the optional config attribute to value.
// If not specified, defaults to ""
func RetrieveTPUEmbeddingCenteredRMSPropParametersConfig(value string) RetrieveTPUEmbeddingCenteredRMSPropParametersAttr {
return func(m optionalAttr) {
m["config"] = value
}
}
// Retrieve centered RMSProp embedding parameters.
//
// An op that retrieves optimization parameters from embedding to host
// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
// the correct embedding table configuration. For example, this op is
// used to retrieve updated parameters before saving a checkpoint.
//
// Returns:
// parameters: Parameter parameters updated by the centered RMSProp optimization algorithm.
// ms: Parameter ms updated by the centered RMSProp optimization algorithm.
// mom: Parameter mom updated by the centered RMSProp optimization algorithm.
// mg: Parameter mg updated by the centered RMSProp optimization algorithm.
func RetrieveTPUEmbeddingCenteredRMSPropParameters(scope *Scope, num_shards int64, shard_id int64, optional ...RetrieveTPUEmbeddingCenteredRMSPropParametersAttr) (parameters tf.Output, ms tf.Output, mom tf.Output, mg tf.Output) {
if scope.Err() != nil {
return
}
attrs := map[string]interface{}{"num_shards": num_shards, "shard_id": shard_id}
for _, a := range optional {
a(attrs)
}
opspec := tf.OpSpec{
Type: "RetrieveTPUEmbeddingCenteredRMSPropParameters",
Attrs: attrs,
}
op := scope.AddOperation(opspec)
return op.Output(0), op.Output(1), op.Output(2), op.Output(3)
}
// Records the latency of producing `input_dataset` elements in a StatsAggregator.
func LatencyStatsDataset(scope *Scope, input_dataset tf.Output, tag tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) {
if scope.Err() != nil {