Go: Update generated wrapper functions for TensorFlow ops.
PiperOrigin-RevId: 157734029
This commit is contained in: parent f60b6bdcb5, commit 2b75a9a6ea
@@ -208,6 +208,16 @@ func FakeQuantWithMinMaxVarsPerChannelGradientNumBits(value int64) FakeQuantWith
 	}
 }
 
+// FakeQuantWithMinMaxVarsPerChannelGradientNarrowRange sets the optional narrow_range attribute to value.
+//
+// value: Whether to quantize into 2^num_bits - 1 distinct values.
+// If not specified, defaults to false
+func FakeQuantWithMinMaxVarsPerChannelGradientNarrowRange(value bool) FakeQuantWithMinMaxVarsPerChannelGradientAttr {
+	return func(m optionalAttr) {
+		m["narrow_range"] = value
+	}
+}
+
 // Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.
 //
 // Arguments:
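These generated setters all follow Go's functional-options pattern: each *Attr value is a closure that records one optional attribute in a map before the op node is added to the graph. Below is a minimal, self-contained sketch of that pattern; the names optionalAttr and the "narrow_range" key mirror the generated code, while exampleAttr, withNarrowRange, and buildAttrs are illustrative stand-ins rather than part of the generated package.

package main

import "fmt"

// optionalAttr mirrors the map that the generated wrappers fill in before
// constructing the TensorFlow operation.
type optionalAttr map[string]interface{}

// exampleAttr is the functional-option type; every generated *Attr setter
// returns a value of a type shaped like this.
type exampleAttr func(m optionalAttr)

// withNarrowRange records the narrow_range attribute, just as the generated
// FakeQuantWithMinMax*NarrowRange setters do.
func withNarrowRange(value bool) exampleAttr {
	return func(m optionalAttr) { m["narrow_range"] = value }
}

// buildAttrs applies every option to an empty map, the same way the generated
// op constructors do before adding the node to the graph.
func buildAttrs(optional ...exampleAttr) optionalAttr {
	attrs := optionalAttr{}
	for _, o := range optional {
		o(attrs)
	}
	return attrs
}

func main() {
	fmt.Println(buildAttrs(withNarrowRange(true))) // map[narrow_range:true]
}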
@@ -254,16 +264,26 @@ func FakeQuantWithMinMaxVarsNumBits(value int64) FakeQuantWithMinMaxVarsAttr {
 	}
 }
 
+// FakeQuantWithMinMaxVarsNarrowRange sets the optional narrow_range attribute to value.
+// If not specified, defaults to false
+func FakeQuantWithMinMaxVarsNarrowRange(value bool) FakeQuantWithMinMaxVarsAttr {
+	return func(m optionalAttr) {
+		m["narrow_range"] = value
+	}
+}
+
 // Fake-quantize the 'inputs' tensor of type float via global float scalars `min`
 //
 // and `max` to 'outputs' tensor of same shape as `inputs`.
 //
-// [min; max] is the clamping range for the 'inputs' data. Op divides this range
-// into 255 steps (total of 256 values), then replaces each 'inputs' value with the
-// closest of the quantized step values.
-// 'num_bits' is the bitwidth of the quantization; between 2 and 8, inclusive.
+// `[min; max]` define the clamping range for the `inputs` data.
+// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
+// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
+// then de-quantized and output as floats in `[min; max]` interval.
+// `num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
 //
-// This operation has a gradient and thus allows for training `min` and `max` values.
+// This operation has a gradient and thus allows for training `min` and `max`
+// values.
 func FakeQuantWithMinMaxVars(scope *Scope, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsAttr) (outputs tf.Output) {
 	if scope.Err() != nil {
 		return
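A construction-only usage sketch for the wrapper above, showing how the new NarrowRange option is passed next to the existing NumBits option. The placeholder, constants, and bit width are illustrative; creating a session and running the graph are omitted.

package main

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// Float values to be fake-quantized; min and max are scalar tensors so
	// they can be trained through the op's gradient.
	inputs := op.Placeholder(s, tf.Float)
	min := op.Const(s, float32(-6.0))
	max := op.Const(s, float32(6.0))
	// narrow_range=true quantizes into [1; 2^8 - 1] instead of [0; 2^8 - 1].
	outputs := op.FakeQuantWithMinMaxVars(s, inputs, min, max,
		op.FakeQuantWithMinMaxVarsNumBits(8),
		op.FakeQuantWithMinMaxVarsNarrowRange(true))
	_ = outputs
	// Finalize the scope into a graph and run it with tf.NewSession as usual.
}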
@@ -2149,7 +2169,7 @@ func ZerosLike(scope *Scope, x tf.Output) (y tf.Output) {
 // dimension. Must sum to the dimension of value along split_dim.
 // Can contain one -1 indicating that dimension is to be inferred.
 // split_dim: 0-D. The dimension along which to split. Must be in the range
-// `[0, rank(value))`.
+// `[-rank(value), rank(value))`.
 //
 //
 // Returns Tensors whose shape matches that of `value`
@@ -2184,7 +2204,7 @@ func SplitV(scope *Scope, value tf.Output, size_splits tf.Output, split_dim tf.O
 //
 // Arguments:
 // split_dim: 0-D. The dimension along which to split. Must be in the range
-// `[0, rank(value))`.
+// `[-rank(value), rank(value))`.
 // value: The tensor to split.
 // num_split: The number of ways to split. Must evenly divide
 // `value.shape[split_dim]`.
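Both hunks above relax the documented split_dim range to `[-rank(value), rank(value))`, i.e. negative axes are now allowed. A construction-only sketch using the SplitV wrapper from this file with split_dim = -1; the shapes and split sizes are illustrative.

package main

import "github.com/tensorflow/tensorflow/tensorflow/go/op"

func main() {
	s := op.NewScope()
	// A 2x4 matrix to be split along its last dimension.
	value := op.Const(s, [][]float32{{1, 2, 3, 4}, {5, 6, 7, 8}})
	// Split the last dimension into pieces of width 1 and 3.
	sizeSplits := op.Const(s, []int32{1, 3})
	// split_dim = -1 selects the last dimension, which the updated range
	// `[-rank(value), rank(value))` permits.
	splitDim := op.Const(s, int32(-1))
	pieces := op.SplitV(s, value, sizeSplits, splitDim, 2)
	_ = pieces // two tf.Output values with shapes [2, 1] and [2, 3]
}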
@@ -3325,12 +3345,21 @@ func FakeQuantWithMinMaxArgsNumBits(value int64) FakeQuantWithMinMaxArgsAttr {
 	}
 }
 
+// FakeQuantWithMinMaxArgsNarrowRange sets the optional narrow_range attribute to value.
+// If not specified, defaults to false
+func FakeQuantWithMinMaxArgsNarrowRange(value bool) FakeQuantWithMinMaxArgsAttr {
+	return func(m optionalAttr) {
+		m["narrow_range"] = value
+	}
+}
+
 // Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type.
 //
-// Attributes [min; max] define the clamping range for the 'inputs' data. Op
-// divides this range into 255 steps (total of 256 values), then replaces each
-// 'inputs' value with the closest of the quantized step values.
-// 'num_bits' is the bitwidth of the quantization; between 2 and 8, inclusive.
+// Attributes `[min; max]` define the clamping range for the `inputs` data.
+// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
+// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
+// then de-quantized and output as floats in `[min; max]` interval.
+// `num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
 //
 // Quantization is called fake since the output is still in floating point.
 func FakeQuantWithMinMaxArgs(scope *Scope, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsAttr) (outputs tf.Output) {
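A construction-only sketch of the wrapper above with the new NarrowRange option. The FakeQuantWithMinMaxArgsMin and FakeQuantWithMinMaxArgsMax setters are not part of this diff and are assumed to follow the same generated pattern; all values are illustrative.

package main

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	inputs := op.Placeholder(s, tf.Float)
	// Clamp to [-1; 1], quantize with 8 bits, and use the narrow range
	// [1; 255] rather than [0; 255].
	outputs := op.FakeQuantWithMinMaxArgs(s, inputs,
		op.FakeQuantWithMinMaxArgsMin(-1.0),
		op.FakeQuantWithMinMaxArgsMax(1.0),
		op.FakeQuantWithMinMaxArgsNumBits(8),
		op.FakeQuantWithMinMaxArgsNarrowRange(true))
	_ = outputs
}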
@@ -6410,6 +6439,14 @@ func FakeQuantWithMinMaxArgsGradientNumBits(value int64) FakeQuantWithMinMaxArgs
 	}
 }
 
+// FakeQuantWithMinMaxArgsGradientNarrowRange sets the optional narrow_range attribute to value.
+// If not specified, defaults to false
+func FakeQuantWithMinMaxArgsGradientNarrowRange(value bool) FakeQuantWithMinMaxArgsGradientAttr {
+	return func(m optionalAttr) {
+		m["narrow_range"] = value
+	}
+}
+
 // Compute gradients for a FakeQuantWithMinMaxArgs operation.
 //
 // Arguments:
@@ -8601,17 +8638,27 @@ func FakeQuantWithMinMaxVarsPerChannelNumBits(value int64) FakeQuantWithMinMaxVa
 	}
 }
 
+// FakeQuantWithMinMaxVarsPerChannelNarrowRange sets the optional narrow_range attribute to value.
+// If not specified, defaults to false
+func FakeQuantWithMinMaxVarsPerChannelNarrowRange(value bool) FakeQuantWithMinMaxVarsPerChannelAttr {
+	return func(m optionalAttr) {
+		m["narrow_range"] = value
+	}
+}
+
 // Fake-quantize the 'inputs' tensor of type float and one of the shapes: `[d]`,
 //
 // `[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]`
 // to 'outputs' tensor of same shape as `inputs`.
 //
-// [min; max] is the clamping range for the 'inputs' data in the corresponding
-// depth channel. Op divides this range into 255 steps (total of 256 values), then
-// replaces each 'inputs' value with the closest of the quantized step values.
-// 'num_bits' is the bitwidth of the quantization; between 2 and 8, inclusive.
+// `[min; max]` define the clamping range for the `inputs` data.
+// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
+// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
+// then de-quantized and output as floats in `[min; max]` interval.
+// `num_bits` is the bitwidth of the quantization; between 2 and 8, inclusive.
 //
-// This operation has a gradient and thus allows for training `min` and `max` values.
+// This operation has a gradient and thus allows for training `min` and `max`
+// values.
 func FakeQuantWithMinMaxVarsPerChannel(scope *Scope, inputs tf.Output, min tf.Output, max tf.Output, optional ...FakeQuantWithMinMaxVarsPerChannelAttr) (outputs tf.Output) {
 	if scope.Err() != nil {
 		return
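A construction-only sketch for the per-channel variant above: min and max carry one clamping bound per depth channel, so they are rank-1 tensors of shape `[d]`. Shapes and values are illustrative.

package main

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// inputs is expected to have a last dimension of size d = 2 here;
	// min and max provide one clamping bound per channel (shape [d]).
	inputs := op.Placeholder(s, tf.Float)
	min := op.Const(s, []float32{-1.0, -6.0})
	max := op.Const(s, []float32{1.0, 6.0})
	outputs := op.FakeQuantWithMinMaxVarsPerChannel(s, inputs, min, max,
		op.FakeQuantWithMinMaxVarsPerChannelNumBits(8),
		op.FakeQuantWithMinMaxVarsPerChannelNarrowRange(false))
	_ = outputs
}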
@@ -21779,6 +21826,16 @@ func FakeQuantWithMinMaxVarsGradientNumBits(value int64) FakeQuantWithMinMaxVars
 	}
 }
 
+// FakeQuantWithMinMaxVarsGradientNarrowRange sets the optional narrow_range attribute to value.
+//
+// value: Whether to quantize into 2^num_bits - 1 distinct values.
+// If not specified, defaults to false
+func FakeQuantWithMinMaxVarsGradientNarrowRange(value bool) FakeQuantWithMinMaxVarsGradientAttr {
+	return func(m optionalAttr) {
+		m["narrow_range"] = value
+	}
+}
+
 // Compute gradients for a FakeQuantWithMinMaxVars operation.
 //
 // Arguments:
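The gradient wrapper's full argument list is not shown in this diff; assuming it mirrors the op's inputs (gradients, inputs, min, max) and returns the three backprop outputs, a construction-only sketch could look like the following. The narrow_range setting should match whatever the forward op used.

package main

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	gradients := op.Placeholder(s, tf.Float) // backprop values flowing in
	inputs := op.Placeholder(s, tf.Float)    // values seen by the forward op
	min := op.Const(s, float32(-1.0))
	max := op.Const(s, float32(1.0))
	// narrow_range here must match the forward FakeQuantWithMinMaxVars call.
	dInputs, dMin, dMax := op.FakeQuantWithMinMaxVarsGradient(s, gradients, inputs, min, max,
		op.FakeQuantWithMinMaxVarsGradientNarrowRange(true))
	_, _, _ = dInputs, dMin, dMax
}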