Go: Update generated wrapper functions for TensorFlow ops.

PiperOrigin-RevId: 314783162
Change-Id: I89794559babaed55caf4a430a3a343823d278e59
Authored by A. Unique TensorFlower on 2020-06-04 12:39:09 -07:00; committed by TensorFlower Gardener.
parent c614b35b40
commit 6a13b7a995


@@ -9487,6 +9487,14 @@ func DebugIdentityV2DebugUrls(value []string) DebugIdentityV2Attr {
 	}
 }
 
+// DebugIdentityV2CircularBufferSize sets the optional circular_buffer_size attribute to value.
+// If not specified, defaults to 1000
+func DebugIdentityV2CircularBufferSize(value int64) DebugIdentityV2Attr {
+	return func(m optionalAttr) {
+		m["circular_buffer_size"] = value
+	}
+}
+
 // Debug Identity V2 Op.
 //
 // Provides an identity mapping from input to output, while writing the content of
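
For orientation, a minimal sketch of how the new option is consumed by callers of the generated op package follows. It assumes the DebugIdentityV2 wrapper takes the usual (scope, input, optional ...DebugIdentityV2Attr) form used throughout this file; the graph setup is illustrative only and not part of this change.

package main

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// A float placeholder to route through the debug identity op.
	input := op.Placeholder(s, tf.Float)
	// Cap the tfdbg circular buffer at 500 events instead of the default 1000.
	_ = op.DebugIdentityV2(s, input, op.DebugIdentityV2CircularBufferSize(500))
	if _, err := s.Finalize(); err != nil {
		panic(err)
	}
}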
@@ -26646,19 +26654,23 @@ func FakeQuantWithMinMaxArgsNarrowRange(value bool) FakeQuantWithMinMaxArgsAttr
 
 // Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type.
 //
-// Attributes `[min; max]` define the clamping range for the `inputs` data.
-// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
-// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
-// then de-quantized and output as floats in `[min; max]` interval.
-// `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
+// Attributes
+//
+// * `[min; max]` define the clamping range for the `inputs` data.
+// * `inputs` values are quantized into the quantization range (
+// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
+// when it is true) and then de-quantized and output as floats in `[min; max]`
+// interval.
+// * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
 //
 // Before quantization, `min` and `max` values are adjusted with the following
 // logic.
 // It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
 // the behavior can be unexpected:
-// If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
-// If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
-// If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
+//
+// * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
+// * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
+// * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
 // `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
 //
 // Quantization is called fake since the output is still in floating point.
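
Since the same min/max nudging rules recur in the FakeQuant ops below, here is a small standalone sketch that mirrors the documented adjustment; the helper name and the omission of narrow-range handling are choices of this sketch, not part of the generated code.

package main

import (
	"fmt"
	"math"
)

// adjustRange mirrors the nudging described above: it returns (min_adj, max_adj)
// chosen so that zero lands exactly on a quantization step, using the documented
// scale = (max - min) / (2^num_bits - 1).
func adjustRange(min, max float64, numBits int) (minAdj, maxAdj float64) {
	switch {
	case 0 < min && min < max:
		return 0, max - min
	case min < max && max < 0:
		return min - max, 0
	default: // min <= 0 <= max
		scale := (max - min) / (math.Pow(2, float64(numBits)) - 1)
		minAdj = scale * math.Round(min/scale)
		return minAdj, max + minAdj - min
	}
}

func main() {
	// min=-0.95, max=1.0, num_bits=8: the range is nudged to roughly
	// [-0.948235, 1.001765] so that 0 is exactly representable.
	minAdj, maxAdj := adjustRange(-0.95, 1.0, 8)
	fmt.Printf("min_adj=%.6f max_adj=%.6f\n", minAdj, maxAdj)
}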
@@ -37416,24 +37428,29 @@ func FakeQuantWithMinMaxVarsPerChannelNarrowRange(value bool) FakeQuantWithMinMa
 	}
 }
 
-// Fake-quantize the 'inputs' tensor of type float and one of the shapes: `[d]`,
+// Fake-quantize the 'inputs' tensor of type float via per-channel floats
 //
-// `[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]`
-// to 'outputs' tensor of same shape as `inputs`.
+// Fake-quantize the `inputs` tensor of type float per-channel and one of the
+// shapes: `[d]`, `[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max`
+// of shape `[d]` to `outputs` tensor of same shape as `inputs`.
 //
-// `[min; max]` define the clamping range for the `inputs` data.
-// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
-// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
-// then de-quantized and output as floats in `[min; max]` interval.
-// `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
+// Attributes
+//
+// * `[min; max]` define the clamping range for the `inputs` data.
+// * `inputs` values are quantized into the quantization range (
+// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
+// when it is true) and then de-quantized and output as floats in `[min; max]`
+// interval.
+// * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
 //
 // Before quantization, `min` and `max` values are adjusted with the following
 // logic.
 // It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
 // the behavior can be unexpected:
-// If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
-// If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
-// If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
+//
+// * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
+// * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
+// * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
 // `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
 //
 // This operation has a gradient and thus allows for training `min` and `max`
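
As a rough illustration of the per-channel form, the sketch below feeds a [2, 3] input with per-channel min/max of shape [3]. It assumes the generated FakeQuantWithMinMaxVarsPerChannel wrapper takes (scope, inputs, min, max, optional ...Attr), the usual pattern in this file, though that signature is not shown in the hunk above.

package main

import (
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	// inputs: shape [2, 3]; one clamping range per channel, shape [3].
	inputs := op.Const(s, [][]float32{{-1.1, 0.2, 2.7}, {0.4, -0.3, 1.9}})
	minVals := op.Const(s, []float32{-1.0, -0.5, 0.0})
	maxVals := op.Const(s, []float32{1.0, 0.5, 3.0})
	// outputs has the same shape as inputs; each channel is clamped to its
	// own [min; max], snapped to the quantization grid, and emitted as floats.
	_ = op.FakeQuantWithMinMaxVarsPerChannel(s, inputs, minVals, maxVals)
	if _, err := s.Finalize(); err != nil {
		panic(err)
	}
}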
@@ -45265,23 +45282,28 @@ func FakeQuantWithMinMaxVarsNarrowRange(value bool) FakeQuantWithMinMaxVarsAttr
 	}
 }
 
-// Fake-quantize the 'inputs' tensor of type float via global float scalars `min`
+// Fake-quantize the 'inputs' tensor of type float via global float scalars
 //
-// and `max` to 'outputs' tensor of same shape as `inputs`.
+// Fake-quantize the `inputs` tensor of type float via global float scalars
+// `min` and `max` to `outputs` tensor of same shape as `inputs`.
 //
-// `[min; max]` define the clamping range for the `inputs` data.
-// `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]`
-// when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and
-// then de-quantized and output as floats in `[min; max]` interval.
-// `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
+// Attributes
+//
+// * `[min; max]` define the clamping range for the `inputs` data.
+// * `inputs` values are quantized into the quantization range (
+// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
+// when it is true) and then de-quantized and output as floats in `[min; max]`
+// interval.
+// * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
 //
 // Before quantization, `min` and `max` values are adjusted with the following
 // logic.
 // It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
 // the behavior can be unexpected:
-// If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
-// If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
-// If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
+//
+// * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
+// * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
+// * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `,
 // `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
 //
 // This operation has a gradient and thus allows for training `min` and `max`
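
Finally, a similar hedged sketch for the scalar-range variant, combining it with the NarrowRange option defined at the top of this hunk; the (scope, inputs, min, max, optional ...Attr) signature is again assumed rather than shown here.

package main

import (
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

func main() {
	s := op.NewScope()
	inputs := op.Const(s, []float32{-0.9, 0.0, 0.4, 1.2})
	// Scalar clamping range; in training these would typically be variables,
	// since the op has a gradient with respect to min and max.
	minVal := op.Const(s, float32(-0.95))
	maxVal := op.Const(s, float32(1.0))
	// narrow_range=true restricts the quantized range to [1; 2^num_bits - 1].
	_ = op.FakeQuantWithMinMaxVars(s, inputs, minVal, maxVal,
		op.FakeQuantWithMinMaxVarsNarrowRange(true))
	if _, err := s.Finalize(); err != nil {
		panic(err)
	}
}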