From 5b009f971678e47745dc03bbfd22c31dacc3c4bd Mon Sep 17 00:00:00 2001 From: "A. Unique TensorFlower" Date: Wed, 17 Jun 2020 21:45:48 -0700 Subject: [PATCH] Go: Update generated wrapper functions for TensorFlow ops. PiperOrigin-RevId: 317030798 Change-Id: I6ff071201e1c774e9a4e6a4f2d9751c4f70a79ff --- tensorflow/go/op/wrappers.go | 4434 +++++++++++++++++----------------- 1 file changed, 2197 insertions(+), 2237 deletions(-) diff --git a/tensorflow/go/op/wrappers.go b/tensorflow/go/op/wrappers.go index 485baa16f39..188bb3b78bb 100644 --- a/tensorflow/go/op/wrappers.go +++ b/tensorflow/go/op/wrappers.go @@ -10292,56 +10292,6 @@ func TensorArraySizeV2(scope *Scope, handle tf.Output, flow_in tf.Output) (size return op.Output(0) } -// Elementwise computes the bitwise right-shift of `x` and `y`. -// -// Performs a logical shift for unsigned integer types, and an arithmetic shift -// for signed integer types. -// -// If `y` is negative, or greater than or equal to than the width of `x` in bits -// the result is implementation defined. -// -// Example: -// -// ```python -// import tensorflow as tf -// from tensorflow.python.ops import bitwise_ops -// import numpy as np -// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] -// -// for dtype in dtype_list: -// lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) -// rhs = tf.constant([5, 0, 7, 11], dtype=dtype) -// -// right_shift_result = bitwise_ops.right_shift(lhs, rhs) -// -// print(right_shift_result) -// -// # This will print: -// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8) -// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16) -// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32) -// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64) -// -// lhs = np.array([-2, 64, 101, 32], dtype=np.int8) -// rhs = np.array([-1, -5, -3, -14], dtype=np.int8) -// bitwise_ops.right_shift(lhs, rhs) -// # -// ``` -// -func RightShift(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) { - if scope.Err() != nil { - return - } - opspec := tf.OpSpec{ - Type: "RightShift", - Input: []tf.Input{ - x, y, - }, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - // RebatchDatasetAttr is an optional argument to RebatchDataset. type RebatchDatasetAttr func(optionalAttr) @@ -12732,6 +12682,77 @@ func AdjustSaturation(scope *Scope, images tf.Output, scale tf.Output) (output t return op.Output(0) } +// Adjust the contrast of one or more images. +// +// `images` is a tensor of at least 3 dimensions. The last 3 dimensions are +// interpreted as `[height, width, channels]`. The other dimensions only +// represent a collection of images, such as `[batch, height, width, channels].` +// +// Contrast is adjusted independently for each channel of each image. +// +// For each channel, the Op first computes the mean of the image pixels in the +// channel and then adjusts each component of each pixel to +// `(x - mean) * contrast_factor + mean`. +// +// Arguments: +// images: Images to adjust. At least 3-D. +// contrast_factor: A float multiplier for adjusting contrast. +// +// Returns The contrast-adjusted image or images. +func AdjustContrastv2(scope *Scope, images tf.Output, contrast_factor tf.Output) (output tf.Output) { + if scope.Err() != nil { + return + } + opspec := tf.OpSpec{ + Type: "AdjustContrastv2", + Input: []tf.Input{ + images, contrast_factor, + }, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + +// Initializes the multi device iterator with the given dataset. +// +// Arguments: +// dataset: Dataset to be iterated upon. 
+// multi_device_iterator: A MultiDeviceIteratorResource. +// max_buffer_size: The maximum size of the host side per device buffer to keep. +// +// Returns An int64 indicating which incarnation of the MultiDeviceIterator +// is running. +func MultiDeviceIteratorInit(scope *Scope, dataset tf.Output, multi_device_iterator tf.Output, max_buffer_size tf.Output) (incarnation_id tf.Output) { + if scope.Err() != nil { + return + } + opspec := tf.OpSpec{ + Type: "MultiDeviceIteratorInit", + Input: []tf.Input{ + dataset, multi_device_iterator, max_buffer_size, + }, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + +// Deprecated. Disallowed in GraphDef version >= 2. +// +// DEPRECATED at GraphDef version 2: Use AdjustContrastv2 instead +func AdjustContrast(scope *Scope, images tf.Output, contrast_factor tf.Output, min_value tf.Output, max_value tf.Output) (output tf.Output) { + if scope.Err() != nil { + return + } + opspec := tf.OpSpec{ + Type: "AdjustContrast", + Input: []tf.Input{ + images, contrast_factor, min_value, max_value, + }, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + // ExtractJpegShapeAttr is an optional argument to ExtractJpegShape. type ExtractJpegShapeAttr func(optionalAttr) @@ -12773,6 +12794,312 @@ func ExtractJpegShape(scope *Scope, contents tf.Output, optional ...ExtractJpegS return op.Output(0) } +// JPEG encode input image with provided compression quality. +// +// `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. +// `quality` is an int32 jpeg compression quality value between 0 and 100. +// +// +// Arguments: +// images: Images to adjust. At least 3-D. +// quality: An int quality to encode to. +// +// Returns 0-D. JPEG-encoded image. +func EncodeJpegVariableQuality(scope *Scope, images tf.Output, quality tf.Output) (contents tf.Output) { + if scope.Err() != nil { + return + } + opspec := tf.OpSpec{ + Type: "EncodeJpegVariableQuality", + Input: []tf.Input{ + images, quality, + }, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + +// Returns the rank of a tensor. +// +// This operation returns an integer representing the rank of `input`. +// +// For example: +// +// ``` +// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] +// # shape of tensor 't' is [2, 2, 3] +// rank(t) ==> 3 +// ``` +// +// **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank +// of a tensor is the number of indices required to uniquely select each element +// of the tensor. Rank is also known as "order", "degree", or "ndims." +func Rank(scope *Scope, input tf.Output) (output tf.Output) { + if scope.Err() != nil { + return + } + opspec := tf.OpSpec{ + Type: "Rank", + Input: []tf.Input{ + input, + }, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + +// EncodeJpegAttr is an optional argument to EncodeJpeg. +type EncodeJpegAttr func(optionalAttr) + +// EncodeJpegFormat sets the optional format attribute to value. +// +// value: Per pixel image format. +// If not specified, defaults to "" +func EncodeJpegFormat(value string) EncodeJpegAttr { + return func(m optionalAttr) { + m["format"] = value + } +} + +// EncodeJpegQuality sets the optional quality attribute to value. +// +// value: Quality of the compression from 0 to 100 (higher is better and slower). +// If not specified, defaults to 95 +func EncodeJpegQuality(value int64) EncodeJpegAttr { + return func(m optionalAttr) { + m["quality"] = value + } +} + +// EncodeJpegProgressive sets the optional progressive attribute to value. 
+// +// value: If True, create a JPEG that loads progressively (coarse to fine). +// If not specified, defaults to false +func EncodeJpegProgressive(value bool) EncodeJpegAttr { + return func(m optionalAttr) { + m["progressive"] = value + } +} + +// EncodeJpegOptimizeSize sets the optional optimize_size attribute to value. +// +// value: If True, spend CPU/RAM to reduce size with no quality change. +// If not specified, defaults to false +func EncodeJpegOptimizeSize(value bool) EncodeJpegAttr { + return func(m optionalAttr) { + m["optimize_size"] = value + } +} + +// EncodeJpegChromaDownsampling sets the optional chroma_downsampling attribute to value. +// +// value: See http://en.wikipedia.org/wiki/Chroma_subsampling. +// If not specified, defaults to true +func EncodeJpegChromaDownsampling(value bool) EncodeJpegAttr { + return func(m optionalAttr) { + m["chroma_downsampling"] = value + } +} + +// EncodeJpegDensityUnit sets the optional density_unit attribute to value. +// +// value: Unit used to specify `x_density` and `y_density`: +// pixels per inch (`'in'`) or centimeter (`'cm'`). +// If not specified, defaults to "in" +func EncodeJpegDensityUnit(value string) EncodeJpegAttr { + return func(m optionalAttr) { + m["density_unit"] = value + } +} + +// EncodeJpegXDensity sets the optional x_density attribute to value. +// +// value: Horizontal pixels per density unit. +// If not specified, defaults to 300 +func EncodeJpegXDensity(value int64) EncodeJpegAttr { + return func(m optionalAttr) { + m["x_density"] = value + } +} + +// EncodeJpegYDensity sets the optional y_density attribute to value. +// +// value: Vertical pixels per density unit. +// If not specified, defaults to 300 +func EncodeJpegYDensity(value int64) EncodeJpegAttr { + return func(m optionalAttr) { + m["y_density"] = value + } +} + +// EncodeJpegXmpMetadata sets the optional xmp_metadata attribute to value. +// +// value: If not empty, embed this XMP metadata in the image header. +// If not specified, defaults to "" +func EncodeJpegXmpMetadata(value string) EncodeJpegAttr { + return func(m optionalAttr) { + m["xmp_metadata"] = value + } +} + +// JPEG-encode an image. +// +// `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. +// +// The attr `format` can be used to override the color format of the encoded +// output. Values can be: +// +// * `''`: Use a default format based on the number of channels in the image. +// * `grayscale`: Output a grayscale JPEG image. The `channels` dimension +// of `image` must be 1. +// * `rgb`: Output an RGB JPEG image. The `channels` dimension +// of `image` must be 3. +// +// If `format` is not specified or is the empty string, a default format is picked +// in function of the number of channels in `image`: +// +// * 1: Output a grayscale image. +// * 3: Output an RGB image. +// +// Arguments: +// image: 3-D with shape `[height, width, channels]`. +// +// Returns 0-D. JPEG-encoded image. +func EncodeJpeg(scope *Scope, image tf.Output, optional ...EncodeJpegAttr) (contents tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "EncodeJpeg", + Input: []tf.Input{ + image, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + +// DecodeAndCropJpegAttr is an optional argument to DecodeAndCropJpeg. +type DecodeAndCropJpegAttr func(optionalAttr) + +// DecodeAndCropJpegChannels sets the optional channels attribute to value. 
+// +// value: Number of color channels for the decoded image. +// If not specified, defaults to 0 +func DecodeAndCropJpegChannels(value int64) DecodeAndCropJpegAttr { + return func(m optionalAttr) { + m["channels"] = value + } +} + +// DecodeAndCropJpegRatio sets the optional ratio attribute to value. +// +// value: Downscaling ratio. +// If not specified, defaults to 1 +func DecodeAndCropJpegRatio(value int64) DecodeAndCropJpegAttr { + return func(m optionalAttr) { + m["ratio"] = value + } +} + +// DecodeAndCropJpegFancyUpscaling sets the optional fancy_upscaling attribute to value. +// +// value: If true use a slower but nicer upscaling of the +// chroma planes (yuv420/422 only). +// If not specified, defaults to true +func DecodeAndCropJpegFancyUpscaling(value bool) DecodeAndCropJpegAttr { + return func(m optionalAttr) { + m["fancy_upscaling"] = value + } +} + +// DecodeAndCropJpegTryRecoverTruncated sets the optional try_recover_truncated attribute to value. +// +// value: If true try to recover an image from truncated input. +// If not specified, defaults to false +func DecodeAndCropJpegTryRecoverTruncated(value bool) DecodeAndCropJpegAttr { + return func(m optionalAttr) { + m["try_recover_truncated"] = value + } +} + +// DecodeAndCropJpegAcceptableFraction sets the optional acceptable_fraction attribute to value. +// +// value: The minimum required fraction of lines before a truncated +// input is accepted. +// If not specified, defaults to 1 +func DecodeAndCropJpegAcceptableFraction(value float32) DecodeAndCropJpegAttr { + return func(m optionalAttr) { + m["acceptable_fraction"] = value + } +} + +// DecodeAndCropJpegDctMethod sets the optional dct_method attribute to value. +// +// value: string specifying a hint about the algorithm used for +// decompression. Defaults to "" which maps to a system-specific +// default. Currently valid values are ["INTEGER_FAST", +// "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal +// jpeg library changes to a version that does not have that specific +// option.) +// If not specified, defaults to "" +func DecodeAndCropJpegDctMethod(value string) DecodeAndCropJpegAttr { + return func(m optionalAttr) { + m["dct_method"] = value + } +} + +// Decode and Crop a JPEG-encoded image to a uint8 tensor. +// +// The attr `channels` indicates the desired number of color channels for the +// decoded image. +// +// Accepted values are: +// +// * 0: Use the number of channels in the JPEG-encoded image. +// * 1: output a grayscale image. +// * 3: output an RGB image. +// +// If needed, the JPEG-encoded image is transformed to match the requested number +// of color channels. +// +// The attr `ratio` allows downscaling the image by an integer factor during +// decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than +// downscaling the image later. +// +// +// It is equivalent to a combination of decode and crop, but much faster by only +// decoding partial jpeg image. +// +// Arguments: +// contents: 0-D. The JPEG-encoded image. +// crop_window: 1-D. The crop window: [crop_y, crop_x, crop_height, crop_width]. +// +// Returns 3-D with shape `[height, width, channels]`.. 
+func DecodeAndCropJpeg(scope *Scope, contents tf.Output, crop_window tf.Output, optional ...DecodeAndCropJpegAttr) (image tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "DecodeAndCropJpeg", + Input: []tf.Input{ + contents, crop_window, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + // RandomCropAttr is an optional argument to RandomCrop. type RandomCropAttr func(optionalAttr) @@ -12883,46 +13210,6 @@ func ResizeNearestNeighborGrad(scope *Scope, grads tf.Output, size tf.Output, op return op.Output(0) } -// Initializes the multi device iterator with the given dataset. -// -// Arguments: -// dataset: Dataset to be iterated upon. -// multi_device_iterator: A MultiDeviceIteratorResource. -// max_buffer_size: The maximum size of the host side per device buffer to keep. -// -// Returns An int64 indicating which incarnation of the MultiDeviceIterator -// is running. -func MultiDeviceIteratorInit(scope *Scope, dataset tf.Output, multi_device_iterator tf.Output, max_buffer_size tf.Output) (incarnation_id tf.Output) { - if scope.Err() != nil { - return - } - opspec := tf.OpSpec{ - Type: "MultiDeviceIteratorInit", - Input: []tf.Input{ - dataset, multi_device_iterator, max_buffer_size, - }, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - -// Deprecated. Disallowed in GraphDef version >= 2. -// -// DEPRECATED at GraphDef version 2: Use AdjustContrastv2 instead -func AdjustContrast(scope *Scope, images tf.Output, contrast_factor tf.Output, min_value tf.Output, max_value tf.Output) (output tf.Output) { - if scope.Err() != nil { - return - } - opspec := tf.OpSpec{ - Type: "AdjustContrast", - Input: []tf.Input{ - images, contrast_factor, min_value, max_value, - }, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - // A placeholder op for a value that will be fed into the computation. // // Arguments: @@ -20981,26 +21268,6 @@ func Floor(scope *Scope, x tf.Output) (y tf.Output) { return op.Output(0) } -// Computes the Bessel i0e function of `x` element-wise. -// -// Exponentially scaled modified Bessel function of order 0 defined as -// `bessel_i0e(x) = exp(-abs(x)) bessel_i0(x)`. -// -// This function is faster and numerically stabler than `bessel_i0(x)`. -func BesselI0e(scope *Scope, x tf.Output) (y tf.Output) { - if scope.Err() != nil { - return - } - opspec := tf.OpSpec{ - Type: "BesselI0e", - Input: []tf.Input{ - x, - }, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - // Computes the trignometric inverse tangent of x element-wise. // // The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that @@ -22983,122 +23250,6 @@ func QuantizedDepthwiseConv2D(scope *Scope, input tf.Output, filter tf.Output, m return op.Output(0), op.Output(1), op.Output(2) } -// DecodeAndCropJpegAttr is an optional argument to DecodeAndCropJpeg. -type DecodeAndCropJpegAttr func(optionalAttr) - -// DecodeAndCropJpegChannels sets the optional channels attribute to value. -// -// value: Number of color channels for the decoded image. -// If not specified, defaults to 0 -func DecodeAndCropJpegChannels(value int64) DecodeAndCropJpegAttr { - return func(m optionalAttr) { - m["channels"] = value - } -} - -// DecodeAndCropJpegRatio sets the optional ratio attribute to value. -// -// value: Downscaling ratio. 
-// If not specified, defaults to 1 -func DecodeAndCropJpegRatio(value int64) DecodeAndCropJpegAttr { - return func(m optionalAttr) { - m["ratio"] = value - } -} - -// DecodeAndCropJpegFancyUpscaling sets the optional fancy_upscaling attribute to value. -// -// value: If true use a slower but nicer upscaling of the -// chroma planes (yuv420/422 only). -// If not specified, defaults to true -func DecodeAndCropJpegFancyUpscaling(value bool) DecodeAndCropJpegAttr { - return func(m optionalAttr) { - m["fancy_upscaling"] = value - } -} - -// DecodeAndCropJpegTryRecoverTruncated sets the optional try_recover_truncated attribute to value. -// -// value: If true try to recover an image from truncated input. -// If not specified, defaults to false -func DecodeAndCropJpegTryRecoverTruncated(value bool) DecodeAndCropJpegAttr { - return func(m optionalAttr) { - m["try_recover_truncated"] = value - } -} - -// DecodeAndCropJpegAcceptableFraction sets the optional acceptable_fraction attribute to value. -// -// value: The minimum required fraction of lines before a truncated -// input is accepted. -// If not specified, defaults to 1 -func DecodeAndCropJpegAcceptableFraction(value float32) DecodeAndCropJpegAttr { - return func(m optionalAttr) { - m["acceptable_fraction"] = value - } -} - -// DecodeAndCropJpegDctMethod sets the optional dct_method attribute to value. -// -// value: string specifying a hint about the algorithm used for -// decompression. Defaults to "" which maps to a system-specific -// default. Currently valid values are ["INTEGER_FAST", -// "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal -// jpeg library changes to a version that does not have that specific -// option.) -// If not specified, defaults to "" -func DecodeAndCropJpegDctMethod(value string) DecodeAndCropJpegAttr { - return func(m optionalAttr) { - m["dct_method"] = value - } -} - -// Decode and Crop a JPEG-encoded image to a uint8 tensor. -// -// The attr `channels` indicates the desired number of color channels for the -// decoded image. -// -// Accepted values are: -// -// * 0: Use the number of channels in the JPEG-encoded image. -// * 1: output a grayscale image. -// * 3: output an RGB image. -// -// If needed, the JPEG-encoded image is transformed to match the requested number -// of color channels. -// -// The attr `ratio` allows downscaling the image by an integer factor during -// decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than -// downscaling the image later. -// -// -// It is equivalent to a combination of decode and crop, but much faster by only -// decoding partial jpeg image. -// -// Arguments: -// contents: 0-D. The JPEG-encoded image. -// crop_window: 1-D. The crop window: [crop_y, crop_x, crop_height, crop_width]. -// -// Returns 3-D with shape `[height, width, channels]`.. -func DecodeAndCropJpeg(scope *Scope, contents tf.Output, crop_window tf.Output, optional ...DecodeAndCropJpegAttr) (image tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "DecodeAndCropJpeg", - Input: []tf.Input{ - contents, crop_window, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - // QuantizedConv2DPerChannelAttr is an optional argument to QuantizedConv2DPerChannel. 
type QuantizedConv2DPerChannelAttr func(optionalAttr) @@ -23468,318 +23619,6 @@ func RetrieveTPUEmbeddingRMSPropParameters(scope *Scope, num_shards int64, shard return op.Output(0), op.Output(1), op.Output(2) } -// Conv3DBackpropInputV2Attr is an optional argument to Conv3DBackpropInputV2. -type Conv3DBackpropInputV2Attr func(optionalAttr) - -// Conv3DBackpropInputV2DataFormat sets the optional data_format attribute to value. -// -// value: The data format of the input and output data. With the -// default format "NDHWC", the data is stored in the order of: -// [batch, in_depth, in_height, in_width, in_channels]. -// Alternatively, the format could be "NCDHW", the data storage order is: -// [batch, in_channels, in_depth, in_height, in_width]. -// If not specified, defaults to "NDHWC" -func Conv3DBackpropInputV2DataFormat(value string) Conv3DBackpropInputV2Attr { - return func(m optionalAttr) { - m["data_format"] = value - } -} - -// Conv3DBackpropInputV2Dilations sets the optional dilations attribute to value. -// -// value: 1-D tensor of length 5. The dilation factor for each dimension of -// `input`. If set to k > 1, there will be k-1 skipped cells between each -// filter element on that dimension. The dimension order is determined by the -// value of `data_format`, see above for details. Dilations in the batch and -// depth dimensions must be 1. -// If not specified, defaults to -func Conv3DBackpropInputV2Dilations(value []int64) Conv3DBackpropInputV2Attr { - return func(m optionalAttr) { - m["dilations"] = value - } -} - -// Computes the gradients of 3-D convolution with respect to the input. -// -// Arguments: -// input_sizes: An integer vector representing the tensor shape of `input`, -// where `input` is a 5-D -// `[batch, depth, rows, cols, in_channels]` tensor. -// filter: Shape `[depth, rows, cols, in_channels, out_channels]`. -// `in_channels` must match between `input` and `filter`. -// out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, -// out_channels]`. -// strides: 1-D tensor of length 5. The stride of the sliding window for each -// dimension of `input`. Must have `strides[0] = strides[4] = 1`. -// padding: The type of padding algorithm to use. -func Conv3DBackpropInputV2(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropInputV2Attr) (output tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{"strides": strides, "padding": padding} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "Conv3DBackpropInputV2", - Input: []tf.Input{ - input_sizes, filter, out_backprop, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - -// LRNAttr is an optional argument to LRN. -type LRNAttr func(optionalAttr) - -// LRNDepthRadius sets the optional depth_radius attribute to value. -// -// value: 0-D. Half-width of the 1-D normalization window. -// If not specified, defaults to 5 -func LRNDepthRadius(value int64) LRNAttr { - return func(m optionalAttr) { - m["depth_radius"] = value - } -} - -// LRNBias sets the optional bias attribute to value. -// -// value: An offset (usually positive to avoid dividing by 0). -// If not specified, defaults to 1 -func LRNBias(value float32) LRNAttr { - return func(m optionalAttr) { - m["bias"] = value - } -} - -// LRNAlpha sets the optional alpha attribute to value. -// -// value: A scale factor, usually positive. 
-// If not specified, defaults to 1 -func LRNAlpha(value float32) LRNAttr { - return func(m optionalAttr) { - m["alpha"] = value - } -} - -// LRNBeta sets the optional beta attribute to value. -// -// value: An exponent. -// If not specified, defaults to 0.5 -func LRNBeta(value float32) LRNAttr { - return func(m optionalAttr) { - m["beta"] = value - } -} - -// Local Response Normalization. -// -// The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last -// dimension), and each vector is normalized independently. Within a given vector, -// each component is divided by the weighted, squared sum of inputs within -// `depth_radius`. In detail, -// -// sqr_sum[a, b, c, d] = -// sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) -// output = input / (bias + alpha * sqr_sum) ** beta -// -// For details, see [Krizhevsky et al., ImageNet classification with deep -// convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks). -// -// Arguments: -// input: 4-D. -func LRN(scope *Scope, input tf.Output, optional ...LRNAttr) (output tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "LRN", - Input: []tf.Input{ - input, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - -// QuantizedMatMulWithBiasAndReluAndRequantizeAttr is an optional argument to QuantizedMatMulWithBiasAndReluAndRequantize. -type QuantizedMatMulWithBiasAndReluAndRequantizeAttr func(optionalAttr) - -// QuantizedMatMulWithBiasAndReluAndRequantizeToutput sets the optional Toutput attribute to value. -// If not specified, defaults to DT_QUINT8 -func QuantizedMatMulWithBiasAndReluAndRequantizeToutput(value tf.DataType) QuantizedMatMulWithBiasAndReluAndRequantizeAttr { - return func(m optionalAttr) { - m["Toutput"] = value - } -} - -// QuantizedMatMulWithBiasAndReluAndRequantizeTransposeA sets the optional transpose_a attribute to value. -// -// value: If true, `a` is transposed before multiplication. -// If not specified, defaults to false -func QuantizedMatMulWithBiasAndReluAndRequantizeTransposeA(value bool) QuantizedMatMulWithBiasAndReluAndRequantizeAttr { - return func(m optionalAttr) { - m["transpose_a"] = value - } -} - -// QuantizedMatMulWithBiasAndReluAndRequantizeTransposeB sets the optional transpose_b attribute to value. -// -// value: If true, `b` is transposed before multiplication. -// If not specified, defaults to false -func QuantizedMatMulWithBiasAndReluAndRequantizeTransposeB(value bool) QuantizedMatMulWithBiasAndReluAndRequantizeAttr { - return func(m optionalAttr) { - m["transpose_b"] = value - } -} - -// QuantizedMatMulWithBiasAndReluAndRequantizeInputQuantMode sets the optional input_quant_mode attribute to value. -// -// value: Input data quantization mode. Either MIN_FIRST(default) or SCALED. -// If not specified, defaults to "MIN_FIRST" -func QuantizedMatMulWithBiasAndReluAndRequantizeInputQuantMode(value string) QuantizedMatMulWithBiasAndReluAndRequantizeAttr { - return func(m optionalAttr) { - m["input_quant_mode"] = value - } -} - -// Perform a quantized matrix multiplication of `a` by the matrix `b` with bias -// add and relu and requantize fusion. -// -// The inputs must be two-dimensional matrices and 1D bias vector. 
And the inner -// dimension of `a` (after being transposed if `transpose_a` is non-zero) must -// match the outer dimension of `b` (after being transposed if `transposed_b` is -// non-zero). Then do broadcast add operation with bias values on the matrix -// multiplication result. The bias size must match inner dimension of `b`. Then do -// relu activation to get non-negative result. Then do requantize operation to get -// final uint8 result. -// -// Arguments: -// a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`. -// b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`. -// bias: A 1D bias tensor with size matching with inner dimension of `b` (after being -// transposed if `transposed_b` is non-zero). -// min_a: The float value that the lowest quantized `a` value represents. -// max_a: The float value that the highest quantized `a` value represents. -// min_b: The float value that the lowest quantized `b` value represents. -// max_b: The float value that the highest quantized `b` value represents. -// min_freezed_output: The float value that the highest quantized output value after requantize. -// -// -// Returns: -// out -// min_out: The float value that the lowest quantized output value represents. -// max_out: The float value that the highest quantized output value represents. -func QuantizedMatMulWithBiasAndReluAndRequantize(scope *Scope, a tf.Output, b tf.Output, bias tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, min_freezed_output tf.Output, max_freezed_output tf.Output, optional ...QuantizedMatMulWithBiasAndReluAndRequantizeAttr) (out tf.Output, min_out tf.Output, max_out tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "QuantizedMatMulWithBiasAndReluAndRequantize", - Input: []tf.Input{ - a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0), op.Output(1), op.Output(2) -} - -// QuantizedMatMulWithBiasAndReluAttr is an optional argument to QuantizedMatMulWithBiasAndRelu. -type QuantizedMatMulWithBiasAndReluAttr func(optionalAttr) - -// QuantizedMatMulWithBiasAndReluToutput sets the optional Toutput attribute to value. -// If not specified, defaults to DT_QINT32 -func QuantizedMatMulWithBiasAndReluToutput(value tf.DataType) QuantizedMatMulWithBiasAndReluAttr { - return func(m optionalAttr) { - m["Toutput"] = value - } -} - -// QuantizedMatMulWithBiasAndReluTransposeA sets the optional transpose_a attribute to value. -// -// value: If true, `a` is transposed before multiplication. -// If not specified, defaults to false -func QuantizedMatMulWithBiasAndReluTransposeA(value bool) QuantizedMatMulWithBiasAndReluAttr { - return func(m optionalAttr) { - m["transpose_a"] = value - } -} - -// QuantizedMatMulWithBiasAndReluTransposeB sets the optional transpose_b attribute to value. -// -// value: If true, `b` is transposed before multiplication. -// If not specified, defaults to false -func QuantizedMatMulWithBiasAndReluTransposeB(value bool) QuantizedMatMulWithBiasAndReluAttr { - return func(m optionalAttr) { - m["transpose_b"] = value - } -} - -// QuantizedMatMulWithBiasAndReluInputQuantMode sets the optional input_quant_mode attribute to value. -// -// value: Input data quantization mode. Either MIN_FIRST(default) or SCALED. 
-// If not specified, defaults to "MIN_FIRST" -func QuantizedMatMulWithBiasAndReluInputQuantMode(value string) QuantizedMatMulWithBiasAndReluAttr { - return func(m optionalAttr) { - m["input_quant_mode"] = value - } -} - -// Perform a quantized matrix multiplication of `a` by the matrix `b` with bias -// add and relu fusion. -// -// The inputs must be two-dimensional matrices and 1D bias vector. And the inner -// dimension of `a` (after being transposed if `transpose_a` is non-zero) must -// match the outer dimension of `b` (after being transposed if `transposed_b` is -// non-zero). Then do broadcast add operation with bias values on the matrix -// multiplication result. The bias size must match inner dimension of `b`. Then do -// relu activation to get non-negative result. -// -// Arguments: -// a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`. -// b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`. -// bias: A 1D bias tensor with size matching with inner dimension of `b` (after being -// transposed if `transposed_b` is non-zero). -// min_a: The float value that the lowest quantized `a` value represents. -// max_a: The float value that the highest quantized `a` value represents. -// min_b: The float value that the lowest quantized `b` value represents. -// max_b: The float value that the highest quantized `b` value represents. -// -// Returns: -// out -// min_out: The float value that the lowest quantized output value represents. -// max_out: The float value that the highest quantized output value represents. -func QuantizedMatMulWithBiasAndRelu(scope *Scope, a tf.Output, b tf.Output, bias tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, optional ...QuantizedMatMulWithBiasAndReluAttr) (out tf.Output, min_out tf.Output, max_out tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "QuantizedMatMulWithBiasAndRelu", - Input: []tf.Input{ - a, b, bias, min_a, max_a, min_b, max_b, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0), op.Output(1), op.Output(2) -} - // QuantizedMatMulWithBiasAttr is an optional argument to QuantizedMatMulWithBias. type QuantizedMatMulWithBiasAttr func(optionalAttr) @@ -25672,6 +25511,234 @@ func Zeta(scope *Scope, x tf.Output, q tf.Output) (z tf.Output) { return op.Output(0) } +// QuantizedMatMulWithBiasAndReluAndRequantizeAttr is an optional argument to QuantizedMatMulWithBiasAndReluAndRequantize. +type QuantizedMatMulWithBiasAndReluAndRequantizeAttr func(optionalAttr) + +// QuantizedMatMulWithBiasAndReluAndRequantizeToutput sets the optional Toutput attribute to value. +// If not specified, defaults to DT_QUINT8 +func QuantizedMatMulWithBiasAndReluAndRequantizeToutput(value tf.DataType) QuantizedMatMulWithBiasAndReluAndRequantizeAttr { + return func(m optionalAttr) { + m["Toutput"] = value + } +} + +// QuantizedMatMulWithBiasAndReluAndRequantizeTransposeA sets the optional transpose_a attribute to value. +// +// value: If true, `a` is transposed before multiplication. +// If not specified, defaults to false +func QuantizedMatMulWithBiasAndReluAndRequantizeTransposeA(value bool) QuantizedMatMulWithBiasAndReluAndRequantizeAttr { + return func(m optionalAttr) { + m["transpose_a"] = value + } +} + +// QuantizedMatMulWithBiasAndReluAndRequantizeTransposeB sets the optional transpose_b attribute to value. 
+// +// value: If true, `b` is transposed before multiplication. +// If not specified, defaults to false +func QuantizedMatMulWithBiasAndReluAndRequantizeTransposeB(value bool) QuantizedMatMulWithBiasAndReluAndRequantizeAttr { + return func(m optionalAttr) { + m["transpose_b"] = value + } +} + +// QuantizedMatMulWithBiasAndReluAndRequantizeInputQuantMode sets the optional input_quant_mode attribute to value. +// +// value: Input data quantization mode. Either MIN_FIRST(default) or SCALED. +// If not specified, defaults to "MIN_FIRST" +func QuantizedMatMulWithBiasAndReluAndRequantizeInputQuantMode(value string) QuantizedMatMulWithBiasAndReluAndRequantizeAttr { + return func(m optionalAttr) { + m["input_quant_mode"] = value + } +} + +// Perform a quantized matrix multiplication of `a` by the matrix `b` with bias +// add and relu and requantize fusion. +// +// The inputs must be two-dimensional matrices and 1D bias vector. And the inner +// dimension of `a` (after being transposed if `transpose_a` is non-zero) must +// match the outer dimension of `b` (after being transposed if `transposed_b` is +// non-zero). Then do broadcast add operation with bias values on the matrix +// multiplication result. The bias size must match inner dimension of `b`. Then do +// relu activation to get non-negative result. Then do requantize operation to get +// final uint8 result. +// +// Arguments: +// a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`. +// b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`. +// bias: A 1D bias tensor with size matching with inner dimension of `b` (after being +// transposed if `transposed_b` is non-zero). +// min_a: The float value that the lowest quantized `a` value represents. +// max_a: The float value that the highest quantized `a` value represents. +// min_b: The float value that the lowest quantized `b` value represents. +// max_b: The float value that the highest quantized `b` value represents. +// min_freezed_output: The float value that the highest quantized output value after requantize. +// +// +// Returns: +// out +// min_out: The float value that the lowest quantized output value represents. +// max_out: The float value that the highest quantized output value represents. +func QuantizedMatMulWithBiasAndReluAndRequantize(scope *Scope, a tf.Output, b tf.Output, bias tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, min_freezed_output tf.Output, max_freezed_output tf.Output, optional ...QuantizedMatMulWithBiasAndReluAndRequantizeAttr) (out tf.Output, min_out tf.Output, max_out tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "QuantizedMatMulWithBiasAndReluAndRequantize", + Input: []tf.Input{ + a, b, bias, min_a, max_a, min_b, max_b, min_freezed_output, max_freezed_output, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0), op.Output(1), op.Output(2) +} + +// Conv3DBackpropInputV2Attr is an optional argument to Conv3DBackpropInputV2. +type Conv3DBackpropInputV2Attr func(optionalAttr) + +// Conv3DBackpropInputV2DataFormat sets the optional data_format attribute to value. +// +// value: The data format of the input and output data. With the +// default format "NDHWC", the data is stored in the order of: +// [batch, in_depth, in_height, in_width, in_channels]. 
+// Alternatively, the format could be "NCDHW", the data storage order is: +// [batch, in_channels, in_depth, in_height, in_width]. +// If not specified, defaults to "NDHWC" +func Conv3DBackpropInputV2DataFormat(value string) Conv3DBackpropInputV2Attr { + return func(m optionalAttr) { + m["data_format"] = value + } +} + +// Conv3DBackpropInputV2Dilations sets the optional dilations attribute to value. +// +// value: 1-D tensor of length 5. The dilation factor for each dimension of +// `input`. If set to k > 1, there will be k-1 skipped cells between each +// filter element on that dimension. The dimension order is determined by the +// value of `data_format`, see above for details. Dilations in the batch and +// depth dimensions must be 1. +// If not specified, defaults to +func Conv3DBackpropInputV2Dilations(value []int64) Conv3DBackpropInputV2Attr { + return func(m optionalAttr) { + m["dilations"] = value + } +} + +// Computes the gradients of 3-D convolution with respect to the input. +// +// Arguments: +// input_sizes: An integer vector representing the tensor shape of `input`, +// where `input` is a 5-D +// `[batch, depth, rows, cols, in_channels]` tensor. +// filter: Shape `[depth, rows, cols, in_channels, out_channels]`. +// `in_channels` must match between `input` and `filter`. +// out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, +// out_channels]`. +// strides: 1-D tensor of length 5. The stride of the sliding window for each +// dimension of `input`. Must have `strides[0] = strides[4] = 1`. +// padding: The type of padding algorithm to use. +func Conv3DBackpropInputV2(scope *Scope, input_sizes tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropInputV2Attr) (output tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{"strides": strides, "padding": padding} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "Conv3DBackpropInputV2", + Input: []tf.Input{ + input_sizes, filter, out_backprop, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + +// LRNAttr is an optional argument to LRN. +type LRNAttr func(optionalAttr) + +// LRNDepthRadius sets the optional depth_radius attribute to value. +// +// value: 0-D. Half-width of the 1-D normalization window. +// If not specified, defaults to 5 +func LRNDepthRadius(value int64) LRNAttr { + return func(m optionalAttr) { + m["depth_radius"] = value + } +} + +// LRNBias sets the optional bias attribute to value. +// +// value: An offset (usually positive to avoid dividing by 0). +// If not specified, defaults to 1 +func LRNBias(value float32) LRNAttr { + return func(m optionalAttr) { + m["bias"] = value + } +} + +// LRNAlpha sets the optional alpha attribute to value. +// +// value: A scale factor, usually positive. +// If not specified, defaults to 1 +func LRNAlpha(value float32) LRNAttr { + return func(m optionalAttr) { + m["alpha"] = value + } +} + +// LRNBeta sets the optional beta attribute to value. +// +// value: An exponent. +// If not specified, defaults to 0.5 +func LRNBeta(value float32) LRNAttr { + return func(m optionalAttr) { + m["beta"] = value + } +} + +// Local Response Normalization. +// +// The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last +// dimension), and each vector is normalized independently. Within a given vector, +// each component is divided by the weighted, squared sum of inputs within +// `depth_radius`. 
In detail, +// +// sqr_sum[a, b, c, d] = +// sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) +// output = input / (bias + alpha * sqr_sum) ** beta +// +// For details, see [Krizhevsky et al., ImageNet classification with deep +// convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks). +// +// Arguments: +// input: 4-D. +func LRN(scope *Scope, input tf.Output, optional ...LRNAttr) (output tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "LRN", + Input: []tf.Input{ + input, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + // Returns which elements of x are Inf. // // @compatibility(numpy) @@ -26270,34 +26337,6 @@ func FusedPadConv2D(scope *Scope, input tf.Output, paddings tf.Output, filter tf return op.Output(0) } -// Adjust the hue of one or more images. -// -// `images` is a tensor of at least 3 dimensions. The last dimension is -// interpreted as channels, and must be three. -// -// The input image is considered in the RGB colorspace. Conceptually, the RGB -// colors are first mapped into HSV. A delta is then applied all the hue values, -// and then remapped back to RGB colorspace. -// -// Arguments: -// images: Images to adjust. At least 3-D. -// delta: A float delta to add to the hue. -// -// Returns The hue-adjusted image or images. -func AdjustHue(scope *Scope, images tf.Output, delta tf.Output) (output tf.Output) { - if scope.Err() != nil { - return - } - opspec := tf.OpSpec{ - Type: "AdjustHue", - Input: []tf.Input{ - images, delta, - }, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - // List of the given size with empty elements. // // element_shape: the shape of the future elements of the list @@ -26481,36 +26520,267 @@ func BiasAddV1(scope *Scope, value tf.Output, bias tf.Output) (output tf.Output) return op.Output(0) } -// BiasAddAttr is an optional argument to BiasAdd. -type BiasAddAttr func(optionalAttr) - -// BiasAddDataFormat sets the optional data_format attribute to value. +// Creates a Dataset that returns pseudorandom numbers. // -// value: Specify the data format of the input and output data. With the -// default format "NHWC", the bias tensor will be added to the last dimension -// of the value tensor. -// Alternatively, the format could be "NCHW", the data storage order of: -// [batch, in_channels, in_height, in_width]. -// The tensor will be added to "in_channels", the third-to-the-last -// dimension. -// If not specified, defaults to "NHWC" -func BiasAddDataFormat(value string) BiasAddAttr { +// Creates a Dataset that returns a stream of uniformly distributed +// pseudorandom 64-bit signed integers. +// +// In the TensorFlow Python API, you can instantiate this dataset via the +// class `tf.data.experimental.RandomDataset`. +// +// Instances of this dataset are also created as a result of the +// `hoist_random_uniform` static optimization. Whether this optimization is +// performed is determined by the `experimental_optimization.hoist_random_uniform` +// option of `tf.data.Options`. +// +// Arguments: +// seed: A scalar seed for the random number generator. If either seed or +// seed2 is set to be non-zero, the random number generator is seeded +// by the given seed. Otherwise, a random seed is used. +// seed2: A second scalar seed to avoid seed collision. 
+// +// +func RandomDataset(scope *Scope, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes} + opspec := tf.OpSpec{ + Type: "RandomDataset", + Input: []tf.Input{ + seed, seed2, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + +// FractionalAvgPoolAttr is an optional argument to FractionalAvgPool. +type FractionalAvgPoolAttr func(optionalAttr) + +// FractionalAvgPoolPseudoRandom sets the optional pseudo_random attribute to value. +// +// value: When set to True, generates the pooling sequence in a +// pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin +// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for +// difference between pseudorandom and random. +// If not specified, defaults to false +func FractionalAvgPoolPseudoRandom(value bool) FractionalAvgPoolAttr { return func(m optionalAttr) { - m["data_format"] = value + m["pseudo_random"] = value } } -// Adds `bias` to `value`. +// FractionalAvgPoolOverlapping sets the optional overlapping attribute to value. // -// This is a special case of `tf.add` where `bias` is restricted to be 1-D. -// Broadcasting is supported, so `value` may have any number of dimensions. +// value: When set to True, it means when pooling, the values at the boundary +// of adjacent pooling cells are used by both cells. For example: +// +// `index 0 1 2 3 4` +// +// `value 20 5 16 3 7` +// +// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. +// The result would be [41/3, 26/3] for fractional avg pooling. +// If not specified, defaults to false +func FractionalAvgPoolOverlapping(value bool) FractionalAvgPoolAttr { + return func(m optionalAttr) { + m["overlapping"] = value + } +} + +// FractionalAvgPoolDeterministic sets the optional deterministic attribute to value. +// +// value: When set to True, a fixed pooling region will be used when +// iterating over a FractionalAvgPool node in the computation graph. Mainly used +// in unit test to make FractionalAvgPool deterministic. +// If not specified, defaults to false +func FractionalAvgPoolDeterministic(value bool) FractionalAvgPoolAttr { + return func(m optionalAttr) { + m["deterministic"] = value + } +} + +// FractionalAvgPoolSeed sets the optional seed attribute to value. +// +// value: If either seed or seed2 are set to be non-zero, the random number +// generator is seeded by the given seed. Otherwise, it is seeded by a +// random seed. +// If not specified, defaults to 0 +func FractionalAvgPoolSeed(value int64) FractionalAvgPoolAttr { + return func(m optionalAttr) { + m["seed"] = value + } +} + +// FractionalAvgPoolSeed2 sets the optional seed2 attribute to value. +// +// value: An second seed to avoid seed collision. +// If not specified, defaults to 0 +func FractionalAvgPoolSeed2(value int64) FractionalAvgPoolAttr { + return func(m optionalAttr) { + m["seed2"] = value + } +} + +// Performs fractional average pooling on the input. +// +// Fractional average pooling is similar to Fractional max pooling in the pooling +// region generation step. The only difference is that after pooling regions are +// generated, a mean operation is performed instead of a max operation in each +// pooling region. // // Arguments: -// value: Any number of dimensions. -// bias: 1-D with size the last dimension of `value`. 
+// value: 4-D with shape `[batch, height, width, channels]`. +// pooling_ratio: Pooling ratio for each dimension of `value`, currently only +// supports row and col dimension and should be >= 1.0. For example, a valid +// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements +// must be 1.0 because we don't allow pooling on batch and channels +// dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions +// respectively. // -// Returns Broadcasted sum of `value` and `bias`. -func BiasAdd(scope *Scope, value tf.Output, bias tf.Output, optional ...BiasAddAttr) (output tf.Output) { +// Returns: +// output: output tensor after fractional avg pooling. +// row_pooling_sequence: row pooling sequence, needed to calculate gradient. +// col_pooling_sequence: column pooling sequence, needed to calculate gradient. +func FractionalAvgPool(scope *Scope, value tf.Output, pooling_ratio []float32, optional ...FractionalAvgPoolAttr) (output tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{"pooling_ratio": pooling_ratio} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "FractionalAvgPool", + Input: []tf.Input{ + value, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0), op.Output(1), op.Output(2) +} + +// MapStageAttr is an optional argument to MapStage. +type MapStageAttr func(optionalAttr) + +// MapStageCapacity sets the optional capacity attribute to value. +// +// value: Maximum number of elements in the Staging Area. If > 0, inserts +// on the container will block when the capacity is reached. +// If not specified, defaults to 0 +// +// REQUIRES: value >= 0 +func MapStageCapacity(value int64) MapStageAttr { + return func(m optionalAttr) { + m["capacity"] = value + } +} + +// MapStageMemoryLimit sets the optional memory_limit attribute to value. +// If not specified, defaults to 0 +// +// REQUIRES: value >= 0 +func MapStageMemoryLimit(value int64) MapStageAttr { + return func(m optionalAttr) { + m["memory_limit"] = value + } +} + +// MapStageContainer sets the optional container attribute to value. +// +// value: If non-empty, this queue is placed in the given container. Otherwise, +// a default container is used. +// If not specified, defaults to "" +func MapStageContainer(value string) MapStageAttr { + return func(m optionalAttr) { + m["container"] = value + } +} + +// MapStageSharedName sets the optional shared_name attribute to value. +// +// value: It is necessary to match this name to the matching Unstage Op. +// If not specified, defaults to "" +func MapStageSharedName(value string) MapStageAttr { + return func(m optionalAttr) { + m["shared_name"] = value + } +} + +// Stage (key, values) in the underlying container which behaves like a hashtable. +// +// Arguments: +// key: int64 +// +// values: a list of tensors +// dtypes A list of data types that inserted values should adhere to. +// +// +// Returns the created operation. 
+func MapStage(scope *Scope, key tf.Output, indices tf.Output, values []tf.Output, dtypes []tf.DataType, optional ...MapStageAttr) (o *tf.Operation) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{"dtypes": dtypes} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "MapStage", + Input: []tf.Input{ + key, indices, tf.OutputList(values), + }, + Attrs: attrs, + } + return scope.AddOperation(opspec) +} + +// SparseTensorDenseMatMulAttr is an optional argument to SparseTensorDenseMatMul. +type SparseTensorDenseMatMulAttr func(optionalAttr) + +// SparseTensorDenseMatMulAdjointA sets the optional adjoint_a attribute to value. +// +// value: Use the adjoint of A in the matrix multiply. If A is complex, this +// is transpose(conj(A)). Otherwise it's transpose(A). +// If not specified, defaults to false +func SparseTensorDenseMatMulAdjointA(value bool) SparseTensorDenseMatMulAttr { + return func(m optionalAttr) { + m["adjoint_a"] = value + } +} + +// SparseTensorDenseMatMulAdjointB sets the optional adjoint_b attribute to value. +// +// value: Use the adjoint of B in the matrix multiply. If B is complex, this +// is transpose(conj(B)). Otherwise it's transpose(B). +// If not specified, defaults to false +func SparseTensorDenseMatMulAdjointB(value bool) SparseTensorDenseMatMulAttr { + return func(m optionalAttr) { + m["adjoint_b"] = value + } +} + +// Multiply SparseTensor (of rank 2) "A" by dense matrix "B". +// +// No validity checking is performed on the indices of A. However, the following +// input format is recommended for optimal behavior: +// +// if adjoint_a == false: +// A should be sorted in lexicographically increasing order. Use SparseReorder +// if you're not sure. +// if adjoint_a == true: +// A should be sorted in order of increasing dimension 1 (i.e., "column major" +// order instead of "row major" order). +// +// Arguments: +// a_indices: 2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix. +// a_values: 1-D. The `values` of the `SparseTensor`, size `[nnz]` Vector. +// a_shape: 1-D. The `shape` of the `SparseTensor`, size `[2]` Vector. +// b: 2-D. A dense Matrix. +func SparseTensorDenseMatMul(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b tf.Output, optional ...SparseTensorDenseMatMulAttr) (product tf.Output) { if scope.Err() != nil { return } @@ -26519,9 +26789,9 @@ func BiasAdd(scope *Scope, value tf.Output, bias tf.Output, optional ...BiasAddA a(attrs) } opspec := tf.OpSpec{ - Type: "BiasAdd", + Type: "SparseTensorDenseMatMul", Input: []tf.Input{ - value, bias, + a_indices, a_values, a_shape, b, }, Attrs: attrs, } @@ -26529,6 +26799,86 @@ func BiasAdd(scope *Scope, value tf.Output, bias tf.Output, optional ...BiasAddA return op.Output(0) } +// FusedBatchNormGradV2Attr is an optional argument to FusedBatchNormGradV2. +type FusedBatchNormGradV2Attr func(optionalAttr) + +// FusedBatchNormGradV2Epsilon sets the optional epsilon attribute to value. +// +// value: A small float number added to the variance of x. +// If not specified, defaults to 0.0001 +func FusedBatchNormGradV2Epsilon(value float32) FusedBatchNormGradV2Attr { + return func(m optionalAttr) { + m["epsilon"] = value + } +} + +// FusedBatchNormGradV2DataFormat sets the optional data_format attribute to value. +// +// value: The data format for y_backprop, x, x_backprop. +// Either "NHWC" (default) or "NCHW". 
+// If not specified, defaults to "NHWC" +func FusedBatchNormGradV2DataFormat(value string) FusedBatchNormGradV2Attr { + return func(m optionalAttr) { + m["data_format"] = value + } +} + +// FusedBatchNormGradV2IsTraining sets the optional is_training attribute to value. +// +// value: A bool value to indicate the operation is for training (default) +// or inference. +// If not specified, defaults to true +func FusedBatchNormGradV2IsTraining(value bool) FusedBatchNormGradV2Attr { + return func(m optionalAttr) { + m["is_training"] = value + } +} + +// Gradient for batch normalization. +// +// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". +// The size of 1D Tensors matches the dimension C of the 4D Tensors. +// +// Arguments: +// y_backprop: A 4D Tensor for the gradient with respect to y. +// x: A 4D Tensor for input data. +// scale: A 1D Tensor for scaling factor, to scale the normalized x. +// reserve_space_1: When is_training is True, a 1D Tensor for the computed batch +// mean to be reused in gradient computation. When is_training is +// False, a 1D Tensor for the population mean to be reused in both +// 1st and 2nd order gradient computation. +// reserve_space_2: When is_training is True, a 1D Tensor for the computed batch +// variance (inverted variance in the cuDNN case) to be reused in +// gradient computation. When is_training is False, a 1D Tensor +// for the population variance to be reused in both 1st and 2nd +// order gradient computation. +// +// Returns: +// x_backprop: A 4D Tensor for the gradient with respect to x. +// scale_backprop: A 1D Tensor for the gradient with respect to scale. +// offset_backprop: A 1D Tensor for the gradient with respect to offset. +// reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm. +// reserve_space_4: Unused placeholder to match the variance input +// in FusedBatchNorm. +func FusedBatchNormGradV2(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, optional ...FusedBatchNormGradV2Attr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_3 tf.Output, reserve_space_4 tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "FusedBatchNormGradV2", + Input: []tf.Input{ + y_backprop, x, scale, reserve_space_1, reserve_space_2, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4) +} + // Computes the gradient for the inverse of `x` wrt its input. // // Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy` @@ -26580,132 +26930,44 @@ func SparseDenseCwiseAdd(scope *Scope, sp_indices tf.Output, sp_values tf.Output return op.Output(0) } -// EncodeBase64Attr is an optional argument to EncodeBase64. -type EncodeBase64Attr func(optionalAttr) - -// EncodeBase64Pad sets the optional pad attribute to value. +// Counts the number of occurrences of each value in an integer array. // -// value: Bool whether padding is applied at the ends. -// If not specified, defaults to false -func EncodeBase64Pad(value bool) EncodeBase64Attr { - return func(m optionalAttr) { - m["pad"] = value - } -} - -// Encode strings into web-safe base64 format. +// Outputs a vector with length `size` and the same dtype as `weights`. 
If +// `weights` are empty, then index `i` stores the number of times the value `i` is +// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of +// the value in `weights` at each index where the corresponding value in `arr` is +// `i`. // -// Refer to the following article for more information on base64 format: -// en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the -// end so that the encoded has length multiple of 4. See Padding section of the -// link above. -// -// Web-safe means that the encoder uses - and _ instead of + and /. +// Values in `arr` outside of the range [0, size) are ignored. // // Arguments: -// input: Strings to be encoded. +// arr: int32 `Tensor`. +// size: non-negative int32 scalar `Tensor`. +// weights: is an int32, int64, float32, or float64 `Tensor` with the same +// shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights +// equal to 1. // -// Returns Input strings encoded in base64. -func EncodeBase64(scope *Scope, input tf.Output, optional ...EncodeBase64Attr) (output tf.Output) { +// Returns 1D `Tensor` with length equal to `size`. The counts or summed weights for +// each value in the range [0, size). +func Bincount(scope *Scope, arr tf.Output, size tf.Output, weights tf.Output) (bins tf.Output) { if scope.Err() != nil { return } - attrs := map[string]interface{}{} - for _, a := range optional { - a(attrs) - } opspec := tf.OpSpec{ - Type: "EncodeBase64", + Type: "Bincount", Input: []tf.Input{ - input, + arr, size, weights, }, - Attrs: attrs, } op := scope.AddOperation(opspec) return op.Output(0) } -// FakeQuantWithMinMaxArgsAttr is an optional argument to FakeQuantWithMinMaxArgs. -type FakeQuantWithMinMaxArgsAttr func(optionalAttr) - -// FakeQuantWithMinMaxArgsMin sets the optional min attribute to value. -// If not specified, defaults to -6 -func FakeQuantWithMinMaxArgsMin(value float32) FakeQuantWithMinMaxArgsAttr { - return func(m optionalAttr) { - m["min"] = value - } -} - -// FakeQuantWithMinMaxArgsMax sets the optional max attribute to value. -// If not specified, defaults to 6 -func FakeQuantWithMinMaxArgsMax(value float32) FakeQuantWithMinMaxArgsAttr { - return func(m optionalAttr) { - m["max"] = value - } -} - -// FakeQuantWithMinMaxArgsNumBits sets the optional num_bits attribute to value. -// If not specified, defaults to 8 -func FakeQuantWithMinMaxArgsNumBits(value int64) FakeQuantWithMinMaxArgsAttr { - return func(m optionalAttr) { - m["num_bits"] = value - } -} - -// FakeQuantWithMinMaxArgsNarrowRange sets the optional narrow_range attribute to value. -// If not specified, defaults to false -func FakeQuantWithMinMaxArgsNarrowRange(value bool) FakeQuantWithMinMaxArgsAttr { - return func(m optionalAttr) { - m["narrow_range"] = value - } -} - -// Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type. -// -// Attributes -// -// * `[min; max]` define the clamping range for the `inputs` data. -// * `inputs` values are quantized into the quantization range ( -// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` -// when it is true) and then de-quantized and output as floats in `[min; max]` -// interval. -// * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. -// -// Before quantization, `min` and `max` values are adjusted with the following -// logic. -// It is suggested to have `min <= 0 <= max`. 
If `0` is not in the range of values, -// the behavior can be unexpected: -// -// * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. -// * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. -// * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, -// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. -// -// Quantization is called fake since the output is still in floating point. -func FakeQuantWithMinMaxArgs(scope *Scope, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsAttr) (outputs tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "FakeQuantWithMinMaxArgs", - Input: []tf.Input{ - inputs, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - -// Batch normalization. +// Gradients for batch normalization. // // DEPRECATED at GraphDef version 9: Use tf.nn.batch_normalization() // -// This op is deprecated. Prefer `tf.nn.batch_normalization`. +// This op is deprecated. See `tf.nn.batch_normalization`. // // Arguments: // t: A 4D input Tensor. @@ -26715,389 +26977,34 @@ func FakeQuantWithMinMaxArgs(scope *Scope, inputs tf.Output, optional ...FakeQua // v: A 1D variance Tensor with size matching the last dimension of t. // This is the second output from tf.nn.moments, // or a saved moving average thereof. -// beta: A 1D beta Tensor with size matching the last dimension of t. -// An offset to be added to the normalized tensor. // gamma: A 1D gamma Tensor with size matching the last dimension of t. -// If "scale_after_normalization" is true, this tensor will be multiplied -// with the normalized tensor. +// If "scale_after_normalization" is true, this Tensor will be multiplied +// with the normalized Tensor. +// backprop: 4D backprop Tensor. // variance_epsilon: A small float number to avoid dividing by 0. // scale_after_normalization: A bool indicating whether the resulted tensor // needs to be multiplied with gamma. -func BatchNormWithGlobalNormalization(scope *Scope, t tf.Output, m tf.Output, v tf.Output, beta tf.Output, gamma tf.Output, variance_epsilon float32, scale_after_normalization bool) (result tf.Output) { +// +// Returns: +// dx: 4D backprop tensor for input. +// dm: 1D backprop tensor for mean. +// dv: 1D backprop tensor for variance. +// db: 1D backprop tensor for beta. +// dg: 1D backprop tensor for gamma. +func BatchNormWithGlobalNormalizationGrad(scope *Scope, t tf.Output, m tf.Output, v tf.Output, gamma tf.Output, backprop tf.Output, variance_epsilon float32, scale_after_normalization bool) (dx tf.Output, dm tf.Output, dv tf.Output, db tf.Output, dg tf.Output) { if scope.Err() != nil { return } attrs := map[string]interface{}{"variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization} opspec := tf.OpSpec{ - Type: "BatchNormWithGlobalNormalization", + Type: "BatchNormWithGlobalNormalizationGrad", Input: []tf.Input{ - t, m, v, beta, gamma, + t, m, v, gamma, backprop, }, Attrs: attrs, } op := scope.AddOperation(opspec) - return op.Output(0) -} - -// QuantizedConv2DAttr is an optional argument to QuantizedConv2D. -type QuantizedConv2DAttr func(optionalAttr) - -// QuantizedConv2DOutType sets the optional out_type attribute to value. 
-// If not specified, defaults to DT_QINT32 -func QuantizedConv2DOutType(value tf.DataType) QuantizedConv2DAttr { - return func(m optionalAttr) { - m["out_type"] = value - } -} - -// QuantizedConv2DDilations sets the optional dilations attribute to value. -// -// value: 1-D tensor of length 4. The dilation factor for each dimension of -// `input`. If set to k > 1, there will be k-1 skipped cells between each -// filter element on that dimension. The dimension order is determined by the -// value of `data_format`, see above for details. Dilations in the batch and -// depth dimensions must be 1. -// If not specified, defaults to -func QuantizedConv2DDilations(value []int64) QuantizedConv2DAttr { - return func(m optionalAttr) { - m["dilations"] = value - } -} - -// Computes a 2D convolution given quantized 4D input and filter tensors. -// -// The inputs are quantized tensors where the lowest value represents the real -// number of the associated minimum, and the highest represents the maximum. -// This means that you can only interpret the quantized output in the same way, by -// taking the returned minimum and maximum values into account. -// -// Arguments: -// -// filter: filter's input_depth dimension must match input's depth dimensions. -// min_input: The float value that the lowest quantized input value represents. -// max_input: The float value that the highest quantized input value represents. -// min_filter: The float value that the lowest quantized filter value represents. -// max_filter: The float value that the highest quantized filter value represents. -// strides: The stride of the sliding window for each dimension of the input -// tensor. -// padding: The type of padding algorithm to use. -// -// Returns: -// output -// min_output: The float value that the lowest quantized output value represents. -// max_output: The float value that the highest quantized output value represents. -func QuantizedConv2D(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedConv2DAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{"strides": strides, "padding": padding} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "QuantizedConv2D", - Input: []tf.Input{ - input, filter, min_input, max_input, min_filter, max_filter, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0), op.Output(1), op.Output(2) -} - -// Computes rectified linear 6 gradients for a Relu6 operation. -// -// Arguments: -// gradients: The backpropagated gradients to the corresponding Relu6 operation. -// features: The features passed as input to the corresponding Relu6 operation, or -// its output; using either one produces the same result. -// -// Returns The gradients: -// `gradients * (features > 0) * (features < 6)`. -func Relu6Grad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) { - if scope.Err() != nil { - return - } - opspec := tf.OpSpec{ - Type: "Relu6Grad", - Input: []tf.Input{ - gradients, features, - }, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - -// StringSplitAttr is an optional argument to StringSplit. -type StringSplitAttr func(optionalAttr) - -// StringSplitSkipEmpty sets the optional skip_empty attribute to value. -// -// value: A `bool`. 
If `True`, skip the empty strings from the result. -// If not specified, defaults to true -func StringSplitSkipEmpty(value bool) StringSplitAttr { - return func(m optionalAttr) { - m["skip_empty"] = value - } -} - -// Split elements of `input` based on `delimiter` into a `SparseTensor`. -// -// Let N be the size of source (typically N will be the batch size). Split each -// element of `input` based on `delimiter` and return a `SparseTensor` -// containing the splitted tokens. Empty tokens are ignored. -// -// `delimiter` can be empty, or a string of split characters. If `delimiter` is an -// empty string, each element of `input` is split into individual single-byte -// character strings, including splitting of UTF-8 multibyte sequences. Otherwise -// every character of `delimiter` is a potential split point. -// -// For example: -// N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output -// will be -// -// indices = [0, 0; -// 0, 1; -// 1, 0; -// 1, 1; -// 1, 2] -// shape = [2, 3] -// values = ['hello', 'world', 'a', 'b', 'c'] -// -// Arguments: -// input: 1-D. Strings to split. -// delimiter: 0-D. Delimiter characters (bytes), or empty string. -// -// Returns: -// indices: A dense matrix of int64 representing the indices of the sparse tensor. -// values: A vector of strings corresponding to the splited values. -// shape: a length-2 vector of int64 representing the shape of the sparse -// tensor, where the first value is N and the second value is the maximum number -// of tokens in a single input entry. -func StringSplit(scope *Scope, input tf.Output, delimiter tf.Output, optional ...StringSplitAttr) (indices tf.Output, values tf.Output, shape tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "StringSplit", - Input: []tf.Input{ - input, delimiter, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0), op.Output(1), op.Output(2) -} - -// Assigns sparse updates to the variable referenced by `resource`. -// -// This operation computes -// -// # Scalar indices -// ref[indices, ...] = updates[...] -// -// # Vector indices (for each i) -// ref[indices[i], ...] = updates[i, ...] -// -// # High rank indices (for each i, ..., j) -// ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] -// -// Arguments: -// resource: Should be from a `Variable` node. -// indices: A tensor of indices into the first dimension of `ref`. -// updates: A tensor of updated values to add to `ref`. -// -// Returns the created operation. -func ResourceScatterUpdate(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) { - if scope.Err() != nil { - return - } - opspec := tf.OpSpec{ - Type: "ResourceScatterUpdate", - Input: []tf.Input{ - resource, indices, updates, - }, - } - return scope.AddOperation(opspec) -} - -// EditDistanceAttr is an optional argument to EditDistance. -type EditDistanceAttr func(optionalAttr) - -// EditDistanceNormalize sets the optional normalize attribute to value. -// -// value: boolean (if true, edit distances are normalized by length of truth). -// -// The output is: -// If not specified, defaults to true -func EditDistanceNormalize(value bool) EditDistanceAttr { - return func(m optionalAttr) { - m["normalize"] = value - } -} - -// Computes the (possibly normalized) Levenshtein Edit Distance. 
-// -// The inputs are variable-length sequences provided by SparseTensors -// (hypothesis_indices, hypothesis_values, hypothesis_shape) -// and -// (truth_indices, truth_values, truth_shape). -// -// The inputs are: -// -// Arguments: -// hypothesis_indices: The indices of the hypothesis list SparseTensor. -// This is an N x R int64 matrix. -// hypothesis_values: The values of the hypothesis list SparseTensor. -// This is an N-length vector. -// hypothesis_shape: The shape of the hypothesis list SparseTensor. -// This is an R-length vector. -// truth_indices: The indices of the truth list SparseTensor. -// This is an M x R int64 matrix. -// truth_values: The values of the truth list SparseTensor. -// This is an M-length vector. -// truth_shape: truth indices, vector. -// -// Returns A dense float tensor with rank R - 1. -// -// For the example input: -// -// // hypothesis represents a 2x1 matrix with variable-length values: -// // (0,0) = ["a"] -// // (1,0) = ["b"] -// hypothesis_indices = [[0, 0, 0], -// [1, 0, 0]] -// hypothesis_values = ["a", "b"] -// hypothesis_shape = [2, 1, 1] -// -// // truth represents a 2x2 matrix with variable-length values: -// // (0,0) = [] -// // (0,1) = ["a"] -// // (1,0) = ["b", "c"] -// // (1,1) = ["a"] -// truth_indices = [[0, 1, 0], -// [1, 0, 0], -// [1, 0, 1], -// [1, 1, 0]] -// truth_values = ["a", "b", "c", "a"] -// truth_shape = [2, 2, 2] -// normalize = true -// -// The output will be: -// -// // output is a 2x2 matrix with edit distances normalized by truth lengths. -// output = [[inf, 1.0], // (0,0): no truth, (0,1): no hypothesis -// [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis -func EditDistance(scope *Scope, hypothesis_indices tf.Output, hypothesis_values tf.Output, hypothesis_shape tf.Output, truth_indices tf.Output, truth_values tf.Output, truth_shape tf.Output, optional ...EditDistanceAttr) (output tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "EditDistance", - Input: []tf.Input{ - hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - -// Concatenates a list of `N` tensors along the first dimension. -// -// The input tensors are all required to have size 1 in the first dimension. -// -// For example: -// -// ``` -// # 'x' is [[1, 4]] -// # 'y' is [[2, 5]] -// # 'z' is [[3, 6]] -// parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. -// ``` -// -// The difference between concat and parallel_concat is that concat requires all -// of the inputs be computed before the operation will begin but doesn't require -// that the input shapes be known during graph construction. Parallel concat -// will copy pieces of the input into the output as they become available, in -// some situations this can provide a performance benefit. -// -// Arguments: -// values: Tensors to be concatenated. All must have size 1 in the first dimension -// and same shape. -// shape: the final shape of the result; should be equal to the shapes of any input -// but with the number of input values in the first dimension. -// -// Returns The concatenated tensor. 
-func ParallelConcat(scope *Scope, values []tf.Output, shape tf.Shape) (output tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{"shape": shape} - opspec := tf.OpSpec{ - Type: "ParallelConcat", - Input: []tf.Input{ - tf.OutputList(values), - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - -// AvgPoolGradAttr is an optional argument to AvgPoolGrad. -type AvgPoolGradAttr func(optionalAttr) - -// AvgPoolGradDataFormat sets the optional data_format attribute to value. -// -// value: Specify the data format of the input and output data. With the -// default format "NHWC", the data is stored in the order of: -// [batch, in_height, in_width, in_channels]. -// Alternatively, the format could be "NCHW", the data storage order of: -// [batch, in_channels, in_height, in_width]. -// If not specified, defaults to "NHWC" -func AvgPoolGradDataFormat(value string) AvgPoolGradAttr { - return func(m optionalAttr) { - m["data_format"] = value - } -} - -// Computes gradients of the average pooling function. -// -// Arguments: -// orig_input_shape: 1-D. Shape of the original input to `avg_pool`. -// grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. -// the output of `avg_pool`. -// ksize: The size of the sliding window for each dimension of the input. -// strides: The stride of the sliding window for each dimension of the input. -// padding: The type of padding algorithm to use. -// -// Returns 4-D. Gradients w.r.t. the input of `avg_pool`. -func AvgPoolGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPoolGradAttr) (output tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "AvgPoolGrad", - Input: []tf.Input{ - orig_input_shape, grad, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0) + return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4) } // LoadAndRemapMatrixAttr is an optional argument to LoadAndRemapMatrix. @@ -28344,53 +28251,6 @@ func SaveSlices(scope *Scope, filename tf.Output, tensor_names tf.Output, shapes return scope.AddOperation(opspec) } -// AvgPool3DAttr is an optional argument to AvgPool3D. -type AvgPool3DAttr func(optionalAttr) - -// AvgPool3DDataFormat sets the optional data_format attribute to value. -// -// value: The data format of the input and output data. With the -// default format "NDHWC", the data is stored in the order of: -// [batch, in_depth, in_height, in_width, in_channels]. -// Alternatively, the format could be "NCDHW", the data storage order is: -// [batch, in_channels, in_depth, in_height, in_width]. -// If not specified, defaults to "NDHWC" -func AvgPool3DDataFormat(value string) AvgPool3DAttr { - return func(m optionalAttr) { - m["data_format"] = value - } -} - -// Performs 3D average pooling on the input. -// -// Arguments: -// input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over. -// ksize: 1-D tensor of length 5. The size of the window for each dimension of -// the input tensor. Must have `ksize[0] = ksize[4] = 1`. -// strides: 1-D tensor of length 5. The stride of the sliding window for each -// dimension of `input`. Must have `strides[0] = strides[4] = 1`. -// padding: The type of padding algorithm to use. -// -// Returns The average pooled output tensor. 
-func AvgPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPool3DAttr) (output tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "AvgPool3D", - Input: []tf.Input{ - input, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - // FusedBatchNormGradV3Attr is an optional argument to FusedBatchNormGradV3. type FusedBatchNormGradV3Attr func(optionalAttr) @@ -28474,6 +28334,53 @@ func FusedBatchNormGradV3(scope *Scope, y_backprop tf.Output, x tf.Output, scale return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4) } +// AvgPool3DAttr is an optional argument to AvgPool3D. +type AvgPool3DAttr func(optionalAttr) + +// AvgPool3DDataFormat sets the optional data_format attribute to value. +// +// value: The data format of the input and output data. With the +// default format "NDHWC", the data is stored in the order of: +// [batch, in_depth, in_height, in_width, in_channels]. +// Alternatively, the format could be "NCDHW", the data storage order is: +// [batch, in_channels, in_depth, in_height, in_width]. +// If not specified, defaults to "NDHWC" +func AvgPool3DDataFormat(value string) AvgPool3DAttr { + return func(m optionalAttr) { + m["data_format"] = value + } +} + +// Performs 3D average pooling on the input. +// +// Arguments: +// input: Shape `[batch, depth, rows, cols, channels]` tensor to pool over. +// ksize: 1-D tensor of length 5. The size of the window for each dimension of +// the input tensor. Must have `ksize[0] = ksize[4] = 1`. +// strides: 1-D tensor of length 5. The stride of the sliding window for each +// dimension of `input`. Must have `strides[0] = strides[4] = 1`. +// padding: The type of padding algorithm to use. +// +// Returns The average pooled output tensor. +func AvgPool3D(scope *Scope, input tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPool3DAttr) (output tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "AvgPool3D", + Input: []tf.Input{ + input, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + // Returns the number of records this Reader has produced. // // This is the same as the number of ReaderRead executions that have @@ -29902,6 +29809,247 @@ func Neg(scope *Scope, x tf.Output) (y tf.Output) { return op.Output(0) } +// Concatenates tensors along one dimension. +// +// Arguments: +// values: List of `N` Tensors to concatenate. Their ranks and types must match, +// and their sizes must match in all dimensions except `concat_dim`. +// axis: 0-D. The dimension along which to concatenate. Must be in the +// range [-rank(values), rank(values)). +// +// Returns A `Tensor` with the concatenation of values stacked along the +// `concat_dim` dimension. This tensor's shape matches that of `values` except +// in `concat_dim` where it has the sum of the sizes. 
+func ConcatV2(scope *Scope, values []tf.Output, axis tf.Output) (output tf.Output) { + if scope.Err() != nil { + return + } + opspec := tf.OpSpec{ + Type: "ConcatV2", + Input: []tf.Input{ + tf.OutputList(values), axis, + }, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + +// Elementwise computes the bitwise right-shift of `x` and `y`. +// +// Performs a logical shift for unsigned integer types, and an arithmetic shift +// for signed integer types. +// +// If `y` is negative, or greater than or equal to than the width of `x` in bits +// the result is implementation defined. +// +// Example: +// +// ```python +// import tensorflow as tf +// from tensorflow.python.ops import bitwise_ops +// import numpy as np +// dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] +// +// for dtype in dtype_list: +// lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) +// rhs = tf.constant([5, 0, 7, 11], dtype=dtype) +// +// right_shift_result = bitwise_ops.right_shift(lhs, rhs) +// +// print(right_shift_result) +// +// # This will print: +// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8) +// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16) +// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32) +// # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64) +// +// lhs = np.array([-2, 64, 101, 32], dtype=np.int8) +// rhs = np.array([-1, -5, -3, -14], dtype=np.int8) +// bitwise_ops.right_shift(lhs, rhs) +// # +// ``` +// +func RightShift(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) { + if scope.Err() != nil { + return + } + opspec := tf.OpSpec{ + Type: "RightShift", + Input: []tf.Input{ + x, y, + }, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + +// IteratorFromStringHandleAttr is an optional argument to IteratorFromStringHandle. +type IteratorFromStringHandleAttr func(optionalAttr) + +// IteratorFromStringHandleOutputTypes sets the optional output_types attribute to value. +// +// value: If specified, defines the type of each tuple component in an +// element produced by the resulting iterator. +// If not specified, defaults to <> +// +// REQUIRES: len(value) >= 0 +func IteratorFromStringHandleOutputTypes(value []tf.DataType) IteratorFromStringHandleAttr { + return func(m optionalAttr) { + m["output_types"] = value + } +} + +// IteratorFromStringHandleOutputShapes sets the optional output_shapes attribute to value. +// +// value: If specified, defines the shape of each tuple component in an +// element produced by the resulting iterator. +// If not specified, defaults to <> +// +// REQUIRES: len(value) >= 0 +func IteratorFromStringHandleOutputShapes(value []tf.Shape) IteratorFromStringHandleAttr { + return func(m optionalAttr) { + m["output_shapes"] = value + } +} + +// Converts the given string representing a handle to an iterator to a resource. +// +// Arguments: +// string_handle: A string representation of the given handle. +// +// Returns A handle to an iterator resource. +func IteratorFromStringHandle(scope *Scope, string_handle tf.Output, optional ...IteratorFromStringHandleAttr) (resource_handle tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "IteratorFromStringHandle", + Input: []tf.Input{ + string_handle, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + +// Performs gradient updates of embedding tables. +// +// Arguments: +// inputs: A TensorList of gradients with which to update embedding tables. 
+// This argument has the same length and shapes as the return value of +// RecvTPUEmbeddingActivations, but contains gradients of the model's loss +// with respect to the embedding activations. The embedding tables are updated +// from these gradients via the optimizer specified in the TPU embedding +// configuration given to tpu.initialize_system. +// learning_rates: A TensorList of float32 scalars, one for each dynamic learning +// rate tag: see the comments in +// //third_party/tensorflow/core/protobuf/tpu/optimization_parameters.proto. +// Multiple tables can share the same dynamic learning rate tag as specified +// in the configuration. If the learning rates for all tables are constant, +// this list should be empty. +// config: Serialized TPUEmbeddingConfiguration proto. +// +// Returns the created operation. +func SendTPUEmbeddingGradients(scope *Scope, inputs []tf.Output, learning_rates []tf.Output, config string) (o *tf.Operation) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{"config": config} + opspec := tf.OpSpec{ + Type: "SendTPUEmbeddingGradients", + Input: []tf.Input{ + tf.OutputList(inputs), tf.OutputList(learning_rates), + }, + Attrs: attrs, + } + return scope.AddOperation(opspec) +} + +// CumsumAttr is an optional argument to Cumsum. +type CumsumAttr func(optionalAttr) + +// CumsumExclusive sets the optional exclusive attribute to value. +// +// value: If `True`, perform exclusive cumsum. +// If not specified, defaults to false +func CumsumExclusive(value bool) CumsumAttr { + return func(m optionalAttr) { + m["exclusive"] = value + } +} + +// CumsumReverse sets the optional reverse attribute to value. +// +// value: A `bool` (default: False). +// If not specified, defaults to false +func CumsumReverse(value bool) CumsumAttr { + return func(m optionalAttr) { + m["reverse"] = value + } +} + +// Compute the cumulative sum of the tensor `x` along `axis`. +// +// By default, this op performs an inclusive cumsum, which means that the first +// element of the input is identical to the first element of the output: +// +// ```python +// tf.cumsum([a, b, c]) # => [a, a + b, a + b + c] +// ``` +// +// By setting the `exclusive` kwarg to `True`, an exclusive cumsum is +// performed instead: +// +// ```python +// tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b] +// ``` +// +// By setting the `reverse` kwarg to `True`, the cumsum is performed in the +// opposite direction: +// +// ```python +// tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c] +// ``` +// +// This is more efficient than using separate `tf.reverse` ops. +// +// The `reverse` and `exclusive` kwargs can also be combined: +// +// ```python +// tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0] +// ``` +// +// Arguments: +// x: A `Tensor`. Must be one of the following types: `float32`, `float64`, +// `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, +// `complex128`, `qint8`, `quint8`, `qint32`, `half`. +// axis: A `Tensor` of type `int32` (default: 0). Must be in the range +// `[-rank(x), rank(x))`. +func Cumsum(scope *Scope, x tf.Output, axis tf.Output, optional ...CumsumAttr) (out tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "Cumsum", + Input: []tf.Input{ + x, axis, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + // Calculates the gradient of the SparseMatrixSoftmax op. 
// // Arguments: @@ -30416,6 +30564,367 @@ func StatefulUniform(scope *Scope, resource tf.Output, algorithm tf.Output, shap return op.Output(0) } +// QuantizedConv2DAttr is an optional argument to QuantizedConv2D. +type QuantizedConv2DAttr func(optionalAttr) + +// QuantizedConv2DOutType sets the optional out_type attribute to value. +// If not specified, defaults to DT_QINT32 +func QuantizedConv2DOutType(value tf.DataType) QuantizedConv2DAttr { + return func(m optionalAttr) { + m["out_type"] = value + } +} + +// QuantizedConv2DDilations sets the optional dilations attribute to value. +// +// value: 1-D tensor of length 4. The dilation factor for each dimension of +// `input`. If set to k > 1, there will be k-1 skipped cells between each +// filter element on that dimension. The dimension order is determined by the +// value of `data_format`, see above for details. Dilations in the batch and +// depth dimensions must be 1. +// If not specified, defaults to +func QuantizedConv2DDilations(value []int64) QuantizedConv2DAttr { + return func(m optionalAttr) { + m["dilations"] = value + } +} + +// Computes a 2D convolution given quantized 4D input and filter tensors. +// +// The inputs are quantized tensors where the lowest value represents the real +// number of the associated minimum, and the highest represents the maximum. +// This means that you can only interpret the quantized output in the same way, by +// taking the returned minimum and maximum values into account. +// +// Arguments: +// +// filter: filter's input_depth dimension must match input's depth dimensions. +// min_input: The float value that the lowest quantized input value represents. +// max_input: The float value that the highest quantized input value represents. +// min_filter: The float value that the lowest quantized filter value represents. +// max_filter: The float value that the highest quantized filter value represents. +// strides: The stride of the sliding window for each dimension of the input +// tensor. +// padding: The type of padding algorithm to use. +// +// Returns: +// output +// min_output: The float value that the lowest quantized output value represents. +// max_output: The float value that the highest quantized output value represents. +func QuantizedConv2D(scope *Scope, input tf.Output, filter tf.Output, min_input tf.Output, max_input tf.Output, min_filter tf.Output, max_filter tf.Output, strides []int64, padding string, optional ...QuantizedConv2DAttr) (output tf.Output, min_output tf.Output, max_output tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{"strides": strides, "padding": padding} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "QuantizedConv2D", + Input: []tf.Input{ + input, filter, min_input, max_input, min_filter, max_filter, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0), op.Output(1), op.Output(2) +} + +// Computes rectified linear 6 gradients for a Relu6 operation. +// +// Arguments: +// gradients: The backpropagated gradients to the corresponding Relu6 operation. +// features: The features passed as input to the corresponding Relu6 operation, or +// its output; using either one produces the same result. +// +// Returns The gradients: +// `gradients * (features > 0) * (features < 6)`. 
+func Relu6Grad(scope *Scope, gradients tf.Output, features tf.Output) (backprops tf.Output) { + if scope.Err() != nil { + return + } + opspec := tf.OpSpec{ + Type: "Relu6Grad", + Input: []tf.Input{ + gradients, features, + }, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + +// EditDistanceAttr is an optional argument to EditDistance. +type EditDistanceAttr func(optionalAttr) + +// EditDistanceNormalize sets the optional normalize attribute to value. +// +// value: boolean (if true, edit distances are normalized by length of truth). +// +// The output is: +// If not specified, defaults to true +func EditDistanceNormalize(value bool) EditDistanceAttr { + return func(m optionalAttr) { + m["normalize"] = value + } +} + +// Computes the (possibly normalized) Levenshtein Edit Distance. +// +// The inputs are variable-length sequences provided by SparseTensors +// (hypothesis_indices, hypothesis_values, hypothesis_shape) +// and +// (truth_indices, truth_values, truth_shape). +// +// The inputs are: +// +// Arguments: +// hypothesis_indices: The indices of the hypothesis list SparseTensor. +// This is an N x R int64 matrix. +// hypothesis_values: The values of the hypothesis list SparseTensor. +// This is an N-length vector. +// hypothesis_shape: The shape of the hypothesis list SparseTensor. +// This is an R-length vector. +// truth_indices: The indices of the truth list SparseTensor. +// This is an M x R int64 matrix. +// truth_values: The values of the truth list SparseTensor. +// This is an M-length vector. +// truth_shape: truth indices, vector. +// +// Returns A dense float tensor with rank R - 1. +// +// For the example input: +// +// // hypothesis represents a 2x1 matrix with variable-length values: +// // (0,0) = ["a"] +// // (1,0) = ["b"] +// hypothesis_indices = [[0, 0, 0], +// [1, 0, 0]] +// hypothesis_values = ["a", "b"] +// hypothesis_shape = [2, 1, 1] +// +// // truth represents a 2x2 matrix with variable-length values: +// // (0,0) = [] +// // (0,1) = ["a"] +// // (1,0) = ["b", "c"] +// // (1,1) = ["a"] +// truth_indices = [[0, 1, 0], +// [1, 0, 0], +// [1, 0, 1], +// [1, 1, 0]] +// truth_values = ["a", "b", "c", "a"] +// truth_shape = [2, 2, 2] +// normalize = true +// +// The output will be: +// +// // output is a 2x2 matrix with edit distances normalized by truth lengths. +// output = [[inf, 1.0], // (0,0): no truth, (0,1): no hypothesis +// [0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis +func EditDistance(scope *Scope, hypothesis_indices tf.Output, hypothesis_values tf.Output, hypothesis_shape tf.Output, truth_indices tf.Output, truth_values tf.Output, truth_shape tf.Output, optional ...EditDistanceAttr) (output tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "EditDistance", + Input: []tf.Input{ + hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + +// Concatenates a list of `N` tensors along the first dimension. +// +// The input tensors are all required to have size 1 in the first dimension. +// +// For example: +// +// ``` +// # 'x' is [[1, 4]] +// # 'y' is [[2, 5]] +// # 'z' is [[3, 6]] +// parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. 
+// ``` +// +// The difference between concat and parallel_concat is that concat requires all +// of the inputs be computed before the operation will begin but doesn't require +// that the input shapes be known during graph construction. Parallel concat +// will copy pieces of the input into the output as they become available, in +// some situations this can provide a performance benefit. +// +// Arguments: +// values: Tensors to be concatenated. All must have size 1 in the first dimension +// and same shape. +// shape: the final shape of the result; should be equal to the shapes of any input +// but with the number of input values in the first dimension. +// +// Returns The concatenated tensor. +func ParallelConcat(scope *Scope, values []tf.Output, shape tf.Shape) (output tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{"shape": shape} + opspec := tf.OpSpec{ + Type: "ParallelConcat", + Input: []tf.Input{ + tf.OutputList(values), + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + +// AvgPoolGradAttr is an optional argument to AvgPoolGrad. +type AvgPoolGradAttr func(optionalAttr) + +// AvgPoolGradDataFormat sets the optional data_format attribute to value. +// +// value: Specify the data format of the input and output data. With the +// default format "NHWC", the data is stored in the order of: +// [batch, in_height, in_width, in_channels]. +// Alternatively, the format could be "NCHW", the data storage order of: +// [batch, in_channels, in_height, in_width]. +// If not specified, defaults to "NHWC" +func AvgPoolGradDataFormat(value string) AvgPoolGradAttr { + return func(m optionalAttr) { + m["data_format"] = value + } +} + +// Computes gradients of the average pooling function. +// +// Arguments: +// orig_input_shape: 1-D. Shape of the original input to `avg_pool`. +// grad: 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. +// the output of `avg_pool`. +// ksize: The size of the sliding window for each dimension of the input. +// strides: The stride of the sliding window for each dimension of the input. +// padding: The type of padding algorithm to use. +// +// Returns 4-D. Gradients w.r.t. the input of `avg_pool`. +func AvgPoolGrad(scope *Scope, orig_input_shape tf.Output, grad tf.Output, ksize []int64, strides []int64, padding string, optional ...AvgPoolGradAttr) (output tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{"ksize": ksize, "strides": strides, "padding": padding} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "AvgPoolGrad", + Input: []tf.Input{ + orig_input_shape, grad, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + +// StringSplitAttr is an optional argument to StringSplit. +type StringSplitAttr func(optionalAttr) + +// StringSplitSkipEmpty sets the optional skip_empty attribute to value. +// +// value: A `bool`. If `True`, skip the empty strings from the result. +// If not specified, defaults to true +func StringSplitSkipEmpty(value bool) StringSplitAttr { + return func(m optionalAttr) { + m["skip_empty"] = value + } +} + +// Split elements of `input` based on `delimiter` into a `SparseTensor`. +// +// Let N be the size of source (typically N will be the batch size). Split each +// element of `input` based on `delimiter` and return a `SparseTensor` +// containing the splitted tokens. Empty tokens are ignored. +// +// `delimiter` can be empty, or a string of split characters. 
If `delimiter` is an +// empty string, each element of `input` is split into individual single-byte +// character strings, including splitting of UTF-8 multibyte sequences. Otherwise +// every character of `delimiter` is a potential split point. +// +// For example: +// N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output +// will be +// +// indices = [0, 0; +// 0, 1; +// 1, 0; +// 1, 1; +// 1, 2] +// shape = [2, 3] +// values = ['hello', 'world', 'a', 'b', 'c'] +// +// Arguments: +// input: 1-D. Strings to split. +// delimiter: 0-D. Delimiter characters (bytes), or empty string. +// +// Returns: +// indices: A dense matrix of int64 representing the indices of the sparse tensor. +// values: A vector of strings corresponding to the splited values. +// shape: a length-2 vector of int64 representing the shape of the sparse +// tensor, where the first value is N and the second value is the maximum number +// of tokens in a single input entry. +func StringSplit(scope *Scope, input tf.Output, delimiter tf.Output, optional ...StringSplitAttr) (indices tf.Output, values tf.Output, shape tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "StringSplit", + Input: []tf.Input{ + input, delimiter, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0), op.Output(1), op.Output(2) +} + +// Assigns sparse updates to the variable referenced by `resource`. +// +// This operation computes +// +// # Scalar indices +// ref[indices, ...] = updates[...] +// +// # Vector indices (for each i) +// ref[indices[i], ...] = updates[i, ...] +// +// # High rank indices (for each i, ..., j) +// ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] +// +// Arguments: +// resource: Should be from a `Variable` node. +// indices: A tensor of indices into the first dimension of `ref`. +// updates: A tensor of updated values to add to `ref`. +// +// Returns the created operation. +func ResourceScatterUpdate(scope *Scope, resource tf.Output, indices tf.Output, updates tf.Output) (o *tf.Operation) { + if scope.Err() != nil { + return + } + opspec := tf.OpSpec{ + Type: "ResourceScatterUpdate", + Input: []tf.Input{ + resource, indices, updates, + }, + } + return scope.AddOperation(opspec) +} + // Creates ngrams from ragged string data. // // This op accepts a ragged tensor with 1 ragged dimension containing only @@ -30903,32 +31412,49 @@ func ConsumeMutexLock(scope *Scope, mutex_lock tf.Output) (o *tf.Operation) { return scope.AddOperation(opspec) } -// Adjust the contrast of one or more images. +// BiasAddAttr is an optional argument to BiasAdd. +type BiasAddAttr func(optionalAttr) + +// BiasAddDataFormat sets the optional data_format attribute to value. // -// `images` is a tensor of at least 3 dimensions. The last 3 dimensions are -// interpreted as `[height, width, channels]`. The other dimensions only -// represent a collection of images, such as `[batch, height, width, channels].` +// value: Specify the data format of the input and output data. With the +// default format "NHWC", the bias tensor will be added to the last dimension +// of the value tensor. +// Alternatively, the format could be "NCHW", the data storage order of: +// [batch, in_channels, in_height, in_width]. +// The tensor will be added to "in_channels", the third-to-the-last +// dimension. 
+// If not specified, defaults to "NHWC" +func BiasAddDataFormat(value string) BiasAddAttr { + return func(m optionalAttr) { + m["data_format"] = value + } +} + +// Adds `bias` to `value`. // -// Contrast is adjusted independently for each channel of each image. -// -// For each channel, the Op first computes the mean of the image pixels in the -// channel and then adjusts each component of each pixel to -// `(x - mean) * contrast_factor + mean`. +// This is a special case of `tf.add` where `bias` is restricted to be 1-D. +// Broadcasting is supported, so `value` may have any number of dimensions. // // Arguments: -// images: Images to adjust. At least 3-D. -// contrast_factor: A float multiplier for adjusting contrast. +// value: Any number of dimensions. +// bias: 1-D with size the last dimension of `value`. // -// Returns The contrast-adjusted image or images. -func AdjustContrastv2(scope *Scope, images tf.Output, contrast_factor tf.Output) (output tf.Output) { +// Returns Broadcasted sum of `value` and `bias`. +func BiasAdd(scope *Scope, value tf.Output, bias tf.Output, optional ...BiasAddAttr) (output tf.Output) { if scope.Err() != nil { return } + attrs := map[string]interface{}{} + for _, a := range optional { + a(attrs) + } opspec := tf.OpSpec{ - Type: "AdjustContrastv2", + Type: "BiasAdd", Input: []tf.Input{ - images, contrast_factor, + value, bias, }, + Attrs: attrs, } op := scope.AddOperation(opspec) return op.Output(0) @@ -33112,60 +33638,6 @@ func CSRSparseMatrixToDense(scope *Scope, sparse_input tf.Output, type_ tf.DataT return op.Output(0) } -// IteratorFromStringHandleAttr is an optional argument to IteratorFromStringHandle. -type IteratorFromStringHandleAttr func(optionalAttr) - -// IteratorFromStringHandleOutputTypes sets the optional output_types attribute to value. -// -// value: If specified, defines the type of each tuple component in an -// element produced by the resulting iterator. -// If not specified, defaults to <> -// -// REQUIRES: len(value) >= 0 -func IteratorFromStringHandleOutputTypes(value []tf.DataType) IteratorFromStringHandleAttr { - return func(m optionalAttr) { - m["output_types"] = value - } -} - -// IteratorFromStringHandleOutputShapes sets the optional output_shapes attribute to value. -// -// value: If specified, defines the shape of each tuple component in an -// element produced by the resulting iterator. -// If not specified, defaults to <> -// -// REQUIRES: len(value) >= 0 -func IteratorFromStringHandleOutputShapes(value []tf.Shape) IteratorFromStringHandleAttr { - return func(m optionalAttr) { - m["output_shapes"] = value - } -} - -// Converts the given string representing a handle to an iterator to a resource. -// -// Arguments: -// string_handle: A string representation of the given handle. -// -// Returns A handle to an iterator resource. -func IteratorFromStringHandle(scope *Scope, string_handle tf.Output, optional ...IteratorFromStringHandleAttr) (resource_handle tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "IteratorFromStringHandle", - Input: []tf.Input{ - string_handle, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - // Add all input tensors element wise. // // Inputs must be of same size and shape. 
@@ -34540,6 +35012,49 @@ func SparseCrossHashed(scope *Scope, indices []tf.Output, values []tf.Output, sh return op.Output(0), op.Output(1), op.Output(2) } +// Conv3DBackpropInputAttr is an optional argument to Conv3DBackpropInput. +type Conv3DBackpropInputAttr func(optionalAttr) + +// Conv3DBackpropInputDilations sets the optional dilations attribute to value. +// If not specified, defaults to +func Conv3DBackpropInputDilations(value []int64) Conv3DBackpropInputAttr { + return func(m optionalAttr) { + m["dilations"] = value + } +} + +// Computes the gradients of 3-D convolution with respect to the input. +// +// DEPRECATED at GraphDef version 10: Use Conv3DBackpropInputV2 +// +// Arguments: +// input: Shape `[batch, depth, rows, cols, in_channels]`. +// filter: Shape `[depth, rows, cols, in_channels, out_channels]`. +// `in_channels` must match between `input` and `filter`. +// out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, +// out_channels]`. +// strides: 1-D tensor of length 5. The stride of the sliding window for each +// dimension of `input`. Must have `strides[0] = strides[4] = 1`. +// padding: The type of padding algorithm to use. +func Conv3DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropInputAttr) (output tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{"strides": strides, "padding": padding} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "Conv3DBackpropInput", + Input: []tf.Input{ + input, filter, out_backprop, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + // QuantizedInstanceNormAttr is an optional argument to QuantizedInstanceNorm. type QuantizedInstanceNormAttr func(optionalAttr) @@ -35985,222 +36500,6 @@ func DeserializeManySparse(scope *Scope, serialized_sparse tf.Output, dtype tf.D return op.Output(0), op.Output(1), op.Output(2) } -// MapStageAttr is an optional argument to MapStage. -type MapStageAttr func(optionalAttr) - -// MapStageCapacity sets the optional capacity attribute to value. -// -// value: Maximum number of elements in the Staging Area. If > 0, inserts -// on the container will block when the capacity is reached. -// If not specified, defaults to 0 -// -// REQUIRES: value >= 0 -func MapStageCapacity(value int64) MapStageAttr { - return func(m optionalAttr) { - m["capacity"] = value - } -} - -// MapStageMemoryLimit sets the optional memory_limit attribute to value. -// If not specified, defaults to 0 -// -// REQUIRES: value >= 0 -func MapStageMemoryLimit(value int64) MapStageAttr { - return func(m optionalAttr) { - m["memory_limit"] = value - } -} - -// MapStageContainer sets the optional container attribute to value. -// -// value: If non-empty, this queue is placed in the given container. Otherwise, -// a default container is used. -// If not specified, defaults to "" -func MapStageContainer(value string) MapStageAttr { - return func(m optionalAttr) { - m["container"] = value - } -} - -// MapStageSharedName sets the optional shared_name attribute to value. -// -// value: It is necessary to match this name to the matching Unstage Op. -// If not specified, defaults to "" -func MapStageSharedName(value string) MapStageAttr { - return func(m optionalAttr) { - m["shared_name"] = value - } -} - -// Stage (key, values) in the underlying container which behaves like a hashtable. 
-// -// Arguments: -// key: int64 -// -// values: a list of tensors -// dtypes A list of data types that inserted values should adhere to. -// -// -// Returns the created operation. -func MapStage(scope *Scope, key tf.Output, indices tf.Output, values []tf.Output, dtypes []tf.DataType, optional ...MapStageAttr) (o *tf.Operation) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{"dtypes": dtypes} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "MapStage", - Input: []tf.Input{ - key, indices, tf.OutputList(values), - }, - Attrs: attrs, - } - return scope.AddOperation(opspec) -} - -// FusedBatchNormGradV2Attr is an optional argument to FusedBatchNormGradV2. -type FusedBatchNormGradV2Attr func(optionalAttr) - -// FusedBatchNormGradV2Epsilon sets the optional epsilon attribute to value. -// -// value: A small float number added to the variance of x. -// If not specified, defaults to 0.0001 -func FusedBatchNormGradV2Epsilon(value float32) FusedBatchNormGradV2Attr { - return func(m optionalAttr) { - m["epsilon"] = value - } -} - -// FusedBatchNormGradV2DataFormat sets the optional data_format attribute to value. -// -// value: The data format for y_backprop, x, x_backprop. -// Either "NHWC" (default) or "NCHW". -// If not specified, defaults to "NHWC" -func FusedBatchNormGradV2DataFormat(value string) FusedBatchNormGradV2Attr { - return func(m optionalAttr) { - m["data_format"] = value - } -} - -// FusedBatchNormGradV2IsTraining sets the optional is_training attribute to value. -// -// value: A bool value to indicate the operation is for training (default) -// or inference. -// If not specified, defaults to true -func FusedBatchNormGradV2IsTraining(value bool) FusedBatchNormGradV2Attr { - return func(m optionalAttr) { - m["is_training"] = value - } -} - -// Gradient for batch normalization. -// -// Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". -// The size of 1D Tensors matches the dimension C of the 4D Tensors. -// -// Arguments: -// y_backprop: A 4D Tensor for the gradient with respect to y. -// x: A 4D Tensor for input data. -// scale: A 1D Tensor for scaling factor, to scale the normalized x. -// reserve_space_1: When is_training is True, a 1D Tensor for the computed batch -// mean to be reused in gradient computation. When is_training is -// False, a 1D Tensor for the population mean to be reused in both -// 1st and 2nd order gradient computation. -// reserve_space_2: When is_training is True, a 1D Tensor for the computed batch -// variance (inverted variance in the cuDNN case) to be reused in -// gradient computation. When is_training is False, a 1D Tensor -// for the population variance to be reused in both 1st and 2nd -// order gradient computation. -// -// Returns: -// x_backprop: A 4D Tensor for the gradient with respect to x. -// scale_backprop: A 1D Tensor for the gradient with respect to scale. -// offset_backprop: A 1D Tensor for the gradient with respect to offset. -// reserve_space_3: Unused placeholder to match the mean input in FusedBatchNorm. -// reserve_space_4: Unused placeholder to match the variance input -// in FusedBatchNorm. 
-func FusedBatchNormGradV2(scope *Scope, y_backprop tf.Output, x tf.Output, scale tf.Output, reserve_space_1 tf.Output, reserve_space_2 tf.Output, optional ...FusedBatchNormGradV2Attr) (x_backprop tf.Output, scale_backprop tf.Output, offset_backprop tf.Output, reserve_space_3 tf.Output, reserve_space_4 tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "FusedBatchNormGradV2", - Input: []tf.Input{ - y_backprop, x, scale, reserve_space_1, reserve_space_2, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4) -} - -// SparseTensorDenseMatMulAttr is an optional argument to SparseTensorDenseMatMul. -type SparseTensorDenseMatMulAttr func(optionalAttr) - -// SparseTensorDenseMatMulAdjointA sets the optional adjoint_a attribute to value. -// -// value: Use the adjoint of A in the matrix multiply. If A is complex, this -// is transpose(conj(A)). Otherwise it's transpose(A). -// If not specified, defaults to false -func SparseTensorDenseMatMulAdjointA(value bool) SparseTensorDenseMatMulAttr { - return func(m optionalAttr) { - m["adjoint_a"] = value - } -} - -// SparseTensorDenseMatMulAdjointB sets the optional adjoint_b attribute to value. -// -// value: Use the adjoint of B in the matrix multiply. If B is complex, this -// is transpose(conj(B)). Otherwise it's transpose(B). -// If not specified, defaults to false -func SparseTensorDenseMatMulAdjointB(value bool) SparseTensorDenseMatMulAttr { - return func(m optionalAttr) { - m["adjoint_b"] = value - } -} - -// Multiply SparseTensor (of rank 2) "A" by dense matrix "B". -// -// No validity checking is performed on the indices of A. However, the following -// input format is recommended for optimal behavior: -// -// if adjoint_a == false: -// A should be sorted in lexicographically increasing order. Use SparseReorder -// if you're not sure. -// if adjoint_a == true: -// A should be sorted in order of increasing dimension 1 (i.e., "column major" -// order instead of "row major" order). -// -// Arguments: -// a_indices: 2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix. -// a_values: 1-D. The `values` of the `SparseTensor`, size `[nnz]` Vector. -// a_shape: 1-D. The `shape` of the `SparseTensor`, size `[2]` Vector. -// b: 2-D. A dense Matrix. -func SparseTensorDenseMatMul(scope *Scope, a_indices tf.Output, a_values tf.Output, a_shape tf.Output, b tf.Output, optional ...SparseTensorDenseMatMulAttr) (product tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "SparseTensorDenseMatMul", - Input: []tf.Input{ - a_indices, a_values, a_shape, b, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - // Sets the index-th position of the list to contain the given tensor. // // input_handle: the list @@ -36542,63 +36841,6 @@ func SparseAddGrad(scope *Scope, backprop_val_grad tf.Output, a_indices tf.Outpu return op.Output(0), op.Output(1) } -// Removes keys and its associated values from a table. -// -// The tensor `keys` must of the same type as the keys of the table. Keys not -// already in the table are silently ignored. -// -// Arguments: -// table_handle: Handle to the table. -// keys: Any shape. Keys of the elements to remove. -// -// Returns the created operation. 
-func LookupTableRemoveV2(scope *Scope, table_handle tf.Output, keys tf.Output) (o *tf.Operation) { - if scope.Err() != nil { - return - } - opspec := tf.OpSpec{ - Type: "LookupTableRemoveV2", - Input: []tf.Input{ - table_handle, keys, - }, - } - return scope.AddOperation(opspec) -} - -// NotEqualAttr is an optional argument to NotEqual. -type NotEqualAttr func(optionalAttr) - -// NotEqualIncompatibleShapeError sets the optional incompatible_shape_error attribute to value. -// If not specified, defaults to true -func NotEqualIncompatibleShapeError(value bool) NotEqualAttr { - return func(m optionalAttr) { - m["incompatible_shape_error"] = value - } -} - -// Returns the truth value of (x != y) element-wise. -// -// *NOTE*: `NotEqual` supports broadcasting. More about broadcasting -// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) -func NotEqual(scope *Scope, x tf.Output, y tf.Output, optional ...NotEqualAttr) (z tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "NotEqual", - Input: []tf.Input{ - x, y, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - // Computes the complementary error function of `x` element-wise. func Erfc(scope *Scope, x tf.Output) (y tf.Output) { if scope.Err() != nil { @@ -37827,31 +38069,6 @@ func RFFT(scope *Scope, input tf.Output, fft_length tf.Output, optional ...RFFTA return op.Output(0) } -// JPEG encode input image with provided compression quality. -// -// `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. -// `quality` is an int32 jpeg compression quality value between 0 and 100. -// -// -// Arguments: -// images: Images to adjust. At least 3-D. -// quality: An int quality to encode to. -// -// Returns 0-D. JPEG-encoded image. -func EncodeJpegVariableQuality(scope *Scope, images tf.Output, quality tf.Output) (contents tf.Output) { - if scope.Err() != nil { - return - } - opspec := tf.OpSpec{ - Type: "EncodeJpegVariableQuality", - Input: []tf.Input{ - images, quality, - }, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - // 3D fast Fourier transform. // // Computes the 3-dimensional discrete Fourier transform over the inner-most 3 @@ -38270,6 +38487,44 @@ func RecvTPUEmbeddingActivations(scope *Scope, num_outputs int64, config string) return outputs } +// InfeedEnqueuePrelinearizedBufferAttr is an optional argument to InfeedEnqueuePrelinearizedBuffer. +type InfeedEnqueuePrelinearizedBufferAttr func(optionalAttr) + +// InfeedEnqueuePrelinearizedBufferDeviceOrdinal sets the optional device_ordinal attribute to value. +// +// value: The TPU device to use. This should be -1 when the Op is running on a TPU device +// and = 0 when the Op is running on the CPU device. +// If not specified, defaults to -1 +func InfeedEnqueuePrelinearizedBufferDeviceOrdinal(value int64) InfeedEnqueuePrelinearizedBufferAttr { + return func(m optionalAttr) { + m["device_ordinal"] = value + } +} + +// An op which enqueues prelinearized buffer into TPU infeed. +// +// Arguments: +// input: A variant tensor representing linearized output. +// +// Returns the created operation. 
+func InfeedEnqueuePrelinearizedBuffer(scope *Scope, input tf.Output, optional ...InfeedEnqueuePrelinearizedBufferAttr) (o *tf.Operation) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "InfeedEnqueuePrelinearizedBuffer", + Input: []tf.Input{ + input, + }, + Attrs: attrs, + } + return scope.AddOperation(opspec) +} + // Computes the derivative of a Gamma random sample w.r.t. `alpha`. func RandomGammaGrad(scope *Scope, alpha tf.Output, sample tf.Output) (output tf.Output) { if scope.Err() != nil { @@ -38549,171 +38804,6 @@ func StatefulTruncatedNormal(scope *Scope, resource tf.Output, algorithm tf.Outp return op.Output(0) } -// Returns the rank of a tensor. -// -// This operation returns an integer representing the rank of `input`. -// -// For example: -// -// ``` -// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] -// # shape of tensor 't' is [2, 2, 3] -// rank(t) ==> 3 -// ``` -// -// **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank -// of a tensor is the number of indices required to uniquely select each element -// of the tensor. Rank is also known as "order", "degree", or "ndims." -func Rank(scope *Scope, input tf.Output) (output tf.Output) { - if scope.Err() != nil { - return - } - opspec := tf.OpSpec{ - Type: "Rank", - Input: []tf.Input{ - input, - }, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - -// EncodeJpegAttr is an optional argument to EncodeJpeg. -type EncodeJpegAttr func(optionalAttr) - -// EncodeJpegFormat sets the optional format attribute to value. -// -// value: Per pixel image format. -// If not specified, defaults to "" -func EncodeJpegFormat(value string) EncodeJpegAttr { - return func(m optionalAttr) { - m["format"] = value - } -} - -// EncodeJpegQuality sets the optional quality attribute to value. -// -// value: Quality of the compression from 0 to 100 (higher is better and slower). -// If not specified, defaults to 95 -func EncodeJpegQuality(value int64) EncodeJpegAttr { - return func(m optionalAttr) { - m["quality"] = value - } -} - -// EncodeJpegProgressive sets the optional progressive attribute to value. -// -// value: If True, create a JPEG that loads progressively (coarse to fine). -// If not specified, defaults to false -func EncodeJpegProgressive(value bool) EncodeJpegAttr { - return func(m optionalAttr) { - m["progressive"] = value - } -} - -// EncodeJpegOptimizeSize sets the optional optimize_size attribute to value. -// -// value: If True, spend CPU/RAM to reduce size with no quality change. -// If not specified, defaults to false -func EncodeJpegOptimizeSize(value bool) EncodeJpegAttr { - return func(m optionalAttr) { - m["optimize_size"] = value - } -} - -// EncodeJpegChromaDownsampling sets the optional chroma_downsampling attribute to value. -// -// value: See http://en.wikipedia.org/wiki/Chroma_subsampling. -// If not specified, defaults to true -func EncodeJpegChromaDownsampling(value bool) EncodeJpegAttr { - return func(m optionalAttr) { - m["chroma_downsampling"] = value - } -} - -// EncodeJpegDensityUnit sets the optional density_unit attribute to value. -// -// value: Unit used to specify `x_density` and `y_density`: -// pixels per inch (`'in'`) or centimeter (`'cm'`). 
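// --- Editorial example (not part of the generated patch) ---
// A short sketch of the Rank wrapper whose (relocated) definition appears in
// this hunk: it returns the number of indices needed to address an element.
// Assumes the standard tensorflow/go bindings; names are illustrative only.
package example

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// rankSketch builds a node whose value is 3 for the 2x2x3 constant below.
func rankSketch(s *op.Scope) tf.Output {
	t := op.Const(s, [][][]int32{{{1, 1, 1}, {2, 2, 2}}, {{3, 3, 3}, {4, 4, 4}}})
	return op.Rank(s, t)
}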
-// If not specified, defaults to "in" -func EncodeJpegDensityUnit(value string) EncodeJpegAttr { - return func(m optionalAttr) { - m["density_unit"] = value - } -} - -// EncodeJpegXDensity sets the optional x_density attribute to value. -// -// value: Horizontal pixels per density unit. -// If not specified, defaults to 300 -func EncodeJpegXDensity(value int64) EncodeJpegAttr { - return func(m optionalAttr) { - m["x_density"] = value - } -} - -// EncodeJpegYDensity sets the optional y_density attribute to value. -// -// value: Vertical pixels per density unit. -// If not specified, defaults to 300 -func EncodeJpegYDensity(value int64) EncodeJpegAttr { - return func(m optionalAttr) { - m["y_density"] = value - } -} - -// EncodeJpegXmpMetadata sets the optional xmp_metadata attribute to value. -// -// value: If not empty, embed this XMP metadata in the image header. -// If not specified, defaults to "" -func EncodeJpegXmpMetadata(value string) EncodeJpegAttr { - return func(m optionalAttr) { - m["xmp_metadata"] = value - } -} - -// JPEG-encode an image. -// -// `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. -// -// The attr `format` can be used to override the color format of the encoded -// output. Values can be: -// -// * `''`: Use a default format based on the number of channels in the image. -// * `grayscale`: Output a grayscale JPEG image. The `channels` dimension -// of `image` must be 1. -// * `rgb`: Output an RGB JPEG image. The `channels` dimension -// of `image` must be 3. -// -// If `format` is not specified or is the empty string, a default format is picked -// in function of the number of channels in `image`: -// -// * 1: Output a grayscale image. -// * 3: Output an RGB image. -// -// Arguments: -// image: 3-D with shape `[height, width, channels]`. -// -// Returns 0-D. JPEG-encoded image. -func EncodeJpeg(scope *Scope, image tf.Output, optional ...EncodeJpegAttr) (contents tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "EncodeJpeg", - Input: []tf.Input{ - image, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - // Checks whether a quantile stream has been initialized. // // An Op that checks if quantile stream resource is initialized. @@ -41652,118 +41742,6 @@ func ResourceApplyAdam(scope *Scope, var_ tf.Output, m tf.Output, v tf.Output, b return scope.AddOperation(opspec) } -// CumsumAttr is an optional argument to Cumsum. -type CumsumAttr func(optionalAttr) - -// CumsumExclusive sets the optional exclusive attribute to value. -// -// value: If `True`, perform exclusive cumsum. -// If not specified, defaults to false -func CumsumExclusive(value bool) CumsumAttr { - return func(m optionalAttr) { - m["exclusive"] = value - } -} - -// CumsumReverse sets the optional reverse attribute to value. -// -// value: A `bool` (default: False). -// If not specified, defaults to false -func CumsumReverse(value bool) CumsumAttr { - return func(m optionalAttr) { - m["reverse"] = value - } -} - -// Compute the cumulative sum of the tensor `x` along `axis`. 
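// --- Editorial example (not part of the generated patch) ---
// A sketch of the EncodeJpeg wrapper above with a few optional attrs set.
// It assumes the Placeholder wrapper from this same generated package and
// tf.MakeShape from the tensorflow/go bindings; names are illustrative only.
package example

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// encodeJpegSketch encodes a [64, 64, 3] uint8 image that is fed at run time.
func encodeJpegSketch(s *op.Scope) tf.Output {
	img := op.Placeholder(s, tf.Uint8, op.PlaceholderShape(tf.MakeShape(64, 64, 3)))
	return op.EncodeJpeg(s, img,
		op.EncodeJpegQuality(75),
		op.EncodeJpegChromaDownsampling(false))
}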
-// -// By default, this op performs an inclusive cumsum, which means that the first -// element of the input is identical to the first element of the output: -// -// ```python -// tf.cumsum([a, b, c]) # => [a, a + b, a + b + c] -// ``` -// -// By setting the `exclusive` kwarg to `True`, an exclusive cumsum is -// performed instead: -// -// ```python -// tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b] -// ``` -// -// By setting the `reverse` kwarg to `True`, the cumsum is performed in the -// opposite direction: -// -// ```python -// tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c] -// ``` -// -// This is more efficient than using separate `tf.reverse` ops. -// -// The `reverse` and `exclusive` kwargs can also be combined: -// -// ```python -// tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0] -// ``` -// -// Arguments: -// x: A `Tensor`. Must be one of the following types: `float32`, `float64`, -// `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, -// `complex128`, `qint8`, `quint8`, `qint32`, `half`. -// axis: A `Tensor` of type `int32` (default: 0). Must be in the range -// `[-rank(x), rank(x))`. -func Cumsum(scope *Scope, x tf.Output, axis tf.Output, optional ...CumsumAttr) (out tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "Cumsum", - Input: []tf.Input{ - x, axis, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - -// Performs gradient updates of embedding tables. -// -// Arguments: -// inputs: A TensorList of gradients with which to update embedding tables. -// This argument has the same length and shapes as the return value of -// RecvTPUEmbeddingActivations, but contains gradients of the model's loss -// with respect to the embedding activations. The embedding tables are updated -// from these gradients via the optimizer specified in the TPU embedding -// configuration given to tpu.initialize_system. -// learning_rates: A TensorList of float32 scalars, one for each dynamic learning -// rate tag: see the comments in -// //third_party/tensorflow/core/protobuf/tpu/optimization_parameters.proto. -// Multiple tables can share the same dynamic learning rate tag as specified -// in the configuration. If the learning rates for all tables are constant, -// this list should be empty. -// config: Serialized TPUEmbeddingConfiguration proto. -// -// Returns the created operation. -func SendTPUEmbeddingGradients(scope *Scope, inputs []tf.Output, learning_rates []tf.Output, config string) (o *tf.Operation) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{"config": config} - opspec := tf.OpSpec{ - Type: "SendTPUEmbeddingGradients", - Input: []tf.Input{ - tf.OutputList(inputs), tf.OutputList(learning_rates), - }, - Attrs: attrs, - } - return scope.AddOperation(opspec) -} - // ResourceApplyKerasMomentumAttr is an optional argument to ResourceApplyKerasMomentum. type ResourceApplyKerasMomentumAttr func(optionalAttr) @@ -42316,31 +42294,6 @@ func LoadTPUEmbeddingRMSPropParametersGradAccumDebug(scope *Scope, parameters tf return scope.AddOperation(opspec) } -// Concatenates tensors along one dimension. -// -// Arguments: -// values: List of `N` Tensors to concatenate. Their ranks and types must match, -// and their sizes must match in all dimensions except `concat_dim`. -// axis: 0-D. The dimension along which to concatenate. 
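// --- Editorial example (not part of the generated patch) ---
// A minimal sketch of driving the Cumsum wrapper above from the Go op package,
// combining the exclusive and reverse attributes. Assumes the standard
// tensorflow/go bindings and the Const wrapper generated in this same file;
// names are illustrative only.
package example

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// cumsumSketch builds a node computing an exclusive, reversed cumulative sum.
func cumsumSketch(s *op.Scope) tf.Output {
	x := op.Const(s, []float32{1, 2, 3})
	axis := op.Const(s, int32(0))
	// For [a, b, c] this yields [b+c, c, 0], matching the doc comment above.
	return op.Cumsum(s, x, axis, op.CumsumExclusive(true), op.CumsumReverse(true))
}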
Must be in the -// range [-rank(values), rank(values)). -// -// Returns A `Tensor` with the concatenation of values stacked along the -// `concat_dim` dimension. This tensor's shape matches that of `values` except -// in `concat_dim` where it has the sum of the sizes. -func ConcatV2(scope *Scope, values []tf.Output, axis tf.Output) (output tf.Output) { - if scope.Err() != nil { - return - } - opspec := tf.OpSpec{ - Type: "ConcatV2", - Input: []tf.Input{ - tf.OutputList(values), axis, - }, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - // LoadTPUEmbeddingFTRLParametersGradAccumDebugAttr is an optional argument to LoadTPUEmbeddingFTRLParametersGradAccumDebug. type LoadTPUEmbeddingFTRLParametersGradAccumDebugAttr func(optionalAttr) @@ -43001,6 +42954,34 @@ func SparseSoftmax(scope *Scope, sp_indices tf.Output, sp_values tf.Output, sp_s return op.Output(0) } +// Adjust the hue of one or more images. +// +// `images` is a tensor of at least 3 dimensions. The last dimension is +// interpreted as channels, and must be three. +// +// The input image is considered in the RGB colorspace. Conceptually, the RGB +// colors are first mapped into HSV. A delta is then applied all the hue values, +// and then remapped back to RGB colorspace. +// +// Arguments: +// images: Images to adjust. At least 3-D. +// delta: A float delta to add to the hue. +// +// Returns The hue-adjusted image or images. +func AdjustHue(scope *Scope, images tf.Output, delta tf.Output) (output tf.Output) { + if scope.Err() != nil { + return + } + opspec := tf.OpSpec{ + Type: "AdjustHue", + Input: []tf.Input{ + images, delta, + }, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + // Computes hyperbolic cosine of x element-wise. // // Given an input tensor, this function computes hyperbolic cosine of every @@ -43204,26 +43185,6 @@ func SparseSparseMaximum(scope *Scope, a_indices tf.Output, a_values tf.Output, return op.Output(0), op.Output(1) } -// Computes the Bessel i1e function of `x` element-wise. -// -// Exponentially scaled modified Bessel function of order 0 defined as -// `bessel_i1e(x) = exp(-abs(x)) bessel_i1(x)`. -// -// This function is faster and numerically stabler than `bessel_i1(x)`. -func BesselI1e(scope *Scope, x tf.Output) (y tf.Output) { - if scope.Err() != nil { - return - } - opspec := tf.OpSpec{ - Type: "BesselI1e", - Input: []tf.Input{ - x, - }, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - // LSTMBlockCellAttr is an optional argument to LSTMBlockCell. type LSTMBlockCellAttr func(optionalAttr) @@ -44980,83 +44941,6 @@ func ResourceScatterNdAdd(scope *Scope, ref tf.Output, indices tf.Output, update return scope.AddOperation(opspec) } -// Counts the number of occurrences of each value in an integer array. -// -// Outputs a vector with length `size` and the same dtype as `weights`. If -// `weights` are empty, then index `i` stores the number of times the value `i` is -// counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of -// the value in `weights` at each index where the corresponding value in `arr` is -// `i`. -// -// Values in `arr` outside of the range [0, size) are ignored. -// -// Arguments: -// arr: int32 `Tensor`. -// size: non-negative int32 scalar `Tensor`. -// weights: is an int32, int64, float32, or float64 `Tensor` with the same -// shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights -// equal to 1. -// -// Returns 1D `Tensor` with length equal to `size`. 
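// --- Editorial example (not part of the generated patch) ---
// A sketch of the ConcatV2 wrapper above: concatenating two 1x2 matrices along
// dimension 0. Assumes the standard tensorflow/go bindings and the Const
// wrapper from this package; names are illustrative only.
package example

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// concatSketch returns a 2x2 tensor [[1, 2], [3, 4]].
func concatSketch(s *op.Scope) tf.Output {
	a := op.Const(s, [][]float32{{1, 2}})
	b := op.Const(s, [][]float32{{3, 4}})
	axis := op.Const(s, int32(0))
	return op.ConcatV2(s, []tf.Output{a, b}, axis)
}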
The counts or summed weights for -// each value in the range [0, size). -func Bincount(scope *Scope, arr tf.Output, size tf.Output, weights tf.Output) (bins tf.Output) { - if scope.Err() != nil { - return - } - opspec := tf.OpSpec{ - Type: "Bincount", - Input: []tf.Input{ - arr, size, weights, - }, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - -// Gradients for batch normalization. -// -// DEPRECATED at GraphDef version 9: Use tf.nn.batch_normalization() -// -// This op is deprecated. See `tf.nn.batch_normalization`. -// -// Arguments: -// t: A 4D input Tensor. -// m: A 1D mean Tensor with size matching the last dimension of t. -// This is the first output from tf.nn.moments, -// or a saved moving average thereof. -// v: A 1D variance Tensor with size matching the last dimension of t. -// This is the second output from tf.nn.moments, -// or a saved moving average thereof. -// gamma: A 1D gamma Tensor with size matching the last dimension of t. -// If "scale_after_normalization" is true, this Tensor will be multiplied -// with the normalized Tensor. -// backprop: 4D backprop Tensor. -// variance_epsilon: A small float number to avoid dividing by 0. -// scale_after_normalization: A bool indicating whether the resulted tensor -// needs to be multiplied with gamma. -// -// Returns: -// dx: 4D backprop tensor for input. -// dm: 1D backprop tensor for mean. -// dv: 1D backprop tensor for variance. -// db: 1D backprop tensor for beta. -// dg: 1D backprop tensor for gamma. -func BatchNormWithGlobalNormalizationGrad(scope *Scope, t tf.Output, m tf.Output, v tf.Output, gamma tf.Output, backprop tf.Output, variance_epsilon float32, scale_after_normalization bool) (dx tf.Output, dm tf.Output, dv tf.Output, db tf.Output, dg tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{"variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization} - opspec := tf.OpSpec{ - Type: "BatchNormWithGlobalNormalizationGrad", - Input: []tf.Input{ - t, m, v, gamma, backprop, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0), op.Output(1), op.Output(2), op.Output(3), op.Output(4) -} - // CropAndResizeGradImageAttr is an optional argument to CropAndResizeGradImage. type CropAndResizeGradImageAttr func(optionalAttr) @@ -45355,141 +45239,6 @@ func OutfeedEnqueueTuple(scope *Scope, inputs []tf.Output) (o *tf.Operation) { return scope.AddOperation(opspec) } -// Returns the number of nonzeroes of `sparse_matrix`. -// -// Arguments: -// sparse_matrix: A CSRSparseMatrix. -// -// Returns The number of nonzeroes of `sparse_matrix`. -func SparseMatrixNNZ(scope *Scope, sparse_matrix tf.Output) (nnz tf.Output) { - if scope.Err() != nil { - return - } - opspec := tf.OpSpec{ - Type: "SparseMatrixNNZ", - Input: []tf.Input{ - sparse_matrix, - }, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - -// DecodeJpegAttr is an optional argument to DecodeJpeg. -type DecodeJpegAttr func(optionalAttr) - -// DecodeJpegChannels sets the optional channels attribute to value. -// -// value: Number of color channels for the decoded image. -// If not specified, defaults to 0 -func DecodeJpegChannels(value int64) DecodeJpegAttr { - return func(m optionalAttr) { - m["channels"] = value - } -} - -// DecodeJpegRatio sets the optional ratio attribute to value. -// -// value: Downscaling ratio. 
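// --- Editorial example (not part of the generated patch) ---
// A sketch of the Bincount wrapper above, using explicit per-element weights so
// that index i of the output holds the sum of weights where arr == i.
// Assumes the standard tensorflow/go bindings; names are illustrative only.
package example

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// bincountSketch returns a length-4 float32 vector: [0, 1.5, 2, 0].
func bincountSketch(s *op.Scope) tf.Output {
	arr := op.Const(s, []int32{1, 1, 2})
	size := op.Const(s, int32(4))
	weights := op.Const(s, []float32{0.5, 1.0, 2.0})
	return op.Bincount(s, arr, size, weights)
}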
-// If not specified, defaults to 1 -func DecodeJpegRatio(value int64) DecodeJpegAttr { - return func(m optionalAttr) { - m["ratio"] = value - } -} - -// DecodeJpegFancyUpscaling sets the optional fancy_upscaling attribute to value. -// -// value: If true use a slower but nicer upscaling of the -// chroma planes (yuv420/422 only). -// If not specified, defaults to true -func DecodeJpegFancyUpscaling(value bool) DecodeJpegAttr { - return func(m optionalAttr) { - m["fancy_upscaling"] = value - } -} - -// DecodeJpegTryRecoverTruncated sets the optional try_recover_truncated attribute to value. -// -// value: If true try to recover an image from truncated input. -// If not specified, defaults to false -func DecodeJpegTryRecoverTruncated(value bool) DecodeJpegAttr { - return func(m optionalAttr) { - m["try_recover_truncated"] = value - } -} - -// DecodeJpegAcceptableFraction sets the optional acceptable_fraction attribute to value. -// -// value: The minimum required fraction of lines before a truncated -// input is accepted. -// If not specified, defaults to 1 -func DecodeJpegAcceptableFraction(value float32) DecodeJpegAttr { - return func(m optionalAttr) { - m["acceptable_fraction"] = value - } -} - -// DecodeJpegDctMethod sets the optional dct_method attribute to value. -// -// value: string specifying a hint about the algorithm used for -// decompression. Defaults to "" which maps to a system-specific -// default. Currently valid values are ["INTEGER_FAST", -// "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal -// jpeg library changes to a version that does not have that specific -// option.) -// If not specified, defaults to "" -func DecodeJpegDctMethod(value string) DecodeJpegAttr { - return func(m optionalAttr) { - m["dct_method"] = value - } -} - -// Decode a JPEG-encoded image to a uint8 tensor. -// -// The attr `channels` indicates the desired number of color channels for the -// decoded image. -// -// Accepted values are: -// -// * 0: Use the number of channels in the JPEG-encoded image. -// * 1: output a grayscale image. -// * 3: output an RGB image. -// -// If needed, the JPEG-encoded image is transformed to match the requested number -// of color channels. -// -// The attr `ratio` allows downscaling the image by an integer factor during -// decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than -// downscaling the image later. -// -// -// This op also supports decoding PNGs and non-animated GIFs since the interface is -// the same, though it is cleaner to use `tf.io.decode_image`. -// -// Arguments: -// contents: 0-D. The JPEG-encoded image. -// -// Returns 3-D with shape `[height, width, channels]`.. -func DecodeJpeg(scope *Scope, contents tf.Output, optional ...DecodeJpegAttr) (image tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "DecodeJpeg", - Input: []tf.Input{ - contents, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - // LoadTPUEmbeddingADAMParametersAttr is an optional argument to LoadTPUEmbeddingADAMParameters. type LoadTPUEmbeddingADAMParametersAttr func(optionalAttr) @@ -46398,6 +46147,165 @@ func LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug(scope *Sc return scope.AddOperation(opspec) } +// FakeQuantWithMinMaxArgsAttr is an optional argument to FakeQuantWithMinMaxArgs. 
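// --- Editorial example (not part of the generated patch) ---
// A sketch of the DecodeJpeg wrapper above: the raw JPEG bytes are fed as a
// scalar string at run time and decoded to RGB at half resolution.
// Assumes the Placeholder wrapper from this same generated package; names are
// illustrative only.
package example

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// decodeJpegSketch returns a [height, width, 3] uint8 image tensor.
func decodeJpegSketch(s *op.Scope) tf.Output {
	contents := op.Placeholder(s, tf.String) // 0-D: JPEG-encoded bytes
	return op.DecodeJpeg(s, contents,
		op.DecodeJpegChannels(3),
		op.DecodeJpegRatio(2))
}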
+type FakeQuantWithMinMaxArgsAttr func(optionalAttr) + +// FakeQuantWithMinMaxArgsMin sets the optional min attribute to value. +// If not specified, defaults to -6 +func FakeQuantWithMinMaxArgsMin(value float32) FakeQuantWithMinMaxArgsAttr { + return func(m optionalAttr) { + m["min"] = value + } +} + +// FakeQuantWithMinMaxArgsMax sets the optional max attribute to value. +// If not specified, defaults to 6 +func FakeQuantWithMinMaxArgsMax(value float32) FakeQuantWithMinMaxArgsAttr { + return func(m optionalAttr) { + m["max"] = value + } +} + +// FakeQuantWithMinMaxArgsNumBits sets the optional num_bits attribute to value. +// If not specified, defaults to 8 +func FakeQuantWithMinMaxArgsNumBits(value int64) FakeQuantWithMinMaxArgsAttr { + return func(m optionalAttr) { + m["num_bits"] = value + } +} + +// FakeQuantWithMinMaxArgsNarrowRange sets the optional narrow_range attribute to value. +// If not specified, defaults to false +func FakeQuantWithMinMaxArgsNarrowRange(value bool) FakeQuantWithMinMaxArgsAttr { + return func(m optionalAttr) { + m["narrow_range"] = value + } +} + +// Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type. +// +// Attributes +// +// * `[min; max]` define the clamping range for the `inputs` data. +// * `inputs` values are quantized into the quantization range ( +// `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` +// when it is true) and then de-quantized and output as floats in `[min; max]` +// interval. +// * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. +// +// Before quantization, `min` and `max` values are adjusted with the following +// logic. +// It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, +// the behavior can be unexpected: +// +// * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. +// * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. +// * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, +// `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. +// +// Quantization is called fake since the output is still in floating point. +func FakeQuantWithMinMaxArgs(scope *Scope, inputs tf.Output, optional ...FakeQuantWithMinMaxArgsAttr) (outputs tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "FakeQuantWithMinMaxArgs", + Input: []tf.Input{ + inputs, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + +// Batch normalization. +// +// DEPRECATED at GraphDef version 9: Use tf.nn.batch_normalization() +// +// This op is deprecated. Prefer `tf.nn.batch_normalization`. +// +// Arguments: +// t: A 4D input Tensor. +// m: A 1D mean Tensor with size matching the last dimension of t. +// This is the first output from tf.nn.moments, +// or a saved moving average thereof. +// v: A 1D variance Tensor with size matching the last dimension of t. +// This is the second output from tf.nn.moments, +// or a saved moving average thereof. +// beta: A 1D beta Tensor with size matching the last dimension of t. +// An offset to be added to the normalized tensor. +// gamma: A 1D gamma Tensor with size matching the last dimension of t. +// If "scale_after_normalization" is true, this tensor will be multiplied +// with the normalized tensor. +// variance_epsilon: A small float number to avoid dividing by 0. 
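// --- Editorial example (not part of the generated patch) ---
// A sketch of the FakeQuantWithMinMaxArgs wrapper above: values are clamped to
// [0, 1], quantized to 8 bits, then de-quantized back to float32.
// Assumes the standard tensorflow/go bindings; names are illustrative only.
package example

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// fakeQuantSketch fake-quantizes a small float32 vector into the [0, 1] range.
func fakeQuantSketch(s *op.Scope) tf.Output {
	x := op.Const(s, []float32{-0.1, 0.3, 0.9, 1.2})
	return op.FakeQuantWithMinMaxArgs(s, x,
		op.FakeQuantWithMinMaxArgsMin(0),
		op.FakeQuantWithMinMaxArgsMax(1),
		op.FakeQuantWithMinMaxArgsNumBits(8))
}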
+// scale_after_normalization: A bool indicating whether the resulted tensor +// needs to be multiplied with gamma. +func BatchNormWithGlobalNormalization(scope *Scope, t tf.Output, m tf.Output, v tf.Output, beta tf.Output, gamma tf.Output, variance_epsilon float32, scale_after_normalization bool) (result tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{"variance_epsilon": variance_epsilon, "scale_after_normalization": scale_after_normalization} + opspec := tf.OpSpec{ + Type: "BatchNormWithGlobalNormalization", + Input: []tf.Input{ + t, m, v, beta, gamma, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + +// EncodeBase64Attr is an optional argument to EncodeBase64. +type EncodeBase64Attr func(optionalAttr) + +// EncodeBase64Pad sets the optional pad attribute to value. +// +// value: Bool whether padding is applied at the ends. +// If not specified, defaults to false +func EncodeBase64Pad(value bool) EncodeBase64Attr { + return func(m optionalAttr) { + m["pad"] = value + } +} + +// Encode strings into web-safe base64 format. +// +// Refer to the following article for more information on base64 format: +// en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the +// end so that the encoded has length multiple of 4. See Padding section of the +// link above. +// +// Web-safe means that the encoder uses - and _ instead of + and /. +// +// Arguments: +// input: Strings to be encoded. +// +// Returns Input strings encoded in base64. +func EncodeBase64(scope *Scope, input tf.Output, optional ...EncodeBase64Attr) (output tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "EncodeBase64", + Input: []tf.Input{ + input, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + // Creates a dataset that batches input elements into a SparseTensor. // // Arguments: @@ -46524,6 +46432,141 @@ func OutfeedEnqueue(scope *Scope, input tf.Output) (o *tf.Operation) { return scope.AddOperation(opspec) } +// DecodeJpegAttr is an optional argument to DecodeJpeg. +type DecodeJpegAttr func(optionalAttr) + +// DecodeJpegChannels sets the optional channels attribute to value. +// +// value: Number of color channels for the decoded image. +// If not specified, defaults to 0 +func DecodeJpegChannels(value int64) DecodeJpegAttr { + return func(m optionalAttr) { + m["channels"] = value + } +} + +// DecodeJpegRatio sets the optional ratio attribute to value. +// +// value: Downscaling ratio. +// If not specified, defaults to 1 +func DecodeJpegRatio(value int64) DecodeJpegAttr { + return func(m optionalAttr) { + m["ratio"] = value + } +} + +// DecodeJpegFancyUpscaling sets the optional fancy_upscaling attribute to value. +// +// value: If true use a slower but nicer upscaling of the +// chroma planes (yuv420/422 only). +// If not specified, defaults to true +func DecodeJpegFancyUpscaling(value bool) DecodeJpegAttr { + return func(m optionalAttr) { + m["fancy_upscaling"] = value + } +} + +// DecodeJpegTryRecoverTruncated sets the optional try_recover_truncated attribute to value. +// +// value: If true try to recover an image from truncated input. 
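// --- Editorial example (not part of the generated patch) ---
// A sketch of the EncodeBase64 wrapper above, producing web-safe base64 with
// '=' padding enabled. Assumes the standard tensorflow/go bindings; names are
// illustrative only.
package example

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// encodeBase64Sketch encodes a small batch of strings.
func encodeBase64Sketch(s *op.Scope) tf.Output {
	in := op.Const(s, []string{"hello", "tensorflow"})
	return op.EncodeBase64(s, in, op.EncodeBase64Pad(true))
}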
+// If not specified, defaults to false +func DecodeJpegTryRecoverTruncated(value bool) DecodeJpegAttr { + return func(m optionalAttr) { + m["try_recover_truncated"] = value + } +} + +// DecodeJpegAcceptableFraction sets the optional acceptable_fraction attribute to value. +// +// value: The minimum required fraction of lines before a truncated +// input is accepted. +// If not specified, defaults to 1 +func DecodeJpegAcceptableFraction(value float32) DecodeJpegAttr { + return func(m optionalAttr) { + m["acceptable_fraction"] = value + } +} + +// DecodeJpegDctMethod sets the optional dct_method attribute to value. +// +// value: string specifying a hint about the algorithm used for +// decompression. Defaults to "" which maps to a system-specific +// default. Currently valid values are ["INTEGER_FAST", +// "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal +// jpeg library changes to a version that does not have that specific +// option.) +// If not specified, defaults to "" +func DecodeJpegDctMethod(value string) DecodeJpegAttr { + return func(m optionalAttr) { + m["dct_method"] = value + } +} + +// Decode a JPEG-encoded image to a uint8 tensor. +// +// The attr `channels` indicates the desired number of color channels for the +// decoded image. +// +// Accepted values are: +// +// * 0: Use the number of channels in the JPEG-encoded image. +// * 1: output a grayscale image. +// * 3: output an RGB image. +// +// If needed, the JPEG-encoded image is transformed to match the requested number +// of color channels. +// +// The attr `ratio` allows downscaling the image by an integer factor during +// decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than +// downscaling the image later. +// +// +// This op also supports decoding PNGs and non-animated GIFs since the interface is +// the same, though it is cleaner to use `tf.io.decode_image`. +// +// Arguments: +// contents: 0-D. The JPEG-encoded image. +// +// Returns 3-D with shape `[height, width, channels]`.. +func DecodeJpeg(scope *Scope, contents tf.Output, optional ...DecodeJpegAttr) (image tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "DecodeJpeg", + Input: []tf.Input{ + contents, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + +// Returns the number of nonzeroes of `sparse_matrix`. +// +// Arguments: +// sparse_matrix: A CSRSparseMatrix. +// +// Returns The number of nonzeroes of `sparse_matrix`. +func SparseMatrixNNZ(scope *Scope, sparse_matrix tf.Output) (nnz tf.Output) { + if scope.Err() != nil { + return + } + opspec := tf.OpSpec{ + Type: "SparseMatrixNNZ", + Input: []tf.Input{ + sparse_matrix, + }, + } + op := scope.AddOperation(opspec) + return op.Output(0) +} + // LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr is an optional argument to LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug. type LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugAttr func(optionalAttr) @@ -47233,149 +47276,6 @@ func LoadTPUEmbeddingRMSPropParameters(scope *Scope, parameters tf.Output, ms tf return scope.AddOperation(opspec) } -// Creates a Dataset that returns pseudorandom numbers. -// -// Creates a Dataset that returns a stream of uniformly distributed -// pseudorandom 64-bit signed integers. -// -// In the TensorFlow Python API, you can instantiate this dataset via the -// class `tf.data.experimental.RandomDataset`. 
-// -// Instances of this dataset are also created as a result of the -// `hoist_random_uniform` static optimization. Whether this optimization is -// performed is determined by the `experimental_optimization.hoist_random_uniform` -// option of `tf.data.Options`. -// -// Arguments: -// seed: A scalar seed for the random number generator. If either seed or -// seed2 is set to be non-zero, the random number generator is seeded -// by the given seed. Otherwise, a random seed is used. -// seed2: A second scalar seed to avoid seed collision. -// -// -func RandomDataset(scope *Scope, seed tf.Output, seed2 tf.Output, output_types []tf.DataType, output_shapes []tf.Shape) (handle tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{"output_types": output_types, "output_shapes": output_shapes} - opspec := tf.OpSpec{ - Type: "RandomDataset", - Input: []tf.Input{ - seed, seed2, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - -// FractionalAvgPoolAttr is an optional argument to FractionalAvgPool. -type FractionalAvgPoolAttr func(optionalAttr) - -// FractionalAvgPoolPseudoRandom sets the optional pseudo_random attribute to value. -// -// value: When set to True, generates the pooling sequence in a -// pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin -// Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for -// difference between pseudorandom and random. -// If not specified, defaults to false -func FractionalAvgPoolPseudoRandom(value bool) FractionalAvgPoolAttr { - return func(m optionalAttr) { - m["pseudo_random"] = value - } -} - -// FractionalAvgPoolOverlapping sets the optional overlapping attribute to value. -// -// value: When set to True, it means when pooling, the values at the boundary -// of adjacent pooling cells are used by both cells. For example: -// -// `index 0 1 2 3 4` -// -// `value 20 5 16 3 7` -// -// If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. -// The result would be [41/3, 26/3] for fractional avg pooling. -// If not specified, defaults to false -func FractionalAvgPoolOverlapping(value bool) FractionalAvgPoolAttr { - return func(m optionalAttr) { - m["overlapping"] = value - } -} - -// FractionalAvgPoolDeterministic sets the optional deterministic attribute to value. -// -// value: When set to True, a fixed pooling region will be used when -// iterating over a FractionalAvgPool node in the computation graph. Mainly used -// in unit test to make FractionalAvgPool deterministic. -// If not specified, defaults to false -func FractionalAvgPoolDeterministic(value bool) FractionalAvgPoolAttr { - return func(m optionalAttr) { - m["deterministic"] = value - } -} - -// FractionalAvgPoolSeed sets the optional seed attribute to value. -// -// value: If either seed or seed2 are set to be non-zero, the random number -// generator is seeded by the given seed. Otherwise, it is seeded by a -// random seed. -// If not specified, defaults to 0 -func FractionalAvgPoolSeed(value int64) FractionalAvgPoolAttr { - return func(m optionalAttr) { - m["seed"] = value - } -} - -// FractionalAvgPoolSeed2 sets the optional seed2 attribute to value. -// -// value: An second seed to avoid seed collision. -// If not specified, defaults to 0 -func FractionalAvgPoolSeed2(value int64) FractionalAvgPoolAttr { - return func(m optionalAttr) { - m["seed2"] = value - } -} - -// Performs fractional average pooling on the input. 
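// --- Editorial example (not part of the generated patch) ---
// A sketch of the RandomDataset wrapper shown above: a dataset handle that
// yields scalar int64 pseudorandom values. tf.ScalarShape and the Const
// wrapper are assumed from the standard tensorflow/go bindings; names are
// illustrative only.
package example

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// randomDatasetSketch returns the variant handle of the dataset.
func randomDatasetSketch(s *op.Scope) tf.Output {
	seed := op.Const(s, int64(7))
	seed2 := op.Const(s, int64(11))
	return op.RandomDataset(s, seed, seed2,
		[]tf.DataType{tf.Int64},
		[]tf.Shape{tf.ScalarShape()})
}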
-// -// Fractional average pooling is similar to Fractional max pooling in the pooling -// region generation step. The only difference is that after pooling regions are -// generated, a mean operation is performed instead of a max operation in each -// pooling region. -// -// Arguments: -// value: 4-D with shape `[batch, height, width, channels]`. -// pooling_ratio: Pooling ratio for each dimension of `value`, currently only -// supports row and col dimension and should be >= 1.0. For example, a valid -// pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements -// must be 1.0 because we don't allow pooling on batch and channels -// dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions -// respectively. -// -// Returns: -// output: output tensor after fractional avg pooling. -// row_pooling_sequence: row pooling sequence, needed to calculate gradient. -// col_pooling_sequence: column pooling sequence, needed to calculate gradient. -func FractionalAvgPool(scope *Scope, value tf.Output, pooling_ratio []float32, optional ...FractionalAvgPoolAttr) (output tf.Output, row_pooling_sequence tf.Output, col_pooling_sequence tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{"pooling_ratio": pooling_ratio} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "FractionalAvgPool", - Input: []tf.Input{ - value, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0), op.Output(1), op.Output(2) -} - // StatefulUniformFullIntAttr is an optional argument to StatefulUniformFullInt. type StatefulUniformFullIntAttr func(optionalAttr) @@ -47966,49 +47866,6 @@ func LoadTPUEmbeddingFTRLParameters(scope *Scope, parameters tf.Output, accumula return scope.AddOperation(opspec) } -// Conv3DBackpropInputAttr is an optional argument to Conv3DBackpropInput. -type Conv3DBackpropInputAttr func(optionalAttr) - -// Conv3DBackpropInputDilations sets the optional dilations attribute to value. -// If not specified, defaults to -func Conv3DBackpropInputDilations(value []int64) Conv3DBackpropInputAttr { - return func(m optionalAttr) { - m["dilations"] = value - } -} - -// Computes the gradients of 3-D convolution with respect to the input. -// -// DEPRECATED at GraphDef version 10: Use Conv3DBackpropInputV2 -// -// Arguments: -// input: Shape `[batch, depth, rows, cols, in_channels]`. -// filter: Shape `[depth, rows, cols, in_channels, out_channels]`. -// `in_channels` must match between `input` and `filter`. -// out_backprop: Backprop signal of shape `[batch, out_depth, out_rows, out_cols, -// out_channels]`. -// strides: 1-D tensor of length 5. The stride of the sliding window for each -// dimension of `input`. Must have `strides[0] = strides[4] = 1`. -// padding: The type of padding algorithm to use. -func Conv3DBackpropInput(scope *Scope, input tf.Output, filter tf.Output, out_backprop tf.Output, strides []int64, padding string, optional ...Conv3DBackpropInputAttr) (output tf.Output) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{"strides": strides, "padding": padding} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "Conv3DBackpropInput", - Input: []tf.Input{ - input, filter, out_backprop, - }, - Attrs: attrs, - } - op := scope.AddOperation(opspec) - return op.Output(0) -} - // DepthwiseConv2dNativeAttr is an optional argument to DepthwiseConv2dNative. 
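// --- Editorial example (not part of the generated patch) ---
// A sketch of the FractionalAvgPool wrapper above on a batch of 10x10 RGB
// inputs, with pooling applied only to the height and width dimensions.
// Assumes the Placeholder wrapper from this same generated package and
// tf.MakeShape from the tensorflow/go bindings; names are illustrative only.
package example

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// fractionalAvgPoolSketch keeps the pooled output and drops the pooling sequences.
func fractionalAvgPoolSketch(s *op.Scope) tf.Output {
	value := op.Placeholder(s, tf.Float, op.PlaceholderShape(tf.MakeShape(1, 10, 10, 3)))
	out, _, _ := op.FractionalAvgPool(s, value,
		[]float32{1.0, 1.44, 1.73, 1.0}, // batch and channel ratios must be 1.0
		op.FractionalAvgPoolPseudoRandom(true))
	return out
}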
type DepthwiseConv2dNativeAttr func(optionalAttr) @@ -48963,44 +48820,6 @@ func AutoShardDataset(scope *Scope, input_dataset tf.Output, num_workers tf.Outp return op.Output(0) } -// InfeedEnqueuePrelinearizedBufferAttr is an optional argument to InfeedEnqueuePrelinearizedBuffer. -type InfeedEnqueuePrelinearizedBufferAttr func(optionalAttr) - -// InfeedEnqueuePrelinearizedBufferDeviceOrdinal sets the optional device_ordinal attribute to value. -// -// value: The TPU device to use. This should be -1 when the Op is running on a TPU device -// and = 0 when the Op is running on the CPU device. -// If not specified, defaults to -1 -func InfeedEnqueuePrelinearizedBufferDeviceOrdinal(value int64) InfeedEnqueuePrelinearizedBufferAttr { - return func(m optionalAttr) { - m["device_ordinal"] = value - } -} - -// An op which enqueues prelinearized buffer into TPU infeed. -// -// Arguments: -// input: A variant tensor representing linearized output. -// -// Returns the created operation. -func InfeedEnqueuePrelinearizedBuffer(scope *Scope, input tf.Output, optional ...InfeedEnqueuePrelinearizedBufferAttr) (o *tf.Operation) { - if scope.Err() != nil { - return - } - attrs := map[string]interface{}{} - for _, a := range optional { - a(attrs) - } - opspec := tf.OpSpec{ - Type: "InfeedEnqueuePrelinearizedBuffer", - Input: []tf.Input{ - input, - }, - Attrs: attrs, - } - return scope.AddOperation(opspec) -} - // RetrieveTPUEmbeddingFTRLParametersAttr is an optional argument to RetrieveTPUEmbeddingFTRLParameters. type RetrieveTPUEmbeddingFTRLParametersAttr func(optionalAttr) @@ -49126,6 +48945,90 @@ func ResourceApplyAdagradV2(scope *Scope, var_ tf.Output, accum tf.Output, lr tf return scope.AddOperation(opspec) } +// QuantizedMatMulWithBiasAndReluAttr is an optional argument to QuantizedMatMulWithBiasAndRelu. +type QuantizedMatMulWithBiasAndReluAttr func(optionalAttr) + +// QuantizedMatMulWithBiasAndReluToutput sets the optional Toutput attribute to value. +// If not specified, defaults to DT_QINT32 +func QuantizedMatMulWithBiasAndReluToutput(value tf.DataType) QuantizedMatMulWithBiasAndReluAttr { + return func(m optionalAttr) { + m["Toutput"] = value + } +} + +// QuantizedMatMulWithBiasAndReluTransposeA sets the optional transpose_a attribute to value. +// +// value: If true, `a` is transposed before multiplication. +// If not specified, defaults to false +func QuantizedMatMulWithBiasAndReluTransposeA(value bool) QuantizedMatMulWithBiasAndReluAttr { + return func(m optionalAttr) { + m["transpose_a"] = value + } +} + +// QuantizedMatMulWithBiasAndReluTransposeB sets the optional transpose_b attribute to value. +// +// value: If true, `b` is transposed before multiplication. +// If not specified, defaults to false +func QuantizedMatMulWithBiasAndReluTransposeB(value bool) QuantizedMatMulWithBiasAndReluAttr { + return func(m optionalAttr) { + m["transpose_b"] = value + } +} + +// QuantizedMatMulWithBiasAndReluInputQuantMode sets the optional input_quant_mode attribute to value. +// +// value: Input data quantization mode. Either MIN_FIRST(default) or SCALED. +// If not specified, defaults to "MIN_FIRST" +func QuantizedMatMulWithBiasAndReluInputQuantMode(value string) QuantizedMatMulWithBiasAndReluAttr { + return func(m optionalAttr) { + m["input_quant_mode"] = value + } +} + +// Perform a quantized matrix multiplication of `a` by the matrix `b` with bias +// add and relu fusion. +// +// The inputs must be two-dimensional matrices and 1D bias vector. 
And the inner +// dimension of `a` (after being transposed if `transpose_a` is non-zero) must +// match the outer dimension of `b` (after being transposed if `transposed_b` is +// non-zero). Then do broadcast add operation with bias values on the matrix +// multiplication result. The bias size must match inner dimension of `b`. Then do +// relu activation to get non-negative result. +// +// Arguments: +// a: A matrix to be multiplied. Must be a two-dimensional tensor of type `quint8`. +// b: A matrix to be multiplied and must be a two-dimensional tensor of type `qint8`. +// bias: A 1D bias tensor with size matching with inner dimension of `b` (after being +// transposed if `transposed_b` is non-zero). +// min_a: The float value that the lowest quantized `a` value represents. +// max_a: The float value that the highest quantized `a` value represents. +// min_b: The float value that the lowest quantized `b` value represents. +// max_b: The float value that the highest quantized `b` value represents. +// +// Returns: +// out +// min_out: The float value that the lowest quantized output value represents. +// max_out: The float value that the highest quantized output value represents. +func QuantizedMatMulWithBiasAndRelu(scope *Scope, a tf.Output, b tf.Output, bias tf.Output, min_a tf.Output, max_a tf.Output, min_b tf.Output, max_b tf.Output, optional ...QuantizedMatMulWithBiasAndReluAttr) (out tf.Output, min_out tf.Output, max_out tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "QuantizedMatMulWithBiasAndRelu", + Input: []tf.Input{ + a, b, bias, min_a, max_a, min_b, max_b, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0), op.Output(1), op.Output(2) +} + // Shuts down a running distributed TPU system. // // The op returns an error if no system is running. @@ -49734,3 +49637,60 @@ func TPUReplicatedOutput(scope *Scope, input tf.Output, num_replicas int64) (out } return outputs } + +// Removes keys and its associated values from a table. +// +// The tensor `keys` must of the same type as the keys of the table. Keys not +// already in the table are silently ignored. +// +// Arguments: +// table_handle: Handle to the table. +// keys: Any shape. Keys of the elements to remove. +// +// Returns the created operation. +func LookupTableRemoveV2(scope *Scope, table_handle tf.Output, keys tf.Output) (o *tf.Operation) { + if scope.Err() != nil { + return + } + opspec := tf.OpSpec{ + Type: "LookupTableRemoveV2", + Input: []tf.Input{ + table_handle, keys, + }, + } + return scope.AddOperation(opspec) +} + +// NotEqualAttr is an optional argument to NotEqual. +type NotEqualAttr func(optionalAttr) + +// NotEqualIncompatibleShapeError sets the optional incompatible_shape_error attribute to value. +// If not specified, defaults to true +func NotEqualIncompatibleShapeError(value bool) NotEqualAttr { + return func(m optionalAttr) { + m["incompatible_shape_error"] = value + } +} + +// Returns the truth value of (x != y) element-wise. +// +// *NOTE*: `NotEqual` supports broadcasting. 
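// --- Editorial example (not part of the generated patch) ---
// A sketch of the QuantizedMatMulWithBiasAndRelu wrapper above. The quantized
// operands and the float32 bias are fed at run time via Placeholder nodes from
// this same generated package; the min/max range constants and all names are
// illustrative only.
package example

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// quantizedMatMulReluSketch keeps the quantized output and drops its range outputs.
func quantizedMatMulReluSketch(s *op.Scope) tf.Output {
	a := op.Placeholder(s, tf.Quint8)   // quantized activations
	b := op.Placeholder(s, tf.Qint8)    // quantized weights
	bias := op.Placeholder(s, tf.Float) // 1-D bias matching the inner dimension of b
	minA, maxA := op.Const(s, float32(0)), op.Const(s, float32(6))
	minB, maxB := op.Const(s, float32(-1)), op.Const(s, float32(1))
	out, _, _ := op.QuantizedMatMulWithBiasAndRelu(s, a, b, bias, minA, maxA, minB, maxB)
	return out
}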
More about broadcasting +// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) +func NotEqual(scope *Scope, x tf.Output, y tf.Output, optional ...NotEqualAttr) (z tf.Output) { + if scope.Err() != nil { + return + } + attrs := map[string]interface{}{} + for _, a := range optional { + a(attrs) + } + opspec := tf.OpSpec{ + Type: "NotEqual", + Input: []tf.Input{ + x, y, + }, + Attrs: attrs, + } + op := scope.AddOperation(opspec) + return op.Output(0) +}
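// --- Editorial example (not part of the generated patch) ---
// A closing sketch of the NotEqual wrapper above, with the optional
// incompatible_shape_error attribute set explicitly to its default of true.
// Assumes the standard tensorflow/go bindings; names are illustrative only.
package example

import (
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// notEqualSketch returns a bool vector: [false, true, false].
func notEqualSketch(s *op.Scope) tf.Output {
	x := op.Const(s, []int32{1, 2, 3})
	y := op.Const(s, []int32{1, 0, 3})
	return op.NotEqual(s, x, y, op.NotEqualIncompatibleShapeError(true))
}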