Reduce ops v2 API changes

PiperOrigin-RevId: 222422608

commit 1799b11347 (parent 4f92a46fa8)
Authored by A. Unique TensorFlower on 2018-11-21 09:52:26 -08:00, committed by TensorFlower Gardener
33 changed files with 659 additions and 248 deletions
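The change renames the deprecated `reduction_indices` argument to `axis` (and `keep_dims` to `keepdims`) across the reduce ops, and splits each op into a v1 wrapper that still accepts the old names and a v2 function that only takes `axis`/`keepdims`. A minimal before/after sketch of the migration, assuming a TF 1.x runtime where the deprecated spellings still exist; the tensor values are illustrative only:

```python
import tensorflow as tf

x = tf.constant([[1., 2., 3.], [4., 5., 6.]])

# Deprecated 1.x spelling: reduction_indices / keep_dims.
s_old = tf.reduce_sum(x, reduction_indices=[1], keep_dims=True)

# v2 spelling used throughout this change: axis / keepdims.
s_new = tf.reduce_sum(x, axis=[1], keepdims=True)  # [[6.], [15.]]
```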


@@ -81,7 +81,7 @@ class ExpectationImportanceSampleTest(test.TestCase):
       # Compute E_p[X_1 * X_2 > 0], with X_i the ith component of X ~ p(x).
       # Should equal 1/2 because p is a spherical Gaussian centered at (0, 0).
       def indicator(x):
-        x1_times_x2 = math_ops.reduce_prod(x, reduction_indices=[-1])
+        x1_times_x2 = math_ops.reduce_prod(x, axis=[-1])
         return 0.5 * (math_ops.sign(x1_times_x2) + 1.0)
       prob = mc.expectation_importance_sampler(


@@ -353,12 +353,12 @@ def expectation(f, samples, log_prob=None, use_reparametrization=True,
 def _sample_mean(values):
   """Mean over sample indices. In this module this is always [0]."""
-  return math_ops.reduce_mean(values, reduction_indices=[0])
+  return math_ops.reduce_mean(values, axis=[0])
 def _sample_max(values):
   """Max over sample indices. In this module this is always [0]."""
-  return math_ops.reduce_max(values, reduction_indices=[0])
+  return math_ops.reduce_max(values, axis=[0])
 def _get_samples(dist, z, n, seed):


@@ -119,8 +119,7 @@ def per_example_maxent_loss(labels, weights, logits, num_classes, eps=1e-15):
     labels = array_ops.expand_dims(labels, 1)
     # Labels are indices of classes, convert them to one hot encodings.
     target_one_hot = array_ops.one_hot(indices=labels, depth=num_classes)
-    labels = math_ops.reduce_sum(
-        input_tensor=target_one_hot, reduction_indices=[1])
+    labels = math_ops.reduce_sum(input_tensor=target_one_hot, axis=[1])
     labels = math_ops.to_float(labels)
   # Calculate softmax probabilities for each class.


@@ -82,7 +82,7 @@ class NormalTest(test.TestCase):
     x = constant_op.constant(
         [[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], [2.5, -2.5, -4.0, 0.0, 1.0, -2.0]],
         dtype=dtypes.float32)
-    s = math_ops.reduce_sum(x, reduction_indices=[1])
+    s = math_ops.reduce_sum(x, axis=[1])
     x = array_ops.transpose(x)  # Reshape to shape (6, 2)
     n = constant_op.constant([6] * 2)
     prior = distributions.Normal(loc=mu0, scale=sigma0)


@@ -147,14 +147,13 @@ class WishartCholeskyTest(test.TestCase):
       x = chol_w.sample(10000, seed=42)
       self.assertAllEqual((10000, 3, 3), x.get_shape())
-      moment1_estimate = math_ops.reduce_mean(x, reduction_indices=[0]).eval()
+      moment1_estimate = math_ops.reduce_mean(x, axis=[0]).eval()
       self.assertAllClose(chol_w.mean().eval(), moment1_estimate, rtol=0.05)
       # The Variance estimate uses the squares rather than outer-products
       # because Wishart.Variance is the diagonal of the Wishart covariance
       # matrix.
-      variance_estimate = (math_ops.reduce_mean(
-          math_ops.square(x), reduction_indices=[0]) -
+      variance_estimate = (math_ops.reduce_mean(math_ops.square(x), axis=[0]) -
                            math_ops.square(moment1_estimate)).eval()
       self.assertAllClose(
           chol_w.variance().eval(), variance_estimate, rtol=0.05)
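The test above estimates the first and second moments by averaging samples over the sample axis (`axis=[0]`). A small hedged sketch of the same pattern outside the Wishart test, assuming a TF 2.x-style API; the distribution and shapes are made up:

```python
import tensorflow as tf

# Estimate moments of a unit Gaussian by reducing over the sample axis.
samples = tf.random.normal([10000, 3, 3], seed=42)
moment1 = tf.reduce_mean(samples, axis=[0])             # approx. 0
moment2 = tf.reduce_mean(tf.square(samples), axis=[0])  # approx. 1
variance = moment2 - tf.square(moment1)
```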


@@ -168,7 +168,7 @@ class SoftmaxCentered(bijector.Bijector):
     #   log_normalization = 1 + reduce_sum(exp(logits))
     #   -log_normalization + reduce_sum(logits - log_normalization)
     log_normalization = nn_ops.softplus(
-        math_ops.reduce_logsumexp(x, axis=-1, keep_dims=True))
+        math_ops.reduce_logsumexp(x, axis=-1, keepdims=True))
     return array_ops.squeeze(
         (-log_normalization + math_ops.reduce_sum(
             x - log_normalization, axis=-1, keepdims=True)), axis=-1)


@@ -87,8 +87,8 @@ class TFETest(test_util.TensorFlowTestCase):
     x += 1.
     # Without a device context, heuristics are used to place ops.
     # In this case, ops.reduce_mean runs on the GPU.
-    reduction_indices = range(x.shape.ndims)
-    m = math_ops.reduce_mean(x, reduction_indices)
+    axis = range(x.shape.ndims)
+    m = math_ops.reduce_mean(x, axis)
     # m is on GPU, bring it back to CPU and compare.
     self.assertEqual(3.5, m.cpu().numpy())


@@ -84,8 +84,7 @@ def bow_encoder(ids,
     if isinstance(ids, sparse_tensor.SparseTensor):
       raise TypeError('ids are expected to be dense Tensor, got: %s', ids)
     return math_ops.reduce_mean(
-        embedding_ops.embedding_lookup(embeddings, ids),
-        reduction_indices=1)
+        embedding_ops.embedding_lookup(embeddings, ids), axis=1)
 def embed_sequence(ids,


@@ -1015,8 +1015,7 @@ class _OneHotColumn(
         dense_id_tensor, depth=self.length, on_value=1.0, off_value=0.0)
     # Reduce to get a multi-hot per example.
-    return math_ops.reduce_sum(
-        one_hot_id_tensor, reduction_indices=[output_rank - 1])
+    return math_ops.reduce_sum(one_hot_id_tensor, axis=[output_rank - 1])
   @property
   def _variable_shape(self):


@@ -3811,7 +3811,7 @@ class UnitNormTests(test.TestCase):
     image = random_ops.random_uniform((height, width, 3))
     output = _layers.unit_norm(image, dim=dim, epsilon=1e-6)
     norms = math_ops.sqrt(
-        math_ops.reduce_sum(math_ops.square(output), reduction_indices=dim))
+        math_ops.reduce_sum(math_ops.square(output), axis=dim))
     shape = [height, width, 3]
     del shape[dim]
@@ -3847,7 +3847,7 @@ class UnitNormTests(test.TestCase):
     image = array_ops.placeholder(dtypes.float32, (None, None, 3))
     output = _layers.unit_norm(image, dim=dim, epsilon=1e-6)
     norms = math_ops.sqrt(
-        math_ops.reduce_sum(math_ops.square(output), reduction_indices=dim))
+        math_ops.reduce_sum(math_ops.square(output), axis=dim))
     with self.cached_session():
       actual = norms.eval({image: placeholder_value})


@@ -668,7 +668,7 @@ class DynamicRNNEstimatorLearningTest(test.TestCase):
       sequences = centers + noise
       inputs = array_ops.expand_dims(sequences, 2)
-      labels = math_ops.reduce_mean(sequences, reduction_indices=[1])
+      labels = math_ops.reduce_mean(sequences, axis=[1])
       return {'inputs': inputs}, labels
     return input_fn
@@ -722,8 +722,8 @@ class DynamicRNNEstimatorLearningTest(test.TestCase):
       inputs = array_ops.expand_dims(math_ops.to_float(random_sequence), 2)
       labels = math_ops.to_int32(
           array_ops.squeeze(
-              math_ops.reduce_sum(
-                  inputs, reduction_indices=[1]) > (sequence_length / 2.0)))
+              math_ops.reduce_sum(inputs, axis=[1]) > (
+                  sequence_length / 2.0)))
       return {'inputs': inputs}, labels
     return input_fn


@@ -59,9 +59,8 @@ def _scale_losses(losses, weights):
   """
   # First, compute the sum of the losses over all elements:
   start_index = max(0, weights.get_shape().ndims)
-  reduction_indices = list(range(start_index, losses.get_shape().ndims))
-  reduced_losses = math_ops.reduce_sum(
-      losses, reduction_indices=reduction_indices)
+  axis = list(range(start_index, losses.get_shape().ndims))
+  reduced_losses = math_ops.reduce_sum(losses, axis=axis)
   reduced_losses = math_ops.multiply(reduced_losses, weights)
   return math_ops.reduce_sum(reduced_losses)
@@ -158,10 +157,9 @@ def _num_present(losses, weights, per_batch=False):
   # First, count the number of nonzero weights:
   if weights.get_shape().ndims >= 1:
-    reduction_indices = list(range(1, weights.get_shape().ndims))
+    axis = list(range(1, weights.get_shape().ndims))
     num_nonzero_per_batch = math_ops.reduce_sum(
-        math_ops.to_float(math_ops.not_equal(weights, 0)),
-        reduction_indices=reduction_indices)
+        math_ops.to_float(math_ops.not_equal(weights, 0)), axis=axis)
   # Next, determine the number of elements that weights would broadcast to:
   broadcast_dims = array_ops.slice(
@@ -577,16 +575,16 @@ def mean_pairwise_squared_error(predictions,
     if weights.get_shape().ndims is None:
       raise ValueError("weights.get_shape().ndims cannot be None")
-    reduction_indices = list(range(1, diffs.get_shape().ndims))
+    axis = list(range(1, diffs.get_shape().ndims))
     sum_squares_diff_per_batch = math_ops.reduce_sum(
-        math_ops.square(diffs), reduction_indices=reduction_indices)
+        math_ops.square(diffs), axis=axis)
     num_present_per_batch = _num_present(diffs, weights, per_batch=True)
     term1 = 2.0 * math_ops.div_no_nan(
         sum_squares_diff_per_batch, num_present_per_batch, name="value")
-    sum_diff = math_ops.reduce_sum(diffs, reduction_indices=reduction_indices)
+    sum_diff = math_ops.reduce_sum(diffs, axis=axis)
     term2 = 2.0 * math_ops.div_no_nan(
         math_ops.square(sum_diff),
         math_ops.square(num_present_per_batch),
@@ -645,7 +643,7 @@ def cosine_distance(predictions,
     radial_diffs = math_ops.multiply(predictions, labels)
     losses = 1 - math_ops.reduce_sum(
-        radial_diffs, reduction_indices=[
+        radial_diffs, axis=[
            axis,
        ])
    return compute_weighted_loss(losses, weights, scope=scope)
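The cosine-distance loss above reduces the elementwise product of two unit-normalized tensors over the feature axis. A minimal standalone sketch of the same computation with the v2 argument names; the example tensors are made up and this is not the library code itself:

```python
import tensorflow as tf

predictions = tf.math.l2_normalize([[1., 0., 0.], [0., 1., 0.]], axis=1)
labels = tf.math.l2_normalize([[1., 0., 0.], [1., 0., 0.]], axis=1)

# 1 - cosine similarity, reduced over the feature axis.
losses = 1 - tf.reduce_sum(predictions * labels, axis=[1])  # [0., 1.]
```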


@@ -3416,7 +3416,7 @@ def streaming_mean_cosine_distance(predictions,
   predictions.get_shape().assert_is_compatible_with(labels.get_shape())
   radial_diffs = math_ops.multiply(predictions, labels)
   radial_diffs = math_ops.reduce_sum(
-      radial_diffs, reduction_indices=[
+      radial_diffs, axis=[
           dim,
       ], keepdims=True)
   mean_distance, update_op = streaming_mean(radial_diffs, weights, None, None,


@@ -138,7 +138,7 @@ def LastValueQuantize(inputs,
     if per_channel:
       if input_dim >= 2:
         batch_min = math_ops.reduce_min(
-            inputs, reduction_indices=reduce_dims, name='BatchMin')
+            inputs, axis=reduce_dims, name='BatchMin')
       else:
         batch_min = inputs
     else:
@@ -147,7 +147,7 @@ def LastValueQuantize(inputs,
     if per_channel:
       if input_dim >= 2:
         batch_max = math_ops.reduce_max(
-            inputs, reduction_indices=reduce_dims, name='BatchMax')
+            inputs, axis=reduce_dims, name='BatchMax')
       else:
         batch_max = inputs
     else:
@@ -263,7 +263,7 @@ def MovingAvgQuantize(inputs,
     if per_channel:
       if input_dim >= 2:
         batch_min = math_ops.reduce_min(
-            inputs, reduction_indices=reduce_dims, name='BatchMin')
+            inputs, axis=reduce_dims, name='BatchMin')
       else:
         batch_min = inputs
     else:
@@ -272,7 +272,7 @@ def MovingAvgQuantize(inputs,
     if per_channel:
      if input_dim >= 2:
        batch_max = math_ops.reduce_max(
-            inputs, reduction_indices=reduce_dims, name='BatchMax')
+            inputs, axis=reduce_dims, name='BatchMax')
      else:
        batch_max = inputs
    else:
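For per-channel quantization ranges, the batch minimum and maximum are reduced over every dimension except the channel axis. A hedged sketch of that reduction for an NHWC tensor; the name `reduce_dims` and the shapes are illustrative, not taken from the library:

```python
import tensorflow as tf

inputs = tf.random.uniform([8, 32, 32, 16])  # NHWC, 16 channels
reduce_dims = [0, 1, 2]                      # all axes except the channel axis

batch_min = tf.reduce_min(inputs, axis=reduce_dims, name='BatchMin')  # shape [16]
batch_max = tf.reduce_max(inputs, axis=reduce_dims, name='BatchMax')  # shape [16]
```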


@@ -74,7 +74,7 @@ class BackpropTest(test.TestCase):
       tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
       tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2)
       tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3)
-      tf_g4 = math_ops.reduce_sum(tf_var * 2.0, reduction_indices=(0, 1))
+      tf_g4 = math_ops.reduce_sum(tf_var * 2.0, axis=(0, 1))
       tf_y = tf_g1 * tf_g2 * tf_g3 * tf_g4
       tf_grad = gradients.gradients(tf_y, [tf_var])[0]


@@ -96,8 +96,8 @@ class CostAnalysisTest(test.TestCase):
       b_fc = variables.Variable(random_ops.truncated_normal([10], stddev=0.1))
       y_conv = nn_ops.softmax(math_ops.matmul(h_conv_flat, w_fc) + b_fc)
-      cross_entropy = math_ops.reduce_mean(-math_ops.reduce_sum(
-          label * math_ops.log(y_conv), reduction_indices=[1]))
+      cross_entropy = math_ops.reduce_mean(
+          -math_ops.reduce_sum(label * math_ops.log(y_conv), axis=[1]))
       _ = adam.AdamOptimizer(1e-4).minimize(cross_entropy)
       mg = meta_graph.create_meta_graph_def(graph=ops.get_default_graph())
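The test wires up the textbook cross-entropy: sum the per-class terms over axis 1, then average over the batch. A hedged standalone version using the v2 argument names; the labels and predictions are toy values:

```python
import tensorflow as tf

label = tf.constant([[0., 1.], [1., 0.]])
y_conv = tf.constant([[0.2, 0.8], [0.6, 0.4]])

# Per-example sum over classes (axis 1), then mean over the batch.
cross_entropy = tf.reduce_mean(
    -tf.reduce_sum(label * tf.math.log(y_conv), axis=[1]))
```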


@@ -88,7 +88,7 @@ def logdet(matrix, name=None):
     chol = gen_linalg_ops.cholesky(matrix)
     return 2.0 * math_ops.reduce_sum(
         math_ops.log(math_ops.real(array_ops.matrix_diag_part(chol))),
-        reduction_indices=[-1])
+        axis=[-1])
 @tf_export('linalg.adjoint')


@@ -690,7 +690,7 @@ class LinearOperator(object):
         " Requires conversion to a dense matrix and O(N^3) operations.")
     if self._can_use_cholesky():
       diag = array_ops.matrix_diag_part(linalg_ops.cholesky(self.to_dense()))
-      return 2 * math_ops.reduce_sum(math_ops.log(diag), reduction_indices=[-1])
+      return 2 * math_ops.reduce_sum(math_ops.log(diag), axis=[-1])
     _, log_abs_det = linalg.slogdet(self.to_dense())
     return log_abs_det


@@ -418,15 +418,13 @@ class _BaseLinearOperatorCirculant(linear_operator.LinearOperator):
     return math_ops.cast(y, self.dtype)
   def _determinant(self):
-    reduction_indices = [-(i + 1) for i in range(self.block_depth)]
-    det = math_ops.reduce_prod(
-        self.spectrum, reduction_indices=reduction_indices)
+    axis = [-(i + 1) for i in range(self.block_depth)]
+    det = math_ops.reduce_prod(self.spectrum, axis=axis)
     return math_ops.cast(det, self.dtype)
   def _log_abs_determinant(self):
-    reduction_indices = [-(i + 1) for i in range(self.block_depth)]
-    lad = math_ops.reduce_sum(
-        math_ops.log(self._abs_spectrum), reduction_indices=reduction_indices)
+    axis = [-(i + 1) for i in range(self.block_depth)]
+    lad = math_ops.reduce_sum(math_ops.log(self._abs_spectrum), axis=axis)
     return math_ops.cast(lad, self.dtype)
   def _solve(self, rhs, adjoint=False, adjoint_arg=False):
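The negative axis indices above select the trailing spectrum dimensions of the circulant operator. A small hedged illustration of reducing over trailing axes with negative indices; the spectrum values and the assumed block depth of 1 are made up for the example:

```python
import tensorflow as tf

spectrum = tf.constant([[2., 3.], [4., 5.]])   # toy spectrum, block_depth == 1
axis = [-(i + 1) for i in range(1)]            # [-1]: the trailing axis
det = tf.reduce_prod(spectrum, axis=axis)                          # [6., 20.]
log_abs = tf.reduce_sum(tf.math.log(tf.abs(spectrum)), axis=axis)  # log|det|
```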


@@ -228,11 +228,11 @@ class LinearOperatorDiag(linear_operator.LinearOperator):
     return diag_mat * x
   def _determinant(self):
-    return math_ops.reduce_prod(self._diag, reduction_indices=[-1])
+    return math_ops.reduce_prod(self._diag, axis=[-1])
   def _log_abs_determinant(self):
     log_det = math_ops.reduce_sum(
-        math_ops.log(math_ops.abs(self._diag)), reduction_indices=[-1])
+        math_ops.log(math_ops.abs(self._diag)), axis=[-1])
     if self.dtype.is_complex:
       log_det = math_ops.cast(log_det, dtype=self.dtype)
     return log_det


@@ -391,7 +391,7 @@ class LinearOperatorLowRankUpdate(linear_operator.LinearOperator):
     if self._use_cholesky:
       chol_cap_diag = array_ops.matrix_diag_part(self._chol_capacitance)
       log_abs_det_c = 2 * math_ops.reduce_sum(
-          math_ops.log(chol_cap_diag), reduction_indices=[-1])
+          math_ops.log(chol_cap_diag), axis=[-1])
     else:
       det_c = linalg_ops.matrix_determinant(self._capacitance)
       log_abs_det_c = math_ops.log(math_ops.abs(det_c))


@@ -195,11 +195,11 @@ class LinearOperatorLowerTriangular(linear_operator.LinearOperator):
         self._tril, x, adjoint_a=adjoint, adjoint_b=adjoint_arg)
   def _determinant(self):
-    return math_ops.reduce_prod(self._diag, reduction_indices=[-1])
+    return math_ops.reduce_prod(self._diag, axis=[-1])
   def _log_abs_determinant(self):
     return math_ops.reduce_sum(
-        math_ops.log(math_ops.abs(self._diag)), reduction_indices=[-1])
+        math_ops.log(math_ops.abs(self._diag)), axis=[-1])
   def _solve(self, rhs, adjoint=False, adjoint_arg=False):
     rhs = linalg.adjoint(rhs) if adjoint_arg else rhs


@@ -583,12 +583,10 @@ def mean_pairwise_squared_error(
     diffs = math_ops.subtract(predictions, labels)
-    reduction_indices = math_ops.range(1, array_ops.rank(diffs))
+    axis = math_ops.range(1, array_ops.rank(diffs))
     sum_squares_diff_per_batch = math_ops.reduce_sum(
-        math_ops.square(diffs),
-        reduction_indices=reduction_indices,
-        keepdims=True)
+        math_ops.square(diffs), axis=axis, keepdims=True)
     num_present_per_batch = _num_present(diffs, weights, per_batch=True)
     term1 = 2.0 * math_ops.div_no_nan(
@@ -596,8 +594,7 @@ def mean_pairwise_squared_error(
         math_ops.maximum(num_present_per_batch - 1, 0),
         name="value")
-    sum_diff = math_ops.reduce_sum(
-        diffs, reduction_indices=reduction_indices, keepdims=True)
+    sum_diff = math_ops.reduce_sum(diffs, axis=axis, keepdims=True)
     term2 = 2.0 * math_ops.div_no_nan(
         math_ops.square(sum_diff),
         math_ops.maximum(


@@ -1314,7 +1314,7 @@ def range(start, limit=None, delta=1, dtype=None, name="range"):  # pylint: disa
 # Reduction operations
-def _ReductionDims(x, axis, reduction_indices):
+def _ReductionDims(x, axis, reduction_indices=None):  # pylint: disable=invalid-name
   """Returns range(0, rank(x)) if reduction_indices is None."""
   # TODO(aselle): Remove this after deprecation
   if reduction_indices is not None:
@@ -1337,18 +1337,18 @@ def _ReductionDims(x, axis, reduction_indices):
   return range(0, array_ops.rank(x))
-def _may_reduce_to_scalar(keepdims, axis, reduction_indices, output):
+def _may_reduce_to_scalar(keepdims, axis, output):
   """Set a reduction's output shape to be a scalar if we are certain."""
   if not common_shapes.has_fully_defined_shape(output) and (not keepdims) and (
-      axis is None) and (reduction_indices is None):
+      axis is None):
     output.set_shape(())
   return output
-@tf_export("math.reduce_sum", "reduce_sum")
+@tf_export(v1=["math.reduce_sum", "reduce_sum"])
 @deprecation.deprecated_args(
     None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
-def reduce_sum(input_tensor,
+def reduce_sum_v1(input_tensor,
                axis=None,
                keepdims=None,
                name=None,
@@ -1393,17 +1393,57 @@ def reduce_sum(input_tensor,
     int64 while tensorflow returns the same dtype as the input.
   @end_compatibility
   """
-  axis = deprecation.deprecated_argument_lookup(
-      "axis", axis, "reduction_indices", reduction_indices)
   keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                     "keep_dims", keep_dims)
-  if keepdims is None:
-    keepdims = False
-  return _may_reduce_to_scalar(keepdims, axis, reduction_indices,
+  return reduce_sum(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_sum", "reduce_sum", v1=[])
def reduce_sum(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the sum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[1, 1, 1], [1, 1, 1]])
tf.reduce_sum(x) # 6
tf.reduce_sum(x, 0) # [2, 2, 2]
tf.reduce_sum(x, 1) # [3, 3]
tf.reduce_sum(x, 1, keepdims=True) # [[3], [3]]
tf.reduce_sum(x, [0, 1]) # 6
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor, of the same dtype as the input_tensor.
@compatibility(numpy)
Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to
int64 while tensorflow returns the same dtype as the input.
@end_compatibility
"""
keepdims = False if keepdims is None else keepdims
return _may_reduce_to_scalar(
keepdims, axis,
       gen_math_ops._sum(
-          input_tensor,
-          _ReductionDims(input_tensor, axis,
-                         reduction_indices),
-          keepdims,
+          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
           name=name))
@@ -1544,10 +1584,8 @@ def count_nonzero_v2(input,  # pylint: disable=redefined-builtin
       dtype=dtype)
-@tf_export("math.reduce_mean", "reduce_mean")
-@deprecation.deprecated_args(
-    None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
-def reduce_mean(input_tensor,
+@tf_export(v1=["math.reduce_mean", "reduce_mean"])
+def reduce_mean_v1(input_tensor,
                 axis=None,
                 keepdims=None,
                 name=None,
@@ -1602,22 +1640,72 @@ def reduce_mean(input_tensor,
   @end_compatibility
   """
-  axis = deprecation.deprecated_argument_lookup(
-      "axis", axis, "reduction_indices", reduction_indices)
   keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                     "keep_dims", keep_dims)
-  if keepdims is None:
-    keepdims = False
-  return _may_reduce_to_scalar(keepdims, axis, reduction_indices,
+  return reduce_mean(input_tensor, axis, keepdims, name)
+@tf_export("math.reduce_mean", "reduce_mean", v1=[])
+def reduce_mean(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the mean of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[1., 1.], [2., 2.]])
tf.reduce_mean(x) # 1.5
tf.reduce_mean(x, 0) # [1.5, 1.5]
tf.reduce_mean(x, 1) # [1., 2.]
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.mean
Please note that `np.mean` has a `dtype` parameter that could be used to
specify the output type. By default this is `dtype=float64`. On the other
hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,
for example:
```python
x = tf.constant([1, 0, 1, 0])
tf.reduce_mean(x) # 0
y = tf.constant([1., 0., 1., 0.])
tf.reduce_mean(y) # 0.5
```
@end_compatibility
"""
keepdims = False if keepdims is None else keepdims
return _may_reduce_to_scalar(
keepdims, axis,
       gen_math_ops.mean(
-          input_tensor,
-          _ReductionDims(input_tensor, axis,
-                         reduction_indices),
-          keepdims,
+          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
           name=name))
 @tf_export("math.reduce_variance")
-def reduce_variance(input_tensor, axis=None, keepdims=None, name=None):
+def reduce_variance(input_tensor, axis=None, keepdims=False, name=None):
   """Computes the variance of elements across dimensions of a tensor.
   Reduces `input_tensor` along the dimensions given in `axis`.
@@ -1665,7 +1753,7 @@ def reduce_variance(input_tensor, axis=None, keepdims=None, name=None):
 @tf_export("math.reduce_std")
-def reduce_std(input_tensor, axis=None, keepdims=None, name=None):
+def reduce_std(input_tensor, axis=None, keepdims=False, name=None):
   """Computes the standard deviation of elements across dimensions of a tensor.
   Reduces `input_tensor` along the dimensions given in `axis`.
@@ -1710,15 +1798,8 @@ def reduce_std(input_tensor, axis=None, keepdims=None, name=None):
   return sqrt(variance)
-@tf_export("math.reduce_prod", "reduce_prod")
-@deprecation.deprecated_args(
-    None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
-def reduce_prod(input_tensor,
-                axis=None,
-                keepdims=None,
-                name=None,
-                reduction_indices=None,
-                keep_dims=None):
+@tf_export("math.reduce_prod", "reduce_prod", v1=[])
+def reduce_prod(input_tensor, axis=None, keepdims=False, name=None):
   """Computes the product of elements across dimensions of a tensor.
   Reduces `input_tensor` along the dimensions given in `axis`.
@@ -1736,6 +1817,48 @@ def reduce_prod(input_tensor,
       `[-rank(input_tensor), rank(input_tensor))`.
     keepdims: If true, retains reduced dimensions with length 1.
     name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.prod
@end_compatibility
"""
keepdims = False if keepdims is None else keepdims
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops.prod(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))
@tf_export(v1=["math.reduce_prod", "reduce_prod"])
@deprecation.deprecated_args(
None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
def reduce_prod_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the product of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
     reduction_indices: The old (deprecated) name for axis.
     keep_dims: Deprecated alias for `keepdims`.
@@ -1746,24 +1869,17 @@ def reduce_prod(input_tensor,
     Equivalent to np.prod
   @end_compatibility
   """
-  axis = deprecation.deprecated_argument_lookup(
-      "axis", axis, "reduction_indices", reduction_indices)
   keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                     "keep_dims", keep_dims)
-  if keepdims is None:
-    keepdims = False
-  return _may_reduce_to_scalar(keepdims, axis, reduction_indices,
-                               gen_math_ops.prod(
-                                   input_tensor,
-                                   _ReductionDims(input_tensor, axis,
-                                                  reduction_indices),
-                                   keepdims,
-                                   name=name))
+  return reduce_prod(input_tensor, axis, keepdims, name)
-@tf_export("math.reduce_min", "reduce_min")
+@tf_export(v1=["math.reduce_min", "reduce_min"])
 @deprecation.deprecated_args(
     None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
-def reduce_min(input_tensor,
+def reduce_min_v1(input_tensor,
               axis=None,
               keepdims=None,
               name=None,
@@ -1781,9 +1897,9 @@ def reduce_min(input_tensor,
   Args:
     input_tensor: The tensor to reduce. Should have real numeric type.
-    axis: The dimensions to reduce. If `None` (the default),
-      reduces all dimensions. Must be in the range
-      `[-rank(input_tensor), rank(input_tensor))`.
+    axis: The dimensions to reduce. If `None` (the default), reduces all
+      dimensions. Must be in the range `[-rank(input_tensor),
+      rank(input_tensor))`.
     keepdims: If true, retains reduced dimensions with length 1.
     name: A name for the operation (optional).
     reduction_indices: The old (deprecated) name for axis.
@@ -1796,23 +1912,52 @@ def reduce_min(input_tensor,
     Equivalent to np.min
   @end_compatibility
   """
-  axis = deprecation.deprecated_argument_lookup(
-      "axis", axis, "reduction_indices", reduction_indices)
   keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                     "keep_dims", keep_dims)
-  if keepdims is None:
-    keepdims = False
-  return _may_reduce_to_scalar(keepdims, axis, reduction_indices,
+  return reduce_min(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_min", "reduce_min", v1=[])
def reduce_min(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the minimum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.min
@end_compatibility
"""
keepdims = False if keepdims is None else keepdims
return _may_reduce_to_scalar(
keepdims, axis,
       gen_math_ops._min(
-          input_tensor,
-          _ReductionDims(input_tensor, axis,
-                         reduction_indices),
-          keepdims,
+          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
          name=name))
-@tf_export("math.reduce_max", "reduce_max")
+@tf_export(v1=["math.reduce_max", "reduce_max"])
 @deprecation.deprecated_args(
     None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
-def reduce_max(input_tensor,
+def reduce_max_v1(input_tensor,
              axis=None,
              keepdims=None,
              name=None,
@@ -1845,23 +1990,52 @@ def reduce_max(input_tensor,
     Equivalent to np.max
   @end_compatibility
   """
-  axis = deprecation.deprecated_argument_lookup(
-      "axis", axis, "reduction_indices", reduction_indices)
   keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                     "keep_dims", keep_dims)
-  if keepdims is None:
-    keepdims = False
-  return _may_reduce_to_scalar(keepdims, axis, reduction_indices,
+  return reduce_max(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_max", "reduce_max", v1=[])
def reduce_max(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the maximum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.max
@end_compatibility
"""
keepdims = False if keepdims is None else keepdims
return _may_reduce_to_scalar(
keepdims, axis,
       gen_math_ops._max(
-          input_tensor,
-          _ReductionDims(input_tensor, axis,
-                         reduction_indices),
-          keepdims,
+          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
          name=name))
-@tf_export("math.reduce_all", "reduce_all")
+@tf_export(v1=["math.reduce_all", "reduce_all"])
 @deprecation.deprecated_args(
     None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
-def reduce_all(input_tensor,
+def reduce_all_v1(input_tensor,
              axis=None,
              keepdims=None,
              name=None,
@@ -1888,9 +2062,9 @@ def reduce_all(input_tensor,
   Args:
     input_tensor: The boolean tensor to reduce.
-    axis: The dimensions to reduce. If `None` (the default),
-      reduces all dimensions. Must be in the range
-      `[-rank(input_tensor), rank(input_tensor))`.
+    axis: The dimensions to reduce. If `None` (the default), reduces all
+      dimensions. Must be in the range `[-rank(input_tensor),
+      rank(input_tensor))`.
     keepdims: If true, retains reduced dimensions with length 1.
     name: A name for the operation (optional).
     reduction_indices: The old (deprecated) name for axis.
@@ -1903,23 +2077,61 @@ def reduce_all(input_tensor,
     Equivalent to np.all
   @end_compatibility
   """
-  axis = deprecation.deprecated_argument_lookup(
-      "axis", axis, "reduction_indices", reduction_indices)
   keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                     "keep_dims", keep_dims)
-  if keepdims is None:
-    keepdims = False
-  return _may_reduce_to_scalar(keepdims, axis, reduction_indices,
+  return reduce_all(input_tensor, axis, keepdims, name)
@tf_export("reduce_all", "math.reduce_all", v1=[])
def reduce_all(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the "logical and" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[True, True], [False, False]])
tf.reduce_all(x) # False
tf.reduce_all(x, 0) # [False, False]
tf.reduce_all(x, 1) # [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.all
@end_compatibility
"""
keepdims = False if keepdims is None else keepdims
return _may_reduce_to_scalar(
keepdims, axis,
       gen_math_ops._all(
-          input_tensor,
-          _ReductionDims(input_tensor, axis,
-                         reduction_indices),
-          keepdims,
+          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
          name=name))
-@tf_export("math.reduce_any", "reduce_any")
+@tf_export(v1=["math.reduce_any", "reduce_any"])
 @deprecation.deprecated_args(
     None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
-def reduce_any(input_tensor,
+def reduce_any_v1(input_tensor,
              axis=None,
              keepdims=None,
              name=None,
@@ -1946,9 +2158,9 @@ def reduce_any(input_tensor,
   Args:
     input_tensor: The boolean tensor to reduce.
-    axis: The dimensions to reduce. If `None` (the default),
-      reduces all dimensions. Must be in the range
-      `[-rank(input_tensor), rank(input_tensor))`.
+    axis: The dimensions to reduce. If `None` (the default), reduces all
+      dimensions. Must be in the range `[-rank(input_tensor),
+      rank(input_tensor))`.
     keepdims: If true, retains reduced dimensions with length 1.
     name: A name for the operation (optional).
     reduction_indices: The old (deprecated) name for axis.
@@ -1961,23 +2173,61 @@ def reduce_any(input_tensor,
     Equivalent to np.any
   @end_compatibility
   """
-  axis = deprecation.deprecated_argument_lookup(
-      "axis", axis, "reduction_indices", reduction_indices)
   keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                     "keep_dims", keep_dims)
-  if keepdims is None:
-    keepdims = False
-  return _may_reduce_to_scalar(keepdims, axis, reduction_indices,
+  return reduce_any(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_any", "reduce_any", v1=[])
def reduce_any(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the "logical or" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[True, True], [False, False]])
tf.reduce_any(x) # True
tf.reduce_any(x, 0) # [True, True]
tf.reduce_any(x, 1) # [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.any
@end_compatibility
"""
keepdims = False if keepdims is None else keepdims
return _may_reduce_to_scalar(
keepdims, axis,
       gen_math_ops._any(
-          input_tensor,
-          _ReductionDims(input_tensor, axis,
-                         reduction_indices),
-          keepdims,
+          input_tensor, _ReductionDims(input_tensor, axis), keepdims,
          name=name))
-@tf_export("math.reduce_logsumexp", "reduce_logsumexp")
+@tf_export(v1=["math.reduce_logsumexp", "reduce_logsumexp"])
 @deprecation.deprecated_args(
     None, "keep_dims is deprecated, use keepdims instead", "keep_dims")
-def reduce_logsumexp(input_tensor,
+def reduce_logsumexp_v1(input_tensor,
                     axis=None,
                     keepdims=None,
                     name=None,
@@ -2010,9 +2260,9 @@ def reduce_logsumexp(input_tensor,
   Args:
     input_tensor: The tensor to reduce. Should have numeric type.
-    axis: The dimensions to reduce. If `None` (the default),
-      reduces all dimensions. Must be in the range
-      `[-rank(input_tensor), rank(input_tensor))`.
+    axis: The dimensions to reduce. If `None` (the default), reduces all
+      dimensions. Must be in the range `[-rank(input_tensor),
+      rank(input_tensor))`.
     keepdims: If true, retains reduced dimensions with length 1.
     name: A name for the operation (optional).
     reduction_indices: The old (deprecated) name for axis.
@@ -2021,16 +2271,57 @@ def reduce_logsumexp(input_tensor,
   Returns:
     The reduced tensor.
   """
-  axis = deprecation.deprecated_argument_lookup(
-      "axis", axis, "reduction_indices", reduction_indices)
   keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                     "keep_dims", keep_dims)
-  if keepdims is None:
-    keepdims = False
+  return reduce_logsumexp(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_logsumexp", "reduce_logsumexp", v1=[])
def reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(exp(input))). It avoids
overflows caused by taking the exp of large inputs and underflows caused by
taking the log of small inputs.
For example:
```python
x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
tf.reduce_logsumexp(x) # log(6)
tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)]
tf.reduce_logsumexp(x, 1) # [log(3), log(3)]
tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]]
tf.reduce_logsumexp(x, [0, 1]) # log(6)
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
keepdims = False if keepdims is None else keepdims
   input_tensor = ops.convert_to_tensor(input_tensor)
   with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
     raw_max = reduce_max(
         input_tensor,
         axis=axis,
-        reduction_indices=reduction_indices,
         keepdims=True)
     my_max = array_ops.stop_gradient(
         array_ops.where(
@@ -2040,12 +2331,11 @@ def reduce_logsumexp(input_tensor,
         reduce_sum(
             gen_math_ops.exp(gen_math_ops.sub(input_tensor, my_max)),
             axis,
-            keepdims=keepdims,
-            reduction_indices=reduction_indices))
+            keepdims=keepdims))
     if not keepdims:
       my_max = array_ops.reshape(my_max, array_ops.shape(result))
     result = gen_math_ops.add(result, my_max)
-    return _may_reduce_to_scalar(keepdims, axis, reduction_indices, result)
+    return _may_reduce_to_scalar(keepdims, axis, result)
 @tf_export("linalg.trace", v1=["linalg.trace", "trace"])
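`reduce_logsumexp` subtracts the per-axis maximum before exponentiating so that large inputs do not overflow, then adds the maximum back after the log. A standalone sketch of the same trick written against the public API, assuming a TF 2.x-style runtime and toy values:

```python
import tensorflow as tf

x = tf.constant([[1000., 1001.], [-1000., -999.]])

# Naive log(sum(exp(x))) overflows to inf for the first row; subtracting the
# row maximum keeps exp() in range, and the maximum is added back at the end.
raw_max = tf.reduce_max(x, axis=1, keepdims=True)
stable = tf.math.log(
    tf.reduce_sum(tf.exp(x - raw_max), axis=1, keepdims=True)) + raw_max
# Matches tf.reduce_logsumexp(x, axis=1, keepdims=True).
```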


@@ -104,7 +104,7 @@ class LogSumExpTest(test_util.TensorFlowTestCase):
     for dtype in [np.float16, np.float32, np.double]:
       x_np = np.random.rand(5, 5).astype(dtype)
       with self.cached_session(use_gpu=True):
-        y_tf = math_ops.reduce_logsumexp(x_np, reduction_indices=[0])
+        y_tf = math_ops.reduce_logsumexp(x_np, axis=[0])
         y_np = log(np.sum(exp(x_np), axis=0))
         self.assertShapeEqual(y_np, y_tf)
         y_tf_np = self.evaluate(y_tf)
@@ -114,7 +114,7 @@ class LogSumExpTest(test_util.TensorFlowTestCase):
     for dtype in [np.float16, np.float32, np.double]:
       x_np = np.random.rand(5, 5).astype(dtype)
       with self.cached_session(use_gpu=True):
-        y_tf = math_ops.reduce_logsumexp(x_np, reduction_indices=0)
+        y_tf = math_ops.reduce_logsumexp(x_np, axis=0)
         y_np = log(np.sum(exp(x_np), axis=0))
         self.assertShapeEqual(y_np, y_tf)
         y_tf_np = self.evaluate(y_tf)


@@ -948,7 +948,7 @@ def mean_cosine_distance(labels,
       predictions=predictions, labels=labels, weights=weights)
   radial_diffs = math_ops.multiply(predictions, labels)
   radial_diffs = math_ops.reduce_sum(
-      radial_diffs, reduction_indices=[
+      radial_diffs, axis=[
           dim,
       ], keepdims=True)
   mean_distance, update_op = mean(radial_diffs, weights, None, None, name or
@@ -3045,7 +3045,7 @@ def _sparse_average_precision_at_top_k(labels, predictions_idx):
   # Reduce along k dimension to get the sum, yielding a [D1, ... DN] tensor.
   precision_sum = math_ops.reduce_sum(
-      relevant_precision_per_k, reduction_indices=(-1,), name='precision_sum')
+      relevant_precision_per_k, axis=(-1,), name='precision_sum')
   # Divide by number of relevant items to get average precision. These are
   # the "num_relevant_items" and "AveP" terms from the formula above.


@@ -1324,13 +1324,12 @@ class ControlFlowTest(PForTest):
     pfor_out, pfor_out_grad = pfor_control_flow_ops.pfor(loop_fn, 4)
     # Note that tf.while_loop does not work in the setup above. So we manually
     # construct the equivalent computation of the above loops here.
-    real_out = math_ops.reduce_sum(inp, reduction_indices=[0])
-    real_out = math_ops.reduce_prod(real_out, reduction_indices=[1])
+    real_out = math_ops.reduce_sum(inp, axis=[0])
+    real_out = math_ops.reduce_prod(real_out, axis=[1])
     # Note that gradients of real_out will accumulate the gradients across the
     # output value. Hence we do the same aggregation on pfor_out_grad.
     real_out_grad = gradient_ops.gradients(real_out, inp)[0]
-    sum_pfor_out_grad = math_ops.reduce_sum(
-        pfor_out_grad, reduction_indices=[0])
+    sum_pfor_out_grad = math_ops.reduce_sum(pfor_out_grad, axis=[0])
     with session.Session() as sess:
       v1, v2, v1_grad, v2_grad = sess.run(


@@ -195,7 +195,7 @@ def _SparseTensorDenseMatMulGrad(op, grad):
   parts_a = array_ops.gather(grad, rows if not adj_a else cols)
   parts_b = array_ops.gather(b if not adj_b else array_ops.transpose(b),
                              cols if not adj_a else rows)
-  a_values_grad = math_ops.reduce_sum(parts_a * parts_b, reduction_indices=1)
+  a_values_grad = math_ops.reduce_sum(parts_a * parts_b, axis=1)
   # gradients w.r.t. (a_indices, a_values, a_shape, b)
   return (None, a_values_grad, None, b_grad)


@@ -70,8 +70,7 @@ def lbeta(x, name=None):
     x = ops.convert_to_tensor(x, name='x')
     # Note reduce_sum([]) = 0.
-    log_prod_gamma_x = math_ops.reduce_sum(
-        math_ops.lgamma(x), reduction_indices=[-1])
+    log_prod_gamma_x = math_ops.reduce_sum(math_ops.lgamma(x), axis=[-1])
     # Note lgamma(0) = infinity, so if x = []
     #   log_gamma_sum_x = lgamma(0) = infinity, and
@@ -264,11 +263,11 @@ def einsum(equation, *inputs, **kwargs):
     missing_indices = set(temp_axis_labels) - set(output_axis_labels)
     if missing_indices:
-      reduction_indices = [
+      axis = [
           i for i, a in enumerate(temp_axis_labels)
           if a not in output_axis_labels
       ]
-      temp = math_ops.reduce_sum(temp, reduction_indices=reduction_indices)
+      temp = math_ops.reduce_sum(temp, axis=axis)
       temp_axis_labels = ''.join(
          a for a in temp_axis_labels if a in output_axis_labels)
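The `einsum` helper above drops any axis that appears in the intermediate result but not in the requested output by summing over it; the list comprehension just computes which positional axes those are. A small hedged illustration of that reduction; the labels and shapes are made up:

```python
import tensorflow as tf

temp_axis_labels = 'ijk'     # axes currently present in the intermediate
output_axis_labels = 'ik'    # axes requested in the output

temp = tf.ones([2, 3, 4])
# Axis 1 ('j') is present but not wanted, so it is summed away.
axis = [i for i, a in enumerate(temp_axis_labels)
        if a not in output_axis_labels]       # [1]
temp = tf.reduce_sum(temp, axis=axis)         # shape [2, 4]
```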


@@ -318,7 +318,7 @@ tf_module {
   }
   member_method {
     name: "reduce_std"
-    argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+    argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
   }
   member_method {
     name: "reduce_sum"
@@ -326,7 +326,7 @@ tf_module {
   }
   member_method {
     name: "reduce_variance"
-    argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], "
+    argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
   }
   member_method {
     name: "rint"


@ -290,43 +290,43 @@ tf_module {
} }
member_method { member_method {
name: "reduce_all" name: "reduce_all"
argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], " argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
} }
member_method { member_method {
name: "reduce_any" name: "reduce_any"
argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], " argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
} }
member_method { member_method {
name: "reduce_logsumexp" name: "reduce_logsumexp"
argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], " argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
} }
member_method { member_method {
name: "reduce_max" name: "reduce_max"
argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], " argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
} }
member_method { member_method {
name: "reduce_mean" name: "reduce_mean"
argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], " argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
} }
member_method { member_method {
name: "reduce_min" name: "reduce_min"
argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], " argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
} }
member_method { member_method {
name: "reduce_prod" name: "reduce_prod"
argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], " argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
} }
member_method { member_method {
name: "reduce_std" name: "reduce_std"
argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], " argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
} }
member_method { member_method {
name: "reduce_sum" name: "reduce_sum"
argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], " argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
} }
member_method { member_method {
name: "reduce_variance" name: "reduce_variance"
argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\'], " argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
} }
member_method { member_method {
name: "rint" name: "rint"
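These v2 argspecs drop the deprecated reduction_indices and keep_dims aliases entirely, so v1-style call sites must move to the new keywords. A small example, assuming a TensorFlow 2.x install where tf.reduce_sum resolves to the v2 endpoint shown above:

import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])

# v1 spelling: only the compat.v1 endpoint still accepts the old aliases.
s_v1 = tf.compat.v1.reduce_sum(x, reduction_indices=[1], keep_dims=True)

# v2 spelling matching the argspec above.
s_v2 = tf.reduce_sum(x, axis=[1], keepdims=True)  # shape (2, 1)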


@ -898,35 +898,35 @@ tf_module {
} }
member_method { member_method {
name: "reduce_all" name: "reduce_all"
argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], " argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
} }
member_method { member_method {
name: "reduce_any" name: "reduce_any"
argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], " argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
} }
member_method { member_method {
name: "reduce_logsumexp" name: "reduce_logsumexp"
argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], " argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
} }
member_method { member_method {
name: "reduce_max" name: "reduce_max"
argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], " argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
} }
member_method { member_method {
name: "reduce_mean" name: "reduce_mean"
argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], " argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
} }
member_method { member_method {
name: "reduce_min" name: "reduce_min"
argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], " argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
} }
member_method { member_method {
name: "reduce_prod" name: "reduce_prod"
argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], " argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
} }
member_method { member_method {
name: "reduce_sum" name: "reduce_sum"
argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\', \'reduction_indices\', \'keep_dims\'], varargs=None, keywords=None, defaults=[\'None\', \'None\', \'None\', \'None\', \'None\'], " argspec: "args=[\'input_tensor\', \'axis\', \'keepdims\', \'name\'], varargs=None, keywords=None, defaults=[\'None\', \'False\', \'None\'], "
} }
member_method { member_method {
name: "register_tensor_conversion_function" name: "register_tensor_conversion_function"


@ -73,6 +73,7 @@ class TFAPIChangeSpec(ast_edits.APIChangeSpec):
"t": "x", "t": "x",
"msg": "message", "msg": "message",
}, },
"tf.sparse.add": ["a", "b", "thresh"],
"tf.sparse.split": { "tf.sparse.split": {
"split_dim": "axis", "split_dim": "axis",
}, },
@ -113,6 +114,73 @@ class TFAPIChangeSpec(ast_edits.APIChangeSpec):
"tf.random.stateless_multinomial": { "tf.random.stateless_multinomial": {
"output_dtype": "dtype", "output_dtype": "dtype",
}, },
"tf.sparse.concat": [
"axis", "sp_inputs", "name", "expand_nonconcat_dim", "concat_dim"
],
"tf.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_all": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_any": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_min": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_max": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_sum": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_mean": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_prod": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
"tf.math.reduce_logsumexp": {
"reduction_indices": "axis",
"keep_dims": "keepdims",
},
} }
# Mapping from function to the new name of the function # Mapping from function to the new name of the function
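The entries added above extend the converter's per-function keyword-rename table. What applying such a table amounts to, as a minimal standalone sketch (REDUCE_KEYWORD_RENAMES and rename_keywords are hypothetical names; the real tool rewrites keywords in the AST, not in dicts):

REDUCE_KEYWORD_RENAMES = {
    "tf.reduce_sum": {"reduction_indices": "axis", "keep_dims": "keepdims"},
}

def rename_keywords(func_name, kwargs):
    # Replace deprecated keyword names with their v2 equivalents.
    renames = REDUCE_KEYWORD_RENAMES.get(func_name, {})
    return {renames.get(k, k): v for k, v in kwargs.items()}

# {'reduction_indices': [1], 'keep_dims': True} -> {'axis': [1], 'keepdims': True}
print(rename_keywords("tf.reduce_sum",
                      {"reduction_indices": [1], "keep_dims": True}))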
@ -199,7 +267,8 @@ class TFAPIChangeSpec(ast_edits.APIChangeSpec):
"tf.convert_to_tensor": ["value", "dtype", "name", "preferred_dtype"], "tf.convert_to_tensor": ["value", "dtype", "name", "preferred_dtype"],
"tf.nn.convolution": [ "tf.nn.convolution": [
"input", "filter", "padding", "strides", "dilation_rate", "name", "input", "filter", "padding", "strides", "dilation_rate", "name",
"data_format"], "data_format"
],
"tf.nn.crelu": ["features", "name", "axis"], "tf.nn.crelu": ["features", "name", "axis"],
"tf.nn.pool": [ "tf.nn.pool": [
"input", "window_shape", "pooling_type", "padding", "dilation_rate", "input", "window_shape", "pooling_type", "padding", "dilation_rate",
@ -218,19 +287,19 @@ class TFAPIChangeSpec(ast_edits.APIChangeSpec):
], ],
"tf.pad": ["tensor", "paddings", "mode", "name", "constant_values"], "tf.pad": ["tensor", "paddings", "mode", "name", "constant_values"],
"tf.quantize_v2": [ "tf.quantize_v2": [
"input", "min_range", "max_range", "T", "mode", "name", "input", "min_range", "max_range", "T", "mode", "name", "round_mode"
"round_mode"
], ],
"tf.feature_column.categorical_column_with_vocabulary_file": [ "tf.feature_column.categorical_column_with_vocabulary_file": [
"key", "vocabulary_file", "vocabulary_size", "key", "vocabulary_file", "vocabulary_size", "num_oov_buckets",
"num_oov_buckets", "default_value", "dtype" "default_value", "dtype"
], ],
"tf.shape": ["input", "name", "out_type"], "tf.shape": ["input", "name", "out_type"],
"tf.size": ["input", "name", "out_type"], "tf.size": ["input", "name", "out_type"],
"tf.random.poisson": ["lam", "shape", "dtype", "seed", "name"],
"tf.sparse.add": ["a", "b", "thresh"],
"tf.sparse.concat": [ "tf.sparse.concat": [
"axis", "sp_inputs", "name", "expand_nonconcat_dim", "concat_dim" "axis", "sp_inputs", "name", "expand_nonconcat_dim", "concat_dim"
], ],
"tf.random.poisson": ["lam", "shape", "dtype", "seed", "name"],
"tf.sparse.segment_mean": [ "tf.sparse.segment_mean": [
"data", "indices", "segment_ids", "name", "num_segments" "data", "indices", "segment_ids", "name", "num_segments"
], ],
@ -243,10 +312,75 @@ class TFAPIChangeSpec(ast_edits.APIChangeSpec):
"tf.strings.length": ["input", "name", "unit"], "tf.strings.length": ["input", "name", "unit"],
"tf.transpose": ["a", "perm", "name", "conjugate"], "tf.transpose": ["a", "perm", "name", "conjugate"],
"tf.tuple": ["tensors", "name", "control_inputs"], "tf.tuple": ["tensors", "name", "control_inputs"],
"tf.while_loop": ["cond", "body", "loop_vars", "shape_invariants", "tf.while_loop": [
"parallel_iterations", "back_prop", "swap_memory", "cond", "body", "loop_vars", "shape_invariants",
"name", "maximum_iterations", "parallel_iterations", "back_prop", "swap_memory", "name",
"return_same_structure"], "maximum_iterations", "return_same_structure"
],
"tf.reduce_all": [
"input_tensor", "axis", "keepdims", "name", "reduction_indices",
"keep_dims"
],
"tf.math.reduce_all": [
"input_tensor", "axis", "keepdims", "name", "reduction_indices",
"keep_dims"
],
"tf.reduce_any": [
"input_tensor", "axis", "keepdims", "name", "reduction_indices",
"keep_dims"
],
"tf.math.reduce_any": [
"input_tensor", "axis", "keepdims", "name", "reduction_indices",
"keep_dims"
],
"tf.reduce_min": [
"input_tensor", "axis", "keepdims", "name", "reduction_indices",
"keep_dims"
],
"tf.math.reduce_min": [
"input_tensor", "axis", "keepdims", "name", "reduction_indices",
"keep_dims"
],
"tf.reduce_max": [
"input_tensor", "axis", "keepdims", "name", "reduction_indices",
"keep_dims"
],
"tf.math.reduce_max": [
"input_tensor", "axis", "keepdims", "name", "reduction_indices",
"keep_dims"
],
"tf.reduce_sum": [
"input_tensor", "axis", "keepdims", "name", "reduction_indices",
"keep_dims"
],
"tf.math.reduce_sum": [
"input_tensor", "axis", "keepdims", "name", "reduction_indices",
"keep_dims"
],
"tf.reduce_mean": [
"input_tensor", "axis", "keepdims", "name", "reduction_indices",
"keep_dims"
],
"tf.math.reduce_mean": [
"input_tensor", "axis", "keepdims", "name", "reduction_indices",
"keep_dims"
],
"tf.reduce_prod": [
"input_tensor", "axis", "keepdims", "name", "reduction_indices",
"keep_dims"
],
"tf.math.reduce_prod": [
"input_tensor", "axis", "keepdims", "name", "reduction_indices",
"keep_dims"
],
"tf.reduce_logsumexp": [
"input_tensor", "axis", "keepdims", "name", "reduction_indices",
"keep_dims"
],
"tf.math.reduce_logsumexp": [
"input_tensor", "axis", "keepdims", "name", "reduction_indices",
"keep_dims"
],
} }
# Specially handled functions. # Specially handled functions.
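The reorder entries above record each reduce op's full v1 positional signature, so the converter can turn positional call sites into keyword form before the deprecated parameters disappear from the v2 signatures. A rough standalone sketch of that idea (V1_ARG_ORDER and positional_to_keywords are illustrative names; the real converter operates on the AST):

V1_ARG_ORDER = {
    "tf.reduce_sum": [
        "input_tensor", "axis", "keepdims", "name", "reduction_indices",
        "keep_dims",
    ],
}

def positional_to_keywords(func_name, args):
    # Pair each positional argument with its v1 parameter name.
    names = V1_ARG_ORDER[func_name]
    return dict(zip(names, args))

# tf.reduce_sum(x, [1], True)
#   -> {'input_tensor': 'x', 'axis': [1], 'keepdims': True}
print(positional_to_keywords("tf.reduce_sum", ["x", [1], True]))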