Fix extraneous deprecation message caused by setdiff1d
This PR tries to address the issue raised in 42909, where an extraneous deprecation message showed up with the following:

```python
import tensorflow as tf

x = tf.ones(5)
with tf.GradientTape() as g:
  g.watch(x)
  y = tf.math.reduce_prod(x)
grad = g.gradient(y, x)
```

which produces:

```
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_grad.py:300: setdiff1d (from tensorflow.python.ops.array_ops) is deprecated and will be removed after 2018-11-30.
Instructions for updating:
This op will be removed after the deprecation date. Please switch to tf.sets.difference().
```

The deprecation message is misleading, since the user's code never calls `setdiff1d` directly. This PR switches to the internal `gen_array_ops.list_diff` so that the deprecation message is no longer triggered. Several other places that used `setdiff1d` have also been updated.

This PR fixes 42909.

Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
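For reviewers, a minimal sketch (not part of the PR itself) of the substitution made at each call site: the internal generated op `gen_array_ops.list_diff` returns the same `(values, indices)` pair as the deprecated public wrapper `array_ops.setdiff1d`, but without the deprecation decorator that logs the warning. The module paths below are TensorFlow internals and may change between versions.

```python
# Minimal sketch, assuming TF 2.x eager execution: gen_array_ops.list_diff
# computes the same result as the deprecated array_ops.setdiff1d wrapper,
# without logging the deprecation message.
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops

idx = math_ops.range(0, 5)                      # [0, 1, 2, 3, 4]
reduced = tf.constant([1, 3], dtype=tf.int32)   # e.g. axes being reduced over

# Deprecated public wrapper: logs the setdiff1d warning on first use.
other_old, _ = array_ops.setdiff1d(idx, reduced)

# Internal generated op used by this PR: same semantics, no warning.
other_new, _ = gen_array_ops.list_diff(idx, reduced, dtypes.int32)

print(other_old.numpy())  # [0 2 4]
print(other_new.numpy())  # [0 2 4]
```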
parent d74cd4a2a0
commit 734ac4e9ab
```diff
@@ -24,6 +24,7 @@ from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import ops
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import control_flow_ops
+from tensorflow.python.ops import gen_array_ops
 from tensorflow.python.ops import gen_linalg_ops
 from tensorflow.python.ops import linalg_ops_impl
 from tensorflow.python.ops import map_fn
@@ -732,7 +733,8 @@ def norm(tensor,
           ops.convert_to_tensor(axis))
       axes = math_ops.range(rank)
       perm_before = array_ops.concat(
-          [array_ops.setdiff1d(axes, positive_axis)[0], positive_axis],
+          [gen_array_ops.list_diff(axes, positive_axis, dtypes.int32)[0],
+           positive_axis],
           axis=0)
       perm_after = map_fn.map_fn(
           lambda i: math_ops.cast(
@@ -297,7 +297,7 @@ def _ProdGrad(op, grad):
     reduction_indices = (reduction_indices + rank) % rank
     reduced = math_ops.cast(reduction_indices, dtypes.int32)
     idx = math_ops.range(0, rank)
-    other, _ = array_ops.setdiff1d(idx, reduced)
+    other, _ = gen_array_ops.list_diff(idx, reduced, dtypes.int32)
     perm = array_ops.concat([reduced, other], 0)
     reduced_num = math_ops.reduce_prod(array_ops.gather(input_shape, reduced))
     other_num = math_ops.reduce_prod(array_ops.gather(input_shape, other))
@@ -4514,7 +4514,7 @@ def tensordot(a, b, axes, name=None):
       rank_a = array_ops.rank(a)
       axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
       axes = array_ops.where(axes >= 0, axes, axes + rank_a)
-      free, _ = array_ops.setdiff1d(range(rank_a), axes)
+      free, _ = gen_array_ops.list_diff(range(rank_a), axes, dtypes.int32)
       free_dims = array_ops.gather(shape_a, free)
       axes_dims = array_ops.gather(shape_a, axes)
       prod_free_dims = reduce_prod(free_dims)
```
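As a sanity check on the user-facing behavior (a hedged sketch, not part of the diff above), one can rerun the repro from the issue while capturing the `tensorflow` logger and assert that no `setdiff1d` message is logged; this assumes TF 2.x eager execution and the default logging configuration.

```python
# Verification sketch: capture the "tensorflow" logger while rerunning the
# repro from issue 42909, then assert no setdiff1d deprecation message is
# emitted once _ProdGrad uses gen_array_ops.list_diff.
import io
import logging
import tensorflow as tf

stream = io.StringIO()
handler = logging.StreamHandler(stream)
tf.get_logger().addHandler(handler)

x = tf.ones(5)
with tf.GradientTape() as g:
  g.watch(x)
  y = tf.math.reduce_prod(x)
grad = g.gradient(y, x)  # previously triggered the setdiff1d warning

tf.get_logger().removeHandler(handler)
assert "setdiff1d" not in stream.getvalue()
print(grad.numpy())  # [1. 1. 1. 1. 1.]
```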