Removing run_v1 decorators from slice_op_test. Also moving it to the kernel_tests/array_ops folder.
PiperOrigin-RevId: 324161756
Change-Id: Ia5b42728f67b4230b0bf50332d5d9adaceeb5b0e
commit 6be7a14b65
parent 10e4287d61
@@ -3070,21 +3070,6 @@ cuda_py_test(
     ],
 )
 
-cuda_py_test(
-    name = "slice_op_test",
-    size = "medium",
-    srcs = ["slice_op_test.py"],
-    tags = ["no_windows"],  # b/126916429
-    deps = [
-        "//tensorflow/python:array_ops",
-        "//tensorflow/python:client_testlib",
-        "//tensorflow/python:errors",
-        "//tensorflow/python:framework_for_generated_wrappers",
-        "//tensorflow/python:gradients",
-        "//third_party/py/numpy",
-    ],
-)
-
 cuda_py_test(
     name = "huge_slice_op_test",
     size = "medium",
@@ -26,7 +26,24 @@ cuda_py_test(
     deps = [
         "//tensorflow/python:array_ops",
         "//tensorflow/python:client_testlib",
         "//tensorflow/python:errors",
         "//tensorflow/python:framework_for_generated_wrappers",
         "//tensorflow/python:gradients",
         "//third_party/py/numpy",
     ],
 )
+
+cuda_py_test(
+    name = "slice_op_test",
+    size = "medium",
+    srcs = ["slice_op_test.py"],
+    tags = ["no_windows"],  # b/126916429
+    deps = [
+        "//tensorflow/python:array_ops",
+        "//tensorflow/python:client_testlib",
+        "//tensorflow/python:errors",
+        "//tensorflow/python:framework_for_generated_wrappers",
+        "//tensorflow/python:gradients",
+        "//third_party/py/numpy",
+    ],
+)
@@ -21,9 +21,12 @@ from __future__ import print_function
 import numpy as np
 from six.moves import xrange  # pylint: disable=redefined-builtin
+
+from tensorflow.python.eager import backprop
+from tensorflow.python.eager import def_function
 from tensorflow.python.framework import constant_op
 from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import errors_impl
 from tensorflow.python.framework import ops
 from tensorflow.python.framework import test_util
 from tensorflow.python.ops import array_ops
 from tensorflow.python.ops import gradients_impl
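The two eager imports added above (backprop, def_function) are what the rewritten tests below build on. As a minimal sketch of how they combine, assuming a TF 2.x runtime (illustrative only, not part of this commit):

from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op


@def_function.function
def double(x):
  # Traced into a graph function; replaces the v1 placeholder/session pattern.
  return 2.0 * x


x = constant_op.constant(3.0)
with backprop.GradientTape() as tape:
  tape.watch(x)  # constants are not watched automatically
  y = double(x)
print(tape.gradient(y, x).numpy())  # d(2x)/dx == 2.0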
@@ -178,43 +181,44 @@ class SliceTest(test.TestCase):
     ]
     self.assertAllClose(expected, result.flatten(), rtol=1e-6)
 
-  @test_util.run_deprecated_v1
   def testScalarInput(self):
     input_val = 0
-    with self.cached_session() as sess:
-      # Test with constant input; shape inference fails.
-      with self.assertRaisesWithPredicateMatch(ValueError, "out of range"):
-        constant_op.constant(input_val)[:].get_shape()
+    # Test with constant input; shape inference fails.
+    with self.assertRaisesWithPredicateMatch(
+        (ValueError, errors_impl.InvalidArgumentError), "out of range"):
+      constant_op.constant(input_val)[:].get_shape()
 
-      # Test evaluating with non-constant input; kernel execution fails.
-      input_t = array_ops.placeholder(dtypes.int32)
+    # Test evaluating with non-constant input; kernel execution fails.
+    @def_function.function
+    def func(input_t):
       slice_t = input_t[:]
-      with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError,
-                                               "out of range"):
-        sess.run([slice_t], feed_dict={input_t: input_val})
+      return slice_t
+
+    with self.assertRaisesWithPredicateMatch(TypeError, "not subscriptable"):
+      self.evaluate(func(input_val))
 
-  @test_util.run_deprecated_v1
   def testInvalidIndex(self):
     input_val = [1, 2]
-    with self.cached_session() as sess:
-      # Test with constant input; shape inference fails.
-      with self.assertRaisesWithPredicateMatch(ValueError, "out of range"):
-        constant_op.constant(input_val)[1:, 1:].get_shape()
+    # Test with constant input; shape inference fails.
+    with self.assertRaisesWithPredicateMatch(
+        (ValueError, errors_impl.InvalidArgumentError), "out of range"):
+      constant_op.constant(input_val)[1:, 1:].get_shape()
 
-      # Test evaluating with non-constant input; kernel execution fails.
-      input_t = array_ops.placeholder(dtypes.int32)
+    # Test evaluating with non-constant input; kernel execution fails.
+    @def_function.function
+    def func(input_t):
       slice_t = input_t[1:, 1:]
-      with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError,
-                                               "out of range"):
-        sess.run([slice_t], feed_dict={input_t: input_val})
+      return slice_t
+
+    with self.assertRaisesWithPredicateMatch(
+        TypeError, "must be integers or slices, not tuple"):
+      self.evaluate(func(input_val))
 
   def _testSliceMatrixDim0(self, x, begin, size):
     with self.cached_session(use_gpu=True):
-      tf_ans = array_ops.slice(x, [begin, 0], [size, x.shape[1]]).eval()
+      tf_ans = self.evaluate(array_ops.slice(x, [begin, 0], [size, x.shape[1]]))
       np_ans = x[begin:begin + size, :]
       self.assertAllEqual(tf_ans, np_ans)
 
-  @test_util.run_deprecated_v1
   def testSliceMatrixDim0(self):
     x = np.random.rand(8, 4).astype("f")
     self._testSliceMatrixDim0(x, 1, 2)
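The pattern above replaces feed_dict-driven failures with tf.function tracing: shape errors still surface when slicing a constant, while passing a raw Python scalar into the traced function now fails with a plain TypeError. A self-contained sketch of that behavior, assuming the public TF 2.x API (hypothetical, not the commit's code):

import tensorflow as tf


@tf.function
def take_all(t):
  return t[:]  # slicing a scalar is invalid


try:
  tf.constant(0)[:]  # eager: the out-of-range slice is rejected directly
except (ValueError, tf.errors.InvalidArgumentError) as e:
  print("constant input:", type(e).__name__)

try:
  take_all(0)  # a Python int is not converted; 0[:] raises during tracing
except TypeError as e:
  print("traced input:", e)  # "'int' object is not subscriptable"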
@@ -252,47 +256,46 @@ class SliceTest(test.TestCase):
       self.assertEqual(slice_val.shape, slice_t.get_shape())
       self.assertEqual(slice2_val.shape, slice2_t.get_shape())
 
-  @test_util.run_deprecated_v1
   def testComplex(self):
-    with self.session(use_gpu=True):
-      inp = np.random.rand(4, 10, 10, 4).astype("f")
-      a = constant_op.constant(inp, dtype=dtypes.float32)
+    inp = np.random.rand(4, 10, 10, 4).astype("f")
+    a = constant_op.constant(inp, dtype=dtypes.float32)
 
-      x = np.random.randint(0, 9)
-      z = np.random.randint(0, 9)
-      if z > 0:
-        y = np.random.randint(0, z)
-      else:
-        y = 0
-      slice_t = a[:, x, y:z, :]
-      self.assertAllEqual(slice_t, inp[:, x, y:z, :])
+    x = np.random.randint(0, 9)
+    z = np.random.randint(0, 9)
+    if z > 0:
+      y = np.random.randint(0, z)
+    else:
+      y = 0
+    slice_t = a[:, x, y:z, :]
+    self.assertAllEqual(slice_t, inp[:, x, y:z, :])
 
   def testRandom(self):
     # Random dims of rank 6
     input_shape = np.random.randint(0, 20, size=6)
     inp = np.random.rand(*input_shape).astype("f")
-    with self.session(use_gpu=True) as sess:
-      a = constant_op.constant(
-          [float(x) for x in inp.ravel(order="C")],
-          shape=input_shape,
-          dtype=dtypes.float32)
-      indices = [0 if x == 0 else np.random.randint(x) for x in input_shape]
-      sizes = [
-          np.random.randint(0, input_shape[i] - indices[i] + 1)
-          for i in range(6)
-      ]
-      slice_t = array_ops.slice(a, indices, sizes)
-      slice2_t = a[indices[0]:indices[0] + sizes[0], indices[1]:indices[
-          1] + sizes[1], indices[2]:indices[2] + sizes[2], indices[3]:indices[3]
-                   + sizes[3], indices[4]:indices[4] + sizes[4], indices[5]:
-                   indices[5] + sizes[5]]
+    a = constant_op.constant([float(x) for x in inp.ravel(order="C")],
+                             shape=input_shape,
+                             dtype=dtypes.float32)
+    indices = [0 if x == 0 else np.random.randint(x) for x in input_shape]
+    sizes = [
+        np.random.randint(0, input_shape[i] - indices[i] + 1) for i in range(6)
+    ]
+    slice_t = array_ops.slice(a, indices, sizes)
+    slice2_t = a[indices[0]:indices[0] + sizes[0],
+                 indices[1]:indices[1] + sizes[1],
+                 indices[2]:indices[2] + sizes[2],
+                 indices[3]:indices[3] + sizes[3],
+                 indices[4]:indices[4] + sizes[4],
+                 indices[5]:indices[5] + sizes[5]]
 
-      slice_val, slice2_val = self.evaluate([slice_t, slice2_t])
+    slice_val, slice2_val = self.evaluate([slice_t, slice2_t])
 
-      expected_val = inp[indices[0]:indices[0] + sizes[0], indices[1]:indices[
-          1] + sizes[1], indices[2]:indices[2] + sizes[2], indices[3]:indices[
-          3] + sizes[3], indices[4]:indices[4] + sizes[4], indices[5]:indices[
-          5] + sizes[5]]
+    expected_val = inp[indices[0]:indices[0] + sizes[0],
+                       indices[1]:indices[1] + sizes[1],
+                       indices[2]:indices[2] + sizes[2],
+                       indices[3]:indices[3] + sizes[3],
+                       indices[4]:indices[4] + sizes[4],
+                       indices[5]:indices[5] + sizes[5]]
     self.assertAllEqual(slice_val, expected_val)
     self.assertAllEqual(slice2_val, expected_val)
     self.assertEqual(expected_val.shape, slice_t.get_shape())
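The reformatted slice2_t/expected_val expressions above enumerate all six axes by hand. For illustration only (not code from the commit), the same indexing can be built from Python slice objects, which works identically for tensors and ndarrays:

import numpy as np
import tensorflow as tf

input_shape = (3, 4, 2, 5, 3, 2)
inp = np.random.rand(*input_shape).astype("f")
a = tf.constant(inp)
indices = [0 if x == 0 else np.random.randint(x) for x in input_shape]
sizes = [np.random.randint(0, input_shape[i] - indices[i] + 1) for i in range(6)]

# One slice object per axis, equivalent to a[i0:i0+s0, ..., i5:i5+s5].
idx = tuple(slice(b, b + s) for b, s in zip(indices, sizes))
assert a[idx].shape == inp[idx].shape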
@@ -308,7 +311,6 @@ class SliceTest(test.TestCase):
       m2 = array_ops.slice(z, [0, 0, 0], [constant_op.constant(1) + 0, 2, -1])
       self.assertAllEqual(m2.get_shape().as_list(), [1, 2, 3])
 
-
   def _testGradientSlice(self, input_shape, slice_begin, slice_size):
     with self.cached_session(use_gpu=True):
       num_inputs = np.prod(input_shape)
@@ -334,18 +336,51 @@ class SliceTest(test.TestCase):
 
     self.assertAllClose(np_ans, result)
 
+  def _testGradientSliceTape(self, input_shape, slice_begin, slice_size):
+    with backprop.GradientTape() as tape:
+      num_inputs = np.prod(input_shape)
+      num_grads = np.prod(slice_size)
+      inp = np.random.rand(num_inputs).astype("f").reshape(input_shape)
+      a = constant_op.constant([float(x) for x in inp.ravel(order="C")],
+                               shape=input_shape,
+                               dtype=dtypes.float32)
+      tape.watch(a)
+      slice_t = array_ops.slice(a, slice_begin, slice_size)
+      grads = np.random.rand(num_grads).astype("f").reshape(slice_size)
+      grad_tensor = constant_op.constant(grads)
+      grad = tape.gradient(slice_t, [a], grad_tensor)[0]
+      result = self.evaluate(grad)
+
+      # Create a zero tensor of the input shape and place
+      # the grads into the right location to compare against TensorFlow.
+      np_ans = np.zeros(input_shape)
+      slices = []
+      for i in xrange(len(input_shape)):
+        slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i]))
+      np_ans[slices] = grads
+
+      self.assertAllClose(np_ans, result)
+
   def _testGradientVariableSize(self):
     with self.cached_session(use_gpu=True):
       inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
       out = array_ops.slice(inp, [1], [-1])
-      grad_actual = gradients_impl.gradients(out, inp)[0].eval()
+      grad_actual = self.evaluate(gradients_impl.gradients(out, inp)[0])
       self.assertAllClose([0., 1., 1.], grad_actual)
 
+  def _testGradientVariableSizeTape(self):
+    with backprop.GradientTape() as tape:
+      inp = constant_op.constant([1.0, 2.0, 3.0], name="in")
+      tape.watch(inp)
+      out = array_ops.slice(inp, [1], [-1])
+      grad_actual = self.evaluate(tape.gradient(out, inp))
+    self.assertAllClose([0., 1., 1.], grad_actual)
+
   def _testGradientVariableSize2D(self):
     # Regression test for bug in slice. A low-level bug in Eigen was causing
     # incorrect results for negative indices in multi-dimensional tensors.
     # See b/114318298.
-    with self.cached_session(use_gpu=True) as sess:
+    with self.cached_session(use_gpu=True):
       x = constant_op.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 7]])
       loss1 = math_ops.reduce_sum(x[:-1, :-1] * 1.0)
       loss2 = math_ops.reduce_sum(x[:-1][:, :-1])
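The new tape-based helpers mirror the graph-mode ones: the gradient of tf.slice scatters the upstream gradient into a zero tensor shaped like the input. A standalone sketch of that check against NumPy, assuming the public TF 2.x API in eager mode (not the commit's internal imports):

import numpy as np
import tensorflow as tf

input_shape, begin, size = [4, 4], [1, 1], [2, 2]
a = tf.constant(np.random.rand(*input_shape).astype("f"))
upstream = tf.constant(np.random.rand(*size).astype("f"))

with tf.GradientTape() as tape:
  tape.watch(a)
  sl = tf.slice(a, begin, size)
grad = tape.gradient(sl, a, output_gradients=upstream)

# The slice's gradient places `upstream` at [1:3, 1:3] and zeros elsewhere.
expected = np.zeros(input_shape, dtype="f")
expected[1:3, 1:3] = upstream.numpy()
np.testing.assert_allclose(grad.numpy(), expected)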
@@ -356,39 +391,73 @@ class SliceTest(test.TestCase):
       g1_val, g2_val = self.evaluate([g1, g2])
       self.assertAllEqual(g1_val, g2_val)
 
-  @test_util.run_deprecated_v1
+  def _testGradientVariableSize2DTape(self):
+    # Regression test for bug in slice. A low-level bug in Eigen was causing
+    # incorrect results for negative indices in multi-dimensional tensors.
+    # See b/114318298.
+    with backprop.GradientTape(persistent=True) as tape:
+      x = constant_op.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 7]])
+      tape.watch(x)
+      loss1 = math_ops.reduce_sum(x[:-1, :-1] * 1.0)
+      loss2 = math_ops.reduce_sum(x[:-1][:, :-1])
+
+    g1 = tape.gradient(loss1, x)
+    g2 = tape.gradient(loss2, x)
+    g1_val, g2_val = self.evaluate([g1, g2])
+    self.assertAllEqual(g1_val, g2_val)
+
   def testGradientsAll(self):
+    with ops.Graph().as_default():
+      # Slice the middle square out of a 4x4 input
+      self._testGradientSlice([4, 4], [1, 1], [2, 2])
+
+      # Slice the upper left square out of a 4x4 input
+      self._testGradientSlice([4, 4], [0, 0], [2, 2])
+
+      # Slice a non-square input starting from (2,1)
+      self._testGradientSlice([4, 4], [2, 1], [1, 2])
+
+      # Slice a 3D tensor
+      self._testGradientSlice([3, 3, 3], [0, 1, 0], [2, 1, 1])
+
+      # Use -1 as a slice dimension.
+      self._testGradientVariableSize()
+
+      # Use -1 as a slice dimension on a 2D tensor.
+      self._testGradientVariableSize2D()
+
+  def testGradientsAllTape(self):
     # Slice the middle square out of a 4x4 input
-    self._testGradientSlice([4, 4], [1, 1], [2, 2])
+    self._testGradientSliceTape([4, 4], [1, 1], [2, 2])
 
     # Slice the upper left square out of a 4x4 input
-    self._testGradientSlice([4, 4], [0, 0], [2, 2])
+    self._testGradientSliceTape([4, 4], [0, 0], [2, 2])
 
     # Slice a non-square input starting from (2,1)
-    self._testGradientSlice([4, 4], [2, 1], [1, 2])
+    self._testGradientSliceTape([4, 4], [2, 1], [1, 2])
 
     # Slice a 3D tensor
-    self._testGradientSlice([3, 3, 3], [0, 1, 0], [2, 1, 1])
+    self._testGradientSliceTape([3, 3, 3], [0, 1, 0], [2, 1, 1])
 
     # Use -1 as a slice dimension.
-    self._testGradientVariableSize()
+    self._testGradientVariableSizeTape()
 
     # Use -1 as a slice dimension on a 2D tensor.
-    self._testGradientVariableSize2D()
+    self._testGradientVariableSize2DTape()
 
-  @test_util.run_deprecated_v1
   def testNotIterable(self):
-    # NOTE(mrry): If we register __getitem__ as an overloaded
-    # operator, Python will valiantly attempt to iterate over the
-    # Tensor from 0 to infinity. This test ensures that this
-    # unintended behavior is prevented.
-    c = constant_op.constant(5.0)
-    with self.assertRaisesRegex(errors_impl.OperatorNotAllowedInGraphError,
-                                "iterating over `tf.Tensor`"):
-      for _ in c:
-        pass
+    # Tensor iteration is disabled explicitly for only graph mode.
+    with ops.Graph().as_default():
+      # NOTE(mrry): If we register __getitem__ as an overloaded
+      # operator, Python will valiantly attempt to iterate over the
+      # Tensor from 0 to infinity. This test ensures that this
+      # unintended behavior is prevented.
+      c = constant_op.constant(5.0)
+      with self.assertRaisesRegex(errors_impl.OperatorNotAllowedInGraphError,
+                                  "iterating over `tf.Tensor`"):
+        for _ in c:
+          pass
 
-  @test_util.run_deprecated_v1
   def testComputedShape(self):
     # NOTE(mrry): We cannot currently handle partially-known values,
     # because `tf.slice()` uses -1 to specify a wildcard size, and
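testNotIterable now wraps its body in an explicit graph because eager tensors are iterable; only symbolic graph tensors raise OperatorNotAllowedInGraphError. A quick sketch of the split behavior (public API plus the same errors_impl module the test uses; illustrative only):

import tensorflow as tf
from tensorflow.python.framework import errors_impl

# Eager: iteration over a rank-1 tensor simply yields its elements.
print([int(v) for v in tf.constant([1, 2, 3])])  # [1, 2, 3]

# Graph mode: iterating any symbolic tensor raises immediately.
with tf.Graph().as_default():
  c = tf.constant(5.0)
  try:
    for _ in c:
      pass
  except errors_impl.OperatorNotAllowedInGraphError as e:
    print("graph mode:", e)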
@@ -400,9 +469,12 @@ class SliceTest(test.TestCase):
       b = array_ops.slice(a, [begin, 0], [size, 2])
       self.assertEqual([1, 2], b.get_shape())
 
-      begin = array_ops.placeholder(dtypes.int32, shape=())
-      c = array_ops.slice(a, [begin, 0], [-1, 2])
-      self.assertEqual([None, 2], c.get_shape().as_list())
+    # placeholders only make sense in a graph.
+    with ops.Graph().as_default():
+      a = constant_op.constant([[1, 2, 3], [4, 5, 6]])
+      begin = array_ops.placeholder(dtypes.int32, shape=())
+      c = array_ops.slice(a, [begin, 0], [-1, 2])
+      self.assertEqual([None, 2], c.get_shape().as_list())
 
   def testSliceOfSlice(self):
     with self.session(use_gpu=True):