Remove @test_util.run_deprecated_v1 in broadcast_to_ops_test.py
PiperOrigin-RevId: 324129236
Change-Id: I770aef18f7ab43a9f679c4e285135e2f0bd4797a
parent e1d3f777be
commit 294b239a2b
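Background for the diff below: the gradient tests previously built a graph with `array_ops.broadcast_to`, then called the v1 `gradient_checker.compute_gradient_error`, which only works under `@test_util.run_deprecated_v1`. The commit switches them to `gradient_checker_v2`, which takes the op under test wrapped in a plain function and works eagerly, so the decorator can be dropped. Two of the updated tests also pass `delta=1e-2` to use a larger finite-difference step. The following is a minimal standalone sketch of that pattern, not part of the commit; the helper name `broadcast_to_gradient_error` and the concrete shapes are illustrative, while the `gradient_checker_v2.compute_gradient`/`max_error` calls are the same ones the updated tests use.

```python
# Illustrative sketch of the gradient_checker_v2 pattern (assumes TF 2.x).
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import gradient_checker_v2


def broadcast_to_gradient_error():
  # Input mirrors testGradientWithSameRank: shape [2, 1, 3] broadcast to [2, 5, 3].
  x = tf.constant(np.arange(6, dtype=np.float32).reshape(2, 1, 3))

  def func(x):
    # The function under test maps inputs to outputs with no session state,
    # so the checker can evaluate it eagerly.
    return 2 * tf.broadcast_to(x, [2, 5, 3])

  # compute_gradient returns (theoretical_jacobians, numerical_jacobians);
  # max_error reduces them to a single scalar discrepancy.
  theoretical, numerical = gradient_checker_v2.compute_gradient(func, [x])
  return gradient_checker_v2.max_error(theoretical, numerical)


if __name__ == "__main__":
  print(broadcast_to_gradient_error())  # expected to be well below 1e-4
```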
@@ -25,13 +25,12 @@ from tensorflow.python.framework import dtypes
 from tensorflow.python.framework import errors
 from tensorflow.python.framework import test_util
 from tensorflow.python.ops import array_ops
-from tensorflow.python.ops import gradient_checker
+from tensorflow.python.ops import gradient_checker_v2
 from tensorflow.python.platform import test as test_lib
 
 
 class BroadcastToTest(test_util.TensorFlowTestCase):
 
-  @test_util.run_deprecated_v1
   def testBroadcastToBasic(self):
     for dtype in [np.uint8, np.uint16, np.int8, np.int16, np.int32, np.int64]:
       with self.session(use_gpu=True):
@@ -40,7 +39,6 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
         v_np = np.broadcast_to(x, [3, 3])
         self.assertAllEqual(v_tf, v_np)
 
-  @test_util.run_deprecated_v1
   def testBroadcastToString(self):
     with self.session(use_gpu=True):
       x = np.array([b"1", b"2", b"3"])
@@ -48,7 +46,6 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
         v_np = np.broadcast_to(x, [3, 3])
         self.assertAllEqual(v_tf, v_np)
 
-  @test_util.run_deprecated_v1
   def testBroadcastToBool(self):
     with self.session(use_gpu=True):
       x = np.array([True, False, True], dtype=np.bool)
@@ -56,7 +53,6 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
         v_np = np.broadcast_to(x, [3, 3])
         self.assertAllEqual(v_tf, v_np)
 
-  @test_util.run_deprecated_v1
   def testBroadcastToShape(self):
     for input_dim in range(1, 6):
       for output_dim in range(input_dim, 6):
@@ -68,7 +64,6 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
           v_np = np.broadcast_to(x, output_shape)
           self.assertAllEqual(v_tf, v_np)
 
-  @test_util.run_deprecated_v1
   def testBroadcastToShapeInnerDim(self):
     input_shape = [2, 1, 3]
     output_shape = [2, 5, 3]
@@ -78,7 +73,6 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
       v_np = np.broadcast_to(x, output_shape)
       self.assertAllEqual(v_tf, v_np)
 
-  @test_util.run_deprecated_v1
   def testBroadcastToShapeLargerDim(self):
     input_shape = [2, 1, 3, 2, 2, 2]
     output_shape = [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 15, 3, 2, 2, 2]
@@ -88,7 +82,6 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
       v_np = np.broadcast_to(x, output_shape)
       self.assertAllEqual(v_tf, v_np)
 
-  @test_util.run_deprecated_v1
   def testBroadcastToShapeLargerDim2(self):
     input_shape = [2, 1, 3, 2, 2, 2, 1, 1, 1]
     output_shape = [1, 1, 1, 2, 5, 3, 2, 2, 2, 3, 3, 3]
@@ -98,7 +91,6 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
       v_np = np.broadcast_to(x, output_shape)
       self.assertAllEqual(v_tf, v_np)
 
-  @test_util.run_deprecated_v1
   def testBroadcastToScalar(self):
     with self.session(use_gpu=True):
       x = np.array(1, dtype=np.int32)
@@ -106,7 +98,6 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
         v_np = np.broadcast_to(x, [3, 3])
         self.assertAllEqual(v_tf, v_np)
 
-  @test_util.run_deprecated_v1
   def testBroadcastScalarToNonScalar(self):
     with self.session(use_gpu=True):
       x = np.array(1.0, dtype=np.float)
@@ -115,7 +106,6 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
         v_np = np.broadcast_to(x, [2, 3, 4, 1, 1, 1])
         self.assertAllEqual(v_tf, v_np)
 
-  @test_util.run_deprecated_v1
   def testBroadcastToShapeTypeAndInference(self):
     for dtype in [dtypes.int32, dtypes.int64]:
       with self.cached_session(use_gpu=True):
@@ -137,59 +127,70 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
             array_ops.broadcast_to(
                 constant_op.constant([0, 1]), constant_op.constant([2, 1])))
 
-  @test_util.run_deprecated_v1
   def testGradientForScalar(self):
     x = constant_op.constant(1, dtype=dtypes.float32)
-    v = array_ops.broadcast_to(x, [2, 4, 3])
-    out = 2 * v
+
+    def func(x):
+      v = array_ops.broadcast_to(x, [2, 4, 3])
+      return 2 * v
+
     with self.cached_session():
-      err = gradient_checker.compute_gradient_error(x, x.get_shape(), out,
-                                                    out.get_shape())
+      err = gradient_checker_v2.max_error(
+          *gradient_checker_v2.compute_gradient(func, [x]))
+
     self.assertLess(err, 1e-4)
 
-  @test_util.run_deprecated_v1
   def testGradientWithSameRank(self):
     x = constant_op.constant(np.reshape(np.arange(6), (2, 1, 3)),
                              dtype=dtypes.float32)
-    v = array_ops.broadcast_to(x, [2, 5, 3])
-    out = 2 * v
+    def func(x):
+      v = array_ops.broadcast_to(x, [2, 5, 3])
+      return 2 * v
+
     with self.cached_session():
-      err = gradient_checker.compute_gradient_error(x, x.get_shape(),
-                                                    out, out.get_shape())
+      err = gradient_checker_v2.max_error(
+          *gradient_checker_v2.compute_gradient(func, [x], delta=1e-2))
+
     self.assertLess(err, 1e-4)
 
-  @test_util.run_deprecated_v1
   def testGradientWithIncreasingRank(self):
     x = constant_op.constant([[1], [2]],
                              dtype=dtypes.float32)
-    v = array_ops.broadcast_to(x, [5, 2, 3])
-    out = 2 * v
+    def func(x):
+      v = array_ops.broadcast_to(x, [5, 2, 3])
+      return 2 * v
+
     with self.cached_session():
-      err = gradient_checker.compute_gradient_error(x, x.get_shape(),
-                                                    out, out.get_shape())
+      err = gradient_checker_v2.max_error(
+          *gradient_checker_v2.compute_gradient(func, [x]))
+
     self.assertLess(err, 1e-4)
 
-  @test_util.run_deprecated_v1
   def testGradientWithBroadcastAllDimensions(self):
     x = constant_op.constant([1], dtype=dtypes.float32)
-    v = array_ops.broadcast_to(x, [5, 2, 3])
-    out = 2 * v
+    def func(x):
+      v = array_ops.broadcast_to(x, [5, 2, 3])
+      return 2 * v
+
     with self.cached_session():
-      err = gradient_checker.compute_gradient_error(x, x.get_shape(),
-                                                    out, out.get_shape())
+      err = gradient_checker_v2.max_error(
+          *gradient_checker_v2.compute_gradient(func, [x]))
+
     self.assertLess(err, 1e-4)
 
-  @test_util.run_deprecated_v1
   def testGradientWithLargeDim(self):
     input_shape = [2, 1, 3, 2, 2, 2, 1, 1, 1]
     output_shape = [1, 1, 1, 2, 5, 3, 2, 2, 2, 3, 3, 3]
     x = constant_op.constant(np.array(np.random.randn(*input_shape),
                                       dtype=np.float32))
-    v = array_ops.broadcast_to(x, output_shape)
-    out = 2 * v
+    def func(x):
+      v = array_ops.broadcast_to(x, output_shape)
+      return 2 * v
+
     with self.cached_session():
-      err = gradient_checker.compute_gradient_error(x, x.get_shape(),
-                                                    out, out.get_shape())
+      err = gradient_checker_v2.max_error(
+          *gradient_checker_v2.compute_gradient(func, [x], delta=1e-2))
+
     self.assertLess(err, 1e-4)
 
   def testBroadcastToInvalidShape(self):