[tf.debugging] Make assert_shapes() work on SparseTensors
Fixes https://github.com/tensorflow/tensorflow/issues/36268

PiperOrigin-RevId: 334658746
Change-Id: I53351bf9e8223a37e7cc447651be546fc87cecb6
parent: d3a378f966
commit: 11fc1489d0
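For orientation, a short usage sketch of the behavior this commit enables, written against the public TF 2.x API (`tf.sparse.SparseTensor`, `tf.debugging.assert_shapes`). The tensors and shape specifications below are invented for illustration and are not part of the commit; they mirror what the new tests further down exercise through the internal `check_ops` module.

# Illustrative only -- requires a TensorFlow release that includes this change.
import tensorflow as tf

sp = tf.sparse.SparseTensor(indices=[[0, 1], [2, 3]],
                            values=[10.0, 20.0],
                            dense_shape=[30, 40])

# Both pass: the statically known dense shape matches the specification.
tf.debugging.assert_shapes([(sp, (30, 40))])
tf.debugging.assert_shapes([(sp, (None, 40))])

# Raises ValueError at construction time: dimension 1 is 40, not 41.
try:
  tf.debugging.assert_shapes([(sp, (None, 41))])
except ValueError as e:
  print(e)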
@@ -289,6 +289,10 @@
 * `tf.nn.max_pool2d` now supports explicit padding.
+* `tf.debugging`:
+  * `tf.debugging.assert_shapes()` now works on `SparseTensor`s (#36268).
 * Other:
   * We have replaced uses of "whitelist" and "blacklist" with "allowlist"
@@ -1903,6 +1903,174 @@ class AssertShapesTest(test.TestCase):
         sess.run(out, feed_dict=feed_dict)
 
 
+class AssertShapesSparseTensorTest(test.TestCase):
+
+  @test_util.run_in_graph_and_eager_modes
+  def test_assert_shapes_sparse_tensor_scalar_target_success(self):
+    sparse_float = sparse_tensor.SparseTensor(
+        constant_op.constant([[]], dtypes.int64),
+        constant_op.constant([42], dtypes.float32),
+        constant_op.constant([], dtypes.int64))
+    assertion = check_ops.assert_shapes([(sparse_float, [])])
+    with ops.control_dependencies([assertion]):
+      out = array_ops.identity(sparse_float)
+    self.evaluate(out)
+
+  def test_assert_shapes_sparse_tensor_nonscalar_target_fail(self):
+    sparse_float = sparse_tensor.SparseTensor(
+        constant_op.constant([[]], dtypes.int64),
+        constant_op.constant([42], dtypes.float32),
+        constant_op.constant([], dtypes.int64))
+    with self.assertRaisesRegexp(ValueError,
+                                 r"must have rank 2.*Received rank 0"):
+      assertion = check_ops.assert_shapes([(sparse_float, [None, None])])
+      with ops.control_dependencies([assertion]):
+        out = array_ops.identity(sparse_float)
+      self.evaluate(out)
+
+  @test_util.run_in_graph_and_eager_modes
+  def test_assert_shapes_sparse_tensor_fully_specified_target_success(self):
+    sparse_float = sparse_tensor.SparseTensor(
+        constant_op.constant([[111], [232]], dtypes.int64),
+        constant_op.constant([23.4, -43.2], dtypes.float32),
+        constant_op.constant([500], dtypes.int64))
+    assertion = check_ops.assert_shapes([(sparse_float, [500])])
+    with ops.control_dependencies([assertion]):
+      out = array_ops.identity(sparse_float)
+    self.evaluate(out)
+
+  @test_util.run_in_graph_and_eager_modes
+  def test_assert_shapes_sparse_tensor_fully_specified_target_fail(self):
+    sparse_float = sparse_tensor.SparseTensor(
+        constant_op.constant([[111], [232]], dtypes.int64),
+        constant_op.constant([23.4, -43.2], dtypes.float32),
+        constant_op.constant([500], dtypes.int64))
+    with self.assertRaisesRegexp(ValueError, r"dimension 0 must have size 499"):
+      assertion = check_ops.assert_shapes([(sparse_float, [499])])
+      with ops.control_dependencies([assertion]):
+        out = array_ops.identity(sparse_float)
+      self.evaluate(out)
+
+  @test_util.run_in_graph_and_eager_modes
+  def test_assert_shapes_sparse_tensor_partially_specified_target_success(self):
+    sparse_int = sparse_tensor.SparseTensor(
+        constant_op.constant([[5, 6], [7, 8]], dtypes.int64),
+        constant_op.constant([23, -43], dtypes.int32),
+        constant_op.constant([30, 40], dtypes.int64))
+    assertion = check_ops.assert_shapes([(sparse_int, [None, 40])])
+    with ops.control_dependencies([assertion]):
+      out = array_ops.identity(sparse_int)
+    self.evaluate(out)
+
+  @test_util.run_in_graph_and_eager_modes
+  def test_assert_shapes_sparse_tensor_symbolic_match_success(self):
+    sparse_int = sparse_tensor.SparseTensor(
+        constant_op.constant([[5, 6, 7], [8, 9, 10]], dtypes.int64),
+        constant_op.constant([23, -43], dtypes.int32),
+        constant_op.constant([30, 30, 40], dtypes.int64))
+    assertion = check_ops.assert_shapes([(sparse_int, ["N", "N", "D"])])
+    with ops.control_dependencies([assertion]):
+      out = array_ops.identity(sparse_int)
+    self.evaluate(out)
+
+  @test_util.run_in_graph_and_eager_modes
+  def test_assert_shapes_sparse_tensor_partially_specified_target_fail(self):
+    sparse_int = sparse_tensor.SparseTensor(
+        constant_op.constant([[5, 6], [7, 8]], dtypes.int64),
+        constant_op.constant([23, -43], dtypes.int32),
+        constant_op.constant([30, 40], dtypes.int64))
+    with self.assertRaisesRegexp(ValueError, r"dimension 1 must have size 41"):
+      assertion = check_ops.assert_shapes([(sparse_int, [None, 41])])
+      with ops.control_dependencies([assertion]):
+        out = array_ops.identity(sparse_int)
+      self.evaluate(out)
+
+  @test_util.run_in_graph_and_eager_modes
+  def test_assert_shapes_sparse_tensor_wrong_rank_fail(self):
+    sparse_int = sparse_tensor.SparseTensor(
+        constant_op.constant([[5, 6], [7, 8]], dtypes.int64),
+        constant_op.constant([23, -43], dtypes.int32),
+        constant_op.constant([30, 40], dtypes.int64))
+    with self.assertRaisesRegexp(ValueError,
+                                 r"must have rank 3\..* Received rank 2"):
+      assertion = check_ops.assert_shapes([(sparse_int, [None, None, 40])])
+      with ops.control_dependencies([assertion]):
+        out = array_ops.identity(sparse_int)
+      self.evaluate(out)
+
+  @test_util.run_in_graph_and_eager_modes
+  def test_assert_shapes_sparse_tensor_wrong_symbolic_match_fail(self):
+    sparse_int = sparse_tensor.SparseTensor(
+        constant_op.constant([[5, 6], [7, 8]], dtypes.int64),
+        constant_op.constant([23, -43], dtypes.int32),
+        constant_op.constant([30, 40], dtypes.int64))
+    with self.assertRaisesRegexp(ValueError, r"dimension 1 must have size 30"):
+      assertion = check_ops.assert_shapes([(sparse_int, ["D", "D"])])
+      with ops.control_dependencies([assertion]):
+        out = array_ops.identity(sparse_int)
+      self.evaluate(out)
+
+  @test_util.run_in_graph_and_eager_modes
+  def test_assert_shapes_sparse_tensor_multiple_assertions_success(self):
+    sparse_scalar = sparse_tensor.SparseTensor(
+        constant_op.constant([[]], dtypes.int64),
+        constant_op.constant([42], dtypes.float32),
+        constant_op.constant([], dtypes.int64))
+    sparse_2d = sparse_tensor.SparseTensor(
+        constant_op.constant([[5, 6], [7, 8]], dtypes.int64),
+        constant_op.constant([23, -43], dtypes.int32),
+        constant_op.constant([30, 30], dtypes.int64))
+    assertion = check_ops.assert_shapes([(sparse_scalar, []),
+                                         (sparse_2d, ["N", "N"])])
+    with ops.control_dependencies([assertion]):
+      out = array_ops.identity(sparse_2d)
+    self.evaluate(out)
+
+  @test_util.run_in_graph_and_eager_modes
+  def test_assert_shapes_sparse_tensor_multiple_assertions_fail(self):
+    sparse_scalar = sparse_tensor.SparseTensor(
+        constant_op.constant([[]], dtypes.int64),
+        constant_op.constant([42], dtypes.float32),
+        constant_op.constant([], dtypes.int64))
+    sparse_2d = sparse_tensor.SparseTensor(
+        constant_op.constant([[5, 6], [7, 8]], dtypes.int64),
+        constant_op.constant([23, -43], dtypes.int32),
+        constant_op.constant([30, 40], dtypes.int64))
+    with self.assertRaisesRegexp(ValueError, r"dimension 1 must have size 30"):
+      assertion = check_ops.assert_shapes([(sparse_scalar, []),
+                                           (sparse_2d, ["N", "N"])])
+      with ops.control_dependencies([assertion]):
+        out = array_ops.identity(sparse_2d)
+      self.evaluate(out)
+
+  @test_util.run_in_graph_and_eager_modes
+  def test_assert_shapes_sparse_tensor_mixed_dense_and_sparse_success(self):
+    dense_scalar = constant_op.constant([42], dtypes.float32)
+    sparse_2d = sparse_tensor.SparseTensor(
+        constant_op.constant([[5, 6], [7, 8]], dtypes.int64),
+        constant_op.constant([23, -43], dtypes.int32),
+        constant_op.constant([30, 30], dtypes.int64))
+    assertion = check_ops.assert_shapes([(dense_scalar, []),
+                                         (sparse_2d, ["N", "N"])])
+    with ops.control_dependencies([assertion]):
+      out = array_ops.identity(sparse_2d)
+    self.evaluate(out)
+
+  @test_util.run_in_graph_and_eager_modes
+  def test_assert_shapes_sparse_tensor_mixed_dense_and_sparse_fail(self):
+    dense_scalar = constant_op.constant([42], dtypes.float32)
+    sparse_2d = sparse_tensor.SparseTensor(
+        constant_op.constant([[5, 6], [7, 8]], dtypes.int64),
+        constant_op.constant([23, -43], dtypes.int32),
+        constant_op.constant([30, 40], dtypes.int64))
+    with self.assertRaisesRegexp(ValueError, r"dimension 1 must have size 30"):
+      assertion = check_ops.assert_shapes([(dense_scalar, []),
+                                           (sparse_2d, ["N", "N"])])
+      with ops.control_dependencies([assertion]):
+        out = array_ops.identity(sparse_2d)
+      self.evaluate(out)
+
+
 class IsStrictlyIncreasingTest(test.TestCase):
 
   @test_util.run_in_graph_and_eager_modes
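A note on the symbolic-match tests above: a string label such as "N" binds to a single dimension size, and reusing the label asserts equality between those dimensions, including across tensors passed in the same call. The sketch below illustrates this with the public API; the tensors and labels are invented for this example.

import tensorflow as tf

square = tf.sparse.SparseTensor(indices=[[0, 0]], values=[1.0],
                                dense_shape=[30, 30])
rect = tf.sparse.SparseTensor(indices=[[0, 0]], values=[1.0],
                              dense_shape=[30, 40])

# Passes: 'N' binds to 30 in both tensors; 'D' binds to 40.
tf.debugging.assert_shapes([(square, ('N', 'N')),
                            (rect, ('N', 'D'))])

# Fails: 'D' cannot bind to both 30 and 40 within one tensor.
try:
  tf.debugging.assert_shapes([(rect, ('D', 'D'))])
except ValueError as e:
  print(e)  # "... dimension 1 must have size 30 ..."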
@@ -1151,14 +1151,15 @@ def assert_rank(x, rank, data=None, summarize=None, message=None, name=None):
     ValueError: If static checks determine `x` has wrong rank.
   """
   with ops.name_scope(name, 'assert_rank', (x, rank) + tuple(data or [])):
-    x = ops.convert_to_tensor(x, name='x')
+    if not isinstance(x, sparse_tensor.SparseTensor):
+      x = ops.convert_to_tensor(x, name='x')
     rank = ops.convert_to_tensor(rank, name='rank')
     message = message or ''
 
     static_condition = lambda actual_rank, given_rank: actual_rank == given_rank
     dynamic_condition = math_ops.equal
 
-    if context.executing_eagerly():
+    if context.executing_eagerly() or isinstance(x, sparse_tensor.SparseTensor):
       name = ''
     else:
       name = x.name
@@ -1418,11 +1419,12 @@ def assert_rank_in(
   """
   with ops.name_scope(
       name, 'assert_rank_in', (x,) + tuple(ranks) + tuple(data or [])):
-    x = ops.convert_to_tensor(x, name='x')
+    if not isinstance(x, sparse_tensor.SparseTensor):
+      x = ops.convert_to_tensor(x, name='x')
     ranks = tuple([ops.convert_to_tensor(rank, name='rank') for rank in ranks])
     message = message or ''
 
-    if context.executing_eagerly():
+    if context.executing_eagerly() or isinstance(x, sparse_tensor.SparseTensor):
       name = ''
     else:
       name = x.name
@@ -1582,7 +1584,7 @@ def _dimension_sizes(x):
   rank = x.get_shape().rank
   rank_is_known = rank is not None
   if rank_is_known and rank == 0:
-    return tuple([1])
+    return (1,)
   if rank_is_known and rank > 0:
     static_shape = x.get_shape().as_list()
     sizes = [
@@ -1787,14 +1789,14 @@ def assert_shapes(shapes, data=None, summarize=None, message=None, name=None):
   message = message or ''
   with ops.name_scope(name, 'assert_shapes', [shapes, data]):
     # Shape specified as None implies no constraint
-    shape_constraints = [
-        (ops.convert_to_tensor(x), s) for x, s in shapes if s is not None
-    ]
+    shape_constraints = [(x if isinstance(x, sparse_tensor.SparseTensor) else
+                          ops.convert_to_tensor(x), s)
+                         for x, s in shapes if s is not None]
 
     executing_eagerly = context.executing_eagerly()
 
     def tensor_name(x):
-      if executing_eagerly:
+      if executing_eagerly or isinstance(x, sparse_tensor.SparseTensor):
         return _shape_and_dtype_str(x)
       return x.name