Remove unnecessary eval() calls
The assertAll* statements already evaluate the arguments.

PiperOrigin-RevId: 320729457
Change-Id: Ie1564419eb5cf8f69d0e700c000074e248401dbc
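For context on the change: the assertAll* helpers on tf.test.TestCase (assertAllClose, assertAllEqual, and friends) pass tensor arguments through self.evaluate() before comparing, so an explicit .eval() only forces a redundant extra evaluation. A minimal sketch of the before/after pattern, using a hypothetical test case (not part of this commit) and assuming the TF1-style graph-mode setup the touched tests use:

    from tensorflow.python.framework import constant_op
    from tensorflow.python.platform import test


    class EvalRedundancyTest(test.TestCase):

      def testAssertEvaluatesTensors(self):
        x = constant_op.constant([1.0, 2.0])
        with self.cached_session():
          # Before: explicitly pull a NumPy value out of the tensor.
          self.assertAllClose(x.eval(), [1.0, 2.0])
          # After: assertAllClose evaluates tensor arguments itself, so the
          # eval() above is redundant.
          self.assertAllClose(x, [1.0, 2.0])


    if __name__ == "__main__":
      test.main()

Dropping .eval() also has the side benefit that the same assertion works under eager execution, where Tensor.eval() is not available.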
parent f3b556a903
commit 3750943228
tensorflow/python
    eager
    feature_column
    framework
    keras/legacy_tf_layers
    kernel_tests
        array_ops_test.py
        atrous_conv2d_test.py
        distributions
        embedding_ops_test.py
        init_ops_test.py
        linalg
        linalg_ops_test.py
        regex_replace_op_test.py
        sparse_cross_op_test.py
        sparse_slice_op_test.py
    ops
    training
@@ -288,8 +288,8 @@ class BackpropTest(test.TestCase, parameterized.TestCase):
       tf_opt = training.GradientDescentOptimizer(0.1)
       tf_embedding.initializer.run()

-      self.assertAllClose(tf_grad.indices.eval(), grad.indices)
-      self.assertAllClose(tf_grad.values.eval(), grad.values)
+      self.assertAllClose(tf_grad.indices, grad.indices)
+      self.assertAllClose(tf_grad.values, grad.values)

       tf_opt.apply_gradients([(tf_grad, tf_embedding)]).run()
       expected = self.evaluate(tf_embedding)
@@ -825,7 +825,7 @@ class FunctionGradientsTest(test.TestCase, parameterized.TestCase):
         return middle_fn(x, v)

       x = constant_op.constant(5.0)
-      self.assertAllEqual(outer_fn(x).eval(), 5.0 * (5.0 + 3.0))
+      self.assertAllEqual(outer_fn(x), 5.0 * (5.0 + 3.0))

       grad, = gradients_impl.gradients(outer_fn(x), x)
@@ -1360,7 +1360,7 @@ class FunctionTest(test.TestCase, parameterized.TestCase):
     def g(x):
       return f(x) + 1

-    self.assertAllEqual(g(constant_op.constant(2.0)).eval(), 5.0)
+    self.assertAllEqual(g(constant_op.constant(2.0)), 5.0)

   def testDict(self):
@@ -752,9 +752,8 @@ class HashedCategoricalColumnTest(test.TestCase):
     with self.cached_session():
       self.assertEqual(dtypes.int64, output.values.dtype)
       self.assertAllEqual(expected_values, output.values)
-      self.assertAllEqual(wire_tensor.indices.eval(), output.indices)
-      self.assertAllEqual(wire_tensor.dense_shape.eval(),
-                          output.dense_shape.eval())
+      self.assertAllEqual(wire_tensor.indices, output.indices)
+      self.assertAllEqual(wire_tensor.dense_shape, output.dense_shape.eval())

   def test_tensor_dtype_should_be_string_or_integer(self):
     string_fc = fc._categorical_column_with_hash_bucket(
@@ -607,9 +607,9 @@ class AutomaticControlDependenciesTest(test.TestCase):
       one = constant_op.constant(1.0)
       one = c.mark_as_return(one)
       one.eval(feed_dict={p: False})
-      self.assertAllEqual(v.read_value().eval(), 5.0)
+      self.assertAllEqual(v.read_value(), 5.0)
       one.eval(feed_dict={p: True})
-      self.assertAllEqual(v.read_value().eval(), 6.0)
+      self.assertAllEqual(v.read_value(), 6.0)

   @test_util.run_v1_only("b/120545219")
   def testCondNested(self):
@@ -737,7 +737,7 @@ class AutomaticControlDependenciesTest(test.TestCase):
       v.assign(2 * v)
       return v.read_value()

-    self.assertAllEqual(f().eval(), 4.0)
+    self.assertAllEqual(f(), 4.0)

   def testOptimizerInDefun(self):
     def loss(v):
@@ -437,10 +437,10 @@ class FunctionTest(test.TestCase):
     self.assertEqual([("Assert", "Assert")], Foo.stateful_ops)
     g = ops.Graph()
     with g.as_default(), self.cached_session():
-      self.assertAllEqual(Foo(constant_op.constant(3.0)).eval(), 6.0)
+      self.assertAllEqual(Foo(constant_op.constant(3.0)), 6.0)
       with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
                                   "assertion failed.*-3"):
-        self.assertAllEqual(Foo(constant_op.constant(-3.0)).eval(), 6.0)
+        self.assertAllEqual(Foo(constant_op.constant(-3.0)), 6.0)

   @test_util.run_deprecated_v1
   def testAssertWrapper(self):
@@ -945,7 +945,7 @@ class ImportGraphDefTest(test.TestCase):

     with self.cached_session():
       pack, = importer.import_graph_def(gdef, return_elements=["pack"])
-      self.assertAllEqual(pack.outputs[0].eval(), [5.0, 5.0])
+      self.assertAllEqual(pack.outputs[0], [5.0, 5.0])

   def testWithDevice(self):
     with ops.Graph().as_default() as g:
@@ -284,11 +284,11 @@ class DenseTest(test.TestCase, parameterized.TestCase):
       weights = _get_variable_dict_from_varstore()
       self.assertEqual(len(weights), 2)
       # Check that the matrix weights got initialized to ones (from scope).
-      self.assertAllClose(weights['scope/dense/kernel'].read_value().eval(),
+      self.assertAllClose(weights['scope/dense/kernel'].read_value(),
                           np.ones((3, 2)))
       # Check that the bias still got initialized to zeros.
-      self.assertAllClose(weights['scope/dense/bias'].read_value().eval(),
-                          np.zeros((2)))
+      self.assertAllClose(weights['scope/dense/bias'].read_value(), np.zeros(
+          (2)))

   def testEagerExecution(self):
     with context.eager_mode():
@@ -1277,7 +1277,7 @@ class SequenceMaskTest(test_util.TensorFlowTestCase):
       res = array_ops.sequence_mask(constant_op.constant([1, 3, 2]), 5)
       self.assertAllEqual(res.get_shape(), [3, 5])
       self.assertAllEqual(
-          res.eval(),
+          res,
          [[True, False, False, False, False], [True, True, True, False, False],
           [True, True, False, False, False]])

@@ -1289,7 +1289,7 @@ class SequenceMaskTest(test_util.TensorFlowTestCase):
           constant_op.constant([0, 1, 4]), dtype=dtypes.float32)
       self.assertAllEqual(res.get_shape().as_list(), [3, 4])
       self.assertAllEqual(
-          res.eval(),
+          res,
           [[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]])

   @test_util.run_deprecated_v1
@@ -1298,8 +1298,8 @@ class SequenceMaskTest(test_util.TensorFlowTestCase):
       res = array_ops.sequence_mask(constant_op.constant([0, 1, 4]))
       self.assertAllEqual(res.get_shape().as_list(), [3, 4])
       self.assertAllEqual(
-          res.eval(), [[False, False, False, False],
-                       [True, False, False, False], [True, True, True, True]])
+          res, [[False, False, False, False], [True, False, False, False],
+                [True, True, True, True]])

   @test_util.run_deprecated_v1
   def testTwoDimensional(self):
@@ -1315,7 +1315,7 @@ class SequenceMaskTest(test_util.TensorFlowTestCase):
           constant_op.constant([[0, 1, 4], [1, 2, 3]]), dtype=dtypes.float32)
       self.assertAllEqual(res.get_shape().as_list(), [2, 3, 4])
       self.assertAllEqual(
-          res.eval(),
+          res,
          [[[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]],
           [[1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0]]])

@@ -1334,7 +1334,7 @@ class SequenceMaskTest(test_util.TensorFlowTestCase):
           constant_op.constant(5, dtype=maxlen_dtype))
       self.assertAllEqual(res.get_shape(), [3, 5])
       self.assertAllEqual(
-          res.eval(),
+          res,
          [[True, False, False, False, False], [True, True, True, False, False],
           [True, True, False, False, False]])
@@ -81,8 +81,7 @@ class AtrousConv2DTest(test.TestCase):
             y1 = nn_ops.atrous_conv2d(x, f, rate, padding=padding)
             y2 = nn_ops.conv2d(
                 x, f_up, strides=[1, 1, 1, 1], padding=padding)
-            self.assertAllClose(
-                y1.eval(), self.evaluate(y2), rtol=1e-3, atol=1e-3)
+            self.assertAllClose(y1, y2, rtol=1e-3, atol=1e-3)

   @test_util.run_deprecated_v1
   def testAtrousSequence(self):
@@ -135,8 +134,7 @@ class AtrousConv2DTest(test.TestCase):
           y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
           y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
           y2 = array_ops.batch_to_space(y2, crops=pad, block_size=rate)
-          self.assertAllClose(
-              y1.eval(), self.evaluate(y2), rtol=1e-2, atol=1e-2)
+          self.assertAllClose(y1, y2, rtol=1e-2, atol=1e-2)

   @test_util.run_deprecated_v1
   def testGradient(self):
@@ -200,8 +198,7 @@ class AtrousConv2DTransposeTest(test.TestCase):
                                         padding)
             y2 = nn_ops.conv2d_transpose(
                 x, f_up, y_shape, strides=[1, 1, 1, 1], padding=padding)
-            self.assertAllClose(
-                y1.eval(), self.evaluate(y2), rtol=1e-3, atol=1e-3)
+            self.assertAllClose(y1, y2, rtol=1e-3, atol=1e-3)


 class AtrousDepthwiseConv2DTest(test.TestCase):
@@ -229,8 +226,7 @@ class AtrousDepthwiseConv2DTest(test.TestCase):
             y1 = nn_impl.depthwise_conv2d(
                 x, f, strides, padding, rate=[rate, rate])
             y2 = nn_impl.depthwise_conv2d(x, f_up, strides, padding)
-            self.assertAllClose(
-                y1.eval(), self.evaluate(y2), rtol=1e-3, atol=1e-3)
+            self.assertAllClose(y1, y2, rtol=1e-3, atol=1e-3)


 if __name__ == "__main__":
@@ -61,8 +61,8 @@ class CategoricalTest(test.TestCase, parameterized.TestCase):
     with self.cached_session():
       self.assertAllEqual([2], dist.probs.get_shape())
       self.assertAllEqual([2], dist.logits.get_shape())
-      self.assertAllClose(dist.probs.eval(), p)
-      self.assertAllClose(dist.logits.eval(), logits)
+      self.assertAllClose(dist.probs, p)
+      self.assertAllClose(dist.logits, logits)

   @test_util.run_deprecated_v1
   def testShapes(self):
@@ -131,14 +131,14 @@ class CategoricalTest(test.TestCase, parameterized.TestCase):
     histograms = [[0.2, 0.8], [0.6, 0.4]]
     dist = categorical.Categorical(math_ops.log(histograms) - 50.)
     with self.cached_session():
-      self.assertAllClose(dist.prob([0, 1]).eval(), [0.2, 0.4])
+      self.assertAllClose(dist.prob([0, 1]), [0.2, 0.4])

   @test_util.run_deprecated_v1
   def testPMFNoBatch(self):
     histograms = [0.2, 0.8]
     dist = categorical.Categorical(math_ops.log(histograms) - 50.)
     with self.cached_session():
-      self.assertAllClose(dist.prob(0).eval(), 0.2)
+      self.assertAllClose(dist.prob(0), 0.2)

   @test_util.run_deprecated_v1
   def testCDFWithDynamicEventShapeKnownNdims(self):
@@ -240,7 +240,7 @@ class CategoricalTest(test.TestCase, parameterized.TestCase):
     expected_cdf_result[2, 1] = 0.75

     with self.cached_session():
-      self.assertAllClose(dist.cdf(devent).eval(), expected_cdf_result)
+      self.assertAllClose(dist.cdf(devent), expected_cdf_result)

   def testBroadcastWithBatchParamsAndBiggerEvent(self):
     ## The parameters have a single batch dimension, and the event has two.
@@ -314,15 +314,15 @@ class CategoricalTest(test.TestCase, parameterized.TestCase):
     logits = np.log([[0.2, 0.8], [0.6, 0.4]]) - 50.
     dist = categorical.Categorical(logits)
     with self.cached_session():
-      self.assertAllClose(dist.log_prob([0, 1]).eval(), np.log([0.2, 0.4]))
-      self.assertAllClose(dist.log_prob([0.0, 1.0]).eval(), np.log([0.2, 0.4]))
+      self.assertAllClose(dist.log_prob([0, 1]), np.log([0.2, 0.4]))
+      self.assertAllClose(dist.log_prob([0.0, 1.0]), np.log([0.2, 0.4]))

   @test_util.run_deprecated_v1
   def testEntropyNoBatch(self):
     logits = np.log([0.2, 0.8]) - 50.
     dist = categorical.Categorical(logits)
     with self.cached_session():
-      self.assertAllClose(dist.entropy().eval(),
+      self.assertAllClose(dist.entropy(),
                           -(0.2 * np.log(0.2) + 0.8 * np.log(0.8)))

   @test_util.run_deprecated_v1
@@ -330,7 +330,7 @@ class CategoricalTest(test.TestCase, parameterized.TestCase):
     logits = np.log([[0.2, 0.8], [0.6, 0.4]]) - 50.
     dist = categorical.Categorical(logits)
     with self.cached_session():
-      self.assertAllClose(dist.entropy().eval(), [
+      self.assertAllClose(dist.entropy(), [
           -(0.2 * np.log(0.2) + 0.8 * np.log(0.8)),
           -(0.6 * np.log(0.6) + 0.4 * np.log(0.4))
       ])
@@ -460,7 +460,7 @@ class CategoricalTest(test.TestCase, parameterized.TestCase):
     with self.cached_session():
       histograms = [[[0.2, 0.8], [0.6, 0.4]]]
       dist = categorical.Categorical(math_ops.log(histograms) - 50.)
-      self.assertAllEqual(dist.mode().eval(), [[1, 0]])
+      self.assertAllEqual(dist.mode(), [[1, 0]])

   @test_util.run_deprecated_v1
   def testCategoricalCategoricalKL(self):
@@ -556,7 +556,7 @@ class EmbeddingLookupTest(test.TestCase):
       ids = np.random.randint(
           params.shape[0], size=np.prod(ids_shape)).reshape(ids_shape)
       # Compare nonsharded to gather
-      simple = embedding_ops.embedding_lookup(params, ids).eval()
+      simple = embedding_ops.embedding_lookup(params, ids)
       self.assertAllEqual(simple, array_ops.gather(params, ids))
       # Run a few random sharded versions
       for procs in 1, 2, 3:
@@ -564,7 +564,7 @@ class EmbeddingLookupTest(test.TestCase):
         split_params = [
             array_ops.gather(params, stride + p) for p in xrange(procs)
         ]
-        sharded = embedding_ops.embedding_lookup(split_params, ids).eval()
+        sharded = embedding_ops.embedding_lookup(split_params, ids)
         self.assertAllEqual(simple, sharded)

   @test_util.run_deprecated_v1
@@ -583,8 +583,7 @@ class EmbeddingLookupTest(test.TestCase):
           params.shape[0], size=np.prod(ids_shape,
                                         dtype=np.int64)).reshape(ids_shape)
       # Compare nonsharded to gather
-      simple = embedding_ops.embedding_lookup(
-          params, ids, max_norm=1.0).eval()
+      simple = embedding_ops.embedding_lookup(params, ids, max_norm=1.0)
       # assertAllClose is used here as different implementations of sqrt may
       # be used to compute each of the values being compared. For example,
       # on AVX512 builds the embedding operation makes use of Eigen's fast
@@ -599,7 +598,7 @@ class EmbeddingLookupTest(test.TestCase):
            array_ops.gather(params, stride + p) for p in xrange(procs)
        ]
        sharded = embedding_ops.embedding_lookup(
-            split_params, ids, max_norm=1.0).eval()
+            split_params, ids, max_norm=1.0)
        self.assertAllEqual(simple, sharded)

   @test_util.run_deprecated_v1
@@ -626,7 +625,7 @@ class EmbeddingLookupTest(test.TestCase):
                                         dtype=np.int64)).reshape(ids_shape)
       # Compare nonsharded to gather.
       simple = embedding_ops._embedding_lookup_and_transform(
-          params, ids, max_norm=l2_norm, transform_fn=transform).eval()
+          params, ids, max_norm=l2_norm, transform_fn=transform)
       self.assertAllClose(simple, array_ops.gather(params_norm, ids))
       # Run a few different sharded versions.
       for procs in 1, 2, 3:
@@ -635,8 +634,7 @@ class EmbeddingLookupTest(test.TestCase):
             array_ops.gather(params, stride + p) for p in xrange(procs)
         ]
         sharded = embedding_ops._embedding_lookup_and_transform(
-            split_params, ids, max_norm=l2_norm,
-            transform_fn=transform).eval()
+            split_params, ids, max_norm=l2_norm, transform_fn=transform)
         # assertAllClose is used here as different implementations of sqrt may
         # be used to compute each of the values being compared. For example,
         # on AVX512 builds the embedding operation makes use of Eigen's fast
@@ -871,8 +869,9 @@ class SafeEmbeddingLookupSparseTest(test.TestCase):
     sparse_ids, sparse_weights = self._ids_and_weights_2d()

     embedding_lookup_result = (
-        embedding_ops.safe_embedding_lookup_sparse_v2(
-            embedding_weights, sparse_ids, sparse_weights).eval())
+        embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
+                                                      sparse_ids,
+                                                      sparse_weights))

     self.assertAllClose(
         embedding_lookup_result,
@@ -887,8 +886,7 @@ class SafeEmbeddingLookupSparseTest(test.TestCase):

     embedding_lookup_result = (
         embedding_ops.safe_embedding_lookup_sparse_v2(
-            embedding_weights, sparse_ids, sparse_weights,
-            default_id=3).eval())
+            embedding_weights, sparse_ids, sparse_weights, default_id=3))

     self.assertAllClose(
         embedding_lookup_result,
@@ -903,8 +901,8 @@ class SafeEmbeddingLookupSparseTest(test.TestCase):
     sparse_ids, _ = self._ids_and_weights_2d()

     embedding_lookup_result = (
-        embedding_ops.safe_embedding_lookup_sparse_v2(
-            embedding_weights, sparse_ids, None).eval())
+        embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
+                                                      sparse_ids, None))

     self.assertAllClose(
         embedding_lookup_result,
@@ -919,8 +917,8 @@ class SafeEmbeddingLookupSparseTest(test.TestCase):
     sparse_ids, _ = self._ids_and_weights_2d()

     embedding_lookup_result = (
-        embedding_ops.safe_embedding_lookup_sparse_v2(
-            embedding_weights, sparse_ids, None).eval())
+        embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
+                                                      sparse_ids, None))

     embedding_weights = list(itertools.chain(*embedding_weights))
     self.assertAllClose(embedding_lookup_result,
@@ -951,8 +949,9 @@ class SafeEmbeddingLookupSparseTest(test.TestCase):
     sparse_ids, sparse_weights = self._ids_and_weights_3d()

     embedding_lookup_result = (
-        embedding_ops.safe_embedding_lookup_sparse_v2(
-            embedding_weights, sparse_ids, sparse_weights).eval())
+        embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
+                                                      sparse_ids,
+                                                      sparse_weights))

     self.assertAllClose(embedding_lookup_result, [[
         (1.0 * embedding_weights[0][0] + 2.0 * embedding_weights[0][1]) / 3.0,
@@ -967,8 +966,7 @@ class SafeEmbeddingLookupSparseTest(test.TestCase):

     embedding_lookup_result = (
         embedding_ops.safe_embedding_lookup_sparse_v2(
-            embedding_weights, sparse_ids, sparse_weights,
-            default_id=3).eval())
+            embedding_weights, sparse_ids, sparse_weights, default_id=3))

     self.assertAllClose(
         embedding_lookup_result,
@@ -985,8 +983,8 @@ class SafeEmbeddingLookupSparseTest(test.TestCase):
     sparse_ids, _ = self._ids_and_weights_3d()

     embedding_lookup_result = (
-        embedding_ops.safe_embedding_lookup_sparse_v2(
-            embedding_weights, sparse_ids, None).eval())
+        embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
+                                                      sparse_ids, None))

     self.assertAllClose(embedding_lookup_result, [[(
         embedding_weights[0][0] + embedding_weights[0][1]) / 2.0, [0] * 4, [
@@ -1003,8 +1001,8 @@ class SafeEmbeddingLookupSparseTest(test.TestCase):
     sparse_ids, _ = self._ids_and_weights_3d()

     embedding_lookup_result = (
-        embedding_ops.safe_embedding_lookup_sparse_v2(
-            embedding_weights, sparse_ids, None).eval())
+        embedding_ops.safe_embedding_lookup_sparse_v2(embedding_weights,
+                                                      sparse_ids, None))

     embedding_weights = list(itertools.chain(*embedding_weights))
     self.assertAllClose(embedding_lookup_result, [[
@@ -1046,7 +1044,7 @@ class DynamicStitchOpTest(test.TestCase):
           ops.convert_to_tensor([1, 2])
       ]
       self.assertAllEqual(
-          data_flow_ops.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])
+          data_flow_ops.dynamic_stitch(indices, values), [12, 23, 1, 2])

   @test_util.run_deprecated_v1
   def testCint32Gpu(self):
@@ -1060,7 +1058,7 @@ class DynamicStitchOpTest(test.TestCase):
           ops.convert_to_tensor([1, 2])
       ]
       self.assertAllEqual(
-          data_flow_ops.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])
+          data_flow_ops.dynamic_stitch(indices, values), [12, 23, 1, 2])

   @test_util.run_deprecated_v1
   def testInt32Cpu(self):
@@ -1074,7 +1072,7 @@ class DynamicStitchOpTest(test.TestCase):
           ops.convert_to_tensor([1, 2])
       ]
       self.assertAllEqual(
-          data_flow_ops.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])
+          data_flow_ops.dynamic_stitch(indices, values), [12, 23, 1, 2])

   @test_util.run_deprecated_v1
   def testInt32Gpu(self):
@@ -1088,7 +1086,7 @@ class DynamicStitchOpTest(test.TestCase):
           ops.convert_to_tensor([1, 2])
       ]
       self.assertAllEqual(
-          data_flow_ops.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])
+          data_flow_ops.dynamic_stitch(indices, values), [12, 23, 1, 2])

   @test_util.run_deprecated_v1
   def testSumGradArgs(self):
@@ -1102,7 +1100,7 @@ class DynamicStitchOpTest(test.TestCase):
           ops.convert_to_tensor([1, 1])
       ]
       self.assertAllEqual(
-          data_flow_ops.dynamic_stitch(indices, values).eval(), [2, 3, 1, 1])
+          data_flow_ops.dynamic_stitch(indices, values), [2, 3, 1, 1])

   # We expect that the values are merged in order.
   @test_util.run_deprecated_v1
@@ -1115,7 +1113,7 @@ class DynamicStitchOpTest(test.TestCase):
       indices.extend([ops.convert_to_tensor(np.arange(100).astype(np.int32))])
       np_values.extend([np.random.uniform(size=100)])
       values.extend([ops.convert_to_tensor(np_values[-1])])
-      stitched = data_flow_ops.dynamic_stitch(indices, values).eval()
+      stitched = data_flow_ops.dynamic_stitch(indices, values)
       self.assertAllEqual(np_values[-1], stitched)


@@ -1133,7 +1131,7 @@ class ParallelDynamicStitchOpTest(test.TestCase):
           ops.convert_to_tensor([1, 2, 3])
       ]
       self.assertAllEqual(
-          data_flow_ops.parallel_dynamic_stitch(indices, values).eval(),
+          data_flow_ops.parallel_dynamic_stitch(indices, values),
           [12, 23, 1, 2, 34, 3, 45])

   @test_util.run_deprecated_v1
@@ -1148,7 +1146,7 @@ class ParallelDynamicStitchOpTest(test.TestCase):
           ops.convert_to_tensor([1, 3, 2])
       ]
       self.assertAllEqual(
-          data_flow_ops.parallel_dynamic_stitch(indices, values).eval(),
+          data_flow_ops.parallel_dynamic_stitch(indices, values),
           [12, 23, 1, 2, 3, 34, 45, 56])

   @test_util.run_deprecated_v1
@@ -1157,8 +1155,7 @@ class ParallelDynamicStitchOpTest(test.TestCase):
     indices = [ops.convert_to_tensor([0, 1]), ops.convert_to_tensor([2, 3])]
     values = [ops.convert_to_tensor([2, 3]), ops.convert_to_tensor([1, 1])]
     self.assertAllEqual(
-        data_flow_ops.parallel_dynamic_stitch(indices, values).eval(),
-        [2, 3, 1, 1])
+        data_flow_ops.parallel_dynamic_stitch(indices, values), [2, 3, 1, 1])


 if __name__ == "__main__":
@@ -1325,7 +1325,7 @@ class IdentityInitializerTest(test.TestCase):
     init = init_ops.identity_initializer()
     shape = (10, 5)
     with self.session(graph=ops.Graph(), use_gpu=True):
-      self.assertAllClose(init(shape).eval(), np.eye(*shape))
+      self.assertAllClose(init(shape), np.eye(*shape))

   @test_util.run_deprecated_v1
   def testGain(self):
@@ -1334,9 +1334,9 @@ class IdentityInitializerTest(test.TestCase):
     init_default = init_ops.identity_initializer(dtype=dtype)
     init_custom = init_ops.identity_initializer(gain=0.9, dtype=dtype)
     with self.session(graph=ops.Graph(), use_gpu=True):
-      self.assertAllClose(init_default(shape).eval(), np.eye(*shape))
+      self.assertAllClose(init_default(shape), np.eye(*shape))
     with self.session(graph=ops.Graph(), use_gpu=True):
-      self.assertAllClose(init_custom(shape).eval(), np.eye(*shape) * 0.9)
+      self.assertAllClose(init_custom(shape), np.eye(*shape) * 0.9)

   @test_util.run_deprecated_v1
   def testPartitions(self):
@@ -279,9 +279,8 @@ class AddAndReturnScaledIdentityTest(test.TestCase):
     self.assertIsInstance(operator, linalg.LinearOperatorScaledIdentity)

     with self.cached_session():
-      self.assertAllClose(2 *
-                          linalg_ops.eye(num_rows=2, batch_shape=[3]).eval(),
-                          operator.to_dense().eval())
+      self.assertAllClose(2 * linalg_ops.eye(num_rows=2, batch_shape=[3]),
+                          operator.to_dense())
       self.assertTrue(operator.is_positive_definite)
       self.assertTrue(operator.is_non_singular)
       self.assertEqual("my_operator", operator.name)
@@ -298,9 +297,8 @@ class AddAndReturnScaledIdentityTest(test.TestCase):
     self.assertIsInstance(operator, linalg.LinearOperatorScaledIdentity)

     with self.cached_session():
-      self.assertAllClose(3.2 *
-                          linalg_ops.eye(num_rows=2, batch_shape=[3]).eval(),
-                          operator.to_dense().eval())
+      self.assertAllClose(3.2 * linalg_ops.eye(num_rows=2, batch_shape=[3]),
+                          operator.to_dense())
       self.assertTrue(operator.is_positive_definite)
       self.assertTrue(operator.is_non_singular)
       self.assertEqual("my_operator", operator.name)
@@ -318,9 +316,8 @@ class AddAndReturnScaledIdentityTest(test.TestCase):
     self.assertIsInstance(operator, linalg.LinearOperatorScaledIdentity)

     with self.cached_session():
-      self.assertAllClose(1.2 *
-                          linalg_ops.eye(num_rows=2, batch_shape=[3]).eval(),
-                          operator.to_dense().eval())
+      self.assertAllClose(1.2 * linalg_ops.eye(num_rows=2, batch_shape=[3]),
+                          operator.to_dense())
       self.assertTrue(operator.is_positive_definite)
       self.assertTrue(operator.is_non_singular)
       self.assertEqual("my_operator", operator.name)
@@ -343,9 +340,8 @@ class AddAndReturnDiagTest(test.TestCase):
     self.assertIsInstance(operator, linalg.LinearOperatorDiag)

     with self.cached_session():
-      self.assertAllClose(2 *
-                          linalg_ops.eye(num_rows=2, batch_shape=[3]).eval(),
-                          operator.to_dense().eval())
+      self.assertAllClose(2 * linalg_ops.eye(num_rows=2, batch_shape=[3]),
+                          operator.to_dense())
       self.assertTrue(operator.is_positive_definite)
       self.assertTrue(operator.is_non_singular)
       self.assertEqual("my_operator", operator.name)
@@ -365,8 +361,8 @@ class AddAndReturnDiagTest(test.TestCase):

     with self.cached_session():
       self.assertAllClose(
-          linalg.LinearOperatorDiag(diag1 + diag2).to_dense().eval(),
-          operator.to_dense().eval())
+          linalg.LinearOperatorDiag(diag1 + diag2).to_dense(),
+          operator.to_dense())
       self.assertTrue(operator.is_positive_definite)
       self.assertTrue(operator.is_non_singular)
       self.assertEqual("my_operator", operator.name)
@@ -69,8 +69,7 @@ class CholeskySolveTest(test.TestCase):
         with self.subTest(n=n, np_type=np_type, atol=atol, k=k):
           rhs = self.rng.randn(2, n, k).astype(np_type)
           x = linalg_ops.cholesky_solve(chol, rhs)
-          self.assertAllClose(
-              rhs, math_ops.matmul(array, x).eval(), atol=atol)
+          self.assertAllClose(rhs, math_ops.matmul(array, x), atol=atol)


 class LogdetTest(test.TestCase):
@@ -44,7 +44,7 @@ class RegexReplaceOpVariantsTest(test.TestCase, parameterized.TestCase):
                                "HiJkLmN"], dtypes.string),
           pos=0,
           len=5)
-      stripped = op(inp, "\\p{Ll}", ".").eval()
+      stripped = op(inp, "\\p{Ll}", ".")
       self.assertAllEqual([b"A.C.E", b"H.J.L"], stripped)

   @test_util.run_deprecated_v1
@@ -52,7 +52,7 @@ class RegexReplaceOpVariantsTest(test.TestCase, parameterized.TestCase):
     values = ["a:foo", "a:bar", "a:foo", "b:baz", "b:qux", "ca:b"]
     with self.cached_session():
       input_vector = constant_op.constant(values, dtypes.string)
-      stripped = op(input_vector, "^(a:|b:)", "", replace_global=False).eval()
+      stripped = op(input_vector, "^(a:|b:)", "", replace_global=False)
       self.assertAllEqual([b"foo", b"bar", b"foo", b"baz", b"qux", b"ca:b"],
                           stripped)
@@ -61,7 +61,7 @@ class RegexReplaceOpVariantsTest(test.TestCase, parameterized.TestCase):
     values = ["aba\naba", "abcdabcde"]
     with self.cached_session():
       input_vector = constant_op.constant(values, dtypes.string)
-      stripped = op(input_vector, "a.*a", "(\\0)").eval()
+      stripped = op(input_vector, "a.*a", "(\\0)")
       self.assertAllEqual([b"(aba)\n(aba)", b"(abcda)bcde"], stripped)

   @test_util.run_deprecated_v1
@@ -69,7 +69,7 @@ class RegexReplaceOpVariantsTest(test.TestCase, parameterized.TestCase):
     values = ["abc", "1"]
     with self.cached_session():
       input_vector = constant_op.constant(values, dtypes.string)
-      stripped = op(input_vector, "", "x").eval()
+      stripped = op(input_vector, "", "x")
       self.assertAllEqual([b"xaxbxcx", b"x1x"], stripped)

   @test_util.run_deprecated_v1
@@ -87,7 +87,7 @@ class RegexReplaceOpVariantsTest(test.TestCase, parameterized.TestCase):
     values = ["ababababab", "abcabcabc", ""]
     with self.cached_session():
       input_vector = constant_op.constant(values, dtypes.string)
-      stripped = op(input_vector, "ab", "abc", True).eval()
+      stripped = op(input_vector, "ab", "abc", True)
       self.assertAllEqual([b"abcabcabcabcabc", b"abccabccabcc", b""], stripped)
@@ -65,9 +65,9 @@ class BaseSparseCrossOpTest(test.TestCase):
         constant_op.constant(shape, dtypes.int64))

   def _assert_sparse_tensor_equals(self, sp1, sp2):
-    self.assertAllEqual(sp1.indices.eval(), sp2.indices)
-    self.assertAllEqual(sp1.values.eval(), sp2.values)
-    self.assertAllEqual(sp1.dense_shape.eval(), sp2.dense_shape)
+    self.assertAllEqual(sp1.indices, sp2.indices)
+    self.assertAllEqual(sp1.values, sp2.values)
+    self.assertAllEqual(sp1.dense_shape, sp2.dense_shape)

   def _assert_sparse_tensor_empty(self, sp):
     self.assertEqual(0, sp.indices.size)
@@ -424,9 +424,9 @@ class SparseCrossOpTest(test.TestCase):
     self.assertEqual(0, sp.dense_shape[1])

   def _assert_sparse_tensor_equals(self, sp1, sp2):
-    self.assertAllEqual(sp1.indices.eval(), sp2.indices)
-    self.assertAllEqual(sp1.values.eval(), sp2.values)
-    self.assertAllEqual(sp1.dense_shape.eval(), sp2.dense_shape)
+    self.assertAllEqual(sp1.indices, sp2.indices)
+    self.assertAllEqual(sp1.values, sp2.values)
+    self.assertAllEqual(sp1.dense_shape, sp2.dense_shape)

   def _sparse_tensor(self, data, batch_size=-1):
     """Generates a SparseTensor.
@@ -87,16 +87,15 @@ class SparseSliceOpTest(test.TestCase):
       sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [2, 6])
       sp_tensor1 = sparse_ops.sparse_slice(sp_input, [2, 0], [3, 7])
       self.assertAllEqual(
-          sp_tensor0.indices.eval(),
+          sp_tensor0.indices,
           [[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4]])
-      self.assertAllEqual(sp_tensor0.values.eval(), [0, 2, 4, 5, 11, 13, 14])
-      self.assertAllEqual(sp_tensor0.dense_shape.eval(), [2, 6])
+      self.assertAllEqual(sp_tensor0.values, [0, 2, 4, 5, 11, 13, 14])
+      self.assertAllEqual(sp_tensor0.dense_shape, [2, 6])
       self.assertAllEqual(
-          sp_tensor1.indices.eval(),
+          sp_tensor1.indices,
           [[0, 0], [0, 3], [0, 5], [1, 0], [1, 2], [1, 3], [1, 5]])
-      self.assertAllEqual(sp_tensor1.values.eval(),
-                          [20, 23, 25, 30, 32, 33, 35])
-      self.assertAllEqual(sp_tensor1.dense_shape.eval(), [2, 6])
+      self.assertAllEqual(sp_tensor1.values, [20, 23, 25, 30, 32, 33, 35])
+      self.assertAllEqual(sp_tensor1.dense_shape, [2, 6])

   @test_util.run_deprecated_v1
   def testSliceMatrixUnevenCols(self):
@@ -107,38 +106,38 @@ class SparseSliceOpTest(test.TestCase):
       sp_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 5], [5, 2])

       self.assertAllEqual(
-          sp_tensor0.indices.eval(),
+          sp_tensor0.indices,
           [[0, 0], [0, 2], [1, 1], [2, 0], [3, 0], [3, 2], [4, 1]])
-      self.assertAllEqual(sp_tensor0.values.eval(), [0, 2, 11, 20, 30, 32, 41])
-      self.assertAllEqual(sp_tensor0.dense_shape.eval(), [5, 3])
-      self.assertAllEqual(sp_tensor1.indices.eval(),
+      self.assertAllEqual(sp_tensor0.values, [0, 2, 11, 20, 30, 32, 41])
+      self.assertAllEqual(sp_tensor0.dense_shape, [5, 3])
+      self.assertAllEqual(sp_tensor1.indices,
                           [[0, 1], [1, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
-      self.assertAllEqual(sp_tensor1.values.eval(), [4, 13, 14, 23, 33, 44])
-      self.assertAllEqual(sp_tensor1.dense_shape.eval(), [5, 2])
-      self.assertAllEqual(sp_tensor2.indices.eval(),
+      self.assertAllEqual(sp_tensor1.values, [4, 13, 14, 23, 33, 44])
+      self.assertAllEqual(sp_tensor1.dense_shape, [5, 2])
+      self.assertAllEqual(sp_tensor2.indices,
                           [[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
-      self.assertAllEqual(sp_tensor2.values.eval(), [5, 16, 25, 35, 46])
-      self.assertAllEqual(sp_tensor2.dense_shape.eval(), [5, 2])
+      self.assertAllEqual(sp_tensor2.values, [5, 16, 25, 35, 46])
+      self.assertAllEqual(sp_tensor2.dense_shape, [5, 2])

       sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [5, 2])
       sp_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 2], [5, 2])
       sp_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 4], [5, 2])
       sp_tensor3 = sparse_ops.sparse_slice(sp_input, [0, 6], [5, 2])
-      self.assertAllEqual(sp_tensor0.indices.eval(),
+      self.assertAllEqual(sp_tensor0.indices,
                           [[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
-      self.assertAllEqual(sp_tensor0.values.eval(), [0, 11, 20, 30, 41])
-      self.assertAllEqual(sp_tensor0.dense_shape.eval(), [5, 2])
-      self.assertAllEqual(sp_tensor1.indices.eval(),
+      self.assertAllEqual(sp_tensor0.values, [0, 11, 20, 30, 41])
+      self.assertAllEqual(sp_tensor0.dense_shape, [5, 2])
+      self.assertAllEqual(sp_tensor1.indices,
                           [[0, 0], [1, 1], [2, 1], [3, 0], [3, 1]])
-      self.assertAllEqual(sp_tensor1.values.eval(), [2, 13, 23, 32, 33])
-      self.assertAllEqual(sp_tensor1.dense_shape.eval(), [5, 2])
-      self.assertAllEqual(sp_tensor2.indices.eval(),
+      self.assertAllEqual(sp_tensor1.values, [2, 13, 23, 32, 33])
+      self.assertAllEqual(sp_tensor1.dense_shape, [5, 2])
+      self.assertAllEqual(sp_tensor2.indices,
                           [[0, 0], [0, 1], [1, 0], [2, 1], [3, 1], [4, 0]])
-      self.assertAllEqual(sp_tensor2.values.eval(), [4, 5, 14, 25, 35, 44])
-      self.assertAllEqual(sp_tensor2.dense_shape.eval(), [5, 2])
-      self.assertAllEqual(sp_tensor3.indices.eval(), [[1, 0], [4, 0]])
-      self.assertAllEqual(sp_tensor3.values.eval(), [16, 46])
-      self.assertAllEqual(sp_tensor3.dense_shape.eval(), [5, 1])
+      self.assertAllEqual(sp_tensor2.values, [4, 5, 14, 25, 35, 44])
+      self.assertAllEqual(sp_tensor2.dense_shape, [5, 2])
+      self.assertAllEqual(sp_tensor3.indices, [[1, 0], [4, 0]])
+      self.assertAllEqual(sp_tensor3.values, [16, 46])
+      self.assertAllEqual(sp_tensor3.dense_shape, [5, 1])

   @test_util.run_deprecated_v1
   def testSliceMatrixUnevenRows(self):
@@ -146,35 +145,32 @@ class SparseSliceOpTest(test.TestCase):
       sp_input = self._SparseTensor_5x7()
       sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [3, 7])
       sp_tensor1 = sparse_ops.sparse_slice(sp_input, [3, 0], [3, 7])
-      self.assertAllEqual(sp_tensor0.indices.eval(),
+      self.assertAllEqual(sp_tensor0.indices,
                           [[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3],
                            [1, 4], [1, 6], [2, 0], [2, 3], [2, 5]])
-      self.assertAllEqual(sp_tensor0.values.eval(),
+      self.assertAllEqual(sp_tensor0.values,
                           [0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25])
-      self.assertAllEqual(sp_tensor0.dense_shape.eval(), [3, 7])
+      self.assertAllEqual(sp_tensor0.dense_shape, [3, 7])
       self.assertAllEqual(
-          sp_tensor1.indices.eval(),
+          sp_tensor1.indices,
           [[0, 0], [0, 2], [0, 3], [0, 5], [1, 1], [1, 4], [1, 6]])
-      self.assertAllEqual(sp_tensor1.values.eval(),
-                          [30, 32, 33, 35, 41, 44, 46])
-      self.assertAllEqual(sp_tensor1.dense_shape.eval(), [2, 7])
+      self.assertAllEqual(sp_tensor1.values, [30, 32, 33, 35, 41, 44, 46])
+      self.assertAllEqual(sp_tensor1.dense_shape, [2, 7])

       sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [2, 7])
       sp_tensor1 = sparse_ops.sparse_slice(sp_input, [2, 0], [2, 7])
       sp_tensor2 = sparse_ops.sparse_slice(sp_input, [4, 0], [2, 7])
       self.assertAllEqual(
-          sp_tensor0.indices.eval(),
+          sp_tensor0.indices,
           [[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4], [1, 6]])
-      self.assertAllEqual(sp_tensor0.values.eval(),
-                          [0, 2, 4, 5, 11, 13, 14, 16])
-      self.assertAllEqual(sp_tensor0.dense_shape.eval(), [2, 7])
+      self.assertAllEqual(sp_tensor0.values, [0, 2, 4, 5, 11, 13, 14, 16])
+      self.assertAllEqual(sp_tensor0.dense_shape, [2, 7])

-      self.assertAllEqual(sp_tensor1.values.eval(),
-                          [20, 23, 25, 30, 32, 33, 35])
-      self.assertAllEqual(sp_tensor1.dense_shape.eval(), [2, 7])
-      self.assertAllEqual(sp_tensor2.indices.eval(), [[0, 1], [0, 4], [0, 6]])
-      self.assertAllEqual(sp_tensor2.values.eval(), [41, 44, 46])
-      self.assertAllEqual(sp_tensor2.dense_shape.eval(), [1, 7])
+      self.assertAllEqual(sp_tensor1.values, [20, 23, 25, 30, 32, 33, 35])
+      self.assertAllEqual(sp_tensor1.dense_shape, [2, 7])
+      self.assertAllEqual(sp_tensor2.indices, [[0, 1], [0, 4], [0, 6]])
+      self.assertAllEqual(sp_tensor2.values, [41, 44, 46])
+      self.assertAllEqual(sp_tensor2.dense_shape, [1, 7])
       return

   @test_util.run_deprecated_v1
@@ -185,20 +181,18 @@ class SparseSliceOpTest(test.TestCase):
       sp_tensor1 = sparse_ops.sparse_slice(sp_input, [1, 0], [1, 6])
       sp_tensor2 = sparse_ops.sparse_slice(sp_input, [2, 0], [1, 7])
       sp_tensor3 = sparse_ops.sparse_slice(sp_input, [3, 0], [2, 7])
-      self.assertAllEqual(sp_tensor0.indices.eval(),
-                          [[0, 0], [0, 2], [0, 4], [0, 5]])
-      self.assertAllEqual(sp_tensor0.values.eval(), [0, 2, 4, 5])
-      self.assertAllEqual(sp_tensor0.dense_shape.eval(), [1, 6])
-      self.assertAllEqual(sp_tensor1.indices.eval(), [[0, 1], [0, 3], [0, 4]])
-      self.assertAllEqual(sp_tensor1.values.eval(), [11, 13, 14])
-      self.assertAllEqual(sp_tensor1.dense_shape.eval(), [1, 6])
-      self.assertAllEqual(sp_tensor2.indices.eval(), [[0, 0], [0, 3], [0, 5]])
-      self.assertAllEqual(sp_tensor2.values.eval(), [20, 23, 25])
-      self.assertAllEqual(sp_tensor2.dense_shape.eval(), [1, 6])
-      self.assertAllEqual(sp_tensor3.indices.eval(),
-                          [[0, 0], [0, 2], [0, 3], [0, 5]])
-      self.assertAllEqual(sp_tensor3.values.eval(), [30, 32, 33, 35])
-      self.assertAllEqual(sp_tensor3.dense_shape.eval(), [1, 6])
+      self.assertAllEqual(sp_tensor0.indices, [[0, 0], [0, 2], [0, 4], [0, 5]])
+      self.assertAllEqual(sp_tensor0.values, [0, 2, 4, 5])
+      self.assertAllEqual(sp_tensor0.dense_shape, [1, 6])
+      self.assertAllEqual(sp_tensor1.indices, [[0, 1], [0, 3], [0, 4]])
+      self.assertAllEqual(sp_tensor1.values, [11, 13, 14])
+      self.assertAllEqual(sp_tensor1.dense_shape, [1, 6])
+      self.assertAllEqual(sp_tensor2.indices, [[0, 0], [0, 3], [0, 5]])
+      self.assertAllEqual(sp_tensor2.values, [20, 23, 25])
+      self.assertAllEqual(sp_tensor2.dense_shape, [1, 6])
+      self.assertAllEqual(sp_tensor3.indices, [[0, 0], [0, 2], [0, 3], [0, 5]])
+      self.assertAllEqual(sp_tensor3.values, [30, 32, 33, 35])
+      self.assertAllEqual(sp_tensor3.dense_shape, [1, 6])

   @test_util.run_deprecated_v1
   def testSliceColumns(self):
@@ -208,18 +202,18 @@ class SparseSliceOpTest(test.TestCase):
       sparse_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 2], [5, 2])
       sparse_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 4], [5, 3])

-      self.assertAllEqual(sparse_tensor0.indices.eval(),
+      self.assertAllEqual(sparse_tensor0.indices,
                           [[0, 0], [1, 1], [2, 0], [3, 0]])
-      self.assertAllEqual(sparse_tensor0.values.eval(), [0, 11, 20, 30])
-      self.assertAllEqual(sparse_tensor0.dense_shape.eval(), [4, 2])
-      self.assertAllEqual(sparse_tensor1.indices.eval(),
+      self.assertAllEqual(sparse_tensor0.values, [0, 11, 20, 30])
+      self.assertAllEqual(sparse_tensor0.dense_shape, [4, 2])
+      self.assertAllEqual(sparse_tensor1.indices,
                           [[0, 0], [1, 1], [2, 1], [3, 0], [3, 1]])
-      self.assertAllEqual(sparse_tensor1.values.eval(), [2, 13, 23, 32, 33])
-      self.assertAllEqual(sparse_tensor1.dense_shape.eval(), [4, 2])
-      self.assertAllEqual(sparse_tensor2.indices.eval(),
+      self.assertAllEqual(sparse_tensor1.values, [2, 13, 23, 32, 33])
+      self.assertAllEqual(sparse_tensor1.dense_shape, [4, 2])
+      self.assertAllEqual(sparse_tensor2.indices,
                           [[0, 0], [0, 1], [1, 0], [2, 1], [3, 1]])
-      self.assertAllEqual(sparse_tensor2.values.eval(), [4, 5, 14, 25, 35])
-      self.assertAllEqual(sparse_tensor2.dense_shape.eval(), [4, 2])
+      self.assertAllEqual(sparse_tensor2.values, [4, 5, 14, 25, 35])
+      self.assertAllEqual(sparse_tensor2.dense_shape, [4, 2])

   @test_util.run_deprecated_v1
   def testSliceAllColumns(self):
@@ -231,27 +225,24 @@ class SparseSliceOpTest(test.TestCase):
       sparse_tensor3 = sparse_ops.sparse_slice(sp_input, [0, 3], [4, 1])
       sparse_tensor4 = sparse_ops.sparse_slice(sp_input, [0, 4], [5, 1])
      sparse_tensor5 = sparse_ops.sparse_slice(sp_input, [0, 5], [6, 3])
-      self.assertAllEqual(sparse_tensor0.indices.eval(),
-                          [[0, 0], [2, 0], [3, 0]])
-      self.assertAllEqual(sparse_tensor0.values.eval(), [0, 20, 30])
-      self.assertAllEqual(sparse_tensor0.dense_shape.eval(), [4, 1])
-      self.assertAllEqual(sparse_tensor1.indices.eval(), [[1, 0]])
-      self.assertAllEqual(sparse_tensor1.values.eval(), [11])
-      self.assertAllEqual(sparse_tensor1.dense_shape.eval(), [4, 1])
-      self.assertAllEqual(sparse_tensor2.indices.eval(), [[0, 0], [3, 0]])
-      self.assertAllEqual(sparse_tensor2.values.eval(), [2, 32])
-      self.assertAllEqual(sparse_tensor2.dense_shape.eval(), [4, 1])
-      self.assertAllEqual(sparse_tensor3.indices.eval(),
-                          [[1, 0], [2, 0], [3, 0]])
-      self.assertAllEqual(sparse_tensor3.dense_shape.eval(), [4, 1])
-      self.assertAllEqual(sparse_tensor3.values.eval(), [13, 23, 33])
-      self.assertAllEqual(sparse_tensor4.indices.eval(), [[0, 0], [1, 0]])
-      self.assertAllEqual(sparse_tensor4.values.eval(), [4, 14])
-      self.assertAllEqual(sparse_tensor4.dense_shape.eval(), [4, 1])
-      self.assertAllEqual(sparse_tensor5.indices.eval(),
-                          [[0, 0], [2, 0], [3, 0]])
-      self.assertAllEqual(sparse_tensor5.values.eval(), [5, 25, 35])
-      self.assertAllEqual(sparse_tensor5.dense_shape.eval(), [4, 1])
+      self.assertAllEqual(sparse_tensor0.indices, [[0, 0], [2, 0], [3, 0]])
+      self.assertAllEqual(sparse_tensor0.values, [0, 20, 30])
+      self.assertAllEqual(sparse_tensor0.dense_shape, [4, 1])
+      self.assertAllEqual(sparse_tensor1.indices, [[1, 0]])
+      self.assertAllEqual(sparse_tensor1.values, [11])
+      self.assertAllEqual(sparse_tensor1.dense_shape, [4, 1])
+      self.assertAllEqual(sparse_tensor2.indices, [[0, 0], [3, 0]])
+      self.assertAllEqual(sparse_tensor2.values, [2, 32])
+      self.assertAllEqual(sparse_tensor2.dense_shape, [4, 1])
+      self.assertAllEqual(sparse_tensor3.indices, [[1, 0], [2, 0], [3, 0]])
+      self.assertAllEqual(sparse_tensor3.dense_shape, [4, 1])
+      self.assertAllEqual(sparse_tensor3.values, [13, 23, 33])
+      self.assertAllEqual(sparse_tensor4.indices, [[0, 0], [1, 0]])
+      self.assertAllEqual(sparse_tensor4.values, [4, 14])
+      self.assertAllEqual(sparse_tensor4.dense_shape, [4, 1])
+      self.assertAllEqual(sparse_tensor5.indices, [[0, 0], [2, 0], [3, 0]])
+      self.assertAllEqual(sparse_tensor5.values, [5, 25, 35])
+      self.assertAllEqual(sparse_tensor5.dense_shape, [4, 1])

   @test_util.run_deprecated_v1
   def testGradients(self):
@@ -1646,7 +1646,7 @@ class GradPassThroughTest(test_util.TensorFlowTestCase):

     with self.cached_session():
       self.evaluate(variables.global_variables_initializer())
-      self.assertAllClose(grads[0].eval(), 6.0)
+      self.assertAllClose(grads[0], 6.0)

     # Verify that variables involved in the wrapped op do not receive gradients.
     y = custom_gradient.grad_pass_through(lambda v: x * v)(z)
@@ -156,8 +156,7 @@ class LoadAndRemapWrappersTest(test.TestCase):

     with self.cached_session():
       self.evaluate(variables.global_variables_initializer())
-      self.assertAllClose(expected_remapped_matrix,
-                          remapped_matrix.as_tensor().eval())
+      self.assertAllClose(expected_remapped_matrix, remapped_matrix.as_tensor())

   def test_load_and_remap_output_layer_weight_initializer_dnn_output(self):
     """Tests for the output layer initializer in the DNN output case."""
@@ -190,8 +189,7 @@ class LoadAndRemapWrappersTest(test.TestCase):

     with self.cached_session():
       self.evaluate(variables.global_variables_initializer())
-      self.assertAllClose(expected_remapped_matrix,
-                          remapped_matrix.as_tensor().eval())
+      self.assertAllClose(expected_remapped_matrix, remapped_matrix.as_tensor())

   def test_initializer_with_oov_only_partition(self):
     """Tests for the output layer initializer where one partition is all OOV."""
@@ -228,8 +226,7 @@ class LoadAndRemapWrappersTest(test.TestCase):

     with self.cached_session():
       self.evaluate(variables.global_variables_initializer())
-      self.assertAllClose(expected_remapped_matrix,
-                          remapped_matrix.as_tensor().eval())
+      self.assertAllClose(expected_remapped_matrix, remapped_matrix.as_tensor())

   def test_load_and_remap_linear_multiclass_initializer_default_init(self):
     """Tests where the zeros_initializer default is used for linear."""
@@ -264,8 +261,7 @@ class LoadAndRemapWrappersTest(test.TestCase):

     with self.cached_session():
       self.evaluate(variables.global_variables_initializer())
-      self.assertAllClose(expected_remapped_matrix,
-                          remapped_matrix.as_tensor().eval())
+      self.assertAllClose(expected_remapped_matrix, remapped_matrix.as_tensor())

   def test_load_embedding_initializer(self):
     """Tests for the load_embedding_initializer wrapper."""
@@ -299,7 +295,7 @@ class LoadAndRemapWrappersTest(test.TestCase):
     with self.cached_session():
       self.evaluate(variables.global_variables_initializer())
       self.assertAllClose(expected_remapped_embeddings,
-                          remapped_embeddings.as_tensor().eval())
+                          remapped_embeddings.as_tensor())

   def test_load_embedding_initializer_large_oov(self):
     """Tests for the large OOV case for load_embedding_initializer wrapper."""
@@ -345,7 +341,7 @@ class LoadAndRemapWrappersTest(test.TestCase):
     with self.cached_session():
       self.evaluate(variables.global_variables_initializer())
       self.assertAllClose(expected_remapped_embeddings,
-                          remapped_embeddings.as_tensor().eval())
+                          remapped_embeddings.as_tensor())

   def test_load_embedding_initializer_old_row_vocab(self):
     """Tests for load_embedding_initializer where we constrain old vocab."""
@@ -383,7 +379,8 @@ class LoadAndRemapWrappersTest(test.TestCase):
     with self.cached_session():
       self.evaluate(variables.global_variables_initializer())
       self.assertAllClose(expected_remapped_embeddings,
-                          remapped_embeddings.as_tensor().eval())
+                          remapped_embeddings.as_tensor())


 if __name__ == '__main__':
   test.main()