Remove unnecessary eval() calls

The assertAll* statements already evaluate their arguments, so calling .eval() on them first is redundant.

PiperOrigin-RevId: 319130109
Change-Id: I0034bdfa87a974613561a39e0d4a4223292245c7
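For illustration, a minimal before/after sketch of the pattern this change applies throughout (a hypothetical test case, not taken from this commit; it assumes the usual tensorflow.python imports for constant_op and math_ops). The assertAll* helpers on tf.test.TestCase convert Tensor arguments to numpy values via self.evaluate(), so an explicit .eval() adds nothing:

    # Before: redundant .eval(), which also requires a default session.
    with self.cached_session():
      result = math_ops.add(constant_op.constant(2), constant_op.constant(3))
      self.assertAllEqual(result.eval(), 5)

    # After: the assertion evaluates `result` itself.
    with self.cached_session():
      result = math_ops.add(constant_op.constant(2), constant_op.constant(3))
      self.assertAllEqual(result, 5)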
Author: Gaurav Jain, 2020-06-30 17:15:12 -07:00 (committed by TensorFlower Gardener)
parent 17991834b6
commit 016bb3fc99
71 changed files with 445 additions and 447 deletions
tensorflow/compiler/tests
tensorflow/examples/adding_an_op
tensorflow/python
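One nuance worth noting before the hunks: Tensor.eval() requires a default session and works only in graph mode, whereas TensorFlowTestCase.evaluate(), which the assertAll* helpers use internally, handles graph and eager execution alike. A few .eval() calls are deliberately left in place below, for example embedding_lookup.eval(feed_dict=...), where a feed_dict must be supplied; the implicit evaluation inside an assertion cannot pass feeds.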


@@ -268,7 +268,7 @@ class ConcatTest(xla_test.XLATestCase):
         # TODO(irving): Make tf.concat handle map, then drop list().
         xs = list(map(constant_op.constant, [x0, x1]))
         c = array_ops.concat(xs, axis)
-        self.assertAllEqual(c.eval(), correct)
+        self.assertAllEqual(c, correct)
         # Check gradients
         dc = np.random.randn(*c.get_shape().as_list())
         dxs = self.evaluate(gradients_impl.gradients(c, xs, dc))
@@ -281,7 +281,7 @@ class ConcatTest(xla_test.XLATestCase):
       with self.test_scope():
         concat_list_t = array_ops.concat([c1, c2], 0)
         concat_tuple_t = array_ops.concat((c1, c2), 0)
-        self.assertAllEqual(concat_list_t.eval(), self.evaluate(concat_tuple_t))
+        self.assertAllEqual(concat_list_t, self.evaluate(concat_tuple_t))

   def testConcatNoScalars(self):
     with self.session():


@@ -508,7 +508,7 @@ class TensorArrayTest(xla_test.XLATestCase):
       return w2_grad.read(2)

     # Assert that aggregation works correctly
-    self.assertAllEqual(c(12.00), xla.compile(fn)[0].eval())
+    self.assertAllEqual(c(12.00), xla.compile(fn)[0])

     def fn():
       ta = tensor_array_ops.TensorArray(


@@ -32,26 +32,26 @@ class ZeroOut1Test(tf.test.TestCase):
   def test(self):
     with self.cached_session():
       result = zero_out_op_1.zero_out([5, 4, 3, 2, 1])
-      self.assertAllEqual(result.eval(), [5, 0, 0, 0, 0])
+      self.assertAllEqual(result, [5, 0, 0, 0, 0])

   @test_util.run_deprecated_v1
   def test_namespace(self):
     with self.cached_session():
       result = zero_out_op_1.namespace_zero_out([5, 4, 3, 2, 1])
-      self.assertAllEqual(result.eval(), [5, 0, 0, 0, 0])
+      self.assertAllEqual(result, [5, 0, 0, 0, 0])

   @test_util.run_deprecated_v1
   def test_namespace_call_op_on_op(self):
     with self.cached_session():
       x = zero_out_op_1.namespace_zero_out([5, 4, 3, 2, 1])
       result = zero_out_op_1.namespace_zero_out(x)
-      self.assertAllEqual(result.eval(), [5, 0, 0, 0, 0])
+      self.assertAllEqual(result, [5, 0, 0, 0, 0])

   @test_util.run_deprecated_v1
   def test_namespace_nested(self):
     with self.cached_session():
       result = zero_out_op_1.namespace_nested_zero_out([5, 4, 3, 2, 1])
-      self.assertAllEqual(result.eval(), [5, 0, 0, 0, 0])
+      self.assertAllEqual(result, [5, 0, 0, 0, 0])

   def testLoadTwice(self):
     zero_out_loaded_again = tf.load_op_library(os.path.join(


@@ -33,13 +33,13 @@ class ZeroOut2Test(tf.test.TestCase):
   def test(self):
     with self.cached_session():
       result = zero_out_op_2.zero_out([5, 4, 3, 2, 1])
-      self.assertAllEqual(result.eval(), [5, 0, 0, 0, 0])
+      self.assertAllEqual(result, [5, 0, 0, 0, 0])

   @test_util.run_deprecated_v1
   def test_2d(self):
     with self.cached_session():
       result = zero_out_op_2.zero_out([[6, 5, 4], [3, 2, 1]])
-      self.assertAllEqual(result.eval(), [[6, 0, 0], [0, 0, 0]])
+      self.assertAllEqual(result, [[6, 0, 0], [0, 0, 0]])

   @test_util.run_deprecated_v1
   def test_grad(self):


@@ -30,13 +30,13 @@ class ZeroOut3Test(tf.test.TestCase):
   def test(self):
     with self.cached_session():
       result = zero_out_op_3.zero_out([5, 4, 3, 2, 1])
-      self.assertAllEqual(result.eval(), [5, 0, 0, 0, 0])
+      self.assertAllEqual(result, [5, 0, 0, 0, 0])

   @test_util.run_deprecated_v1
   def testAttr(self):
     with self.cached_session():
       result = zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=3)
-      self.assertAllEqual(result.eval(), [0, 0, 0, 2, 0])
+      self.assertAllEqual(result, [0, 0, 0, 2, 0])

   @test_util.run_deprecated_v1
   def testNegative(self):


@@ -115,7 +115,7 @@ class SessionTest(test_util.TensorFlowTestCase):
             'CPU': 2, 'GPU': 0
         })) as sess:
       inp = constant_op.constant(10.0, name='W1')
-      self.assertAllEqual(inp.eval(), 10.0)
+      self.assertAllEqual(inp, 10.0)

       num_cpu_devices = 0
       num_gpu_devices = 0
@@ -133,7 +133,7 @@ class SessionTest(test_util.TensorFlowTestCase):
     with session.Session(
         config=config_pb2.ConfigProto(use_per_session_threads=True)):
       inp = constant_op.constant(10.0, name='W1')
-      self.assertAllEqual(inp.eval(), 10.0)
+      self.assertAllEqual(inp, 10.0)

   def testSessionInterOpThreadPool(self):
     config_pb = config_pb2.ConfigProto()
@@ -1235,11 +1235,11 @@ class SessionTest(test_util.TensorFlowTestCase):
       self.assertEqual(len(sess.graph_def.node), 1)
       d = constant_op.constant(6.0, name='d')
       self.assertEqual(len(sess.graph_def.node), 2)
-      self.assertAllEqual(c.eval(), 5.0)
-      self.assertAllEqual(d.eval(), 6.0)
+      self.assertAllEqual(c, 5.0)
+      self.assertAllEqual(d, 6.0)
       e = constant_op.constant(7.0, name='e')
       self.assertEqual(len(sess.graph_def.node), 3)
-      self.assertAllEqual(e.eval(), 7.0)
+      self.assertAllEqual(e, 7.0)

   def testUseAfterClose(self):
     with session.Session() as sess:
@@ -1299,10 +1299,10 @@ class SessionTest(test_util.TensorFlowTestCase):
       a = constant_op.constant(1.0, shape=[1, 2])
       b = constant_op.constant(2.0, shape=[2, 3])
       c = math_ops.matmul(a, b)
-      self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
+      self.assertAllEqual([[4.0, 4.0, 4.0]], c)
       d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
       e = math_ops.matmul(c, d)
-      self.assertAllEqual([[24.0]], e.eval())
+      self.assertAllEqual([[24.0]], e)
     sess.close()

   @test_util.run_v1_only('b/120545219')
@@ -1549,7 +1549,7 @@ class SessionTest(test_util.TensorFlowTestCase):
           [compat.as_bytes(str(i)) for i in xrange(size)],
           dtype=np.object).reshape(shape) if size > 0 else []
       c = constant_op.constant(c_list)
-      self.assertAllEqual(c.eval(), c_list)
+      self.assertAllEqual(c, c_list)

   def testStringFeed(self):
     with session.Session() as sess:


@@ -208,7 +208,7 @@ class CompilationEnabledInGradientTest(test.TestCase, parameterized.TestCase):
           ncg.get_attr("_XlaCompile")

       # d/dx (x ** 4) = 4 * (x ** 3)
-      self.assertAllClose([[108]], x_grads.eval())
+      self.assertAllClose([[108]], x_grads)

   @test_util.build_as_function_and_v1_graph
   def testCompilationGradientScopeNames(self):


@@ -322,7 +322,7 @@ class DebugWrapperSessionTest(test_util.TensorFlowTestCase):
         self._observer)

     with wrapper as sess:
-      self.assertAllClose([[3.0], [4.0]], self._s.eval())
+      self.assertAllClose([[3.0], [4.0]], self._s)
      self.assertEqual(1, self._observer["on_run_start_count"])
      self.assertEqual(self._s, self._observer["run_fetches"])
      self.assertEqual(1, self._observer["on_run_end_count"])


@@ -160,7 +160,7 @@ class AllReduceTest(test_util.TensorFlowTestCase):
         output_tensors = build_f(input_tensors, un_op)
         sum_reduced = math_ops.add_n(output_tensors)
         sum_reduced.op.run()
-        self.assertAllClose(sum_reduced.eval(), self.evaluate(simple_sum))
+        self.assertAllClose(sum_reduced, self.evaluate(simple_sum))

   def _testRingAllReduce(self, num_workers, num_gpus, shape, subdiv):
     start_time = time.time()


@@ -65,7 +65,7 @@ class AssignMovingAveragesTest(test.TestCase, parameterized.TestCase):
     with distribution.scope(), self.cached_session() as sess:
       var, assign = distribution.extended.call_for_each_replica(replica_fn)
       variables.global_variables_initializer().run()
-      self.assertAllClose([10.0, 11.0], var.eval())
+      self.assertAllClose([10.0, 11.0], var)
       sess.run(distribution.experimental_local_results(assign))
       # Mean of val across calls to replica_fn().
       average_val = [1.0 + 0.5 * (replica_id[0] - 1),
@@ -91,12 +91,12 @@ class AssignMovingAveragesTest(test.TestCase, parameterized.TestCase):
     with distribution.scope(), self.cached_session() as sess:
       var, assign_op = distribution.extended.call_for_each_replica(replica_fn)
       variables.global_variables_initializer().run()
-      self.assertAllClose([0.0, 0.0], var.eval())
+      self.assertAllClose([0.0, 0.0], var)
       sess.run(distribution.experimental_local_results(assign_op))
       # Mean of val across calls to replica_fn().
       average_val = [1.0 + 0.5 * (replica_id[0] - 1),
                      2.0 - 0.5 * (replica_id[0] - 1)]
-      self.assertAllClose(average_val, var.eval())
+      self.assertAllClose(average_val, var)

   @combinations.generate(all_combinations)
   def testCrossDeviceWithoutZeroDebias(self, distribution):
@@ -110,7 +110,7 @@ class AssignMovingAveragesTest(test.TestCase, parameterized.TestCase):
           var, val, decay, zero_debias=False)
       variables.global_variables_initializer().run()
-      self.assertAllClose([10.0, 11.0], var.eval())
+      self.assertAllClose([10.0, 11.0], var)
       sess.run(assign)
       average_val = [1.0, 2.0]
       val_weight = 1.0 - 0.25
@@ -138,9 +138,9 @@ class AssignMovingAveragesTest(test.TestCase, parameterized.TestCase):
       assign = moving_averages.assign_moving_average(var, val, decay)
       variables.global_variables_initializer().run()
-      self.assertAllClose([0.0, 0.0], var.eval())
+      self.assertAllClose([0.0, 0.0], var)
       sess.run(assign, feed_dict={val: [1.0, 2.0]})
-      self.assertAllClose([1.0, 2.0], var.eval())
+      self.assertAllClose([1.0, 2.0], var)

       # Also try assign.op.
       sess.run(assign.op, feed_dict={val: [10.0, 0.0]})
@@ -182,7 +182,7 @@ class AssignMovingAveragesTest(test.TestCase, parameterized.TestCase):
     with distribution.scope(), self.cached_session() as sess:
       var, assign = distribution.extended.call_for_each_replica(replica_fn)
       variables.global_variables_initializer().run()
-      self.assertAllClose([10.0, 11.0], var.eval())
+      self.assertAllClose([10.0, 11.0], var)
       sess.run(distribution.experimental_local_results(assign))
       self.assertAllClose(
           [10 * 0.25 + 1. * (1 - 0.25), 11 * 0.25 + 2. * (1 - 0.25)],


@@ -1499,7 +1499,7 @@ class BackpropTest(test.TestCase, parameterized.TestCase):
       tf_max = max_pooling3d(
           tf_aa, pool_size=pool_size, strides=strides, padding='SAME')
       tf_da = gradients.gradients(tf_max, [tf_aa])
-      self.assertAllEqual(da[0], tf_da[0].eval())
+      self.assertAllEqual(da[0], tf_da[0])

   @test_util.run_in_graph_and_eager_modes
   def testWatchBadThing(self):


@@ -85,7 +85,7 @@ class FunctionGradientsTest(test.TestCase, parameterized.TestCase):
       node = f()
       grads, = gradients_impl.gradients(node, v)
       v.initializer.run()
-      self.assertAllEqual(grads.eval(), 2.0)
+      self.assertAllEqual(grads, 2.0)
       self.assertEqual(grads.shape, v.shape)

   def testSymbolicHigherOrder(self):


@@ -280,7 +280,7 @@ class NumericColumnTest(test.TestCase):
         features=fc.make_parse_example_spec([price]))
     self.assertIn('price', features)
     with self.cached_session():
-      self.assertAllEqual([[20., 110.]], features['price'].eval())
+      self.assertAllEqual([[20., 110.]], features['price'])

   @test_util.run_deprecated_v1
   def test_parse_example_with_default_value(self):
@@ -303,7 +303,7 @@ class NumericColumnTest(test.TestCase):
         features=fc.make_parse_example_spec([price]))
     self.assertIn('price', features)
     with self.cached_session():
-      self.assertAllEqual([[20., 110.], [11., 11.]], features['price'].eval())
+      self.assertAllEqual([[20., 110.], [11., 11.]], features['price'])

   def test_normalizer_fn_must_be_callable(self):
     with self.assertRaisesRegex(TypeError, 'must be a callable'):
@@ -318,7 +318,7 @@ class NumericColumnTest(test.TestCase):
     price = fc._numeric_column('price', shape=[2], normalizer_fn=_increment_two)
     output = _transform_features({'price': [[1., 2.], [5., 6.]]}, [price])
     with self.cached_session():
-      self.assertAllEqual([[3., 4.], [7., 8.]], output[price].eval())
+      self.assertAllEqual([[3., 4.], [7., 8.]], output[price])

   @test_util.run_deprecated_v1
   def test_get_dense_tensor(self):
@@ -454,7 +454,7 @@ class BucketizedColumnTest(test.TestCase):
         features=fc.make_parse_example_spec([bucketized_price]))
     self.assertIn('price', features)
     with self.cached_session():
-      self.assertAllEqual([[20., 110.]], features['price'].eval())
+      self.assertAllEqual([[20., 110.]], features['price'])

   @test_util.run_deprecated_v1
   def test_transform_feature(self):
@@ -751,8 +751,8 @@ class HashedCategoricalColumnTest(test.TestCase):
     expected_values = [6, 4, 1]
     with self.cached_session():
       self.assertEqual(dtypes.int64, output.values.dtype)
-      self.assertAllEqual(expected_values, output.values.eval())
-      self.assertAllEqual(wire_tensor.indices.eval(), output.indices.eval())
+      self.assertAllEqual(expected_values, output.values)
+      self.assertAllEqual(wire_tensor.indices.eval(), output.indices)
       self.assertAllEqual(wire_tensor.dense_shape.eval(),
                           output.dense_shape.eval())
@@ -807,7 +807,7 @@ class HashedCategoricalColumnTest(test.TestCase):
     # Check exact hashed output. If hashing changes this test will break.
     expected_values = [3, 7, 5]
     with self.cached_session():
-      self.assertAllEqual(expected_values, output.values.eval())
+      self.assertAllEqual(expected_values, output.values)

   @test_util.run_deprecated_v1
   def test_int32_64_is_compatible(self):
@@ -822,7 +822,7 @@ class HashedCategoricalColumnTest(test.TestCase):
     # Check exact hashed output. If hashing changes this test will break.
     expected_values = [3, 7, 5]
     with self.cached_session():
-      self.assertAllEqual(expected_values, output.values.eval())
+      self.assertAllEqual(expected_values, output.values)

   @test_util.run_deprecated_v1
   def test_get_sparse_tensors(self):
@@ -1022,12 +1022,12 @@ class CrossedColumnTest(test.TestCase):
     self.assertIn('price', features)
     self.assertIn('wire', features)
     with self.cached_session():
-      self.assertAllEqual([[20., 110.]], features['price'].eval())
+      self.assertAllEqual([[20., 110.]], features['price'])
       wire_sparse = features['wire']
-      self.assertAllEqual([[0, 0], [0, 1]], wire_sparse.indices.eval())
+      self.assertAllEqual([[0, 0], [0, 1]], wire_sparse.indices)
       # Use byte constants to pass the open-source test.
-      self.assertAllEqual([b'omar', b'stringer'], wire_sparse.values.eval())
-      self.assertAllEqual([1, 2], wire_sparse.dense_shape.eval())
+      self.assertAllEqual([b'omar', b'stringer'], wire_sparse.values)
+      self.assertAllEqual([1, 2], wire_sparse.dense_shape)

   @test_util.run_deprecated_v1
   def test_transform_feature(self):
@@ -1672,12 +1672,12 @@ class LinearModelTest(test.TestCase):
     with _initialized_session():
       self.assertEqual([0.], cols_to_vars['bias'][0].eval())
       # Partitioning shards the [2, 1] price1 var into 2 [1, 1] Variables.
-      self.assertAllEqual([[0.]], cols_to_vars[price1][0].eval())
-      self.assertAllEqual([[0.]], cols_to_vars[price1][1].eval())
+      self.assertAllEqual([[0.]], cols_to_vars[price1][0])
+      self.assertAllEqual([[0.]], cols_to_vars[price1][1])
       # Partitioning shards the [3, 1] price2 var into a [2, 1] Variable and
       # a [1, 1] Variable.
-      self.assertAllEqual([[0.], [0.]], cols_to_vars[price2][0].eval())
-      self.assertAllEqual([[0.]], cols_to_vars[price2][1].eval())
+      self.assertAllEqual([[0.], [0.]], cols_to_vars[price2][0])
+      self.assertAllEqual([[0.]], cols_to_vars[price2][1])

   def test_fills_cols_to_output_tensors(self):
     # Provide three _DenseColumn's to input_layer: a _NumericColumn, a
@@ -2340,12 +2340,12 @@ class _LinearModelTest(test.TestCase):
     with _initialized_session():
       self.assertEqual([0.], cols_to_vars['bias'][0].eval())
       # Partitioning shards the [2, 1] price1 var into 2 [1, 1] Variables.
-      self.assertAllEqual([[0.]], cols_to_vars[price1][0].eval())
-      self.assertAllEqual([[0.]], cols_to_vars[price1][1].eval())
+      self.assertAllEqual([[0.]], cols_to_vars[price1][0])
+      self.assertAllEqual([[0.]], cols_to_vars[price1][1])
       # Partitioning shards the [3, 1] price2 var into a [2, 1] Variable and
       # a [1, 1] Variable.
-      self.assertAllEqual([[0.], [0.]], cols_to_vars[price2][0].eval())
-      self.assertAllEqual([[0.]], cols_to_vars[price2][1].eval())
+      self.assertAllEqual([[0.], [0.]], cols_to_vars[price2][0])
+      self.assertAllEqual([[0.]], cols_to_vars[price2][1])

   def test_dense_collection(self):
     price = fc._numeric_column('price')
@@ -4560,9 +4560,9 @@ class TransformFeaturesTest(test.TestCase):
         [bucketized_price, hashed_sparse])
     with _initialized_session():
       self.assertIn(bucketized_price.name, transformed[bucketized_price].name)
-      self.assertAllEqual([[0], [3]], transformed[bucketized_price].eval())
+      self.assertAllEqual([[0], [3]], transformed[bucketized_price])
       self.assertIn(hashed_sparse.name, transformed[hashed_sparse].name)
-      self.assertAllEqual([6, 4, 1], transformed[hashed_sparse].values.eval())
+      self.assertAllEqual([6, 4, 1], transformed[hashed_sparse].values)

   def test_column_order(self):
     """When the column is both dense and sparse, uses sparse tensors."""
@@ -5044,7 +5044,7 @@ class EmbeddingColumnTest(test.TestCase, parameterized.TestCase):
     for v in global_vars:
       self.assertIsInstance(v, variables_lib.Variable)
     with _initialized_session():
-      self.assertAllEqual(embedding_values, global_vars[0].eval())
+      self.assertAllEqual(embedding_values, global_vars[0])
       self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))

     if use_safe_embedding_lookup:
@@ -5114,7 +5114,7 @@ class EmbeddingColumnTest(test.TestCase, parameterized.TestCase):
     self.assertCountEqual(('embedding_weights:0',),
                           tuple([v.name for v in global_vars]))
     with _initialized_session():
-      self.assertAllEqual(embedding_values, global_vars[0].eval())
+      self.assertAllEqual(embedding_values, global_vars[0])
       self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))

   @test_util.run_deprecated_v1
@@ -5211,7 +5211,7 @@ class EmbeddingColumnTest(test.TestCase, parameterized.TestCase):
     self.assertCountEqual(('embedding_weights:0',),
                           tuple([v.name for v in global_vars]))
     with _initialized_session():
-      self.assertAllEqual(embedding_values, global_vars[0].eval())
+      self.assertAllEqual(embedding_values, global_vars[0])
       self.assertAllEqual(expected_lookups, embedding_lookup.eval(
           feed_dict={
               input_indices: sparse_input.indices,
@@ -5275,7 +5275,7 @@ class EmbeddingColumnTest(test.TestCase, parameterized.TestCase):
     self.assertCountEqual(('embedding_weights:0',),
                           tuple([v.name for v in global_vars]))
     with _initialized_session():
-      self.assertAllEqual(embedding_values, global_vars[0].eval())
+      self.assertAllEqual(embedding_values, global_vars[0])
       self.assertAllEqual(expected_lookups, self.evaluate(embedding_lookup))

   @test_util.run_deprecated_v1
@@ -5495,7 +5495,7 @@ class EmbeddingColumnTest(test.TestCase, parameterized.TestCase):
     self.assertCountEqual(('input_layer/aaa_embedding/embedding_weights:0',),
                           tuple([v.name for v in trainable_vars]))
     with _initialized_session():
-      self.assertAllEqual(embedding_values, trainable_vars[0].eval())
+      self.assertAllEqual(embedding_values, trainable_vars[0])
       self.assertAllEqual(expected_lookups, self.evaluate(input_layer))

   @test_util.run_deprecated_v1
@@ -5555,7 +5555,7 @@ class EmbeddingColumnTest(test.TestCase, parameterized.TestCase):
     self.assertCountEqual([],
                           ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
     with _initialized_session():
-      self.assertAllEqual(embedding_values, global_vars[0].eval())
+      self.assertAllEqual(embedding_values, global_vars[0])
       self.assertAllEqual(expected_lookups, self.evaluate(input_layer))
@@ -6295,7 +6295,7 @@ class SharedEmbeddingColumnTest(test.TestCase, parameterized.TestCase):
     self.assertCountEqual([], tuple([v.name for v in trainable_vars]))
     shared_embedding_vars = global_vars
     with _initialized_session():
-      self.assertAllEqual(embedding_values, shared_embedding_vars[0].eval())
+      self.assertAllEqual(embedding_values, shared_embedding_vars[0])
       self.assertAllEqual(expected_lookups, self.evaluate(input_layer))

   @test_util.run_deprecated_v1


@@ -50,7 +50,7 @@ class AutomaticControlDependenciesTest(test.TestCase):
       v.assign(2 * v)
       val = v.read_value()
       val = c.mark_as_return(val)
-    self.assertAllEqual(val.eval(), 4.0)
+    self.assertAllEqual(val, 4.0)

   def testNoControlDepsBetweenVariableReads(self):
     with context.graph_mode(), self.cached_session():


@@ -350,8 +350,8 @@ class FunctionTest(test.TestCase):
             do_constant_folding=True)))

     with self.session(graph=g, config=cfg):
-      self.assertAllClose(y.eval(), 6.)
-      self.assertAllClose(dx.eval(), 2.)
+      self.assertAllClose(y, 6.)
+      self.assertAllClose(dx, 2.)

   def _testZNoDepOnY(self, use_const_grad_ys):
@@ -423,7 +423,7 @@ class FunctionTest(test.TestCase):
     with ops.Graph().as_default(), self.cached_session():
       z = Foo(constant_op.constant(3.0))
-      self.assertAllEqual(z.eval(), 6.0)
+      self.assertAllEqual(z, 6.0)

   def testAssertOp(self):
@@ -538,7 +538,7 @@ class FunctionTest(test.TestCase):
     with self.session(graph=g):
       variables.global_variables_initializer().run()
-      self.assertAllEqual(z.eval(), 101.)
+      self.assertAllEqual(z, 101.)

   @test_util.run_deprecated_v1
   def testResourceVarAsImplicitInput(self):
@@ -561,7 +561,7 @@ class FunctionTest(test.TestCase):
     with self.session(graph=g):
       v.initializer.run()
-      self.assertAllEqual(expected_val.eval(), self.evaluate(actual_val))
+      self.assertAllEqual(expected_val, self.evaluate(actual_val))
       self.assertAllEqual(expected_shape, self.evaluate(actual_shape))

   def testDefineErrors(self):
@@ -675,7 +675,7 @@ class FunctionTest(test.TestCase):
     with ops.Graph().as_default():
       z = CubeXPlusY(3.0, -2.0)
       with self.cached_session():
-        self.assertAllEqual(z.eval(), 25.0)
+        self.assertAllEqual(z, 25.0)

   def testNestedDefinedFunction(self):
@@ -691,7 +691,7 @@ class FunctionTest(test.TestCase):
     with ops.Graph().as_default():
       z = CubeXPlusY(3.0, -2.0)
       with self.cached_session():
-        self.assertAllEqual(z.eval(), 25.0)
+        self.assertAllEqual(z, 25.0)

   def testUnusedFunction(self):
     invoked = False
@@ -773,8 +773,8 @@ class FunctionTest(test.TestCase):
     with self.session(graph=g):
       variables.global_variables_initializer().run()
-      self.assertAllEqual(y.eval(), [[12.0]])
-      self.assertAllEqual(z.eval(), [[1.0]])
+      self.assertAllEqual(y, [[12.0]])
+      self.assertAllEqual(z, [[1.0]])

   def testCaptureControls(self):
     g = ops.Graph()
@@ -1418,8 +1418,8 @@ class FunctionOverloadTest(test.TestCase):
       y = Sinh(constant_op.constant(0.25, dtypes.float64))

     with self.session(graph=g):
-      self.assertAllClose(x.eval(), np.sinh(0.25))
-      self.assertAllClose(y.eval(), np.sinh(0.25))
+      self.assertAllClose(x, np.sinh(0.25))
+      self.assertAllClose(y, np.sinh(0.25))

   def testGradient(self):
@@ -1439,7 +1439,7 @@ class FunctionOverloadTest(test.TestCase):
       dx, = gradients_impl.gradients(y, x)

     with self.session(graph=g):
-      self.assertAllClose(dx.eval(), 0.25)
+      self.assertAllClose(dx, 0.25)

   def testDocString(self):
@@ -1483,7 +1483,7 @@ class FunctionCaptureByValueTest(test.TestCase):
     self.assertEqual(0, len(Foo.captured_inputs))

     with self.session(graph=g):
-      self.assertAllEqual(y.eval(), [[12.0]])
+      self.assertAllEqual(y, [[12.0]])


 class UnrollLSTMTest(test.TestCase):


@@ -69,7 +69,7 @@ class DenseTest(test.TestCase, parameterized.TestCase):
           'X', initializer=init_ops.zeros_initializer(), shape=(1, 1))
       x = core_layers.Dense(1)(v)
       variables.global_variables_initializer().run()
-      self.assertAllEqual(x.eval(), [[0.0]])
+      self.assertAllEqual(x, [[0.0]])

   @combinations.generate(combinations.combine(mode=['graph', 'eager']))
   def testCall(self):


@@ -193,12 +193,12 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
       aggregated_update = adam.Adam().apply_gradients(
           [(grad_aggregated, aggregated_update_var)])
       variables.global_variables_initializer().run()
-      self.assertAllClose(aggregated_update_var.eval(),
+      self.assertAllClose(aggregated_update_var,
                           self.evaluate(repeated_index_update_var))
       for _ in range(3):
         repeated_update.run()
         aggregated_update.run()
-        self.assertAllClose(aggregated_update_var.eval(),
+        self.assertAllClose(aggregated_update_var,
                             self.evaluate(repeated_index_update_var))

   def doTestBasic(self, use_callable_params=False):
@@ -645,12 +645,12 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
       aggregated_update = adam.NonFusedAdam().apply_gradients(
          [(grad_aggregated, aggregated_update_var)])
       variables.global_variables_initializer().run()
-      self.assertAllClose(aggregated_update_var.eval(),
+      self.assertAllClose(aggregated_update_var,
                           self.evaluate(repeated_index_update_var))
       for _ in range(3):
         repeated_update.run()
         aggregated_update.run()
-        self.assertAllClose(aggregated_update_var.eval(),
+        self.assertAllClose(aggregated_update_var,
                             self.evaluate(repeated_index_update_var))

   def doTestBasic(self, use_callable_params=False):


@@ -106,14 +106,14 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
       variables.global_variables_initializer().run()

       # Fetch params to validate initial values
-      self.assertAllClose([1.0, 2.0, 3.0], var0.eval())
-      self.assertAllClose([4.0, 5.0, 6.0], var1.eval())
+      self.assertAllClose([1.0, 2.0, 3.0], var0)
+      self.assertAllClose([4.0, 5.0, 6.0], var1)

       beta1_power = get_beta_accumulators(opt, dtype)

       # Run 3 steps of Adamax
       for t in range(3):
-        self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval())
+        self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power)
         update.run()

         var0_np, m0, v0 = adamax_sparse_update_numpy(
@@ -122,8 +122,8 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
             var1_np, grads1_np_indices, grads1_np, t, m1, v1)

         # Validate updated params
-        self.assertAllCloseAccordingToType(var0_np, var0.eval())
-        self.assertAllCloseAccordingToType(var1_np, var1.eval())
+        self.assertAllCloseAccordingToType(var0_np, var0)
+        self.assertAllCloseAccordingToType(var1_np, var1)

   def testSparseDevicePlacement(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
@@ -163,12 +163,12 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
       aggregated_update = adamax.Adamax().apply_gradients(
           [(grad_aggregated, aggregated_update_var)])
       variables.global_variables_initializer().run()
-      self.assertAllClose(aggregated_update_var.eval(),
+      self.assertAllClose(aggregated_update_var,
                           repeated_index_update_var.eval())
       for _ in range(3):
         repeated_update.run()
         aggregated_update.run()
-        self.assertAllClose(aggregated_update_var.eval(),
+        self.assertAllClose(aggregated_update_var,
                             repeated_index_update_var.eval())

   @combinations.generate(combinations.combine(mode=["graph", "eager"]))
@@ -292,22 +292,22 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
         variables.global_variables_initializer().run()

         # Fetch params to validate initial values
-        self.assertAllClose([1.0, 2.0], var0.eval())
-        self.assertAllClose([3.0, 4.0], var1.eval())
+        self.assertAllClose([1.0, 2.0], var0)
+        self.assertAllClose([3.0, 4.0], var1)

         beta1_power = get_beta_accumulators(opt, dtype)

         # Run 3 steps of Adamax
         for t in range(3):
-          self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval())
+          self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power)
           update.run()

           var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
           var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)

           # Validate updated params
-          self.assertAllCloseAccordingToType(var0_np, var0.eval())
-          self.assertAllCloseAccordingToType(var1_np, var1.eval())
+          self.assertAllCloseAccordingToType(var0_np, var0)
+          self.assertAllCloseAccordingToType(var1_np, var1)

   def testSharing(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
@@ -332,12 +332,12 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
       beta1_power = get_beta_accumulators(opt, dtype)

       # Fetch params to validate initial values
-      self.assertAllClose([1.0, 2.0], var0.eval())
-      self.assertAllClose([3.0, 4.0], var1.eval())
+      self.assertAllClose([1.0, 2.0], var0)
+      self.assertAllClose([3.0, 4.0], var1)

       # Run 3 steps of intertwined Adamax1 and Adamax2.
       for t in range(3):
-        self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval())
+        self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power)
         if t % 2 == 0:
           update1.run()
         else:
@@ -347,8 +347,8 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
         var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)

         # Validate updated params
-        self.assertAllCloseAccordingToType(var0_np, var0.eval())
-        self.assertAllCloseAccordingToType(var1_np, var1.eval())
+        self.assertAllCloseAccordingToType(var0_np, var0)
+        self.assertAllCloseAccordingToType(var1_np, var1)

   def testSlotsUniqueEager(self):
     with context.eager_mode():


@@ -99,15 +99,15 @@ class NadamOptimizerTest(test.TestCase):
       variables.global_variables_initializer().run()

       # Fetch params to validate initial values
-      self.assertAllClose([1.0, 1.0, 2.0], var0.eval())
-      self.assertAllClose([3.0, 3.0, 4.0], var1.eval())
+      self.assertAllClose([1.0, 1.0, 2.0], var0)
+      self.assertAllClose([3.0, 3.0, 4.0], var1)

       beta1_power, beta2_power = get_beta_accumulators(opt, dtype)

       # Run 3 steps of Nadam
       for t in range(3):
-        self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval())
-        self.assertAllCloseAccordingToType(0.999**(t + 1), beta2_power.eval())
+        self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power)
+        self.assertAllCloseAccordingToType(0.999**(t + 1), beta2_power)
         update.run()

         mcache = update_m_cache(mcache, t)
@@ -117,8 +117,8 @@ class NadamOptimizerTest(test.TestCase):
             var1_np, grads1_np, t, m1, v1, mcache, epsilon=sparse_epsilon)

         # Validate updated params
-        self.assertAllCloseAccordingToType(var0_np, var0.eval())
-        self.assertAllCloseAccordingToType(var1_np, var1.eval())
+        self.assertAllCloseAccordingToType(var0_np, var0)
+        self.assertAllCloseAccordingToType(var1_np, var1)

   def testBasic(self):
     # TODO(tanzheny, omalleyt): Fix test in eager mode.
@@ -140,8 +140,8 @@ class NadamOptimizerTest(test.TestCase):
       variables.global_variables_initializer().run()

       # Fetch params to validate initial values
-      self.assertAllClose([1.0, 2.0], var0.eval())
-      self.assertAllClose([3.0, 4.0], var1.eval())
+      self.assertAllClose([1.0, 2.0], var0)
+      self.assertAllClose([3.0, 4.0], var1)

       # Run 3 steps of Nadam
       for t in range(3):
@@ -154,8 +154,8 @@ class NadamOptimizerTest(test.TestCase):
             mcache)

         # Validate updated params
-        self.assertAllCloseAccordingToType(var0_np, var0.eval())
-        self.assertAllCloseAccordingToType(var1_np, var1.eval())
+        self.assertAllCloseAccordingToType(var0_np, var0)
+        self.assertAllCloseAccordingToType(var1_np, var1)

   def testConstructNAdamWithLR(self):
     opt = nadam.Nadam(lr=1.0)


@@ -149,7 +149,7 @@ class BooleanMaskTest(test_util.TensorFlowTestCase):
       self.assertAllEqual(masked_tensor.get_shape()[leading:],
                           masked_arr.shape[leading:])

-      self.assertAllClose(masked_arr, masked_tensor.eval())
+      self.assertAllClose(masked_arr, masked_tensor)

   @test_util.run_deprecated_v1
   def testMaskDim1ArrDim2Axis1(self):
@@ -201,7 +201,7 @@ class BooleanMaskTest(test_util.TensorFlowTestCase):
     tf_result = array_ops.boolean_mask(arr, mask)
     self.assertAllEqual(numpy_result.shape[1:], tf_result.get_shape()[1:])
     with self.cached_session():
-      self.assertAllClose(numpy_result, tf_result.eval())
+      self.assertAllClose(numpy_result, tf_result)

   @test_util.run_deprecated_v1
   def testEmptyInput1D(self):
@@ -211,7 +211,7 @@ class BooleanMaskTest(test_util.TensorFlowTestCase):
     tf_result = array_ops.boolean_mask(arr, mask)
     self.assertAllEqual(numpy_result.shape[1:], tf_result.get_shape()[1:])
     with self.cached_session():
-      self.assertAllClose(numpy_result, tf_result.eval())
+      self.assertAllClose(numpy_result, tf_result)

   @test_util.run_deprecated_v1
   def testEmptyOutput(self):
@@ -530,7 +530,7 @@ class MeshgridTest(test_util.TensorFlowTestCase):
       tf_out = array_ops.meshgrid(x, y, indexing=index)
       with self.cached_session(use_gpu=use_gpu):
         for xx, yy in zip(numpy_out, tf_out):
-          self.assertAllEqual(xx, yy.eval())
+          self.assertAllEqual(xx, yy)

   def _compareDiffType(self, n, np_dtype, use_gpu):
     inputs = []
@@ -544,7 +544,7 @@ class MeshgridTest(test_util.TensorFlowTestCase):
       with self.cached_session(use_gpu=use_gpu):
         tf_out = array_ops.meshgrid(*inputs, indexing=index)
         for x_np, x_tf in zip(numpy_out, tf_out):
-          self.assertAllEqual(x_np, x_tf.eval())
+          self.assertAllEqual(x_np, x_tf)

   @test_util.run_deprecated_v1
   def testCompare(self):
@@ -1306,9 +1306,9 @@ class SequenceMaskTest(test_util.TensorFlowTestCase):
     with self.cached_session():
       res = array_ops.sequence_mask(constant_op.constant([[1, 3, 2]]), 5)
       self.assertAllEqual(res.get_shape(), [1, 3, 5])
-      self.assertAllEqual(res.eval(), [[[True, False, False, False, False],
-                                        [True, True, True, False, False],
-                                        [True, True, False, False, False]]])
+      self.assertAllEqual(res, [[[True, False, False, False, False],
+                                 [True, True, True, False, False],
+                                 [True, True, False, False, False]]])

       # test dtype and default maxlen:
       res = array_ops.sequence_mask(
@@ -1410,7 +1410,7 @@ class InvertPermutationTest(test_util.TensorFlowTestCase):
         x = constant_op.constant([3, 4, 0, 2, 1], dtype=dtype)
         y = array_ops.invert_permutation(x)
         self.assertAllEqual(y.get_shape(), [5])
-        self.assertAllEqual(y.eval(), [2, 4, 3, 0, 1])
+        self.assertAllEqual(y, [2, 4, 3, 0, 1])


 class UnravelIndexTest(test_util.TensorFlowTestCase):
@@ -1424,17 +1424,17 @@ class UnravelIndexTest(test_util.TensorFlowTestCase):
         indices_1 = constant_op.constant(1621, dtype=dtype)
         dims_1 = constant_op.constant([6, 7, 8, 9], dtype=dtype)
         out_1 = array_ops.unravel_index(indices_1, dims_1)
-        self.assertAllEqual(out_1.eval(), [3, 1, 4, 1])
+        self.assertAllEqual(out_1, [3, 1, 4, 1])

         indices_2 = constant_op.constant([1621], dtype=dtype)
         dims_2 = constant_op.constant([6, 7, 8, 9], dtype=dtype)
         out_2 = array_ops.unravel_index(indices_2, dims_2)
-        self.assertAllEqual(out_2.eval(), [[3], [1], [4], [1]])
+        self.assertAllEqual(out_2, [[3], [1], [4], [1]])

         indices_3 = constant_op.constant([22, 41, 37], dtype=dtype)
         dims_3 = constant_op.constant([7, 6], dtype=dtype)
         out_3 = array_ops.unravel_index(indices_3, dims_3)
-        self.assertAllEqual(out_3.eval(), [[3, 6, 6], [4, 5, 1]])
+        self.assertAllEqual(out_3, [[3, 6, 6], [4, 5, 1]])

   # Test case for GitHub issue 40204.
   def testUnravelIndexZeroDim(self):
@@ -1492,7 +1492,7 @@ class SnapshotOpTest(test_util.TensorFlowTestCase):
       with self.cached_session(use_gpu=True):
         x = constant_op.constant([0, 1, 2, 3], dtype=dtype)
         y = gen_array_ops.snapshot(x)
-        self.assertAllEqual(y.eval(), [0, 1, 2, 3])
+        self.assertAllEqual(y, [0, 1, 2, 3])


 @test_util.run_all_in_graph_and_eager_modes


@@ -77,7 +77,7 @@ class ScatterTest(test.TestCase):
           ref.batch_scatter_update(ops.IndexedSlices(indices, updates))
         else:
          tf_scatter(ref, indices, updates).eval()
-        self.assertAllClose(ref.eval(), new)
+        self.assertAllClose(ref, new)

   @test_util.run_deprecated_v1
   def testVariableRankUpdate(self):


@@ -63,7 +63,7 @@ class BatchToSpaceDepthToSpace(test.TestCase, PythonOpImpl):
             array_ops.transpose(x, [3, 1, 2, 0]), block_size=block_size),
         [3, 1, 2, 0])
     with self.cached_session():
-      self.assertAllEqual(y1.eval(), y2.eval())
+      self.assertAllEqual(y1, y2)


 class BatchToSpaceDepthToSpaceCpp(BatchToSpaceDepthToSpace, CppOpImpl):


@@ -102,11 +102,11 @@ class QuantileOpsTest(test_util.TensorFlowTestCase):
           [self._feature_0, self._feature_1], buckets)
       self.evaluate(summary_op)
       self.evaluate(flush_op)
-      self.assertAllClose(self._feature_0_boundaries, buckets[0].eval())
-      self.assertAllClose(self._feature_1_boundaries, buckets[1].eval())
+      self.assertAllClose(self._feature_0_boundaries, buckets[0])
+      self.assertAllClose(self._feature_1_boundaries, buckets[1])

-      self.assertAllClose(self._feature_0_quantiles, quantiles[0].eval())
-      self.assertAllClose(self._feature_1_quantiles, quantiles[1].eval())
+      self.assertAllClose(self._feature_0_quantiles, quantiles[0])
+      self.assertAllClose(self._feature_1_quantiles, quantiles[1])

   def testBasicQuantileBucketsSingleResourcesAddFlushed(self):
     with self.cached_session():
@@ -137,11 +137,11 @@ class QuantileOpsTest(test_util.TensorFlowTestCase):
       self.evaluate(summary_op_2)
      self.evaluate(flush_op)
-      self.assertAllClose(self._feature_0_boundaries, buckets[0].eval())
-      self.assertAllClose(self._feature_1_boundaries, buckets[1].eval())
+      self.assertAllClose(self._feature_0_boundaries, buckets[0])
+      self.assertAllClose(self._feature_1_boundaries, buckets[1])

-      self.assertAllClose(self._feature_0_quantiles, quantiles[0].eval())
-      self.assertAllClose(self._feature_1_quantiles, quantiles[1].eval())
+      self.assertAllClose(self._feature_0_quantiles, quantiles[0])
+      self.assertAllClose(self._feature_1_quantiles, quantiles[1])

   def testBasicQuantileBucketsMultipleResources(self):
     with self.cached_session() as sess:
@@ -171,11 +171,11 @@ class QuantileOpsTest(test_util.TensorFlowTestCase):
           [self._feature_0, self._feature_1], bucket_0 + bucket_1)
       self.evaluate([summary_op_0, summary_op_1])
       self.evaluate([flush_op_0, flush_op_1])
-      self.assertAllClose(self._feature_0_boundaries, bucket_0[0].eval())
-      self.assertAllClose(self._feature_1_boundaries, bucket_1[0].eval())
+      self.assertAllClose(self._feature_0_boundaries, bucket_0[0])
+      self.assertAllClose(self._feature_1_boundaries, bucket_1[0])

-      self.assertAllClose(self._feature_0_quantiles, quantiles[0].eval())
-      self.assertAllClose(self._feature_1_quantiles, quantiles[1].eval())
+      self.assertAllClose(self._feature_0_quantiles, quantiles[0])
+      self.assertAllClose(self._feature_1_quantiles, quantiles[1])

   def testSaveRestoreAfterFlush(self):
     save_dir = os.path.join(self.get_temp_dir(), "save_restore")
@@ -192,15 +192,15 @@ class QuantileOpsTest(test_util.TensorFlowTestCase):
       resources.initialize_resources(resources.shared_resources()).run()
       buckets = accumulator.get_bucket_boundaries()
-      self.assertAllClose([], buckets[0].eval())
-      self.assertAllClose([], buckets[1].eval())
+      self.assertAllClose([], buckets[0])
+      self.assertAllClose([], buckets[1])
       summaries = accumulator.add_summaries([self._feature_0, self._feature_1],
                                             self._example_weights)
       with ops.control_dependencies([summaries]):
         flush = accumulator.flush()
       self.evaluate(flush)
-      self.assertAllClose(self._feature_0_boundaries, buckets[0].eval())
-      self.assertAllClose(self._feature_1_boundaries, buckets[1].eval())
+      self.assertAllClose(self._feature_0_boundaries, buckets[0])
+      self.assertAllClose(self._feature_1_boundaries, buckets[1])
       save.save(sess, save_path)

     with self.session(graph=ops.Graph()) as sess:
@@ -212,8 +212,8 @@ class QuantileOpsTest(test_util.TensorFlowTestCase):
       save = saver.Saver()
       save.restore(sess, save_path)
       buckets = accumulator.get_bucket_boundaries()
-      self.assertAllClose(self._feature_0_boundaries, buckets[0].eval())
-      self.assertAllClose(self._feature_1_boundaries, buckets[1].eval())
+      self.assertAllClose(self._feature_0_boundaries, buckets[0])
+      self.assertAllClose(self._feature_1_boundaries, buckets[1])

   def testSaveRestoreBeforeFlush(self):
     save_dir = os.path.join(self.get_temp_dir(), "save_restore")
@@ -233,12 +233,12 @@ class QuantileOpsTest(test_util.TensorFlowTestCase):
                                             self._example_weights)
       self.evaluate(summaries)
       buckets = accumulator.get_bucket_boundaries()
-      self.assertAllClose([], buckets[0].eval())
-      self.assertAllClose([], buckets[1].eval())
+      self.assertAllClose([], buckets[0])
+      self.assertAllClose([], buckets[1])
       save.save(sess, save_path)
       self.evaluate(accumulator.flush())
-      self.assertAllClose(self._feature_0_boundaries, buckets[0].eval())
-      self.assertAllClose(self._feature_1_boundaries, buckets[1].eval())
+      self.assertAllClose(self._feature_0_boundaries, buckets[0])
+      self.assertAllClose(self._feature_1_boundaries, buckets[1])

     with self.session(graph=ops.Graph()) as sess:
       accumulator = boosted_trees_ops.QuantileAccumulator(
@@ -249,8 +249,8 @@ class QuantileOpsTest(test_util.TensorFlowTestCase):
       save = saver.Saver()
       save.restore(sess, save_path)
       buckets = accumulator.get_bucket_boundaries()
-      self.assertAllClose([], buckets[0].eval())
-      self.assertAllClose([], buckets[1].eval())
+      self.assertAllClose([], buckets[0])
+      self.assertAllClose([], buckets[1])


 if __name__ == "__main__":


@@ -38,7 +38,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
         x = np.array([1, 2, 3], dtype=dtype)
         v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
         v_np = np.broadcast_to(x, [3, 3])
-        self.assertAllEqual(v_tf.eval(), v_np)
+        self.assertAllEqual(v_tf, v_np)

   @test_util.run_deprecated_v1
   def testBroadcastToString(self):
@@ -46,7 +46,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
       x = np.array([b"1", b"2", b"3"])
       v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
       v_np = np.broadcast_to(x, [3, 3])
-      self.assertAllEqual(v_tf.eval(), v_np)
+      self.assertAllEqual(v_tf, v_np)

   @test_util.run_deprecated_v1
   def testBroadcastToBool(self):
@@ -54,7 +54,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
       x = np.array([True, False, True], dtype=np.bool)
       v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
       v_np = np.broadcast_to(x, [3, 3])
-      self.assertAllEqual(v_tf.eval(), v_np)
+      self.assertAllEqual(v_tf, v_np)

   @test_util.run_deprecated_v1
   def testBroadcastToShape(self):
@@ -66,7 +66,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
         x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
         v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
         v_np = np.broadcast_to(x, output_shape)
-        self.assertAllEqual(v_tf.eval(), v_np)
+        self.assertAllEqual(v_tf, v_np)

   @test_util.run_deprecated_v1
   def testBroadcastToShapeInnerDim(self):
@@ -76,7 +76,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
       x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
       v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
       v_np = np.broadcast_to(x, output_shape)
-      self.assertAllEqual(v_tf.eval(), v_np)
+      self.assertAllEqual(v_tf, v_np)

   @test_util.run_deprecated_v1
   def testBroadcastToShapeLargerDim(self):
@@ -86,7 +86,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
       x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
       v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
       v_np = np.broadcast_to(x, output_shape)
-      self.assertAllEqual(v_tf.eval(), v_np)
+      self.assertAllEqual(v_tf, v_np)

   @test_util.run_deprecated_v1
   def testBroadcastToShapeLargerDim2(self):
@@ -96,7 +96,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
       x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
       v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
       v_np = np.broadcast_to(x, output_shape)
-      self.assertAllEqual(v_tf.eval(), v_np)
+      self.assertAllEqual(v_tf, v_np)

   @test_util.run_deprecated_v1
   def testBroadcastToScalar(self):
@@ -104,7 +104,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
       x = np.array(1, dtype=np.int32)
       v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
       v_np = np.broadcast_to(x, [3, 3])
-      self.assertAllEqual(v_tf.eval(), v_np)
+      self.assertAllEqual(v_tf, v_np)

   @test_util.run_deprecated_v1
   def testBroadcastScalarToNonScalar(self):
@@ -113,7 +113,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
       v_tf = array_ops.broadcast_to(constant_op.constant(1.0), [2, 3, 4,
                                                                1, 1, 1])
       v_np = np.broadcast_to(x, [2, 3, 4, 1, 1, 1])
-      self.assertAllEqual(v_tf.eval(), v_np)
+      self.assertAllEqual(v_tf, v_np)

   @test_util.run_deprecated_v1
   def testBroadcastToShapeTypeAndInference(self):
@@ -125,7 +125,7 @@ class BroadcastToTest(test_util.TensorFlowTestCase):
           constant_op.constant([3, 3], dtype=dtype))
       shape = v_tf.get_shape().as_list()
       v_np = np.broadcast_to(x, [3, 3])
-      self.assertAllEqual(v_tf.eval(), v_np)
+      self.assertAllEqual(v_tf, v_np)
       # check shape inference when shape input is constant
       self.assertAllEqual(shape, v_np.shape)


@@ -227,7 +227,7 @@ class ClipTest(test.TestCase):
       x = array_ops.zeros([3])
       b = clip_ops.clip_by_norm(x, 1.)
       grad, = gradients_impl.gradients(b, x)
-      self.assertAllEqual(grad.eval(), [1., 1., 1.])
+      self.assertAllEqual(grad, [1., 1., 1.])

   def testClipByNormBadShape(self):
     with self.session(use_gpu=True):


@@ -430,11 +430,11 @@ class ZerosTest(test.TestCase):
       z = array_ops.zeros([2, 3])
       self.assertEqual(z.dtype, dtypes_lib.float32)
       self.assertEqual([2, 3], z.get_shape())
-      self.assertAllEqual(z.eval(), np.zeros([2, 3]))
+      self.assertAllEqual(z, np.zeros([2, 3]))
       z = array_ops.zeros(array_ops.shape(d))
       self.assertEqual(z.dtype, dtypes_lib.float32)
       self.assertEqual([2, 3], z.get_shape())
-      self.assertAllEqual(z.eval(), np.zeros([2, 3]))
+      self.assertAllEqual(z, np.zeros([2, 3]))
     # Test explicit type control
     for dtype in [
         dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
@@ -610,11 +610,11 @@ class OnesTest(test.TestCase):
       z = array_ops.ones([2, 3])
       self.assertEqual(z.dtype, dtypes_lib.float32)
       self.assertEqual([2, 3], z.get_shape())
-      self.assertAllEqual(z.eval(), np.ones([2, 3]))
+      self.assertAllEqual(z, np.ones([2, 3]))
       z = array_ops.ones(array_ops.shape(d))
       self.assertEqual(z.dtype, dtypes_lib.float32)
       self.assertEqual([2, 3], z.get_shape())
-      self.assertAllEqual(z.eval(), np.ones([2, 3]))
+      self.assertAllEqual(z, np.ones([2, 3]))
     # Test explicit type control
     for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32,
                   dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8,
@@ -623,11 +623,11 @@ class OnesTest(test.TestCase):
       z = array_ops.ones([2, 3], dtype=dtype)
       self.assertEqual(z.dtype, dtype)
       self.assertEqual([2, 3], z.get_shape())
-      self.assertAllEqual(z.eval(), np.ones([2, 3]))
+      self.assertAllEqual(z, np.ones([2, 3]))
       z = array_ops.ones(array_ops.shape(d), dtype=dtype)
       self.assertEqual(z.dtype, dtype)
       self.assertEqual([2, 3], z.get_shape())
-      self.assertAllEqual(z.eval(), np.ones([2, 3]))
+      self.assertAllEqual(z, np.ones([2, 3]))


 class OnesLikeTest(test.TestCase):


@@ -93,13 +93,13 @@ class AssignOpTest(test.TestCase):
       p = variables.VariableV1([1])
       a = state_ops.assign(p, data, validate_shape=False)
       a.op.run()
-      self.assertAllEqual(p.eval(), self.evaluate(data))
+      self.assertAllEqual(p, self.evaluate(data))

       # Assign to yet another shape
       data2 = array_ops.fill([10, 10], 1)
       a2 = state_ops.assign(p, data2, validate_shape=False)
       a2.op.run()
-      self.assertAllEqual(p.eval(), self.evaluate(data2))
+      self.assertAllEqual(p, self.evaluate(data2))

   @test_util.run_v1_only("b/120545219")
   def testInitRequiredAssignAdd(self):


@@ -42,7 +42,7 @@ class DepthToSpaceTest(test.TestCase):
       with self.cached_session(use_gpu=False):
         # test NHWC (default) on CPU
         x_tf = array_ops.depth_to_space(input_nhwc, block_size)
-        self.assertAllEqual(x_tf.eval(), outputs)
+        self.assertAllEqual(x_tf, outputs)

     # Run this test only if only CPU device is available
     if all(x.device_type == "CPU" for x in device_lib.list_local_devices()):
@@ -59,13 +59,13 @@ class DepthToSpaceTest(test.TestCase):
       with self.cached_session(use_gpu=True):
         # test NHWC (default) on GPU
         x_tf = array_ops.depth_to_space(input_nhwc, block_size)
-        self.assertAllEqual(x_tf.eval(), outputs)
+        self.assertAllEqual(x_tf, outputs)
         # test NCHW on GPU
         input_nchw = test_util.NHWCToNCHW(input_nhwc)
         output_nchw = array_ops.depth_to_space(
             input_nchw, block_size, data_format="NCHW")
         output_nhwc = test_util.NCHWToNHWC(output_nchw)
-        self.assertAllEqual(output_nhwc.eval(), outputs)
+        self.assertAllEqual(output_nhwc, outputs)

   @test_util.run_deprecated_v1
   def testBasic(self):


@@ -379,14 +379,14 @@ class MatrixDiagTest(test.TestCase):
       mat = np.diag(v)
       v_diag = array_ops.matrix_diag(v)
       self.assertEqual((3, 3), v_diag.get_shape())
-      self.assertAllEqual(v_diag.eval(), mat)
+      self.assertAllEqual(v_diag, mat)

       # {Sub,Super}diagonals.
       for offset in [1, -2, 5]:
         mat = np.diag(v, offset)
         v_diag = array_ops.matrix_diag(v, k=offset)
         self.assertEqual(mat.shape, v_diag.get_shape())
-        self.assertAllEqual(v_diag.eval(), mat)
+        self.assertAllEqual(v_diag, mat)

       # Diagonal bands.
       for align in alignment_list:
@@ -394,7 +394,7 @@ class MatrixDiagTest(test.TestCase):
         for diags, (vecs, solution) in tests.items():
           v_diags = array_ops.matrix_diag(vecs[0], k=diags, align=align)
           self.assertEqual(v_diags.get_shape(), solution[0].shape)
-          self.assertAllEqual(v_diags.eval(), solution[0])
+          self.assertAllEqual(v_diags, solution[0])

   def _testVectorBatch(self, dtype):
     with self.cached_session(use_gpu=True):
@@ -404,7 +404,7 @@ class MatrixDiagTest(test.TestCase):
                             [0.0, 0.0, 6.0]]]).astype(dtype)
       v_batch_diag = array_ops.matrix_diag(v_batch)
       self.assertEqual((2, 3, 3), v_batch_diag.get_shape())
-      self.assertAllEqual(v_batch_diag.eval(), mat_batch)
+      self.assertAllEqual(v_batch_diag, mat_batch)

       # {Sub,Super}diagonals.
       for offset in [1, -2, 5]:
@@ -414,7 +414,7 @@ class MatrixDiagTest(test.TestCase):
         ]
         mat_batch = np.stack(mats, axis=0)
         self.assertEqual(mat_batch.shape, v_batch_diag.get_shape())
-        self.assertAllEqual(v_batch_diag.eval(), mat_batch)
+        self.assertAllEqual(v_batch_diag, mat_batch)

       # Diagonal bands with padding_value.
       for padding_value, align in zip_to_first_list_length([0, 555, -11],
@@ -429,7 +429,7 @@ class MatrixDiagTest(test.TestCase):
           mask = solution == 0
           solution = (solution + padding_value * mask).astype(dtype)
           self.assertEqual(v_diags.get_shape(), solution.shape)
-          self.assertAllEqual(v_diags.eval(), solution)
+          self.assertAllEqual(v_diags, solution)

   @test_util.run_deprecated_v1
   def testVectorBatch(self):
@@ -495,7 +495,7 @@ class MatrixDiagTest(test.TestCase):
           mask = solution == 0
           solution = solution + padding_value * mask
           self.assertEqual(v_diags.get_shape(), solution.shape)
-          self.assertAllEqual(v_diags.eval(), solution)
+          self.assertAllEqual(v_diags, solution)

       # Giving just num_rows.
       for expected, (_, tests) in test_list:
@@ -514,7 +514,7 @@ class MatrixDiagTest(test.TestCase):
          mask = solution == 0
          solution = solution + padding_value * mask
          self.assertEqual(v_diags.get_shape(), solution.shape)
-          self.assertAllEqual(v_diags.eval(), solution)
+          self.assertAllEqual(v_diags, solution)

       # Giving just num_cols.
       for expected, (_, tests) in test_list:
@@ -533,7 +533,7 @@ class MatrixDiagTest(test.TestCase):
          mask = solution == 0
          solution = solution + padding_value * mask
          self.assertEqual(v_diags.get_shape(), solution.shape)
-          self.assertAllEqual(v_diags.eval(), solution)
+          self.assertAllEqual(v_diags, solution)

   @test_util.run_deprecated_v1
   def testInvalidShape(self):
@@ -600,7 +600,7 @@ class MatrixSetDiagTest(test.TestCase):
           output = array_ops.matrix_set_diag(
               input_mat, vecs[0], k=diags, align=align)
           self.assertEqual(output.get_shape(), solution.shape)
-          self.assertAllEqual(output.eval(), solution)
+          self.assertAllEqual(output, solution)

   @test_util.run_deprecated_v1
   def testRectangular(self):
@@ -629,7 +629,7 @@ class MatrixSetDiagTest(test.TestCase):
           output = array_ops.matrix_set_diag(
               input_mat, vecs[0], k=diags, align=align)
           self.assertEqual(output.get_shape(), solution.shape)
-          self.assertAllEqual(output.eval(), solution)
+          self.assertAllEqual(output, solution)

   def _testSquareBatch(self, dtype):
     with self.cached_session(use_gpu=True):
@@ -657,7 +657,7 @@ class MatrixSetDiagTest(test.TestCase):
           output = array_ops.matrix_set_diag(
               input_mat, vecs.astype(dtype), k=diags, align=align)
           self.assertEqual(output.get_shape(), solution.shape)
-          self.assertAllEqual(output.eval(), solution)
+          self.assertAllEqual(output, solution)

   @test_util.run_deprecated_v1
   def testSquareBatch(self):
@@ -691,7 +691,7 @@ class MatrixSetDiagTest(test.TestCase):
           output = array_ops.matrix_set_diag(
               input_mat, vecs, k=diags, align=align)
           self.assertEqual(output.get_shape(), solution.shape)
-          self.assertAllEqual(output.eval(), solution)
+          self.assertAllEqual(output, solution)

   @test_util.run_deprecated_v1
   def testInvalidShape(self):
@@ -780,13 +780,13 @@ class MatrixDiagPartTest(test.TestCase):
       mat = np.diag(v)
       mat_diag = array_ops.matrix_diag_part(mat)
       self.assertEqual((3,), mat_diag.get_shape())
-      self.assertAllEqual(mat_diag.eval(), v)
+      self.assertAllEqual(mat_diag, v)

       for offset in [-2, 3]:
         mat = np.diag(v, offset)
         mat_diag = array_ops.matrix_diag_part(mat, k=offset)
         self.assertEqual((3,), mat_diag.get_shape())
-        self.assertAllEqual(mat_diag.eval(), v)
+        self.assertAllEqual(mat_diag, v)

       # Diagonal bands.
       for align in alignment_list:
@@ -795,17 +795,17 @@ class MatrixDiagPartTest(test.TestCase):
           solution, _ = pair
           mat_diag = array_ops.matrix_diag_part(mat[0], k=diags, align=align)
           self.assertEqual(mat_diag.get_shape(), solution[0].shape)
-          self.assertAllEqual(mat_diag.eval(), solution[0])
+          self.assertAllEqual(mat_diag, solution[0])

   @test_util.run_deprecated_v1
   def testRectangular(self):
     with self.session(use_gpu=True):
       mat = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
       mat_diag = array_ops.matrix_diag_part(mat)
-      self.assertAllEqual(mat_diag.eval(), np.array([1.0, 5.0]))
+      self.assertAllEqual(mat_diag, np.array([1.0, 5.0]))
       mat = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
       mat_diag = array_ops.matrix_diag_part(mat)
-      self.assertAllEqual(mat_diag.eval(), np.array([1.0, 4.0]))
+      self.assertAllEqual(mat_diag, np.array([1.0, 4.0]))

       # Diagonal bands.
       for align in alignment_list:
@@ -815,7 +815,7 @@ class MatrixDiagPartTest(test.TestCase):
           mat_diag = array_ops.matrix_diag_part(
               mat[0], k=diags, align=align)
           self.assertEqual(mat_diag.get_shape(), solution[0].shape)
-          self.assertAllEqual(mat_diag.eval(), solution[0])
+          self.assertAllEqual(mat_diag, solution[0])

   def _testSquareBatch(self, dtype):
     with self.cached_session(use_gpu=True):
@@ -826,7 +826,7 @@ class MatrixDiagPartTest(test.TestCase):
       self.assertEqual(mat_batch.shape, (2, 3, 3))
       mat_batch_diag = array_ops.matrix_diag_part(mat_batch)
       self.assertEqual((2, 3), mat_batch_diag.get_shape())
-      self.assertAllEqual(mat_batch_diag.eval(), v_batch)
+      self.assertAllEqual(mat_batch_diag, v_batch)

       # Diagonal bands with padding_value.
       for padding_value, align in zip_to_first_list_length([0, 555, -11],
@@ -842,7 +842,7 @@ class MatrixDiagPartTest(test.TestCase):
          mask = solution == 0
          solution = (solution + padding_value * mask).astype(dtype)
          self.assertEqual(mat_batch_diag.get_shape(), solution.shape)
-          self.assertAllEqual(mat_batch_diag.eval(), solution)
+          self.assertAllEqual(mat_batch_diag, solution)

   @test_util.run_deprecated_v1
   def testSquareBatch(self):
@@ -861,7 +861,7 @@ class MatrixDiagPartTest(test.TestCase):
       self.assertEqual(mat_batch.shape, (2, 2, 3))
       mat_batch_diag = array_ops.matrix_diag_part(mat_batch)
       self.assertEqual((2, 2), mat_batch_diag.get_shape())
-      self.assertAllEqual(mat_batch_diag.eval(), v_batch)
+      self.assertAllEqual(mat_batch_diag, v_batch)

       # Diagonal bands with padding_value and align.
       for padding_value, align in zip_to_first_list_length([0, 555, -11],
@@ -874,7 +874,7 @@ class MatrixDiagPartTest(test.TestCase):
          mask = solution == 0
          solution = solution + padding_value * mask
          self.assertEqual(mat_batch_diag.get_shape(), solution.shape)
-          self.assertAllEqual(mat_batch_diag.eval(), solution)
+          self.assertAllEqual(mat_batch_diag, solution)

   @test_util.run_deprecated_v1
   def testUnknownShape(self):


@@ -50,7 +50,7 @@ class CategoricalTest(test.TestCase, parameterized.TestCase):
     p = [0.2, 0.8]
     dist = categorical.Categorical(probs=p)
     with self.cached_session():
-      self.assertAllClose(p, dist.probs.eval())
+      self.assertAllClose(p, dist.probs)
       self.assertAllEqual([2], dist.logits.get_shape())

   @test_util.run_deprecated_v1
@@ -70,9 +70,9 @@ class CategoricalTest(test.TestCase, parameterized.TestCase):
       for batch_shape in ([], [1], [2, 3, 4]):
         dist = make_categorical(batch_shape, 10)
         self.assertAllEqual(batch_shape, dist.batch_shape)
-        self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
+        self.assertAllEqual(batch_shape, dist.batch_shape_tensor())
         self.assertAllEqual([], dist.event_shape)
-        self.assertAllEqual([], dist.event_shape_tensor().eval())
+        self.assertAllEqual([], dist.event_shape_tensor())
         self.assertEqual(10, dist.event_size.eval())
         # event_size is available as a constant because the shape is
         # known at graph build time.
@@ -83,9 +83,9 @@ class CategoricalTest(test.TestCase, parameterized.TestCase):
             batch_shape, constant_op.constant(
                 10, dtype=dtypes.int32))
         self.assertAllEqual(len(batch_shape), dist.batch_shape.ndims)
-        self.assertAllEqual(batch_shape, dist.batch_shape_tensor().eval())
+        self.assertAllEqual(batch_shape, dist.batch_shape_tensor())
         self.assertAllEqual([], dist.event_shape)
-        self.assertAllEqual([], dist.event_shape_tensor().eval())
+        self.assertAllEqual([], dist.event_shape_tensor())
         self.assertEqual(10, dist.event_size.eval())

   def testDtype(self):
@@ -202,7 +202,7 @@ class CategoricalTest(test.TestCase, parameterized.TestCase):
     cdf_op = dist.cdf(event)
     with self.cached_session():
-      self.assertAllClose(cdf_op.eval(), expected_cdf)
+      self.assertAllClose(cdf_op, expected_cdf)

   @test_util.run_deprecated_v1
   def testCDFNoBatch(self):


@@ -43,7 +43,7 @@ class DirichletMultinomialTest(test.TestCase):
       alpha = np.random.rand(3)
       dist = ds.DirichletMultinomial(1., alpha)
       self.assertEqual(3, dist.event_shape_tensor().eval())
-      self.assertAllEqual([], dist.batch_shape_tensor().eval())
+      self.assertAllEqual([], dist.batch_shape_tensor())
       self.assertEqual(tensor_shape.TensorShape([3]), dist.event_shape)
       self.assertEqual(tensor_shape.TensorShape([]), dist.batch_shape)
@@ -54,7 +54,7 @@ class DirichletMultinomialTest(test.TestCase):
       n = [[3., 2], [4, 5], [6, 7]]
       dist = ds.DirichletMultinomial(n, alpha)
       self.assertEqual(2, dist.event_shape_tensor().eval())
-      self.assertAllEqual([3, 2], dist.batch_shape_tensor().eval())
+      self.assertAllEqual([3, 2], dist.batch_shape_tensor())
       self.assertEqual(tensor_shape.TensorShape([2]), dist.event_shape)
       self.assertEqual(tensor_shape.TensorShape([3, 2]), dist.batch_shape)
@@ -65,7 +65,7 @@ class DirichletMultinomialTest(test.TestCase):
     with self.cached_session():
       dist = ds.DirichletMultinomial(n, alpha)
       self.assertEqual([1, 1], dist.total_count.get_shape())
-      self.assertAllClose(n, dist.total_count.eval())
+      self.assertAllClose(n, dist.total_count)

   @test_util.run_deprecated_v1
   def testAlphaProperty(self):
@@ -73,7 +73,7 @@ class DirichletMultinomialTest(test.TestCase):
     with self.cached_session():
       dist = ds.DirichletMultinomial(1, alpha)
       self.assertEqual([1, 3], dist.concentration.get_shape())
-      self.assertAllClose(alpha, dist.concentration.eval())
+      self.assertAllClose(alpha, dist.concentration)

   @test_util.run_deprecated_v1
   def testPmfNandCountsAgree(self):


@@ -40,7 +40,7 @@ class MultinomialTest(test.TestCase):
       p = [.1, .3, .6]
       dist = multinomial.Multinomial(total_count=1., probs=p)
       self.assertEqual(3, dist.event_shape_tensor().eval())
-      self.assertAllEqual([], dist.batch_shape_tensor().eval())
+      self.assertAllEqual([], dist.batch_shape_tensor())
       self.assertEqual(tensor_shape.TensorShape([3]), dist.event_shape)
       self.assertEqual(tensor_shape.TensorShape([]), dist.batch_shape)
@@ -51,7 +51,7 @@ class MultinomialTest(test.TestCase):
       n = [[3., 2], [4, 5], [6, 7]]
       dist = multinomial.Multinomial(total_count=n, probs=p)
       self.assertEqual(2, dist.event_shape_tensor().eval())
-      self.assertAllEqual([3, 2], dist.batch_shape_tensor().eval())
+      self.assertAllEqual([3, 2], dist.batch_shape_tensor())
       self.assertEqual(tensor_shape.TensorShape([2]), dist.event_shape)
       self.assertEqual(tensor_shape.TensorShape([3, 2]), dist.batch_shape)
@@ -62,7 +62,7 @@ class MultinomialTest(test.TestCase):
     with self.cached_session():
       dist = multinomial.Multinomial(total_count=n, probs=p)
       self.assertEqual((2, 1), dist.total_count.get_shape())
-      self.assertAllClose(n, dist.total_count.eval())
+      self.assertAllClose(n, dist.total_count)

   @test_util.run_v1_only("b/120545219")
   def testP(self):
@@ -71,7 +71,7 @@ class MultinomialTest(test.TestCase):
       dist = multinomial.Multinomial(total_count=3., probs=p)
       self.assertEqual((1, 3), dist.probs.get_shape())
       self.assertEqual((1, 3), dist.logits.get_shape())
-      self.assertAllClose(p, dist.probs.eval())
+      self.assertAllClose(p, dist.probs)

   @test_util.run_v1_only("b/120545219")
   def testLogits(self):
@@ -81,8 +81,8 @@ class MultinomialTest(test.TestCase):
       multinom = multinomial.Multinomial(total_count=3., logits=logits)
       self.assertEqual((1, 3), multinom.probs.get_shape())
       self.assertEqual((1, 3), multinom.logits.get_shape())
-      self.assertAllClose(p, multinom.probs.eval())
-      self.assertAllClose(logits, multinom.logits.eval())
+      self.assertAllClose(p, multinom.probs)
+      self.assertAllClose(logits, multinom.logits)

   @test_util.run_v1_only("b/120545219")
   def testPmfUnderflow(self):
@@ -172,7 +172,7 @@ class MultinomialTest(test.TestCase):
       p = [[0.1, 0.9], [0.7, 0.3]]
       counts = [[1., 0]]
       pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
-      self.assertAllClose(pmf.eval(), [0.1, 0.7])
+      self.assertAllClose(pmf, [0.1, 0.7])
       self.assertEqual((2), pmf.get_shape())

   @test_util.run_v1_only("b/120545219")
@@ -181,7 +181,7 @@ class MultinomialTest(test.TestCase):
       p = [[0.1, 0.9], [0.7, 0.3]]
       counts = [1., 0]
       pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
-      self.assertAllClose(pmf.eval(), [0.1, 0.7])
+      self.assertAllClose(pmf, [0.1, 0.7])
       self.assertEqual(pmf.get_shape(), (2))

   def testPmfShapeCountsStretchedN(self):
@@ -213,7 +213,7 @@ class MultinomialTest(test.TestCase):
       dist = multinomial.Multinomial(total_count=n, probs=p)
       expected_means = 5 * np.array(p, dtype=np.float32)
       self.assertEqual((3,), dist.mean().get_shape())
-      self.assertAllClose(expected_means, dist.mean().eval())
+      self.assertAllClose(expected_means, dist.mean())

   @test_util.run_v1_only("b/120545219")
   def testMultinomialCovariance(self):
@@ -225,7 +225,7 @@ class MultinomialTest(test.TestCase):
                               [-1 / 10, 4 / 5, -7 / 10],
                               [-7 / 20, -7 / 10, 21 / 20]]
       self.assertEqual((3, 3), dist.covariance().get_shape())
-      self.assertAllClose(expected_covariances, dist.covariance().eval())
+      self.assertAllClose(expected_covariances, dist.covariance())

   @test_util.run_v1_only("b/120545219")
   def testMultinomialCovarianceBatch(self):
@@ -240,7 +240,7 @@ class MultinomialTest(test.TestCase):
       # Shape [4, 2, 2, 2]
       expected_covariances = [[inner_var, inner_var]] * 4
       self.assertEqual((4, 2, 2, 2), dist.covariance().get_shape())
-      self.assertAllClose(expected_covariances, dist.covariance().eval())
+      self.assertAllClose(expected_covariances, dist.covariance())
def testCovarianceMultidimensional(self):
# Shape [3, 5, 4]

View File

@ -274,7 +274,7 @@ class EmbeddingLookupTest(test.TestCase):
embedding = embedding_ops.embedding_lookup(
[embeddings], ids, max_norm=1.0)
self.assertAllEqual(embedding.eval(), [[1.0]])
self.assertAllEqual(embedding, [[1.0]])
@test_util.run_deprecated_v1
def testMaxNormNontrivial(self):
@ -288,7 +288,7 @@ class EmbeddingLookupTest(test.TestCase):
norms = math_ops.sqrt(
math_ops.reduce_sum(embeddings * embeddings, axis=1))
normalized = embeddings / array_ops.stack([norms, norms], axis=1)
self.assertAllEqual(embedding.eval(), 2 * self.evaluate(normalized))
self.assertAllEqual(embedding, 2 * self.evaluate(normalized))
@test_util.run_deprecated_v1
def testSimpleShardedPartitionedVariable(self):
@ -557,7 +557,7 @@ class EmbeddingLookupTest(test.TestCase):
params.shape[0], size=np.prod(ids_shape)).reshape(ids_shape)
# Compare nonsharded to gather
simple = embedding_ops.embedding_lookup(params, ids).eval()
self.assertAllEqual(simple, array_ops.gather(params, ids).eval())
self.assertAllEqual(simple, array_ops.gather(params, ids))
# Run a few random sharded versions
for procs in 1, 2, 3:
stride = procs * math_ops.range(params.shape[0] // procs)
@ -591,7 +591,7 @@ class EmbeddingLookupTest(test.TestCase):
# vectorized square root algorithm for doubles. These different
# implementations of sqrt are not guaranteed to produce exactly the
# same results. Therefore, an exact comparison cannot be made.
self.assertAllClose(simple, array_ops.gather(params_norm, ids).eval())
self.assertAllClose(simple, array_ops.gather(params_norm, ids))
# Run a few different sharded versions.
for procs in 1, 2, 3:
stride = procs * math_ops.range(params.shape[0] // procs)
@ -627,7 +627,7 @@ class EmbeddingLookupTest(test.TestCase):
# Compare nonsharded to gather.
simple = embedding_ops._embedding_lookup_and_transform(
params, ids, max_norm=l2_norm, transform_fn=transform).eval()
self.assertAllClose(simple, array_ops.gather(params_norm, ids).eval())
self.assertAllClose(simple, array_ops.gather(params_norm, ids))
# Run a few different sharded versions.
for procs in 1, 2, 3:
stride = procs * math_ops.range(params.shape[0] // procs)
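In the embedding hunks above only the .eval() nested inside the assertion goes away; the one on the simple = ... line stays, since simple is deliberately materialized as a numpy baseline and reused against several sharded variants further down. Roughly, under that assumption:

simple = embedding_ops.embedding_lookup(params, ids).eval()  # baseline, reused below
self.assertAllEqual(simple, array_ops.gather(params, ids))   # redundant .eval() dropped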

View File

@ -275,7 +275,7 @@ class GatherNdTest(test.TestCase):
expected_grads = np.array([[3, 4], [1, 2]], dtype=np.float64)
with self.session(use_gpu=True):
self.assertIndexedSlices(grads)
self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads).eval())
self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads))
@test_util.run_deprecated_v1
def testGradientsRank3Elements(self):
@ -360,7 +360,7 @@ class GatherNdTest(test.TestCase):
dtype=np.float64)
with self.session(use_gpu=True):
self.assertIndexedSlices(grads)
self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads).eval())
self.assertAllEqual(expected_grads, ops.convert_to_tensor(grads))
@test_util.run_v1_only("RefVariable is not supported in v2")
def testGatherNdRefVariable(self):

View File

@ -271,17 +271,17 @@ class GatherTest(test.TestCase, parameterized.TestCase):
params = np.zeros((7, 0, 0), dtype=dtype.as_numpy_dtype)
indices = np.array([3, 4], dtype=itype)
gather = array_ops.gather(params, indices, axis=0)
self.assertAllEqual(gather.eval(), np.zeros((2, 0, 0)))
self.assertAllEqual(gather, np.zeros((2, 0, 0)))
# Middle axis gather.
params = np.zeros((0, 7, 0), dtype=dtype.as_numpy_dtype)
gather = array_ops.gather(params, indices, axis=1)
self.assertAllEqual(gather.eval(), np.zeros((0, 2, 0)))
self.assertAllEqual(gather, np.zeros((0, 2, 0)))
# Trailing axis gather.
params = np.zeros((0, 0, 7), dtype=dtype.as_numpy_dtype)
gather = array_ops.gather(params, indices, axis=2)
self.assertAllEqual(gather.eval(), np.zeros((0, 0, 2)))
self.assertAllEqual(gather, np.zeros((0, 0, 2)))
@parameterized.parameters([
# batch_dims=0 (equivalent to tf.gather)

View File

@ -117,7 +117,7 @@ class ConstantInitializersTest(test.TestCase):
x = variable_scope.get_variable(
"x", shape=shape, initializer=init_ops.zeros_initializer())
x.initializer.run()
self.assertAllEqual(x.eval(), np.zeros(shape))
self.assertAllEqual(x, np.zeros(shape))
@test_util.run_deprecated_v1
def testOnesInitializer(self):
@ -126,7 +126,7 @@ class ConstantInitializersTest(test.TestCase):
x = variable_scope.get_variable(
"x", shape=shape, initializer=init_ops.ones_initializer())
x.initializer.run()
self.assertAllEqual(x.eval(), np.ones(shape))
self.assertAllEqual(x, np.ones(shape))
@test_util.run_deprecated_v1
def testConstantZeroInitializer(self):
@ -135,7 +135,7 @@ class ConstantInitializersTest(test.TestCase):
x = variable_scope.get_variable(
"x", shape=shape, initializer=init_ops.constant_initializer(0.0))
x.initializer.run()
self.assertAllEqual(x.eval(), np.zeros(shape))
self.assertAllEqual(x, np.zeros(shape))
@test_util.run_deprecated_v1
def testConstantOneInitializer(self):
@ -144,7 +144,7 @@ class ConstantInitializersTest(test.TestCase):
x = variable_scope.get_variable(
"x", shape=shape, initializer=init_ops.constant_initializer(1.0))
x.initializer.run()
self.assertAllEqual(x.eval(), np.ones(shape))
self.assertAllEqual(x, np.ones(shape))
@test_util.run_deprecated_v1
def testConstantIntInitializer(self):
@ -157,7 +157,7 @@ class ConstantInitializersTest(test.TestCase):
initializer=init_ops.constant_initializer(7))
x.initializer.run()
self.assertEqual(x.dtype.base_dtype, dtypes.int32)
self.assertAllEqual(x.eval(), 7 * np.ones(shape, dtype=np.int32))
self.assertAllEqual(x, 7 * np.ones(shape, dtype=np.int32))
@test_util.run_deprecated_v1
def testConstantTupleInitializer(self):
@ -170,7 +170,7 @@ class ConstantInitializersTest(test.TestCase):
initializer=init_ops.constant_initializer((10, 20, 30)))
x.initializer.run()
self.assertEqual(x.dtype.base_dtype, dtypes.int32)
self.assertAllEqual(x.eval(), [10, 20, 30])
self.assertAllEqual(x, [10, 20, 30])
def _testNDimConstantInitializer(self, name, value, shape, expected):
with self.cached_session(use_gpu=True):
@ -482,7 +482,7 @@ class RangeTest(test.TestCase):
@test_util.run_deprecated_v1
def testLimitOnly(self):
with self.session(use_gpu=True):
self.assertAllEqual(np.arange(5), math_ops.range(5).eval())
self.assertAllEqual(np.arange(5), math_ops.range(5))
def testEmpty(self):
for start in 0, 5:
@ -1348,7 +1348,7 @@ class IdentityInitializerTest(test.TestCase):
"foo", partitioner=partitioner, initializer=init):
v = array_ops.identity(variable_scope.get_variable("bar", shape=shape))
variables.global_variables_initializer().run()
self.assertAllClose(v.eval(), np.eye(*shape))
self.assertAllClose(v, np.eye(*shape))
if __name__ == "__main__":

View File

@ -37,35 +37,35 @@ class InplaceOpsTest(test_util.TensorFlowTestCase):
with self.session(use_gpu=True):
x = array_ops.ones([7, 3], dtype)
y = np.ones([7, 3], dtype.as_numpy_dtype)
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
x = inplace_ops.inplace_update(x, [3], array_ops.ones([1, 3], dtype))
y[3, :] = 1
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
x = inplace_ops.inplace_update(x, [-1],
array_ops.ones([1, 3], dtype) * 2)
y[-1, :] = 2
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
x = inplace_ops.inplace_update(x, 5, array_ops.ones([3], dtype) * 7)
y[5, :] = 7
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
@test_util.run_deprecated_v1
def testBasicUpdateBool(self):
with self.session(use_gpu=True):
x = array_ops.ones([7, 3], dtypes.bool)
y = np.ones([7, 3], dtypes.bool.as_numpy_dtype)
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
x = inplace_ops.inplace_update(x, [3], array_ops.ones([1, 3],
dtypes.bool))
y[3, :] = True
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
x = inplace_ops.inplace_update(x, [-1],
array_ops.zeros([1, 3], dtypes.bool))
y[-1, :] = False
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
x = inplace_ops.inplace_update(x, 5, array_ops.zeros([3], dtypes.bool))
y[5, :] = False
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
@test_util.run_deprecated_v1
def testBasicAdd(self):
@ -73,19 +73,19 @@ class InplaceOpsTest(test_util.TensorFlowTestCase):
with self.cached_session(use_gpu=True):
x = array_ops.ones([7, 3], dtype)
y = np.ones([7, 3], dtype.as_numpy_dtype)
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
x = array_ops.inplace_add(x, [3], array_ops.ones([1, 3], dtype))
y[3, :] += 1
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
x = inplace_ops.inplace_add(x, [-1], array_ops.ones([1, 3], dtype) * 2)
y[-1, :] += 2
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
x = inplace_ops.inplace_add(x, 5, array_ops.ones([3], dtype) * 7)
y[5, :] += 7
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
x = inplace_ops.inplace_add(x, None, array_ops.ones([7, 3], dtype) * 99)
y[:, :] += 99
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
@test_util.run_deprecated_v1
def testBasicSub(self):
@ -93,19 +93,19 @@ class InplaceOpsTest(test_util.TensorFlowTestCase):
with self.cached_session(use_gpu=True):
x = array_ops.ones([7, 3], dtype)
y = np.ones([7, 3], dtype.as_numpy_dtype)
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
x = inplace_ops.inplace_sub(x, [3], array_ops.ones([1, 3], dtype))
y[3, :] -= 1
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
x = inplace_ops.inplace_sub(x, [-1], array_ops.ones([1, 3], dtype) * 2)
y[-1, :] -= 2
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
x = inplace_ops.inplace_sub(x, 5, array_ops.ones([3], dtype) * 7)
y[5, :] -= 7
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
x = inplace_ops.inplace_sub(x, None, array_ops.ones([7, 3], dtype) * 99)
y[:, :] -= 99
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
@test_util.run_deprecated_v1
def testRandom(self):
@ -126,7 +126,7 @@ class InplaceOpsTest(test_util.TensorFlowTestCase):
elif op == 2:
x = inplace_ops.inplace_sub(x, idx, val)
y[idx, :] -= val
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
@test_util.run_deprecated_v1
def testRandom1D(self):
@ -147,7 +147,7 @@ class InplaceOpsTest(test_util.TensorFlowTestCase):
elif op == 2:
x = inplace_ops.inplace_sub(x, idx, val)
y[idx] -= val
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
def testAlias(self):
with self.session(use_gpu=True) as sess:
@ -214,11 +214,11 @@ class InplaceOpsTest(test_util.TensorFlowTestCase):
with self.cached_session(use_gpu=True):
x = array_ops.zeros([7, 0], dtype)
y = np.zeros([7, 0], dtype.as_numpy_dtype)
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
x = op_fn(x, [3], array_ops.ones([1, 0], dtype))
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
x = op_fn(x, None, array_ops.ones([1, 0], dtype))
self.assertAllClose(x.eval(), y)
self.assertAllClose(x, y)
if __name__ == "__main__":

View File

@ -81,7 +81,7 @@ class LinearOperatorAdditionCorrectnessTest(test.TestCase):
self.assertEqual(1, len(op_sum))
op = op_sum[0]
self.assertIsInstance(op, linalg_lib.LinearOperatorDiag)
self.assertAllClose([[3., 0.], [0., 3.]], op.to_dense().eval())
self.assertAllClose([[3., 0.], [0., 3.]], op.to_dense())
# Adding positive definite operators produces positive def.
self.assertTrue(op.is_positive_definite)
# Real diagonal ==> self-adjoint.
@ -104,7 +104,7 @@ class LinearOperatorAdditionCorrectnessTest(test.TestCase):
self.assertEqual(1, len(op_sum))
op = op_sum[0]
self.assertTrue(isinstance(op, linalg_lib.LinearOperatorDiag))
self.assertAllClose([[6., 0.], [0., 6.]], op.to_dense().eval())
self.assertAllClose([[6., 0.], [0., 6.]], op.to_dense())
# Adding positive definite operators produces positive def.
self.assertTrue(op.is_positive_definite)
# Real diagonal ==> self-adjoint.
@ -128,7 +128,7 @@ class LinearOperatorAdditionCorrectnessTest(test.TestCase):
self.assertEqual(1, len(op_sum))
op = op_sum[0]
self.assertIsInstance(op, linalg_lib.LinearOperatorLowerTriangular)
self.assertAllClose([[6., 0.], [0., 6.]], op.to_dense().eval())
self.assertAllClose([[6., 0.], [0., 6.]], op.to_dense())
# The diag operators will be self-adjoint (because real and diagonal).
# The TriL operator has the self-adjoint hint set.
@ -151,7 +151,7 @@ class LinearOperatorAdditionCorrectnessTest(test.TestCase):
self.assertEqual(1, len(op_sum))
op = op_sum[0]
self.assertIsInstance(op, linalg_lib.LinearOperatorFullMatrix)
self.assertAllClose([[5., -1.], [0.5, 5.]], op.to_dense().eval())
self.assertAllClose([[5., -1.], [0.5, 5.]], op.to_dense())
self.assertEqual("my_operator", op.name)
def test_incompatible_domain_dimensions_raises(self):
@ -241,10 +241,10 @@ class LinearOperatorOrderOfAdditionTest(test.TestCase):
for op in op_sum:
if isinstance(op, linalg.LinearOperatorDiag):
found_diag = True
self.assertAllClose([[3.]], op.to_dense().eval())
self.assertAllClose([[3.]], op.to_dense())
if isinstance(op, linalg.LinearOperatorLowerTriangular):
found_tril = True
self.assertAllClose([[5.]], op.to_dense().eval())
self.assertAllClose([[5.]], op.to_dense())
self.assertTrue(found_diag and found_tril)
def test_intermediate_tier_is_not_skipped(self):
@ -390,7 +390,7 @@ class AddAndReturnTriLTest(test.TestCase):
self.assertIsInstance(operator, linalg.LinearOperatorLowerTriangular)
with self.cached_session():
self.assertAllClose([[11., 0.], [30., 2.]], operator.to_dense().eval())
self.assertAllClose([[11., 0.], [30., 2.]], operator.to_dense())
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
@ -413,7 +413,7 @@ class AddAndReturnMatrixTest(test.TestCase):
self.assertIsInstance(operator, linalg.LinearOperatorFullMatrix)
with self.cached_session():
self.assertAllClose([[0., 0.], [0., 5.]], operator.to_dense().eval())
self.assertAllClose([[0., 0.], [0., 5.]], operator.to_dense())
self.assertFalse(operator.is_positive_definite)
self.assertFalse(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)

View File

@ -203,7 +203,7 @@ class NonSquareLinearOperatorCompositionTest(
]
operator = linalg.LinearOperatorComposition(operators)
with self.cached_session():
self.assertAllEqual((2, 3, 5), operator.shape_tensor().eval())
self.assertAllEqual((2, 3, 5), operator.shape_tensor())
@test_util.run_deprecated_v1
def test_shape_tensors_when_only_dynamically_available(self):

View File

@ -52,7 +52,7 @@ class ShapeTest(test_lib.TestCase):
determinants = linalg_ops.matrix_determinant(batch_identity)
reduced = math_ops.reduce_sum(determinants)
sum_grad = gradients_impl.gradients(reduced, batch_identity)[0]
self.assertAllClose(batch_identity.eval(), self.evaluate(sum_grad))
self.assertAllClose(batch_identity, self.evaluate(sum_grad))
class MatrixUnaryFunctorGradientTest(test_lib.TestCase):

View File

@ -1443,14 +1443,14 @@ class DenseHashTableOpTest(test.TestCase):
save = saver.Saver()
self.assertAllEqual(0, table.size().eval())
self.assertAllEqual(0, table.size())
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
self.assertAllEqual(4, table.size())
self.assertAllEqual(32, len(table.export()[0].eval()))
keys2 = constant_op.constant([12, 15], dtypes.int64)
table.remove(keys2).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(3, table.size())
self.assertAllEqual(32, len(table.export()[0].eval()))
val = save.save(sess, save_path)
@ -1470,7 +1470,7 @@ class DenseHashTableOpTest(test.TestCase):
table.insert(
constant_op.constant([11, 14], dtypes.int64),
constant_op.constant([12, 24], dtypes.int64)).run()
self.assertAllEqual(2, table.size().eval())
self.assertAllEqual(2, table.size())
self.assertAllEqual(64, len(table.export()[0].eval()))
save = saver.Saver()
@ -1478,12 +1478,12 @@ class DenseHashTableOpTest(test.TestCase):
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(3, table.size())
self.assertAllEqual(32, len(table.export()[0].eval()))
input_string = constant_op.constant([10, 11, 12, 13, 14], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([-1, 0, -1, 2, 3], output.eval())
self.assertAllEqual([-1, 0, -1, 2, 3], output)
@test_util.run_v1_only("Saver V1 only")
def testSaveRestoreOnlyTable(self):
@ -1508,14 +1508,14 @@ class DenseHashTableOpTest(test.TestCase):
save = saver.Saver([table])
self.assertAllEqual(0, table.size().eval())
self.assertAllEqual(0, table.size())
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
self.assertAllEqual(4, table.size())
self.assertAllEqual(32, len(table.export()[0].eval()))
keys2 = constant_op.constant([12, 15], dtypes.int64)
table.remove(keys2).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(3, table.size())
self.assertAllEqual(32, len(table.export()[0].eval()))
val = save.save(sess, save_path)
@ -1535,7 +1535,7 @@ class DenseHashTableOpTest(test.TestCase):
table.insert(
constant_op.constant([11, 14], dtypes.int64),
constant_op.constant([12, 24], dtypes.int64)).run()
self.assertAllEqual(2, table.size().eval())
self.assertAllEqual(2, table.size())
self.assertAllEqual(64, len(table.export()[0].eval()))
save = saver.Saver([table])
@ -1543,12 +1543,12 @@ class DenseHashTableOpTest(test.TestCase):
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(3, table.size())
self.assertAllEqual(32, len(table.export()[0].eval()))
input_string = constant_op.constant([10, 11, 12, 13, 14], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([-1, 0, -1, 2, 3], output.eval())
self.assertAllEqual([-1, 0, -1, 2, 3], output)
@test_util.run_in_graph_and_eager_modes
def testObjectSaveRestore(self):
@ -1633,14 +1633,14 @@ class DenseHashTableOpTest(test.TestCase):
save = saver.Saver()
self.assertAllEqual(0, table.size().eval())
self.assertAllEqual(0, table.size())
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
self.assertAllEqual(4, table.size())
self.assertAllEqual(32, len(table.export()[0].eval()))
keys2 = constant_op.constant([[12, 13], [16, 17]], dtypes.int64)
table.remove(keys2).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(3, table.size())
self.assertAllEqual(32, len(table.export()[0].eval()))
val = save.save(sess, save_path)
@ -1663,7 +1663,7 @@ class DenseHashTableOpTest(test.TestCase):
table.insert(
constant_op.constant([[11, 12], [13, 15]], dtypes.int64),
constant_op.constant([[21, 22], [23, 24]], dtypes.int64)).run()
self.assertAllEqual(2, table.size().eval())
self.assertAllEqual(2, table.size())
self.assertAllEqual(64, len(table.export()[0].eval()))
save = saver.Saver()
@ -1671,7 +1671,7 @@ class DenseHashTableOpTest(test.TestCase):
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(3, table.size())
self.assertAllEqual(32, len(table.export()[0].eval()))
input_string = constant_op.constant(
@ -1704,14 +1704,14 @@ class DenseHashTableOpTest(test.TestCase):
save = saver.Saver()
self.assertAllEqual(0, table.size().eval())
self.assertAllEqual(0, table.size())
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
self.assertAllEqual(4, table.size())
self.assertAllEqual(32, len(table.export()[0].eval()))
keys2 = constant_op.constant([[12, 13], [15, 16]], dtypes.int64)
table.remove(keys2).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(3, table.size())
self.assertAllEqual(32, len(table.export()[0].eval()))
val = save.save(sess, save_path)
@ -1734,7 +1734,7 @@ class DenseHashTableOpTest(test.TestCase):
table.insert(
constant_op.constant([[11, 12], [13, 15]], dtypes.int64),
constant_op.constant([3, 4], dtypes.int64)).run()
self.assertAllEqual(2, table.size().eval())
self.assertAllEqual(2, table.size())
self.assertAllEqual(64, len(table.export()[0].eval()))
save = saver.Saver()
@ -1742,13 +1742,13 @@ class DenseHashTableOpTest(test.TestCase):
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(3, table.size())
self.assertAllEqual(32, len(table.export()[0].eval()))
input_string = constant_op.constant(
[[11, 12], [11, 14], [11, 15], [13, 14], [13, 15]], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([0, 1, -1, 3, -1], output.eval())
self.assertAllEqual([0, 1, -1, 3, -1], output)
def testReprobe(self):
with self.cached_session():
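A second survivor throughout the table tests above: the .eval() inside len(table.export()[0].eval()). len() is a Python builtin, not an assertAll* helper, so nothing evaluates the exported tensor for it, and (assuming the export's leading dimension is not statically known, which appears to be the case here) a symbolic Tensor cannot answer len() at graph-build time:

exported_keys = table.export()[0]                   # symbolic Tensor
self.assertAllEqual(32, len(exported_keys.eval()))  # .eval() required before len()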
@ -3006,22 +3006,22 @@ class MutableHashTableOpTest(test.TestCase):
# Populate the table in the first session
with session1:
self.assertAllEqual(0, table.size().eval())
self.assertAllEqual(0, table.size())
keys = constant_op.constant([11, 12], dtypes.int64)
values = constant_op.constant(["a", "b"])
table.insert(keys, values).run()
self.assertAllEqual(2, table.size().eval())
self.assertAllEqual(2, table.size())
output = table.lookup(constant_op.constant([11, 12, 13], dtypes.int64))
self.assertAllEqual([b"a", b"b", b"-"], output.eval())
self.assertAllEqual([b"a", b"b", b"-"], output)
# Verify that we can access the shared data from the second session
with session2:
self.assertAllEqual(2, table.size().eval())
self.assertAllEqual(2, table.size())
output = table.lookup(constant_op.constant([10, 11, 12], dtypes.int64))
self.assertAllEqual([b"-", b"a", b"b"], output.eval())
self.assertAllEqual([b"-", b"a", b"b"], output)
def testMutableHashTableOfTensors(self):
with self.cached_session():

View File

@ -840,7 +840,7 @@ class HingeLossTest(test.TestCase):
logits = constant_op.constant([1.2, -1.4, -1.0, 2.1])
labels = constant_op.constant([1.0, 0.0, 0.0, 1.0])
loss = losses.hinge_loss(labels, logits)
self.assertAllClose(loss.eval(), 0.0, atol=1e-3)
self.assertAllClose(loss, 0.0, atol=1e-3)
@test_util.run_deprecated_v1
def testSomeInsideMargin(self):
@ -850,7 +850,7 @@ class HingeLossTest(test.TestCase):
loss = losses.hinge_loss(labels, logits)
# Examples 1 and 4 are on the correct side of the hyperplane but within
# the margin so they incur some (small) loss.
self.assertAllClose(loss.eval(), 0.175, atol=1e-3)
self.assertAllClose(loss, 0.175, atol=1e-3)
@test_util.run_deprecated_v1
def testSomeMisclassified(self):
@ -860,7 +860,7 @@ class HingeLossTest(test.TestCase):
loss = losses.hinge_loss(labels, logits)
# Examples 2 and 4 are on the wrong side of the hyperplane so they incur
# some (fairly large) loss.
self.assertAllClose(loss.eval(), 0.875, atol=1e-3)
self.assertAllClose(loss, 0.875, atol=1e-3)
class HuberLossTest(test.TestCase):
@ -878,8 +878,8 @@ class HuberLossTest(test.TestCase):
predictions = constant_op.constant([1.5, -1.4, -1.0, 0.0])
labels = constant_op.constant([1.0, -1.0, 0.0, 0.5])
loss = losses.huber_loss(labels, predictions)
self.assertAllClose(loss.eval(),
0.5 * (0.25 + 0.16 + 1.0 + 0.25) / 4., atol=1e-5)
self.assertAllClose(
loss, 0.5 * (0.25 + 0.16 + 1.0 + 0.25) / 4., atol=1e-5)
@test_util.run_deprecated_v1
def testAllLinear(self):
@ -887,8 +887,7 @@ class HuberLossTest(test.TestCase):
predictions = constant_op.constant([1.5, -1.4, -1.0, 0.0])
labels = constant_op.constant([0.0, 1.0, 0.0, 1.5])
loss = losses.huber_loss(labels, predictions)
self.assertAllClose(loss.eval(),
(1.5 + 2.4 + 1.0 + 1.5) / 4. - 0.5, atol=1e-5)
self.assertAllClose(loss, (1.5 + 2.4 + 1.0 + 1.5) / 4. - 0.5, atol=1e-5)
@test_util.run_deprecated_v1
def testMixedQuadraticLinear(self):
@ -901,7 +900,7 @@ class HuberLossTest(test.TestCase):
quadratic = 0.5 * (0.25 + 0.16 + 1.0 + 0.25) / 4.
linear = (1.5 + 2.4 + 1.0 + 1.5) / 4. - 0.5
expected_loss = (quadratic + linear) / 2.
self.assertAllClose(loss.eval(), expected_loss, atol=1e-5)
self.assertAllClose(loss, expected_loss, atol=1e-5)
def testAllQuadraticDelta(self):
with self.cached_session():

View File

@ -44,7 +44,7 @@ class RollTest(test_util.TensorFlowTestCase):
expected_roll = np.roll(np_input, shift, axis)
with self.cached_session(use_gpu=True):
roll = manip_ops.roll(np_input, shift, axis)
self.assertAllEqual(roll.eval(), expected_roll)
self.assertAllEqual(roll, expected_roll)
def _testGradient(self, np_input, shift, axis):
with self.cached_session(use_gpu=True):

View File

@ -1775,8 +1775,8 @@ class PrecisionRecallThresholdsTest(test.TestCase):
initial_rec = rec.eval()
for _ in range(10):
self.evaluate([prec_op, rec_op])
self.assertAllClose(initial_prec, prec.eval())
self.assertAllClose(initial_rec, rec.eval())
self.assertAllClose(initial_prec, prec)
self.assertAllClose(initial_rec, rec)
# TODO(nsilberman): fix tests (passing but incorrect).
@test_util.run_deprecated_v1
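The initial_prec/initial_rec snapshots above keep their .eval() by design: they must capture the metric values before the update loop mutates state, while the live tensors on the right-hand side of the assertions are evaluated only at assertion time, after the loop. In outline:

initial_prec = prec.eval()               # snapshot taken now, before updates
for _ in range(10):
    self.evaluate([prec_op, rec_op])     # mutates the metric variables
self.assertAllClose(initial_prec, prec)  # prec re-evaluated here, post-loop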
@ -3852,7 +3852,7 @@ class MeanIOUTest(test.TestCase):
with self.cached_session():
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual([[0, 0], [40, 0]], update_op.eval())
self.assertAllEqual([[0, 0], [40, 0]], update_op)
self.assertEqual(0., miou.eval())
@test_util.run_deprecated_v1
@ -3884,7 +3884,7 @@ class MeanIOUTest(test.TestCase):
miou, update_op = metrics.mean_iou(
labels, predictions, num_classes, weights=weights)
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual([[2, 0], [2, 4]], update_op.eval())
self.assertAllEqual([[2, 0], [2, 4]], update_op)
desired_miou = np.mean([2. / 4., 4. / 6.])
self.assertAlmostEqual(desired_miou, miou.eval())
@ -3904,7 +3904,7 @@ class MeanIOUTest(test.TestCase):
with self.cached_session():
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual([[7, 4, 3], [3, 5, 2], [0, 0, 0]], update_op.eval())
self.assertAllEqual([[7, 4, 3], [3, 5, 2], [0, 0, 0]], update_op)
self.assertAlmostEqual(
1 / 3 * (7 / (7 + 3 + 7) + 5 / (5 + 4 + 5) + 0 / (0 + 5 + 0)),
miou.eval())
@ -3917,7 +3917,7 @@ class MeanIOUTest(test.TestCase):
with self.cached_session():
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual([[1, 0], [0, 0]], update_op.eval())
self.assertAllEqual([[1, 0], [0, 0]], update_op)
self.assertAlmostEqual(1, miou.eval())
@test_util.run_deprecated_v1
@ -3936,7 +3936,7 @@ class MeanIOUTest(test.TestCase):
with self.cached_session():
miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual([[9, 5, 0], [3, 7, 0], [0, 0, 0]], update_op.eval())
self.assertAllEqual([[9, 5, 0], [3, 7, 0], [0, 0, 0]], update_op)
self.assertAlmostEqual(
1 / 2 * (9 / (9 + 3 + 5) + 7 / (7 + 5 + 3)), miou.eval())
@ -4151,7 +4151,7 @@ class MeanPerClassAccuracyTest(test.TestCase):
mean_accuracy, update_op = metrics.mean_per_class_accuracy(
labels, predictions, num_classes)
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual([0.0, 0.0], update_op.eval())
self.assertAllEqual([0.0, 0.0], update_op)
self.assertEqual(0., mean_accuracy.eval())
@test_util.run_deprecated_v1
@ -4172,7 +4172,7 @@ class MeanPerClassAccuracyTest(test.TestCase):
labels, predictions, num_classes, weights=weights)
self.evaluate(variables.local_variables_initializer())
desired_accuracy = np.array([2. / 2., 4. / 6.], dtype=np.float32)
self.assertAllEqual(desired_accuracy, update_op.eval())
self.assertAllEqual(desired_accuracy, update_op)
desired_mean_accuracy = np.mean(desired_accuracy)
self.assertAlmostEqual(desired_mean_accuracy, mean_accuracy.eval())
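One subtlety in these metrics hunks: evaluating update_op is what actually performs the update, so passing it straight to assertAllEqual preserves the side effect that update_op.eval() used to trigger, and the miou.eval() on the following line still observes the freshly updated totals:

self.assertAllEqual([[0, 0], [40, 0]], update_op)  # implicit evaluation runs the update
self.assertEqual(0., miou.eval())                  # reads the post-update state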
@ -4205,9 +4205,9 @@ class FalseNegativesTest(test.TestCase):
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllClose(0., tn.eval())
self.assertAllClose(3., tn_update_op.eval())
self.assertAllClose(3., tn.eval())
self.assertAllClose(0., tn)
self.assertAllClose(3., tn_update_op)
self.assertAllClose(3., tn)
@test_util.run_deprecated_v1
def testWeighted(self):
@ -4225,9 +4225,9 @@ class FalseNegativesTest(test.TestCase):
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllClose(0., tn.eval())
self.assertAllClose(5., tn_update_op.eval())
self.assertAllClose(5., tn.eval())
self.assertAllClose(0., tn)
self.assertAllClose(5., tn_update_op)
self.assertAllClose(5., tn)
class FalseNegativesAtThresholdsTest(test.TestCase):
@ -4257,9 +4257,9 @@ class FalseNegativesAtThresholdsTest(test.TestCase):
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fn.eval())
self.assertAllEqual((0, 2, 3), fn_update_op.eval())
self.assertAllEqual((0, 2, 3), fn.eval())
self.assertAllEqual((0, 0, 0), fn)
self.assertAllEqual((0, 2, 3), fn_update_op)
self.assertAllEqual((0, 2, 3), fn)
@test_util.run_deprecated_v1
def testWeighted(self):
@ -4277,9 +4277,9 @@ class FalseNegativesAtThresholdsTest(test.TestCase):
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fn.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn_update_op.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn.eval())
self.assertAllEqual((0.0, 0.0, 0.0), fn)
self.assertAllEqual((0.0, 8.0, 11.0), fn_update_op)
self.assertAllEqual((0.0, 8.0, 11.0), fn)
class FalsePositivesTest(test.TestCase):
@ -4310,9 +4310,9 @@ class FalsePositivesTest(test.TestCase):
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllClose(0., tn.eval())
self.assertAllClose(7., tn_update_op.eval())
self.assertAllClose(7., tn.eval())
self.assertAllClose(0., tn)
self.assertAllClose(7., tn_update_op)
self.assertAllClose(7., tn)
@test_util.run_deprecated_v1
def testWeighted(self):
@ -4330,9 +4330,9 @@ class FalsePositivesTest(test.TestCase):
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllClose(0., tn.eval())
self.assertAllClose(14., tn_update_op.eval())
self.assertAllClose(14., tn.eval())
self.assertAllClose(0., tn)
self.assertAllClose(14., tn_update_op)
self.assertAllClose(14., tn)
class FalsePositivesAtThresholdsTest(test.TestCase):
@ -4362,9 +4362,9 @@ class FalsePositivesAtThresholdsTest(test.TestCase):
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fp.eval())
self.assertAllEqual((7, 4, 2), fp_update_op.eval())
self.assertAllEqual((7, 4, 2), fp.eval())
self.assertAllEqual((0, 0, 0), fp)
self.assertAllEqual((7, 4, 2), fp_update_op)
self.assertAllEqual((7, 4, 2), fp)
@test_util.run_deprecated_v1
def testWeighted(self):
@ -4384,9 +4384,9 @@ class FalsePositivesAtThresholdsTest(test.TestCase):
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fp.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp_update_op.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp.eval())
self.assertAllEqual((0.0, 0.0, 0.0), fp)
self.assertAllEqual((125.0, 42.0, 12.0), fp_update_op)
self.assertAllEqual((125.0, 42.0, 12.0), fp)
class TrueNegativesTest(test.TestCase):
@ -4417,9 +4417,9 @@ class TrueNegativesTest(test.TestCase):
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllClose(0., tn.eval())
self.assertAllClose(3., tn_update_op.eval())
self.assertAllClose(3., tn.eval())
self.assertAllClose(0., tn)
self.assertAllClose(3., tn_update_op)
self.assertAllClose(3., tn)
@test_util.run_deprecated_v1
def testWeighted(self):
@ -4437,9 +4437,9 @@ class TrueNegativesTest(test.TestCase):
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllClose(0., tn.eval())
self.assertAllClose(4., tn_update_op.eval())
self.assertAllClose(4., tn.eval())
self.assertAllClose(0., tn)
self.assertAllClose(4., tn_update_op)
self.assertAllClose(4., tn)
class TrueNegativesAtThresholdsTest(test.TestCase):
@ -4469,9 +4469,9 @@ class TrueNegativesAtThresholdsTest(test.TestCase):
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tn.eval())
self.assertAllEqual((2, 5, 7), tn_update_op.eval())
self.assertAllEqual((2, 5, 7), tn.eval())
self.assertAllEqual((0, 0, 0), tn)
self.assertAllEqual((2, 5, 7), tn_update_op)
self.assertAllEqual((2, 5, 7), tn)
@test_util.run_deprecated_v1
def testWeighted(self):
@ -4489,9 +4489,9 @@ class TrueNegativesAtThresholdsTest(test.TestCase):
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tn.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn_update_op.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn.eval())
self.assertAllEqual((0.0, 0.0, 0.0), tn)
self.assertAllEqual((5.0, 15.0, 23.0), tn_update_op)
self.assertAllEqual((5.0, 15.0, 23.0), tn)
class TruePositivesTest(test.TestCase):
@ -4522,9 +4522,9 @@ class TruePositivesTest(test.TestCase):
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllClose(0., tn.eval())
self.assertAllClose(7., tn_update_op.eval())
self.assertAllClose(7., tn.eval())
self.assertAllClose(0., tn)
self.assertAllClose(7., tn_update_op)
self.assertAllClose(7., tn)
@test_util.run_deprecated_v1
def testWeighted(self):
@ -4542,9 +4542,9 @@ class TruePositivesTest(test.TestCase):
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllClose(0., tn.eval())
self.assertAllClose(12., tn_update_op.eval())
self.assertAllClose(12., tn.eval())
self.assertAllClose(0., tn)
self.assertAllClose(12., tn_update_op)
self.assertAllClose(12., tn)
class TruePositivesAtThresholdsTest(test.TestCase):
@ -4574,9 +4574,9 @@ class TruePositivesAtThresholdsTest(test.TestCase):
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tp.eval())
self.assertAllEqual((3, 1, 0), tp_update_op.eval())
self.assertAllEqual((3, 1, 0), tp.eval())
self.assertAllEqual((0, 0, 0), tp)
self.assertAllEqual((3, 1, 0), tp_update_op)
self.assertAllEqual((3, 1, 0), tp)
@test_util.run_deprecated_v1
def testWeighted(self):
@ -4592,9 +4592,9 @@ class TruePositivesAtThresholdsTest(test.TestCase):
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tp.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp_update_op.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp.eval())
self.assertAllEqual((0.0, 0.0, 0.0), tp)
self.assertAllEqual((111.0, 37.0, 0.0), tp_update_op)
self.assertAllEqual((111.0, 37.0, 0.0), tp)
if __name__ == '__main__':

View File

@ -373,7 +373,7 @@ class PadOpTest(test.TestCase):
[paddings_value[i][0] + inp.shape.dims[i].value for i in range(4)],
[-1, -1, -1, -1])
with self.cached_session(use_gpu=True):
self.assertAllEqual(inp.eval(), self.evaluate(middle))
self.assertAllEqual(inp, self.evaluate(middle))
self.assertAllEqual(
np.zeros([row[0] for row in paddings_value]), self.evaluate(left))
self.assertAllEqual(

View File

@ -543,7 +543,7 @@ class PaddingFIFOQueueTest(test.TestCase):
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
self.assertAllEqual(dequeued_t, elems)
def testPartiallyKnownHighDimension(self):
with self.cached_session():
@ -554,7 +554,7 @@ class PaddingFIFOQueueTest(test.TestCase):
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertAllEqual(dequeued_t.eval(), elems)
self.assertAllEqual(dequeued_t, elems)
def testEnqueueWrongShape(self):
q = data_flow_ops.PaddingFIFOQueue(10, (dtypes_lib.int32, dtypes_lib.int32),

View File

@ -258,7 +258,7 @@ class PyFuncTest(PyFuncTestBase):
correct = [b"this", b"is", b"a", b"test"]
with self.cached_session():
s, = script_ops.py_func(lambda: [correct], [], [dtypes.string])
self.assertAllEqual(s.eval(), correct)
self.assertAllEqual(s, correct)
@test_util.run_v1_only("b/120545219")
def testStringPaddingAreConvertedToBytes(self):
@ -266,7 +266,7 @@ class PyFuncTest(PyFuncTestBase):
correct = [b"this", b"is", b"a", b"test"]
with self.cached_session():
s, = script_ops.py_func(lambda: [inp], [], [dtypes.string])
self.assertAllEqual(s.eval(), correct)
self.assertAllEqual(s, correct)
@test_util.run_v1_only("b/120545219")
def testNulTerminatedStrings(self):
@ -274,7 +274,7 @@ class PyFuncTest(PyFuncTestBase):
correct = [b"this", b"is", b"a", b"test"]
with self.cached_session():
s, = script_ops.py_func(lambda: [inp], [], [dtypes.string])
self.assertAllEqual(s.eval(), correct)
self.assertAllEqual(s, correct)
@test_util.run_v1_only("b/120545219")
def testLarge(self):

View File

@ -55,9 +55,9 @@ class RandomShuffleQueueTest(test.TestCase):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
self.assertAllEqual(0, q.size().eval())
self.assertAllEqual(0, q.size())
enqueue_op.run()
self.assertAllEqual(1, q.size().eval())
self.assertAllEqual(1, q.size())
def testEnqueueWithShape(self):
with self.cached_session():
@ -65,7 +65,7 @@ class RandomShuffleQueueTest(test.TestCase):
10, 5, dtypes_lib.float32, shapes=tensor_shape.TensorShape([3, 2]))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
self.assertAllEqual(1, q.size().eval())
self.assertAllEqual(1, q.size())
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
@ -74,7 +74,7 @@ class RandomShuffleQueueTest(test.TestCase):
q = data_flow_ops.RandomShuffleQueue(
10, 5, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertAllEqual(4, q.size().eval())
self.assertAllEqual(4, q.size())
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shapes=tensor_shape.TensorShape([3]))

View File

@ -59,7 +59,7 @@ class ReducedShapeTest(test.TestCase):
def _check(self, shape, axes, result):
output = math_ops.reduced_shape(shape, axes=axes)
self.assertAllEqual(output.eval(), result)
self.assertAllEqual(output, result)
@test_util.run_deprecated_v1
def testSimple(self):
@ -390,7 +390,7 @@ class SumReductionTest(BaseReductionTest):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_sum(x, [0])
self.assertAllEqual(y.eval(), np.zeros(9938))
self.assertAllEqual(y, np.zeros(9938))
class MeanReductionTest(BaseReductionTest):
@ -697,7 +697,7 @@ class ProdReductionTest(BaseReductionTest):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.reduce_prod(x, [0])
self.assertAllEqual(y.eval(), np.ones(9938))
self.assertAllEqual(y, np.ones(9938))
class MinReductionTest(test.TestCase):
@ -1124,7 +1124,7 @@ class CountNonzeroReductionTest(test.TestCase):
# A large number is needed to get Eigen to die
x = array_ops.zeros((0, 9938), dtype=dtype)
y = math_ops.count_nonzero(x, [0])
self.assertAllEqual(y.eval(), np.zeros(9938))
self.assertAllEqual(y, np.zeros(9938))
def testStringReduce(self):
# Test case for GitHub issue 18712

View File

@ -2241,7 +2241,7 @@ class RawRNNTest(test.TestCase):
r = rnn.raw_rnn(cell, loop_fn)
loop_state = r[-1]
loop_state = loop_state.stack()
self.assertAllEqual([1, 2, 2 + 2, 4 + 3, 7 + 4], loop_state.eval())
self.assertAllEqual([1, 2, 2 + 2, 4 + 3, 7 + 4], loop_state)
@test_util.run_v1_only("b/124229375")
def testEmitDifferentStructureThanCellOutput(self):

View File

@ -200,7 +200,7 @@ class StatefulScatterNdTest(test.TestCase):
with self.session(use_gpu=True) as sess:
self.evaluate(init)
self.evaluate(scatter)
self.assertAllClose(ref.eval(), expected)
self.assertAllClose(ref, expected)
def testSimple2(self):
indices = constant_op.constant([[1, 0], [1, 1]], dtype=dtypes.int32)

View File

@ -464,7 +464,7 @@ class UnsortedSegmentTest(SegmentReductionHelper):
data = np.zeros((2, 0), dtype=dtype)
segment_ids = np.array([0, 1], dtype=itype)
unsorted = math_ops.unsorted_segment_sum(data, segment_ids, 2)
self.assertAllEqual(unsorted.eval(), np.zeros((2, 0), dtype=dtype))
self.assertAllEqual(unsorted, np.zeros((2, 0), dtype=dtype))
def testDropNegatives(self):
# Note: the test is done by replacing segment_ids with 8 to -1

View File

@ -256,12 +256,12 @@ class ShapeOpsTest(test.TestCase):
def testExpandDimsScalar(self):
with self.cached_session():
inp = constant_op.constant(7)
self.assertAllEqual([7], array_ops.expand_dims(inp, 0).eval())
self.assertAllEqual([7], array_ops.expand_dims(inp, -1).eval())
self.assertAllEqual([7], array_ops.expand_dims(inp, 0))
self.assertAllEqual([7], array_ops.expand_dims(inp, -1))
inp = constant_op.constant(True)
self.assertAllEqual([True], array_ops.expand_dims(inp, 0).eval())
self.assertAllEqual([True], array_ops.expand_dims(inp, -1).eval())
self.assertAllEqual([True], array_ops.expand_dims(inp, 0))
self.assertAllEqual([True], array_ops.expand_dims(inp, -1))
def testExpandDimsDimType(self):
for dtype in [dtypes.int32, dtypes.int64]:

View File

@ -265,7 +265,7 @@ class SliceTest(test.TestCase):
else:
y = 0
slice_t = a[:, x, y:z, :]
self.assertAllEqual(slice_t.eval(), inp[:, x, y:z, :])
self.assertAllEqual(slice_t, inp[:, x, y:z, :])
def testRandom(self):
# Random dims of rank 6

View File

@ -107,13 +107,13 @@ class SpaceToBatchTest(test.TestCase, PythonOpImpl):
math_ops.cast(inputs, dtypes.float32),
paddings,
block_size=block_size)
self.assertAllEqual(x_tf.eval(), outputs)
self.assertAllEqual(x_tf, outputs)
# inputs = batch_to_space(outputs)
x_tf = self.batch_to_space(
math_ops.cast(outputs, dtypes.float32),
paddings,
block_size=block_size)
self.assertAllEqual(x_tf.eval(), inputs)
self.assertAllEqual(x_tf, inputs)
def _testOne(self, inputs, block_size, outputs):
paddings = np.zeros((2, 2), dtype=np.int32)
@ -205,11 +205,11 @@ class SpaceToBatchNDTest(test.TestCase):
# outputs = space_to_batch(inputs)
x_tf = array_ops.space_to_batch_nd(
math_ops.cast(inputs, dtypes.float32), block_shape, paddings)
self.assertAllEqual(x_tf.eval(), outputs)
self.assertAllEqual(x_tf, outputs)
# inputs = batch_to_space(outputs)
x_tf = array_ops.batch_to_space_nd(
math_ops.cast(outputs, dtypes.float32), block_shape, paddings)
self.assertAllEqual(x_tf.eval(), inputs)
self.assertAllEqual(x_tf, inputs)
def _testDirect(self, input_shape, block_shape, paddings):
inputs = np.arange(np.prod(input_shape), dtype=np.float32)
@ -328,7 +328,7 @@ class SpaceToBatchSpaceToDepth(test.TestCase, PythonOpImpl):
array_ops.transpose(x, [3, 1, 2, 0]), block_size=block_size),
[3, 1, 2, 0])
with self.session(use_gpu=True):
self.assertAllEqual(y1.eval(), y2.eval())
self.assertAllEqual(y1, y2)
class SpaceToBatchSpaceToDepthCpp(SpaceToBatchSpaceToDepth, CppOpImpl):

View File

@ -62,7 +62,7 @@ class StackOpTest(test.TestCase):
# Stack back into a single tensorflow tensor
with self.subTest(shape=shape, axis=axis, dtype=dtype):
c = array_ops.stack(xs, axis=axis)
self.assertAllEqual(c.eval(), data)
self.assertAllEqual(c, data)
@test_util.run_deprecated_v1
def testSimpleParallelCPU(self):
@ -73,7 +73,7 @@ class StackOpTest(test.TestCase):
data = self.randn(shape, np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c.eval(), data)
self.assertAllEqual(c, data)
@test_util.run_deprecated_v1
def testSimpleParallelGPU(self):
@ -84,7 +84,7 @@ class StackOpTest(test.TestCase):
data = self.randn(shape, np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c.eval(), data)
self.assertAllEqual(c, data)
@test_util.run_deprecated_v1
def testConst(self):
@ -104,14 +104,14 @@ class StackOpTest(test.TestCase):
c = array_ops.stack(data)
# This is implemented via a Const:
self.assertEqual(c.op.type, "Const")
self.assertAllEqual(c.eval(), data)
self.assertAllEqual(c, data)
# Python lists also work for 1-D case:
if len(shape) == 1:
data_list = list(data)
cl = array_ops.stack(data_list)
self.assertEqual(cl.op.type, "Const")
self.assertAllEqual(cl.eval(), data)
self.assertAllEqual(cl, data)
@test_util.run_deprecated_v1
def testConstParallelCPU(self):
@ -123,11 +123,11 @@ class StackOpTest(test.TestCase):
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl.eval(), data)
self.assertAllEqual(cl, data)
data = self.randn(shape, np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c.eval(), data)
self.assertAllEqual(c, data)
@test_util.run_deprecated_v1
def testConstParallelGPU(self):
@ -139,11 +139,11 @@ class StackOpTest(test.TestCase):
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl.eval(), data)
self.assertAllEqual(cl, data)
data = self.randn(shape, np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c.eval(), data)
self.assertAllEqual(c, data)
@test_util.run_deprecated_v1
def testGradientsAxis0(self):

View File

@ -32,19 +32,19 @@ class StringJoinOpTest(test.TestCase):
with self.cached_session():
output = string_ops.string_join([input0, input1])
self.assertAllEqual(output.eval(), [b"aa", b"ba"])
self.assertAllEqual(output, [b"aa", b"ba"])
output = string_ops.string_join([input0, input1], separator="--")
self.assertAllEqual(output.eval(), [b"a--a", b"b--a"])
self.assertAllEqual(output, [b"a--a", b"b--a"])
output = string_ops.string_join([input0, input1, input0], separator="--")
self.assertAllEqual(output.eval(), [b"a--a--a", b"b--a--b"])
self.assertAllEqual(output, [b"a--a--a", b"b--a--b"])
output = string_ops.string_join([input1] * 4, separator="!")
self.assertEqual(output.eval(), b"a!a!a!a")
output = string_ops.string_join([input2] * 2, separator="")
self.assertAllEqual(output.eval(), [[b"bb"], [b"cc"]])
self.assertAllEqual(output, [[b"bb"], [b"cc"]])
with self.assertRaises(ValueError): # Inconsistent shapes
string_ops.string_join([input0, input2]).eval()

View File

@ -60,7 +60,7 @@ class StringLengthOpTest(test.TestCase):
strings = [[["1", "12"], ["123", "1234"], ["12345", "123456"]]]
lengths = string_ops.string_length(strings, "some_name")
with self.session():
self.assertAllEqual(lengths.eval(), [[[1, 2], [3, 4], [5, 6]]])
self.assertAllEqual(lengths, [[[1, 2], [3, 4], [5, 6]]])
if __name__ == "__main__":

View File

@ -610,7 +610,7 @@ class TensorArrayTest(test.TestCase):
w2_grad = w1_grad.write(2, c(5.0))
# Assert that aggregation works correctly
self.assertAllEqual(c(12.00), w2_grad.read(2).eval())
self.assertAllEqual(c(12.00), w2_grad.read(2))
# Assert that if multiple_writes_aggregate is not enabled,
# multiple writes raise an exception.

View File

@ -325,7 +325,7 @@ class UnicodeTranscodeOpTest(test.TestCase, parameterized.TestCase):
with self.test_session():
output = string_ops.unicode_transcode(
string, input_encoding=input_encoding, output_encoding="UTF-8")
self.assertAllEqual(output.eval(), expected)
self.assertAllEqual(output, expected)
@test_util.run_deprecated_v1
def test_invalid_encoding_causes_errors(self):

View File

@ -201,8 +201,8 @@ class SwitchTestCase(test_util.TensorFlowTestCase):
one = constant_op.constant(1)
less_op = math_ops.less(zero, one)
_, switch_true = control_flow_ops.switch(data, less_op)
self.assertAllEqual([1, 2, 3], switch_true.values.eval())
self.assertAllEqual([0, 1, 2], switch_true.indices.eval())
self.assertAllEqual([1, 2, 3], switch_true.values)
self.assertAllEqual([0, 1, 2], switch_true.indices)
@test_util.run_deprecated_v1
def testIndexedSlicesGradient(self):

View File

@ -329,13 +329,13 @@ class GradientsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
y1 = math_ops.square(y)
y2 = math_ops.square(y1)
g = gradients.gradients([y, y2], x)
self.assertAllClose(17502.0, g[0].eval())
self.assertAllClose(17502.0, g[0])
g = gradients.gradients(y + y2, x)
self.assertAllClose(17502.0, g[0].eval())
self.assertAllClose(17502.0, g[0])
z = array_ops.identity(y)
z2 = array_ops.identity(y2)
g = gradients.gradients([z, z2], x)
self.assertAllClose(17502.0, g[0].eval())
self.assertAllClose(17502.0, g[0])
@test_util.run_v1_only("b/120545219")
def testPartialDerivatives(self):
@ -838,7 +838,7 @@ class IndexedSlicesToTensorTest(test_util.TensorFlowTestCase):
np_val = np.random.rand(4, 4, 4, 4).astype(np.float32)
c = constant_op.constant(np_val)
c_sparse = math_ops._as_indexed_slices(c)
self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
self.assertAllEqual(np_val.shape, c_sparse.dense_shape)
c_dense = math_ops.multiply(c_sparse, 1.0)
self.assertAllClose(np_val, self.evaluate(c_dense))
@ -857,7 +857,7 @@ class IndexedSlicesToTensorTest(test_util.TensorFlowTestCase):
sparse_list.append(c_sparse)
packed_dense = array_ops.stack(dense_list)
packed_sparse = array_ops.stack(sparse_list)
self.assertAllClose(packed_dense.eval(), self.evaluate(packed_sparse))
self.assertAllClose(packed_dense, self.evaluate(packed_sparse))
@test_util.run_v1_only("b/120545219")
def testInt64Indices(self):
@ -868,7 +868,7 @@ class IndexedSlicesToTensorTest(test_util.TensorFlowTestCase):
c_sparse = ops.IndexedSlices(
c_sparse.values,
math_ops.cast(c_sparse.indices, dtypes.int64), c_sparse.dense_shape)
self.assertAllEqual(np_val.shape, c_sparse.dense_shape.eval())
self.assertAllEqual(np_val.shape, c_sparse.dense_shape)
c_dense = math_ops.multiply(c_sparse, 1.0)
self.assertAllClose(np_val, self.evaluate(c_dense))
@ -1347,7 +1347,7 @@ class CustomGradientTest(test_util.TensorFlowTestCase, parameterized.TestCase):
g, = gradients_impl.gradients(output, alpha)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(g.eval(), [2.0])
self.assertAllEqual(g, [2.0])
self.assertAllEqual(g.eval(feed_dict={conditional: False}), [3.0])
def testRecursiveCustomGradient(self):
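The second assertion in the hunk above keeps its .eval() for a different reason: it needs a feed_dict, and neither self.evaluate() nor the assertAll* helpers offer a way to supply feeds, so the explicit call is the only option:

self.assertAllEqual(g, [2.0])                                        # default feeds suffice
self.assertAllEqual(g.eval(feed_dict={conditional: False}), [3.0])   # must stay explicit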

View File

@ -4153,14 +4153,14 @@ class ConvertImageTest(test_util.TensorFlowTestCase):
image = constant_op.constant(x_np)
y = image_ops.convert_image_dtype(image, output_dtype)
self.assertTrue(y.dtype == output_dtype)
self.assertAllClose(y.eval(), y_np, atol=1e-5)
self.assertAllClose(y, y_np, atol=1e-5)
if output_dtype in [
dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64
]:
y_saturate = image_ops.convert_image_dtype(
image, output_dtype, saturate=True)
self.assertTrue(y_saturate.dtype == output_dtype)
self.assertAllClose(y_saturate.eval(), y_np, atol=1e-5)
self.assertAllClose(y_saturate, y_np, atol=1e-5)
@test_util.run_deprecated_v1
def testNoConvert(self):
@ -4440,7 +4440,7 @@ class NonMaxSuppressionTest(test_util.TensorFlowTestCase):
iou_threshold = constant_op.constant(iou_threshold_np)
selected_indices = image_ops.non_max_suppression(
boxes, scores, max_output_size, iou_threshold)
self.assertAllClose(selected_indices.eval(), [3, 0, 5])
self.assertAllClose(selected_indices, [3, 0, 5])
@test_util.run_deprecated_v1
def testInvalidShape(self):
@ -4616,10 +4616,9 @@ class NonMaxSuppressionPaddedTest(test_util.TensorFlowTestCase):
self.assertEqual(selected_indices_padded.shape.is_fully_defined(), True)
self.assertEqual(selected_indices.shape.is_fully_defined(), False)
with self.cached_session():
self.assertAllClose(selected_indices_padded.eval(),
[3, 0, 5, 0, 0])
self.assertAllClose(selected_indices_padded, [3, 0, 5, 0, 0])
self.assertEqual(num_valid_padded.eval(), 3)
self.assertAllClose(selected_indices.eval(), [3, 0, 5])
self.assertAllClose(selected_indices, [3, 0, 5])
self.assertEqual(num_valid.eval(), 3)
@test_util.run_deprecated_v1
@ -4646,7 +4645,7 @@ class NonMaxSuppressionPaddedTest(test_util.TensorFlowTestCase):
# The output shape of the padded operation must be fully defined.
self.assertEqual(selected_indices.shape.is_fully_defined(), False)
with self.cached_session():
self.assertAllClose(selected_indices.eval(), [0, 2, 4])
self.assertAllClose(selected_indices, [0, 2, 4])
self.assertEqual(num_valid.eval(), 3)
@ -4672,7 +4671,7 @@ class NonMaxSuppressionWithOverlapsTest(test_util.TensorFlowTestCase):
overlaps, scores, max_output_size, overlap_threshold, score_threshold)
with self.cached_session():
self.assertAllClose(selected_indices.eval(), [1])
self.assertAllClose(selected_indices, [1])
class VerifyCompatibleImageShapesTest(test_util.TensorFlowTestCase):

View File

@ -412,8 +412,8 @@ class DivNoNanGradientTest(test.TestCase):
outputs = math_ops.div_no_nan(x, y)
with self.cached_session():
dx, dy = gradients.gradients(outputs, [x, y])
self.assertAllClose(dx.eval(), np.zeros(x.shape.as_list()))
self.assertAllClose(dy.eval(), np.zeros(y.shape.as_list()))
self.assertAllClose(dx, np.zeros(x.shape.as_list()))
self.assertAllClose(dy, np.zeros(y.shape.as_list()))
class MulNoNanGradientTest(test.TestCase):
@ -437,8 +437,8 @@ class MulNoNanGradientTest(test.TestCase):
outputs = math_ops.mul_no_nan(x, y)
with self.cached_session():
dx, dy = gradients.gradients(outputs, [x, y])
self.assertAllClose(dx.eval(), np.zeros(x.shape.as_list()))
self.assertAllClose(dy.eval(), x_vals)
self.assertAllClose(dx, np.zeros(x.shape.as_list()))
self.assertAllClose(dy, x_vals)
class XlogyTest(test.TestCase):

View File

@ -161,8 +161,8 @@ class EmbeddingColumnTest(test.TestCase):
self.assertItemsEqual(('embedding_weights:0',),
tuple([v.name for v in global_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0].eval())
self.assertAllEqual(expected_lookups, embedding_lookup.eval())
self.assertAllEqual(embedding_values, global_vars[0])
self.assertAllEqual(expected_lookups, embedding_lookup)
class SharedEmbeddingColumnTest(test.TestCase):
@ -307,9 +307,9 @@ class SharedEmbeddingColumnTest(test.TestCase):
tuple([v.name for v in global_vars]))
embedding_var = global_vars[0]
with _initialized_session():
self.assertAllEqual(embedding_values, embedding_var.eval())
self.assertAllEqual(expected_lookups_a, embedding_lookup_a.eval())
self.assertAllEqual(expected_lookups_b, embedding_lookup_b.eval())
self.assertAllEqual(embedding_values, embedding_var)
self.assertAllEqual(expected_lookups_a, embedding_lookup_a)
self.assertAllEqual(expected_lookups_b, embedding_lookup_b)
if __name__ == '__main__':
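For readers unfamiliar with what expected_lookups encodes in these embedding-column tests: the lookup reduces to a gather over the embedding table followed by the combiner. A toy sketch with a hypothetical 3x2 table and two ids per example (the values are illustrative, not the test's fixture); the V2 tests below exercise the same arithmetic through sequence features:

import tensorflow as tf

embedding_values = tf.constant([[1., 2.], [3., 5.], [7., 11.]])
ids = tf.constant([[0, 2], [1, 1]])  # two sparse ids per example
# With the default 'mean' combiner, each example's lookup is the mean of
# its gathered embedding rows.
lookups = tf.reduce_mean(tf.gather(embedding_values, ids), axis=1)
print(lookups.numpy())  # [[4.  6.5] [3.  5. ]]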

View File

@@ -167,8 +167,8 @@ class EmbeddingColumnTestV2(test.TestCase, parameterized.TestCase):
'sequence_features/bbb_embedding/embedding_weights:0',),
tuple([v.name for v in global_vars]))
with _initialized_session():
self.assertAllEqual(embedding_values, global_vars[0].eval())
self.assertAllEqual(expected_lookups, embedding_lookup.eval())
self.assertAllEqual(embedding_values, global_vars[0])
self.assertAllEqual(expected_lookups, embedding_lookup)
self.assertAllEqual(expected_lookups_sequence,
sequence_embedding_lookup[0].eval())
# The graph will still have SparseFillEmptyRows due to sequence being
@@ -341,8 +341,8 @@ class SharedEmbeddingColumnTestV2(test.TestCase, parameterized.TestCase):
tuple([v.name for v in global_vars]))
embedding_var = global_vars[0]
with _initialized_session():
self.assertAllEqual(embedding_values, embedding_var.eval())
self.assertAllEqual(expected_lookups_a, embedding_lookup_a.eval())
self.assertAllEqual(embedding_values, embedding_var)
self.assertAllEqual(expected_lookups_a, embedding_lookup_a)
self.assertAllEqual(expected_lookups_b,
embedding_lookup_b[0].eval())
# The graph will still have SparseFillEmptyRows due to sequence being
@@ -556,7 +556,7 @@ class DeviceSpecificEmbeddingColumnTestV2(test.TestCase,
embedding_var = global_vars[0]
with _initialized_session():
self.assertAllEqual(embedding_values, embedding_var.eval())
self.assertAllEqual(embedding_values, embedding_var)
eval_res = embedding_lookup.eval()
self.assertAllEqual(expected_lookups, eval_res)
context.Exit()
@@ -624,7 +624,7 @@ class DeviceSpecificEmbeddingColumnTestV2(test.TestCase,
embedding_var = global_vars[0]
with _initialized_session():
self.assertAllEqual(embedding_values, embedding_var.eval())
self.assertAllEqual(embedding_values, embedding_var)
eval_res = embedding_lookup.eval()
self.assertAllEqual(expected_lookups, eval_res)
context.Exit()

View File

@@ -199,12 +199,12 @@ class AdagradOptimizerTest(test.TestCase):
aggregated_update = adagrad.AdagradOptimizer(3.0).apply_gradients(
[(grad_aggregated, aggregated_update_var)])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(aggregated_update_var.eval(),
self.assertAllClose(aggregated_update_var,
self.evaluate(repeated_index_update_var))
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
self.assertAllClose(aggregated_update_var,
self.evaluate(repeated_index_update_var))
@test_util.run_deprecated_v1

View File

@@ -147,12 +147,12 @@ class AdamOptimizerTest(test.TestCase):
aggregated_update = adam.AdamOptimizer().apply_gradients(
[(grad_aggregated, aggregated_update_var)])
variables.global_variables_initializer().run()
self.assertAllClose(aggregated_update_var.eval(),
self.assertAllClose(aggregated_update_var,
self.evaluate(repeated_index_update_var))
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
self.assertAllClose(aggregated_update_var,
self.evaluate(repeated_index_update_var))
def doTestBasic(self, use_resource=False, use_callable_params=False):
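Both optimizer hunks above compare a "repeated index" sparse update against a pre-aggregated one; the two stay close only if duplicate indices are summed into a single row before the (nonlinear) accumulator update. A small numpy sketch of that aggregation step, assuming apply_gradients collapses IndexedSlices duplicates like this:

import numpy as np

indices = np.array([1, 1])                    # the same row appears twice
values = np.array([[0.2, 0.2], [0.1, 0.1]])   # per-occurrence gradients
agg = np.zeros((2, 2))
np.add.at(agg, indices, values)  # unbuffered scatter-add over duplicates
print(agg)  # row 1 becomes [0.3, 0.3]; row 0 stays zero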

View File

@@ -259,8 +259,8 @@ class SaverTest(test.TestCase):
graph_saver = saver_module.Saver([w3, w4])
self.evaluate(variables.global_variables_initializer())
graph_saver.restore(sess, eager_ckpt_prefix)
self.assertAllEqual(w3.eval(), 3.0)
self.assertAllEqual(w4.eval(), 4.0)
self.assertAllEqual(w3, 3.0)
self.assertAllEqual(w4, 4.0)
@test_util.run_in_graph_and_eager_modes
def testResourceSaveRestoreCachingDevice(self):
@@ -2609,8 +2609,8 @@ class CheckpointReaderTest(test.TestCase):
# Verifies get_tensor() returns the tensor value.
v0_tensor = reader.get_tensor("v0")
v1_tensor = reader.get_tensor("v1")
self.assertAllEqual(v0.eval(), v0_tensor)
self.assertAllEqual(v1.eval(), v1_tensor)
self.assertAllEqual(v0, v0_tensor)
self.assertAllEqual(v1, v1_tensor)
# Verifies get_tensor() fails for non-existent tensors.
with self.assertRaisesRegex(errors.NotFoundError,
"v3 not found in checkpoint"):

View File

@@ -152,7 +152,7 @@ class SlotCreatorTest(test.TestCase):
self.assertEqual("var/part_%d/slot" % i, slot.op.name)
self.assertEqual([2], slot.get_shape().as_list())
self.assertEqual(dtypes.float32, slot.dtype.base_dtype)
self.assertAllEqual([1.0, 2.5], slot.eval())
self.assertAllEqual([1.0, 2.5], slot)
self.assertAllEqual([2], si.full_shape)
self.assertAllEqual([i], si.var_offset)
self.assertAllEqual([1], si.var_shape)
@@ -173,7 +173,7 @@ class SlotCreatorTest(test.TestCase):
self.assertEqual("var/part_%d/slot" % i, slot.op.name)
self.assertEqual([], slot.get_shape().as_list())
self.assertEqual(dtypes.float32, slot.dtype.base_dtype)
self.assertAllEqual(1.0, slot.eval())
self.assertAllEqual(1.0, slot)
if __name__ == "__main__":

View File

@@ -467,7 +467,7 @@ class WarmStartingUtilTest(test.TestCase):
ws_util.warm_start(self.get_temp_dir(), vars_to_warm_start=[var])
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started (init overridden to ones).
self.assertAllEqual(var.eval(), prev_int_val)
self.assertAllEqual(var, prev_int_val)
def testWarmStart_ListOfStrings(self):
# Save checkpoint from which to warm-start.
@@ -487,7 +487,7 @@ class WarmStartingUtilTest(test.TestCase):
ws_util.warm_start(self.get_temp_dir(), vars_to_warm_start=["v1"])
self.evaluate(variables.global_variables_initializer())
# Verify weights were correctly warm-started (init overridden to ones).
self.assertAllEqual(var.eval(), prev_int_val)
self.assertAllEqual(var, prev_int_val)
def testWarmStart_ListOfRegexes(self):
# Save checkpoint from which to warm-start.
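For orientation, the warm-start flow these tests exercise looks roughly like the following graph-mode sketch; it assumes a checkpoint containing "v1" already exists under ckpt_dir (the path and variable name are illustrative):

import tensorflow.compat.v1 as tf1

ckpt_dir = "/tmp/prev_run"  # must already contain a checkpoint with "v1"
with tf1.Graph().as_default():
  # Initializer says ones, but warm-starting overrides it from the checkpoint.
  var = tf1.get_variable("v1", shape=[10, 1],
                         initializer=tf1.ones_initializer())
  tf1.train.warm_start(ckpt_dir, vars_to_warm_start=["v1"])
  with tf1.Session() as sess:
    sess.run(tf1.global_variables_initializer())
    print(sess.run(var))  # the checkpointed value, not ones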
@@ -524,10 +524,10 @@ class WarmStartingUtilTest(test.TestCase):
self.evaluate(variables.global_variables_initializer())
# Verify the selection of weights were correctly warm-started (init
# overridden to ones).
self.assertAllEqual(v1.eval(), prev_v1_val)
self.assertAllEqual(v1_momentum.eval(), prev_v1_momentum_val)
self.assertAllEqual(v2.eval(), prev_v2_val)
self.assertAllEqual(v2_momentum.eval(), np.zeros([10, 1]))
self.assertAllEqual(v1, prev_v1_val)
self.assertAllEqual(v1_momentum, prev_v1_momentum_val)
self.assertAllEqual(v2, prev_v2_val)
self.assertAllEqual(v2_momentum, np.zeros([10, 1]))
def testWarmStart_SparseColumnIntegerized(self):
# Create feature column.