Replace many calls to eval() with self.evaluate()

In order to get tests running in eager mode we need to remove invalid
function calls such as eval(). This change is simply a search-and-replace
in tests where that was safe. As a result, a few more tests now work in
eager mode.

PiperOrigin-RevId: 221836866
Gaurav Jain 2018-11-16 12:44:53 -08:00 committed by TensorFlower Gardener
parent 704961fe72
commit 4fe22bc9da
209 changed files with 2586 additions and 2399 deletions
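To illustrate the pattern applied throughout the diffs below, here is a minimal, hypothetical test (the test name and values are made up for illustration; it is not part of this commit). Tensor.eval() requires a default graph-mode session and is unavailable under eager execution, while tf.test.TestCase.evaluate() fetches a tensor's value in both graph and eager modes, which is what makes the mechanical replacement safe.

import tensorflow as tf


class EvaluateSketchTest(tf.test.TestCase):
  """Hypothetical test showing the eval() -> self.evaluate() rewrite."""

  def testAddition(self):
    total = tf.add([1.0, 2.0], [3.0, 4.0])
    # Before: requires a default session, so it breaks under eager execution.
    #   self.assertAllClose([4.0, 6.0], total.eval())
    # After: self.evaluate() returns the value in both graph and eager modes.
    self.assertAllClose([4.0, 6.0], self.evaluate(total))


if __name__ == "__main__":
  tf.test.main()

Note that the hunks below leave side-effecting calls such as update.run() and variables.global_variables_initializer().run() untouched; only value-fetching eval() calls are rewritten.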


@ -50,8 +50,8 @@ class AdagradDAOptimizerTest(xla_test.XLATestCase):
zip([grads0, grads1], [var0, var1]), global_step=global_step)
variables.global_variables_initializer().run()
self.assertAllClose([0.0, 0.0], var0.eval())
self.assertAllClose([0.0, 0.0], var1.eval())
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run a step of AdagradDA
update.run()
@ -63,9 +63,9 @@ class AdagradDAOptimizerTest(xla_test.XLATestCase):
# For -0.1*3.0*(0.1 - 0)/(0 + sqrt(0.1 + 0.1*0.1)) = -0.904534
# similarly for others.
self.assertAllCloseAccordingToType(
np.array([-0.904534, -1.603567]), var0.eval())
np.array([-0.904534, -1.603567]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([-0.094821, -0.189358]), var1.eval())
np.array([-0.094821, -0.189358]), self.evaluate(var1))
def testAdagradDAwithoutRegularizationBasic2(self):
for dtype in self.float_types:
@ -87,16 +87,16 @@ class AdagradDAOptimizerTest(xla_test.XLATestCase):
zip([grads0, grads1], [var0, var1]), global_step=global_step)
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
self.assertAllCloseAccordingToType([4.0, 3.0], var1.eval())
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1))
# Run a step of AdagradDA
update.run()
self.assertAllCloseAccordingToType(
np.array([-0.904534, -1.603567]), var0.eval())
np.array([-0.904534, -1.603567]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([-0.094821, -0.189358]), var1.eval())
np.array([-0.094821, -0.189358]), self.evaluate(var1))
def testAdagradDAWithL1(self):
for dtype in self.float_types:
@ -118,16 +118,16 @@ class AdagradDAOptimizerTest(xla_test.XLATestCase):
zip([grads0, grads1], [var0, var1]), global_step=global_step)
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
self.assertAllCloseAccordingToType([4.0, 3.0], var1.eval())
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1))
# Run a step of AdagradDA
update.run()
self.assertAllCloseAccordingToType(
np.array([-0.895489, -1.59555]), var0.eval())
np.array([-0.895489, -1.59555]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([-0.085339, -0.17989]), var1.eval())
np.array([-0.085339, -0.17989]), self.evaluate(var1))
def testAdagradDAWithL1_L2(self):
for dtype in self.float_types:
@ -149,16 +149,16 @@ class AdagradDAOptimizerTest(xla_test.XLATestCase):
zip([grads0, grads1], [var0, var1]), global_step=global_step)
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
self.assertAllCloseAccordingToType([4.0, 3.0], var1.eval())
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1))
# Run a step of AdagradDA
update.run()
self.assertAllCloseAccordingToType(
np.array([-0.046907, -0.093659]), var0.eval())
np.array([-0.046907, -0.093659]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([-0.004275, -0.009023]), var1.eval())
np.array([-0.004275, -0.009023]), self.evaluate(var1))
if __name__ == "__main__":


@ -42,17 +42,19 @@ class AdagradOptimizerTest(xla_test.XLATestCase):
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval(),
np.array([-1.6026098728179932, -0.6026098728179932]),
self.evaluate(var0),
float_rtol=1e-5)
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), var1.eval(),
np.array([2.715679168701172, 3.715679168701172]),
self.evaluate(var1),
float_rtol=1e-5)
def testTensorLearningRate(self):
@ -68,17 +70,19 @@ class AdagradOptimizerTest(xla_test.XLATestCase):
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval(),
np.array([-1.6026098728179932, -0.6026098728179932]),
self.evaluate(var0),
float_rtol=1e-5)
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), var1.eval(),
np.array([2.715679168701172, 3.715679168701172]),
self.evaluate(var1),
float_rtol=1e-5)
def testSharing(self):
@ -103,18 +107,20 @@ class AdagradOptimizerTest(xla_test.XLATestCase):
variables.global_variables_initializer().run()
# Fetch params to validate initial values.
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Mix the first and the second adagrad for 3 steps.
ada_update1.run()
ada_update2.run()
ada_update1.run()
# Validate updated params (the same as with only 1 Adagrad).
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval(),
np.array([-1.6026098728179932, -0.6026098728179932]),
self.evaluate(var0),
float_rtol=1e-5)
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), var1.eval(),
np.array([2.715679168701172, 3.715679168701172]),
self.evaluate(var1),
float_rtol=1e-5)


@ -75,23 +75,24 @@ class AdamOptimizerTest(xla_test.XLATestCase):
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
update.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testTensorLearningRate(self):
for dtype in self.float_types:
@ -117,23 +118,24 @@ class AdamOptimizerTest(xla_test.XLATestCase):
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
update.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSharing(self):
for dtype in self.float_types:
@ -162,13 +164,14 @@ class AdamOptimizerTest(xla_test.XLATestCase):
beta1_power, beta2_power = opt._get_beta_accumulators()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of intertwined Adam1 and Adam2.
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
if t % 2 == 0:
update1.run(feed_dict={grads0: grads0_np, grads1: grads1_np})
else:
@ -178,8 +181,8 @@ class AdamOptimizerTest(xla_test.XLATestCase):
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
if __name__ == "__main__":


@ -78,8 +78,8 @@ class AdaMaxOptimizerTest(xla_test.XLATestCase):
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power = opt._get_beta_accumulators()
@ -87,14 +87,17 @@ class AdaMaxOptimizerTest(xla_test.XLATestCase):
for t in range(1, 4):
update.run()
self.assertAllCloseAccordingToType(0.9**(t + 1), beta1_power.eval())
self.assertAllCloseAccordingToType(0.9**(t + 1),
self.evaluate(beta1_power))
var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval(), rtol=1e-2)
self.assertAllCloseAccordingToType(var1_np, var1.eval(), rtol=1e-2)
self.assertAllCloseAccordingToType(
var0_np, self.evaluate(var0), rtol=1e-2)
self.assertAllCloseAccordingToType(
var1_np, self.evaluate(var1), rtol=1e-2)
self.assertEqual("var0_%d/AdaMax:0" % (i,),
opt.get_slot(var=var0, name="m").name)
@ -118,22 +121,23 @@ class AdaMaxOptimizerTest(xla_test.XLATestCase):
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power = opt._get_beta_accumulators()
# Run 3 steps of AdaMax
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
update.run()
var0_np, m0, v0 = adamax_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adamax_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
if __name__ == "__main__":
test.main()


@ -90,8 +90,8 @@ class AddSignTest(xla_test.XLATestCase):
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 7 steps of AddSign
# first 4 steps with positive gradient
@ -125,8 +125,8 @@ class AddSignTest(xla_test.XLATestCase):
# Validate updated params
self.assertAllCloseAccordingToType(
var0_np, var0.eval(), half_rtol=1e-2)
self.assertAllCloseAccordingToType(var1_np, var1.eval())
var0_np, self.evaluate(var0), half_rtol=1e-2)
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testDense(self):
decay_steps = 10


@ -43,7 +43,7 @@ class ClusteringTest(xla_test.XLATestCase):
input1 = constant_op.constant(val1, name="const1")
input2 = constant_op.constant(val2, name="const2")
output = math_ops.add(input1, input2)
result = output.eval()
result = self.evaluate(output)
self.assertAllClose(result, expected, rtol=1e-3)
def testAddFromCpuMultiple(self):
@ -57,7 +57,7 @@ class ClusteringTest(xla_test.XLATestCase):
with self.test_scope():
output = math_ops.add(input1, input2)
for _ in xrange(10):
result = output.eval()
result = self.evaluate(output)
self.assertAllClose(result, expected, rtol=1e-3)
def testDeadlock(self):


@ -72,7 +72,7 @@ class ConcatTest(xla_test.XLATestCase):
x2 = constant_op.constant(p2)
with self.test_scope():
c = array_ops.concat([x1, x2], 0)
result = c.eval()
result = self.evaluate(c)
self.assertAllEqual(result[:2, :], p1)
self.assertAllEqual(result[2:, :], p2)
@ -150,7 +150,7 @@ class ConcatTest(xla_test.XLATestCase):
[float(x) for x in grad_inp.flatten()], shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, 1)
result = concated_grad.eval()
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
def testGradientsSimpleAll(self):
@ -177,7 +177,7 @@ class ConcatTest(xla_test.XLATestCase):
[float(x) for x in grad_inp.flatten()], shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, 0)
result = concated_grad.eval()
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
@ -205,7 +205,7 @@ class ConcatTest(xla_test.XLATestCase):
[float(x) for x in grad_inp.flatten()], shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, 2)
result = concated_grad.eval()
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
@ -242,7 +242,7 @@ class ConcatTest(xla_test.XLATestCase):
[float(x) for x in grad_inp.flatten()], shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, concat_dim)
result = concated_grad.eval()
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
@ -280,7 +280,7 @@ class ConcatTest(xla_test.XLATestCase):
with self.test_scope():
concat_list_t = array_ops.concat([c1, c2], 0)
concat_tuple_t = array_ops.concat((c1, c2), 0)
self.assertAllEqual(concat_list_t.eval(), concat_tuple_t.eval())
self.assertAllEqual(concat_list_t.eval(), self.evaluate(concat_tuple_t))
def testConcatNoScalars(self):
with self.cached_session():


@ -85,7 +85,7 @@ class Conv3DTransposeTest(xla_test.XLATestCase):
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = output.eval()
value = self.evaluate(output)
# We count the number of cells being added at the locations in the output.
# At the center, #cells = kernel_depth * kernel_height * kernel_width
@ -135,7 +135,7 @@ class Conv3DTransposeTest(xla_test.XLATestCase):
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = output.eval()
value = self.evaluate(output)
for n in xrange(x_shape[0]):
for k in xrange(f_shape[3]):
@ -173,7 +173,7 @@ class Conv3DTransposeTest(xla_test.XLATestCase):
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="VALID")
value = output.eval()
value = self.evaluate(output)
cache_values = np.zeros(y_shape, dtype=np.float32)


@ -129,7 +129,7 @@ class FIFOQueueTest(xla_test.XLATestCase):
enqueue_op.run()
for i in xrange(len(elems)):
vals = dequeued_t.eval()
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
@ -192,9 +192,9 @@ class FIFOQueueTest(xla_test.XLATestCase):
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, size.eval())
self.assertEqual(1, self.evaluate(size))
dequeued_t.op.run()
self.assertEqual(0, size.eval())
self.assertEqual(0, self.evaluate(size))
if __name__ == "__main__":


@ -50,14 +50,14 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], var0.eval())
self.assertAllClose([0.0, 0.0], var1.eval())
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run Ftrl for a few steps
for _ in range(steps):
ftrl_update.run()
return var0.eval(), var1.eval()
return self.evaluate(var0), self.evaluate(var1)
def equivAdagradTest_AdagradPart(self, steps, dtype):
var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
@ -65,14 +65,14 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
adagrad_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], var0.eval())
self.assertAllClose([0.0, 0.0], var1.eval())
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run Adagrad for a few steps
for _ in range(steps):
adagrad_update.run()
return var0.eval(), var1.eval()
return self.evaluate(var0), self.evaluate(var1)
def equivGradientDescentTest_FtrlPart(self, steps, dtype):
var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
@ -85,14 +85,14 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], var0.eval())
self.assertAllClose([0.0, 0.0], var1.eval())
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run Ftrl for a few steps
for _ in range(steps):
ftrl_update.run()
return var0.eval(), var1.eval()
return self.evaluate(var0), self.evaluate(var1)
def equivGradientDescentTest_GradientDescentPart(self, steps, dtype):
var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
@ -100,14 +100,14 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
sgd_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], var0.eval())
self.assertAllClose([0.0, 0.0], var1.eval())
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run GradientDescent for a few steps
for _ in range(steps):
sgd_update.run()
return var0.eval(), var1.eval()
return self.evaluate(var0), self.evaluate(var1)
def testFtrlwithoutRegularization(self):
for dtype in self.float_types:
@ -124,8 +124,8 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], var0.eval())
self.assertAllClose([0.0, 0.0], var1.eval())
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run 3 steps FTRL
for _ in range(3):
@ -134,12 +134,12 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-2.60260963, -4.29698515]),
var0.eval(),
self.evaluate(var0),
float_rtol=1e-4,
half_rtol=1e-2)
self.assertAllCloseAccordingToType(
np.array([-0.28432083, -0.56694895]),
var1.eval(),
self.evaluate(var1),
float_rtol=1e-5,
half_rtol=1e-2)
@ -158,8 +158,8 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([4.0, 3.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 3 steps FTRL
for _ in range(3):
@ -167,10 +167,14 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-2.55607247, -3.98729396]), var0.eval(), 1e-5, 1e-5,
np.array([-2.55607247, -3.98729396]),
self.evaluate(var0),
1e-5,
1e-5,
float_rtol=1e-4)
self.assertAllCloseAccordingToType(
np.array([-0.28232238, -0.56096673]), var1.eval(), 1e-5, 1e-5)
np.array([-0.28232238, -0.56096673]), self.evaluate(var1), 1e-5,
1e-5)
def testFtrlWithL1(self):
for dtype in self.float_types:
@ -187,8 +191,8 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([4.0, 3.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 10 steps FTRL
for _ in range(10):
@ -197,12 +201,14 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-7.66718769, -10.91273689]),
var0.eval(),
self.evaluate(var0),
rtol=1e-4,
bfloat16_rtol=1e-1,
bfloat16_atol=1e-1)
self.assertAllCloseAccordingToType(
np.array([-0.93460727, -1.86147261]), var1.eval(), rtol=1e-4)
np.array([-0.93460727, -1.86147261]),
self.evaluate(var1),
rtol=1e-4)
def testFtrlWithL1_L2(self):
for dtype in self.float_types:
@ -219,8 +225,8 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([4.0, 3.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 10 steps FTRL
for _ in range(10):
@ -228,9 +234,13 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-0.24059935, -0.46829352]), var0.eval(), rtol=1e-5)
np.array([-0.24059935, -0.46829352]),
self.evaluate(var0),
rtol=1e-5)
self.assertAllCloseAccordingToType(
np.array([-0.02406147, -0.04830509]), var1.eval(), rtol=1e-5)
np.array([-0.02406147, -0.04830509]),
self.evaluate(var1),
rtol=1e-5)
def testFtrlWithL1_L2_L2Shrinkage(self):
"""Test the new FTRL op with support for l2 shrinkage.
@ -254,8 +264,8 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
self.assertAllCloseAccordingToType([4.0, 3.0], var1.eval())
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1))
# Run 10 steps FTRL
for _ in range(10):
@ -263,9 +273,13 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-0.22578996, -0.44345799]), var0.eval(), rtol=1e-4)
np.array([-0.22578996, -0.44345799]),
self.evaluate(var0),
rtol=1e-4)
self.assertAllCloseAccordingToType(
np.array([-0.14378493, -0.13229476]), var1.eval(), rtol=1e-4)
np.array([-0.14378493, -0.13229476]),
self.evaluate(var1),
rtol=1e-4)
def testFtrlWithL2ShrinkageDoesNotChangeLrSchedule(self):
"""Verifies that l2 shrinkage in FTRL does not change lr schedule."""
@ -291,8 +305,8 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
update1 = opt1.apply_gradients([(grads1, var1)])
variables.global_variables_initializer().run()
self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
self.assertAllCloseAccordingToType([1.0, 2.0], var1.eval())
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var1))
# Run 10 steps FTRL
for _ in range(10):
@ -301,7 +315,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
# var0 is experiencing L2 shrinkage so it should be smaller than var1
# in magnitude.
self.assertTrue((var0.eval()**2 < var1.eval()**2).all())
self.assertTrue((var0.eval()**2 < self.evaluate(var1)**2).all())
accum0 = list(opt0._slots["accum"].values())[0].eval()
accum1 = list(opt1._slots["accum"].values())[0].eval()
# L2 shrinkage should not change how we update grad accumulator.


@ -120,8 +120,8 @@ class LRNTest(xla_test.XLATestCase):
with self.test_scope():
actual = gen_nn_ops.lrn_grad(out_grads, in_image, out_image,
depth_radius, bias, alpha, beta)
expected_val = expected.eval()
actual_val = actual.eval()
expected_val = self.evaluate(expected)
actual_val = self.evaluate(actual)
self.assertAllClose(actual_val, expected_val, rtol=1e-3)


@ -61,37 +61,43 @@ class MomentumOptimizerTest(xla_test.XLATestCase):
self.assertFalse(slot1 in variables.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Step 1: the momentum accumulators where 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
self.assertAllCloseAccordingToType(
np.array([0.1, 0.1]), self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([0.01, 0.01]), self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval())
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval())
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
self.evaluate(var1))
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval())
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval())
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), var0.eval())
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - (
(0.9 * 0.01 + 0.01) * 2.0)
]), var1.eval())
2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
3.98 - ((0.9 * 0.01 + 0.01) * 2.0)
]), self.evaluate(var1))
def testNesterovMomentum(self):
for dtype in self.float_types:
@ -115,8 +121,8 @@ class MomentumOptimizerTest(xla_test.XLATestCase):
var0_np, accum0_np, var0_np * 0.8, 0.1, 0.9)
var1_np, accum1_np = self._update_nesterov_momentum_numpy(
var1_np, accum1_np, 0.9, 0.1, 0.9)
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testTensorLearningRateAndMomentum(self):
for dtype in self.float_types:
@ -141,37 +147,43 @@ class MomentumOptimizerTest(xla_test.XLATestCase):
self.assertFalse(slot1 in variables.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Step 1: the momentum accumulators where 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
self.assertAllCloseAccordingToType(
np.array([0.1, 0.1]), self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([0.01, 0.01]), self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval())
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval())
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
self.evaluate(var1))
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval())
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval())
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), var0.eval())
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - (
(0.9 * 0.01 + 0.01) * 2.0)
]), var1.eval())
2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
3.98 - ((0.9 * 0.01 + 0.01) * 2.0)
]), self.evaluate(var1))
if __name__ == "__main__":


@ -91,8 +91,8 @@ class PowerSignTest(xla_test.XLATestCase):
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 7 steps of powersign
# first 4 steps with positive gradient
@ -125,8 +125,8 @@ class PowerSignTest(xla_test.XLATestCase):
)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testDense(self):
decay_steps = 10


@ -45,15 +45,17 @@ class ProximalAdagradOptimizerTest(xla_test.XLATestCase):
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([0.0, 0.0], var0.eval())
self.assertAllClose([0.0, 0.0], var1.eval())
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run 3 steps Proximal Adagrad.
for _ in range(3):
update.run()
self.assertAllClose(np.array([-2.60260963, -4.29698515]), var0.eval())
self.assertAllClose(np.array([-0.28432083, -0.56694895]), var1.eval())
self.assertAllClose(
np.array([-2.60260963, -4.29698515]), self.evaluate(var0))
self.assertAllClose(
np.array([-0.28432083, -0.56694895]), self.evaluate(var1))
opt_vars = opt.variables()
self.assertStartsWith(opt_vars[0].name, var0._shared_name)
self.assertStartsWith(opt_vars[1].name, var1._shared_name)
@ -74,14 +76,14 @@ class ProximalAdagradOptimizerTest(xla_test.XLATestCase):
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([4.0, 3.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 3 steps Proximal Adagrad.
for _ in range(3):
update.run()
self.assertAllClose(np.array([-1.60261, -2.296985]), var0.eval())
self.assertAllClose(np.array([3.715679, 2.433051]), var1.eval())
self.assertAllClose(np.array([-1.60261, -2.296985]), self.evaluate(var0))
self.assertAllClose(np.array([3.715679, 2.433051]), self.evaluate(var1))
def testProximalAdagradWithL1(self):
with self.cached_session(), self.test_scope():
@ -98,14 +100,14 @@ class ProximalAdagradOptimizerTest(xla_test.XLATestCase):
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([4.0, 3.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 10 steps Proximal Adagrad
for _ in range(10):
update.run()
self.assertAllClose(np.array([-6.663634, -9.190331]), var0.eval())
self.assertAllClose(np.array([2.959304, 1.029232]), var1.eval())
self.assertAllClose(np.array([-6.663634, -9.190331]), self.evaluate(var0))
self.assertAllClose(np.array([2.959304, 1.029232]), self.evaluate(var1))
def testProximalAdagradWithL1_L2(self):
with self.cached_session(), self.test_scope():
@ -122,15 +124,15 @@ class ProximalAdagradOptimizerTest(xla_test.XLATestCase):
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([4.0, 3.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 10 steps Proximal Adagrad.
for _ in range(10):
update.run()
self.assertAllClose(np.array([-0.0495, -0.0995]), var0.eval())
self.assertAllClose(np.array([-0.0045, -0.0095]), var1.eval())
self.assertAllClose(np.array([-0.0495, -0.0995]), self.evaluate(var0))
self.assertAllClose(np.array([-0.0045, -0.0095]), self.evaluate(var1))
def applyOptimizer(self, opt, steps=5):
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
@ -141,14 +143,14 @@ class ProximalAdagradOptimizerTest(xla_test.XLATestCase):
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run ProximalAdagrad for a few steps
for _ in range(steps):
update.run()
return var0.eval(), var1.eval()
return self.evaluate(var0), self.evaluate(var1)
def testEquivAdagradwithoutRegularization(self):
with self.cached_session(), self.test_scope():


@ -42,15 +42,15 @@ class ProximalGradientDescentOptimizerTest(xla_test.XLATestCase):
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([0.0, 0.0], var0.eval())
self.assertAllClose([0.0, 0.0], var1.eval())
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run 3 steps Proximal Gradient Descent.
for _ in range(3):
update.run()
self.assertAllClose(np.array([-0.9, -1.8]), var0.eval())
self.assertAllClose(np.array([-0.09, -0.18]), var1.eval())
self.assertAllClose(np.array([-0.9, -1.8]), self.evaluate(var0))
self.assertAllClose(np.array([-0.09, -0.18]), self.evaluate(var1))
def testProximalGradientDescentwithoutRegularization2(self):
with self.cached_session(), self.test_scope():
@ -64,15 +64,15 @@ class ProximalGradientDescentOptimizerTest(xla_test.XLATestCase):
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([4.0, 3.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 3 steps Proximal Gradient Descent
for _ in range(3):
update.run()
self.assertAllClose(np.array([0.1, 0.2]), var0.eval())
self.assertAllClose(np.array([3.91, 2.82]), var1.eval())
self.assertAllClose(np.array([0.1, 0.2]), self.evaluate(var0))
self.assertAllClose(np.array([3.91, 2.82]), self.evaluate(var1))
def testProximalGradientDescentWithL1(self):
with self.cached_session(), self.test_scope():
@ -86,15 +86,15 @@ class ProximalGradientDescentOptimizerTest(xla_test.XLATestCase):
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([4.0, 3.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 10 steps proximal gradient descent.
for _ in range(10):
update.run()
self.assertAllClose(np.array([-1.988, -3.988001]), var0.eval())
self.assertAllClose(np.array([3.67, 2.37]), var1.eval())
self.assertAllClose(np.array([-1.988, -3.988001]), self.evaluate(var0))
self.assertAllClose(np.array([3.67, 2.37]), self.evaluate(var1))
def testProximalGradientDescentWithL1_L2(self):
with self.cached_session(), self.test_scope():
@ -108,15 +108,15 @@ class ProximalGradientDescentOptimizerTest(xla_test.XLATestCase):
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([4.0, 3.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))
# Run 10 steps Proximal Gradient Descent
for _ in range(10):
update.run()
self.assertAllClose(np.array([-0.0495, -0.0995]), var0.eval())
self.assertAllClose(np.array([-0.0045, -0.0095]), var1.eval())
self.assertAllClose(np.array([-0.0495, -0.0995]), self.evaluate(var0))
self.assertAllClose(np.array([-0.0045, -0.0095]), self.evaluate(var1))
def applyOptimizer(self, opt, steps=5):
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0])
@ -127,14 +127,14 @@ class ProximalGradientDescentOptimizerTest(xla_test.XLATestCase):
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run ProximalAdagrad for a few steps
for _ in range(steps):
update.run()
return var0.eval(), var1.eval()
return self.evaluate(var0), self.evaluate(var1)
def testEquivGradientDescentwithoutRegularization(self):
with self.cached_session(), self.test_scope():


@ -63,7 +63,7 @@ class QrOpTest(xla_test.XLATestCase, parameterized.TestCase):
# Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
xx = math_ops.matmul(x, x, adjoint_a=True)
identity = array_ops.matrix_band_part(array_ops.ones_like(xx), 0, 0)
precision = self.AdjustedNorm(xx.eval() - identity.eval())
precision = self.AdjustedNorm(xx.eval() - self.evaluate(identity))
self.assertTrue(np.all(precision < 5.0))
def _test(self, dtype, shape, full_matrices):


@ -92,8 +92,8 @@ class RmspropTest(xla_test.XLATestCase):
self.assertTrue(mom1 is not None)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of RMSProp
for _ in range(3):
@ -118,14 +118,14 @@ class RmspropTest(xla_test.XLATestCase):
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(mg0_np, mg0.eval())
self.assertAllCloseAccordingToType(mg1_np, mg1.eval())
self.assertAllCloseAccordingToType(rms0_np, rms0.eval())
self.assertAllCloseAccordingToType(rms1_np, rms1.eval())
self.assertAllCloseAccordingToType(mom0_np, mom0.eval())
self.assertAllCloseAccordingToType(mom1_np, mom1.eval())
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))
self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
if __name__ == "__main__":


@ -79,7 +79,8 @@ class TensorArrayTest(xla_test.XLATestCase):
c0 = w2.stack()
self.assertAllEqual(
convert([[[4.0, 5.0]], [[6.0, 7.0]], [[8.0, 9.0]]]), c0.eval())
convert([[[4.0, 5.0]], [[6.0, 7.0]], [[8.0, 9.0]]]),
self.evaluate(c0))
def testTensorArrayWritePack(self):
for dtype in self.numeric_tf_types:
@ -97,7 +98,7 @@ class TensorArrayTest(xla_test.XLATestCase):
c0 = w2.stack()
self.assertAllEqual([3, 0, 1], c0.eval().shape)
self.assertAllEqual([3, 0, 1], self.evaluate(c0).shape)
def _testTensorArrayWriteConcat(self, tf_dtype):
with self.cached_session(), self.test_scope():
@ -113,8 +114,8 @@ class TensorArrayTest(xla_test.XLATestCase):
c0 = w2.concat()
self.assertAllEqual(
convert([[4.0, 5.0], [104.0, 105.0], [6.0, 7.0],
[106.0, 107.0], [8.0, 9.0], [204.0, 205.0]]), c0.eval())
convert([[4.0, 5.0], [104.0, 105.0], [6.0, 7.0], [106.0, 107.0],
[8.0, 9.0], [204.0, 205.0]]), self.evaluate(c0))
def testTensorArrayWriteConcat(self):
for dtype in self.numeric_tf_types:
@ -341,7 +342,7 @@ class TensorArrayTest(xla_test.XLATestCase):
r0_bad = gen_data_flow_ops.tensor_array_read_v3(
handle=w0.handle, index=0, dtype=dtype2, flow_in=w0.flow)
with self.assertRaisesOpError("TensorArray dtype is "):
r0_bad.eval()
self.evaluate(r0_bad)
# Test reading from a different index than the one we wrote to
w0.read(1)
@ -422,7 +423,7 @@ class TensorArrayTest(xla_test.XLATestCase):
w2 = h2.write(0, 5.0)
r2 = w2.read(0)
r = r1 + r2
self.assertAllClose(9.0, r.eval())
self.assertAllClose(9.0, self.evaluate(r))
def _testTensorArrayGradientWriteReadType(self, dtype):
with self.cached_session() as session, self.test_scope():
@ -526,7 +527,7 @@ class TensorArrayTest(xla_test.XLATestCase):
with ops.control_dependencies([r0_readtwice]):
r1_readtwice = w_readtwice.read(0)
self.assertAllEqual([1.0, -1.0], r1_readtwice.eval())
self.assertAllEqual([1.0, -1.0], self.evaluate(r1_readtwice))
def _testTensorArrayGradientUnpackRead(self):
with self.cached_session() as session, self.test_scope():
@ -592,7 +593,7 @@ class TensorArrayTest(xla_test.XLATestCase):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
s = ta.size()
self.assertAllEqual(3, s.eval())
self.assertAllEqual(3, self.evaluate(s))
def testWriteCloseTensorArray(self):
with self.cached_session(), self.test_scope():
@ -722,7 +723,7 @@ class TensorArrayTest(xla_test.XLATestCase):
# r = acc2.stack()
# grad = gradients_impl.gradients(r, [x])[0]
# self.assertAllClose(31.0, grad.eval())
# self.assertAllClose(31.0, self.evaluate(grad))
def testSumOfTwoReadVariablesWithoutRepeatGrad(self):
with self.cached_session() as session, self.test_scope():
@ -912,7 +913,7 @@ class TensorArrayTest(xla_test.XLATestCase):
self.assertEqual(0, ta.size().eval())
ta = ta.unstack(array_ops.zeros([0, 3, 5]))
packed = ta.stack()
self.assertAllEqual([0, 3, 5], packed.eval().shape)
self.assertAllEqual([0, 3, 5], self.evaluate(packed).shape)
# Concatenating zero tensors along their first dimension gives a
# first dimension of zero
self.assertAllEqual([0, 5], ta.concat().eval().shape)
@ -1041,8 +1042,8 @@ class TensorArrayTest(xla_test.XLATestCase):
(read0, read1, size0, size1))
# Tests that the control dependencies was added and executed.
self.assertEqual(1, v0.eval())
self.assertEqual(1, v1.eval())
self.assertEqual(1, self.evaluate(v0))
self.assertEqual(1, self.evaluate(v1))
# Tests correct TensorArray.
self.assertEqual(read0_v, 0)


@ -39,13 +39,13 @@ class ZeroOut3Test(tf.test.TestCase):
with self.cached_session():
result = zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=-1)
with self.assertRaisesOpError("Need preserve_index >= 0, got -1"):
result.eval()
self.evaluate(result)
def testLarge(self):
with self.cached_session():
result = zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=17)
with self.assertRaisesOpError("preserve_index out of range"):
result.eval()
self.evaluate(result)
if __name__ == "__main__":


@ -110,7 +110,7 @@ class AudioFeatureGenerationTest(tf.test.TestCase):
left_context=1,
right_context=1)
self.assertAllEqual(
filterbanks.eval(),
self.evaluate(filterbanks),
[[479, 425, 479, 425, 436, 378], [479, 425, 436, 378, 410, 350],
[436, 378, 410, 350, 391, 325], [410, 350, 391, 325, 391, 325]])
@ -153,7 +153,7 @@ class AudioFeatureGenerationTest(tf.test.TestCase):
frame_stride=3,
zero_padding=True)
self.assertAllEqual(
filterbanks.eval(),
self.evaluate(filterbanks),
[[0, 0, 0, 0, 479, 425], [436, 378, 410, 350, 391, 325],
[374, 308, 362, 292, 352, 275]])


@ -292,9 +292,9 @@ class SparseTest(test.TestCase):
return
self.assertTrue(isinstance(b, sparse_tensor.SparseTensor))
with self.cached_session():
self.assertAllEqual(a.eval().indices, b.eval().indices)
self.assertAllEqual(a.eval().values, b.eval().values)
self.assertAllEqual(a.eval().dense_shape, b.eval().dense_shape)
self.assertAllEqual(a.eval().indices, self.evaluate(b).indices)
self.assertAllEqual(a.eval().values, self.evaluate(b).values)
self.assertAllEqual(a.eval().dense_shape, self.evaluate(b).dense_shape)
def testSerializeDeserialize(self):
test_cases = (


@ -339,7 +339,7 @@ class DebugWrapperSessionTest(test_util.TensorFlowTestCase):
with wrapper.as_default():
foo = constant_op.constant(42, name="foo")
self.assertEqual(42, foo.eval())
self.assertEqual(42, self.evaluate(foo))
self.assertEqual(foo, self._observer["run_fetches"])
def testWrapperShouldSupportSessionClose(self):


@ -159,7 +159,7 @@ class AllReduceTest(test_util.TensorFlowTestCase):
output_tensors = build_f(input_tensors, un_op)
sum_reduced = math_ops.add_n(output_tensors)
sum_reduced.op.run()
self.assertAllClose(sum_reduced.eval(), simple_sum.eval())
self.assertAllClose(sum_reduced.eval(), self.evaluate(simple_sum))
def _testRingAllReduce(self, num_workers, num_gpus, shape, subdiv):
start_time = time.time()


@ -215,7 +215,7 @@ class BackpropTest(test.TestCase):
self.assertAllClose(tf_grad.values.eval(), grad.values)
tf_opt.apply_gradients([(tf_grad, tf_embedding)]).run()
expected = tf_embedding.eval()
expected = self.evaluate(tf_embedding)
opt.apply_gradients([(grad, embedding)])
self.assertAllClose(expected, embedding.read_value())


@ -33,7 +33,7 @@ class GraphOnlyOpsTest(test_util.TensorFlowTestCase):
x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
z_tf = graph_only_ops.graph_zeros_like(x)
with self.cached_session():
self.assertAllClose(np.zeros((2, 3)), z_tf.eval())
self.assertAllClose(np.zeros((2, 3)), self.evaluate(z_tf))
def testGraphPlaceholder(self):
x_tf = graph_only_ops.graph_placeholder(dtypes.int32, shape=(1,))


@ -80,8 +80,8 @@ class TapeTest(test.TestCase):
tf_e = tf_d + tf_f
tf_da, tf_db = gradients_impl.gradients(tf_e, [tf_a, tf_b])
self.assertAllEqual(da, tf_da.eval())
self.assertAllEqual(db, tf_db.eval())
self.assertAllEqual(da, self.evaluate(tf_da))
self.assertAllEqual(db, self.evaluate(tf_db))
def testBasicFunctional(self):
@ -142,8 +142,8 @@ class TapeTest(test.TestCase):
tf_rr = 2 * math_ops.reduce_sum(tf_mm)
tf_da, tf_db = gradients_impl.gradients(tf_rr, [tf_a, tf_b])
self.assertAllEqual(da, tf_da.eval())
self.assertAllEqual(db, tf_db.eval())
self.assertAllEqual(da, self.evaluate(tf_da))
self.assertAllEqual(db, self.evaluate(tf_db))
def testGcTwoOutputs(self):

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -552,8 +552,8 @@ class FunctionTest(test.TestCase):
with self.session(graph=g):
v.initializer.run()
self.assertAllEqual(expected_val.eval(), actual_val.eval())
self.assertAllEqual(expected_shape, actual_shape.eval())
self.assertAllEqual(expected_val.eval(), self.evaluate(actual_val))
self.assertAllEqual(expected_shape, self.evaluate(actual_shape))
def testDefineErrors(self):
with ops.Graph().as_default():


@ -930,7 +930,7 @@ class ImportGraphDefTest(test.TestCase):
name="",
return_elements=["id:0"])
with self.cached_session():
self.assertEqual(5.0, t.eval())
self.assertEqual(5.0, self.evaluate(t))
def testInvalidInputForReturnOperations(self):
with ops.Graph().as_default():
@ -1071,7 +1071,7 @@ class ImportGraphDefTest(test.TestCase):
tensor_input = np.ones(input_shape, dtype=np.float32)
t = constant_op.constant(tensor_input, shape=input_shape)
g = array_ops.identity(t)
g.eval()
self.evaluate(g)
def testVersion(self):
v0 = versions.GRAPH_DEF_VERSION_MIN_CONSUMER
@ -1255,7 +1255,7 @@ class ImportGraphDefTest(test.TestCase):
z = TestFunc()
with self.cached_session():
z_val = z.eval()
z_val = self.evaluate(z)
self.assertEqual(z_val, -2.0)
def testImportGraphWithFunctionTwice(self):


@ -317,7 +317,7 @@ class OperationTest(test_util.TensorFlowTestCase):
values = [[2], [3], [5], [7]]
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, tensor.eval())
self.assertAllEqual(values, self.evaluate(tensor))
def testShapeTuple(self):
with self.cached_session():
@ -346,18 +346,18 @@ class OperationTest(test_util.TensorFlowTestCase):
tensor = ops.convert_to_tensor(
[constant_op.constant(row) for row in values])
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, tensor.eval())
self.assertAllEqual(values, self.evaluate(tensor))
tensor = ops.convert_to_tensor(
[[constant_op.constant(v) for v in row] for row in values])
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, tensor.eval())
self.assertAllEqual(values, self.evaluate(tensor))
def testConvertToTensorNestedMix(self):
with self.cached_session():
values = ([2], (3,), [constant_op.constant(5)], constant_op.constant([7]))
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(((2,), (3,), (5,), (7,)), tensor.eval())
self.assertAllEqual(((2,), (3,), (5,), (7,)), self.evaluate(tensor))
def testConvertToTensorPreferred(self):
with self.cached_session():
@ -2490,12 +2490,14 @@ class KernelLabelTest(test_util.TensorFlowTestCase):
# pylint: enable=protected-access
default_3 = test_ops.kernel_label()
self.assertAllEqual(b"My label is: default", default_1.eval())
self.assertAllEqual(b"My label is: default", default_2.eval())
self.assertAllEqual(b"My label is: default", default_3.eval())
self.assertAllEqual(b"My label is: overload_1", overload_1_1.eval())
self.assertAllEqual(b"My label is: overload_1", overload_1_2.eval())
self.assertAllEqual(b"My label is: overload_2", overload_2.eval())
self.assertAllEqual(b"My label is: default", self.evaluate(default_1))
self.assertAllEqual(b"My label is: default", self.evaluate(default_2))
self.assertAllEqual(b"My label is: default", self.evaluate(default_3))
self.assertAllEqual(b"My label is: overload_1",
self.evaluate(overload_1_1))
self.assertAllEqual(b"My label is: overload_1",
self.evaluate(overload_1_2))
self.assertAllEqual(b"My label is: overload_2", self.evaluate(overload_2))
class AsGraphDefTest(test_util.TensorFlowTestCase):


@ -46,7 +46,7 @@ class SparseTensorTest(test_util.TensorFlowTestCase):
self.assertEqual(sp.get_shape(), (4, 5))
with self.cached_session() as sess:
value = sp.eval()
value = self.evaluate(sp)
self.assertAllEqual(indices, value.indices)
self.assertAllEqual(values, value.values)
self.assertAllEqual(shape, value.dense_shape)
@ -85,7 +85,7 @@ class ConvertToTensorOrSparseTensorTest(test_util.TensorFlowTestCase):
value = [42, 43]
from_value = sparse_tensor.convert_to_tensor_or_sparse_tensor(
value)
self.assertAllEqual(value, from_value.eval())
self.assertAllEqual(value, self.evaluate(from_value))
def test_convert_sparse(self):
with self.cached_session():


@ -158,12 +158,11 @@ class AdadeltaOptimizerTest(test.TestCase):
loss, var_list=[var0])
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[[-111, -138]], var0.eval())
self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0))
if __name__ == "__main__":


@ -87,23 +87,24 @@ class AdamOptimizerTest(test.TestCase):
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power, beta2_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSparseDevicePlacement(self):
for index_dtype in [dtypes.int32, dtypes.int64]:
@ -141,12 +142,12 @@ class AdamOptimizerTest(test.TestCase):
[(grad_aggregated, aggregated_update_var)])
variables.global_variables_initializer().run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
self.evaluate(repeated_index_update_var))
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
self.evaluate(repeated_index_update_var))
def doTestBasic(self, use_callable_params=False):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
@ -226,23 +227,24 @@ class AdamOptimizerTest(test.TestCase):
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
beta1_power, beta2_power = get_beta_accumulators(opt, dtype)
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
@ -266,13 +268,14 @@ class AdamOptimizerTest(test.TestCase):
beta1_power, beta2_power = get_beta_accumulators(opt, dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 3 steps of intertwined Adam1 and Adam2.
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
self.assertAllCloseAccordingToType(0.9**t, self.evaluate(beta1_power))
self.assertAllCloseAccordingToType(0.999**t,
self.evaluate(beta2_power))
if t % 2 == 0:
update1.run()
else:
@ -282,8 +285,8 @@ class AdamOptimizerTest(test.TestCase):
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testSlotsUniqueEager(self):
with context.eager_mode():

View File
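
Note: the Adam hunks above validate the variables against an adam_update_numpy reference and check the beta-power accumulators (0.9**t, 0.999**t). A minimal NumPy sketch of that reference update, assuming the optimizer defaults these tests appear to use (lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):

import numpy as np

def adam_update_numpy(param, grad, t, m, v,
                      lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
  # t is the 1-indexed step; the effective step size is bias-corrected.
  lr_t = lr * np.sqrt(1 - beta2**t) / (1 - beta1**t)
  m = beta1 * m + (1 - beta1) * grad          # first-moment (mean) estimate
  v = beta2 * v + (1 - beta2) * grad * grad   # second-moment estimate
  param = param - lr_t * m / (np.sqrt(v) + epsilon)
  return param, m, v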

@ -289,8 +289,8 @@ class MomentumOptimizerTest(test.TestCase):
var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
var1_np, accum1_np = self._update_nesterov_momentum_numpy(
var1_np, accum1_np, 3, 2.0, 0.9)
self.assertAllClose(var0_np, var0.eval())
self.assertAllClose(var1_np, var1.eval())
self.assertAllClose(var0_np, self.evaluate(var0))
self.assertAllClose(var1_np, self.evaluate(var1))
def testSparseNesterovMomentum(self):
for dtype in [dtypes.float32, dtypes.float64]:
@ -329,8 +329,8 @@ class MomentumOptimizerTest(test.TestCase):
var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
var1_np, accum1_np = self._update_nesterov_momentum_numpy(
var1_np, accum1_np, 3, 2.0, 0.9)
self.assertAllClose(var0_np, var0.eval())
self.assertAllClose(var1_np, var1.eval())
self.assertAllClose(var0_np, self.evaluate(var0))
self.assertAllClose(var1_np, self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes
def testMinimizeSparseResourceVariable(self):
@ -406,37 +406,43 @@ class MomentumOptimizerTest(test.TestCase):
self.assertEquals(slot1.get_shape(), var1.get_shape())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
self.assertAllCloseAccordingToType(
np.array([0.1, 0.1]), self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([0.01, 0.01]), self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval())
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval())
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
self.evaluate(var1))
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval())
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval())
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), var0.eval())
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
3.98 - ((0.9 * 0.01 + 0.01) * 2.0)
]), var1.eval())
]), self.evaluate(var1))
def testSparse(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
@ -461,51 +467,57 @@ class MomentumOptimizerTest(test.TestCase):
self.assertEquals(slot1.get_shape(), var1.get_shape())
# Fetch params to validate initial values
self.assertAllClose([0, 0], var0.eval()[0])
self.assertAllClose([0, 0], var0.eval()[1])
self.assertAllClose([1, 1], var1.eval()[2])
self.assertAllClose([0, 0], self.evaluate(var0)[0])
self.assertAllClose([0, 0], self.evaluate(var0)[1])
self.assertAllClose([1, 1], self.evaluate(var1)[2])
# Step 1: the momentum accumulators are 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0, 0]), slot0.eval()[0])
self.assertAllCloseAccordingToType(np.array([.1, .1]), slot0.eval()[1])
self.assertAllCloseAccordingToType(
np.array([0, 0]),
self.evaluate(slot0)[0])
self.assertAllCloseAccordingToType(
np.array([.1, .1]),
self.evaluate(slot0)[1])
self.assertAllCloseAccordingToType(
np.array([.01, .01]),
slot1.eval()[2])
self.evaluate(slot1)[2])
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(np.array([0, 0]), var0.eval()[0])
self.assertAllCloseAccordingToType(
np.array([0, 0]),
self.evaluate(var0)[0])
self.assertAllCloseAccordingToType(
np.array([-(0.1 * 2.0), -(0.1 * 2.0)]),
var0.eval()[1])
self.evaluate(var0)[1])
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.01 * 2.0), 1.0 - (0.01 * 2.0)]),
var1.eval()[2])
self.evaluate(var1)[2])
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([0, 0]), slot0.eval()[0])
self.assertAllClose(np.array([0, 0]), self.evaluate(slot0)[0])
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
slot0.eval()[1])
self.evaluate(slot0)[1])
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
slot1.eval()[2])
self.evaluate(slot1)[2])
# Check that the parameters have been updated.
self.assertAllClose(np.array([0, 0]), var0.eval()[0])
self.assertAllClose(np.array([0, 0]), self.evaluate(var0)[0])
self.assertAllCloseAccordingToType(
np.array([
-(0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
-(0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]),
var0.eval()[1])
self.evaluate(var0)[1])
self.assertAllCloseAccordingToType(
np.array([
0.98 - ((0.9 * 0.01 + 0.01) * 2.0),
0.98 - ((0.9 * 0.01 + 0.01) * 2.0)
]),
var1.eval()[2])
self.evaluate(var1)[2])
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
@ -527,37 +539,43 @@ class MomentumOptimizerTest(test.TestCase):
self.assertEquals(slot1.get_shape(), var1.get_shape())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
        # Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update1.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
self.assertAllCloseAccordingToType(
np.array([0.1, 0.1]), self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([0.01, 0.01]), self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval())
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval())
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
self.evaluate(var1))
# Step 2: the second momentum accumulators contain the previous update.
mom_update2.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval())
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval())
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), var0.eval())
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0),
3.98 - ((0.9 * 0.01 + 0.01) * 2.0)
]), var1.eval())
]), self.evaluate(var1))
@test_util.run_in_graph_and_eager_modes
def testConfig(self):

View File
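
Note: the momentum assertions above (e.g. 1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)) follow the plain, non-Nesterov heavy-ball update. A minimal NumPy sketch, assuming the constants used in those tests (gradients 0.1/0.01, learning rate 2.0, momentum 0.9):

import numpy as np

def momentum_update_numpy(var, accum, grad, lr=2.0, momentum=0.9):
  # accum <- momentum * accum + grad;  var <- var - lr * accum
  accum = momentum * accum + grad
  var = var - lr * accum
  return var, accum

# Two steps from var=1.0, accum=0.0 with grad=0.1 reproduce the asserted values:
#   step 1: accum = 0.1,            var = 1.0 - 0.1 * 2.0
#   step 2: accum = 0.9*0.1 + 0.1,  var = 1.0 - 0.1*2.0 - (0.9*0.1 + 0.1)*2.0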

@ -132,8 +132,8 @@ class RMSPropOptimizerTest(test.TestCase):
mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 4 steps of RMSProp
for _ in range(1, 5):
@ -148,14 +148,14 @@ class RMSPropOptimizerTest(test.TestCase):
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(mg0_np, mg0.eval())
self.assertAllCloseAccordingToType(mg1_np, mg1.eval())
self.assertAllCloseAccordingToType(rms0_np, rms0.eval())
self.assertAllCloseAccordingToType(rms1_np, rms1.eval())
self.assertAllCloseAccordingToType(mom0_np, mom0.eval())
self.assertAllCloseAccordingToType(mom1_np, mom1.eval())
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))
self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.float32, dtypes.float64]:
@ -173,12 +173,13 @@ class RMSPropOptimizerTest(test.TestCase):
loss, var_list=[var0])
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[[0., 1.]], var0.eval(), atol=0.01)
self.assertAllCloseAccordingToType([[0., 1.]],
self.evaluate(var0),
atol=0.01)
def testMinimizeSparseResourceVariableCentered(self):
for dtype in [dtypes.float32, dtypes.float64]:
@ -196,12 +197,13 @@ class RMSPropOptimizerTest(test.TestCase):
loss, var_list=[var0])
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[[-111, -138]], var0.eval(), atol=0.01)
self.assertAllCloseAccordingToType([[-111, -138]],
self.evaluate(var0),
atol=0.01)
def testSparse(self):
for (dtype, learning_rate, rho, momentum, epsilon, centered) in _TESTPARAMS:
@ -256,8 +258,8 @@ class RMSPropOptimizerTest(test.TestCase):
mom1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Run 4 steps of RMSProp
for _ in range(1, 5):
@ -272,14 +274,14 @@ class RMSPropOptimizerTest(test.TestCase):
# Validate updated params
if centered:
self.assertAllCloseAccordingToType(mg0_np, mg0.eval())
self.assertAllCloseAccordingToType(mg1_np, mg1.eval())
self.assertAllCloseAccordingToType(rms0_np, rms0.eval())
self.assertAllCloseAccordingToType(rms1_np, rms1.eval())
self.assertAllCloseAccordingToType(mom0_np, mom0.eval())
self.assertAllCloseAccordingToType(mom1_np, mom1.eval())
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
self.assertAllCloseAccordingToType(mg0_np, self.evaluate(mg0))
self.assertAllCloseAccordingToType(mg1_np, self.evaluate(mg1))
self.assertAllCloseAccordingToType(rms0_np, self.evaluate(rms0))
self.assertAllCloseAccordingToType(rms1_np, self.evaluate(rms1))
self.assertAllCloseAccordingToType(mom0_np, self.evaluate(mom0))
self.assertAllCloseAccordingToType(mom1_np, self.evaluate(mom1))
self.assertAllCloseAccordingToType(var0_np, self.evaluate(var0))
self.assertAllCloseAccordingToType(var1_np, self.evaluate(var1))
def testCallableParams(self):
with context.eager_mode():

View File
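
Note: the RMSProp hunks above compare the variables and slots against NumPy-tracked mg/rms/mom arrays. A minimal sketch of the (optionally centered) update those arrays are assumed to follow:

import numpy as np

def rmsprop_update_numpy(var, grad, mg, rms, mom,
                         lr, rho, momentum, epsilon, centered):
  rms = rho * rms + (1 - rho) * grad * grad   # running mean of squared grads
  if centered:
    mg = rho * mg + (1 - rho) * grad          # running mean of grads
    denom = rms - mg * mg                     # centered second moment
  else:
    denom = rms
  mom = momentum * mom + lr * grad / np.sqrt(denom + epsilon)
  var = var - mom
  return var, mg, rms, mom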

@ -88,13 +88,13 @@ class AccumulateNV2Test(test_util.TensorFlowTestCase):
np_val = random_arrays[0]
for random_array in random_arrays[1:]:
np_val += random_array
self.assertAllClose(np_val, tf_val.eval())
self.assertAllClose(np_val, self.evaluate(tf_val))
def testZeroArgs(self):
with self.cached_session():
with self.assertRaises(ValueError):
tf_val = math_ops.accumulate_n([])
tf_val.eval()
self.evaluate(tf_val)
def testWrongShape(self):
with self.cached_session():

View File

@ -37,14 +37,14 @@ class ArgMaxTest(test.TestCase):
with self.session(use_gpu=use_gpu):
ans = method(x, axis=axis)
if expected_err_re is None:
tf_ans = ans.eval()
tf_ans = self.evaluate(ans)
# Defaults to int64 output.
self.assertEqual(np.int64, tf_ans.dtype)
self.assertAllEqual(tf_ans, expected_values)
self.assertShapeEqual(expected_values, ans)
else:
with self.assertRaisesOpError(expected_err_re):
ans.eval()
self.evaluate(ans)
def _testBothArg(self,
method,
@ -79,7 +79,7 @@ class ArgMaxTest(test.TestCase):
expected_values = x.argmax()
with self.session(use_gpu=True):
ans = math_ops.argmax(x, axis=0, output_type=dtypes.int32)
tf_ans = ans.eval()
tf_ans = self.evaluate(ans)
self.assertEqual(np.int32, tf_ans.dtype)
# The values are equal when comparing int32 to int64 because
# the values don't have a range that exceeds 32-bit integers.
@ -87,7 +87,7 @@ class ArgMaxTest(test.TestCase):
expected_values = x.argmin()
with self.session(use_gpu=True):
ans = math_ops.argmin(x, axis=0, output_type=dtypes.int32)
tf_ans = ans.eval()
tf_ans = self.evaluate(ans)
self.assertEqual(np.int32, tf_ans.dtype)
self.assertAllEqual(tf_ans, expected_values)

View File

@ -79,7 +79,8 @@ class AtrousConv2DTest(test.TestCase):
y1 = nn_ops.atrous_conv2d(x, f, rate, padding=padding)
y2 = nn_ops.conv2d(
x, f_up, strides=[1, 1, 1, 1], padding=padding)
self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-3, atol=1e-3)
self.assertAllClose(
y1.eval(), self.evaluate(y2), rtol=1e-3, atol=1e-3)
def testAtrousSequence(self):
"""Tests optimization of sequence of atrous convolutions.
@ -131,7 +132,8 @@ class AtrousConv2DTest(test.TestCase):
y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
y2 = nn_ops.conv2d(y2, f, strides=[1, 1, 1, 1], padding=padding)
y2 = array_ops.batch_to_space(y2, crops=pad, block_size=rate)
self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-2, atol=1e-2)
self.assertAllClose(
y1.eval(), self.evaluate(y2), rtol=1e-2, atol=1e-2)
def testGradient(self):
with self.session(use_gpu=True):
@ -193,7 +195,8 @@ class AtrousConv2DTransposeTest(test.TestCase):
padding)
y2 = nn_ops.conv2d_transpose(
x, f_up, y_shape, strides=[1, 1, 1, 1], padding=padding)
self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-3, atol=1e-3)
self.assertAllClose(
y1.eval(), self.evaluate(y2), rtol=1e-3, atol=1e-3)
class AtrousDepthwiseConv2DTest(test.TestCase):
@ -220,7 +223,8 @@ class AtrousDepthwiseConv2DTest(test.TestCase):
y1 = nn_impl.depthwise_conv2d(
x, f, strides, padding, rate=[rate, rate])
y2 = nn_impl.depthwise_conv2d(x, f_up, strides, padding)
self.assertAllClose(y1.eval(), y2.eval(), rtol=1e-3, atol=1e-3)
self.assertAllClose(
y1.eval(), self.evaluate(y2), rtol=1e-3, atol=1e-3)
if __name__ == "__main__":

View File
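
Note: the atrous-convolution hunks above assert that atrous_conv2d(x, f, rate) matches a plain conv2d with a zero-upsampled filter f_up. A minimal sketch of that upsampling, assuming an HWIO filter layout (the helper name is illustrative, not from the test):

import numpy as np

def upsample_filter_numpy(f, rate):
  # Insert (rate - 1) zeros between taps in both spatial dims so that a plain
  # convolution with f_up equals an atrous (dilated) convolution with f.
  kh, kw, cin, cout = f.shape
  f_up = np.zeros(((kh - 1) * rate + 1, (kw - 1) * rate + 1, cin, cout), f.dtype)
  f_up[::rate, ::rate, :, :] = f
  return f_up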

@ -121,8 +121,7 @@ class ExtractGlimpseTest(test.TestCase):
with self.cached_session():
result = image_ops.extract_glimpse(empty_image, [1, 1], offsets)
self.assertAllEqual(
np.zeros(
(0, 1, 1, 0), dtype=np.float32), result.eval())
np.zeros((0, 1, 1, 0), dtype=np.float32), self.evaluate(result))
def testLargeCenterGlimpse(self):
self._VerifyValues(

View File

@ -214,7 +214,7 @@ class BroadcastSimpleTest(test.TestCase):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = out.eval()
tf_gpu = self.evaluate(out)
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO(zhifengc/ke): make gradient checker work on GPU.

View File

@ -52,7 +52,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
gather_t = array_ops.batch_gather(params, indices_tf)
expected_result = np.array([3, 7])
np_val = self._buildParams(expected_result, dtype)
gather_val = gather_t.eval()
gather_val = self.evaluate(gather_t)
self.assertAllEqual(np_val, gather_val)
self.assertEqual(np_val.shape, gather_t.get_shape())
@ -68,7 +68,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
gather_t = array_ops.batch_gather(params, indices_tf)
expected_result = np.array([[3], [15]])
np_val = self._buildParams(expected_result, dtype)
gather_val = gather_t.eval()
gather_val = self.evaluate(gather_t)
self.assertAllEqual(np_val, gather_val)
self.assertEqual(np_val.shape, gather_t.get_shape())
@ -81,7 +81,7 @@ class GatherTest(test.TestCase, parameterized.TestCase):
params = constant_op.constant(params_np)
indices_tf = constant_op.constant(indices)
gather_t = array_ops.batch_gather(params, indices_tf)
gather_val = gather_t.eval()
gather_val = self.evaluate(gather_t)
expected_result = np.array([[[2, 0], [7, 5]], [[10, 8], [11, 15]]])
np_val = self._buildParams(expected_result, dtype)
self.assertAllEqual(np_val, gather_val)

View File

@ -86,7 +86,7 @@ class BatchMatmulOpTest(test.TestCase):
with self.cached_session(use_gpu=is_floating) as sess:
if static_shape:
z0 = math_ops.matmul(x, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
z0_val = z0.eval()
z0_val = self.evaluate(z0)
else:
x_ph = array_ops.placeholder(x.dtype)
y_ph = array_ops.placeholder(y.dtype)

View File

@ -91,7 +91,7 @@ class ScatterTest(test.TestCase):
session.run([update0, update1])
self.assertAllEqual([False, True], var.eval())
self.assertAllEqual([False, True], self.evaluate(var))
def testScatterOutOfRange(self):
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)

View File

@ -48,7 +48,7 @@ class BetaincTest(test.TestCase):
tf_x_s = constant_op.constant(x_s, dtype=dtype)
tf_out_t = math_ops.betainc(tf_a_s, tf_b_s, tf_x_s)
with self.cached_session():
tf_out = tf_out_t.eval()
tf_out = self.evaluate(tf_out_t)
scipy_out = special.betainc(a_s, b_s, x_s).astype(np_dt)
# the scipy version of betainc uses a double-only implementation.

View File

@ -30,7 +30,7 @@ class BitcastTest(test.TestCase):
def _testBitcast(self, x, datatype, shape):
with self.session(use_gpu=True):
tf_ans = array_ops.bitcast(x, datatype)
out = tf_ans.eval()
out = self.evaluate(tf_ans)
buff_after = memoryview(out).tobytes()
buff_before = memoryview(x).tobytes()
self.assertEqual(buff_before, buff_after)

View File

@ -35,13 +35,13 @@ class ResourceOpsTest(test_util.TensorFlowTestCase):
ensemble = boosted_trees_ops.TreeEnsemble('ensemble')
resources.initialize_resources(resources.shared_resources()).run()
stamp_token = ensemble.get_stamp_token()
self.assertEqual(0, stamp_token.eval())
self.assertEqual(0, self.evaluate(stamp_token))
(_, num_trees, num_finalized_trees, num_attempted_layers,
nodes_range) = ensemble.get_states()
self.assertEqual(0, num_trees.eval())
self.assertEqual(0, num_finalized_trees.eval())
self.assertEqual(0, num_attempted_layers.eval())
self.assertAllEqual([0, 1], nodes_range.eval())
self.assertEqual(0, self.evaluate(num_trees))
self.assertEqual(0, self.evaluate(num_finalized_trees))
self.assertEqual(0, self.evaluate(num_attempted_layers))
self.assertAllEqual([0, 1], self.evaluate(nodes_range))
def testCreateWithProto(self):
with self.cached_session():
@ -154,11 +154,11 @@ class ResourceOpsTest(test_util.TensorFlowTestCase):
resources.initialize_resources(resources.shared_resources()).run()
(stamp_token, num_trees, num_finalized_trees, num_attempted_layers,
nodes_range) = ensemble.get_states()
self.assertEqual(7, stamp_token.eval())
self.assertEqual(2, num_trees.eval())
self.assertEqual(1, num_finalized_trees.eval())
self.assertEqual(6, num_attempted_layers.eval())
self.assertAllEqual([16, 19], nodes_range.eval())
self.assertEqual(7, self.evaluate(stamp_token))
self.assertEqual(2, self.evaluate(num_trees))
self.assertEqual(1, self.evaluate(num_finalized_trees))
self.assertEqual(6, self.evaluate(num_attempted_layers))
self.assertAllEqual([16, 19], self.evaluate(nodes_range))
def testSerializeDeserialize(self):
with self.cached_session():
@ -167,11 +167,11 @@ class ResourceOpsTest(test_util.TensorFlowTestCase):
resources.initialize_resources(resources.shared_resources()).run()
(stamp_token, num_trees, num_finalized_trees, num_attempted_layers,
nodes_range) = ensemble.get_states()
self.assertEqual(5, stamp_token.eval())
self.assertEqual(0, num_trees.eval())
self.assertEqual(0, num_finalized_trees.eval())
self.assertEqual(0, num_attempted_layers.eval())
self.assertAllEqual([0, 1], nodes_range.eval())
self.assertEqual(5, self.evaluate(stamp_token))
self.assertEqual(0, self.evaluate(num_trees))
self.assertEqual(0, self.evaluate(num_finalized_trees))
self.assertEqual(0, self.evaluate(num_attempted_layers))
self.assertAllEqual([0, 1], self.evaluate(nodes_range))
# Deserialize.
ensemble_proto = boosted_trees_pb2.TreeEnsemble()
@ -219,18 +219,18 @@ class ResourceOpsTest(test_util.TensorFlowTestCase):
]):
(stamp_token, num_trees, num_finalized_trees, num_attempted_layers,
nodes_range) = ensemble.get_states()
self.assertEqual(3, stamp_token.eval())
self.assertEqual(1, num_trees.eval())
self.assertEqual(3, self.evaluate(stamp_token))
self.assertEqual(1, self.evaluate(num_trees))
# This reads from metadata, not really counting the layers.
self.assertEqual(5, num_attempted_layers.eval())
self.assertEqual(0, num_finalized_trees.eval())
self.assertAllEqual([3, 7], nodes_range.eval())
self.assertEqual(5, self.evaluate(num_attempted_layers))
self.assertEqual(0, self.evaluate(num_finalized_trees))
self.assertAllEqual([3, 7], self.evaluate(nodes_range))
# Serialize.
new_ensemble_proto = boosted_trees_pb2.TreeEnsemble()
new_stamp_token, new_serialized = ensemble.serialize()
self.assertEqual(3, new_stamp_token.eval())
self.assertEqual(3, self.evaluate(new_stamp_token))
new_ensemble_proto.ParseFromString(new_serialized.eval())
self.assertProtoEquals(ensemble_proto, new_ensemble_proto)

View File

@ -359,7 +359,7 @@ class StatsOpsTest(test_util.TensorFlowTestCase):
[[0., 0.], [.15, .36], [.06, .07], [.1, .2]], # node 1
[[-.33, .58], [0., 0.], [.3, .4], [0., 0.]], # node 2
]],
result.eval())
self.evaluate(result))
def testMakeStatsSummaryMultipleFeatures(self):
"""Tests that MakeStatsSummary works for multiple features."""
@ -389,7 +389,7 @@ class StatsOpsTest(test_util.TensorFlowTestCase):
[[.3, .4], [0., 0.], [-.4, .5], [.07, .08]], # node 2
], # feature 1
],
result.eval())
self.evaluate(result))
def _verify_precision(self, length):
with self.cached_session():
@ -408,7 +408,7 @@ class StatsOpsTest(test_util.TensorFlowTestCase):
node_ids, gradients, hessians, [bucketized_features], max_splits,
num_buckets) # shape=[max_splits, num_buckets, num_features, 2]
self.assertAllClose([[[[2., 0.2]]]], result.eval())
self.assertAllClose([[[[2., 0.2]]]], self.evaluate(result))
def testMakeStatsSummaryNumericalPrecisionSmallBatch(self):
"""Tests numeric precision."""

View File

@ -55,7 +55,7 @@ class RangeSamplerOpsTest(test.TestCase):
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
sampled_candidates, _, _ = candidate_sampling_ops.all_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
result = sampled_candidates.eval()
result = self.evaluate(sampled_candidates)
expected_ids = [0, 1, 2, 3, 4]
self.assertAllEqual(result, expected_ids)
@ -68,7 +68,7 @@ class RangeSamplerOpsTest(test.TestCase):
_, true_expected_count, _ = candidate_sampling_ops.all_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
true_log_expected_count = math_ops.log(true_expected_count)
result = true_log_expected_count.eval()
result = self.evaluate(true_log_expected_count)
self.assertAllEqual(result, [[0.0] * self.NUM_TRUE] * self.BATCH_SIZE)
self.assertEqual(true_expected_count.get_shape(),
@ -83,7 +83,7 @@ class RangeSamplerOpsTest(test.TestCase):
_, _, sampled_expected_count = candidate_sampling_ops.all_candidate_sampler( # pylint: disable=line-too-long
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True)
sampled_log_expected_count = math_ops.log(sampled_expected_count)
result = sampled_log_expected_count.eval()
result = self.evaluate(sampled_log_expected_count)
self.assertAllEqual(result, [0.0] * self.NUM_SAMPLED)
self.assertEqual(sampled_expected_count.get_shape(), [self.NUM_SAMPLED])
@ -114,7 +114,7 @@ class RangeSamplerOpsTest(test.TestCase):
[[1, 2], [0, 4], [3, 3]], dtype=dtypes.int64)
sampled, _, _ = candidate_sampling_ops.log_uniform_candidate_sampler(
true_classes, self.NUM_TRUE, self.NUM_SAMPLED, True, 5, seed=seed)
return sampled.eval()
return self.evaluate(sampled)
# Non-zero seed. Repeatable.
for seed in [1, 12, 123, 1234]:

View File

@ -107,10 +107,10 @@ class CastOpTest(test.TestCase):
a = np.random.uniform(-100, 100, 100).astype(np.float32)
with self.cached_session(use_gpu=False):
b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
self.assertAllClose(a, b.eval(), rtol=1 / 128.)
self.assertAllClose(a, self.evaluate(b), rtol=1 / 128.)
with self.cached_session(use_gpu=True):
b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
self.assertAllClose(a, b.eval(), rtol=1 / 128.)
self.assertAllClose(a, self.evaluate(b), rtol=1 / 128.)
def testRandom(self):
self._testAll(np.random.normal(0, 10, 210).reshape([2, 3, 5, 7]))

View File

@ -58,8 +58,8 @@ class GenerateVocabRemappingTest(test.TestCase):
expected_remapping = range(0, 3)
expected_num_present = 3
with self.cached_session():
self.assertAllEqual(expected_remapping, remapping.eval())
self.assertAllEqual(expected_num_present, num_present.eval())
self.assertAllEqual(expected_remapping, self.evaluate(remapping))
self.assertAllEqual(expected_num_present, self.evaluate(num_present))
def test_generate_remapping_with_shifted_vocab(self):
"""Tests where vocab is the same, but shifted / ordered differently."""
@ -71,8 +71,8 @@ class GenerateVocabRemappingTest(test.TestCase):
expected_remapping = [2, 0, 1]
expected_num_present = 3
with self.cached_session():
self.assertAllEqual(expected_remapping, remapping.eval())
self.assertAllEqual(expected_num_present, num_present.eval())
self.assertAllEqual(expected_remapping, self.evaluate(remapping))
self.assertAllEqual(expected_num_present, self.evaluate(num_present))
def test_generate_remapping_with_offset(self):
"""Tests offset and num_new_vocab logic."""
@ -84,8 +84,8 @@ class GenerateVocabRemappingTest(test.TestCase):
expected_remapping = [0]
expected_num_present = 1
with self.cached_session():
self.assertAllEqual(expected_remapping, remapping.eval())
self.assertAllEqual(expected_num_present, num_present.eval())
self.assertAllEqual(expected_remapping, self.evaluate(remapping))
self.assertAllEqual(expected_num_present, self.evaluate(num_present))
def test_generate_remapping_with_old_vocab_size(self):
"""Tests where old_vocab_size is specified."""
@ -99,8 +99,8 @@ class GenerateVocabRemappingTest(test.TestCase):
expected_remapping = [-1, 0, 1]
expected_num_present = 2
with self.cached_session():
self.assertAllEqual(expected_remapping, remapping.eval())
self.assertAllEqual(expected_num_present, num_present.eval())
self.assertAllEqual(expected_remapping, self.evaluate(remapping))
self.assertAllEqual(expected_num_present, self.evaluate(num_present))
class LoadAndRemapMatrixTest(test.TestCase):
@ -142,7 +142,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
num_cols=self.old_num_cols)
with self.cached_session():
self.assertAllClose(self.matrix_value[row_remapping],
remapped_matrix.eval())
self.evaluate(remapped_matrix))
# No row remapping, new weight matrix has third col, then first col.
row_remapping = list(range(self.old_num_rows))
@ -157,7 +157,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
num_cols=len(col_remapping))
with self.cached_session():
self.assertAllClose(self.matrix_value[row_remapping][:, col_remapping],
remapped_matrix.eval())
self.evaluate(remapped_matrix))
# Both row and column remappings.
row_remapping = [1, 0, 4]
@ -172,7 +172,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
num_cols=len(col_remapping))
with self.cached_session():
self.assertAllClose(self.matrix_value[row_remapping][:, col_remapping],
remapped_matrix.eval())
self.evaluate(remapped_matrix))
def test_load_and_remap_with_init(self):
"""Tests the op's load and remap where there are missing entries."""
@ -190,7 +190,8 @@ class LoadAndRemapMatrixTest(test.TestCase):
[33, init_val, init_val, init_val, 1, init_val], [3, 2])
with self.cached_session():
self.assertAllClose(expected_remapped_matrix, remapped_matrix.eval())
self.assertAllClose(expected_remapped_matrix,
self.evaluate(remapped_matrix))
def test_load_and_remap_all_missing_rows(self):
"""Tests when all the rows are missing and need to be initialized."""
@ -207,7 +208,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
with self.cached_session():
self.assertAllClose(
np.reshape(initializing_values, (num_rows, self.old_num_cols)),
remapped_matrix.eval())
self.evaluate(remapped_matrix))
def test_load_and_remap_all_missing_rows_and_cols(self):
"""Tests when all the rows & cols are missing and need to be initialized."""
@ -225,7 +226,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
with self.cached_session():
self.assertAllClose(
np.reshape(initializing_values, (num_rows, num_cols)),
remapped_matrix.eval())
self.evaluate(remapped_matrix))
def test_load_and_remap_invalid_remapping(self):
"""Tests that errors are raised when an ID maps to multiple new IDs.
@ -244,7 +245,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
num_rows=len(invalid_remapping),
num_cols=self.old_num_cols)
with self.cached_session(), self.assertRaises(errors.UnimplementedError):
remapped_matrix.eval()
self.evaluate(remapped_matrix)
# Invalid column remapping.
remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
@ -256,7 +257,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
num_rows=self.old_num_rows,
num_cols=len(invalid_remapping))
with self.cached_session(), self.assertRaises(errors.UnimplementedError):
remapped_matrix.eval()
self.evaluate(remapped_matrix)
def test_load_and_remap_incorrect_initializing_values(self):
"""Tests that errors are raised with incorrect number of init values."""
@ -273,7 +274,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
num_rows=3,
num_cols=2)
with self.cached_session(), self.assertRaises(errors.InvalidArgumentError):
remapped_matrix.eval()
self.evaluate(remapped_matrix)
remapped_matrix = gen_checkpoint_ops.load_and_remap_matrix(
ckpt_path=[self.bundle_file],
@ -285,7 +286,7 @@ class LoadAndRemapMatrixTest(test.TestCase):
num_rows=3,
num_cols=2)
with self.cached_session(), self.assertRaises(errors.InvalidArgumentError):
remapped_matrix.eval()
self.evaluate(remapped_matrix)
class LoadAndRemapMatrixWithMaxRowsTest(test.TestCase):
@ -324,7 +325,7 @@ class LoadAndRemapMatrixWithMaxRowsTest(test.TestCase):
num_rows=num_rows,
num_cols=num_cols,
max_rows_in_memory=max_rows_in_memory)
self.assertAllClose(np_value[::-1], remapped_matrix.eval())
self.assertAllClose(np_value[::-1], self.evaluate(remapped_matrix))
# Tests loading the tensor (except for the first and last rows), with
# uninitialized values. Requires num_rows to be at least 3 since we're
@ -348,7 +349,7 @@ class LoadAndRemapMatrixWithMaxRowsTest(test.TestCase):
np.vstack([
np.tile(42, [prefix_rows, num_cols]), np_value[1:-1],
np.tile(42, [suffix_rows, num_cols])
]), remapped_matrix.eval())
]), self.evaluate(remapped_matrix))
# Tests when everything is taken from initializing_values.
new_rows = 7
@ -365,7 +366,7 @@ class LoadAndRemapMatrixWithMaxRowsTest(test.TestCase):
max_rows_in_memory=max_rows_in_memory)
self.assertAllClose(
np.reshape(initializing_values, (new_rows, num_cols)),
remapped_matrix.eval())
self.evaluate(remapped_matrix))
def test_loading_rows_divisible_by_max_rows(self):
"""Tests loading normal var when rows are evenly divisible by max_rows."""

View File
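
Note: the load_and_remap_matrix hunks above check that remapped rows/columns come from the checkpointed matrix while cells whose remapping is -1 are filled from initializing_values. A minimal NumPy sketch of that assumed semantics (hypothetical helper; fill order is assumed to be row-major over the missing cells):

import numpy as np

def load_and_remap_numpy(old_matrix, row_remapping, col_remapping,
                         initializing_values):
  out = np.empty((len(row_remapping), len(col_remapping)), old_matrix.dtype)
  init_values = iter(initializing_values)
  for i, old_row in enumerate(row_remapping):
    for j, old_col in enumerate(col_remapping):
      if old_row >= 0 and old_col >= 0:
        out[i, j] = old_matrix[old_row, old_col]  # copied from the checkpoint
      else:
        out[i, j] = next(init_values)             # taken from initializing_values
  return out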

@ -55,7 +55,7 @@ class ClipTest(test.TestCase):
np_ans = [[-4.4, 2.0, 3.0], [4.0, 4.4, 4.4]]
clip_value = 4.4
ans = clip_ops.clip_by_value(x, -clip_value, clip_value)
tf_ans = ans.eval()
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
@ -71,7 +71,7 @@ class ClipTest(test.TestCase):
clip_value_min = 2
clip_value_max = 4
ans = clip_ops.clip_by_value(x, clip_value_min, clip_value_max)
tf_ans = ans.eval()
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
@ -88,7 +88,7 @@ class ClipTest(test.TestCase):
[2, 2, 2, 3, 3, 3], shape=[2, 3], dtype=dtype)
clip_value_max = 4
ans = clip_ops.clip_by_value(x, clip_value_min, clip_value_max)
tf_ans = ans.eval()
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
@ -105,7 +105,7 @@ class ClipTest(test.TestCase):
clip_value_max = constant_op.constant(
[6, 6, 6, 6, 6, 6], shape=[2, 3], dtype=dtype)
ans = clip_ops.clip_by_value(x, clip_value_min, clip_value_max)
tf_ans = ans.eval()
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
@ -123,7 +123,7 @@ class ClipTest(test.TestCase):
clip_value_max = constant_op.constant(
[5, 5, 5, 7, 7, 7], shape=[2, 3], dtype=dtype)
ans = clip_ops.clip_by_value(x, clip_value_min, clip_value_max)
tf_ans = ans.eval()
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
@ -144,7 +144,7 @@ class ClipTest(test.TestCase):
np_ans = [float('NaN'), 4.0, -4.0]
clip_value = 4.0
ans = clip_ops.clip_by_value(x, -clip_value, clip_value)
tf_ans = ans.eval()
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
@ -157,10 +157,10 @@ class ClipTest(test.TestCase):
np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 0.0]]
clip_norm = 4.0
ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans = ans.eval()
tf_ans = self.evaluate(ans)
ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans_tensor = ans.eval()
tf_ans_tensor = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
self.assertAllClose(np_ans, tf_ans_tensor)
@ -188,7 +188,7 @@ class ClipTest(test.TestCase):
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
clip_norm = 6.0
ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans = ans.eval()
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
@ -200,7 +200,7 @@ class ClipTest(test.TestCase):
np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
clip_norm = 6.0
ans = clip_ops.clip_by_norm(x, clip_norm)
tf_ans = ans.eval()
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
@ -212,7 +212,7 @@ class ClipTest(test.TestCase):
np_ans = [[-2.4, 0.0, 0.0], [3.2, 0.0, 3.0]]
clip_norm = 4.0
ans = clip_ops.clip_by_norm(x, clip_norm, [0])
tf_ans = ans.eval()
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
@ -224,7 +224,7 @@ class ClipTest(test.TestCase):
np_ans = [[-3.0, 0.0, 0.0], [3.2, 0.0, 2.4]]
clip_norm = 4.0
ans = clip_ops.clip_by_norm(x, clip_norm, [1])
tf_ans = ans.eval()
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
@ -236,7 +236,7 @@ class ClipTest(test.TestCase):
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 3.0]]
clip_norm = 6.0
ans = clip_ops.clip_by_norm(x, clip_norm, [1])
tf_ans = ans.eval()
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
@ -256,7 +256,7 @@ class ClipTest(test.TestCase):
ans, norm = clip_ops.clip_by_global_norm((x0, x1), clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = norm.eval()
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
@ -277,7 +277,7 @@ class ClipTest(test.TestCase):
ans, norm = clip_ops.clip_by_global_norm((x0, x1), clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = norm.eval()
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
@ -300,7 +300,7 @@ class ClipTest(test.TestCase):
self.assertTrue(ans[3] is None)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[2].eval()
tf_norm = norm.eval()
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
@ -322,7 +322,7 @@ class ClipTest(test.TestCase):
ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].values.eval()
tf_norm = norm.eval()
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
@ -352,7 +352,7 @@ class ClipTest(test.TestCase):
ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = norm.eval()
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
@ -371,7 +371,7 @@ class ClipTest(test.TestCase):
ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = norm.eval()
tf_norm = self.evaluate(norm)
self.assertAllClose(tf_norm, 0.0)
self.assertAllClose(np_ans_0, tf_ans_1)
@ -386,7 +386,7 @@ class ClipTest(test.TestCase):
ans, norm = clip_ops.clip_by_global_norm([x0, x1], clip_norm)
with self.assertRaisesRegexp(errors.InvalidArgumentError, "global norm"):
norm.eval()
self.evaluate(norm)
with self.assertRaisesRegexp(errors.InvalidArgumentError, "global norm"):
ans[0].eval()
with self.assertRaisesRegexp(errors.InvalidArgumentError, "global norm"):
@ -400,7 +400,7 @@ class ClipTest(test.TestCase):
np_ans = [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
clip_norm = 0.8
ans = clip_ops.clip_by_average_norm(x, clip_norm)
tf_ans = ans.eval()
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
@ -412,7 +412,7 @@ class ClipTest(test.TestCase):
np_ans = [[-2.88, 0.0, 0.0], [3.84, 0.0, 0.0]]
clip_norm = constant_op.constant(0.8)
ans = clip_ops.clip_by_average_norm(x, clip_norm)
tf_ans = ans.eval()
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
@ -424,7 +424,7 @@ class ClipTest(test.TestCase):
np_ans = [[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]]
clip_norm = 0.9
ans = clip_ops.clip_by_average_norm(x, clip_norm)
tf_ans = ans.eval()
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)
@ -436,7 +436,7 @@ class ClipTest(test.TestCase):
np_ans = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
clip_norm = 0.9
ans = clip_ops.clip_by_average_norm(x, clip_norm)
tf_ans = ans.eval()
tf_ans = self.evaluate(ans)
self.assertAllClose(np_ans, tf_ans)

View File
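
Note: the clip_by_norm checks above follow the usual scale-down-by-norm rule; for example, assuming the clipped input there is [[-3., 0., 0.], [4., 0., 0.]] (global L2 norm 5), clip_norm 4.0 scales every entry by 4/5, giving the asserted [[-2.4, 0., 0.], [3.2, 0., 0.]]. A minimal NumPy sketch:

import numpy as np

def clip_by_norm_numpy(x, clip_norm, axes=None):
  # Scale x so its L2 norm (over `axes`, or over all entries) is at most
  # clip_norm; inputs already within the bound are returned unchanged.
  l2 = np.sqrt(np.sum(x * x, axis=axes, keepdims=axes is not None))
  return x * (clip_norm / np.maximum(l2, clip_norm))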

@ -33,12 +33,12 @@ class CompareAndBitpackTest(test.TestCase):
with self.cached_session(use_gpu=True):
ans = math_ops.compare_and_bitpack(x, threshold)
if expected_err_re is None:
tf_ans = ans.eval()
tf_ans = self.evaluate(ans)
self.assertShapeEqual(truth, ans)
self.assertAllEqual(tf_ans, truth)
else:
with self.assertRaisesOpError(expected_err_re):
ans.eval()
self.evaluate(ans)
def _testBasic(self, dtype):
rows = 371

View File

@ -71,7 +71,7 @@ class ConcatOpTest(test.TestCase):
x1 = constant_op.constant(p1)
x2 = constant_op.constant(p2)
c = array_ops.concat([x1, x2], 0)
result = c.eval()
result = self.evaluate(c)
self.assertAllEqual(result[:2, :], p1)
self.assertAllEqual(result[2:, :], p2)
@ -83,7 +83,7 @@ class ConcatOpTest(test.TestCase):
v2 = variables.Variable(p2)
c = array_ops.concat([v1, v2], 0)
variables.global_variables_initializer().run()
result = c.eval()
result = self.evaluate(c)
self.assertEqual(result.shape, c.get_shape())
self.assertAllEqual(result[:4, :], p1)
@ -195,7 +195,7 @@ class ConcatOpTest(test.TestCase):
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, axis)
result = concated_grad.eval()
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
def testGradientsSimple(self):
@ -222,7 +222,7 @@ class ConcatOpTest(test.TestCase):
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, 0)
result = concated_grad.eval()
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
@ -249,7 +249,7 @@ class ConcatOpTest(test.TestCase):
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, axis)
result = concated_grad.eval()
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
@ -279,7 +279,7 @@ class ConcatOpTest(test.TestCase):
grad_tensor = constant_op.constant(grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, concat_dim)
result = concated_grad.eval()
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
@ -476,7 +476,7 @@ class ConcatOpTest(test.TestCase):
with self.cached_session():
concat_list_t = array_ops.concat([c1, c2], 0)
concat_tuple_t = array_ops.concat((c1, c2), 0)
self.assertAllEqual(concat_list_t.eval(), concat_tuple_t.eval())
self.assertAllEqual(concat_list_t.eval(), self.evaluate(concat_tuple_t))
def testConcatNoScalars(self):
with self.cached_session():
@ -543,13 +543,13 @@ class ConcatOpTest(test.TestCase):
c = gen_array_ops.concat_v2([t1, t2], -2)
self.assertEqual([4, 3], c.get_shape().as_list())
output = c.eval()
output = self.evaluate(c)
self.assertAllEqual([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]],
output)
c = gen_array_ops.concat_v2([t1, t2], -1)
self.assertEqual([2, 6], c.get_shape().as_list())
output = c.eval()
output = self.evaluate(c)
self.assertAllEqual([[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]], output)
def _testGradientsForAxis(
@ -615,7 +615,7 @@ class ConcatOpTest(test.TestCase):
c = gen_array_ops.concat_v2([t1, t2],
constant_op.constant(1, dtype=dtype))
self.assertEqual([2, 6], c.get_shape().as_list())
output = c.eval()
output = self.evaluate(c)
self.assertAllEqual([[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]], output)
class ConcatOffsetTest(test.TestCase):

View File

@ -149,7 +149,7 @@ class ConditionalAccumulatorTest(test.TestCase):
accum_op.run()
is_all_equal = True
val = takeg_t.eval()
val = self.evaluate(takeg_t)
for i in range(len(val)):
for j in range(len(val[i])):
is_all_equal &= (val[i][j] == elems_ave[i][j])
@ -184,7 +184,7 @@ class ConditionalAccumulatorTest(test.TestCase):
sess.run(accum_op, feed_dict={x: elem})
is_all_equal = True
val = takeg_t.eval()
val = self.evaluate(takeg_t)
for i in range(len(val)):
for j in range(len(val[i])):
is_all_equal &= (val[i][j] == elems_ave[i][j])
@ -259,7 +259,7 @@ class ConditionalAccumulatorTest(test.TestCase):
for accum_op in accum_ops:
accum_op.run()
val = takeg_t.eval()
val = self.evaluate(takeg_t)
self.assertEqual(15.0, val)
accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
@ -268,7 +268,7 @@ class ConditionalAccumulatorTest(test.TestCase):
for accum_op in accum_ops:
accum_op.run()
val = takeg_t.eval()
val = self.evaluate(takeg_t)
self.assertEqual(15.0, val)
def testAccumulatorTakeGradSum(self):
@ -286,7 +286,7 @@ class ConditionalAccumulatorTest(test.TestCase):
for accum_op in accum_ops:
accum_op.run()
val = takeg_t.eval()
val = self.evaluate(takeg_t)
self.assertEqual(30.0, val)
accum_ops = [q.apply_grad((x,), local_step=1) for x in elems]
@ -295,7 +295,7 @@ class ConditionalAccumulatorTest(test.TestCase):
for accum_op in accum_ops:
accum_op.run()
val = takeg_t.eval()
val = self.evaluate(takeg_t)
self.assertEqual(30.0, val)
def testAccumulatorTakeGradInvalidReductionType(self):
@ -319,7 +319,7 @@ class ConditionalAccumulatorTest(test.TestCase):
accum_op.run()
with self.assertRaises(errors_impl.InvalidArgumentError):
takeg_t.eval()
self.evaluate(takeg_t)
def testAccumulatorRepeatedTakeGradMean(self):
with self.cached_session():
@ -334,7 +334,7 @@ class ConditionalAccumulatorTest(test.TestCase):
for accum_op in accum_ops:
accum_op.run()
val = takeg_t.eval()
val = self.evaluate(takeg_t)
self.assertEqual(elems_ave, val)
elems = [20.0, 30.0]
@ -345,7 +345,7 @@ class ConditionalAccumulatorTest(test.TestCase):
for accum_op in accum_ops:
accum_op.run()
val = takeg_t.eval()
val = self.evaluate(takeg_t)
self.assertEqual(elems_ave + 0.0, val)
def testAccumulatorRepeatedTakeGradSum(self):
@ -364,7 +364,7 @@ class ConditionalAccumulatorTest(test.TestCase):
for accum_op in accum_ops:
accum_op.run()
val = takeg_t.eval()
val = self.evaluate(takeg_t)
self.assertEqual(elems_sum, val)
elems = [20.0, 30.0]
@ -375,7 +375,7 @@ class ConditionalAccumulatorTest(test.TestCase):
for accum_op in accum_ops:
accum_op.run()
val = takeg_t.eval()
val = self.evaluate(takeg_t)
self.assertEqual(elems_sum, val)
def testAccumulatorIncrementGlobalStep(self):
@ -392,7 +392,7 @@ class ConditionalAccumulatorTest(test.TestCase):
variables.global_variables_initializer().run()
for _ in range(3):
set_global_step_op.run()
inc_global_step.eval()
self.evaluate(inc_global_step)
def testAccumulatorSetGlobalStepPreventsAccumulation(self):
with self.cached_session():
@ -410,7 +410,7 @@ class ConditionalAccumulatorTest(test.TestCase):
accum_op.run()
takeg_t = q.take_grad(1)
val = takeg_t.eval()
val = self.evaluate(takeg_t)
self.assertEqual(0.0 + sum(x for x in local_steps
if x >= ls) / sum(1 for x in local_steps
if x >= ls), val)
@ -436,7 +436,7 @@ class ConditionalAccumulatorTest(test.TestCase):
for thread in threads:
thread.join()
val = takeg_t.eval()
val = self.evaluate(takeg_t)
self.assertEqual(val, sum(elems) / len(elems))

View File
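
Note: the accumulator hunks above check take_grad against 15.0 for MEAN and 30.0 for SUM, which is consistent with two applied gradients of 10.0 and 20.0 (an assumption; the elems are not shown in the hunks). A minimal sketch of the reduction being tested:

import numpy as np

def take_grad_numpy(applied_grads, reduction_type="MEAN"):
  # Gradients applied since the last take_grad are either averaged or summed.
  total = np.sum(applied_grads, axis=0)
  if reduction_type == "MEAN":
    return total / len(applied_grads)
  return total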

@ -232,7 +232,7 @@ class ConfusionMatrixTest(test.TestCase):
with self.cached_session():
cm = confusion_matrix.confusion_matrix(
labels, predictions, dtype=dtypes.int32)
tf_cm = cm.eval()
tf_cm = self.evaluate(cm)
self.assertEqual(tf_cm.dtype, np.int32)
def testOutputIsInt64(self):
@ -241,7 +241,7 @@ class ConfusionMatrixTest(test.TestCase):
with self.cached_session():
cm = confusion_matrix.confusion_matrix(
labels, predictions, dtype=dtypes.int64)
tf_cm = cm.eval()
tf_cm = self.evaluate(cm)
self.assertEqual(tf_cm.dtype, np.int64)
@ -261,8 +261,8 @@ class RemoveSqueezableDimensionsTest(test.TestCase):
labels_placeholder, predictions_placeholder))
with self.cached_session():
self.assertAllEqual(label_values, static_labels.eval())
self.assertAllEqual(prediction_values, static_predictions.eval())
self.assertAllEqual(label_values, self.evaluate(static_labels))
self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
@ -286,8 +286,8 @@ class RemoveSqueezableDimensionsTest(test.TestCase):
labels_placeholder, predictions_placeholder))
with self.cached_session():
self.assertAllEqual(label_values, static_labels.eval())
self.assertAllEqual(prediction_values, static_predictions.eval())
self.assertAllEqual(label_values, self.evaluate(static_labels))
self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
@ -311,8 +311,8 @@ class RemoveSqueezableDimensionsTest(test.TestCase):
labels_placeholder, predictions_placeholder, expected_rank_diff=0))
with self.cached_session():
self.assertAllEqual(label_values, static_labels.eval())
self.assertAllEqual(prediction_values, static_predictions.eval())
self.assertAllEqual(label_values, self.evaluate(static_labels))
self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
@ -337,8 +337,8 @@ class RemoveSqueezableDimensionsTest(test.TestCase):
expected_label_values = np.reshape(label_values, newshape=(2, 3))
with self.cached_session():
self.assertAllEqual(expected_label_values, static_labels.eval())
self.assertAllEqual(prediction_values, static_predictions.eval())
self.assertAllEqual(expected_label_values, self.evaluate(static_labels))
self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
@ -363,8 +363,8 @@ class RemoveSqueezableDimensionsTest(test.TestCase):
expected_label_values = np.reshape(label_values, newshape=(2, 3))
with self.cached_session():
self.assertAllEqual(expected_label_values, static_labels.eval())
self.assertAllEqual(prediction_values, static_predictions.eval())
self.assertAllEqual(expected_label_values, self.evaluate(static_labels))
self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
@ -389,8 +389,9 @@ class RemoveSqueezableDimensionsTest(test.TestCase):
expected_prediction_values = np.reshape(prediction_values, newshape=(2, 3))
with self.cached_session():
self.assertAllEqual(label_values, static_labels.eval())
self.assertAllEqual(expected_prediction_values, static_predictions.eval())
self.assertAllEqual(label_values, self.evaluate(static_labels))
self.assertAllEqual(expected_prediction_values,
self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values
@ -416,8 +417,9 @@ class RemoveSqueezableDimensionsTest(test.TestCase):
expected_prediction_values = np.reshape(prediction_values, newshape=(2, 3))
with self.cached_session():
self.assertAllEqual(label_values, static_labels.eval())
self.assertAllEqual(expected_prediction_values, static_predictions.eval())
self.assertAllEqual(label_values, self.evaluate(static_labels))
self.assertAllEqual(expected_prediction_values,
self.evaluate(static_predictions))
feed_dict = {
labels_placeholder: label_values,
predictions_placeholder: prediction_values

View File
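
Note: the confusion-matrix hunks above only check the output dtype, but the quantity being built is the standard label/prediction co-occurrence count. A minimal NumPy sketch:

import numpy as np

def confusion_matrix_numpy(labels, predictions, num_classes, dtype=np.int32):
  # cm[i, j] counts examples whose true label is i and predicted label is j.
  cm = np.zeros((num_classes, num_classes), dtype=dtype)
  for label, prediction in zip(labels, predictions):
    cm[label, prediction] += 1
  return cm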

@ -282,29 +282,29 @@ class AsTensorTest(test.TestCase):
with self.cached_session():
x = ops.convert_to_tensor(tensor_shape.TensorShape([]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([], x.eval())
self.assertAllEqual([], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([1, 2, 3], x.eval())
self.assertAllEqual([1, 2, 3], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31-1, 2, 3]))
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([2**31-1, 2, 3], x.eval())
self.assertAllEqual([2**31 - 1, 2, 3], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31-1, 2, 3]),
dtype=dtypes_lib.int32)
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual([2**31-1, 2, 3], x.eval())
self.assertAllEqual([2**31 - 1, 2, 3], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3]))
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([2**31, 2, 3], x.eval())
self.assertAllEqual([2**31, 2, 3], self.evaluate(x))
x = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2, 3]),
dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([2**31, 2, 3], x.eval())
self.assertAllEqual([2**31, 2, 3], self.evaluate(x))
with self.assertRaisesRegexp(
ValueError, "a dimension is too large .2147483648."):
@ -314,11 +314,11 @@ class AsTensorTest(test.TestCase):
x = ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3]), dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual([1, 2, 3], x.eval())
self.assertAllEqual([1, 2, 3], self.evaluate(x))
x = array_ops.reshape(
array_ops.zeros([6]), tensor_shape.TensorShape([2, 3]))
self.assertAllEqual([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], x.eval())
self.assertAllEqual([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], self.evaluate(x))
with self.assertRaisesRegexp(ValueError, "partially known"):
ops.convert_to_tensor(tensor_shape.TensorShape(None))
@ -334,12 +334,12 @@ class AsTensorTest(test.TestCase):
with self.cached_session():
x = ops.convert_to_tensor(tensor_shape.TensorShape([1, 2, 3])[1])
self.assertEqual(dtypes_lib.int32, x.dtype)
self.assertAllEqual(2, x.eval())
self.assertAllEqual(2, self.evaluate(x))
x = ops.convert_to_tensor(
tensor_shape.TensorShape([1, 2, 3])[1], dtype=dtypes_lib.int64)
self.assertEqual(dtypes_lib.int64, x.dtype)
self.assertAllEqual(2, x.eval())
self.assertAllEqual(2, self.evaluate(x))
shape = tensor_shape.TensorShape(None)
if shape._v2_behavior:
@ -372,7 +372,7 @@ class ZerosTest(test.TestCase):
with self.cached_session():
ret = array_ops.zeros(shape)
self.assertEqual(shape, ret.get_shape())
return ret.eval()
return self.evaluate(ret)
def testConst(self):
self.assertTrue(
@ -383,7 +383,7 @@ class ZerosTest(test.TestCase):
self.assertEqual(0, self._Zeros(()))
with self.cached_session():
scalar = array_ops.zeros(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(0, scalar.eval())
self.assertEqual(0, self.evaluate(scalar))
def testDynamicSizes(self):
np_ans = np.array([[0] * 3] * 2)
@ -392,7 +392,7 @@ class ZerosTest(test.TestCase):
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of zeros of the same dimensions as "d".
z = array_ops.zeros(array_ops.shape(d))
out = z.eval()
out = self.evaluate(z)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
@ -420,13 +420,13 @@ class ZerosTest(test.TestCase):
z = array_ops.zeros([2, 3], dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z_value = z.eval()
z_value = self.evaluate(z)
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
z = array_ops.zeros(array_ops.shape(d), dtype=dtype)
self.assertEqual(z.dtype, dtype)
self.assertEqual([2, 3], z.get_shape())
z_value = z.eval()
z_value = self.evaluate(z)
self.assertFalse(np.any(z_value))
self.assertEqual((2, 3), z_value.shape)
@ -538,7 +538,7 @@ class OnesTest(test.TestCase):
with self.cached_session():
ret = array_ops.ones(shape)
self.assertEqual(shape, ret.get_shape())
return ret.eval()
return self.evaluate(ret)
def testConst(self):
self.assertTrue(np.array_equal(self._Ones([2, 3]), np.array([[1] * 3] * 2)))
@ -548,7 +548,7 @@ class OnesTest(test.TestCase):
self.assertEqual(1, self._Ones(()))
with self.cached_session():
scalar = array_ops.ones(constant_op.constant([], dtype=dtypes_lib.int32))
self.assertEqual(1, scalar.eval())
self.assertEqual(1, self.evaluate(scalar))
def testDynamicSizes(self):
np_ans = np.array([[1] * 3] * 2)
@ -557,7 +557,7 @@ class OnesTest(test.TestCase):
d = array_ops.fill([2, 3], 12., name="fill")
# Constructs a tensor of ones of the same dimensions as "d".
z = array_ops.ones(array_ops.shape(d))
out = z.eval()
out = self.evaluate(z)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, d)
self.assertShapeEqual(np_ans, z)
@ -617,7 +617,7 @@ class OnesLikeTest(test.TestCase):
z_var = array_ops.ones_like(d)
# Test that the type is correct
self.assertEqual(z_var.dtype, dtype)
z_value = z_var.eval()
z_value = self.evaluate(z_var)
# Test that the value is correct
self.assertTrue(np.array_equal(z_value, np.array([[1] * 3] * 2)))
@ -634,7 +634,7 @@ class FillTest(test.TestCase):
def _compare(self, dims, val, np_ans, use_gpu):
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.fill(dims, val, name="fill")
out = tf_ans.eval()
out = self.evaluate(tf_ans)
self.assertAllClose(np_ans, out)
# Fill does not set the shape.
# self.assertShapeEqual(np_ans, tf_ans)
@ -726,7 +726,7 @@ class PlaceholderTest(test.TestCase):
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float"):
p_identity.eval()
self.evaluate(p_identity)
def testShape(self):
with self.cached_session():
@ -739,7 +739,7 @@ class PlaceholderTest(test.TestCase):
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float and "
r"shape \[10,10\]"):
p_identity.eval()
self.evaluate(p_identity)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: "Cannot feed value of shape" in str(e)):
@ -783,7 +783,7 @@ class PlaceholderTest(test.TestCase):
# Should trigger an operator error, not a shape error.
with self.assertRaisesOpError(
"must feed a value for placeholder tensor 'p' with dtype float"):
p_identity.eval()
self.evaluate(p_identity)
def testControlDependency(self):
with self.cached_session():
@ -896,7 +896,7 @@ class PlaceholderWithDefaultTest(test.TestCase):
with self.session(force_gpu=test_util.is_gpu_available()):
p = array_ops.placeholder_with_default([[2, 2], [2, 2]], shape=[2, 2])
a = array_ops.identity(p)
self.assertAllEqual([[2, 2], [2, 2]], a.eval())
self.assertAllEqual([[2, 2], [2, 2]], self.evaluate(a))
self.assertAllEqual(
[[3, 3], [3, 3]], a.eval(feed_dict={p: [[3, 3], [3, 3]]}))
@ -907,7 +907,7 @@ class PlaceholderWithDefaultTest(test.TestCase):
with self.session(force_gpu=test_util.is_gpu_available()):
p = array_ops.placeholder_with_default([1, 2, 3], shape=[None])
a = array_ops.identity(p)
self.assertAllEqual([1, 2, 3], a.eval())
self.assertAllEqual([1, 2, 3], self.evaluate(a))
self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]}))
with self.assertRaises(ValueError):
@ -917,7 +917,7 @@ class PlaceholderWithDefaultTest(test.TestCase):
with self.session(force_gpu=test_util.is_gpu_available()):
p = array_ops.placeholder_with_default([17], shape=None)
a = array_ops.identity(p)
self.assertAllEqual([17], a.eval())
self.assertAllEqual([17], self.evaluate(a))
self.assertAllEqual([3, 37], a.eval(feed_dict={p: [3, 37]}))
self.assertAllEqual(
[[3, 3], [3, 3]], a.eval(feed_dict={p: [[3, 3], [3, 3]]}))
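Note on the pattern above: plain .eval() calls become self.evaluate(...), but assertions that feed a value, like a.eval(feed_dict={p: ...}), stay on Tensor.eval, since self.evaluate only takes the fetches and has no feed argument. Roughly (this is a simplified sketch, not the real implementation in test_util.TensorFlowTestCase), self.evaluate converts the fetched tensors to numpy directly in eager mode and falls back to running them in the cached test session in graph mode, which is why the same assertion works in both modes:

from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.util import nest


class EvaluateSketch(test_util.TensorFlowTestCase):

  def _evaluate_sketch(self, tensors):
    # Simplified sketch only; the real self.evaluate handles more
    # structure types and session bookkeeping.
    if context.executing_eagerly():
      # Eager tensors already carry their values; just pull out numpy.
      return nest.map_structure(lambda t: t.numpy(), tensors)
    # Graph mode: run the fetches in the cached test session.
    with self.cached_session() as sess:
      return sess.run(tensors)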


@ -139,7 +139,7 @@ class ControlFlowTest(test.TestCase):
self.assertTrue(isinstance(v2, ops.Tensor))
variables.global_variables_initializer().run()
self.assertEqual(9, v2.eval())
self.assertEqual(9, self.evaluate(v2))
def testRefEnter(self):
with self.cached_session():
@ -152,7 +152,7 @@ class ControlFlowTest(test.TestCase):
v2 = control_flow_ops.with_dependencies([op], enter_v)
v3 = control_flow_ops.exit(v2)
variables.global_variables_initializer().run()
self.assertEqual(9, v3.eval())
self.assertEqual(9, self.evaluate(v3))
def testRefSwitch(self):
with self.cached_session():
@ -162,7 +162,7 @@ class ControlFlowTest(test.TestCase):
v1 = control_flow_ops._SwitchRefOrTensor(v._ref(), p) # pylint: disable=protected-access
v2 = state_ops.assign(v1[1], 9)
variables.global_variables_initializer().run()
self.assertEqual(9, v2.eval())
self.assertEqual(9, self.evaluate(v2))
def testEnterMulExit(self):
with self.cached_session():
@ -173,7 +173,7 @@ class ControlFlowTest(test.TestCase):
mul_op = math_ops.multiply(enter_data, enter_five)
exit_op = control_flow_ops.exit(mul_op)
result = exit_op.eval()
result = self.evaluate(exit_op)
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testEnterShapePropagation(self):
@ -214,7 +214,7 @@ class ControlFlowTest(test.TestCase):
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Retval[0] does not have value" in str(e)):
dead_branch.eval()
self.evaluate(dead_branch)
def testSwitchMergeLess(self):
with self.cached_session():
@ -225,7 +225,7 @@ class ControlFlowTest(test.TestCase):
switch_op = control_flow_ops.switch(data, less_op)
merge_op = control_flow_ops.merge(switch_op)[0]
result = merge_op.eval()
result = self.evaluate(merge_op)
self.assertAllEqual(np.arange(1, 7), result)
def testSwitchMergeAddIdentity(self):
@ -238,7 +238,7 @@ class ControlFlowTest(test.TestCase):
id_op = array_ops.identity(switch_op[1])
merge_op = control_flow_ops.merge([add_op, id_op])[0]
result = merge_op.eval()
result = self.evaluate(merge_op)
self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)
def testSwitchMergeAddMul(self):
@ -252,7 +252,7 @@ class ControlFlowTest(test.TestCase):
mul_op = math_ops.multiply(switch_op[1], five)
merge_op = control_flow_ops.merge([add_op, mul_op])[0]
result = merge_op.eval()
result = self.evaluate(merge_op)
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testLoop_false(self):
@ -269,7 +269,7 @@ class ControlFlowTest(test.TestCase):
next_n = control_flow_ops.next_iteration(switch_n[0])
merge_n.op._update_input(1, next_n)
result = exit_n.eval()
result = self.evaluate(exit_n)
self.assertAllEqual(10, result)
def testLoop_1(self):
@ -295,7 +295,7 @@ class ControlFlowTest(test.TestCase):
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = exit_i.eval()
result = self.evaluate(exit_i)
self.assertAllEqual(10, result)
def testLoop_2(self):
@ -321,7 +321,7 @@ class ControlFlowTest(test.TestCase):
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = exit_i.eval()
result = self.evaluate(exit_i)
self.assertAllEqual(10, result)
def testDifferentFrame(self):
@ -476,7 +476,7 @@ class ControlFlowTest(test.TestCase):
fn2 = lambda: math_ops.subtract(x, 1)
r = control_flow_ops.cond(pred, fn1, fn2)
result = r.eval()
result = self.evaluate(r)
self.assertAllEqual(11, result)
def testCond_1(self):
@ -492,7 +492,7 @@ class ControlFlowTest(test.TestCase):
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(x, 1),
lambda: math_ops.subtract(x, 1))
result = r.eval()
result = self.evaluate(r)
self.assertAllEqual(9, result)
def testCond_3(self):
@ -505,7 +505,7 @@ class ControlFlowTest(test.TestCase):
fn3 = lambda: math_ops.add(control_flow_ops.cond(pred, fn1, fn2), 1)
r = control_flow_ops.cond(pred, fn3, fn2)
result = r.eval()
result = self.evaluate(r)
self.assertAllEqual(12, result)
@test_util.run_in_graph_and_eager_modes
@ -532,9 +532,9 @@ class ControlFlowTest(test.TestCase):
result = f().eval()
self.assertEqual(True, result)
# Only second cond result was fetched, so v1 assign shouldn't run.
self.assertEqual(7, v1.eval())
self.assertEqual(2, v2.eval())
self.assertEqual(7, v3.eval())
self.assertEqual(7, self.evaluate(v1))
self.assertEqual(2, self.evaluate(v2))
self.assertEqual(7, self.evaluate(v3))
result = f_defun()
self.assertEqual(True, self.evaluate(result))
@ -555,7 +555,7 @@ class ControlFlowTest(test.TestCase):
for i in range(10):
alive, count = body(i)
self.assertAllEqual(4, count.eval())
self.assertAllEqual(4, self.evaluate(count))
def testCond_6(self):
with self.cached_session():
@ -568,7 +568,7 @@ class ControlFlowTest(test.TestCase):
r = control_flow_ops.cond(pred, fn1, fn2)
variables.global_variables_initializer().run()
result = r.eval()
result = self.evaluate(r)
self.assertAllEqual(np.array([7]), result)
def testCond_7(self):
@ -677,7 +677,7 @@ class ControlFlowTest(test.TestCase):
true_fn = lambda: x
false_fn = lambda: constant_op.constant([2.0])
r = control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
self.assertAllEqual([2.0], r.eval())
self.assertAllEqual([2.0], self.evaluate(r))
@test_util.disable_control_flow_v2("b/79881896 (control deps)")
def testCondWithControl(self):
@ -693,7 +693,7 @@ class ControlFlowTest(test.TestCase):
r = control_flow_ops.cond(
constant_op.constant(True), true_branch,
lambda: constant_op.constant(1))
self.assertEqual(5, r.eval())
self.assertEqual(5, self.evaluate(r))
def testUninitializedRefIdentity(self):
with self.cached_session() as sess:
@ -758,7 +758,7 @@ class ControlFlowTest(test.TestCase):
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
self.assertAllEqual(1.0, grad.eval())
self.assertAllEqual(1.0, self.evaluate(grad))
def testCondGrad_2(self):
with self.cached_session():
@ -827,13 +827,13 @@ class ControlFlowTest(test.TestCase):
constant_op.constant(True), lambda: x,
lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
result = gradients_impl.gradients(y, x)[0]
self.assertEqual(1.0, result.eval())
self.assertEqual(1.0, self.evaluate(result))
z = control_flow_ops.cond(
constant_op.constant(False), lambda: x,
lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
result = gradients_impl.gradients(z, x)[0]
self.assertEqual(1.0, result.eval())
self.assertEqual(1.0, self.evaluate(result))
@test_util.disable_control_flow_v2("b/113327884")
def testCondGrad_Gather(self):
@ -982,7 +982,7 @@ class ControlFlowTest(test.TestCase):
c = lambda x: math_ops.less(x, 10000)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, r.eval())
self.assertEqual(10000, self.evaluate(r))
@test_util.disable_control_flow_v2("b/79881896 (control deps)")
def testWhileExternalControlDependencies(self):
@ -1013,7 +1013,7 @@ class ControlFlowTest(test.TestCase):
result = control_flow_ops.while_loop(cond=lambda i: i < 5,
body=body_fn, loop_vars=[0])
result.eval()
self.evaluate(result)
self.assertAllEqual(v.eval(), 1.0)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@ -1045,19 +1045,19 @@ class ControlFlowTest(test.TestCase):
with self.cached_session():
s = constant_op.constant(0)
r = isum(s)
self.assertAllEqual(45, r.eval())
self.assertAllEqual(45, self.evaluate(r))
def testWhileWithMaximumIterations(self):
with self.cached_session():
s = constant_op.constant([1, 2, 3, 4, 5])
r = isum(s, maximum_iterations=3)
self.assertAllEqual([1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3], r.eval())
self.assertAllEqual([1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3], self.evaluate(r))
def testWhileWithMaximumIterationsAndSingleArgument(self):
with self.cached_session():
r = control_flow_ops.while_loop(
lambda i: i < 3, lambda i: i + 1, [0], maximum_iterations=1)
self.assertEqual(1, r.eval())
self.assertEqual(1, self.evaluate(r))
@test_util.disable_control_flow_v2("b/115776323 (max_iters)")
def testSingleNestedMaximumIterationsWhileLoopGradientInXLAContext(self):
@ -1350,7 +1350,7 @@ class ControlFlowTest(test.TestCase):
c = lambda x: math_ops.less(x, 10.0)
b = lambda x: math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, r.eval())
self.assertAllClose(10.0, self.evaluate(r))
def testWhile_Gpu_1(self):
self._testWhile_Gpu_1(use_gpu=False)
@ -1366,7 +1366,7 @@ class ControlFlowTest(test.TestCase):
return math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, r.eval())
self.assertAllClose(10.0, self.evaluate(r))
def testWhile_Gpu_2(self):
self._testWhile_Gpu_2(use_gpu=False)
@ -1387,7 +1387,7 @@ class ControlFlowTest(test.TestCase):
c, _b, [i, m],
[i.get_shape(), tensor_shape.unknown_shape()])
r = r[1] * array_ops.ones([8, 8])
self.assertAllEqual(np.ones((8, 8)), r.eval())
self.assertAllEqual(np.ones((8, 8)), self.evaluate(r))
def testWhileWithNonTensorInput_Scalar(self):
with self.cached_session():
@ -1395,7 +1395,7 @@ class ControlFlowTest(test.TestCase):
c = lambda x: x < 10000
b = lambda x: x + 1
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, r.eval())
self.assertEqual(10000, self.evaluate(r))
def testWhileWithNonTensorInput_Vector(self):
with self.cached_session():
@ -1403,7 +1403,7 @@ class ControlFlowTest(test.TestCase):
c = lambda x: x[0] < 10000
b = lambda x: array_ops.stack([x[0] + 1])
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual([10000], r.eval())
self.assertEqual([10000], self.evaluate(r))
def testWhileShapeInference(self):
with self.cached_session():
@ -1515,7 +1515,7 @@ class ControlFlowTest(test.TestCase):
c = lambda x: math_ops.less(x, 200)
b = lambda x: math_ops.add(x, cpu_sum(n))
r = control_flow_ops.while_loop(c, b, [n])
self.assertEqual(225, r.eval())
self.assertEqual(225, self.evaluate(r))
def testNestedWhile_1(self):
self._testNestedWhile_1(use_gpu=False)
@ -1547,7 +1547,7 @@ class ControlFlowTest(test.TestCase):
r = control_flow_ops.while_loop(
outer_c, outer_b, [s0], parallel_iterations=1)
self.assertEqual(1048576.0, r.eval())
self.assertEqual(1048576.0, self.evaluate(r))
def testNestedWhile_2(self):
self._testNestedWhile_2(use_gpu=False)
@ -1581,7 +1581,7 @@ class ControlFlowTest(test.TestCase):
res = control_flow_ops.while_loop(
condition, body, [r], parallel_iterations=1)
self.assertAllEqual(12, res.eval())
self.assertAllEqual(12, self.evaluate(res))
def testWhileWithControl_3(self):
with self.cached_session() as sess:
@ -1650,8 +1650,8 @@ class ControlFlowTest(test.TestCase):
r = control_flow_ops.while_loop(loop_condition, loop_body, (i0,))
variables.global_variables_initializer().run()
self.assertEqual(4, r.eval())
self.assertAllClose(65536.0, v.eval())
self.assertEqual(4, self.evaluate(r))
self.assertAllClose(65536.0, self.evaluate(v))
@test_util.disable_control_flow_v2("b/113324949 (ref vars)")
def testWhileCondExitControl(self):
@ -1675,8 +1675,8 @@ class ControlFlowTest(test.TestCase):
constant_op.constant(False), lambda: constant_op.constant(1.0),
false_branch)
variables.global_variables_initializer().run()
self.assertEqual(6.0, r.eval())
self.assertEqual(99, v.eval())
self.assertEqual(6.0, self.evaluate(r))
self.assertEqual(99, self.evaluate(v))
def testCondWhile_1(self):
@ -1687,7 +1687,7 @@ class ControlFlowTest(test.TestCase):
r = control_flow_ops.cond(
math_ops.less(0, 1), lambda: control_flow_ops.while_loop(c, b, [n]),
lambda: n)
self.assertAllEqual(10, r.eval())
self.assertAllEqual(10, self.evaluate(r))
def testCondWhile_2(self):
@ -1698,7 +1698,7 @@ class ControlFlowTest(test.TestCase):
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(n, 1),
lambda: control_flow_ops.while_loop(c, b, [n]))
self.assertAllEqual(10, r.eval())
self.assertAllEqual(10, self.evaluate(r))
def _testCondWhile_3(self, use_gpu):
with self.cached_session(use_gpu=use_gpu) as sess:
@ -1741,7 +1741,7 @@ class ControlFlowTest(test.TestCase):
lambda: math_ops.add(x, one), lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [i])
self.assertAllEqual(10, r.eval())
self.assertAllEqual(10, self.evaluate(r))
def testWhileCond_2(self):
@ -1750,7 +1750,7 @@ class ControlFlowTest(test.TestCase):
c = lambda x: math_ops.less(x, 10)
b = lambda x: control_flow_ops.cond(constant_op.constant(True), lambda: math_ops.add(x, 1), lambda: n)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, r.eval())
self.assertAllEqual(10, self.evaluate(r))
def testWhileCond_3(self):
@ -1764,7 +1764,7 @@ class ControlFlowTest(test.TestCase):
lambda: math_ops.subtract(x, 1))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, r.eval())
self.assertAllEqual(10, self.evaluate(r))
def testWhileCondGradMultiDevice(self):
config = config_pb2.ConfigProto(device_count={"CPU": 2},
@ -1815,8 +1815,8 @@ class ControlFlowTest(test.TestCase):
r = control_flow_ops.while_loop(
loop_iterator, loop_body, [n], parallel_iterations=1)
variables.global_variables_initializer().run()
self.assertEqual(3, r.eval())
result = select.eval()
self.assertEqual(3, self.evaluate(r))
result = self.evaluate(select)
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@ -1840,10 +1840,10 @@ class ControlFlowTest(test.TestCase):
r = control_flow_ops.while_loop(
loop_iterator, loop_body, [n], parallel_iterations=1)
variables.global_variables_initializer().run()
self.assertEqual(3, r.eval())
result1 = select1.eval()
self.assertEqual(3, self.evaluate(r))
result1 = self.evaluate(select1)
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result1)
result2 = select2.eval()
result2 = self.evaluate(select2)
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result2)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
@ -1892,9 +1892,9 @@ class ControlFlowTest(test.TestCase):
lpa = control_flow_ops.while_loop(
pred, loop_body, [c], parallel_iterations=1)
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(10, var_b.eval())
self.assertEqual(0, self.evaluate(var_b))
self.evaluate(lpa) # Run the loop
self.assertEqual(10, self.evaluate(var_b))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
def testWhileUpdateVariable_5(self):
@ -1921,10 +1921,10 @@ class ControlFlowTest(test.TestCase):
lpa = control_flow_ops.while_loop(
pred, loop_body, [var_b], parallel_iterations=1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(10, var_a.eval())
self.assertEqual(10, var_b.eval())
self.assertEqual(0, self.evaluate(var_b))
self.evaluate(lpa) # Run the loop
self.assertEqual(10, self.evaluate(var_a))
self.assertEqual(10, self.evaluate(var_b))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
def testWhileUpdateVariable_6(self):
@ -1951,10 +1951,10 @@ class ControlFlowTest(test.TestCase):
lpa = control_flow_ops.while_loop(
pred, loop_body, [c], parallel_iterations=1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(55, var_b.eval())
self.assertEqual(10, var_a.eval())
self.assertEqual(0, self.evaluate(var_b))
self.evaluate(lpa) # Run the loop
self.assertEqual(55, self.evaluate(var_b))
self.assertEqual(10, self.evaluate(var_a))
def testWhileQueue_1(self):
with self.cached_session():
@ -1970,7 +1970,7 @@ class ControlFlowTest(test.TestCase):
return ni
r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
self.assertEqual([10], r.eval())
self.assertEqual([10], self.evaluate(r))
for i in xrange(10):
self.assertEqual([i], q.dequeue().eval())
@ -2006,7 +2006,7 @@ class ControlFlowTest(test.TestCase):
b1, [r, x],
[r.get_shape(), tensor_shape.unknown_shape()],
parallel_iterations=1)
self.assertEqual(45, rx.eval())
self.assertEqual(45, self.evaluate(rx))
def _testWhileGrad_ColocateGradients(self, colocate):
gpu_dev_name = test.gpu_device_name() if test.is_gpu_available(
@ -2057,7 +2057,7 @@ class ControlFlowTest(test.TestCase):
r = control_flow_ops.cond(math_ops.less(1, 2), lambda: r, lambda: v)
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(1024.0, r.eval())
self.assertAllClose(1024.0, self.evaluate(r))
def testWhileGrad_Shape(self):
with self.cached_session():
@ -2097,7 +2097,7 @@ class ControlFlowTest(test.TestCase):
r = math_ops.multiply(r, r)
r = gradients_impl.gradients(r, v)[0]
self.assertEqual(524288.0, r.eval())
self.assertEqual(524288.0, self.evaluate(r))
def testWhileGrad_LoopAdd(self):
with self.cached_session():
@ -2108,7 +2108,7 @@ class ControlFlowTest(test.TestCase):
r = math_ops.add(r, r)
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(2048.0, r.eval())
self.assertAllClose(2048.0, self.evaluate(r))
def _testWhileGrad_Mul(self, use_gpu, p_iters):
with self.cached_session(use_gpu=use_gpu) as sess:
@ -2151,7 +2151,7 @@ class ControlFlowTest(test.TestCase):
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(512.0, r.eval())
self.assertAllClose(512.0, self.evaluate(r))
def testNestedWhileCondWhileGrad(self):
if control_flow_ops.ENABLE_WHILE_V2 and test_util.is_gpu_available():
@ -2443,10 +2443,11 @@ class ControlFlowTest(test.TestCase):
with ops.control_dependencies([x_f]):
y_f_d = array_ops.identity(y_f, name="y_f_d")
self.assertAllClose(2.0, y_f_d.eval()) # y_f_d = 1.0 + 1.0
self.assertAllClose(2.0, self.evaluate(y_f_d)) # y_f_d = 1.0 + 1.0
g = gradients_impl.gradients([y_f_d], [x])[0]
self.assertTrue(g is not None)
self.assertAllClose(1.0, g.eval()) # y_f_d = x + 1.0, dy_f_d/dx = 1.0
self.assertAllClose(1.0,
self.evaluate(g)) # y_f_d = x + 1.0, dy_f_d/dx = 1.0
def _testNestedWhileGrad_Simple(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
@ -2462,7 +2463,7 @@ class ControlFlowTest(test.TestCase):
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(8.0, r.eval())
self.assertAllClose(8.0, self.evaluate(r))
def testNestedWhileGrad_Simple(self):
self._testNestedWhileGrad_Simple(use_gpu=False)
@ -2489,7 +2490,7 @@ class ControlFlowTest(test.TestCase):
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(256.0, r.eval())
self.assertAllClose(256.0, self.evaluate(r))
def testNestedWhileGrad_ParallelInner(self):
with self.cached_session():
@ -2512,7 +2513,7 @@ class ControlFlowTest(test.TestCase):
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(512.0, r.eval())
self.assertAllClose(512.0, self.evaluate(r))
@test_util.disable_control_flow_v2("unsupported: resource creation in body. "
"Enable with new TAs b/117675481")
@ -2536,7 +2537,7 @@ class ControlFlowTest(test.TestCase):
train_op = optimizer.minimize(math_ops.reduce_mean(math_ops.square(res)))
sess.run(variables.global_variables_initializer())
sess.run(train_op)
self.assertAllClose(2.999, var.eval())
self.assertAllClose(2.999, self.evaluate(var))
def _testWhileCondGrad_Simple(self, use_gpu):
with self.cached_session(use_gpu=use_gpu):
@ -2552,7 +2553,7 @@ class ControlFlowTest(test.TestCase):
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(1024.0, r.eval())
self.assertAllClose(1024.0, self.evaluate(r))
@test_util.disable_control_flow_v2("b/117519152")
def testWhileCondGrad_Simple(self):
@ -2649,7 +2650,7 @@ class ControlFlowTest(test.TestCase):
_, r = control_flow_ops.while_loop(c, b, [i, x])
r = gradients_impl.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())
self.assertAllClose(np.array([1024.0, 1024.0]), self.evaluate(r))
@test_util.disable_control_flow_v2("b/116328420 (SparseTensor)")
def testWhileGrad_SparseTensor(self):
@ -2672,7 +2673,7 @@ class ControlFlowTest(test.TestCase):
_, r = control_flow_ops.while_loop(c, b, [i, x])
r = gradients_impl.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())
self.assertAllClose(np.array([1024.0, 1024.0]), self.evaluate(r))
@test_util.disable_control_flow_v2("b/115920078 (gradients)")
def testCallGradInLoop(self):
@ -2730,9 +2731,9 @@ class ControlFlowTest(test.TestCase):
rx, ry = control_flow_ops.while_loop(c, b, [x, y])
r = gradients_impl.gradients(rx, y)[0]
self.assertEqual(136.0, r.eval())
self.assertEqual(136.0, self.evaluate(r))
r = gradients_impl.gradients(ry, y)[0]
self.assertEqual(32.0, r.eval())
self.assertEqual(32.0, self.evaluate(r))
r = gradients_impl.gradients(array_ops.stop_gradient(rx), y)[0]
self.assertEqual(r, None)
@ -2750,13 +2751,13 @@ class ControlFlowTest(test.TestCase):
self.assertEqual(r, None)
r = gradients_impl.gradients(math_ops.add(rx, ry), y)[0]
self.assertEqual(168.0, r.eval())
self.assertEqual(168.0, self.evaluate(r))
r = gradients_impl.gradients(
math_ops.add(rx, array_ops.stop_gradient(ry)), y)[0]
self.assertEqual(136.0, r.eval())
self.assertEqual(136.0, self.evaluate(r))
r = gradients_impl.gradients(
math_ops.add(array_ops.stop_gradient(rx), ry), y)[0]
self.assertEqual(32.0, r.eval())
self.assertEqual(32.0, self.evaluate(r))
def testWhileGrad_StopGradInside(self):
with self.cached_session():
@ -2773,9 +2774,9 @@ class ControlFlowTest(test.TestCase):
rx, _ = control_flow_ops.while_loop(c, b, [x, y])
r = gradients_impl.gradients(rx, y)[0]
self.assertAllClose(0.0, r.eval())
self.assertAllClose(0.0, self.evaluate(r))
r = gradients_impl.gradients(rx, x)[0]
self.assertAllClose(156.0, r.eval())
self.assertAllClose(156.0, self.evaluate(r))
def testWhileGrad_StopGradInsideNoShape(self):
with self.cached_session() as sess:
@ -2829,7 +2830,7 @@ class ControlFlowTest(test.TestCase):
r = math_ops.add(math_ops.square(y), rx)
r = math_ops.add(r, rg)
r = gradients_impl.gradients(r, y)[0]
self.assertEqual(388.0, r.eval())
self.assertEqual(388.0, self.evaluate(r))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
def testWhileGradientWithNontrainablePath1(self):
@ -2915,7 +2916,7 @@ class ControlFlowTest(test.TestCase):
z = math_ops.add(r, array_ops.stop_gradient(math_ops.reduce_sum(grads)))
result = gradients_impl.gradients(z, vars_)[0]
variables.global_variables_initializer().run()
self.assertEqual(5.0, result.eval())
self.assertEqual(5.0, self.evaluate(result))
def testOneValueCond(self):
@ -2976,7 +2977,7 @@ class ControlFlowTest(test.TestCase):
r4 = control_flow_ops.case(
[(x < y, f1), (x < y, f2)], default=f3, exclusive=True)
with self.assertRaisesOpError("Input error:"):
r4.eval()
self.evaluate(r4)
# Check that the default is called if none of the others are
r5 = control_flow_ops.case({x > y: f1}, default=f3)
@ -3023,17 +3024,17 @@ class ControlFlowTest(test.TestCase):
variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(2, r2.eval())
self.assertEqual(2, self.evaluate(r2))
self.assertAllEqual(sess.run([v0, v1, v2]), [-1, -1, 2])
variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(1, r1.eval())
self.assertEqual(1, self.evaluate(r1))
self.assertAllEqual(sess.run([v0, v1, v2]), [-1, 1, -1])
variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(0, r0.eval())
self.assertEqual(0, self.evaluate(r0))
self.assertAllEqual(sess.run([v0, v1, v2]), [0, -1, -1])
@test_util.disable_control_flow_v2("b/113324949 (ref vars)")
@ -3055,15 +3056,15 @@ class ControlFlowTest(test.TestCase):
self.assertTrue(isinstance(i, ops.Tensor))
variables.global_variables_initializer().run()
self.assertEqual(0, v.eval())
self.assertEqual(0, self.evaluate(v))
# True case: c = 2 is >= 1, v is set to 1.
self.assertEqual(1, i.eval(feed_dict={c.name: 2}))
self.assertEqual(1, v.eval())
self.assertEqual(1, self.evaluate(v))
# False case: c = 0 is not >= 1, v is set to 2.
self.assertEqual(2, i.eval(feed_dict={c.name: 0}))
self.assertEqual(2, v.eval())
self.assertEqual(2, self.evaluate(v))
def testWithOpsDependencies(self):
with self.cached_session() as sess:
@ -3105,14 +3106,14 @@ class ControlFlowTest(test.TestCase):
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v.eval()
self.evaluate(v)
# Get the value of 'c2_with_c1_dep', which should cause 'v'
# to be initialized.
self.assertAllEqual(20, c2_with_c1_dep.eval())
self.assertAllEqual(20, self.evaluate(c2_with_c1_dep))
# Ensure that 'v' is initialized
self.assertAllClose(0.0, v.eval())
self.assertAllClose(0.0, self.evaluate(v))
def testWithIndexedSlicesDependencies(self):
with self.cached_session():
@ -3127,13 +3128,15 @@ class ControlFlowTest(test.TestCase):
# Fetching gather_v_at_1 will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
gather_v_at_1.eval()
self.evaluate(gather_v_at_1)
# Getting gather_v_at_1_after_init will work, and initialize v.
self.assertAllEqual([[10.0, 11.0]], gather_v_at_1_after_init.eval())
self.assertAllEqual([[10.0, 11.0]],
self.evaluate(gather_v_at_1_after_init))
# Double check that 'v' is initialized
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]], v.eval())
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
self.evaluate(v))
def testDependenciesDevice(self):
with ops.Graph().as_default():
@ -3167,7 +3170,7 @@ class ControlFlowTest(test.TestCase):
init = control_flow_ops.group(v1.initializer, v2.initializer)
# Fetching v1 directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
self.evaluate(v1)
# Runs "init" before fetching v1 and v2.
init.run()
@ -3495,20 +3498,20 @@ class TupleTest(test.TestCase):
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
self.evaluate(v1)
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v2.eval()
self.evaluate(v2)
if v1_first:
# Getting t1 initializes v2.
self.assertAllClose([3.0], t1.eval())
self.assertAllClose([10.0], v2.eval())
self.assertAllClose([3.0], self.evaluate(t1))
self.assertAllClose([10.0], self.evaluate(v2))
else:
# Getting t2 initializes v1.
self.assertAllClose([30.0], t2.eval())
self.assertAllClose([1.0], v1.eval())
self.assertAllClose([30.0], self.evaluate(t2))
self.assertAllClose([1.0], self.evaluate(v1))
def testIndexedSlices(self):
for v1_first in [True, False]:
@ -3533,22 +3536,22 @@ class TupleTest(test.TestCase):
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
self.evaluate(v1)
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v2.eval()
self.evaluate(v2)
if v1_first:
# Getting g1 initializes v2.
self.assertAllClose([[10.0, 11.0]], g1.eval())
self.assertAllClose([[10.0, 11.0]], self.evaluate(g1))
self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]],
v2.eval())
self.evaluate(v2))
else:
# Getting g2 initializes v1.
self.assertAllClose([[10.1, 11.1]], g2.eval())
self.assertAllClose([[10.1, 11.1]], self.evaluate(g2))
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
v1.eval())
self.evaluate(v1))
def testAcceptTensorsAsControlInputs(self):
with self.cached_session():
@ -3558,9 +3561,9 @@ class TupleTest(test.TestCase):
[constant_op.constant(0)], control_inputs=[assign])
# Should trigger the assign.
t.eval()
self.evaluate(t)
self.assertEquals(1, var.eval())
self.assertEquals(1, self.evaluate(var))
class AssertTest(test.TestCase):
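Note: self.evaluate is also used above purely for side effects (self.evaluate(lpa) to drive the loop, self.evaluate(t) to trigger the control-dependency assign), and it accepts nested structures of fetches, so multi-fetch assertions such as sess.run([v0, v1, v2]) could be written the same way. A minimal graph-mode sketch of both idioms (the test class and values here are made up for illustration):

from tensorflow.python.framework import test_util
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables


class EvaluateIdiomsSketch(test_util.TensorFlowTestCase):

  def testAssignThenFetch(self):
    with self.cached_session():
      var = variables.Variable(0)
      assign = state_ops.assign(var, 7)
      self.evaluate(variables.global_variables_initializer())
      # Run the assign purely for its side effect, like sess.run(assign).
      self.evaluate(assign)
      self.assertEqual(7, self.evaluate(var))
      # A list of fetches comes back as a list of numpy values.
      self.assertAllEqual([7, 7], self.evaluate([var, var]))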


@ -43,7 +43,7 @@ class Conv1DTest(test.TestCase):
with self.cached_session(use_gpu=test.is_gpu_available()):
c = nn_ops.conv1d(x, filters, stride, padding="VALID")
reduced = array_ops.squeeze(c)
output = reduced.eval()
output = self.evaluate(reduced)
if stride == 1:
self.assertEqual(len(output), 3)
self.assertAllClose(output,
@ -69,7 +69,7 @@ class Conv1DTest(test.TestCase):
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv1d_transpose(
x, f, y_shape, stride=stride, padding="VALID")
value = output.eval()
value = self.evaluate(output)
cache_values = np.zeros(y_shape, dtype=np.float32)


@ -52,7 +52,7 @@ class Conv2DTransposeTest(test.TestCase):
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = output.eval()
value = self.evaluate(output)
# We count the number of cells being added at the locations in the output.
# At the center, #cells=kernel_height * kernel_width
@ -90,7 +90,7 @@ class Conv2DTransposeTest(test.TestCase):
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = output.eval()
value = self.evaluate(output)
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
@ -123,7 +123,7 @@ class Conv2DTransposeTest(test.TestCase):
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="VALID")
value = output.eval()
value = self.evaluate(output)
cache_values = np.zeros(y_shape, dtype=np.float32)
@ -194,7 +194,7 @@ class Conv2DTransposeTest(test.TestCase):
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME", data_format="NCHW")
value = output.eval()
value = self.evaluate(output)
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(y_shape[3]):
@ -229,7 +229,7 @@ class Conv2DTransposeTest(test.TestCase):
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME", data_format="NCHW")
value = output.eval()
value = self.evaluate(output)
for n in xrange(x_shape[0]):
for k in xrange(f_shape[2]):
for w in xrange(y_shape[3]):
@ -264,7 +264,7 @@ class Conv2DTransposeTest(test.TestCase):
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="VALID", data_format="NCHW")
value = output.eval()
value = self.evaluate(output)
cache_values = np.zeros(y_shape, dtype=np.float32)
# The amount of padding added
pad = 1
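Note: the "count the number of cells" comments in these transpose tests describe how the expected values are built: with an all-ones input and an all-ones filter, a transposed convolution is just a scatter-add of the kernel at every stride-spaced input position, so each output cell holds the number of kernel windows covering it (kernel_height * kernel_width at fully covered interior positions, fewer near the borders and in the stride gaps). A 1-D numpy sketch of that counting idea, assuming a VALID-style output length (the helper name is illustrative, not one used by the tests):

import numpy as np


def ones_conv1d_transpose_counts(in_len, kernel, stride):
  # VALID-style output length for a 1-D transposed convolution.
  out_len = (in_len - 1) * stride + kernel
  out = np.zeros(out_len)
  # Scatter-add an all-ones kernel at every stride-spaced input position;
  # each output cell ends up counting how many placements cover it.
  for i in range(in_len):
    out[i * stride:i * stride + kernel] += 1.0
  return out


# ones_conv1d_transpose_counts(4, 3, 1) -> [1., 2., 3., 3., 2., 1.]:
# fully covered cells see all three kernel taps, edge cells see fewer.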


@ -48,7 +48,7 @@ class Conv3DTransposeTest(test.TestCase):
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = output.eval()
value = self.evaluate(output)
# We count the number of cells being added at the locations in the output.
# At the center, #cells = kernel_depth * kernel_height * kernel_width
@ -98,7 +98,7 @@ class Conv3DTransposeTest(test.TestCase):
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = output.eval()
value = self.evaluate(output)
for n in xrange(x_shape[0]):
for k in xrange(f_shape[3]):
@ -146,7 +146,7 @@ class Conv3DTransposeTest(test.TestCase):
output = nn_ops.conv3d_transpose(
x_value, f_value, constant_op.constant(y_shape, dtype=dtype),
strides=strides, padding="SAME")
output.eval()
self.evaluate(output)
def testConv3DTransposeValid(self):
with self.cached_session():
@ -165,7 +165,7 @@ class Conv3DTransposeTest(test.TestCase):
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv3d_transpose(
x, f, y_shape, strides=strides, padding="VALID")
value = output.eval()
value = self.evaluate(output)
cache_values = np.zeros(y_shape, dtype=np.float32)


@ -81,7 +81,7 @@ class BinaryOpTest(test.TestCase):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = out.eval()
tf_cpu = self.evaluate(out)
# Test that the op takes precedence over numpy operators.
np_left = tf_func(x, iny).eval()
np_right = tf_func(inx, y).eval()
@ -178,7 +178,7 @@ class BinaryOpTest(test.TestCase):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = out.eval()
tf_gpu = self.evaluate(out)
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO(zhifengc/ke): make gradient checker work on GPU.
@ -748,7 +748,7 @@ class ComparisonOpTest(test.TestCase):
out = func(
ops.convert_to_tensor(np.array([x]).astype(dtype)),
ops.convert_to_tensor(np.array([y]).astype(dtype)))
ret = out.eval()
ret = self.evaluate(out)
return ret[0]
def testScalarCompareScalar(self):
@ -779,7 +779,7 @@ class ComparisonOpTest(test.TestCase):
np_ans = np_func(x, y)
with self.test_session(force_gpu=test_util.is_gpu_available()):
out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))
tf_ans = out.eval()
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
def testTensorCompareTensor(self):


@ -88,7 +88,7 @@ class ComparisonOpTest(test.TestCase):
out = func(
ops.convert_to_tensor(np.array([x]).astype(dtype)),
ops.convert_to_tensor(np.array([y]).astype(dtype)))
ret = out.eval()
ret = self.evaluate(out)
return ret[0]
def testScalarCompareScalar(self):
@ -119,7 +119,7 @@ class ComparisonOpTest(test.TestCase):
np_ans = np_func(x, y)
with self.test_session(force_gpu=test_util.is_gpu_available()):
out = tf_func(ops.convert_to_tensor(x), ops.convert_to_tensor(y))
tf_ans = out.eval()
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
def testTensorCompareTensor(self):
@ -223,7 +223,7 @@ class LogicalOpTest(test.TestCase):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_val = out.eval()
tf_val = self.evaluate(out)
self.assertEqual(out.dtype, dtypes_lib.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
@ -233,7 +233,7 @@ class LogicalOpTest(test.TestCase):
with self.test_session(use_gpu=use_gpu,
force_gpu=use_gpu and test_util.is_gpu_available()):
out = math_ops.logical_not(ops.convert_to_tensor(x))
tf_val = out.eval()
tf_val = self.evaluate(out)
self.assertEqual(out.dtype, dtypes_lib.bool)
self.assertAllEqual(np_ans, tf_val)
self.assertShapeEqual(np_ans, out)
@ -319,7 +319,7 @@ class SelectOpTest(test.TestCase):
with self.test_session(use_gpu=use_gpu,
force_gpu=use_gpu and test_util.is_gpu_available()):
out = array_ops.where(c, x, y)
tf_ans = out.eval()
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, out)
@ -463,7 +463,7 @@ class BatchSelectOpTest(test.TestCase):
with self.test_session(use_gpu=use_gpu,
force_gpu=use_gpu and test_util.is_gpu_available()):
out = array_ops.where(c, x, y)
tf_ans = out.eval()
tf_ans = self.evaluate(out)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, out)
@ -644,13 +644,13 @@ class MathOpsOverloadTest(test.TestCase):
with self.test_session(use_gpu=False):
inx = ops.convert_to_tensor(x, dtype=dtype)
z = func(inx, y) # Should use __add__, __sub__, etc.
return z.eval()
return self.evaluate(z)
def _computeLiteralAndTensor(self, x, y, dtype, func):
with self.test_session(use_gpu=False):
iny = ops.convert_to_tensor(y, dtype=dtype)
z = func(x, iny) # Should use __radd__, __rsub__, etc.
return z.eval()
return self.evaluate(z)
def _compareBinary(self, x, y, dtype, np_func, tf_func):
np_ans = np_func(x, y).astype(dtype.as_numpy_dtype)
@ -777,9 +777,9 @@ class IsFiniteInfNanTest(test.TestCase):
tf_y = math_ops.sqrt(x)
tf_nan = math_ops.is_nan(tf_y)
if value < 0:
self.assertAllEqual(np_nan, tf_nan.eval())
self.assertAllEqual(np_nan, self.evaluate(tf_nan))
else:
self.assertAllCloseAccordingToType(np_y, tf_y.eval())
self.assertAllCloseAccordingToType(np_y, self.evaluate(tf_y))
class RoundingTest(test.TestCase):
@ -833,7 +833,7 @@ class ComplexMakeRealImagTest(test.TestCase):
real = ops.convert_to_tensor(real)
imag = ops.convert_to_tensor(imag)
tf_ans = math_ops.complex(real, imag)
out = tf_ans.eval()
out = self.evaluate(tf_ans)
self.assertAllEqual(np_ans, out)
self.assertShapeEqual(np_ans, tf_ans)
@ -855,10 +855,10 @@ class ComplexMakeRealImagTest(test.TestCase):
tf_imag = math_ops.imag(inx)
tf_real_real = math_ops.real(tf_real)
tf_imag_real = math_ops.imag(tf_real)
self.assertAllEqual(np_real, tf_real.eval())
self.assertAllEqual(np_imag, tf_imag.eval())
self.assertAllEqual(np_real, tf_real_real.eval())
self.assertAllEqual(np_zeros, tf_imag_real.eval())
self.assertAllEqual(np_real, self.evaluate(tf_real))
self.assertAllEqual(np_imag, self.evaluate(tf_imag))
self.assertAllEqual(np_real, self.evaluate(tf_real_real))
self.assertAllEqual(np_zeros, self.evaluate(tf_imag_real))
def testRealImag64(self):
real = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(np.float32)
@ -916,7 +916,7 @@ class ComplexMakeRealImagTest(test.TestCase):
force_gpu=use_gpu and test_util.is_gpu_available()):
inx = ops.convert_to_tensor(cplx)
tf_conj = math_ops.conj(inx)
tf_ans = tf_conj.eval()
tf_ans = self.evaluate(tf_conj)
self.assertAllEqual(np_ans, tf_ans)
self.assertShapeEqual(np_ans, tf_conj)
@ -1032,13 +1032,13 @@ class AccumulateTest(test.TestCase):
np_val = random_arrays[0]
for random_array in random_arrays[1:]:
np_val += random_array
self.assertAllClose(np_val, tf_val.eval())
self.assertAllClose(np_val, self.evaluate(tf_val))
def testZeroArgs(self):
with self.cached_session():
with self.assertRaises(ValueError):
tf_val = math_ops.accumulate_n([])
tf_val.eval()
self.evaluate(tf_val)
def testWrongShape(self):
with self.cached_session():
@ -1070,7 +1070,7 @@ class PolyvalTest(test.TestCase):
np_val = np.polyval(coeffs, x)
with self.cached_session():
tf_val = math_ops.polyval(coeffs, x)
self.assertAllClose(np_val, tf_val.eval())
self.assertAllClose(np_val, self.evaluate(tf_val))
def testSimple(self):
for dtype in [
@ -1093,7 +1093,7 @@ class PolyvalTest(test.TestCase):
np_val = np.polyval(coeffs, x)
with self.cached_session():
tf_val = math_ops.polyval(coeffs, x)
self.assertAllClose(np_val, tf_val.eval())
self.assertAllClose(np_val, self.evaluate(tf_val))
def testEmpty(self):
x = np.random.rand(2, 2).astype(np.float32)
@ -1101,7 +1101,7 @@ class PolyvalTest(test.TestCase):
np_val = np.polyval(coeffs, x)
with self.cached_session():
tf_val = math_ops.polyval(coeffs, x)
self.assertAllClose(np_val, tf_val.eval())
self.assertAllClose(np_val, self.evaluate(tf_val))
if __name__ == "__main__":
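Note: the "op takes precedence over numpy operators" checks above work because, as far as I understand the overload machinery, ops.Tensor advertises a high __array_priority__, so numpy defers to the Tensor's reflected operators and a mixed Tensor/ndarray expression builds a TF op in either order, which the test can then eval or self.evaluate. A small illustration with made-up values:

import numpy as np

from tensorflow.python.framework import ops

x = np.array([1.0, 2.0], dtype=np.float32)
inx = ops.convert_to_tensor(x)

left = inx + x    # Tensor.__add__ -> a TF op
right = x + inx   # numpy defers, so Tensor.__radd__ -> a TF op as well
assert isinstance(left, ops.Tensor) and isinstance(right, ops.Tensor)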


@ -84,7 +84,7 @@ class UnaryOpTest(test.TestCase):
np_ans *= 1.1
else:
y = tf_func(inx)
tf_cpu = y.eval()
tf_cpu = self.evaluate(y)
self.assertShapeEqual(np_ans, y)
if x.dtype == np.float16:
self.assertAllClose(np_ans, tf_cpu, rtol=1e-3, atol=1e-3)
@ -140,7 +140,7 @@ class UnaryOpTest(test.TestCase):
np_ans = np_func(x)
with self.test_session(force_gpu=test_util.is_gpu_available()):
result = tf_func(ops.convert_to_tensor(x))
tf_gpu = result.eval()
tf_gpu = self.evaluate(result)
if x.dtype == np.float16:
self.assertAllClose(np_ans, tf_gpu, rtol=1e-3, atol=1e-3)
else:


@ -61,7 +61,7 @@ class DecodeBmpOpTest(test.TestCase):
decode = array_ops.squeeze(image_ops.decode_bmp(img_in))
with self.cached_session():
decoded = decode.eval()
decoded = self.evaluate(decode)
self.assertAllEqual(decoded, img_bytes)
def testGrayscale(self):
@ -136,7 +136,7 @@ class DecodeBmpOpTest(test.TestCase):
decode = image_ops.decode_bmp(img_in)
with self.cached_session():
decoded = decode.eval()
decoded = self.evaluate(decode)
self.assertAllEqual(decoded, img_bytes)


@ -76,7 +76,7 @@ class DecodeImageOpTest(test.TestCase):
bad_channels = image_ops.decode_image(gif0, channels=1)
with self.assertRaises(errors_impl.InvalidArgumentError):
bad_channels.eval()
self.evaluate(bad_channels)
def testJpeg(self):
# Read a real jpeg and verify shape
@ -92,7 +92,7 @@ class DecodeImageOpTest(test.TestCase):
bad_channels = image_ops.decode_image(jpeg0, channels=4)
with self.assertRaises(errors_impl.InvalidArgumentError):
bad_channels.eval()
self.evaluate(bad_channels)
def testPng(self):
# Read some real PNGs, converting to different channel numbers
@ -113,7 +113,7 @@ class DecodeImageOpTest(test.TestCase):
decode = image_ops.decode_image(image_bytes)
with self.cached_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
decode.eval()
self.evaluate(decode)
if __name__ == "__main__":


@ -47,7 +47,7 @@ class DecodePngOpTest(test.TestCase):
img_in, dtype=dtypes.uint16))
with self.cached_session():
decoded = decode.eval()
decoded = self.evaluate(decode)
self.assertAllEqual(decoded, img_bytes)


@ -54,7 +54,7 @@ class AssignOpTest(test.TestCase):
for t in threads:
t.join()
vals = p.eval()
vals = self.evaluate(p)
ones = np.ones((1024, 1024)).astype(np.float32)
self.assertTrue((vals >= ones).all())
self.assertTrue((vals <= ones * 20).all())
@ -81,7 +81,7 @@ class AssignOpTest(test.TestCase):
for t in threads:
t.join()
vals = p.eval()
vals = self.evaluate(p)
# Assert every element is taken from one of the assignments.
self.assertTrue((vals > 0).all())
@ -114,7 +114,7 @@ class AssignOpTest(test.TestCase):
for t in threads:
t.join()
vals = p.eval()
vals = self.evaluate(p)
ones = np.ones((1024, 1024)).astype(np.float32)
self.assertAllEqual(vals, ones * 20)
@ -142,7 +142,7 @@ class AssignOpTest(test.TestCase):
for t in threads:
t.join()
vals = p.eval()
vals = self.evaluate(p)
# Assert every element is the same, and taken from one of the assignments.
self.assertTrue(vals[0, 0] > 0)
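Note: the threaded assign tests above share one shape: start several Python threads that each run an assign on the same variable, join them, evaluate the variable once, and only assert that the result is one of the candidate values, since the interleaving is nondeterministic. A condensed graph-mode sketch of that pattern (illustrative only, not one of the tests in this file):

import threading

from tensorflow.python.framework import test_util
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables


class RacingAssignSketch(test_util.TensorFlowTestCase):

  def testLastWriterWins(self):
    with self.cached_session() as sess:
      p = variables.Variable(0.0)
      assigns = [state_ops.assign(p, float(i)) for i in (1, 2, 3)]
      self.evaluate(variables.global_variables_initializer())
      threads = [
          threading.Thread(target=sess.run, args=(op,)) for op in assigns
      ]
      for t in threads:
        t.start()
      for t in threads:
        t.join()
      # Whichever assign ran last wins; we only know it is one of these.
      self.assertIn(self.evaluate(p), (1.0, 2.0, 3.0))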


@ -36,8 +36,8 @@ class AssignOpTest(test.TestCase):
p = variables.Variable(x)
assign = state_ops.assign(p, y)
p.initializer.run()
new_value = assign.eval()
return p.eval(), new_value
new_value = self.evaluate(assign)
return self.evaluate(p), new_value
def _initAssignAddFetch(self, x, y, use_gpu=False):
"""Initialize a param to init, and compute param += y."""
@ -45,8 +45,8 @@ class AssignOpTest(test.TestCase):
p = variables.Variable(x)
add = state_ops.assign_add(p, y)
p.initializer.run()
new_value = add.eval()
return p.eval(), new_value
new_value = self.evaluate(add)
return self.evaluate(p), new_value
def _initAssignSubFetch(self, x, y, use_gpu=False):
"""Initialize a param to init, and compute param -= y."""
@ -54,8 +54,8 @@ class AssignOpTest(test.TestCase):
p = variables.Variable(x)
sub = state_ops.assign_sub(p, y)
p.initializer.run()
new_value = sub.eval()
return p.eval(), new_value
new_value = self.evaluate(sub)
return self.evaluate(p), new_value
def _testTypes(self, vals):
for dtype in [np.float32, np.float64, np.int32, np.int64]:
@ -90,13 +90,13 @@ class AssignOpTest(test.TestCase):
p = variables.VariableV1([1])
a = state_ops.assign(p, data, validate_shape=False)
a.op.run()
self.assertAllEqual(p.eval(), data.eval())
self.assertAllEqual(p.eval(), self.evaluate(data))
# Assign to yet another shape
data2 = array_ops.fill([10, 10], 1)
a2 = state_ops.assign(p, data2, validate_shape=False)
a2.op.run()
self.assertAllEqual(p.eval(), data2.eval())
self.assertAllEqual(p.eval(), self.evaluate(data2))
def testInitRequiredAssignAdd(self):
with self.cached_session():


@ -106,13 +106,13 @@ class DepthToSpaceTest(test.TestCase):
# test NHWC (default) on CPU
x_tf = array_ops.depth_to_space(input_nhwc, block_size)
self.assertAllEqual(x_tf.shape, x_out.shape)
x_tf.eval()
self.evaluate(x_tf)
if test.is_gpu_available():
with self.cached_session(use_gpu=True):
# test NHWC (default) on GPU
x_tf = array_ops.depth_to_space(input_nhwc, block_size)
self.assertAllEqual(x_tf.shape, x_out.shape)
x_tf.eval()
self.evaluate(x_tf)
# Tests for different width and height.
def testNonSquare(self):
@ -185,7 +185,7 @@ class DepthToSpaceTest(test.TestCase):
# divisible by 16.
with self.assertRaises(ValueError):
out_tf = array_ops.depth_to_space(x_np, block_size)
out_tf.eval()
self.evaluate(out_tf)
# Test when the block size is 0.
def testBlockSize0(self):
@ -194,7 +194,7 @@ class DepthToSpaceTest(test.TestCase):
block_size = 0
with self.assertRaises(ValueError):
out_tf = array_ops.depth_to_space(x_np, block_size)
out_tf.eval()
self.evaluate(out_tf)
# Test when the block size is 1. The block size should be > 1.
def testBlockSizeOne(self):
@ -205,7 +205,7 @@ class DepthToSpaceTest(test.TestCase):
block_size = 1
with self.assertRaises(ValueError):
out_tf = array_ops.depth_to_space(x_np, block_size)
out_tf.eval()
self.evaluate(out_tf)
def testBlockSizeLargerThanInput(self):
# The block size is too large for this input.
@ -214,7 +214,7 @@ class DepthToSpaceTest(test.TestCase):
block_size = 10
with self.assertRaises(ValueError):
out_tf = array_ops.space_to_depth(x_np, block_size)
out_tf.eval()
self.evaluate(out_tf)
def testBlockSizeNotDivisibleDepth(self):
# The depth is not divisible by the square of the block size.


@ -528,7 +528,7 @@ class DepthwiseConv2DTest(test.TestCase):
t2 = constant_op.constant(x2, shape=output_sizes)
backprop = nn_ops.depthwise_conv2d_native_backprop_input(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval()
ret = self.evaluate(backprop)
self.assertShapeEqual(ret, backprop)
return ret
@ -548,7 +548,7 @@ class DepthwiseConv2DTest(test.TestCase):
t2 = constant_op.constant(x2, shape=output_sizes)
backprop = nn_ops.depthwise_conv2d_native_backprop_input(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval()
ret = self.evaluate(backprop)
self.assertShapeEqual(ret, backprop)
return ret
@ -580,7 +580,7 @@ class DepthwiseConv2DTest(test.TestCase):
t2 = constant_op.constant(x2, shape=output_sizes)
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval()
ret = self.evaluate(backprop)
self.assertShapeEqual(ret, backprop)
return ret
@ -600,7 +600,7 @@ class DepthwiseConv2DTest(test.TestCase):
t2 = constant_op.constant(x2, shape=output_sizes)
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
ret = backprop.eval()
ret = self.evaluate(backprop)
self.assertShapeEqual(ret, backprop)
return ret


@ -35,7 +35,7 @@ from tensorflow.python.platform import test
class DeterminantOpTest(test.TestCase):
def _compareDeterminantBase(self, matrix_x, tf_ans):
out = tf_ans.eval()
out = self.evaluate(tf_ans)
shape = matrix_x.shape
if shape[-1] == 0 and shape[-2] == 0:
np_ans = np.ones(shape[:-2]).astype(matrix_x.dtype)
@ -54,8 +54,8 @@ class DeterminantOpTest(test.TestCase):
np_ans = np_ans.astype(matrix_x.dtype)
self.assertShapeEqual(np_ans, abs_log_det_tf)
sign_tf_val = sign_tf.eval()
abs_log_det_tf_val = abs_log_det_tf.eval()
sign_tf_val = self.evaluate(sign_tf)
abs_log_det_tf_val = self.evaluate(abs_log_det_tf)
self.assertAllClose(
sign_tf_val * np.exp(abs_log_det_tf_val),
np_sign * np.exp(np_ans),


@ -89,7 +89,7 @@ class MatrixSetDiagTest(test.TestCase):
[1.0, 1.0, 3.0]])
output = array_ops.matrix_set_diag(mat, v)
self.assertEqual((3, 3), output.get_shape())
self.assertAllEqual(mat_set_diag, output.eval())
self.assertAllEqual(mat_set_diag, self.evaluate(output))
def testRectangular(self):
with self.session(use_gpu=True):
@ -98,14 +98,14 @@ class MatrixSetDiagTest(test.TestCase):
expected = np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]])
output = array_ops.matrix_set_diag(mat, v)
self.assertEqual((2, 3), output.get_shape())
self.assertAllEqual(expected, output.eval())
self.assertAllEqual(expected, self.evaluate(output))
v = np.array([3.0, 4.0])
mat = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
expected = np.array([[3.0, 1.0], [1.0, 4.0], [1.0, 1.0]])
output = array_ops.matrix_set_diag(mat, v)
self.assertEqual((3, 2), output.get_shape())
self.assertAllEqual(expected, output.eval())
self.assertAllEqual(expected, self.evaluate(output))
def _testSquareBatch(self, dtype):
with self.cached_session(use_gpu=True):
@ -121,7 +121,7 @@ class MatrixSetDiagTest(test.TestCase):
output = array_ops.matrix_set_diag(mat_batch, v_batch)
self.assertEqual((2, 3, 3), output.get_shape())
self.assertAllEqual(mat_set_diag_batch, output.eval())
self.assertAllEqual(mat_set_diag_batch, self.evaluate(output))
def testSquareBatch(self):
self._testSquareBatch(np.float32)
@ -140,7 +140,7 @@ class MatrixSetDiagTest(test.TestCase):
[[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0]]])
output = array_ops.matrix_set_diag(mat_batch, v_batch)
self.assertEqual((2, 2, 3), output.get_shape())
self.assertAllEqual(mat_set_diag_batch, output.eval())
self.assertAllEqual(mat_set_diag_batch, self.evaluate(output))
def testInvalidShape(self):
with self.assertRaisesRegexp(ValueError, "must be at least rank 2"):
@ -273,9 +273,9 @@ class DiagTest(test.TestCase):
def _diagOp(self, diag, dtype, expected_ans, use_gpu):
with self.cached_session(use_gpu=use_gpu):
tf_ans = array_ops.diag(ops.convert_to_tensor(diag.astype(dtype)))
out = tf_ans.eval()
out = self.evaluate(tf_ans)
tf_ans_inv = array_ops.diag_part(expected_ans)
inv_out = tf_ans_inv.eval()
inv_out = self.evaluate(tf_ans_inv)
self.assertAllClose(out, expected_ans)
self.assertAllClose(inv_out, diag)
self.assertShapeEqual(expected_ans, tf_ans)
@ -421,7 +421,7 @@ class DiagPartOpTest(test.TestCase):
with self.cached_session(use_gpu=use_gpu):
tensor = ops.convert_to_tensor(tensor.astype(dtype))
tf_ans_inv = array_ops.diag_part(tensor)
inv_out = tf_ans_inv.eval()
inv_out = self.evaluate(tf_ans_inv)
self.assertAllClose(inv_out, expected_ans)
self.assertShapeEqual(expected_ans, tf_ans_inv)
@ -445,7 +445,7 @@ class DiagPartOpTest(test.TestCase):
t = ops.convert_to_tensor(x.astype(np.float32))
t.set_shape(shape)
tf_ans = array_ops.diag_part(t)
out = tf_ans.eval()
out = self.evaluate(tf_ans)
self.assertAllClose(out, expected_ans)
self.assertShapeEqual(expected_ans, tf_ans)


@ -355,7 +355,7 @@ class CategoricalTest(test.TestCase, parameterized.TestCase):
samples = dist.sample(n, seed=123)
samples.set_shape([n, 1, 2])
self.assertEqual(samples.dtype, dtypes.int32)
sample_values = samples.eval()
sample_values = self.evaluate(samples)
self.assertFalse(np.any(sample_values < 0))
self.assertFalse(np.any(sample_values > 1))
self.assertAllClose(
@ -371,7 +371,7 @@ class CategoricalTest(test.TestCase, parameterized.TestCase):
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
samples = dist.sample((100, 100), seed=123)
prob = dist.prob(samples)
prob_val = prob.eval()
prob_val = self.evaluate(prob)
self.assertAllClose(
[0.2**2 + 0.8**2], [prob_val[:, :, :, 0].mean()], atol=1e-2)
self.assertAllClose(
@ -393,26 +393,26 @@ class CategoricalTest(test.TestCase, parameterized.TestCase):
dist = categorical.Categorical(math_ops.log(histograms) - 50.)
prob = dist.prob(1)
self.assertAllClose([[0.8, 0.6]], prob.eval())
self.assertAllClose([[0.8, 0.6]], self.evaluate(prob))
prob = dist.prob([1])
self.assertAllClose([[0.8, 0.6]], prob.eval())
self.assertAllClose([[0.8, 0.6]], self.evaluate(prob))
prob = dist.prob([0, 1])
self.assertAllClose([[0.2, 0.6]], prob.eval())
self.assertAllClose([[0.2, 0.6]], self.evaluate(prob))
prob = dist.prob([[0, 1]])
self.assertAllClose([[0.2, 0.6]], prob.eval())
self.assertAllClose([[0.2, 0.6]], self.evaluate(prob))
prob = dist.prob([[[0, 1]]])
self.assertAllClose([[[0.2, 0.6]]], prob.eval())
self.assertAllClose([[[0.2, 0.6]]], self.evaluate(prob))
prob = dist.prob([[1, 0], [0, 1]])
self.assertAllClose([[0.8, 0.4], [0.2, 0.6]], prob.eval())
self.assertAllClose([[0.8, 0.4], [0.2, 0.6]], self.evaluate(prob))
prob = dist.prob([[[1, 1], [1, 0]], [[1, 0], [0, 1]]])
self.assertAllClose([[[0.8, 0.6], [0.8, 0.4]], [[0.8, 0.4], [0.2, 0.6]]],
prob.eval())
self.evaluate(prob))
def testLogPMFShape(self):
with self.cached_session():


@ -110,7 +110,7 @@ class DirichletMultinomialTest(test.TestCase):
counts = [1., 0]
dist = ds.DirichletMultinomial(1., alpha)
pmf = dist.prob(counts)
self.assertAllClose(1 / 3., pmf.eval())
self.assertAllClose(1 / 3., self.evaluate(pmf))
self.assertEqual((), pmf.get_shape())
def testPmfBothZeroBatchesNontrivialN(self):
@ -122,7 +122,7 @@ class DirichletMultinomialTest(test.TestCase):
counts = [3., 2]
dist = ds.DirichletMultinomial(5., alpha)
pmf = dist.prob(counts)
self.assertAllClose(1 / 7., pmf.eval())
self.assertAllClose(1 / 7., self.evaluate(pmf))
self.assertEqual((), pmf.get_shape())
def testPmfBothZeroBatchesMultidimensionalN(self):
@ -134,7 +134,7 @@ class DirichletMultinomialTest(test.TestCase):
n = np.full([4, 3], 5., dtype=np.float32)
dist = ds.DirichletMultinomial(n, alpha)
pmf = dist.prob(counts)
self.assertAllClose([[1 / 7., 1 / 7., 1 / 7.]] * 4, pmf.eval())
self.assertAllClose([[1 / 7., 1 / 7., 1 / 7.]] * 4, self.evaluate(pmf))
self.assertEqual((4, 3), pmf.get_shape())
def testPmfAlphaStretchedInBroadcastWhenSameRank(self):
@ -145,7 +145,7 @@ class DirichletMultinomialTest(test.TestCase):
counts = [[1., 0], [0., 1]]
dist = ds.DirichletMultinomial([1.], alpha)
pmf = dist.prob(counts)
self.assertAllClose([1 / 3., 2 / 3.], pmf.eval())
self.assertAllClose([1 / 3., 2 / 3.], self.evaluate(pmf))
self.assertAllEqual([2], pmf.get_shape())
def testPmfAlphaStretchedInBroadcastWhenLowerRank(self):
@ -155,7 +155,7 @@ class DirichletMultinomialTest(test.TestCase):
alpha = [1., 2]
counts = [[1., 0], [0., 1]]
pmf = ds.DirichletMultinomial(1., alpha).prob(counts)
self.assertAllClose([1 / 3., 2 / 3.], pmf.eval())
self.assertAllClose([1 / 3., 2 / 3.], self.evaluate(pmf))
self.assertAllEqual([2], pmf.get_shape())
def testPmfCountsStretchedInBroadcastWhenSameRank(self):
@ -165,7 +165,7 @@ class DirichletMultinomialTest(test.TestCase):
alpha = [[1., 2], [2., 3]]
counts = [[1., 0]]
pmf = ds.DirichletMultinomial([1., 1.], alpha).prob(counts)
self.assertAllClose([1 / 3., 2 / 5.], pmf.eval())
self.assertAllClose([1 / 3., 2 / 5.], self.evaluate(pmf))
self.assertAllEqual([2], pmf.get_shape())
def testPmfCountsStretchedInBroadcastWhenLowerRank(self):
@ -175,7 +175,7 @@ class DirichletMultinomialTest(test.TestCase):
alpha = [[1., 2], [2., 3]]
counts = [1., 0]
pmf = ds.DirichletMultinomial(1., alpha).prob(counts)
self.assertAllClose([1 / 3., 2 / 5.], pmf.eval())
self.assertAllClose([1 / 3., 2 / 5.], self.evaluate(pmf))
self.assertAllEqual([2], pmf.get_shape())
def testPmfForOneVoteIsTheMeanWithOneRecordInput(self):
@ -289,7 +289,7 @@ class DirichletMultinomialTest(test.TestCase):
expected_covariance = n * (n + alpha_0) / (1 + alpha_0) * shared_matrix
self.assertEqual([2, 2], covariance.get_shape())
self.assertAllClose(expected_covariance, covariance.eval())
self.assertAllClose(expected_covariance, self.evaluate(covariance))
def testCovarianceNAlphaBroadcast(self):
alpha_v = [1., 2, 3]
@ -327,7 +327,7 @@ class DirichletMultinomialTest(test.TestCase):
ns * (ns + alpha_0) / (1 + alpha_0))[..., array_ops.newaxis]
self.assertEqual([4, 3, 3], covariance.get_shape())
self.assertAllClose(expected_covariance, covariance.eval())
self.assertAllClose(expected_covariance, self.evaluate(covariance))
def testCovarianceMultidimensional(self):
alpha = np.random.rand(3, 5, 4).astype(np.float32)
@ -353,7 +353,7 @@ class DirichletMultinomialTest(test.TestCase):
with self.cached_session():
dist = ds.DirichletMultinomial(0., alpha)
pmf = dist.prob(counts)
self.assertAllClose(1.0, pmf.eval())
self.assertAllClose(1.0, self.evaluate(pmf))
self.assertEqual((), pmf.get_shape())
def testLargeTauGivesPreciseProbabilities(self):
@ -368,7 +368,7 @@ class DirichletMultinomialTest(test.TestCase):
with self.cached_session():
dist = ds.DirichletMultinomial(1., alpha)
pmf = dist.prob(counts)
self.assertAllClose(0.8, pmf.eval(), atol=1e-4)
self.assertAllClose(0.8, self.evaluate(pmf), atol=1e-4)
self.assertEqual((), pmf.get_shape())
# Two (three sided) coin flips. Prob[coin 3] = 0.8.
@ -376,7 +376,7 @@ class DirichletMultinomialTest(test.TestCase):
with self.cached_session():
dist = ds.DirichletMultinomial(2., alpha)
pmf = dist.prob(counts)
self.assertAllClose(0.8**2, pmf.eval(), atol=1e-2)
self.assertAllClose(0.8**2, self.evaluate(pmf), atol=1e-2)
self.assertEqual((), pmf.get_shape())
# Three (three sided) coin flips.
@ -384,7 +384,7 @@ class DirichletMultinomialTest(test.TestCase):
with self.cached_session():
dist = ds.DirichletMultinomial(3., alpha)
pmf = dist.prob(counts)
self.assertAllClose(3 * 0.1 * 0.8 * 0.8, pmf.eval(), atol=1e-2)
self.assertAllClose(3 * 0.1 * 0.8 * 0.8, self.evaluate(pmf), atol=1e-2)
self.assertEqual((), pmf.get_shape())
def testSmallTauPrefersCorrelatedResults(self):
@ -399,7 +399,7 @@ class DirichletMultinomialTest(test.TestCase):
with self.cached_session():
dist = ds.DirichletMultinomial(1., alpha)
pmf = dist.prob(counts)
self.assertAllClose(0.5, pmf.eval())
self.assertAllClose(0.5, self.evaluate(pmf))
self.assertEqual((), pmf.get_shape())
# If there are two draws, it is much more likely that they are the same.
@ -409,7 +409,7 @@ class DirichletMultinomialTest(test.TestCase):
dist = ds.DirichletMultinomial(2., alpha)
pmf_same = dist.prob(counts_same)
pmf_different = dist.prob(counts_different)
self.assertLess(5 * pmf_different.eval(), pmf_same.eval())
self.assertLess(5 * self.evaluate(pmf_different), self.evaluate(pmf_same))
self.assertEqual((), pmf_same.get_shape())
def testNonStrictTurnsOffAllChecks(self):
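For reference, the hand-computed pmf values asserted in the DirichletMultinomial hunks above (1/3, 2/3, 1/7, 2/5, ...) all follow from the Dirichlet-multinomial pmf. A minimal standalone numpy/scipy sketch of that formula (the helper below is ours, not part of the test file):

import numpy as np
from scipy.special import gammaln

def dirichlet_multinomial_pmf(counts, n, alpha):
  # pmf = n!/prod(counts_j!) * Gamma(alpha_0)/Gamma(n + alpha_0)
  #       * prod_j Gamma(counts_j + alpha_j)/Gamma(alpha_j), with alpha_0 = sum(alpha).
  counts = np.asarray(counts, dtype=np.float64)
  alpha = np.asarray(alpha, dtype=np.float64)
  alpha_0 = alpha.sum()
  log_pmf = (gammaln(n + 1.) - gammaln(counts + 1.).sum()
             + gammaln(alpha_0) - gammaln(n + alpha_0)
             + (gammaln(counts + alpha) - gammaln(alpha)).sum())
  return np.exp(log_pmf)

print(dirichlet_multinomial_pmf([1., 0.], 1., [1., 2.]))  # ~0.333333 == 1/3
print(dirichlet_multinomial_pmf([3., 2.], 5., [1., 2.]))  # ~0.142857 == 1/7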

View File

@ -63,17 +63,17 @@ class KLTest(test.TestCase):
kl = kullback_leibler.kl_divergence(a, a, allow_nan_stats=False)
with self.assertRaisesOpError(
"KL calculation between .* and .* returned NaN values"):
kl.eval()
self.evaluate(kl)
with self.assertRaisesOpError(
"KL calculation between .* and .* returned NaN values"):
a.kl_divergence(a).eval()
a = MyDistException(loc=0.0, scale=1.0, allow_nan_stats=True)
kl_ok = kullback_leibler.kl_divergence(a, a)
self.assertAllEqual([float("nan")], kl_ok.eval())
self.assertAllEqual([float("nan")], self.evaluate(kl_ok))
self_kl_ok = a.kl_divergence(a)
self.assertAllEqual([float("nan")], self_kl_ok.eval())
self.assertAllEqual([float("nan")], self.evaluate(self_kl_ok))
cross_ok = a.cross_entropy(a)
self.assertAllEqual([float("nan")], cross_ok.eval())
self.assertAllEqual([float("nan")], self.evaluate(cross_ok))
def testRegistrationFailures(self):

View File

@ -127,7 +127,7 @@ class MultinomialTest(test.TestCase):
p = [0.5, 0.5]
counts = [1., 0]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose(0.5, pmf.eval())
self.assertAllClose(0.5, self.evaluate(pmf))
self.assertEqual((), pmf.get_shape())
def testPmfBothZeroBatchesNontrivialN(self):
@ -138,7 +138,7 @@ class MultinomialTest(test.TestCase):
dist = multinomial.Multinomial(total_count=5., probs=p)
pmf = dist.prob(counts)
# 5 choose 3 = 5 choose 2 = 10. 10 * (.9)^2 * (.1)^3 = 81/10000.
self.assertAllClose(81. / 10000, pmf.eval())
self.assertAllClose(81. / 10000, self.evaluate(pmf))
self.assertEqual((), pmf.get_shape())
def testPmfPStretchedInBroadcastWhenSameRank(self):
@ -146,7 +146,7 @@ class MultinomialTest(test.TestCase):
p = [[0.1, 0.9]]
counts = [[1., 0], [0, 1]]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose([0.1, 0.9], pmf.eval())
self.assertAllClose([0.1, 0.9], self.evaluate(pmf))
self.assertEqual((2), pmf.get_shape())
def testPmfPStretchedInBroadcastWhenLowerRank(self):
@ -154,7 +154,7 @@ class MultinomialTest(test.TestCase):
p = [0.1, 0.9]
counts = [[1., 0], [0, 1]]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose([0.1, 0.9], pmf.eval())
self.assertAllClose([0.1, 0.9], self.evaluate(pmf))
self.assertEqual((2), pmf.get_shape())
def testPmfCountsStretchedInBroadcastWhenSameRank(self):
@ -182,7 +182,7 @@ class MultinomialTest(test.TestCase):
# [2]
counts = [2., 1]
pmf = multinomial.Multinomial(total_count=n, probs=p).prob(counts)
pmf.eval()
self.evaluate(pmf)
self.assertEqual(pmf.get_shape(), (2, 2))
def testPmfShapeCountsPStretchedN(self):
@ -191,7 +191,7 @@ class MultinomialTest(test.TestCase):
counts = [3., 2]
n = np.full([4, 3], 5., dtype=np.float32)
pmf = multinomial.Multinomial(total_count=n, probs=p).prob(counts)
pmf.eval()
self.evaluate(pmf)
self.assertEqual((4, 3), pmf.get_shape())
def testMultinomialMean(self):
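The 81/10000 asserted in testPmfBothZeroBatchesNontrivialN above is plain binomial arithmetic; a one-line standalone check (independent of the test file):

from scipy.special import comb
print(comb(5, 3) * 0.9**2 * 0.1**3)  # 10 * 0.81 * 0.001 = 0.0081 == 81/10000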

View File

@ -362,7 +362,7 @@ class ErfInvTest(test.TestCase):
expected_x = special.erfinv(x)
x = special_math.erfinv(x)
self.assertAllClose(expected_x, x.eval(), atol=0.)
self.assertAllClose(expected_x, self.evaluate(x), atol=0.)
def testErfInvIntegerInput(self):
with self.cached_session():

View File

@ -41,7 +41,7 @@ class DynamicStitchTestBase(object):
data = [constant_op.constant(40), constant_op.constant(60)]
for step in -1, 1:
stitched_t = self.stitch_op(indices[::step], data)
stitched_val = stitched_t.eval()
stitched_val = self.evaluate(stitched_t)
self.assertAllEqual([40, 60][::step], stitched_val)
# Dimension 0 is max(flatten(indices))+1.
self.assertEqual([2], stitched_t.get_shape().as_list())
@ -78,7 +78,7 @@ class DynamicStitchTestBase(object):
constant_op.constant([10, 60, 20, 30, 50]), dtype=dtype)
]
stitched_t = self.stitch_op(indices, data)
stitched_val = stitched_t.eval()
stitched_val = self.evaluate(stitched_t)
self.assertAllEqual([0, 10, 20, 30, 40, 50, 60, 70], stitched_val)
# Dimension 0 is max(flatten(indices))+1.
self.assertEqual([8], stitched_t.get_shape().as_list())
@ -88,7 +88,7 @@ class DynamicStitchTestBase(object):
indices = [constant_op.constant([1, 6, 2, 3, 5, 0, 4, 7])]
data = [constant_op.constant([10, 60, 20, 30, 50, 0, 40, 70])]
stitched_t = self.stitch_op(indices, data)
stitched_val = stitched_t.eval()
stitched_val = self.evaluate(stitched_t)
self.assertAllEqual([0, 10, 20, 30, 40, 50, 60, 70], stitched_val)
# Dimension 0 is max(flatten(indices))+1.
self.assertEqual([8], stitched_t.get_shape().as_list())
@ -106,7 +106,7 @@ class DynamicStitchTestBase(object):
constant_op.constant([[20, 21], [30, 31], [50, 51]])
]
stitched_t = self.stitch_op(indices, data)
stitched_val = stitched_t.eval()
stitched_val = self.evaluate(stitched_t)
self.assertAllEqual([[0, 1], [10, 11], [20, 21], [30, 31], [40, 41],
[50, 51], [60, 61], [70, 71]], stitched_val)
# Dimension 0 is max(flatten(indices))+1.
@ -127,7 +127,7 @@ class DynamicStitchTestBase(object):
array_ops.zeros([0, 2], dtype=dtypes.int32)
]
stitched_t = self.stitch_op(indices, data)
stitched_val = stitched_t.eval()
stitched_val = self.evaluate(stitched_t)
self.assertAllEqual([[0, 1], [10, 11], [20, 21], [30, 31], [40, 41],
[50, 51], [60, 61], [70, 71]], stitched_val)
# Dimension 0 is max(flatten(indices))+1.
@ -147,7 +147,7 @@ class DynamicStitchTestBase(object):
[[1., 2.], [31., 32.]]])
]
stitched_t = self.stitch_op(indices, data)
stitched_val = stitched_t.eval()
stitched_val = self.evaluate(stitched_t)
correct = 10. * np.arange(7)[:, None] + [1., 2.]
self.assertAllEqual(correct, stitched_val)
self.assertEqual([7, 2], stitched_t.get_shape().as_list())
@ -157,7 +157,7 @@ class DynamicStitchTestBase(object):
stitched_grad)
self.assertEqual(grads[:3], [None] * 3) # Indices have no gradients
for datum, grad in zip(data, sess.run(grads[3:])):
self.assertAllEqual(7. * datum.eval(), grad)
self.assertAllEqual(7. * self.evaluate(datum), grad)
def testErrorIndicesMultiDimensional(self):
indices = [
@ -227,7 +227,7 @@ class ParallelDynamicStitchTest(DynamicStitchTestBase, test.TestCase):
data = [constant_op.constant(40.0), constant_op.constant(60.0)]
for step in -1, 1:
stitched_t = data_flow_ops.dynamic_stitch(indices[::step], data)
stitched_val = stitched_t.eval()
stitched_val = self.evaluate(stitched_t)
self.assertAllEqual([40.0, 60.0][::step], stitched_val)
# Dimension 0 is max(flatten(indices))+1.
self.assertEqual([2], stitched_t.get_shape().as_list())
@ -246,7 +246,7 @@ class ParallelDynamicStitchTest(DynamicStitchTestBase, test.TestCase):
[[[51, 52], [21, 22]], [[1, 2], [31, 32]]], dtype=dtypes.float32)
]
stitched_t = data_flow_ops.dynamic_stitch(indices, data)
stitched_val = stitched_t.eval()
stitched_val = self.evaluate(stitched_t)
correct = 10 * np.arange(7)[:, None] + [1.0, 2.0]
self.assertAllEqual(correct, stitched_val)
self.assertEqual([7, 2], stitched_t.get_shape().as_list())
@ -256,7 +256,7 @@ class ParallelDynamicStitchTest(DynamicStitchTestBase, test.TestCase):
stitched_grad)
self.assertEqual(grads[:3], [None] * 3) # Indices have no gradients
for datum, grad in zip(data, sess.run(grads[3:])):
self.assertAllEqual(7.0 * datum.eval(), grad)
self.assertAllEqual(7.0 * self.evaluate(datum), grad)
# GPU version unit tests
def testScalarGPU(self):
@ -265,7 +265,7 @@ class ParallelDynamicStitchTest(DynamicStitchTestBase, test.TestCase):
data = [constant_op.constant(40.0), constant_op.constant(60.0)]
for step in -1, 1:
stitched_t = data_flow_ops.dynamic_stitch(indices[::step], data)
stitched_val = stitched_t.eval()
stitched_val = self.evaluate(stitched_t)
self.assertAllEqual([40.0, 60.0][::step], stitched_val)
# Dimension 0 is max(flatten(indices))+1.
self.assertEqual([2], stitched_t.get_shape().as_list())
@ -284,7 +284,7 @@ class ParallelDynamicStitchTest(DynamicStitchTestBase, test.TestCase):
[[[51, 52], [21, 22]], [[1, 2], [31, 32]]], dtype=dtypes.float32)
]
stitched_t = data_flow_ops.dynamic_stitch(indices, data)
stitched_val = stitched_t.eval()
stitched_val = self.evaluate(stitched_t)
correct = 10 * np.arange(7)[:, None] + [1.0, 2.0]
self.assertAllEqual(correct, stitched_val)
self.assertEqual([7, 2], stitched_t.get_shape().as_list())
@ -294,7 +294,7 @@ class ParallelDynamicStitchTest(DynamicStitchTestBase, test.TestCase):
stitched_grad)
self.assertEqual(grads[:3], [None] * 3) # Indices have no gradients
for datum, grad in zip(data, sess.run(grads[3:])):
self.assertAllEqual(7.0 * datum.eval(), grad)
self.assertAllEqual(7.0 * self.evaluate(datum), grad)
if __name__ == "__main__":
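The expected outputs in the dynamic_stitch hunks above follow the op's merge rule, out[indices[m][i]] = data[m][i], with dimension 0 equal to max(flatten(indices)) + 1, as the inline comments note. A plain-numpy model of that rule, offered as a sketch rather than the op's actual implementation:

import numpy as np

def np_dynamic_stitch(indices, data):
  indices = [np.asarray(i) for i in indices]
  data = [np.asarray(d) for d in data]
  n = max(int(i.max()) for i in indices if i.size) + 1
  # The per-element shape is whatever remains of data after the index dimensions.
  elem_shape = data[0].shape[indices[0].ndim:]
  out = np.zeros((n,) + elem_shape, dtype=data[0].dtype)
  for ind, dat in zip(indices, data):
    out[ind.reshape(-1)] = dat.reshape((-1,) + elem_shape)
  return out

print(np_dynamic_stitch([[1, 6, 2, 3, 5, 0, 4, 7]],
                        [[10, 60, 20, 30, 50, 0, 40, 70]]))
# [ 0 10 20 30 40 50 60 70], matching the single-list case tested above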

View File

@ -49,11 +49,11 @@ class EditDistanceTest(test.TestCase):
if expected_err_re is None:
self.assertEqual(edit_distance.get_shape(), expected_shape)
output = edit_distance.eval()
output = self.evaluate(edit_distance)
self.assertAllClose(output, expected_output)
else:
with self.assertRaisesOpError(expected_err_re):
edit_distance.eval()
self.evaluate(edit_distance)
def _testEditDistance(self,
hypothesis,

View File

@ -76,7 +76,7 @@ class ScatterAddSubTest(test.TestCase):
# p = init
variables.global_variables_initializer().run()
# p += vals
result = p2.eval()
result = self.evaluate(p2)
# Compute the expected 'p' using numpy operations.
for i, ind in enumerate(indices):
if scatter_op == state_ops.scatter_add:
@ -278,7 +278,7 @@ class EmbeddingLookupTest(test.TestCase):
norms = math_ops.sqrt(
math_ops.reduce_sum(embeddings * embeddings, axis=1))
normalized = embeddings / array_ops.stack([norms, norms], axis=1)
self.assertAllEqual(embedding.eval(), 2 * normalized.eval())
self.assertAllEqual(embedding.eval(), 2 * self.evaluate(normalized))
def testSimpleShardedPartitionedVariable(self):
with self.cached_session() as sess:
@ -319,7 +319,7 @@ class EmbeddingLookupTest(test.TestCase):
p_var_val = sess.run(list(p_variable))
# Actual test
print(ops.get_default_graph().as_graph_def())
tf_result = embedding.eval()
tf_result = self.evaluate(embedding)
np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
self.assertAllEqual(params_values, p_var_val)
self.assertAllEqual(np_result, tf_result)

View File

@ -51,7 +51,7 @@ class ExtractImagePatches(test.TestCase):
rates=rates,
padding=padding,
name="im2col")
self.assertAllClose(patches, out_tensor.eval())
self.assertAllClose(patches, self.evaluate(out_tensor))
def testKsize1x1Stride1x1Rate1x1(self):
"""Verifies that for 1x1 kernel the output equals the input."""

View File

@ -52,7 +52,7 @@ class ExtractVolumePatches(test.TestCase):
strides=strides,
padding=padding,
name="im2col_3d")
self.assertAllClose(patches, out_tensor.eval())
self.assertAllClose(patches, self.evaluate(out_tensor))
# pylint: disable=bad-whitespace
def testKsize1x1x1Stride1x1x1(self):

View File

@ -211,7 +211,7 @@ class FIFOQueueTest(test.TestCase):
enqueue_op.run()
for i in xrange(len(elems)):
vals = dequeued_t.eval()
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i]], vals)
def testDequeueHalf(self):
@ -225,7 +225,7 @@ class FIFOQueueTest(test.TestCase):
enqueue_op.run()
for i in xrange(len(elems)):
vals = dequeued_t.eval()
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i]], vals)
def testEnqueueAndBlockingDequeue(self):
@ -288,9 +288,9 @@ class FIFOQueueTest(test.TestCase):
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual(1, size.eval())
self.assertEqual(1, self.evaluate(size))
dequeued_t.op.run()
self.assertEqual(0, size.eval())
self.assertEqual(0, self.evaluate(size))
def testEnqueueMany(self):
with self.cached_session():
@ -302,7 +302,7 @@ class FIFOQueueTest(test.TestCase):
enqueue_op.run()
for i in range(8):
vals = dequeued_t.eval()
vals = self.evaluate(dequeued_t)
self.assertEqual([elems[i % 4]], vals)
def testEmptyEnqueueMany(self):
@ -313,9 +313,9 @@ class FIFOQueueTest(test.TestCase):
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual([0], size_t.eval())
self.assertEqual([0], self.evaluate(size_t))
enqueue_op.run()
self.assertEqual([0], size_t.eval())
self.assertEqual([0], self.evaluate(size_t))
def testEmptyDequeueMany(self):
with self.cached_session():
@ -323,9 +323,9 @@ class FIFOQueueTest(test.TestCase):
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], dequeued_t.eval().tolist())
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueUpTo(self):
with self.cached_session():
@ -333,9 +333,9 @@ class FIFOQueueTest(test.TestCase):
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], dequeued_t.eval().tolist())
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], dequeued_t.eval().tolist())
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.cached_session():
@ -369,8 +369,8 @@ class FIFOQueueTest(test.TestCase):
enqueue_op.run()
self.assertAllEqual(elems[0:4], dequeued_t.eval())
self.assertAllEqual(elems[4:8], dequeued_t.eval())
self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
def testDequeueUpToNoBlocking(self):
with self.cached_session():
@ -381,8 +381,8 @@ class FIFOQueueTest(test.TestCase):
enqueue_op.run()
self.assertAllEqual(elems[0:4], dequeued_t.eval())
self.assertAllEqual(elems[4:8], dequeued_t.eval())
self.assertAllEqual(elems[0:4], self.evaluate(dequeued_t))
self.assertAllEqual(elems[4:8], self.evaluate(dequeued_t))
def testMultiDequeueMany(self):
with self.cached_session() as sess:
@ -518,7 +518,7 @@ class FIFOQueueTest(test.TestCase):
r"Expected \[2,3,3\], got \[2,3,4\]"):
sess.run([enqueue_op],
feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))})
dequeued_t.eval()
self.evaluate(dequeued_t)
def testParallelEnqueueMany(self):
with self.cached_session() as sess:
@ -672,7 +672,7 @@ class FIFOQueueTest(test.TestCase):
while elements_dequeued < 250:
# With equal probability, run Dequeue or dequeue_many.
if random.random() > 0.5:
self.assertEqual(elements_dequeued, dequeued_t.eval())
self.assertEqual(elements_dequeued, self.evaluate(dequeued_t))
elements_dequeued += 1
else:
count = random.randint(0, min(20, 250 - elements_dequeued))
@ -778,12 +778,12 @@ class FIFOQueueTest(test.TestCase):
enqueue_op.run()
close_op.run()
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
self.assertEqual([elem], self.evaluate(dequeued_t))
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
dequeued_t.eval()
self.evaluate(dequeued_t)
def testBlockingDequeueFromClosedQueue(self):
with self.cached_session() as sess:
@ -1059,8 +1059,8 @@ class FIFOQueueTest(test.TestCase):
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
self.assertEqual([50.0], dequeued_t.eval())
self.assertEqual([elem], self.evaluate(dequeued_t))
self.assertEqual([50.0], self.evaluate(dequeued_t))
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
@ -1082,10 +1082,10 @@ class FIFOQueueTest(test.TestCase):
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for elem in elems:
self.assertEqual([elem], dequeued_t.eval())
self.assertEqual([elem], self.evaluate(dequeued_t))
time.sleep(0.01)
self.assertEqual([50.0], dequeued_t.eval())
self.assertEqual([60.0], dequeued_t.eval())
self.assertEqual([50.0], self.evaluate(dequeued_t))
self.assertEqual([60.0], self.evaluate(dequeued_t))
# Make sure the thread finishes before exiting.
thread.join()
@ -1119,12 +1119,12 @@ class FIFOQueueTest(test.TestCase):
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, dequeued_t.eval())
self.assertEqual(10.0, self.evaluate(dequeued_t))
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 40.0, 50.0]:
self.assertEqual(elem, dequeued_t.eval())
self.assertEqual(elem, self.evaluate(dequeued_t))
self.assertEqual(0, q.size().eval())
def testBlockingEnqueueManyBeforeClose(self):
@ -1154,11 +1154,11 @@ class FIFOQueueTest(test.TestCase):
close_thread.start()
# The dequeue will unblock both threads.
self.assertEqual(10.0, dequeued_t.eval())
self.assertEqual(10.0, self.evaluate(dequeued_t))
enqueue_thread.join()
close_thread.join()
for elem in [20.0, 30.0, 50.0, 60.0]:
self.assertEqual(elem, dequeued_t.eval())
self.assertEqual(elem, self.evaluate(dequeued_t))
def testDoesNotLoseValue(self):
with self.cached_session():

View File

@ -364,7 +364,7 @@ class FractionalAvgPoolGradTest(test.TestCase):
padding = "VALID"
output_tensor = nn_ops.avg_pool(input_tensor, window_size,
stride_size, padding)
output_data = output_tensor.eval()
output_data = self.evaluate(output_tensor)
num_elements = 1
for dim_size in output_data.shape:
num_elements *= dim_size
@ -373,7 +373,7 @@ class FractionalAvgPoolGradTest(test.TestCase):
input_backprop_tensor = gen_nn_ops.avg_pool_grad(
input_tensor.get_shape(), output_backprop, window_size,
stride_size, padding)
input_backprop = input_backprop_tensor.eval()
input_backprop = self.evaluate(input_backprop_tensor)
row_seq = list(range(0, num_rows + 1, row_window_size))
col_seq = list(range(0, num_cols + 1, col_window_size))
fap_input_backprop_tensor = gen_nn_ops.fractional_avg_pool_grad(
@ -382,7 +382,7 @@ class FractionalAvgPoolGradTest(test.TestCase):
row_seq,
col_seq,
overlapping=False)
fap_input_backprop = fap_input_backprop_tensor.eval()
fap_input_backprop = self.evaluate(fap_input_backprop_tensor)
self.assertShapeEqual(input_backprop, fap_input_backprop_tensor)
self.assertAllClose(input_backprop, fap_input_backprop)
@ -403,7 +403,7 @@ class FractionalAvgPoolGradTest(test.TestCase):
padding = "VALID"
output_tensor = nn_ops.avg_pool(input_tensor, window_size,
stride_size, padding)
output_data = output_tensor.eval()
output_data = self.evaluate(output_tensor)
num_elements = 1
for dim_size in output_data.shape:
num_elements *= dim_size
@ -412,7 +412,7 @@ class FractionalAvgPoolGradTest(test.TestCase):
input_backprop_tensor = gen_nn_ops.avg_pool_grad(
input_tensor.get_shape(), output_backprop, window_size,
stride_size, padding)
input_backprop = input_backprop_tensor.eval()
input_backprop = self.evaluate(input_backprop_tensor)
row_seq = list(range(0, num_rows, row_window_size - 1))
col_seq = list(range(0, num_cols, col_window_size - 1))
row_seq[-1] += 1
@ -423,7 +423,7 @@ class FractionalAvgPoolGradTest(test.TestCase):
row_seq,
col_seq,
overlapping=True)
fap_input_backprop = fap_input_backprop_tensor.eval()
fap_input_backprop = self.evaluate(fap_input_backprop_tensor)
self.assertShapeEqual(input_backprop, fap_input_backprop_tensor)
self.assertAllClose(input_backprop, fap_input_backprop)
@ -442,7 +442,7 @@ class FractionalAvgPoolGradTest(test.TestCase):
pseudo_random=pseudo_random,
overlapping=overlapping,
seed=self._SEED)
output_data = output_tensor.eval()
output_data = self.evaluate(output_tensor)
output_shape = output_data.shape
# error_margin and delta setting is similar to avg_pool_grad.
error_margin = 1e-4
@ -473,7 +473,7 @@ class FractionalAvgPoolGradTest(test.TestCase):
pseudo_random=pseudo_random,
overlapping=overlapping,
seed=self._SEED)
output_data = output_tensor.eval()
output_data = self.evaluate(output_tensor)
output_shape = output_data.shape
# error_margin and delta setting is similar to avg_pool_grad.
error_margin = 1e-4

View File

@ -374,12 +374,12 @@ class FractionalMaxPoolGradTest(test.TestCase):
padding = "VALID"
output_tensor = nn_ops.max_pool(input_tensor, window_size,
stride_size, padding)
output_data = output_tensor.eval()
output_data = self.evaluate(output_tensor)
output_backprop = self._PRNG.randint(100, size=output_data.shape)
input_backprop_tensor = gen_nn_ops.max_pool_grad(
input_tensor, output_tensor, output_backprop, window_size,
stride_size, padding)
input_backprop = input_backprop_tensor.eval()
input_backprop = self.evaluate(input_backprop_tensor)
row_seq = list(range(0, num_rows + 1, row_window_size))
col_seq = list(range(0, num_cols + 1, col_window_size))
fmp_input_backprop_tensor = gen_nn_ops.fractional_max_pool_grad(
@ -389,7 +389,7 @@ class FractionalMaxPoolGradTest(test.TestCase):
row_seq,
col_seq,
overlapping=False)
fmp_input_backprop = fmp_input_backprop_tensor.eval()
fmp_input_backprop = self.evaluate(fmp_input_backprop_tensor)
self.assertShapeEqual(input_backprop, fmp_input_backprop_tensor)
self.assertAllClose(input_backprop, fmp_input_backprop)
@ -409,12 +409,12 @@ class FractionalMaxPoolGradTest(test.TestCase):
padding = "VALID"
output_tensor = nn_ops.max_pool(input_tensor, window_size,
stride_size, padding)
output_data = output_tensor.eval()
output_data = self.evaluate(output_tensor)
output_backprop = self._PRNG.randint(100, size=output_data.shape)
input_backprop_tensor = gen_nn_ops.max_pool_grad(
input_tensor, output_tensor, output_backprop, window_size,
stride_size, padding)
input_backprop = input_backprop_tensor.eval()
input_backprop = self.evaluate(input_backprop_tensor)
row_seq = list(range(0, num_rows, row_window_size - 1))
col_seq = list(range(0, num_cols, col_window_size - 1))
row_seq[-1] += 1
@ -426,7 +426,7 @@ class FractionalMaxPoolGradTest(test.TestCase):
row_seq,
col_seq,
overlapping=True)
fmp_input_backprop = fmp_input_backprop_tensor.eval()
fmp_input_backprop = self.evaluate(fmp_input_backprop_tensor)
self.assertShapeEqual(input_backprop, fmp_input_backprop_tensor)
self.assertAllClose(input_backprop, fmp_input_backprop)
@ -447,7 +447,7 @@ class FractionalMaxPoolGradTest(test.TestCase):
pseudo_random=pseudo_random,
overlapping=overlapping,
seed=self._SEED)
output_data = output_tensor.eval()
output_data = self.evaluate(output_tensor)
output_shape = output_data.shape
# error_margin and delta setting is similar to max_pool_grad.
error_margin = 1e-3
@ -480,7 +480,7 @@ class FractionalMaxPoolGradTest(test.TestCase):
pseudo_random=pseudo_random,
overlapping=overlapping,
seed=self._SEED)
output_data = output_tensor.eval()
output_data = self.evaluate(output_tensor)
output_shape = output_data.shape
# error_margin and delta setting is similar to max_pool_grad.
error_margin = 1e-3
@ -578,7 +578,7 @@ class FractionalMaxPoolGradTest(test.TestCase):
row_seq,
col_seq,
overlapping=False)
input_backprop_not_overlapping = r.eval()
input_backprop_not_overlapping = self.evaluate(r)
self.assertShapeEqual(
np.reshape(expected_input_backprop_not_overlapping, input_size), r)
self.assertAllClose(expected_input_backprop_not_overlapping,
@ -588,7 +588,7 @@ class FractionalMaxPoolGradTest(test.TestCase):
output_data_overlapping, shape=output_size)
r = gen_nn_ops.fractional_max_pool_grad(
input_tensor, output_tensor, grad, row_seq, col_seq, overlapping=True)
input_backprop_overlapping = r.eval()
input_backprop_overlapping = self.evaluate(r)
self.assertShapeEqual(
np.reshape(expected_input_backprop_overlapping, input_size), r)
self.assertAllClose(expected_input_backprop_overlapping,

View File

@ -974,7 +974,7 @@ class FunctionalOpsTest(test.TestCase):
MLP,
rewrite_with_while=rewrite_with_while)[0]
return ret.eval()
return self.evaluate(ret)
def _npMLP(self, xval, wsval, bsval):
for i in range(wsval.shape[0]):

View File

@ -40,7 +40,7 @@ class GatherNdTest(test.TestCase):
params = constant_op.constant(np.array([8, 1, 2, 3, 7, 5], dtype=dtype))
indices = constant_op.constant([[4], [4], [0]])
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = gather_nd_t.eval()
gather_nd_val = self.evaluate(gather_nd_t)
self.assertAllEqual(np.array([7, 7, 8], dtype=dtype), gather_nd_val)
self.assertEqual([3], gather_nd_t.get_shape())
@ -60,20 +60,20 @@ class GatherNdTest(test.TestCase):
indices_empty = np.empty((0, 2), dtype=np.int32)
gather_nd_ok_t = array_ops.gather_nd(params, indices_empty)
gather_nd_ok_val = gather_nd_ok_t.eval()
gather_nd_ok_val = self.evaluate(gather_nd_ok_t)
self.assertEqual([0], gather_nd_ok_t.get_shape())
self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
indices_empty = np.empty((0, 1), dtype=np.int32)
gather_nd_ok_t = array_ops.gather_nd(params, indices_empty)
gather_nd_ok_val = gather_nd_ok_t.eval()
gather_nd_ok_val = self.evaluate(gather_nd_ok_t)
self.assertEqual([0, 3], gather_nd_ok_t.get_shape())
self.assertAllClose(np.empty((0, 3), dtype=np.float32), gather_nd_ok_val)
params_empty = np.empty((0, 3), dtype=np.float32)
indices_empty = np.empty((0, 2), dtype=np.int32)
gather_nd_ok_t = array_ops.gather_nd(params_empty, indices_empty)
gather_nd_ok_val = gather_nd_ok_t.eval()
gather_nd_ok_val = self.evaluate(gather_nd_ok_t)
self.assertEqual([0], gather_nd_ok_t.get_shape())
self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
@ -82,7 +82,7 @@ class GatherNdTest(test.TestCase):
gather_nd_break_t = array_ops.gather_nd(params_empty, indices_nonempty)
with self.assertRaisesOpError(
r"Requested more than 0 entries, but params is empty."):
gather_nd_break_t.eval()
self.evaluate(gather_nd_break_t)
self.assertAllClose(np.empty((0,), dtype=np.float32), gather_nd_ok_val)
def testIndexScalar(self):
@ -91,7 +91,7 @@ class GatherNdTest(test.TestCase):
[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
indices = constant_op.constant([4, 1])
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = gather_nd_t.eval()
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([], gather_nd_t.get_shape())
self.assertAllEqual(np.array(7), gather_nd_val)
@ -101,7 +101,7 @@ class GatherNdTest(test.TestCase):
[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
indices = constant_op.constant([4])
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = gather_nd_t.eval()
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([2], gather_nd_t.get_shape())
self.assertAllEqual(np.array([-7, 7]), gather_nd_val)
@ -111,7 +111,7 @@ class GatherNdTest(test.TestCase):
[[-8, -1, -2, -3, -7, -5], [8, 1, 2, 3, 7, 5]], dtype=np.float32).T
indices = constant_op.constant([[4], [4], [0]])
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = gather_nd_t.eval()
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([3, 2], gather_nd_t.get_shape())
self.assertAllEqual(np.array([[-7, 7], [-7, 7], [-8, 8]]), gather_nd_val)
@ -125,7 +125,7 @@ class GatherNdTest(test.TestCase):
params_t = constant_op.constant(params)
indices = constant_op.constant([[4], [4], [0]])
gather_nd_t = array_ops.gather_nd(params_t, indices)
gather_nd_val = gather_nd_t.eval()
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([3, 2, 2], gather_nd_t.get_shape())
self.assertAllEqual(params[[4, 4, 0]], gather_nd_val)
@ -140,7 +140,7 @@ class GatherNdTest(test.TestCase):
indices = constant_op.constant(
[[], []], dtype=dtypes.int32) # Size (2, 0)
gather_nd_t = array_ops.gather_nd(params_t, indices)
gather_nd_val = gather_nd_t.eval()
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([2, 6, 2, 2], gather_nd_t.get_shape())
self.assertAllEqual(
@ -156,7 +156,7 @@ class GatherNdTest(test.TestCase):
params_t = constant_op.constant(params)
indices = constant_op.constant([[[3], [2], [1]], [[4], [4], [0]]])
gather_nd_t = array_ops.gather_nd(params_t, indices)
gather_nd_val = gather_nd_t.eval()
gather_nd_val = self.evaluate(gather_nd_t)
self.assertEqual([2, 3, 2, 2], gather_nd_t.get_shape())
self.assertAllEqual(params[[3, 2, 1, 4, 4, 0]].reshape(2, 3, 2, 2),
@ -168,7 +168,7 @@ class GatherNdTest(test.TestCase):
params = np.random.rand(*shape)
indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T
gather_nd_t = array_ops.gather_nd(params, indices)
gather_nd_val = gather_nd_t.eval()
gather_nd_val = self.evaluate(gather_nd_t)
expected = params[tuple(indices.T)]
self.assertAllEqual(expected, gather_nd_val)
@ -181,7 +181,7 @@ class GatherNdTest(test.TestCase):
indices = np.vstack([np.random.randint(0, s, size=2000) for s in shape]).T
indices_reshaped = indices.reshape([10, 10, 20, 5])
gather_nd_t = array_ops.gather_nd(params, indices_reshaped)
gather_nd_val = gather_nd_t.eval()
gather_nd_val = self.evaluate(gather_nd_t)
expected = params[tuple(indices.T)]
self.assertAllEqual(expected.reshape([10, 10, 20]), gather_nd_val)
@ -205,7 +205,7 @@ class GatherNdTest(test.TestCase):
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
r"indices\[0,1\] = \[7\] does not index into param shape \[3\]"):
gather_nd.eval()
self.evaluate(gather_nd)
def _disabledTestBadIndicesGPU(self):
# TODO disabled due to different behavior on GPU and CPU
@ -218,7 +218,7 @@ class GatherNdTest(test.TestCase):
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
r"indices\[0,1\] = \[7\] does not index into param shape \[3\]"):
gather_nd.eval()
self.evaluate(gather_nd)
def testBadIndicesWithSlicesCPU(self):
with self.session(use_gpu=False):
@ -227,7 +227,7 @@ class GatherNdTest(test.TestCase):
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
r"indices\[0,2\] = \[1\] does not index into param shape \[1,3\]"):
gather_nd.eval()
self.evaluate(gather_nd)
def _disabledTestBadIndicesWithSlicesGPU(self):
# TODO disabled due to different behavior on GPU and CPU
@ -240,7 +240,7 @@ class GatherNdTest(test.TestCase):
gather_nd = array_ops.gather_nd(params, indices)
with self.assertRaisesOpError(
r"indices\[0,2\] = \[1\] does not index into param shape \[1,3\]"):
gather_nd.eval()
self.evaluate(gather_nd)
def testGradientsRank2Elements(self):
indices = constant_op.constant([[0, 0], [1, 1]], dtype=dtypes.int32)
@ -251,7 +251,7 @@ class GatherNdTest(test.TestCase):
grads = gradients_impl.gradients([outputs], [inputs], [grad_vals])[0]
expected_grads = np.array([[1, 0], [0, 2]], dtype=np.float64)
with self.session(use_gpu=True):
assert np.array_equal(expected_grads, grads.eval())
assert np.array_equal(expected_grads, self.evaluate(grads))
def testGradientsRank2Slices(self):
indices = constant_op.constant([[1], [0]], dtype=dtypes.int32)
@ -278,7 +278,7 @@ class GatherNdTest(test.TestCase):
expected_grads = np.array(
[[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)
with self.session(use_gpu=True):
self.assertAllEqual(expected_grads, grads.eval())
self.assertAllEqual(expected_grads, self.evaluate(grads))
def testGradientsRank7Elements(self):
# Shape [1,1,2,1,1,2,2]
@ -307,7 +307,7 @@ class GatherNdTest(test.TestCase):
[[[[3, 4], [7, 8]]]]
]]], dtype=np.float64)
with self.session(use_gpu=True):
self.assertAllEqual(expected_grads, grads.eval())
self.assertAllEqual(expected_grads, self.evaluate(grads))
def testGradientsInt64Indices(self):
indices = constant_op.constant(
@ -322,7 +322,7 @@ class GatherNdTest(test.TestCase):
expected_grads = np.array(
[[[5, 6], [1, 2]], [[3, 4], [7, 8]]], dtype=np.float64)
with self.session(use_gpu=True):
self.assertAllEqual(expected_grads, grads.eval())
self.assertAllEqual(expected_grads, self.evaluate(grads))
def testGradientsRank2SlicesWithEmptySpace(self):
indices = constant_op.constant([[2], [0], [5]], dtype=dtypes.int32)
@ -361,10 +361,10 @@ class GatherNdOpBenchmark(test.Benchmark):
gather_op = array_ops.gather_nd(t_params, t_indices)
variables.global_variables_initializer().run()
for _ in range(10):
gather_op.eval()
self.evaluate(gather_op)
t1 = time.time()
for _ in range(1000):
gather_op.eval()
self.evaluate(gather_op)
t2 = time.time()
self.report_benchmark(iters=1000, wall_time=(t2 - t1) / 1000.0)
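The simple index cases above can be sanity-checked with plain numpy fancy indexing: for full-rank indices, gather_nd reduces to params[tuple(indices.T)], the same identity the tests themselves use for the random-index cases. A tiny standalone example:

import numpy as np
params = np.array([8, 1, 2, 3, 7, 5])
indices = np.array([[4], [4], [0]])
print(params[tuple(indices.T)])  # [7 7 8], as asserted in the first GatherNd hunk above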

View File

@ -50,7 +50,7 @@ class GatherTest(test.TestCase):
params = constant_op.constant(params_np)
indices_tf = constant_op.constant(indices)
gather_t = array_ops.gather(params, indices_tf)
gather_val = gather_t.eval()
gather_val = self.evaluate(gather_t)
np_val = params_np[indices]
self.assertAllEqual(np_val, gather_val)
self.assertEqual(np_val.shape, gather_t.get_shape())
@ -65,7 +65,7 @@ class GatherTest(test.TestCase):
params = constant_op.constant(params_np)
indices = constant_op.constant(2)
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = gather_t.eval()
gather_val = self.evaluate(gather_t)
self.assertAllEqual(np.take(params_np, 2, axis=axis), gather_val)
expected_shape = data.shape[:axis] + data.shape[axis + 1:]
self.assertEqual(expected_shape, gather_t.get_shape())
@ -81,7 +81,7 @@ class GatherTest(test.TestCase):
# The indices must be in bounds for any axis.
indices = constant_op.constant([0, 1, 0, 2])
gather_t = array_ops.gather(params, indices, axis=axis)
gather_val = gather_t.eval()
gather_val = self.evaluate(gather_t)
self.assertAllEqual(np.take(params_np, [0, 1, 0, 2], axis=axis),
gather_val)
expected_shape = data.shape[:axis] + (4,) + data.shape[axis + 1:]
@ -142,8 +142,11 @@ class GatherTest(test.TestCase):
source_slice = ((slice(None),) * outer_dims + (source_index,) +
(slice(None),) * inner_dims)
correct_params_grad[dest_slice] += gather_grad[source_slice]
self.assertAllClose(correct_params_grad, params_grad.eval(),
atol=2e-6, rtol=2e-6)
self.assertAllClose(
correct_params_grad,
self.evaluate(params_grad),
atol=2e-6,
rtol=2e-6)
def testString(self):
params = np.array([[b"asdf", b"zxcv"], [b"qwer", b"uiop"]])

View File

@ -33,11 +33,11 @@ class SliceTest(test.TestCase):
a_large = array_ops.tile(
constant_op.constant(np.array([False, True] * 4)), [2**29 + 3])
slice_t = array_ops.slice(a_large, np.asarray([3]).astype(np.int64), [3])
slice_val = slice_t.eval()
slice_val = self.evaluate(slice_t)
self.assertAllEqual([True, False, True], slice_val)
slice_t = array_ops.slice(
a_large, constant_op.constant([long(2)**32 + 3], dtype=dtypes.int64),
[3])
slice_val = slice_t.eval()
slice_val = self.evaluate(slice_t)
self.assertAllEqual([True, False, True], slice_val)

View File

@ -32,7 +32,7 @@ class InTopKTest(test.TestCase):
np_ans = np.array(expected)
with self.cached_session():
precision = nn_ops.in_top_k(predictions, target, k)
out = precision.eval()
out = self.evaluate(precision)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, precision)
@ -77,7 +77,7 @@ class InTopKTest(test.TestCase):
np_ans = np.array([False, True])
with self.cached_session():
precision = nn_ops.in_top_k(predictions, target, k)
out = precision.eval()
out = self.evaluate(precision)
self.assertAllClose(np_ans, out)
self.assertShapeEqual(np_ans, precision)
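For the in_top_k assertions above, the op's documented behaviour is that row i is True iff predictions[i, target[i]] is among the k largest entries of row i, with ties at the boundary counting as in. A rough numpy rendering of that rule, ignoring the op's special handling of non-finite values; this is an illustrative sketch, not the kernel:

import numpy as np

def np_in_top_k(predictions, targets, k):
  predictions = np.asarray(predictions, dtype=np.float64)
  targets = np.asarray(targets)
  target_scores = predictions[np.arange(len(targets)), targets]
  strictly_better = (predictions > target_scores[:, None]).sum(axis=1)
  return strictly_better < k

print(np_in_top_k([[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]], [3, 3], 1))
# [ True  True]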

View File

@ -349,7 +349,7 @@ class UniformUnitScalingInitializationTest(test.TestCase):
shape=shape,
initializer=init_ops.uniform_unit_scaling_initializer())
variables.global_variables_initializer().run()
self.assertAllEqual(shape, x.eval().shape)
self.assertAllEqual(shape, self.evaluate(x).shape)
def testDuplicatedInitializer(self):
init = init_ops.uniform_unit_scaling_initializer()
@ -435,7 +435,7 @@ class RangeTest(test.TestCase):
tf_ans = math_ops.range(start, limit, delta, name="range")
self.assertEqual([len(np.arange(start, limit, delta))],
tf_ans.get_shape())
return tf_ans.eval()
return self.evaluate(tf_ans)
def testBasic(self):
self.assertTrue(
@ -524,7 +524,7 @@ class LinSpaceTest(test.TestCase):
with self.session(graph=graph, force_gpu=self.force_gpu):
tf_ans = math_ops.linspace(start, stop, num, name="linspace")
self.assertEqual([num], tf_ans.get_shape())
return tf_ans.eval()
return self.evaluate(tf_ans)
def testPositive(self):
for self.force_gpu in self._gpu_modes():
@ -706,7 +706,7 @@ class ConvolutionDeltaOrthogonalInitializerTest(test.TestCase):
with self.session(use_gpu=True) as sess:
sess.run(my_ops)
# Check the shape of the outputs
t = outputs.eval()
t = self.evaluate(outputs)
self.assertAllEqual(t.shape, outputs_shape)
# Check isometry of the delta-orthogonal kernel.
self.assertAllClose(sess.run(ratio), gain, rtol=tol, atol=tol)
@ -723,7 +723,7 @@ class ConvolutionDeltaOrthogonalInitializerTest(test.TestCase):
initializer=
init_ops.convolutional_delta_orthogonal)
x.initializer.run()
y = x.eval()[1, 1, :, :]
y = self.evaluate(x)[1, 1, :, :]
determinant = np.linalg.det(y)
value += determinant
abs_value += np.abs(determinant)
@ -844,7 +844,7 @@ class ConvolutionOrthogonal1dInitializerTest(test.TestCase):
with self.session(use_gpu=True) as sess:
sess.run(my_ops)
# Check the shape of the outputs
t = outputs.eval()
t = self.evaluate(outputs)
self.assertAllEqual(t.shape, outputs_shape)
# Check isometry of the orthogonal kernel.
self.assertAllClose(sess.run(ratio), gain, rtol=tol, atol=tol)
@ -939,7 +939,7 @@ class ConvolutionOrthogonal2dInitializerTest(test.TestCase):
with self.session(use_gpu=True) as sess:
sess.run(my_ops)
# Check the shape of the outputs
t = outputs.eval()
t = self.evaluate(outputs)
self.assertAllEqual(t.shape, outputs_shape)
# Check isometry of the orthogonal kernel.
self.assertAllClose(sess.run(ratio), gain, rtol=tol, atol=tol)
@ -1064,7 +1064,7 @@ class ConvolutionOrthogonal3dInitializerTest(test.TestCase):
with self.cached_session(use_gpu=True) as sess:
sess.run(my_ops)
# Check the shape of the outputs
t = outputs.eval()
t = self.evaluate(outputs)
self.assertAllEqual(t.shape, outputs_shape)
# Check isometry of the orthogonal kernel.
self.assertAllClose(sess.run(ratio), gain, rtol=tol, atol=tol)

View File

@ -35,7 +35,7 @@ class LargeConcatOpTest(test.TestCase):
with self.session(use_gpu=False):
# TODO(dga): Add more depth to this test to validate correctness,
# not just non-crashingness, once other large tensor fixes have gone in.
_ = onezeros.eval()
_ = self.evaluate(onezeros)
if __name__ == "__main__":

View File

@ -137,7 +137,8 @@ class LinearOperatorCirculantTestSelfAdjointOperator(
matrix = operator.to_dense()
imag_matrix = math_ops.imag(matrix)
eps = np.finfo(np.float32).eps
np.testing.assert_allclose(0, imag_matrix.eval(), rtol=0, atol=eps * 3)
np.testing.assert_allclose(
0, self.evaluate(imag_matrix), rtol=0, atol=eps * 3)
class LinearOperatorCirculantTestHermitianSpectrum(
@ -203,7 +204,8 @@ class LinearOperatorCirculantTestHermitianSpectrum(
matrix = operator.to_dense()
imag_matrix = math_ops.imag(matrix)
eps = np.finfo(np.float32).eps
np.testing.assert_allclose(0, imag_matrix.eval(), rtol=0, atol=eps * 3)
np.testing.assert_allclose(
0, self.evaluate(imag_matrix), rtol=0, atol=eps * 3)
class LinearOperatorCirculantTestNonHermitianSpectrum(
@ -257,7 +259,8 @@ class LinearOperatorCirculantTestNonHermitianSpectrum(
matrix = operator.to_dense()
imag_matrix = math_ops.imag(matrix)
eps = np.finfo(np.float32).eps
np.testing.assert_allclose(0, imag_matrix.eval(), rtol=0, atol=eps * 3)
np.testing.assert_allclose(
0, self.evaluate(imag_matrix), rtol=0, atol=eps * 3)
def test_simple_positive_real_spectrum_gives_self_adjoint_pos_def_oper(self):
with self.cached_session() as sess:
@ -299,7 +302,7 @@ class LinearOperatorCirculantTestNonHermitianSpectrum(
imag_matrix = math_ops.imag(matrix)
eps = np.finfo(np.float32).eps
np.testing.assert_allclose(
0, imag_matrix.eval(), rtol=0, atol=eps * 3 * 4)
0, self.evaluate(imag_matrix), rtol=0, atol=eps * 3 * 4)
def test_convolution_kernel_same_as_first_row_of_to_dense(self):
spectrum = [[3., 2., 1.], [2., 1.5, 1.]]
@ -310,7 +313,7 @@ class LinearOperatorCirculantTestNonHermitianSpectrum(
self.assertAllEqual((2, 3), h.get_shape())
self.assertAllEqual((2, 3, 3), c.get_shape())
self.assertAllClose(h.eval(), c.eval()[:, :, 0])
self.assertAllClose(h.eval(), self.evaluate(c)[:, :, 0])
def test_assert_non_singular_fails_for_singular_operator(self):
spectrum = math_ops.cast([0, 4, 2j + 2], dtypes.complex64)

View File

@ -83,7 +83,7 @@ class LinearOperatorIdentityTest(
num_rows=2, dtype=dtypes.float16)
x = rng.randn(2, 3).astype(np.float16)
y = operator.matmul(x)
self.assertAllClose(x, y.eval())
self.assertAllClose(x, self.evaluate(y))
def test_non_scalar_num_rows_raises_static(self):
with self.assertRaisesRegexp(ValueError, "must be a 0-D Tensor"):
@ -357,7 +357,7 @@ class LinearOperatorScaledIdentityTest(
num_rows=2, multiplier=multiplier)
x = rng.randn(2, 3).astype(np.float16)
y = operator.matmul(x)
self.assertAllClose(multiplier[..., None, None] * x, y.eval())
self.assertAllClose(multiplier[..., None, None] * x, self.evaluate(y))
def test_non_scalar_num_rows_raises_static(self):
# Many "test_...num_rows" tests are performed in LinearOperatorIdentity.

View File

@ -70,8 +70,8 @@ class KroneckerDenseTest(test.TestCase):
[5., 10., -1., -2.]], dtype=dtypes.float32)
with self.cached_session():
self.assertAllClose(_kronecker_dense([x, y]).eval(), z.eval())
self.assertAllClose(_kronecker_dense([y, x]).eval(), w.eval())
self.assertAllClose(_kronecker_dense([x, y]).eval(), self.evaluate(z))
self.assertAllClose(_kronecker_dense([y, x]).eval(), self.evaluate(w))
class SquareLinearOperatorKroneckerTest(

View File

@ -134,7 +134,7 @@ class LinearOperatorTest(test.TestCase):
with self.cached_session():
operator_dense = operator.to_dense()
self.assertAllEqual((2, 3, 4), operator_dense.get_shape())
self.assertAllClose(matrix, operator_dense.eval())
self.assertAllClose(matrix, self.evaluate(operator_dense))
def test_generic_to_dense_method_non_square_matrix_tensor(self):
matrix = rng.randn(2, 3, 4)
@ -152,7 +152,7 @@ class LinearOperatorTest(test.TestCase):
with self.cached_session():
y = operator.matvec(x)
self.assertAllEqual((2,), y.get_shape())
self.assertAllClose([1., 2.], y.eval())
self.assertAllClose([1., 2.], self.evaluate(y))
def test_solvevec(self):
matrix = [[1., 0], [0., 2.]]
@ -161,7 +161,7 @@ class LinearOperatorTest(test.TestCase):
with self.cached_session():
x = operator.solvevec(y)
self.assertAllEqual((2,), x.get_shape())
self.assertAllClose([1., 1 / 2.], x.eval())
self.assertAllClose([1., 1 / 2.], self.evaluate(x))
def test_is_square_set_to_true_for_square_static_shapes(self):
operator = LinearOperatorShape(shape=(2, 4, 4))

View File

@ -102,7 +102,7 @@ class BroadcastMatrixBatchDimsTest(test.TestCase):
self.assertTrue(isinstance(tensor, ops.Tensor))
with self.cached_session():
self.assertAllClose(arr, tensor.eval())
self.assertAllClose(arr, self.evaluate(tensor))
def test_static_dims_broadcast(self):
# x.batch_shape = [3, 1, 2]
@ -205,7 +205,7 @@ class CholeskySolveWithBroadcastTest(test.TestCase):
result = linear_operator_util.cholesky_solve_with_broadcast(chol, rhs)
self.assertAllEqual((2, 3, 7), result.get_shape())
expected = linalg_ops.cholesky_solve(chol_broadcast, rhs)
self.assertAllClose(expected.eval(), result.eval())
self.assertAllClose(expected.eval(), self.evaluate(result))
def test_dynamic_dims_broadcast_64bit(self):
# batch_shape = [2, 2]
@ -244,7 +244,7 @@ class MatmulWithBroadcastTest(test.TestCase):
result = linear_operator_util.matmul_with_broadcast(x, y)
self.assertAllEqual((2, 1, 7), result.get_shape())
expected = math_ops.matmul(x, y_broadcast)
self.assertAllClose(expected.eval(), result.eval())
self.assertAllClose(expected.eval(), self.evaluate(result))
def test_static_dims_broadcast_y_has_extra_dims(self):
# Since the second arg has extra dims, and the domain dim of the first arg
@ -261,7 +261,7 @@ class MatmulWithBroadcastTest(test.TestCase):
result = linear_operator_util.matmul_with_broadcast(x, y)
self.assertAllEqual((2, 3, 5, 5), result.get_shape())
expected = math_ops.matmul(x_broadcast, y)
self.assertAllClose(expected.eval(), result.eval())
self.assertAllClose(expected.eval(), self.evaluate(result))
def test_static_dims_broadcast_y_has_extra_dims_transpose_a_and_b(self):
# Since the second arg has extra dims, and the domain dim of the first arg
@ -280,7 +280,7 @@ class MatmulWithBroadcastTest(test.TestCase):
self.assertAllEqual((2, 3, 5, 1), result.get_shape())
expected = math_ops.matmul(
x_broadcast, y, transpose_a=True, transpose_b=True)
self.assertAllClose(expected.eval(), result.eval())
self.assertAllClose(expected.eval(), self.evaluate(result))
def test_static_dims_broadcast_y_has_extra_dims_transpose_dynamic(self):
# Since the second arg has extra dims, and the domain dim of the first arg
@ -344,7 +344,7 @@ class MatrixSolveWithBroadcastTest(test.TestCase):
matrix, rhs)
self.assertAllEqual((2, 3, 7), result.get_shape())
expected = linalg_ops.matrix_solve(matrix, rhs_broadcast)
self.assertAllClose(expected.eval(), result.eval())
self.assertAllClose(expected.eval(), self.evaluate(result))
def test_static_dims_broadcast_rhs_has_extra_dims(self):
# Since the second arg has extra dims, and the domain dim of the first arg
@ -362,7 +362,7 @@ class MatrixSolveWithBroadcastTest(test.TestCase):
result = linear_operator_util.matrix_solve_with_broadcast(matrix, rhs)
self.assertAllEqual((2, 3, 2), result.get_shape())
expected = linalg_ops.matrix_solve(matrix_broadcast, rhs)
self.assertAllClose(expected.eval(), result.eval())
self.assertAllClose(expected.eval(), self.evaluate(result))
def test_static_dims_broadcast_rhs_has_extra_dims_dynamic(self):
# Since the second arg has extra dims, and the domain dim of the first arg
@ -385,7 +385,7 @@ class MatrixSolveWithBroadcastTest(test.TestCase):
self.assertAllEqual(3, result.shape.ndims)
expected = linalg_ops.matrix_solve(matrix_broadcast, rhs)
self.assertAllClose(
expected.eval(),
self.evaluate(expected),
result.eval(feed_dict={
matrix_ph: matrix,
rhs_ph: rhs
@ -408,7 +408,7 @@ class MatrixSolveWithBroadcastTest(test.TestCase):
matrix, rhs, adjoint=True)
self.assertAllEqual((2, 3, 2), result.get_shape())
expected = linalg_ops.matrix_solve(matrix_broadcast, rhs, adjoint=True)
self.assertAllClose(expected.eval(), result.eval())
self.assertAllClose(expected.eval(), self.evaluate(result))
def test_dynamic_dims_broadcast_64bit(self):
# batch_shape = [2, 2]
@ -447,7 +447,7 @@ class MatrixTriangularSolveWithBroadcastTest(test.TestCase):
matrix, rhs)
self.assertAllEqual((2, 3, 7), result.get_shape())
expected = linalg_ops.matrix_triangular_solve(matrix, rhs_broadcast)
self.assertAllClose(expected.eval(), result.eval())
self.assertAllClose(expected.eval(), self.evaluate(result))
def test_static_dims_broadcast_rhs_has_extra_dims(self):
# Since the second arg has extra dims, and the domain dim of the first arg
@ -466,7 +466,7 @@ class MatrixTriangularSolveWithBroadcastTest(test.TestCase):
matrix, rhs)
self.assertAllEqual((2, 3, 2), result.get_shape())
expected = linalg_ops.matrix_triangular_solve(matrix_broadcast, rhs)
self.assertAllClose(expected.eval(), result.eval())
self.assertAllClose(expected.eval(), self.evaluate(result))
def test_static_dims_broadcast_rhs_has_extra_dims_and_adjoint(self):
# Since the second arg has extra dims, and the domain dim of the first arg
@ -486,7 +486,7 @@ class MatrixTriangularSolveWithBroadcastTest(test.TestCase):
self.assertAllEqual((2, 3, 2), result.get_shape())
expected = linalg_ops.matrix_triangular_solve(
matrix_broadcast, rhs, adjoint=True)
self.assertAllClose(expected.eval(), result.eval())
self.assertAllClose(expected.eval(), self.evaluate(result))
def test_dynamic_dims_broadcast_64bit(self):
# batch_shape = [2]

View File

@ -50,7 +50,7 @@ class ShapeTest(test_lib.TestCase):
determinants = linalg_ops.matrix_determinant(batch_identity)
reduced = math_ops.reduce_sum(determinants)
sum_grad = gradients_impl.gradients(reduced, batch_identity)[0]
self.assertAllClose(batch_identity.eval(), sum_grad.eval())
self.assertAllClose(batch_identity.eval(), self.evaluate(sum_grad))
class MatrixUnaryFunctorGradientTest(test_lib.TestCase):
@ -69,7 +69,7 @@ def _GetMatrixUnaryFunctorGradientTest(functor_, dtype_, shape_, **kwargs_):
if functor_.__name__ == 'matrix_square_root':
# Square the input matrix to ensure that its matrix square root exists
a = math_ops.matmul(a, a)
a_np = a.eval()
a_np = self.evaluate(a)
b = functor_(a, **kwargs_)
# Optimal stepsize for central difference is O(epsilon^{1/3}).

View File

@ -85,7 +85,7 @@ class LogdetTest(test.TestCase):
# [_RandomPDMatrix(n, self.rng, np_dtype),
# _RandomPDMatrix(n, self.rng, np_dtype)]).astype(np_dtype)
logdet_tf = linalg.logdet(matrix)
self.assertAllClose(logdet_np, logdet_tf.eval(), atol=atol)
self.assertAllClose(logdet_np, self.evaluate(logdet_tf), atol=atol)
def test_works_with_underflow_case(self):
for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5),
@ -94,7 +94,7 @@ class LogdetTest(test.TestCase):
_, logdet_np = np.linalg.slogdet(matrix)
with self.session(use_gpu=True):
logdet_tf = linalg.logdet(matrix)
self.assertAllClose(logdet_np, logdet_tf.eval(), atol=atol)
self.assertAllClose(logdet_np, self.evaluate(logdet_tf), atol=atol)
class SlogdetTest(test.TestCase):
@ -110,8 +110,9 @@ class SlogdetTest(test.TestCase):
sign_np, log_abs_det_np = np.linalg.slogdet(matrix)
with self.session(use_gpu=True):
sign_tf, log_abs_det_tf = linalg.slogdet(matrix)
self.assertAllClose(log_abs_det_np, log_abs_det_tf.eval(), atol=atol)
self.assertAllClose(sign_np, sign_tf.eval(), atol=atol)
self.assertAllClose(
log_abs_det_np, self.evaluate(log_abs_det_tf), atol=atol)
self.assertAllClose(sign_np, self.evaluate(sign_tf), atol=atol)
def test_works_with_underflow_case(self):
for np_dtype, atol in [(np.float32, 0.05), (np.float64, 1e-5),
@ -120,8 +121,9 @@ class SlogdetTest(test.TestCase):
sign_np, log_abs_det_np = np.linalg.slogdet(matrix)
with self.session(use_gpu=True):
sign_tf, log_abs_det_tf = linalg.slogdet(matrix)
self.assertAllClose(log_abs_det_np, log_abs_det_tf.eval(), atol=atol)
self.assertAllClose(sign_np, sign_tf.eval(), atol=atol)
self.assertAllClose(
log_abs_det_np, self.evaluate(log_abs_det_tf), atol=atol)
self.assertAllClose(sign_np, self.evaluate(sign_tf), atol=atol)
class AdjointTest(test.TestCase):
@ -135,7 +137,7 @@ class AdjointTest(test.TestCase):
matrix = ops.convert_to_tensor(matrix_np)
transposed = linalg.adjoint(matrix)
self.assertEqual((3, 2), transposed.get_shape())
self.assertAllEqual(expected_transposed, transposed.eval())
self.assertAllEqual(expected_transposed, self.evaluate(transposed))
class EyeTest(parameterized.TestCase, test.TestCase):

Some files were not shown because too many files have changed in this diff.