Use self.evaluate for global_variables_initializer

This fixes some common incompatibilities with eager mode execution.

PiperOrigin-RevId: 319258451
Change-Id: I93c66eb3b8c75f75fd9c1deb9526fbd937b93805

Commit: 6e8976428d (parent: 153947b5c5)
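Every hunk below makes the same one-line substitution: the graph-only `variables.global_variables_initializer().run()` call becomes `self.evaluate(variables.global_variables_initializer())`. The sketch that follows is illustrative only (the test class and variable names are invented, not taken from this commit); it shows why the `self.evaluate` form works under both graph and eager execution: `Operation.run()` needs a default session, while `tf.test.TestCase.evaluate` runs the op through the test's session in graph mode and executes it directly in eager mode.

# Illustrative sketch only -- the test class and variable below are invented,
# not part of this commit. It shows the before/after pattern in isolation.
from tensorflow.python.ops import variables
from tensorflow.python.platform import test


class InitializerPatternTest(test.TestCase):

  def testInitializeThenRead(self):
    var0 = variables.Variable([1.0, 2.0])

    # Old pattern (graph mode only): Operation.run() needs a default session,
    # so it fails when the test body runs with eager execution enabled.
    #   variables.global_variables_initializer().run()

    # New pattern: self.evaluate() runs the op through the test's session in
    # graph mode and executes it directly (a no-op) in eager mode, so the same
    # test body works in both.
    self.evaluate(variables.global_variables_initializer())
    self.assertAllClose([1.0, 2.0], self.evaluate(var0))


if __name__ == "__main__":
  test.main()

The hunks below apply exactly this substitution at each call site; removed lines are marked with "-" and added lines with "+".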
@@ -48,7 +48,7 @@ class AdagradDAOptimizerTest(xla_test.XLATestCase):
l2_regularization_strength=0.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))

@@ -85,7 +85,7 @@ class AdagradDAOptimizerTest(xla_test.XLATestCase):
l2_regularization_strength=0.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1))

@@ -116,7 +116,7 @@ class AdagradDAOptimizerTest(xla_test.XLATestCase):
l2_regularization_strength=0.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1))

@@ -147,7 +147,7 @@ class AdagradDAOptimizerTest(xla_test.XLATestCase):
l2_regularization_strength=2.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1))
@@ -40,7 +40,7 @@ class AdagradOptimizerTest(xla_test.XLATestCase):
ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))

@@ -68,7 +68,7 @@ class AdagradOptimizerTest(xla_test.XLATestCase):
constant_op.constant(3.0), initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))

@@ -104,7 +104,7 @@ class AdagradOptimizerTest(xla_test.XLATestCase):
self.assertEqual(slot0.get_shape(), var0.get_shape())
slot1 = ada_opt.get_slot(var1, "accumulator")
self.assertEqual(slot1.get_shape(), var1.get_shape())
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

# Fetch params to validate initial values.
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
@@ -72,7 +72,7 @@ class AdamOptimizerTest(xla_test.XLATestCase):
grads1 = array_ops.placeholder(dtype)
opt = adam.AdamOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))

@@ -115,7 +115,7 @@ class AdamOptimizerTest(xla_test.XLATestCase):
grads1 = array_ops.placeholder(dtype)
opt = adam.AdamOptimizer(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))

@@ -159,7 +159,7 @@ class AdamOptimizerTest(xla_test.XLATestCase):
opt = adam.AdamOptimizer()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

beta1_power, beta2_power = opt._get_beta_accumulators()
@@ -48,7 +48,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))

@@ -63,7 +63,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
adagrad_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))

@@ -83,7 +83,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))

@@ -98,7 +98,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
opt = gradient_descent.GradientDescentOptimizer(3.0, name="sgd")
sgd_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))

@@ -122,7 +122,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))

@@ -156,7 +156,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))

@@ -189,7 +189,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))

@@ -223,7 +223,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))

@@ -262,7 +262,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1))

@@ -303,7 +303,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
l2_regularization_strength=2.0)
update0 = opt0.apply_gradients([(grads0, var0)])
update1 = opt1.apply_gradients([(grads1, var1)])
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var1))
@@ -50,7 +50,7 @@ class MomentumOptimizerTest(xla_test.XLATestCase):
learning_rate=2.0, momentum=0.9)
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")

@@ -114,7 +114,7 @@ class MomentumOptimizerTest(xla_test.XLATestCase):
mom_op = momentum_lib.MomentumOptimizer(
learning_rate=0.1, momentum=0.9, use_nesterov=True)
opt_op = mom_op.minimize(cost, global_step, [var0, var1])
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
for _ in range(1, 5):
opt_op.run()
var0_np, accum0_np = self._update_nesterov_momentum_numpy(

@@ -136,7 +136,7 @@ class MomentumOptimizerTest(xla_test.XLATestCase):
momentum=constant_op.constant(0.9))
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
@@ -43,7 +43,7 @@ class ProximalAdagradOptimizerTest(xla_test.XLATestCase):
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))

@@ -74,7 +74,7 @@ class ProximalAdagradOptimizerTest(xla_test.XLATestCase):
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))

@@ -98,7 +98,7 @@ class ProximalAdagradOptimizerTest(xla_test.XLATestCase):
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))

@@ -122,7 +122,7 @@ class ProximalAdagradOptimizerTest(xla_test.XLATestCase):
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))

@@ -141,7 +141,7 @@ class ProximalAdagradOptimizerTest(xla_test.XLATestCase):
grads1 = constant_op.constant([0.01, 0.02])

update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
@@ -40,7 +40,7 @@ class ProximalGradientDescentOptimizerTest(xla_test.XLATestCase):
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))

@@ -62,7 +62,7 @@ class ProximalGradientDescentOptimizerTest(xla_test.XLATestCase):
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))

@@ -84,7 +84,7 @@ class ProximalGradientDescentOptimizerTest(xla_test.XLATestCase):
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0, l1_regularization_strength=0.001, l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))

@@ -106,7 +106,7 @@ class ProximalGradientDescentOptimizerTest(xla_test.XLATestCase):
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
3.0, l1_regularization_strength=0.001, l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([4.0, 3.0], self.evaluate(var1))

@@ -125,7 +125,7 @@ class ProximalGradientDescentOptimizerTest(xla_test.XLATestCase):
grads1 = constant_op.constant([0.01, 0.02])

update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
@@ -801,7 +801,7 @@ class TensorArrayTest(xla_test.XLATestCase):
# state0_grad = gradients_impl.gradients([vout], [state0], [grad_val])[0]
# var_grad = gradients_impl.gradients([vout], [var], [grad_val])[0]

- # variables.global_variables_initializer().run()
+ # self.evaluate(variables.global_variables_initializer())
# state0_t, var_t, v0_t, vout_t, v0_grad_t, var_grad_t, state0_grad_t = (
# self.evaluate([state0, var, v0, vout, v0_grad, var_grad, state0_grad])
# )

@@ -1150,7 +1150,7 @@ class TensorArrayTest(xla_test.XLATestCase):

return [read0, read1, size0, size1, v0, v1]

- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

read0_v, read1_v, size0_v, size1_v, v0, v1 = self.evaluate(
xla.compile(fn))
@@ -213,7 +213,7 @@ class ParameterServerStrategyTestBase(
self.assertNotEqual(f, None)

if context.num_gpus() >= 1 and num_gpus <= 1:
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
y_val, z_val, f_val = sess.run([y, z, f])
self.assertEqual(y_val, 33.0)
self.assertEqual(z_val, 43.0)

@@ -259,7 +259,7 @@ class ParameterServerStrategyTestBase(
x = d.extended.call_for_each_replica(model_fn)

if context.num_gpus() >= 1:
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
x_val = sess.run(x)
if num_gpus < 1:
self.assertEqual(x_val, [13.0, 25.0])

@@ -363,7 +363,7 @@ class ParameterServerStrategyTestBase(
self.assertNotEqual(f, None)

if context.num_gpus() >= 1 and num_gpus <= 1:
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
y_val, z_val, f_val = sess.run([y, z, f])
self.assertEqual(y_val, 33.0)
self.assertEqual(z_val, 43.0)

@@ -408,7 +408,7 @@ class ParameterServerStrategyTestBase(
train_op = d.group(train_op)

if task_id == 0:
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

# Workers waiting for chief worker's initializing variables.
self._init_condition.acquire()

@@ -496,7 +496,7 @@ class ParameterServerStrategyTestBase(
if (not task_type or
multi_worker_util.is_chief(
d.extended._cluster_spec, task_type, task_id)):
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

# Workers waiting for chief worker's initializing variables.
self._init_condition.acquire()
@@ -537,7 +537,7 @@ class FunctionTest(test.TestCase):
z = Foo(v)

with self.session(graph=g):
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(z, 101.)

@test_util.run_deprecated_v1

@@ -772,7 +772,7 @@ class FunctionTest(test.TestCase):
z = Bar()

with self.session(graph=g):
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(y, [[12.0]])
self.assertAllEqual(z, [[1.0]])
@@ -68,7 +68,7 @@ class DenseTest(test.TestCase, parameterized.TestCase):
v = variable_scope.get_variable(
'X', initializer=init_ops.zeros_initializer(), shape=(1, 1))
x = core_layers.Dense(1)(v)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(x, [[0.0]])

@combinations.generate(combinations.combine(mode=['graph', 'eager']))

@@ -280,7 +280,7 @@ class DenseTest(test.TestCase, parameterized.TestCase):
initializer=init_ops.ones_initializer()), self.cached_session():
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
weights = _get_variable_dict_from_varstore()
self.assertEqual(len(weights), 2)
# Check that the matrix weights got initialized to ones (from scope).

@@ -445,7 +445,7 @@ class DropoutTest(test.TestCase, parameterized.TestCase):
with self.cached_session():
inputs = array_ops.ones((5, 5))
dropped = core_layers.dropout(inputs, 0.5, training=True, seed=1)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
np_output = self.evaluate(dropped)
self.assertAlmostEqual(0., np_output.min())
dropped = core_layers.dropout(inputs, 0.5, training=False, seed=1)
@@ -133,7 +133,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
constant_op.constant(grads1_np_indices), constant_op.constant([3]))
opt = adam.Adam()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

# Fetch params to validate initial values
self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0))

@@ -167,7 +167,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
g_sum = lambda: math_ops.reduce_sum(array_ops.gather(var, indices)) # pylint: disable=cell-var-from-loop
optimizer = adam.Adam(3.0)
minimize_op = optimizer.minimize(g_sum, var_list=[var])
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
minimize_op.run()

def testSparseRepeatedIndices(self):

@@ -192,7 +192,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adam.Adam().apply_gradients(
[(grad_aggregated, aggregated_update_var)])
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.assertAllClose(aggregated_update_var,
self.evaluate(repeated_index_update_var))
for _ in range(3):

@@ -459,7 +459,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
grads1 = constant_op.constant(grads1_np)
opt = adam.Adam(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))

@@ -499,7 +499,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
opt = adam.Adam()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)

@@ -585,7 +585,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
constant_op.constant(grads1_np_indices), constant_op.constant([3]))
opt = adam.NonFusedAdam()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

# Fetch params to validate initial values
self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0))

@@ -619,7 +619,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
g_sum = lambda: math_ops.reduce_sum(array_ops.gather(var, indices)) # pylint: disable=cell-var-from-loop
optimizer = adam.NonFusedAdam(3.0)
minimize_op = optimizer.minimize(g_sum, var_list=[var])
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
minimize_op.run()

def testSparseRepeatedIndices(self):

@@ -644,7 +644,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adam.NonFusedAdam().apply_gradients(
[(grad_aggregated, aggregated_update_var)])
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.assertAllClose(aggregated_update_var,
self.evaluate(repeated_index_update_var))
for _ in range(3):

@@ -915,7 +915,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
grads1 = constant_op.constant(grads1_np)
opt = adam.NonFusedAdam(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))

@@ -955,7 +955,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
opt = adam.NonFusedAdam()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
@@ -103,7 +103,7 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
constant_op.constant(grads1_np_indices), constant_op.constant([3]))
opt = adamax.Adamax()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0, 3.0], var0)

@@ -137,7 +137,7 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
g_sum = lambda: math_ops.reduce_sum(array_ops.gather(var, indices)) # pylint: disable=cell-var-from-loop
optimizer = adamax.Adamax(3.0)
minimize_op = optimizer.minimize(g_sum, var_list=[var])
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
minimize_op.run()

def testSparseRepeatedIndices(self):

@@ -162,7 +162,7 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update = adamax.Adamax().apply_gradients(
[(grad_aggregated, aggregated_update_var)])
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.assertAllClose(aggregated_update_var,
repeated_index_update_var.eval())
for _ in range(3):

@@ -289,7 +289,7 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
grads1 = constant_op.constant(grads1_np)
opt = adamax.Adamax(constant_op.constant(0.001))
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0)

@@ -327,7 +327,7 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
opt = adamax.Adamax()
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

beta1_power = get_beta_accumulators(opt, dtype)
@@ -52,7 +52,7 @@ class FtrlOptimizerTest(test.TestCase):
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([0.0, 0.0], v0_val)

@@ -89,7 +89,7 @@ class FtrlOptimizerTest(test.TestCase):
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)

@@ -116,7 +116,7 @@ class FtrlOptimizerTest(test.TestCase):
return pred * pred

sgd_op = ftrl.Ftrl(1.0).minimize(loss, var_list=[var0])
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd

@@ -141,7 +141,7 @@ class FtrlOptimizerTest(test.TestCase):
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)

@@ -171,7 +171,7 @@ class FtrlOptimizerTest(test.TestCase):
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)

@@ -209,7 +209,7 @@ class FtrlOptimizerTest(test.TestCase):
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)

@@ -246,7 +246,7 @@ class FtrlOptimizerTest(test.TestCase):
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([[1.0], [2.0]], v0_val)

@@ -283,7 +283,7 @@ class FtrlOptimizerTest(test.TestCase):
l2_regularization_strength=2.0)
update0 = opt0.apply_gradients([(grads0, var0)])
update1 = opt1.apply_gradients([(grads1, var1)])
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)

@@ -320,7 +320,7 @@ class FtrlOptimizerTest(test.TestCase):
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)

update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

v0_val, v1_val = self.evaluate([var0, var1])
if is_sparse:
@@ -96,7 +96,7 @@ class NadamOptimizerTest(test.TestCase):
constant_op.constant(grads1_np_indices), constant_op.constant([3]))
opt = nadam.Nadam(epsilon=sparse_epsilon)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

# Fetch params to validate initial values
self.assertAllClose([1.0, 1.0, 2.0], var0)

@@ -137,7 +137,7 @@ class NadamOptimizerTest(test.TestCase):
grads1 = constant_op.constant(grads1_np)
opt = nadam.Nadam()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0)
@@ -1067,7 +1067,7 @@ class StridedSliceBenchmark(test_lib.Benchmark):
"""Benchmark new strided slice operation on non-trivial case."""

def run_and_time(self, slice_op):
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
for _ in range(10):
_ = slice_op.eval()
iters = 1000
@@ -235,7 +235,7 @@ class BatchMatMulBenchmark(test.Benchmark):
GetRandomNormalInput(a_shape, np.float32))
matrix_b = variables.Variable(
GetRandomNormalInput(b_shape, np.float32))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

# Use batch matmul op's internal broadcasting.
self.run_op_benchmark(
@@ -198,7 +198,7 @@ class CastOpTest(test.TestCase):
x = variables.Variable(5, dtype=dtypes.float32)
y = variables.Variable(True, dtype=dtypes.bool)
cast = math_ops.cast(y, x.dtype)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(cast))

@test_util.run_deprecated_v1
@@ -330,7 +330,7 @@ class CholeskyBenchmark(test.Benchmark):
ops.device("/cpu:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
l = linalg_ops.cholesky(matrix)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(

@@ -344,7 +344,7 @@ class CholeskyBenchmark(test.Benchmark):
ops.device("/device:GPU:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
l = linalg_ops.cholesky(matrix)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(

@@ -364,7 +364,7 @@ class CholeskyBenchmark(test.Benchmark):
grad_matrix = variables.Variable(
np.random.randn(*matrix.shape).astype(np.float32))
grad = grad_fn(l, grad_matrix)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(
@@ -4959,7 +4959,7 @@ class WhileOpBenchmark(test.Benchmark):
with session.Session() as sess, ops.device(default_device):
# Get the initial id i, input x, and kernel.
i, x, kernel = self._getInitVariables()
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

if static_unroll:
for _ in xrange(steps):
@@ -2943,7 +2943,7 @@ class Conv2DBenchmark(test.Benchmark):
x = convolutional.conv2d(x, num_outputs, [1, kernel_w])
outputs = x

- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
num_iterations = 4
for iter_index in xrange(num_iterations):
start = time.time()

@@ -2959,7 +2959,7 @@ class Conv2DBenchmark(test.Benchmark):
config.graph_options.rewrite_options.dependency_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
with session_lib.Session(config=config) as session:
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
session, op, burn_iters=burn_iters, min_iters=num_iters, name=name)
@@ -194,7 +194,7 @@ class MatrixDeterminantBenchmark(test.Benchmark):
config=benchmark.benchmark_config()) as sess, ops.device("/cpu:0"):
matrix = self._GenerateMatrix(shape)
d = linalg_ops.matrix_determinant(matrix)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(

@@ -207,7 +207,7 @@ class MatrixDeterminantBenchmark(test.Benchmark):
config=benchmark.benchmark_config()) as sess, ops.device("/gpu:0"):
matrix = self._GenerateMatrix(shape)
d = linalg_ops.matrix_determinant(matrix)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(
@@ -258,7 +258,7 @@ class NormalTest(test.TestCase):
value = func(x)
grads = gradients_impl.gradients(value, [mu, sigma])
with self.session(graph=g):
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.assertAllFinite(value)
self.assertAllFinite(grads[0])
self.assertAllFinite(grads[1])

@@ -381,7 +381,7 @@ class NormalTest(test.TestCase):
value = dist.quantile(p)
grads = gradients_impl.gradients(value, [mu, p])
with self.cached_session(graph=g):
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.assertAllFinite(grads[0])
self.assertAllFinite(grads[1])
@@ -435,7 +435,7 @@ class EinsumBenchmark(test.Benchmark):
input_shape = (dim,) * len(subscript)
input_vars.append(
variables.Variable(np.array(r.randn(*input_shape), np.float32)))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

# Call einsum_v1.
self.run_op_benchmark(
@@ -76,7 +76,7 @@ class ScatterAddSubTest(test.TestCase):
ind = constant_op.constant(indices, dtype=dtypes.int32)
p2 = scatter_op(p, ind, vals, name="updated_p")
# p = init
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
# p += vals
result = self.evaluate(p2)
# Compute the expected 'p' using numpy operations.

@@ -302,7 +302,7 @@ class EmbeddingLookupTest(test.TestCase):
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
print("Construct ids", ids.get_shape())
embedding = embedding_ops.embedding_lookup(p_variable, ids)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
params_values = [params[p_i.name] for p_i in p]
# Test that the PartitionedVariable components equal the list in p
p_var_val = self.evaluate(list(p_variable))

@@ -325,7 +325,7 @@ class EmbeddingLookupTest(test.TestCase):
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
print("Construct ids", ids.get_shape())
embedding = embedding_ops.embedding_lookup(p_variable, ids)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
params_values = [params[p_i.name] for p_i in p]
# Test that the PartitionedVariable components equal the list in p
p_var_val = self.evaluate(list(p_variable))

@@ -425,7 +425,7 @@ class EmbeddingLookupTest(test.TestCase):
# will test that aspect.
id_vals = np.random.randint(vocab_size, size=num_vals)
ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
embedding = embedding_ops.embedding_lookup(
p_variable, ids, partition_strategy="div")
tf_result = embedding.eval(feed_dict=feed_dict)
@@ -396,7 +396,7 @@ class GatherNdOpBenchmark(test.Benchmark):
t_params = variables.Variable(params)
t_indices = variables.Variable(indices)
gather_op = array_ops.gather_nd(t_params, t_indices)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
for _ in range(10):
self.evaluate(gather_op)
t1 = time.time()
@@ -373,7 +373,7 @@ class UniformUnitScalingInitializationTest(test.TestCase):
"x",
shape=shape,
initializer=init_ops.uniform_unit_scaling_initializer())
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(shape, self.evaluate(x).shape)

@test_util.run_deprecated_v1

@@ -1347,7 +1347,7 @@ class IdentityInitializerTest(test.TestCase):
with variable_scope.variable_scope(
"foo", partitioner=partitioner, initializer=init):
v = array_ops.identity(variable_scope.get_variable("bar", shape=shape))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.assertAllClose(v, np.eye(*shape))
@@ -126,7 +126,7 @@ class PrintV2Test(test.TestCase):
def testPrintOneVariable(self):
var = variables.Variable(math_ops.range(10))
if not context.executing_eagerly():
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
with self.captureWritesToStream(sys.stderr) as printed:
print_op = logging_ops.print_v2(var)
self.evaluate(print_op)

@@ -138,7 +138,7 @@ class PrintV2Test(test.TestCase):
plus_one = var_one.assign_add(1.0)
var_two = variables.Variable(math_ops.range(10))
if not context.executing_eagerly():
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
with self.captureWritesToStream(sys.stderr) as printed:
self.evaluate(plus_one)
print_op = logging_ops.print_v2(var_one, {"second": var_two})
@@ -268,7 +268,7 @@ class LuBenchmark(test.Benchmark):
ops.device("/cpu:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
lu, p = linalg_ops.lu(matrix)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(lu, p),

@@ -281,7 +281,7 @@ class LuBenchmark(test.Benchmark):
ops.device("/device:GPU:0"):
matrix = variables.Variable(self._GenerateMatrix(shape))
lu, p = linalg_ops.lu(matrix)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(lu, p),
@@ -117,7 +117,7 @@ class MatrixBandPartBenchmark(test_lib.Benchmark):
ops.device("/cpu:0"):
matrix = variables.Variable(array_ops.ones(shape_))
band = array_ops.matrix_band_part(matrix, limits[0], limits[1])
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(band),

@@ -131,7 +131,7 @@ class MatrixBandPartBenchmark(test_lib.Benchmark):
ops.device("/gpu:0"):
matrix = variables.Variable(array_ops.ones(shape_))
band = array_ops.matrix_band_part(matrix, limits[0], limits[1])
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(band),
@@ -192,7 +192,7 @@ class MatrixExponentialBenchmark(test.Benchmark):
ops.device("/cpu:0"):
matrix = self._GenerateMatrix(shape)
expm = linalg_impl.matrix_exponential(matrix)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(expm),

@@ -205,7 +205,7 @@ class MatrixExponentialBenchmark(test.Benchmark):
ops.device("/gpu:0"):
matrix = self._GenerateMatrix(shape)
expm = linalg_impl.matrix_exponential(matrix)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(expm),
@@ -186,7 +186,7 @@ class MatrixInverseBenchmark(test.Benchmark):
ops.device("/cpu:0"):
matrix = self._GenerateMatrix(shape)
inv = linalg_ops.matrix_inverse(matrix, adjoint=adjoint)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(inv),

@@ -200,7 +200,7 @@ class MatrixInverseBenchmark(test.Benchmark):
ops.device("/gpu:0"):
matrix = self._GenerateMatrix(shape)
inv = linalg_ops.matrix_inverse(matrix, adjoint=adjoint)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(inv),
@@ -177,7 +177,7 @@ class MatrixLogarithmBenchmark(test.Benchmark):
ops.device("/cpu:0"):
matrix = self._GenerateMatrix(shape)
logm = gen_linalg_ops.matrix_logarithm(matrix)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(logm),
@@ -335,7 +335,7 @@ class MatrixSolveLsBenchmark(test_lib.Benchmark):
ops.device("/cpu:0"):
matrix, rhs = _GenerateTestData(matrix_shape, num_rhs)
x = linalg_ops.matrix_solve_ls(matrix, rhs, regularizer)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(x),

@@ -350,7 +350,7 @@ class MatrixSolveLsBenchmark(test_lib.Benchmark):
ops.device("/gpu:0"):
matrix, rhs = _GenerateTestData(matrix_shape, num_rhs)
x = linalg_ops.matrix_solve_ls(matrix, rhs, regularizer)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
@@ -183,7 +183,7 @@ class MatrixSolveBenchmark(test.Benchmark):
ops.device("/cpu:0"):
matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(x),

@@ -201,7 +201,7 @@ class MatrixSolveBenchmark(test.Benchmark):
ops.device("/gpu:0"):
matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
@@ -237,7 +237,7 @@ class QRBenchmark(test.Benchmark):
low=-1.0, high=1.0, size=shape_).astype(np.float32)
matrix = variables.Variable(matrix_value)
q, r = linalg_ops.qr(matrix)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(q, r),

@@ -252,7 +252,7 @@ class QRBenchmark(test.Benchmark):
low=-1.0, high=1.0, size=shape_).astype(np.float32)
matrix = variables.Variable(matrix_value)
q, r = linalg_ops.qr(matrix)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(q, r),
@@ -223,7 +223,7 @@ class SumReductionTest(BaseReductionTest):

with self.session(graph=ops.Graph(), use_gpu=True) as sess:
tf_arr = variables.Variable(arr)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
tf_mean = math_ops.reduce_mean(tf_arr, 0, False)
tf_out_mean = self.evaluate(tf_mean)
self.assertAllClose(tf_out_mean, 1.)
@@ -969,7 +969,7 @@ class SegmentReductionOpBenchmark(test.Benchmark):
vc = variables.Variable(const.astype(dtype))
name, op = op_functor(vc, vs, seg_ids)
with session.Session() as sess:
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
r = self.run_op_benchmark(
sess,
op,
@@ -217,7 +217,7 @@ class BenchmarkSparseTensorsMapVsSerialization(test.Benchmark):
st_serialized, dtype=values.dtype)
st_deserialized_op = st_deserialized.values.op

- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

st_roundtrip_values = self.evaluate(st_roundtrip)
st_deserialized_values = self.evaluate(st_deserialized)
@@ -54,7 +54,7 @@ class StringFormatOpTest(test.TestCase):
var = variables.Variable(3.34)
format_output = string_ops.string_format("{}", [var])
if not context.executing_eagerly():
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
out = self.evaluate(format_output)
expected = "3.34"
self.assertEqual(compat.as_text(out), expected)

@@ -65,7 +65,7 @@ class StringFormatOpTest(test.TestCase):
var = variables.Variable(math_ops.range(10))
format_output = string_ops.string_format("{}", [var])
if not context.executing_eagerly():
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
out = self.evaluate(format_output)
expected = "[0 1 2 ... 7 8 9]"
self.assertEqual(compat.as_text(out), expected)

@@ -78,7 +78,7 @@ class StringFormatOpTest(test.TestCase):
var_two = variables.Variable(math_ops.range(10))
format_output = string_ops.string_format("{}, {}", [var_one, var_two])
if not context.executing_eagerly():
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.evaluate(plus_one)
out = self.evaluate(format_output)
expected = "3.14, [0 1 2 ... 7 8 9]"
@@ -343,7 +343,7 @@ class SVDBenchmark(test.Benchmark):
low=-1.0, high=1.0, size=shape_).astype(np.float32)
matrix = variables.Variable(matrix_value)
u, s, v = linalg_ops.svd(matrix)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(u, s, v),

@@ -358,7 +358,7 @@ class SVDBenchmark(test.Benchmark):
low=-1.0, high=1.0, size=shape_).astype(np.float32)
matrix = variables.Variable(matrix_value)
u, s, v = linalg_ops.svd(matrix)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(u, s, v),
@@ -221,7 +221,7 @@ class TridiagonalMulOpTest(test.TestCase):
vec,
diagonals_format='sequence')

- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(x1),
@@ -708,7 +708,7 @@ class TridiagonalSolveOpTest(test.TestCase):
return
x = linalg_impl.tridiagonal_solve(
diags, rhs, partial_pivoting=pivoting)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
@@ -626,7 +626,7 @@ class ComputeSampledLogitsTest(test_lib.TestCase):
partitioner=partitioned_variables.fixed_size_partitioner(num_shards),
initializer=constant_op.constant(biases))
with self.session(graph=g) as sess:
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
return self.evaluate([list(sharded_weights), list(sharded_biases)])

def testShapes(self):
@@ -1115,7 +1115,7 @@ class EinsumBenchmark(test.Benchmark):
input_shape = (dim,) * len(subscript)
input_vars.append(
variables.Variable(np.array(r.randn(*input_shape), np.float32)))
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

if len(input_vars) <= 2:
self.run_op_benchmark(
@@ -176,7 +176,7 @@ class AdadeltaOptimizerTest(test.TestCase):
loss = pred * pred
sgd_op = adadelta.AdadeltaOptimizer(
1.0, 1.0, 1.0).minimize(loss)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
@@ -53,7 +53,7 @@ class AdagradDAOptimizerTest(test.TestCase):
l2_regularization_strength=0.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([0.0, 0.0], v0_val)

@@ -94,7 +94,7 @@ class AdagradDAOptimizerTest(test.TestCase):
loss = pred * pred
sgd_op = adagrad_da.AdagradDAOptimizer(
1.0, global_step).minimize(loss)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd

@@ -122,7 +122,7 @@ class AdagradDAOptimizerTest(test.TestCase):
l2_regularization_strength=0.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)

@@ -155,7 +155,7 @@ class AdagradDAOptimizerTest(test.TestCase):
l2_regularization_strength=0.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)

@@ -188,7 +188,7 @@ class AdagradDAOptimizerTest(test.TestCase):
l2_regularization_strength=2.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
- variables.global_variables_initializer().run()
+ self.evaluate(variables.global_variables_initializer())

v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
@ -80,7 +80,7 @@ class AdamOptimizerTest(test.TestCase):
|
||||
constant_op.constant(grads1_np_indices), constant_op.constant([2]))
|
||||
opt = adam.AdamOptimizer()
|
||||
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
# Fetch params to validate initial values
|
||||
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
|
||||
@ -121,7 +121,7 @@ class AdamOptimizerTest(test.TestCase):
|
||||
gathered_sum = math_ops.reduce_sum(array_ops.gather(var, indices))
|
||||
optimizer = adam.AdamOptimizer(3.0)
|
||||
minimize_op = optimizer.minimize(gathered_sum)
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
minimize_op.run()
|
||||
|
||||
@test_util.run_deprecated_v1
|
||||
@ -146,7 +146,7 @@ class AdamOptimizerTest(test.TestCase):
|
||||
[(grad_repeated_index, repeated_index_update_var)])
|
||||
aggregated_update = adam.AdamOptimizer().apply_gradients(
|
||||
[(grad_aggregated, aggregated_update_var)])
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
self.assertAllClose(aggregated_update_var,
|
||||
self.evaluate(repeated_index_update_var))
|
||||
for _ in range(3):
|
||||
@ -268,7 +268,7 @@ class AdamOptimizerTest(test.TestCase):
|
||||
grads1 = constant_op.constant(grads1_np)
|
||||
opt = adam.AdamOptimizer(constant_op.constant(0.001))
|
||||
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
# Fetch params to validate initial values
|
||||
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
|
||||
@ -308,7 +308,7 @@ class AdamOptimizerTest(test.TestCase):
|
||||
opt = adam.AdamOptimizer()
|
||||
update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
beta1_power, beta2_power = opt._get_beta_accumulators()
|
||||
|
||||
|
@ -76,7 +76,7 @@ class LatestCheckpointWithRelativePaths(test.TestCase):
|
||||
|
||||
with self.cached_session() as sess:
|
||||
unused_a = variables.Variable(0.0) # So that Saver saves something.
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
# Should fail.
|
||||
saver = saver_module.Saver(sharded=False)
|
||||
@ -123,7 +123,7 @@ class LatestCheckpointWithRelativePaths(test.TestCase):
|
||||
save = saver_module.Saver({"v0": v0})
|
||||
|
||||
# Record a short training history.
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
save.save(sess, filepath, global_step=0)
|
||||
self.evaluate(inc)
|
||||
save.save(sess, filepath, global_step=1)
|
||||
@ -136,7 +136,7 @@ class LatestCheckpointWithRelativePaths(test.TestCase):
|
||||
|
||||
# Create a new saver.
|
||||
save = saver_module.Saver({"v0": v0})
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
# Get the most recent checkpoint name from the training history file.
|
||||
name = checkpoint_management.latest_checkpoint(traindir)
|
||||
@ -278,7 +278,7 @@ class SaverUtilsTest(test.TestCase):
|
||||
for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1):
|
||||
with self.session(graph=ops_lib.Graph()) as sess:
|
||||
unused_v = variables.Variable(1.0, name="v")
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
saver = saver_module.Saver(sharded=sharded, write_version=version)
|
||||
|
||||
path = os.path.join(self._base_dir, "%s-%s" % (sharded, version))
|
||||
@ -297,7 +297,7 @@ class SaverUtilsTest(test.TestCase):
|
||||
for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1):
|
||||
with self.session(graph=ops_lib.Graph()) as sess:
|
||||
unused_v = variables.Variable(1.0, name="v")
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
saver = saver_module.Saver(write_version=version)
|
||||
prefixes.append(
|
||||
saver.save(sess, os.path.join(self._base_dir, str(version))))
|
||||
@ -312,7 +312,7 @@ class SaverUtilsTest(test.TestCase):
|
||||
for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1):
|
||||
with self.session(graph=ops_lib.Graph()) as sess:
|
||||
unused_v = variables.Variable(1.0, name="v")
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
saver = saver_module.Saver(sharded=sharded, write_version=version)
|
||||
|
||||
path = os.path.join(self._base_dir, "%s-%s" % (sharded, version))
|
||||
|
@ -56,7 +56,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
l1_regularization_strength=0.0,
|
||||
l2_regularization_strength=0.0)
|
||||
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
v0_val, v1_val = self.evaluate([var0, var1])
|
||||
self.assertAllClose([0.0, 0.0], v0_val)
|
||||
@ -94,7 +94,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
l1_regularization_strength=0.0,
|
||||
l2_regularization_strength=0.0)
|
||||
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
v0_val, v1_val = self.evaluate([var0, var1])
|
||||
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
|
||||
@ -120,7 +120,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
|
||||
loss = pred * pred
|
||||
sgd_op = ftrl.FtrlOptimizer(1.0).minimize(loss)
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
# Fetch params to validate initial values
|
||||
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
|
||||
# Run 1 step of sgd
|
||||
@ -146,7 +146,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
l1_regularization_strength=0.001,
|
||||
l2_regularization_strength=0.0)
|
||||
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
v0_val, v1_val = self.evaluate([var0, var1])
|
||||
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
|
||||
@ -177,7 +177,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
l1_regularization_strength=0.001,
|
||||
l2_regularization_strength=2.0)
|
||||
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
v0_val, v1_val = self.evaluate([var0, var1])
|
||||
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
|
||||
@ -216,7 +216,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
l2_regularization_strength=2.0,
|
||||
l2_shrinkage_regularization_strength=0.1)
|
||||
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
v0_val, v1_val = self.evaluate([var0, var1])
|
||||
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
|
||||
@ -254,7 +254,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
l2_regularization_strength=2.0,
|
||||
l2_shrinkage_regularization_strength=0.1)
|
||||
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
v0_val, v1_val = self.evaluate([var0, var1])
|
||||
self.assertAllCloseAccordingToType([[1.0], [2.0]], v0_val)
|
||||
@ -292,7 +292,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
l2_regularization_strength=2.0)
|
||||
update0 = opt0.apply_gradients([(grads0, var0)])
|
||||
update1 = opt1.apply_gradients([(grads1, var1)])
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
v0_val, v1_val = self.evaluate([var0, var1])
|
||||
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
|
||||
@ -329,7 +329,7 @@ class FtrlOptimizerTest(test.TestCase):
|
||||
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
|
||||
|
||||
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
sess = ops.get_default_session()
|
||||
v0_val, v1_val = self.evaluate([var0, var1])
|
||||
|
@ -47,7 +47,7 @@ class GradientDescentOptimizerTest(test.TestCase):
|
||||
optimizer = gradient_descent.GradientDescentOptimizer(3.0)
|
||||
sgd_op = optimizer.apply_gradients(
|
||||
zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
# Fetch params to validate initial values
|
||||
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
|
||||
self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1))
|
||||
@ -155,7 +155,7 @@ class GradientDescentOptimizerTest(test.TestCase):
|
||||
# doesn't work because the sessions and graph are reused across unit
|
||||
# tests and this would mean trying to reinitialize variables. Figure out
|
||||
# a long-term solution for this.
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
# Fetch params to validate initial values
|
||||
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
|
||||
self.assertAllCloseAccordingToType([3.0], self.evaluate(var1))
|
||||
@ -179,7 +179,7 @@ class GradientDescentOptimizerTest(test.TestCase):
|
||||
lrate = constant_op.constant(3.0)
|
||||
sgd_op = gradient_descent.GradientDescentOptimizer(
|
||||
lrate).apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
# Fetch params to validate initial values
|
||||
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
|
||||
self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1))
|
||||
@ -199,7 +199,7 @@ class GradientDescentOptimizerTest(test.TestCase):
|
||||
values = [1.0, 3.0]
|
||||
vars_ = [variables.Variable([v], dtype=dtype) for v in values]
|
||||
grads_and_vars = opt.compute_gradients(vars_[0] + vars_[1], vars_)
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
for grad, _ in grads_and_vars:
|
||||
self.assertAllCloseAccordingToType([1.0], self.evaluate(grad))
|
||||
|
||||
@ -214,7 +214,7 @@ class GradientDescentOptimizerTest(test.TestCase):
|
||||
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
|
||||
sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
|
||||
zip([grads0, grads1], [var0, var1]), global_step=global_step)
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
# Fetch params to validate initial values
|
||||
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
|
||||
self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1))
|
||||
@ -245,7 +245,7 @@ class GradientDescentOptimizerTest(test.TestCase):
|
||||
constant_op.constant([2, 1]))
|
||||
sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
|
||||
zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
# Fetch params to validate initial values
|
||||
self.assertAllCloseAccordingToType([[1.0], [2.0]], self.evaluate(var0))
|
||||
self.assertAllCloseAccordingToType([[3.0], [4.0]], self.evaluate(var1))
|
||||
|
@ -176,7 +176,7 @@ class MomentumOptimizerTest(test.TestCase):
|
||||
mom_op = momentum_lib.MomentumOptimizer(
|
||||
learning_rate=2.0, momentum=0.9, use_nesterov=True)
|
||||
opt_op = mom_op.minimize(cost, global_step, [var0, var1])
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
for t in range(1, 5):
|
||||
opt_op.run()
|
||||
var0_np, accum0_np = self._update_nesterov_momentum_numpy(
|
||||
@ -218,7 +218,7 @@ class MomentumOptimizerTest(test.TestCase):
|
||||
grads_and_vars = [(y_feed, var0), (constant_op.constant(
|
||||
[3.0, 3.0], dtype=dtype), var1)]
|
||||
opt_update = mom_op.apply_gradients(grads_and_vars)
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
for t in range(1, 5):
|
||||
opt_update.run(feed_dict={x_feed: grads[t - 1]})
|
||||
var0_np, accum0_np = self._update_nesterov_momentum_numpy(
|
||||
@ -295,7 +295,7 @@ class MomentumOptimizerTest(test.TestCase):
|
||||
momentum=constant_op.constant(0.9))
|
||||
mom_update = mom_opt.apply_gradients(
|
||||
zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
# Check we have slots
|
||||
self.assertEqual(["momentum"], mom_opt.get_slot_names())
|
||||
slot0 = mom_opt.get_slot(var0, "momentum")
|
||||
@ -452,7 +452,7 @@ class MomentumOptimizerTest(test.TestCase):
|
||||
grads0 = constant_op.constant([0.0] * num_samples)
|
||||
mom_opt = momentum_lib.MomentumOptimizer(learning_rate=0.1, momentum=0.1)
|
||||
mom_update = mom_opt.apply_gradients(zip([grads0], [var0]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
for i in xrange(num_samples):
|
||||
mom_update.run(feed_dict={grads0: db_grad[i]})
|
||||
self.assertAllClose(np.array(db_out[i]), self.evaluate(var0))
|
||||
@ -477,7 +477,7 @@ class MomentumOptimizerTest(test.TestCase):
|
||||
learning_rate=2.0, momentum=0.9)
|
||||
mom_update = mom_opt.apply_gradients(
|
||||
zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
# Check we have slots
|
||||
self.assertEqual(["momentum"], mom_opt.get_slot_names())
|
||||
@ -553,7 +553,7 @@ class MomentumOptimizerTest(test.TestCase):
|
||||
zip([grads0, grads1], [var0, var1]))
|
||||
mom_update2 = mom_opt.apply_gradients(
|
||||
zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
self.assertEqual(["momentum"], mom_opt.get_slot_names())
|
||||
slot0 = mom_opt.get_slot(var0, "momentum")
|
||||
|
@ -78,7 +78,7 @@ class OptimizerTest(test.TestCase):
|
||||
aggregation_method=gradients_util.AggregationMethod.
|
||||
EXPERIMENTAL_ACCUMULATE_N)
|
||||
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
# Fetch params to validate initial values
|
||||
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
|
||||
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
|
||||
@ -102,7 +102,7 @@ class OptimizerTest(test.TestCase):
|
||||
opt_op = sgd_op.minimize(
|
||||
cost, global_step, [var0, var1], grad_loss=grad_loss)
|
||||
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
# Fetch params to validate initial values
|
||||
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
|
||||
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
|
||||
@ -259,7 +259,7 @@ class OptimizerTest(test.TestCase):
|
||||
sgd_op = gradient_descent.GradientDescentOptimizer(3.0)
|
||||
opt_op = sgd_op.minimize(cost, global_step, [var0, var1])
|
||||
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
# Fetch params to validate initial values
|
||||
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
|
||||
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
|
||||
|
@ -47,7 +47,7 @@ class ProximalAdagradOptimizerTest(test.TestCase):
|
||||
l1_regularization_strength=0.0,
|
||||
l2_regularization_strength=0.0)
|
||||
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
v0_val, v1_val = self.evaluate([var0, var1])
|
||||
self.assertAllClose([0.0, 0.0], v0_val)
|
||||
@ -87,7 +87,7 @@ class ProximalAdagradOptimizerTest(test.TestCase):
|
||||
l1_regularization_strength=0.0,
|
||||
l2_regularization_strength=0.0)
|
||||
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
v0_val, v1_val = self.evaluate([var0, var1])
|
||||
self.assertAllClose([1.0, 2.0], v0_val)
|
||||
@ -109,7 +109,7 @@ class ProximalAdagradOptimizerTest(test.TestCase):
|
||||
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
|
||||
loss = pred * pred
|
||||
sgd_op = proximal_adagrad.ProximalAdagradOptimizer(1.0).minimize(loss)
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
# Fetch params to validate initial values
|
||||
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
|
||||
# Run 1 step of sgd
|
||||
@ -133,7 +133,7 @@ class ProximalAdagradOptimizerTest(test.TestCase):
|
||||
l1_regularization_strength=0.001,
|
||||
l2_regularization_strength=0.0)
|
||||
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
v0_val, v1_val = self.evaluate([var0, var1])
|
||||
self.assertAllClose([1.0, 2.0], v0_val)
|
||||
@ -160,7 +160,7 @@ class ProximalAdagradOptimizerTest(test.TestCase):
|
||||
l1_regularization_strength=0.001,
|
||||
l2_regularization_strength=2.0)
|
||||
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
v0_val, v1_val = self.evaluate([var0, var1])
|
||||
self.assertAllClose([1.0, 2.0], v0_val)
|
||||
@ -195,7 +195,7 @@ class ProximalAdagradOptimizerTest(test.TestCase):
|
||||
grads1 = constant_op.constant([0.01, 0.02])
|
||||
|
||||
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
sess = ops.get_default_session()
|
||||
v0_val, v1_val = self.evaluate([var0, var1])
|
||||
|
@ -49,7 +49,7 @@ class ProximalGradientDescentOptimizerTest(test.TestCase):
|
||||
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
|
||||
3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)
|
||||
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
v0_val, v1_val = self.evaluate([var0, var1])
|
||||
self.assertAllClose([0.0, 0.0], v0_val)
|
||||
@ -82,7 +82,7 @@ class ProximalGradientDescentOptimizerTest(test.TestCase):
|
||||
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
|
||||
3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)
|
||||
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
v0_val, v1_val = self.evaluate([var0, var1])
|
||||
self.assertAllClose([1.0, 2.0], v0_val)
|
||||
@ -106,7 +106,7 @@ class ProximalGradientDescentOptimizerTest(test.TestCase):
|
||||
loss = pred * pred
|
||||
sgd_op = proximal_gradient_descent.ProximalGradientDescentOptimizer(
|
||||
1.0).minimize(loss)
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
# Fetch params to validate initial values
|
||||
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
|
||||
# Run 1 step of sgd
|
||||
@ -127,7 +127,7 @@ class ProximalGradientDescentOptimizerTest(test.TestCase):
|
||||
opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
|
||||
3.0, l1_regularization_strength=0.001, l2_regularization_strength=2.0)
|
||||
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
v0_val, v1_val = self.evaluate([var0, var1])
|
||||
self.assertAllClose([1.0, 2.0], v0_val)
|
||||
@ -162,7 +162,7 @@ class ProximalGradientDescentOptimizerTest(test.TestCase):
|
||||
grads1 = constant_op.constant([0.01, 0.02])
|
||||
|
||||
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
sess = ops.get_default_session()
|
||||
v0_val, v1_val = self.evaluate([var0, var1])
|
||||
|
@ -51,7 +51,7 @@ class SaverLargePartitionedVariableTest(test.TestCase):
|
||||
partitioner=partitioned_variables.fixed_size_partitioner(4),
|
||||
initializer=init,
|
||||
dtype=dtypes.bool))
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
save = saver.Saver(partitioned_var)
|
||||
val = save.save(sess, save_path)
|
||||
self.assertEqual(save_path, val)
|
||||
|
@ -147,7 +147,7 @@ class SlotCreatorTest(test.TestCase):
|
||||
slot = slot_creator.create_slot(v, s.initialized_value(), name="slot")
|
||||
si = slot._save_slice_info
|
||||
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
self.assertEqual("var/part_%d/slot" % i, slot.op.name)
|
||||
self.assertEqual([2], slot.get_shape().as_list())
|
||||
@ -168,7 +168,7 @@ class SlotCreatorTest(test.TestCase):
|
||||
for i, v in enumerate(p_v):
|
||||
slot = slot_creator.create_slot(v, s.initialized_value(), name="slot")
|
||||
|
||||
variables.global_variables_initializer().run()
|
||||
self.evaluate(variables.global_variables_initializer())
|
||||
|
||||
self.assertEqual("var/part_%d/slot" % i, slot.op.name)
|
||||
self.assertEqual([], slot.get_shape().as_list())
|
||||
|
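Every hunk above applies the same mechanical substitution: variables.global_variables_initializer().run() becomes self.evaluate(variables.global_variables_initializer()). Operation.run() requires a default session, which does not exist under eager execution, while tf.test.TestCase.evaluate runs the fetch through the session in graph mode and executes it directly in eager mode. A minimal sketch of the resulting test pattern follows, written against the public tf and tf.test APIs rather than the internal modules imported by these files; the class and variable names are illustrative only and are not taken from this commit.

import tensorflow as tf


class InitializerPatternTest(tf.test.TestCase):
  """Hypothetical test showing the initializer pattern used in this commit."""

  def test_initialize_then_read(self):
    var = tf.Variable([1.0, 2.0])
    # Before this change a test would call
    # tf.compat.v1.global_variables_initializer().run(), which only works
    # when a default graph session is installed. self.evaluate() handles
    # both graph mode (sess.run) and eager mode (direct execution).
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.assertAllClose([1.0, 2.0], self.evaluate(var))


if __name__ == "__main__":
  tf.test.main()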