From 6e8976428df7023fb922e3f54cd441055c0d79ad Mon Sep 17 00:00:00 2001
From: Gaurav Jain <gjn@google.com>
Date: Wed, 1 Jul 2020 11:04:07 -0700
Subject: [PATCH] Use self.evaluate for global_variables_initializer

This fixes some common incompatibilities with eager mode execution.

PiperOrigin-RevId: 319258451
Change-Id: I93c66eb3b8c75f75fd9c1deb9526fbd937b93805
---
 tensorflow/compiler/tests/adagrad_da_test.py  |  8 ++++----
 tensorflow/compiler/tests/adagrad_test.py     |  6 +++---
 tensorflow/compiler/tests/adam_test.py        |  6 +++---
 tensorflow/compiler/tests/ftrl_test.py        | 20 +++++++++----------
 tensorflow/compiler/tests/momentum_test.py    |  6 +++---
 .../compiler/tests/proximal_adagrad_test.py   | 10 +++++-----
 .../tests/proximal_gradient_descent_test.py   | 10 +++++-----
 .../compiler/tests/tensor_array_ops_test.py   |  4 ++--
 .../parameter_server_strategy_test.py         | 10 +++++-----
 tensorflow/python/framework/function_test.py  |  4 ++--
 .../keras/legacy_tf_layers/core_test.py       |  6 +++---
 .../python/keras/optimizer_v2/adam_test.py    | 20 +++++++++----------
 .../python/keras/optimizer_v2/adamax_test.py  | 10 +++++-----
 .../python/keras/optimizer_v2/ftrl_test.py    | 18 ++++++++---------
 .../python/keras/optimizer_v2/nadam_test.py   |  4 ++--
 .../python/kernel_tests/array_ops_test.py     |  2 +-
 .../kernel_tests/batch_matmul_op_test.py      |  2 +-
 .../python/kernel_tests/cast_op_test.py       |  2 +-
 .../python/kernel_tests/cholesky_op_test.py   |  6 +++---
 .../kernel_tests/control_flow_ops_py_test.py  |  2 +-
 .../python/kernel_tests/conv_ops_test.py      |  4 ++--
 .../kernel_tests/determinant_op_test.py       |  4 ++--
 .../kernel_tests/distributions/normal_test.py |  4 ++--
 .../python/kernel_tests/einsum_op_test.py     |  2 +-
 .../python/kernel_tests/embedding_ops_test.py |  8 ++++----
 .../python/kernel_tests/gather_nd_op_test.py  |  2 +-
 .../python/kernel_tests/init_ops_test.py      |  4 ++--
 .../python/kernel_tests/logging_ops_test.py   |  4 ++--
 tensorflow/python/kernel_tests/lu_op_test.py  |  4 ++--
 .../kernel_tests/matrix_band_part_op_test.py  |  4 ++--
 .../matrix_exponential_op_test.py             |  4 ++--
 .../kernel_tests/matrix_inverse_op_test.py    |  4 ++--
 .../kernel_tests/matrix_logarithm_op_test.py  |  2 +-
 .../kernel_tests/matrix_solve_ls_op_test.py   |  4 ++--
 .../kernel_tests/matrix_solve_op_test.py      |  4 ++--
 tensorflow/python/kernel_tests/qr_op_test.py  |  4 ++--
 .../python/kernel_tests/reduction_ops_test.py |  2 +-
 .../segment_reduction_ops_test.py             |  2 +-
 .../sparse_tensors_map_ops_test.py            |  2 +-
 .../kernel_tests/string_format_op_test.py     |  6 +++---
 tensorflow/python/kernel_tests/svd_op_test.py |  4 ++--
 .../tridiagonal_matmul_op_test.py             |  2 +-
 .../kernel_tests/tridiagonal_solve_op_test.py |  2 +-
 tensorflow/python/ops/nn_test.py              |  2 +-
 .../python/ops/special_math_ops_test.py       |  2 +-
 tensorflow/python/training/adadelta_test.py   |  2 +-
 tensorflow/python/training/adagrad_da_test.py | 10 +++++-----
 tensorflow/python/training/adam_test.py       | 10 +++++-----
 .../training/checkpoint_management_test.py    | 12 +++++------
 tensorflow/python/training/ftrl_test.py       | 18 ++++++++---------
 .../python/training/gradient_descent_test.py  | 12 +++++------
 tensorflow/python/training/momentum_test.py   | 12 +++++------
 tensorflow/python/training/optimizer_test.py  |  6 +++---
 .../python/training/proximal_adagrad_test.py  | 12 +++++------
 .../proximal_gradient_descent_test.py         | 10 +++++-----
 .../saver_large_partitioned_variable_test.py  |  2 +-
 .../python/training/slot_creator_test.py      |  4 ++--
 57 files changed, 176 insertions(+), 176 deletions(-)
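Editorial note (not part of the original patch): the mechanical change throughout is
"variables.global_variables_initializer().run()" -> "self.evaluate(variables.global_variables_initializer())".
Operation.run() requires a default tf.Session, which exists only when the test builds
a graph, whereas tf.test.TestCase.evaluate() dispatches correctly under both graph and
eager execution. A minimal sketch of the pattern follows, assuming the public
tf.compat.v1 API; the test class and variable names below are hypothetical:

import tensorflow as tf

class InitializerStyleTest(tf.test.TestCase):

  def testInitThenRead(self):
    var = tf.Variable([1.0, 2.0])
    init = tf.compat.v1.global_variables_initializer()
    # Graph mode only: init.run() needs a default session and raises under
    # eager execution, where no session exists.
    # Mode-agnostic: self.evaluate() runs the op in a test-owned session
    # when a graph is being built, and is effectively a no-op in eager
    # mode, where variables are initialized at creation time.
    self.evaluate(init)
    self.assertAllClose([1.0, 2.0], self.evaluate(var))

if __name__ == "__main__":
  tf.test.main()
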
diff --git a/tensorflow/compiler/tests/adagrad_da_test.py b/tensorflow/compiler/tests/adagrad_da_test.py
index e08435b5713..9ae7744e5c6 100644
--- a/tensorflow/compiler/tests/adagrad_da_test.py
+++ b/tensorflow/compiler/tests/adagrad_da_test.py
@@ -48,7 +48,7 @@ class AdagradDAOptimizerTest(xla_test.XLATestCase):
           l2_regularization_strength=0.0)
       update = opt.apply_gradients(
           zip([grads0, grads1], [var0, var1]), global_step=global_step)
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())

       self.assertAllClose([0.0, 0.0], self.evaluate(var0))
       self.assertAllClose([0.0, 0.0], self.evaluate(var1))
@@ -85,7 +85,7 @@ class AdagradDAOptimizerTest(xla_test.XLATestCase):
           l2_regularization_strength=0.0)
       update = opt.apply_gradients(
           zip([grads0, grads1], [var0, var1]), global_step=global_step)
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())

       self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
       self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1))
@@ -116,7 +116,7 @@ class AdagradDAOptimizerTest(xla_test.XLATestCase):
           l2_regularization_strength=0.0)
       update = opt.apply_gradients(
           zip([grads0, grads1], [var0, var1]), global_step=global_step)
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())

       self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
       self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1))
@@ -147,7 +147,7 @@ class AdagradDAOptimizerTest(xla_test.XLATestCase):
           l2_regularization_strength=2.0)
       update = opt.apply_gradients(
           zip([grads0, grads1], [var0, var1]), global_step=global_step)
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())

       self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
       self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1))
diff --git a/tensorflow/compiler/tests/adagrad_test.py b/tensorflow/compiler/tests/adagrad_test.py
index 9f7a940019e..3ee368b3952 100644
--- a/tensorflow/compiler/tests/adagrad_test.py
+++ b/tensorflow/compiler/tests/adagrad_test.py
@@ -40,7 +40,7 @@ class AdagradOptimizerTest(xla_test.XLATestCase):
       ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
       ada_update = ada_opt.apply_gradients(
           zip([grads0, grads1], [var0, var1]))
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))
       self.assertAllClose([3.0, 4.0], self.evaluate(var1))
@@ -68,7 +68,7 @@ class AdagradOptimizerTest(xla_test.XLATestCase):
           constant_op.constant(3.0), initial_accumulator_value=0.1)
       ada_update = ada_opt.apply_gradients(
           zip([grads0, grads1], [var0, var1]))
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))
       self.assertAllClose([3.0, 4.0], self.evaluate(var1))
@@ -104,7 +104,7 @@ class AdagradOptimizerTest(xla_test.XLATestCase):
       self.assertEqual(slot0.get_shape(), var0.get_shape())
       slot1 = ada_opt.get_slot(var1, "accumulator")
       self.assertEqual(slot1.get_shape(), var1.get_shape())
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())
       # Fetch params to validate initial values.
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))
diff --git a/tensorflow/compiler/tests/adam_test.py b/tensorflow/compiler/tests/adam_test.py
index 2a5b809e288..d4fa06c782e 100644
--- a/tensorflow/compiler/tests/adam_test.py
+++ b/tensorflow/compiler/tests/adam_test.py
@@ -72,7 +72,7 @@ class AdamOptimizerTest(xla_test.XLATestCase):
       grads1 = array_ops.placeholder(dtype)
       opt = adam.AdamOptimizer()
       update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())

       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))
@@ -115,7 +115,7 @@ class AdamOptimizerTest(xla_test.XLATestCase):
       grads1 = array_ops.placeholder(dtype)
       opt = adam.AdamOptimizer(constant_op.constant(0.001))
       update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())

       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))
@@ -159,7 +159,7 @@ class AdamOptimizerTest(xla_test.XLATestCase):
       opt = adam.AdamOptimizer()
       update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
       update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())

       beta1_power, beta2_power = opt._get_beta_accumulators()
diff --git a/tensorflow/compiler/tests/ftrl_test.py b/tensorflow/compiler/tests/ftrl_test.py
index a2efb413a57..3aeeb9101b6 100644
--- a/tensorflow/compiler/tests/ftrl_test.py
+++ b/tensorflow/compiler/tests/ftrl_test.py
@@ -48,7 +48,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
           l1_regularization_strength=0.0,
           l2_regularization_strength=0.0)
       ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0, 0.0], self.evaluate(var0))
       self.assertAllClose([0.0, 0.0], self.evaluate(var1))
@@ -63,7 +63,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
       var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
       opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
       adagrad_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0, 0.0], self.evaluate(var0))
       self.assertAllClose([0.0, 0.0], self.evaluate(var1))
@@ -83,7 +83,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
           l1_regularization_strength=0.0,
           l2_regularization_strength=0.0)
       ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0, 0.0], self.evaluate(var0))
       self.assertAllClose([0.0, 0.0], self.evaluate(var1))
@@ -98,7 +98,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
       var0, var1, grads0, grads1 = self.initVariableAndGradient(dtype)
       opt = gradient_descent.GradientDescentOptimizer(3.0, name="sgd")
       sgd_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0, 0.0], self.evaluate(var0))
       self.assertAllClose([0.0, 0.0], self.evaluate(var1))
@@ -122,7 +122,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
           l1_regularization_strength=0.0,
           l2_regularization_strength=0.0)
       ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([0.0, 0.0], self.evaluate(var0))
       self.assertAllClose([0.0, 0.0], self.evaluate(var1))
@@ -156,7 +156,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
           l1_regularization_strength=0.0,
           l2_regularization_strength=0.0)
       ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))
       self.assertAllClose([4.0, 3.0], self.evaluate(var1))
@@ -189,7 +189,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
           l1_regularization_strength=0.001,
           l2_regularization_strength=0.0)
       ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))
       self.assertAllClose([4.0, 3.0], self.evaluate(var1))
@@ -223,7 +223,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
           l1_regularization_strength=0.001,
           l2_regularization_strength=2.0)
       ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllClose([1.0, 2.0], self.evaluate(var0))
       self.assertAllClose([4.0, 3.0], self.evaluate(var1))
@@ -262,7 +262,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
           l2_regularization_strength=2.0,
           l2_shrinkage_regularization_strength=0.1)
       ftrl_update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())
       # Fetch params to validate initial values
       self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
       self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1))
@@ -303,7 +303,7 @@ class FtrlOptimizerTest(xla_test.XLATestCase):
           l2_regularization_strength=2.0)
       update0 = opt0.apply_gradients([(grads0, var0)])
       update1 = opt1.apply_gradients([(grads1, var1)])
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())

       self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
       self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var1))
diff --git a/tensorflow/compiler/tests/momentum_test.py b/tensorflow/compiler/tests/momentum_test.py
index 5f061fa0595..e3f74663276 100644
--- a/tensorflow/compiler/tests/momentum_test.py
+++ b/tensorflow/compiler/tests/momentum_test.py
@@ -50,7 +50,7 @@ class MomentumOptimizerTest(xla_test.XLATestCase):
           learning_rate=2.0, momentum=0.9)
       mom_update = mom_opt.apply_gradients(
           zip([grads0, grads1], [var0, var1]))
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())
       # Check we have slots
       self.assertEqual(["momentum"], mom_opt.get_slot_names())
       slot0 = mom_opt.get_slot(var0, "momentum")
@@ -114,7 +114,7 @@ class MomentumOptimizerTest(xla_test.XLATestCase):
       mom_op = momentum_lib.MomentumOptimizer(
           learning_rate=0.1, momentum=0.9, use_nesterov=True)
       opt_op = mom_op.minimize(cost, global_step, [var0, var1])
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())
       for _ in range(1, 5):
         opt_op.run()
       var0_np, accum0_np = self._update_nesterov_momentum_numpy(
@@ -136,7 +136,7 @@ class MomentumOptimizerTest(xla_test.XLATestCase):
           momentum=constant_op.constant(0.9))
       mom_update = mom_opt.apply_gradients(
           zip([grads0, grads1], [var0, var1]))
-      variables.global_variables_initializer().run()
+      self.evaluate(variables.global_variables_initializer())
       # Check we have slots
       self.assertEqual(["momentum"], mom_opt.get_slot_names())
       slot0 = mom_opt.get_slot(var0, "momentum")
diff --git a/tensorflow/compiler/tests/proximal_adagrad_test.py b/tensorflow/compiler/tests/proximal_adagrad_test.py
index 1993d4ecb19..cfe0b16bee3 100644
--- a/tensorflow/compiler/tests/proximal_adagrad_test.py
+++ b/tensorflow/compiler/tests/proximal_adagrad_test.py
@@ -43,7 +43,7 @@ class ProximalAdagradOptimizerTest(xla_test.XLATestCase):
          l1_regularization_strength=0.0,
          l2_regularization_strength=0.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      self.assertAllClose([0.0, 0.0], self.evaluate(var0))
      self.assertAllClose([0.0, 0.0], self.evaluate(var1))
@@ -74,7 +74,7 @@ class ProximalAdagradOptimizerTest(xla_test.XLATestCase):
          l1_regularization_strength=0.0,
          l2_regularization_strength=0.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([4.0, 3.0], self.evaluate(var1))
@@ -98,7 +98,7 @@ class ProximalAdagradOptimizerTest(xla_test.XLATestCase):
          l1_regularization_strength=0.001,
          l2_regularization_strength=0.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([4.0, 3.0], self.evaluate(var1))
@@ -122,7 +122,7 @@ class ProximalAdagradOptimizerTest(xla_test.XLATestCase):
          l1_regularization_strength=0.001,
          l2_regularization_strength=2.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([4.0, 3.0], self.evaluate(var1))
@@ -141,7 +141,7 @@ class ProximalAdagradOptimizerTest(xla_test.XLATestCase):
      grads1 = constant_op.constant([0.01, 0.02])

      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([3.0, 4.0], self.evaluate(var1))
diff --git a/tensorflow/compiler/tests/proximal_gradient_descent_test.py b/tensorflow/compiler/tests/proximal_gradient_descent_test.py
index ce97fd1a5ba..779063ef00b 100644
--- a/tensorflow/compiler/tests/proximal_gradient_descent_test.py
+++ b/tensorflow/compiler/tests/proximal_gradient_descent_test.py
@@ -40,7 +40,7 @@ class ProximalGradientDescentOptimizerTest(xla_test.XLATestCase):
      opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
          3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      self.assertAllClose([0.0, 0.0], self.evaluate(var0))
      self.assertAllClose([0.0, 0.0], self.evaluate(var1))
@@ -62,7 +62,7 @@ class ProximalGradientDescentOptimizerTest(xla_test.XLATestCase):
      opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
          3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([4.0, 3.0], self.evaluate(var1))
@@ -84,7 +84,7 @@ class ProximalGradientDescentOptimizerTest(xla_test.XLATestCase):
      opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
          3.0, l1_regularization_strength=0.001, l2_regularization_strength=0.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([4.0, 3.0], self.evaluate(var1))
@@ -106,7 +106,7 @@ class ProximalGradientDescentOptimizerTest(xla_test.XLATestCase):
      opt = proximal_gradient_descent.ProximalGradientDescentOptimizer(
          3.0, l1_regularization_strength=0.001, l2_regularization_strength=2.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([4.0, 3.0], self.evaluate(var1))
@@ -125,7 +125,7 @@ class ProximalGradientDescentOptimizerTest(xla_test.XLATestCase):
      grads1 = constant_op.constant([0.01, 0.02])

      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
      self.assertAllClose([3.0, 4.0], self.evaluate(var1))
diff --git a/tensorflow/compiler/tests/tensor_array_ops_test.py b/tensorflow/compiler/tests/tensor_array_ops_test.py
index 1175983090f..ae297a6b7be 100644
--- a/tensorflow/compiler/tests/tensor_array_ops_test.py
+++ b/tensorflow/compiler/tests/tensor_array_ops_test.py
@@ -801,7 +801,7 @@ class TensorArrayTest(xla_test.XLATestCase):
    # state0_grad = gradients_impl.gradients([vout], [state0], [grad_val])[0]
    # var_grad = gradients_impl.gradients([vout], [var], [grad_val])[0]
-   # variables.global_variables_initializer().run()
+   # self.evaluate(variables.global_variables_initializer())
    # state0_t, var_t, v0_t, vout_t, v0_grad_t, var_grad_t, state0_grad_t = (
    #     self.evaluate([state0, var, v0, vout, v0_grad, var_grad, state0_grad])
    # )
@@ -1150,7 +1150,7 @@ class TensorArrayTest(xla_test.XLATestCase):

      return [read0, read1, size0, size1, v0, v1]

-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      read0_v, read1_v, size0_v, size1_v, v0, v1 = self.evaluate(
          xla.compile(fn))
diff --git a/tensorflow/python/distribute/parameter_server_strategy_test.py b/tensorflow/python/distribute/parameter_server_strategy_test.py
index b4fb15800a1..c219aa734d9 100644
--- a/tensorflow/python/distribute/parameter_server_strategy_test.py
+++ b/tensorflow/python/distribute/parameter_server_strategy_test.py
@@ -213,7 +213,7 @@ class ParameterServerStrategyTestBase(
      self.assertNotEqual(f, None)

      if context.num_gpus() >= 1 and num_gpus <= 1:
-       variables.global_variables_initializer().run()
+       self.evaluate(variables.global_variables_initializer())
        y_val, z_val, f_val = sess.run([y, z, f])
        self.assertEqual(y_val, 33.0)
        self.assertEqual(z_val, 43.0)
@@ -259,7 +259,7 @@ class ParameterServerStrategyTestBase(
      x = d.extended.call_for_each_replica(model_fn)

      if context.num_gpus() >= 1:
-       variables.global_variables_initializer().run()
+       self.evaluate(variables.global_variables_initializer())
        x_val = sess.run(x)
        if num_gpus < 1:
          self.assertEqual(x_val, [13.0, 25.0])
@@ -363,7 +363,7 @@ class ParameterServerStrategyTestBase(
      self.assertNotEqual(f, None)

      if context.num_gpus() >= 1 and num_gpus <= 1:
-       variables.global_variables_initializer().run()
+       self.evaluate(variables.global_variables_initializer())
        y_val, z_val, f_val = sess.run([y, z, f])
        self.assertEqual(y_val, 33.0)
        self.assertEqual(z_val, 43.0)
@@ -408,7 +408,7 @@ class ParameterServerStrategyTestBase(
      train_op = d.group(train_op)

      if task_id == 0:
-       variables.global_variables_initializer().run()
+       self.evaluate(variables.global_variables_initializer())

      # Workers waiting for chief worker's initializing variables.
      self._init_condition.acquire()
@@ -496,7 +496,7 @@ class ParameterServerStrategyTestBase(
      if (not task_type or
          multi_worker_util.is_chief(
              d.extended._cluster_spec, task_type, task_id)):
-       variables.global_variables_initializer().run()
+       self.evaluate(variables.global_variables_initializer())

      # Workers waiting for chief worker's initializing variables.
      self._init_condition.acquire()
diff --git a/tensorflow/python/framework/function_test.py b/tensorflow/python/framework/function_test.py
index 220eb06c9b6..26ae88e58c7 100644
--- a/tensorflow/python/framework/function_test.py
+++ b/tensorflow/python/framework/function_test.py
@@ -537,7 +537,7 @@ class FunctionTest(test.TestCase):
    z = Foo(v)

    with self.session(graph=g):
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      self.assertAllEqual(z, 101.)

  @test_util.run_deprecated_v1
@@ -772,7 +772,7 @@ class FunctionTest(test.TestCase):
    z = Bar()

    with self.session(graph=g):
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      self.assertAllEqual(y, [[12.0]])
      self.assertAllEqual(z, [[1.0]])
diff --git a/tensorflow/python/keras/legacy_tf_layers/core_test.py b/tensorflow/python/keras/legacy_tf_layers/core_test.py
index e912006c620..46fb4bef620 100644
--- a/tensorflow/python/keras/legacy_tf_layers/core_test.py
+++ b/tensorflow/python/keras/legacy_tf_layers/core_test.py
@@ -68,7 +68,7 @@ class DenseTest(test.TestCase, parameterized.TestCase):
      v = variable_scope.get_variable(
          'X', initializer=init_ops.zeros_initializer(), shape=(1, 1))
      x = core_layers.Dense(1)(v)
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      self.assertAllEqual(x, [[0.0]])

  @combinations.generate(combinations.combine(mode=['graph', 'eager']))
@@ -280,7 +280,7 @@ class DenseTest(test.TestCase, parameterized.TestCase):
        initializer=init_ops.ones_initializer()), self.cached_session():
      inputs = random_ops.random_uniform((5, 3), seed=1)
      core_layers.dense(inputs, 2)
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      weights = _get_variable_dict_from_varstore()
      self.assertEqual(len(weights), 2)
      # Check that the matrix weights got initialized to ones (from scope).
@@ -445,7 +445,7 @@ class DropoutTest(test.TestCase, parameterized.TestCase):
    with self.cached_session():
      inputs = array_ops.ones((5, 5))
      dropped = core_layers.dropout(inputs, 0.5, training=True, seed=1)
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      np_output = self.evaluate(dropped)
      self.assertAlmostEqual(0., np_output.min())
      dropped = core_layers.dropout(inputs, 0.5, training=False, seed=1)
diff --git a/tensorflow/python/keras/optimizer_v2/adam_test.py b/tensorflow/python/keras/optimizer_v2/adam_test.py
index b706c984d77..ad53e89bd81 100644
--- a/tensorflow/python/keras/optimizer_v2/adam_test.py
+++ b/tensorflow/python/keras/optimizer_v2/adam_test.py
@@ -133,7 +133,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
          constant_op.constant(grads1_np_indices), constant_op.constant([3]))
      opt = adam.Adam()
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      # Fetch params to validate initial values
      self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0))
@@ -167,7 +167,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
        g_sum = lambda: math_ops.reduce_sum(array_ops.gather(var, indices))  # pylint: disable=cell-var-from-loop
        optimizer = adam.Adam(3.0)
        minimize_op = optimizer.minimize(g_sum, var_list=[var])
-       variables.global_variables_initializer().run()
+       self.evaluate(variables.global_variables_initializer())
        minimize_op.run()

  def testSparseRepeatedIndices(self):
@@ -192,7 +192,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
          [(grad_repeated_index, repeated_index_update_var)])
      aggregated_update = adam.Adam().apply_gradients(
          [(grad_aggregated, aggregated_update_var)])
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      self.assertAllClose(aggregated_update_var,
                          self.evaluate(repeated_index_update_var))
      for _ in range(3):
@@ -459,7 +459,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
      grads1 = constant_op.constant(grads1_np)
      opt = adam.Adam(constant_op.constant(0.001))
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
@@ -499,7 +499,7 @@ class AdamOptimizerTest(test.TestCase, parameterized.TestCase):
      opt = adam.Adam()
      update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
@@ -585,7 +585,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
          constant_op.constant(grads1_np_indices), constant_op.constant([3]))
      opt = adam.NonFusedAdam()
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      # Fetch params to validate initial values
      self.assertAllClose([1.0, 1.0, 2.0], self.evaluate(var0))
@@ -619,7 +619,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
        g_sum = lambda: math_ops.reduce_sum(array_ops.gather(var, indices))  # pylint: disable=cell-var-from-loop
        optimizer = adam.NonFusedAdam(3.0)
        minimize_op = optimizer.minimize(g_sum, var_list=[var])
-       variables.global_variables_initializer().run()
+       self.evaluate(variables.global_variables_initializer())
        minimize_op.run()

  def testSparseRepeatedIndices(self):
@@ -644,7 +644,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
          [(grad_repeated_index, repeated_index_update_var)])
      aggregated_update = adam.NonFusedAdam().apply_gradients(
          [(grad_aggregated, aggregated_update_var)])
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      self.assertAllClose(aggregated_update_var,
                          self.evaluate(repeated_index_update_var))
      for _ in range(3):
@@ -915,7 +915,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
      grads1 = constant_op.constant(grads1_np)
      opt = adam.NonFusedAdam(constant_op.constant(0.001))
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], self.evaluate(var0))
@@ -955,7 +955,7 @@ class NonFusedAdamOptimizerTest(test.TestCase, parameterized.TestCase):
      opt = adam.NonFusedAdam()
      update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      beta_1_power, beta_2_power = get_beta_accumulators(opt, dtype)
diff --git a/tensorflow/python/keras/optimizer_v2/adamax_test.py b/tensorflow/python/keras/optimizer_v2/adamax_test.py
index 47a87e3644b..a78a9d2f443 100644
--- a/tensorflow/python/keras/optimizer_v2/adamax_test.py
+++ b/tensorflow/python/keras/optimizer_v2/adamax_test.py
@@ -103,7 +103,7 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
          constant_op.constant(grads1_np_indices), constant_op.constant([3]))
      opt = adamax.Adamax()
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0, 3.0], var0)
@@ -137,7 +137,7 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
        g_sum = lambda: math_ops.reduce_sum(array_ops.gather(var, indices))  # pylint: disable=cell-var-from-loop
        optimizer = adamax.Adamax(3.0)
        minimize_op = optimizer.minimize(g_sum, var_list=[var])
-       variables.global_variables_initializer().run()
+       self.evaluate(variables.global_variables_initializer())
        minimize_op.run()

  def testSparseRepeatedIndices(self):
@@ -162,7 +162,7 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
          [(grad_repeated_index, repeated_index_update_var)])
      aggregated_update = adamax.Adamax().apply_gradients(
          [(grad_aggregated, aggregated_update_var)])
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      self.assertAllClose(aggregated_update_var,
                          repeated_index_update_var.eval())
      for _ in range(3):
@@ -289,7 +289,7 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
      grads1 = constant_op.constant(grads1_np)
      opt = adamax.Adamax(constant_op.constant(0.001))
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], var0)
@@ -327,7 +327,7 @@ class AdamaxOptimizerTest(test.TestCase, parameterized.TestCase):
      opt = adamax.Adamax()
      update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      beta1_power = get_beta_accumulators(opt, dtype)
diff --git a/tensorflow/python/keras/optimizer_v2/ftrl_test.py b/tensorflow/python/keras/optimizer_v2/ftrl_test.py
index 1eae42c07c1..9b17c0013e1 100644
--- a/tensorflow/python/keras/optimizer_v2/ftrl_test.py
+++ b/tensorflow/python/keras/optimizer_v2/ftrl_test.py
@@ -52,7 +52,7 @@ class FtrlOptimizerTest(test.TestCase):
          l1_regularization_strength=0.0,
          l2_regularization_strength=0.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      v0_val, v1_val = self.evaluate([var0, var1])
      self.assertAllClose([0.0, 0.0], v0_val)
@@ -89,7 +89,7 @@ class FtrlOptimizerTest(test.TestCase):
          l1_regularization_strength=0.0,
          l2_regularization_strength=0.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      v0_val, v1_val = self.evaluate([var0, var1])
      self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
@@ -116,7 +116,7 @@ class FtrlOptimizerTest(test.TestCase):
        return pred * pred

      sgd_op = ftrl.Ftrl(1.0).minimize(loss, var_list=[var0])
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      # Fetch params to validate initial values
      self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
      # Run 1 step of sgd
@@ -141,7 +141,7 @@ class FtrlOptimizerTest(test.TestCase):
          l1_regularization_strength=0.001,
          l2_regularization_strength=0.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      v0_val, v1_val = self.evaluate([var0, var1])
      self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
@@ -171,7 +171,7 @@ class FtrlOptimizerTest(test.TestCase):
          l1_regularization_strength=0.001,
          l2_regularization_strength=2.0)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      v0_val, v1_val = self.evaluate([var0, var1])
      self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
@@ -209,7 +209,7 @@ class FtrlOptimizerTest(test.TestCase):
          l2_regularization_strength=2.0,
          l2_shrinkage_regularization_strength=0.1)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      v0_val, v1_val = self.evaluate([var0, var1])
      self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
@@ -246,7 +246,7 @@ class FtrlOptimizerTest(test.TestCase):
          l2_regularization_strength=2.0,
          l2_shrinkage_regularization_strength=0.1)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      v0_val, v1_val = self.evaluate([var0, var1])
      self.assertAllCloseAccordingToType([[1.0], [2.0]], v0_val)
@@ -283,7 +283,7 @@ class FtrlOptimizerTest(test.TestCase):
          l2_regularization_strength=2.0)
      update0 = opt0.apply_gradients([(grads0, var0)])
      update1 = opt1.apply_gradients([(grads1, var1)])
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      v0_val, v1_val = self.evaluate([var0, var1])
      self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
@@ -320,7 +320,7 @@ class FtrlOptimizerTest(test.TestCase):
      grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)

      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      v0_val, v1_val = self.evaluate([var0, var1])
      if is_sparse:
diff --git a/tensorflow/python/keras/optimizer_v2/nadam_test.py b/tensorflow/python/keras/optimizer_v2/nadam_test.py
index 43b2f6e031e..cdcd98309d3 100644
--- a/tensorflow/python/keras/optimizer_v2/nadam_test.py
+++ b/tensorflow/python/keras/optimizer_v2/nadam_test.py
@@ -96,7 +96,7 @@ class NadamOptimizerTest(test.TestCase):
          constant_op.constant(grads1_np_indices), constant_op.constant([3]))
      opt = nadam.Nadam(epsilon=sparse_epsilon)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      # Fetch params to validate initial values
      self.assertAllClose([1.0, 1.0, 2.0], var0)
@@ -137,7 +137,7 @@ class NadamOptimizerTest(test.TestCase):
      grads1 = constant_op.constant(grads1_np)
      opt = nadam.Nadam()
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      # Fetch params to validate initial values
      self.assertAllClose([1.0, 2.0], var0)
diff --git a/tensorflow/python/kernel_tests/array_ops_test.py b/tensorflow/python/kernel_tests/array_ops_test.py
index 92b5490936e..994a6a6cd9b 100644
--- a/tensorflow/python/kernel_tests/array_ops_test.py
+++ b/tensorflow/python/kernel_tests/array_ops_test.py
@@ -1067,7 +1067,7 @@ class StridedSliceBenchmark(test_lib.Benchmark):
  """Benchmark new strided slice operation on non-trivial case."""

  def run_and_time(self, slice_op):
-    variables.global_variables_initializer().run()
+    self.evaluate(variables.global_variables_initializer())
    for _ in range(10):
      _ = slice_op.eval()
    iters = 1000
diff --git a/tensorflow/python/kernel_tests/batch_matmul_op_test.py b/tensorflow/python/kernel_tests/batch_matmul_op_test.py
index 0c8c62d3118..30b61027813 100644
--- a/tensorflow/python/kernel_tests/batch_matmul_op_test.py
+++ b/tensorflow/python/kernel_tests/batch_matmul_op_test.py
@@ -235,7 +235,7 @@ class BatchMatMulBenchmark(test.Benchmark):
            GetRandomNormalInput(a_shape, np.float32))
        matrix_b = variables.Variable(
            GetRandomNormalInput(b_shape, np.float32))
-       variables.global_variables_initializer().run()
+       self.evaluate(variables.global_variables_initializer())

        # Use batch matmul op's internal broadcasting.
        self.run_op_benchmark(
diff --git a/tensorflow/python/kernel_tests/cast_op_test.py b/tensorflow/python/kernel_tests/cast_op_test.py
index e9be8e7d5f7..4e70d2ee0e1 100644
--- a/tensorflow/python/kernel_tests/cast_op_test.py
+++ b/tensorflow/python/kernel_tests/cast_op_test.py
@@ -198,7 +198,7 @@ class CastOpTest(test.TestCase):
      x = variables.Variable(5, dtype=dtypes.float32)
      y = variables.Variable(True, dtype=dtypes.bool)
      cast = math_ops.cast(y, x.dtype)
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      self.assertEqual(1.0, self.evaluate(cast))

  @test_util.run_deprecated_v1
diff --git a/tensorflow/python/kernel_tests/cholesky_op_test.py b/tensorflow/python/kernel_tests/cholesky_op_test.py
index b748a8ec864..a9afca8bfe7 100644
--- a/tensorflow/python/kernel_tests/cholesky_op_test.py
+++ b/tensorflow/python/kernel_tests/cholesky_op_test.py
@@ -330,7 +330,7 @@ class CholeskyBenchmark(test.Benchmark):
        ops.device("/cpu:0"):
      matrix = variables.Variable(self._GenerateMatrix(shape))
      l = linalg_ops.cholesky(matrix)
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      self.run_op_benchmark(
          sess,
          control_flow_ops.group(
@@ -344,7 +344,7 @@ class CholeskyBenchmark(test.Benchmark):
          ops.device("/device:GPU:0"):
        matrix = variables.Variable(self._GenerateMatrix(shape))
        l = linalg_ops.cholesky(matrix)
-       variables.global_variables_initializer().run()
+       self.evaluate(variables.global_variables_initializer())
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(
@@ -364,7 +364,7 @@ class CholeskyBenchmark(test.Benchmark):
        grad_matrix = variables.Variable(
            np.random.randn(*matrix.shape).astype(np.float32))
        grad = grad_fn(l, grad_matrix)
-       variables.global_variables_initializer().run()
+       self.evaluate(variables.global_variables_initializer())
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(
diff --git a/tensorflow/python/kernel_tests/control_flow_ops_py_test.py b/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
index b03020c3bf5..f0e37dfe6a2 100644
--- a/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
+++ b/tensorflow/python/kernel_tests/control_flow_ops_py_test.py
@@ -4959,7 +4959,7 @@ class WhileOpBenchmark(test.Benchmark):
    with session.Session() as sess, ops.device(default_device):
      # Get the initial id i, input x, and kernel.
      i, x, kernel = self._getInitVariables()
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      if static_unroll:
        for _ in xrange(steps):
diff --git a/tensorflow/python/kernel_tests/conv_ops_test.py b/tensorflow/python/kernel_tests/conv_ops_test.py
index 73804e6731a..5c7ef34cad8 100644
--- a/tensorflow/python/kernel_tests/conv_ops_test.py
+++ b/tensorflow/python/kernel_tests/conv_ops_test.py
@@ -2943,7 +2943,7 @@ class Conv2DBenchmark(test.Benchmark):
        x = convolutional.conv2d(x, num_outputs, [1, kernel_w])
      outputs = x

-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      num_iterations = 4
      for iter_index in xrange(num_iterations):
        start = time.time()
@@ -2959,7 +2959,7 @@ class Conv2DBenchmark(test.Benchmark):
    config.graph_options.rewrite_options.dependency_optimization = (
        rewriter_config_pb2.RewriterConfig.OFF)
    with session_lib.Session(config=config) as session:
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      self.run_op_benchmark(
          session, op, burn_iters=burn_iters, min_iters=num_iters, name=name)
diff --git a/tensorflow/python/kernel_tests/determinant_op_test.py b/tensorflow/python/kernel_tests/determinant_op_test.py
index dbfda385ed2..4eb2be0a23d 100644
--- a/tensorflow/python/kernel_tests/determinant_op_test.py
+++ b/tensorflow/python/kernel_tests/determinant_op_test.py
@@ -194,7 +194,7 @@ class MatrixDeterminantBenchmark(test.Benchmark):
          config=benchmark.benchmark_config()) as sess, ops.device("/cpu:0"):
      matrix = self._GenerateMatrix(shape)
      d = linalg_ops.matrix_determinant(matrix)
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      self.run_op_benchmark(
          sess,
          control_flow_ops.group(
@@ -207,7 +207,7 @@ class MatrixDeterminantBenchmark(test.Benchmark):
            config=benchmark.benchmark_config()) as sess, ops.device("/gpu:0"):
        matrix = self._GenerateMatrix(shape)
        d = linalg_ops.matrix_determinant(matrix)
-       variables.global_variables_initializer().run()
+       self.evaluate(variables.global_variables_initializer())
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(
diff --git a/tensorflow/python/kernel_tests/distributions/normal_test.py b/tensorflow/python/kernel_tests/distributions/normal_test.py
index f2a193e69bd..5d4688be87b 100644
--- a/tensorflow/python/kernel_tests/distributions/normal_test.py
+++ b/tensorflow/python/kernel_tests/distributions/normal_test.py
@@ -258,7 +258,7 @@ class NormalTest(test.TestCase):
        value = func(x)
        grads = gradients_impl.gradients(value, [mu, sigma])
        with self.session(graph=g):
-         variables.global_variables_initializer().run()
+         self.evaluate(variables.global_variables_initializer())
          self.assertAllFinite(value)
          self.assertAllFinite(grads[0])
          self.assertAllFinite(grads[1])
@@ -381,7 +381,7 @@ class NormalTest(test.TestCase):
      value = dist.quantile(p)
      grads = gradients_impl.gradients(value, [mu, p])
      with self.cached_session(graph=g):
-       variables.global_variables_initializer().run()
+       self.evaluate(variables.global_variables_initializer())
        self.assertAllFinite(grads[0])
        self.assertAllFinite(grads[1])
diff --git a/tensorflow/python/kernel_tests/einsum_op_test.py b/tensorflow/python/kernel_tests/einsum_op_test.py
index 47d5d457193..10b96716580 100644
--- a/tensorflow/python/kernel_tests/einsum_op_test.py
+++ b/tensorflow/python/kernel_tests/einsum_op_test.py
@@ -435,7 +435,7 @@ class EinsumBenchmark(test.Benchmark):
        input_shape = (dim,) * len(subscript)
        input_vars.append(
            variables.Variable(np.array(r.randn(*input_shape), np.float32)))
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())

      # Call einsum_v1.
      self.run_op_benchmark(
diff --git a/tensorflow/python/kernel_tests/embedding_ops_test.py b/tensorflow/python/kernel_tests/embedding_ops_test.py
index dba5dbb964e..be8ff5f7d08 100644
--- a/tensorflow/python/kernel_tests/embedding_ops_test.py
+++ b/tensorflow/python/kernel_tests/embedding_ops_test.py
@@ -76,7 +76,7 @@ class ScatterAddSubTest(test.TestCase):
      ind = constant_op.constant(indices, dtype=dtypes.int32)
      p2 = scatter_op(p, ind, vals, name="updated_p")
      # p = init
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      # p += vals
      result = self.evaluate(p2)
    # Compute the expected 'p' using numpy operations.
@@ -302,7 +302,7 @@ class EmbeddingLookupTest(test.TestCase):
      ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
      print("Construct ids", ids.get_shape())
      embedding = embedding_ops.embedding_lookup(p_variable, ids)
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      params_values = [params[p_i.name] for p_i in p]
      # Test that the PartitionedVariable components equal the list in p
      p_var_val = self.evaluate(list(p_variable))
@@ -325,7 +325,7 @@ class EmbeddingLookupTest(test.TestCase):
      ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
      print("Construct ids", ids.get_shape())
      embedding = embedding_ops.embedding_lookup(p_variable, ids)
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      params_values = [params[p_i.name] for p_i in p]
      # Test that the PartitionedVariable components equal the list in p
      p_var_val = self.evaluate(list(p_variable))
@@ -425,7 +425,7 @@ class EmbeddingLookupTest(test.TestCase):
      # will test that aspect.
      id_vals = np.random.randint(vocab_size, size=num_vals)
      ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      embedding = embedding_ops.embedding_lookup(
          p_variable, ids, partition_strategy="div")
      tf_result = embedding.eval(feed_dict=feed_dict)
diff --git a/tensorflow/python/kernel_tests/gather_nd_op_test.py b/tensorflow/python/kernel_tests/gather_nd_op_test.py
index 2a4962f6699..026683d595b 100644
--- a/tensorflow/python/kernel_tests/gather_nd_op_test.py
+++ b/tensorflow/python/kernel_tests/gather_nd_op_test.py
@@ -396,7 +396,7 @@ class GatherNdOpBenchmark(test.Benchmark):
      t_params = variables.Variable(params)
      t_indices = variables.Variable(indices)
      gather_op = array_ops.gather_nd(t_params, t_indices)
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      for _ in range(10):
        self.evaluate(gather_op)
      t1 = time.time()
diff --git a/tensorflow/python/kernel_tests/init_ops_test.py b/tensorflow/python/kernel_tests/init_ops_test.py
index 35be40570a0..393d2cdfdcd 100644
--- a/tensorflow/python/kernel_tests/init_ops_test.py
+++ b/tensorflow/python/kernel_tests/init_ops_test.py
@@ -373,7 +373,7 @@ class UniformUnitScalingInitializationTest(test.TestCase):
          "x",
          shape=shape,
          initializer=init_ops.uniform_unit_scaling_initializer())
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      self.assertAllEqual(shape, self.evaluate(x).shape)

  @test_util.run_deprecated_v1
@@ -1347,7 +1347,7 @@ class IdentityInitializerTest(test.TestCase):
    with variable_scope.variable_scope(
        "foo", partitioner=partitioner, initializer=init):
      v = array_ops.identity(variable_scope.get_variable("bar", shape=shape))
-   variables.global_variables_initializer().run()
+   self.evaluate(variables.global_variables_initializer())
    self.assertAllClose(v, np.eye(*shape))
diff --git a/tensorflow/python/kernel_tests/logging_ops_test.py b/tensorflow/python/kernel_tests/logging_ops_test.py
index 5beb785ac2b..b3c30f07d14 100644
--- a/tensorflow/python/kernel_tests/logging_ops_test.py
+++ b/tensorflow/python/kernel_tests/logging_ops_test.py
@@ -126,7 +126,7 @@ class PrintV2Test(test.TestCase):
  def testPrintOneVariable(self):
    var = variables.Variable(math_ops.range(10))
    if not context.executing_eagerly():
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
    with self.captureWritesToStream(sys.stderr) as printed:
      print_op = logging_ops.print_v2(var)
      self.evaluate(print_op)
@@ -138,7 +138,7 @@ class PrintV2Test(test.TestCase):
    plus_one = var_one.assign_add(1.0)
    var_two = variables.Variable(math_ops.range(10))
    if not context.executing_eagerly():
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
    with self.captureWritesToStream(sys.stderr) as printed:
      self.evaluate(plus_one)
      print_op = logging_ops.print_v2(var_one, {"second": var_two})
diff --git a/tensorflow/python/kernel_tests/lu_op_test.py b/tensorflow/python/kernel_tests/lu_op_test.py
index de9d8c32cb5..fee6aecb3b0 100644
--- a/tensorflow/python/kernel_tests/lu_op_test.py
+++ b/tensorflow/python/kernel_tests/lu_op_test.py
@@ -268,7 +268,7 @@ class LuBenchmark(test.Benchmark):
        ops.device("/cpu:0"):
      matrix = variables.Variable(self._GenerateMatrix(shape))
      lu, p = linalg_ops.lu(matrix)
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      self.run_op_benchmark(
          sess,
          control_flow_ops.group(lu, p),
@@ -281,7 +281,7 @@ class LuBenchmark(test.Benchmark):
          ops.device("/device:GPU:0"):
        matrix = variables.Variable(self._GenerateMatrix(shape))
        lu, p = linalg_ops.lu(matrix)
-       variables.global_variables_initializer().run()
+       self.evaluate(variables.global_variables_initializer())
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(lu, p),
diff --git a/tensorflow/python/kernel_tests/matrix_band_part_op_test.py b/tensorflow/python/kernel_tests/matrix_band_part_op_test.py
index 25b502cf814..db537f0cb86 100644
--- a/tensorflow/python/kernel_tests/matrix_band_part_op_test.py
+++ b/tensorflow/python/kernel_tests/matrix_band_part_op_test.py
@@ -117,7 +117,7 @@ class MatrixBandPartBenchmark(test_lib.Benchmark):
          ops.device("/cpu:0"):
        matrix = variables.Variable(array_ops.ones(shape_))
        band = array_ops.matrix_band_part(matrix, limits[0], limits[1])
-       variables.global_variables_initializer().run()
+       self.evaluate(variables.global_variables_initializer())
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(band),
@@ -131,7 +131,7 @@ class MatrixBandPartBenchmark(test_lib.Benchmark):
            ops.device("/gpu:0"):
          matrix = variables.Variable(array_ops.ones(shape_))
          band = array_ops.matrix_band_part(matrix, limits[0], limits[1])
-         variables.global_variables_initializer().run()
+         self.evaluate(variables.global_variables_initializer())
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(band),
diff --git a/tensorflow/python/kernel_tests/matrix_exponential_op_test.py b/tensorflow/python/kernel_tests/matrix_exponential_op_test.py
index 7f6ea9b434c..149ce0d631e 100644
--- a/tensorflow/python/kernel_tests/matrix_exponential_op_test.py
+++ b/tensorflow/python/kernel_tests/matrix_exponential_op_test.py
@@ -192,7 +192,7 @@ class MatrixExponentialBenchmark(test.Benchmark):
        ops.device("/cpu:0"):
      matrix = self._GenerateMatrix(shape)
      expm = linalg_impl.matrix_exponential(matrix)
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      self.run_op_benchmark(
          sess,
          control_flow_ops.group(expm),
@@ -205,7 +205,7 @@ class MatrixExponentialBenchmark(test.Benchmark):
          ops.device("/gpu:0"):
        matrix = self._GenerateMatrix(shape)
        expm = linalg_impl.matrix_exponential(matrix)
-       variables.global_variables_initializer().run()
+       self.evaluate(variables.global_variables_initializer())
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(expm),
diff --git a/tensorflow/python/kernel_tests/matrix_inverse_op_test.py b/tensorflow/python/kernel_tests/matrix_inverse_op_test.py
index 244e95eefa2..ffe0f595618 100644
--- a/tensorflow/python/kernel_tests/matrix_inverse_op_test.py
+++ b/tensorflow/python/kernel_tests/matrix_inverse_op_test.py
@@ -186,7 +186,7 @@ class MatrixInverseBenchmark(test.Benchmark):
        ops.device("/cpu:0"):
      matrix = self._GenerateMatrix(shape)
      inv = linalg_ops.matrix_inverse(matrix, adjoint=adjoint)
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      self.run_op_benchmark(
          sess,
          control_flow_ops.group(inv),
@@ -200,7 +200,7 @@ class MatrixInverseBenchmark(test.Benchmark):
          ops.device("/gpu:0"):
        matrix = self._GenerateMatrix(shape)
        inv = linalg_ops.matrix_inverse(matrix, adjoint=adjoint)
-       variables.global_variables_initializer().run()
+       self.evaluate(variables.global_variables_initializer())
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(inv),
diff --git a/tensorflow/python/kernel_tests/matrix_logarithm_op_test.py b/tensorflow/python/kernel_tests/matrix_logarithm_op_test.py
index 8cc230d2806..5004a9c5588 100644
--- a/tensorflow/python/kernel_tests/matrix_logarithm_op_test.py
+++ b/tensorflow/python/kernel_tests/matrix_logarithm_op_test.py
@@ -177,7 +177,7 @@ class MatrixLogarithmBenchmark(test.Benchmark):
        ops.device("/cpu:0"):
      matrix = self._GenerateMatrix(shape)
      logm = gen_linalg_ops.matrix_logarithm(matrix)
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      self.run_op_benchmark(
          sess,
          control_flow_ops.group(logm),
diff --git a/tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py b/tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py
index 889ea0dbd6c..d2e9c7c737b 100644
--- a/tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py
+++ b/tensorflow/python/kernel_tests/matrix_solve_ls_op_test.py
@@ -335,7 +335,7 @@ class MatrixSolveLsBenchmark(test_lib.Benchmark):
        ops.device("/cpu:0"):
      matrix, rhs = _GenerateTestData(matrix_shape, num_rhs)
      x = linalg_ops.matrix_solve_ls(matrix, rhs, regularizer)
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      self.run_op_benchmark(
          sess,
          control_flow_ops.group(x),
@@ -350,7 +350,7 @@ class MatrixSolveLsBenchmark(test_lib.Benchmark):
          ops.device("/gpu:0"):
        matrix, rhs = _GenerateTestData(matrix_shape, num_rhs)
        x = linalg_ops.matrix_solve_ls(matrix, rhs, regularizer)
-       variables.global_variables_initializer().run()
+       self.evaluate(variables.global_variables_initializer())
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(x),
diff --git a/tensorflow/python/kernel_tests/matrix_solve_op_test.py b/tensorflow/python/kernel_tests/matrix_solve_op_test.py
index bbd909c8e58..209e60417da 100644
--- a/tensorflow/python/kernel_tests/matrix_solve_op_test.py
+++ b/tensorflow/python/kernel_tests/matrix_solve_op_test.py
@@ -183,7 +183,7 @@ class MatrixSolveBenchmark(test.Benchmark):
          ops.device("/cpu:0"):
        matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
        x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
-       variables.global_variables_initializer().run()
+       self.evaluate(variables.global_variables_initializer())
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(x),
@@ -201,7 +201,7 @@ class MatrixSolveBenchmark(test.Benchmark):
            ops.device("/gpu:0"):
          matrix, rhs = self._GenerateTestData(matrix_shape, num_rhs)
          x = linalg_ops.matrix_solve(matrix, rhs, adjoint=adjoint)
-         variables.global_variables_initializer().run()
+         self.evaluate(variables.global_variables_initializer())
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(x),
diff --git a/tensorflow/python/kernel_tests/qr_op_test.py b/tensorflow/python/kernel_tests/qr_op_test.py
index 2effb832bda..31d538f8b27 100644
--- a/tensorflow/python/kernel_tests/qr_op_test.py
+++ b/tensorflow/python/kernel_tests/qr_op_test.py
@@ -237,7 +237,7 @@ class QRBenchmark(test.Benchmark):
          low=-1.0, high=1.0, size=shape_).astype(np.float32)
      matrix = variables.Variable(matrix_value)
      q, r = linalg_ops.qr(matrix)
-     variables.global_variables_initializer().run()
+     self.evaluate(variables.global_variables_initializer())
      self.run_op_benchmark(
          sess,
          control_flow_ops.group(q, r),
@@ -252,7 +252,7 @@ class QRBenchmark(test.Benchmark):
            low=-1.0, high=1.0, size=shape_).astype(np.float32)
        matrix = variables.Variable(matrix_value)
        q, r = linalg_ops.qr(matrix)
-       variables.global_variables_initializer().run()
+       self.evaluate(variables.global_variables_initializer())
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(q, r),
diff --git a/tensorflow/python/kernel_tests/reduction_ops_test.py b/tensorflow/python/kernel_tests/reduction_ops_test.py index 757944661aa..c0ad81f3055 100644 --- a/tensorflow/python/kernel_tests/reduction_ops_test.py +++ b/tensorflow/python/kernel_tests/reduction_ops_test.py @@ -223,7 +223,7 @@ class SumReductionTest(BaseReductionTest): with self.session(graph=ops.Graph(), use_gpu=True) as sess: tf_arr = variables.Variable(arr) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) tf_mean = math_ops.reduce_mean(tf_arr, 0, False) tf_out_mean = self.evaluate(tf_mean) self.assertAllClose(tf_out_mean, 1.) diff --git a/tensorflow/python/kernel_tests/segment_reduction_ops_test.py b/tensorflow/python/kernel_tests/segment_reduction_ops_test.py index 5d17b61cb06..9c0e0e38b6a 100644 --- a/tensorflow/python/kernel_tests/segment_reduction_ops_test.py +++ b/tensorflow/python/kernel_tests/segment_reduction_ops_test.py @@ -969,7 +969,7 @@ class SegmentReductionOpBenchmark(test.Benchmark): vc = variables.Variable(const.astype(dtype)) name, op = op_functor(vc, vs, seg_ids) with session.Session() as sess: - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) r = self.run_op_benchmark( sess, op, diff --git a/tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py b/tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py index 6039ff1afa7..b55b29ffd8e 100644 --- a/tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py +++ b/tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py @@ -217,7 +217,7 @@ class BenchmarkSparseTensorsMapVsSerialization(test.Benchmark): st_serialized, dtype=values.dtype) st_deserialized_op = st_deserialized.values.op - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) st_roundtrip_values = self.evaluate(st_roundtrip) st_deserialized_values = self.evaluate(st_deserialized) diff --git a/tensorflow/python/kernel_tests/string_format_op_test.py b/tensorflow/python/kernel_tests/string_format_op_test.py index 52379cc2c8d..adb8ad6e677 100644 --- a/tensorflow/python/kernel_tests/string_format_op_test.py +++ b/tensorflow/python/kernel_tests/string_format_op_test.py @@ -54,7 +54,7 @@ class StringFormatOpTest(test.TestCase): var = variables.Variable(3.34) format_output = string_ops.string_format("{}", [var]) if not context.executing_eagerly(): - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) out = self.evaluate(format_output) expected = "3.34" self.assertEqual(compat.as_text(out), expected) @@ -65,7 +65,7 @@ class StringFormatOpTest(test.TestCase): var = variables.Variable(math_ops.range(10)) format_output = string_ops.string_format("{}", [var]) if not context.executing_eagerly(): - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) out = self.evaluate(format_output) expected = "[0 1 2 ... 7 8 9]" self.assertEqual(compat.as_text(out), expected) @@ -78,7 +78,7 @@ class StringFormatOpTest(test.TestCase): var_two = variables.Variable(math_ops.range(10)) format_output = string_ops.string_format("{}, {}", [var_one, var_two]) if not context.executing_eagerly(): - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) self.evaluate(plus_one) out = self.evaluate(format_output) expected = "3.14, [0 1 2 ... 
7 8 9]" diff --git a/tensorflow/python/kernel_tests/svd_op_test.py b/tensorflow/python/kernel_tests/svd_op_test.py index c8180df2d07..8bbfc517857 100644 --- a/tensorflow/python/kernel_tests/svd_op_test.py +++ b/tensorflow/python/kernel_tests/svd_op_test.py @@ -343,7 +343,7 @@ class SVDBenchmark(test.Benchmark): low=-1.0, high=1.0, size=shape_).astype(np.float32) matrix = variables.Variable(matrix_value) u, s, v = linalg_ops.svd(matrix) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) self.run_op_benchmark( sess, control_flow_ops.group(u, s, v), @@ -358,7 +358,7 @@ class SVDBenchmark(test.Benchmark): low=-1.0, high=1.0, size=shape_).astype(np.float32) matrix = variables.Variable(matrix_value) u, s, v = linalg_ops.svd(matrix) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) self.run_op_benchmark( sess, control_flow_ops.group(u, s, v), diff --git a/tensorflow/python/kernel_tests/tridiagonal_matmul_op_test.py b/tensorflow/python/kernel_tests/tridiagonal_matmul_op_test.py index c7b4a8689e2..456f13e86a7 100644 --- a/tensorflow/python/kernel_tests/tridiagonal_matmul_op_test.py +++ b/tensorflow/python/kernel_tests/tridiagonal_matmul_op_test.py @@ -221,7 +221,7 @@ class TridiagonalMulOpTest(test.TestCase): vec, diagonals_format='sequence') - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) self.run_op_benchmark( sess, control_flow_ops.group(x1), diff --git a/tensorflow/python/kernel_tests/tridiagonal_solve_op_test.py b/tensorflow/python/kernel_tests/tridiagonal_solve_op_test.py index afc327e2aef..3045461ab4d 100644 --- a/tensorflow/python/kernel_tests/tridiagonal_solve_op_test.py +++ b/tensorflow/python/kernel_tests/tridiagonal_solve_op_test.py @@ -708,7 +708,7 @@ class TridiagonalSolveOpTest(test.TestCase): return x = linalg_impl.tridiagonal_solve( diags, rhs, partial_pivoting=pivoting) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) self.run_op_benchmark( sess, control_flow_ops.group(x), diff --git a/tensorflow/python/ops/nn_test.py b/tensorflow/python/ops/nn_test.py index b5bcc2c3099..c6433db610d 100644 --- a/tensorflow/python/ops/nn_test.py +++ b/tensorflow/python/ops/nn_test.py @@ -626,7 +626,7 @@ class ComputeSampledLogitsTest(test_lib.TestCase): partitioner=partitioned_variables.fixed_size_partitioner(num_shards), initializer=constant_op.constant(biases)) with self.session(graph=g) as sess: - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) return self.evaluate([list(sharded_weights), list(sharded_biases)]) def testShapes(self): diff --git a/tensorflow/python/ops/special_math_ops_test.py b/tensorflow/python/ops/special_math_ops_test.py index 437997c9ce0..623f5063c7d 100644 --- a/tensorflow/python/ops/special_math_ops_test.py +++ b/tensorflow/python/ops/special_math_ops_test.py @@ -1115,7 +1115,7 @@ class EinsumBenchmark(test.Benchmark): input_shape = (dim,) * len(subscript) input_vars.append( variables.Variable(np.array(r.randn(*input_shape), np.float32))) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) if len(input_vars) <= 2: self.run_op_benchmark( diff --git a/tensorflow/python/training/adadelta_test.py b/tensorflow/python/training/adadelta_test.py index 9b84a2efb65..939e9a7ff1c 100644 --- a/tensorflow/python/training/adadelta_test.py +++ 
b/tensorflow/python/training/adadelta_test.py @@ -176,7 +176,7 @@ class AdadeltaOptimizerTest(test.TestCase): loss = pred * pred sgd_op = adadelta.AdadeltaOptimizer( 1.0, 1.0, 1.0).minimize(loss) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0)) # Run 1 step of sgd diff --git a/tensorflow/python/training/adagrad_da_test.py b/tensorflow/python/training/adagrad_da_test.py index 0730618e31f..dd40bf58bb1 100644 --- a/tensorflow/python/training/adagrad_da_test.py +++ b/tensorflow/python/training/adagrad_da_test.py @@ -53,7 +53,7 @@ class AdagradDAOptimizerTest(test.TestCase): l2_regularization_strength=0.0) update = opt.apply_gradients( zip([grads0, grads1], [var0, var1]), global_step=global_step) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllClose([0.0, 0.0], v0_val) @@ -94,7 +94,7 @@ class AdagradDAOptimizerTest(test.TestCase): loss = pred * pred sgd_op = adagrad_da.AdagradDAOptimizer( 1.0, global_step).minimize(loss) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0)) # Run 1 step of sgd @@ -122,7 +122,7 @@ class AdagradDAOptimizerTest(test.TestCase): l2_regularization_strength=0.0) update = opt.apply_gradients( zip([grads0, grads1], [var0, var1]), global_step=global_step) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) @@ -155,7 +155,7 @@ class AdagradDAOptimizerTest(test.TestCase): l2_regularization_strength=0.0) update = opt.apply_gradients( zip([grads0, grads1], [var0, var1]), global_step=global_step) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) @@ -188,7 +188,7 @@ class AdagradDAOptimizerTest(test.TestCase): l2_regularization_strength=2.0) update = opt.apply_gradients( zip([grads0, grads1], [var0, var1]), global_step=global_step) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) diff --git a/tensorflow/python/training/adam_test.py b/tensorflow/python/training/adam_test.py index 4142e61e356..a29b986c033 100644 --- a/tensorflow/python/training/adam_test.py +++ b/tensorflow/python/training/adam_test.py @@ -80,7 +80,7 @@ class AdamOptimizerTest(test.TestCase): constant_op.constant(grads1_np_indices), constant_op.constant([2])) opt = adam.AdamOptimizer() update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) @@ -121,7 +121,7 @@ class AdamOptimizerTest(test.TestCase): gathered_sum = math_ops.reduce_sum(array_ops.gather(var, indices)) optimizer = adam.AdamOptimizer(3.0) minimize_op = optimizer.minimize(gathered_sum) - variables.global_variables_initializer().run() + 
self.evaluate(variables.global_variables_initializer()) minimize_op.run() @test_util.run_deprecated_v1 @@ -146,7 +146,7 @@ class AdamOptimizerTest(test.TestCase): [(grad_repeated_index, repeated_index_update_var)]) aggregated_update = adam.AdamOptimizer().apply_gradients( [(grad_aggregated, aggregated_update_var)]) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) self.assertAllClose(aggregated_update_var, self.evaluate(repeated_index_update_var)) for _ in range(3): @@ -268,7 +268,7 @@ class AdamOptimizerTest(test.TestCase): grads1 = constant_op.constant(grads1_np) opt = adam.AdamOptimizer(constant_op.constant(0.001)) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) @@ -308,7 +308,7 @@ class AdamOptimizerTest(test.TestCase): opt = adam.AdamOptimizer() update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) beta1_power, beta2_power = opt._get_beta_accumulators() diff --git a/tensorflow/python/training/checkpoint_management_test.py b/tensorflow/python/training/checkpoint_management_test.py index f8c45306168..0036255d1f9 100644 --- a/tensorflow/python/training/checkpoint_management_test.py +++ b/tensorflow/python/training/checkpoint_management_test.py @@ -76,7 +76,7 @@ class LatestCheckpointWithRelativePaths(test.TestCase): with self.cached_session() as sess: unused_a = variables.Variable(0.0) # So that Saver saves something. - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) # Should fail. saver = saver_module.Saver(sharded=False) @@ -123,7 +123,7 @@ class LatestCheckpointWithRelativePaths(test.TestCase): save = saver_module.Saver({"v0": v0}) # Record a short training history. - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) save.save(sess, filepath, global_step=0) self.evaluate(inc) save.save(sess, filepath, global_step=1) @@ -136,7 +136,7 @@ class LatestCheckpointWithRelativePaths(test.TestCase): # Create a new saver. save = saver_module.Saver({"v0": v0}) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) # Get the most recent checkpoint name from the training history file. 
name = checkpoint_management.latest_checkpoint(traindir) @@ -278,7 +278,7 @@ class SaverUtilsTest(test.TestCase): for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1): with self.session(graph=ops_lib.Graph()) as sess: unused_v = variables.Variable(1.0, name="v") - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) saver = saver_module.Saver(sharded=sharded, write_version=version) path = os.path.join(self._base_dir, "%s-%s" % (sharded, version)) @@ -297,7 +297,7 @@ class SaverUtilsTest(test.TestCase): for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1): with self.session(graph=ops_lib.Graph()) as sess: unused_v = variables.Variable(1.0, name="v") - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) saver = saver_module.Saver(write_version=version) prefixes.append( saver.save(sess, os.path.join(self._base_dir, str(version)))) @@ -312,7 +312,7 @@ class SaverUtilsTest(test.TestCase): for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1): with self.session(graph=ops_lib.Graph()) as sess: unused_v = variables.Variable(1.0, name="v") - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) saver = saver_module.Saver(sharded=sharded, write_version=version) path = os.path.join(self._base_dir, "%s-%s" % (sharded, version)) diff --git a/tensorflow/python/training/ftrl_test.py b/tensorflow/python/training/ftrl_test.py index a10dbe5500c..f0cbe13e037 100644 --- a/tensorflow/python/training/ftrl_test.py +++ b/tensorflow/python/training/ftrl_test.py @@ -56,7 +56,7 @@ class FtrlOptimizerTest(test.TestCase): l1_regularization_strength=0.0, l2_regularization_strength=0.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllClose([0.0, 0.0], v0_val) @@ -94,7 +94,7 @@ class FtrlOptimizerTest(test.TestCase): l1_regularization_strength=0.0, l2_regularization_strength=0.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) @@ -120,7 +120,7 @@ class FtrlOptimizerTest(test.TestCase): pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) loss = pred * pred sgd_op = ftrl.FtrlOptimizer(1.0).minimize(loss) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0)) # Run 1 step of sgd @@ -146,7 +146,7 @@ class FtrlOptimizerTest(test.TestCase): l1_regularization_strength=0.001, l2_regularization_strength=0.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) @@ -177,7 +177,7 @@ class FtrlOptimizerTest(test.TestCase): l1_regularization_strength=0.001, l2_regularization_strength=2.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) 
v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) @@ -216,7 +216,7 @@ class FtrlOptimizerTest(test.TestCase): l2_regularization_strength=2.0, l2_shrinkage_regularization_strength=0.1) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) @@ -254,7 +254,7 @@ class FtrlOptimizerTest(test.TestCase): l2_regularization_strength=2.0, l2_shrinkage_regularization_strength=0.1) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([[1.0], [2.0]], v0_val) @@ -292,7 +292,7 @@ class FtrlOptimizerTest(test.TestCase): l2_regularization_strength=2.0) update0 = opt0.apply_gradients([(grads0, var0)]) update1 = opt1.apply_gradients([(grads1, var1)]) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) @@ -329,7 +329,7 @@ class FtrlOptimizerTest(test.TestCase): grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) sess = ops.get_default_session() v0_val, v1_val = self.evaluate([var0, var1]) diff --git a/tensorflow/python/training/gradient_descent_test.py b/tensorflow/python/training/gradient_descent_test.py index 5a6c5cfa747..2c7756e403f 100644 --- a/tensorflow/python/training/gradient_descent_test.py +++ b/tensorflow/python/training/gradient_descent_test.py @@ -47,7 +47,7 @@ class GradientDescentOptimizerTest(test.TestCase): optimizer = gradient_descent.GradientDescentOptimizer(3.0) sgd_op = optimizer.apply_gradients( zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0)) self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1)) @@ -155,7 +155,7 @@ class GradientDescentOptimizerTest(test.TestCase): # doesn't work because the sessions and graph are reused across unit # tests and this would mean trying to reinitialize variables. Figure out # a long-term solution for this. 
- variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0)) self.assertAllCloseAccordingToType([3.0], self.evaluate(var1)) @@ -179,7 +179,7 @@ class GradientDescentOptimizerTest(test.TestCase): lrate = constant_op.constant(3.0) sgd_op = gradient_descent.GradientDescentOptimizer( lrate).apply_gradients(zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0)) self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1)) @@ -199,7 +199,7 @@ class GradientDescentOptimizerTest(test.TestCase): values = [1.0, 3.0] vars_ = [variables.Variable([v], dtype=dtype) for v in values] grads_and_vars = opt.compute_gradients(vars_[0] + vars_[1], vars_) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) for grad, _ in grads_and_vars: self.assertAllCloseAccordingToType([1.0], self.evaluate(grad)) @@ -214,7 +214,7 @@ class GradientDescentOptimizerTest(test.TestCase): grads1 = constant_op.constant([0.01, 0.01], dtype=dtype) sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients( zip([grads0, grads1], [var0, var1]), global_step=global_step) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0)) self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1)) @@ -245,7 +245,7 @@ class GradientDescentOptimizerTest(test.TestCase): constant_op.constant([2, 1])) sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients( zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0], [2.0]], self.evaluate(var0)) self.assertAllCloseAccordingToType([[3.0], [4.0]], self.evaluate(var1)) diff --git a/tensorflow/python/training/momentum_test.py b/tensorflow/python/training/momentum_test.py index b69c828f84b..6e47a2e5f2e 100644 --- a/tensorflow/python/training/momentum_test.py +++ b/tensorflow/python/training/momentum_test.py @@ -176,7 +176,7 @@ class MomentumOptimizerTest(test.TestCase): mom_op = momentum_lib.MomentumOptimizer( learning_rate=2.0, momentum=0.9, use_nesterov=True) opt_op = mom_op.minimize(cost, global_step, [var0, var1]) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) for t in range(1, 5): opt_op.run() var0_np, accum0_np = self._update_nesterov_momentum_numpy( @@ -218,7 +218,7 @@ class MomentumOptimizerTest(test.TestCase): grads_and_vars = [(y_feed, var0), (constant_op.constant( [3.0, 3.0], dtype=dtype), var1)] opt_update = mom_op.apply_gradients(grads_and_vars) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) for t in range(1, 5): opt_update.run(feed_dict={x_feed: grads[t - 1]}) var0_np, accum0_np = self._update_nesterov_momentum_numpy( @@ -295,7 +295,7 @@ class MomentumOptimizerTest(test.TestCase): momentum=constant_op.constant(0.9)) mom_update = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) - 
variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) # Check we have slots self.assertEqual(["momentum"], mom_opt.get_slot_names()) slot0 = mom_opt.get_slot(var0, "momentum") @@ -452,7 +452,7 @@ class MomentumOptimizerTest(test.TestCase): grads0 = constant_op.constant([0.0] * num_samples) mom_opt = momentum_lib.MomentumOptimizer(learning_rate=0.1, momentum=0.1) mom_update = mom_opt.apply_gradients(zip([grads0], [var0])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) for i in xrange(num_samples): mom_update.run(feed_dict={grads0: db_grad[i]}) self.assertAllClose(np.array(db_out[i]), self.evaluate(var0)) @@ -477,7 +477,7 @@ class MomentumOptimizerTest(test.TestCase): learning_rate=2.0, momentum=0.9) mom_update = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) # Check we have slots self.assertEqual(["momentum"], mom_opt.get_slot_names()) @@ -553,7 +553,7 @@ class MomentumOptimizerTest(test.TestCase): zip([grads0, grads1], [var0, var1])) mom_update2 = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) self.assertEqual(["momentum"], mom_opt.get_slot_names()) slot0 = mom_opt.get_slot(var0, "momentum") diff --git a/tensorflow/python/training/optimizer_test.py b/tensorflow/python/training/optimizer_test.py index a0e07c5618f..80689085587 100644 --- a/tensorflow/python/training/optimizer_test.py +++ b/tensorflow/python/training/optimizer_test.py @@ -78,7 +78,7 @@ class OptimizerTest(test.TestCase): aggregation_method=gradients_util.AggregationMethod. 
EXPERIMENTAL_ACCUMULATE_N) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) @@ -102,7 +102,7 @@ class OptimizerTest(test.TestCase): opt_op = sgd_op.minimize( cost, global_step, [var0, var1], grad_loss=grad_loss) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) @@ -259,7 +259,7 @@ class OptimizerTest(test.TestCase): sgd_op = gradient_descent.GradientDescentOptimizer(3.0) opt_op = sgd_op.minimize(cost, global_step, [var0, var1]) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], self.evaluate(var0)) self.assertAllClose([3.0, 4.0], self.evaluate(var1)) diff --git a/tensorflow/python/training/proximal_adagrad_test.py b/tensorflow/python/training/proximal_adagrad_test.py index ea603d4d39c..7b8bd8f9805 100644 --- a/tensorflow/python/training/proximal_adagrad_test.py +++ b/tensorflow/python/training/proximal_adagrad_test.py @@ -47,7 +47,7 @@ class ProximalAdagradOptimizerTest(test.TestCase): l1_regularization_strength=0.0, l2_regularization_strength=0.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllClose([0.0, 0.0], v0_val) @@ -87,7 +87,7 @@ class ProximalAdagradOptimizerTest(test.TestCase): l1_regularization_strength=0.0, l2_regularization_strength=0.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllClose([1.0, 2.0], v0_val) @@ -109,7 +109,7 @@ class ProximalAdagradOptimizerTest(test.TestCase): pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) loss = pred * pred sgd_op = proximal_adagrad.ProximalAdagradOptimizer(1.0).minimize(loss) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0)) # Run 1 step of sgd @@ -133,7 +133,7 @@ class ProximalAdagradOptimizerTest(test.TestCase): l1_regularization_strength=0.001, l2_regularization_strength=0.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllClose([1.0, 2.0], v0_val) @@ -160,7 +160,7 @@ class ProximalAdagradOptimizerTest(test.TestCase): l1_regularization_strength=0.001, l2_regularization_strength=2.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllClose([1.0, 2.0], v0_val) @@ -195,7 +195,7 @@ class ProximalAdagradOptimizerTest(test.TestCase): grads1 = constant_op.constant([0.01, 0.02]) update = 
opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) sess = ops.get_default_session() v0_val, v1_val = self.evaluate([var0, var1]) diff --git a/tensorflow/python/training/proximal_gradient_descent_test.py b/tensorflow/python/training/proximal_gradient_descent_test.py index 25b206605dc..603807332ca 100644 --- a/tensorflow/python/training/proximal_gradient_descent_test.py +++ b/tensorflow/python/training/proximal_gradient_descent_test.py @@ -49,7 +49,7 @@ class ProximalGradientDescentOptimizerTest(test.TestCase): opt = proximal_gradient_descent.ProximalGradientDescentOptimizer( 3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllClose([0.0, 0.0], v0_val) @@ -82,7 +82,7 @@ class ProximalGradientDescentOptimizerTest(test.TestCase): opt = proximal_gradient_descent.ProximalGradientDescentOptimizer( 3.0, l1_regularization_strength=0.0, l2_regularization_strength=0.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllClose([1.0, 2.0], v0_val) @@ -106,7 +106,7 @@ class ProximalGradientDescentOptimizerTest(test.TestCase): loss = pred * pred sgd_op = proximal_gradient_descent.ProximalGradientDescentOptimizer( 1.0).minimize(loss) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0)) # Run 1 step of sgd @@ -127,7 +127,7 @@ class ProximalGradientDescentOptimizerTest(test.TestCase): opt = proximal_gradient_descent.ProximalGradientDescentOptimizer( 3.0, l1_regularization_strength=0.001, l2_regularization_strength=2.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) v0_val, v1_val = self.evaluate([var0, var1]) self.assertAllClose([1.0, 2.0], v0_val) @@ -162,7 +162,7 @@ class ProximalGradientDescentOptimizerTest(test.TestCase): grads1 = constant_op.constant([0.01, 0.02]) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) sess = ops.get_default_session() v0_val, v1_val = self.evaluate([var0, var1]) diff --git a/tensorflow/python/training/saver_large_partitioned_variable_test.py b/tensorflow/python/training/saver_large_partitioned_variable_test.py index 84458836d06..562b252a6b2 100644 --- a/tensorflow/python/training/saver_large_partitioned_variable_test.py +++ b/tensorflow/python/training/saver_large_partitioned_variable_test.py @@ -51,7 +51,7 @@ class SaverLargePartitionedVariableTest(test.TestCase): partitioner=partitioned_variables.fixed_size_partitioner(4), initializer=init, dtype=dtypes.bool)) - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) save = saver.Saver(partitioned_var) val = save.save(sess, save_path) self.assertEqual(save_path, val) diff --git a/tensorflow/python/training/slot_creator_test.py 
b/tensorflow/python/training/slot_creator_test.py index 80372c72d69..88192811c8b 100644 --- a/tensorflow/python/training/slot_creator_test.py +++ b/tensorflow/python/training/slot_creator_test.py @@ -147,7 +147,7 @@ class SlotCreatorTest(test.TestCase): slot = slot_creator.create_slot(v, s.initialized_value(), name="slot") si = slot._save_slice_info - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) self.assertEqual("var/part_%d/slot" % i, slot.op.name) self.assertEqual([2], slot.get_shape().as_list()) @@ -168,7 +168,7 @@ class SlotCreatorTest(test.TestCase): for i, v in enumerate(p_v): slot = slot_creator.create_slot(v, s.initialized_value(), name="slot") - variables.global_variables_initializer().run() + self.evaluate(variables.global_variables_initializer()) self.assertEqual("var/part_%d/slot" % i, slot.op.name) self.assertEqual([], slot.get_shape().as_list())
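
Reviewer note (not part of the patch): the substitution above is mechanical, and a minimal sketch shows why it is safe. The sketch below is illustrative only; it uses the public tf.compat.v1 API rather than the internal tensorflow.python modules these tests import, and the test class, method, and variable names are hypothetical. Operation.run() requires a default Session, which does not exist under eager execution, while tf.test.TestCase.evaluate() runs its argument in the test's cached session in graph mode and evaluates it directly in eager mode.

# Illustrative sketch only; names below are hypothetical, and the public
# tf.compat.v1 API stands in for the internal modules the tests import.
import tensorflow.compat.v1 as tf


class InitializerMigrationTest(tf.test.TestCase):

  def testInitializeAndReadVariable(self):
    v = tf.Variable([1.0, 2.0])

    # Old pattern -- fails under eager execution, where no default
    # Session exists for Operation.run():
    #   tf.global_variables_initializer().run()

    # New pattern -- TestCase.evaluate() dispatches to Session.run() in
    # graph mode; under eager execution variables are initialized at
    # construction time and the initializer degrades to a no-op.
    self.evaluate(tf.global_variables_initializer())
    self.assertAllClose([1.0, 2.0], self.evaluate(v))


if __name__ == "__main__":
  tf.test.main()

Using self.evaluate() for the initializer also matches the fetch style already present in these tests (e.g. self.evaluate(var0)), so each test body reads uniformly instead of mixing Session-dependent .run() calls with mode-agnostic evaluations.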