Replace x.initializer.run() with self.evaluate(x.initializer) in tests.

This change makes these lines V2 compatible.

sed -i "s/\([_a-z0-9]*\).initializer.run()/self.evaluate(\1.initializer)/" ./third_party/tensorflow/python/kernel_tests/*.py

PiperOrigin-RevId: 324166739
Change-Id: Icf6213784063de295f0bf7b69a847e5f3820a752
Parent: 98400b759b
Commit: dd472f98f5
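For context, the sketch below is a minimal, hypothetical illustration (not part of this commit; the test class and method names are invented) of why the substitution works: Operation.run() needs a default tf.compat.v1.Session and therefore only runs in graph mode, while tf.test.TestCase.evaluate uses the test's session in graph mode and also works under eager execution, which is what makes the replaced lines V2 compatible.

# Hypothetical, self-contained illustration (not part of this commit).
import tensorflow as tf


class InitializerMigrationTest(tf.test.TestCase):
  """Shows the old V1-only pattern next to the V2-compatible one."""

  def test_old_pattern_requires_a_session(self):
    # Operation.run() only works with a default tf.compat.v1.Session,
    # i.e. in graph mode.
    with tf.Graph().as_default(), tf.compat.v1.Session():
      v = tf.compat.v1.Variable(7)
      v.initializer.run()            # the pattern removed by this commit
      self.assertEqual(7, v.eval())

  def test_new_pattern_is_v2_compatible(self):
    # self.evaluate() runs the op through the test's session in graph
    # mode and also works when eager execution is enabled, so the same
    # test body survives the V2 migration.
    with tf.Graph().as_default(), tf.compat.v1.Session():
      v = tf.compat.v1.Variable(7)
      self.evaluate(v.initializer)   # the pattern introduced by this commit
      self.assertEqual(7, self.evaluate(v))


if __name__ == "__main__":
  tf.test.main()

The sed expression in the commit message captures the variable name with \([_a-z0-9]*\) and rewrites it into the self.evaluate(...) form, which is exactly the one-line edit repeated in every hunk below.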
@@ -800,7 +800,7 @@ class StridedSliceTest(test_util.TensorFlowTestCase):
   def testExpandVariable(self):
     with self.session(use_gpu=True):
       x = variables.Variable(7, dtype=dtypes.int32)
-      x.initializer.run()
+      self.evaluate(x.initializer)
       y = x[None].eval()
       self.assertEqual(y.shape, (1,))
       self.assertAllEqual(y, (7,))
@@ -147,7 +147,7 @@ class CondV2Test(test.TestCase):
   def testExternalControlDependencies(self):
     with ops.Graph().as_default(), self.test_session():
       v = variables.Variable(1.0)
-      v.initializer.run()
+      self.evaluate(v.initializer)
       op = v.assign_add(1.0)

       def true_branch():
@@ -1505,7 +1505,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
   def testWhileExternalControlDependencies(self):
     with self.cached_session():
       v = variables.Variable(0.0)
-      v.initializer.run()
+      self.evaluate(v.initializer)
       increment = v.assign_add(1.0).read_value()

       def body_fn(i):
@@ -1521,7 +1521,7 @@ class ControlFlowTest(test.TestCase, parameterized.TestCase):
   def testWhileExternalControlDependenciesNoInput(self):
     with self.cached_session():
       v = variables.Variable(0.0)
-      v.initializer.run()
+      self.evaluate(v.initializer)
       # TODO(apassos): figure out why the reading is necessary here.
       increment = v.assign_add(1.0).read_value()

@@ -36,7 +36,7 @@ class AssignOpTest(test.TestCase):
     with self.cached_session(use_gpu=use_gpu):
       p = variables.Variable(x)
       assign = state_ops.assign(p, y)
-      p.initializer.run()
+      self.evaluate(p.initializer)
       new_value = self.evaluate(assign)
       return self.evaluate(p), new_value

@@ -45,7 +45,7 @@ class AssignOpTest(test.TestCase):
     with self.cached_session(use_gpu=use_gpu):
       p = variables.Variable(x)
       add = state_ops.assign_add(p, y)
-      p.initializer.run()
+      self.evaluate(p.initializer)
       new_value = self.evaluate(add)
       return self.evaluate(p), new_value

@@ -54,7 +54,7 @@ class AssignOpTest(test.TestCase):
     with self.cached_session(use_gpu=use_gpu):
       p = variables.Variable(x)
       sub = state_ops.assign_sub(p, y)
-      p.initializer.run()
+      self.evaluate(p.initializer)
       new_value = self.evaluate(sub)
       return self.evaluate(p), new_value

@@ -808,7 +808,7 @@ class SafeEmbeddingLookupSparseTest(test.TestCase):
         partitioner=partitioned_variables.fixed_size_partitioner(num_shards),
         initializer=initializer))
     for w in embedding_weights:
-      w.initializer.run()
+      self.evaluate(w.initializer)
     embedding_weights = [w.eval() for w in embedding_weights]
     return embedding_weights

@@ -116,7 +116,7 @@ class ConstantInitializersTest(test.TestCase):
       shape = [2, 3]
       x = variable_scope.get_variable(
           "x", shape=shape, initializer=init_ops.zeros_initializer())
-      x.initializer.run()
+      self.evaluate(x.initializer)
       self.assertAllEqual(x, np.zeros(shape))

   @test_util.run_deprecated_v1
@@ -125,7 +125,7 @@ class ConstantInitializersTest(test.TestCase):
       shape = [2, 3]
       x = variable_scope.get_variable(
           "x", shape=shape, initializer=init_ops.ones_initializer())
-      x.initializer.run()
+      self.evaluate(x.initializer)
       self.assertAllEqual(x, np.ones(shape))

   @test_util.run_deprecated_v1
@@ -134,7 +134,7 @@ class ConstantInitializersTest(test.TestCase):
       shape = [2, 3]
       x = variable_scope.get_variable(
           "x", shape=shape, initializer=init_ops.constant_initializer(0.0))
-      x.initializer.run()
+      self.evaluate(x.initializer)
       self.assertAllEqual(x, np.zeros(shape))

   @test_util.run_deprecated_v1
@@ -143,7 +143,7 @@ class ConstantInitializersTest(test.TestCase):
       shape = [2, 3]
       x = variable_scope.get_variable(
           "x", shape=shape, initializer=init_ops.constant_initializer(1.0))
-      x.initializer.run()
+      self.evaluate(x.initializer)
       self.assertAllEqual(x, np.ones(shape))

   @test_util.run_deprecated_v1
@@ -155,7 +155,7 @@ class ConstantInitializersTest(test.TestCase):
           shape=shape,
           dtype=dtypes.int32,
           initializer=init_ops.constant_initializer(7))
-      x.initializer.run()
+      self.evaluate(x.initializer)
       self.assertEqual(x.dtype.base_dtype, dtypes.int32)
       self.assertAllEqual(x, 7 * np.ones(shape, dtype=np.int32))

@@ -168,7 +168,7 @@ class ConstantInitializersTest(test.TestCase):
           shape=shape,
           dtype=dtypes.int32,
           initializer=init_ops.constant_initializer((10, 20, 30)))
-      x.initializer.run()
+      self.evaluate(x.initializer)
       self.assertEqual(x.dtype.base_dtype, dtypes.int32)
       self.assertAllEqual(x, [10, 20, 30])

@@ -176,7 +176,7 @@ class ConstantInitializersTest(test.TestCase):
     with self.cached_session(use_gpu=True):
       init = init_ops.constant_initializer(value, dtype=dtypes.int32)
       x = variable_scope.get_variable(name, shape=shape, initializer=init)
-      x.initializer.run()
+      self.evaluate(x.initializer)

       actual = array_ops.reshape(x, [-1]).eval()
       self.assertEqual(len(actual), len(expected))
@@ -201,7 +201,7 @@ class ConstantInitializersTest(test.TestCase):
     with self.cached_session(use_gpu=True):
       init = init_ops.constant_initializer(value, dtype=dtypes.int32)
       x = variable_scope.get_variable(name, shape=shape, initializer=init)
-      x.initializer.run()
+      self.evaluate(x.initializer)

       actual = array_ops.reshape(x, [-1]).eval()
       self.assertGreater(len(actual), len(expected))
@@ -931,7 +931,7 @@ class ConvolutionDeltaOrthogonalInitializerTest(test.TestCase):
             "{}".format(i),
             shape=shape,
             initializer=init_ops.convolutional_delta_orthogonal)
-        x.initializer.run()
+        self.evaluate(x.initializer)
         y = self.evaluate(x)[1, 1, :, :]
         determinant = np.linalg.det(y)
         value += determinant
@@ -1000,7 +1000,7 @@ class ConvolutionOrthogonal1dInitializerTest(test.TestCase):
             "{}".format(i),
             shape=shape,
             initializer=init_ops.convolutional_orthogonal_1d)
-        x.initializer.run()
+        self.evaluate(x.initializer)
         y = np.sum(x.eval(), axis=0)
         determinant = np.linalg.det(y)
         value += determinant
@@ -1229,7 +1229,7 @@ class ConvolutionOrthogonal3dInitializerTest(test.TestCase):
             "{}".format(i),
             shape=shape,
             initializer=init_ops.convolutional_orthogonal_3d)
-        x.initializer.run()
+        self.evaluate(x.initializer)
         y = np.sum(x.eval(), axis=(0, 1, 2))
         determinant = np.linalg.det(y)
         value += determinant
@@ -306,7 +306,7 @@ class StaticHashTableTest(BaseLookupTableTest):
     # Init the table in the second session and verify that we do not get a
     # "Table already initialized" error.
     with session2:
-      table.initializer.run()
+      self.evaluate(table.initializer)
       self.assertAllEqual(3, self.evaluate(table.size()))

   @test_util.run_v2_only
@@ -812,7 +812,7 @@ class InitializeTableFromFileOpTest(BaseLookupTableTest):
       # Initialize with non existing file (old_file.txt) should fail.
       # TODO(yleon): Update message, which might change per FileSystem.
       with self.assertRaisesOpError("old_file.txt"):
-        table.initializer.run()
+        self.evaluate(table.initializer)

       # Initialize the model feeding the vocabulary file.
       filenames = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
@@ -2443,7 +2443,7 @@ class IdTableWithHashBucketsTest(test.TestCase):
                   vocab_file, vocab_size=vocab_size), default_value),
           oov_buckets)

-      table.initializer.run()
+      self.evaluate(table.initializer)

       input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])

@@ -2466,7 +2466,7 @@ class IdTableWithHashBucketsTest(test.TestCase):
           oov_buckets,
           key_dtype=dtypes.int32)

-      table.initializer.run()
+      self.evaluate(table.initializer)

       values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int32)

@@ -2487,7 +2487,7 @@ class IdTableWithHashBucketsTest(test.TestCase):
               vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
           default_value), oov_buckets)

-      table.initializer.run()
+      self.evaluate(table.initializer)

       values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64)

@@ -2503,7 +2503,7 @@ class IdTableWithHashBucketsTest(test.TestCase):
       # Set a table that only uses hash buckets, for each input value returns
       # an id calculated by fingerprint("input") mod oov_buckets.
       table = lookup_ops.IdTableWithHashBuckets(None, oov_buckets)
-      table.initializer.run()
+      self.evaluate(table.initializer)

       values = constant_op.constant(("brain", "salad", "surgery"))

@@ -2526,7 +2526,7 @@ class IdTableWithHashBucketsTest(test.TestCase):
       # an id calculated by fingerprint("input") mod oov_buckets.
       table = lookup_ops.IdTableWithHashBuckets(
           None, oov_buckets, key_dtype=dtypes.int32)
-      table.initializer.run()
+      self.evaluate(table.initializer)

       input_string = constant_op.constant([42, 1, -1000], dtype=dtypes.int32)

@@ -2606,7 +2606,7 @@ class IdTableWithHashBucketsTest(test.TestCase):
                   vocab_file, vocab_size=vocab_size), default_value),
           oov_buckets)

-      table1.initializer.run()
+      self.evaluate(table1.initializer)

       input_string_1 = constant_op.constant(
           ["brain", "salad", "surgery", "UNK"])
@@ -2622,7 +2622,7 @@ class IdTableWithHashBucketsTest(test.TestCase):
       oov_buckets = 1

       # Underlying lookup table already initialized in previous session.
-      # No need to call table2.initializer.run()
+      # No need to call self.evaluate(table2.initializer)
       table2 = lookup_ops.IdTableWithHashBuckets(
           lookup_ops.StaticHashTable(
               lookup_ops.TextFileIdTableInitializer(
@@ -2687,7 +2687,7 @@ class IdTableWithHashBucketsTest(test.TestCase):
           lookup_ops.StaticHashTable(
               lookup_ops.TextFileIdTableInitializer(vocab_file, vocab_size=3),
               -1), 1)
-      table.initializer.run()
+      self.evaluate(table.initializer)

       sp_ids = table.lookup(sp_features)

@@ -2716,7 +2716,7 @@ class IdTableWithHashBucketsTest(test.TestCase):
               (42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64), -1),
           1,
           key_dtype=dtypes.int32)
-      table.initializer.run()
+      self.evaluate(table.initializer)

       sp_ids = table.lookup(sp_features)

@@ -2745,7 +2745,7 @@ class IdTableWithHashBucketsTest(test.TestCase):
               (42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64), -1),
           1,
           key_dtype=dtypes.int64)
-      table.initializer.run()
+      self.evaluate(table.initializer)

       sp_ids = table.lookup(sp_features)

@@ -150,7 +150,7 @@ class StatefulScatterNdTest(test.TestCase):
       np_scatter(new, indices, updates)
       # Scatter via tensorflow
       ref_var = variables.VariableV1(ref)
-      ref_var.initializer.run()
+      self.evaluate(ref_var.initializer)
       tf_scatter(ref_var, indices, updates).eval()

       # Compare
@@ -276,7 +276,7 @@ class StatefulScatterNdTest(test.TestCase):
     # update1 = tf.compat.v1.scatter_nd_update(
     #     var, tf.constant(
     #         [[0]], dtype=tf.int64), [False])
-    # var.initializer.run()
+    # self.evaluate(var.initializer)
     # session.run([update0, update1])
     # self.assertAllEqual([False, True], self.evaluate(var))

@@ -292,7 +292,7 @@ class StatefulScatterNdTest(test.TestCase):
     updates = np.array([-3, -4, -5]).astype(np.float32)
     with self.cached_session(use_gpu=False):
       ref = variables.VariableV1(params)
-      ref.initializer.run()
+      self.evaluate(ref.initializer)

       # Indices all in range, no problem.
       indices = np.array([[2], [0], [5]])
@@ -325,7 +325,7 @@ class StatefulScatterNdTest(test.TestCase):
         initial_value=lambda: array_ops.zeros(shape=[], dtype=dtypes.float32),
         dtype=dtypes.float32)
     with self.cached_session():
-      res.initializer.run()
+      self.evaluate(res.initializer)
       with self.assertRaisesOpError("Output must be at least 1-D"):
         state_ops.scatter_nd_update(res, [[0]], [0.22]).eval()

@@ -340,7 +340,7 @@ class StatefulScatterNdTest(test.TestCase):

     expected_result = np.zeros([2, 2], dtype=np.int32)
     with self.cached_session():
-      ref.initializer.run()
+      self.evaluate(ref.initializer)
       self.assertAllEqual(expected_result, self.evaluate(scatter_update))

   @test_util.run_deprecated_v1
@@ -397,7 +397,7 @@ class StatefulScatterNdTest(test.TestCase):
     # We don't test the implementation; just test there's no failures.
     with self.cached_session(force_gpu=True):
       ref = variables.Variable(params)
-      ref.initializer.run()
+      self.evaluate(ref.initializer)

       # Indices all in range, no problem.
       indices = np.array([2, 0, 5])
@@ -306,7 +306,7 @@ class AutomaticStackingTest(test.TestCase):
     with self.session(use_gpu=True):
       v = variables.Variable(17)
       result = ops.convert_to_tensor([[0, 0, 0], [0, v, 0], [0, 0, 0]])
-      v.initializer.run()
+      self.evaluate(v.initializer)
       self.assertAllEqual([[0, 0, 0], [0, 17, 0], [0, 0, 0]],
                           self.evaluate(result))

@@ -240,7 +240,7 @@ class TopKBenchmark(test.Benchmark):
       v = resource_variable_ops.ResourceVariable(x)
       op = nn_ops.top_k(v, k)
       with session.Session() as sess:
-        v.initializer.run()
+        self.evaluate(v.initializer)
         r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
         gb_processed_input = m * n / 1.0e9
         throughput = gb_processed_input / r["wall_time"]
@@ -656,14 +656,14 @@ class IsInitializedTest(test.TestCase):
       self.assertAllEqual(np.array([b"v", b"w"]), self.evaluate(uninited))
       self.evaluate(w.initializer)
       self.assertAllEqual(np.array([b"v"]), self.evaluate(uninited))
-      v.initializer.run()
+      self.evaluate(v.initializer)
       self.assertEqual(0, self.evaluate(uninited).size)

   def testZeroSizeVarInitialized(self):
     with ops.Graph().as_default(), self.cached_session() as sess:
       v = variables.Variable(array_ops.zeros([0, 2]), name="v")
       uninited = variables.report_uninitialized_variables()
-      v.initializer.run()  # not strictly necessary
+      self.evaluate(v.initializer)  # not strictly necessary
       self.assertEqual(0, self.evaluate(uninited).size)

   def testTrainingWithZeroSizeVar(self):
@@ -707,7 +707,7 @@ class ObsoleteIsInitializedTest(test.TestCase):
       self.evaluate(w.initializer)
       with self.assertRaisesOpError("Attempting to use uninitialized value"):
         inited.op.run()
-      v.initializer.run()
+      self.evaluate(v.initializer)
       inited.op.run()

@@ -284,7 +284,7 @@ class WhereBenchmark(test.Benchmark):
       v = resource_variable_ops.ResourceVariable(x)
       op = array_ops.where(v)
       with session.Session(config=benchmark.benchmark_config()) as sess:
-        v.initializer.run()
+        self.evaluate(v.initializer)
         r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
         gb_processed_input = m * n / 1.0e9
         # approximate size of output: m*n*p int64s for each axis.
@@ -310,9 +310,9 @@ class WhereBenchmark(test.Benchmark):
       c = resource_variable_ops.ResourceVariable(c_gen)
       op = array_ops.where(c, x, y)
       with session.Session(config=benchmark.benchmark_config()) as sess:
-        x.initializer.run()
-        y.initializer.run()
-        c.initializer.run()
+        self.evaluate(x.initializer)
+        self.evaluate(y.initializer)
+        self.evaluate(c.initializer)
         r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
         # approximate size of output: m*n*2 floats for each axis.
         gb_processed = m * n * 8 / 1.0e9
@@ -174,7 +174,7 @@ class WhileV2Test(test.TestCase, parameterized.TestCase):
   def testExternalControlDependencies(self):
     with ops.Graph().as_default(), self.test_session():
       v = variables.Variable(1.)
-      v.initializer.run()
+      self.evaluate(v.initializer)
       op = v.assign_add(1.)

       def body_fn(i):  # pylint: disable=invalid-name