Apply tf1-tf2 renames to tensorflow/python/kernel_tests docstrings and comments.

No code changes, only doc-strings and comments.

PiperOrigin-RevId: 244372113
commit 41d03e186c
parent b6e4bf24eb
@@ -1058,8 +1058,8 @@ class StridedSliceAssignChecker(object):
       var = variables.Variable(self.x)
       sess.run(variables.variables_initializer([var]))
       val = sess.run(var[index].assign(value))
-      # val_copy is used to check that tf.assign works equivalently to the
-      # assign method above.
+      # val_copy is used to check that tf.compat.v1.assign works equivalently
+      # to the assign method above.
       val_copy = sess.run(state_ops.assign(var[index], value))
       valnp = np.copy(self.x_np)
       valnp[index] = np.array(value)
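For context, the equivalence this hunk's comment describes can be sketched with the public compat.v1 API; the index and values below are illustrative, not from the test:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

var = tf.Variable(np.zeros([2, 3], dtype=np.float32))
with tf.Session() as sess:
  sess.run(tf.variables_initializer([var]))
  # Sliced assignment through the variable's own assign method...
  val = sess.run(var[0, 1:3].assign([10.0, 11.0]))
  # ...matches tf.assign applied to the same slice; both return the
  # updated variable.
  val_copy = sess.run(tf.assign(var[0, 1:3], [10.0, 11.0]))
  assert np.array_equal(val, val_copy)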
@@ -486,7 +486,7 @@ class ClipTest(test.TestCase):
 
   def testClipByAverageNormReplacedWithClipByNorm(self):
     # Check clip_by_average_norm(t) is the same as
-    # clip_by_norm(t, clip_norm * tf.to_float(tf.size(t)))
+    # clip_by_norm(t, clip_norm * tf.compat.v1.to_float(tf.size(t)))
     with self.session(use_gpu=True):
       x = constant_op.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
       # Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
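As a worked check of that identity: for x = [-3, 0, 0, 4, 0, 0], ||x|| = 5 and size(x) = 6, so the average norm 5/6 ≈ 0.8333 exceeds a clip_norm of 0.5 and both ops scale x by 0.5 * 6 / 5 = 0.6. A minimal sketch against the compat.v1 API:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
clip_norm = 0.5
a = tf.clip_by_average_norm(x, clip_norm)
# The equivalent formulation the comment in the hunk describes.
b = tf.clip_by_norm(x, clip_norm * tf.to_float(tf.size(x)))
with tf.Session() as sess:
  ya, yb = sess.run([a, b])
  np.testing.assert_allclose(ya, yb)  # both scale x by 0.6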
@@ -865,7 +865,7 @@ class PlaceholderTest(test.TestCase):
     # Load graph generated from earlier version of TF where
     # placeholder shape was not set.
     #
-    # a = tf.placeholder(tf.float32)
+    # a = tf.compat.v1.placeholder(tf.float32)
     # b = a + 1.0
     #
     # Older graph's default shape is 'shape {}', not 'shape {
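A runnable sketch of the graph the comment describes, written against today's compat.v1 API with the shape deliberately left unset:

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

a = tf.placeholder(tf.float32)  # no shape argument, as in the old graph
b = a + 1.0
with tf.Session() as sess:
  print(sess.run(b, feed_dict={a: [1.0, 2.0]}))  # [2. 3.]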
@@ -295,8 +295,9 @@ class NdtrGradientTest(test.TestCase):
     # grad_eval.shape = (N, N), with grad_eval[i, j] the partial derivative of
     # the ith output point w.r.t. the jth grid point. We only expect the
     # diagonal to be nonzero.
-    # TODO(b/31131137): Replace tf.test.compute_gradient with our own custom
-    # gradient evaluation to ensure we correctly handle small function delta.
+    # TODO(b/31131137): Replace tf.compat.v1.test.compute_gradient with our
+    # own custom gradient evaluation to ensure we correctly handle small
+    # function delta.
     grad_eval, _ = gradient_checker.compute_gradient(grid, grid_spec.shape,
                                                      fn(grid),
                                                      grid_spec.shape)
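For reference, a minimal sketch of the tf.compat.v1.test.compute_gradient call the TODO refers to; y = x**2 is invented here (the real test differentiates the normal CDF), and the diagonal-Jacobian shape matches the comment above:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

with tf.Session():
  x = tf.constant(np.linspace(-1.0, 1.0, 5))
  y = tf.square(x)
  # Returns (theoretical, numerical) Jacobians of shape (5, 5); only the
  # diagonal entries dy_i/dx_i = 2 * x_i should be nonzero.
  jac_t, jac_n = tf.test.compute_gradient(x, [5], y, [5])
  np.testing.assert_allclose(jac_t, jac_n, atol=1e-5)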
@@ -118,7 +118,8 @@ class ExtractImagePatchesGradTest(test.TestCase):
         rates=[1, 1, 1, 1],
         padding='SAME')
     # Github issue: #20146
-    # tf.extract_image_patches() gradient very slow at graph construction time
+    # tf.image.extract_image_patches() gradient very slow at graph construction
+    # time
     gradients = gradients_impl.gradients(patches, images)
     # Won't time out.
     self.assertIsNotNone(gradients)
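A sketch of the construction pattern from that issue using the public compat.v1 API (the image and patch sizes are arbitrary); the point is that building the gradient graph should finish quickly:

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

images = tf.random_normal([1, 64, 64, 3])
patches = tf.image.extract_image_patches(
    images, ksizes=[1, 8, 8, 1], strides=[1, 4, 4, 1],
    rates=[1, 1, 1, 1], padding='SAME')
gradients = tf.gradients(patches, images)  # should not hang at graph build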
@@ -272,7 +272,8 @@ class GatherTest(test.TestCase, parameterized.TestCase):
           expected=[[[[8, 9], [9, 8]], [[8, 8], [9, 9]]],
                     [[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
 
-      # batch_dims=indices.shape.ndims - 1 (equivalent to tf.batch_gather)
+      # batch_dims=indices.shape.ndims - 1
+      # (equivalent to tf.compat.v1.batch_gather)
       dict(  # 2D indices (1 batch dim)
           batch_dims=1,
           params=[[10, 11, 12, 13], [20, 21, 22, 23]],
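To illustrate the equivalence the comment names, a small TF2-style sketch using the same params as the test case and made-up indices:

import tensorflow as tf

params = tf.constant([[10, 11, 12, 13], [20, 21, 22, 23]])
indices = tf.constant([[2, 0], [1, 3]])
# batch_dims == indices.shape.ndims - 1 gathers within each batch row,
# which is what tf.compat.v1.batch_gather did.
result = tf.gather(params, indices, batch_dims=1)
# result == [[12, 10], [21, 23]]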
@@ -39,7 +39,7 @@ class LinearOperatorIdentityTest(
 
   @property
   def _dtypes_to_test(self):
-    # TODO(langmore) Test tf.float16 once tf.matrix_solve works in
+    # TODO(langmore) Test tf.float16 once tf.linalg.solve works in
     # 16bit.
     return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
 
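For context on the TODO: tf.linalg.solve accepts float32, float64, and complex dtypes but not float16, which is why float16 is excluded from the list above. A minimal sketch with invented values:

import tensorflow as tf

a = tf.constant([[2.0, 0.0], [0.0, 4.0]])
b = tf.constant([[1.0], [2.0]])
x = tf.linalg.solve(a, b)  # works in float32; float16 is not supported
# x == [[0.5], [0.5]]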
@@ -80,7 +80,7 @@ class LinearOperatorIdentityTest(
     operator.assert_self_adjoint().run()  # Should not fail
 
   def test_float16_matmul(self):
-    # float16 cannot be tested by base test class because tf.matrix_solve does
+    # float16 cannot be tested by base test class because tf.linalg.solve does
     # not work with float16.
     with self.cached_session():
       operator = linalg_lib.LinearOperatorIdentity(
@@ -287,7 +287,7 @@ class LinearOperatorScaledIdentityTest(
 
   @property
   def _dtypes_to_test(self):
-    # TODO(langmore) Test tf.float16 once tf.matrix_solve works in
+    # TODO(langmore) Test tf.float16 once tf.linalg.solve works in
     # 16bit.
     return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
 
@@ -374,7 +374,7 @@ class LinearOperatorScaledIdentityTest(
     operator.assert_self_adjoint().run()
 
   def test_float16_matmul(self):
-    # float16 cannot be tested by base test class because tf.matrix_solve does
+    # float16 cannot be tested by base test class because tf.linalg.solve does
     # not work with float16.
     with self.cached_session():
       multiplier = rng.rand(3).astype(np.float16)
@@ -212,7 +212,7 @@ class ParseExampleTest(test.TestCase):
                 "a": parsing_ops.FixedLenFeature((1, 3), dtypes.float32)
             }
         },
-        # TODO(mrry): Consider matching the `tf.parse_example()` error message.
+        # TODO(mrry): Consider matching the `io.parse_example()` error message.
         expected_err=(errors_impl.OpError, "Key: a."))
 
   def testDenseDefaultNoShapeShouldFail(self):
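A minimal sketch of the happy path for the `(1, 3)` FixedLenFeature above, using the TF2-style API and invented feature values:

import tensorflow as tf

example = tf.train.Example(features=tf.train.Features(feature={
    "a": tf.train.Feature(
        float_list=tf.train.FloatList(value=[1.0, 2.0, 3.0])),
}))
parsed = tf.io.parse_example(
    tf.constant([example.SerializeToString()]),
    {"a": tf.io.FixedLenFeature((1, 3), tf.float32)})
# parsed["a"] has shape [batch, 1, 3] == [1, 1, 3]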
@@ -774,7 +774,7 @@ class ParseExampleTest(test.TestCase):
                     (2, 1, 1), dtype=dtypes.string, allow_missing=True),
             }
         },
-        # TODO(mrry): Consider matching the `tf.parse_example()` error message.
+        # TODO(mrry): Consider matching the `io.parse_example()` error message.
         expected_err=(errors_impl.OpError, "Key: b."))
 
     self._test(
@@ -1131,7 +1131,7 @@ class ParseSequenceExampleTest(test.TestCase):
                 expected_context_values=None,
                 expected_feat_list_values=None,
                 expected_err=None):
-    # Test using tf.parse_single_sequence_example
+    # Test using tf.io.parse_single_sequence_example
     self._test(
         kwargs,
         expected_context_values=expected_context_values,
@@ -335,8 +335,8 @@ class PyFuncTest(test.TestCase):
 
   @test_util.run_v1_only("b/120545219")
   def testGradientFunction(self):
-    # Input to tf.py_func is necessary, otherwise get_gradient_function()
-    # returns None per default.
+    # Input to tf.compat.v1.py_func is necessary,
+    # otherwise get_gradient_function() returns None per default.
    a = constant_op.constant(0)
     x, = script_ops.py_func(lambda a: 0, [a], [dtypes.int64])
     y, = script_ops.py_func(lambda a: 0, [a], [dtypes.int64], stateful=False)
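The same pattern through the public API, as a sketch (the doubling function is invented):

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

a = tf.constant(0)
# The Tensor input `a` is what gives the op a registered gradient
# function; without it, get_gradient_function() returns None.
x, = tf.py_func(lambda v: v * 2, [a], [tf.int32])
y, = tf.py_func(lambda v: v * 2, [a], [tf.int32], stateful=False)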
@@ -353,7 +353,8 @@ class PyFuncTest(test.TestCase):
 
   @test_util.run_v1_only("b/120545219")
   def testParallel(self):
-    # Tests that tf.py_func's can run in parallel if they release the GIL.
+    # Tests that tf.compat.v1.py_func's can run in parallel if they release
+    # the GIL.
     with self.cached_session() as session:
       q = queue.Queue(1)
 
@@ -1144,7 +1144,8 @@ class ResourceVariableOpsTest(test_util.TensorFlowTestCase,
           expected=[[[[8, 9], [9, 8]], [[8, 8], [9, 9]]],
                     [[[9, 9], [8, 8]], [[8, 9], [9, 8]]]]),
 
-      # batch_dims=indices.shape.ndims - 1 (equivalent to tf.batch_gather)
+      # batch_dims=indices.shape.ndims - 1 (equivalent to
+      # tf.compat.v1.batch_gather)
       dict(  # 2D indices (1 batch dim)
           batch_dims=1,
           params=[[10, 11, 12, 13], [20, 21, 22, 23]],
@@ -250,8 +250,8 @@ class StatefulScatterNdTest(test.TestCase):
   # def testBooleanScatterUpdate(self):
   #   with self.session(use_gpu=False) as session:
   #     var = tf.Variable([True, False])
-  #     update0 = tf.scatter_nd_update(var, [[1]], [True])
-  #     update1 = tf.scatter_nd_update(
+  #     update0 = tf.compat.v1.scatter_nd_update(var, [[1]], [True])
+  #     update1 = tf.compat.v1.scatter_nd_update(
   #         var, tf.constant(
   #             [[0]], dtype=tf.int64), [False])
   #     var.initializer.run()
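The hunk above only touches a commented-out (disabled) test, but the same pattern runs with a numeric dtype; a sketch with invented values:

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

var = tf.Variable([1.0, 2.0, 3.0])
# Write 20.0 at index [1] of the variable.
update = tf.scatter_nd_update(var, [[1]], [20.0])
with tf.Session() as sess:
  var.initializer.run()
  print(sess.run(update))  # [ 1. 20.  3.]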
@@ -31,11 +31,11 @@ def grappler_optimize(graph, fetches=None, config_proto=None):
     fetches: An optional list of `Tensor`s to fetch (i.e. not optimize away).
       Grappler uses the 'train_op' collection to look for fetches, so if not
       provided this collection should be non-empty.
-    config_proto: An optional `tf.ConfigProto` to use when rewriting the
-      graph.
+    config_proto: An optional `tf.compat.v1.ConfigProto` to use when rewriting
+      the graph.
 
   Returns:
-    A `tf.GraphDef` containing the rewritten graph.
+    A `tf.compat.v1.GraphDef` containing the rewritten graph.
   """
   if config_proto is None:
     config_proto = config_pb2.ConfigProto()
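A hypothetical usage sketch of this helper, per its docstring; the variable and train op are invented, and the helper's body beyond the lines shown is elided in the diff:

import tensorflow.compat.v1 as tf

graph = tf.Graph()
with graph.as_default():
  x = tf.Variable(1.0)
  train_op = x.assign_add(1.0)
  # Grappler looks in the 'train_op' collection for fetches.
  tf.add_to_collection('train_op', train_op)
optimized = grappler_optimize(graph)  # -> a tf.compat.v1.GraphDef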
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-"""Tests for the gradient of `tf.sparse_tensor_dense_matmul()`."""
+"""Tests for the gradient of `tf.sparse.sparse_dense_matmul()`."""
 
 from __future__ import absolute_import
 from __future__ import division
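For reference, a small sketch of the renamed op itself, with invented values:

import tensorflow as tf

sp = tf.sparse.SparseTensor(
    indices=[[0, 0], [1, 2]], values=[1.0, 2.0], dense_shape=[2, 3])
dense = tf.ones([3, 4])
result = tf.sparse.sparse_dense_matmul(sp, dense)  # dense result, shape [2, 4]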
@@ -737,7 +737,7 @@ class SummaryWriterTest(test_util.TensorFlowTestCase):
     with summary_ops.create_file_writer_v2(
         logdir, max_queue=1, flush_millis=999999).as_default():
       get_total = lambda: len(events_from_logdir(logdir))
-      # Note: First tf.Event is always file_version.
+      # Note: First tf.compat.v1.Event is always file_version.
       self.assertEqual(1, get_total())
       summary_ops.write('tag', 1, step=0)
       self.assertEqual(1, get_total())
@@ -769,7 +769,7 @@ class SummaryWriterTest(test_util.TensorFlowTestCase):
         logdir, max_queue=999999, flush_millis=999999)
     with writer.as_default():
       get_total = lambda: len(events_from_logdir(logdir))
-      # Note: First tf.Event is always file_version.
+      # Note: First tf.compat.v1.Event is always file_version.
       self.assertEqual(1, get_total())
       summary_ops.write('tag', 1, step=0)
       summary_ops.write('tag', 1, step=0)
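A TF2-style sketch of the behaviour both SummaryWriter hunks rely on — the writer emits a file_version Event when the event file is created, before any user summaries (the logdir path is arbitrary):

import tensorflow as tf

writer = tf.summary.create_file_writer('/tmp/demo_logdir', max_queue=1)
with writer.as_default():
  # At this point the event file already holds one record: the
  # file_version Event.
  tf.summary.scalar('tag', 1.0, step=0)
writer.flush()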