Set the default value of the `use_gpu` parameter to True in test utilities
Also, fix a bug in IsFunctionCallOp found by this CL.

Contrary to what it sounds like, `use_gpu` does not force the test to run on GPUs; it merely *allows* the test to run on GPUs (there is a separate `force_gpu` option for forcing). Setting `use_gpu` to `True` therefore means that the test will run on a GPU if one is available.

Given this, `use_gpu=True` is the sensible default, and there should be a good reason for a test to set it to `False` (which disallows GPU use even when a GPU is available). For this reason, this CL changes the default value of `use_gpu`. As the changes below show, this has already found a few real bugs. In a later CL I will remove instances that pass use_gpu=True explicitly, as those should no longer be necessary.

PiperOrigin-RevId: 356906251
Change-Id: Ibd0f785af0d2b1290dc40e84f928ff4291a58fe7
This commit is contained in: parent 443f13e41a · commit 750beea911
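To make the allow-vs-force distinction concrete, here is a minimal, hypothetical sketch using the TensorFlowTestCase helpers this CL changes (ExampleTest and its test bodies are illustrative, not part of this commit):

from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test


class ExampleTest(test.TestCase):

  def testMayRunOnGpu(self):
    # use_gpu=True merely *allows* GPU placement; on a CPU-only machine the
    # test still runs and passes. After this CL it is the default.
    with self.cached_session(use_gpu=True):
      self.assertAllEqual(2.0 * constant_op.constant(3.0), 6.0)

  def testMustRunOnGpu(self):
    # force_gpu=True pins ops to a GPU and fails when none is available.
    with self.session(force_gpu=True):
      self.assertAllEqual(2.0 * constant_op.constant(3.0), 6.0)


if __name__ == "__main__":
  test.main()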
@@ -64,7 +64,8 @@ void MemoryTypesHelper(const NameRangeMap& name_map,
 bool IsFunctionCallOp(const string& op_type) {
   return op_type == "SymbolicGradient" || op_type == "PartitionedCall" ||
-         op_type == "StatefulPartitionedCall" || op_type == "While";
+         op_type == "StatefulPartitionedCall" || op_type == "While" ||
+         op_type == "StatelessWhile";
 }
 
 }  // namespace
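For context on the IsFunctionCallOp fix: a `tf.while_loop` whose condition and body are stateless lowers to a `StatelessWhile` op rather than `While`, which the predicate above previously missed. A small sketch against the public TF 2.x API (not part of this commit) shows the op type:

import tensorflow as tf


@tf.function
def count_down(n):
  # AutoGraph lowers this loop to a functional while node; with no stateful
  # ops in the condition or body it is emitted as "StatelessWhile".
  while n > 0:
    n -= 1
  return n


graph = count_down.get_concrete_function(tf.TensorSpec([], tf.int32)).graph
print([op.type for op in graph.get_operations() if "While" in op.type])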
@@ -422,7 +422,9 @@ class FunctionTest(test.TestCase):
       with ops.control_dependencies([z]):
         return x * 2
 
-    with ops.Graph().as_default(), self.cached_session():
+    # @function.Defun creates a non-partitioned function. If we place this on
+    # the GPU then the inner `Print` op cannot be run.
+    with ops.Graph().as_default(), self.cached_session(use_gpu=False):
       z = Foo(constant_op.constant(3.0))
       self.assertAllEqual(z, 6.0)
@@ -2273,7 +2273,7 @@ class TensorFlowTestCase(googletest.TestCase):
 
   # pylint: disable=g-doc-return-or-yield
   @contextlib.contextmanager
-  def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
+  def session(self, graph=None, config=None, use_gpu=True, force_gpu=False):
     """A context manager for a TensorFlow Session for use in executing tests.
 
     Note that this will set this session and the graph as global defaults.
@@ -2320,7 +2320,7 @@ class TensorFlowTestCase(googletest.TestCase):
   def cached_session(self,
                      graph=None,
                      config=None,
-                     use_gpu=False,
+                     use_gpu=True,
                      force_gpu=False):
     """Returns a TensorFlow Session for use in executing tests.
 
@@ -2374,7 +2374,7 @@ class TensorFlowTestCase(googletest.TestCase):
   def test_session(self,
                    graph=None,
                    config=None,
-                   use_gpu=False,
+                   use_gpu=True,
                    force_gpu=False):
     """Use cached_session instead."""
     if self.id().endswith(".test_session"):
@@ -265,6 +265,7 @@ class AtrousConvolutionTest(test.TestCase):
     self.assertLess(err, err_tolerance)
 
   @test_util.run_v1_only("b/120545219")
+  @test_util.disable_xla("b/178665095")
   def testGradient(self):
     with self.cached_session():
       for padding in ["SAME", "VALID"]:
@@ -135,6 +135,7 @@ class BetaincTest(test.TestCase):
     self._testBetaInc(a_s, b_s, x_s, dtypes.float64)
 
   @test_util.run_deprecated_v1
+  @test_util.disable_xla("b/178338235")
   def testBetaIncDoubleVerySmallValues(self):
     a_s = np.abs(np.random.randn(10, 10) * 1e-16)  # in (0, infty)
     b_s = np.abs(np.random.randn(10, 10) * 1e-16)  # in (0, infty)
@@ -142,6 +143,7 @@ class BetaincTest(test.TestCase):
     self._testBetaInc(a_s, b_s, x_s, dtypes.float64)
 
   @test_util.run_deprecated_v1
+  @test_util.disable_xla("b/178338235")
   def testBetaIncFloatVerySmallValues(self):
     a_s = np.abs(np.random.randn(10, 10) * 1e-8)  # in (0, infty)
     b_s = np.abs(np.random.randn(10, 10) * 1e-8)  # in (0, infty)
@@ -1130,7 +1130,10 @@ class CondV2Test(test.TestCase):
 
   @test_util.run_deprecated_v1
   def testLoweringDisabledWithSingleThreadedExecutorContext(self):
-    with self.session(graph=ops.Graph()) as sess:
+    # Single threaded executor does not support partitioned graphs, so we
+    # can't run on GPUs (running on GPU requires a mixed CPU/GPU graph).
+    with self.session(graph=ops.Graph(), use_gpu=False) as sess:
 
       @function.defun
       def _add_cond(x):
         return cond_v2.cond_v2(
@@ -68,8 +68,11 @@ class Conv2DBackpropFilterGradTest(test.TestCase):
             [in_val, out_backprop_val], [in_shape, out_backprop_shape],
             output, filter_shape)
         print("conv2d_backprop_filter gradient err = %g " % err)
-        err_tolerance = 2e-3
-        self.assertLess(err, err_tolerance)
+        err_tolerance = 3e-2 if test.is_gpu_available() else 2e-3
+        self.assertLess(
+            err,
+            err_tolerance,
+            msg="padding={0},stride={1},".format(str(padding), stride))
 
   @test_util.run_deprecated_v1
   def testGradientDilatedConv(self):
@@ -220,7 +220,7 @@ class Conv3DTransposeTest(test.TestCase):
       err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
                                                     output, y_shape)
       print("conv3d_transpose gradient err = %g " % err)
-      err_tolerance = 0.0005
+      err_tolerance = 0.00055
       self.assertLess(err, err_tolerance)
 
@@ -306,7 +306,8 @@ class DynamicPartitionTest(test.TestCase):
 
   @test_util.run_deprecated_v1
   def testScalarIndexOutOfRange(self):
-    with self.cached_session() as sess:
+    # GPU kernels don't throw exceptions.
+    with self.cached_session(use_gpu=False):
       bad = 17
       data = np.zeros(5)
       partitions = data_flow_ops.dynamic_partition(data, bad, num_partitions=7)
@@ -315,7 +316,8 @@ class DynamicPartitionTest(test.TestCase):
 
   @test_util.run_deprecated_v1
   def testHigherRankIndexOutOfRange(self):
-    with self.cached_session() as sess:
+    # GPU kernels don't throw exceptions.
+    with self.cached_session(use_gpu=False) as sess:
       shape = (2, 3)
       indices = array_ops.placeholder(shape=shape, dtype=np.int32)
       data = np.zeros(shape + (5,))
@@ -218,9 +218,10 @@ class MatrixTriangularSolveOpTest(test.TestCase):
   def testNotInvertible(self):
-    # The input should be invertible.
-    # FIXME(rmlarsen): The GPU kernel does not check for singularity.
+    # The matrix is singular because it has a zero on the diagonal.
     singular_matrix = np.array([[1., 0., -1.], [-1., 0., 1.], [0., -1., 1.]])
-    with self.cached_session():
+
+    # FIXME(rmlarsen): The GPU kernel does not check for singularity.
+    with self.cached_session(use_gpu=False):
       with self.assertRaisesOpError("Input matrix is not invertible."):
         self._verifySolve(singular_matrix, singular_matrix)
       with self.assertRaisesOpError("Input matrix is not invertible."):
@@ -1015,7 +1015,8 @@ class ReluTest(test_lib.TestCase):
     # Test that relu(nan) = nan for various sizes.
     for i in range(18):
       x = np.zeros(i) + np.nan
-      with self.cached_session():
+      # TODO(b/178335491): This is broken on GPU today.
+      with self.cached_session(use_gpu=False):
         z = nn_ops.relu(constant_op.constant(x)).eval()
         self.assertTrue(np.isnan(z).all())