diff --git a/tensorflow/core/framework/memory_types.cc b/tensorflow/core/framework/memory_types.cc
index 208ad20c21b..2c71c94c8ee 100644
--- a/tensorflow/core/framework/memory_types.cc
+++ b/tensorflow/core/framework/memory_types.cc
@@ -64,7 +64,8 @@ void MemoryTypesHelper(const NameRangeMap& name_map,
 
 bool IsFunctionCallOp(const string& op_type) {
   return op_type == "SymbolicGradient" || op_type == "PartitionedCall" ||
-         op_type == "StatefulPartitionedCall" || op_type == "While";
+         op_type == "StatefulPartitionedCall" || op_type == "While" ||
+         op_type == "StatelessWhile";
 }
 
 }  // namespace
diff --git a/tensorflow/python/framework/function_test.py b/tensorflow/python/framework/function_test.py
index 4c9b6122e48..2b3e7531162 100644
--- a/tensorflow/python/framework/function_test.py
+++ b/tensorflow/python/framework/function_test.py
@@ -422,7 +422,9 @@ class FunctionTest(test.TestCase):
       with ops.control_dependencies([z]):
         return x * 2
 
-    with ops.Graph().as_default(), self.cached_session():
+    # @function.Defun creates a non-partitioned function. If we place this on
+    # the GPU then the inner `Print` op cannot be run.
+    with ops.Graph().as_default(), self.cached_session(use_gpu=False):
       z = Foo(constant_op.constant(3.0))
       self.assertAllEqual(z, 6.0)
 
diff --git a/tensorflow/python/framework/test_util.py b/tensorflow/python/framework/test_util.py
index 319768280d5..893351bdaf0 100644
--- a/tensorflow/python/framework/test_util.py
+++ b/tensorflow/python/framework/test_util.py
@@ -2273,7 +2273,7 @@ class TensorFlowTestCase(googletest.TestCase):
   # pylint: disable=g-doc-return-or-yield
   @contextlib.contextmanager
-  def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
+  def session(self, graph=None, config=None, use_gpu=True, force_gpu=False):
     """A context manager for a TensorFlow Session for use in executing tests.
 
     Note that this will set this session and the graph as global defaults.
@@ -2320,7 +2320,7 @@ class TensorFlowTestCase(googletest.TestCase):
   def cached_session(self,
                      graph=None,
                      config=None,
-                     use_gpu=False,
+                     use_gpu=True,
                      force_gpu=False):
     """Returns a TensorFlow Session for use in executing tests.
 
@@ -2374,7 +2374,7 @@ class TensorFlowTestCase(googletest.TestCase):
   def test_session(self,
                    graph=None,
                    config=None,
-                   use_gpu=False,
+                   use_gpu=True,
                    force_gpu=False):
     """Use cached_session instead."""
     if self.id().endswith(".test_session"):
diff --git a/tensorflow/python/kernel_tests/atrous_convolution_test.py b/tensorflow/python/kernel_tests/atrous_convolution_test.py
index 2fb8a37e2b9..13686d21b57 100644
--- a/tensorflow/python/kernel_tests/atrous_convolution_test.py
+++ b/tensorflow/python/kernel_tests/atrous_convolution_test.py
@@ -265,6 +265,7 @@ class AtrousConvolutionTest(test.TestCase):
     self.assertLess(err, err_tolerance)
 
   @test_util.run_v1_only("b/120545219")
+  @test_util.disable_xla("b/178665095")
   def testGradient(self):
     with self.cached_session():
       for padding in ["SAME", "VALID"]:
diff --git a/tensorflow/python/kernel_tests/betainc_op_test.py b/tensorflow/python/kernel_tests/betainc_op_test.py
index 727e15b1661..c8d57c431a9 100644
--- a/tensorflow/python/kernel_tests/betainc_op_test.py
+++ b/tensorflow/python/kernel_tests/betainc_op_test.py
@@ -135,6 +135,7 @@ class BetaincTest(test.TestCase):
     self._testBetaInc(a_s, b_s, x_s, dtypes.float64)
 
   @test_util.run_deprecated_v1
+  @test_util.disable_xla("b/178338235")
   def testBetaIncDoubleVerySmallValues(self):
     a_s = np.abs(np.random.randn(10, 10) * 1e-16)  # in (0, infty)
     b_s = np.abs(np.random.randn(10, 10) * 1e-16)  # in (0, infty)
@@ -142,6 +143,7 @@ class BetaincTest(test.TestCase):
     self._testBetaInc(a_s, b_s, x_s, dtypes.float64)
 
   @test_util.run_deprecated_v1
+  @test_util.disable_xla("b/178338235")
   def testBetaIncFloatVerySmallValues(self):
     a_s = np.abs(np.random.randn(10, 10) * 1e-8)  # in (0, infty)
     b_s = np.abs(np.random.randn(10, 10) * 1e-8)  # in (0, infty)
diff --git a/tensorflow/python/kernel_tests/cond_v2_test.py b/tensorflow/python/kernel_tests/cond_v2_test.py
index 4ee4ca525aa..5e466728045 100644
--- a/tensorflow/python/kernel_tests/cond_v2_test.py
+++ b/tensorflow/python/kernel_tests/cond_v2_test.py
@@ -1130,7 +1130,10 @@ class CondV2Test(test.TestCase):
 
   @test_util.run_deprecated_v1
   def testLoweringDisabledWithSingleThreadedExecutorContext(self):
-    with self.session(graph=ops.Graph()) as sess:
+    # Single threaded executor does not support partitioned graphs, so we can't
+    # run on GPUs (running on GPU requires a mixed CPU/GPU graph).
+    with self.session(graph=ops.Graph(), use_gpu=False) as sess:
+
       @function.defun
       def _add_cond(x):
         return cond_v2.cond_v2(
diff --git a/tensorflow/python/kernel_tests/conv2d_backprop_filter_grad_test.py b/tensorflow/python/kernel_tests/conv2d_backprop_filter_grad_test.py
index c6e5945bd75..e14a7191903 100644
--- a/tensorflow/python/kernel_tests/conv2d_backprop_filter_grad_test.py
+++ b/tensorflow/python/kernel_tests/conv2d_backprop_filter_grad_test.py
@@ -68,8 +68,11 @@ class Conv2DBackpropFilterGradTest(test.TestCase):
               [in_val, out_backprop_val], [in_shape, out_backprop_shape],
               output, filter_shape)
           print("conv2d_backprop_filter gradient err = %g " % err)
-          err_tolerance = 2e-3
-          self.assertLess(err, err_tolerance)
+          err_tolerance = 3e-2 if test.is_gpu_available() else 2e-3
+          self.assertLess(
+              err,
+              err_tolerance,
+              msg="padding={0},stride={1},".format(str(padding), stride))
 
   @test_util.run_deprecated_v1
   def testGradientDilatedConv(self):
diff --git a/tensorflow/python/kernel_tests/conv3d_transpose_test.py b/tensorflow/python/kernel_tests/conv3d_transpose_test.py
index 22ba5b90375..53968b26416 100644
--- a/tensorflow/python/kernel_tests/conv3d_transpose_test.py
+++ b/tensorflow/python/kernel_tests/conv3d_transpose_test.py
@@ -220,7 +220,7 @@ class Conv3DTransposeTest(test.TestCase):
       err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
                                                     output, y_shape)
       print("conv3d_transpose gradient err = %g " % err)
-      err_tolerance = 0.0005
+      err_tolerance = 0.00055
       self.assertLess(err, err_tolerance)
 
 
diff --git a/tensorflow/python/kernel_tests/dynamic_partition_op_test.py b/tensorflow/python/kernel_tests/dynamic_partition_op_test.py
index 0fd9790c794..2858f119a72 100644
--- a/tensorflow/python/kernel_tests/dynamic_partition_op_test.py
+++ b/tensorflow/python/kernel_tests/dynamic_partition_op_test.py
@@ -306,7 +306,8 @@ class DynamicPartitionTest(test.TestCase):
 
   @test_util.run_deprecated_v1
   def testScalarIndexOutOfRange(self):
-    with self.cached_session() as sess:
+    # GPU kernels don't throw exceptions.
+    with self.cached_session(use_gpu=False):
       bad = 17
       data = np.zeros(5)
       partitions = data_flow_ops.dynamic_partition(data, bad, num_partitions=7)
@@ -315,7 +316,8 @@ class DynamicPartitionTest(test.TestCase):
 
   @test_util.run_deprecated_v1
   def testHigherRankIndexOutOfRange(self):
-    with self.cached_session() as sess:
+    # GPU kernels don't throw exceptions.
+    with self.cached_session(use_gpu=False) as sess:
       shape = (2, 3)
       indices = array_ops.placeholder(shape=shape, dtype=np.int32)
       data = np.zeros(shape + (5,))
diff --git a/tensorflow/python/kernel_tests/matrix_triangular_solve_op_test.py b/tensorflow/python/kernel_tests/matrix_triangular_solve_op_test.py
index 683b1188ffb..a497a0d0df8 100644
--- a/tensorflow/python/kernel_tests/matrix_triangular_solve_op_test.py
+++ b/tensorflow/python/kernel_tests/matrix_triangular_solve_op_test.py
@@ -218,9 +218,10 @@ class MatrixTriangularSolveOpTest(test.TestCase):
   def testNotInvertible(self):
     # The input should be invertible.
     # The matrix is singular because it has a zero on the diagonal.
-    # FIXME(rmlarsen): The GPU kernel does not check for singularity.
     singular_matrix = np.array([[1., 0., -1.], [-1., 0., 1.], [0., -1., 1.]])
-    with self.cached_session():
+
+    # FIXME(rmlarsen): The GPU kernel does not check for singularity.
+    with self.cached_session(use_gpu=False):
       with self.assertRaisesOpError("Input matrix is not invertible."):
         self._verifySolve(singular_matrix, singular_matrix)
       with self.assertRaisesOpError("Input matrix is not invertible."):
diff --git a/tensorflow/python/ops/nn_test.py b/tensorflow/python/ops/nn_test.py
index aaf2f77fb29..30f3af5ecd4 100644
--- a/tensorflow/python/ops/nn_test.py
+++ b/tensorflow/python/ops/nn_test.py
@@ -1015,7 +1015,8 @@ class ReluTest(test_lib.TestCase):
     # Test that relu(nan) = nan for various sizes.
     for i in range(18):
       x = np.zeros(i) + np.nan
-      with self.cached_session():
+      # TODO(b/178335491): This is broken on GPU today.
+      with self.cached_session(use_gpu=False):
         z = nn_ops.relu(constant_op.constant(x)).eval()
         self.assertTrue(np.isnan(z).all())
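
With `session`, `cached_session`, and `test_session` now defaulting to `use_gpu=True`, tests built on `TensorFlowTestCase` run on a GPU whenever one is available, and CPU-only behavior has to be requested per test, which is why the explicit `use_gpu=False` arguments appear throughout the diff above. Below is a minimal sketch of what that looks like in a test file; it is not code from this change, and the test class, method names, and values are hypothetical.

# Hypothetical example, for illustration only; not part of the change above.
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test


class ExampleTest(test.TestCase):

  @test_util.run_deprecated_v1
  def testRunsOnGpuByDefault(self):
    # cached_session() now defaults to use_gpu=True, so this runs on a GPU
    # whenever one is available.
    with self.cached_session():
      self.assertAllEqual(self.evaluate(constant_op.constant(3.0) * 2.0), 6.0)

  @test_util.run_deprecated_v1
  def testCpuOnlySemantics(self):
    # Tests that depend on CPU-specific behavior (e.g. kernels that raise
    # errors, as in the dynamic_partition tests above) must now opt out
    # explicitly.
    with self.cached_session(use_gpu=False):
      self.assertAllEqual(self.evaluate(constant_op.constant(3.0) * 2.0), 6.0)


if __name__ == "__main__":
  test.main()

Flipping the default makes GPU coverage opt-out rather than opt-in, so only tests that genuinely depend on CPU semantics need the extra argument.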