Remove unnecessary uses of use_gpu for tests
PiperOrigin-RevId: 236034198
commit 64b4879c93
parent 67a91b18b5
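These tests forced GPU placement through `test_util.use_gpu()` scopes and `use_gpu=True` session arguments, but `self.evaluate()` and a plain `self.session()` / `self.cached_session()` already run ops on the GPU when one is available, so the explicit scoping is redundant. The sketch below is a minimal, hypothetical before/after test (not part of this commit; the class and test names are illustrative only). Note that the hunks that follow appear to be rendered with whitespace-only changes hidden: the bodies of the removed `with` blocks were dedented, but show up here as unchanged context lines.

# Hypothetical minimal test showing the pattern being cleaned up.
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test


class UseGpuCleanupExample(test.TestCase):

  def testOldStyle(self):
    # Before: explicit GPU scope around the op under test (now unnecessary).
    with test_util.use_gpu():
      ans = math_ops.add(1.0, 2.0)
      self.assertAllClose(3.0, self.evaluate(ans))

  def testNewStyle(self):
    # After: same coverage; self.evaluate() places the op on GPU when present.
    ans = math_ops.add(1.0, 2.0)
    self.assertAllClose(3.0, self.evaluate(ans))


if __name__ == "__main__":
  test.main()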
@@ -20,7 +20,6 @@ from __future__ import print_function
 
 import numpy as np
 
-from tensorflow.python.framework import test_util
 from tensorflow.python.ops import math_ops
 from tensorflow.python.platform import test
 
@@ -31,7 +30,6 @@ class CompareAndBitpackTest(test.TestCase):
                              x, threshold,
                              truth,
                              expected_err_re=None):
-    with test_util.use_gpu():
       ans = math_ops.compare_and_bitpack(x, threshold)
       if expected_err_re is None:
         tf_ans = self.evaluate(ans)
@@ -63,7 +63,6 @@ class DynamicStitchTestBase(object):
     self.assertEqual([None], stitched_t.get_shape().as_list())
 
   def testSimpleOneDimensional(self):
-    with test_util.use_gpu():
       # Test various datatypes in the simple case to ensure that the op was
       # registered under those types.
       dtypes_to_test = [
@@ -86,7 +85,6 @@ class DynamicStitchTestBase(object):
     self.assertEqual([8], stitched_t.get_shape().as_list())
 
   def testOneListOneDimensional(self):
-    with test_util.use_gpu():
       indices = [constant_op.constant([1, 6, 2, 3, 5, 0, 4, 7])]
       data = [constant_op.constant([10, 60, 20, 30, 50, 0, 40, 70])]
       stitched_t = self.stitch_op(indices, data)
@@ -96,7 +94,6 @@ class DynamicStitchTestBase(object):
     self.assertEqual([8], stitched_t.get_shape().as_list())
 
   def testSimpleTwoDimensional(self):
-    with test_util.use_gpu():
       indices = [
           constant_op.constant([0, 4, 7]),
           constant_op.constant([1, 6]),
@@ -115,7 +112,6 @@ class DynamicStitchTestBase(object):
     self.assertEqual([8, 2], stitched_t.get_shape().as_list())
 
   def testZeroSizeTensor(self):
-    with test_util.use_gpu():
       indices = [
           constant_op.constant([0, 4, 7]),
           constant_op.constant([1, 6]),
@@ -137,7 +133,6 @@ class DynamicStitchTestBase(object):
 
   @test_util.run_deprecated_v1
   def testHigherRank(self):
-    with self.session(use_gpu=True) as sess:
       indices = [
           constant_op.constant(6),
           constant_op.constant([4, 1]),
@@ -159,7 +154,7 @@ class DynamicStitchTestBase(object):
       grads = gradients_impl.gradients(stitched_t, indices + data,
                                        stitched_grad)
       self.assertEqual(grads[:3], [None] * 3)  # Indices have no gradients
-      for datum, grad in zip(data, sess.run(grads[3:])):
+      for datum, grad in zip(data, self.evaluate(grads[3:])):
         self.assertAllEqual(7. * self.evaluate(datum), grad)
 
   @test_util.run_deprecated_v1
@@ -241,7 +236,6 @@ class ParallelDynamicStitchTest(DynamicStitchTestBase, test.TestCase):
 
   @test_util.run_deprecated_v1
   def testHigherRank(self):
-    with self.session(use_gpu=True) as sess:
       indices = [
           constant_op.constant(6),
           constant_op.constant([4, 1]),
@@ -263,12 +257,11 @@ class ParallelDynamicStitchTest(DynamicStitchTestBase, test.TestCase):
       grads = gradients_impl.gradients(stitched_t, indices + data,
                                        stitched_grad)
      self.assertEqual(grads[:3], [None] * 3)  # Indices have no gradients
-      for datum, grad in zip(data, sess.run(grads[3:])):
+      for datum, grad in zip(data, self.evaluate(grads[3:])):
         self.assertAllEqual(7.0 * self.evaluate(datum), grad)
 
   # GPU version unit tests
   def testScalarGPU(self):
-    with self.cached_session():
       indices = [constant_op.constant(0), constant_op.constant(1)]
       data = [constant_op.constant(40.0), constant_op.constant(60.0)]
       for step in -1, 1:
@@ -280,7 +273,6 @@ class ParallelDynamicStitchTest(DynamicStitchTestBase, test.TestCase):
 
   @test_util.run_deprecated_v1
   def testHigherRankGPU(self):
-    with self.cached_session() as sess:
       indices = [
           constant_op.constant(6),
           constant_op.constant([4, 1]),
@@ -302,7 +294,7 @@ class ParallelDynamicStitchTest(DynamicStitchTestBase, test.TestCase):
       grads = gradients_impl.gradients(stitched_t, indices + data,
                                        stitched_grad)
       self.assertEqual(grads[:3], [None] * 3)  # Indices have no gradients
-      for datum, grad in zip(data, sess.run(grads[3:])):
+      for datum, grad in zip(data, self.evaluate(grads[3:])):
         self.assertAllEqual(7.0 * self.evaluate(datum), grad)
 
 
@@ -21,7 +21,6 @@ from __future__ import print_function
 import numpy as np
 
 from tensorflow.python.framework import constant_op
-from tensorflow.python.framework import test_util
 from tensorflow.python.ops import array_ops
 from tensorflow.python.platform import test
 
@@ -44,7 +43,6 @@ class ExtractImagePatches(test.TestCase):
     strides = [1] + strides + [1]
     rates = [1] + rates + [1]
 
-    with test_util.use_gpu():
       out_tensor = array_ops.extract_image_patches(
           constant_op.constant(image),
           ksizes=ksizes,
@@ -21,7 +21,6 @@ from __future__ import print_function
 import numpy as np
 
 from tensorflow.python.framework import constant_op
-from tensorflow.python.framework import test_util
 from tensorflow.python.ops import array_ops
 from tensorflow.python.platform import test
 
@@ -46,7 +45,6 @@ class ExtractVolumePatches(test.TestCase):
     ksizes = [1] + ksizes + [1]
     strides = [1] + strides + [1]
 
-    with test_util.use_gpu():
       out_tensor = array_ops.extract_volume_patches(
           constant_op.constant(image),
           ksizes=ksizes,
@@ -66,8 +66,6 @@ class LuOpTest(test.TestCase):
 
   def _verifyLu(self, x, output_idx_type=dtypes.int64):
     # Verify that Px = LU.
-    with test_util.use_gpu():
-
       lu, perm = linalg_ops.lu(x, output_idx_type=output_idx_type)
 
       # Prepare the lower factor of shape num_rows x num_rows
@@ -140,7 +138,6 @@ class LuOpTest(test.TestCase):
     self._verifyLu(complex_data, output_idx_type=output_idx_type)
 
   def testPivoting(self):
-    with test_util.use_gpu():
       # This matrix triggers partial pivoting because the first diagonal entry
       # is small.
       data = np.array([[1e-9, 1., 0.], [1., 0., 0], [0., 1., 5]])
@@ -167,7 +164,6 @@ class LuOpTest(test.TestCase):
     # LU factorization gives an error when the input is singular.
     # Note: A singular matrix may return without error but it won't be a valid
     # factorization.
-    with test_util.use_gpu():
       for dtype in self.float_types:
         with self.assertRaises(errors.InvalidArgumentError):
           self.evaluate(
@@ -220,7 +216,6 @@ class LuOpTest(test.TestCase):
 
   @test_util.run_deprecated_v1
   def testConcurrentExecutesWithoutError(self):
-    with test_util.use_gpu():
       matrix1 = random_ops.random_normal([5, 5], seed=42)
       matrix2 = random_ops.random_normal([5, 5], seed=42)
       lu1, p1 = linalg_ops.lu(matrix1)
@@ -127,7 +127,7 @@ def _GetMatMulGradientTest(a_np_, b_np_, use_static_shape_, **kwargs_):
     epsilon = np.finfo(a_np_.dtype).eps
     delta = epsilon**(1.0 / 3.0)
     tol = 20 * delta
-    with self.session(), test_util.use_gpu():
+    with self.session():
       theoretical, numerical = gradient_checker_v2.compute_gradient(
           lambda x: math_ops.matmul(x, effective_b_np, **kwargs_),
           [effective_a_np],
@@ -32,7 +32,6 @@ class SquareRootOpTest(test.TestCase):
 
   def _verifySquareRoot(self, matrix, np_type):
     matrix = matrix.astype(np_type)
-    with test_util.use_gpu():
       # Verify that matmul(sqrtm(A), sqrtm(A)) = A
       sqrt = gen_linalg_ops.matrix_square_root(matrix)
       square = math_ops.matmul(sqrt, sqrt)
@@ -41,7 +41,6 @@ class UnstackOpTest(test.TestCase):
 
   def testSimple(self):
     np.random.seed(7)
-    with test_util.use_gpu():
       for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
         for dtype in [
             np.bool, np.float16, np.float32, np.float64, np.int32, np.int64
@@ -80,7 +79,7 @@ class UnstackOpTest(test.TestCase):
     data = np.random.randn(*shape)
     shapes = [shape[1:]] * shape[0]
     for i in xrange(shape[0]):
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         x = constant_op.constant(data)
         cs = array_ops.unstack(x, num=shape[0])
         err = gradient_checker.compute_gradient_error(x, shape, cs[i],
@@ -94,7 +93,7 @@ class UnstackOpTest(test.TestCase):
     out_shape = list(shape)
     del out_shape[1]
     for i in xrange(shape[1]):
-      with self.cached_session(use_gpu=True):
+      with self.cached_session():
         x = constant_op.constant(data)
         cs = array_ops.unstack(x, num=shape[1], axis=1)
         err = gradient_checker.compute_gradient_error(x, shape, cs[i],
@@ -103,7 +102,6 @@ class UnstackOpTest(test.TestCase):
 
   @test_util.run_deprecated_v1
   def testInferNum(self):
-    with self.cached_session():
       for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
         x = array_ops.placeholder(np.float32, shape=shape)
         cs = array_ops.unstack(x)