Changing cuda* names to gpu* names in a couple of tests

Deven Desai 2020-01-11 01:27:42 +00:00
parent 2df7f0fd53
commit 1a071541c1
4 changed files with 13 additions and 13 deletions

View File

@@ -43,13 +43,13 @@ namespace {
 __global__ void SetOutbufZero(GpuLaunchConfig config,
                               int* __restrict__ outbuf) {
-  CUDA_1D_KERNEL_LOOP(x, config.virtual_thread_count) { outbuf[x] = 0; }
+  GPU_1D_KERNEL_LOOP(x, config.virtual_thread_count) { outbuf[x] = 0; }
 }
 
 // counting number of jobs by using atomic +1
 __global__ void Count1D(GpuLaunchConfig config, int bufsize,
                         int* __restrict__ outbuf) {
-  CUDA_1D_KERNEL_LOOP(x, config.virtual_thread_count) {
+  GPU_1D_KERNEL_LOOP(x, config.virtual_thread_count) {
     if (x < 0) {  // x might overflow when testing extreme case
       break;
     }
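Note: GPU_1D_KERNEL_LOOP is the vendor-neutral spelling of the old CUDA_1D_KERNEL_LOOP; the macro in tensorflow/core/util/gpu_kernel_helper.h compiles under both CUDA and ROCm. As a minimal sketch (not TF's literal definition), the pattern it implements is the classic grid-stride loop; the Sketch* names below are illustrative only:

// Sketch of the grid-stride-loop pattern behind GPU_1D_KERNEL_LOOP.
#define SKETCH_GPU_1D_KERNEL_LOOP(i, n)                          \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);   \
       i += blockDim.x * gridDim.x)

// Example kernel: each thread strides across the buffer, so the loop
// covers all n elements regardless of the launch's grid size.
__global__ void SketchFill(int n, int* __restrict__ out) {
  SKETCH_GPU_1D_KERNEL_LOOP(i, n) { out[i] = 1; }
}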
@@ -58,11 +58,11 @@ __global__ void Count1D(GpuLaunchConfig config, int bufsize,
 }
 
 __global__ void Count2D(Gpu2DLaunchConfig config, int bufsize,
                         int* __restrict__ outbuf) {
-  CUDA_AXIS_KERNEL_LOOP(x, config.virtual_thread_count.x, X) {
+  GPU_AXIS_KERNEL_LOOP(x, config.virtual_thread_count.x, X) {
     if (x < 0) {  // x might overflow when testing extreme case
       break;
     }
-    CUDA_AXIS_KERNEL_LOOP(y, config.virtual_thread_count.y, Y) {
+    GPU_AXIS_KERNEL_LOOP(y, config.virtual_thread_count.y, Y) {
       if (y < 0) {  // y might overflow when testing extreme case
         break;
       }
@@ -73,15 +73,15 @@ __global__ void Count2D(Gpu2DLaunchConfig config, int bufsize,
 }
 
 __global__ void Count3D(Gpu3DLaunchConfig config, int bufsize,
                         int* __restrict__ outbuf) {
-  CUDA_AXIS_KERNEL_LOOP(x, config.virtual_thread_count.x, X) {
+  GPU_AXIS_KERNEL_LOOP(x, config.virtual_thread_count.x, X) {
     if (x < 0) {  // x might overflow when testing extreme case
       break;
     }
-    CUDA_AXIS_KERNEL_LOOP(y, config.virtual_thread_count.y, Y) {
+    GPU_AXIS_KERNEL_LOOP(y, config.virtual_thread_count.y, Y) {
       if (y < 0) {  // y might overflow when testing extreme case
         break;
       }
-      CUDA_AXIS_KERNEL_LOOP(z, config.virtual_thread_count.z, Z) {
+      GPU_AXIS_KERNEL_LOOP(z, config.virtual_thread_count.z, Z) {
         if (z < 0) {  // z might overflow when testing extreme case
           break;
         }
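The GPU_AXIS_KERNEL_LOOP variant used by Count2D and Count3D strides along a single grid axis, which is why the loops nest cleanly to cover a 2-D or 3-D index space. A hedged sketch of the Y-axis case (illustrative; the real macro takes the axis as its third argument):

// Sketch of a per-axis grid-stride loop: same idea as the 1-D loop,
// but indexing and striding use only the chosen grid dimension.
#define SKETCH_GPU_Y_KERNEL_LOOP(i, n)                           \
  for (int i = blockIdx.y * blockDim.y + threadIdx.y; i < (n);   \
       i += blockDim.y * gridDim.y)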
@@ -96,7 +96,7 @@ __global__ void Count3D(Gpu3DLaunchConfig config, int bufsize,
 
 __global__ void CudaShuffleGetSrcLaneTest(
     unsigned* __restrict__ failure_count) {
-  unsigned lane_id = CudaLaneId();
+  unsigned lane_id = GpuLaneId();
   for (int width = warpSize; width > 1; width /= 2) {
     auto check_result = [&](const char* op_name, int param, unsigned actual,
                             unsigned expected) {
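GpuLaneId() replaces CudaLaneId() because the warp width differs across vendors (32 lanes per warp on CUDA, 64-lane wavefronts on ROCm), and TF's helper hides that detail. Assuming a 1-D block whose size is a multiple of warpSize, a minimal sketch of the computation is:

// Illustrative only; TF's GpuLaneId() handles the platform differences.
__device__ unsigned SketchLaneId() {
  return threadIdx.x & (warpSize - 1);  // lane index within the warp
}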
@@ -194,7 +194,7 @@ TEST_F(GpuLaunchConfigTest, GetGpuLaunchConfig) {
 #undef TEST_LAUNCH_PARAMETER
 }
 
-bool operator==(const Gpu2DLaunchConfig& a, const Cuda2DLaunchConfig& b) {
+bool operator==(const Gpu2DLaunchConfig& a, const Gpu2DLaunchConfig& b) {
   return a.thread_per_block.x == b.thread_per_block.x &&
          a.thread_per_block.y == b.thread_per_block.y &&
          a.thread_per_block.z == b.thread_per_block.z &&
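For context, the launch configs compared here are produced on the host and consumed by the kernels above. A sketch of the typical pairing, assuming the helpers from tensorflow/core/util/gpu_launch_config.h and gpu_kernel_helper.h and an Eigen::GpuDevice d:

// Host side: size the grid for bufsize elements, then launch the kernel.
GpuLaunchConfig config = GetGpuLaunchConfig(bufsize, d);
TF_CHECK_OK(GpuLaunchKernel(SetOutbufZero, config.block_count,
                            config.thread_per_block, 0, d.stream(), config,
                            outbuf));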

View File

@@ -71,7 +71,7 @@ class AssignOpTest(test.TestCase):
       var_value, op_value = self._initAssignSubFetch(x, y, use_gpu=False)
       self.assertAllEqual(x - y, var_value)
       self.assertAllEqual(x - y, op_value)
-    if test.is_built_with_cuda() and dtype in [np.float32, np.float64]:
+    if test.is_built_with_gpu_support() and dtype in [np.float32, np.float64]:
       var_value, op_value = self._initAssignFetch(x, y, use_gpu=True)
       self.assertAllEqual(y, var_value)
       self.assertAllEqual(y, op_value)

View File

@@ -127,7 +127,7 @@ class SoftmaxTest(test.TestCase):
     self._testAll(
         np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32))
 
-  @unittest.skipUnless(test.is_built_with_cuda(),
+  @unittest.skipUnless(test.is_built_with_gpu_support(),
                        "Test only applicable when running on GPUs")
   def testFloatGPU(self):
     if test.is_gpu_available(cuda_only=True):
@@ -142,7 +142,7 @@ class SoftmaxTest(test.TestCase):
     self._testAll(
         np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16))
 
-  @unittest.skipUnless(test.is_built_with_cuda(),
+  @unittest.skipUnless(test.is_built_with_gpu_support(),
                        "Test only applicable when running on GPUs")
   def testHalfGPU(self):
     if test.is_gpu_available(cuda_only=True):

View File

@@ -88,7 +88,7 @@ class SparseXentTest(test.TestCase):
                 [1., 2., 3., 4.]]
     labels = [4, 3, 0, -1]
 
-    if test.is_built_with_cuda() and test.is_gpu_available():
+    if test.is_built_with_gpu_support() and test.is_gpu_available():
      with self.session(use_gpu=True) as sess:
        loss, backprop = (
            gen_nn_ops.sparse_softmax_cross_entropy_with_logits(