From 1a071541c184204fef1359e719586909de36f492 Mon Sep 17 00:00:00 2001
From: Deven Desai
Date: Sat, 11 Jan 2020 01:27:42 +0000
Subject: [PATCH] Changing cuda* names to gpu* names in a couple of tests

---
 .../core/util/gpu_kernel_helper_test.cu.cc     | 18 +++++++++---------
 .../kernel_tests/dense_update_ops_test.py      |  2 +-
 .../python/kernel_tests/softmax_op_test.py     |  4 ++--
 .../python/kernel_tests/sparse_xent_op_test.py |  2 +-
 4 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/tensorflow/core/util/gpu_kernel_helper_test.cu.cc b/tensorflow/core/util/gpu_kernel_helper_test.cu.cc
index 0b84aed9234..c089511e964 100644
--- a/tensorflow/core/util/gpu_kernel_helper_test.cu.cc
+++ b/tensorflow/core/util/gpu_kernel_helper_test.cu.cc
@@ -43,13 +43,13 @@ namespace {
 
 __global__ void SetOutbufZero(GpuLaunchConfig config,
                               int* __restrict__ outbuf) {
-  CUDA_1D_KERNEL_LOOP(x, config.virtual_thread_count) { outbuf[x] = 0; }
+  GPU_1D_KERNEL_LOOP(x, config.virtual_thread_count) { outbuf[x] = 0; }
 }
 
 // counting number of jobs by using atomic +1
 __global__ void Count1D(GpuLaunchConfig config, int bufsize,
                         int* __restrict__ outbuf) {
-  CUDA_1D_KERNEL_LOOP(x, config.virtual_thread_count) {
+  GPU_1D_KERNEL_LOOP(x, config.virtual_thread_count) {
     if (x < 0) {  // x might overflow when testing extreme case
       break;
     }
@@ -58,11 +58,11 @@
 
 __global__ void Count2D(Gpu2DLaunchConfig config, int bufsize,
                         int* __restrict__ outbuf) {
-  CUDA_AXIS_KERNEL_LOOP(x, config.virtual_thread_count.x, X) {
+  GPU_AXIS_KERNEL_LOOP(x, config.virtual_thread_count.x, X) {
     if (x < 0) {  // x might overflow when testing extreme case
       break;
     }
-    CUDA_AXIS_KERNEL_LOOP(y, config.virtual_thread_count.y, Y) {
+    GPU_AXIS_KERNEL_LOOP(y, config.virtual_thread_count.y, Y) {
       if (y < 0) {  // y might overflow when testing extreme case
         break;
       }
@@ -73,15 +73,15 @@
 
 __global__ void Count3D(Gpu3DLaunchConfig config, int bufsize,
                         int* __restrict__ outbuf) {
-  CUDA_AXIS_KERNEL_LOOP(x, config.virtual_thread_count.x, X) {
+  GPU_AXIS_KERNEL_LOOP(x, config.virtual_thread_count.x, X) {
     if (x < 0) {  // x might overflow when testing extreme case
       break;
     }
-    CUDA_AXIS_KERNEL_LOOP(y, config.virtual_thread_count.y, Y) {
+    GPU_AXIS_KERNEL_LOOP(y, config.virtual_thread_count.y, Y) {
       if (y < 0) {  // y might overflow when testing extreme case
         break;
       }
-      CUDA_AXIS_KERNEL_LOOP(z, config.virtual_thread_count.z, Z) {
+      GPU_AXIS_KERNEL_LOOP(z, config.virtual_thread_count.z, Z) {
         if (z < 0) {  // z might overflow when testing extreme case
           break;
         }
@@ -96,7 +96,7 @@ __global__ void Count3D(Gpu3DLaunchConfig config, int bufsize,
 
 __global__ void CudaShuffleGetSrcLaneTest(
     unsigned* __restrict__ failure_count) {
-  unsigned lane_id = CudaLaneId();
+  unsigned lane_id = GpuLaneId();
   for (int width = warpSize; width > 1; width /= 2) {
     auto check_result = [&](const char* op_name, int param, unsigned actual,
                             unsigned expected) {
@@ -194,7 +194,7 @@ TEST_F(GpuLaunchConfigTest, GetGpuLaunchConfig) {
 #undef TEST_LAUNCH_PARAMETER
 }
 
-bool operator==(const Gpu2DLaunchConfig& a, const Cuda2DLaunchConfig& b) {
+bool operator==(const Gpu2DLaunchConfig& a, const Gpu2DLaunchConfig& b) {
   return a.thread_per_block.x == b.thread_per_block.x &&
          a.thread_per_block.y == b.thread_per_block.y &&
          a.thread_per_block.z == b.thread_per_block.z &&
diff --git a/tensorflow/python/kernel_tests/dense_update_ops_test.py b/tensorflow/python/kernel_tests/dense_update_ops_test.py
index 545de87ca10..47bbce45a18 100644
--- a/tensorflow/python/kernel_tests/dense_update_ops_test.py
+++ b/tensorflow/python/kernel_tests/dense_update_ops_test.py
@@ -71,7 +71,7 @@ class AssignOpTest(test.TestCase):
       var_value, op_value = self._initAssignSubFetch(x, y, use_gpu=False)
      self.assertAllEqual(x - y, var_value)
       self.assertAllEqual(x - y, op_value)
-      if test.is_built_with_cuda() and dtype in [np.float32, np.float64]:
+      if test.is_built_with_gpu_support() and dtype in [np.float32, np.float64]:
         var_value, op_value = self._initAssignFetch(x, y, use_gpu=True)
         self.assertAllEqual(y, var_value)
         self.assertAllEqual(y, op_value)
diff --git a/tensorflow/python/kernel_tests/softmax_op_test.py b/tensorflow/python/kernel_tests/softmax_op_test.py
index c28ac79a47d..82fe328dd10 100644
--- a/tensorflow/python/kernel_tests/softmax_op_test.py
+++ b/tensorflow/python/kernel_tests/softmax_op_test.py
@@ -127,7 +127,7 @@ class SoftmaxTest(test.TestCase):
     self._testAll(
         np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32))
 
-  @unittest.skipUnless(test.is_built_with_cuda(),
+  @unittest.skipUnless(test.is_built_with_gpu_support(),
                        "Test only applicable when running on GPUs")
   def testFloatGPU(self):
     if test.is_gpu_available(cuda_only=True):
@@ -142,7 +142,7 @@ class SoftmaxTest(test.TestCase):
     self._testAll(
         np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16))
 
-  @unittest.skipUnless(test.is_built_with_cuda(),
+  @unittest.skipUnless(test.is_built_with_gpu_support(),
                        "Test only applicable when running on GPUs")
   def testHalfGPU(self):
     if test.is_gpu_available(cuda_only=True):
diff --git a/tensorflow/python/kernel_tests/sparse_xent_op_test.py b/tensorflow/python/kernel_tests/sparse_xent_op_test.py
index 9af0a4948e6..76973add820 100644
--- a/tensorflow/python/kernel_tests/sparse_xent_op_test.py
+++ b/tensorflow/python/kernel_tests/sparse_xent_op_test.py
@@ -88,7 +88,7 @@ class SparseXentTest(test.TestCase):
               [1., 2., 3., 4.]]
     labels = [4, 3, 0, -1]
 
-    if test.is_built_with_cuda() and test.is_gpu_available():
+    if test.is_built_with_gpu_support() and test.is_gpu_available():
       with self.session(use_gpu=True) as sess:
         loss, backprop = (
             gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
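For context on the macros renamed above: GPU_1D_KERNEL_LOOP (formerly CUDA_1D_KERNEL_LOOP) is TensorFlow's grid-stride loop helper from tensorflow/core/util/gpu_kernel_helper.h. A kernel is launched with a fixed grid, and the loop strides each thread across all config.virtual_thread_count elements, so one launch geometry covers any problem size on both CUDA and ROCm builds. The standalone CUDA sketch below is illustrative only and is not part of the patch; its GPU_1D_KERNEL_LOOP definition is a simplified stand-in for the real TensorFlow macro, and the kernel is modeled on the SetOutbufZero test kernel above.

#include <cstdio>
#include <cuda_runtime.h>

// Simplified stand-in for TensorFlow's GPU_1D_KERNEL_LOOP: a grid-stride
// loop, so a fixed-size launch covers an arbitrary element count.
#define GPU_1D_KERNEL_LOOP(i, n)                               \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
       i += blockDim.x * gridDim.x)

// Modeled on the SetOutbufZero test kernel: each virtual thread zeroes
// one element of the output buffer.
__global__ void SetOutbufZero(int count, int* __restrict__ outbuf) {
  GPU_1D_KERNEL_LOOP(x, count) { outbuf[x] = 0; }
}

int main() {
  const int count = 1 << 20;
  int* outbuf = nullptr;
  cudaMalloc(&outbuf, count * sizeof(int));

  // Deliberately launch far fewer threads than elements; the grid-stride
  // loop inside the kernel picks up the remainder.
  SetOutbufZero<<<64, 256>>>(count, outbuf);
  cudaDeviceSynchronize();

  int first = -1;
  cudaMemcpy(&first, outbuf, sizeof(int), cudaMemcpyDeviceToHost);
  printf("outbuf[0] = %d\n", first);  // expect 0
  cudaFree(outbuf);
  return 0;
}

In TensorFlow itself the launch geometry is not hard-coded as it is here: the tests obtain it from GetGpuLaunchConfig (exercised by the GetGpuLaunchConfig test in the first hunk), which picks a block/thread split for the device and records the requested element count in virtual_thread_count.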