Adding ROCm support for the functional ops

Deven Desai 2019-07-03 00:13:49 +00:00
parent 28d194b368
commit 27559caaab


@@ -15,7 +15,7 @@ limitations under the License.
 #define EIGEN_USE_THREADS
 #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
-#if GOOGLE_CUDA
+#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
 #include "tensorflow/core/common_runtime/device.h"
 #include "tensorflow/core/framework/device_base.h"
 #endif
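
The change is the same in both hunks: code that was previously compiled only when GOOGLE_CUDA was set is now also compiled for ROCm builds. The following is a minimal, self-contained sketch of how the combined guard behaves at build time; the macro names match the real build flags, but the function and its messages are made up for illustration and are not part of the TensorFlow sources.

#include <cstdio>

// Illustrative helper only: GOOGLE_CUDA and TENSORFLOW_USE_ROCM are the real
// TensorFlow build macros, but ReportGpuBackend() is a hypothetical function.
void ReportGpuBackend() {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
  // Compiled on NVIDIA (CUDA) builds and, after this commit, on AMD (ROCm) builds.
  std::printf("GPU build: device-side path enabled\n");
#else
  // CPU-only builds take this branch; the GPU-specific path is compiled out.
  std::printf("CPU-only build\n");
#endif
}

int main() {
  ReportGpuBackend();
  return 0;
}

In TensorFlow these macros are normally set by the build configuration (CUDA vs. ROCm builds), so the selection happens entirely at compile time with no runtime check.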
@@ -424,7 +424,7 @@ class WhileOp : public AsyncOpKernel {
        return Finish(s);
      }
      Tensor cond_t;
-#if GOOGLE_CUDA
+#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
      const DeviceBase::GpuDeviceInfo* gpu_device_info =
          ctx_->device()->tensorflow_gpu_device_info();
      const bool is_hostmem_dtype =