Fix for the broken --config=rocm build
The --config=rocm build was broken by the merge of PR #26840. This commit backs out the ROCm support that PR added to avgpooling_op.cc. The template instantiations required for GPU support of the average pooling operator (which live in avgpooling_op_gpu.cu.cc) must be enabled for ROCm at the same time as the code in avgpooling_op.cc; otherwise the `--config=rocm` build fails with link errors. Enabling ROCm support for the code in avgpooling_op_gpu.cu.cc requires other PRs (the set spawned from PR #28343) to be merged first. Once those PRs are merged, we will file another PR to re-enable ROCm support in the avgpooling*.cc files.
This commit is contained in:
parent 6e18e7c812
commit 151ebd0bc6
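For context, the link error comes from the split between the op file (which registers the GPU kernel) and the .cu.cc file (which provides the explicit template instantiations that registration needs). The sketch below is a minimal, hypothetical illustration of that mismatch; `pool_op.cc`, `pool_op_gpu.cu.cc`, `PoolForward`, and `RegisterGpuPoolKernel` are placeholder names, not the actual TensorFlow sources.

```cpp
// --- pool_op_gpu.cu.cc: the only translation unit that emits the GPU code ---
#if GOOGLE_CUDA  // still CUDA-only, not yet "#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM"
template <typename T>
void PoolForward(const T* in, T* out, int n) {
  // ... launch the device kernel ...
}
// Explicit instantiation: this is the symbol the linker must find.
template void PoolForward<float>(const float*, float*, int);
#endif

// --- pool_op.cc: guarded for ROCm as well (the state PR #26840 left it in) ---
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
template <typename T>
void PoolForward(const T* in, T* out, int n);  // declaration only

void RegisterGpuPoolKernel() {
  // Under --config=rocm this call is compiled, but the instantiation above is
  // not, so the build fails at link time with an undefined-symbol error.
  PoolForward<float>(nullptr, nullptr, 0);
}
#endif
```

Reverting the guard in avgpooling_op.cc to plain GOOGLE_CUDA keeps the two sides consistent until the .cu.cc side can be enabled for ROCm as well.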
@@ -36,10 +36,10 @@ limitations under the License.
 #include "tensorflow/core/util/padding.h"
 #include "tensorflow/core/util/tensor_format.h"
 
-#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
+#if GOOGLE_CUDA
 #include "tensorflow/core/kernels/maxpooling_op_gpu.h"
 #include "tensorflow/core/kernels/pooling_ops_common_gpu.h"
-#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM
+#endif // GOOGLE_CUDA
 
 namespace tensorflow {
 
@@ -112,7 +112,7 @@ REGISTER_KERNEL_BUILDER(
     Name("AvgPool").Device(DEVICE_CPU).TypeConstraint<Eigen::half>("T"),
     AvgPoolingOp<CPUDevice, Eigen::half>);
 
-#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
+#if GOOGLE_CUDA
 template <typename T>
 class AvgPoolingOp<GPUDevice, T> : public UnaryOp<T> {
  public:
@@ -205,7 +205,7 @@ REGISTER_KERNEL_BUILDER(
 REGISTER_KERNEL_BUILDER(
     Name("AvgPool").Device(DEVICE_GPU).TypeConstraint<double>("T"),
     AvgPoolingOp<GPUDevice, double>);
-#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM
+#endif // GOOGLE_CUDA
 
 // The operation to compute AvgPool gradients.
 // It takes two inputs:
@@ -368,7 +368,7 @@ TF_CALL_float(REGISTER_CPU_KERNEL);
 TF_CALL_double(REGISTER_CPU_KERNEL);
 TF_CALL_half(REGISTER_CPU_KERNEL);
 
-#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
+#if GOOGLE_CUDA
 
 // A CUDNN based AvgPoolingGrad implementation. It includes the padding as the
 // candidates for the pooling operation.
@@ -577,6 +577,6 @@ REGISTER_KERNEL_BUILDER(Name("AvgPoolGrad")
                             .HostMemory("orig_input_shape"),
                         AvgPoolingGradOpCustomGPUKernel<Eigen::half>);
 
-#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM
+#endif // GOOGLE_CUDA
 
 } // namespace tensorflow