[KERNEL_GEN] Rename MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED -> MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED.

The plan is to use this flag for experimental CPU kernels as well.

PiperOrigin-RevId: 355369970
Change-Id: I5bdb413176c3aa7cb753b3aa554046a05eb799f3
Authored by Alexander Belyaev on 2021-02-03 04:39:20 -08:00; committed by TensorFlower Gardener
parent 59fcbe5e28
commit bff7bf1e48
49 changed files with 54 additions and 54 deletions
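
To make the renamed pattern concrete before the per-file hunks, here is a condensed sketch of the post-rename state of one of the touched registration files (assembled from the Acos hunk below; the include, the namespace wrapper, and the file identity are the usual cwise-op boilerplate assumed here, not shown in the diff). The define itself is emitted by the cwise_op BUILD rule through if_mlir_experimental_kernels_enabled(), and the hand-written Eigen GPU kernel is registered only when the MLIR-generated replacement is not fully enabled:

// Sketch of one cwise op registration file (the Acos kernel) after the
// rename; the Eigen-based GPU registration is compiled out only when BOTH
// the MLIR-generated GPU kernels and the experimental kernels are enabled.
#include "tensorflow/core/kernels/cwise_ops_common.h"

namespace tensorflow {
REGISTER2(UnaryOp, CPU, "Acos", functor::acos, float, double);

#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
    !defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER2(UnaryOp, GPU, "Acos", functor::acos, float, double);
#endif
#endif
}  // namespace tensorflow

Dropping the GPU-specific part of the macro name lets the same define later guard experimental CPU registrations without another sweep over these files.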

@@ -3347,7 +3347,7 @@ tf_kernel_library(
tf_kernel_library(
name = "cwise_op",
copts = if_mlir_generated_gpu_kernels_enabled(if_true = ["-DMLIR_GENERATED_GPU_KERNELS_ENABLED=1"]) +
-if_mlir_experimental_kernels_enabled(if_true = ["-DMLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED=1"]),
+if_mlir_experimental_kernels_enabled(if_true = ["-DMLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED=1"]),
prefix = "cwise_op",
deps = MATH_DEPS + if_mlir_generated_gpu_kernels_enabled(if_true = ["//tensorflow/core/kernels/mlir_generated:cwise_op"]),
)

@@ -20,7 +20,7 @@ REGISTER2(UnaryOp, CPU, "Acos", functor::acos, float, double);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER2(UnaryOp, GPU, "Acos", functor::acos, float, double);
#endif
#endif

@@ -23,7 +23,7 @@ REGISTER4(UnaryOp, CPU, "Acosh", functor::acosh, float, double, complex64,
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER2(UnaryOp, GPU, "Acosh", functor::acosh, float, double);
#endif
#endif

@@ -25,7 +25,7 @@ REGISTER6(BinaryOp, CPU, "AddV2", functor::add, float, Eigen::half, double,
REGISTER3(BinaryOp, GPU, "Add", functor::add, float, Eigen::half, double);
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER3(BinaryOp, GPU, "AddV2", functor::add, float, Eigen::half, double);
#endif

@@ -33,7 +33,7 @@ REGISTER6(BinaryOp, GPU, "Add", functor::add, uint8, uint16, uint64, int64,
complex64, complex128);
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER7(BinaryOp, GPU, "AddV2", functor::add, uint8, uint16, uint32, uint64,
int64, complex64, complex128);
#else

@@ -28,7 +28,7 @@ REGISTER_COMPLEX(CPU, double, complex128);
#if GOOGLE_CUDA
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER_COMPLEX(GPU, float, complex64);
REGISTER_COMPLEX(GPU, double, complex128);
#endif

@@ -20,7 +20,7 @@ REGISTER2(UnaryOp, CPU, "Asin", functor::asin, float, double);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER2(UnaryOp, GPU, "Asin", functor::asin, float, double);
#endif
#endif

@@ -22,7 +22,7 @@ REGISTER4(UnaryOp, CPU, "Asinh", functor::asinh, float, double, complex64,
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER2(UnaryOp, GPU, "Asinh", functor::asinh, float, double);
#endif
#endif

@@ -21,7 +21,7 @@ REGISTER2(BinaryOp, CPU, "Atan2", functor::atan2, float, double);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER2(BinaryOp, GPU, "Atan2", functor::atan2, float, double);
#endif
#endif

@@ -22,7 +22,7 @@ REGISTER8(BinaryOp, CPU, "BitwiseAnd", functor::bitwise_and, int8, int16, int32,
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER8(BinaryOp, GPU, "BitwiseAnd", functor::bitwise_and, int8, int16, int32,
int64, uint8, uint16, uint32, uint64);
#else
@@ -30,7 +30,7 @@ REGISTER8(BinaryOp, GPU, "BitwiseAnd", functor::bitwise_and, int8, int16, int32,
REGISTER4(BinaryOp, GPU, "BitwiseAnd", functor::bitwise_and, uint8, uint16,
uint32, uint64);
#endif // !MLIR_GENERATED_GPU_KERNELS_ENABLED ||
-// !MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED
+// !MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED
#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM
} // namespace tensorflow

@@ -22,7 +22,7 @@ REGISTER8(BinaryOp, CPU, "BitwiseOr", functor::bitwise_or, int8, int16, int32,
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER8(BinaryOp, GPU, "BitwiseOr", functor::bitwise_or, int8, int16, int32,
int64, uint8, uint16, uint32, uint64);
#else
@@ -30,7 +30,7 @@ REGISTER8(BinaryOp, GPU, "BitwiseOr", functor::bitwise_or, int8, int16, int32,
REGISTER4(BinaryOp, GPU, "BitwiseOr", functor::bitwise_or, uint8, uint16,
uint32, uint64);
#endif // !MLIR_GENERATED_GPU_KERNELS_ENABLED ||
-// !MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED
+// !MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED
#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM
} // namespace tensorflow

@@ -22,7 +22,7 @@ REGISTER8(BinaryOp, CPU, "BitwiseXor", functor::bitwise_xor, int8, int16, int32,
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER8(BinaryOp, GPU, "BitwiseXor", functor::bitwise_xor, int8, int16, int32,
int64, uint8, uint16, uint32, uint64);
#else
@@ -30,7 +30,7 @@ REGISTER8(BinaryOp, GPU, "BitwiseXor", functor::bitwise_xor, int8, int16, int32,
REGISTER4(BinaryOp, GPU, "BitwiseXor", functor::bitwise_xor, uint8, uint16,
uint32, uint64);
#endif // !MLIR_GENERATED_GPU_KERNELS_ENABLED ||
-// !MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED
+// !MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED
#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM
} // namespace tensorflow

@@ -22,7 +22,7 @@ REGISTER3(UnaryOp, CPU, "Digamma", functor::digamma, float, Eigen::half,
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER3(UnaryOp, GPU, "Digamma", functor::digamma, float, Eigen::half,
double);
#endif

@@ -30,7 +30,7 @@ REGISTER5(BinaryOp, CPU, "DivNoNan", functor::div_no_nan, Eigen::half, float,
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
// ROCM TODO: re-enable complex64 / complex128 after compiler fix
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER9(BinaryOp, GPU, "Div", functor::div, float, Eigen::half, double, uint8,
uint16, int16, int64, complex64, complex128);
REGISTER5(BinaryOp, GPU, "RealDiv", functor::div, float, Eigen::half, double,

@@ -28,7 +28,7 @@ REGISTER_KERNEL_BUILDER(
ApproximateEqualOp<CPUDevice, double>);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER4(BinaryOp, GPU, "Equal", functor::equal_to, float, Eigen::half, double,
uint8);
#else

@@ -26,7 +26,7 @@ REGISTER6(BinaryOp, CPU, "Equal", functor::equal_to, int32, int64, complex64,
complex128, tstring, bool);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER6(BinaryOp, GPU, "Equal", functor::equal_to, int8, int16, int64,
complex64, complex128, bool);
#else

@@ -21,7 +21,7 @@ REGISTER6(UnaryOp, CPU, "Exp", functor::exp, float, Eigen::half, bfloat16,
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER5(UnaryOp, GPU, "Exp", functor::exp, float, Eigen::half, double,
complex64, complex128);
#endif

@@ -20,7 +20,7 @@ REGISTER6(UnaryOp, CPU, "Expm1", functor::expm1, float, Eigen::half, bfloat16,
double, complex64, complex128);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER3(UnaryOp, GPU, "Expm1", functor::expm1, float, Eigen::half, double);
#endif
#endif

@@ -25,7 +25,7 @@ REGISTER4(BinaryOp, CPU, "FloorDiv", functor::floor_div_real, float,
REGISTER4(BinaryOp, GPU, "FloorDiv", functor::floor_div, uint8, uint16, int16,
int64);
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER3(BinaryOp, GPU, "FloorDiv", functor::floor_div_real, float,
Eigen::half, double);
#endif

@@ -20,7 +20,7 @@ limitations under the License.
namespace tensorflow {
namespace functor {
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
DEFINE_UNARY2(acos, float, double);
#endif
} // namespace functor

@@ -21,7 +21,7 @@ limitations under the License.
namespace tensorflow {
namespace functor {
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
DEFINE_UNARY2(acosh, float, double);
#endif
} // namespace functor

@@ -21,7 +21,7 @@ namespace tensorflow {
namespace functor {
#if GOOGLE_CUDA
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
DEFINE_UNARY2(get_angle, complex64, complex128);
#endif
#endif

@@ -20,7 +20,7 @@ limitations under the License.
namespace tensorflow {
namespace functor {
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
DEFINE_BINARY8(left_shift, int8, int16, int32, int64, uint8, uint16, uint32,
uint64);
#else

@@ -20,7 +20,7 @@ limitations under the License.
namespace tensorflow {
namespace functor {
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
DEFINE_BINARY6(maximum, Eigen::half, float, double, uint8, int16, int64);
#endif
} // namespace functor

@@ -20,7 +20,7 @@ limitations under the License.
namespace tensorflow {
namespace functor {
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
DEFINE_BINARY6(minimum, Eigen::half, float, double, uint8, int16, int64);
#endif
} // namespace functor

@@ -20,7 +20,7 @@ limitations under the License.
namespace tensorflow {
namespace functor {
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
DEFINE_BINARY3(pow, Eigen::half, float, double);
DEFINE_BINARY1(safe_pow_ignore_error, int64);
#endif

@@ -20,7 +20,7 @@ limitations under the License.
namespace tensorflow {
namespace functor {
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
DEFINE_BINARY8(right_shift, int8, int16, int32, int64, uint8, uint16, uint32,
uint64);
#else

@@ -20,7 +20,7 @@ limitations under the License.
namespace tensorflow {
namespace functor {
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
DEFINE_BINARY4(squared_difference, float, Eigen::half, double, int64);
#endif
} // namespace functor

@@ -21,7 +21,7 @@ REGISTER9(BinaryOp, CPU, "Greater", functor::greater, float, Eigen::half,
REGISTER3(BinaryOp, CPU, "Greater", functor::greater, int8, int16, bfloat16);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER6(BinaryOp, GPU, "Greater", functor::greater, float, Eigen::half,
double, int8, int16, int64);
REGISTER4(BinaryOp, GPU, "Greater", functor::greater, uint8, uint16, uint32,

@@ -22,7 +22,7 @@ REGISTER3(BinaryOp, CPU, "GreaterEqual", functor::greater_equal, int8, int16,
bfloat16);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER9(BinaryOp, GPU, "GreaterEqual", functor::greater_equal, float,
Eigen::half, double, int64, uint8, uint16, uint32, uint64, int8);
REGISTER(BinaryOp, GPU, "GreaterEqual", functor::greater_equal, int16);

@@ -22,7 +22,7 @@ REGISTER8(BinaryOp, CPU, "LeftShift", functor::left_shift, int8, int16, int32,
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER8(BinaryOp, GPU, "LeftShift", functor::left_shift, int8, int16, int32,
int64, uint8, uint16, uint32, uint64);
#else

@@ -23,7 +23,7 @@ REGISTER7(BinaryOp, CPU, "Less", functor::less, uint8, uint16, uint32, uint64,
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER9(BinaryOp, GPU, "Less", functor::less, float, Eigen::half, double,
int64, uint8, uint16, uint32, uint64, int8);
REGISTER(BinaryOp, GPU, "Less", functor::less, int16);

@@ -23,7 +23,7 @@ REGISTER7(BinaryOp, CPU, "LessEqual", functor::less_equal, int64, uint8, uint16,
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER9(BinaryOp, GPU, "LessEqual", functor::less_equal, float, Eigen::half,
double, int64, uint8, uint16, uint32, uint64, int8);
REGISTER(BinaryOp, GPU, "LessEqual", functor::less_equal, int16);

@@ -21,7 +21,7 @@ REGISTER3(UnaryOp, CPU, "Lgamma", functor::lgamma, float, Eigen::half, double);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER3(UnaryOp, GPU, "Lgamma", functor::lgamma, float, Eigen::half, double);
#endif
#endif

@@ -20,10 +20,10 @@ REGISTER_KERNEL_BUILDER(Name("LogicalAnd").Device(DEVICE_CPU),
BinaryOp<CPUDevice, functor::logical_and>);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER_KERNEL_BUILDER(Name("LogicalAnd").Device(DEVICE_GPU),
BinaryOp<GPUDevice, functor::logical_and>);
#endif // !MLIR_GENERATED_GPU_KERNELS_ENABLED ||
-// !MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED
+// !MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED
#endif
} // namespace tensorflow

@@ -20,10 +20,10 @@ REGISTER_KERNEL_BUILDER(Name("LogicalOr").Device(DEVICE_CPU),
BinaryOp<CPUDevice, functor::logical_or>);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER_KERNEL_BUILDER(Name("LogicalOr").Device(DEVICE_GPU),
BinaryOp<GPUDevice, functor::logical_or>);
#endif // !MLIR_GENERATED_GPU_KERNELS_ENABLED ||
-// !MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED
+// !MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED
#endif
} // namespace tensorflow

@@ -20,7 +20,7 @@ REGISTER8(BinaryOp, CPU, "Maximum", functor::maximum, float, Eigen::half,
bfloat16, double, uint8, int16, int32, int64);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER6(BinaryOp, GPU, "Maximum", functor::maximum, float, Eigen::half,
double, uint8, int16, int64);
#else

@@ -20,7 +20,7 @@ REGISTER8(BinaryOp, CPU, "Minimum", functor::minimum, float, Eigen::half,
bfloat16, double, uint8, int16, int32, int64);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER6(BinaryOp, GPU, "Minimum", functor::minimum, float, Eigen::half,
double, uint8, int16, int64);
#else

@@ -31,7 +31,7 @@ REGISTER(BinaryOp, CPU, "Mul", functor::mul, int32);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER4(BinaryOp, GPU, "Mul", functor::mul, Eigen::half, float, double,
uint8);
#else

@@ -26,7 +26,7 @@ REGISTER8(BinaryOp, CPU, "Mul", functor::mul, int8, uint16, uint32, uint64,
int16, int64, complex64, complex128);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER8(BinaryOp, GPU, "Mul", functor::mul, int8, uint16, uint32, uint64,
int16, int64, complex64, complex128);
#else

@@ -22,7 +22,7 @@ REGISTER7(BinaryOp, CPU, "NotEqual", functor::not_equal_to, uint16, uint32,
uint64, qint8, qint16, quint8, quint16);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER4(BinaryOp, GPU, "NotEqual", functor::not_equal_to, float, Eigen::half,
double, uint8);
#else

@@ -26,7 +26,7 @@ REGISTER6(BinaryOp, CPU, "NotEqual", functor::not_equal_to, int32, int64,
complex64, complex128, tstring, bool);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER6(BinaryOp, GPU, "NotEqual", functor::not_equal_to, int8, int16, int64,
complex64, complex128, bool);
#else

@@ -22,7 +22,7 @@ REGISTER4(BinaryOp, CPU, "Pow", functor::safe_pow, int8, int16, int32, int64);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER3(BinaryOp, GPU, "Pow", functor::pow, float, Eigen::half, double);
REGISTER(BinaryOp, GPU, "Pow", functor::safe_pow_ignore_error, int64);
#endif

@@ -22,7 +22,7 @@ REGISTER8(BinaryOp, CPU, "RightShift", functor::right_shift, int8, int16, int32,
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER8(BinaryOp, GPU, "RightShift", functor::right_shift, int8, int16, int32,
int64, uint8, uint16, uint32, uint64);
#else

@@ -20,7 +20,7 @@ REGISTER8(UnaryOp, CPU, "Sign", functor::sign, float, double, int32, int64,
complex64, Eigen::half, bfloat16, complex128);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER6(UnaryOp, GPU, "Sign", functor::sign, float, Eigen::half, double,
int64, complex64, complex128);
#else

@@ -21,7 +21,7 @@ REGISTER6(UnaryOp, CPU, "Sin", functor::sin, float, Eigen::half, bfloat16,
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER3(UnaryOp, GPU, "Sin", functor::sin, float, Eigen::half, double);
#endif
#endif

@@ -22,7 +22,7 @@ REGISTER5(UnaryOp, CPU, "Sinh", functor::sinh, float, double, bfloat16,
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER2(UnaryOp, GPU, "Sinh", functor::sinh, float, double);
#endif
#endif

@@ -21,7 +21,7 @@ REGISTER8(BinaryOp, CPU, "SquaredDifference", functor::squared_difference,
complex128);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER4(BinaryOp, GPU, "SquaredDifference", functor::squared_difference,
float, Eigen::half, double, int64);
#endif

@@ -31,7 +31,7 @@ REGISTER(BinaryOp, CPU, "Sub", functor::sub, int32);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
-!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
+!defined(MLIR_GENERATED_EXPERIMENTAL_KERNELS_ENABLED)
REGISTER8(BinaryOp, GPU, "Sub", functor::sub, float, Eigen::half, double, int64,
complex64, complex128, uint32, uint64);
#else