[MLIR][KernelGen] Add tf.Log1p kernel and tests

PiperOrigin-RevId: 351566460
Change-Id: I59f66909af1d32817de7eeca978f9343a952d649
This commit is contained in:
A. Unique TensorFlower 2021-01-13 05:36:19 -08:00 committed by TensorFlower Gardener
parent 31523e7982
commit 18f59db276
7 changed files with 61 additions and 0 deletions
tensorflow
compiler/mlir/hlo
include/mlir-hlo/Dialect/mhlo/transforms
lib/Dialect/mhlo/transforms
core/kernels

View File

@@ -60,6 +60,7 @@ MAP_HLO_TO_LHLO(ImagOp);
MAP_HLO_TO_LHLO(IotaOp);
MAP_HLO_TO_LHLO(IsFiniteOp);
MAP_HLO_TO_LHLO(LogOp);
MAP_HLO_TO_LHLO(Log1pOp);
MAP_HLO_TO_LHLO(MaxOp);
MAP_HLO_TO_LHLO(MinOp);
MAP_HLO_TO_LHLO(MulOp);

View File

@@ -429,6 +429,18 @@ inline Value MapLhloOpToStdScalarOp<lmhlo::LogOp>(Location loc,
loc, result_types, args, b);
}
template <>
inline Value MapLhloOpToStdScalarOp<lmhlo::Log1pOp>(Location loc,
                                                    ArrayRef<Type> result_types,
                                                    ArrayRef<Value> args,
                                                    OpBuilder* b) {
  // Lowers lmhlo.log1p to standard-dialect scalar ops.
  //
  // The naive expansion log(1 + x) loses all significant digits when |x| is
  // small: 1 + x rounds to exactly 1 and the result collapses to 0 instead of
  // ~x. Use the standard compensation (Kahan):
  //   u = 1 + x
  //   log1p(x) = x                     if u == 1  (x below half an ulp of 1)
  //            = x * log(u) / (u - 1)  otherwise
  // The factor x / (u - 1) cancels the rounding error incurred in forming u.
  auto ty = result_types.front().cast<FloatType>();
  Value x = args.front();
  Value one = b->create<ConstantOp>(loc, b->getFloatAttr(ty, 1.0));
  Value u = b->create<AddFOp>(loc, x, one);
  Value log_u = b->create<::mlir::LogOp>(loc, u);
  Value u_minus_one = b->create<SubFOp>(loc, u, one);
  Value corrected =
      b->create<DivFOp>(loc, b->create<MulFOp>(loc, x, log_u), u_minus_one);
  // Ordered-equal: NaN inputs fall through to the `corrected` branch, which
  // propagates NaN as required.
  Value u_is_one = b->create<CmpFOp>(loc, CmpFPredicate::OEQ, u, one);
  return b->create<::mlir::SelectOp>(loc, u_is_one, x, corrected);
}
template <>
inline Value MapLhloOpToStdScalarOp<lmhlo::MaxOp>(Location loc,
ArrayRef<Type> result_types,

View File

@@ -1239,6 +1239,7 @@ void populateLHLOToLinalgConversionPattern(MLIRContext* context,
PointwiseToLinalgConverter<lmhlo::ImagOp>,
PointwiseToLinalgConverter<lmhlo::IsFiniteOp>,
PointwiseToLinalgConverter<lmhlo::LogOp>,
PointwiseToLinalgConverter<lmhlo::Log1pOp>,
PointwiseToLinalgConverter<lmhlo::MaxOp>,
PointwiseToLinalgConverter<lmhlo::MinOp>,
PointwiseToLinalgConverter<lmhlo::MulOp>,
@@ -1359,6 +1360,7 @@ void populateHLOToLinalgConversionPattern(MLIRContext* context,
PointwiseToLinalgConverter<mhlo::ImagOp, false>,
PointwiseToLinalgConverter<mhlo::IsFiniteOp, false>,
PointwiseToLinalgConverter<mhlo::LogOp, false>,
PointwiseToLinalgConverter<mhlo::Log1pOp, false>,
PointwiseToLinalgConverter<mhlo::MaxOp, false>,
PointwiseToLinalgConverter<mhlo::MinOp, false>,
PointwiseToLinalgConverter<mhlo::MulOp, false>,

View File

@@ -20,7 +20,10 @@ REGISTER6(UnaryOp, CPU, "Log1p", functor::log1p, float, Eigen::half, bfloat16,
double, complex64, complex128);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#if !defined(MLIR_GENERATED_GPU_KERNELS_ENABLED) || \
!defined(MLIR_GENERATED_EXPERIMENTAL_GPU_KERNELS_ENABLED)
REGISTER3(UnaryOp, GPU, "Log1p", functor::log1p, float, Eigen::half, double);
#endif
#endif
} // namespace tensorflow

View File

@@ -57,6 +57,7 @@ filegroup(
"gpu_op_imag.cc",
"gpu_op_is_inf.cc",
"gpu_op_log.cc",
"gpu_op_log1p.cc",
"gpu_op_logical_not.cc",
"gpu_op_neg.cc",
"gpu_op_real.cc",
@@ -118,6 +119,7 @@ tf_kernel_library(
":imag_kernels",
":is_inf_kernels",
":log_kernels",
":log1p_kernels",
":logical_not_kernels",
":neg_kernels",
":real_kernels",
@@ -621,6 +623,7 @@ gen_kernel_library(
"floor",
"is_finite",
"log",
"log1p",
"rsqrt",
"sqrt",
"tanh",

View File

@@ -0,0 +1,25 @@
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/kernels/mlir_generated/gpu_ops_base.h"
namespace tensorflow {
// Instantiates and registers the MLIR-generated GPU kernels for tf.Log1p,
// one per supported floating-point element type. The macro arguments are
// presumably (op name, MLIR type suffix, TF DataType enum, C++ element type)
// — the macro itself lives in gpu_ops_base.h; confirm there. Matches the
// float/half/double coverage of the Eigen GPU registration this replaces.
GENERATE_AND_REGISTER_UNARY_KERNEL(Log1p, f16, DT_HALF, Eigen::half);
GENERATE_AND_REGISTER_UNARY_KERNEL(Log1p, f32, DT_FLOAT, float);
GENERATE_AND_REGISTER_UNARY_KERNEL(Log1p, f64, DT_DOUBLE, double);
} // namespace tensorflow

View File

@@ -296,6 +296,21 @@ GENERATE_DEFAULT_TEST_WITH_SPECIFIC_INPUT_VALUES_2(
test::DefaultInputGreaterThanZero<Eigen::half>(), std::log,
test::GpuOpsTestConfig())
/// Test `tf.Log1p`.
GENERATE_DEFAULT_TEST_WITH_SPECIFIC_INPUT_VALUES(
Log1p, DT_FLOAT, DT_FLOAT, test::DefaultInputGreaterThanZero<float>(),
std::log1p, test::GpuOpsTestConfig())
GENERATE_DEFAULT_TEST_WITH_SPECIFIC_INPUT_VALUES(
Log1p, DT_DOUBLE, DT_DOUBLE, test::DefaultInputGreaterThanZero<double>(),
std::log1p, test::GpuOpsTestConfig())
GENERATE_DEFAULT_TEST_WITH_SPECIFIC_INPUT_VALUES_2(
Log1p, DT_HALF, DT_FLOAT, DT_HALF, DT_FLOAT,
test::DefaultInputGreaterThanZero<Eigen::half>(), std::log1p,
test::GpuOpsTestConfig())
/// Test `tf.LogicalNot`
bool baseline_logical_not(bool x) { return !x; }