diff --git a/tensorflow/core/kernels/mlir_generated/gpu_unary_ops_test.cc b/tensorflow/core/kernels/mlir_generated/gpu_unary_ops_test.cc
index 40d58a6b48f..16331bfd044 100644
--- a/tensorflow/core/kernels/mlir_generated/gpu_unary_ops_test.cc
+++ b/tensorflow/core/kernels/mlir_generated/gpu_unary_ops_test.cc
@@ -49,18 +49,20 @@ class GpuUnaryOpTest : public OpsTestBase {
   // function. In most cases it is enough to just provide the input type,
   // because all the types are the same.
   template <typename T, typename RT = T, typename OutT = T,
             typename ROutT = RT>
-  void Run(std::vector<int64> input_shape, std::vector<T> input,
+  void Run(std::vector<int64> input_shape, absl::InlinedVector<T, 10> input,
            const std::string op_name, ROutT (*expected_callback)(RT),
            bool expect_equal = true, bool add_tout = false,
-           bool expect_buffer_reuse = true) {
+           bool expect_buffer_reuse = true, bool add_t = true) {
     assert(std::accumulate(input_shape.begin(), input_shape.end(), 1,
                            std::multiplies<int64>()) == input.size() &&
            "Expected input length to equal to shape's number of elements.");
     TensorShape shape(input_shape);
     NodeDefBuilder builder("some_name", op_name);
-    builder.Input(FakeInput(DataTypeToEnum<T>::v()))
-        .Attr("T", DataTypeToEnum<T>::v());
+    builder.Input(FakeInput(DataTypeToEnum<T>::v()));
+    if (add_t) {
+      builder.Attr("T", DataTypeToEnum<T>::v());
+    }
     if (add_tout) {
       builder.Attr("Tout", DataTypeToEnum<OutT>::v());
     }
@@ -98,15 +100,15 @@ class GpuUnaryOpTest : public OpsTestBase {
   std::vector<int64> DefaultInputShape() { return std::vector<int64>{2, 7}; }
 
   template <typename T>
-  std::vector<T> DefaultInput() {
+  absl::InlinedVector<T, 10> DefaultInput() {
     return InputAsVector<T>({-18.0, -9.0, -1e-6, -0.0, 0.0, 1e-6, 0.1, 0.2, 0.3,
                              0.5, 0.7, 0.9, 9.0, 18.0});
   }
 
   template <typename T>
-  std::vector<std::complex<T>> DefaultComplexInput() {
+  absl::InlinedVector<std::complex<T>, 10> DefaultComplexInput() {
     auto input = DefaultInput<T>();
-    std::vector<std::complex<T>> complex_input;
+    absl::InlinedVector<std::complex<T>, 10> complex_input;
     for (T value : input) {
       complex_input.emplace_back(value, -value);
     }
@@ -114,21 +116,22 @@ class GpuUnaryOpTest : public OpsTestBase {
   }
 
   template <typename T>
-  std::vector<T> DefaultInputGreaterThanZero() {
+  absl::InlinedVector<T, 10> DefaultInputGreaterThanZero() {
     return InputAsVector<T>({18.0, 9.0, 1e-6, 1.0, 0.1, 1e-6, 0.1, 0.2, 0.3,
                              0.5, 0.7, 0.9, 9.0, 18.0});
   }
 
   template <typename T>
-  std::vector<T> DefaultInputGreaterOrEqualToZero() {
+  absl::InlinedVector<T, 10> DefaultInputGreaterOrEqualToZero() {
     return InputAsVector<T>({18.0, 9.0, 1e-6, 0.0, 0.1, 1e-6, 0.1, 0.2, 0.3,
                              0.5, 0.7, 0.9, 9.0, 18.0});
   }
 
  private:
   template <typename T>
-  std::vector<T> InputAsVector(std::initializer_list<double> input) {
-    std::vector<T> result;
+  absl::InlinedVector<T, 10> InputAsVector(
+      std::initializer_list<double> input) {
+    absl::InlinedVector<T, 10> result;
     result.reserve(input.size());
     for (const auto& value : input) {
       result.push_back(static_cast<T>(value));
@@ -386,6 +389,19 @@ TEST_F(GpuUnaryOpTest, LogHalf) {
                          /*expect_equal=*/false);
 }
 
+/// Test `tf.LogicalNot`
+
+TEST_F(GpuUnaryOpTest, LogicalNot) {
+  Run<bool, bool, bool, bool>(
+      DefaultInputShape(), DefaultInput<bool>(),
+      /*op_name=*/"LogicalNot",
+      /*expected_callback=*/[](bool v) { return !v; },
+      /*expect_equal=*/true,
+      /*add_tout=*/false,
+      /*expect_buffer_reuse=*/true,
+      /*add_t=*/false);
+}
+
 /// Test `tf.Neg`.
 
 /// Reference implementation.
@@ -415,6 +431,27 @@ TEST_F(GpuUnaryOpTest, NegHalf) {
                          /*expect_equal=*/false);
 }
 
+TEST_F(GpuUnaryOpTest, NegInt8) {
+  Run<int8>(DefaultInputShape(), DefaultInput<int8>(),
+            /*op_name=*/"Neg",
+            /*expected_callback=*/expected_neg,
+            /*expect_equal=*/true);
+}
+
+TEST_F(GpuUnaryOpTest, NegInt16) {
+  Run<int16>(DefaultInputShape(), DefaultInput<int16>(),
+             /*op_name=*/"Neg",
+             /*expected_callback=*/expected_neg,
+             /*expect_equal=*/true);
+}
+
+TEST_F(GpuUnaryOpTest, NegInt64) {
+  Run<int64>(DefaultInputShape(), DefaultInput<int64>(),
+             /*op_name=*/"Neg",
+             /*expected_callback=*/expected_neg,
+             /*expect_equal=*/true);
+}
+
 /// Test `tf.Real`.
 
 TEST_F(GpuUnaryOpTest, RealFloat) {