diff --git a/tensorflow/core/kernels/mlir_generated/BUILD b/tensorflow/core/kernels/mlir_generated/BUILD
index 69eb3f9233f..8c2318a17a7 100644
--- a/tensorflow/core/kernels/mlir_generated/BUILD
+++ b/tensorflow/core/kernels/mlir_generated/BUILD
@@ -305,13 +305,10 @@ tf_cuda_cc_test(
     ],
 )
 
-tf_cuda_cc_test(
-    name = "gpu_binary_ops_test",
-    size = "medium",
-    srcs = if_mlir_generated_gpu_kernels_enabled(["gpu_binary_ops_test.cc"]),
-    tags = tf_cuda_tests_tags() + [
-        "no_cuda_asan",  # b/173033461
-    ],
+cc_library(
+    name = "base_binary_ops_test",
+    testonly = 1,
+    hdrs = ["base_binary_ops_test.h"],
     deps = [
         ":base_ops_test",
         "//tensorflow/core:framework",
@@ -332,6 +329,21 @@ tf_cuda_cc_test(
     ],
 )
 
+tf_cuda_cc_test(
+    name = "gpu_binary_ops_test",
+    size = "medium",
+    srcs = if_mlir_generated_gpu_kernels_enabled(["gpu_binary_ops_test.cc"]),
+    tags = tf_cuda_tests_tags() + [
+        "no_cuda_asan",  # b/173033461
+    ],
+    deps = [
+        ":base_binary_ops_test",
+        ":base_ops_test",
+        "//tensorflow/core/common_runtime:device",
+        "//tensorflow/core/common_runtime:device_factory",
+    ],
+)
+
 # TODO(b/160731748): Re-enable when it works again.
 # gen_kernel_library(
 #     name = "bias_add",
diff --git a/tensorflow/core/kernels/mlir_generated/base_binary_ops_test.h b/tensorflow/core/kernels/mlir_generated/base_binary_ops_test.h
new file mode 100644
index 00000000000..05551185914
--- /dev/null
+++ b/tensorflow/core/kernels/mlir_generated/base_binary_ops_test.h
@@ -0,0 +1,370 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_CORE_KERNELS_MLIR_GENERATED_BASE_BINARY_OPS_TEST_H_
+#define TENSORFLOW_CORE_KERNELS_MLIR_GENERATED_BASE_BINARY_OPS_TEST_H_
+
+#include "absl/container/inlined_vector.h"
+#include "absl/strings/string_view.h"
+#include "llvm/ADT/STLExtras.h"
+#include "tensorflow/core/common_runtime/device.h"
+#include "tensorflow/core/common_runtime/device_factory.h"
+#include "tensorflow/core/framework/fake_input.h"
+#include "tensorflow/core/framework/node_def_builder.h"
+#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/framework/tensor_shape.h"
+#include "tensorflow/core/kernels/mlir_generated/base_ops_test.h"
+#include "tensorflow/core/kernels/ops_testutil.h"
+#include "tensorflow/core/lib/core/status_test_util.h"
+#include "tensorflow/core/platform/test.h"
+
+namespace tensorflow {
+
+// Base class for the `BinaryOpsTest` fixture, which has to be defined with a
+// custom TF device if you want to use the test macros in this file.
+class BinaryOpsTestBase : public OpsTestBase {
+ protected:
+  // This method should set the TF device, e.g. DEVICE_CPU, DEVICE_GPU.
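+  // For illustration, a GPU-backed fixture (as in gpu_binary_ops_test.cc
+  // below) can override it roughly like this:
+  //
+  //   void SetUp() override {
+  //     std::unique_ptr<tensorflow::Device> device_gpu(
+  //         tensorflow::DeviceFactory::NewDevice("GPU", {},
+  //                                              "/job:a/replica:0/task:0"));
+  //     SetDevice(tensorflow::DEVICE_GPU, std::move(device_gpu));
+  //   }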
+  void SetUp() override = 0;
+
+  template <typename T, typename OutT>
+  void SetOpKernel(const std::string& op_name, const TensorShape& lhs_shape,
+                   const absl::InlinedVector<T, 10>& lhs_input,
+                   const TensorShape& rhs_shape,
+                   const absl::InlinedVector<T, 10>& rhs_input, bool add_t,
+                   bool add_tout) {
+    auto builder = NodeDefBuilder("some_name", op_name)
+                       .Input(FakeInput(DataTypeToEnum<T>::v()))
+                       .Input(FakeInput(DataTypeToEnum<T>::v()));
+    if (add_t) {
+      builder.Attr("T", DataTypeToEnum<T>::v());
+    }
+    if (add_tout) {
+      builder.Attr("Tout", DataTypeToEnum<OutT>::v());
+    }
+    TF_ASSERT_OK(builder.Finalize(node_def()));
+
+    TF_ASSERT_OK(InitOp());
+    AddInputFromArray<T>(lhs_shape, lhs_input);
+    AddInputFromArray<T>(rhs_shape, rhs_input);
+  }
+
+  // Run fully specified tests.
+
+  template <typename T, typename OutT>
+  void RunAndExpectResult(const std::string& op_name,
+                          const TensorShape& lhs_shape,
+                          const absl::InlinedVector<T, 10>& lhs_input,
+                          const TensorShape& rhs_shape,
+                          const absl::InlinedVector<T, 10>& rhs_input,
+                          const TensorShape& expected_shape,
+                          const absl::InlinedVector<OutT, 10>& expected_output,
+                          const test::OpsTestConfig& config) {
+    SetOpKernel<T, OutT>(op_name, lhs_shape, lhs_input, rhs_shape, rhs_input,
+                         config.add_t, config.add_tout);
+    TF_ASSERT_OK(RunOpKernel());
+
+    // Compare output to expectation.
+    Tensor expected_tensor(allocator(), DataTypeToEnum<OutT>::value,
+                           expected_shape);
+    test::FillValues<OutT>(&expected_tensor, expected_output);
+    if (config.expect_strictly_equal) {
+      test::ExpectEqual(expected_tensor, *GetOutput(0));
+    } else {
+      test::ExpectClose(expected_tensor, *GetOutput(0));
+    }
+  }
+
+  template <typename T, typename OutT>
+  void RunAndExpectInvalidArgument(const std::string& op_name,
+                                   const TensorShape& lhs_shape,
+                                   const absl::InlinedVector<T, 10>& lhs_input,
+                                   const TensorShape& rhs_shape,
+                                   const absl::InlinedVector<T, 10>& rhs_input,
+                                   const test::OpsTestConfig& config) {
+    SetOpKernel<T, OutT>(op_name, lhs_shape, lhs_input, rhs_shape, rhs_input,
+                         config.add_t, config.add_tout);
+    auto status = RunOpKernel();
+    EXPECT_FALSE(status.ok());
+    EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
+  }
+
+  // Run common test cases.
+
+  template <typename T, typename OutT>
+  void TestIncompatibleShapes(const std::string& op_name,
+                              const absl::InlinedVector<T, 10>& lhs_input,
+                              const absl::InlinedVector<T, 10>& rhs_input,
+                              const test::OpsTestConfig& config) {
+    // Prepare incompatibly shaped inputs.
+    TensorShape lhs_shape{3};
+    TensorShape rhs_shape{2};
+    auto repeated_lhs_input =
+        test::RepeatInputToMatchShape(lhs_input, lhs_shape.num_elements());
+    auto repeated_rhs_input =
+        test::RepeatInputToMatchShape(rhs_input, rhs_shape.num_elements());
+
+    RunAndExpectInvalidArgument<T, OutT>(op_name, lhs_shape, repeated_lhs_input,
+                                         rhs_shape, repeated_rhs_input, config);
+  }
+
+  template <typename T, typename BaselineT, typename OutT,
+            typename BaselineOutT>
+  void TestEqualShapes(const std::string& op_name, const TensorShape& shape,
+                       const absl::InlinedVector<T, 10>& lhs_input,
+                       const absl::InlinedVector<T, 10>& rhs_input,
+                       BaselineOutT (*baseline_callback)(BaselineT, BaselineT),
+                       const test::OpsTestConfig& config) {
+    // Prepare inputs.
+    int input_size = shape.num_elements();
+    CHECK(lhs_input.size() <= input_size && rhs_input.size() <= input_size &&
+          "expect input shape to hold all input values");
+    auto repeated_lhs_input =
+        test::RepeatInputToMatchShape(lhs_input, input_size);
+    auto repeated_rhs_input =
+        test::RepeatInputToMatchShape(rhs_input, input_size);
+
+    // Compute expected results.
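+    // The operands are cast to `BaselineT` before calling `baseline_callback`
+    // and its result is cast to `OutT`, so the baseline can be evaluated in a
+    // different (typically wider) type than the kernel under test.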
+    absl::InlinedVector<OutT, 10> expected_output;
+    for (auto it_lhs = repeated_lhs_input.begin(),
+              it_rhs = repeated_rhs_input.begin(),
+              end = repeated_lhs_input.end();
+         it_lhs != end; ++it_lhs, ++it_rhs) {
+      auto lhs = static_cast<BaselineT>(*it_lhs);
+      auto rhs = static_cast<BaselineT>(*it_rhs);
+      auto result = static_cast<OutT>(baseline_callback(lhs, rhs));
+      expected_output.push_back(result);
+    }
+
+    RunAndExpectResult<T, OutT>(op_name, shape, repeated_lhs_input, shape,
+                                repeated_rhs_input, shape, expected_output,
+                                config);
+  }
+
+  template <typename T, typename BaselineT, typename OutT,
+            typename BaselineOutT>
+  void TestOneScalar(const std::string& op_name, T scalar_input,
+                     const TensorShape& other_shape,
+                     const absl::InlinedVector<T, 10>& other_input,
+                     BaselineOutT (*baseline_callback)(BaselineT, BaselineT),
+                     const test::OpsTestConfig& config) {
+    // Prepare inputs.
+    TensorShape scalar_shape{};
+    CHECK(other_input.size() <= other_shape.num_elements() &&
+          "expect other input shape to hold all input values");
+    auto repeated_other_input =
+        test::RepeatInputToMatchShape(other_input, other_shape.num_elements());
+
+    // Compute expected results.
+    absl::InlinedVector<OutT, 10> expected_output;
+    for (auto it = repeated_other_input.begin(),
+              end = repeated_other_input.end();
+         it != end; ++it) {
+      auto scalar = static_cast<BaselineT>(scalar_input);
+      auto other_value = static_cast<BaselineT>(*it);
+      auto result = static_cast<OutT>(baseline_callback(scalar, other_value));
+      expected_output.push_back(result);
+    }
+
+    auto scalar_input_vector = test::InputAsVector<T>({scalar_input});
+    RunAndExpectResult<T, OutT>(op_name, scalar_shape, scalar_input_vector,
+                                other_shape, repeated_other_input,
+                                /*expected_shape=*/other_shape, expected_output,
+                                config);
+  }
+
+  template <typename T, typename BaselineT, typename OutT,
+            typename BaselineOutT>
+  void TestBroadcastingExpand(const std::string& op_name,
+                              const absl::InlinedVector<T, 10>& lhs_input,
+                              const absl::InlinedVector<T, 10>& rhs_input,
+                              BaselineOutT (*baseline_callback)(BaselineT,
+                                                                BaselineT),
+                              const test::OpsTestConfig& config) {
+    // Prepare inputs.
+    TensorShape lhs_shape{1};
+    TensorShape rhs_shape{6};
+    auto repeated_lhs_input =
+        test::RepeatInputToMatchShape(lhs_input, lhs_shape.num_elements());
+    auto repeated_rhs_input =
+        test::RepeatInputToMatchShape(rhs_input, rhs_shape.num_elements());
+
+    // Compute expected results.
+    std::vector<int> lhs_indices = {0, 0, 0, 0, 0, 0};
+    std::vector<int> rhs_indices = {0, 1, 2, 3, 4, 5};
+    auto expected_output =
+        ComputeExpectedOutput<T, BaselineT, OutT, BaselineOutT>(
+            lhs_indices, repeated_lhs_input, rhs_indices, repeated_rhs_input,
+            baseline_callback);
+
+    RunAndExpectResult<T, OutT>(
+        op_name, lhs_shape, repeated_lhs_input, rhs_shape, repeated_rhs_input,
+        /*expected_shape=*/rhs_shape, expected_output, config);
+  }
+
+  template <typename T, typename BaselineT, typename OutT,
+            typename BaselineOutT>
+  void TestBroadcastingInDim(const std::string& op_name,
+                             const absl::InlinedVector<T, 10>& lhs_input,
+                             const absl::InlinedVector<T, 10>& rhs_input,
+                             BaselineOutT (*baseline_callback)(BaselineT,
+                                                               BaselineT),
+                             const test::OpsTestConfig& config) {
+    // Prepare inputs.
+    TensorShape lhs_shape{3};
+    TensorShape rhs_shape{2, 3};
+    auto repeated_lhs_input =
+        test::RepeatInputToMatchShape(lhs_input, lhs_shape.num_elements());
+    auto repeated_rhs_input =
+        test::RepeatInputToMatchShape(rhs_input, rhs_shape.num_elements());
+
+    // Compute expected results.
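+    // `lhs` (shape {3}) is broadcast along the leading dimension of `rhs`
+    // (shape {2, 3}), so every `lhs` element is reused once per row of `rhs`.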
+    std::vector<int> lhs_indices = {0, 1, 2, 0, 1, 2};
+    std::vector<int> rhs_indices = {0, 1, 2, 3, 4, 5};
+    auto expected_output =
+        ComputeExpectedOutput<T, BaselineT, OutT, BaselineOutT>(
+            lhs_indices, repeated_lhs_input, rhs_indices, repeated_rhs_input,
+            baseline_callback);
+
+    RunAndExpectResult<T, OutT>(
+        op_name, lhs_shape, repeated_lhs_input, rhs_shape, repeated_rhs_input,
+        /*expected_shape=*/rhs_shape, expected_output, config);
+  }
+
+  template <typename T, typename BaselineT, typename OutT,
+            typename BaselineOutT>
+  void TestBroadcasting(const std::string& op_name,
+                        const absl::InlinedVector<T, 10>& lhs_input,
+                        const absl::InlinedVector<T, 10>& rhs_input,
+                        BaselineOutT (*baseline_callback)(BaselineT, BaselineT),
+                        const test::OpsTestConfig& config) {
+    // Prepare inputs.
+    TensorShape lhs_shape{2, 1};
+    TensorShape rhs_shape{3};
+    auto repeated_lhs_input =
+        test::RepeatInputToMatchShape(lhs_input, lhs_shape.num_elements());
+    auto repeated_rhs_input =
+        test::RepeatInputToMatchShape(rhs_input, rhs_shape.num_elements());
+
+    // Compute expected results.
+    TensorShape expected_shape{2, 3};
+    std::vector<int> lhs_indices = {0, 0, 0, 1, 1, 1};
+    std::vector<int> rhs_indices = {0, 1, 2, 0, 1, 2};
+    auto expected_output =
+        ComputeExpectedOutput<T, BaselineT, OutT, BaselineOutT>(
+            lhs_indices, repeated_lhs_input, rhs_indices, repeated_rhs_input,
+            baseline_callback);
+
+    RunAndExpectResult<T, OutT>(op_name, lhs_shape, repeated_lhs_input,
+                                rhs_shape, repeated_rhs_input, expected_shape,
+                                expected_output, config);
+  }
+
+  template <typename T, typename OutT>
+  void TestEmptyShapeBroadcasting(const std::string& op_name,
+                                  const absl::InlinedVector<T, 10>& lhs_input,
+                                  const absl::InlinedVector<T, 10>& rhs_input,
+                                  const test::OpsTestConfig& config) {
+    // Prepare inputs.
+    TensorShape lhs_shape{2, 0, 1};
+    TensorShape rhs_shape{2, 0, 5};
+    absl::InlinedVector<T, 10> empty_input = {};
+
+    // Define expected result.
+    TensorShape expected_shape{2, 0, 5};
+    absl::InlinedVector<OutT, 10> expected_output = {};
+
+    RunAndExpectResult<T, OutT>(op_name, lhs_shape, empty_input, rhs_shape,
+                                empty_input, expected_shape, expected_output,
+                                config);
+  }
+
+ private:
+  template <typename T, typename BaselineT, typename OutT,
+            typename BaselineOutT>
+  absl::InlinedVector<OutT, 10> ComputeExpectedOutput(
+      std::vector<int> lhs_indices, absl::InlinedVector<T, 10> lhs_input,
+      std::vector<int> rhs_indices, absl::InlinedVector<T, 10> rhs_input,
+      BaselineOutT (*baseline_callback)(BaselineT, BaselineT)) {
+    absl::InlinedVector<OutT, 10> expected_output;
+    for (int i = 0; i < lhs_indices.size(); i++) {
+      auto lhs = static_cast<BaselineT>(lhs_input[lhs_indices[i]]);
+      auto rhs = static_cast<BaselineT>(rhs_input[rhs_indices[i]]);
+      auto result = static_cast<OutT>(baseline_callback(lhs, rhs));
+      expected_output.push_back(result);
+    }
+    return expected_output;
+  }
+};
+
+// Macros to easily generate common test cases. The macros use the
+// `BinaryOpsTest` fixture in order to share the implementation across GPU and
+// CPU platform tests. For specific inputs, please define your own test
+// fixtures.
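+//
+// For example, a test file could instantiate the default test cases for an op
+// roughly as follows (illustrative only; `baseline_add` is a placeholder for a
+// plain function that computes the expected result):
+//
+//   GENERATE_DEFAULT_TESTS(AddV2, /*test_name=*/Float, float, float,
+//                          baseline_add)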
+#define GENERATE_DEFAULT_TESTS_2(op_name, test_name, T, BaselineT, OutT, \
+                                 BaselineOutT, lhs_input, rhs_input, \
+                                 baseline_callback, config) \
+  TEST_F(BinaryOpsTest, op_name##EqShapes##test_name) { \
+    TestEqualShapes<T, BaselineT, OutT, BaselineOutT>( \
+        #op_name, /*shape=*/test::DefaultInputShape(), lhs_input, rhs_input, \
+        baseline_callback, config); \
+  } \
+  \
+  TEST_F(BinaryOpsTest, op_name##OneScalar##test_name) { \
+    TestOneScalar<T, BaselineT, OutT, BaselineOutT>( \
+        #op_name, /*scalar_input=*/lhs_input.front(), \
+        /*other_shape=*/test::DefaultInputShape(), /*other_input=*/rhs_input, \
+        baseline_callback, config); \
+  } \
+  \
+  TEST_F(BinaryOpsTest, op_name##IncompatibleShapes##test_name) { \
+    TestIncompatibleShapes<T, OutT>(#op_name, lhs_input, rhs_input, config); \
+  } \
+  \
+  TEST_F(BinaryOpsTest, op_name##BroadcastingExpand##test_name) { \
+    TestBroadcastingExpand<T, BaselineT, OutT, BaselineOutT>( \
+        #op_name, lhs_input, rhs_input, baseline_callback, config); \
+  } \
+  \
+  TEST_F(BinaryOpsTest, op_name##BroadcastingInDim##test_name) { \
+    TestBroadcastingInDim<T, BaselineT, OutT, BaselineOutT>( \
+        #op_name, lhs_input, rhs_input, baseline_callback, config); \
+  } \
+  \
+  TEST_F(BinaryOpsTest, op_name##Broadcasting##test_name) { \
+    TestBroadcasting<T, BaselineT, OutT, BaselineOutT>( \
+        #op_name, lhs_input, rhs_input, baseline_callback, config); \
+  } \
+  \
+  TEST_F(BinaryOpsTest, op_name##EmptyShapeBroadcasting##test_name) { \
+    TestEmptyShapeBroadcasting<T, OutT>( \
+        #op_name, lhs_input, rhs_input, config); \
+  }
+
+#define GENERATE_DEFAULT_TESTS(op_name, test_name, T, OutT, baseline_callback) \
+  GENERATE_DEFAULT_TESTS_2(op_name, test_name, T, T, OutT, OutT, \
+                           test::DefaultInput<T>(), test::DefaultInput<T>(), \
+                           baseline_callback, \
+                           test::OpsTestConfig().ExpectStrictlyEqual())
+
+#define GENERATE_DEFAULT_TESTS_WITH_SPECIFIC_INPUT_VALUES( \
+    op_name, test_name, T, OutT, lhs_input, rhs_input, baseline_callback) \
+  GENERATE_DEFAULT_TESTS_2(op_name, test_name, T, T, OutT, OutT, lhs_input, \
+                           rhs_input, baseline_callback, \
+                           test::OpsTestConfig().ExpectStrictlyEqual())
+
+}  // namespace tensorflow
+
+#endif  // TENSORFLOW_CORE_KERNELS_MLIR_GENERATED_BASE_BINARY_OPS_TEST_H_
diff --git a/tensorflow/core/kernels/mlir_generated/gpu_binary_ops_test.cc b/tensorflow/core/kernels/mlir_generated/gpu_binary_ops_test.cc
index 9ab8123e839..f70681af00a 100644
--- a/tensorflow/core/kernels/mlir_generated/gpu_binary_ops_test.cc
+++ b/tensorflow/core/kernels/mlir_generated/gpu_binary_ops_test.cc
@@ -13,29 +13,17 @@ See the License for the specific language governing permissions and
 limitations under the License.
 ==============================================================================*/
 
-#include
-#include
-#include
-#include
-
-#include "absl/container/inlined_vector.h"
-#include "absl/strings/string_view.h"
-#include "llvm/ADT/STLExtras.h"
 #include "tensorflow/core/common_runtime/device.h"
 #include "tensorflow/core/common_runtime/device_factory.h"
-#include "tensorflow/core/framework/fake_input.h"
-#include "tensorflow/core/framework/node_def_builder.h"
-#include "tensorflow/core/framework/tensor.h"
-#include "tensorflow/core/framework/tensor_shape.h"
+#include "tensorflow/core/kernels/mlir_generated/base_binary_ops_test.h"
 #include "tensorflow/core/kernels/mlir_generated/base_ops_test.h"
-#include "tensorflow/core/kernels/ops_testutil.h"
-#include "tensorflow/core/lib/core/status_test_util.h"
-#include "tensorflow/core/platform/test.h"
 
 namespace tensorflow {
 namespace {
 
-class GpuBinaryOpTest : public OpsTestBase {
+// The TEST macros below expect a test fixture named `BinaryOpsTest` that sets
+// the TF device.
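+// A CPU counterpart could share the same test cases by defining its own
+// `BinaryOpsTest` fixture that derives from `BinaryOpsTestBase` and sets a CPU
+// device in `SetUp`.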
+class BinaryOpsTest : public BinaryOpsTestBase { protected: void SetUp() override { std::unique_ptr device_gpu( @@ -43,333 +31,8 @@ class GpuBinaryOpTest : public OpsTestBase { "/job:a/replica:0/task:0")); SetDevice(tensorflow::DEVICE_GPU, std::move(device_gpu)); } - - template - void SetOpKernel(const std::string& op_name, const TensorShape& lhs_shape, - const absl::InlinedVector& lhs_input, - const TensorShape& rhs_shape, - const absl::InlinedVector& rhs_input, bool add_t, - bool add_tout) { - auto builder = NodeDefBuilder("some_name", op_name) - .Input(FakeInput(DataTypeToEnum::v())) - .Input(FakeInput(DataTypeToEnum::v())); - if (add_t) { - builder.Attr("T", DataTypeToEnum::v()); - } - if (add_tout) { - builder.Attr("Tout", DataTypeToEnum::v()); - } - TF_ASSERT_OK(builder.Finalize(node_def())); - - TF_ASSERT_OK(InitOp()); - AddInputFromArray(lhs_shape, lhs_input); - AddInputFromArray(rhs_shape, rhs_input); - } - - // Run fully specified tests. - - template - void RunAndExpectResult(const std::string& op_name, - const TensorShape& lhs_shape, - const absl::InlinedVector& lhs_input, - const TensorShape& rhs_shape, - const absl::InlinedVector& rhs_input, - const TensorShape& expected_shape, - const absl::InlinedVector& expected_output, - const test::OpsTestConfig& config) { - SetOpKernel(op_name, lhs_shape, lhs_input, rhs_shape, rhs_input, - config.add_t, config.add_tout); - TF_ASSERT_OK(RunOpKernel()); - - // Compare output to expectation. - Tensor expected_tensor(allocator(), DataTypeToEnum::value, - expected_shape); - test::FillValues(&expected_tensor, expected_output); - if (config.expect_strictly_equal) { - test::ExpectEqual(expected_tensor, *GetOutput(0)); - } else { - test::ExpectClose(expected_tensor, *GetOutput(0)); - } - } - - template - void RunAndExpectInvalidArgument(const std::string& op_name, - const TensorShape& lhs_shape, - const absl::InlinedVector& lhs_input, - const TensorShape& rhs_shape, - const absl::InlinedVector& rhs_input, - const test::OpsTestConfig& config) { - SetOpKernel(op_name, lhs_shape, lhs_input, rhs_shape, rhs_input, - config.add_t, config.add_tout); - auto status = RunOpKernel(); - EXPECT_FALSE(status.ok()); - EXPECT_EQ(status.code(), error::INVALID_ARGUMENT); - } - - // Run common test cases. - - template - void TestIncompatibleShapes(const std::string& op_name, - const absl::InlinedVector& lhs_input, - const absl::InlinedVector& rhs_input, - const test::OpsTestConfig& config) { - // Prepare incompatibly shaped inputs. - TensorShape lhs_shape{3}; - TensorShape rhs_shape{2}; - auto repeated_lhs_input = - test::RepeatInputToMatchShape(lhs_input, lhs_shape.num_elements()); - auto repeated_rhs_input = - test::RepeatInputToMatchShape(rhs_input, rhs_shape.num_elements()); - - RunAndExpectInvalidArgument(op_name, lhs_shape, repeated_lhs_input, - rhs_shape, repeated_rhs_input, config); - } - - template - void TestEqualShapes(const std::string& op_name, const TensorShape& shape, - const absl::InlinedVector& lhs_input, - const absl::InlinedVector& rhs_input, - BaselineOutT (*baseline_callback)(BaselineT, BaselineT), - const test::OpsTestConfig& config) { - // Prepare inputs. - int input_size = shape.num_elements(); - CHECK(lhs_input.size() <= input_size && rhs_input.size() <= input_size && - "expect input shape to hold all input values"); - auto repeated_lhs_input = - test::RepeatInputToMatchShape(lhs_input, input_size); - auto repeated_rhs_input = - test::RepeatInputToMatchShape(rhs_input, input_size); - - // Compute expected results. 
- absl::InlinedVector expected_output; - for (auto it_lhs = repeated_lhs_input.begin(), - it_rhs = repeated_rhs_input.begin(), - end = repeated_lhs_input.end(); - it_lhs != end; ++it_lhs, ++it_rhs) { - auto lhs = static_cast(*it_lhs); - auto rhs = static_cast(*it_rhs); - auto result = static_cast(baseline_callback(lhs, rhs)); - expected_output.push_back(result); - } - - RunAndExpectResult(op_name, shape, repeated_lhs_input, shape, - repeated_rhs_input, shape, expected_output, - config); - } - - template - void TestOneScalar(const std::string& op_name, T scalar_input, - const TensorShape& other_shape, - const absl::InlinedVector& other_input, - BaselineOutT (*baseline_callback)(BaselineT, BaselineT), - const test::OpsTestConfig& config) { - // Prepare inputs. - TensorShape scalar_shape{}; - CHECK(other_input.size() <= other_shape.num_elements() && - "expect other input shape to hold all input values"); - auto repeated_other_input = - test::RepeatInputToMatchShape(other_input, other_shape.num_elements()); - - // Compute expected results. - absl::InlinedVector expected_output; - for (auto it = repeated_other_input.begin(), - end = repeated_other_input.end(); - it != end; ++it) { - auto scalar = static_cast(scalar_input); - auto other_value = static_cast(*it); - auto result = static_cast(baseline_callback(scalar, other_value)); - expected_output.push_back(result); - } - - auto scalar_input_vector = test::InputAsVector({scalar_input}); - RunAndExpectResult(op_name, scalar_shape, scalar_input_vector, - other_shape, repeated_other_input, - /*expected_shape=*/other_shape, expected_output, - config); - } - - template - void TestBroadcastingExpand(const std::string& op_name, - const absl::InlinedVector& lhs_input, - const absl::InlinedVector& rhs_input, - BaselineOutT (*baseline_callback)(BaselineT, - BaselineT), - const test::OpsTestConfig& config) { - // Prepare inputs. - TensorShape lhs_shape{1}; - TensorShape rhs_shape{6}; - auto repeated_lhs_input = - test::RepeatInputToMatchShape(lhs_input, lhs_shape.num_elements()); - auto repeated_rhs_input = - test::RepeatInputToMatchShape(rhs_input, rhs_shape.num_elements()); - - // Compute expected results. - std::vector lhs_indices = {0, 0, 0, 0, 0, 0}; - std::vector rhs_indices = {0, 1, 2, 3, 4, 5}; - auto expected_output = - ComputeExpectedOutput( - lhs_indices, repeated_lhs_input, rhs_indices, repeated_rhs_input, - baseline_callback); - - RunAndExpectResult( - op_name, lhs_shape, repeated_lhs_input, rhs_shape, repeated_rhs_input, - /*expected_shape=*/rhs_shape, expected_output, config); - } - - template - void TestBroadcastingInDim(const std::string& op_name, - const absl::InlinedVector& lhs_input, - const absl::InlinedVector& rhs_input, - BaselineOutT (*baseline_callback)(BaselineT, - BaselineT), - const test::OpsTestConfig& config) { - // Prepare inputs. - TensorShape lhs_shape{3}; - TensorShape rhs_shape{2, 3}; - auto repeated_lhs_input = - test::RepeatInputToMatchShape(lhs_input, lhs_shape.num_elements()); - auto repeated_rhs_input = - test::RepeatInputToMatchShape(rhs_input, rhs_shape.num_elements()); - - // Compute expected results. 
- std::vector lhs_indices = {0, 1, 2, 0, 1, 2}; - std::vector rhs_indices = {0, 1, 2, 3, 4, 5}; - auto expected_output = - ComputeExpectedOutput( - lhs_indices, repeated_lhs_input, rhs_indices, repeated_rhs_input, - baseline_callback); - - RunAndExpectResult( - op_name, lhs_shape, repeated_lhs_input, rhs_shape, repeated_rhs_input, - /*expected_shape=*/rhs_shape, expected_output, config); - } - - template - void TestBroadcasting(const std::string& op_name, - const absl::InlinedVector& lhs_input, - const absl::InlinedVector& rhs_input, - BaselineOutT (*baseline_callback)(BaselineT, BaselineT), - const test::OpsTestConfig& config) { - // Prepare inputs. - TensorShape lhs_shape{2, 1}; - TensorShape rhs_shape{3}; - auto repeated_lhs_input = - test::RepeatInputToMatchShape(lhs_input, lhs_shape.num_elements()); - auto repeated_rhs_input = - test::RepeatInputToMatchShape(rhs_input, rhs_shape.num_elements()); - - // Compute expected results. - TensorShape expected_shape{2, 3}; - std::vector lhs_indices = {0, 0, 0, 1, 1, 1}; - std::vector rhs_indices = {0, 1, 2, 0, 1, 2}; - auto expected_output = - ComputeExpectedOutput( - lhs_indices, repeated_lhs_input, rhs_indices, repeated_rhs_input, - baseline_callback); - - RunAndExpectResult(op_name, lhs_shape, repeated_lhs_input, - rhs_shape, repeated_rhs_input, expected_shape, - expected_output, config); - } - - template - void TestEmptyShapeBroadcasting(const std::string& op_name, - const absl::InlinedVector& lhs_input, - const absl::InlinedVector& rhs_input, - const test::OpsTestConfig& config) { - // Prepare inputs. - TensorShape lhs_shape{2, 0, 1}; - TensorShape rhs_shape{2, 0, 5}; - absl::InlinedVector empty_input = {}; - - // Define expected result. - TensorShape expected_shape{2, 0, 5}; - absl::InlinedVector expected_output = {}; - - RunAndExpectResult(op_name, lhs_shape, empty_input, rhs_shape, - empty_input, expected_shape, expected_output, - config); - } - - private: - template - absl::InlinedVector ComputeExpectedOutput( - std::vector lhs_indices, absl::InlinedVector lhs_input, - std::vector rhs_indices, absl::InlinedVector rhs_input, - BaselineOutT (*baseline_callback)(BaselineT, BaselineT)) { - absl::InlinedVector expected_output; - for (int i = 0; i < lhs_indices.size(); i++) { - auto lhs = static_cast(lhs_input[lhs_indices[i]]); - auto rhs = static_cast(rhs_input[rhs_indices[i]]); - auto result = static_cast(baseline_callback(lhs, rhs)); - expected_output.push_back(result); - } - return expected_output; - } }; -// Macros to easily generate common test cases. For specific inputs, please -// define your own test fixtures. 
- -#define GENERATE_DEFAULT_TESTS_2(op_name, test_name, T, BaselineT, OutT, \ - BaselineOutT, lhs_input, rhs_input, \ - baseline_callback, config) \ - TEST_F(GpuBinaryOpTest, op_name##EqShapes##test_name) { \ - TestEqualShapes( \ - #op_name, /*shape=*/test::DefaultInputShape(), lhs_input, rhs_input, \ - baseline_callback, config); \ - } \ - \ - TEST_F(GpuBinaryOpTest, op_name##OneScalar##test_name) { \ - TestOneScalar( \ - #op_name, /*scalar_input=*/lhs_input.front(), \ - /*other_shape=*/test::DefaultInputShape(), /*other_input=*/rhs_input, \ - baseline_callback, config); \ - } \ - \ - TEST_F(GpuBinaryOpTest, op_name##IncompatibleShapes##test_name) { \ - TestIncompatibleShapes(#op_name, lhs_input, rhs_input, config); \ - } \ - \ - TEST_F(GpuBinaryOpTest, op_name##BroadcastingExpand##test_name) { \ - TestBroadcastingExpand( \ - #op_name, lhs_input, rhs_input, baseline_callback, config); \ - } \ - \ - TEST_F(GpuBinaryOpTest, op_name##BroadcastingInDim##test_name) { \ - TestBroadcastingInDim( \ - #op_name, lhs_input, rhs_input, baseline_callback, config); \ - } \ - \ - TEST_F(GpuBinaryOpTest, op_name##Broadcasting##test_name) { \ - TestBroadcasting( \ - #op_name, lhs_input, rhs_input, baseline_callback, config); \ - } \ - \ - TEST_F(GpuBinaryOpTest, op_name##EmptyShapeBroadcasting##test_name) { \ - TestEmptyShapeBroadcasting( \ - #op_name, lhs_input, rhs_input, config); \ - } - -#define GENERATE_DEFAULT_TESTS(op_name, test_name, T, OutT, baseline_callback) \ - GENERATE_DEFAULT_TESTS_2(op_name, test_name, T, T, OutT, OutT, \ - test::DefaultInput(), test::DefaultInput(), \ - baseline_callback, \ - test::OpsTestConfig().ExpectStrictlyEqual()) - -#define GENERATE_DEFAULT_TESTS_WITH_SPECIFIC_INPUT_VALUES( \ - op_name, test_name, T, OutT, lhs_input, rhs_input, baseline_callback) \ - GENERATE_DEFAULT_TESTS_2(op_name, test_name, T, T, OutT, OutT, lhs_input, \ - rhs_input, baseline_callback, \ - test::OpsTestConfig().ExpectStrictlyEqual()) - /// Test `tf.AddV2`. template @@ -410,14 +73,14 @@ GENERATE_DEFAULT_TESTS_WITH_SPECIFIC_INPUT_VALUES( std::atan2); // Test some particularly interesting cases. -TEST_F(GpuBinaryOpTest, Atan2FloatSpecialCases) { +TEST_F(BinaryOpsTest, Atan2FloatSpecialCases) { TestEqualShapes( "Atan2", /*shape=*/{20}, test::InputAsVector({1, 1, 1, 0, -1, -1, -1, 0}), test::InputAsVector({1, 0, -1, -1, -1, 0, 1, 1}), std::atan2, test::OpsTestConfig().ExpectStrictlyEqual()); } -TEST_F(GpuBinaryOpTest, Atan2DoubleSpecialCases) { +TEST_F(BinaryOpsTest, Atan2DoubleSpecialCases) { TestEqualShapes( "Atan2", /*shape=*/{20}, test::InputAsVector({1, 1, 1, 0, -1, -1, -1, 0}),