Refactor depthwise convolution tests: extract the shared test spec, test-name formatting, and HLO text generation into a new conv_depthwise_common library so they can be reused by other depthwise convolution tests.

PiperOrigin-RevId: 261154825
This commit is contained in:
A. Unique TensorFlower 2019-08-01 10:56:50 -07:00 committed by TensorFlower Gardener
parent 219e8d8d6f
commit 623abf22f8
4 changed files with 215 additions and 130 deletions

View File

@ -296,9 +296,12 @@ xla_test(
xla_test(
name = "conv_depthwise_test",
timeout = "long",
srcs = ["conv_depthwise_test.cc"],
srcs = [
"conv_depthwise_test.cc",
],
shard_count = 50,
deps = [
":conv_depthwise_common",
":test_macros_header",
"//tensorflow/compiler/xla:execution_options_util",
"//tensorflow/compiler/xla:status_macros",
@ -709,6 +712,27 @@ cc_library(
],
)
# Shared helpers for the depthwise-convolution tests: the test-spec struct,
# gtest test-name formatting, and HLO module text generation.  Extracted so
# conv_depthwise_test (and future depthwise tests) can depend on one target.
cc_library(
    name = "conv_depthwise_common",
    testonly = True,
    srcs = ["conv_depthwise_common.cc"],
    hdrs = ["conv_depthwise_common.h"],
    deps = [
        ":test_macros_header",
        "//tensorflow/compiler/xla:execution_options_util",
        "//tensorflow/compiler/xla:status_macros",
        "//tensorflow/compiler/xla:test",
        "//tensorflow/compiler/xla/client:xla_computation",
        "//tensorflow/compiler/xla/service:bfloat16_normalization",
        "//tensorflow/compiler/xla/service:despecializer",
        "//tensorflow/compiler/xla/service:hlo_parser",
        "//tensorflow/compiler/xla/tests:client_library_test_base",
        "//tensorflow/compiler/xla/tests:hlo_test_base",
        "//tensorflow/compiler/xla/tests:xla_internal_test_main",
        "@com_google_absl//absl/types:optional",
    ],
)
xla_test(
name = "exhaustive_unary_test_f32_or_smaller",
srcs = ["exhaustive_unary_test.cc"],

View File

@ -0,0 +1,135 @@
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/tests/conv_depthwise_common.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/client/xla_computation.h"
#include "tensorflow/compiler/xla/execution_options_util.h"
#include "tensorflow/compiler/xla/service/bfloat16_normalization.h"
#include "tensorflow/compiler/xla/service/despecializer.h"
#include "tensorflow/compiler/xla/service/hlo_parser.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/tests/client_library_test_base.h"
#include "tensorflow/compiler/xla/tests/hlo_test_base.h"
#include "tensorflow/compiler/xla/tests/test_macros.h"
namespace xla {
// Returns the HLO textual name of the floating-point type under test:
// "bf16" when bfloat16 execution is requested, "f32" otherwise.
string GetFloatDataType(bool use_bfloat16) {
  if (use_bfloat16) {
    return "bf16";
  }
  return "f32";
}
// Builds a human-readable, gtest-legal test name from one parameter tuple
// (a DepthwiseConvolution2DSpec plus the use_bfloat16 flag).
string DepthwiseConvolution2DTestDataToString(
    const ::testing::TestParamInfo<
        ::testing::tuple<DepthwiseConvolution2DSpec, bool>>& data) {
  const DepthwiseConvolution2DSpec& spec = ::testing::get<0>(data.param);
  const bool use_bfloat16 = ::testing::get<1>(data.param);
  string name;
  absl::StrAppend(&name, "activation_dims_",
                  absl::StrJoin(spec.activation_dims, "x"));
  absl::StrAppend(&name, "_activation_layout_",
                  absl::StrJoin(spec.activation_layout, "_"));
  absl::StrAppend(&name, "_kernel_dims_",
                  absl::StrJoin(spec.kernel_dims, "x"));
  absl::StrAppend(&name, "_kernel_layout_",
                  absl::StrJoin(spec.kernel_layout, "_"));
  absl::StrAppend(&name, "_output_dims_",
                  absl::StrJoin(spec.output_dims, "x"));
  absl::StrAppend(&name, "_output_layout_",
                  absl::StrJoin(spec.output_layout, "_"));
  absl::StrAppend(&name, GetFloatDataType(use_bfloat16));
  // A stride of -1 marks "no stride configured"; only specs with a real
  // stride carry an lhs_dilation component in their name.
  if (spec.stride != -1) {
    absl::StrAppend(&name, "_lhs_dilation_", spec.lhs_dilate, "x1");
  }
  // Test names are not allowed to contain the '-' character, so map the
  // minus sign of negative values to 'n'.
  absl::c_replace(name, '-', 'n');
  return name;
}
// Emits the text of an HLO module containing a single depthwise convolution
// described by `spec`.  One of three window templates is chosen:
//   1) activation width 1 with kernel width 2: padded window with rhs_dilate
//      (exercises the degenerate spatial-dim case),
//   2) stride == -1 (-1 means "not set"): a plain window with no
//      stride/pad/dilation attributes,
//   3) otherwise: a strided window with lhs_dilate.
// When `is_scheduled` is true the module is tagged is_scheduled=true so the
// parser preserves instruction order.
string BuildHloTextDepthwiseConvolution2D(
    const DepthwiseConvolution2DSpec& spec, bool use_bfloat16,
    bool is_scheduled) {
  const string data_type = GetFloatDataType(use_bfloat16);
  const string sched_tag = is_scheduled ? ", is_scheduled=true " : "";
  if (spec.activation_dims[1] == 1 && spec.kernel_dims[1] == 2) {
    // Case 1: fixed pad=1_1 plus rhs_dilate, all sized from spec.window.
    return absl::StrFormat(
        R"(
      HloModule TensorFlowDepthwiseConv %s
      ENTRY main {
        activation = %s[%s]{%s} parameter(0)
        kernel = %s[%s]{%s} parameter(1)
        ROOT conv = %s[%s]{%s} convolution(%s[%s]{%s} activation, %s[%s]{%s} kernel),
          window={size=%dx%d pad=1_1x%d_%d rhs_dilate=1x%d}, dim_labels=b01f_01io->b01f,
          feature_group_count=%d
      }
      )",
        // Argument order mirrors the %s/%d placeholders above: module tag,
        // then type/dims/layout for activation, kernel, output, and the two
        // convolution operands, then the window numbers.
        sched_tag, data_type, absl::StrJoin(spec.activation_dims, ","),
        absl::StrJoin(spec.activation_layout, ","), data_type,
        absl::StrJoin(spec.kernel_dims, ","),
        absl::StrJoin(spec.kernel_layout, ","), data_type,
        absl::StrJoin(spec.output_dims, ","),
        absl::StrJoin(spec.output_layout, ","), data_type,
        absl::StrJoin(spec.activation_dims, ","),
        absl::StrJoin(spec.activation_layout, ","), data_type,
        absl::StrJoin(spec.kernel_dims, ","),
        absl::StrJoin(spec.kernel_layout, ","), spec.window, spec.window,
        spec.window, spec.window, spec.window, spec.output_feature);
  } else if (spec.stride == -1) {
    // Case 2: no stride configured -- window carries only its size.
    return absl::StrFormat(
        R"(
      HloModule TensorFlowDepthwiseConv %s
      ENTRY main {
        activation = %s[%s]{%s} parameter(0)
        kernel = %s[%s]{%s} parameter(1)
        ROOT conv = %s[%s]{%s} convolution(%s[%s]{%s} activation, %s[%s]{%s} kernel),
          window={size=%dx%d}, dim_labels=b01f_01io->b01f,
          feature_group_count=%d
      }
      )",
        sched_tag, data_type, absl::StrJoin(spec.activation_dims, ","),
        absl::StrJoin(spec.activation_layout, ","), data_type,
        absl::StrJoin(spec.kernel_dims, ","),
        absl::StrJoin(spec.kernel_layout, ","), data_type,
        absl::StrJoin(spec.output_dims, ","),
        absl::StrJoin(spec.output_layout, ","), data_type,
        absl::StrJoin(spec.activation_dims, ","),
        absl::StrJoin(spec.activation_layout, ","), data_type,
        absl::StrJoin(spec.kernel_dims, ","),
        absl::StrJoin(spec.kernel_layout, ","), spec.window, spec.window,
        spec.output_feature);
  } else {
    // Case 3: strided convolution with lhs_dilate; pad is hard-coded 0_0.
    return absl::StrFormat(
        R"(
      HloModule TensorFlowDepthwiseConv %s
      ENTRY main {
        activation = %s[%s]{%s} parameter(0)
        kernel = %s[%s]{%s} parameter(1)
        ROOT conv = %s[%s]{%s} convolution(%s[%s]{%s} activation, %s[%s]{%s} kernel),
          window={size=%dx%d stride=%dx1 pad=%d_%dx0_0 lhs_dilate=%dx1},
          dim_labels=b01f_01io->b01f, feature_group_count=%d
      }
      )",
        sched_tag, data_type, absl::StrJoin(spec.activation_dims, ","),
        absl::StrJoin(spec.activation_layout, ","), data_type,
        absl::StrJoin(spec.kernel_dims, ","),
        absl::StrJoin(spec.kernel_layout, ","), data_type,
        absl::StrJoin(spec.output_dims, ","),
        absl::StrJoin(spec.output_layout, ","), data_type,
        absl::StrJoin(spec.activation_dims, ","),
        absl::StrJoin(spec.activation_layout, ","), data_type,
        absl::StrJoin(spec.kernel_dims, ","),
        absl::StrJoin(spec.kernel_layout, ","), spec.window, spec.window,
        spec.stride, 0, 0, spec.lhs_dilate, spec.output_feature);
  }
}
} // namespace xla

View File

@ -0,0 +1,53 @@
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_XLA_TESTS_CONV_DEPTHWISE_COMMON_H_
#define TENSORFLOW_COMPILER_XLA_TESTS_CONV_DEPTHWISE_COMMON_H_
#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/client/xla_computation.h"
#include "tensorflow/compiler/xla/execution_options_util.h"
#include "tensorflow/compiler/xla/service/bfloat16_normalization.h"
#include "tensorflow/compiler/xla/service/despecializer.h"
#include "tensorflow/compiler/xla/service/hlo_parser.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/tests/client_library_test_base.h"
#include "tensorflow/compiler/xla/tests/hlo_test_base.h"
#include "tensorflow/compiler/xla/tests/test_macros.h"
namespace xla {
string GetFloatDataType(bool use_bfloat16);
// Parameters for one depthwise-convolution 2D test case.
struct DepthwiseConvolution2DSpec {
  // Scalar convolution attributes; a value of -1 in stride (and the fields
  // gated on it) marks "not used" for this case.
  int64 output_feature, window, stride, pad, lhs_dilate;
  // Logical dimensions and physical (minor-to-major) layouts of the
  // activation, kernel, and output tensors spliced into the HLO text.
  std::vector<int64> activation_dims;
  std::vector<int64> activation_layout;
  std::vector<int64> kernel_dims;
  std::vector<int64> kernel_layout;
  std::vector<int64> output_dims;
  std::vector<int64> output_layout;
};
string DepthwiseConvolution2DTestDataToString(
const ::testing::TestParamInfo<
::testing::tuple<DepthwiseConvolution2DSpec, bool>>& data);
string BuildHloTextDepthwiseConvolution2D(
const DepthwiseConvolution2DSpec& spec, bool use_bfloat16,
bool is_scheduled = false);
} // namespace xla
#endif // TENSORFLOW_COMPILER_XLA_TESTS_CONV_DEPTHWISE_COMMON_H_

View File

@ -22,26 +22,13 @@ limitations under the License.
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/tests/client_library_test_base.h"
#include "tensorflow/compiler/xla/tests/conv_depthwise_common.h"
#include "tensorflow/compiler/xla/tests/hlo_test_base.h"
#include "tensorflow/compiler/xla/tests/test_macros.h"
namespace xla {
namespace {
// Returns the HLO textual name of the float type under test.
// (Removed by this commit; now lives in conv_depthwise_common.cc.)
string GetFloatDataType(bool use_bfloat16) {
  return use_bfloat16 ? "bf16" : "f32";
}
// Parameters for one depthwise-convolution 2D test case.
// (Removed by this commit; now declared in conv_depthwise_common.h.)
struct DepthwiseConvolution2DSpec {
  // -1 in stride marks "not used" for this case.
  int64 output_feature, window, stride, pad, lhs_dilate;
  // Dimensions and minor-to-major layouts for each tensor.
  std::vector<int64> activation_dims;
  std::vector<int64> activation_layout;
  std::vector<int64> kernel_dims;
  std::vector<int64> kernel_layout;
  std::vector<int64> output_dims;
  std::vector<int64> output_layout;
};
class DepthwiseConvolution2DTest
: public HloTestBase,
public ::testing::WithParamInterface<
@ -70,6 +57,7 @@ static std::vector<DepthwiseConvolution2DSpec> GetConv2DTestCases() {
config.kernel_dims = {kernel_size, kernel_size, 1, feature};
config.kernel_layout = {3, 2, 1, 0};
config.output_layout = {3, 0, 2, 1};
if (activation_size == 1 && kernel_size == 2) {
// Test for outer dim.
@ -87,127 +75,12 @@ static std::vector<DepthwiseConvolution2DSpec> GetConv2DTestCases() {
config.output_dims = {batch, activation_size - kernel_size + 1,
activation_size - kernel_size + 1, feature};
}
// Try this layout for all kernel shapes.
config.output_layout = {3, 0, 2, 1};
config_set.push_back(config);
// Try other layouts only for certain kernel shapes.
if (kernel_size % 2 == 0) {
config.activation_layout = {0, 3, 2, 1};
config_set.push_back(config);
config.output_layout = {0, 3, 2, 1};
config_set.push_back(config);
config.activation_layout = {3, 0, 2, 1};
config_set.push_back(config);
}
}
return config_set;
}
// Builds a human-readable, gtest-legal test name from one parameter tuple.
// (Removed by this commit; now lives in conv_depthwise_common.cc.)
string DepthwiseConvolution2DTestDataToString(
    const ::testing::TestParamInfo<
        ::testing::tuple<DepthwiseConvolution2DSpec, bool>>& data) {
  const auto& spec = ::testing::get<0>(data.param);
  const string data_type = GetFloatDataType(::testing::get<1>(data.param));
  string str = absl::StrCat(
      "activation_dims_", absl::StrJoin(spec.activation_dims, "x"),
      "_activation_layout_", absl::StrJoin(spec.activation_layout, "_"),
      "_kernel_dims_", absl::StrJoin(spec.kernel_dims, "x"), "_kernel_layout_",
      absl::StrJoin(spec.kernel_layout, "_"), "_output_dims_",
      absl::StrJoin(spec.output_dims, "x"), "_output_layout_",
      absl::StrJoin(spec.output_layout, "_"), data_type);
  // -1 indicates non-existence: only specs with a real stride carry the
  // lhs_dilation suffix in their name.
  if (spec.stride != -1) {
    absl::StrAppend(&str, "_lhs_dilation_", spec.lhs_dilate, "x1");
  }
  // Test names are not allowed to contain the '-' character, so map the
  // minus sign of negative values to 'n'.
  absl::c_replace(str, '-', 'n');
  return str;
}
// Emits the HLO module text for one depthwise convolution described by
// `spec`, choosing among three window templates (see branch comments).
// (Removed by this commit; the common version adds an is_scheduled flag.)
string BuildHloTextDepthwiseConvolution2D(
    const DepthwiseConvolution2DSpec& spec, bool use_bfloat16) {
  const string data_type = GetFloatDataType(use_bfloat16);
  if (spec.activation_dims[1] == 1 && spec.kernel_dims[1] == 2) {
    // Case 1: degenerate spatial dim -- padded window with rhs_dilate.
    return absl::StrFormat(
        R"(
      HloModule TensorFlowDepthwiseConv
      ENTRY main {
        activation = %s[%s]{%s} parameter(0)
        kernel = %s[%s]{%s} parameter(1)
        ROOT conv = %s[%s]{%s} convolution(%s[%s]{%s} activation, %s[%s]{%s} kernel),
          window={size=%dx%d pad=1_1x%d_%d rhs_dilate=1x%d}, dim_labels=b01f_01io->b01f,
          feature_group_count=%d
      }
      )",
        // Argument order mirrors the placeholders: type/dims/layout for
        // activation, kernel, output, the two operands, then window numbers.
        data_type, absl::StrJoin(spec.activation_dims, ","),
        absl::StrJoin(spec.activation_layout, ","), data_type,
        absl::StrJoin(spec.kernel_dims, ","),
        absl::StrJoin(spec.kernel_layout, ","), data_type,
        absl::StrJoin(spec.output_dims, ","),
        absl::StrJoin(spec.output_layout, ","), data_type,
        absl::StrJoin(spec.activation_dims, ","),
        absl::StrJoin(spec.activation_layout, ","), data_type,
        absl::StrJoin(spec.kernel_dims, ","),
        absl::StrJoin(spec.kernel_layout, ","), spec.window, spec.window,
        spec.window, spec.window, spec.window, spec.output_feature);
  } else if (spec.stride == -1) {
    // Case 2: no stride configured -- window carries only its size.
    return absl::StrFormat(
        R"(
      HloModule TensorFlowDepthwiseConv
      ENTRY main {
        activation = %s[%s]{%s} parameter(0)
        kernel = %s[%s]{%s} parameter(1)
        ROOT conv = %s[%s]{%s} convolution(%s[%s]{%s} activation, %s[%s]{%s} kernel),
          window={size=%dx%d}, dim_labels=b01f_01io->b01f,
          feature_group_count=%d
      }
      )",
        data_type, absl::StrJoin(spec.activation_dims, ","),
        absl::StrJoin(spec.activation_layout, ","), data_type,
        absl::StrJoin(spec.kernel_dims, ","),
        absl::StrJoin(spec.kernel_layout, ","), data_type,
        absl::StrJoin(spec.output_dims, ","),
        absl::StrJoin(spec.output_layout, ","), data_type,
        absl::StrJoin(spec.activation_dims, ","),
        absl::StrJoin(spec.activation_layout, ","), data_type,
        absl::StrJoin(spec.kernel_dims, ","),
        absl::StrJoin(spec.kernel_layout, ","), spec.window, spec.window,
        spec.output_feature);
  } else {
    // Case 3: strided convolution with lhs_dilate; pad hard-coded 0_0.
    return absl::StrFormat(
        R"(
      HloModule TensorFlowDepthwiseConv
      ENTRY main {
        activation = %s[%s]{%s} parameter(0)
        kernel = %s[%s]{%s} parameter(1)
        ROOT conv = %s[%s]{%s} convolution(%s[%s]{%s} activation, %s[%s]{%s} kernel),
          window={size=%dx%d stride=%dx1 pad=%d_%dx0_0 lhs_dilate=%dx1},
          dim_labels=b01f_01io->b01f, feature_group_count=%d
      }
      )",
        data_type, absl::StrJoin(spec.activation_dims, ","),
        absl::StrJoin(spec.activation_layout, ","), data_type,
        absl::StrJoin(spec.kernel_dims, ","),
        absl::StrJoin(spec.kernel_layout, ","), data_type,
        absl::StrJoin(spec.output_dims, ","),
        absl::StrJoin(spec.output_layout, ","), data_type,
        absl::StrJoin(spec.activation_dims, ","),
        absl::StrJoin(spec.activation_layout, ","), data_type,
        absl::StrJoin(spec.kernel_dims, ","),
        absl::StrJoin(spec.kernel_layout, ","), spec.window, spec.window,
        spec.stride, 0, 0, spec.lhs_dilate, spec.output_feature);
  }
}
XLA_TEST_P(DepthwiseConvolution2DTest, DoIt) {
const DepthwiseConvolution2DSpec& spec = ::testing::get<0>(GetParam());