STT-tensorflow/tensorflow/compiler/xla/tests/conv_depthwise_common.cc
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/xla/tests/conv_depthwise_common.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/client/xla_computation.h"
#include "tensorflow/compiler/xla/execution_options_util.h"
#include "tensorflow/compiler/xla/service/bfloat16_normalization.h"
#include "tensorflow/compiler/xla/service/despecializer.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/tests/client_library_test_base.h"
#include "tensorflow/compiler/xla/tests/hlo_test_base.h"
#include "tensorflow/compiler/xla/tests/test_macros.h"
namespace xla {
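// Returns the textual name of the floating-point type used in the generated
// HLO: "bf16" when use_bfloat16 is set, "f32" otherwise.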
string GetFloatDataType(bool use_bfloat16) {
  return use_bfloat16 ? "bf16" : "f32";
}
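
// Builds a readable gtest parameter name from a
// (DepthwiseConvolution2DSpec, use_bfloat16) tuple by concatenating the
// activation, kernel, and output dimensions and layouts with the data type.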
string DepthwiseConvolution2DTestDataToString(
    const ::testing::TestParamInfo<
        ::testing::tuple<DepthwiseConvolution2DSpec, bool>>& data) {
  const auto& spec = ::testing::get<0>(data.param);
  const string data_type = GetFloatDataType(::testing::get<1>(data.param));
  string str = absl::StrCat(
      "activation_dims_", absl::StrJoin(spec.activation_dims, "x"),
      "_activation_layout_", absl::StrJoin(spec.activation_layout, "_"),
      "_kernel_dims_", absl::StrJoin(spec.kernel_dims, "x"), "_kernel_layout_",
      absl::StrJoin(spec.kernel_layout, "_"), "_output_dims_",
      absl::StrJoin(spec.output_dims, "x"), "_output_layout_",
      absl::StrJoin(spec.output_layout, "_"), data_type);
  // A stride of -1 indicates an unset stride/dilation; only specs that set it
  // get an lhs_dilation suffix in the test name.
  if (spec.stride != -1) {
    absl::StrAppend(&str, "_lhs_dilation_", spec.lhs_dilate, "x1");
  }

  // Test names are not allowed to contain the '-' character.
  absl::c_replace(str, '-', 'n');
  return str;
}
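
// Returns the text of an HLO module containing a single depthwise convolution
// whose operand shapes, layouts, window, and feature_group_count are derived
// from `spec`. The window configuration depends on which fields of the spec
// are set (see the branches below).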
string BuildHloTextDepthwiseConvolution2D(
    const DepthwiseConvolution2DSpec& spec, bool use_bfloat16,
    bool is_scheduled) {
  const string data_type = GetFloatDataType(use_bfloat16);
  const string sched_tag = is_scheduled ? ", is_scheduled=true " : "";
  // Special case: second activation dimension of extent 1 paired with a
  // kernel extent of 2; emit a window with explicit padding and rhs dilation.
  if (spec.activation_dims[1] == 1 && spec.kernel_dims[1] == 2) {
    return absl::StrFormat(
        R"(
    HloModule TensorFlowDepthwiseConv %s
    ENTRY main {
      activation = %s[%s]{%s} parameter(0)
      kernel = %s[%s]{%s} parameter(1)
      ROOT conv = %s[%s]{%s} convolution(%s[%s]{%s} activation, %s[%s]{%s} kernel),
        window={size=%dx%d pad=1_1x%d_%d rhs_dilate=1x%d}, dim_labels=b01f_01io->b01f,
        feature_group_count=%d
    }
    )",
        sched_tag, data_type, absl::StrJoin(spec.activation_dims, ","),
        absl::StrJoin(spec.activation_layout, ","), data_type,
        absl::StrJoin(spec.kernel_dims, ","),
        absl::StrJoin(spec.kernel_layout, ","), data_type,
        absl::StrJoin(spec.output_dims, ","),
        absl::StrJoin(spec.output_layout, ","), data_type,
        absl::StrJoin(spec.activation_dims, ","),
        absl::StrJoin(spec.activation_layout, ","), data_type,
        absl::StrJoin(spec.kernel_dims, ","),
        absl::StrJoin(spec.kernel_layout, ","), spec.window, spec.window,
        spec.window, spec.window, spec.window, spec.output_feature);
  } else if (spec.stride == -1) {
    // No stride specified: emit a window with only a size.
    return absl::StrFormat(
        R"(
    HloModule TensorFlowDepthwiseConv %s
    ENTRY main {
      activation = %s[%s]{%s} parameter(0)
      kernel = %s[%s]{%s} parameter(1)
      ROOT conv = %s[%s]{%s} convolution(%s[%s]{%s} activation, %s[%s]{%s} kernel),
        window={size=%dx%d}, dim_labels=b01f_01io->b01f,
        feature_group_count=%d
    }
    )",
        sched_tag, data_type, absl::StrJoin(spec.activation_dims, ","),
        absl::StrJoin(spec.activation_layout, ","), data_type,
        absl::StrJoin(spec.kernel_dims, ","),
        absl::StrJoin(spec.kernel_layout, ","), data_type,
        absl::StrJoin(spec.output_dims, ","),
        absl::StrJoin(spec.output_layout, ","), data_type,
        absl::StrJoin(spec.activation_dims, ","),
        absl::StrJoin(spec.activation_layout, ","), data_type,
        absl::StrJoin(spec.kernel_dims, ","),
        absl::StrJoin(spec.kernel_layout, ","), spec.window, spec.window,
        spec.output_feature);
  } else {
    // Stride specified: emit a window with the stride and lhs dilation on the
    // first spatial dimension and no padding.
    return absl::StrFormat(
        R"(
    HloModule TensorFlowDepthwiseConv %s
    ENTRY main {
      activation = %s[%s]{%s} parameter(0)
      kernel = %s[%s]{%s} parameter(1)
      ROOT conv = %s[%s]{%s} convolution(%s[%s]{%s} activation, %s[%s]{%s} kernel),
        window={size=%dx%d stride=%dx1 pad=%d_%dx0_0 lhs_dilate=%dx1},
        dim_labels=b01f_01io->b01f, feature_group_count=%d
    }
    )",
        sched_tag, data_type, absl::StrJoin(spec.activation_dims, ","),
        absl::StrJoin(spec.activation_layout, ","), data_type,
        absl::StrJoin(spec.kernel_dims, ","),
        absl::StrJoin(spec.kernel_layout, ","), data_type,
        absl::StrJoin(spec.output_dims, ","),
        absl::StrJoin(spec.output_layout, ","), data_type,
        absl::StrJoin(spec.activation_dims, ","),
        absl::StrJoin(spec.activation_layout, ","), data_type,
        absl::StrJoin(spec.kernel_dims, ","),
        absl::StrJoin(spec.kernel_layout, ","), spec.window, spec.window,
        spec.stride, 0, 0, spec.lhs_dilate, spec.output_feature);
  }
}
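
// Illustrative usage sketch (not part of this file): a parameterized test
// built on HloTestBase would typically generate the module text from its
// spec and run it through the verifying harness, roughly as follows; the
// flag values and error bounds below are placeholders, not taken from this
// file.
//
//   const string hlo_text = BuildHloTextDepthwiseConvolution2D(
//       spec, /*use_bfloat16=*/false, /*is_scheduled=*/true);
//   EXPECT_TRUE(RunAndCompareNoHloPasses(hlo_text, ErrorSpec{0.01, 0.01}));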
} // namespace xla