[tf2tensorrt] Support Conv2DBackpropInput with input_sizes of size 2.

tf2tensorrt prefers an input_sizes of size 2 because, carrying only the two
spatial dimensions, it remains a build-time constant regardless of whether the
batch size is variable.

Separate the tests for Conv2DBackpropInput into a different test method.

PiperOrigin-RevId: 307818437
Change-Id: Id7a54651d68a72c5222231e02dfe4b3b8e8f35e6
Author: Jingyue Wu, 2020-04-22 08:09:07 -07:00 (committed by TensorFlower Gardener)
Commit: b644a649e7, parent: 1f5087b8ad
2 changed files with 167 additions and 97 deletions

--- a/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc
+++ b/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc

@@ -2027,6 +2027,24 @@ Status Conv2DPaddingHelper(OpConverterParams* params, const TFAttrs& attrs,
   return Status::OK();
 }
 
+namespace {
+// Extracts the spatial dimensions from `output_sizes` and returns them as a
+// vector of size 2.
+std::vector<int64_t> GetSpatialDimsFromOutputSizes(
+    const TRT_TensorOrWeights& output_sizes, const int h_index,
+    const int w_index) {
+  // We use h_index and w_index instead of 1 and 2 because we haven't
+  // transposed output_sizes along with the input.
+  const TRT_ShapedWeights& weights = output_sizes.weights();
+  const int output_sizes_length = weights.count();
+  auto output_sizes_values = static_cast<int*>(weights.GetValues());
+  // The length of output_sizes can be 2 or 4. When the length is 2, it is
+  // directly <height,width>; when 4, the spatial dims sit at h_index/w_index.
+  return {output_sizes_values[output_sizes_length == 4 ? h_index : 0],
+          output_sizes_values[output_sizes_length == 4 ? w_index : 1]};
+}
+}  // namespace
+
 Status ConvertConv2DHelper(OpConverterParams* params, int group,
                            bool is_conv2d_backprop_input) {
   const auto& inputs = params->inputs;
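To make the helper's indexing rule concrete, here is a minimal standalone
sketch of its behavior, with a plain std::vector standing in for
TRT_ShapedWeights (the SpatialDims name and the example shapes are
illustrative assumptions, not the converter's real API):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Sketch: mirrors the indexing in GetSpatialDimsFromOutputSizes.
    // A 4-element output_sizes is a full shape, so we pick h_index/w_index;
    // a 2-element output_sizes is already <height,width>.
    std::vector<int64_t> SpatialDims(const std::vector<int>& output_sizes,
                                     int h_index, int w_index) {
      const int n = static_cast<int>(output_sizes.size());
      return {output_sizes[n == 4 ? h_index : 0],
              output_sizes[n == 4 ? w_index : 1]};
    }

    int main() {
      // NHWC shape {1, 2, 3, 1}: h_index = 1, w_index = 2.
      auto a = SpatialDims({1, 2, 3, 1}, /*h_index=*/1, /*w_index=*/2);
      // 2-element form {2, 3}: indices are fixed at 0 and 1.
      auto b = SpatialDims({2, 3}, /*h_index=*/1, /*w_index=*/2);
      std::cout << a[0] << "x" << a[1] << " == " << b[0] << "x" << b[1] << "\n";
      // Both print 2x3. The 2-element form omits batch and channel, which is
      // why it stays constant even when the batch size is variable.
      return 0;
    }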
@@ -2125,11 +2143,8 @@ Status ConvertConv2DHelper(OpConverterParams* params, int group,
     // For backprop, calculate padding based on "input_sizes" input, which
     // actually corresponds to output size. ("input_sizes" makes sense in the
     // context of Conv2DBackpropInput).
-    // We use h_index and w_index instead of 1 and 2 because we haven't
-    // transposed backprop_output_size along with the input.
-    auto output_size_weights =
-        static_cast<int*>(backprop_output_size.weights().GetValues());
-    input_dims = {output_size_weights[h_index], output_size_weights[w_index]};
+    input_dims =
+        GetSpatialDimsFromOutputSizes(backprop_output_size, h_index, w_index);
   } else {
     // Use 1 and 2 because tensor_dim has the dimensions of the transposed
     // input.
@@ -2189,22 +2204,24 @@ Status ConvertConv2DHelper(OpConverterParams* params, int group,
   // argument output_shape and thus the TRT output shape could be wrong
   // in case of strides>1.
   if (is_conv2d_backprop_input) {
-    auto tf_output_shape =
-        static_cast<int*>(backprop_output_size.weights().GetValues());
+    std::vector<int64_t> output_spatial_dims =
+        GetSpatialDimsFromOutputSizes(backprop_output_size, h_index, w_index);
+    const int output_height = output_spatial_dims[0];
+    const int output_width = output_spatial_dims[1];
     nvinfer1::Dims trt_output_shape = output_tensor->getDimensions();
     // What determines the padding size is the difference between the given
     // input_sizes (tf_output_shape) and TRT computed size.
-    const int height_diff = tf_output_shape[h_index] - trt_output_shape.d[1];
-    const int width_diff = tf_output_shape[w_index] - trt_output_shape.d[2];
+    const int height_diff = output_height - trt_output_shape.d[1];
+    const int width_diff = output_width - trt_output_shape.d[2];
     if ((height_diff < 0) || (width_diff < 0)) {
       return errors::InvalidArgument(
           "input_sizes argument of Conv2DBackprop (i.e. output_shape argument "
           "of conv2d_transpose) ",
           "is too small for the given out_backprop argument of Conv2DBackprop "
           "(i.e. input argument of conv2d_transpose). Expect: ",
-          "(", tf_output_shape[h_index], ", ", tf_output_shape[w_index],
-          ") >= ", "(", trt_output_shape.d[1], ", ", trt_output_shape.d[2],
-          ") for op ", node_def.name());
+          "(", output_height, ", ", output_width, ") >= ", "(",
+          trt_output_shape.d[1], ", ", trt_output_shape.d[2], ") for op ",
+          node_def.name());
     }
     // Only add a padding layer if padding sizes are larger than 0
     if ((height_diff > 0) || (width_diff > 0)) {
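The hunk above turns the difference between the user-requested output size and
the TensorRT-computed size into post-padding. A minimal sketch of that
arithmetic under the same rules (the ComputePostPadding name and the pair
return type are assumptions for illustration; the converter itself returns a
Status instead):

    #include <utility>

    // Sketch: for strides > 1, the deconvolution output TensorRT computes can
    // be smaller than the requested input_sizes; the converter pads the
    // difference. Returns {height_diff, width_diff}, or {-1, -1} when the
    // request is smaller than what TRT produced (an InvalidArgument case).
    std::pair<int, int> ComputePostPadding(int requested_h, int requested_w,
                                           int trt_h, int trt_w) {
      const int height_diff = requested_h - trt_h;
      const int width_diff = requested_w - trt_w;
      if (height_diff < 0 || width_diff < 0) return {-1, -1};
      return {height_diff, width_diff};  // Pad only if either diff is > 0.
    }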

--- a/tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc
+++ b/tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc

@@ -3698,28 +3698,16 @@ TEST_F(OpConverterTest, ConvertConv2D) {
   // Get nodedef for Conv2D layer.
   auto get_conv2d_nodedef =
       [](std::vector<int> strides = {1, 1, 1, 1}, string padding = "SAME",
-         string data_format = "NCHW", std::vector<int> dilations = {1, 1, 1, 1},
-         bool is_conv2d_backprop_input = false) -> NodeDef {
+         string data_format = "NCHW",
+         std::vector<int> dilations = {1, 1, 1, 1}) -> NodeDef {
     Scope s = Scope::NewRootScope();
     auto input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
     auto filter = ops::Placeholder(s.WithOpName("weights"), DT_FLOAT);
-    if (is_conv2d_backprop_input) {
-      auto input_sizes =
-          ops::Placeholder(s.WithOpName("input_sizes"), DT_INT32);
-      ops::Conv2DBackpropInput::Attrs attrs = ops::Conv2DBackpropInput::Attrs()
-                                                  .DataFormat(data_format)
-                                                  .Dilations(dilations);
-      auto conv2d =
-          ops::Conv2DBackpropInput(s.WithOpName("my_conv2d"), input_sizes,
-                                   filter, input, strides, padding, attrs);
-      return conv2d.operation.node()->def();
-    } else {
-      ops::Conv2D::Attrs attrs =
-          ops::Conv2D::Attrs().DataFormat(data_format).Dilations(dilations);
-      auto conv2d = ops::Conv2D(s.WithOpName("my_conv2d"), input, filter,
-                                strides, padding, attrs);
-      return conv2d.operation.node()->def();
-    }
+    ops::Conv2D::Attrs attrs =
+        ops::Conv2D::Attrs().DataFormat(data_format).Dilations(dilations);
+    auto conv2d = ops::Conv2D(s.WithOpName("my_conv2d"), input, filter, strides,
+                              padding, attrs);
+    return conv2d.operation.node()->def();
   };
   {
@@ -3785,19 +3773,6 @@ TEST_F(OpConverterTest, ConvertConv2D) {
                                "Dilation rate must be 1 for batch and channel "
                                "dimensions, at my_conv2d");
   }
-  {
-    // Dilation + Conv2DBackpropInput, should fail.
-    Reset();
-    NodeDef node_def =
-        get_conv2d_nodedef({1, 1, 1, 1}, "SAME", "NHWC", {1, 1, 2, 1}, true);
-    AddTestTensor("input", {2, 3, 1});
-    AddTestWeights<float>("weights", {3, 3, 1, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
-    AddTestWeights<int>("input_sizes", {4}, {1, 2, 3, 1});
-    RunValidationAndConversion(node_def, error::UNIMPLEMENTED,
-                               "Dilation with Conv2DBackpropInput "
-                               "(conv2d_transpose) is not supported, "
-                               "at my_conv2d");
-  }
   {
     // Strides is not 4D, should fail.
     Reset();
@@ -3830,7 +3805,6 @@ TEST_F(OpConverterTest, ConvertConv2D) {
     string padding;
     string data_format;
     std::vector<int> dilations;
-    bool is_conv2d_backprop_input;
     std::vector<int> expected_output_dims;
     std::vector<float> expected_output;
   };
@@ -3846,7 +3820,6 @@ TEST_F(OpConverterTest, ConvertConv2D) {
                  /*padding=*/"VALID",
                  /*data_format=*/"NCHW",
                  /*dilations=*/{1, 1, 1, 1},
-                 /*is_conv2d_backprop_input=*/false,
                  /*expected_output_dims=*/{1, 2, 2},
                  /*expected_output=*/{1, 1, 0, 1}},
       // SAME padding (Asymmetric)
@@ -3858,7 +3831,6 @@ TEST_F(OpConverterTest, ConvertConv2D) {
                  /*padding=*/"SAME",
                  /*data_format=*/"NCHW",
                  /*dilations=*/{1, 1, 1, 1},
-                 /*is_conv2d_backprop_input=*/false,
                  /*expected_output_dims=*/{1, 2, 3},
                  /*expected_output=*/{1, 1, -2, 0, 1, -4}},
       // SAME padding (Symmetric)
@@ -3870,7 +3842,6 @@ TEST_F(OpConverterTest, ConvertConv2D) {
                  /*padding=*/"SAME",
                  /*data_format=*/"NCHW",
                  /*dilations=*/{1, 1, 1, 1},
-                 /*is_conv2d_backprop_input=*/false,
                  /*expected_output_dims=*/{1, 2, 3},
                  /*expected_output=*/{1, 2, -1, 3, 1, -3}},
       // NHWC
@@ -3882,7 +3853,6 @@ TEST_F(OpConverterTest, ConvertConv2D) {
                  /*padding=*/"VALID",
                  /*data_format=*/"NHWC",
                  /*dilations=*/{1, 1, 1, 1},
-                 /*is_conv2d_backprop_input=*/false,
                  /*expected_output_dims=*/{2, 2, 1},
                  /*expected_output=*/{1, 1, 0, 1}},
       // Dilated
@@ -3894,7 +3864,6 @@ TEST_F(OpConverterTest, ConvertConv2D) {
                  /*padding=*/"VALID",
                  /*data_format=*/"NCHW",
                  /*dilations=*/{1, 1, 1, 2},
-                 /*is_conv2d_backprop_input=*/false,
                  /*expected_output_dims=*/{1, 2, 1},
                  /*expected_output=*/{2, 1}},
       // Strided
@@ -3906,62 +3875,18 @@ TEST_F(OpConverterTest, ConvertConv2D) {
                  /*padding=*/"VALID",
                  /*data_format=*/"NCHW",
                  /*dilations=*/{1, 1, 1, 1},
-                 /*is_conv2d_backprop_input=*/false,
                  /*expected_output_dims=*/{1, 2, 2},
                  /*expected_output=*/{1, 0, 1, 3}},
-      // Transpose Strided
-      TestParams{/*input_dims=*/{1, 2, 2},
-                 /*input=*/{0, 1, 2, 3},
-                 /*filter_dims=*/{1, 2, 1, 1},
-                 /*filter=*/{-1, 1},
-                 /*strides=*/{1, 1, 1, 2},
-                 /*padding=*/"SAME",
-                 /*data_format=*/"NCHW",
-                 /*dilations=*/{1, 1, 1, 1},
-                 /*is_conv2d_backprop_input=*/true,
-                 /*expected_output_dims=*/{1, 2, 4},
-                 /*expected_output=*/{0, 0, -1, 1, -2, 2, -3, 3}},
-      // Transpose Strided NHWC
-      TestParams{/*input_dims=*/{2, 2, 1},
-                 /*input=*/{0, 1, 2, 3},
-                 /*filter_dims=*/{1, 2, 1, 1},
-                 /*filter=*/{-1, 1},
-                 /*strides=*/{1, 1, 2, 1},
-                 /*padding=*/"SAME",
-                 /*data_format=*/"NHWC",
-                 /*dilations=*/{1, 1, 1, 1},
-                 /*is_conv2d_backprop_input=*/true,
-                 /*expected_output_dims=*/{2, 4, 1},
-                 /*expected_output=*/{0, 0, -1, 1, -2, 2, -3, 3}},
-      // Transpose Strided NHWC with VALID padding
-      TestParams{/*input_dims=*/{3, 1, 1},
-                 /*input=*/{0, 1, 2},
-                 /*filter_dims=*/{2, 1, 1, 1},
-                 /*filter=*/{-1, 1},
-                 /*strides=*/{1, 2, 1, 1},
-                 /*padding=*/"VALID",
-                 /*data_format=*/"NHWC",
-                 /*dilations=*/{1, 1, 1, 1},
-                 /*is_conv2d_backprop_input=*/true,
-                 /*expected_output_dims=*/{7, 1, 1},
-                 /*expected_output=*/{0, 0, -1, 1, -2, 2, 0}},
   };
   for (int i = 0; i < ok_params.size(); i++) {
     Reset();
-    NodeDef node_def = get_conv2d_nodedef(
-        ok_params[i].strides, ok_params[i].padding, ok_params[i].data_format,
-        ok_params[i].dilations, ok_params[i].is_conv2d_backprop_input);
+    NodeDef node_def =
+        get_conv2d_nodedef(ok_params[i].strides, ok_params[i].padding,
+                           ok_params[i].data_format, ok_params[i].dilations);
     AddTestTensor("input", ok_params[i].input_dims);
     AddTestWeights<float>("weights", ok_params[i].filter_dims,
                           ok_params[i].filter);
-    if (ok_params[i].is_conv2d_backprop_input) {
-      std::vector<int> tf_input_sizes = ok_params[i].expected_output_dims;
-      tf_input_sizes.insert(tf_input_sizes.begin(), 1);  // Add batch dimension.
-      QCHECK_EQ(4, tf_input_sizes.size());
-      AddTestWeights<int>("input_sizes", {4}, tf_input_sizes);
-    }
     RunValidationAndConversion(node_def);
     TRT_TensorOrWeights output;
     TF_EXPECT_OK(GetTensorOrWeights("my_conv2d", &output));
@@ -3979,6 +3904,134 @@ TEST_F(OpConverterTest, ConvertConv2D) {
   }
 }
 
+TEST_F(OpConverterTest, ConvertConv2DBackpropInput) {
+  // Get nodedef for Conv2DBackpropInput layer.
+  auto get_conv2d_backprop_input_nodedef =
+      [](std::vector<int> strides = {1, 1, 1, 1}, string padding = "SAME",
+         string data_format = "NCHW",
+         std::vector<int> dilations = {1, 1, 1, 1}) -> NodeDef {
+    Scope s = Scope::NewRootScope();
+    auto input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
+    auto filter = ops::Placeholder(s.WithOpName("weights"), DT_FLOAT);
+    auto input_sizes = ops::Placeholder(s.WithOpName("input_sizes"), DT_INT32);
+    ops::Conv2DBackpropInput::Attrs attrs = ops::Conv2DBackpropInput::Attrs()
+                                                .DataFormat(data_format)
+                                                .Dilations(dilations);
+    auto conv2d = ops::Conv2DBackpropInput(
+        s.WithOpName("my_conv2d_backprop_input"), input_sizes, filter, input,
+        strides, padding, attrs);
+    return conv2d.operation.node()->def();
+  };
+  {
+    // Dilation + Conv2DBackpropInput, should fail.
+    Reset();
+    NodeDef node_def = get_conv2d_backprop_input_nodedef({1, 1, 1, 1}, "SAME",
+                                                         "NHWC", {1, 1, 2, 1});
+    AddTestTensor("input", {2, 3, 1});
+    AddTestWeights<float>("weights", {3, 3, 1, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
+    AddTestWeights<int>("input_sizes", {4}, {1, 2, 3, 1});
+    RunValidationAndConversion(node_def, error::UNIMPLEMENTED,
+                               "Dilation with Conv2DBackpropInput "
+                               "(conv2d_transpose) is not supported, "
+                               "at my_conv2d_backprop_input");
+  }
+  struct TestParams {
+    std::vector<int> input_dims;
+    std::vector<float> input;
+    std::vector<int> filter_dims;
+    std::vector<float> filter;
+    std::vector<int> strides;
+    string padding;
+    string data_format;
+    std::vector<int> dilations;
+    std::vector<int> expected_output_dims;
+    std::vector<float> expected_output;
+  };
+  // Ok.
+  std::vector<TestParams> ok_params = {
+      // Transpose Strided
+      TestParams{/*input_dims=*/{1, 2, 2},
+                 /*input=*/{0, 1, 2, 3},
+                 /*filter_dims=*/{1, 2, 1, 1},
+                 /*filter=*/{-1, 1},
+                 /*strides=*/{1, 1, 1, 2},
+                 /*padding=*/"SAME",
+                 /*data_format=*/"NCHW",
+                 /*dilations=*/{1, 1, 1, 1},
+                 /*expected_output_dims=*/{1, 2, 4},
+                 /*expected_output=*/{0, 0, -1, 1, -2, 2, -3, 3}},
+      // Transpose Strided NHWC
+      TestParams{/*input_dims=*/{2, 2, 1},
+                 /*input=*/{0, 1, 2, 3},
+                 /*filter_dims=*/{1, 2, 1, 1},
+                 /*filter=*/{-1, 1},
+                 /*strides=*/{1, 1, 2, 1},
+                 /*padding=*/"SAME",
+                 /*data_format=*/"NHWC",
+                 /*dilations=*/{1, 1, 1, 1},
+                 /*expected_output_dims=*/{2, 4, 1},
+                 /*expected_output=*/{0, 0, -1, 1, -2, 2, -3, 3}},
+      // Transpose Strided NHWC with VALID padding
+      TestParams{/*input_dims=*/{3, 1, 1},
+                 /*input=*/{0, 1, 2},
+                 /*filter_dims=*/{2, 1, 1, 1},
+                 /*filter=*/{-1, 1},
+                 /*strides=*/{1, 2, 1, 1},
+                 /*padding=*/"VALID",
+                 /*data_format=*/"NHWC",
+                 /*dilations=*/{1, 1, 1, 1},
+                 /*expected_output_dims=*/{7, 1, 1},
+                 /*expected_output=*/{0, 0, -1, 1, -2, 2, 0}},
+  };
+  for (int i = 0; i < ok_params.size(); i++) {
+    for (int input_sizes_length : {2, 4}) {
+      Reset();
+      NodeDef node_def = get_conv2d_backprop_input_nodedef(
+          ok_params[i].strides, ok_params[i].padding, ok_params[i].data_format,
+          ok_params[i].dilations);
+      AddTestTensor("input", ok_params[i].input_dims);
+      AddTestWeights<float>("weights", ok_params[i].filter_dims,
+                            ok_params[i].filter);
+      std::vector<int> tf_input_sizes = ok_params[i].expected_output_dims;
+      if (input_sizes_length == 4) {
+        tf_input_sizes.insert(tf_input_sizes.begin(),
+                              1);  // Add batch dimension.
+        QCHECK_EQ(4, tf_input_sizes.size());
+        AddTestWeights<int>("input_sizes", {4}, tf_input_sizes);
+      } else {
+        // Remove the channel dimension.
+        if (ok_params[i].data_format == "NHWC") {
+          tf_input_sizes.pop_back();
+        } else {
+          tf_input_sizes.erase(tf_input_sizes.begin());
+        }
+        QCHECK_EQ(2, tf_input_sizes.size());
+        AddTestWeights<int>("input_sizes", {2}, tf_input_sizes);
+      }
+      RunValidationAndConversion(node_def);
+      TRT_TensorOrWeights output;
+      TF_EXPECT_OK(GetTensorOrWeights("my_conv2d_backprop_input", &output));
+      ASSERT_TRUE(output.is_tensor());
+      ExpectTrtDimsEqualsArray(ok_params[i].expected_output_dims,
+                               output.tensor()->getDimensions());
+      const DataVec input_data{{"input", AsTensor<float>(ok_params[i].input)}};
+      DataVec output_data{
+          {"my_conv2d_backprop_input",
+           ConstructTensor<float>(ok_params[i].expected_output.size())}};
+      BuildAndRun(input_data, &output_data);
+      EXPECT_THAT(GetSpanForData<float>(output_data[0]),
+                  ElementsAreArray(ok_params[i].expected_output));
+    }
+  }
+}
 #if IS_TRT_VERSION_GE(6, 0, 0, 0)
 TEST_F(OpConverterTest, ConvertConv3D) {
   // Get nodedef for Conv3D layer.
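As a usage note, the new test exercises both accepted input_sizes lengths. A
small hedged sketch of how the 2-element form is derived from the batchless
expected_output_dims, mirroring the loop above (ToSpatialInputSizes is a
made-up name for illustration, not a helper in the test):

    #include <string>
    #include <vector>

    // Sketch: the test starts from expected_output_dims, a batchless shape
    // ({H, W, C} for NHWC, {C, H, W} for NCHW), and strips the channel
    // dimension to get the 2-element <height,width> input_sizes.
    std::vector<int> ToSpatialInputSizes(std::vector<int> dims,
                                         const std::string& data_format) {
      if (data_format == "NHWC") {
        dims.pop_back();           // Drop trailing channel dimension.
      } else {
        dims.erase(dims.begin());  // NCHW: drop leading channel dimension.
      }
      return dims;  // Now exactly {height, width}.
    }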