From b644a649e7a885a86892054a66e5591fbdb6dceb Mon Sep 17 00:00:00 2001
From: Jingyue Wu
Date: Wed, 22 Apr 2020 08:09:07 -0700
Subject: [PATCH] [tf2tensorrt] Support Conv2DBackpropInput with input_sizes of size 2.

tf2tensorrt prefers an input_sizes of size 2 (spatial dimensions only)
because it remains a build-time constant regardless of whether the batch
size is variable.

Also separate the tests for Conv2DBackpropInput into a dedicated test
method.
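
For illustration, with NHWC data (h_index = 1, w_index = 2), input_sizes of
{1, 2, 3, 1} and of {2, 3} both describe the spatial output size (2, 3).
Below is a minimal standalone sketch of the lookup the new
GetSpatialDimsFromOutputSizes helper performs; SpatialDims is a hypothetical
name, and int32 values are assumed:

    #include <cassert>
    #include <vector>

    // Sketch only: mirrors the index selection in this patch, not TF code.
    std::vector<int> SpatialDims(const std::vector<int>& input_sizes,
                                 int h_index, int w_index) {
      const int n = static_cast<int>(input_sizes.size());
      return {input_sizes[n == 4 ? h_index : 0],
              input_sizes[n == 4 ? w_index : 1]};
    }

    int main() {
      // The 4-element form carries batch and channel dimensions; the spatial
      // dimensions sit at h_index and w_index (1 and 2 for NHWC).
      assert((SpatialDims({1, 2, 3, 1}, 1, 2) == std::vector<int>{2, 3}));
      // The 2-element form is already {height, width}, so it stays constant
      // even when the batch size is dynamic.
      assert((SpatialDims({2, 3}, 1, 2) == std::vector<int>{2, 3}));
      return 0;
    }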

PiperOrigin-RevId: 307818437
Change-Id: Id7a54651d68a72c5222231e02dfe4b3b8e8f35e6
---
 .../tf2tensorrt/convert/convert_nodes.cc      |  41 +++-
 .../tf2tensorrt/convert/convert_nodes_test.cc | 223 +++++++++++-------
 2 files changed, 167 insertions(+), 97 deletions(-)

diff --git a/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc b/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc
index 6a39a13ac0b..a729df73d71 100644
--- a/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc
+++ b/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc
@@ -2027,6 +2027,24 @@ Status Conv2DPaddingHelper(OpConverterParams* params, const TFAttrs& attrs,
   return Status::OK();
 }
 
+namespace {
+// Extracts the spatial dimensions from `output_sizes` and returns them as a
+// vector of size 2.
+std::vector<int64> GetSpatialDimsFromOutputSizes(
+    const TRT_TensorOrWeights& output_sizes, const int h_index,
+    const int w_index) {
+  // We use h_index and w_index instead of 1 and 2 because we haven't
+  // transposed output_sizes along with the input.
+  const TRT_ShapedWeights& weights = output_sizes.weights();
+  const int output_sizes_length = weights.count();
+  auto output_sizes_values = static_cast<int*>(weights.GetValues());
+  // The length of output_sizes can be 2 or 4. When the length is 4,
+  // output_sizes represents the full <N, H, W, C> or <N, C, H, W> shape.
+  return {output_sizes_values[output_sizes_length == 4 ? h_index : 0],
+          output_sizes_values[output_sizes_length == 4 ? w_index : 1]};
+}
+}  // namespace
+
 Status ConvertConv2DHelper(OpConverterParams* params, int group,
                            bool is_conv2d_backprop_input) {
   const auto& inputs = params->inputs;
@@ -2125,11 +2143,8 @@ Status ConvertConv2DHelper(OpConverterParams* params, int group,
     // For backprop, calculate padding based on "input_sizes" input, which
     // actually corresponds to output size. ("input_sizes" makes sense in the
    // context of Conv2DBackpropInput).
-    // We use h_index and w_index instead of 1 and 2 because we havent
-    // transposed backprop_output_size along with the input.
-    auto output_size_weights =
-        static_cast<int*>(backprop_output_size.weights().GetValues());
-    input_dims = {output_size_weights[h_index], output_size_weights[w_index]};
+    input_dims =
+        GetSpatialDimsFromOutputSizes(backprop_output_size, h_index, w_index);
   } else {
     // Use 1 and 2 because tensor_dim has the dimensions of the transposed
     // input.
@@ -2189,22 +2204,24 @@ Status ConvertConv2DHelper(OpConverterParams* params, int group,
     // argument output_shape and thus the TRT output shape could be wrong
     // in case of strides>1.
     if (is_conv2d_backprop_input) {
-      auto tf_output_shape =
-          static_cast<int*>(backprop_output_size.weights().GetValues());
+      std::vector<int64> output_spatial_dims =
+          GetSpatialDimsFromOutputSizes(backprop_output_size, h_index, w_index);
+      const int output_height = output_spatial_dims[0];
+      const int output_width = output_spatial_dims[1];
       nvinfer1::Dims trt_output_shape = output_tensor->getDimensions();
       // What determines the padding size is the difference between the given
       // input_sizes (tf_output_shape) and TRT computed size.
-      const int height_diff = tf_output_shape[h_index] - trt_output_shape.d[1];
-      const int width_diff = tf_output_shape[w_index] - trt_output_shape.d[2];
+      const int height_diff = output_height - trt_output_shape.d[1];
+      const int width_diff = output_width - trt_output_shape.d[2];
       if ((height_diff < 0) || (width_diff < 0)) {
         return errors::InvalidArgument(
             "input_sizes argument of Conv2DBackprop (i.e. output_shape argument "
             "of conv2d_transpose) ",
             "is too small for the given out_backprop argument of Conv2DBackprop "
            "(i.e. input argument of conv2d_transpose). Expect: ",
-            "(", tf_output_shape[h_index], ", ", tf_output_shape[w_index],
-            ") >= ", "(", trt_output_shape.d[1], ", ", trt_output_shape.d[2],
-            ") for op ", node_def.name());
+            "(", output_height, ", ", output_width, ") >= ", "(",
+            trt_output_shape.d[1], ", ", trt_output_shape.d[2], ") for op ",
+            node_def.name());
       }
       // Only add a padding layer if padding sizes are larger than 0
       if ((height_diff > 0) || (width_diff > 0)) {
diff --git a/tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc b/tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc
index 64d82d16d28..3e9c5db80d0 100644
--- a/tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc
+++ b/tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc
@@ -3698,28 +3698,16 @@ TEST_F(OpConverterTest, ConvertConv2D) {
   // Get nodedef for Conv2D layer.
   auto get_conv2d_nodedef =
       [](std::vector<int> strides = {1, 1, 1, 1}, string padding = "SAME",
-         string data_format = "NCHW", std::vector<int> dilations = {1, 1, 1, 1},
-         bool is_conv2d_backprop_input = false) -> NodeDef {
+         string data_format = "NCHW",
+         std::vector<int> dilations = {1, 1, 1, 1}) -> NodeDef {
     Scope s = Scope::NewRootScope();
     auto input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
     auto filter = ops::Placeholder(s.WithOpName("weights"), DT_FLOAT);
-    if (is_conv2d_backprop_input) {
-      auto input_sizes =
-          ops::Placeholder(s.WithOpName("input_sizes"), DT_INT32);
-      ops::Conv2DBackpropInput::Attrs attrs = ops::Conv2DBackpropInput::Attrs()
-                                                  .DataFormat(data_format)
-                                                  .Dilations(dilations);
-      auto conv2d =
-          ops::Conv2DBackpropInput(s.WithOpName("my_conv2d"), input_sizes,
-                                   filter, input, strides, padding, attrs);
-      return conv2d.operation.node()->def();
-    } else {
-      ops::Conv2D::Attrs attrs =
-          ops::Conv2D::Attrs().DataFormat(data_format).Dilations(dilations);
-      auto conv2d = ops::Conv2D(s.WithOpName("my_conv2d"), input, filter,
-                                strides, padding, attrs);
-      return conv2d.operation.node()->def();
-    }
+    ops::Conv2D::Attrs attrs =
+        ops::Conv2D::Attrs().DataFormat(data_format).Dilations(dilations);
+    auto conv2d = ops::Conv2D(s.WithOpName("my_conv2d"), input, filter, strides,
+                              padding, attrs);
+    return conv2d.operation.node()->def();
   };
 
   {
@@ -3785,19 +3773,6 @@ TEST_F(OpConverterTest, ConvertConv2D) {
                                "Dilation rate must be 1 for batch and channel "
                                "dimensions, at my_conv2d");
   }
-  {
-    // Dilation + Conv2DBackpropInput, should fail.
-    Reset();
-    NodeDef node_def =
-        get_conv2d_nodedef({1, 1, 1, 1}, "SAME", "NHWC", {1, 1, 2, 1}, true);
-    AddTestTensor("input", {2, 3, 1});
-    AddTestWeights<float>("weights", {3, 3, 1, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
-    AddTestWeights<int>("input_sizes", {4}, {1, 2, 3, 1});
-    RunValidationAndConversion(node_def, error::UNIMPLEMENTED,
-                               "Dilation with Conv2DBackpropInput "
-                               "(conv2d_transpose) is not supported, "
-                               "at my_conv2d");
-  }
   {
     // Strides is not 4D, should fail.
     Reset();
@@ -3830,7 +3805,6 @@ TEST_F(OpConverterTest, ConvertConv2D) {
     string padding;
     string data_format;
     std::vector<int> dilations;
-    bool is_conv2d_backprop_input;
     std::vector<int> expected_output_dims;
     std::vector<float> expected_output;
   };
@@ -3846,7 +3820,6 @@ TEST_F(OpConverterTest, ConvertConv2D) {
                  /*padding=*/"VALID",
                  /*data_format=*/"NCHW",
                  /*dilations=*/{1, 1, 1, 1},
-                 /*is_conv2d_backprop_input=*/false,
                  /*expected_output_dims=*/{1, 2, 2},
                  /*expected_output=*/{1, 1, 0, 1}},
       // SAME padding (Asymmetric)
@@ -3858,7 +3831,6 @@ TEST_F(OpConverterTest, ConvertConv2D) {
                  /*padding=*/"SAME",
                  /*data_format=*/"NCHW",
                  /*dilations=*/{1, 1, 1, 1},
-                 /*is_conv2d_backprop_input=*/false,
                  /*expected_output_dims=*/{1, 2, 3},
                  /*expected_output=*/{1, 1, -2, 0, 1, -4}},
       // SAME padding (Symmetric)
@@ -3870,7 +3842,6 @@ TEST_F(OpConverterTest, ConvertConv2D) {
                  /*padding=*/"SAME",
                  /*data_format=*/"NCHW",
                  /*dilations=*/{1, 1, 1, 1},
-                 /*is_conv2d_backprop_input=*/false,
                  /*expected_output_dims=*/{1, 2, 3},
                  /*expected_output=*/{1, 2, -1, 3, 1, -3}},
       // NHWC
@@ -3882,7 +3853,6 @@ TEST_F(OpConverterTest, ConvertConv2D) {
                  /*padding=*/"VALID",
                  /*data_format=*/"NHWC",
                  /*dilations=*/{1, 1, 1, 1},
-                 /*is_conv2d_backprop_input=*/false,
                  /*expected_output_dims=*/{2, 2, 1},
                  /*expected_output=*/{1, 1, 0, 1}},
       // Dilated
@@ -3894,7 +3864,6 @@ TEST_F(OpConverterTest, ConvertConv2D) {
                  /*padding=*/"VALID",
                  /*data_format=*/"NCHW",
                  /*dilations=*/{1, 1, 1, 2},
-                 /*is_conv2d_backprop_input=*/false,
                  /*expected_output_dims=*/{1, 2, 1},
                  /*expected_output=*/{2, 1}},
       // Strided
@@ -3906,62 +3875,18 @@ TEST_F(OpConverterTest, ConvertConv2D) {
                  /*padding=*/"VALID",
                  /*data_format=*/"NCHW",
                  /*dilations=*/{1, 1, 1, 1},
-                 /*is_conv2d_backprop_input=*/false,
                  /*expected_output_dims=*/{1, 2, 2},
                  /*expected_output=*/{1, 0, 1, 3}},
-      // Transpose Strided
-      TestParams{/*input_dims=*/{1, 2, 2},
-                 /*input=*/{0, 1, 2, 3},
-                 /*filter_dims=*/{1, 2, 1, 1},
-                 /*filter=*/{-1, 1},
-                 /*strides=*/{1, 1, 1, 2},
-                 /*padding=*/"SAME",
-                 /*data_format=*/"NCHW",
-                 /*dilations=*/{1, 1, 1, 1},
-                 /*is_conv2d_backprop_input=*/true,
-                 /*expected_output_dims=*/{1, 2, 4},
-                 /*expected_output=*/{0, 0, -1, 1, -2, 2, -3, 3}},
-      // Transpose Strided NHWC
-      TestParams{/*input_dims=*/{2, 2, 1},
-                 /*input=*/{0, 1, 2, 3},
-                 /*filter_dims=*/{1, 2, 1, 1},
-                 /*filter=*/{-1, 1},
-                 /*strides=*/{1, 1, 2, 1},
-                 /*padding=*/"SAME",
-                 /*data_format=*/"NHWC",
-                 /*dilations=*/{1, 1, 1, 1},
-                 /*is_conv2d_backprop_input=*/true,
-                 /*expected_output_dims=*/{2, 4, 1},
-                 /*expected_output=*/{0, 0, -1, 1, -2, 2, -3, 3}},
-      // Transpose Strided NHWC with VALID padding
-      TestParams{/*input_dims=*/{3, 1, 1},
-                 /*input=*/{0, 1, 2},
-                 /*filter_dims=*/{2, 1, 1, 1},
-                 /*filter=*/{-1, 1},
-                 /*strides=*/{1, 2, 1, 1},
-                 /*padding=*/"VALID",
-                 /*data_format=*/"NHWC",
-                 /*dilations=*/{1, 1, 1, 1},
-                 /*is_conv2d_backprop_input=*/true,
-                 /*expected_output_dims=*/{7, 1, 1},
-                 /*expected_output=*/{0, 0, -1, 1, -2, 2, 0}},
   };
 
   for (int i = 0; i < ok_params.size(); i++) {
     Reset();
-    NodeDef node_def = get_conv2d_nodedef(
-        ok_params[i].strides, ok_params[i].padding, ok_params[i].data_format,
-        ok_params[i].dilations, ok_params[i].is_conv2d_backprop_input);
+    NodeDef node_def =
+        get_conv2d_nodedef(ok_params[i].strides, ok_params[i].padding,
+                           ok_params[i].data_format, ok_params[i].dilations);
     AddTestTensor("input", ok_params[i].input_dims);
     AddTestWeights<float>("weights", ok_params[i].filter_dims,
                           ok_params[i].filter);
-    if (ok_params[i].is_conv2d_backprop_input) {
-      std::vector<int> tf_input_sizes =
-          ok_params[i].expected_output_dims;
-      tf_input_sizes.insert(tf_input_sizes.begin(), 1);  // Add batch dimension.
-      QCHECK_EQ(4, tf_input_sizes.size());
-      AddTestWeights<int>("input_sizes", {4}, tf_input_sizes);
-    }
     RunValidationAndConversion(node_def);
     TRT_TensorOrWeights output;
     TF_EXPECT_OK(GetTensorOrWeights("my_conv2d", &output));
@@ -3979,6 +3904,134 @@ TEST_F(OpConverterTest, ConvertConv2D) {
   }
 }
 
+TEST_F(OpConverterTest, ConvertConv2DBackpropInput) {
+  // Get nodedef for Conv2DBackpropInput layer.
+  auto get_conv2d_backprop_input_nodedef =
+      [](std::vector<int> strides = {1, 1, 1, 1}, string padding = "SAME",
+         string data_format = "NCHW",
+         std::vector<int> dilations = {1, 1, 1, 1}) -> NodeDef {
+    Scope s = Scope::NewRootScope();
+    auto input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
+    auto filter = ops::Placeholder(s.WithOpName("weights"), DT_FLOAT);
+    auto input_sizes = ops::Placeholder(s.WithOpName("input_sizes"), DT_INT32);
+    ops::Conv2DBackpropInput::Attrs attrs = ops::Conv2DBackpropInput::Attrs()
+                                                .DataFormat(data_format)
+                                                .Dilations(dilations);
+    auto conv2d = ops::Conv2DBackpropInput(
+        s.WithOpName("my_conv2d_backprop_input"), input_sizes, filter, input,
+        strides, padding, attrs);
+    return conv2d.operation.node()->def();
+  };
+
+  {
+    // Dilation + Conv2DBackpropInput, should fail.
+    Reset();
+    NodeDef node_def = get_conv2d_backprop_input_nodedef({1, 1, 1, 1}, "SAME",
+                                                         "NHWC", {1, 1, 2, 1});
+    AddTestTensor("input", {2, 3, 1});
+    AddTestWeights<float>("weights", {3, 3, 1, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
+    AddTestWeights<int>("input_sizes", {4}, {1, 2, 3, 1});
+    RunValidationAndConversion(node_def, error::UNIMPLEMENTED,
+                               "Dilation with Conv2DBackpropInput "
+                               "(conv2d_transpose) is not supported, "
+                               "at my_conv2d_backprop_input");
+  }
+
+  struct TestParams {
+    std::vector<int> input_dims;
+    std::vector<float> input;
+    std::vector<int> filter_dims;
+    std::vector<float> filter;
+    std::vector<int> strides;
+    string padding;
+    string data_format;
+    std::vector<int> dilations;
+    std::vector<int> expected_output_dims;
+    std::vector<float> expected_output;
+  };
+
+  // Ok.
+  std::vector<TestParams> ok_params = {
+      // Transpose Strided
+      TestParams{/*input_dims=*/{1, 2, 2},
+                 /*input=*/{0, 1, 2, 3},
+                 /*filter_dims=*/{1, 2, 1, 1},
+                 /*filter=*/{-1, 1},
+                 /*strides=*/{1, 1, 1, 2},
+                 /*padding=*/"SAME",
+                 /*data_format=*/"NCHW",
+                 /*dilations=*/{1, 1, 1, 1},
+                 /*expected_output_dims=*/{1, 2, 4},
+                 /*expected_output=*/{0, 0, -1, 1, -2, 2, -3, 3}},
+      // Transpose Strided NHWC
+      TestParams{/*input_dims=*/{2, 2, 1},
+                 /*input=*/{0, 1, 2, 3},
+                 /*filter_dims=*/{1, 2, 1, 1},
+                 /*filter=*/{-1, 1},
+                 /*strides=*/{1, 1, 2, 1},
+                 /*padding=*/"SAME",
+                 /*data_format=*/"NHWC",
+                 /*dilations=*/{1, 1, 1, 1},
+                 /*expected_output_dims=*/{2, 4, 1},
+                 /*expected_output=*/{0, 0, -1, 1, -2, 2, -3, 3}},
+      // Transpose Strided NHWC with VALID padding
+      TestParams{/*input_dims=*/{3, 1, 1},
+                 /*input=*/{0, 1, 2},
+                 /*filter_dims=*/{2, 1, 1, 1},
+                 /*filter=*/{-1, 1},
+                 /*strides=*/{1, 2, 1, 1},
+                 /*padding=*/"VALID",
+                 /*data_format=*/"NHWC",
+                 /*dilations=*/{1, 1, 1, 1},
+                 /*expected_output_dims=*/{7, 1, 1},
+                 /*expected_output=*/{0, 0, -1, 1, -2, 2, 0}},
+  };
+
+  for (int i = 0; i < ok_params.size(); i++) {
+    for (int input_sizes_length : {2, 4}) {
+      Reset();
+      NodeDef node_def = get_conv2d_backprop_input_nodedef(
+          ok_params[i].strides, ok_params[i].padding, ok_params[i].data_format,
+          ok_params[i].dilations);
+      AddTestTensor("input", ok_params[i].input_dims);
+      AddTestWeights<float>("weights", ok_params[i].filter_dims,
+                            ok_params[i].filter);
+
+      std::vector<int> tf_input_sizes = ok_params[i].expected_output_dims;
+      if (input_sizes_length == 4) {
+        tf_input_sizes.insert(tf_input_sizes.begin(),
+                              1);  // Add batch dimension.
+        QCHECK_EQ(4, tf_input_sizes.size());
+        AddTestWeights<int>("input_sizes", {4}, tf_input_sizes);
+      } else {
+        // Remove the channel dimension.
+        if (ok_params[i].data_format == "NHWC") {
+          tf_input_sizes.pop_back();
+        } else {
+          tf_input_sizes.erase(tf_input_sizes.begin());
+        }
+        QCHECK_EQ(2, tf_input_sizes.size());
+        AddTestWeights<int>("input_sizes", {2}, tf_input_sizes);
+      }
+
+      RunValidationAndConversion(node_def);
+      TRT_TensorOrWeights output;
+      TF_EXPECT_OK(GetTensorOrWeights("my_conv2d_backprop_input", &output));
+      ASSERT_TRUE(output.is_tensor());
+      ExpectTrtDimsEqualsArray(ok_params[i].expected_output_dims,
+                               output.tensor()->getDimensions());
+
+      const DataVec input_data{{"input", AsTensor<float>(ok_params[i].input)}};
+      DataVec output_data{
+          {"my_conv2d_backprop_input",
+           ConstructTensor<float>(ok_params[i].expected_output.size())}};
+      BuildAndRun(input_data, &output_data);
+      EXPECT_THAT(GetSpanForData<float>(output_data[0]),
+                  ElementsAreArray(ok_params[i].expected_output));
+    }
+  }
+}
+
 #if IS_TRT_VERSION_GE(6, 0, 0, 0)
 TEST_F(OpConverterTest, ConvertConv3D) {
   // Get nodedef for Conv3D layer.
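
The "Transpose Strided NHWC with VALID padding" case above is the one that
exercises the padding branch changed in convert_nodes.cc: the requested
input_sizes height (7) exceeds the deconvolution size TensorRT computes, so
the converter appends a padding layer. A worked sketch of that check, assuming
the usual transposed-convolution size formula out = (in - 1) * stride + kernel
(the variable names are illustrative, not taken from the patch):

    #include <cstdio>

    int main() {
      // out_backprop height 3, stride 2, 2x1 filter, requested height 7.
      const int in_height = 3, stride = 2, kernel = 2;
      const int trt_height = (in_height - 1) * stride + kernel;  // 6
      const int requested_height = 7;  // height from the test's input_sizes
      const int height_diff = requested_height - trt_height;     // 1
      if (height_diff < 0) {
        std::printf("InvalidArgument: input_sizes too small\n");
      } else if (height_diff > 0) {
        // ConvertConv2DHelper adds a padding layer in this case, which is
        // why the expected output {0, 0, -1, 1, -2, 2, 0} ends in a zero.
        std::printf("pad output height by %d -> %d\n", height_diff,
                    trt_height + height_diff);
      }
      return 0;
    }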