From cb61c42938010f3ff33b55ef4b41651c22fb7652 Mon Sep 17 00:00:00 2001
From: Matt Conley
Date: Mon, 15 Jun 2020 11:22:30 -0700
Subject: [PATCH] [TFTRT] Add Dynamic Shape Tests for ConvertSquare

Co-authored-by: Tamas Feher

- Modify ConvertSquare tests to use newer TFTRT testing API
- Add INT32 as a supported dtype for TFTRT ConvertSquare
---
 .../tf2tensorrt/convert/convert_nodes.cc      |  5 ++
 .../tf2tensorrt/convert/convert_nodes_test.cc | 64 +++++++------------
 2 files changed, 28 insertions(+), 41 deletions(-)

diff --git a/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc b/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc
index 20ee5ffd8f8..28b27959afc 100644
--- a/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc
+++ b/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc
@@ -4424,8 +4424,13 @@ Status ConvertSquare(OpConverterParams* params) {
   const auto& inputs = params->inputs;
   const auto& node_def = params->node_def;
   TF_RETURN_IF_ERROR(CheckInputsWeights(*params, {{"x", false}}));
+#if IS_TRT_VERSION_GE(6, 0, 1, 0)
+  TF_RETURN_IF_ERROR(AllowDataTypes(
+      *params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
+#else
   TF_RETURN_IF_ERROR(
       AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
+#endif
   if (params->validation_only) return Status::OK();
 
   // Constant 2 with same rank as input
diff --git a/tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc b/tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc
index 450831910f6..1192b563e57 100644
--- a/tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc
+++ b/tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc
@@ -2754,58 +2754,40 @@ TEST_F(OpConverterTest, ConvertQuantize) {
   }
 }
 
-template <DataType dtype>
-void TestConvertSquare(OpConverterTest* test) {
-  test->Reset();
-  typedef typename EnumToDataType<dtype>::Type CType;
-
-  Scope s = Scope::NewRootScope();
-  auto input = ops::Placeholder(s.WithOpName("input"), dtype);
-  auto square = ops::Square(s.WithOpName("my_square"), input);
-  NodeDef node_def = square.operation.node()->def();
-
-  test->AddTestTensor("input", {1, 20}, /*batch_size=*/1,
-                      TfDataTypeToTrt(dtype));
-  test->RunValidationAndConversion(node_def);
-  TRT_TensorOrWeights output;
-  TF_EXPECT_OK(test->GetTensorOrWeights("my_square", &output));
-  ASSERT_TRUE(output.is_tensor());
-  ExpectTrtDimsEqualsArray({1, 20}, output.tensor()->getDimensions());
-
-  const int num_inputs = 20;
-  std::vector<CType> inputs(num_inputs);
-  std::vector<CType> expected_outputs(num_inputs);
-  for (int i = 0; i < num_inputs; ++i) {
-    const CType value = CType(i - 9);
-    inputs[i] = value;
-    expected_outputs[i] = value * value;
-  }
-  const DataVec input_data{{"input", test->AsTensor<CType>(inputs)}};
-  // Engine outputs are converted to FP16 automatically if we set FP16 mode in
-  // the builder.
-  DataVec output_data{{"my_square", test->ConstructTensor<CType>(num_inputs)}};
-  TF_EXPECT_OK(test->BuildAndRun(input_data, &output_data));
-  ExpectArrayNear(expected_outputs, GetSpanForData<CType>(output_data[0]));
-}
-
-TEST_F(OpConverterTest, ConvertSquare) {
+TEST_P(OpConverterTest2, ConvertSquare) {
   {
     // Input is weights, should fail.
     Reset();
     Scope s = Scope::NewRootScope();
-    auto input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
+    auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
     auto square = ops::Square(s.WithOpName("my_square"), input);
     NodeDef node_def = square.operation.node()->def();
-    AddTestWeights<float>("input", {1, 2, 3}, {1, 2, 3, 4, -5, 6});
+    AddTestWeights("input", {1, 2, 3}, {1, 2, 3, 4, -5, 6}, tf_type);
     RunValidationAndConversion(
         node_def, error::UNIMPLEMENTED,
         "The input \"x\" for Square must be a tensor, at my_square");
   }
 
-  // OK. Note that kINT32 is not supported by IElementWiseLayer, so we don't
-  // test DT_INT32 type here.
-  TestConvertSquare<DT_FLOAT>(this);
-  TestConvertSquare<DT_HALF>(this);
+  Reset();
+
+  Scope s = Scope::NewRootScope();
+  auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
+  auto square = ops::Square(s.WithOpName("my_square"), input);
+  NodeDef node_def = square.operation.node()->def();
+
+  const int num_inputs = 20;
+  std::vector<float> inputs(num_inputs);
+  std::vector<float> expected_outputs(num_inputs);
+
+  for (int i = 0; i < num_inputs; ++i) {
+    const float value = (i - 9);
+    inputs[i] = value;
+    expected_outputs[i] = value * value;
+  }
+  AddTestTensor("input", {1, 1, 20}, tf_type, inputs);
+
+  TestOpConverter("my_square", node_def, {1, 1, 20}, Status::OK(), Status::OK(),
+                  ArrayFloatNear(expected_outputs, 0));
 }
 
 #if IS_TRT_VERSION_GE(5, 1, 0, 0)
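
Note on the test rewrite: TEST_P(OpConverterTest2, ...) uses googletest value parameterization, so the single test body above runs once per parameter tuple (TRT test mode, TF dtype, precision) that the OpConverterTest2 fixture is instantiated with, which is how the INT32 and dynamic-shape coverage is picked up without the removed per-dtype TestConvertSquare<dtype> helper. The snippet below is a minimal, self-contained sketch of that TEST_P / INSTANTIATE pattern only; SquareParamTest and its parameter values are hypothetical stand-ins, not the actual fixture from convert_nodes_test.cc.

// Minimal sketch of the value-parameterized googletest pattern used above.
// SquareParamTest is a hypothetical fixture; the real OpConverterTest2 lives
// in convert_nodes_test.cc and is parameterized over a tuple of TRT test
// mode, TF dtype, and TRT precision rather than a single float.
#include <cmath>

#include "gtest/gtest.h"

// GetParam() plays the role that tf_type (and the other tuple members) play
// for OpConverterTest2.
class SquareParamTest : public ::testing::TestWithParam<float> {};

TEST_P(SquareParamTest, SquaresItsParameter) {
  const float value = GetParam();
  // Same per-element check the rewritten test performs: output == input^2.
  EXPECT_FLOAT_EQ(value * value, std::pow(value, 2.0f));
}

// Each listed value produces an independent run of the TEST_P body, mirroring
// how OpConverterTest2 runs ConvertSquare once per parameter combination.
INSTANTIATE_TEST_CASE_P(SquareValues, SquareParamTest,
                        ::testing::Values(-9.0f, 0.0f, 10.0f));

With googletest, each value passed to ::testing::Values() yields an independently named test instance (e.g. SquareValues/SquareParamTest.SquaresItsParameter/0), so failures report which parameter combination broke.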