Merge pull request #42044 from tfeher:trt_fix_naming_style

PiperOrigin-RevId: 328673510
Change-Id: Ief79c54bb624353d223447924bb6e1c7e1d9b7b2
Commit: ecc6730253
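
The change is purely a naming-style cleanup: the test fixture's data members trt_mode, tf_type, and converter_precision gain a trailing underscore (trt_mode_, tf_type_, converter_precision_), following the Google C++ style rule that class data members end in an underscore. A minimal, self-contained illustration of the convention (the names here are made up for the sketch; this is not code from the PR):

#include <iostream>

class Converter {
 public:
  explicit Converter(int precision) : precision_(precision) {}
  int precision() const { return precision_; }  // accessor keeps the bare name

 private:
  int precision_;  // data member: trailing underscore per Google C++ style
};

int main() {
  Converter c(8);
  std::cout << c.precision() << "\n";  // prints 8
}

The suffix frees the bare name for locals, parameters, and accessors, so a read like tf_type inside a function body is unambiguous.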
@@ -1709,12 +1709,12 @@ class ParameterizedOpConverterTestBase
           std::tuple<TrtTestMode, DataType, TrtPrecisionMode>> {
  public:
   ParameterizedOpConverterTestBase()
-      : trt_mode(std::get<0>(GetParam())),
-        tf_type(std::get<1>(GetParam())),
-        converter_precision(std::get<2>(GetParam())) {}
+      : trt_mode_(std::get<0>(GetParam())),
+        tf_type_(std::get<1>(GetParam())),
+        converter_precision_(std::get<2>(GetParam())) {}
 
   void Reset() {
-    OpConverterTest::Reset(converter_precision, trt_mode);
+    OpConverterTest::Reset(converter_precision_, trt_mode_);
     input_data_.clear();
   }
 
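For context on the fixture above: each test parameter arrives as a std::tuple and is unpacked with std::get<N>(GetParam()) in the constructor. Below is a hedged sketch of how such a tuple-parameterized googletest fixture is typically wired up; the enums, values, and suite name are placeholders, and the PR's actual instantiation (TensorFlow at the time used the older INSTANTIATE_TEST_CASE_P macro) lives outside this hunk:

#include <tuple>
#include "gtest/gtest.h"

enum class TrtTestMode { kImplicitBatch, kExplicitBatch, kDynamicShape };
enum class TfType { kFloat, kHalf };
enum class PrecisionMode { kFP32, kFP16 };

class TupleParamTest
    : public ::testing::TestWithParam<
          std::tuple<TrtTestMode, TfType, PrecisionMode>> {
 protected:
  TupleParamTest()
      : trt_mode_(std::get<0>(GetParam())),
        tf_type_(std::get<1>(GetParam())),
        converter_precision_(std::get<2>(GetParam())) {}

  const TrtTestMode trt_mode_;
  const TfType tf_type_;
  const PrecisionMode converter_precision_;
};

TEST_P(TupleParamTest, BodyRunsOncePerCombination) {
  // Each (mode, type, precision) triple from Combine() produces one run.
}

INSTANTIATE_TEST_SUITE_P(
    AllCombos, TupleParamTest,
    ::testing::Combine(::testing::Values(TrtTestMode::kImplicitBatch,
                                         TrtTestMode::kExplicitBatch,
                                         TrtTestMode::kDynamicShape),
                       ::testing::Values(TfType::kFloat, TfType::kHalf),
                       ::testing::Values(PrecisionMode::kFP32,
                                         PrecisionMode::kFP16)));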
@@ -1750,7 +1750,7 @@ class ParameterizedOpConverterTestBase
     if (!partial_input_shape_dims.empty()) {
       partial_shape = partial_input_shape_dims;
     } else {
-      if (trt_mode == TrtTestMode::kDynamicShape) {
+      if (trt_mode_ == TrtTestMode::kDynamicShape) {
         // In dynamic shape mode we make all dims unknown.
         partial_shape = std::vector<int32>(dims.size(), -1);
       } else {
@@ -1776,7 +1776,7 @@ class ParameterizedOpConverterTestBase
   void AddTestTensor(const string& name, const std::vector<int32>& dims,
                      const std::vector<T>& values = {},
                      const std::vector<int32>& partial_input_shape_dims = {}) {
-    AddTestTensor<T>(name, dims, tf_type, values, partial_input_shape_dims);
+    AddTestTensor<T>(name, dims, tf_type_, values, partial_input_shape_dims);
   }
 
   // Builds and runs the converted network. Checks output tensor shape. Tests
@@ -1796,7 +1796,7 @@ class ParameterizedOpConverterTestBase
           TensorShapeUtils::MakeShape(expected_output_dims[i], &shape));
       string out_name = (n_output == 1) ? name : StrCat(name, ":", i);
       DataType out_tf_type =
-          out_tf_types.size() > i ? out_tf_types[i] : tf_type;
+          out_tf_types.size() > i ? out_tf_types[i] : tf_type_;
       InputOutputData data{
           out_name, ConstructTensor(shape.num_elements(), 0, out_tf_type)};
       output_data.push_back(data);
@@ -1840,9 +1840,9 @@ class ParameterizedOpConverterTestBase
   }
 
  protected:
-  const TrtTestMode trt_mode;
-  const DataType tf_type;
-  const TrtPrecisionMode converter_precision;
+  const TrtTestMode trt_mode_;
+  const DataType tf_type_;
+  const TrtPrecisionMode converter_precision_;
   DataVec input_data_;
 };
 
@@ -2075,7 +2075,7 @@ TEST_P(OpConverterTest1, ConvertFusedBatchNorm) {
                                      37.342354, 41.013527, 30.9738, 34.469433,
                                      45.018955, 48.59309, 59.369415, 63.04059};
   for (auto get_node_def : get_node_def_vec) {
-    NodeDef tmp_node_def = get_node_def(tf_type, "NCHW", true, 0);
+    NodeDef tmp_node_def = get_node_def(tf_type_, "NCHW", true, 0);
     std::string op_name = tmp_node_def.op();
     std::vector<TestParam> test_param{
         {"NHWC", 0, false, 0,
@@ -2097,7 +2097,7 @@ TEST_P(OpConverterTest1, ConvertFusedBatchNorm) {
          errors::Unimplemented(StrCat("The input \"variance\" for ", op_name,
                                       " must be a constant, at my_batchnorm"))},
         {"NCHW", 0, false, 0.01}};  // The last one is the only test that runs.
-    if (trt_mode == TrtTestMode::kDynamicShape) {
+    if (trt_mode_ == TrtTestMode::kDynamicShape) {
       test_param.push_back(
           {"NCHW", 0, false, 0.01,
            errors::InvalidArgument(
@@ -2107,7 +2107,7 @@ TEST_P(OpConverterTest1, ConvertFusedBatchNorm) {
     for (auto p : test_param) {
       Reset();
       NodeDef node_def =
-          get_node_def(tf_type, p.data_format, p.is_training, p.epsilon);
+          get_node_def(tf_type_, p.data_format, p.is_training, p.epsilon);
       for (int i = 0; i < node_input.size(); i++) {
         if (i == 0 || i == p.tensor_input_idx) {
           // The first input (x) is always added as a tensor, and it hase shape
@@ -2126,7 +2126,7 @@ TEST_P(OpConverterTest1, ConvertFusedBatchNorm) {
           // the first arg is a tensor. TODO(tfeher) Check if one can relax this
           // restriction.
           Status expected_status =
-              (i != 0 && trt_mode == TrtTestMode::kImplicitBatch)
+              (i != 0 && trt_mode_ == TrtTestMode::kImplicitBatch)
                   ? errors::InvalidArgument(
                         StrCat("Batch size doesn't match for tensor ",
                                node_input[i].name,
@@ -2134,19 +2134,19 @@ TEST_P(OpConverterTest1, ConvertFusedBatchNorm) {
                                "converter batch size: 3 vs 2"))
                   : Status::OK();
           std::vector<int> partial_input_shape;
-          if (i == 0 && trt_mode == TrtTestMode::kDynamicShape &&
+          if (i == 0 && trt_mode_ == TrtTestMode::kDynamicShape &&
               !p.keep_channel_unknown) {
             // keep channel dim static (known)
             partial_input_shape.resize(4, -1);
             partial_input_shape[1] = node_input[i].dims[1];
           }
-          AddTestTensor(node_input[i].name, node_input[i].dims, tf_type,
+          AddTestTensor(node_input[i].name, node_input[i].dims, tf_type_,
                         node_input[i].val, partial_input_shape,
                         expected_status);
 
         } else {
           AddTestWeights(node_input[i].name, node_input[i].dims,
-                         node_input[i].val, tf_type);
+                         node_input[i].val, tf_type_);
         }
       }
       TestOpConverter("my_batchnorm", node_def, node_input[0].dims,
@@ -2154,12 +2154,12 @@ TEST_P(OpConverterTest1, ConvertFusedBatchNorm) {
                       ArrayFloatNear(expected_output));
     }
   }
-}  // namespace convert
+}
 
 TEST_P(OpConverterTest1, ConvertTranspose) {
   // Get the NodeDef for Transpose.
   Scope s = Scope::NewRootScope();
-  auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
+  auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
   auto weights = ops::Placeholder(s.WithOpName("weights"), DT_INT32);
   auto transpose = ops::Transpose(s.WithOpName("my_transpose"), input, weights);
   const NodeDef& node_def = transpose.operation.node()->def();
|
|||||||
{},
|
{},
|
||||||
{3, 2, 1, 1},
|
{3, 2, 1, 1},
|
||||||
{3, 2, 1, 0},
|
{3, 2, 1, 0},
|
||||||
(trt_mode == TrtTestMode::kImplicitBatch)
|
(trt_mode_ == TrtTestMode::kImplicitBatch)
|
||||||
? Status(error::UNIMPLEMENTED,
|
? Status(error::UNIMPLEMENTED,
|
||||||
"Transpose at batch dimension is not supported")
|
"Transpose at batch dimension is not supported")
|
||||||
: Status::OK()},
|
: Status::OK()},
|
||||||
TestParamBase{{1, 1, 2, 3}, {}, {1, 3, 1, 2}, {0, 3, 1, 2}},
|
TestParamBase{{1, 1, 2, 3}, {}, {1, 3, 1, 2}, {0, 3, 1, 2}},
|
||||||
};
|
};
|
||||||
if (trt_mode == TrtTestMode::kDynamicShape) {
|
if (trt_mode_ == TrtTestMode::kDynamicShape) {
|
||||||
// Dynamic shape tests where some shapes are known
|
// Dynamic shape tests where some shapes are known
|
||||||
test_params.push_back(TestParamBase{
|
test_params.push_back(TestParamBase{
|
||||||
{1, 1, 2, 3}, {-1, 1, 2, -1}, {1, 3, 1, 2}, {0, 3, 1, 2}});
|
{1, 1, 2, 3}, {-1, 1, 2, -1}, {1, 3, 1, 2}, {0, 3, 1, 2}});
|
||||||
@@ -2317,12 +2317,12 @@ TEST_F(OpConverterTest, ConvertReshape) {
 TEST_P(OpConverterTest1, ConvertShape) {
   // Get the NodeDef for Shape op.
   Scope s = Scope::NewRootScope();
-  auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
+  auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
   auto shape = ops::Shape(s.WithOpName("my_shape"), input);
   const NodeDef& node_def = shape.operation.node()->def();
 
   Status conversion_status =
-      (trt_mode == TrtTestMode::kImplicitBatch)
+      (trt_mode_ == TrtTestMode::kImplicitBatch)
           ? errors::Unimplemented(
                 "Shape is only supported for explicit batch mode.")
           : Status::OK();
@@ -2346,7 +2346,7 @@ TEST_P(OpConverterTest1, ConvertShape) {
     // we use for the unit test have no actual input tensor when it is converted
     // to a TensorRT network.
     int n_elements = 0;
-    if (input_is_weight(p) || trt_mode != TrtTestMode::kExplicitBatch) {
+    if (input_is_weight(p) || trt_mode_ != TrtTestMode::kExplicitBatch) {
       // Calculate the number of elements for adding input data.
       n_elements = std::accumulate(p.input_dims.begin(), p.input_dims.end(), 1,
                                    std::multiplies<int>());
@@ -2355,7 +2355,7 @@ TEST_P(OpConverterTest1, ConvertShape) {
     if (!input_is_weight(p)) {
       AddTestTensor("input", p.input_dims, input_val);
     } else {
-      AddTestWeights("input", p.input_dims, input_val, tf_type);
+      AddTestWeights("input", p.input_dims, input_val, tf_type_);
     }
     TestOpConverter("my_shape", node_def, p.expected_output_dims, p.status,
                     p.runtime_status, ElementsAreArray(p.input_dims),
@@ -2620,7 +2620,7 @@ TEST_P(OpConverterTest2, ConvertBiasAdd) {
   for (const string& data_format : {"NHWC", "NCHW"}) {
     for (const int trt_input_rank : {1, 2, 3, 4}) {
       Reset();
-      NodeDef node_def = get_biasadd_nodedef(data_format, tf_type);
+      NodeDef node_def = get_biasadd_nodedef(data_format, tf_type_);
 
       // Add input, dims_array will be like {2, 1, ..., 1, 3}
       std::vector<int32> dims_array(trt_input_rank + 1, 1);
@@ -2642,7 +2642,7 @@ TEST_P(OpConverterTest2, ConvertBiasAdd) {
       for (int i = 0; i < channel_size; ++i) {
         bias[i] = i + 1;  // bias will be {1, 2, 3, ...}
       }
-      AddTestWeights("weights", {channel_size}, bias, tf_type);
+      AddTestWeights("weights", {channel_size}, bias, tf_type_);
 
       // Build and run the engine.
       std::vector<float> output_data;
@@ -2678,7 +2678,7 @@ NodeDef GetBinaryOpNodeDef(DataType dtype) {
 TEST_P(OpConverterTest2, ConvertBinary) {
   {
     AttrValue dtype;
-    dtype.set_type(tf_type);
+    dtype.set_type(tf_type_);
     // Both inputs are weights.
     Reset();
     NodeDef node_def =
@@ -2723,19 +2723,19 @@ TEST_P(OpConverterTest2, ConvertBinary) {
     if (!op_test_info.count(op_name)) {
       FAIL() << "Binary op test map does not contain op " << op_name;
     }
-    NodeDef node_def = op_test_info[op_name].first(tf_type);
+    NodeDef node_def = op_test_info[op_name].first(tf_type_);
     std::vector<std::string> input_names;
     std::vector<std::vector<int>> input_dims;
     std::vector<std::vector<float>> input_values;
     if (operand_1_is_tensor) {
       AddTestTensor("input1", {2, 1, 2}, {3, 6, 3, 6});
     } else {
-      AddTestWeights("input1", {1, 2}, std::vector<float>{3, 6}, tf_type);
+      AddTestWeights("input1", {1, 2}, std::vector<float>{3, 6}, tf_type_);
     }
     if (operand_2_is_tensor) {
       AddTestTensor("input2", {2, 2, 1}, {2, 3, 2, 3});
     } else {
-      AddTestWeights("input2", {2, 1}, std::vector<float>{2, 3}, tf_type);
+      AddTestWeights("input2", {2, 1}, std::vector<float>{2, 3}, tf_type_);
     }
     TestOpConverter("my_binary", node_def, {2, 2, 2}, Status::OK(),
                     Status::OK(),
@@ -2942,10 +2942,10 @@ TEST_P(OpConverterTest2, ConvertSquare) {
     // Input is weights, should fail.
     Reset();
     Scope s = Scope::NewRootScope();
-    auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
+    auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
     auto square = ops::Square(s.WithOpName("my_square"), input);
     NodeDef node_def = square.operation.node()->def();
-    AddTestWeights("input", {1, 2, 3}, {1, 2, 3, 4, -5, 6}, tf_type);
+    AddTestWeights("input", {1, 2, 3}, {1, 2, 3, 4, -5, 6}, tf_type_);
     RunValidationAndConversion(
         node_def, error::UNIMPLEMENTED,
         "The input \"x\" for Square must be a tensor, at my_square");
@@ -2954,7 +2954,7 @@ TEST_P(OpConverterTest2, ConvertSquare) {
   Reset();
 
   Scope s = Scope::NewRootScope();
-  auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
+  auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
   auto square = ops::Square(s.WithOpName("my_square"), input);
   NodeDef node_def = square.operation.node()->def();
 
@@ -2967,7 +2967,7 @@ TEST_P(OpConverterTest2, ConvertSquare) {
     inputs[i] = value;
     expected_outputs[i] = value * value;
   }
-  AddTestTensor("input", {1, 1, 20}, tf_type, inputs);
+  AddTestTensor("input", {1, 1, 20}, tf_type_, inputs);
 
   TestOpConverter("my_square", node_def, {1, 1, 20}, Status::OK(), Status::OK(),
                   ArrayFloatNear(expected_outputs, 0));
@@ -3094,7 +3094,7 @@ TEST_P(OpConverterTest1, ConvertActivation) {
   {
     // Input is weights, should fail.
     Reset();
-    const NodeDef& node_def = CreateUnaryOp<ops::Relu>(tf_type);
+    const NodeDef& node_def = CreateUnaryOp<ops::Relu>(tf_type_);
     AddTestWeights<int32>("input", {1, 2, 3}, {-3, -2, -1, 0, 1, 2});
     RunValidationAndConversion(
         node_def, error::UNIMPLEMENTED,
@@ -3151,7 +3151,7 @@ TEST_P(OpConverterTest1, ConvertActivation) {
       FAIL() << "Activation op test map does not contain op " << op_name;
     }
     Reset();
-    NodeDef node_def = op_map[op_name].first(tf_type);
+    NodeDef node_def = op_map[op_name].first(tf_type_);
     const std::vector<float> input = {-100, -2, -1, 0, 1, 88};
     AddTestTensor("input", p.input_dims, input);
 
@@ -3179,7 +3179,7 @@ TEST_P(OpConverterTest1, ConvertActivation) {
 TEST_P(OpConverterTest1, ConvertExpandDims) {
   // Get the NodeDef for ExpandDims.
   Scope s = Scope::NewRootScope();
-  auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
+  auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
   auto weights = ops::Placeholder(s.WithOpName("weights"), DT_INT32);
   auto expanddims =
       ops::ExpandDims(s.WithOpName("my_expanddims"), input, weights);
@@ -3207,7 +3207,7 @@ TEST_P(OpConverterTest1, ConvertExpandDims) {
           {},
           {1, 1, 1, 2, 3},
           {0},
-          trt_mode == TrtTestMode::kImplicitBatch
+          trt_mode_ == TrtTestMode::kImplicitBatch
              ? Status(error::UNIMPLEMENTED,
                        "TensorRT does not allow manipulation of the "
                        "batch dimension, at my_expanddims")
@@ -3216,7 +3216,7 @@ TEST_P(OpConverterTest1, ConvertExpandDims) {
           {},
           {1, 1, 1, 2, 3},
           {-5},
-          trt_mode == TrtTestMode::kImplicitBatch
+          trt_mode_ == TrtTestMode::kImplicitBatch
              ? Status(error::UNIMPLEMENTED,
                        "TensorRT does not allow manipulation of the "
                        "batch dimension, at my_expanddims")
@@ -3254,7 +3254,7 @@ TEST_P(OpConverterTest1, ConvertExpandDims) {
 }
 
 TEST_P(OpConverterTest1, ConvertSqueeze) {
-  const bool use_implicit_batch = (trt_mode == TrtTestMode::kImplicitBatch);
+  const bool use_implicit_batch = (trt_mode_ == TrtTestMode::kImplicitBatch);
   // Get the NodeDef for Squeeze.
   auto get_squeeze_nodedef = [](std::vector<int> axes,
                                 DataType tf_type) -> NodeDef {
@@ -3277,7 +3277,7 @@ TEST_P(OpConverterTest1, ConvertSqueeze) {
           {},      // input partial dims
           {2, 3},  // expected output dims
           {},      // axis
-          trt_mode == TrtTestMode::kExplicitBatch
+          trt_mode_ == TrtTestMode::kExplicitBatch
              ? Status::OK()
              : Status{error::UNIMPLEMENTED,
                       "Squeeze is not implemented for empty squeeze_dims, at "
@@ -3336,7 +3336,7 @@ TEST_P(OpConverterTest1, ConvertSqueeze) {
           "Dimension 2 with size 2 cannot be squeezed because it must be "
           "size 1, at my_squeeze"}};
 
-  if (trt_mode == TrtTestMode::kDynamicShape) {
+  if (trt_mode_ == TrtTestMode::kDynamicShape) {
     // In this test we try to squeeze axis=2 which has size > 1. In dynamic
     // shape mode the converter sees only -1, so it cannot catch this error.
     squeeze_non_singleton.status = Status::OK();  // conversion status
@@ -3351,7 +3351,7 @@ TEST_P(OpConverterTest1, ConvertSqueeze) {
   for (TestParamBase p : test_params) {
     SCOPED_TRACE(p);
     Reset();
-    NodeDef node_def = get_squeeze_nodedef(p.param, tf_type);
+    NodeDef node_def = get_squeeze_nodedef(p.param, tf_type_);
     AddTestTensor("input", p.input_dims, {1, 2, 3, 4, 5, 6},
                   p.partial_input_dims);
     TestOpConverter("my_squeeze", node_def, p.expected_output_dims, p.status,
@@ -4106,14 +4106,14 @@ TEST_F(OpConverterTest, ConvertSlice) {
 
 TEST_P(OpConverterTest1, ConvertConv2D) {
   // Get nodedef for Conv2D layer.
-  DataType tf_type_loc = tf_type;
+  DataType tf_type = tf_type_;
   auto get_conv2d_nodedef =
-      [tf_type_loc](std::vector<int> strides = {1, 1, 1, 1},
+      [tf_type](std::vector<int> strides = {1, 1, 1, 1},
                 string padding = "SAME", string data_format = "NCHW",
                 std::vector<int> dilations = {1, 1, 1, 1}) -> NodeDef {
     Scope s = Scope::NewRootScope();
-    auto input = ops::Placeholder(s.WithOpName("input"), tf_type_loc);
-    auto filter = ops::Placeholder(s.WithOpName("weights"), tf_type_loc);
+    auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
+    auto filter = ops::Placeholder(s.WithOpName("weights"), tf_type);
     ops::Conv2D::Attrs attrs =
         ops::Conv2D::Attrs().DataFormat(data_format).Dilations(dilations);
     auto conv2d = ops::Conv2D(s.WithOpName("my_conv2d"), input, filter, strides,
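A note on the tf_type_loc to tf_type swap in the hunk above: the lambda wants a by-value copy of the data type, and C++ forbids capturing a non-static data member by name, so the test first copies the member into a local. Renaming the member to tf_type_ frees the bare name for that local. A standalone sketch of the constraint, with hypothetical names (not PR code):

#include <cstdio>

struct Fixture {
  int tf_type_ = 1;

  void Demo() {
    int tf_type = tf_type_;  // copy the member into a capturable local
    auto by_local = [tf_type] { std::printf("%d\n", tf_type); };
    // A C++14 init-capture would also work: [tf_type = tf_type_] { ... }
    // auto bad = [tf_type_] { ... };  // ill-formed: a non-static data
    //                                 // member cannot be captured by name
    by_local();
  }
};

int main() {
  Fixture f;
  f.Demo();
}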
@@ -4206,12 +4206,12 @@ TEST_P(OpConverterTest1, ConvertConv2D) {
         node_def, error::UNIMPLEMENTED,
         "Stride must be 1 for batch and channel dimensions, at my_conv2d");
   }
-  if (trt_mode == TrtTestMode::kDynamicShape) {
+  if (trt_mode_ == TrtTestMode::kDynamicShape) {
     Reset();
     NodeDef node_def = get_conv2d_nodedef();
     // Channel dim unknown, should fail.
     AddTestTensorWithTFDims("input", {-1, -1, -1, -1},
-                            TfDataTypeToTrt(tf_type));
+                            TfDataTypeToTrt(tf_type_));
     AddTestWeights<float>("weights", {1, 2, 1, 1}, {-1, 1});
     RunValidationAndConversion(
         node_def, error::INVALID_ARGUMENT,
@@ -4316,15 +4316,15 @@ TEST_P(OpConverterTest1, ConvertConv2D) {
         get_conv2d_nodedef(ok_params[i].strides, ok_params[i].padding,
                            ok_params[i].data_format, ok_params[i].dilations);
     std::vector<int> partial_input_shape;
-    if (trt_mode == TrtTestMode::kDynamicShape) {
+    if (trt_mode_ == TrtTestMode::kDynamicShape) {
       // The channel dim cannot have unknown size, fix that.
       partial_input_shape.resize(ok_params[i].input_dims.size(), -1);
       int channel_id = (ok_params[i].data_format == "NCHW") ? 1 : 3;
       partial_input_shape[channel_id] = ok_params[i].input_dims[channel_id];
     }
 
-    AddTestTensor("input", ok_params[i].input_dims, tf_type, ok_params[i].input,
-                  partial_input_shape);
+    AddTestTensor("input", ok_params[i].input_dims, tf_type_,
+                  ok_params[i].input, partial_input_shape);
     AddTestWeights<float>("weights", ok_params[i].filter_dims,
                           ok_params[i].filter);
 
@@ -4851,7 +4851,7 @@ TEST_P(OpConverterTest1, ConvertPool) {
   for (int nDim : test_nDims) {
     // Input is weights, should fail.
     Reset();
-    NodeDef node_def = get_pool_nodedef(tf_type, nDim);
+    NodeDef node_def = get_pool_nodedef(tf_type_, nDim);
 
     AddTestWeights<float>("input", {1, 1, 1, 2, 3}, {1, 2, 3, 4, 5, 6});
     RunValidationAndConversion(node_def, error::UNIMPLEMENTED,
@@ -4960,7 +4960,7 @@ TEST_P(OpConverterTest1, ConvertPool) {
       for (bool is_max_pooling : {true, false}) {
         Reset();
         NodeDef node_def =
-            get_pool_nodedef(tf_type, nDim, ksize, strides, p.padding,
+            get_pool_nodedef(tf_type_, nDim, ksize, strides, p.padding,
                              data_format, is_max_pooling);
         AddTestTensor("input", input_dims, input);
         TestOpConverter("my_pool", node_def, expected_output_dims, Status::OK(),
@@ -5022,7 +5022,7 @@ TEST_F(OpConverterTest, ConvertTopK) {
 TEST_P(OpConverterTest3, ConvertGather) {
   // Get the NodeDef for GatherV2.
   Scope s = Scope::NewRootScope();
-  auto params = ops::Placeholder(s.WithOpName("params"), tf_type);
+  auto params = ops::Placeholder(s.WithOpName("params"), tf_type_);
   auto indices = ops::Placeholder(s.WithOpName("indices"), DT_INT32);
   auto axis = ops::Placeholder(s.WithOpName("axis"), DT_INT32);
   auto gather = ops::GatherV2(s.WithOpName("my_gather"), params, indices, axis);
@@ -5030,7 +5030,7 @@ TEST_P(OpConverterTest3, ConvertGather) {
   {
     // Axis is a tensor, should fail.
     Reset();
-    AddTestTensor("params", {1, 1, 2, 3}, tf_type, {});
+    AddTestTensor("params", {1, 1, 2, 3}, tf_type_, {});
     AddTestTensor("indices", {1, 2}, DT_INT32, {});
     AddTestTensor("axis", {1}, DT_INT32, {});
     RunValidationAndConversion(
@@ -5075,7 +5075,7 @@ TEST_P(OpConverterTest3, ConvertGather) {
                  /*expected_output_shape=*/{2, 1, 1, 3},
                  /*expected_output=*/{4, 5, 6, 1, 2, 3},
                  /*params_is_tensor=*/true,
-                 trt_mode == TrtTestMode::kImplicitBatch
+                 trt_mode_ == TrtTestMode::kImplicitBatch
                      ? Status{error::UNIMPLEMENTED,
                               "TensorRT does not allow manipulation of the"
                               " batch dimension, at my_gather"}
@@ -5088,7 +5088,7 @@ TEST_P(OpConverterTest3, ConvertGather) {
                  /*expected_output_shape=*/{2, 1, 2, 1},
                  /*expected_output=*/{3, 1, 6, 4},
                  /*params_is_tensor=*/true,
-                 trt_mode == TrtTestMode::kImplicitBatch
+                 trt_mode_ == TrtTestMode::kImplicitBatch
                      ? Status{error::UNIMPLEMENTED,
                               "Indices must have a batch size of 1 when params"
                               " is a tensor."}
@@ -5102,7 +5102,7 @@ TEST_P(OpConverterTest3, ConvertGather) {
                  /*expected_output_shape=*/{2, 1, 2},
                  /*expected_output=*/{2, 3, 5, 6},
                  /*params_is_tensor=*/false,
-                 trt_mode == TrtTestMode::kImplicitBatch
+                 trt_mode_ == TrtTestMode::kImplicitBatch
                      ? Status{error::UNIMPLEMENTED,
                               "The input axis must be zero when params is a"
                               " weight."}
@@ -5115,13 +5115,13 @@ TEST_P(OpConverterTest3, ConvertGather) {
                  /*expected_output_shape=*/{2},
                  /*expected_output=*/{2, 4},
                  /*params_is_tensor=*/true,
-                 trt_mode == TrtTestMode::kImplicitBatch  // conversion_status
+                 trt_mode_ == TrtTestMode::kImplicitBatch  // conversion_status
                      ? Status{error::UNIMPLEMENTED,
                               "TensorRT does not allow manipulation of the "
                               "batch dimension, at my_gather"}
                      : Status::OK(),
                  Status::OK(),  // runtime_status
-                 trt_mode == TrtTestMode::kImplicitBatch  // add_index_status
+                 trt_mode_ == TrtTestMode::kImplicitBatch  // add_index_status
                      ? Status{error::INVALID_ARGUMENT,
                               "Batch size doesn't match for tensor indices: "
                               "Provided batch size does not match converter "
@@ -5236,7 +5236,7 @@ TEST_P(OpConverterTest3, ConvertGather) {
     if (p.params_is_tensor) {
       AddTestTensor("params", p.params_shape, params_input);
     } else {
-      AddTestWeights("params", p.params_shape, params_input, tf_type);
+      AddTestWeights("params", p.params_shape, params_input, tf_type_);
     }
     AddTestTensor("indices", p.indices_shape, DT_INT32, p.indices, {},
                   p.add_index_status);
@@ -5276,7 +5276,7 @@ TEST_P(OpConverterTest1, ConvertReduce) {
   {
     // Input is weights, should fail.
     Reset();
-    const NodeDef node_def = CreateReduceOp<ops::Sum>(tf_type, false);
+    const NodeDef node_def = CreateReduceOp<ops::Sum>(tf_type_, false);
     AddTestWeights<float>("input", {1, 2, 3}, {-3, -2, -1, 0, 1, 2});
     AddTestWeights<int32>("axis", {1}, {1});
     RunValidationAndConversion(
@@ -5286,7 +5286,7 @@ TEST_P(OpConverterTest1, ConvertReduce) {
   {
     // Axis is weights, should fail.
     Reset();
-    const NodeDef node_def = CreateReduceOp<ops::Sum>(tf_type, false);
+    const NodeDef node_def = CreateReduceOp<ops::Sum>(tf_type_, false);
     AddTestTensor("input", {1, 2, 3}, {-3, -2, -1, 0, 1, 2});
     AddTestTensor("axis", {1}, DT_INT32, {1});
     RunValidationAndConversion(
@@ -5346,7 +5346,7 @@ TEST_P(OpConverterTest1, ConvertReduce) {
     for (auto p : params) {
       SCOPED_TRACE(StrCat(op.name, keep_dims ? "keep_dims" : ""));
       Reset();
-      NodeDef node_def = op.get_node(tf_type, keep_dims);
+      NodeDef node_def = op.get_node(tf_type_, keep_dims);
 
       AddTestTensor("input", p.input_dims, p.input_values);
       AddTestWeights<int32>("axis", {static_cast<int>(p.axis.size())},
@@ -5366,7 +5366,7 @@ TEST_P(OpConverterTest1, ConvertReduce) {
         int ax_positive = ax >= 0 ? ax : ax + rank;
         // Zero marks elements that we will remove later.
         expected_output_dims[ax_positive] = keep_dims ? 1 : 0;
-        if (trt_mode == TrtTestMode::kImplicitBatch &&
+        if (trt_mode_ == TrtTestMode::kImplicitBatch &&
            (ax == 0 || ax == -rank)) {
          p.conversion_status = errors::Unimplemented(
              "TensorRT does not allow manipulation of the batch "
@@ -5402,7 +5402,7 @@ TEST_P(OpConverterTest1, ConvertUnary) {
   {
     // Input is weights, should fail.
     Reset();
-    const NodeDef node_def = CreateUnaryOp<ops::Neg>(tf_type);
+    const NodeDef node_def = CreateUnaryOp<ops::Neg>(tf_type_);
     AddTestWeights<float>("input", {1, 2, 3}, {-3, -2, -1, 0, 1, 2});
     RunValidationAndConversion(
         node_def, error::UNIMPLEMENTED,
@@ -5458,7 +5458,7 @@ TEST_P(OpConverterTest1, ConvertUnary) {
     if (!op_map.count(op_name)) {
       FAIL() << "Unary op test map does not contain op " << op_name;
     }
-    NodeDef node_def = op_map[op_name].first(tf_type);
+    NodeDef node_def = op_map[op_name].first(tf_type_);
 
     // TODO(bixia): we assume this test is only instantiated for DT_FLOAT for
     // now. Need to find a better way to express input and output types.
@@ -5466,7 +5466,7 @@ TEST_P(OpConverterTest1, ConvertUnary) {
     // TODO(tfeher): improve tests by defining an expected output data type and
     // check that. Currently only the shape and values of the output are
     // checked.
-    DataType input_tf_type = op_name == "Cast" ? DT_HALF : tf_type;
+    DataType input_tf_type = op_name == "Cast" ? DT_HALF : tf_type_;
 
     std::vector<float> input_values{-0.9f, 0.6f, 0.0f, -3.5f, 100.0f, 2.9f};
     AddTestTensor("input", p.input_dims, input_tf_type, input_values);
@@ -6033,7 +6033,7 @@ TEST_P(OpConverterTest2, ConvertPack) {
           /*axis=*/1,
           /*expected_output_dims=*/{1, 2, 2, 3},
           /*expected_output=*/InitTestVector<float>(12),
-          trt_mode == TrtTestMode::kImplicitBatch
+          trt_mode_ == TrtTestMode::kImplicitBatch
              ? Status{error::UNIMPLEMENTED,
                       "The input \"values_1\" for Pack must be a tensor, at "
                       "my_pack"}
@@ -6059,7 +6059,7 @@ TEST_P(OpConverterTest2, ConvertPack) {
           /*axis=*/-4,
           /*expected_output_dims=*/{2, 1, 2, 3},
           /*expected_output=*/InitTestVector<float>(12),
-          trt_mode == TrtTestMode::kImplicitBatch
+          trt_mode_ == TrtTestMode::kImplicitBatch
              ? Status{error::UNIMPLEMENTED,
                       "TensorRT does not allow manipulation of the batch "
                       "dimension, at my_pack"}
@@ -6119,7 +6119,7 @@ TEST_P(OpConverterTest2, ConvertPack) {
       },
   };
   // Inputs have inconsistent shapes, should fail.
-  if (trt_mode != TrtTestMode::kDynamicShape) {
+  if (trt_mode_ != TrtTestMode::kDynamicShape) {
     params.push_back(TestParams{
         /*input_shapes=*/{{1, 2, 3}, {1, 3, 2}},
         /*partial_input_shapes=*/{{}, {}},
@@ -6139,7 +6139,7 @@ TEST_P(OpConverterTest2, ConvertPack) {
     // TODO(tfeher) Add dynamic shapes test once TRT handles shape error
     // decently
   }
-  if (trt_mode == TrtTestMode::kDynamicShape) {
+  if (trt_mode_ == TrtTestMode::kDynamicShape) {
     // Test with mixed dynamic / static shape input tensors
     params.push_back(
         TestParams{/*input_shapes=*/{{1, 2, 3}, {1, 2, 3}},
@@ -6155,14 +6155,14 @@ TEST_P(OpConverterTest2, ConvertPack) {
     const int num_inputs = p.input_shapes.size();
     EXPECT_EQ(num_inputs, p.input_values.size());
 
-    NodeDef node_def = GetPackNodeDef(tf_type, num_inputs, p.axis);
+    NodeDef node_def = GetPackNodeDef(tf_type_, num_inputs, p.axis);
     // Create inputs.
     for (int j = 0; j < num_inputs; ++j) {
       if (j == 1 && p.input_1_is_weight) {
         AddTestWeights(StrCat("values_", j), p.input_shapes[j],
-                       p.input_values[j], tf_type);
+                       p.input_values[j], tf_type_);
       } else {
-        AddTestTensor(StrCat("values_", j), p.input_shapes[j], tf_type,
+        AddTestTensor(StrCat("values_", j), p.input_shapes[j], tf_type_,
                       p.input_values[j], p.partial_input_shapes[j]);
       }
     }
@@ -6690,7 +6690,7 @@ TEST_P(OpConverterTest2, ConvertSquaredDifference) {
   {
     // Input is a weight, should fail.
     Reset();
-    NodeDef node_def = GetSquaredDifferenceNodeDef(tf_type);
+    NodeDef node_def = GetSquaredDifferenceNodeDef(tf_type_);
     AddTestWeights<float>("x", {1, 2, 3}, {1, 2, 3, 4, 5, 6});
     AddTestTensor("y", {1, 1, 2, 3});
     RunValidationAndConversion(node_def, error::UNIMPLEMENTED,
@@ -6717,7 +6717,7 @@ TEST_P(OpConverterTest2, ConvertSquaredDifference) {
                  /*value_y=*/std::vector<float>(7 * 5, 0),
                  /*expected_output_dims=*/{1, 1, 2, 3},
                  /*expected_output=*/common_input,
-                 trt_mode == TrtTestMode::kDynamicShape
+                 trt_mode_ == TrtTestMode::kDynamicShape
                      ? Status::OK()
                      : errors::InvalidArgument("Infeasible broadcast scheme"),
                  errors::Internal(
@@ -6743,7 +6743,7 @@ TEST_P(OpConverterTest2, ConvertSquaredDifference) {
 
   for (auto p : params) {
     Reset();
-    NodeDef node_def = GetSquaredDifferenceNodeDef(tf_type);
+    NodeDef node_def = GetSquaredDifferenceNodeDef(tf_type_);
     AddTestTensor("x", p.dims_x, p.value_x);
     AddTestTensor("y", p.dims_y, p.value_y);
     TestOpConverter("my_squared_diff", node_def, p.expected_output_dims,
@@ -6779,7 +6779,7 @@ template <typename OpType, DataType dtype>
 void TestConvertResize(OpConverterTest* test) {
   typedef typename EnumToDataType<dtype>::Type CType;
 
-  std::vector<ResizeTestParams<CType>> params{
+  std::vector<ResizeTestParams<CType>> params {
 // TODO(b/162442839): Enable the test parameters for TRT 7.1.3.x.
 #if !IS_TRT_VERSION_GE(7, 1, 3, 0)
       {