Pre-allocate memory for vectors where size is known.

PiperOrigin-RevId: 356886423
Change-Id: I82f3eea4b04d80142b34f35c71ee9e696f5d58cb
This commit is contained in:
A. Unique TensorFlower 2021-02-10 20:02:36 -08:00 committed by TensorFlower Gardener
parent 0992169740
commit 6f52c65fbf
6 changed files with 8 additions and 0 deletions

View File

@@ -658,6 +658,7 @@ void TFE_InferShapes(TFE_Op* tfe_op, TF_ShapeAndTypeList* input_shapes,
c.SetInput(i, c.UnknownShape());
continue;
}
dims.reserve(input_shape.num_dims);
for (int j = 0; j < input_shape.num_dims; ++j) {
dims.push_back(c.MakeDim(input_shape.dims[j]));
}

View File

@@ -196,6 +196,7 @@ TF_Function* TF_GraphToFunctionWithControlOutputs(
// Compute body nodes.
std::vector<const Node*> control_output_nodes;
control_output_nodes.reserve(ncontrol_outputs);
for (int i = 0; i < ncontrol_outputs; ++i) {
control_output_nodes.push_back(&control_outputs[i]->node);
}

View File

@@ -133,6 +133,7 @@ void LoggingDeviceExecute(const TFE_Op* original_op, int* num_outputs,
TFE_DeleteOp(op);
if (TF_GetCode(s) != TF_OK) return;
std::vector<TFE_TensorHandle*> unwrapped_outputs;
unwrapped_outputs.reserve(op_outputs.size());
for (auto* handle : op_outputs) {
unwrapped_outputs.push_back(handle);
}

View File

@@ -4734,6 +4734,7 @@ Status ConvertPack(OpConverterParams* params) {
params->use_implicit_batch ? TrtInputArg::kTensor : TrtInputArg::kBoth;
std::vector<std::pair<string, TrtInputArg>> inputs_is_weight;
inputs_is_weight.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
inputs_is_weight.push_back({StrCat("values_", i), expected_arg});
}
@@ -5111,6 +5112,7 @@ Status ConvertConcat(OpConverterParams* params) {
// for details. TODO(tfeher): Allow weight input in explicit batch mode.
std::vector<std::pair<string, TrtInputArg>> inputs_kinds;
TrtInputArg expected_input = TrtInputArg::kTensor;
inputs_kinds.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
inputs_kinds.push_back({StrCat("values_", i), expected_input});
}
@@ -5141,6 +5143,7 @@ Status ConvertConcat(OpConverterParams* params) {
// Gather inputs as tensors
std::vector<nvinfer1::ITensor const*> input_tensors;
input_tensors.reserve(num_inputs);
for (int i = 0; i < num_inputs; i++) {
input_tensors.push_back(inputs.at(i).tensor());
}

View File

@@ -56,6 +56,7 @@ class TensorKey : public Tensor {
const uint8* d = static_cast<uint8*>(k.data());
size_t s = k.AllocatedBytes();
std::vector<uint8> vec;
vec.reserve(s);
for (int i = 0; i < s; i++) {
vec.push_back(d[i]);
}

View File

@@ -57,6 +57,7 @@ namespace toco {
// Split up the DynamicStitch inputs into the indices and data.
std::vector<std::string> stitch_indices_inputs;
std::vector<std::string> stitch_data_inputs;
stitch_indices_inputs.reserve(stitch_op->num_partitions);
for (int i = 0; i < stitch_op->num_partitions; ++i) {
stitch_indices_inputs.push_back(stitch_op->inputs[i]);
}