diff --git a/tensorflow/core/kernels/data/batch_dataset_op.cc b/tensorflow/core/kernels/data/batch_dataset_op.cc
index 734c98bf67d..e761127ebcf 100644
--- a/tensorflow/core/kernels/data/batch_dataset_op.cc
+++ b/tensorflow/core/kernels/data/batch_dataset_op.cc
@@ -186,6 +186,7 @@ class BatchDatasetOp::Dataset : public DatasetBase {
       // overload that supports zero-copy, and might make sense in an
       // optimization pass.
       const size_t num_tuple_components = batch_elements[0].size();
+      out_tensors->reserve(num_tuple_components);
       const int64 num_batch_elements = batch_elements.size();
       for (size_t component_index = 0; component_index < num_tuple_components;
            ++component_index) {
diff --git a/tensorflow/core/kernels/data/captured_function.cc b/tensorflow/core/kernels/data/captured_function.cc
index cd6682d198d..a39484ad061 100644
--- a/tensorflow/core/kernels/data/captured_function.cc
+++ b/tensorflow/core/kernels/data/captured_function.cc
@@ -110,6 +110,7 @@ Status RunShortCircuit(const ShortCircuitInfo& info,
                        std::vector<Tensor>* rets) {
   VLOG(3) << "Running function " << func->func().name() << " short circuit";
   size_t num_args = args.size();
+  rets->reserve(info.indices.size());
   for (size_t i = 0; i < info.indices.size(); ++i) {
     if (info.indices[i] < num_args) {
       rets->push_back(args[info.indices[i]]);
@@ -125,6 +126,7 @@ Status RunShortCircuit(const ShortCircuitInfo& info, std::vector<Tensor>&& args,
                        std::vector<Tensor>* rets) {
   VLOG(3) << "Running function " << func->func().name() << " short circuit";
   size_t num_args = args.size();
+  rets->reserve(info.indices.size());
   for (size_t i = 0; i < info.indices.size(); ++i) {
     if (info.indices[i] < num_args) {
       if (info.can_move[i]) {
diff --git a/tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op.cc b/tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op.cc
index b0deedeadac..18c0a03f60d 100644
--- a/tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op.cc
+++ b/tensorflow/core/kernels/data/experimental/map_and_batch_dataset_op.cc
@@ -466,6 +466,7 @@ class MapAndBatchDatasetOp::Dataset : public DatasetBase {
         return Status::OK();
       }
       const size_t num_components = return_values->size();
+      result->output.reserve(num_components);
       for (size_t i = 0; i < num_components; ++i) {
         TensorShape component_shape({dataset()->batch_size_});
         component_shape.AppendShape(return_values->at(i).shape());
diff --git a/tensorflow/core/kernels/data/experimental/random_dataset_op.cc b/tensorflow/core/kernels/data/experimental/random_dataset_op.cc
index 42230af18ef..43e25f381ad 100644
--- a/tensorflow/core/kernels/data/experimental/random_dataset_op.cc
+++ b/tensorflow/core/kernels/data/experimental/random_dataset_op.cc
@@ -86,6 +86,7 @@ class RandomDatasetOp::Dataset : public DatasetBase {
     Status GetNextInternal(IteratorContext* ctx,
                            std::vector<Tensor>* out_tensors,
                            bool* end_of_sequence) override {
+      out_tensors->reserve(1);
       mutex_lock l(mu_);
       out_tensors->emplace_back(ctx->allocator({}), DT_INT64, TensorShape({}));
       out_tensors->back().scalar<int64>()() = Random();
diff --git a/tensorflow/core/kernels/data/tensor_dataset_op.cc b/tensorflow/core/kernels/data/tensor_dataset_op.cc
index 3a12690c4b3..4ee4087bb22 100644
--- a/tensorflow/core/kernels/data/tensor_dataset_op.cc
+++ b/tensorflow/core/kernels/data/tensor_dataset_op.cc
@@ -38,6 +38,8 @@ class TensorDatasetOp::Dataset : public DatasetBase {
  public:
   Dataset(OpKernelContext* ctx, std::vector<Tensor> tensors)
       : DatasetBase(DatasetContext(ctx)), tensors_(std::move(tensors)) {
+    dtypes_.reserve(tensors_.size());
+    shapes_.reserve(tensors_.size());
     for (const Tensor& t : tensors_) {
       dtypes_.push_back(t.dtype());
       shapes_.emplace_back(t.shape().dim_sizes());
diff --git a/tensorflow/core/kernels/data/tf_record_dataset_op.cc b/tensorflow/core/kernels/data/tf_record_dataset_op.cc
index 096a412bba1..b2a78794d36 100644
--- a/tensorflow/core/kernels/data/tf_record_dataset_op.cc
+++ b/tensorflow/core/kernels/data/tf_record_dataset_op.cc
@@ -100,6 +100,7 @@ class TFRecordDatasetOp::Dataset : public DatasetBase {
     Status GetNextInternal(IteratorContext* ctx,
                            std::vector<Tensor>* out_tensors,
                            bool* end_of_sequence) override {
+      out_tensors->reserve(1);
       mutex_lock l(mu_);
       do {
         // We are currently processing a file, so try to read the next record.
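
Illustrative note (not part of the patch): every hunk above applies the same pattern of calling std::vector::reserve() before a loop of push_back/emplace_back calls whose final element count is already known, so the vector allocates its storage once instead of regrowing as elements are appended. A minimal standalone sketch of the pattern follows; the names Item and BuildOutput are made up for the example and are not TensorFlow types.

#include <vector>

struct Item {};  // stand-in for the real element type (e.g. a Tensor)

void BuildOutput(const std::vector<Item>& inputs, std::vector<Item>* out) {
  // Without reserve(), push_back may reallocate several times as the vector
  // grows, copying or moving every element already stored on each growth.
  // Reserving the known final size up front keeps it to a single allocation.
  out->reserve(inputs.size());
  for (const Item& item : inputs) {
    out->push_back(item);
  }
}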