diff --git a/tensorflow/compiler/jit/xla_device_context.cc b/tensorflow/compiler/jit/xla_device_context.cc
index 4948fc9965f..e1cef25e33e 100644
--- a/tensorflow/compiler/jit/xla_device_context.cc
+++ b/tensorflow/compiler/jit/xla_device_context.cc
@@ -69,6 +69,7 @@ absl::optional<AllocatorStats> XlaDeviceAllocator::GetStats() {
   tf_stats.bytes_reserved = se_stats->bytes_reserved;
   tf_stats.peak_bytes_reserved = se_stats->peak_bytes_reserved;
   tf_stats.bytes_reservable_limit = se_stats->bytes_reservable_limit;
+  tf_stats.largest_free_block_bytes = se_stats->largest_free_block_bytes;
   return tf_stats;
 }
 
diff --git a/tensorflow/core/framework/allocator.cc b/tensorflow/core/framework/allocator.cc
index 8cc8a29fe48..d20f779c8da 100644
--- a/tensorflow/core/framework/allocator.cc
+++ b/tensorflow/core/framework/allocator.cc
@@ -31,16 +31,22 @@ thread_local MemoryDebugAnnotation ScopedMemoryDebugAnnotation::annotation_;
 
 string AllocatorStats::DebugString() const {
   return strings::Printf(
-      "Limit:        %20lld\n"
-      "InUse:        %20lld\n"
-      "MaxInUse:     %20lld\n"
-      "NumAllocs:    %20lld\n"
-      "MaxAllocSize: %20lld\n",
+      "Limit:            %20lld\n"
+      "InUse:            %20lld\n"
+      "MaxInUse:         %20lld\n"
+      "NumAllocs:        %20lld\n"
+      "MaxAllocSize:     %20lld\n"
+      "Reserved:         %20lld\n"
+      "PeakReserved:     %20lld\n"
+      "LargestFreeBlock: %20lld\n",
       static_cast<long long>(this->bytes_limit ? *this->bytes_limit : 0),
       static_cast<long long>(this->bytes_in_use),
       static_cast<long long>(this->peak_bytes_in_use),
       static_cast<long long>(this->num_allocs),
-      static_cast<long long>(this->largest_alloc_size));
+      static_cast<long long>(this->largest_alloc_size),
+      static_cast<long long>(this->bytes_reserved),
+      static_cast<long long>(this->peak_bytes_reserved),
+      static_cast<long long>(this->largest_free_block_bytes));
 }
 
 constexpr size_t Allocator::kAllocatorAlignment;
diff --git a/tensorflow/core/framework/allocator.h b/tensorflow/core/framework/allocator.h
index 087505f8cd5..dd226b205a9 100644
--- a/tensorflow/core/framework/allocator.h
+++ b/tensorflow/core/framework/allocator.h
@@ -160,13 +160,16 @@ struct AllocatorStats {
   // if such a limit is known.
   absl::optional<int64> bytes_reservable_limit;
 
+  int64 largest_free_block_bytes;  // Largest free block's size in heap.
+
   AllocatorStats()
       : num_allocs(0),
         bytes_in_use(0),
         peak_bytes_in_use(0),
         largest_alloc_size(0),
         bytes_reserved(0),
-        peak_bytes_reserved(0) {}
+        peak_bytes_reserved(0),
+        largest_free_block_bytes(0) {}
 
   std::string DebugString() const;
 };
diff --git a/tensorflow/stream_executor/allocator_stats.cc b/tensorflow/stream_executor/allocator_stats.cc
index 8a45efdef83..0e25063a446 100644
--- a/tensorflow/stream_executor/allocator_stats.cc
+++ b/tensorflow/stream_executor/allocator_stats.cc
@@ -20,13 +20,18 @@ namespace stream_executor {
 
 std::string AllocatorStats::DebugString() const {
   return absl::StrFormat(
-      "Limit:        %20lld\n"
-      "InUse:        %20lld\n"
-      "MaxInUse:     %20lld\n"
-      "NumAllocs:    %20lld\n"
-      "MaxAllocSize: %20lld\n",
+      "Limit:            %20lld\n"
+      "InUse:            %20lld\n"
+      "MaxInUse:         %20lld\n"
+      "NumAllocs:        %20lld\n"
+      "MaxAllocSize:     %20lld\n"
+      "Reserved:         %20lld\n"
+      "PeakReserved:     %20lld\n"
+      "LargestFreeBlock: %20lld\n",
       this->bytes_limit ? *this->bytes_limit : 0, this->bytes_in_use,
-      this->peak_bytes_in_use, this->num_allocs, this->largest_alloc_size);
+      this->peak_bytes_in_use, this->num_allocs, this->largest_alloc_size,
+      this->bytes_reserved, this->peak_bytes_reserved,
+      this->largest_free_block_bytes);
 }
 
 }  // namespace stream_executor
diff --git a/tensorflow/stream_executor/allocator_stats.h b/tensorflow/stream_executor/allocator_stats.h
index 9a99c1099c9..94dafdb6da6 100644
--- a/tensorflow/stream_executor/allocator_stats.h
+++ b/tensorflow/stream_executor/allocator_stats.h
@@ -43,13 +43,16 @@ struct AllocatorStats {
   // if such a limit is known.
   absl::optional<int64> bytes_reservable_limit;
 
+  int64 largest_free_block_bytes;  // Largest free block's size in heap.
+
   AllocatorStats()
       : num_allocs(0),
         bytes_in_use(0),
         peak_bytes_in_use(0),
         largest_alloc_size(0),
         bytes_reserved(0),
-        peak_bytes_reserved(0) {}
+        peak_bytes_reserved(0),
+        largest_free_block_bytes(0) {}
 
   std::string DebugString() const;
 };