Remove unused unique_tensor_references library.

This change also removes methods on `TensorReference` and `NodeExecStatsInterface` that are no longer used by any code, and fixes some #includes and BUILD files that depended on indirect inclusion of "tensor_reference.h".

PiperOrigin-RevId: 300648897
Change-Id: I1f734b1de096f42158f7cd70ab9e27ad832db3f6
Authored by Derek Murray on 2020-03-12 16:32:19 -07:00; committed by TensorFlower Gardener
parent ead7a372a8
commit 749708ed0e
22 changed files with 21 additions and 367 deletions
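The include-side fix repeated in the kernel diffs below follows a single pattern. Here is a minimal sketch (a hypothetical kernel source file, shown only to illustrate the pattern; it is not code from this commit):

// Kernels that construct a TensorReference previously relied on an indirect
// inclusion of tensor_reference.h; with unique_tensor_references.h removed,
// the header is now named explicitly.
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_reference.h"  // now included directly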

View File

@@ -184,6 +184,7 @@ XLA_DEVICE_DEPS = [
"//tensorflow/core:core_cpu_internal",
"//tensorflow/core:dataset_ops_op_lib",
"//tensorflow/core:framework",
"//tensorflow/core:framework_internal",
"//tensorflow/core:functional_ops_op_lib",
"//tensorflow/core:lib",
"//tensorflow/core:lib_internal",

View File

@@ -24,6 +24,7 @@ limitations under the License.
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/stream_executor/platform/port.h"

View File

@@ -2256,7 +2256,6 @@ filegroup(
"//tensorflow/core/framework:shared_ptr_variant.h",
"//tensorflow/core/framework:tensor_reference.h",
"//tensorflow/core/framework:tracking_allocator.h", # only needed for tests
"//tensorflow/core/framework:unique_tensor_references.h",
"//tensorflow/core/framework:variant.h",
"//tensorflow/core/util:framework_internal_public_hdrs",
],

View File

@@ -165,16 +165,6 @@ void NodeExecStatsWrapper::SetOutput(int slot, const Tensor* tensor) {
tensor->FillDescription(node_output->mutable_tensor_description());
}
void NodeExecStatsWrapper::SetReferencedTensors(
const TensorReferenceVector& tensors) {
// be careful not to increment the reference count on any tensor
// while recording the information
for (size_t i = 0; i < tensors.size(); ++i) {
AllocationDescription* description = stats_->add_referenced_tensor();
tensors.at(i).FillDescription(description);
}
}
void NodeExecStatsWrapper::AddAllocation(
Allocator* allocator, TrackingAllocator* tracking_allocator) {
AllocatorMemoryUsed* memory = stats_->add_memory();

View File

@@ -81,10 +81,6 @@ class NodeExecStatsInterface {
// output slot.
virtual void SetOutput(int slot, const Tensor* tensor) = 0;
// Records information about the tensors that were accessed during the
// execution of this node.
virtual void SetReferencedTensors(const TensorReferenceVector& tensors) = 0;
// Records the absolute time in nanoseconds at which this node became
// runnable (i.e. was scheduled for execution).
virtual void SetScheduled(int64 nanos) = 0;
@@ -113,7 +109,6 @@ class NodeExecStatsWrapper : public NodeExecStatsInterface {
bool TrackAllocations() const override { return true; }
void SetMemory(OpKernelContext* ctx) override;
void SetOutput(int slot, const Tensor* tensor) override;
void SetReferencedTensors(const TensorReferenceVector& tensors) override;
void SetScheduled(int64 nanos) override;
private:

View File

@@ -74,7 +74,6 @@ exports_files(
"tensor_util.h",
"thread_factory.h",
"tracking_allocator.h",
"unique_tensor_references.h",
"versions.h",
],
visibility = ["//tensorflow/core:__pkg__"],
@@ -218,7 +217,6 @@ filegroup(
"type_traits.h",
"typed_allocator.h",
"types.h",
"unique_tensor_references.h",
"variant.h",
"variant_encode_decode.h",
"variant_op_registry.h",
@@ -256,7 +254,6 @@ filegroup(
"run_handler_util.cc",
"tensor_slice.cc",
"tensor_util.cc",
"unique_tensor_references.cc",
"versions.cc",
],
)
@@ -391,8 +388,6 @@ filegroup(
"tensor_util.cc",
"tensor_util.h",
"thread_factory.h",
"unique_tensor_references.cc",
"unique_tensor_references.h",
"versions.cc",
"versions.h",
],
@@ -1039,7 +1034,6 @@ tf_cc_tests(
"tensor_util_test.cc",
"tracking_allocator_test.cc",
"types_test.cc",
"unique_tensor_references_test.cc",
"variant_op_registry_test.cc",
"variant_test.cc",
],

View File

@@ -43,7 +43,6 @@ limitations under the License.
#include "tensorflow/core/framework/tracking_allocator.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/unique_tensor_references.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/array_slice.h"

View File

@@ -41,40 +41,14 @@ class TensorReference {
if (buf_) buf_->Unref();
}
// Return an estimate of the total bytes being kept alive by this reference.
size_t TotalBytes() const {
// We add 128 as a baseline to account for per-Tensor metadata
return 128 + (buf_ ? buf_->size() : 0);
}
void FillDescription(AllocationDescription* description) const {
if (buf_) buf_->FillAllocationDescription(description);
}
// Convenience function for de-duplicating tensor references.
bool SharesBufferWith(const TensorReference& t) const {
return buf_ == t.buf_;
}
// Convenience function for de-duplicating tensor references.
bool SharesBufferWith(const Tensor& t) const {
return buf_ == (t.buf_ ? t.buf_->root_buffer() : nullptr);
}
// Convenience function for de-duplicating tensor references.
size_t BufferHash() const { return std::hash<TensorBuffer*>()(buf_); }
// A constructor used only for tests
explicit TensorReference(TensorBuffer* test_buffer) : buf_(test_buffer) {
if (buf_) buf_->Ref();
}
private:
TensorBuffer* buf_;
};
typedef gtl::InlinedVector<TensorReference, 4> TensorReferenceVector;
} // namespace tensorflow
#endif // TENSORFLOW_FRAMEWORK_TENSOR_REFERENCE_H_
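For context, what survives this change is TensorReference's ref-counting core. A minimal usage sketch follows (the function name is illustrative; this is not code from this commit):

#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_reference.h"

namespace tensorflow {

// Holds the tensor's underlying buffer alive across asynchronous work:
// TensorReference(const Tensor&) takes a reference on the buffer, and
// Unref() releases it once the work no longer needs the data.
void KeepBufferAliveExample(const Tensor& t) {  // illustrative name
  TensorReference ref(t);
  // ... enqueue work that reads t's buffer asynchronously ...
  ref.Unref();
}

}  // namespace tensorflow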

View File

@@ -1,91 +0,0 @@
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/core/framework/unique_tensor_references.h"
namespace tensorflow {
UniqueTensorReferences::~UniqueTensorReferences() {
if (!frozen_) {
// The references were not retrieved so discard them to avoid
// leaking memory.
TensorReferenceVector refs;
FreezeAndReturnReferences(&refs);
for (auto& tensor : refs) {
tensor.Unref();
}
}
delete referenced_tensors_set_;
}
void UniqueTensorReferences::Add(const Tensor& tensor) {
DCHECK(!frozen_);
// Do nothing if the tensor has a null buffer.
if (tensor.IsInitialized() && tensor.NumElements() > 0) {
if (referenced_tensors_set_ != nullptr) {
// There are enough tensors that we are using a hash set to
// de-duplicate.
const TensorReference tensor_ref(tensor);
if (!referenced_tensors_set_->insert(tensor_ref).second) {
// The tensor was a duplicate, so discard the reference.
tensor_ref.Unref();
}
} else {
for (size_t i = 0; i < referenced_tensors_vector_.size(); ++i) {
if (referenced_tensors_vector_[i].SharesBufferWith(tensor)) {
// tensor is a duplicate, so nothing to do.
return;
}
}
referenced_tensors_vector_.push_back(TensorReference(tensor));
if (kInVector == referenced_tensors_vector_.size()) {
// There are too many tensors to keep using the N^2 algorithm
// so start de-duplicating using a set.
// Transfer the refs from the vector to the set.
DCHECK(referenced_tensors_set_ == nullptr);
referenced_tensors_set_ = new ReferencedTensorsSet;
referenced_tensors_set_->reserve(kInVector);
referenced_tensors_set_->insert(referenced_tensors_vector_.begin(),
referenced_tensors_vector_.end());
DCHECK_EQ(kInVector, referenced_tensors_set_->size());
referenced_tensors_vector_.clear();
}
}
}
}
void UniqueTensorReferences::FreezeAndReturnReferences(
TensorReferenceVector* out_vector) {
// Prevent any further additions.
frozen_ = true;
if (referenced_tensors_set_ != nullptr) {
DCHECK(referenced_tensors_vector_.empty());
out_vector->reserve(referenced_tensors_set_->size());
for (const auto& ref : *referenced_tensors_set_) {
out_vector->push_back(ref);
}
referenced_tensors_set_->clear();
delete referenced_tensors_set_;
referenced_tensors_set_ = nullptr;
} else {
out_vector->reserve(referenced_tensors_vector_.size());
for (const auto& ref : referenced_tensors_vector_) {
out_vector->push_back(ref);
}
referenced_tensors_vector_.clear();
}
}
} // namespace tensorflow

View File

@@ -1,81 +0,0 @@
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_FRAMEWORK_UNIQUE_TENSOR_REFERENCES_H_
#define TENSORFLOW_FRAMEWORK_UNIQUE_TENSOR_REFERENCES_H_
#include <unordered_set>
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
// Helper class to maintain a unique set of tensor references. In the
// common case there are not many references, so an inline vector is
// used for <= kInVector unique elements, defaulting to 4 since that
// is the inlined size of TensorReferenceVector. To avoid N^2
// operations when adding N items, any larger number of unique tensor
// references switches to using an unordered set.
class UniqueTensorReferences {
public:
UniqueTensorReferences() : frozen_(false), referenced_tensors_set_(nullptr) {}
~UniqueTensorReferences();
// Adds a reference to tensor if its buffer is not already referenced.
void Add(const Tensor& tensor);
// No more references may be added after this is called. The unique
// references are returning in out_vector.
void FreezeAndReturnReferences(TensorReferenceVector* out_vector);
private:
// Up to kInVector elements are stored in reference_tensors_vector_
// to avoid any allocations or hash computations in the common
// case. When more unique elements are added they move to
// referenced_tensors_set_ to avoid an N^2 algorithm on insert.
static const int kInVector = 4; // Must be >= 1.
struct TensorReferenceEqualFn {
bool operator()(const TensorReference& t1,
const TensorReference& t2) const {
return t1.SharesBufferWith(t2);
}
};
struct TensorReferenceHashFn {
size_t operator()(const TensorReference& t) const { return t.BufferHash(); }
};
bool frozen_;
TensorReferenceVector referenced_tensors_vector_;
typedef std::unordered_set<TensorReference, TensorReferenceHashFn,
TensorReferenceEqualFn>
ReferencedTensorsSet;
// Lazily allocated hash set for when the number of tensors becomes too large.
// If this is non-NULL, then we use the hash set, otherwise, we use the
// referenced_tensors_vector_ (and do O(N^2) work per insertion).
ReferencedTensorsSet* referenced_tensors_set_;
TF_DISALLOW_COPY_AND_ASSIGN(UniqueTensorReferences);
};
} // end namespace tensorflow
#endif // TENSORFLOW_FRAMEWORK_UNIQUE_TENSOR_REFERENCES_H_
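For reference, the typical call pattern of the class being deleted, reconstructed as a minimal sketch from the removed implementation and tests in this commit (the function name is illustrative):

#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/unique_tensor_references.h"  // removed by this commit

namespace tensorflow {

void RecordUniqueInputs(const Tensor& a, const Tensor& b) {  // illustrative name
  UniqueTensorReferences refs;
  refs.Add(a);
  refs.Add(b);  // A duplicate buffer would be dropped rather than double-counted.

  TensorReferenceVector out;
  refs.FreezeAndReturnReferences(&out);  // No further Add() calls are allowed.
  for (auto& ref : out) {
    ref.Unref();  // The caller owns the returned references.
  }
}

}  // namespace tensorflow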

View File

@@ -1,139 +0,0 @@
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/core/framework/unique_tensor_references.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
TEST(UniquifyTensors, TestUniqueVector) {
UniqueTensorReferences refs;
Tensor a(DT_FLOAT, TensorShape({2, 2}));
Tensor b(DT_FLOAT, TensorShape({2, 2}));
EXPECT_FALSE(a.SharesBufferWith(b));
refs.Add(a);
refs.Add(b);
TensorReferenceVector tensors;
refs.FreezeAndReturnReferences(&tensors);
EXPECT_EQ(2, tensors.size());
if (tensors[0].SharesBufferWith(a)) {
EXPECT_TRUE(tensors[1].SharesBufferWith(b));
} else {
EXPECT_TRUE(tensors[1].SharesBufferWith(a));
EXPECT_TRUE(tensors[0].SharesBufferWith(b));
}
for (auto& t : tensors) {
t.Unref();
}
}
TEST(UniquifyTensors, TestNonUniqueVector) {
UniqueTensorReferences refs;
Tensor a(DT_FLOAT, TensorShape({2, 2}));
Tensor b(a);
EXPECT_TRUE(a.SharesBufferWith(b));
refs.Add(a);
refs.Add(b);
TensorReferenceVector tensors;
refs.FreezeAndReturnReferences(&tensors);
EXPECT_EQ(1, tensors.size());
EXPECT_TRUE(tensors[0].SharesBufferWith(a));
EXPECT_TRUE(tensors[0].SharesBufferWith(b));
for (auto& t : tensors) {
t.Unref();
}
}
TEST(UniquifyTensors, TestNoLeakVector) {
UniqueTensorReferences refs;
Tensor a(DT_FLOAT, TensorShape({2, 2}));
Tensor b(DT_FLOAT, TensorShape({2, 2}));
EXPECT_FALSE(a.SharesBufferWith(b));
refs.Add(a);
refs.Add(b);
}
TEST(UniquifyTensors, TestUniqueSet) {
UniqueTensorReferences refs;
Tensor a(DT_FLOAT, TensorShape({2, 2}));
Tensor b(DT_FLOAT, TensorShape({2, 2}));
Tensor c(DT_FLOAT, TensorShape({2, 2}));
Tensor d(DT_FLOAT, TensorShape({2, 2}));
Tensor e(DT_FLOAT, TensorShape({2, 2}));
EXPECT_FALSE(a.SharesBufferWith(b));
refs.Add(a);
refs.Add(b);
refs.Add(c);
refs.Add(d);
refs.Add(e);
TensorReferenceVector tensors;
refs.FreezeAndReturnReferences(&tensors);
EXPECT_EQ(5, tensors.size());
for (auto& t : tensors) {
t.Unref();
}
}
TEST(UniquifyTensors, TestNonUniqueSet) {
UniqueTensorReferences refs;
Tensor a(DT_FLOAT, TensorShape({2, 2}));
Tensor b(DT_FLOAT, TensorShape({2, 2}));
Tensor c(DT_FLOAT, TensorShape({2, 2}));
Tensor d(DT_FLOAT, TensorShape({2, 2}));
Tensor e(DT_FLOAT, TensorShape({2, 2}));
Tensor f(c);
EXPECT_TRUE(f.SharesBufferWith(c));
refs.Add(a);
refs.Add(b);
refs.Add(c);
refs.Add(d);
refs.Add(e);
refs.Add(f);
TensorReferenceVector tensors;
refs.FreezeAndReturnReferences(&tensors);
EXPECT_EQ(5, tensors.size());
for (auto& t : tensors) {
t.Unref();
}
}
TEST(UniquifyTensors, TestNoLeakSet) {
UniqueTensorReferences refs;
Tensor a(DT_FLOAT, TensorShape({2, 2}));
Tensor b(DT_FLOAT, TensorShape({2, 2}));
Tensor c(DT_FLOAT, TensorShape({2, 2}));
Tensor d(DT_FLOAT, TensorShape({2, 2}));
Tensor e(DT_FLOAT, TensorShape({2, 2}));
refs.Add(a);
refs.Add(b);
refs.Add(c);
refs.Add(d);
refs.Add(e);
}
} // namespace tensorflow

View File

@@ -288,6 +288,7 @@ tf_kernel_library(
deps = [
":bounds_check",
"//tensorflow/core:framework",
"//tensorflow/core:framework_internal",
"//third_party/eigen3",
],
alwayslink = 0,
@@ -2626,6 +2627,7 @@ tf_kernel_library(
deps = DYNAMIC_DEPS + [
":fill_functor",
":gather_functor",
"//tensorflow/core:framework_internal",
] + if_cuda(["@cub_archive//:cub"]) + if_rocm([
"@local_config_rocm//rocm:rocprim",
]),
@@ -3028,7 +3030,7 @@ tf_kernel_library(
tf_kernel_library(
name = "crop_and_resize_op",
prefix = "crop_and_resize_op",
deps = IMAGE_DEPS,
deps = IMAGE_DEPS + ["//tensorflow/core:framework_internal"],
)
tf_kernel_library(
@@ -3576,6 +3578,7 @@ tf_kernel_library(
visibility = [":friends"],
deps = [
"//tensorflow/core:framework",
"//tensorflow/core:framework_internal",
"//tensorflow/core:lib",
"//tensorflow/stream_executor/lib",
"//tensorflow/stream_executor/platform:dso_loader",
@@ -4081,7 +4084,7 @@ tf_kernel_library(
tf_kernel_library(
name = "check_numerics_op",
prefix = "check_numerics_op",
deps = MATH_DEPS,
deps = MATH_DEPS + ["//tensorflow/core:framework_internal"],
)
tf_kernel_library(

View File

@@ -15,15 +15,18 @@ limitations under the License.
// See docs in ../ops/array_ops.cc.
// clang-format off
#include "tensorflow/core/lib/bfloat16/bfloat16.h"
#include <math.h>
#include <algorithm>
#include <numeric>
#include <math.h> // NOLINT
#include <algorithm> // NOLINT
#include <numeric> // NOLINT
// clang-format on
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/framework/types.h"
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM

View File

@@ -26,6 +26,7 @@ limitations under the License.
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"

View File

@@ -33,6 +33,7 @@ limitations under the License.
#endif
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/stream_executor.h"

View File

@@ -91,8 +91,6 @@ class SimpleStepStatsCollector : public StepStatsCollectorInterface {
void SetOutput(int slot, const Tensor* tensor) override {}
void SetReferencedTensors(const TensorReferenceVector& tensors) override {}
void SetScheduled(int64 nanos) override {}
private:

View File

@@ -48,6 +48,7 @@ limitations under the License.
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/fill_functor.h"
#include "tensorflow/core/kernels/gather_functor_gpu.cu.h"

View File

@@ -20,6 +20,7 @@ limitations under the License.
#include "tensorflow/core/common_runtime/gpu/gpu_event_mgr.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/kernels/gpu_device_array_gpu.h"
namespace tensorflow {

View File

@@ -30,6 +30,7 @@ limitations under the License.
#include "rocm/include/rocblas.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/stream_executor.h"
#include "tensorflow/stream_executor/blas.h"

View File

@@ -60,6 +60,7 @@ tf_kernel_library(
"//tensorflow/core:array_ops_op_lib",
"//tensorflow/core:bitwise_ops_op_lib",
"//tensorflow/core:framework",
"//tensorflow/core:framework_internal",
"//tensorflow/core:functional_ops_op_lib",
"//tensorflow/core:lib",
"//tensorflow/core:math_ops_op_lib",

View File

@@ -22,6 +22,7 @@ limitations under the License.
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"

View File

@@ -22,6 +22,7 @@ limitations under the License.
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_reference.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/variant_op_registry.h"