Remove HostBuffers
HostBuffers add significant complexity and have no users.

PiperOrigin-RevId: 229682740
commit 69b9e5358b (parent 77e2ba9188)
--- a/tensorflow/stream_executor/BUILD
+++ b/tensorflow/stream_executor/BUILD
@@ -206,7 +206,6 @@ cc_library(
 cc_library(
     name = "stream",
     srcs = [
-        "host_buffer.h",
         "stream.cc",
     ],
     hdrs = ["stream.h"],
@@ -453,12 +452,6 @@ cc_library(
     ],
 )
 
-cc_library(
-    name = "host_buffer",
-    hdrs = ["host_buffer.h"],
-    deps = [":dnn"],
-)
-
 tf_proto_library(
     name = "dnn_proto",
     srcs = ["dnn.proto"],
--- a/tensorflow/stream_executor/dnn.h
+++ b/tensorflow/stream_executor/dnn.h
@@ -2069,22 +2069,6 @@ class DnnSupport {
      QuantizedActivationMode mode,
      DeviceMemory<float>* gpu_unquantized_dst) = 0;
 
-  // Enqueues an asynchronous copy of the contents of buffer_src to
-  // gpu_unquantized_dst.
-  virtual bool DoCopyHostBuffer2Device(
-      Stream* stream, HostBuffer* buffer_src,
-      DeviceMemory<float>* gpu_unquantized_dst) {
-    return false;
-  }
-
-  // Enqueues an asynchronous copy of the contents of gpu_unquantized_src to
-  // buffer_dst.
-  virtual bool DoCopyDevice2HostBuffer(
-      Stream* stream, const DeviceMemory<float>& gpu_unquantized_src,
-      HostBuffer* buffer_dst) {
-    return false;
-  }
-
   // Create an RNN descriptor based on model shapes and configurations.
   // The caller retains the ownership of the descriptor.
   //
--- a/tensorflow/stream_executor/host_buffer.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_STREAM_EXECUTOR_HOST_BUFFER_H_
-#define TENSORFLOW_STREAM_EXECUTOR_HOST_BUFFER_H_
-
-#include "tensorflow/stream_executor/dnn.h"
-
-namespace stream_executor {
-
-// A HostBuffer is a block of memory in host memory containing the data for a
-// dnn::BatchDescriptor using a device-dependent memory layout.
-// Derived classes provide methods to construct a HostBuffer for a specific
-// device, and to copy data in and out of the buffer.
-class HostBuffer {
- public:
-  const dnn::BatchDescriptor& descriptor() const { return descriptor_; }
-
-  // Returns a string describing the HostBuffer.
-  virtual string AsString() const = 0;
-
- protected:
-  // Construct a HostBuffer from the supplied dnn::BatchDescriptor.
-  explicit HostBuffer(const dnn::BatchDescriptor& descriptor)
-      : descriptor_(descriptor) {}
-  virtual ~HostBuffer() {}
-
- private:
-  const dnn::BatchDescriptor descriptor_;
-};
-
-}  // namespace stream_executor
-
-#endif  // TENSORFLOW_STREAM_EXECUTOR_HOST_BUFFER_H_
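For context, the deleted header defined only the abstract base class; a backend was expected to subclass it and supply the device-specific layout and accessors. A hypothetical sketch of such a subclass is shown below; ExampleHostBuffer and its members are illustrative names, not code from the tree, and this compiles only against the pre-removal header.

// Hypothetical sketch of a backend-specific subclass of the now-deleted
// HostBuffer. Illustrative only; not part of this commit.
#include <vector>

#include "absl/strings/str_cat.h"
#include "tensorflow/stream_executor/dnn.h"

namespace stream_executor {

class ExampleHostBuffer : public HostBuffer {
 public:
  ExampleHostBuffer(const dnn::BatchDescriptor& descriptor, size_t elements)
      : HostBuffer(descriptor), data_(elements) {}

  // The only required override: a human-readable description, consumed by the
  // (also removed) ToVlogString(const HostBuffer&) helper in stream.cc.
  string AsString() const override {
    return absl::StrCat("ExampleHostBuffer{", descriptor().ToString(), "}");
  }

  float* data() { return data_.data(); }

 private:
  std::vector<float> data_;  // host-side staging storage, device-chosen layout
};

}  // namespace stream_executor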
--- a/tensorflow/stream_executor/stream.cc
+++ b/tensorflow/stream_executor/stream.cc
@@ -20,7 +20,6 @@ limitations under the License.
 #include "absl/strings/str_cat.h"
 #include "third_party/eigen3/Eigen/Core"
 #include "tensorflow/stream_executor/blas.h"
-#include "tensorflow/stream_executor/host_buffer.h"
 #include "tensorflow/stream_executor/host_or_device_scalar.h"
 #include "tensorflow/stream_executor/lib/stacktrace.h"
 #include "tensorflow/stream_executor/platform.h"
@@ -95,8 +94,6 @@ string ToVlogString(const void *ptr) {
   return out.str();
 }
 
-string ToVlogString(const HostBuffer &buffer) { return buffer.AsString(); }
-
 template <class T>
 string ToVlogString(const std::complex<T> &c) {
   // StrCat does not convert std::complex to text.
@@ -2103,36 +2100,6 @@ Stream &Stream::ThenMemcpyH2DQuantized(
   return *this;
 }
 
-Stream &Stream::ThenCopyHostBuffer2Device(
-    HostBuffer *buffer_src, DeviceMemory<float> *gpu_unquantized_dst) {
-  VLOG_CALL(PARAM(*buffer_src), PARAM(gpu_unquantized_dst));
-
-  if (ok()) {
-    if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
-      CheckError(
-          dnn->DoCopyHostBuffer2Device(this, buffer_src, gpu_unquantized_dst));
-    } else {
-      SetErrorAndLogNoDnnSupport();
-    }
-  }
-  return *this;
-}
-
-Stream &Stream::ThenCopyDevice2HostBuffer(
-    const DeviceMemory<float> &gpu_unquantized_src, HostBuffer *buffer_dst) {
-  VLOG_CALL(PARAM(gpu_unquantized_src), PARAM(*buffer_dst));
-
-  if (ok()) {
-    if (dnn::DnnSupport *dnn = parent_->AsDnn()) {
-      CheckError(
-          dnn->DoCopyDevice2HostBuffer(this, gpu_unquantized_src, buffer_dst));
-    } else {
-      SetErrorAndLogNoDnnSupport();
-    }
-  }
-  return *this;
-}
-
 Stream *Stream::GetOrCreateSubStream() {
   mutex_lock lock(mu_);
 
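The Stream wrappers removed above delegated to the DnnSupport hooks and had no remaining callers. Equivalent host-to-device staging is already available through the existing Stream::ThenMemcpy overload; a minimal sketch follows, where CopyHostToDevice and its arguments are illustrative names, not part of this change.

// Minimal sketch: host-to-device staging without HostBuffer, using the
// existing Stream::ThenMemcpy overload. Illustrative helper only.
#include <vector>

#include "tensorflow/stream_executor/device_memory.h"
#include "tensorflow/stream_executor/stream.h"

namespace se = stream_executor;

void CopyHostToDevice(se::Stream* stream, se::DeviceMemory<float>* device_dst,
                      const std::vector<float>& host_src) {
  // Enqueues an asynchronous host-to-device copy on the stream.
  stream->ThenMemcpy(device_dst, host_src.data(),
                     host_src.size() * sizeof(float));
  // host_src must stay alive until the copy completes, e.g. after
  // stream->BlockHostUntilDone().
}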