[SE] Remove StreamExecutor alias for absl::InlinedVector.
PiperOrigin-RevId: 219324068
commit 9b371c8d58 (parent e25209c872)
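
The change is mechanical: the alias-only wrapper header tensorflow/stream_executor/lib/inlined_vector.h is deleted, and each call site names absl::InlinedVector directly and includes the Abseil header. A minimal standalone sketch of the before/after pattern (not code from this commit):

    // Standalone sketch of the migration pattern; not code from this commit.
    //
    // Before: the StreamExecutor wrapper header supplied the alias.
    //   #include "tensorflow/stream_executor/lib/inlined_vector.h"
    //   port::InlinedVector<char, 4> chars(kCharLimit);
    //
    // After: depend on Abseil directly.
    #include <cstddef>

    #include "absl/container/inlined_vector.h"

    int main() {
      static const std::size_t kCharLimit = 64;
      absl::InlinedVector<char, 4> chars(kCharLimit);
      chars[kCharLimit - 1] = '\0';
      return 0;
    }
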
@@ -39,9 +39,9 @@ limitations under the License.
 #include <memory>
 #include <vector>
 
+#include "absl/container/inlined_vector.h"
 #include "absl/strings/str_cat.h"
 #include "tensorflow/stream_executor/lib/error.h"
-#include "tensorflow/stream_executor/lib/inlined_vector.h"
 #include "tensorflow/stream_executor/lib/numbers.h"
 #include "tensorflow/stream_executor/lib/process_state.h"
 #include "tensorflow/stream_executor/lib/status.h"
@@ -363,7 +363,7 @@ port::StatusOr<DriverVersion> Diagnostician::FindKernelDriverVersion() {
   }
 
   static const int kContentsSize = 1024;
-  port::InlinedVector<char, 4> contents(kContentsSize);
+  absl::InlinedVector<char, 4> contents(kContentsSize);
   size_t retcode =
       fread(contents.begin(), 1, kContentsSize - 2, driver_version_file);
   if (retcode < kContentsSize - 1) {
|
@@ -22,12 +22,12 @@ limitations under the License.
 #include <utility>
 
 #include "absl/base/casts.h"
+#include "absl/container/inlined_vector.h"
 #include "absl/strings/str_cat.h"
 #include "tensorflow/stream_executor/cuda/cuda_diagnostics.h"
 #include "tensorflow/stream_executor/lib/env.h"
 #include "tensorflow/stream_executor/lib/error.h"
 #include "tensorflow/stream_executor/lib/human_readable.h"
-#include "tensorflow/stream_executor/lib/inlined_vector.h"
 #include "tensorflow/stream_executor/lib/notification.h"
 #include "tensorflow/stream_executor/lib/ptr_util.h"
 #include "tensorflow/stream_executor/lib/stacktrace.h"
@@ -336,7 +336,7 @@ static port::Status InternalInit() {
 /* static */ bool CUDADriver::GetDeviceName(CUdevice device,
                                             string *device_name) {
   static const size_t kCharLimit = 64;
-  port::InlinedVector<char, 4> chars(kCharLimit);
+  absl::InlinedVector<char, 4> chars(kCharLimit);
   CUresult res = cuDeviceGetName(chars.begin(), kCharLimit - 1, device);
   if (res != CUDA_SUCCESS) {
     LOG(ERROR) << "failed to get device name for " << device << ": "
@@ -575,8 +575,8 @@ CUDADriver::ContextGetSharedMemConfig(CudaContext* context) {
   static const unsigned int kLogBufferBytesLimit = 1024;
   unsigned int error_log_buffer_bytes = kLogBufferBytesLimit;
   unsigned int info_log_buffer_bytes = kLogBufferBytesLimit;
-  port::InlinedVector<char, 4> error_log_buffer(error_log_buffer_bytes);
-  port::InlinedVector<char, 4> info_log_buffer(info_log_buffer_bytes);
+  absl::InlinedVector<char, 4> error_log_buffer(error_log_buffer_bytes);
+  absl::InlinedVector<char, 4> info_log_buffer(info_log_buffer_bytes);
   bool log_verbose = true;
   CUjit_option options[] = {CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES,
                             CU_JIT_ERROR_LOG_BUFFER,
@@ -1466,7 +1466,7 @@ static port::StatusOr<T> GetSimpleAttribute(CUdevice device,
 /* static */ string CUDADriver::GetPCIBusID(CUdevice device) {
   string pci_bus_id;
   static const int kBufferSize = 64;
-  port::InlinedVector<char, 4> chars(kBufferSize);
+  absl::InlinedVector<char, 4> chars(kBufferSize);
   chars[kBufferSize - 1] = '\0';
   CUresult res = cuDeviceGetPCIBusId(chars.begin(), kBufferSize - 1, device);
   if (res != CUDA_SUCCESS) {
|
@@ -78,7 +78,6 @@ limitations under the License.
 #include "tensorflow/stream_executor/device_memory.h"
 #include "tensorflow/stream_executor/kernel_cache_config.h"
 #include "tensorflow/stream_executor/lib/array_slice.h"
-#include "tensorflow/stream_executor/lib/inlined_vector.h"
 #include "tensorflow/stream_executor/lib/stringpiece.h"
 #include "tensorflow/stream_executor/platform/port.h"
 
|
@@ -1,29 +0,0 @@
-/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#ifndef TENSORFLOW_STREAM_EXECUTOR_LIB_INLINED_VECTOR_H_
-#define TENSORFLOW_STREAM_EXECUTOR_LIB_INLINED_VECTOR_H_
-
-#include "absl/container/inlined_vector.h"
-
-namespace stream_executor {
-namespace port {
-
-using absl::InlinedVector;
-
-}  // namespace port
-}  // namespace stream_executor
-
-#endif  // TENSORFLOW_STREAM_EXECUTOR_LIB_INLINED_VECTOR_H_
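
For reference, the header deleted above only re-exported absl::InlinedVector into namespace stream_executor::port. Behavior at the call sites is unchanged: absl::InlinedVector<char, 4> keeps four chars of inline storage and transparently moves to heap storage for larger sizes such as the 64- and 1024-byte buffers above, and with the default allocator its begin() is a plain char*, so passing it to C APIs like fread and cuDeviceGetName still works. A small standalone illustration (assumes the default allocator; not code from this commit):

    // Standalone illustration; not part of the commit.
    #include <cassert>

    #include "absl/container/inlined_vector.h"

    int main() {
      absl::InlinedVector<char, 4> tiny(3);      // fits in the inline storage
      absl::InlinedVector<char, 4> large(1024);  // spills to the heap
      assert(tiny.size() == 3 && large.size() == 1024);
      char* raw = large.begin();  // with std::allocator, iterator is char*
      raw[0] = 'x';
      (void)tiny;
      (void)raw;
      return 0;
    }
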
@@ -36,7 +36,6 @@ limitations under the License.
 #include "tensorflow/stream_executor/kernel_cache_config.h"
 #include "tensorflow/stream_executor/kernel_spec.h"
 #include "tensorflow/stream_executor/launch_dim.h"
-#include "tensorflow/stream_executor/lib/inlined_vector.h"
 #include "tensorflow/stream_executor/lib/status.h"
 #include "tensorflow/stream_executor/lib/statusor.h"
 #include "tensorflow/stream_executor/module_spec.h"
|