[SE] Remove StreamExecutor alias for absl::string_view.

PiperOrigin-RevId: 219333558
This commit is contained in:
Justin Lebar 2018-10-30 11:20:40 -07:00 committed by TensorFlower Gardener
parent 3786aa194c
commit 04c404e05c
18 changed files with 99 additions and 125 deletions

View File

@@ -22,7 +22,6 @@ limitations under the License.
#include "tensorflow/stream_executor/blas.h" #include "tensorflow/stream_executor/blas.h"
#include "tensorflow/stream_executor/host_or_device_scalar.h" #include "tensorflow/stream_executor/host_or_device_scalar.h"
#include "tensorflow/stream_executor/lib/stringpiece.h"
#include "tensorflow/stream_executor/platform/mutex.h" #include "tensorflow/stream_executor/platform/mutex.h"
#include "tensorflow/stream_executor/platform/port.h" #include "tensorflow/stream_executor/platform/port.h"
#include "tensorflow/stream_executor/platform/thread_annotations.h" #include "tensorflow/stream_executor/platform/thread_annotations.h"

View File

@@ -46,7 +46,6 @@ limitations under the License.
#include "tensorflow/stream_executor/lib/process_state.h" #include "tensorflow/stream_executor/lib/process_state.h"
#include "tensorflow/stream_executor/lib/status.h" #include "tensorflow/stream_executor/lib/status.h"
#include "tensorflow/stream_executor/lib/str_util.h" #include "tensorflow/stream_executor/lib/str_util.h"
#include "tensorflow/stream_executor/lib/stringpiece.h"
#include "tensorflow/stream_executor/lib/stringprintf.h" #include "tensorflow/stream_executor/lib/stringprintf.h"
#include "tensorflow/stream_executor/platform/logging.h" #include "tensorflow/stream_executor/platform/logging.h"

View File

@@ -37,7 +37,6 @@ limitations under the License.
#include "tensorflow/stream_executor/lib/error.h" #include "tensorflow/stream_executor/lib/error.h"
#include "tensorflow/stream_executor/lib/initialize.h" #include "tensorflow/stream_executor/lib/initialize.h"
#include "tensorflow/stream_executor/lib/mathutil.h" #include "tensorflow/stream_executor/lib/mathutil.h"
#include "tensorflow/stream_executor/lib/stringpiece.h"
#include "tensorflow/stream_executor/lib/threadpool.h" #include "tensorflow/stream_executor/lib/threadpool.h"
#include "tensorflow/stream_executor/platform/logging.h" #include "tensorflow/stream_executor/platform/logging.h"
#include "tensorflow/stream_executor/plugin_registry.h" #include "tensorflow/stream_executor/plugin_registry.h"
@@ -46,6 +45,7 @@ limitations under the License.
#include "tensorflow/stream_executor/stream_executor_pimpl.h" #include "tensorflow/stream_executor/stream_executor_pimpl.h"
// clang-format off // clang-format off
#include "cuda/include/cudnn.h" #include "cuda/include/cudnn.h"
#include "absl/strings/string_view.h"
// clang-format on // clang-format on
namespace stream_executor { namespace stream_executor {
@@ -2317,7 +2317,7 @@ class CudnnEnvVar {
static bool IsEnabledImpl() { static bool IsEnabledImpl() {
const char* tf_env_var_val = getenv(EnvVar::kName); const char* tf_env_var_val = getenv(EnvVar::kName);
if (tf_env_var_val != nullptr) { if (tf_env_var_val != nullptr) {
port::StringPiece tf_env_var_val_str(tf_env_var_val); absl::string_view tf_env_var_val_str(tf_env_var_val);
if (tf_env_var_val_str == "0") { if (tf_env_var_val_str == "0") {
return false; return false;
} }

View File

@@ -25,6 +25,7 @@ limitations under the License.
#include <unistd.h> #include <unistd.h>
#endif #endif
#include "absl/strings/str_cat.h" #include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/stream_executor/cuda/cuda_diagnostics.h" #include "tensorflow/stream_executor/cuda/cuda_diagnostics.h"
#include "tensorflow/stream_executor/cuda/cuda_driver.h" #include "tensorflow/stream_executor/cuda/cuda_driver.h"
#include "tensorflow/stream_executor/cuda/cuda_event.h" #include "tensorflow/stream_executor/cuda/cuda_event.h"
@@ -146,7 +147,7 @@ port::Status CUDAExecutor::Init(int device_ordinal,
} }
bool CUDAExecutor::FindOnDiskForComputeCapability( bool CUDAExecutor::FindOnDiskForComputeCapability(
port::StringPiece filename, port::StringPiece canonical_suffix, absl::string_view filename, absl::string_view canonical_suffix,
string *found_filename) const { string *found_filename) const {
if (cc_major_ == 0 && cc_minor_ == 0) { if (cc_major_ == 0 && cc_minor_ == 0) {
return false; return false;

View File

@@ -25,6 +25,7 @@ limitations under the License.
#include <set> #include <set>
#include <unordered_map> #include <unordered_map>
#include "absl/strings/string_view.h"
#include "tensorflow/stream_executor/cuda/cuda_kernel.h" #include "tensorflow/stream_executor/cuda/cuda_kernel.h"
#include "tensorflow/stream_executor/event.h" #include "tensorflow/stream_executor/event.h"
#include "tensorflow/stream_executor/lib/status.h" #include "tensorflow/stream_executor/lib/status.h"
@@ -234,8 +235,8 @@ class CUDAExecutor : public internal::StreamExecutorInterface {
// filename by looking for compute-capability-specific suffixed versions; i.e. // filename by looking for compute-capability-specific suffixed versions; i.e.
// looking for "foo.ptx" will check to see if "foo.ptx.cc30.ptx" is present if // looking for "foo.ptx" will check to see if "foo.ptx.cc30.ptx" is present if
// we're on a compute capability 3.0 machine. // we're on a compute capability 3.0 machine.
bool FindOnDiskForComputeCapability(port::StringPiece filename, bool FindOnDiskForComputeCapability(absl::string_view filename,
port::StringPiece canonical_suffix, absl::string_view canonical_suffix,
string *found_filename) const; string *found_filename) const;
// Host callback landing routine invoked by CUDA. // Host callback landing routine invoked by CUDA.

View File

@@ -34,6 +34,7 @@ limitations under the License.
#include "tensorflow/stream_executor/platform/port.h" #include "tensorflow/stream_executor/platform/port.h"
#if !defined(PLATFORM_GOOGLE) #if !defined(PLATFORM_GOOGLE)
#include "absl/strings/string_view.h"
#include "cuda/cuda_config.h" #include "cuda/cuda_config.h"
#endif #endif
@@ -119,12 +120,12 @@ static mutex& GetRpathMutex() {
return *mu; return *mu;
} }
/* static */ void DsoLoader::RegisterRpath(port::StringPiece path) { /* static */ void DsoLoader::RegisterRpath(absl::string_view path) {
mutex_lock lock{GetRpathMutex()}; mutex_lock lock{GetRpathMutex()};
GetRpaths()->emplace_back(path); GetRpaths()->emplace_back(path);
} }
/* static */ port::Status DsoLoader::GetDsoHandle(port::StringPiece path, /* static */ port::Status DsoLoader::GetDsoHandle(absl::string_view path,
void** dso_handle, void** dso_handle,
LoadKind load_kind) { LoadKind load_kind) {
if (load_kind != LoadKind::kLocal) { if (load_kind != LoadKind::kLocal) {
@@ -190,13 +191,13 @@ static std::vector<string>* CreatePrimordialRpaths() {
#endif #endif
} }
/* static */ string DsoLoader::FindDsoPath(port::StringPiece library_name, /* static */ string DsoLoader::FindDsoPath(absl::string_view library_name,
port::StringPiece runfiles_relpath) { absl::string_view runfiles_relpath) {
// Keep a record of the paths we attempted so we can dump out meaningful // Keep a record of the paths we attempted so we can dump out meaningful
// diagnostics if no path is found. // diagnostics if no path is found.
std::vector<string> attempted; std::vector<string> attempted;
using StringPieces = std::vector<port::StringPiece>; using StringPieces = std::vector<absl::string_view>;
string candidate; string candidate;
// Otherwise, try binary-plus-rpath locations. // Otherwise, try binary-plus-rpath locations.

View File

@@ -22,9 +22,9 @@ limitations under the License.
#include "tensorflow/stream_executor/platform/port.h" #include "tensorflow/stream_executor/platform/port.h"
#include <vector> #include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/stream_executor/lib/status.h" #include "tensorflow/stream_executor/lib/status.h"
#include "tensorflow/stream_executor/lib/statusor.h" #include "tensorflow/stream_executor/lib/statusor.h"
#include "tensorflow/stream_executor/lib/stringpiece.h"
#include "tensorflow/stream_executor/platform.h" #include "tensorflow/stream_executor/platform.h"
#include "tensorflow/stream_executor/platform/mutex.h" #include "tensorflow/stream_executor/platform/mutex.h"
@@ -48,7 +48,7 @@ class DsoLoader {
static port::Status GetLibcuptiDsoHandle(void** dso_handle); static port::Status GetLibcuptiDsoHandle(void** dso_handle);
// Registers a new binary-relative path to use as a dlopen search path. // Registers a new binary-relative path to use as a dlopen search path.
static void RegisterRpath(port::StringPiece path); static void RegisterRpath(absl::string_view path);
private: private:
// Registered rpaths (singleton vector) and a mutex that guards it. // Registered rpaths (singleton vector) and a mutex that guards it.
@@ -61,10 +61,9 @@ class DsoLoader {
// Loads a DSO from the given "path" (which can technically be any dlopen-able // Loads a DSO from the given "path" (which can technically be any dlopen-able
// name). If the load kind is global, the symbols in the loaded DSO are // name). If the load kind is global, the symbols in the loaded DSO are
// visible to subsequent DSO loading operations. // visible to subsequent DSO loading operations.
static port::Status GetDsoHandle(port::StringPiece path, void** dso_handle, static port::Status GetDsoHandle(absl::string_view path, void** dso_handle,
LoadKind load_kind = LoadKind::kLocal); LoadKind load_kind = LoadKind::kLocal);
// Returns the binary directory (or binary path) associated with the currently // Returns the binary directory (or binary path) associated with the currently
// executing program. If strip_executable_name is true, the executable file is // executing program. If strip_executable_name is true, the executable file is
// stripped off of the path. // stripped off of the path.
@@ -80,8 +79,8 @@ class DsoLoader {
// library_name: the filename in tree; e.g. libOpenCL.so.1.0.0 // library_name: the filename in tree; e.g. libOpenCL.so.1.0.0
// runfiles_relpath: where to look for the library relative to the runfiles // runfiles_relpath: where to look for the library relative to the runfiles
// root; e.g. third_party/gpus/cuda/lib64 // root; e.g. third_party/gpus/cuda/lib64
static string FindDsoPath(port::StringPiece library_name, static string FindDsoPath(absl::string_view library_name,
port::StringPiece runfiles_relpath); absl::string_view runfiles_relpath);
// Return platform dependent paths for DSOs // Return platform dependent paths for DSOs
static string GetCudaLibraryDirPath(); static string GetCudaLibraryDirPath();

View File

@@ -21,6 +21,7 @@ limitations under the License.
#include "tensorflow/stream_executor/platform/port.h" #include "tensorflow/stream_executor/platform/port.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/stream_executor/lib/demangle.h" #include "tensorflow/stream_executor/lib/demangle.h"
#include "tensorflow/stream_executor/platform.h" #include "tensorflow/stream_executor/platform.h"
@@ -93,9 +94,9 @@ KernelCacheConfig KernelBase::GetPreferredCacheConfig() const {
// Prefix stub functions emitted by the CUDA splitter. // Prefix stub functions emitted by the CUDA splitter.
static const char *kStubPrefix = "__device_stub_"; static const char *kStubPrefix = "__device_stub_";
void KernelBase::set_name(port::StringPiece name) { void KernelBase::set_name(absl::string_view name) {
name_ = string(name); name_ = string(name);
port::StringPiece stubless_name = name; absl::string_view stubless_name = name;
if (tensorflow::str_util::StartsWith(name, kStubPrefix)) { if (tensorflow::str_util::StartsWith(name, kStubPrefix)) {
stubless_name.remove_prefix(strlen(kStubPrefix)); stubless_name.remove_prefix(strlen(kStubPrefix));
} }

View File

@@ -75,10 +75,10 @@ limitations under the License.
#include <type_traits> #include <type_traits>
#include <vector> #include <vector>
#include "absl/strings/string_view.h"
#include "tensorflow/stream_executor/device_memory.h" #include "tensorflow/stream_executor/device_memory.h"
#include "tensorflow/stream_executor/kernel_cache_config.h" #include "tensorflow/stream_executor/kernel_cache_config.h"
#include "tensorflow/stream_executor/lib/array_slice.h" #include "tensorflow/stream_executor/lib/array_slice.h"
#include "tensorflow/stream_executor/lib/stringpiece.h"
#include "tensorflow/stream_executor/platform/port.h" #include "tensorflow/stream_executor/platform/port.h"
namespace stream_executor { namespace stream_executor {
@@ -177,7 +177,7 @@ class KernelBase {
// Gets the preferred cache configuration for a kernel. // Gets the preferred cache configuration for a kernel.
KernelCacheConfig GetPreferredCacheConfig() const; KernelCacheConfig GetPreferredCacheConfig() const;
void set_name(port::StringPiece name); void set_name(absl::string_view name);
const string &name() const { return name_; } const string &name() const { return name_; }
const string &demangled_name() const { return demangled_name_; } const string &demangled_name() const { return demangled_name_; }

View File

@@ -14,26 +14,27 @@ limitations under the License.
==============================================================================*/ ==============================================================================*/
#include "tensorflow/stream_executor/kernel_spec.h" #include "tensorflow/stream_executor/kernel_spec.h"
#include "absl/strings/string_view.h"
namespace stream_executor { namespace stream_executor {
KernelLoaderSpec::KernelLoaderSpec(port::StringPiece kernelname) KernelLoaderSpec::KernelLoaderSpec(absl::string_view kernelname)
: kernelname_(string(kernelname)) {} : kernelname_(string(kernelname)) {}
OnDiskKernelLoaderSpec::OnDiskKernelLoaderSpec(port::StringPiece filename, OnDiskKernelLoaderSpec::OnDiskKernelLoaderSpec(absl::string_view filename,
port::StringPiece kernelname) absl::string_view kernelname)
: KernelLoaderSpec(kernelname), filename_(string(filename)) {} : KernelLoaderSpec(kernelname), filename_(string(filename)) {}
CudaPtxOnDisk::CudaPtxOnDisk(port::StringPiece filename, CudaPtxOnDisk::CudaPtxOnDisk(absl::string_view filename,
port::StringPiece kernelname) absl::string_view kernelname)
: OnDiskKernelLoaderSpec(filename, kernelname) {} : OnDiskKernelLoaderSpec(filename, kernelname) {}
CudaCubinOnDisk::CudaCubinOnDisk(port::StringPiece filename, CudaCubinOnDisk::CudaCubinOnDisk(absl::string_view filename,
port::StringPiece kernelname) absl::string_view kernelname)
: OnDiskKernelLoaderSpec(filename, kernelname) {} : OnDiskKernelLoaderSpec(filename, kernelname) {}
CudaCubinInMemory::CudaCubinInMemory(const char *bytes, CudaCubinInMemory::CudaCubinInMemory(const char *bytes,
port::StringPiece kernelname) absl::string_view kernelname)
: KernelLoaderSpec(kernelname), bytes_(bytes) {} : KernelLoaderSpec(kernelname), bytes_(bytes) {}
bool CompareComputeCapability(const std::tuple<int, int> &lhs, bool CompareComputeCapability(const std::tuple<int, int> &lhs,
@@ -45,8 +46,8 @@ bool CompareComputeCapability(const std::tuple<int, int> &lhs,
const std::tuple<int, int> CudaPtxInMemory::kMinimumCapability{1, 0}; const std::tuple<int, int> CudaPtxInMemory::kMinimumCapability{1, 0};
CudaPtxInMemory::CudaPtxInMemory(port::StringPiece ptx, CudaPtxInMemory::CudaPtxInMemory(absl::string_view ptx,
port::StringPiece kernel_name, absl::string_view kernel_name,
bool ptx_compressed) bool ptx_compressed)
: KernelLoaderSpec(kernel_name), : KernelLoaderSpec(kernel_name),
ptx_by_compute_capability_(CompareComputeCapability) { ptx_by_compute_capability_(CompareComputeCapability) {
@@ -60,12 +61,12 @@ CudaPtxInMemory::CudaPtxInMemory(port::StringPiece ptx,
CudaPtxInMemory::CudaPtxInMemory( CudaPtxInMemory::CudaPtxInMemory(
const std::initializer_list<CudaPtxInMemory::PtxSpec> &spec_list, const std::initializer_list<CudaPtxInMemory::PtxSpec> &spec_list,
port::StringPiece kernel_name, bool ptx_compressed) absl::string_view kernel_name, bool ptx_compressed)
: KernelLoaderSpec(kernel_name), : KernelLoaderSpec(kernel_name),
ptx_by_compute_capability_(CompareComputeCapability) { ptx_by_compute_capability_(CompareComputeCapability) {
for (const auto &spec : spec_list) { for (const auto &spec : spec_list) {
int major, minor; int major, minor;
port::StringPiece ptx; absl::string_view ptx;
std::tie(major, minor, ptx) = spec; std::tie(major, minor, ptx) = spec;
if (ptx_compressed) { if (ptx_compressed) {
// Lazy decompression. Put an empty string in decompressed_ptx_ showing // Lazy decompression. Put an empty string in decompressed_ptx_ showing
@@ -155,62 +156,62 @@ const char *CudaPtxInMemory::original_text(int compute_capability_major,
return ptx_iter->second; return ptx_iter->second;
} }
OpenCLTextOnDisk::OpenCLTextOnDisk(port::StringPiece filename, OpenCLTextOnDisk::OpenCLTextOnDisk(absl::string_view filename,
port::StringPiece kernelname) absl::string_view kernelname)
: OnDiskKernelLoaderSpec(filename, kernelname) {} : OnDiskKernelLoaderSpec(filename, kernelname) {}
OpenCLTextInMemory::OpenCLTextInMemory(port::StringPiece text, OpenCLTextInMemory::OpenCLTextInMemory(absl::string_view text,
port::StringPiece kernelname) absl::string_view kernelname)
: KernelLoaderSpec(kernelname), text_(text) {} : KernelLoaderSpec(kernelname), text_(text) {}
OpenCLBinaryOnDisk::OpenCLBinaryOnDisk(port::StringPiece filename, OpenCLBinaryOnDisk::OpenCLBinaryOnDisk(absl::string_view filename,
port::StringPiece kernelname) absl::string_view kernelname)
: OnDiskKernelLoaderSpec(filename, kernelname) {} : OnDiskKernelLoaderSpec(filename, kernelname) {}
MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddOpenCLTextOnDisk( MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddOpenCLTextOnDisk(
port::StringPiece filename, port::StringPiece kernelname) { absl::string_view filename, absl::string_view kernelname) {
CHECK(ocl_text_on_disk_ == nullptr); CHECK(ocl_text_on_disk_ == nullptr);
ocl_text_on_disk_.reset(new OpenCLTextOnDisk{filename, kernelname}); ocl_text_on_disk_.reset(new OpenCLTextOnDisk{filename, kernelname});
return this; return this;
} }
MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddOpenCLBinaryOnDisk( MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddOpenCLBinaryOnDisk(
port::StringPiece filename, port::StringPiece kernelname) { absl::string_view filename, absl::string_view kernelname) {
CHECK(ocl_binary_on_disk_ == nullptr); CHECK(ocl_binary_on_disk_ == nullptr);
ocl_binary_on_disk_.reset(new OpenCLBinaryOnDisk{filename, kernelname}); ocl_binary_on_disk_.reset(new OpenCLBinaryOnDisk{filename, kernelname});
return this; return this;
} }
MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddOpenCLTextInMemory( MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddOpenCLTextInMemory(
port::StringPiece filename, port::StringPiece kernelname) { absl::string_view filename, absl::string_view kernelname) {
CHECK(ocl_text_in_memory_ == nullptr); CHECK(ocl_text_in_memory_ == nullptr);
ocl_text_in_memory_.reset(new OpenCLTextInMemory{filename, kernelname}); ocl_text_in_memory_.reset(new OpenCLTextInMemory{filename, kernelname});
return this; return this;
} }
MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddCudaPtxOnDisk( MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddCudaPtxOnDisk(
port::StringPiece filename, port::StringPiece kernelname) { absl::string_view filename, absl::string_view kernelname) {
CHECK(cuda_ptx_on_disk_ == nullptr); CHECK(cuda_ptx_on_disk_ == nullptr);
cuda_ptx_on_disk_.reset(new CudaPtxOnDisk{filename, kernelname}); cuda_ptx_on_disk_.reset(new CudaPtxOnDisk{filename, kernelname});
return this; return this;
} }
MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddCudaCubinInMemory( MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddCudaCubinInMemory(
const char *bytes, port::StringPiece kernelname) { const char *bytes, absl::string_view kernelname) {
CHECK(cuda_cubin_in_memory_ == nullptr); CHECK(cuda_cubin_in_memory_ == nullptr);
cuda_cubin_in_memory_.reset(new CudaCubinInMemory{bytes, kernelname}); cuda_cubin_in_memory_.reset(new CudaCubinInMemory{bytes, kernelname});
return this; return this;
} }
MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddCudaCubinOnDisk( MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddCudaCubinOnDisk(
port::StringPiece filename, port::StringPiece kernelname) { absl::string_view filename, absl::string_view kernelname) {
CHECK(cuda_cubin_on_disk_ == nullptr); CHECK(cuda_cubin_on_disk_ == nullptr);
cuda_cubin_on_disk_.reset(new CudaCubinOnDisk{filename, kernelname}); cuda_cubin_on_disk_.reset(new CudaCubinOnDisk{filename, kernelname});
return this; return this;
} }
MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddCudaPtxInMemory( MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddCudaPtxInMemory(
port::StringPiece ptx, port::StringPiece kernelname) { absl::string_view ptx, absl::string_view kernelname) {
CHECK(cuda_ptx_in_memory_ == nullptr); CHECK(cuda_ptx_in_memory_ == nullptr);
cuda_ptx_in_memory_.reset( cuda_ptx_in_memory_.reset(
new CudaPtxInMemory{ptx, kernelname, false /* ptx_compressed */}); new CudaPtxInMemory{ptx, kernelname, false /* ptx_compressed */});
@@ -218,7 +219,7 @@ MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddCudaPtxInMemory(
} }
MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddCudaCompressedPtxInMemory( MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddCudaCompressedPtxInMemory(
port::StringPiece ptx, port::StringPiece kernelname) { absl::string_view ptx, absl::string_view kernelname) {
CHECK(cuda_ptx_in_memory_ == nullptr); CHECK(cuda_ptx_in_memory_ == nullptr);
cuda_ptx_in_memory_.reset( cuda_ptx_in_memory_.reset(
new CudaPtxInMemory{ptx, kernelname, true /* ptx_compressed */}); new CudaPtxInMemory{ptx, kernelname, true /* ptx_compressed */});
@@ -227,7 +228,7 @@ MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddCudaCompressedPtxInMemory(
MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddCudaPtxInMemory( MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddCudaPtxInMemory(
std::initializer_list<CudaPtxInMemory::PtxSpec> spec_list, std::initializer_list<CudaPtxInMemory::PtxSpec> spec_list,
port::StringPiece kernelname) { absl::string_view kernelname) {
CHECK(cuda_ptx_in_memory_ == nullptr); CHECK(cuda_ptx_in_memory_ == nullptr);
cuda_ptx_in_memory_.reset( cuda_ptx_in_memory_.reset(
new CudaPtxInMemory{spec_list, kernelname, false /* ptx_compressed */}); new CudaPtxInMemory{spec_list, kernelname, false /* ptx_compressed */});
@@ -236,7 +237,7 @@ MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddCudaPtxInMemory(
MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddCudaCompressedPtxInMemory( MultiKernelLoaderSpec *MultiKernelLoaderSpec::AddCudaCompressedPtxInMemory(
std::initializer_list<CudaPtxInMemory::PtxSpec> spec_list, std::initializer_list<CudaPtxInMemory::PtxSpec> spec_list,
port::StringPiece kernelname) { absl::string_view kernelname) {
CHECK(cuda_ptx_in_memory_ == nullptr); CHECK(cuda_ptx_in_memory_ == nullptr);
cuda_ptx_in_memory_.reset( cuda_ptx_in_memory_.reset(
new CudaPtxInMemory{spec_list, kernelname, true /* ptx_compressed */}); new CudaPtxInMemory{spec_list, kernelname, true /* ptx_compressed */});

View File

@@ -51,7 +51,7 @@ limitations under the License.
#include <memory> #include <memory>
#include "tensorflow/stream_executor/platform/port.h" #include "tensorflow/stream_executor/platform/port.h"
#include "tensorflow/stream_executor/lib/stringpiece.h" #include "absl/strings/string_view.h"
#include "tensorflow/stream_executor/platform/logging.h" #include "tensorflow/stream_executor/platform/logging.h"
#include "tensorflow/stream_executor/platform/mutex.h" #include "tensorflow/stream_executor/platform/mutex.h"
#include "tensorflow/stream_executor/platform/port.h" #include "tensorflow/stream_executor/platform/port.h"
@@ -76,7 +76,7 @@ class KernelLoaderSpec {
const string &kernelname() const { return kernelname_; } const string &kernelname() const { return kernelname_; }
protected: protected:
explicit KernelLoaderSpec(port::StringPiece kernelname); explicit KernelLoaderSpec(absl::string_view kernelname);
private: private:
// The kernel name that should be loaded out of the program description given // The kernel name that should be loaded out of the program description given
@@ -101,8 +101,8 @@ class OnDiskKernelLoaderSpec : public KernelLoaderSpec {
virtual const char *CanonicalSuffix() const = 0; virtual const char *CanonicalSuffix() const = 0;
protected: protected:
OnDiskKernelLoaderSpec(port::StringPiece filename, OnDiskKernelLoaderSpec(absl::string_view filename,
port::StringPiece kernelname); absl::string_view kernelname);
string filename_; string filename_;
@@ -113,7 +113,7 @@ class OnDiskKernelLoaderSpec : public KernelLoaderSpec {
// Kernel loader specification for PTX text that resides on disk. // Kernel loader specification for PTX text that resides on disk.
class CudaPtxOnDisk : public OnDiskKernelLoaderSpec { class CudaPtxOnDisk : public OnDiskKernelLoaderSpec {
public: public:
CudaPtxOnDisk(port::StringPiece filename, port::StringPiece kernelname); CudaPtxOnDisk(absl::string_view filename, absl::string_view kernelname);
~CudaPtxOnDisk() override {} ~CudaPtxOnDisk() override {}
const char *CanonicalSuffix() const override { return ".ptx"; } const char *CanonicalSuffix() const override { return ".ptx"; }
@@ -125,7 +125,7 @@ class CudaPtxOnDisk : public OnDiskKernelLoaderSpec {
// Kernel loader specification for CUBIN binary that resides on disk. // Kernel loader specification for CUBIN binary that resides on disk.
class CudaCubinOnDisk : public OnDiskKernelLoaderSpec { class CudaCubinOnDisk : public OnDiskKernelLoaderSpec {
public: public:
CudaCubinOnDisk(port::StringPiece filename, port::StringPiece kernelname); CudaCubinOnDisk(absl::string_view filename, absl::string_view kernelname);
~CudaCubinOnDisk() override {} ~CudaCubinOnDisk() override {}
const string &filename() const { return filename_; } const string &filename() const { return filename_; }
@@ -143,7 +143,7 @@ class CudaPtxInMemory : public KernelLoaderSpec {
public: public:
// Components: compute capability major number, compute capability minor // Components: compute capability major number, compute capability minor
// number, and PTX source. // number, and PTX source.
typedef std::tuple<int, int, port::StringPiece> PtxSpec; typedef std::tuple<int, int, absl::string_view> PtxSpec;
// Single-PTX constructor. Adds the provided PTX version with an unknown // Single-PTX constructor. Adds the provided PTX version with an unknown
// compute capability. Since the CC is unknown, the PTX is assumed to be very // compute capability. Since the CC is unknown, the PTX is assumed to be very
@@ -151,16 +151,16 @@ class CudaPtxInMemory : public KernelLoaderSpec {
// likely to be used as the default! Note that the PTX can be compressed, // likely to be used as the default! Note that the PTX can be compressed,
// which is indicated by the argument ptx_compressed. // which is indicated by the argument ptx_compressed.
// //
// Warning: the string backing the provided port::StringPiece ptx must outlive this // Warning: the string backing the provided absl::string_view ptx must outlive
// instance. // this instance.
CudaPtxInMemory(port::StringPiece ptx, port::StringPiece kernelname, CudaPtxInMemory(absl::string_view ptx, absl::string_view kernelname,
bool ptx_compressed = false); bool ptx_compressed = false);
// Multiple-PTX-version constructor. Adds each item in spec_list to this // Multiple-PTX-version constructor. Adds each item in spec_list to this
// object. Note that the PTX can be compressed, which is indicated by the // object. Note that the PTX can be compressed, which is indicated by the
// argument ptx_compressed. // argument ptx_compressed.
CudaPtxInMemory(const std::initializer_list<PtxSpec> &spec_list, CudaPtxInMemory(const std::initializer_list<PtxSpec> &spec_list,
port::StringPiece kernel_name, bool ptx_compressed = false); absl::string_view kernel_name, bool ptx_compressed = false);
~CudaPtxInMemory() override {} ~CudaPtxInMemory() override {}
// Add the PTX implementation described by ptx_spec to this object. On // Add the PTX implementation described by ptx_spec to this object. On
@@ -218,7 +218,7 @@ class CudaPtxInMemory : public KernelLoaderSpec {
// Kernel loader specification for OpenCL text that resides on disk. // Kernel loader specification for OpenCL text that resides on disk.
class OpenCLTextOnDisk : public OnDiskKernelLoaderSpec { class OpenCLTextOnDisk : public OnDiskKernelLoaderSpec {
public: public:
OpenCLTextOnDisk(port::StringPiece filename, port::StringPiece kernelname); OpenCLTextOnDisk(absl::string_view filename, absl::string_view kernelname);
~OpenCLTextOnDisk() override {} ~OpenCLTextOnDisk() override {}
const char *CanonicalSuffix() const override { return ".ocl"; } const char *CanonicalSuffix() const override { return ".ocl"; }
@@ -230,7 +230,7 @@ class OpenCLTextOnDisk : public OnDiskKernelLoaderSpec {
// Kernel loader specification for OpenCL binary that resides on disk. // Kernel loader specification for OpenCL binary that resides on disk.
class OpenCLBinaryOnDisk : public OnDiskKernelLoaderSpec { class OpenCLBinaryOnDisk : public OnDiskKernelLoaderSpec {
public: public:
OpenCLBinaryOnDisk(port::StringPiece filename, port::StringPiece kernelname); OpenCLBinaryOnDisk(absl::string_view filename, absl::string_view kernelname);
~OpenCLBinaryOnDisk() override {} ~OpenCLBinaryOnDisk() override {}
const char *CanonicalSuffix() const override { return ".aocx"; } const char *CanonicalSuffix() const override { return ".aocx"; }
@@ -242,7 +242,7 @@ class OpenCLBinaryOnDisk : public OnDiskKernelLoaderSpec {
// Kernel loader specification for OpenCL text that resides in memory. // Kernel loader specification for OpenCL text that resides in memory.
class OpenCLTextInMemory : public KernelLoaderSpec { class OpenCLTextInMemory : public KernelLoaderSpec {
public: public:
OpenCLTextInMemory(port::StringPiece text, port::StringPiece kernelname); OpenCLTextInMemory(absl::string_view text, absl::string_view kernelname);
~OpenCLTextInMemory() override {} ~OpenCLTextInMemory() override {}
// Returns the OpenCL text contents. // Returns the OpenCL text contents.
@@ -258,7 +258,7 @@ class OpenCLTextInMemory : public KernelLoaderSpec {
// Kernel loader specification for a CUBIN blob that resides in memory. // Kernel loader specification for a CUBIN blob that resides in memory.
class CudaCubinInMemory : public KernelLoaderSpec { class CudaCubinInMemory : public KernelLoaderSpec {
public: public:
CudaCubinInMemory(const char *bytes, port::StringPiece kernelname); CudaCubinInMemory(const char *bytes, absl::string_view kernelname);
~CudaCubinInMemory() override {} ~CudaCubinInMemory() override {}
const char *bytes() const { return bytes_; } const char *bytes() const { return bytes_; }
@@ -328,28 +328,28 @@ class MultiKernelLoaderSpec {
// the PTX or OpenCL being loaded. Also be aware that in CUDA C++ the kernel // the PTX or OpenCL being loaded. Also be aware that in CUDA C++ the kernel
// name may be mangled by the compiler if it is not declared in an // name may be mangled by the compiler if it is not declared in an
// extern "C" scope. // extern "C" scope.
MultiKernelLoaderSpec *AddOpenCLTextOnDisk(port::StringPiece filename, MultiKernelLoaderSpec *AddOpenCLTextOnDisk(absl::string_view filename,
port::StringPiece kernelname); absl::string_view kernelname);
MultiKernelLoaderSpec *AddOpenCLBinaryOnDisk(port::StringPiece filename, MultiKernelLoaderSpec *AddOpenCLBinaryOnDisk(absl::string_view filename,
port::StringPiece kernelname); absl::string_view kernelname);
MultiKernelLoaderSpec *AddOpenCLTextInMemory(port::StringPiece ocl_text, MultiKernelLoaderSpec *AddOpenCLTextInMemory(absl::string_view ocl_text,
port::StringPiece kernelname); absl::string_view kernelname);
MultiKernelLoaderSpec *AddCudaPtxOnDisk(port::StringPiece filename, MultiKernelLoaderSpec *AddCudaPtxOnDisk(absl::string_view filename,
port::StringPiece kernelname); absl::string_view kernelname);
MultiKernelLoaderSpec *AddCudaCubinOnDisk(port::StringPiece filename, MultiKernelLoaderSpec *AddCudaCubinOnDisk(absl::string_view filename,
port::StringPiece kernelname); absl::string_view kernelname);
MultiKernelLoaderSpec *AddCudaCubinInMemory(const char *cubin_bytes, MultiKernelLoaderSpec *AddCudaCubinInMemory(const char *cubin_bytes,
port::StringPiece kernelname); absl::string_view kernelname);
MultiKernelLoaderSpec *AddCudaPtxInMemory(port::StringPiece ptx, MultiKernelLoaderSpec *AddCudaPtxInMemory(absl::string_view ptx,
port::StringPiece kernelname); absl::string_view kernelname);
MultiKernelLoaderSpec *AddCudaCompressedPtxInMemory( MultiKernelLoaderSpec *AddCudaCompressedPtxInMemory(
port::StringPiece ptx, port::StringPiece kernelname); absl::string_view ptx, absl::string_view kernelname);
MultiKernelLoaderSpec *AddCudaPtxInMemory( MultiKernelLoaderSpec *AddCudaPtxInMemory(
std::initializer_list<CudaPtxInMemory::PtxSpec> spec_list, std::initializer_list<CudaPtxInMemory::PtxSpec> spec_list,
port::StringPiece kernelname); absl::string_view kernelname);
MultiKernelLoaderSpec *AddCudaCompressedPtxInMemory( MultiKernelLoaderSpec *AddCudaCompressedPtxInMemory(
std::initializer_list<CudaPtxInMemory::PtxSpec> spec_list, std::initializer_list<CudaPtxInMemory::PtxSpec> spec_list,
port::StringPiece kernelname); absl::string_view kernelname);
private: private:
std::unique_ptr<CudaPtxOnDisk> std::unique_ptr<CudaPtxOnDisk>

View File

@ -16,9 +16,9 @@ limitations under the License.
#ifndef TENSORFLOW_STREAM_EXECUTOR_LIB_ENV_H_ #ifndef TENSORFLOW_STREAM_EXECUTOR_LIB_ENV_H_
#define TENSORFLOW_STREAM_EXECUTOR_LIB_ENV_H_ #define TENSORFLOW_STREAM_EXECUTOR_LIB_ENV_H_
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/env.h"
#include "tensorflow/stream_executor/lib/status.h" #include "tensorflow/stream_executor/lib/status.h"
#include "tensorflow/stream_executor/lib/stringpiece.h"
#include "tensorflow/stream_executor/platform/port.h" #include "tensorflow/stream_executor/platform/port.h"
namespace stream_executor { namespace stream_executor {
@ -31,7 +31,7 @@ inline Status FileExists(const string& filename) {
return Env::Default()->FileExists(filename); return Env::Default()->FileExists(filename);
} }
inline Status FileExists(const port::StringPiece& filename) { inline Status FileExists(const absl::string_view& filename) {
return Env::Default()->FileExists(string(filename)); return Env::Default()->FileExists(string(filename));
} }

View File

@ -15,21 +15,22 @@ limitations under the License.
#include "tensorflow/stream_executor/lib/path.h" #include "tensorflow/stream_executor/lib/path.h"
#include "absl/strings/str_cat.h" #include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace stream_executor { namespace stream_executor {
namespace port { namespace port {
namespace internal { namespace internal {
static bool IsAbsolutePath(port::StringPiece path) { static bool IsAbsolutePath(absl::string_view path) {
return !path.empty() && path[0] == '/'; return !path.empty() && path[0] == '/';
} }
// For an array of paths of length count, append them all together, // For an array of paths of length count, append them all together,
// ensuring that the proper path separators are inserted between them. // ensuring that the proper path separators are inserted between them.
string JoinPathImpl(std::initializer_list<port::StringPiece> paths) { string JoinPathImpl(std::initializer_list<absl::string_view> paths) {
string result; string result;
for (port::StringPiece path : paths) { for (absl::string_view path : paths) {
if (path.empty()) continue; if (path.empty()) continue;
if (result.empty()) { if (result.empty()) {

View File

@ -16,8 +16,8 @@ limitations under the License.
#ifndef TENSORFLOW_STREAM_EXECUTOR_LIB_PATH_H_ #ifndef TENSORFLOW_STREAM_EXECUTOR_LIB_PATH_H_
#define TENSORFLOW_STREAM_EXECUTOR_LIB_PATH_H_ #define TENSORFLOW_STREAM_EXECUTOR_LIB_PATH_H_
#include "absl/strings/string_view.h"
#include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/io/path.h"
#include "tensorflow/stream_executor/lib/stringpiece.h"
#include "tensorflow/stream_executor/platform/port.h" #include "tensorflow/stream_executor/platform/port.h"
namespace stream_executor { namespace stream_executor {
@ -28,7 +28,7 @@ using tensorflow::io::Dirname;
namespace internal { namespace internal {
// TODO(rspringer): Move to cc/implementation file. // TODO(rspringer): Move to cc/implementation file.
// Not part of the public API. // Not part of the public API.
string JoinPathImpl(std::initializer_list<port::StringPiece> paths); string JoinPathImpl(std::initializer_list<absl::string_view> paths);
} // namespace internal } // namespace internal
// Join multiple paths together. // Join multiple paths together.
@ -44,7 +44,7 @@ string JoinPathImpl(std::initializer_list<port::StringPiece> paths);
// All paths will be treated as relative paths, regardless of whether or not // All paths will be treated as relative paths, regardless of whether or not
// they start with a leading '/'. That is, all paths will be concatenated // they start with a leading '/'. That is, all paths will be concatenated
// together, with the appropriate path separator inserted in between. // together, with the appropriate path separator inserted in between.
// Arguments must be convertible to port::StringPiece. // Arguments must be convertible to absl::string_view.
// //
// Usage: // Usage:
// string path = file::JoinPath("/var/log", dirname, filename); // string path = file::JoinPath("/var/log", dirname, filename);

View File

@ -18,9 +18,9 @@ limitations under the License.
#ifndef TENSORFLOW_STREAM_EXECUTOR_LIB_STATUS_H_ #ifndef TENSORFLOW_STREAM_EXECUTOR_LIB_STATUS_H_
#define TENSORFLOW_STREAM_EXECUTOR_LIB_STATUS_H_ #define TENSORFLOW_STREAM_EXECUTOR_LIB_STATUS_H_
#include "absl/strings/string_view.h"
#include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/status.h"
#include "tensorflow/stream_executor/lib/error.h" // IWYU pragma: export #include "tensorflow/stream_executor/lib/error.h" // IWYU pragma: export
#include "tensorflow/stream_executor/lib/stringpiece.h"
#include "tensorflow/stream_executor/platform/logging.h" #include "tensorflow/stream_executor/platform/logging.h"
namespace stream_executor { namespace stream_executor {
@ -33,13 +33,13 @@ using Status = tensorflow::Status;
ASSERT_EQ(::stream_executor::port::Status::OK(), (val)) ASSERT_EQ(::stream_executor::port::Status::OK(), (val))
// Define some canonical error helpers. // Define some canonical error helpers.
inline Status UnimplementedError(StringPiece message) { inline Status UnimplementedError(absl::string_view message) {
return Status(error::UNIMPLEMENTED, message); return Status(error::UNIMPLEMENTED, message);
} }
inline Status InternalError(StringPiece message) { inline Status InternalError(absl::string_view message) {
return Status(error::INTERNAL, message); return Status(error::INTERNAL, message);
} }
inline Status FailedPreconditionError(StringPiece message) { inline Status FailedPreconditionError(absl::string_view message) {
return Status(error::FAILED_PRECONDITION, message); return Status(error::FAILED_PRECONDITION, message);
} }

View File

@ -16,8 +16,8 @@ limitations under the License.
#ifndef TENSORFLOW_STREAM_EXECUTOR_LIB_STR_UTIL_H_ #ifndef TENSORFLOW_STREAM_EXECUTOR_LIB_STR_UTIL_H_
#define TENSORFLOW_STREAM_EXECUTOR_LIB_STR_UTIL_H_ #define TENSORFLOW_STREAM_EXECUTOR_LIB_STR_UTIL_H_
#include "absl/strings/string_view.h"
#include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/stream_executor/lib/stringpiece.h"
namespace stream_executor { namespace stream_executor {
namespace port { namespace port {
@ -27,7 +27,8 @@ using tensorflow::str_util::Split;
// Returns a copy of the input string 'str' with the given 'suffix' // Returns a copy of the input string 'str' with the given 'suffix'
// removed. If the suffix doesn't match, returns a copy of the original string. // removed. If the suffix doesn't match, returns a copy of the original string.
inline string StripSuffixString(port::StringPiece str, port::StringPiece suffix) { inline string StripSuffixString(absl::string_view str,
absl::string_view suffix) {
if (tensorflow::str_util::EndsWith(str, suffix)) { if (tensorflow::str_util::EndsWith(str, suffix)) {
str.remove_suffix(suffix.size()); str.remove_suffix(suffix.size());
} }

View File

@ -1,29 +0,0 @@
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_STREAM_EXECUTOR_LIB_STRINGPIECE_H_
#define TENSORFLOW_STREAM_EXECUTOR_LIB_STRINGPIECE_H_
#include "absl/strings/string_view.h"
namespace stream_executor {
namespace port {
using StringPiece = absl::string_view;
} // namespace port
} // namespace stream_executor
#endif // TENSORFLOW_STREAM_EXECUTOR_LIB_STRINGPIECE_H_

View File

@ -17,7 +17,6 @@ limitations under the License.
#define TENSORFLOW_STREAM_EXECUTOR_MODULE_SPEC_H_ #define TENSORFLOW_STREAM_EXECUTOR_MODULE_SPEC_H_
#include "tensorflow/stream_executor/lib/array_slice.h" #include "tensorflow/stream_executor/lib/array_slice.h"
#include "tensorflow/stream_executor/lib/stringpiece.h"
#include "tensorflow/stream_executor/platform/logging.h" #include "tensorflow/stream_executor/platform/logging.h"
#include "tensorflow/stream_executor/platform/port.h" #include "tensorflow/stream_executor/platform/port.h"