diff --git a/tensorflow/core/kernels/crop_and_resize_op.h b/tensorflow/core/kernels/crop_and_resize_op.h
index 8c34e3e71cc..66ff695d9ce 100644
--- a/tensorflow/core/kernels/crop_and_resize_op.h
+++ b/tensorflow/core/kernels/crop_and_resize_op.h
@@ -31,7 +31,7 @@ struct CropAndResize {
                   typename TTypes<T, 4>::ConstTensor image,
                   typename TTypes<float, 2>::ConstTensor boxes,
                   typename TTypes<int32, 1>::ConstTensor box_ind,
-                  const string& method_name, float extrapolation_value,
+                  const std::string& method_name, float extrapolation_value,
                   typename TTypes<float, 4>::Tensor crops);
 };
 
@@ -43,7 +43,7 @@ struct CropAndResizeBackpropImage {
                   typename TTypes<float, 2>::ConstTensor boxes,
                   typename TTypes<int32, 1>::ConstTensor box_ind,
                   typename TTypes<T, 4>::Tensor grads_image,
-                  const string& method_name);
+                  const std::string& method_name);
 };
 
 template <typename Device, typename T>
diff --git a/tensorflow/core/kernels/crop_and_resize_op_gpu.cu.cc b/tensorflow/core/kernels/crop_and_resize_op_gpu.cu.cc
index d268eb7b21e..e64a055503f 100644
--- a/tensorflow/core/kernels/crop_and_resize_op_gpu.cu.cc
+++ b/tensorflow/core/kernels/crop_and_resize_op_gpu.cu.cc
@@ -352,7 +352,7 @@ struct CropAndResize<GPUDevice, T> {
                   typename TTypes<T, 4>::ConstTensor image,
                   typename TTypes<float, 2>::ConstTensor boxes,
                   typename TTypes<int32, 1>::ConstTensor box_ind,
-                  const string& method_name, float extrapolation_value,
+                  const std::string& method_name, float extrapolation_value,
                   typename TTypes<float, 4>::Tensor crops) {
     const int batch = image.dimension(0);
     const int image_height = image.dimension(1);
@@ -391,7 +391,7 @@ struct CropAndResizeBackpropImage<GPUDevice, T> {
                   typename TTypes<float, 2>::ConstTensor boxes,
                   typename TTypes<int32, 1>::ConstTensor box_ind,
                   typename TTypes<T, 4>::Tensor grads_image,
-                  const string& method_name) {
+                  const std::string& method_name) {
     const int batch = grads_image.dimension(0);
     const int image_height = grads_image.dimension(1);
     const int image_width = grads_image.dimension(2);
diff --git a/tensorflow/core/kernels/cuda_solvers.h b/tensorflow/core/kernels/cuda_solvers.h
index 6833905e379..eb1d5c8a200 100644
--- a/tensorflow/core/kernels/cuda_solvers.h
+++ b/tensorflow/core/kernels/cuda_solvers.h
@@ -169,14 +169,16 @@ class CudaSolver {
   // to the underlying Tensor to prevent it from being deallocated prematurely.
   template <typename Scalar>
   ScratchSpace<Scalar> GetScratchSpace(const TensorShape& shape,
-                                       const string& debug_info, bool on_host);
+                                       const std::string& debug_info,
+                                       bool on_host);
   template <typename Scalar>
-  ScratchSpace<Scalar> GetScratchSpace(int64 size, const string& debug_info,
+  ScratchSpace<Scalar> GetScratchSpace(int64 size,
+                                       const std::string& debug_info,
                                        bool on_host);
   // Returns a DeviceLapackInfo that will live for the duration of the
   // CudaSolver object.
   inline DeviceLapackInfo GetDeviceLapackInfo(int64 size,
-                                              const string& debug_info);
+                                              const std::string& debug_info);
 
   // Allocates a temporary tensor that will live for the duration of the
   // CudaSolver object.
@@ -377,12 +379,12 @@ class ScratchSpace {
   ScratchSpace(OpKernelContext* context, int64 size, bool on_host)
       : ScratchSpace(context, TensorShape({size}), "", on_host) {}
 
-  ScratchSpace(OpKernelContext* context, int64 size, const string& debug_info,
-               bool on_host)
+  ScratchSpace(OpKernelContext* context, int64 size,
+               const std::string& debug_info, bool on_host)
       : ScratchSpace(context, TensorShape({size}), debug_info, on_host) {}
 
   ScratchSpace(OpKernelContext* context, const TensorShape& shape,
-               const string& debug_info, bool on_host)
+               const std::string& debug_info, bool on_host)
       : context_(context), debug_info_(debug_info), on_host_(on_host) {
     AllocatorAttributes alloc_attr;
     if (on_host) {
@@ -411,7 +413,7 @@ class ScratchSpace {
   }
   int64 bytes() const { return scratch_tensor_.TotalBytes(); }
   int64 size() const { return scratch_tensor_.NumElements(); }
-  const string& debug_info() const { return debug_info_; }
+  const std::string& debug_info() const { return debug_info_; }
 
   Tensor& tensor() { return scratch_tensor_; }
   const Tensor& tensor() const { return scratch_tensor_; }
@@ -424,21 +426,22 @@ class ScratchSpace {
 
  private:
   OpKernelContext* context_;  // not owned
-  const string debug_info_;
+  const std::string debug_info_;
   const bool on_host_;
   Tensor scratch_tensor_;
 };
 
 class HostLapackInfo : public ScratchSpace<int> {
  public:
-  HostLapackInfo(OpKernelContext* context, int64 size, const string& debug_info)
+  HostLapackInfo(OpKernelContext* context, int64 size,
+                 const std::string& debug_info)
       : ScratchSpace<int>(context, size, debug_info, /* on_host */ true){};
 };
 
 class DeviceLapackInfo : public ScratchSpace<int> {
  public:
   DeviceLapackInfo(OpKernelContext* context, int64 size,
-                   const string& debug_info)
+                   const std::string& debug_info)
       : ScratchSpace<int>(context, size, debug_info, /* on_host */ false) {}
 
   // Allocates a new scratch space on the host and launches a copy of the
@@ -460,7 +463,7 @@ class DeviceLapackInfo : public ScratchSpace<int> {
 #if GOOGLE_CUDA
 template <typename Scalar>
 ScratchSpace<Scalar> CudaSolver::GetScratchSpace(const TensorShape& shape,
-                                                 const string& debug_info,
+                                                 const std::string& debug_info,
                                                  bool on_host) {
   ScratchSpace<Scalar> new_scratch_space(context_, shape, debug_info, on_host);
   scratch_tensor_refs_.emplace_back(new_scratch_space.tensor());
@@ -469,13 +472,13 @@ ScratchSpace<Scalar> CudaSolver::GetScratchSpace(const TensorShape& shape,
 
 template <typename Scalar>
 ScratchSpace<Scalar> CudaSolver::GetScratchSpace(int64 size,
-                                                 const string& debug_info,
+                                                 const std::string& debug_info,
                                                  bool on_host) {
   return GetScratchSpace<Scalar>(TensorShape({size}), debug_info, on_host);
 }
 
 inline DeviceLapackInfo CudaSolver::GetDeviceLapackInfo(
-    int64 size, const string& debug_info) {
+    int64 size, const std::string& debug_info) {
   DeviceLapackInfo new_dev_info(context_, size, debug_info);
   scratch_tensor_refs_.emplace_back(new_dev_info.tensor());
   return new_dev_info;
diff --git a/tensorflow/core/kernels/cuda_sparse.h b/tensorflow/core/kernels/cuda_sparse.h
index 2d41cc72421..978bc9005ed 100644
--- a/tensorflow/core/kernels/cuda_sparse.h
+++ b/tensorflow/core/kernels/cuda_sparse.h
@@ -75,7 +75,8 @@ using gpuStream_t = hipStream_t;
 
 namespace tensorflow {
 
-inline string ConvertGPUSparseErrorToString(const gpusparseStatus_t status) {
+inline std::string ConvertGPUSparseErrorToString(
+    const gpusparseStatus_t status) {
   switch (status) {
 #define STRINGIZE(q) #q
 #define RETURN_IF_STATUS(err) \
diff --git a/tensorflow/core/kernels/fused_batch_norm_op.h b/tensorflow/core/kernels/fused_batch_norm_op.h
index 7a64046b335..624f7ecf59a 100644
--- a/tensorflow/core/kernels/fused_batch_norm_op.h
+++ b/tensorflow/core/kernels/fused_batch_norm_op.h
@@ -30,7 +30,7 @@ namespace functor {
 // (2) batch norm + side input + activation
 enum class FusedBatchNormActivationMode { kIdentity, kRelu };
 
-string ToString(FusedBatchNormActivationMode activation_mode);
+std::string ToString(FusedBatchNormActivationMode activation_mode);
 
 Status ParseActivationMode(OpKernelConstruction* context,
                            FusedBatchNormActivationMode* activation_mode);
diff --git a/tensorflow/core/kernels/gpu_utils.h b/tensorflow/core/kernels/gpu_utils.h
index c0dd3b6bc77..a1589db3b5b 100644
--- a/tensorflow/core/kernels/gpu_utils.h
+++ b/tensorflow/core/kernels/gpu_utils.h
@@ -146,7 +146,7 @@ class AutoTuneMap {
   }
 
  private:
-  AutoTuneMap(const string& name) : name_(name) {
+  AutoTuneMap(const std::string& name) : name_(name) {
     min_score_threshold_ = 1;
     int min_warmup_iterations = 10;
     const char* threshold_str = getenv("TF_AUTOTUNE_THRESHOLD");
@@ -174,8 +174,8 @@ class AutoTuneMap {
     }
   };
 
-  string GetActionSummary(StringPiece action, const Parameters& params,
-                          const Config& config) {
+  std::string GetActionSummary(StringPiece action, const Parameters& params,
+                               const Config& config) {
     return strings::Printf("autotune_map %s %s: %s -> (%s)", name_.c_str(),
                            string(action).c_str(), params.ToString().c_str(),
                            config.ToString().c_str());
@@ -189,7 +189,7 @@ class AutoTuneMap {
   };
   std::unordered_map<Parameters, ValueType, Hasher> params_config_map_
       TF_GUARDED_BY(mu_);
-  string name_;
+  std::string name_;
   int32 min_score_threshold_;
   int32 max_autotune_count_;
   int32 max_autotune_global_count_;
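
Illustrative usage sketch, not part of the patch above: a rough example of how the CudaSolver scratch-space helpers whose signatures change in cuda_solvers.h are typically called from a GPU kernel. The wrapper function ComputeSomethingOnGpu, its literal sizes, and its debug tags are hypothetical; only GetScratchSpace, GetDeviceLapackInfo, and the std::string debug_info parameter come from the diff. Because the parameters are const references, string literals still bind to them, so existing call sites should compile unchanged.

// Sketch only: hypothetical helper, assuming the post-patch headers.
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/kernels/cuda_solvers.h"

namespace tensorflow {

void ComputeSomethingOnGpu(OpKernelContext* context) {
  CudaSolver solver(context);

  // debug_info is now a std::string; a string literal converts implicitly.
  ScratchSpace<float> host_buf = solver.GetScratchSpace<float>(
      /*size=*/256, "example_host_buffer", /*on_host=*/true);

  // Per-batch LAPACK status buffer living on the device for the lifetime
  // of the solver object.
  DeviceLapackInfo dev_info =
      solver.GetDeviceLapackInfo(/*size=*/1, "example_factorization");

  // ... enqueue cuSolver work that writes its status into dev_info ...
}

}  // namespace tensorflow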