diff --git a/tensorflow/core/framework/attr_value_util.h b/tensorflow/core/framework/attr_value_util.h
index 966e716e39a..094c007d20b 100644
--- a/tensorflow/core/framework/attr_value_util.h
+++ b/tensorflow/core/framework/attr_value_util.h
@@ -36,7 +36,7 @@ class NameAttrList;
 
 // A human-readable rendering of attr_value that is more concise than a
 // text-format proto.
-string SummarizeAttrValue(const AttrValue& attr_value);
+std::string SummarizeAttrValue(const AttrValue& attr_value);
 
 // Generates an error if attr_value doesn't have the indicated attr type.
 Status AttrValueHasType(const AttrValue& attr_value, StringPiece type);
@@ -51,7 +51,7 @@ Status AttrValueHasType(const AttrValue& attr_value, StringPiece type);
 bool ParseAttrValue(StringPiece type, StringPiece text, AttrValue* out);
 
 // Sets *out based on the type of value.
-void SetAttrValue(const string& value, AttrValue* out);
+void SetAttrValue(const std::string& value, AttrValue* out);
 void SetAttrValue(const tstring& value, AttrValue* out);
 void SetAttrValue(const char* value, AttrValue* out);
 void SetAttrValue(StringPiece value, AttrValue* out);
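
For orientation, a minimal sketch of the attr-value API this header declares, written against the `std::string` spellings introduced above. Illustrative only, not part of the patch; header paths are the usual TensorFlow ones.

```cpp
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/platform/logging.h"

void AttrValueDemo() {
  tensorflow::AttrValue attr;
  // Resolves to the std::string overload declared above.
  tensorflow::SetAttrValue(std::string("SAME"), &attr);
  // A string literal still resolves to the const char* overload.
  tensorflow::SetAttrValue("VALID", &attr);
  CHECK(tensorflow::AttrValueHasType(attr, "string").ok());
  std::string summary = tensorflow::SummarizeAttrValue(attr);  // concise rendering
}
```
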
diff --git a/tensorflow/core/framework/device_base.h b/tensorflow/core/framework/device_base.h
index 3415c7f23fc..fabb0b24a93 100644
--- a/tensorflow/core/framework/device_base.h
+++ b/tensorflow/core/framework/device_base.h
@@ -237,7 +237,7 @@ class DeviceBase {
   // Unimplemented by default
   virtual const DeviceAttributes& attributes() const;
   virtual int NumaNode() const { return attributes().locality().numa_node(); }
-  virtual const string& name() const;
+  virtual const std::string& name() const;
 
   // Materializes the given TensorProto into 'tensor' stored in Device
   // memory.  Most devices will want to override this.
diff --git a/tensorflow/core/framework/function.h b/tensorflow/core/framework/function.h
index 03da4dffa7f..95f733d23a6 100644
--- a/tensorflow/core/framework/function.h
+++ b/tensorflow/core/framework/function.h
@@ -114,9 +114,9 @@ class FunctionDefHelper {
 
   // Constructs an AttrValue.func given the "name" and "attrs".
   static AttrValueWrapper FunctionRef(
-      const string& name,
+      const std::string& name,
       gtl::ArraySlice<std::pair<string, AttrValueWrapper>> attrs);
-  static AttrValueWrapper FunctionRef(const string& name) {
+  static AttrValueWrapper FunctionRef(const std::string& name) {
     return FunctionRef(name, {});
   }
 
@@ -127,11 +127,11 @@ class FunctionDefHelper {
     // When constructing a NodeDef, the first entry in ret is used as
     // the node name; the remaining values are ignored.
     std::vector<string> ret;
-    string op;
+    std::string op;
     std::vector<string> arg;
     std::vector<std::pair<string, AttrValueWrapper>> attr;
     std::vector<string> dep;
-    string device;
+    std::string device;
 
     NodeDef ToNodeDef() const;
   };
@@ -143,7 +143,7 @@ class FunctionDefHelper {
   // - `control_ret_def` holds a mapping from the function control
   //   output names to the nodes from `node_def`.
   static FunctionDef Create(
-      const string& function_name, gtl::ArraySlice<string> in_def,
+      const std::string& function_name, gtl::ArraySlice<string> in_def,
       gtl::ArraySlice<string> out_def, gtl::ArraySlice<string> attr_def,
       gtl::ArraySlice<Node> node_def,
       gtl::ArraySlice<std::pair<string, string>> ret_def,
@@ -153,7 +153,7 @@ class FunctionDefHelper {
   // function encoding (node_name:output_name[:output_index]).
   // - `ret_def` holds a mapping from the function output names from `out_def`
   //   to the node outputs from `node_def`.
-  static FunctionDef Create(const string& function_name,
+  static FunctionDef Create(const std::string& function_name,
                             gtl::ArraySlice<string> in_def,
                             gtl::ArraySlice<string> out_def,
                             gtl::ArraySlice<string> attr_def,
@@ -161,7 +161,7 @@ class FunctionDefHelper {
                             gtl::ArraySlice<std::pair<string, string>> ret_def);
 
   // TODO(josh11b): Get rid of these and transition to the one above.
-  static FunctionDef Define(const string& function_name,
+  static FunctionDef Define(const std::string& function_name,
                             gtl::ArraySlice<string> arg_def,
                             gtl::ArraySlice<string> ret_def,
                             gtl::ArraySlice<string> attr_def,
@@ -175,7 +175,7 @@ class FunctionDefHelper {
 
   // Helpers to construct a constant scalar.
   template <typename T>
-  static Node Const(const string& name, const T& val) {
+  static Node Const(const std::string& name, const T& val) {
     Node n = {{name}, "Const"};
     const DataType dtype = DataTypeToEnum<T>::value;
     n.attr.push_back({"dtype", dtype});
@@ -186,7 +186,7 @@ class FunctionDefHelper {
   }
 
   template <typename T>
-  static Node Const(const string& name, gtl::ArraySlice<T> vals) {
+  static Node Const(const std::string& name, gtl::ArraySlice<T> vals) {
     Node n = {{name}, "Const"};
     const DataType dtype = DataTypeToEnum<T>::value;
     n.attr.push_back({"dtype", dtype});
@@ -207,7 +207,7 @@ inline FunctionDefHelper::AttrValueWrapper::AttrValueWrapper(const char* val) {
 
 template <>
 inline FunctionDefHelper::AttrValueWrapper::AttrValueWrapper(
-    const string& val) {
+    const std::string& val) {
   InitFromString(val);
 }
 
@@ -251,13 +251,13 @@ Status InstantiateFunction(const FunctionDef& fdef, AttrSlice attr_values,
 // In particular, it may not include all information presented in
 // "func_def" (e.g., comments, description of the function arguments,
 // etc.)
-string DebugString(const FunctionDef& func_def);
-string DebugString(const GraphDef& instantiated_func_def);
-string DebugString(gtl::ArraySlice<NodeDef> instantiated_func_nodes);
+std::string DebugString(const FunctionDef& func_def);
+std::string DebugString(const GraphDef& instantiated_func_def);
+std::string DebugString(gtl::ArraySlice<NodeDef> instantiated_func_nodes);
 
 // Returns a debug string for a top level graph (the main program and
 // its supporting functions defined in its library).
-string DebugStringWhole(const GraphDef& gdef);
+std::string DebugStringWhole(const GraphDef& gdef);
 
 // Returns true if f1 == f2. Compares all fields, including descriptions. Order
 // of NodeDefs doesn't matter.
@@ -360,14 +360,14 @@ class FunctionLibraryDefinition : public OpRegistryInterface {
       delete;
 
   // Returns true if the library contains `func`, false otherwise.
-  bool Contains(const string& func) const;
+  bool Contains(const std::string& func) const;
 
   // Returns nullptr if "func" is not defined in "lib_def". Otherwise,
   // returns its definition proto.
   //
   // NB: This function returns a borrowed pointer, which can be invalidated by a
   // subsequent call to `ReplaceFunction()` with the given name.
-  const FunctionDef* Find(const string& func) const TF_LOCKS_EXCLUDED(mu_);
+  const FunctionDef* Find(const std::string& func) const TF_LOCKS_EXCLUDED(mu_);
 
   // Adds function definition 'fdef' to this function library.
   // Returns status 'ok' on success, or error otherwise. This is a no-op if
@@ -388,7 +388,7 @@ class FunctionLibraryDefinition : public OpRegistryInterface {
   // a non-OK status if "func" was not found in the library, OK otherwise.
   // Please be careful when replacing function: make sure all previous pointers
   // returned by `Find()` are no longer in use.
-  Status ReplaceFunction(const string& func, const FunctionDef& fdef)
+  Status ReplaceFunction(const std::string& func, const FunctionDef& fdef)
       TF_LOCKS_EXCLUDED(mu_);
 
   // Replaces the gradient corresponding to `grad.function_name()`. Returns
@@ -401,7 +401,7 @@ class FunctionLibraryDefinition : public OpRegistryInterface {
   // Please be careful when removing function: make sure there are no other
   // nodes using the function, and all previous pointers returned by `Find()`
   // are no longer in use.
-  Status RemoveFunction(const string& func) TF_LOCKS_EXCLUDED(mu_);
+  Status RemoveFunction(const std::string& func) TF_LOCKS_EXCLUDED(mu_);
 
   // Adds the functions and gradients in 'other' to this function library.
   // Duplicate functions and gradients are ignored.
@@ -417,7 +417,8 @@ class FunctionLibraryDefinition : public OpRegistryInterface {
   // If the gradient function for 'func' is specified explicitly in
   // the library, returns the gradient function name.  Otherwise,
   // returns an empty string.
-  string FindGradient(const string& func) const TF_LOCKS_EXCLUDED(mu_);
+  std::string FindGradient(const std::string& func) const
+      TF_LOCKS_EXCLUDED(mu_);
 
   // OpRegistryInterface method. Useful for constructing a Graph.
   //
@@ -427,26 +428,27 @@ class FunctionLibraryDefinition : public OpRegistryInterface {
   //
   // NB: This function outputs a borrowed pointer, which can be invalidated by a
   // subsequent call to `ReplaceFunction()` with the given name.
-  Status LookUp(const string& op_type_name,
+  Status LookUp(const std::string& op_type_name,
                 const OpRegistrationData** op_reg_data) const override
       TF_LOCKS_EXCLUDED(mu_);
 
   // Generates new function name with the specified prefix that is unique
   // across this library.
-  string UniqueFunctionName(StringPiece prefix) const TF_LOCKS_EXCLUDED(mu_);
+  std::string UniqueFunctionName(StringPiece prefix) const
+      TF_LOCKS_EXCLUDED(mu_);
 
   // Given a node def 'ndef', inspects attributes of the callee
   // function to derive the attribute 'value' for 'attr'. Returns OK
   // iff the attribute is given by the function's definition.
   // TODO(irving): Remove; keep only the const Node& version.
   template <typename T>
-  Status GetAttr(const NodeDef& ndef, const string& attr, T* value) const;
+  Status GetAttr(const NodeDef& ndef, const std::string& attr, T* value) const;
 
   // Given a node, inspects attributes of the callee function to derive the
   // attribute 'value' for 'attr'. Returns OK iff the attribute is given by the
   // function's definition.
   template <typename T>
-  Status GetAttr(const Node& node, const string& attr, T* value) const;
+  Status GetAttr(const Node& node, const std::string& attr, T* value) const;
 
   // Returns a proto representation of the state of this function library.
   FunctionDefLibrary ToProto() const TF_LOCKS_EXCLUDED(mu_);
@@ -475,7 +477,7 @@ class FunctionLibraryDefinition : public OpRegistryInterface {
   // name `func` already exists in this function library, and has the same
   // implementation as in `other`. If the implementations conflict, an invalid
   // argument error is returned.
-  Status CopyFunctionDefFrom(const string& func,
+  Status CopyFunctionDefFrom(const std::string& func,
                              const FunctionLibraryDefinition& other)
       TF_LOCKS_EXCLUDED(mu_);
 
@@ -491,7 +493,7 @@ class FunctionLibraryDefinition : public OpRegistryInterface {
 
   std::shared_ptr<FunctionDefAndOpRegistration> FindHelper(
       const string& func) const TF_SHARED_LOCKS_REQUIRED(mu_);
-  string FindGradientHelper(const string& func) const
+  std::string FindGradientHelper(const std::string& func) const
       TF_SHARED_LOCKS_REQUIRED(mu_);
 
   Status AddHelper(std::shared_ptr<FunctionDefAndOpRegistration> registration,
@@ -518,12 +520,13 @@ class FunctionLibraryDefinition : public OpRegistryInterface {
   // Remove `func` from the library. Returns non-OK Status unless `func` is in
   // the library. This should only be called when there is a guarantee that the
   // function being removed hasn't been retrieved with `Find`.
-  Status RemoveFunctionHelper(const string& func)
+  Status RemoveFunctionHelper(const std::string& func)
       TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
 
   // Remove gradient of function `func` from the library. Returns non-OK Status
   // unless `func` has a gradient.
-  Status RemoveGradient(const string& func) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
+  Status RemoveGradient(const std::string& func)
+      TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
 
   mutable mutex mu_;
   const OpRegistryInterface* const default_registry_;
@@ -566,7 +569,7 @@ class FunctionLibraryRuntime {
     // The canonical device name of the device on which the function
     // should be instantiated. If empty, the function will be
     // instantiated on the local device.
-    string target;
+    std::string target;
 
     // Should the function be instantiated as a multi-device function?
     bool is_multi_device_function = false;
@@ -640,13 +643,13 @@ class FunctionLibraryRuntime {
     // `state_handle` will have the same handle and share the same
     // state (in stateful kernels); and two functions with different
     // values for `state_handle` will have independent state.
-    string state_handle;
+    std::string state_handle;
 
     // This interface is EXPERIMENTAL and subject to change.
     //
     // Instantiates the function using an executor of the given type. If empty,
     // the default TensorFlow executor will be used.
-    string executor_type;
+    std::string executor_type;
 
     // If true, the runtime will attempt to create kernels for the function at
     // instantiation time, rather than on the first run. This can be used to
@@ -680,10 +683,10 @@ class FunctionLibraryRuntime {
     bool include_optimized_graph_in_debug_string = false;
   };
   typedef uint64 Handle;
-  virtual Status Instantiate(const string& function_name, AttrSlice attrs,
+  virtual Status Instantiate(const std::string& function_name, AttrSlice attrs,
                              const InstantiateOptions& options,
                              Handle* handle) = 0;
-  Status Instantiate(const string& function_name, AttrSlice attrs,
+  Status Instantiate(const std::string& function_name, AttrSlice attrs,
                      Handle* handle) {
     auto opts = absl::make_unique<InstantiateOptions>();
     return Instantiate(function_name, attrs, *opts, handle);
@@ -738,7 +741,7 @@ class FunctionLibraryRuntime {
 
     // Parameters for remote function execution.
     bool remote_execution = false;
-    string source_device = "";  // Fully specified device name.
+    std::string source_device = "";  // Fully specified device name.
 
     // Allocator attributes specifying where the args are / rets should be put.
     // These should either be {} or match the length of args / retvals. If {},
@@ -758,7 +761,7 @@ class FunctionLibraryRuntime {
     bool run_all_kernels_inline = false;
 
     // Returns a human-readable representation of this.
-    string DebugString() const;
+    std::string DebugString() const;
   };
   typedef std::function<void(const Status&)> DoneCallback;
   virtual void Run(const Options& opts, Handle handle,
@@ -786,7 +789,7 @@ class FunctionLibraryRuntime {
   // NOTE(mrry): This method assumes that the runtime is associated with a
   // default function library, and looks up `function_name` in that library.
   // It does not support overriding the function library.
-  virtual bool IsStateful(const string& function_name) const = 0;
+  virtual bool IsStateful(const std::string& function_name) const = 0;
 
   // Returns the device on which the function executes.
   virtual Device* device() = 0;
@@ -817,7 +820,7 @@ class FunctionLibraryRuntime {
 
   // Returns a debug string showing the definition of the function of
   // 'handle'.
-  virtual string DebugString(Handle handle) = 0;
+  virtual std::string DebugString(Handle handle) = 0;
 
   // Returns the graph version number.
   virtual int graph_def_version() const = 0;
@@ -847,13 +850,13 @@ class FunctionLibraryRuntime {
   // `ExecutorFactory::GetFactory()`) that will be used based on the given
   // dynamic `options` and static `attrs`. If none is specified, this method
   // will return an empty string, which leaves the decision up to the runtime.
-  static string ExecutorType(const InstantiateOptions& options,
-                             AttrSlice attrs);
+  static std::string ExecutorType(const InstantiateOptions& options,
+                                  AttrSlice attrs);
 };
 
 // Returns the device of the `arg_index`-th function input. Update
 // `composite_devices` if the input device is a composite device.
-string GetFunctionResourceInputDevice(
+std::string GetFunctionResourceInputDevice(
     const Tensor& input, const int arg_index, const FunctionDef& function_def,
     absl::flat_hash_map<string, std::vector<string>>* composite_devices);
 
@@ -864,9 +867,10 @@ string GetFunctionResourceInputDevice(
 // space. But it may change as the implementation
 // evolves. Therefore, it should not be persisted or compared across
 // address spaces.
-string Canonicalize(const string& funcname, AttrSlice attrs,
-                    const FunctionLibraryRuntime::InstantiateOptions& options);
-string Canonicalize(const string& funcname, AttrSlice attrs);
+std::string Canonicalize(
+    const std::string& funcname, AttrSlice attrs,
+    const FunctionLibraryRuntime::InstantiateOptions& options);
+std::string Canonicalize(const std::string& funcname, AttrSlice attrs);
 
 const FunctionLibraryRuntime::Handle kInvalidHandle = -1;
 const FunctionLibraryRuntime::LocalHandle kInvalidLocalHandle = -1;
@@ -907,8 +911,8 @@ class DistributedFunctionLibraryRuntime {
   // local `handle` is filled for the instantiated function data and can be used
   // for subsequent run function calls on the remote target.
   virtual void Instantiate(
-      const string& function_name, const FunctionLibraryDefinition& lib_def,
-      AttrSlice attrs,
+      const std::string& function_name,
+      const FunctionLibraryDefinition& lib_def, AttrSlice attrs,
       const FunctionLibraryRuntime::InstantiateOptions& options,
       FunctionLibraryRuntime::LocalHandle* handle,
       FunctionLibraryRuntime::DoneCallback done) = 0;
@@ -1022,11 +1026,11 @@ Status ArgNumType(AttrSlice attrs, const OpDef::ArgDef& arg_def,
 namespace gradient {
 // Register a gradient creator for the "op".
 typedef std::function<Status(const AttrSlice& attrs, FunctionDef*)> Creator;
-bool RegisterOp(const string& op, Creator func);
+bool RegisterOp(const std::string& op, Creator func);
 
 // Returns OK if the gradient creator for the "op" is found (may be
 // nullptr if REGISTER_OP_NO_GRADIENT is used).
-Status GetOpGradientCreator(const string& op, Creator* creator);
+Status GetOpGradientCreator(const std::string& op, Creator* creator);
 };  // namespace gradient
 
 // Declare explicit instantiations of GetAttr
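
A hedged sketch of `FunctionDefHelper::Create` with the signatures above. The `XTimesTwo` body, the `Mul`/`Const` op usage, and the ret mapping are illustrative, not taken from this patch:

```cpp
#include "tensorflow/core/framework/function.h"

tensorflow::FunctionDef XTimesTwo() {
  using FDH = tensorflow::FunctionDefHelper;
  return FDH::Create(
      "XTimesTwo",   // function_name, now passed as const std::string&
      {"x: float"},  // in_def
      {"y: float"},  // out_def
      {},            // attr_def
      {
          // Constant-scalar helper declared above; emits a Const node "two".
          FDH::Const("two", 2.0f),
          // Node fields are {ret, op, arg, attr, dep, device}.
          {{"y"}, "Mul", {"x", "two:output:0"}, {{"T", tensorflow::DT_FLOAT}}},
      },
      // ret_def: function output -> node output ("z" is Mul's output arg name).
      {{"y", "y:z:0"}});
}
```
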
diff --git a/tensorflow/core/framework/log_memory.h b/tensorflow/core/framework/log_memory.h
index 1b926ddaa3f..e714c742b43 100644
--- a/tensorflow/core/framework/log_memory.h
+++ b/tensorflow/core/framework/log_memory.h
@@ -52,14 +52,14 @@ class LogMemory {
     UNKNOWN_STEP_ID = -6,
   };
 
-  static const string kLogMemoryLabel;
+  static const std::string kLogMemoryLabel;
 
   // Test to see if memory logging is enabled. For now, logging is
   // enabled whenever VLOG_IS_ON(1) for the log_memory module.
   static bool IsEnabled();
 
   // Log the beginning of a step.
-  static void RecordStep(int64 step_id, const string& handle);
+  static void RecordStep(int64 step_id, const std::string& handle);
 
   // Log a tensor buffer allocation. The name indicates which kernel
   // made the allocation. If the allocation is made through an
@@ -67,8 +67,8 @@ class LogMemory {
   // otherwise step_id is one of the SpecialStepIds defined in
   // op_kernel.h, e.g. Op Kernel construction or an optimization pass
   // such as constant folding.
-  static void RecordTensorAllocation(const string& kernel_name, int64 step_id,
-                                     const Tensor& tensor);
+  static void RecordTensorAllocation(const std::string& kernel_name,
+                                     int64 step_id, const Tensor& tensor);
 
   // Log a tensor buffer deallocation. The deallocation is triggered
   // when the buffer's refcount falls to zero, and the tracking
@@ -77,10 +77,10 @@ class LogMemory {
   // corresponding tensor previously passed in to
   // RecordTensorAllocation.
   static void RecordTensorDeallocation(int64 allocation_id,
-                                       const string& allocator_name);
+                                       const std::string& allocator_name);
 
   // Log the use of a tensor as an output from a kernel.
-  static void RecordTensorOutput(const string& kernel_name, int64 step_id,
+  static void RecordTensorOutput(const std::string& kernel_name, int64 step_id,
                                  int index, const Tensor& tensor);
 
   // Log a "raw" allocation, which is just a buffer sized in
@@ -92,7 +92,7 @@ class LogMemory {
   // is executing, otherwise step_id is one of the SpecialStepIds
   // defined in op_kernel.h, e.g. Op Kernel construction or an
   // optimization pass such as constant folding.
-  static void RecordRawAllocation(const string& operation, int64 step_id,
+  static void RecordRawAllocation(const std::string& operation, int64 step_id,
                                   size_t num_bytes, void* ptr,
                                   Allocator* allocator);
 
@@ -101,7 +101,7 @@ class LogMemory {
   // enqueued using the buffer. A deferred deallocation should always
   // be followed by a matching non-deferred deallocation when the
   // buffer is actually returned and can be reused.
-  static void RecordRawDeallocation(const string& operation, int64 step_id,
+  static void RecordRawDeallocation(const std::string& operation, int64 step_id,
                                     void* ptr, Allocator* allocator,
                                     bool deferred);
 };
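
Call sites in TensorFlow typically guard the record functions above with `IsEnabled()`; a minimal sketch, with the locals assumed:

```cpp
#include "tensorflow/core/framework/log_memory.h"
#include "tensorflow/core/framework/tensor.h"

void MaybeLogAllocation(const std::string& kernel_name,
                        tensorflow::int64 step_id, const tensorflow::Tensor& t) {
  if (tensorflow::LogMemory::IsEnabled()) {
    tensorflow::LogMemory::RecordTensorAllocation(kernel_name, step_id, t);
  }
}
```
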
diff --git a/tensorflow/core/framework/node_def_util.h b/tensorflow/core/framework/node_def_util.h
index d937a8e51e1..d1a7c9aebba 100644
--- a/tensorflow/core/framework/node_def_util.h
+++ b/tensorflow/core/framework/node_def_util.h
@@ -62,16 +62,16 @@ extern const char* const kColocationGroupPrefix;
 // The parameter `max_inputs_in_summary` specifies how many inputs at most to
 // serialize in the output (in order not to get a string which is overly large).
 // The value `-1` specifies that all inputs will be shown.
-string SummarizeNodeDef(const NodeDef& node_def,
-                        int max_inputs_in_summary = -1);
-string SummarizeAttrs(const NodeDef& node_def);
-string SummarizeAttrsHelper(AttrSlice attrs, StringPiece device);
+std::string SummarizeNodeDef(const NodeDef& node_def,
+                             int max_inputs_in_summary = -1);
+std::string SummarizeAttrs(const NodeDef& node_def);
+std::string SummarizeAttrsHelper(AttrSlice attrs, StringPiece device);
 
 // Produces a formatted string pattern from the node which can uniquely identify
 // this node upstream to produce an informative error message. The pattern
 // followed is: {{node <node_name>}}
-string FormatNodeDefForError(const NodeDef& node_def);
-string FormatNodeDefForError(
+std::string FormatNodeDefForError(const NodeDef& node_def);
+std::string FormatNodeDefForError(
     StringPiece node_name, bool has_experimental_debug_info,
     const NodeDef_ExperimentalDebugInfo& experimental_debug_info);
 
@@ -148,7 +148,7 @@ class AttrSlice {
   // Returns the attr with attr_name if found.  Otherwise, returns
   // nullptr.
   const AttrValue* Find(StringPiece attr_name) const;
-  const AttrValue* FindByString(const string& attr_name) const;
+  const AttrValue* FindByString(const std::string& attr_name) const;
 
   // Returns the attr_value for attr_name if found. Otherwise, returns a
   // NotFound status.
@@ -157,8 +157,8 @@ class AttrSlice {
   // Helper class to avoid allocations in EqualAttrs.
   // TODO(irving): Will go away once NodeInfo is used.
   struct Scratch {
-    string a;
-    string b;
+    std::string a;
+    std::string b;
   };
 
   // Check if all attrs and attr values match.  Does not take defaults into
@@ -175,13 +175,13 @@ class AttrSlice {
   // If this AttrSlice has an attached NodeDef, summarize it.  This is for
   // error messages only: we intentionally do not provide direct access to the
   // NodeDef, since it is not always there.
-  string SummarizeNode() const;
+  std::string SummarizeNode() const;
 
   // Iteration over all attrs
   AttrValueMap::const_iterator begin() const { return attrs_->begin(); }
   AttrValueMap::const_iterator end() const { return attrs_->end(); }
 
-  string DebugString() const;
+  std::string DebugString() const;
 
  private:
   const NodeDef* ndef_;
@@ -195,7 +195,7 @@ bool HasNodeAttr(const NodeDef& node_def, StringPiece attr_name);
 // attr with attr_name is found in node_def, or the attr does not have
 // a matching type, a non-ok status will be returned.
 Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
-                   string* value);  // type: "string"
+                   std::string* value);  // type: "string"
 Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
                    tstring* value);  // type: "tstring"
 Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
@@ -266,7 +266,7 @@ Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
 // attr with attr_name is found in node_def, or the attr does not have
 // a matching type, false is returned.
 bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
-                    string* value);  // type: "string"
+                    std::string* value);  // type: "string"
 bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
                     int64* value);  // type: "int"
 bool TryGetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
@@ -309,7 +309,8 @@ bool TryGetNodeAttr(
 // If no attr with attr_name is found in node_def, or the attr does not have
 // a matching type, a reference to an empty string is returned.
 // REQUIRES: Must not use the returned value beyond the lifetime of node_def.
-const string& GetNodeAttrString(const AttrSlice& attrs, StringPiece attr_name);
+const std::string& GetNodeAttrString(const AttrSlice& attrs,
+                                     StringPiece attr_name);
 
 // Specialization to parse an attribute directly into a Padding enum.
 Status GetNodeAttr(const AttrSlice& attrs, StringPiece attr_name,
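
A sketch of the two attr-reading styles whose `string*` out-parameters change above; the `"padding"` and `"data_format"` attr names are only examples:

```cpp
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/platform/errors.h"

tensorflow::Status ReadAttrs(const tensorflow::NodeDef& node_def) {
  tensorflow::AttrSlice attrs(node_def);

  std::string padding;
  // Status-returning form: fails if the attr is missing or not a string.
  TF_RETURN_IF_ERROR(tensorflow::GetNodeAttr(attrs, "padding", &padding));

  std::string data_format;
  // Bool-returning form: lets the caller fall back to a default.
  if (!tensorflow::TryGetNodeAttr(attrs, "data_format", &data_format)) {
    data_format = "NHWC";
  }
  return tensorflow::Status::OK();
}
```
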
diff --git a/tensorflow/core/framework/op.h b/tensorflow/core/framework/op.h
index 86bc70448d2..adc52d963c9 100644
--- a/tensorflow/core/framework/op.h
+++ b/tensorflow/core/framework/op.h
@@ -45,11 +45,12 @@ class OpRegistryInterface {
   // Returns an error status and sets *op_reg_data to nullptr if no OpDef is
   // registered under that name, otherwise returns the registered OpDef.
   // Caller must not delete the returned pointer.
-  virtual Status LookUp(const string& op_type_name,
+  virtual Status LookUp(const std::string& op_type_name,
                         const OpRegistrationData** op_reg_data) const = 0;
 
   // Shorthand for calling LookUp to get the OpDef.
-  Status LookUpOpDef(const string& op_type_name, const OpDef** op_def) const;
+  Status LookUpOpDef(const std::string& op_type_name,
+                     const OpDef** op_def) const;
 };
 
 // The standard implementation of OpRegistryInterface, along with a
@@ -71,11 +72,11 @@ class OpRegistry : public OpRegistryInterface {
 
   void Register(const OpRegistrationDataFactory& op_data_factory);
 
-  Status LookUp(const string& op_type_name,
+  Status LookUp(const std::string& op_type_name,
                 const OpRegistrationData** op_reg_data) const override;
 
   // Returns OpRegistrationData* of registered op type, else returns nullptr.
-  const OpRegistrationData* LookUp(const string& op_type_name) const;
+  const OpRegistrationData* LookUp(const std::string& op_type_name) const;
 
   // Fills *ops with all registered OpDefs (except those with names
   // starting with '_' if include_internal == false) sorted in
@@ -84,7 +85,7 @@ class OpRegistry : public OpRegistryInterface {
 
   // Returns ASCII-format OpList for all registered OpDefs (except
   // those with names starting with '_' if include_internal == false).
-  string DebugString(bool include_internal) const;
+  std::string DebugString(bool include_internal) const;
 
   // A singleton available at startup.
   static OpRegistry* Global();
@@ -153,7 +154,7 @@ class OpRegistry : public OpRegistryInterface {
   Status RegisterAlreadyLocked(const OpRegistrationDataFactory& op_data_factory)
       const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
 
-  const OpRegistrationData* LookUpSlow(const string& op_type_name) const;
+  const OpRegistrationData* LookUpSlow(const std::string& op_type_name) const;
 
   mutable mutex mu_;
   // Functions in deferred_ may only be called with mu_ held.
@@ -179,11 +180,11 @@ class OpListOpRegistry : public OpRegistryInterface {
   // Does not take ownership of op_list; *op_list must outlive *this.
   explicit OpListOpRegistry(const OpList* op_list);
   ~OpListOpRegistry() override;
-  Status LookUp(const string& op_type_name,
+  Status LookUp(const std::string& op_type_name,
                 const OpRegistrationData** op_reg_data) const override;
 
   // Returns OpRegistrationData* of op type in list, else returns nullptr.
-  const OpRegistrationData* LookUp(const string& op_type_name) const;
+  const OpRegistrationData* LookUp(const std::string& op_type_name) const;
 
  private:
   // Values are owned.
@@ -225,15 +226,15 @@ template <>
 class OpDefBuilderWrapper<true> {
  public:
   explicit OpDefBuilderWrapper(const char name[]) : builder_(name) {}
-  OpDefBuilderWrapper<true>& Attr(string spec) {
+  OpDefBuilderWrapper<true>& Attr(std::string spec) {
     builder_.Attr(std::move(spec));
     return *this;
   }
-  OpDefBuilderWrapper<true>& Input(string spec) {
+  OpDefBuilderWrapper<true>& Input(std::string spec) {
     builder_.Input(std::move(spec));
     return *this;
   }
-  OpDefBuilderWrapper<true>& Output(string spec) {
+  OpDefBuilderWrapper<true>& Output(std::string spec) {
     builder_.Output(std::move(spec));
     return *this;
   }
@@ -259,11 +260,11 @@ class OpDefBuilderWrapper<true> {
     builder_.SetAllowsUninitializedInput();
     return *this;
   }
-  OpDefBuilderWrapper<true>& Deprecated(int version, string explanation) {
+  OpDefBuilderWrapper<true>& Deprecated(int version, std::string explanation) {
     builder_.Deprecated(version, std::move(explanation));
     return *this;
   }
-  OpDefBuilderWrapper<true>& Doc(string text) {
+  OpDefBuilderWrapper<true>& Doc(std::string text) {
     builder_.Doc(std::move(text));
     return *this;
   }
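
For reference, the lookup path these declarations serve, via the global registry; `"MatMul"` is just an example of a registered op:

```cpp
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"

tensorflow::Status LookUpMatMul() {
  const tensorflow::OpDef* op_def = nullptr;
  // LookUpOpDef is the shorthand over LookUp() declared above.
  TF_RETURN_IF_ERROR(
      tensorflow::OpRegistry::Global()->LookUpOpDef("MatMul", &op_def));
  LOG(INFO) << "found op: " << op_def->name();
  return tensorflow::Status::OK();
}
```
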
diff --git a/tensorflow/core/framework/op_def_builder.h b/tensorflow/core/framework/op_def_builder.h
index aab0c63636e..b69ee46cd59 100644
--- a/tensorflow/core/framework/op_def_builder.h
+++ b/tensorflow/core/framework/op_def_builder.h
@@ -53,7 +53,7 @@ struct OpRegistrationData {
 class OpDefBuilder {
  public:
   // Constructs an OpDef with just the name field set.
-  explicit OpDefBuilder(string op_name);
+  explicit OpDefBuilder(std::string op_name);
 
   // Adds an attr to this OpDefBuilder (and returns *this). The spec has
   // format "<name>:<type>" or "<name>:<type>=<default>"
@@ -86,7 +86,7 @@ class OpDefBuilder {
   // * Ability to restrict the type of the tensor like the existing
   //   restrictions for type attrs.
   // Perhaps by linking the type of the tensor to a type attr?
-  OpDefBuilder& Attr(string spec);
+  OpDefBuilder& Attr(std::string spec);
 
   // Adds an input or output to this OpDefBuilder (and returns *this).
   // The spec has form "<name>:<type-expr>" or "<name>:Ref(<type-expr>)"
@@ -103,8 +103,8 @@ class OpDefBuilder {
   // in the spec?
   // TODO(josh11b): SparseInput() and SparseOutput() matching the Python
   // handling?
-  OpDefBuilder& Input(string spec);
-  OpDefBuilder& Output(string spec);
+  OpDefBuilder& Input(std::string spec);
+  OpDefBuilder& Output(std::string spec);
 
   // Turns on the indicated boolean flag in this OpDefBuilder (and
   // returns *this).
@@ -114,7 +114,7 @@ class OpDefBuilder {
   OpDefBuilder& SetAllowsUninitializedInput();
 
   // Deprecate the op at a certain GraphDef version.
-  OpDefBuilder& Deprecated(int version, string explanation);
+  OpDefBuilder& Deprecated(int version, std::string explanation);
 
   // Adds docs to this OpDefBuilder (and returns *this).
   // Docs have the format:
@@ -130,7 +130,7 @@ class OpDefBuilder {
   // to suppress the automatically-generated type documentation in
   // generated output.
 #ifndef TF_LEAN_BINARY
-  OpDefBuilder& Doc(string text);
+  OpDefBuilder& Doc(std::string text);
 #else
   OpDefBuilder& Doc(string text) { return *this; }
 #endif
@@ -157,7 +157,7 @@ class OpDefBuilder {
   // Adds control output to this OpDefBuilder (and returns *this).
   // The <name> must be a valid node name (matches regexp
   // [a-zA-Z][a-zA-Z0-9_]*). Named control output can only exist for functions.
-  OpDefBuilder& ControlOutput(string name);
+  OpDefBuilder& ControlOutput(std::string name);
 
   OpDef* op_def() { return &op_reg_data_.op_def; }
 
@@ -166,7 +166,7 @@ class OpDefBuilder {
   std::vector<string> inputs_;
   std::vector<string> outputs_;
   std::vector<string> control_outputs_;
-  string doc_;
+  std::string doc_;
   std::vector<string> errors_;
 };
 
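A minimal sketch of the builder chain whose `spec` parameters now take `std::string` by value (so call sites can move into them, as the `std::move(spec)` calls in op.h show); the op name and specs are hypothetical, and `Finalize` is assumed from the unmodified part of this header:

```cpp
#include "tensorflow/core/framework/op_def_builder.h"

tensorflow::Status BuildExampleOp(tensorflow::OpRegistrationData* out) {
  return tensorflow::OpDefBuilder("ExampleOp")
      .Input("x: T")
      .Output("y: T")
      .Attr("T: {float, double} = DT_FLOAT")
      .Doc("Illustrative op; nothing in this patch registers it.")
      .Finalize(out);
}
```
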
diff --git a/tensorflow/core/framework/op_def_util.h b/tensorflow/core/framework/op_def_util.h
index 311e40afeea..4a4a2e8e897 100644
--- a/tensorflow/core/framework/op_def_util.h
+++ b/tensorflow/core/framework/op_def_util.h
@@ -54,7 +54,7 @@ const ApiDef::Arg* FindInputArg(StringPiece name, const ApiDef& api_def);
 
 // Produce a human-readable version of an op_def that is more concise
 // than a text-format proto.  Excludes descriptions.
-string SummarizeOpDef(const OpDef& op_def);
+std::string SummarizeOpDef(const OpDef& op_def);
 
 // Returns an error if new_op is not backwards-compatible with (more
 // accepting than) old_op.
diff --git a/tensorflow/core/framework/op_kernel.h b/tensorflow/core/framework/op_kernel.h
index b4302999511..3bfcedaee82 100644
--- a/tensorflow/core/framework/op_kernel.h
+++ b/tensorflow/core/framework/op_kernel.h
@@ -145,14 +145,16 @@ class OpKernel {
 
   // Accessors.
   const NodeDef& def() const { return props_->node_def; }
-  const string& name() const { return props_->node_def.name(); }
+  const std::string& name() const { return props_->node_def.name(); }
   absl::string_view name_view() const { return name_view_; }
-  const string& type_string() const { return props_->node_def.op(); }
+  const std::string& type_string() const { return props_->node_def.op(); }
   absl::string_view type_string_view() const { return type_string_view_; }
-  const string& requested_input(int i) const {
+  const std::string& requested_input(int i) const {
     return props_->node_def.input(i);
   }
-  const string& requested_device() const { return props_->node_def.device(); }
+  const std::string& requested_device() const {
+    return props_->node_def.device();
+  }
 
   int num_inputs() const { return props_->input_types.size(); }
   DataType input_type(int i) const { return props_->input_types[i]; }
@@ -177,10 +179,11 @@ class OpKernel {
   // Returns a trace string for current computation, op name/type and input
   // tensor shape/dtype are encoded for profiler cost analysis. Most OpKernel
   // should use the default implementation.
-  virtual string TraceString(const OpKernelContext& ctx, bool verbose) const;
+  virtual std::string TraceString(const OpKernelContext& ctx,
+                                  bool verbose) const;
 
  protected:
-  string ShapeTraceString(const OpKernelContext& ctx) const;
+  std::string ShapeTraceString(const OpKernelContext& ctx) const;
 
  private:
   const std::shared_ptr<const NodeProperties> props_;
@@ -652,7 +655,7 @@ class OpKernelContext {
     SessionState* session_state = nullptr;
 
     // Unique session identifier. Can be empty.
-    string session_handle;
+    std::string session_handle;
 
     // Metadata about the session. Can be nullptr.
     const SessionMetadata* session_metadata = nullptr;
@@ -684,7 +687,7 @@ class OpKernelContext {
     StepStatsCollectorInterface* stats_collector = nullptr;
     GraphCollector* graph_collector = nullptr;
     bool run_all_kernels_inline = false;
-    const string* executor_type = nullptr;
+    const std::string* executor_type = nullptr;
 
     // TensorSliceReaderCache support.
     checkpoint::TensorSliceReaderCacheWrapper* slice_reader_cache = nullptr;
@@ -826,7 +829,7 @@ class OpKernelContext {
 
   // Returns the registered name for the executor type that is executing the
   // current kernel. If empty, the default executor is used.
-  const string& executor_type() const;
+  const std::string& executor_type() const;
 
   // Input to output forwarding.
 
@@ -1100,7 +1103,7 @@ class OpKernelContext {
   SessionState* session_state() const { return params_->session_state; }
 
   // Unique identifier of the session it belongs to. Can be empty.
-  string session_handle() const { return params_->session_handle; }
+  std::string session_handle() const { return params_->session_handle; }
 
   // Metadata about the session. Can be nullptr.
   const SessionMetadata* session_metadata() const {
@@ -1405,7 +1408,7 @@ Status SupportedDeviceTypesForNode(
 
 // Returns a message with a description of the kernels registered for op
 // `op_name`.
-string KernelsRegisteredForOp(StringPiece op_name);
+std::string KernelsRegisteredForOp(StringPiece op_name);
 
 // Call once after Op registration has completed.
 Status ValidateKernelRegistrations(const OpRegistryInterface& op_registry);
@@ -1497,13 +1500,13 @@ Status FindKernelDef(
     bool has_experimental_debug_info,
     const NodeDef_ExperimentalDebugInfo& experimental_debug_info,
     StringPiece node_op, StringPiece node_device, AttrSlice node_attrs,
-    const KernelDef** def, string* kernel_class_name);
+    const KernelDef** def, std::string* kernel_class_name);
 
 // If node_def has a corresponding kernel registered on device_type,
 // returns OK and fills in the kernel def and kernel_class_name. <def> and
 // <kernel_class_name> may be null.
 Status FindKernelDef(const DeviceType& device_type, const NodeDef& node_def,
-                     const KernelDef** def, string* kernel_class_name);
+                     const KernelDef** def, std::string* kernel_class_name);
 
 // Writes a list of all registered kernels to LOG(INFO), to help users debug
 // missing kernel errors.
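
A sketch of the second `FindKernelDef` overload above, which fills the now-`std::string` kernel class name; the device type and `node_def` are assumed inputs:

```cpp
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/platform/errors.h"

tensorflow::Status LookupKernel(const tensorflow::NodeDef& node_def) {
  const tensorflow::KernelDef* kdef = nullptr;
  std::string kernel_class_name;
  TF_RETURN_IF_ERROR(tensorflow::FindKernelDef(
      tensorflow::DeviceType("CPU"), node_def, &kdef, &kernel_class_name));
  LOG(INFO) << "kernel class: " << kernel_class_name;
  return tensorflow::Status::OK();
}
```
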
diff --git a/tensorflow/core/framework/op_segment.h b/tensorflow/core/framework/op_segment.h
index ab3ef6009b3..9a6f6e9664b 100644
--- a/tensorflow/core/framework/op_segment.h
+++ b/tensorflow/core/framework/op_segment.h
@@ -46,8 +46,8 @@ class OpSegment {
 
   // A hold can be placed on a session, preventing all its kernels
   // from being deleted.
-  void AddHold(const string& session_handle);
-  void RemoveHold(const string& session_handle);
+  void AddHold(const std::string& session_handle);
+  void RemoveHold(const std::string& session_handle);
 
   // If the kernel for "node_name" has been created in the
   // "session_handle", returns the existing op kernel in "*kernel".
@@ -57,12 +57,13 @@ class OpSegment {
   //
   // OpSegment keeps the ownership of the returned "*kernel".
   typedef std::function<Status(OpKernel**)> CreateKernelFn;
-  Status FindOrCreate(const string& session_handle, const string& node_name,
-                      OpKernel** kernel, CreateKernelFn create_fn);
+  Status FindOrCreate(const std::string& session_handle,
+                      const std::string& node_name, OpKernel** kernel,
+                      CreateKernelFn create_fn);
 
   // Returns true if OpSegment should own the kernel.
   static bool ShouldOwnKernel(FunctionLibraryRuntime* lib,
-                              const string& node_op);
+                              const std::string& node_op);
 
  private:
   // op name -> OpKernel
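
How `FindOrCreate` is typically called with the updated signature; `MakeKernel` is a hypothetical factory standing in for real kernel construction:

```cpp
#include "tensorflow/core/framework/op_segment.h"

// Hypothetical factory, defined elsewhere; not part of this patch.
tensorflow::Status MakeKernel(tensorflow::OpKernel** out);

tensorflow::Status GetKernel(tensorflow::OpSegment* segment,
                             const std::string& session_handle,
                             const std::string& node_name) {
  tensorflow::OpKernel* kernel = nullptr;
  // create_fn runs only on a cache miss; OpSegment keeps ownership of *kernel.
  return segment->FindOrCreate(session_handle, node_name, &kernel,
                               [](tensorflow::OpKernel** out) {
                                 return MakeKernel(out);
                               });
}
```
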
diff --git a/tensorflow/core/framework/ops_util.h b/tensorflow/core/framework/ops_util.h
index b323109abfc..aaf2361cc9d 100644
--- a/tensorflow/core/framework/ops_util.h
+++ b/tensorflow/core/framework/ops_util.h
@@ -81,7 +81,7 @@ bool IsDim0SliceAligned(const TensorShape& s, int64 start, int64 end_or_size) {
 }
 
 // Returns <suffix> sanitized to have only [a-zA-Z0-9-_].
-string SanitizeThreadSuffix(string suffix);
+std::string SanitizeThreadSuffix(std::string suffix);
 
 // Helper to compute 'strides' given a tensor 'shape'. I.e.,
 // strides[i] = prod(shape.dim_size[(i+1):])
diff --git a/tensorflow/core/framework/rendezvous.h b/tensorflow/core/framework/rendezvous.h
index ccd6d102b5e..d59bbb2809e 100644
--- a/tensorflow/core/framework/rendezvous.h
+++ b/tensorflow/core/framework/rendezvous.h
@@ -74,7 +74,7 @@ class RendezvousInterface {
     friend class Rendezvous;
     friend class SendOp;
     friend class RecvOp;
-    string buf_;
+    std::string buf_;
   };
 
   // The caller is a tensor producer and it sends a message (a tensor
@@ -169,9 +169,11 @@ class Rendezvous : public RendezvousInterface, public core::RefCounted {
   // Constructs a rendezvous key for the tensor of "name" sent from
   // "src_device" to "dst_device". The tensor is generated in the frame
   // and iteration specified by "frame_iter".
-  static string CreateKey(const string& src_device, uint64 src_incarnation,
-                          const string& dst_device, const string& name,
-                          const FrameAndIter& frame_iter);
+  static std::string CreateKey(const std::string& src_device,
+                               uint64 src_incarnation,
+                               const std::string& dst_device,
+                               const std::string& name,
+                               const FrameAndIter& frame_iter);
 
   static Status ParseKey(StringPiece key, ParsedKey* out);
 };
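
A round-trip sketch through the reformatted `CreateKey` above; the device names, tensor name, and `FrameAndIter(0, 0)` are illustrative values:

```cpp
#include "tensorflow/core/framework/rendezvous.h"
#include "tensorflow/core/platform/logging.h"

void KeyDemo() {
  std::string key = tensorflow::Rendezvous::CreateKey(
      "/job:worker/replica:0/task:0/device:CPU:0", /*src_incarnation=*/1,
      "/job:worker/replica:0/task:0/device:GPU:0", "edge_1_tensor",
      tensorflow::FrameAndIter(0, 0));
  tensorflow::Rendezvous::ParsedKey parsed;
  CHECK(tensorflow::Rendezvous::ParseKey(key, &parsed).ok());
}
```
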
diff --git a/tensorflow/core/framework/resource_mgr.h b/tensorflow/core/framework/resource_mgr.h
index 3af8d81b0dc..758837e017a 100644
--- a/tensorflow/core/framework/resource_mgr.h
+++ b/tensorflow/core/framework/resource_mgr.h
@@ -79,7 +79,7 @@ namespace tensorflow {
 class ResourceBase : public core::RefCounted {
  public:
   // Returns a debug string for *this.
-  virtual string DebugString() const = 0;
+  virtual std::string DebugString() const = 0;
 
   // Returns memory used by this resource.
   virtual int64 MemoryUsed() const { return 0; }
@@ -100,7 +100,7 @@ class ScopedStepContainer {
 
   ScopedStepContainer(const int64 step_id,
                       std::function<void(const string&)> cleanup,
-                      const string& prefix)
+                      const std::string& prefix)
       : container_(strings::StrCat("__", prefix, "_per_step_", step_id)),
         cleanup_(cleanup),
         dirty_(false) {}
@@ -125,25 +125,25 @@ class ScopedStepContainer {
   // Pass through to MakeResourceHandle with the container name
   template <typename T>
   ResourceHandle MakeResourceHandle(
-      const string& name, const DeviceBase& device) TF_MUST_USE_RESULT;
+      const std::string& name, const DeviceBase& device) TF_MUST_USE_RESULT;
   // Pass through to ResourceMgr::Create with the container name
   template <typename T>
-  Status Create(ResourceMgr* rm, const string& name,
+  Status Create(ResourceMgr* rm, const std::string& name,
                 T* resource) TF_MUST_USE_RESULT;
   // Pass through to ResourceMgr::Delete with the container name
   template <typename T>
-  Status Delete(ResourceMgr* rm, const string& name) TF_MUST_USE_RESULT;
+  Status Delete(ResourceMgr* rm, const std::string& name) TF_MUST_USE_RESULT;
   // Pass through to ResourceMgr::Lookup with the container name
   template <typename T>
-  Status Lookup(ResourceMgr* rm, const string& name,
+  Status Lookup(ResourceMgr* rm, const std::string& name,
                 T** resource) const TF_MUST_USE_RESULT;
   // Pass through to ResourceMgr::LookupOrCreate with the container name
   template <typename T>
-  Status LookupOrCreate(ResourceMgr* rm, const string& name, T** resource,
+  Status LookupOrCreate(ResourceMgr* rm, const std::string& name, T** resource,
                         std::function<Status(T**)> creator) TF_MUST_USE_RESULT;
 
  private:
-  const string container_;
+  const std::string container_;
   const std::function<void(const string&)> cleanup_;
   mutex mu_;
   mutable std::atomic<bool> dirty_ TF_GUARDED_BY(mu_);
@@ -152,11 +152,11 @@ class ScopedStepContainer {
 class ResourceMgr {
  public:
   ResourceMgr();
-  explicit ResourceMgr(const string& default_container);
+  explicit ResourceMgr(const std::string& default_container);
   ~ResourceMgr();
 
   // Returns the default container name for *this.
-  const string& default_container() const { return default_container_; }
+  const std::string& default_container() const { return default_container_; }
 
   // Creates a resource "name" in the "container".  The caller transfers
   // the ownership of one ref on "resource" to *this, regardless of whether this
@@ -165,7 +165,7 @@ class ResourceMgr {
   // REQUIRES: std::is_base_of<ResourceBase, T>
   // REQUIRES: resource != nullptr.
   template <typename T>
-  Status Create(const string& container, const string& name,
+  Status Create(const std::string& container, const std::string& name,
                 T* resource) TF_MUST_USE_RESULT;
 
   // If "container" has a resource "name", returns it in "*resource" and
@@ -174,7 +174,7 @@ class ResourceMgr {
   // REQUIRES: std::is_base_of<ResourceBase, T>
   // REQUIRES: resource != nullptr
   template <typename T, bool use_dynamic_cast = false>
-  Status Lookup(const string& container, const string& name,
+  Status Lookup(const std::string& container, const std::string& name,
                 T** resource) const TF_MUST_USE_RESULT;
 
   // Similar to Lookup, but looks up multiple resources at once, with only a
@@ -197,7 +197,7 @@ class ResourceMgr {
   // REQUIRES: std::is_base_of<ResourceBase, T>
   // REQUIRES: resource != nullptr
   template <typename T, bool use_dynamic_cast = false>
-  Status LookupOrCreate(const string& container, const string& name,
+  Status LookupOrCreate(const std::string& container, const std::string& name,
                         T** resource,
                         std::function<Status(T**)> creator) TF_MUST_USE_RESULT;
 
@@ -205,19 +205,20 @@ class ResourceMgr {
   //
   // REQUIRES: std::is_base_of<ResourceBase, T>
   template <typename T>
-  Status Delete(const string& container, const string& name) TF_MUST_USE_RESULT;
+  Status Delete(const std::string& container,
+                const std::string& name) TF_MUST_USE_RESULT;
 
   // Deletes the resource pointed to by "handle".
   Status Delete(const ResourceHandle& handle) TF_MUST_USE_RESULT;
 
   // Deletes all resources from the "container" and removes the container.
-  Status Cleanup(const string& container) TF_MUST_USE_RESULT;
+  Status Cleanup(const std::string& container) TF_MUST_USE_RESULT;
 
   // Deletes all resources in all containers.
   void Clear();
 
   // Returns a text description for all resources.
-  string DebugString() const;
+  std::string DebugString() const;
 
  private:
   typedef std::pair<uint64, StringPiece> Key;
@@ -236,7 +237,7 @@ class ResourceMgr {
     std::unique_ptr<string> name;
 
     ResourceAndName();
-    ResourceAndName(ResourceBase* resource, string name);
+    ResourceAndName(ResourceBase* resource, std::string name);
     ResourceAndName(ResourceAndName&& other) noexcept;
     ~ResourceAndName();
 
@@ -247,31 +248,31 @@ class ResourceMgr {
   };
   typedef std::unordered_map<Key, ResourceAndName, KeyHash, KeyEqual> Container;
 
-  const string default_container_;
+  const std::string default_container_;
   mutable mutex mu_;
   std::unordered_map<string, Container*> containers_ TF_GUARDED_BY(mu_);
 
   template <typename T, bool use_dynamic_cast = false>
-  Status LookupInternal(const string& container, const string& name,
+  Status LookupInternal(const std::string& container, const std::string& name,
                         T** resource) const
       TF_SHARED_LOCKS_REQUIRED(mu_) TF_MUST_USE_RESULT;
 
-  Status DoCreate(const string& container, TypeIndex type, const string& name,
-                  ResourceBase* resource)
+  Status DoCreate(const std::string& container, TypeIndex type,
+                  const std::string& name, ResourceBase* resource)
       TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) TF_MUST_USE_RESULT;
 
-  Status DoLookup(const string& container, TypeIndex type, const string& name,
-                  ResourceBase** resource) const
+  Status DoLookup(const std::string& container, TypeIndex type,
+                  const std::string& name, ResourceBase** resource) const
       TF_SHARED_LOCKS_REQUIRED(mu_) TF_MUST_USE_RESULT;
 
-  Status DoDelete(const string& container, uint64 type_hash_code,
-                  const string& resource_name,
-                  const string& type_name) TF_MUST_USE_RESULT;
-  Status DoDelete(const string& container, TypeIndex type,
-                  const string& resource_name) TF_MUST_USE_RESULT;
+  Status DoDelete(const std::string& container, uint64 type_hash_code,
+                  const std::string& resource_name,
+                  const std::string& type_name) TF_MUST_USE_RESULT;
+  Status DoDelete(const std::string& container, TypeIndex type,
+                  const std::string& resource_name) TF_MUST_USE_RESULT;
 
   // Inserts the type name for 'hash_code' into the hash_code to type name map.
-  Status InsertDebugTypeName(uint64 hash_code, const string& type_name)
+  Status InsertDebugTypeName(uint64 hash_code, const std::string& type_name)
       TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) TF_MUST_USE_RESULT;
 
   // Returns the type name for the 'hash_code'.
@@ -289,14 +290,14 @@ class ResourceMgr {
 // Makes a resource handle with the specified type for a given container /
 // name.
 ResourceHandle MakeResourceHandle(
-    const string& container, const string& name, const DeviceBase& device,
-    const TypeIndex& type_index,
+    const std::string& container, const std::string& name,
+    const DeviceBase& device, const TypeIndex& type_index,
     const std::vector<DtypeAndPartialTensorShape>& dtypes_and_shapes = {})
     TF_MUST_USE_RESULT;
 
 template <typename T>
 ResourceHandle MakeResourceHandle(
-    OpKernelContext* ctx, const string& container, const string& name,
+    OpKernelContext* ctx, const std::string& container, const std::string& name,
     const std::vector<DtypeAndPartialTensorShape>& dtypes_and_shapes = {}) {
   return MakeResourceHandle(
       container.empty() ? ctx->resource_manager()->default_container()
@@ -306,7 +307,8 @@ ResourceHandle MakeResourceHandle(
 
 template <typename T>
 ResourceHandle MakeResourceHandle(
-    OpKernelConstruction* ctx, const string& container, const string& name,
+    OpKernelConstruction* ctx, const std::string& container,
+    const std::string& name,
     const std::vector<DtypeAndPartialTensorShape>& dtypes_and_shapes = {}) {
   return MakeResourceHandle(
       container.empty() ? ctx->resource_manager()->default_container()
@@ -315,7 +317,8 @@ ResourceHandle MakeResourceHandle(
 }
 
 Status MakeResourceHandleToOutput(OpKernelContext* context, int output_index,
-                                  const string& container, const string& name,
+                                  const std::string& container,
+                                  const std::string& name,
                                   const TypeIndex& type_index);
 
 // Returns a resource handle from a numbered op input.
@@ -409,19 +412,19 @@ class ContainerInfo {
   // name is name().  If resource_is_private_to_kernel() is true, the
   // kernel should delete the resource when the kernel is deleted.
   ResourceMgr* resource_manager() const { return rmgr_; }
-  const string& container() const { return container_; }
-  const string& name() const { return name_; }
+  const std::string& container() const { return container_; }
+  const std::string& name() const { return name_; }
   bool resource_is_private_to_kernel() const {
     return resource_is_private_to_kernel_;
   }
 
   // Returns a readable string for *this.
-  string DebugString() const;
+  std::string DebugString() const;
 
  private:
   ResourceMgr* rmgr_ = nullptr;
-  string container_;
-  string name_;
+  std::string container_;
+  std::string name_;
   bool resource_is_private_to_kernel_ = false;
 };
 
@@ -435,8 +438,8 @@ class ContainerInfo {
 // Returns OK if the resource is found and transfers one ref of
 // *resource to the caller. Otherwise, returns an error.
 template <typename T>
-Status GetResourceFromContext(OpKernelContext* ctx, const string& input_name,
-                              T** resource);
+Status GetResourceFromContext(OpKernelContext* ctx,
+                              const std::string& input_name, T** resource);
 
 // Utility op kernel to check if a handle to resource type T is initialized.
 template <typename T>
@@ -470,8 +473,8 @@ class ResourceHandleOp : public OpKernel {
   bool IsExpensive() override { return false; }
 
  private:
-  string container_;
-  string name_;
+  std::string container_;
+  std::string name_;
   mutex mutex_;
   Tensor resource_;
   std::atomic<bool> initialized_{false};
@@ -584,8 +587,8 @@ void CheckDeriveFromResourceBase() {
 }
 
 template <typename T>
-Status ResourceMgr::Create(const string& container, const string& name,
-                           T* resource) {
+Status ResourceMgr::Create(const std::string& container,
+                           const std::string& name, T* resource) {
   CheckDeriveFromResourceBase<T>();
   CHECK(resource != nullptr);
   mutex_lock l(mu_);
@@ -593,8 +596,8 @@ Status ResourceMgr::Create(const string& container, const string& name,
 }
 
 template <typename T, bool use_dynamic_cast>
-Status ResourceMgr::Lookup(const string& container, const string& name,
-                           T** resource) const {
+Status ResourceMgr::Lookup(const std::string& container,
+                           const std::string& name, T** resource) const {
   CheckDeriveFromResourceBase<T>();
   tf_shared_lock l(mu_);
   return LookupInternal<T, use_dynamic_cast>(container, name, resource);
@@ -632,7 +635,8 @@ struct TypeCastFunctor<T, true> {
 };
 
 template <typename T, bool use_dynamic_cast>
-Status ResourceMgr::LookupInternal(const string& container, const string& name,
+Status ResourceMgr::LookupInternal(const std::string& container,
+                                   const std::string& name,
                                    T** resource) const {
   ResourceBase* found = nullptr;
   Status s = DoLookup(container, TypeIndex::Make<T>(), name, &found);
@@ -645,8 +649,8 @@ Status ResourceMgr::LookupInternal(const string& container, const string& name,
 }
 
 template <typename T, bool use_dynamic_cast>
-Status ResourceMgr::LookupOrCreate(const string& container, const string& name,
-                                   T** resource,
+Status ResourceMgr::LookupOrCreate(const std::string& container,
+                                   const std::string& name, T** resource,
                                    std::function<Status(T**)> creator) {
   CheckDeriveFromResourceBase<T>();
   *resource = nullptr;
@@ -669,14 +673,15 @@ Status ResourceMgr::LookupOrCreate(const string& container, const string& name,
 }
 
 template <typename T>
-Status ResourceMgr::Delete(const string& container, const string& name) {
+Status ResourceMgr::Delete(const std::string& container,
+                           const std::string& name) {
   CheckDeriveFromResourceBase<T>();
   return DoDelete(container, TypeIndex::Make<T>(), name);
 }
 
 template <typename T>
-Status GetResourceFromContext(OpKernelContext* ctx, const string& input_name,
-                              T** resource) {
+Status GetResourceFromContext(OpKernelContext* ctx,
+                              const std::string& input_name, T** resource) {
   DataType dtype;
   TF_RETURN_IF_ERROR(ctx->input_dtype(input_name, &dtype));
   if (dtype == DT_RESOURCE) {
@@ -684,8 +689,8 @@ Status GetResourceFromContext(OpKernelContext* ctx, const string& input_name,
     TF_RETURN_IF_ERROR(ctx->input(input_name, &handle));
     return LookupResource(ctx, handle->scalar<ResourceHandle>()(), resource);
   }
-  string container;
-  string shared_name;
+  std::string container;
+  std::string shared_name;
   {
     mutex* mu;
     TF_RETURN_IF_ERROR(ctx->input_ref_mutex(input_name, &mu));
@@ -879,7 +884,7 @@ void ResourceHandlesOp<T>::Compute(OpKernelContext* ctx) {
 
 template <typename T>
 ResourceHandle ScopedStepContainer::MakeResourceHandle(
-    const string& name, const DeviceBase& device) {
+    const std::string& name, const DeviceBase& device) {
   mutex_lock ml(mu_);
   dirty_ = true;
   return tensorflow::MakeResourceHandle(container_, name, device,
@@ -887,13 +892,14 @@ ResourceHandle ScopedStepContainer::MakeResourceHandle(
 }
 
 template <typename T>
-Status ScopedStepContainer::Lookup(ResourceMgr* rm, const string& name,
+Status ScopedStepContainer::Lookup(ResourceMgr* rm, const std::string& name,
                                    T** resource) const {
   return rm->Lookup<T>(container_, name, resource);
 }
 
 template <typename T>
-Status ScopedStepContainer::LookupOrCreate(ResourceMgr* rm, const string& name,
+Status ScopedStepContainer::LookupOrCreate(ResourceMgr* rm,
+                                           const std::string& name,
                                            T** resource,
                                            std::function<Status(T**)> creator) {
   mutex_lock ml(mu_);
@@ -902,7 +908,7 @@ Status ScopedStepContainer::LookupOrCreate(ResourceMgr* rm, const string& name,
 }
 
 template <typename T>
-Status ScopedStepContainer::Create(ResourceMgr* rm, const string& name,
+Status ScopedStepContainer::Create(ResourceMgr* rm, const std::string& name,
                                    T* resource) {
   mutex_lock ml(mu_);
   dirty_ = true;
@@ -910,7 +916,7 @@ Status ScopedStepContainer::Create(ResourceMgr* rm, const string& name,
 }
 
 template <typename T>
-Status ScopedStepContainer::Delete(ResourceMgr* rm, const string& name) {
+Status ScopedStepContainer::Delete(ResourceMgr* rm, const std::string& name) {
   return rm->Delete<T>(container_, name);
 }
 
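// Usage sketch for the retyped ResourceMgr API above; callers passing
// string literals or std::string values are unaffected by the switch.
// `MyCounter` and `UseCounter` are hypothetical names, and the
// ref-counting comments reflect ResourceMgr's usual contract (Create
// consumes one reference, Lookup returns a new one the caller must
// Unref).
class MyCounter : public ResourceBase {
 public:
  std::string DebugString() const override { return "MyCounter"; }
  int64 value = 0;
};

Status UseCounter(ResourceMgr* rm) {
  // Create transfers ownership of one reference to the manager.
  TF_RETURN_IF_ERROR(rm->Create("my_container", "counter", new MyCounter));
  MyCounter* counter = nullptr;
  // Lookup hands back a new reference; release it when done.
  TF_RETURN_IF_ERROR(rm->Lookup("my_container", "counter", &counter));
  counter->value++;
  counter->Unref();
  return rm->Delete<MyCounter>("my_container", "counter");
}
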
diff --git a/tensorflow/core/framework/resource_var.h b/tensorflow/core/framework/resource_var.h
index 39fe5bbff91..f4ae7d5de61 100644
--- a/tensorflow/core/framework/resource_var.h
+++ b/tensorflow/core/framework/resource_var.h
@@ -67,7 +67,7 @@ class Var : public ResourceBase {
   mutex* mu() { return &mu_; }
   Tensor* tensor() { return &tensor_; }
 
-  string DebugString() const override {
+  std::string DebugString() const override {
     return strings::StrCat(DataTypeString(tensor_.dtype()), "/",
                            tensor_.shape().DebugString());
   }
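// Small sketch of the DebugString rendering above: dtype and shape
// joined by "/", so a float variable of shape [2, 3] reports
// "float/[2,3]".
Var* var = new Var(DT_FLOAT);
*var->tensor() = Tensor(DT_FLOAT, TensorShape({2, 3}));
std::string s = var->DebugString();  // "float/[2,3]"
var->Unref();
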
diff --git a/tensorflow/core/framework/session_state.h b/tensorflow/core/framework/session_state.h
index 877c9970de4..ca0abd5b9d2 100644
--- a/tensorflow/core/framework/session_state.h
+++ b/tensorflow/core/framework/session_state.h
@@ -31,13 +31,13 @@ namespace tensorflow {
 class SessionState {
  public:
   // Get a tensor from the session state.
-  Status GetTensor(const string& handle, Tensor* tensor);
+  Status GetTensor(const std::string& handle, Tensor* tensor);
 
   // Store a tensor in the session state.
-  Status AddTensor(const string& handle, const Tensor& tensor);
+  Status AddTensor(const std::string& handle, const Tensor& tensor);
 
   // Delete a tensor from the session state.
-  Status DeleteTensor(const string& handle);
+  Status DeleteTensor(const std::string& handle);
 
   int64 GetNewId();
 
@@ -60,15 +60,15 @@ class TensorStore {
   struct TensorAndKey {
     Tensor tensor;
     int64 id;
-    string device_name;
+    std::string device_name;
 
-    string GetHandle(const string& tensor_name) {
+    std::string GetHandle(const std::string& tensor_name) {
       return strings::StrCat(tensor_name, ";", id, ";", device_name);
     }
   };
 
   // Add the named tensor to the tensor store for this run.
-  Status AddTensor(const string& name, const TensorAndKey& tk);
+  Status AddTensor(const std::string& name, const TensorAndKey& tk);
 
   // Save the tensors in the tensor store of this run to the session.
   Status SaveTensors(const std::vector<string>& output_names,
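// Sketch of the handle format GetHandle composes above
// (tensor_name;id;device_name); the values are illustrative.
TensorStore::TensorAndKey tk;
tk.id = 42;
tk.device_name = "/device:CPU:0";
std::string handle = tk.GetHandle("logits");  // "logits;42;/device:CPU:0"
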
diff --git a/tensorflow/core/framework/shape_inference.h b/tensorflow/core/framework/shape_inference.h
index 1ccaa8216ec..bb79b278cb1 100644
--- a/tensorflow/core/framework/shape_inference.h
+++ b/tensorflow/core/framework/shape_inference.h
@@ -344,13 +344,13 @@ class InferenceContext {
   // incomplete shape.
   DimensionHandle NumElements(ShapeHandle s);
 
-  string DebugString(ShapeHandle s);
-  string DebugString(DimensionHandle d);
-  string DebugString(const ShapeAndType& shape_and_type);
-  string DebugString(gtl::ArraySlice<ShapeAndType> shape_and_types);
+  std::string DebugString(ShapeHandle s);
+  std::string DebugString(DimensionHandle d);
+  std::string DebugString(const ShapeAndType& shape_and_type);
+  std::string DebugString(gtl::ArraySlice<ShapeAndType> shape_and_types);
 
   // Describes the whole context, for debugging purposes.
-  string DebugString() const;
+  std::string DebugString() const;
 
   // If <shape> has rank <rank>, or its rank is unknown, return OK and return
   // the shape with asserted rank in <*out>. Otherwise return an error.
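// Hedged sketch of a shape function exercising the retyped
// DebugString(ShapeHandle) overload and the WithRank contract described
// above. `MyMatMulShapeFn` is a hypothetical name.
Status MyMatMulShapeFn(shape_inference::InferenceContext* c) {
  shape_inference::ShapeHandle input;
  // Assert rank 2 (or unknown rank) on input 0, per the comment above.
  TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &input));
  VLOG(1) << "asserted shape: " << c->DebugString(input);
  c->set_output(0, input);
  return Status::OK();
}
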
diff --git a/tensorflow/core/framework/tracking_allocator.h b/tensorflow/core/framework/tracking_allocator.h
index ca18dc9a050..7b5b3914917 100644
--- a/tensorflow/core/framework/tracking_allocator.h
+++ b/tensorflow/core/framework/tracking_allocator.h
@@ -54,7 +54,7 @@ struct AllocRecord {
 class TrackingAllocator : public Allocator {
  public:
   explicit TrackingAllocator(Allocator* allocator, bool track_ids);
-  string Name() override { return allocator_->Name(); }
+  std::string Name() override { return allocator_->Name(); }
   void* AllocateRaw(size_t alignment, size_t num_bytes) override {
     return AllocateRaw(alignment, num_bytes, AllocationAttributes());
   }
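// Pass-through sketch: Name() above simply forwards to the wrapped
// allocator, so tracked and untracked allocations report the same name.
// Lifetime and stats retrieval for TrackingAllocator are omitted here.
Allocator* base = cpu_allocator();
TrackingAllocator* tracked = new TrackingAllocator(base, /*track_ids=*/false);
std::string name = tracked->Name();  // equals base->Name()
void* p = tracked->AllocateRaw(/*alignment=*/64, /*num_bytes=*/1024);
tracked->DeallocateRaw(p);
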
diff --git a/tensorflow/core/framework/variant.h b/tensorflow/core/framework/variant.h
index e8a0c332968..f67d94b48e2 100644
--- a/tensorflow/core/framework/variant.h
+++ b/tensorflow/core/framework/variant.h
@@ -32,10 +32,10 @@ limitations under the License.
 namespace tensorflow {
 
 template <typename T>
-string TypeNameVariant(const T& value);
+std::string TypeNameVariant(const T& value);
 
 template <typename T>
-string DebugStringVariant(const T& value);
+std::string DebugStringVariant(const T& value);
 
 // Allows for specializations of Variant Decoding.  `data` may be modified in
 // the process of decoding to `value`.
@@ -43,13 +43,13 @@ template <typename T>
 bool DecodeVariant(VariantTensorData* data, T* value);
 
 template <typename T>
-bool DecodeVariant(string* buf, T* value);
+bool DecodeVariant(std::string* buf, T* value);
 
 template <typename T>
 void EncodeVariant(const T& value, VariantTensorData* data);
 
 template <typename T>
-void EncodeVariant(const T& value, string* buf);
+void EncodeVariant(const T& value, std::string* buf);
 
 // This is an implementation of a type-erased container that can store an
 // object of any type. The implementation is very similar to std::any, but has
@@ -234,7 +234,7 @@ class Variant {
     return GetValue()->TypeId();
   }
 
-  string DebugString() const {
+  std::string DebugString() const {
     return strings::StrCat(
         "Variant<type: ", TypeName(),
         " value: ", is_empty() ? "[empty]" : GetValue()->DebugString(), ">");
@@ -264,7 +264,7 @@ class Variant {
   // In the special case that a serialized Variant is stored (value
   // is a VariantTensorDataProto), returns value.TypeName(), the
   // TypeName field stored in the VariantTensorDataProto buffer.
-  string TypeName() const {
+  std::string TypeName() const {
     if (is_empty()) {
       return "";
     }
@@ -282,12 +282,12 @@ class Variant {
   bool Decode(VariantTensorData data);
 
   // Helper methods to directly serialize/deserialize from strings.
-  void Encode(string* buf) const {
+  void Encode(std::string* buf) const {
     if (!is_empty()) {
       GetValue()->Encode(buf);
     }
   }
-  bool Decode(string buf) {
+  bool Decode(std::string buf) {
     if (!is_empty()) {
       return GetValue()->Decode(std::move(buf));
     }
@@ -313,12 +313,12 @@ class Variant {
     virtual void CloneInto(ValueInterface* memory) const = 0;
     virtual void MoveAssign(ValueInterface* memory) = 0;
     virtual void MoveInto(ValueInterface* memory) = 0;
-    virtual string TypeName() const = 0;
-    virtual string DebugString() const = 0;
+    virtual std::string TypeName() const = 0;
+    virtual std::string DebugString() const = 0;
     virtual void Encode(VariantTensorData* data) const = 0;
     virtual bool Decode(VariantTensorData data) = 0;
-    virtual void Encode(string* buf) const = 0;
-    virtual bool Decode(string data) = 0;
+    virtual void Encode(std::string* buf) const = 0;
+    virtual bool Decode(std::string data) = 0;
   };
 
   template <typename T>
@@ -359,9 +359,9 @@ class Variant {
       new (memory) Value(InPlace(), std::move(value));
     }
 
-    string TypeName() const final { return TypeNameVariant(value); }
+    std::string TypeName() const final { return TypeNameVariant(value); }
 
-    string DebugString() const final { return DebugStringVariant(value); }
+    std::string DebugString() const final { return DebugStringVariant(value); }
 
     void Encode(VariantTensorData* data) const final {
       EncodeVariant(value, data);
@@ -371,9 +371,9 @@ class Variant {
       return DecodeVariant(&data, &value);
     }
 
-    void Encode(string* buf) const final { EncodeVariant(value, buf); }
+    void Encode(std::string* buf) const final { EncodeVariant(value, buf); }
 
-    bool Decode(string buf) final { return DecodeVariant(&buf, &value); }
+    bool Decode(std::string buf) final { return DecodeVariant(&buf, &value); }
 
     T value;
   };
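// Round-trip sketch for the std::string-based Encode/Decode pair above.
// PODs like int are storable through the default handling in
// variant_encode_decode.h.
Variant v = 17;
std::string buf;
v.Encode(&buf);

// Decode dispatches on the currently held type (it is a no-op on an
// empty Variant), so w must already hold an int.
Variant w = 0;
bool ok = w.Decode(std::move(buf));
if (ok) {
  int result = *w.get<int>();  // 17
}
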
diff --git a/tensorflow/core/framework/variant_encode_decode.h b/tensorflow/core/framework/variant_encode_decode.h
index 502bbd57422..340d01d5f5d 100644
--- a/tensorflow/core/framework/variant_encode_decode.h
+++ b/tensorflow/core/framework/variant_encode_decode.h
@@ -105,7 +105,7 @@ bool DecodeVariantImpl(VariantTensorData data,
                        TypeResolver<T, false /* is_pod */, false /* Tensor */,
                                     true /* protobuf */>,
                        T* value) {
-  string metadata;
+  std::string metadata;
   data.get_metadata(&metadata);
   return value->ParseFromString(std::move(metadata));
 }
@@ -136,27 +136,27 @@ template <typename T, bool = has_type_name<typename std::decay<T>::type>::value,
 struct TypeNameResolver {};
 
 template <typename T>
-string TypeNameVariantImpl(const T& value,
-                           TypeNameResolver<T, true /* has_type_name */>) {
+std::string TypeNameVariantImpl(const T& value,
+                                TypeNameResolver<T, true /* has_type_name */>) {
   return value.TypeName();
 }
 
 template <typename T>
-string TypeNameVariantImpl(
+std::string TypeNameVariantImpl(
     const T& value,
     TypeNameResolver<T, false /* has_type_name */, true /* Tensor */>) {
   return "tensorflow::Tensor";
 }
 
 template <typename T>
-string TypeNameVariantImpl(
+std::string TypeNameVariantImpl(
     const T& value, TypeNameResolver<T, false /* has_type_name */,
                                      false /* Tensor */, true /* protobuf */>) {
   return value.GetTypeName();
 }
 
 template <typename T>
-string TypeNameVariantImpl(
+std::string TypeNameVariantImpl(
     const T& value,
     TypeNameResolver<T, false /* has_type_name */, false /* Tensor */,
                      false /* protobuf */>) {
@@ -164,7 +164,7 @@ string TypeNameVariantImpl(
 }
 
 template <typename T>
-string TypeNameVariant(const T& value) {
+std::string TypeNameVariant(const T& value) {
   return TypeNameVariantImpl(value, TypeNameResolver<T>());
 }
 
@@ -194,27 +194,27 @@ struct DebugStringResolver {};
 // TODO(ebrevdo): Expand DebugStringResolver to return TypeString if
 // there is no StrCat<T>() constructor.
 template <typename T>
-string DebugStringVariantImpl(
+std::string DebugStringVariantImpl(
     const T& value, DebugStringResolver<T, true /* has_debug_string */>) {
   return value.DebugString();
 }
 
 template <typename T>
-string DebugStringVariantImpl(
+std::string DebugStringVariantImpl(
     const T& value, DebugStringResolver<T, false /* has_debug_string */,
                                         true /* can_strcat */>) {
   return strings::StrCat(value);
 }
 
 template <typename T>
-string DebugStringVariantImpl(
+std::string DebugStringVariantImpl(
     const T& value, DebugStringResolver<T, false /* has_debug_string */,
                                         false /* can_strcat */>) {
   return "?";
 }
 
 template <typename T>
-string DebugStringVariant(const T& value) {
+std::string DebugStringVariant(const T& value) {
   return DebugStringVariantImpl(value, DebugStringResolver<T>());
 }
 
@@ -230,7 +230,7 @@ bool DecodeVariant(VariantTensorData* data, T* value) {
 }
 
 template <typename T>
-void EncodeVariant(const T& value, string* buf) {
+void EncodeVariant(const T& value, std::string* buf) {
   VariantTensorData data;
   EncodeVariantImpl(value, TypeResolver<T>(), &data);
   data.set_type_name(TypeNameVariant(value));
@@ -239,7 +239,7 @@ void EncodeVariant(const T& value, string* buf) {
 }
 
 template <typename T>
-bool DecodeVariant(string* buf, T* value) {
+bool DecodeVariant(std::string* buf, T* value) {
   VariantTensorData data;
   if (!data.ParseFromString(*buf)) return false;
   if (!DecodeVariantImpl(std::move(data), TypeResolver<T>(), value)) {
@@ -250,7 +250,7 @@ bool DecodeVariant(string* buf, T* value) {
 
 // Specializations for VariantTensorDataProto
 template <>
-string TypeNameVariant(const VariantTensorDataProto& value);
+std::string TypeNameVariant(const VariantTensorDataProto& value);
 
 template <>
 void EncodeVariant(const VariantTensorDataProto& value,
@@ -260,10 +260,10 @@ template <>
 bool DecodeVariant(VariantTensorData* data, VariantTensorDataProto* value);
 
 template <>
-void EncodeVariant(const VariantTensorDataProto& value, string* buf);
+void EncodeVariant(const VariantTensorDataProto& value, std::string* buf);
 
 template <>
-bool DecodeVariant(string* buf, VariantTensorDataProto* value);
+bool DecodeVariant(std::string* buf, VariantTensorDataProto* value);
 
 // Encodes an array of Variant objects into the given StringListEncoder.
 // `variant_array` is assumed to point to an array of `n` Variant objects.
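// Illustration of the TypeNameResolver dispatch above: a type that
// defines its own TypeName() takes the has_type_name branch, Tensor
// takes the dedicated branch, and everything else falls through to the
// protobuf / demangled-name cases. `MyType` is hypothetical.
struct MyType {
  std::string TypeName() const { return "MyType"; }
};
// TypeNameVariant(MyType{})  -> "MyType"
// TypeNameVariant(Tensor{})  -> "tensorflow::Tensor"
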
diff --git a/tensorflow/core/framework/variant_op_registry.h b/tensorflow/core/framework/variant_op_registry.h
index 5879597e5eb..edfb9c544c0 100644
--- a/tensorflow/core/framework/variant_op_registry.h
+++ b/tensorflow/core/framework/variant_op_registry.h
@@ -93,7 +93,7 @@ class UnaryVariantOpRegistry {
       AsyncVariantDeviceCopyFn;
 
   // Add a decode function to the registry.
-  void RegisterDecodeFn(const string& type_name,
+  void RegisterDecodeFn(const std::string& type_name,
                         const VariantDecodeFn& decode_fn);
 
   // Returns nullptr if no decode function was found for the given TypeName.
@@ -124,7 +124,7 @@ class UnaryVariantOpRegistry {
   }
 
   // Add a unary op function to the registry.
-  void RegisterUnaryOpFn(VariantUnaryOp op, const string& device,
+  void RegisterUnaryOpFn(VariantUnaryOp op, const std::string& device,
                          const TypeIndex& type_index,
                          const VariantUnaryOpFn& unary_op_fn) {
     VariantUnaryOpFn* existing = GetUnaryOpFn(op, device, type_index);
@@ -146,7 +146,7 @@ class UnaryVariantOpRegistry {
   }
 
   // Add a binary op function to the registry.
-  void RegisterBinaryOpFn(VariantBinaryOp op, const string& device,
+  void RegisterBinaryOpFn(VariantBinaryOp op, const std::string& device,
                           const TypeIndex& type_index,
                           const VariantBinaryOpFn& add_fn) {
     VariantBinaryOpFn* existing = GetBinaryOpFn(op, device, type_index);
@@ -252,7 +252,7 @@ class UnaryVariantOpRegistry {
   // Find or insert a string into a persistent string storage
   // container; return the StringPiece pointing to the permanent string
   // location.
-  static StringPiece GetPersistentStringPiece(const string& str) {
+  static StringPiece GetPersistentStringPiece(const std::string& str) {
     const auto string_storage = PersistentStringStorage();
     auto found = string_storage->find(str);
     if (found == string_storage->end()) {
@@ -307,7 +307,7 @@ Status VariantDeviceCopy(
 template <typename Device>
 Status UnaryOpVariant(OpKernelContext* ctx, VariantUnaryOp op, const Variant& v,
                       Variant* v_out) {
-  const string& device = DeviceName<Device>::value;
+  const std::string& device = DeviceName<Device>::value;
   UnaryVariantOpRegistry::VariantUnaryOpFn* unary_op_fn =
       UnaryVariantOpRegistry::Global()->GetUnaryOpFn(op, device, v.TypeId());
   if (unary_op_fn == nullptr) {
@@ -336,7 +336,7 @@ Status BinaryOpVariants(OpKernelContext* ctx, VariantBinaryOp op,
         "type ids.  Type names: '",
         a.TypeName(), "' vs. '", b.TypeName(), "'");
   }
-  const string& device = DeviceName<Device>::value;
+  const std::string& device = DeviceName<Device>::value;
   UnaryVariantOpRegistry::VariantBinaryOpFn* binary_op_fn =
       UnaryVariantOpRegistry::Global()->GetBinaryOpFn(op, device, a.TypeId());
   if (binary_op_fn == nullptr) {
@@ -354,7 +354,7 @@ namespace variant_op_registry_fn_registration {
 template <typename T>
 class UnaryVariantDecodeRegistration {
  public:
-  UnaryVariantDecodeRegistration(const string& type_name) {
+  UnaryVariantDecodeRegistration(const std::string& type_name) {
     // The Variant is passed by pointer because it should be
     // mutable: get below may Decode the variant, which
     // is a self-mutating behavior.  The variant is not modified in
@@ -386,7 +386,8 @@ class UnaryVariantDeviceCopyRegistration {
   UnaryVariantDeviceCopyRegistration(
       const VariantDeviceCopyDirection direction, const TypeIndex& type_index,
       const LocalVariantDeviceCopyFn& device_copy_fn) {
-    const string type_index_name = port::MaybeAbiDemangle(type_index.name());
+    const std::string type_index_name =
+        port::MaybeAbiDemangle(type_index.name());
     UnaryVariantOpRegistry::Global()->RegisterDeviceCopyFn(
         direction, type_index,
         [type_index_name, device_copy_fn](
@@ -413,10 +414,11 @@ class UnaryVariantUnaryOpRegistration {
       LocalVariantUnaryOpFn;
 
  public:
-  UnaryVariantUnaryOpRegistration(VariantUnaryOp op, const string& device,
+  UnaryVariantUnaryOpRegistration(VariantUnaryOp op, const std::string& device,
                                   const TypeIndex& type_index,
                                   const LocalVariantUnaryOpFn& unary_op_fn) {
-    const string type_index_name = port::MaybeAbiDemangle(type_index.name());
+    const std::string type_index_name =
+        port::MaybeAbiDemangle(type_index.name());
     UnaryVariantOpRegistry::Global()->RegisterUnaryOpFn(
         op, device, type_index,
         [type_index_name, unary_op_fn](OpKernelContext* ctx, const Variant& v,
@@ -442,10 +444,12 @@ class UnaryVariantBinaryOpRegistration {
       LocalVariantBinaryOpFn;
 
  public:
-  UnaryVariantBinaryOpRegistration(VariantBinaryOp op, const string& device,
+  UnaryVariantBinaryOpRegistration(VariantBinaryOp op,
+                                   const std::string& device,
                                    const TypeIndex& type_index,
                                    const LocalVariantBinaryOpFn& binary_op_fn) {
-    const string type_index_name = port::MaybeAbiDemangle(type_index.name());
+    const std::string type_index_name =
+        port::MaybeAbiDemangle(type_index.name());
     UnaryVariantOpRegistry::Global()->RegisterBinaryOpFn(
         op, device, type_index,
         [type_index_name, binary_op_fn](OpKernelContext* ctx, const Variant& a,
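// Registration sketch grounded in the class above: a static
// UnaryVariantDecodeRegistration instance wires a decoder for the
// hypothetical MyType from the previous sketch into the registry under
// its type name (production code normally reaches this through the
// registry's registration macros). Assumes MyType also provides the
// Encode/Decode(VariantTensorData) methods the decode path needs.
static variant_op_registry_fn_registration::
    UnaryVariantDecodeRegistration<MyType>
        my_type_decode_registration("MyType");
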
diff --git a/tensorflow/core/framework/variant_tensor_data.h b/tensorflow/core/framework/variant_tensor_data.h
index 8c654ccec82..59246f2bb15 100644
--- a/tensorflow/core/framework/variant_tensor_data.h
+++ b/tensorflow/core/framework/variant_tensor_data.h
@@ -44,8 +44,8 @@ class VariantTensorData {
   VariantTensorData(VariantTensorDataProto proto);
 
   // Name of the type of objects being serialized.
-  const string& type_name() const { return type_name_; }
-  void set_type_name(const string& type_name) { type_name_ = type_name; }
+  const std::string& type_name() const { return type_name_; }
+  void set_type_name(const std::string& type_name) { type_name_ = type_name; }
 
   template <typename T, bool = std::is_pod<typename std::decay<T>::type>::value>
   struct PODResolver {};
@@ -62,9 +62,9 @@ class VariantTensorData {
     return GetMetadata<T>(value, PODResolver<T>());
   }
 
-  string& metadata_string() { return metadata_; }
+  std::string& metadata_string() { return metadata_; }
 
-  const string& metadata_string() const { return metadata_; }
+  const std::string& metadata_string() const { return metadata_; }
 
   // Tensors contained within objects being serialized.
   int tensors_size() const;
@@ -84,25 +84,27 @@ class VariantTensorData {
   bool FromConstProto(const VariantTensorDataProto& proto);
 
   // Serialization via VariantTensorDataProto
-  string SerializeAsString() const;
-  bool SerializeToString(string* buf);
-  bool ParseFromString(string s);
+  std::string SerializeAsString() const;
+  bool SerializeToString(std::string* buf);
+  bool ParseFromString(std::string s);
 
-  string DebugString() const;
+  std::string DebugString() const;
 
  public:
-  string type_name_;
-  string metadata_;
+  std::string type_name_;
+  std::string metadata_;
   std::vector<Tensor> tensors_;
 
  private:
   template <typename T>
-  void SetMetadata(const string& value, PODResolver<T, false /* is_pod */>) {
+  void SetMetadata(const std::string& value,
+                   PODResolver<T, false /* is_pod */>) {
     metadata_ = value;
   }
 
   template <typename T>
-  bool GetMetadata(string* value, PODResolver<T, false /* is_pod */>) const {
+  bool GetMetadata(std::string* value,
+                   PODResolver<T, false /* is_pod */>) const {
     *value = metadata_;
     return true;
   }
@@ -121,7 +123,7 @@ class VariantTensorData {
 };
 
 // For backwards compatibility for when this was a proto
-string ProtoDebugString(const VariantTensorData& object);
+std::string ProtoDebugString(const VariantTensorData& object);
 
 }  // namespace tensorflow
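
// Round-trip sketch for the std::string serialization methods declared
// above; the type name and metadata values are illustrative.
VariantTensorData data;
data.set_type_name("MyType");
data.metadata_string() = "payload";
std::string serialized = data.SerializeAsString();

VariantTensorData restored;
bool ok = restored.ParseFromString(std::move(serialized));
// restored.type_name() == "MyType" when ok is true.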