diff --git a/tensorflow/lite/delegates/gpu/BUILD b/tensorflow/lite/delegates/gpu/BUILD
index 72af2534988..b5fff1d84d5 100644
--- a/tensorflow/lite/delegates/gpu/BUILD
+++ b/tensorflow/lite/delegates/gpu/BUILD
@@ -12,12 +12,6 @@ exports_files([
     "metal_delegate.h",
 ])
 
-# Primary purpose of this config is to replace ::util::Status with our custom
-# light implementation ::tflite::gpu::StatusLite to reduce binary size.  Besides
-# that, certain features that were hard to communicate without full open source
-# were hidden away too such as compiled models, serialization, and metadata.
-# While the latter will be fully available with the open source release, the
-# former will have to stay until absl::Status is released.
 config_setting(
     name = "tflite_gpu_binary_release",
     values = {"copt": "-DTFLITE_GPU_BINARY_RELEASE"},
diff --git a/tensorflow/lite/delegates/gpu/api.h b/tensorflow/lite/delegates/gpu/api.h
index 803983214e2..921f2d54006 100644
--- a/tensorflow/lite/delegates/gpu/api.h
+++ b/tensorflow/lite/delegates/gpu/api.h
@@ -220,7 +220,8 @@ class InferenceBuilder {
 
   // Sets a new shape for the input if the underlying implementation and graph
   // structure allow dynamic tensors.
-  virtual Status SetInputShape(int index, const Dimensions& dimensions) = 0;
+  virtual absl::Status SetInputShape(int index,
+                                     const Dimensions& dimensions) = 0;
 
   // Updates object definitions for the given index. Implementation may allow
   // using different layouts and/or data type conversions between objects
@@ -229,21 +230,21 @@ class InferenceBuilder {
   //   A user, however, has an input in DataType::FLOAT16, DataLayout::PHWC4.
   //   An implementation may allow this transformation to happen automatically
   //   under the hood.
-  virtual Status SetInputObjectDef(int index, ObjectDef def) = 0;
-  virtual Status SetOutputObjectDef(int index, ObjectDef def) = 0;
-  virtual Status SetAllInputObjectDefsTo(ObjectDef def) {
+  virtual absl::Status SetInputObjectDef(int index, ObjectDef def) = 0;
+  virtual absl::Status SetOutputObjectDef(int index, ObjectDef def) = 0;
+  virtual absl::Status SetAllInputObjectDefsTo(ObjectDef def) {
     auto input_defs = inputs();
     for (int i = 0; i < input_defs.size(); ++i) {
       RETURN_IF_ERROR(SetInputObjectDef(i, def));
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
-  virtual Status SetAllOutputObjectDefsTo(ObjectDef def) {
+  virtual absl::Status SetAllOutputObjectDefsTo(ObjectDef def) {
     auto output_defs = outputs();
     for (int i = 0; i < output_defs.size(); ++i) {
       RETURN_IF_ERROR(SetOutputObjectDef(i, def));
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   // Creates new instance of the inference runner. InferenceBuilder stays valid
@@ -251,7 +252,7 @@ class InferenceBuilder {
   //
   // This method may take significant time to prepare a new inference runner.
   // For example, it may require compiling OpenGL shaders.
-  virtual Status Build(std::unique_ptr<InferenceRunner>* runner) = 0;
+  virtual absl::Status Build(std::unique_ptr<InferenceRunner>* runner) = 0;
 };
 
 // Runs prepared inference. Every object marked as external needs to be set
@@ -268,12 +269,12 @@ class InferenceRunner {
   // Setters allow setting or changing the external object for a given index.
   // The object must match the object definition set earlier in InferenceBuilder.
 
-  virtual Status GetInputObject(int index, TensorObject* object) = 0;
-  virtual Status GetOutputObject(int index, TensorObject* object) = 0;
-  virtual Status SetInputObject(int index, TensorObject object) = 0;
-  virtual Status SetOutputObject(int index, TensorObject object) = 0;
+  virtual absl::Status GetInputObject(int index, TensorObject* object) = 0;
+  virtual absl::Status GetOutputObject(int index, TensorObject* object) = 0;
+  virtual absl::Status SetInputObject(int index, TensorObject object) = 0;
+  virtual absl::Status SetOutputObject(int index, TensorObject object) = 0;
 
-  virtual Status Run() = 0;
+  virtual absl::Status Run() = 0;
 };
 
 // Encapsulated compilation/runtime tradeoffs.
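
A usage sketch for the interface above: with Status replaced by absl::Status,
call sites keep the same shape and propagate failures through the
RETURN_IF_ERROR macro already used throughout this change (assumed to live in
the delegate's common status header). The single input/output index below is
hypothetical:

    #include <memory>

    #include "tensorflow/lite/delegates/gpu/api.h"

    absl::Status BuildAndRun(tflite::gpu::InferenceBuilder* builder,
                             tflite::gpu::TensorObject input,
                             tflite::gpu::TensorObject output) {
      std::unique_ptr<tflite::gpu::InferenceRunner> runner;
      // Build() may compile OpenGL shaders, so it can take significant time.
      RETURN_IF_ERROR(builder->Build(&runner));
      RETURN_IF_ERROR(runner->SetInputObject(0, input));
      RETURN_IF_ERROR(runner->SetOutputObject(0, output));
      return runner->Run();
    }
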
diff --git a/tensorflow/lite/delegates/gpu/cl/api.cc b/tensorflow/lite/delegates/gpu/cl/api.cc
index 4e85f92c6de..a6488c51ce4 100644
--- a/tensorflow/lite/delegates/gpu/cl/api.cc
+++ b/tensorflow/lite/delegates/gpu/cl/api.cc
@@ -54,22 +54,22 @@ class NoopTensorTie : public TensorTie {
     return def.external_def == def.internal_def;
   }
 
-  Status SetExternalObject(TensorObject obj) final {
+  absl::Status SetExternalObject(TensorObject obj) final {
     if (!def().external_def.object_def.user_provided) {
-      return InvalidArgumentError("Tensor object is readonly.");
+      return absl::InvalidArgumentError("Tensor object is readonly.");
     }
     if (!IsValid(def().external_def, obj)) {
-      return InvalidArgumentError("Given object is not valid");
+      return absl::InvalidArgumentError("Given object is not valid");
     }
     obj_ = obj;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   TensorObject GetExternalObject() final { return obj_; }
 
-  Status CopyToExternalObject() final { return OkStatus(); }
+  absl::Status CopyToExternalObject() final { return absl::OkStatus(); }
 
-  Status CopyFromExternalObject() final { return OkStatus(); }
+  absl::Status CopyFromExternalObject() final { return absl::OkStatus(); }
 
  private:
   TensorObject obj_;
@@ -93,45 +93,45 @@ class DefaultTensorTie : public TensorTie {
            converter_builder.IsSupported(def.external_def, def.internal_def);
   }
 
-  static Status New(const TensorTieDef& def, TensorObject internal_object,
-                    TensorObjectConverterBuilder* converter_builder,
-                    Environment* env, std::unique_ptr<TensorTie>* tie) {
+  static absl::Status New(const TensorTieDef& def, TensorObject internal_object,
+                          TensorObjectConverterBuilder* converter_builder,
+                          Environment* env, std::unique_ptr<TensorTie>* tie) {
     auto tie_impl = absl::make_unique<DefaultTensorTie>(def, internal_object);
     RETURN_IF_ERROR(tie_impl->Init(converter_builder, env));
     *tie = std::move(tie_impl);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status CopyToExternalObject() final {
+  absl::Status CopyToExternalObject() final {
     if (!converter_to_) {
-      return UnavailableError("Conversion is not available");
+      return absl::UnavailableError("Conversion is not available");
     }
     return converter_to_->Convert(internal_obj_, GetExternalObject());
   }
 
-  Status CopyFromExternalObject() final {
+  absl::Status CopyFromExternalObject() final {
     if (!converter_from_) {
-      return UnavailableError("Conversion is not available");
+      return absl::UnavailableError("Conversion is not available");
     }
     return converter_from_->Convert(GetExternalObject(), internal_obj_);
   }
 
-  Status SetExternalObject(TensorObject obj) final {
+  absl::Status SetExternalObject(TensorObject obj) final {
     if (!def().external_def.object_def.user_provided) {
-      return InvalidArgumentError("External object is read-only");
+      return absl::InvalidArgumentError("External object is read-only");
     }
     if (!IsValid(def().external_def, obj)) {
-      return InvalidArgumentError("Given object is not valid");
+      return absl::InvalidArgumentError("Given object is not valid");
     }
     external_obj_ = obj;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   TensorObject GetExternalObject() final { return external_obj_; }
 
  private:
-  Status Init(TensorObjectConverterBuilder* converter_builder,
-              Environment* env) {
+  absl::Status Init(TensorObjectConverterBuilder* converter_builder,
+                    Environment* env) {
     RETURN_IF_ERROR(converter_builder->MakeConverter(
         def().internal_def, def().external_def, &converter_to_));
     RETURN_IF_ERROR(converter_builder->MakeConverter(
@@ -139,10 +139,10 @@ class DefaultTensorTie : public TensorTie {
     return MaybeAllocateExternalObject(env);
   }
 
-  Status MaybeAllocateExternalObject(Environment* env) {
+  absl::Status MaybeAllocateExternalObject(Environment* env) {
     const TensorObjectDef& d = def().external_def;
     if (d.object_def.user_provided) {
-      return OkStatus();
+      return absl::OkStatus();
     }
     switch (d.object_def.object_type) {
       case ObjectType::CPU_MEMORY: {
@@ -170,9 +170,9 @@ class DefaultTensorTie : public TensorTie {
         break;
       }
       default:
-        return InternalError("Unexpected object type");
+        return absl::InternalError("Unexpected object type");
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   const TensorObject internal_obj_;
@@ -198,26 +198,26 @@ class TwoStepTensorTie : public TensorTie {
            DefaultTensorTie::IsSupported(defs.second, converter_builder);
   }
 
-  static Status New(const TensorTieDef& def, TensorObject internal_object,
-                    TensorObjectConverterBuilder* converter_builder,
-                    Environment* env, std::unique_ptr<TensorTie>* tie) {
+  static absl::Status New(const TensorTieDef& def, TensorObject internal_object,
+                          TensorObjectConverterBuilder* converter_builder,
+                          Environment* env, std::unique_ptr<TensorTie>* tie) {
     auto tie_impl = absl::make_unique<TwoStepTensorTie>(def);
     RETURN_IF_ERROR(tie_impl->Init(internal_object, converter_builder, env));
     *tie = std::move(tie_impl);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status CopyToExternalObject() final {
+  absl::Status CopyToExternalObject() final {
     RETURN_IF_ERROR(inner_tie_->CopyToExternalObject());
     return outer_tie_->CopyToExternalObject();
   }
 
-  Status CopyFromExternalObject() final {
+  absl::Status CopyFromExternalObject() final {
     RETURN_IF_ERROR(outer_tie_->CopyFromExternalObject());
     return inner_tie_->CopyFromExternalObject();
   }
 
-  Status SetExternalObject(TensorObject obj) final {
+  absl::Status SetExternalObject(TensorObject obj) final {
     return outer_tie_->SetExternalObject(obj);
   }
 
@@ -241,9 +241,9 @@ class TwoStepTensorTie : public TensorTie {
     return std::make_pair(outer_def, inner_def);
   }
 
-  Status Init(TensorObject internal_object,
-              TensorObjectConverterBuilder* converter_builder,
-              Environment* env) {
+  absl::Status Init(TensorObject internal_object,
+                    TensorObjectConverterBuilder* converter_builder,
+                    Environment* env) {
     auto defs = MakeOuterInnerDefs(def());
     RETURN_IF_ERROR(DefaultTensorTie::New(defs.second, internal_object,
                                           converter_builder, env, &inner_tie_));
@@ -274,27 +274,27 @@ class GlBufferHolder : public TensorTie {
     return DefaultTensorTie::IsSupported(MakeClDef(def), converter_builder);
   }
 
-  static Status New(const TensorTieDef& def, TensorObject internal_object,
-                    TensorObjectConverterBuilder* converter_builder,
-                    GlInteropFabric* gl_interop_fabric, Environment* env,
-                    std::unique_ptr<TensorTie>* tie) {
+  static absl::Status New(const TensorTieDef& def, TensorObject internal_object,
+                          TensorObjectConverterBuilder* converter_builder,
+                          GlInteropFabric* gl_interop_fabric, Environment* env,
+                          std::unique_ptr<TensorTie>* tie) {
     auto tie_impl =
         absl::make_unique<GlBufferHolder>(def, gl_interop_fabric, env);
     RETURN_IF_ERROR(DefaultTensorTie::New(MakeClDef(def), internal_object,
                                           converter_builder, env,
                                           &tie_impl->tie_));
     *tie = std::move(tie_impl);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status SetExternalObject(TensorObject obj) final {
+  absl::Status SetExternalObject(TensorObject obj) final {
     auto ssbo = absl::get_if<OpenGlBuffer>(&obj);
     if (!ssbo) {
-      return InvalidArgumentError("Missing OpenGL SSBO");
+      return absl::InvalidArgumentError("Missing OpenGL SSBO");
     }
     auto old_ssbo = absl::get_if<OpenGlBuffer>(&external_obj_);
     if (old_ssbo && ssbo->id == old_ssbo->id) {
-      return OkStatus();
+      return absl::OkStatus();
     }
     if (cl_object_.memory()) {
       gl_interop_fabric_->UnregisterMemory(cl_object_.memory());
@@ -304,16 +304,18 @@ class GlBufferHolder : public TensorTie {
     external_obj_ = obj;
     RETURN_IF_ERROR(tie_->SetExternalObject(OpenClBuffer{cl_object_.memory()}));
     gl_interop_fabric_->RegisterMemory(cl_object_.memory());
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   TensorObject GetExternalObject() final { return external_obj_; }
 
-  Status CopyFromExternalObject() final {
+  absl::Status CopyFromExternalObject() final {
     return tie_->CopyFromExternalObject();
   }
 
-  Status CopyToExternalObject() final { return tie_->CopyToExternalObject(); }
+  absl::Status CopyToExternalObject() final {
+    return tie_->CopyToExternalObject();
+  }
 
  private:
   static TensorTieDef MakeClDef(const TensorTieDef& def) {
@@ -358,20 +360,20 @@ class TensorTieFactory {
             TwoStepTensorTie::IsSupported(def, *converter_builder_));
   }
 
-  Status NewTensorTie(const TensorTieDef& def,
-                      std::unique_ptr<TensorTie>* tie) {
+  absl::Status NewTensorTie(const TensorTieDef& def,
+                            std::unique_ptr<TensorTie>* tie) {
     TensorObject internal_object = TensorToObj(*context_.GetTensor(def.id));
     auto converter = converter_builder_.get();
     if (NoopTensorTie::IsSupported(def)) {
       *tie = absl::make_unique<NoopTensorTie>(def, internal_object);
-      return OkStatus();
+      return absl::OkStatus();
     }
     if (DefaultTensorTie::IsSupported(def, *converter)) {
       return DefaultTensorTie::New(def, internal_object, converter, &env_, tie);
     }
     if (GlBufferHolder::IsSupported(def, *converter)) {
       if (!gl_interop_fabric_) {
-        return InvalidArgumentError(
+        return absl::InvalidArgumentError(
             "GL object is used but InferenceEnvironmentOptions does not have "
             "EGL display and context set.");
       }
@@ -381,7 +383,7 @@ class TensorTieFactory {
     if (TwoStepTensorTie::IsSupported(def, *converter)) {
       return TwoStepTensorTie::New(def, internal_object, converter, &env_, tie);
     }
-    return UnimplementedError("Unsupported tensor tie definition.");
+    return absl::UnimplementedError("Unsupported tensor tie definition.");
   }
 
  private:
@@ -400,9 +402,9 @@ class InferenceRunnerImpl : public InferenceRunner {
         context_(std::move(context)),
         gl_interop_fabric_(std::move(gl_interop_fabric)) {}
 
-  Status Initialize(const std::vector<TensorTieDef>& inputs,
-                    const std::vector<TensorTieDef>& outputs,
-                    TensorTieFactory* factory) {
+  absl::Status Initialize(const std::vector<TensorTieDef>& inputs,
+                          const std::vector<TensorTieDef>& outputs,
+                          TensorTieFactory* factory) {
     RETURN_IF_ERROR(LinkTensors(inputs, factory, &inputs_));
     return LinkTensors(outputs, factory, &outputs_);
   }
@@ -415,37 +417,37 @@ class InferenceRunnerImpl : public InferenceRunner {
     return GetExternalDefinitions(outputs_);
   }
 
-  Status GetInputObject(int index, TensorObject* object) override {
+  absl::Status GetInputObject(int index, TensorObject* object) override {
     if (index < 0 || index >= inputs_.size()) {
-      return OutOfRangeError("Index is out of range");
+      return absl::OutOfRangeError("Index is out of range");
     }
     *object = inputs_[index]->GetExternalObject();
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status GetOutputObject(int index, TensorObject* object) override {
+  absl::Status GetOutputObject(int index, TensorObject* object) override {
     if (index < 0 || index >= outputs_.size()) {
-      return OutOfRangeError("Index is out of range");
+      return absl::OutOfRangeError("Index is out of range");
     }
     *object = outputs_[index]->GetExternalObject();
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status SetInputObject(int index, TensorObject object) override {
+  absl::Status SetInputObject(int index, TensorObject object) override {
     if (index < 0 || index >= inputs_.size()) {
-      return OutOfRangeError("Index is out of range");
+      return absl::OutOfRangeError("Index is out of range");
     }
     return inputs_[index]->SetExternalObject(object);
   }
 
-  Status SetOutputObject(int index, TensorObject object) override {
+  absl::Status SetOutputObject(int index, TensorObject object) override {
     if (index < 0 || index >= outputs_.size()) {
-      return OutOfRangeError("Index is out of range");
+      return absl::OutOfRangeError("Index is out of range");
     }
     return outputs_[index]->SetExternalObject(object);
   }
 
-  Status Run() override {
+  absl::Status Run() override {
     if (gl_interop_fabric_) {
       RETURN_IF_ERROR(gl_interop_fabric_->Start());
     }
@@ -460,20 +462,20 @@ class InferenceRunnerImpl : public InferenceRunner {
     if (gl_interop_fabric_) {
       RETURN_IF_ERROR(gl_interop_fabric_->Finish());
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
-  static Status LinkTensors(const std::vector<TensorTieDef>& defs,
-                            TensorTieFactory* factory,
-                            std::vector<std::unique_ptr<TensorTie>>* objects) {
+  static absl::Status LinkTensors(
+      const std::vector<TensorTieDef>& defs, TensorTieFactory* factory,
+      std::vector<std::unique_ptr<TensorTie>>* objects) {
     objects->reserve(defs.size());
     for (auto& def : defs) {
       std::unique_ptr<TensorTie> object;
       RETURN_IF_ERROR(factory->NewTensorTie(def, &object));
       objects->push_back(std::move(object));
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   static std::vector<TensorObjectDef> GetExternalDefinitions(
@@ -511,9 +513,9 @@ class InferenceBuilderImpl : public InferenceBuilder {
   explicit InferenceBuilderImpl(Environment* environment)
       : environment_(environment) {}
 
-  Status Initialize(const InferenceOptions& options,
-                    const InferenceEnvironmentOptions& env_options,
-                    const GraphFloat32& graph) {
+  absl::Status Initialize(const InferenceOptions& options,
+                          const InferenceEnvironmentOptions& env_options,
+                          const GraphFloat32& graph) {
     context_ = absl::make_unique<InferenceContext>();
     InferenceContext::CreateInferenceInfo create_info;
     create_info.precision = GetPrecision(options);
@@ -533,7 +535,7 @@ class InferenceBuilderImpl : public InferenceBuilder {
 
     inputs_ = LinkTensors(graph, graph.inputs());
     outputs_ = LinkTensors(graph, graph.outputs());
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   std::vector<TensorObjectDef> inputs() const override {
@@ -544,40 +546,42 @@ class InferenceBuilderImpl : public InferenceBuilder {
     return GetExternalDefinitions(outputs_);
   }
 
-  Status SetInputShape(int index, const Dimensions& dimensions) override {
+  absl::Status SetInputShape(int index, const Dimensions& dimensions) override {
     if (index < 0 || index >= inputs_.size()) {
-      return OutOfRangeError("Index is out of range");
+      return absl::OutOfRangeError("Index is out of range");
     }
-    return UnimplementedError("Changing input shapes is not supported");
+    return absl::UnimplementedError("Changing input shapes is not supported");
   }
 
-  Status SetInputObjectDef(int index, ObjectDef new_def) override {
+  absl::Status SetInputObjectDef(int index, ObjectDef new_def) override {
     if (index < 0 || index >= inputs_.size()) {
-      return OutOfRangeError("Index is out of range");
+      return absl::OutOfRangeError("Index is out of range");
     }
     auto def = inputs_[index];
     def.external_def.object_def = new_def;
     if (!tie_factory_->IsSupported(def)) {
-      return InvalidArgumentError("New object definition is not supported.");
+      return absl::InvalidArgumentError(
+          "New object definition is not supported.");
     }
     inputs_[index] = def;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status SetOutputObjectDef(int index, ObjectDef new_def) override {
+  absl::Status SetOutputObjectDef(int index, ObjectDef new_def) override {
     if (index < 0 || index >= outputs_.size()) {
-      return OutOfRangeError("Index is out of range");
+      return absl::OutOfRangeError("Index is out of range");
     }
     auto def = outputs_[index];
     def.external_def.object_def = new_def;
     if (!tie_factory_->IsSupported(def)) {
-      return InvalidArgumentError("New object definition is not supported.");
+      return absl::InvalidArgumentError(
+          "New object definition is not supported.");
     }
     outputs_[index] = def;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Build(std::unique_ptr<InferenceRunner>* runner) override {
+  absl::Status Build(std::unique_ptr<InferenceRunner>* runner) override {
     if (gl_interop_fabric_ && !HasGlObjects()) {
       // destroy interop layer when there are no GL objects to avoid
       // extra synchronization cost.
@@ -588,7 +592,7 @@ class InferenceBuilderImpl : public InferenceBuilder {
     RETURN_IF_ERROR(
         runner_impl->Initialize(inputs_, outputs_, tie_factory_.get()));
     *runner = std::move(runner_impl);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
@@ -696,7 +700,7 @@ class InferenceEnvironmentImpl : public InferenceEnvironment {
   explicit InferenceEnvironmentImpl(const InferenceEnvironmentOptions& options)
       : options_(options) {}
 
-  Status Init() {
+  absl::Status Init() {
     RETURN_IF_ERROR(LoadOpenCL());
     properties_.is_opencl_available = true;
 
@@ -716,13 +720,13 @@ class InferenceEnvironmentImpl : public InferenceEnvironment {
     properties_.is_cl_to_gl_fast_sync_supported =
         IsEglSyncFromClEventSupported();
     if (options_.IsGlAware() && !properties_.is_gl_sharing_supported) {
-      return UnavailableError("GL sharing is not supported");
+      return absl::UnavailableError("GL sharing is not supported");
     }
 
     CLContext context;
     if (options_.context) {
       if (options_.IsGlAware()) {
-        return InvalidArgumentError(
+        return absl::InvalidArgumentError(
             "OpenCL context and EGL parameters are set in the same time.");
       }
       context = CLContext(options_.context, /* has_ownership = */ false);
@@ -754,11 +758,11 @@ class InferenceEnvironmentImpl : public InferenceEnvironment {
     return environment_.Init();
   }
 
-  Status NewInferenceBuilder(const InferenceOptions& options,
-                             GraphFloat32 model,
-                             std::unique_ptr<InferenceBuilder>* builder) final {
+  absl::Status NewInferenceBuilder(
+      const InferenceOptions& options, GraphFloat32 model,
+      std::unique_ptr<InferenceBuilder>* builder) final {
     if (!IsValid(options)) {
-      return InvalidArgumentError("InferenceOptions are invalid.");
+      return absl::InvalidArgumentError("InferenceOptions are invalid.");
     }
     InferenceOptions resolved_options = options;
     ResolveAutoPriority(&resolved_options);
@@ -776,7 +780,7 @@ class InferenceEnvironmentImpl : public InferenceEnvironment {
     RETURN_IF_ERROR(
         builder_impl->Initialize(resolved_options, options_, model));
     *builder = std::move(builder_impl);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   std::vector<uint8_t> GetSerializedBinaryCache() const final {
@@ -800,18 +804,18 @@ class InferenceEnvironmentImpl : public InferenceEnvironment {
 
 }  // namespace
 
-Status NewInferenceEnvironment(
+absl::Status NewInferenceEnvironment(
     const InferenceEnvironmentOptions& options,
     std::unique_ptr<InferenceEnvironment>* environment,
     InferenceEnvironmentProperties* properties) {
   auto env_impl = absl::make_unique<InferenceEnvironmentImpl>(options);
-  Status status = env_impl->Init();
+  absl::Status status = env_impl->Init();
   if (properties) {
     *properties = env_impl->properties();
   }
   RETURN_IF_ERROR(status);
   *environment = std::move(env_impl);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
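
Every converted function above keeps the RETURN_IF_ERROR early-return pattern,
which works unchanged because absl::Status exposes the same ok() predicate. A
minimal compatible definition, given here only as a hypothetical stand-in for
the macro this codebase pulls in from its common status header:

    #include "absl/status/status.h"

    // Hypothetical stand-in; the real project macro may differ in details.
    #define RETURN_IF_ERROR(expr)            \
      do {                                   \
        const absl::Status _status = (expr); \
        if (!_status.ok()) return _status;   \
      } while (0)
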
diff --git a/tensorflow/lite/delegates/gpu/cl/api.h b/tensorflow/lite/delegates/gpu/cl/api.h
index 2ac5ce2e28b..9d3f9f7214c 100644
--- a/tensorflow/lite/delegates/gpu/cl/api.h
+++ b/tensorflow/lite/delegates/gpu/cl/api.h
@@ -70,7 +70,7 @@ class InferenceEnvironment {
  public:
   virtual ~InferenceEnvironment() {}
 
-  virtual Status NewInferenceBuilder(
+  virtual absl::Status NewInferenceBuilder(
       const InferenceOptions& options, GraphFloat32 model,
       std::unique_ptr<InferenceBuilder>* builder) = 0;
 
@@ -112,7 +112,7 @@ struct InferenceEnvironmentOptions {
 
 // Creates a new OpenCL environment that needs to stay around until all
 // inference runners are destroyed.
-Status NewInferenceEnvironment(
+absl::Status NewInferenceEnvironment(
     const InferenceEnvironmentOptions& options,
     std::unique_ptr<InferenceEnvironment>* environment,
     InferenceEnvironmentProperties* properties /* optional */);
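
Note that NewInferenceEnvironment fills in properties even when initialization
fails (the implementation above copies them before RETURN_IF_ERROR), so callers
can distinguish a missing OpenCL runtime from other errors. A sketch, with the
fallback branch left as an assumption:

    #include <memory>

    #include "tensorflow/lite/delegates/gpu/cl/api.h"

    absl::Status CreateGpuEnvironment(
        std::unique_ptr<tflite::gpu::cl::InferenceEnvironment>* env) {
      tflite::gpu::cl::InferenceEnvironmentOptions options;  // no GL interop
      tflite::gpu::cl::InferenceEnvironmentProperties properties;
      absl::Status status =
          tflite::gpu::cl::NewInferenceEnvironment(options, env, &properties);
      if (!properties.is_opencl_available) {
        // OpenCL is missing entirely; a caller could fall back to GL here.
      }
      return status;
    }
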
diff --git a/tensorflow/lite/delegates/gpu/cl/buffer.cc b/tensorflow/lite/delegates/gpu/cl/buffer.cc
index 51d9a59e888..207cdec5122 100644
--- a/tensorflow/lite/delegates/gpu/cl/buffer.cc
+++ b/tensorflow/lite/delegates/gpu/cl/buffer.cc
@@ -21,8 +21,10 @@ namespace tflite {
 namespace gpu {
 namespace cl {
 namespace {
-Status CreateBuffer(size_t size_in_bytes, bool gpu_read_only, const void* data,
-                    CLContext* context, Buffer* result) {
+
+absl::Status CreateBuffer(size_t size_in_bytes, bool gpu_read_only,
+                          const void* data, CLContext* context,
+                          Buffer* result) {
   cl_mem_flags flags = gpu_read_only ? CL_MEM_READ_ONLY : CL_MEM_READ_WRITE;
   if (data != nullptr) {
     flags |= CL_MEM_COPY_HOST_PTR;
@@ -31,14 +33,14 @@ Status CreateBuffer(size_t size_in_bytes, bool gpu_read_only, const void* data,
   cl_mem buffer = clCreateBuffer(context->context(), flags, size_in_bytes,
                                  const_cast<void*>(data), &error_code);
   if (!buffer) {
-    return UnknownError(
+    return absl::UnknownError(
         absl::StrCat("Failed to allocate device memory with clCreateBuffer",
                      CLErrorCodeToString(error_code)));
   }
 
   *result = Buffer(buffer, size_in_bytes);
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 }  // namespace
 
@@ -69,18 +71,18 @@ void Buffer::Release() {
   }
 }
 
-Status CreateReadOnlyBuffer(size_t size_in_bytes, CLContext* context,
-                            Buffer* result) {
+absl::Status CreateReadOnlyBuffer(size_t size_in_bytes, CLContext* context,
+                                  Buffer* result) {
   return CreateBuffer(size_in_bytes, true, nullptr, context, result);
 }
 
-Status CreateReadOnlyBuffer(size_t size_in_bytes, const void* data,
-                            CLContext* context, Buffer* result) {
+absl::Status CreateReadOnlyBuffer(size_t size_in_bytes, const void* data,
+                                  CLContext* context, Buffer* result) {
   return CreateBuffer(size_in_bytes, true, data, context, result);
 }
 
-Status CreateReadWriteBuffer(size_t size_in_bytes, CLContext* context,
-                             Buffer* result) {
+absl::Status CreateReadWriteBuffer(size_t size_in_bytes, CLContext* context,
+                                   Buffer* result) {
   return CreateBuffer(size_in_bytes, false, nullptr, context, result);
 }
 
diff --git a/tensorflow/lite/delegates/gpu/cl/buffer.h b/tensorflow/lite/delegates/gpu/cl/buffer.h
index 4282d9c0898..84c3292084b 100644
--- a/tensorflow/lite/delegates/gpu/cl/buffer.h
+++ b/tensorflow/lite/delegates/gpu/cl/buffer.h
@@ -51,11 +51,11 @@ class Buffer {
   // Writes data to a buffer. Data should point to a region that has the
   // exact size in bytes of size_in_bytes (the constructor parameter).
   template <typename T>
-  Status WriteData(CLCommandQueue* queue, const absl::Span<T> data);
+  absl::Status WriteData(CLCommandQueue* queue, const absl::Span<T> data);
 
   // Reads data from Buffer into CPU memory.
   template <typename T>
-  Status ReadData(CLCommandQueue* queue, std::vector<T>* result) const;
+  absl::Status ReadData(CLCommandQueue* queue, std::vector<T>* result) const;
 
  private:
   void Release();
@@ -64,29 +64,31 @@ class Buffer {
   size_t size_;
 };
 
-Status CreateReadOnlyBuffer(size_t size_in_bytes, CLContext* context,
-                            Buffer* result);
+absl::Status CreateReadOnlyBuffer(size_t size_in_bytes, CLContext* context,
+                                  Buffer* result);
 
-Status CreateReadOnlyBuffer(size_t size_in_bytes, const void* data,
-                            CLContext* context, Buffer* result);
+absl::Status CreateReadOnlyBuffer(size_t size_in_bytes, const void* data,
+                                  CLContext* context, Buffer* result);
 
-Status CreateReadWriteBuffer(size_t size_in_bytes, CLContext* context,
-                             Buffer* result);
+absl::Status CreateReadWriteBuffer(size_t size_in_bytes, CLContext* context,
+                                   Buffer* result);
 
 template <typename T>
-Status Buffer::WriteData(CLCommandQueue* queue, const absl::Span<T> data) {
+absl::Status Buffer::WriteData(CLCommandQueue* queue,
+                               const absl::Span<T> data) {
   if (size_ != sizeof(T) * data.size()) {
-    return InvalidArgumentError(
+    return absl::InvalidArgumentError(
         "absl::Span<T> data size is different from buffer allocated size.");
   }
   RETURN_IF_ERROR(queue->EnqueueWriteBuffer(buffer_, size_, data.data()));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <typename T>
-Status Buffer::ReadData(CLCommandQueue* queue, std::vector<T>* result) const {
+absl::Status Buffer::ReadData(CLCommandQueue* queue,
+                              std::vector<T>* result) const {
   if (size_ % sizeof(T) != 0) {
-    return UnknownError("Wrong element size(typename T is not correct?");
+    return absl::UnknownError("Wrong element size(typename T is not correct?");
   }
 
   const int elements_count = size_ / sizeof(T);
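
The templated WriteData/ReadData above validate sizes and now report mismatches
as absl::Status. A round-trip sketch, assuming an already initialized CLContext
and CLCommandQueue:

    #include <vector>

    #include "absl/types/span.h"
    #include "tensorflow/lite/delegates/gpu/cl/buffer.h"

    absl::Status RoundTrip(tflite::gpu::cl::CLContext* context,
                           tflite::gpu::cl::CLCommandQueue* queue) {
      std::vector<float> src = {1.0f, 2.0f, 3.0f, 4.0f};
      tflite::gpu::cl::Buffer buffer;
      RETURN_IF_ERROR(tflite::gpu::cl::CreateReadWriteBuffer(
          src.size() * sizeof(float), context, &buffer));
      // The span size must match the allocated size or WriteData fails.
      RETURN_IF_ERROR(buffer.WriteData(queue, absl::MakeConstSpan(src)));
      std::vector<float> dst;
      // ReadData rejects buffers whose size is not a multiple of sizeof(T).
      RETURN_IF_ERROR(buffer.ReadData(queue, &dst));
      return absl::OkStatus();
    }
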
diff --git a/tensorflow/lite/delegates/gpu/cl/cl_command_queue.cc b/tensorflow/lite/delegates/gpu/cl/cl_command_queue.cc
index 328cdaf0a6e..7b74840c5e6 100644
--- a/tensorflow/lite/delegates/gpu/cl/cl_command_queue.cc
+++ b/tensorflow/lite/delegates/gpu/cl/cl_command_queue.cc
@@ -56,8 +56,9 @@ void CLCommandQueue::Release() {
   }
 }
 
-Status CLCommandQueue::DispatchImplicit(const CLKernel& kernel, int3 grid,
-                                        int3 work_group_size, CLEvent* event) {
+absl::Status CLCommandQueue::DispatchImplicit(const CLKernel& kernel, int3 grid,
+                                              int3 work_group_size,
+                                              CLEvent* event) {
   std::vector<size_t> local(3);
   std::vector<size_t> global(3);
   for (int i = 0; i < 3; ++i) {
@@ -72,30 +73,31 @@ Status CLCommandQueue::DispatchImplicit(const CLKernel& kernel, int3 grid,
     *event = CLEvent(resulting_event);
   }
   if (error_code != CL_SUCCESS) {
-    return UnknownError(absl::StrCat("Failed to clEnqueueNDRangeKernel - ",
-                                     CLErrorCodeToString(error_code)));
+    return absl::UnknownError(
+        absl::StrCat("Failed to clEnqueueNDRangeKernel - ",
+                     CLErrorCodeToString(error_code)));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CLCommandQueue::DispatchImplicit(const CLKernel& kernel, int3 grid,
-                                        int3 work_group_size) {
+absl::Status CLCommandQueue::DispatchImplicit(const CLKernel& kernel, int3 grid,
+                                              int3 work_group_size) {
   return DispatchImplicit(kernel, grid, work_group_size, nullptr);
 }
 
-Status CLCommandQueue::EnqueueEvent(CLEvent* event) {
+absl::Status CLCommandQueue::EnqueueEvent(CLEvent* event) {
   cl_event resulting_event;
   const int error_code = clEnqueueMarker(queue_, &resulting_event);
   *event = CLEvent(resulting_event);
   if (error_code != CL_SUCCESS) {
-    return UnknownError(absl::StrCat("Failed to clEnqueueMarker - ",
-                                     CLErrorCodeToString(error_code)));
+    return absl::UnknownError(absl::StrCat("Failed to clEnqueueMarker - ",
+                                           CLErrorCodeToString(error_code)));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CLCommandQueue::EnqueueWriteImage(cl_mem memory, int3 region,
-                                         const void* data) {
+absl::Status CLCommandQueue::EnqueueWriteImage(cl_mem memory, int3 region,
+                                               const void* data) {
   const size_t origin[] = {0, 0, 0};
   const size_t r[] = {static_cast<size_t>(region.x),
                       static_cast<size_t>(region.y),
@@ -103,16 +105,16 @@ Status CLCommandQueue::EnqueueWriteImage(cl_mem memory, int3 region,
   auto error_code = clEnqueueWriteImage(queue_, memory, CL_TRUE, origin, r, 0,
                                         0, data, 0, nullptr, nullptr);
   if (error_code != CL_SUCCESS) {
-    return UnknownError(
+    return absl::UnknownError(
         absl::StrCat("Failed to upload data to GPU (clEnqueueWriteImage) - ",
                      CLErrorCodeToString(error_code)));
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CLCommandQueue::EnqueueReadImage(cl_mem memory, int3 region,
-                                        void* data) {
+absl::Status CLCommandQueue::EnqueueReadImage(cl_mem memory, int3 region,
+                                              void* data) {
   const size_t origin[] = {0, 0, 0};
   const size_t r[] = {static_cast<size_t>(region.x),
                       static_cast<size_t>(region.y),
@@ -120,45 +122,47 @@ Status CLCommandQueue::EnqueueReadImage(cl_mem memory, int3 region,
   auto error_code = clEnqueueReadImage(queue_, memory, CL_TRUE, origin, r, 0, 0,
                                        data, 0, nullptr, nullptr);
   if (error_code != CL_SUCCESS) {
-    return UnknownError(
+    return absl::UnknownError(
         absl::StrCat("Failed to read data from GPU (clEnqueueReadImage) - ",
                      CLErrorCodeToString(error_code)));
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CLCommandQueue::EnqueueWriteBuffer(cl_mem memory, size_t size_in_bytes,
-                                          const void* data) {
+absl::Status CLCommandQueue::EnqueueWriteBuffer(cl_mem memory,
+                                                size_t size_in_bytes,
+                                                const void* data) {
   auto error_code = clEnqueueWriteBuffer(
       queue_, memory, CL_TRUE, 0, size_in_bytes, data, 0, nullptr, nullptr);
   if (error_code != CL_SUCCESS) {
-    return UnknownError(
+    return absl::UnknownError(
         absl::StrCat("Failed to upload data to GPU (clEnqueueWriteBuffer) - ",
                      CLErrorCodeToString(error_code)));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CLCommandQueue::EnqueueReadBuffer(cl_mem memory, size_t size_in_bytes,
-                                         void* data) {
+absl::Status CLCommandQueue::EnqueueReadBuffer(cl_mem memory,
+                                               size_t size_in_bytes,
+                                               void* data) {
   auto error_code = clEnqueueReadBuffer(
       queue_, memory, CL_TRUE, 0, size_in_bytes, data, 0, nullptr, nullptr);
   if (error_code != CL_SUCCESS) {
-    return UnknownError(
+    return absl::UnknownError(
         absl::StrCat("Failed to read data from GPU (clEnqueueReadBuffer) - ",
                      CLErrorCodeToString(error_code)));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CLCommandQueue::WaitForCompletion() {
+absl::Status CLCommandQueue::WaitForCompletion() {
   auto error_code = clFinish(queue_);
   if (error_code != CL_SUCCESS) {
-    return UnknownError(
+    return absl::UnknownError(
         absl::StrCat("Failed to clFinish - ", CLErrorCodeToString(error_code)));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 ProfilingCommandQueue::ProfilingCommandQueue(cl_command_queue queue)
@@ -187,14 +191,14 @@ void ProfilingCommandQueue::SetEventsLabel(const std::string& name) {
 
 void ProfilingCommandQueue::ResetMeasurements() { events_.clear(); }
 
-Status ProfilingCommandQueue::DispatchImplicit(const CLKernel& kernel,
-                                               int3 grid,
-                                               int3 work_group_size) {
+absl::Status ProfilingCommandQueue::DispatchImplicit(const CLKernel& kernel,
+                                                     int3 grid,
+                                                     int3 work_group_size) {
   events_.push_back(CLEvent());
   RETURN_IF_ERROR(CLCommandQueue::DispatchImplicit(
       kernel, grid, work_group_size, &events_[events_.size() - 1]));
   events_.back().SetName(current_label_);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 ProfilingInfo ProfilingCommandQueue::GetProfilingInfo() const {
@@ -208,7 +212,7 @@ ProfilingInfo ProfilingCommandQueue::GetProfilingInfo() const {
   return result;
 }
 
-Status ProfilingCommandQueue::GetBestWorkGroupIndex(
+absl::Status ProfilingCommandQueue::GetBestWorkGroupIndex(
     const CLKernel& kernel, const DeviceInfo& device_info, const int3& grid,
     const std::vector<int3>& work_group_sizes, int* index) {
   // Some Adreno 3xx can have wrong numbers for some events
@@ -268,20 +272,22 @@ Status ProfilingCommandQueue::GetBestWorkGroupIndex(
 
   *index = minimum_index;
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CreateCLCommandQueue(const CLDevice& device, const CLContext& context,
-                            CLCommandQueue* result) {
+absl::Status CreateCLCommandQueue(const CLDevice& device,
+                                  const CLContext& context,
+                                  CLCommandQueue* result) {
   int error_code;
   cl_command_queue queue =
       clCreateCommandQueue(context.context(), device.id(), 0, &error_code);
   if (!queue) {
-    return UnknownError(absl::StrCat("Failed to create a command queue - ",
-                                     CLErrorCodeToString(error_code)));
+    return absl::UnknownError(
+        absl::StrCat("Failed to create a command queue - ",
+                     CLErrorCodeToString(error_code)));
   }
   *result = CLCommandQueue(queue, true);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 double ProfilingCommandQueue::GetQueueExecutionTimeMs() const {
@@ -300,19 +306,20 @@ double ProfilingCommandQueue::GetSumOfEventsTimeMs() const {
   return sum;
 }
 
-Status CreateProfilingCommandQueue(const CLDevice& device,
-                                   const CLContext& context,
-                                   ProfilingCommandQueue* result) {
+absl::Status CreateProfilingCommandQueue(const CLDevice& device,
+                                         const CLContext& context,
+                                         ProfilingCommandQueue* result) {
   int error_code;
   cl_command_queue queue = clCreateCommandQueue(
       context.context(), device.id(), CL_QUEUE_PROFILING_ENABLE, &error_code);
   if (!queue) {
-    return UnknownError(absl::StrCat("Failed to create a command queue - ",
-                                     CLErrorCodeToString(error_code)));
+    return absl::UnknownError(
+        absl::StrCat("Failed to create a command queue - ",
+                     CLErrorCodeToString(error_code)));
   }
 
   *result = ProfilingCommandQueue(queue);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 absl::Duration ProfilingInfo::GetTotalTime() const {
diff --git a/tensorflow/lite/delegates/gpu/cl/cl_command_queue.h b/tensorflow/lite/delegates/gpu/cl/cl_command_queue.h
index 84ffeca67eb..178e3b21a1e 100644
--- a/tensorflow/lite/delegates/gpu/cl/cl_command_queue.h
+++ b/tensorflow/lite/delegates/gpu/cl/cl_command_queue.h
@@ -74,22 +74,23 @@ class CLCommandQueue {
 
   cl_command_queue queue() const { return queue_; }
 
-  virtual Status DispatchImplicit(const CLKernel& kernel, int3 grid,
-                                  int3 work_group_size);
+  virtual absl::Status DispatchImplicit(const CLKernel& kernel, int3 grid,
+                                        int3 work_group_size);
 
-  Status EnqueueEvent(CLEvent* event);
+  absl::Status EnqueueEvent(CLEvent* event);
 
-  Status DispatchImplicit(const CLKernel& kernel, int3 grid,
-                          int3 work_group_size, CLEvent* event);
+  absl::Status DispatchImplicit(const CLKernel& kernel, int3 grid,
+                                int3 work_group_size, CLEvent* event);
 
-  Status EnqueueWriteImage(cl_mem memory, int3 region, const void* data);
-  Status EnqueueReadImage(cl_mem memory, int3 region, void* data);
+  absl::Status EnqueueWriteImage(cl_mem memory, int3 region, const void* data);
+  absl::Status EnqueueReadImage(cl_mem memory, int3 region, void* data);
 
-  Status EnqueueWriteBuffer(cl_mem memory, size_t size_in_bytes,
-                            const void* data);
-  Status EnqueueReadBuffer(cl_mem memory, size_t size_in_bytes, void* data);
+  absl::Status EnqueueWriteBuffer(cl_mem memory, size_t size_in_bytes,
+                                  const void* data);
+  absl::Status EnqueueReadBuffer(cl_mem memory, size_t size_in_bytes,
+                                 void* data);
 
-  Status WaitForCompletion();
+  absl::Status WaitForCompletion();
 
  protected:
   void Release();
@@ -109,14 +110,15 @@ class ProfilingCommandQueue : public CLCommandQueue {
   ProfilingCommandQueue(const ProfilingCommandQueue&) = delete;
   ProfilingCommandQueue& operator=(const ProfilingCommandQueue&) = delete;
 
-  Status DispatchImplicit(const CLKernel& kernel, int3 grid,
-                          int3 work_group_size) override;
+  absl::Status DispatchImplicit(const CLKernel& kernel, int3 grid,
+                                int3 work_group_size) override;
 
   // Writes the index of the fastest work_group among work_group_sizes.
-  Status GetBestWorkGroupIndex(const CLKernel& kernel,
-                               const DeviceInfo& device_info, const int3& grid,
-                               const std::vector<int3>& work_group_sizes,
-                               int* index);
+  absl::Status GetBestWorkGroupIndex(const CLKernel& kernel,
+                                     const DeviceInfo& device_info,
+                                     const int3& grid,
+                                     const std::vector<int3>& work_group_sizes,
+                                     int* index);
 
   // Call ResetMeasurements() to start a new series of measurements.
   void ResetMeasurements();
@@ -139,12 +141,13 @@ class ProfilingCommandQueue : public CLCommandQueue {
   std::string current_label_;
 };
 
-Status CreateCLCommandQueue(const CLDevice& device, const CLContext& context,
-                            CLCommandQueue* result);
+absl::Status CreateCLCommandQueue(const CLDevice& device,
+                                  const CLContext& context,
+                                  CLCommandQueue* result);
 
-Status CreateProfilingCommandQueue(const CLDevice& device,
-                                   const CLContext& context,
-                                   ProfilingCommandQueue* result);
+absl::Status CreateProfilingCommandQueue(const CLDevice& device,
+                                         const CLContext& context,
+                                         ProfilingCommandQueue* result);
 
 }  // namespace cl
 }  // namespace gpu
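
DispatchImplicit takes the overall grid plus a work-group size and derives the
global work size internally (see the implementation above). A dispatch-and-wait
sketch; the grid and work-group sizes are arbitrary illustrative values:

    #include "tensorflow/lite/delegates/gpu/cl/cl_command_queue.h"

    absl::Status DispatchAndWait(tflite::gpu::cl::CLCommandQueue* queue,
                                 const tflite::gpu::cl::CLKernel& kernel) {
      const tflite::gpu::int3 grid(128, 128, 1);
      const tflite::gpu::int3 work_group_size(8, 8, 1);
      RETURN_IF_ERROR(queue->DispatchImplicit(kernel, grid, work_group_size));
      return queue->WaitForCompletion();  // wraps clFinish
    }
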
diff --git a/tensorflow/lite/delegates/gpu/cl/cl_context.cc b/tensorflow/lite/delegates/gpu/cl/cl_context.cc
index e9e0ddf724b..e697c78b692 100644
--- a/tensorflow/lite/delegates/gpu/cl/cl_context.cc
+++ b/tensorflow/lite/delegates/gpu/cl/cl_context.cc
@@ -43,19 +43,21 @@ std::vector<cl_image_format> GetSupportedImage2DFormats(cl_context context,
   return result;
 }
 
-Status CreateCLContext(const CLDevice& device,
-                       cl_context_properties* properties, CLContext* result) {
+absl::Status CreateCLContext(const CLDevice& device,
+                             cl_context_properties* properties,
+                             CLContext* result) {
   int error_code;
   cl_device_id device_id = device.id();
   cl_context context =
       clCreateContext(properties, 1, &device_id, nullptr, nullptr, &error_code);
   if (!context) {
-    return UnknownError(absl::StrCat("Failed to create a compute context - ",
-                                     CLErrorCodeToString(error_code)));
+    return absl::UnknownError(
+        absl::StrCat("Failed to create a compute context - ",
+                     CLErrorCodeToString(error_code)));
   }
 
   *result = CLContext(context, true);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace
@@ -99,15 +101,16 @@ bool CLContext::IsFloatTexture2DSupported(int num_channels, DataType data_type,
   return false;
 }
 
-Status CreateCLContext(const CLDevice& device, CLContext* result) {
+absl::Status CreateCLContext(const CLDevice& device, CLContext* result) {
   return CreateCLContext(device, nullptr, result);
 }
 
-Status CreateCLGLContext(const CLDevice& device,
-                         cl_context_properties egl_context,
-                         cl_context_properties egl_display, CLContext* result) {
+absl::Status CreateCLGLContext(const CLDevice& device,
+                               cl_context_properties egl_context,
+                               cl_context_properties egl_display,
+                               CLContext* result) {
   if (!device.SupportsExtension("cl_khr_gl_sharing")) {
-    return UnavailableError("Device doesn't support CL-GL sharing.");
+    return absl::UnavailableError("Device doesn't support CL-GL sharing.");
   }
   cl_context_properties platform =
       reinterpret_cast<cl_context_properties>(device.platform());
diff --git a/tensorflow/lite/delegates/gpu/cl/cl_context.h b/tensorflow/lite/delegates/gpu/cl/cl_context.h
index 20ec35f2b60..11922bd3678 100644
--- a/tensorflow/lite/delegates/gpu/cl/cl_context.h
+++ b/tensorflow/lite/delegates/gpu/cl/cl_context.h
@@ -51,10 +51,11 @@ class CLContext {
   bool has_ownership_ = false;
 };
 
-Status CreateCLContext(const CLDevice& device, CLContext* result);
-Status CreateCLGLContext(const CLDevice& device,
-                         cl_context_properties egl_context,
-                         cl_context_properties egl_display, CLContext* result);
+absl::Status CreateCLContext(const CLDevice& device, CLContext* result);
+absl::Status CreateCLGLContext(const CLDevice& device,
+                               cl_context_properties egl_context,
+                               cl_context_properties egl_display,
+                               CLContext* result);
 
 }  // namespace cl
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/cl/cl_device.cc b/tensorflow/lite/delegates/gpu/cl/cl_device.cc
index c47f86a2928..5380c9ee653 100644
--- a/tensorflow/lite/delegates/gpu/cl/cl_device.cc
+++ b/tensorflow/lite/delegates/gpu/cl/cl_device.cc
@@ -516,11 +516,11 @@ void CLDevice::DisableOneLayerTextureArray() {
   info_.adreno_info.support_one_layer_texture_array = false;
 }
 
-Status CreateDefaultGPUDevice(CLDevice* result) {
+absl::Status CreateDefaultGPUDevice(CLDevice* result) {
   cl_uint num_platforms;
   clGetPlatformIDs(0, nullptr, &num_platforms);
   if (num_platforms == 0) {
-    return UnknownError("No supported OpenCL platform.");
+    return absl::UnknownError("No supported OpenCL platform.");
   }
   std::vector<cl_platform_id> platforms(num_platforms);
   clGetPlatformIDs(num_platforms, platforms.data(), nullptr);
@@ -529,7 +529,7 @@ Status CreateDefaultGPUDevice(CLDevice* result) {
   cl_uint num_devices;
   clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_GPU, 0, nullptr, &num_devices);
   if (num_devices == 0) {
-    return UnknownError("No GPU on current platform.");
+    return absl::UnknownError("No GPU on current platform.");
   }
 
   std::vector<cl_device_id> devices(num_devices);
@@ -537,7 +537,7 @@ Status CreateDefaultGPUDevice(CLDevice* result) {
                  nullptr);
 
   *result = CLDevice(devices[0], platform_id);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
diff --git a/tensorflow/lite/delegates/gpu/cl/cl_device.h b/tensorflow/lite/delegates/gpu/cl/cl_device.h
index 7b3493e3faa..cbc95d485b9 100644
--- a/tensorflow/lite/delegates/gpu/cl/cl_device.h
+++ b/tensorflow/lite/delegates/gpu/cl/cl_device.h
@@ -191,7 +191,7 @@ class CLDevice {
   DeviceInfo info_;
 };
 
-Status CreateDefaultGPUDevice(CLDevice* result);
+absl::Status CreateDefaultGPUDevice(CLDevice* result);
 
 template <typename T>
 T GetDeviceInfo(cl_device_id id, cl_device_info info) {
@@ -204,12 +204,12 @@ T GetDeviceInfo(cl_device_id id, cl_device_info info) {
 }
 
 template <typename T>
-Status GetDeviceInfo(cl_device_id id, cl_device_info info, T* result) {
+absl::Status GetDeviceInfo(cl_device_id id, cl_device_info info, T* result) {
   cl_int error = clGetDeviceInfo(id, info, sizeof(T), result, nullptr);
   if (error != CL_SUCCESS) {
-    return InvalidArgumentError(CLErrorCodeToString(error));
+    return absl::InvalidArgumentError(CLErrorCodeToString(error));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
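
The status-returning GetDeviceInfo overload lets callers observe
clGetDeviceInfo failures, unlike the value-returning variant above. A sketch
querying a standard device property:

    #include "tensorflow/lite/delegates/gpu/cl/cl_device.h"

    absl::Status QueryComputeUnits(cl_device_id id, cl_uint* compute_units) {
      return tflite::gpu::cl::GetDeviceInfo<cl_uint>(
          id, CL_DEVICE_MAX_COMPUTE_UNITS, compute_units);
    }
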
diff --git a/tensorflow/lite/delegates/gpu/cl/cl_errors.h b/tensorflow/lite/delegates/gpu/cl/cl_errors.h
index 8c16b2696d7..fb59766bd18 100644
--- a/tensorflow/lite/delegates/gpu/cl/cl_errors.h
+++ b/tensorflow/lite/delegates/gpu/cl/cl_errors.h
@@ -27,11 +27,12 @@ namespace cl {
 
 // @return OK status if error_code is CL_SUCCESS; otherwise translates the
 // error code into an error message.
-inline Status GetOpenCLError(cl_int error_code) {
+inline absl::Status GetOpenCLError(cl_int error_code) {
   if (error_code == CL_SUCCESS) {
-    return OkStatus();
+    return absl::OkStatus();
   }
-  return InternalError("OpenCL error: " + CLErrorCodeToString(error_code));
+  return absl::InternalError("OpenCL error: " +
+                             CLErrorCodeToString(error_code));
 }
 
 }  // namespace cl
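
GetOpenCLError is the generic bridge from raw cl_int codes to absl::Status and
can wrap any OpenCL call directly. For example:

    #include "tensorflow/lite/delegates/gpu/cl/cl_errors.h"

    absl::Status Flush(cl_command_queue queue) {
      // clFlush returns a cl_int; CL_SUCCESS maps to absl::OkStatus().
      return tflite::gpu::cl::GetOpenCLError(clFlush(queue));
    }
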
diff --git a/tensorflow/lite/delegates/gpu/cl/cl_kernel.cc b/tensorflow/lite/delegates/gpu/cl/cl_kernel.cc
index 27d4d36c68a..04bf95d870a 100644
--- a/tensorflow/lite/delegates/gpu/cl/cl_kernel.cc
+++ b/tensorflow/lite/delegates/gpu/cl/cl_kernel.cc
@@ -25,34 +25,34 @@ namespace gpu {
 namespace cl {
 namespace {
 
-Status GetKernelMaxWorkGroupSize(cl_kernel kernel, cl_device_id device_id,
-                                 int* result) {
+absl::Status GetKernelMaxWorkGroupSize(cl_kernel kernel, cl_device_id device_id,
+                                       int* result) {
   size_t max_work_group_size;
   cl_int error_code =
       clGetKernelWorkGroupInfo(kernel, device_id, CL_KERNEL_WORK_GROUP_SIZE,
                                sizeof(size_t), &max_work_group_size, nullptr);
   if (error_code != CL_SUCCESS) {
-    return UnknownError(
+    return absl::UnknownError(
         absl::StrCat("Failed to get info CL_KERNEL_WORK_GROUP_SIZE ",
                      CLErrorCodeToString(error_code)));
   }
   *result = static_cast<int>(max_work_group_size);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status GetKernelPrivateMemorySize(cl_kernel kernel, cl_device_id device_id,
-                                  int* result) {
+absl::Status GetKernelPrivateMemorySize(cl_kernel kernel,
+                                        cl_device_id device_id, int* result) {
   cl_ulong private_mem_size;
   cl_int error_code =
       clGetKernelWorkGroupInfo(kernel, device_id, CL_KERNEL_PRIVATE_MEM_SIZE,
                                sizeof(cl_ulong), &private_mem_size, nullptr);
   if (error_code != CL_SUCCESS) {
-    return UnknownError(
+    return absl::UnknownError(
         absl::StrCat("Failed to get info CL_KERNEL_PRIVATE_MEM_SIZE ",
                      CLErrorCodeToString(error_code)));
   }
   *result = static_cast<int>(private_mem_size);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace
@@ -82,17 +82,17 @@ CLKernel& CLKernel::operator=(CLKernel&& kernel) {
 
 CLKernel::~CLKernel() { Release(); }
 
-Status CLKernel::ReInit() const {
+absl::Status CLKernel::ReInit() const {
   clReleaseKernel(kernel_);
   cl_kernel* kern_ptr = const_cast<cl_kernel*>(&kernel_);
   int error_code;
   *kern_ptr = clCreateKernel(program_, function_name_.c_str(), &error_code);
   if (!kernel_ || error_code != CL_SUCCESS) {
     *kern_ptr = nullptr;
-    return UnknownError(absl::StrCat("Failed to create ", function_name_,
-                                     CLErrorCodeToString(error_code)));
+    return absl::UnknownError(absl::StrCat("Failed to create ", function_name_,
+                                           CLErrorCodeToString(error_code)));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 void CLKernel::Release() {
@@ -103,16 +103,16 @@ void CLKernel::Release() {
   }
 }
 
-Status CLKernel::CreateFromProgram(const CLProgram& program,
-                                   const std::string& function_name) {
+absl::Status CLKernel::CreateFromProgram(const CLProgram& program,
+                                         const std::string& function_name) {
   int error_code;
   function_name_ = function_name;
   kernel_ =
       clCreateKernel(program.program(), function_name.c_str(), &error_code);
   if (!kernel_ || error_code != CL_SUCCESS) {
     kernel_ = nullptr;
-    return UnknownError(absl::StrCat("Failed to create ", function_name,
-                                     CLErrorCodeToString(error_code)));
+    return absl::UnknownError(absl::StrCat("Failed to create ", function_name,
+                                           CLErrorCodeToString(error_code)));
   }
 
   program_ = program.program();
@@ -122,64 +122,64 @@ Status CLKernel::CreateFromProgram(const CLProgram& program,
                                              &private_memory_size_));
   RETURN_IF_ERROR(GetKernelMaxWorkGroupSize(kernel_, program.GetDeviceId(),
                                             &max_work_group_size_));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CLKernel::SetMemory(int index, cl_mem memory) {
+absl::Status CLKernel::SetMemory(int index, cl_mem memory) {
   return SetBytes(index, &memory, sizeof(cl_mem));
 }
 
-Status CLKernel::SetMemoryAuto(cl_mem memory) {
+absl::Status CLKernel::SetMemoryAuto(cl_mem memory) {
   return SetBytesAuto(&memory, sizeof(cl_mem));
 }
 
-Status CLKernel::SetBytes(int index, const void* ptr, int length) const {
+absl::Status CLKernel::SetBytes(int index, const void* ptr, int length) const {
   const int error_code = clSetKernelArg(kernel_, index, length, ptr);
   if (error_code != CL_SUCCESS) {
-    return UnknownError(absl::StrCat("Failed to set kernel arguments - ",
-                                     CLErrorCodeToString(error_code)));
+    return absl::UnknownError(absl::StrCat("Failed to set kernel arguments - ",
+                                           CLErrorCodeToString(error_code)));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CLKernel::SetBytesAuto(const void* ptr, int length) {
+absl::Status CLKernel::SetBytesAuto(const void* ptr, int length) {
   const int error_code = clSetKernelArg(kernel_, binding_counter_, length, ptr);
   if (error_code != CL_SUCCESS) {
-    return UnknownError(absl::StrCat("Failed to set kernel arguments - ",
-                                     CLErrorCodeToString(error_code),
-                                     "(at index - ", binding_counter_, ")"));
+    return absl::UnknownError(absl::StrCat(
+        "Failed to set kernel arguments - ", CLErrorCodeToString(error_code),
+        "(at index - ", binding_counter_, ")"));
   }
   binding_counter_++;
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <>
-Status CLKernel::SetBytes<FLT>(int index, const FLT& value) const {
+absl::Status CLKernel::SetBytes<FLT>(int index, const FLT& value) const {
   return SetBytes(index, value.GetData(), value.GetSize());
 }
 
 template <>
-Status CLKernel::SetBytes<FLT2>(int index, const FLT2& value) const {
+absl::Status CLKernel::SetBytes<FLT2>(int index, const FLT2& value) const {
   return SetBytes(index, value.GetData(), value.GetSize());
 }
 
 template <>
-Status CLKernel::SetBytes<FLT4>(int index, const FLT4& value) const {
+absl::Status CLKernel::SetBytes<FLT4>(int index, const FLT4& value) const {
   return SetBytes(index, value.GetData(), value.GetSize());
 }
 
 template <>
-Status CLKernel::SetBytesAuto<FLT>(const FLT& value) {
+absl::Status CLKernel::SetBytesAuto<FLT>(const FLT& value) {
   return SetBytesAuto(value.GetData(), value.GetSize());
 }
 
 template <>
-Status CLKernel::SetBytesAuto<FLT2>(const FLT2& value) {
+absl::Status CLKernel::SetBytesAuto<FLT2>(const FLT2& value) {
   return SetBytesAuto(value.GetData(), value.GetSize());
 }
 
 template <>
-Status CLKernel::SetBytesAuto<FLT4>(const FLT4& value) {
+absl::Status CLKernel::SetBytesAuto<FLT4>(const FLT4& value) {
   return SetBytesAuto(value.GetData(), value.GetSize());
 }
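
A minimal sketch of how the auto-binding setters above fit together, inside a function that returns absl::Status; `program`, `src_mem`, and `grid_size` are placeholders, not names from this change:

```c++
CLKernel kernel;
RETURN_IF_ERROR(kernel.CreateFromProgram(program, "main_function"));
kernel.ResetBindingCounter();                     // next auto call binds arg 0
RETURN_IF_ERROR(kernel.SetMemoryAuto(src_mem));   // arg 0: a cl_mem
RETURN_IF_ERROR(kernel.SetBytesAuto(grid_size));  // arg 1: a POD value, e.g. int3
```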
 
diff --git a/tensorflow/lite/delegates/gpu/cl/cl_kernel.h b/tensorflow/lite/delegates/gpu/cl/cl_kernel.h
index 3b63e43c967..b575684d2b4 100644
--- a/tensorflow/lite/delegates/gpu/cl/cl_kernel.h
+++ b/tensorflow/lite/delegates/gpu/cl/cl_kernel.h
@@ -48,17 +48,17 @@ class CLKernel {
 
   cl_kernel kernel() const { return kernel_; }
 
-  Status CreateFromProgram(const CLProgram& program,
-                           const std::string& function_name);
+  absl::Status CreateFromProgram(const CLProgram& program,
+                                 const std::string& function_name);
 
-  Status SetMemory(int index, cl_mem memory);
-  Status SetMemoryAuto(cl_mem memory);
+  absl::Status SetMemory(int index, cl_mem memory);
+  absl::Status SetMemoryAuto(cl_mem memory);
   template <typename T>
-  Status SetBytes(int index, const T& value) const {
+  absl::Status SetBytes(int index, const T& value) const {
     return SetBytes(index, static_cast<const void*>(&value), sizeof(T));
   }
   template <typename T>
-  Status SetBytesAuto(const T& value) {
+  absl::Status SetBytesAuto(const T& value) {
     return SetBytesAuto(static_cast<const void*>(&value), sizeof(T));
   }
 
@@ -69,12 +69,12 @@ class CLKernel {
 
   // Do not use this function.
   // It is a workaround for a Mali memory leak.
-  Status ReInit() const;
+  absl::Status ReInit() const;
 
  private:
   void Release();
-  Status SetBytes(int index, const void* ptr, int length) const;
-  Status SetBytesAuto(const void* ptr, int length);
+  absl::Status SetBytes(int index, const void* ptr, int length) const;
+  absl::Status SetBytesAuto(const void* ptr, int length);
 
   int private_memory_size_;
   int max_work_group_size_;
@@ -87,22 +87,22 @@ class CLKernel {
 };
 
 template <>
-Status CLKernel::SetBytes<FLT>(int index, const FLT& value) const;
+absl::Status CLKernel::SetBytes<FLT>(int index, const FLT& value) const;
 
 template <>
-Status CLKernel::SetBytes<FLT2>(int index, const FLT2& value) const;
+absl::Status CLKernel::SetBytes<FLT2>(int index, const FLT2& value) const;
 
 template <>
-Status CLKernel::SetBytes<FLT4>(int index, const FLT4& value) const;
+absl::Status CLKernel::SetBytes<FLT4>(int index, const FLT4& value) const;
 
 template <>
-Status CLKernel::SetBytesAuto<FLT>(const FLT& value);
+absl::Status CLKernel::SetBytesAuto<FLT>(const FLT& value);
 
 template <>
-Status CLKernel::SetBytesAuto<FLT2>(const FLT2& value);
+absl::Status CLKernel::SetBytesAuto<FLT2>(const FLT2& value);
 
 template <>
-Status CLKernel::SetBytesAuto<FLT4>(const FLT4& value);
+absl::Status CLKernel::SetBytesAuto<FLT4>(const FLT4& value);
 
 }  // namespace cl
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/cl/cl_program.cc b/tensorflow/lite/delegates/gpu/cl/cl_program.cc
index 3592ad895ea..690bc598777 100644
--- a/tensorflow/lite/delegates/gpu/cl/cl_program.cc
+++ b/tensorflow/lite/delegates/gpu/cl/cl_program.cc
@@ -49,28 +49,29 @@ std::string GetProgramBuildInfo(cl_program program, cl_device_id id,
   return result;
 }
 
-Status GetBinarySize(cl_program program, size_t* binary_size) {
+absl::Status GetBinarySize(cl_program program, size_t* binary_size) {
   cl_int error_code = clGetProgramInfo(program, CL_PROGRAM_BINARY_SIZES,
                                        sizeof(size_t), binary_size, nullptr);
   if (error_code != CL_SUCCESS) {
-    return UnknownError(absl::StrCat("Failed to get program binary size - ",
-                                     CLErrorCodeToString(error_code)));
+    return absl::UnknownError(
+        absl::StrCat("Failed to get program binary size - ",
+                     CLErrorCodeToString(error_code)));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status BuildProgram(cl_program program, const CLDevice& device,
-                    const std::string& compiler_options) {
+absl::Status BuildProgram(cl_program program, const CLDevice& device,
+                          const std::string& compiler_options) {
   const int error_code = clBuildProgram(
       program, 0, nullptr, compiler_options.c_str(), nullptr, nullptr);
   if (error_code != CL_SUCCESS) {
-    return UnknownError(absl::StrCat(
+    return absl::UnknownError(absl::StrCat(
         "Failed to build program executable - ",
         CLErrorCodeToString(error_code),
         GetProgramBuildInfo(program, device.id(), CL_PROGRAM_BUILD_LOG)));
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 std::string CompilerOptionToString(const CLDevice& device,
@@ -133,7 +134,7 @@ void CLProgram::Release() {
   }
 }
 
-Status CLProgram::GetBinary(std::vector<uint8_t>* result) const {
+absl::Status CLProgram::GetBinary(std::vector<uint8_t>* result) const {
   size_t binary_size;
   RETURN_IF_ERROR(GetBinarySize(program_, &binary_size));
   result->resize(result->size() + binary_size);
@@ -141,35 +142,36 @@ Status CLProgram::GetBinary(std::vector<uint8_t>* result) const {
   cl_int error_code = clGetProgramInfo(program_, CL_PROGRAM_BINARIES,
                                        binary_size, &binary_ptr, nullptr);
   if (error_code != CL_SUCCESS) {
-    return UnknownError(absl::StrCat("Failed to get program binary - ",
-                                     CLErrorCodeToString(error_code)));
+    return absl::UnknownError(absl::StrCat("Failed to get program binary - ",
+                                           CLErrorCodeToString(error_code)));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CreateCLProgram(const std::string& code,
-                       const std::string& compiler_options,
-                       const CLContext& context, const CLDevice& device,
-                       CLProgram* result) {
+absl::Status CreateCLProgram(const std::string& code,
+                             const std::string& compiler_options,
+                             const CLContext& context, const CLDevice& device,
+                             CLProgram* result) {
   int error_code;
   const char* source = code.c_str();
 
   cl_program program = clCreateProgramWithSource(context.context(), 1, &source,
                                                  nullptr, &error_code);
   if (!program || error_code != CL_SUCCESS) {
-    return UnknownError(absl::StrCat("Failed to create compute program - ",
-                                     CLErrorCodeToString(error_code)));
+    return absl::UnknownError(
+        absl::StrCat("Failed to create compute program - ",
+                     CLErrorCodeToString(error_code)));
   }
 
   *result = CLProgram(program, device.id());
   RETURN_IF_ERROR(BuildProgram(program, device, compiler_options));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CreateCLProgramFromBinary(const CLContext& context,
-                                 const CLDevice& device,
-                                 absl::Span<const uint8_t> binary,
-                                 CLProgram* result) {
+absl::Status CreateCLProgramFromBinary(const CLContext& context,
+                                       const CLDevice& device,
+                                       absl::Span<const uint8_t> binary,
+                                       CLProgram* result) {
   cl_int binary_status;
   cl_int error_code;
   cl_device_id devices_list[] = {device.id()};
@@ -179,13 +181,13 @@ Status CreateCLProgramFromBinary(const CLContext& context,
       context.context(), 1, devices_list, &binary_size, &binary_pointer,
       &binary_status, &error_code);
   if (binary_status != CL_SUCCESS) {
-    return UnknownError(absl::StrCat(
+    return absl::UnknownError(absl::StrCat(
         "Something wrong with binary after clCreateProgramWithBinary - ",
         binary_status));
   }
   if (error_code != CL_SUCCESS) {
-    return UnknownError(absl::StrCat("Failed to create program - ",
-                                     CLErrorCodeToString(error_code)));
+    return absl::UnknownError(absl::StrCat("Failed to create program - ",
+                                           CLErrorCodeToString(error_code)));
   }
   *result = CLProgram(program, device.id());
   return BuildProgram(program, device, "");
diff --git a/tensorflow/lite/delegates/gpu/cl/cl_program.h b/tensorflow/lite/delegates/gpu/cl/cl_program.h
index b6deb3beb95..fb2a7edb9c1 100644
--- a/tensorflow/lite/delegates/gpu/cl/cl_program.h
+++ b/tensorflow/lite/delegates/gpu/cl/cl_program.h
@@ -68,7 +68,7 @@ class CLProgram {
   // was created using clCreateProgramWithBinary.
   cl_device_id GetDeviceId() const { return device_id_; }
 
-  Status GetBinary(std::vector<uint8_t>* result) const;
+  absl::Status GetBinary(std::vector<uint8_t>* result) const;
 
  private:
   void Release();
@@ -79,15 +79,15 @@ class CLProgram {
   cl_device_id device_id_ = nullptr;
 };
 
-Status CreateCLProgram(const std::string& code,
-                       const std::string& compiler_options,
-                       const CLContext& context, const CLDevice& device,
-                       CLProgram* result);
+absl::Status CreateCLProgram(const std::string& code,
+                             const std::string& compiler_options,
+                             const CLContext& context, const CLDevice& device,
+                             CLProgram* result);
 
-Status CreateCLProgramFromBinary(const CLContext& context,
-                                 const CLDevice& device,
-                                 absl::Span<const uint8_t> binary,
-                                 CLProgram* result);
+absl::Status CreateCLProgramFromBinary(const CLContext& context,
+                                       const CLDevice& device,
+                                       absl::Span<const uint8_t> binary,
+                                       CLProgram* result);
 
 }  // namespace cl
 }  // namespace gpu
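
These helpers round-trip a compiled program through its device binary, which is what the binary-cache path builds on. A hedged sketch, assuming a valid kernel source string `code`, `context`, and `device`:

```c++
CLProgram program;
RETURN_IF_ERROR(
    CreateCLProgram(code, /*compiler_options=*/"", context, device, &program));

std::vector<uint8_t> binary;
RETURN_IF_ERROR(program.GetBinary(&binary));  // serialize for caching

CLProgram restored;
RETURN_IF_ERROR(CreateCLProgramFromBinary(
    context, device, absl::MakeConstSpan(binary), &restored));
```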
diff --git a/tensorflow/lite/delegates/gpu/cl/egl_sync.cc b/tensorflow/lite/delegates/gpu/cl/egl_sync.cc
index 8493fbb049f..ddc373bce31 100644
--- a/tensorflow/lite/delegates/gpu/cl/egl_sync.cc
+++ b/tensorflow/lite/delegates/gpu/cl/egl_sync.cc
@@ -21,15 +21,15 @@ namespace tflite {
 namespace gpu {
 namespace cl {
 
-Status EglSync::NewFence(EGLDisplay display, EglSync* sync) {
+absl::Status EglSync::NewFence(EGLDisplay display, EglSync* sync) {
   EGLSyncKHR egl_sync;
   RETURN_IF_ERROR(TFLITE_GPU_CALL_EGL(eglCreateSyncKHR, &egl_sync, display,
                                       EGL_SYNC_FENCE_KHR, nullptr));
   if (egl_sync == EGL_NO_SYNC_KHR) {
-    return InternalError("Returned empty KHR EGL sync");
+    return absl::InternalError("Returned empty KHR EGL sync");
   }
   *sync = EglSync(display, egl_sync);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 EglSync& EglSync::operator=(EglSync&& sync) {
@@ -48,22 +48,23 @@ void EglSync::Invalidate() {
   }
 }
 
-Status EglSync::ServerWait() {
+absl::Status EglSync::ServerWait() {
   EGLint result;
   RETURN_IF_ERROR(
       TFLITE_GPU_CALL_EGL(eglWaitSyncKHR, &result, display_, sync_, 0));
-  return result == EGL_TRUE ? OkStatus() : InternalError("eglWaitSync failed");
+  return result == EGL_TRUE ? absl::OkStatus()
+                            : absl::InternalError("eglWaitSync failed");
 }
 
-Status EglSync::ClientWait() {
+absl::Status EglSync::ClientWait() {
   EGLint result;
   // TODO(akulik): make it an active wait for better performance
   RETURN_IF_ERROR(TFLITE_GPU_CALL_EGL(eglClientWaitSyncKHR, &result, display_,
                                       sync_, EGL_SYNC_FLUSH_COMMANDS_BIT_KHR,
                                       EGL_FOREVER_KHR));
   return result == EGL_CONDITION_SATISFIED_KHR
-             ? OkStatus()
-             : InternalError("eglClientWaitSync failed");
+             ? absl::OkStatus()
+             : absl::InternalError("eglClientWaitSync failed");
 }
 
 }  // namespace cl
diff --git a/tensorflow/lite/delegates/gpu/cl/egl_sync.h b/tensorflow/lite/delegates/gpu/cl/egl_sync.h
index 27a551c5d59..d0943a797ee 100644
--- a/tensorflow/lite/delegates/gpu/cl/egl_sync.h
+++ b/tensorflow/lite/delegates/gpu/cl/egl_sync.h
@@ -32,7 +32,7 @@ class EglSync {
   // flushed.
   //
   // Depends on EGL_KHR_fence_sync extension.
-  static Status NewFence(EGLDisplay display, EglSync* sync);
+  static absl::Status NewFence(EGLDisplay display, EglSync* sync);
 
   // Creates invalid object.
   EglSync() : EglSync(EGL_NO_DISPLAY, EGL_NO_SYNC_KHR) {}
@@ -50,10 +50,10 @@ class EglSync {
 
   // Causes GPU to block and wait until this sync has been signaled.
   // This call does not block and returns immediately.
-  Status ServerWait();
+  absl::Status ServerWait();
 
   // Causes CPU to block and wait until this sync has been signaled.
-  Status ClientWait();
+  absl::Status ClientWait();
 
   // Returns the EGLDisplay on which this instance was created.
   EGLDisplay display() const { return display_; }
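
A short sketch of the fence lifecycle this class wraps, assuming a valid `display`:

```c++
EglSync sync;
RETURN_IF_ERROR(EglSync::NewFence(display, &sync));  // insert fence into the GL stream
// GPU-side wait: enqueues the wait and returns immediately.
RETURN_IF_ERROR(sync.ServerWait());
// CPU-side wait: blocks the calling thread until the fence signals.
RETURN_IF_ERROR(sync.ClientWait());
```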
diff --git a/tensorflow/lite/delegates/gpu/cl/environment.cc b/tensorflow/lite/delegates/gpu/cl/environment.cc
index ca13e19f73f..01d034fb1f7 100644
--- a/tensorflow/lite/delegates/gpu/cl/environment.cc
+++ b/tensorflow/lite/delegates/gpu/cl/environment.cc
@@ -26,6 +26,7 @@ namespace tflite {
 namespace gpu {
 namespace cl {
 namespace {
+
 std::string GetKernelOneLayerTextureArray() {
   return R"(
 
@@ -43,12 +44,12 @@ __kernel void main_function(__write_only image2d_array_t dst) {
 // texture, we will get zeroes instead of actual values.
 // The same kernel will work if we use a texture array with more than one layer.
 // With the help of this code we can detect this bug.
-Status CheckKernelSupportOfOneLayerTextureArray(Environment* env,
-                                                bool* result) {
+absl::Status CheckKernelSupportOfOneLayerTextureArray(Environment* env,
+                                                      bool* result) {
   // No bug on Adreno 6xx
   if (env->device().GetInfo().adreno_info.gpu_version >= 600) {
     *result = true;
-    return OkStatus();
+    return absl::OkStatus();
   }
   CLKernel kernel;
   RETURN_IF_ERROR(env->program_cache()->GetOrCreateCLKernel(
@@ -75,12 +76,12 @@ Status CheckKernelSupportOfOneLayerTextureArray(Environment* env,
       break;
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CreateEnvironment(Environment* result, bool shared,
-                         cl_context_properties egl_context,
-                         cl_context_properties egl_display) {
+absl::Status CreateEnvironment(Environment* result, bool shared,
+                               cl_context_properties egl_context,
+                               cl_context_properties egl_display) {
   CLDevice gpu;
   RETURN_IF_ERROR(CreateDefaultGPUDevice(&gpu));
 
@@ -107,8 +108,9 @@ Status CreateEnvironment(Environment* result, bool shared,
     }
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
+
 }  // namespace
 
 Environment::Environment(CLDevice&& device, CLContext&& context,
@@ -137,7 +139,7 @@ Environment& Environment::operator=(Environment&& environment) {
   return *this;
 }
 
-Status Environment::Init() {
+absl::Status Environment::Init() {
   if (device().IsAdreno() && device().SupportsTextureArray()) {
     bool supports_one_layer;
     RETURN_IF_ERROR(
@@ -146,7 +148,7 @@ Status Environment::Init() {
       GetDevicePtr()->DisableOneLayerTextureArray();
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 void Environment::SetHighPerformance() const {
@@ -266,7 +268,7 @@ TensorStorageType GetStorageTypeWithMinimalMemoryConsumption(
   return TensorStorageType::BUFFER;
 }
 
-Status CreateEnvironment(Environment* result) {
+absl::Status CreateEnvironment(Environment* result) {
   CLDevice gpu;
   RETURN_IF_ERROR(CreateDefaultGPUDevice(&gpu));
 
diff --git a/tensorflow/lite/delegates/gpu/cl/environment.h b/tensorflow/lite/delegates/gpu/cl/environment.h
index 496d6957623..b40d22d3dd6 100644
--- a/tensorflow/lite/delegates/gpu/cl/environment.h
+++ b/tensorflow/lite/delegates/gpu/cl/environment.h
@@ -57,7 +57,7 @@ class Environment {
   std::vector<TensorStorageType> GetSupportedStorages() const;
   bool IsSupported(TensorStorageType storage_type) const;
 
-  Status Init();
+  absl::Status Init();
 
   void SetHighPerformance() const;
   void SetDefaultPerformance() const;
@@ -75,7 +75,7 @@ TensorStorageType GetFastestStorageType(const CLDevice& gpu);
 TensorStorageType GetStorageTypeWithMinimalMemoryConsumption(
     const CLDevice& gpu);
 
-Status CreateEnvironment(Environment* result);
+absl::Status CreateEnvironment(Environment* result);
 
 }  // namespace cl
 }  // namespace gpu
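
Typical bootstrap for these helpers, sketched under the assumption that Environment exposes its device through a `device()` accessor (the accessor is used internally above but is not shown in this hunk):

```c++
Environment env;
RETURN_IF_ERROR(CreateEnvironment(&env));
// Pick the storage layout this GPU handles best.
const TensorStorageType storage = GetFastestStorageType(env.device());
```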
diff --git a/tensorflow/lite/delegates/gpu/cl/gl_interop.cc b/tensorflow/lite/delegates/gpu/cl/gl_interop.cc
index f4db12bf133..648b772d827 100644
--- a/tensorflow/lite/delegates/gpu/cl/gl_interop.cc
+++ b/tensorflow/lite/delegates/gpu/cl/gl_interop.cc
@@ -41,10 +41,11 @@ PFNEGLCREATESYNCPROC g_eglCreateSync = nullptr;
 
 }  // namespace
 
-Status CreateEglSyncFromClEvent(cl_event event, EGLDisplay display,
-                                EglSync* sync) {
+absl::Status CreateEglSyncFromClEvent(cl_event event, EGLDisplay display,
+                                      EglSync* sync) {
   if (!IsEglSyncFromClEventSupported()) {
-    return UnimplementedError("CreateEglSyncFromClEvent is not supported");
+    return absl::UnimplementedError(
+        "CreateEglSyncFromClEvent is not supported");
   }
   EGLSync egl_sync;
   const EGLAttrib attributes[] = {EGL_CL_EVENT_HANDLE,
@@ -52,10 +53,10 @@ Status CreateEglSyncFromClEvent(cl_event event, EGLDisplay display,
   RETURN_IF_ERROR(TFLITE_GPU_CALL_EGL(g_eglCreateSync, &egl_sync, display,
                                       EGL_SYNC_CL_EVENT, attributes));
   if (egl_sync == EGL_NO_SYNC) {
-    return InternalError("Returned empty EGL sync");
+    return absl::InternalError("Returned empty EGL sync");
   }
   *sync = EglSync(display, egl_sync);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 bool IsEglSyncFromClEventSupported() {
@@ -73,52 +74,54 @@ bool IsEglSyncFromClEventSupported() {
   return supported;
 }
 
-Status CreateClEventFromEglSync(cl_context context, const EglSync& egl_sync,
-                                CLEvent* event) {
+absl::Status CreateClEventFromEglSync(cl_context context,
+                                      const EglSync& egl_sync, CLEvent* event) {
   cl_int error_code;
   cl_event new_event = clCreateEventFromEGLSyncKHR(
       context, egl_sync.sync(), egl_sync.display(), &error_code);
   if (error_code != CL_SUCCESS) {
-    return InternalError(
+    return absl::InternalError(
         absl::StrCat("Unable to create CL sync from EGL sync. ",
                      CLErrorCodeToString(error_code)));
   }
   *event = CLEvent(new_event);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 bool IsClEventFromEglSyncSupported(const CLDevice& device) {
   return device.SupportsExtension("cl_khr_egl_event");
 }
 
-Status CreateClMemoryFromGlBuffer(GLuint gl_ssbo_id, AccessType access_type,
-                                  CLContext* context, CLMemory* memory) {
+absl::Status CreateClMemoryFromGlBuffer(GLuint gl_ssbo_id,
+                                        AccessType access_type,
+                                        CLContext* context, CLMemory* memory) {
   cl_int error_code;
   auto mem = clCreateFromGLBuffer(context->context(), ToClMemFlags(access_type),
                                   gl_ssbo_id, &error_code);
   if (error_code != CL_SUCCESS) {
-    return InternalError(
+    return absl::InternalError(
         absl::StrCat("Unable to acquire CL buffer from GL buffer. ",
                      CLErrorCodeToString(error_code)));
   }
   *memory = CLMemory(mem, true);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CreateClMemoryFromGlTexture(GLenum texture_target, GLuint texture_id,
-                                   AccessType access_type, CLContext* context,
-                                   CLMemory* memory) {
+absl::Status CreateClMemoryFromGlTexture(GLenum texture_target,
+                                         GLuint texture_id,
+                                         AccessType access_type,
+                                         CLContext* context, CLMemory* memory) {
   cl_int error_code;
   auto mem =
       clCreateFromGLTexture(context->context(), ToClMemFlags(access_type),
                             texture_target, 0, texture_id, &error_code);
   if (error_code != CL_SUCCESS) {
-    return InternalError(
+    return absl::InternalError(
         absl::StrCat("Unable to create CL buffer from GL texture. ",
                      CLErrorCodeToString(error_code)));
   }
   *memory = CLMemory(mem, true);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 bool IsGlSharingSupported(const CLDevice& device) {
@@ -128,19 +131,18 @@ bool IsGlSharingSupported(const CLDevice& device) {
 
 AcquiredGlObjects::~AcquiredGlObjects() { Release({}, nullptr).IgnoreError(); }
 
-Status AcquiredGlObjects::Acquire(const std::vector<cl_mem>& memory,
-                                  cl_command_queue queue,
-                                  const std::vector<cl_event>& wait_events,
-                                  CLEvent* acquire_event,
-                                  AcquiredGlObjects* objects) {
+absl::Status AcquiredGlObjects::Acquire(
+    const std::vector<cl_mem>& memory, cl_command_queue queue,
+    const std::vector<cl_event>& wait_events, CLEvent* acquire_event,
+    AcquiredGlObjects* objects) {
   if (!memory.empty()) {
     cl_event new_event;
     cl_int error_code = clEnqueueAcquireGLObjects(
         queue, memory.size(), memory.data(), wait_events.size(),
         wait_events.data(), acquire_event ? &new_event : nullptr);
     if (error_code != CL_SUCCESS) {
-      return InternalError(absl::StrCat("Unable to acquire GL object. ",
-                                        CLErrorCodeToString(error_code)));
+      return absl::InternalError(absl::StrCat("Unable to acquire GL object. ",
+                                              CLErrorCodeToString(error_code)));
     }
     if (acquire_event) {
       *acquire_event = CLEvent(new_event);
@@ -148,19 +150,19 @@ Status AcquiredGlObjects::Acquire(const std::vector<cl_mem>& memory,
     clFlush(queue);
   }
   *objects = AcquiredGlObjects(memory, queue);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status AcquiredGlObjects::Release(const std::vector<cl_event>& wait_events,
-                                  CLEvent* release_event) {
+absl::Status AcquiredGlObjects::Release(
+    const std::vector<cl_event>& wait_events, CLEvent* release_event) {
   if (queue_ && !memory_.empty()) {
     cl_event new_event;
     cl_int error_code = clEnqueueReleaseGLObjects(
         queue_, memory_.size(), memory_.data(), wait_events.size(),
         wait_events.data(), release_event ? &new_event : nullptr);
     if (error_code != CL_SUCCESS) {
-      return InternalError(absl::StrCat("Unable to release GL object. ",
-                                        CLErrorCodeToString(error_code)));
+      return absl::InternalError(absl::StrCat("Unable to release GL object. ",
+                                              CLErrorCodeToString(error_code)));
     }
     if (release_event) {
       *release_event = CLEvent(new_event);
@@ -168,7 +170,7 @@ Status AcquiredGlObjects::Release(const std::vector<cl_event>& wait_events,
     clFlush(queue_);
     queue_ = nullptr;
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 GlInteropFabric::GlInteropFabric(EGLDisplay egl_display,
@@ -192,9 +194,9 @@ void GlInteropFabric::UnregisterMemory(cl_mem memory) {
   }
 }
 
-Status GlInteropFabric::Start() {
+absl::Status GlInteropFabric::Start() {
   if (!is_enabled()) {
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   // In GL-CL interoperability, we need to make sure GL finished processing of
@@ -235,9 +237,9 @@ Status GlInteropFabric::Start() {
                                     nullptr, &gl_objects_);
 }
 
-Status GlInteropFabric::Finish() {
+absl::Status GlInteropFabric::Finish() {
   if (!is_enabled()) {
-    return OkStatus();
+    return absl::OkStatus();
   }
   RETURN_IF_ERROR(gl_objects_.Release({}, &outbound_event_));
 
@@ -258,7 +260,7 @@ Status GlInteropFabric::Finish() {
   // This slow sync is the only working solution right now. We still have to
   // debug why the version above is not fast and reliable.
   outbound_event_.Wait();
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
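
The CL-to-GL direction above composes as follows; a sketch assuming a valid `event` and `display`, inside a function returning absl::Status:

```c++
if (IsEglSyncFromClEventSupported()) {
  EglSync sync;
  RETURN_IF_ERROR(CreateEglSyncFromClEvent(event, display, &sync));
  RETURN_IF_ERROR(sync.ServerWait());  // make the GL stream wait on the CL work
}
```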
diff --git a/tensorflow/lite/delegates/gpu/cl/gl_interop.h b/tensorflow/lite/delegates/gpu/cl/gl_interop.h
index 597bee857c6..7ebc3e4bf4f 100644
--- a/tensorflow/lite/delegates/gpu/cl/gl_interop.h
+++ b/tensorflow/lite/delegates/gpu/cl/gl_interop.h
@@ -39,8 +39,8 @@ namespace cl {
 // returned sync and could be safely destroyed.
 //
 // Depends on EGL 1.5.
-Status CreateEglSyncFromClEvent(cl_event event, EGLDisplay display,
-                                EglSync* sync);
+absl::Status CreateEglSyncFromClEvent(cl_event event, EGLDisplay display,
+                                      EglSync* sync);
 
 // Returns true if 'CreateEglSyncFromClEvent' is supported.
 bool IsEglSyncFromClEventSupported();
@@ -48,20 +48,22 @@ bool IsEglSyncFromClEventSupported();
 // Creates CL event from EGL sync.
 // The created event can only be consumed by an AcquiredGlObjects::Acquire
 // call as a 'wait_event'.
-Status CreateClEventFromEglSync(cl_context context, const EglSync& egl_sync,
-                                CLEvent* event);
+absl::Status CreateClEventFromEglSync(cl_context context,
+                                      const EglSync& egl_sync, CLEvent* event);
 
 // Returns true if 'CreateClEventFromEglSync' is supported.
 bool IsClEventFromEglSyncSupported(const CLDevice& device);
 
 // Creates new CL memory object from OpenGL buffer.
-Status CreateClMemoryFromGlBuffer(GLuint gl_ssbo_id, AccessType access_type,
-                                  CLContext* context, CLMemory* memory);
+absl::Status CreateClMemoryFromGlBuffer(GLuint gl_ssbo_id,
+                                        AccessType access_type,
+                                        CLContext* context, CLMemory* memory);
 
 // Creates new CL memory object from OpenGL texture.
-Status CreateClMemoryFromGlTexture(GLenum texture_target, GLuint texture_id,
-                                   AccessType access_type, CLContext* context,
-                                   CLMemory* memory);
+absl::Status CreateClMemoryFromGlTexture(GLenum texture_target,
+                                         GLuint texture_id,
+                                         AccessType access_type,
+                                         CLContext* context, CLMemory* memory);
 
 // Returns true if GL objects can be shared with the OpenCL context.
 bool IsGlSharingSupported(const CLDevice& device);
@@ -81,16 +83,16 @@ class AcquiredGlObjects {
   // CreateClMemoryFromGlBuffer or CreateClMemoryFromGlTexture calls.
   // If 'acquire_event' is not nullptr, it will be signalled once acquisition is
   // complete.
-  static Status Acquire(const std::vector<cl_mem>& memory,
-                        cl_command_queue queue,
-                        const std::vector<cl_event>& wait_events,
-                        CLEvent* acquire_event /* optional */,
-                        AcquiredGlObjects* objects);
+  static absl::Status Acquire(const std::vector<cl_mem>& memory,
+                              cl_command_queue queue,
+                              const std::vector<cl_event>& wait_events,
+                              CLEvent* acquire_event /* optional */,
+                              AcquiredGlObjects* objects);
 
   // Releases OpenCL memory back to OpenGL context. If 'release_event' is not
   // nullptr, it will be signalled once release is complete.
-  Status Release(const std::vector<cl_event>& wait_events,
-                 CLEvent* release_event /* optional */);
+  absl::Status Release(const std::vector<cl_event>& wait_events,
+                       CLEvent* release_event /* optional */);
 
  private:
   AcquiredGlObjects(const std::vector<cl_mem>& memory, cl_command_queue queue)
@@ -108,10 +110,10 @@ class GlInteropFabric {
 
   // Ensures proper GL->CL synchronization is in place before
   // GL objects that are mapped to CL objects are used.
-  Status Start();
+  absl::Status Start();
 
   // Puts appropriate CL->GL synchronization after all work is complete.
-  Status Finish();
+  absl::Status Finish();
 
   // Registers memory to be used from GL context. Such CL memory object must
   // be created with CreateClMemoryFromGlBuffer or CreateClMemoryFromGlTexture
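
How the pieces in this header combine, as a hedged sketch: `device`, `context`, `queue`, and `gl_ssbo_id` are assumed valid, and `CLMemory::memory()` returning the raw cl_mem is an assumption, not something this diff shows:

```c++
if (IsGlSharingSupported(device)) {
  CLMemory shared;
  RETURN_IF_ERROR(CreateClMemoryFromGlBuffer(gl_ssbo_id, AccessType::READ,
                                             &context, &shared));
  AcquiredGlObjects acquired;
  RETURN_IF_ERROR(AcquiredGlObjects::Acquire({shared.memory()}, queue,
                                             /*wait_events=*/{},
                                             /*acquire_event=*/nullptr,
                                             &acquired));
  // ... dispatch CL kernels that read the shared buffer ...
  RETURN_IF_ERROR(acquired.Release(/*wait_events=*/{},
                                   /*release_event=*/nullptr));
}
```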
diff --git a/tensorflow/lite/delegates/gpu/cl/gpu_api_delegate.cc b/tensorflow/lite/delegates/gpu/cl/gpu_api_delegate.cc
index 8e2c3308a47..0e2d046eba2 100644
--- a/tensorflow/lite/delegates/gpu/cl/gpu_api_delegate.cc
+++ b/tensorflow/lite/delegates/gpu/cl/gpu_api_delegate.cc
@@ -87,8 +87,8 @@ class Delegate {
     }
   }
 
-  Status Prepare(TfLiteContext* context,
-                 const TfLiteDelegateParams* delegate_params) {
+  absl::Status Prepare(TfLiteContext* context,
+                       const TfLiteDelegateParams* delegate_params) {
     // Extract TFLite delegate execution plan from the context and convert it
     // into FlowGraph32.
     GraphFloat32 graph;
@@ -98,7 +98,7 @@ class Delegate {
     NullTransformationReporter reporter;
     ModelTransformer transformer(&graph, &reporter);
     if (!ApplyGeneralTransformations(&transformer)) {
-      return InternalError("Graph general transformations failed");
+      return absl::InternalError("Graph general transformations failed");
     }
 
     InferenceEnvironmentOptions env_options;
@@ -108,7 +108,7 @@ class Delegate {
         options_.serialized_binary_cache_data,
         options_.serialized_binary_cache_size};
     InferenceEnvironmentProperties properties;
-    Status status =
+    absl::Status status =
         NewInferenceEnvironment(env_options, &environment_, &properties);
     if (!properties.is_opencl_available) {
       context->ReportError(context,
@@ -200,7 +200,7 @@ class Delegate {
     return builder->Build(&runner_);
   }
 
-  Status SetInputsAndOutputs(TfLiteContext* context) {
+  absl::Status SetInputsAndOutputs(TfLiteContext* context) {
     int i = 0;
     for (auto index : input_indices_) {
       RETURN_IF_ERROR(
@@ -211,10 +211,10 @@ class Delegate {
       RETURN_IF_ERROR(
           runner_->SetOutputObject(i++, GetTensorObject(index, context)));
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Invoke(TfLiteContext* context) {
+  absl::Status Invoke(TfLiteContext* context) {
     RETURN_IF_ERROR(SetInputsAndOutputs(context));
     return runner_->Run();
   }
@@ -310,7 +310,7 @@ TfLiteStatus DelegatePrepare(TfLiteContext* context, TfLiteDelegate* delegate) {
         const auto status = gpu_delegate->Prepare(context, params);
         if (!status.ok()) {
           context->ReportError(context, "TfLiteGpuDelegate Init: %s",
-                               status.error_message().c_str());
+                               std::string(status.message()).c_str());
           return nullptr;
         }
         return gpu_delegate;
@@ -335,7 +335,7 @@ TfLiteStatus DelegatePrepare(TfLiteContext* context, TfLiteDelegate* delegate) {
         const auto status = GetDelegate(node)->Invoke(context);
         if (!status.ok()) {
           context->ReportError(context, "TfLiteGpuDelegate Invoke: %s",
-                               status.error_message().c_str());
+                               std::string(status.message()).c_str());
           return kTfLiteError;
         }
         return kTfLiteOk;
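
The std::string(...) wrappers introduced above exist because absl::Status::message() returns an absl::string_view, which is not guaranteed to be NUL-terminated, whereas the old error_message() returned a std::string. An illustration of the same pattern:

```c++
absl::Status status = absl::InternalError("something failed");
if (!status.ok()) {
  // string_view -> std::string before c_str(); passing
  // status.message().data() straight to a printf-style API would be unsafe.
  fprintf(stderr, "TfLiteGpuDelegate: %s\n",
          std::string(status.message()).c_str());
}
```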
diff --git a/tensorflow/lite/delegates/gpu/cl/inference_context.cc b/tensorflow/lite/delegates/gpu/cl/inference_context.cc
index 47998bf8c99..2ec911813e6 100644
--- a/tensorflow/lite/delegates/gpu/cl/inference_context.cc
+++ b/tensorflow/lite/delegates/gpu/cl/inference_context.cc
@@ -169,9 +169,9 @@ CLNode& CLNode::operator=(CLNode&& node) {
   return *this;
 }
 
-Status InferenceContext::InitFromGraph(const CreateInferenceInfo& create_info,
-                                       const GraphFloat32& graph,
-                                       Environment* env) {
+absl::Status InferenceContext::InitFromGraph(
+    const CreateInferenceInfo& create_info, const GraphFloat32& graph,
+    Environment* env) {
   CreationContext creation_context;
   creation_context.device = env->GetDevicePtr();
   creation_context.context = &env->context();
@@ -206,15 +206,15 @@ Status InferenceContext::InitFromGraph(const CreateInferenceInfo& create_info,
     tuning_parameters.tuning_type = TuningType::FAST;
   }
   RETURN_IF_ERROR(Tune(tuning_parameters));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status InferenceContext::InitFromGraphWithTransforms(
+absl::Status InferenceContext::InitFromGraphWithTransforms(
     const CreateInferenceInfo& create_info, GraphFloat32* graph,
     Environment* env) {
   RETURN_IF_ERROR(RunGraphTransforms(graph));
   RETURN_IF_ERROR(InitFromGraph(create_info, *graph, env));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 void InferenceContext::CopyInAndOutIds(const GraphFloat32& graph) {
@@ -258,7 +258,7 @@ void InferenceContext::ReserveGraphTensors(
   tensor_reserver_.SetNext(max_id + 1);
 }
 
-Status InferenceContext::ConvertOperations(
+absl::Status InferenceContext::ConvertOperations(
     const CreationContext& creation_context, const GraphFloat32& graph,
     ModelHints hints) {
   std::vector<Node*> graph_nodes = graph.nodes();
@@ -343,7 +343,7 @@ Status InferenceContext::ConvertOperations(
     }
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 void InferenceContext::Merge() {
@@ -424,15 +424,15 @@ void InferenceContext::GetUsages(
   }
 }
 
-Status InferenceContext::AllocateMemory(const CLDevice& device,
-                                        CLContext* context) {
+absl::Status InferenceContext::AllocateMemory(const CLDevice& device,
+                                              CLContext* context) {
   RETURN_IF_ERROR(AllocateMemoryForBuffers(device, context));
   RETURN_IF_ERROR(AllocateMemoryForStrongShapes(device, context));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status InferenceContext::AllocateMemoryForBuffers(const CLDevice& device,
-                                                  CLContext* context) {
+absl::Status InferenceContext::AllocateMemoryForBuffers(const CLDevice& device,
+                                                        CLContext* context) {
   std::map<ValueId, int2> buffer_usages;
   GetUsages(
       [](const TensorDescriptor& t) { return IsBufferBased(t.storage_type); },
@@ -480,11 +480,11 @@ Status InferenceContext::AllocateMemoryForBuffers(const CLDevice& device,
       created_tensors[tensor_index] = true;
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status InferenceContext::AllocateMemoryForStrongShapes(const CLDevice& device,
-                                                       CLContext* context) {
+absl::Status InferenceContext::AllocateMemoryForStrongShapes(
+    const CLDevice& device, CLContext* context) {
   std::map<ValueId, int2> usages;
   GetUsages(
       [](const TensorDescriptor& t) { return !IsBufferBased(t.storage_type); },
@@ -517,7 +517,7 @@ Status InferenceContext::AllocateMemoryForStrongShapes(const CLDevice& device,
       }
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 void InferenceContext::BindMemoryToOperations() {
@@ -539,21 +539,22 @@ void InferenceContext::BindMemoryToOperations() {
   }
 }
 
-Status InferenceContext::Compile(const CreationContext& creation_context) {
+absl::Status InferenceContext::Compile(
+    const CreationContext& creation_context) {
   for (auto& node : nodes_) {
     RETURN_IF_ERROR(node.operations[0]->Compile(creation_context));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status InferenceContext::Tune(const TuningParameters& tuning_parameters) {
+absl::Status InferenceContext::Tune(const TuningParameters& tuning_parameters) {
   for (auto& node : nodes_) {
     RETURN_IF_ERROR(node.operations[0]->Tune(tuning_parameters));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status InferenceContext::AddToQueue(CLCommandQueue* queue) {
+absl::Status InferenceContext::AddToQueue(CLCommandQueue* queue) {
   if (need_manual_release_) {
     if (prev_enqueue_start_point_.is_valid()) {
       prev_enqueue_start_point_.Wait();
@@ -571,11 +572,11 @@ Status InferenceContext::AddToQueue(CLCommandQueue* queue) {
   if (need_flush_) {
     clFlush(queue->queue());
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status InferenceContext::Profile(ProfilingCommandQueue* queue,
-                                 ProfilingInfo* result) {
+absl::Status InferenceContext::Profile(ProfilingCommandQueue* queue,
+                                       ProfilingInfo* result) {
   queue->ResetMeasurements();
   for (auto& node : nodes_) {
     queue->SetEventsLabel(node.name);
@@ -583,7 +584,7 @@ Status InferenceContext::Profile(ProfilingCommandQueue* queue,
   }
   RETURN_IF_ERROR(queue->WaitForCompletion());
   *result = queue->GetProfilingInfo();
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 uint64_t InferenceContext::GetSizeOfMemoryAllocatedForIntermediateTensors()
@@ -608,13 +609,15 @@ Tensor* InferenceContext::GetTensor(ValueId id) {
   }
 }
 
-Status InferenceContext::SetInputTensor(ValueId id, const TensorFloat32& tensor,
-                                        CLCommandQueue* queue) {
+absl::Status InferenceContext::SetInputTensor(ValueId id,
+                                              const TensorFloat32& tensor,
+                                              CLCommandQueue* queue) {
   return GetTensor(id)->WriteData(queue, tensor);
 }
 
-Status InferenceContext::GetOutputTensor(ValueId id, CLCommandQueue* queue,
-                                         TensorFloat32* result) {
+absl::Status InferenceContext::GetOutputTensor(ValueId id,
+                                               CLCommandQueue* queue,
+                                               TensorFloat32* result) {
   const auto& gpu_tensor = *GetTensor(id);
   const auto dst_shape = BHWC(gpu_tensor.Batch(), gpu_tensor.Height(),
                               gpu_tensor.Width(), gpu_tensor.Channels());
@@ -624,17 +627,17 @@ Status InferenceContext::GetOutputTensor(ValueId id, CLCommandQueue* queue,
   return gpu_tensor.ReadData(queue, result);
 }
 
-Status RunGraphTransforms(GraphFloat32* graph) {
+absl::Status RunGraphTransforms(GraphFloat32* graph) {
   auto merge_padding_transform = NewMergePaddingWithAdd();
   auto add_bias_transform = NewAddBias();
   ModelTransformer transformer(graph, /*reporter=*/nullptr);
   if (!transformer.Apply("add_bias", add_bias_transform.get())) {
-    return InternalError("Invalid add_bias transform");
+    return absl::InternalError("Invalid add_bias transform");
   }
   if (!transformer.Apply("merge_padding", merge_padding_transform.get())) {
-    return InternalError("Invalid merge_padding transform");
+    return absl::InternalError("Invalid merge_padding transform");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
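
End-to-end, the InferenceContext API touched in this file is used roughly as below; `graph`, `env`, and the tensor ids are assumed to exist, and `env.queue()` is an assumed accessor for the environment's CLCommandQueue:

```c++
InferenceContext context;
InferenceContext::CreateInferenceInfo create_info;  // precision, storage, hints
RETURN_IF_ERROR(
    context.InitFromGraphWithTransforms(create_info, &graph, &env));

RETURN_IF_ERROR(context.SetInputTensor(input_id, input_tensor, env.queue()));
RETURN_IF_ERROR(context.AddToQueue(env.queue()));

TensorFloat32 output;
RETURN_IF_ERROR(context.GetOutputTensor(output_id, env.queue(), &output));
```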
diff --git a/tensorflow/lite/delegates/gpu/cl/inference_context.h b/tensorflow/lite/delegates/gpu/cl/inference_context.h
index 40b20e8806a..75365258e41 100644
--- a/tensorflow/lite/delegates/gpu/cl/inference_context.h
+++ b/tensorflow/lite/delegates/gpu/cl/inference_context.h
@@ -65,53 +65,55 @@ class InferenceContext {
     TensorStorageType storage_type;
     ModelHints hints;
   };
-  Status InitFromGraph(const CreateInferenceInfo& create_info,
-                       const GraphFloat32& graph, Environment* env);
+  absl::Status InitFromGraph(const CreateInferenceInfo& create_info,
+                             const GraphFloat32& graph, Environment* env);
 
   // Applies OpenCL-specific transformations to the graph before the
   // initialization. These transformations are either impossible or useless in
   // other backends.
-  Status InitFromGraphWithTransforms(const CreateInferenceInfo& create_info,
-                                     GraphFloat32* graph, Environment* env);
+  absl::Status InitFromGraphWithTransforms(
+      const CreateInferenceInfo& create_info, GraphFloat32* graph,
+      Environment* env);
 
-  Status AddToQueue(CLCommandQueue* queue);
-  Status Profile(ProfilingCommandQueue* queue, ProfilingInfo* result);
+  absl::Status AddToQueue(CLCommandQueue* queue);
+  absl::Status Profile(ProfilingCommandQueue* queue, ProfilingInfo* result);
   // for profiling and memory statistics
   uint64_t GetSizeOfMemoryAllocatedForIntermediateTensors() const;
 
-  Status SetInputTensor(ValueId id, const TensorFloat32& tensor,
-                        CLCommandQueue* queue);
+  absl::Status SetInputTensor(ValueId id, const TensorFloat32& tensor,
+                              CLCommandQueue* queue);
 
   // It will work only with input/output tensor ids. For all other ids we don't
   // have any guarantees.
   Tensor* GetTensor(ValueId id);
 
-  Status GetOutputTensor(ValueId id, CLCommandQueue* queue,
-                         TensorFloat32* result);
+  absl::Status GetOutputTensor(ValueId id, CLCommandQueue* queue,
+                               TensorFloat32* result);
 
  private:
   void CopyInAndOutIds(const GraphFloat32& graph);
-  Status ConvertOperations(const CreationContext& creation_context,
-                           const GraphFloat32& graph, ModelHints hints);
+  absl::Status ConvertOperations(const CreationContext& creation_context,
+                                 const GraphFloat32& graph, ModelHints hints);
   void CreateLinks();
   void ReserveGraphTensors(const CreateInferenceInfo& create_info,
                            const CreationContext& creation_context,
                            const GraphFloat32& graph);
   void Merge();
-  Status AllocateMemory(const CLDevice& device, CLContext* context);
+  absl::Status AllocateMemory(const CLDevice& device, CLContext* context);
 
-  Status AllocateMemoryForBuffers(const CLDevice& device, CLContext* context);
+  absl::Status AllocateMemoryForBuffers(const CLDevice& device,
+                                        CLContext* context);
 
-  Status AllocateMemoryForStrongShapes(const CLDevice& device,
-                                       CLContext* context);
+  absl::Status AllocateMemoryForStrongShapes(const CLDevice& device,
+                                             CLContext* context);
 
   // utility function
   void GetUsages(const std::function<bool(const TensorDescriptor&)>& functor,
                  std::map<ValueId, int2>* usages);
 
   void BindMemoryToOperations();
-  Status Compile(const CreationContext& creation_context);
-  Status Tune(const TuningParameters& tuning_parameters);
+  absl::Status Compile(const CreationContext& creation_context);
+  absl::Status Tune(const TuningParameters& tuning_parameters);
 
   // performance hacks
   bool need_flush_ = false;
@@ -175,7 +177,7 @@ class InferenceContext {
 };
 
 // Runs OpenCL specific transforms for the graph.
-Status RunGraphTransforms(GraphFloat32* graph);
+absl::Status RunGraphTransforms(GraphFloat32* graph);
 
 }  // namespace cl
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/add.cc b/tensorflow/lite/delegates/gpu/cl/kernels/add.cc
index b5c37c5987f..0c96f4316ec 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/add.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/add.cc
@@ -143,17 +143,17 @@ std::string Add::GetArgsDeclaration() const {
   return args;
 }
 
-Status Add::BindArguments(CLKernel* kernel) {
+absl::Status Add::BindArguments(CLKernel* kernel) {
   for (int i = 1; i < src_depthes_.size(); ++i) {
     RETURN_IF_ERROR(kernel->SetMemoryAuto(src_[i]->GetMemoryPtr()));
   }
   for (int i = 1; i < src_depthes_.size(); ++i) {
     RETURN_IF_ERROR(kernel->SetBytesAuto(src_[i]->GetWBatchedHSB()));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status Add::Compile(const CreationContext& creation_context) {
+absl::Status Add::Compile(const CreationContext& creation_context) {
   const auto code = GetElementWiseCode(definition_, linked_operations_);
   return creation_context.cache->GetOrCreateCLKernel(
       code, "main_function", *creation_context.context,
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/add.h b/tensorflow/lite/delegates/gpu/cl/kernels/add.h
index ac6243cc5e4..d47954748c7 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/add.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/add.h
@@ -36,7 +36,7 @@ class Add : public ElementwiseOperation {
   Add(const OperationDef& definition, const std::vector<int>& channels,
       int dst_channels);
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   Add(Add&& operation);
@@ -47,7 +47,7 @@ class Add : public ElementwiseOperation {
   void SetLinkIndex(int index) override;
   std::string GetCoreCode(const LinkingContext& context) const override;
   std::string GetArgsDeclaration() const override;
-  Status BindArguments(CLKernel* kernel) override;
+  absl::Status BindArguments(CLKernel* kernel) override;
 
  private:
   std::string GetElementWiseCode(
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/cl_test.cc b/tensorflow/lite/delegates/gpu/cl/kernels/cl_test.cc
index ad4b54853e1..deb0ebf67c4 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/cl_test.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/cl_test.cc
@@ -21,17 +21,17 @@ namespace tflite {
 namespace gpu {
 namespace cl {
 
-Status ExecuteGPUOperation(const std::vector<TensorFloat32>& src_cpu,
-                           const CreationContext& creation_context,
-                           GPUOperation* operation,
-                           const std::vector<BHWC>& dst_sizes,
-                           const std::vector<TensorFloat32*>& dst_cpu) {
+absl::Status ExecuteGPUOperation(const std::vector<TensorFloat32>& src_cpu,
+                                 const CreationContext& creation_context,
+                                 GPUOperation* operation,
+                                 const std::vector<BHWC>& dst_sizes,
+                                 const std::vector<TensorFloat32*>& dst_cpu) {
   const OperationDef& op_def = operation->GetDefinition();
   std::vector<Tensor> src(src_cpu.size());
   for (int i = 0; i < src_cpu.size(); ++i) {
     auto src_shape = src_cpu[i].shape;
     if (src_shape.b != 1 && !op_def.IsBatchSupported()) {
-      return InvalidArgumentError(
+      return absl::InvalidArgumentError(
           "Layout doesn't have Batch dimension, but shape.b != 1");
     }
     RETURN_IF_ERROR(CreateTensor(*creation_context.context,
@@ -45,7 +45,7 @@ Status ExecuteGPUOperation(const std::vector<TensorFloat32>& src_cpu,
   for (int i = 0; i < dst_cpu.size(); ++i) {
     auto dst_shape = dst_sizes[i];
     if (dst_shape.b != 1 && !op_def.IsBatchSupported()) {
-      return InvalidArgumentError(
+      return absl::InvalidArgumentError(
           "Layout doesn't have Batch dimension, but shape.b != 1");
     }
     RETURN_IF_ERROR(CreateTensor(*creation_context.context,
@@ -64,22 +64,22 @@ Status ExecuteGPUOperation(const std::vector<TensorFloat32>& src_cpu,
     dst_cpu[i]->data = std::vector<float>(dst_sizes[i].DimensionsProduct(), 0);
     RETURN_IF_ERROR(dst[i].ReadData(creation_context.queue, dst_cpu[i]));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status ExecuteGPUOperation(const std::vector<TensorFloat32>& src_cpu,
-                           const CreationContext& creation_context,
-                           GPUOperation* operation, const BHWC& dst_size,
-                           TensorFloat32* result) {
+absl::Status ExecuteGPUOperation(const std::vector<TensorFloat32>& src_cpu,
+                                 const CreationContext& creation_context,
+                                 GPUOperation* operation, const BHWC& dst_size,
+                                 TensorFloat32* result) {
   return ExecuteGPUOperation(
       std::vector<TensorFloat32>{src_cpu}, creation_context, operation,
       std::vector<BHWC>{dst_size}, std::vector<TensorFloat32*>{result});
 }
 
-Status ExecuteGPUOperation(const TensorFloat32& src_cpu,
-                           const CreationContext& creation_context,
-                           GPUOperation* operation, const BHWC& dst_size,
-                           TensorFloat32* result) {
+absl::Status ExecuteGPUOperation(const TensorFloat32& src_cpu,
+                                 const CreationContext& creation_context,
+                                 GPUOperation* operation, const BHWC& dst_size,
+                                 TensorFloat32* result) {
   return ExecuteGPUOperation(std::vector<TensorFloat32>{src_cpu},
                              creation_context, operation, dst_size, result);
 }
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h b/tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h
index c127d1bacd3..4d3636d0384 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h
@@ -51,21 +51,21 @@ class OpenCLOperationTest : public ::testing::Test {
   CreationContext creation_context_;
 };
 
-Status ExecuteGPUOperation(const TensorFloat32& src_cpu,
-                           const CreationContext& creation_context,
-                           GPUOperation* operation, const BHWC& dst_size,
-                           TensorFloat32* result);
+absl::Status ExecuteGPUOperation(const TensorFloat32& src_cpu,
+                                 const CreationContext& creation_context,
+                                 GPUOperation* operation, const BHWC& dst_size,
+                                 TensorFloat32* result);
 
-Status ExecuteGPUOperation(const std::vector<TensorFloat32>& src_cpu,
-                           const CreationContext& creation_context,
-                           GPUOperation* operation, const BHWC& dst_size,
-                           TensorFloat32* result);
+absl::Status ExecuteGPUOperation(const std::vector<TensorFloat32>& src_cpu,
+                                 const CreationContext& creation_context,
+                                 GPUOperation* operation, const BHWC& dst_size,
+                                 TensorFloat32* result);
 
-Status ExecuteGPUOperation(const std::vector<TensorFloat32>& src_cpu,
-                           const CreationContext& creation_context,
-                           GPUOperation* operation,
-                           const std::vector<BHWC>& dst_sizes,
-                           const std::vector<TensorFloat32*>& dst_cpu);
+absl::Status ExecuteGPUOperation(const std::vector<TensorFloat32>& src_cpu,
+                                 const CreationContext& creation_context,
+                                 GPUOperation* operation,
+                                 const std::vector<BHWC>& dst_sizes,
+                                 const std::vector<TensorFloat32*>& dst_cpu);
 }  // namespace cl
 }  // namespace gpu
 }  // namespace tflite
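
A sketch of how a kernel test drives these helpers; `MakeOperationUnderTest()` is a hypothetical stand-in for however a concrete test builds its GPUOperation:

```c++
TEST_F(OpenCLOperationTest, Sketch) {
  TensorFloat32 src;
  src.shape = BHWC(1, 4, 4, 8);  // shape.b must be 1 unless the op supports batch
  src.data.resize(src.shape.DimensionsProduct(), 1.0f);

  // Hypothetical: a real test constructs a concrete operation such as Add.
  std::unique_ptr<GPUOperation> op = MakeOperationUnderTest();

  TensorFloat32 dst;
  ASSERT_TRUE(ExecuteGPUOperation(src, creation_context_, op.get(),
                                  BHWC(1, 4, 4, 8), &dst)
                  .ok());
}
```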
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/concat_xy.cc b/tensorflow/lite/delegates/gpu/cl/kernels/concat_xy.cc
index 141a19de6e1..ef7915afba5 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/concat_xy.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/concat_xy.cc
@@ -96,7 +96,7 @@ ConcatXY& ConcatXY::operator=(ConcatXY&& operation) {
   return *this;
 }
 
-Status ConcatXY::Compile(const CreationContext& creation_context) {
+absl::Status ConcatXY::Compile(const CreationContext& creation_context) {
   const auto code =
       GetConcatKernelCode(definition_, tensors_count_, linked_operations_);
   return creation_context.cache->GetOrCreateCLKernel(
@@ -104,7 +104,7 @@ Status ConcatXY::Compile(const CreationContext& creation_context) {
       *creation_context.device, &kernel_);
 }
 
-Status ConcatXY::BindArguments() {
+absl::Status ConcatXY::BindArguments() {
   kernel_.ResetBindingCounter();
   for (int i = 0; i < tensors_count_; ++i) {
     RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[i]->GetMemoryPtr()));
@@ -122,7 +122,7 @@ Status ConcatXY::BindArguments() {
     y_offset += attr_.axis == Axis::HEIGHT ? height : 0;
   }
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWBatchedHSB()));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 ConcatXY::GetGridSize() const {
@@ -140,12 +140,12 @@ int3 ConcatXY::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status ConcatXY::Tune(const TuningParameters& params) {
+absl::Status ConcatXY::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
 
-Status ConcatXY::AddToQueue(CLCommandQueue* queue) {
+absl::Status ConcatXY::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/concat_xy.h b/tensorflow/lite/delegates/gpu/cl/kernels/concat_xy.h
index 6bc0c87a51f..a170b593cf0 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/concat_xy.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/concat_xy.h
@@ -31,10 +31,10 @@ class ConcatXY : public GPUOperation {
   ConcatXY(const OperationDef& definition, const ConcatAttributes& attr,
            int tensors_count)
       : GPUOperation(definition), attr_(attr), tensors_count_(tensors_count) {}
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   ConcatXY(ConcatXY&& operation);
@@ -43,7 +43,7 @@ class ConcatXY : public GPUOperation {
   ConcatXY& operator=(const ConcatXY&) = delete;
 
  private:
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   ConcatAttributes attr_;
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/concat_z.cc b/tensorflow/lite/delegates/gpu/cl/kernels/concat_z.cc
index 039fac0d0e3..3a7ec1c0cb7 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/concat_z.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/concat_z.cc
@@ -25,8 +25,8 @@ limitations under the License.
 namespace tflite {
 namespace gpu {
 namespace cl {
-
 namespace {
+
 bool IsAllChannelsX4(const std::vector<int>& channels) {
   for (int channel : channels) {
     if (channel % 4 != 0) {
@@ -146,6 +146,7 @@ std::string GetConcatKernelCode(
   c += "}\n";
   return c;
 }
+
 }  // namespace
 
 ConcatZ::ConcatZ(ConcatZ&& kernel)
@@ -164,7 +165,7 @@ ConcatZ& ConcatZ::operator=(ConcatZ&& kernel) {
   return *this;
 }
 
-Status ConcatZ::Compile(const CreationContext& creation_context) {
+absl::Status ConcatZ::Compile(const CreationContext& creation_context) {
   const auto code =
       GetConcatKernelCode(definition_, channels_, linked_operations_);
   std::vector<CompilerOptions> options;
@@ -186,7 +187,7 @@ Status ConcatZ::Compile(const CreationContext& creation_context) {
       *creation_context.device, &kernel_);
 }
 
-Status ConcatZ::BindArguments() {
+absl::Status ConcatZ::BindArguments() {
   kernel_.ResetBindingCounter();
   for (int i = 0; i < channels_.size(); ++i) {
     RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[i]->GetMemoryPtr()));
@@ -197,7 +198,7 @@ Status ConcatZ::BindArguments() {
     RETURN_IF_ERROR(kernel_.SetBytesAuto(src_[i]->Slices()));
   }
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWBatchedHSB()));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 ConcatZ::GetGridSize() const {
@@ -207,12 +208,12 @@ int3 ConcatZ::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status ConcatZ::Tune(const TuningParameters& params) {
+absl::Status ConcatZ::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
 
-Status ConcatZ::AddToQueue(CLCommandQueue* queue) {
+absl::Status ConcatZ::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/concat_z.h b/tensorflow/lite/delegates/gpu/cl/kernels/concat_z.h
index 9fc0fcc1fdb..ec25f6e4ed9 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/concat_z.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/concat_z.h
@@ -32,10 +32,10 @@ class ConcatZ : public GPUOperation {
  public:
   ConcatZ(const OperationDef& definition, const std::vector<int>& channels)
       : GPUOperation(definition), channels_(channels) {}
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   ConcatZ(ConcatZ&& kernel);
@@ -44,7 +44,7 @@ class ConcatZ : public GPUOperation {
   ConcatZ& operator=(const ConcatZ&) = delete;
 
  private:
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   std::vector<int> channels_;
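
Both concat headers mark the operations "// Move only": copies are deleted while the move constructor and move assignment are user-defined, so the compiled kernel handle always has exactly one owner. A toy illustration of that convention follows; FakeKernelOp and its void* handle stand in for the real classes and their cl_kernel, and none of these names come from the patch.

#include <utility>

class FakeKernelOp {
 public:
  FakeKernelOp() = default;
  // Move only: ownership of the handle transfers, it is never duplicated.
  FakeKernelOp(FakeKernelOp&& other) : handle_(other.handle_) {
    other.handle_ = nullptr;
  }
  FakeKernelOp& operator=(FakeKernelOp&& other) {
    if (this != &other) std::swap(handle_, other.handle_);
    return *this;
  }
  FakeKernelOp(const FakeKernelOp&) = delete;
  FakeKernelOp& operator=(const FakeKernelOp&) = delete;

 private:
  void* handle_ = nullptr;  // stands in for the owned cl_kernel
};

int main() {
  FakeKernelOp a;
  FakeKernelOp b = std::move(a);  // fine: ownership transferred
  // FakeKernelOp c = b;          // would not compile: copies are deleted
}
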
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/conv_3d.cc b/tensorflow/lite/delegates/gpu/cl/kernels/conv_3d.cc
index e6015357bfc..b79599d8e95 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/conv_3d.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/conv_3d.cc
@@ -76,7 +76,7 @@ Conv3D& Conv3D::operator=(Conv3D&& operation) {
   return *this;
 }
 
-Status Conv3D::Compile(const CreationContext& creation_context) {
+absl::Status Conv3D::Compile(const CreationContext& creation_context) {
   const bool stride_correction =
       definition_.IsBatchSupported() && stride_.x != 1;
   const std::string code =
@@ -92,7 +92,7 @@ Status Conv3D::Compile(const CreationContext& creation_context) {
       *creation_context.device, &kernel_);
 }
 
-Status Conv3D::BindArguments() {
+absl::Status Conv3D::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   if (conv_params_.AreWeightsBuffer()) {
@@ -131,7 +131,7 @@ Status Conv3D::BindArguments() {
       IntegralDivideRoundUp(dst_[0]->Slices(), conv_params_.block_size.w)));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(src_[0]->GetWBatchedHDS()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWBatchedHDS()));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 Conv3D::GetGridSize() const {
@@ -154,12 +154,12 @@ int3 Conv3D::GetGridSize() const {
                   conv_params_.work_group_size.z);
 }
 
-Status Conv3D::Tune(const TuningParameters& params) {
+absl::Status Conv3D::Tune(const TuningParameters& params) {
   if (conv_params_.weights_upload_type ==
           WeightsUploadType::LOCAL_MEM_ASYNC_SUBGROUP ||
       conv_params_.weights_upload_type ==
           WeightsUploadType::LOCAL_MEM_BY_THREADS) {
-    return OkStatus();
+    return absl::OkStatus();
   }
   if (conv_params_.work_group_launch_order[0] == 0 &&
       conv_params_.work_group_launch_order[1] == 1 &&
@@ -168,10 +168,10 @@ Status Conv3D::Tune(const TuningParameters& params) {
     return GetBestWorkGroupConv(params, kernel_, GetGridSize(),
                                 &conv_params_.work_group_size);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status Conv3D::AddToQueue(CLCommandQueue* queue) {
+absl::Status Conv3D::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(),
                                  conv_params_.work_group_size);
@@ -903,9 +903,9 @@ Conv3D::ConvParams Conv3D::GuessBestParams(
                          x_kernel_is_1, y_kernel_is_1, z_kernel_is_1);
 }
 
-Status CreateConv3D(const CreationContext& creation_context,
-                    const OperationDef& definition,
-                    const Convolution3DAttributes& attr, Conv3D* result) {
+absl::Status CreateConv3D(const CreationContext& creation_context,
+                          const OperationDef& definition,
+                          const Convolution3DAttributes& attr, Conv3D* result) {
   *result = Conv3D(definition, attr, *creation_context.device);
   return result->UploadData(attr.weights, attr.bias, creation_context.context);
 }
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/conv_3d.h b/tensorflow/lite/delegates/gpu/cl/kernels/conv_3d.h
index 8fc48c4114a..00b1e868e5d 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/conv_3d.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/conv_3d.h
@@ -39,9 +39,9 @@ namespace cl {
 class Conv3D : public GPUOperation {
  public:
   Conv3D() = default;
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   Conv3D(Conv3D&& operation);
@@ -75,21 +75,21 @@ class Conv3D : public GPUOperation {
          const CLDevice& device);
 
   template <DataType T>
-  Status UploadData(const ::tflite::gpu::Tensor<OHWDI, T>& weights,
-                    const ::tflite::gpu::Tensor<Linear, T>& biases,
-                    CLContext* context);
+  absl::Status UploadData(const ::tflite::gpu::Tensor<OHWDI, T>& weights,
+                          const ::tflite::gpu::Tensor<Linear, T>& biases,
+                          CLContext* context);
   template <DataType T>
-  Status UploadWeights(const ::tflite::gpu::Tensor<OHWDI, T>& weights,
-                       CLContext* context);
+  absl::Status UploadWeights(const ::tflite::gpu::Tensor<OHWDI, T>& weights,
+                             CLContext* context);
 
   template <DataType S, typename T>
   void RearrangeWeightsData(const ::tflite::gpu::Tensor<OHWDI, S>& weights,
                             absl::Span<T> dst);
 
-  friend Status CreateConv3D(const CreationContext& creation_context,
-                             const OperationDef& definition,
-                             const Convolution3DAttributes& attr,
-                             Conv3D* result);
+  friend absl::Status CreateConv3D(const CreationContext& creation_context,
+                                   const OperationDef& definition,
+                                   const Convolution3DAttributes& attr,
+                                   Conv3D* result);
 
   friend std::string GenerateConv3D(
       const OperationDef& op_def, const LinearStorage& biases,
@@ -105,7 +105,7 @@ class Conv3D : public GPUOperation {
                              int dst_slices, bool x_kernel_is_1,
                              bool y_kernel_is_1, bool z_kernel_is_1) const;
 
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   Texture2D weights_0_;
@@ -125,9 +125,9 @@ class Conv3D : public GPUOperation {
 };
 
 template <DataType T>
-Status Conv3D::UploadData(const ::tflite::gpu::Tensor<OHWDI, T>& weights,
-                          const ::tflite::gpu::Tensor<Linear, T>& biases,
-                          CLContext* context) {
+absl::Status Conv3D::UploadData(const ::tflite::gpu::Tensor<OHWDI, T>& weights,
+                                const ::tflite::gpu::Tensor<Linear, T>& biases,
+                                CLContext* context) {
   RETURN_IF_ERROR(UploadWeights(weights, context));
   LinearStorageCreateInfo create_info;
   create_info.storage_type = conv_params_.AreWeightsBuffer()
@@ -139,12 +139,12 @@ Status Conv3D::UploadData(const ::tflite::gpu::Tensor<OHWDI, T>& weights,
   create_info.name = "biases";
   create_info.aligned_size = weights.shape.o;
   RETURN_IF_ERROR(CreateLinearStorage(create_info, biases, context, &biases_));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <DataType T>
-Status Conv3D::UploadWeights(const ::tflite::gpu::Tensor<OHWDI, T>& weights,
-                             CLContext* context) {
+absl::Status Conv3D::UploadWeights(
+    const ::tflite::gpu::Tensor<OHWDI, T>& weights, CLContext* context) {
   const int block_size = conv_params_.block_size.w;
   const int dst_slices =
       AlignByN(IntegralDivideRoundUp(weights.shape.o, 4), block_size);
@@ -211,7 +211,7 @@ Status Conv3D::UploadWeights(const ::tflite::gpu::Tensor<OHWDI, T>& weights,
     }
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <DataType S, typename T>
@@ -271,9 +271,9 @@ void Conv3D::RearrangeWeightsData(
   }
 }
 
-Status CreateConv3D(const CreationContext& creation_context,
-                    const OperationDef& definition,
-                    const Convolution3DAttributes& attr, Conv3D* result);
+absl::Status CreateConv3D(const CreationContext& creation_context,
+                          const OperationDef& definition,
+                          const Convolution3DAttributes& attr, Conv3D* result);
 
 }  // namespace cl
 }  // namespace gpu
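
The conv_3d.h hunks show the migration reaching header-defined templates such as UploadData and UploadWeights, where declaration and definition change together. A compressed sketch of that chained-upload shape, again assuming Abseil; Upload is a hypothetical stand-in for UploadWeights/CreateLinearStorage, and the macro mirrors the project's RETURN_IF_ERROR.

#include <vector>
#include "absl/status/status.h"

#define RETURN_IF_ERROR(expr)        \
  do {                               \
    const absl::Status s = (expr);   \
    if (!s.ok()) return s;           \
  } while (0)

// Hypothetical stand-in for UploadWeights/CreateLinearStorage.
template <typename T>
absl::Status Upload(const std::vector<T>& data) {
  if (data.empty()) return absl::InvalidArgumentError("empty tensor");
  return absl::OkStatus();
}

// Header-defined template, as in conv_3d.h: weights first, then biases,
// first failure wins.
template <typename T>
absl::Status UploadData(const std::vector<T>& weights,
                        const std::vector<T>& biases) {
  RETURN_IF_ERROR(Upload(weights));
  RETURN_IF_ERROR(Upload(biases));
  return absl::OkStatus();
}

int main() { return UploadData<float>({1.f}, {0.f}).ok() ? 0 : 1; }
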
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/conv_buffer_1x1.cc b/tensorflow/lite/delegates/gpu/cl/kernels/conv_buffer_1x1.cc
index 3a8c726021c..70bd1b5249f 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/conv_buffer_1x1.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/conv_buffer_1x1.cc
@@ -291,16 +291,16 @@ ConvBuffer1x1& ConvBuffer1x1::operator=(ConvBuffer1x1&& operation) {
   return *this;
 }
 
-Status ConvBuffer1x1::Compile(const CreationContext& creation_context) {
+absl::Status ConvBuffer1x1::Compile(const CreationContext& creation_context) {
   std::string code =
       GenerateConvBuffer1x1(definition_, conv_params_, linked_operations_);
   RETURN_IF_ERROR(creation_context.cache->GetOrCreateCLKernel(
       code, "main_function", *creation_context.context,
       *creation_context.device, &kernel_));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status ConvBuffer1x1::BindArguments() {
+absl::Status ConvBuffer1x1::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(weights_.GetMemoryPtr()));
@@ -313,7 +313,7 @@ Status ConvBuffer1x1::BindArguments() {
                        src_width_elements * src_[0]->Height());
   RETURN_IF_ERROR(kernel_.SetBytesAuto(src_size));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWBatchedHSB()));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 ConvBuffer1x1::GetGridSize() const {
@@ -328,13 +328,13 @@ int3 ConvBuffer1x1::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status ConvBuffer1x1::Tune(const TuningParameters& params) {
+absl::Status ConvBuffer1x1::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroupConv(params, kernel_, GetGridSize(),
                               &conv_params_.work_group_size);
 }
 
-Status ConvBuffer1x1::AddToQueue(CLCommandQueue* queue) {
+absl::Status ConvBuffer1x1::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(),
                                  conv_params_.work_group_size);
@@ -351,12 +351,12 @@ bool IsConvBuffer1x1Supported(const OperationDef& definition,
          attr.padding.appended.w == 0 && attr.padding.appended.h == 0;
 }
 
-Status CreateConvBuffer1x1(const CreationContext& creation_context,
-                           const OperationDef& definition,
-                           const Convolution2DAttributes& attr,
-                           ConvBuffer1x1* result, const BHWC* shape) {
+absl::Status CreateConvBuffer1x1(const CreationContext& creation_context,
+                                 const OperationDef& definition,
+                                 const Convolution2DAttributes& attr,
+                                 ConvBuffer1x1* result, const BHWC* shape) {
   if (!IsConvBuffer1x1Supported(definition, attr)) {
-    return InvalidArgumentError("ConvBuffer1x1 doesn't supported");
+    return absl::InvalidArgumentError("ConvBuffer1x1 is not supported");
   }
   const int dst_depth = IntegralDivideRoundUp(attr.weights.shape.o, 4);
   const int src_depth = IntegralDivideRoundUp(attr.weights.shape.i, 4);
@@ -372,10 +372,10 @@ Status CreateConvBuffer1x1(const CreationContext& creation_context,
   return result->UploadData(attr.weights, attr.bias, creation_context.context);
 }
 
-Status CreateConvBuffer1x1(const CreationContext& creation_context,
-                           const OperationDef& definition,
-                           const FullyConnectedAttributes& attr,
-                           ConvBuffer1x1* result, const BHWC* shape) {
+absl::Status CreateConvBuffer1x1(const CreationContext& creation_context,
+                                 const OperationDef& definition,
+                                 const FullyConnectedAttributes& attr,
+                                 ConvBuffer1x1* result, const BHWC* shape) {
   const int dst_depth = IntegralDivideRoundUp(attr.weights.shape.o, 4);
   const int src_depth = IntegralDivideRoundUp(attr.weights.shape.i, 4);
   ConvBuffer1x1::ConvParams conv_params;
@@ -392,11 +392,10 @@ Status CreateConvBuffer1x1(const CreationContext& creation_context,
   return result->UploadData(attr.weights, attr.bias, creation_context.context);
 }
 
-Status CreateConvBuffer1x1Wino4x4To6x6(const CreationContext& creation_context,
-                                       const OperationDef& definition,
-                                       const Convolution2DAttributes& attr,
-                                       ConvBuffer1x1* result,
-                                       const BHWC* shape) {
+absl::Status CreateConvBuffer1x1Wino4x4To6x6(
+    const CreationContext& creation_context, const OperationDef& definition,
+    const Convolution2DAttributes& attr, ConvBuffer1x1* result,
+    const BHWC* shape) {
   const int dst_depth = IntegralDivideRoundUp(attr.weights.shape.o, 4);
   const int src_depth = IntegralDivideRoundUp(attr.weights.shape.i, 4);
   ConvBuffer1x1::ConvParams conv_params;
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/conv_buffer_1x1.h b/tensorflow/lite/delegates/gpu/cl/kernels/conv_buffer_1x1.h
index 54e99d29ec7..07da846107e 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/conv_buffer_1x1.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/conv_buffer_1x1.h
@@ -45,10 +45,10 @@ class ConvBuffer1x1 : public GPUOperation {
   ConvBuffer1x1(const ConvBuffer1x1&) = delete;
   ConvBuffer1x1& operator=(const ConvBuffer1x1&) = delete;
 
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   struct ConvParams {
     int3 block_size = int3(1, 1, 1);
@@ -64,33 +64,33 @@ class ConvBuffer1x1 : public GPUOperation {
 
  private:
   ConvBuffer1x1(const OperationDef& definition, const ConvParams& conv_params);
-  friend Status CreateConvBuffer1x1(const CreationContext& creation_context,
-                                    const OperationDef& definition,
-                                    const Convolution2DAttributes& attr,
-                                    ConvBuffer1x1* result, const BHWC* shape);
-  friend Status CreateConvBuffer1x1(const CreationContext& creation_context,
-                                    const OperationDef& definition,
-                                    const FullyConnectedAttributes& attr,
-                                    ConvBuffer1x1* result, const BHWC* shape);
-  friend Status CreateConvBuffer1x1Wino4x4To6x6(
+  friend absl::Status CreateConvBuffer1x1(
+      const CreationContext& creation_context, const OperationDef& definition,
+      const Convolution2DAttributes& attr, ConvBuffer1x1* result,
+      const BHWC* shape);
+  friend absl::Status CreateConvBuffer1x1(
+      const CreationContext& creation_context, const OperationDef& definition,
+      const FullyConnectedAttributes& attr, ConvBuffer1x1* result,
+      const BHWC* shape);
+  friend absl::Status CreateConvBuffer1x1Wino4x4To6x6(
       const CreationContext& creation_context, const OperationDef& definition,
       const Convolution2DAttributes& attr, ConvBuffer1x1* result,
       const BHWC* shape);
 
   template <DataType T>
-  Status UploadData(const ::tflite::gpu::Tensor<OHWI, T>& weights,
-                    const ::tflite::gpu::Tensor<Linear, T>& biases,
-                    CLContext* context);
+  absl::Status UploadData(const ::tflite::gpu::Tensor<OHWI, T>& weights,
+                          const ::tflite::gpu::Tensor<Linear, T>& biases,
+                          CLContext* context);
   template <DataType T>
-  Status UploadDataForWinograd4x4To6x6(
+  absl::Status UploadDataForWinograd4x4To6x6(
       const ::tflite::gpu::Tensor<OHWI, T>& weights, const CLDevice& device,
       CLContext* context);
 
   template <DataType T>
-  Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
-                       CLContext* context);
+  absl::Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
+                             CLContext* context);
 
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   Buffer weights_;
@@ -101,20 +101,20 @@ class ConvBuffer1x1 : public GPUOperation {
 };
 
 template <DataType T>
-Status ConvBuffer1x1::UploadData(const ::tflite::gpu::Tensor<OHWI, T>& weights,
-                                 const ::tflite::gpu::Tensor<Linear, T>& biases,
-                                 CLContext* context) {
+absl::Status ConvBuffer1x1::UploadData(
+    const ::tflite::gpu::Tensor<OHWI, T>& weights,
+    const ::tflite::gpu::Tensor<Linear, T>& biases, CLContext* context) {
   RETURN_IF_ERROR(UploadWeights(weights, context));
   LinearStorageCreateInfo create_info;
   create_info.storage_type = LinearStorageType::BUFFER;
   create_info.data_type = definition_.GetDataType();
   create_info.aligned_size = weights.shape.o;
   RETURN_IF_ERROR(CreateLinearStorage(create_info, biases, context, &biases_));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <DataType T>
-Status ConvBuffer1x1::UploadDataForWinograd4x4To6x6(
+absl::Status ConvBuffer1x1::UploadDataForWinograd4x4To6x6(
     const ::tflite::gpu::Tensor<OHWI, T>& weights, const CLDevice& device,
     CLContext* context) {
   ::tflite::gpu::Tensor<OHWI, T> wino_weights;
@@ -132,7 +132,7 @@ Status ConvBuffer1x1::UploadDataForWinograd4x4To6x6(
 }
 
 template <DataType T>
-Status ConvBuffer1x1::UploadWeights(
+absl::Status ConvBuffer1x1::UploadWeights(
     const ::tflite::gpu::Tensor<OHWI, T>& weights, CLContext* context) {
   const int dst_depth = IntegralDivideRoundUp(weights.shape.o, 4);
   const int src_depth = IntegralDivideRoundUp(weights.shape.i, 4);
@@ -162,21 +162,22 @@ Status ConvBuffer1x1::UploadWeights(
 bool IsConvBuffer1x1Supported(const OperationDef& definition,
                               const Convolution2DAttributes& attr);
 
-Status CreateConvBuffer1x1(const CreationContext& creation_context,
-                           const OperationDef& definition,
-                           const Convolution2DAttributes& attr,
-                           ConvBuffer1x1* result, const BHWC* shape = nullptr);
+absl::Status CreateConvBuffer1x1(const CreationContext& creation_context,
+                                 const OperationDef& definition,
+                                 const Convolution2DAttributes& attr,
+                                 ConvBuffer1x1* result,
+                                 const BHWC* shape = nullptr);
 
-Status CreateConvBuffer1x1(const CreationContext& creation_context,
-                           const OperationDef& definition,
-                           const FullyConnectedAttributes& attr,
-                           ConvBuffer1x1* result, const BHWC* shape = nullptr);
+absl::Status CreateConvBuffer1x1(const CreationContext& creation_context,
+                                 const OperationDef& definition,
+                                 const FullyConnectedAttributes& attr,
+                                 ConvBuffer1x1* result,
+                                 const BHWC* shape = nullptr);
 
-Status CreateConvBuffer1x1Wino4x4To6x6(const CreationContext& creation_context,
-                                       const OperationDef& definition,
-                                       const Convolution2DAttributes& attr,
-                                       ConvBuffer1x1* result,
-                                       const BHWC* shape = nullptr);
+absl::Status CreateConvBuffer1x1Wino4x4To6x6(
+    const CreationContext& creation_context, const OperationDef& definition,
+    const Convolution2DAttributes& attr, ConvBuffer1x1* result,
+    const BHWC* shape = nullptr);
 
 }  // namespace cl
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/conv_constants.cc b/tensorflow/lite/delegates/gpu/cl/kernels/conv_constants.cc
index ceb3b8985e8..07d2da9d641 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/conv_constants.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/conv_constants.cc
@@ -219,7 +219,7 @@ ConvConstants& ConvConstants::operator=(ConvConstants&& kernel) {
   return *this;
 }
 
-Status ConvConstants::Compile(const CreationContext& creation_context) {
+absl::Status ConvConstants::Compile(const CreationContext& creation_context) {
   const bool stride_correction =
       definition_.IsBatchSupported() && stride_.x != 1;
   const auto code = GenerateConvolutionConstantCode(
@@ -240,7 +240,7 @@ Status ConvConstants::Compile(const CreationContext& creation_context) {
       *creation_context.device, &kernel_);
 }
 
-Status ConvConstants::BindArguments() {
+absl::Status ConvConstants::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(weights_.GetMemoryPtr()));
@@ -254,7 +254,7 @@ Status ConvConstants::BindArguments() {
       kernel_.SetBytesAuto(int2(dilation_.x * src_[0]->Batch(), dilation_.y)));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(src_[0]->GetWBatchedHSB()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWBatchedHSB()));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 ConvConstants::GetGridSize() const {
@@ -263,12 +263,12 @@ int3 ConvConstants::GetGridSize() const {
   return int3(grid_x, grid_y, 1);
 }
 
-Status ConvConstants::Tune(const TuningParameters& params) {
+absl::Status ConvConstants::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
 
-Status ConvConstants::AddToQueue(CLCommandQueue* queue) {
+absl::Status ConvConstants::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
@@ -294,12 +294,12 @@ bool IsConvConstantsSupported(const CLDevice& device,
   return filters_buffer_size <= kConstantMaxSize && flt4_registers <= 8;
 }
 
-Status CreateConvConstants(const CreationContext& creation_context,
-                           const OperationDef& definition,
-                           const Convolution2DAttributes& attr,
-                           ConvConstants* result) {
+absl::Status CreateConvConstants(const CreationContext& creation_context,
+                                 const OperationDef& definition,
+                                 const Convolution2DAttributes& attr,
+                                 ConvConstants* result) {
   if (!IsConvConstantsSupported(*creation_context.device, definition, attr)) {
-    return InvalidArgumentError("ConvConstants doesn't supported");
+    return absl::InvalidArgumentError("ConvConstants is not supported");
   }
   *result = ConvConstants(definition, attr);
   RETURN_IF_ERROR(
@@ -310,8 +310,7 @@ Status CreateConvConstants(const CreationContext& creation_context,
   create_info.aligned_size = attr.weights.shape.o;
   RETURN_IF_ERROR(CreateLinearStorage(
       create_info, attr.bias, creation_context.context, &result->biases_));
-
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
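
CreateConvConstants above (like CreateConvBuffer1x1 earlier) follows a validate-then-construct factory shape: an Is*Supported predicate gates construction, and a rejected configuration is reported with absl::InvalidArgumentError rather than a crash or a silent fallback. A minimal sketch of that gate under invented names; Attr, Op, IsSupported, and CreateOp are illustrative, not the patch's types.

#include "absl/status/status.h"

struct Attr { int weight_bytes = 0; };
struct Op { Attr attr; };

// Stand-in for IsConvConstantsSupported: a hard resource limit.
bool IsSupported(const Attr& attr) { return attr.weight_bytes <= 1024; }

absl::Status CreateOp(const Attr& attr, Op* result) {
  if (!IsSupported(attr)) {
    // Mirrors the factories above: reject before constructing anything.
    return absl::InvalidArgumentError("Op is not supported");
  }
  *result = Op{attr};
  return absl::OkStatus();
}

int main() {
  Op op;
  return CreateOp(Attr{512}, &op).ok() ? 0 : 1;
}
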
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/conv_constants.h b/tensorflow/lite/delegates/gpu/cl/kernels/conv_constants.h
index b4830d20fd1..fc0e66b5e86 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/conv_constants.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/conv_constants.h
@@ -35,10 +35,10 @@ namespace cl {
 class ConvConstants : public GPUOperation {
  public:
   ConvConstants() = default;
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   ConvConstants(ConvConstants&& kernel);
@@ -47,10 +47,9 @@ class ConvConstants : public GPUOperation {
   ConvConstants& operator=(const ConvConstants&) = delete;
 
  private:
-  friend Status CreateConvConstants(const CreationContext& creation_context,
-                                    const OperationDef& definition,
-                                    const Convolution2DAttributes& attr,
-                                    ConvConstants* result);
+  friend absl::Status CreateConvConstants(
+      const CreationContext& creation_context, const OperationDef& definition,
+      const Convolution2DAttributes& attr, ConvConstants* result);
   explicit ConvConstants(const OperationDef& definition,
                          const Convolution2DAttributes& attr)
       : GPUOperation(definition),
@@ -62,14 +61,14 @@ class ConvConstants : public GPUOperation {
         dst_channels_(attr.weights.shape.o) {}
 
   template <DataType T>
-  Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
-                       CLContext* context);
+  absl::Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
+                             CLContext* context);
 
   template <DataType S, typename T>
   void RearrangeWeightsData(const ::tflite::gpu::Tensor<OHWI, S>& weights,
                             absl::Span<T> dst);
 
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   Buffer weights_;
@@ -87,7 +86,7 @@ class ConvConstants : public GPUOperation {
 };
 
 template <DataType T>
-Status ConvConstants::UploadWeights(
+absl::Status ConvConstants::UploadWeights(
     const ::tflite::gpu::Tensor<OHWI, T>& weights, CLContext* context) {
   const int dst_depth = IntegralDivideRoundUp(weights.shape.o, 4);
   const int kernel_x = weights.shape.w;
@@ -157,10 +156,10 @@ bool IsConvConstantsSupported(const CLDevice& device,
                               const OperationDef& definition,
                               const Convolution2DAttributes& attr);
 
-Status CreateConvConstants(const CreationContext& creation_context,
-                           const OperationDef& definition,
-                           const Convolution2DAttributes& attr,
-                           ConvConstants* result);
+absl::Status CreateConvConstants(const CreationContext& creation_context,
+                                 const OperationDef& definition,
+                                 const Convolution2DAttributes& attr,
+                                 ConvConstants* result);
 
 }  // namespace cl
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/conv_powervr.cc b/tensorflow/lite/delegates/gpu/cl/kernels/conv_powervr.cc
index c1860d6452f..bd4f53395f3 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/conv_powervr.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/conv_powervr.cc
@@ -173,7 +173,7 @@ ConvPowerVR& ConvPowerVR::operator=(ConvPowerVR&& operation) {
   return *this;
 }
 
-Status ConvPowerVR::Compile(const CreationContext& creation_context) {
+absl::Status ConvPowerVR::Compile(const CreationContext& creation_context) {
   const bool stride_correction =
       definition_.IsBatchSupported() && stride_padding_.x != 1;
   const std::string code =
@@ -189,7 +189,7 @@ Status ConvPowerVR::Compile(const CreationContext& creation_context) {
       *creation_context.device, &kernel_);
 }
 
-Status ConvPowerVR::BindArguments() {
+absl::Status ConvPowerVR::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(weights_.GetMemoryPtr()));
@@ -211,7 +211,7 @@ Status ConvPowerVR::BindArguments() {
   }
   RETURN_IF_ERROR(kernel_.SetBytesAuto(src_[0]->GetWBatchedHSB()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWBatchedHSB()));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 ConvPowerVR::GetGridSize() const {
@@ -245,13 +245,13 @@ int3 ConvPowerVR::GetGridSize() const {
   }
 }
 
-Status ConvPowerVR::Tune(const TuningParameters& params) {
+absl::Status ConvPowerVR::Tune(const TuningParameters& params) {
   if (conv_params_.weights_upload_type ==
           WeightsUploadType::LOCAL_MEM_ASYNC_SUBGROUP ||
       conv_params_.weights_upload_type ==
           WeightsUploadType::LOCAL_MEM_BY_THREADS ||
       conv_params_.fixed_work_group_size) {
-    return OkStatus();
+    return absl::OkStatus();
   }
   if (conv_params_.work_group_launch_order[0] == 0 &&
       conv_params_.work_group_launch_order[1] == 1 &&
@@ -260,10 +260,10 @@ Status ConvPowerVR::Tune(const TuningParameters& params) {
     return GetBestWorkGroupConv(params, kernel_, GetGridSize(),
                                 &conv_params_.work_group_size);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status ConvPowerVR::AddToQueue(CLCommandQueue* queue) {
+absl::Status ConvPowerVR::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(),
                                  conv_params_.work_group_size);
@@ -848,27 +848,26 @@ ConvPowerVR::ConvParams ConvPowerVR::GuessBestParamsWinograd(
   return params;
 }
 
-Status CreateConvPowerVR(const CreationContext& creation_context,
-                         const OperationDef& definition,
-                         const Convolution2DAttributes& attr,
-                         ConvPowerVR* result, const BHWC* dst_shape) {
+absl::Status CreateConvPowerVR(const CreationContext& creation_context,
+                               const OperationDef& definition,
+                               const Convolution2DAttributes& attr,
+                               ConvPowerVR* result, const BHWC* dst_shape) {
   *result = ConvPowerVR(definition, attr, *creation_context.device, dst_shape);
   return result->UploadData(attr.weights, attr.bias, creation_context.context);
 }
 
-Status CreateConvPowerVR(const CreationContext& creation_context,
-                         const OperationDef& definition,
-                         const FullyConnectedAttributes& attr,
-                         ConvPowerVR* result, const BHWC* dst_shape) {
+absl::Status CreateConvPowerVR(const CreationContext& creation_context,
+                               const OperationDef& definition,
+                               const FullyConnectedAttributes& attr,
+                               ConvPowerVR* result, const BHWC* dst_shape) {
   *result = ConvPowerVR(definition, attr, *creation_context.device, dst_shape);
   return result->UploadData(attr.weights, attr.bias, creation_context.context);
 }
 
-Status CreateConvPowerVRWino4x4To6x6(const CreationContext& creation_context,
-                                     const OperationDef& definition,
-                                     const Convolution2DAttributes& attr,
-                                     ConvPowerVR* result,
-                                     const BHWC* dst_shape) {
+absl::Status CreateConvPowerVRWino4x4To6x6(
+    const CreationContext& creation_context, const OperationDef& definition,
+    const Convolution2DAttributes& attr, ConvPowerVR* result,
+    const BHWC* dst_shape) {
   *result = ConvPowerVR(definition);
   result->conv_params_ = result->GuessBestParamsWinograd(
       *creation_context.device, definition, attr, dst_shape);
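
ConvPowerVR::Tune (like Conv3D::Tune earlier) returns absl::OkStatus() early when the kernel pins its work-group size: success here means "nothing to tune", not an error, so callers need no special case. A sketch of that early-out; Params and PickBestWorkGroup are illustrative stand-ins for TuningParameters and GetBestWorkGroupConv.

#include "absl/status/status.h"

struct Params { bool fixed_work_group_size = false; };

// Stand-in for GetBestWorkGroupConv: benchmark candidate work groups.
absl::Status PickBestWorkGroup() { return absl::OkStatus(); }

absl::Status Tune(const Params& params) {
  if (params.fixed_work_group_size) {
    return absl::OkStatus();  // early out: success without any tuning
  }
  return PickBestWorkGroup();
}

int main() {
  Params p;
  p.fixed_work_group_size = true;
  return Tune(p).ok() ? 0 : 1;
}
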
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/conv_powervr.h b/tensorflow/lite/delegates/gpu/cl/kernels/conv_powervr.h
index 44145c585da..954205f1ca3 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/conv_powervr.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/conv_powervr.h
@@ -39,9 +39,9 @@ namespace cl {
 class ConvPowerVR : public GPUOperation {
  public:
   ConvPowerVR() = default;
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   ConvPowerVR(ConvPowerVR&& operation);
@@ -87,29 +87,31 @@ class ConvPowerVR : public GPUOperation {
   explicit ConvPowerVR(const OperationDef& definition);
 
   template <DataType T>
-  Status UploadData(const ::tflite::gpu::Tensor<OHWI, T>& weights,
-                    const ::tflite::gpu::Tensor<Linear, T>& biases,
-                    CLContext* context);
+  absl::Status UploadData(const ::tflite::gpu::Tensor<OHWI, T>& weights,
+                          const ::tflite::gpu::Tensor<Linear, T>& biases,
+                          CLContext* context);
   template <DataType T>
-  Status UploadDataForWinograd4x4To6x6(
+  absl::Status UploadDataForWinograd4x4To6x6(
       const ::tflite::gpu::Tensor<OHWI, T>& weights, const CLDevice& device,
       CLContext* context);
 
   template <DataType T>
-  Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
-                       CLContext* context);
+  absl::Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
+                             CLContext* context);
 
-  friend Status CreateConvPowerVR(const CreationContext& creation_context,
-                                  const OperationDef& definition,
-                                  const Convolution2DAttributes& attr,
-                                  ConvPowerVR* result, const BHWC* dst_shape);
+  friend absl::Status CreateConvPowerVR(const CreationContext& creation_context,
+                                        const OperationDef& definition,
+                                        const Convolution2DAttributes& attr,
+                                        ConvPowerVR* result,
+                                        const BHWC* dst_shape);
 
-  friend Status CreateConvPowerVR(const CreationContext& creation_context,
-                                  const OperationDef& definition,
-                                  const FullyConnectedAttributes& attr,
-                                  ConvPowerVR* result, const BHWC* dst_shape);
+  friend absl::Status CreateConvPowerVR(const CreationContext& creation_context,
+                                        const OperationDef& definition,
+                                        const FullyConnectedAttributes& attr,
+                                        ConvPowerVR* result,
+                                        const BHWC* dst_shape);
 
-  friend Status CreateConvPowerVRWino4x4To6x6(
+  friend absl::Status CreateConvPowerVRWino4x4To6x6(
       const CreationContext& creation_context, const OperationDef& definition,
       const Convolution2DAttributes& attr, ConvPowerVR* result,
       const BHWC* dst_shape);
@@ -138,7 +140,7 @@ class ConvPowerVR : public GPUOperation {
                              bool different_weights_for_height,
                              const BHWC* dst_shape = nullptr) const;
 
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   Buffer weights_;
@@ -152,20 +154,20 @@ class ConvPowerVR : public GPUOperation {
 };
 
 template <DataType T>
-Status ConvPowerVR::UploadData(const ::tflite::gpu::Tensor<OHWI, T>& weights,
-                               const ::tflite::gpu::Tensor<Linear, T>& biases,
-                               CLContext* context) {
+absl::Status ConvPowerVR::UploadData(
+    const ::tflite::gpu::Tensor<OHWI, T>& weights,
+    const ::tflite::gpu::Tensor<Linear, T>& biases, CLContext* context) {
   RETURN_IF_ERROR(UploadWeights(weights, context));
   LinearStorageCreateInfo create_info;
   create_info.storage_type = LinearStorageType::BUFFER;
   create_info.data_type = conv_params_.weights_data_type;
   create_info.aligned_size = weights.shape.o;
   RETURN_IF_ERROR(CreateLinearStorage(create_info, biases, context, &biases_));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <DataType T>
-Status ConvPowerVR::UploadDataForWinograd4x4To6x6(
+absl::Status ConvPowerVR::UploadDataForWinograd4x4To6x6(
     const ::tflite::gpu::Tensor<OHWI, T>& weights, const CLDevice& device,
     CLContext* context) {
   ::tflite::gpu::Tensor<OHWI, T> wino_weights;
@@ -179,12 +181,12 @@ Status ConvPowerVR::UploadDataForWinograd4x4To6x6(
   bias.shape = Linear(weights.shape.o);
   bias.data.resize(weights.shape.o, 0.0f);
   RETURN_IF_ERROR(CreateLinearStorage(create_info, bias, context, &biases_));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <DataType T>
-Status ConvPowerVR::UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
-                                  CLContext* context) {
+absl::Status ConvPowerVR::UploadWeights(
+    const ::tflite::gpu::Tensor<OHWI, T>& weights, CLContext* context) {
   const int dst_depth = IntegralDivideRoundUp(weights.shape.o, 4);
   const int src_depth = IntegralDivideRoundUp(weights.shape.i, 4);
 
@@ -210,21 +212,22 @@ Status ConvPowerVR::UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
   }
 }
 
-Status CreateConvPowerVR(const CreationContext& creation_context,
-                         const OperationDef& definition,
-                         const Convolution2DAttributes& attr,
-                         ConvPowerVR* result, const BHWC* dst_shape = nullptr);
+absl::Status CreateConvPowerVR(const CreationContext& creation_context,
+                               const OperationDef& definition,
+                               const Convolution2DAttributes& attr,
+                               ConvPowerVR* result,
+                               const BHWC* dst_shape = nullptr);
 
-Status CreateConvPowerVR(const CreationContext& creation_context,
-                         const OperationDef& definition,
-                         const FullyConnectedAttributes& attr,
-                         ConvPowerVR* result, const BHWC* dst_shape = nullptr);
+absl::Status CreateConvPowerVR(const CreationContext& creation_context,
+                               const OperationDef& definition,
+                               const FullyConnectedAttributes& attr,
+                               ConvPowerVR* result,
+                               const BHWC* dst_shape = nullptr);
 
-Status CreateConvPowerVRWino4x4To6x6(const CreationContext& creation_context,
-                                     const OperationDef& definition,
-                                     const Convolution2DAttributes& attr,
-                                     ConvPowerVR* result,
-                                     const BHWC* dst_shape = nullptr);
+absl::Status CreateConvPowerVRWino4x4To6x6(
+    const CreationContext& creation_context, const OperationDef& definition,
+    const Convolution2DAttributes& attr, ConvPowerVR* result,
+    const BHWC* dst_shape = nullptr);
 
 }  // namespace cl
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/conv_texture.cc b/tensorflow/lite/delegates/gpu/cl/kernels/conv_texture.cc
index 780d6646ea8..953f564c40a 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/conv_texture.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/conv_texture.cc
@@ -30,6 +30,7 @@ namespace tflite {
 namespace gpu {
 namespace cl {
 namespace {
+
 std::string GenerateConvCode(
     const OperationDef& op_def, const int3& block_size, bool is1x1,
     bool adreno4xx_optimization, bool stride_correction,
@@ -384,7 +385,7 @@ ConvTexture& ConvTexture::operator=(ConvTexture&& operation) {
   return *this;
 }
 
-Status ConvTexture::Compile(const CreationContext& creation_context) {
+absl::Status ConvTexture::Compile(const CreationContext& creation_context) {
   auto storage_type = definition_.GetPrimaryStorageType();
   bool is1x1 = kernel_size_.x == 1 && kernel_size_.y == 1;
   bool adreno4xx_optimization =
@@ -407,7 +408,7 @@ Status ConvTexture::Compile(const CreationContext& creation_context) {
       *creation_context.device, &kernel_);
 }
 
-Status ConvTexture::BindArguments() {
+absl::Status ConvTexture::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(weights_0_.GetMemoryPtr()));
@@ -427,7 +428,7 @@ Status ConvTexture::BindArguments() {
   RETURN_IF_ERROR(kernel_.SetBytesAuto(stride_));
   RETURN_IF_ERROR(
       kernel_.SetBytesAuto(int2(padding_.x * src_[0]->Batch(), padding_.y)));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 ConvTexture::GetGridSize() const {
@@ -438,37 +439,36 @@ int3 ConvTexture::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status ConvTexture::Tune(const TuningParameters& params) {
+absl::Status ConvTexture::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroupConv(params, kernel_, GetGridSize(),
                               &work_group_size_);
 }
 
-Status ConvTexture::AddToQueue(CLCommandQueue* queue) {
+absl::Status ConvTexture::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
 
-Status CreateConvTexture(const CreationContext& creation_context,
-                         const OperationDef& definition,
-                         const Convolution2DAttributes& attr,
-                         ConvTexture* result) {
+absl::Status CreateConvTexture(const CreationContext& creation_context,
+                               const OperationDef& definition,
+                               const Convolution2DAttributes& attr,
+                               ConvTexture* result) {
   *result = ConvTexture(definition, attr);
   return result->UploadData(attr.weights, attr.bias, creation_context.context);
 }
 
-Status CreateConvTexture(const CreationContext& creation_context,
-                         const OperationDef& definition,
-                         const FullyConnectedAttributes& attr,
-                         ConvTexture* result) {
+absl::Status CreateConvTexture(const CreationContext& creation_context,
+                               const OperationDef& definition,
+                               const FullyConnectedAttributes& attr,
+                               ConvTexture* result) {
   *result = ConvTexture(definition);
   return result->UploadData(attr.weights, attr.bias, creation_context.context);
 }
 
-Status CreateConvTextureWino4x4To6x6(const CreationContext& creation_context,
-                                     const OperationDef& definition,
-                                     const Convolution2DAttributes& attr,
-                                     ConvTexture* result) {
+absl::Status CreateConvTextureWino4x4To6x6(
+    const CreationContext& creation_context, const OperationDef& definition,
+    const Convolution2DAttributes& attr, ConvTexture* result) {
   *result = ConvTexture(definition);
   result->different_weights_for_height_ = true;
   result->block_size_ = {4, 1, 2};
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/conv_texture.h b/tensorflow/lite/delegates/gpu/cl/kernels/conv_texture.h
index fb25f655057..b7fbac91cf2 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/conv_texture.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/conv_texture.h
@@ -41,10 +41,10 @@ namespace cl {
 class ConvTexture : public GPUOperation {
  public:
   ConvTexture() = default;
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   ConvTexture(ConvTexture&& operation);
@@ -53,16 +53,16 @@ class ConvTexture : public GPUOperation {
   ConvTexture& operator=(const ConvTexture&) = delete;
 
  private:
-  friend Status CreateConvTexture(const CreationContext& creation_context,
-                                  const OperationDef& definition,
-                                  const Convolution2DAttributes& attr,
-                                  ConvTexture* result);
-  friend Status CreateConvTexture(const CreationContext& creation_context,
-                                  const OperationDef& definition,
-                                  const FullyConnectedAttributes& attr,
-                                  ConvTexture* result);
+  friend absl::Status CreateConvTexture(const CreationContext& creation_context,
+                                        const OperationDef& definition,
+                                        const Convolution2DAttributes& attr,
+                                        ConvTexture* result);
+  friend absl::Status CreateConvTexture(const CreationContext& creation_context,
+                                        const OperationDef& definition,
+                                        const FullyConnectedAttributes& attr,
+                                        ConvTexture* result);
 
-  friend Status CreateConvTextureWino4x4To6x6(
+  friend absl::Status CreateConvTextureWino4x4To6x6(
       const CreationContext& creation_context, const OperationDef& definition,
       const Convolution2DAttributes& attr, ConvTexture* result);
 
@@ -70,25 +70,25 @@ class ConvTexture : public GPUOperation {
               const Convolution2DAttributes& attr);
   explicit ConvTexture(const OperationDef& definition);
   template <DataType T>
-  Status UploadData(const ::tflite::gpu::Tensor<OHWI, T>& weights,
-                    const ::tflite::gpu::Tensor<Linear, T>& biases,
-                    CLContext* context);
+  absl::Status UploadData(const ::tflite::gpu::Tensor<OHWI, T>& weights,
+                          const ::tflite::gpu::Tensor<Linear, T>& biases,
+                          CLContext* context);
 
   template <DataType T>
-  Status UploadDataForWinograd4x4To6x6(
+  absl::Status UploadDataForWinograd4x4To6x6(
       const ::tflite::gpu::Tensor<OHWI, T>& weights, const CLDevice& device,
       CLContext* context);
 
   template <DataType T>
-  Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
-                       CLContext* context);
+  absl::Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
+                             CLContext* context);
 
   template <DataType S, typename T>
   void RearrangeWeightsData(const ::tflite::gpu::Tensor<OHWI, S>& weights,
                             absl::Span<T> dst_0, absl::Span<T> dst_1,
                             absl::Span<T> dst_2, absl::Span<T> dst_3);
 
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   Texture2D weights_0_;
@@ -114,20 +114,20 @@ class ConvTexture : public GPUOperation {
 };
 
 template <DataType T>
-Status ConvTexture::UploadData(const ::tflite::gpu::Tensor<OHWI, T>& weights,
-                               const ::tflite::gpu::Tensor<Linear, T>& biases,
-                               CLContext* context) {
+absl::Status ConvTexture::UploadData(
+    const ::tflite::gpu::Tensor<OHWI, T>& weights,
+    const ::tflite::gpu::Tensor<Linear, T>& biases, CLContext* context) {
   RETURN_IF_ERROR(UploadWeights(weights, context));
   LinearStorageCreateInfo create_info;
   create_info.storage_type = LinearStorageType::TEXTURE_2D;
   create_info.data_type = definition_.GetDataType();
   create_info.aligned_size = weights.shape.o;
   RETURN_IF_ERROR(CreateLinearStorage(create_info, biases, context, &biases_));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <DataType T>
-Status ConvTexture::UploadDataForWinograd4x4To6x6(
+absl::Status ConvTexture::UploadDataForWinograd4x4To6x6(
     const ::tflite::gpu::Tensor<OHWI, T>& weights, const CLDevice& device,
     CLContext* context) {
   ::tflite::gpu::Tensor<OHWI, T> wino_weights;
@@ -145,8 +145,8 @@ Status ConvTexture::UploadDataForWinograd4x4To6x6(
 }
 
 template <DataType T>
-Status ConvTexture::UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
-                                  CLContext* context) {
+absl::Status ConvTexture::UploadWeights(
+    const ::tflite::gpu::Tensor<OHWI, T>& weights, CLContext* context) {
   int dst_depth = IntegralDivideRoundUp(weights.shape.o, 4);
   dst_depth = AlignByN(dst_depth, block_size_.z);
   const int src_depth = IntegralDivideRoundUp(weights.shape.i, 4);
@@ -246,20 +246,19 @@ void ConvTexture::RearrangeWeightsData(
   }
 }
 
-Status CreateConvTexture(const CreationContext& creation_context,
-                         const OperationDef& definition,
-                         const Convolution2DAttributes& attr,
-                         ConvTexture* result);
+absl::Status CreateConvTexture(const CreationContext& creation_context,
+                               const OperationDef& definition,
+                               const Convolution2DAttributes& attr,
+                               ConvTexture* result);
 
-Status CreateConvTexture(const CreationContext& creation_context,
-                         const OperationDef& definition,
-                         const FullyConnectedAttributes& attr,
-                         ConvTexture* result);
+absl::Status CreateConvTexture(const CreationContext& creation_context,
+                               const OperationDef& definition,
+                               const FullyConnectedAttributes& attr,
+                               ConvTexture* result);
 
-Status CreateConvTextureWino4x4To6x6(const CreationContext& creation_context,
-                                     const OperationDef& definition,
-                                     const Convolution2DAttributes& attr,
-                                     ConvTexture* result);
+absl::Status CreateConvTextureWino4x4To6x6(
+    const CreationContext& creation_context, const OperationDef& definition,
+    const Convolution2DAttributes& attr, ConvTexture* result);
 
 }  // namespace cl
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/converter.cc b/tensorflow/lite/delegates/gpu/cl/kernels/converter.cc
index 947c39cd299..e3170f068e9 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/converter.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/converter.cc
@@ -35,12 +35,12 @@ namespace {
 
 class OpenClConverterImpl : public TensorObjectConverter {
  public:
-  virtual Status Init(const TensorObjectDef& input_def,
-                      const TensorObjectDef& output_def,
-                      Environment* environment) = 0;
+  virtual absl::Status Init(const TensorObjectDef& input_def,
+                            const TensorObjectDef& output_def,
+                            Environment* environment) = 0;
 
  protected:
-  Status DispatchKernel(cl_mem input, cl_mem output) {
+  absl::Status DispatchKernel(cl_mem input, cl_mem output) {
     kernel_.ResetBindingCounter();
     RETURN_IF_ERROR(kernel_.SetMemoryAuto(input));
     RETURN_IF_ERROR(kernel_.SetMemoryAuto(output));
@@ -119,9 +119,9 @@ class FromTensorConverter : public OpenClConverterImpl {
   })");
   }
 
-  Status Init(const TensorObjectDef& input_def,
-              const TensorObjectDef& output_def,
-              Environment* environment) final {
+  absl::Status Init(const TensorObjectDef& input_def,
+                    const TensorObjectDef& output_def,
+                    Environment* environment) final {
     auto params_kernel = output_def.object_def.data_layout == DataLayout::BHWC
                              ? GetToBhwcKernel(input_def, output_def)
                              : GetToDhwc4Kernel(input_def, output_def);
@@ -157,11 +157,12 @@ __kernel void from_tensor()" +
         environment->device(), &kernel_);
   }
 
-  Status Convert(const TensorObject& input_obj,
-                 const TensorObject& output_obj) override {
+  absl::Status Convert(const TensorObject& input_obj,
+                       const TensorObject& output_obj) override {
     auto output = absl::get_if<OpenClBuffer>(&output_obj);
     if (!output || !output->memobj) {
-      return InvalidArgumentError("Missing output in from_tensor converter");
+      return absl::InvalidArgumentError(
+          "Missing output in from_tensor converter");
     }
     auto input_texture = absl::get_if<OpenClTexture>(&input_obj);
     if (input_texture && input_texture->memobj) {
@@ -171,7 +172,7 @@ __kernel void from_tensor()" +
     if (input_buffer && input_buffer->memobj) {
       return DispatchKernel(input_buffer->memobj, output->memobj);
     }
-    return InvalidArgumentError("Missing input in from_tensor converter");
+    return absl::InvalidArgumentError("Missing input in from_tensor converter");
   }
 };
 
@@ -225,9 +226,9 @@ class ToTensorConverter : public OpenClConverterImpl {
 )");
   }
 
-  Status Init(const TensorObjectDef& input_def,
-              const TensorObjectDef& output_def,
-              Environment* environment) final {
+  absl::Status Init(const TensorObjectDef& input_def,
+                    const TensorObjectDef& output_def,
+                    Environment* environment) final {
     auto params_kernel = input_def.object_def.data_layout == DataLayout::BHWC
                              ? GetFromBhwcKernel(input_def, output_def)
                              : GetFromDhwc4Kernel(input_def, output_def);
@@ -261,11 +262,11 @@ __kernel void to_tensor()" +
         &kernel_);
   }
 
-  Status Convert(const TensorObject& input_obj,
-                 const TensorObject& output_obj) override {
+  absl::Status Convert(const TensorObject& input_obj,
+                       const TensorObject& output_obj) override {
     auto input = absl::get_if<OpenClBuffer>(&input_obj);
     if (!input || !input->memobj) {
-      return InvalidArgumentError("Missing input in to_tensor converter");
+      return absl::InvalidArgumentError("Missing input in to_tensor converter");
     }
     auto output_texture = absl::get_if<OpenClTexture>(&output_obj);
     if (output_texture && output_texture->memobj) {
@@ -275,7 +276,7 @@ __kernel void to_tensor()" +
     if (output_buffer && output_buffer->memobj) {
       return DispatchKernel(input->memobj, output_buffer->memobj);
     }
-    return InvalidArgumentError("Missing input in to_tensor converter");
+    return absl::InvalidArgumentError("Missing output in to_tensor converter");
   }
 };
 
@@ -318,18 +319,18 @@ class TrivialCopier : public OpenClConverterImpl {
            input.data_layout == output.data_layout;
   }
 
-  Status Init(const TensorObjectDef& input_def,
-              const TensorObjectDef& output_def,
-              Environment* environment) final {
+  absl::Status Init(const TensorObjectDef& input_def,
+                    const TensorObjectDef& output_def,
+                    Environment* environment) final {
     dims_ = input_def.dimensions;
     data_type_ = input_def.object_def.data_type;
     queue_ = environment->queue();
     region_ = CalculateTextureRegion(output_def);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Convert(const TensorObject& input_obj,
-                 const TensorObject& output_obj) override {
+  absl::Status Convert(const TensorObject& input_obj,
+                       const TensorObject& output_obj) override {
     auto texture_input = absl::get_if<OpenClTexture>(&input_obj);
     auto texture_output = absl::get_if<OpenClTexture>(&output_obj);
     if (texture_input && texture_output) {
@@ -340,12 +341,12 @@ class TrivialCopier : public OpenClConverterImpl {
     if (buffer_input && buffer_output) {
       return Copy(*buffer_input, *buffer_output);
     }
-    return InternalError("Unexpected object");
+    return absl::InternalError("Unexpected object");
   }
 
-  Status Copy(const OpenClBuffer& input, const OpenClBuffer& output) {
+  absl::Status Copy(const OpenClBuffer& input, const OpenClBuffer& output) {
     if (input.memobj == output.memobj) {
-      return OkStatus();
+      return absl::OkStatus();
     }
     return GetOpenCLError(clEnqueueCopyBuffer(
         queue_->queue(), input.memobj, output.memobj, 0, 0,
@@ -353,9 +354,9 @@ class TrivialCopier : public OpenClConverterImpl {
         nullptr));
   }
 
-  Status Copy(const OpenClTexture& input, const OpenClTexture& output) {
+  absl::Status Copy(const OpenClTexture& input, const OpenClTexture& output) {
     if (input.memobj == output.memobj) {
-      return OkStatus();
+      return absl::OkStatus();
     }
     size_t origin[3] = {0, 0, 0};
     return GetOpenCLError(
@@ -380,18 +381,18 @@ class CpuCopier : public OpenClConverterImpl {
              IsOpenClTextureOrBuffer(input.object_type)));
   }
 
-  Status Init(const TensorObjectDef& input_def,
-              const TensorObjectDef& output_def,
-              Environment* environment) final {
+  absl::Status Init(const TensorObjectDef& input_def,
+                    const TensorObjectDef& output_def,
+                    Environment* environment) final {
     region_ = CalculateTextureRegion(
         input_def.object_def.object_type == ObjectType::CPU_MEMORY ? output_def
                                                                    : input_def);
     queue_ = environment->queue();
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Convert(const TensorObject& input_obj,
-                 const TensorObject& output_obj) override {
+  absl::Status Convert(const TensorObject& input_obj,
+                       const TensorObject& output_obj) override {
     auto cpu_input = absl::get_if<CpuMemory>(&input_obj);
     auto cpu_output = absl::get_if<CpuMemory>(&output_obj);
     if (cpu_input) {
@@ -419,7 +420,7 @@ class CpuCopier : public OpenClConverterImpl {
             buffer_input->memobj, cpu_output->size_bytes, cpu_output->data);
       }
     }
-    return InternalError("Unexpected object");
+    return absl::InternalError("Unexpected object");
   }
 
  private:
@@ -442,7 +443,7 @@ class OpenClTensorConverterBuilder : public TensorObjectConverterBuilder {
             ToTensorConverter::IsSupported(input_def, output_def));
   }
 
-  Status MakeConverter(
+  absl::Status MakeConverter(
       const TensorObjectDef& input, const TensorObjectDef& output,
       std::unique_ptr<TensorObjectConverter>* converter) final {
     std::unique_ptr<OpenClConverterImpl> impl;
@@ -457,11 +458,11 @@ class OpenClTensorConverterBuilder : public TensorObjectConverterBuilder {
     } else if (ToTensorConverter::IsSupported(input_def, output_def)) {
       impl = absl::make_unique<ToTensorConverter>();
     } else {
-      return UnimplementedError("Unsupported conversion");
+      return absl::UnimplementedError("Unsupported conversion");
     }
     RETURN_IF_ERROR(impl->Init(input, output, environment_));
     *converter = std::move(impl);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   Environment* environment_;
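
A note on the convention every hunk above relies on: each fallible call returns absl::Status, RETURN_IF_ERROR propagates the first failure to the caller, and absl::OkStatus() marks explicit success at the end. A minimal sketch of what the macro is assumed to expand to (the delegate defines its own equivalent; the names below are illustrative only):

    #include "absl/status/status.h"

    // Stand-in for the delegate's RETURN_IF_ERROR: evaluate the expression
    // once and return early from the enclosing function on any non-OK status.
    #define RETURN_IF_ERROR(expr)            \
      do {                                   \
        const absl::Status _status = (expr); \
        if (!_status.ok()) return _status;   \
      } while (0)

    // Hypothetical fallible step standing in for SetMemoryAuto and friends.
    absl::Status CheckInput(bool ok) {
      return ok ? absl::OkStatus()
                : absl::InvalidArgumentError("missing input object");
    }

    absl::Status Convert(bool input_ok) {
      RETURN_IF_ERROR(CheckInput(input_ok));  // returns early on failure
      return absl::OkStatus();                // reached only on success
    }
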
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed.cc b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed.cc
index 921a257aa7e..417fb63e820 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed.cc
@@ -368,7 +368,8 @@ ConvolutionTransposed& ConvolutionTransposed::operator=(
   return *this;
 }
 
-Status ConvolutionTransposed::Compile(const CreationContext& creation_context) {
+absl::Status ConvolutionTransposed::Compile(
+    const CreationContext& creation_context) {
   const auto code = GenerateConvolutionTransposedCode(
       definition_, biases_, *creation_context.device, weights_are_buffer_,
       block_size_, linked_operations_);
@@ -380,7 +381,7 @@ Status ConvolutionTransposed::Compile(const CreationContext& creation_context) {
       *creation_context.device, &kernel_);
 }
 
-Status ConvolutionTransposed::BindArguments() {
+absl::Status ConvolutionTransposed::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   if (weights_are_buffer_) {
@@ -399,7 +400,7 @@ Status ConvolutionTransposed::BindArguments() {
   RETURN_IF_ERROR(kernel_.SetBytesAuto(padding_));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(src_[0]->GetWHSB()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWHSB()));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 ConvolutionTransposed::GetGridSize() const {
@@ -412,21 +413,21 @@ int3 ConvolutionTransposed::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status ConvolutionTransposed::Tune(const TuningParameters& params) {
+absl::Status ConvolutionTransposed::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroupConv(params, kernel_, GetGridSize(),
                               &work_group_size_);
 }
 
-Status ConvolutionTransposed::AddToQueue(CLCommandQueue* queue) {
+absl::Status ConvolutionTransposed::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
 
-Status CreateConvolutionTransposed(const CreationContext& creation_context,
-                                   const OperationDef& definition,
-                                   const ConvolutionTransposedAttributes& attr,
-                                   ConvolutionTransposed* result) {
+absl::Status CreateConvolutionTransposed(
+    const CreationContext& creation_context, const OperationDef& definition,
+    const ConvolutionTransposedAttributes& attr,
+    ConvolutionTransposed* result) {
   *result = ConvolutionTransposed(definition, attr, *creation_context.device);
   RETURN_IF_ERROR(
       result->UploadWeights(attr.weights, creation_context.context));
@@ -438,8 +439,7 @@ Status CreateConvolutionTransposed(const CreationContext& creation_context,
   create_info.aligned_size = attr.weights.shape.o;
   RETURN_IF_ERROR(CreateLinearStorage(
       create_info, attr.bias, creation_context.context, &result->biases_));
-
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
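
The BindArguments() implementations above depend on one convention that is easy to miss: the kernel wrapper keeps an internal argument counter, so binds must happen in the kernel's parameter order. A sketch under that assumption, reusing the delegate's own CLKernel, cl_mem, and int4 types (include paths are assumed from the tree layout above):

    #include "tensorflow/lite/delegates/gpu/cl/cl_kernel.h"   // CLKernel (assumed path)
    #include "tensorflow/lite/delegates/gpu/common/status.h"  // RETURN_IF_ERROR (assumed path)
    #include "tensorflow/lite/delegates/gpu/common/types.h"   // int4 (assumed path)

    namespace tflite {
    namespace gpu {
    namespace cl {

    // ResetBindingCounter() rewinds the internal argument index to 0; each
    // Set*Auto() call then binds the next OpenCL kernel argument in order.
    absl::Status BindExample(CLKernel* kernel, cl_mem src, cl_mem dst,
                             int4 size) {
      kernel->ResetBindingCounter();                // next bind targets arg 0
      RETURN_IF_ERROR(kernel->SetMemoryAuto(src));  // arg 0: input memory
      RETURN_IF_ERROR(kernel->SetMemoryAuto(dst));  // arg 1: output memory
      RETURN_IF_ERROR(kernel->SetBytesAuto(size));  // arg 2: by-value payload
      return absl::OkStatus();
    }

    }  // namespace cl
    }  // namespace gpu
    }  // namespace tflite
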
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed.h b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed.h
index 73fce020f5a..7545b9091e2 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed.h
@@ -38,10 +38,10 @@ namespace cl {
 class ConvolutionTransposed : public GPUOperation {
  public:
   ConvolutionTransposed() = default;
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   ConvolutionTransposed(ConvolutionTransposed&& operation);
@@ -50,7 +50,7 @@ class ConvolutionTransposed : public GPUOperation {
   ConvolutionTransposed& operator=(const ConvolutionTransposed&) = delete;
 
  private:
-  friend Status CreateConvolutionTransposed(
+  friend absl::Status CreateConvolutionTransposed(
       const CreationContext& creation_context, const OperationDef& definition,
       const ConvolutionTransposedAttributes& attr,
       ConvolutionTransposed* result);
@@ -58,14 +58,14 @@ class ConvolutionTransposed : public GPUOperation {
                                  const ConvolutionTransposedAttributes& attr,
                                  const CLDevice& device);
   template <DataType T>
-  Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
-                       CLContext* context);
+  absl::Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
+                             CLContext* context);
 
   template <DataType S, typename T>
   void RearrangeWeightsData(const ::tflite::gpu::Tensor<OHWI, S>& weights,
                             absl::Span<T> dst);
 
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   LinearStorage biases_;
@@ -88,7 +88,7 @@ class ConvolutionTransposed : public GPUOperation {
 };
 
 template <DataType T>
-Status ConvolutionTransposed::UploadWeights(
+absl::Status ConvolutionTransposed::UploadWeights(
     const ::tflite::gpu::Tensor<OHWI, T>& weights, CLContext* context) {
   const int dst_depth =
       AlignByN(IntegralDivideRoundUp(weights.shape.o, 4), block_size_.z);
@@ -153,7 +153,7 @@ Status ConvolutionTransposed::UploadWeights(
     }
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <DataType S, typename T>
@@ -208,10 +208,9 @@ void ConvolutionTransposed::RearrangeWeightsData(
   }
 }
 
-Status CreateConvolutionTransposed(const CreationContext& creation_context,
-                                   const OperationDef& definition,
-                                   const ConvolutionTransposedAttributes& attr,
-                                   ConvolutionTransposed* result);
+absl::Status CreateConvolutionTransposed(
+    const CreationContext& creation_context, const OperationDef& definition,
+    const ConvolutionTransposedAttributes& attr, ConvolutionTransposed* result);
 
 }  // namespace cl
 }  // namespace gpu
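
Because ConvolutionTransposed is move-only, construction goes through the free factory declared above. A hedged sketch of a call site follows; only CreateConvolutionTransposed and Compile come from this header, the wrapper function itself is hypothetical:

    // Hypothetical wrapper; creation_context, definition, and attr are set up
    // by the surrounding inference code.
    absl::Status BuildOp(const CreationContext& creation_context,
                         const OperationDef& definition,
                         const ConvolutionTransposedAttributes& attr) {
      ConvolutionTransposed op;  // default-constructed, then move-assigned
      RETURN_IF_ERROR(
          CreateConvolutionTransposed(creation_context, definition, attr, &op));
      return op.Compile(creation_context);  // compiles the OpenCL kernel
    }
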
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3d.cc b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3d.cc
index 147674b7eff..9d3f0b2639c 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3d.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3d.cc
@@ -396,7 +396,7 @@ ConvolutionTransposed3D& ConvolutionTransposed3D::operator=(
   return *this;
 }
 
-Status ConvolutionTransposed3D::Compile(
+absl::Status ConvolutionTransposed3D::Compile(
     const CreationContext& creation_context) {
   const auto code = GenerateConvolutionTransposed3DCode(
       definition_, biases_, *creation_context.device, weights_are_buffer_,
@@ -417,7 +417,7 @@ Status ConvolutionTransposed3D::Compile(
       *creation_context.device, &kernel_);
 }
 
-Status ConvolutionTransposed3D::BindArguments() {
+absl::Status ConvolutionTransposed3D::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   if (weights_are_buffer_) {
@@ -444,7 +444,7 @@ Status ConvolutionTransposed3D::BindArguments() {
       IntegralDivideRoundUp(dst_[0]->Slices(), block_size_.w)));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(src_[0]->GetWHDS()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWHDS()));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 ConvolutionTransposed3D::GetGridSize() const {
@@ -459,18 +459,18 @@ int3 ConvolutionTransposed3D::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status ConvolutionTransposed3D::Tune(const TuningParameters& params) {
+absl::Status ConvolutionTransposed3D::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroupConv(params, kernel_, GetGridSize(),
                               &work_group_size_);
 }
 
-Status ConvolutionTransposed3D::AddToQueue(CLCommandQueue* queue) {
+absl::Status ConvolutionTransposed3D::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
 
-Status CreateConvolutionTransposed3D(
+absl::Status CreateConvolutionTransposed3D(
     const CreationContext& creation_context, const OperationDef& definition,
     const ConvolutionTransposed3DAttributes& attr,
     ConvolutionTransposed3D* result) {
@@ -485,8 +485,7 @@ Status CreateConvolutionTransposed3D(
   create_info.aligned_size = attr.weights.shape.o;
   RETURN_IF_ERROR(CreateLinearStorage(
       create_info, attr.bias, creation_context.context, &result->biases_));
-
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3d.h b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3d.h
index c3fbd87a240..763494efce6 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3d.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3d.h
@@ -38,10 +38,10 @@ namespace cl {
 class ConvolutionTransposed3D : public GPUOperation {
  public:
   ConvolutionTransposed3D() = default;
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   ConvolutionTransposed3D(ConvolutionTransposed3D&& operation);
@@ -50,7 +50,7 @@ class ConvolutionTransposed3D : public GPUOperation {
   ConvolutionTransposed3D& operator=(const ConvolutionTransposed3D&) = delete;
 
  private:
-  friend Status CreateConvolutionTransposed3D(
+  friend absl::Status CreateConvolutionTransposed3D(
       const CreationContext& creation_context, const OperationDef& definition,
       const ConvolutionTransposed3DAttributes& attr,
       ConvolutionTransposed3D* result);
@@ -58,14 +58,14 @@ class ConvolutionTransposed3D : public GPUOperation {
                           const ConvolutionTransposed3DAttributes& attr,
                           const CLDevice& device);
   template <DataType T>
-  Status UploadWeights(const ::tflite::gpu::Tensor<OHWDI, T>& weights,
-                       CLContext* context);
+  absl::Status UploadWeights(const ::tflite::gpu::Tensor<OHWDI, T>& weights,
+                             CLContext* context);
 
   template <DataType S, typename T>
   void RearrangeWeightsData(const ::tflite::gpu::Tensor<OHWDI, S>& weights,
                             absl::Span<T> dst);
 
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   LinearStorage biases_;
@@ -88,7 +88,7 @@ class ConvolutionTransposed3D : public GPUOperation {
 };
 
 template <DataType T>
-Status ConvolutionTransposed3D::UploadWeights(
+absl::Status ConvolutionTransposed3D::UploadWeights(
     const ::tflite::gpu::Tensor<OHWDI, T>& weights, CLContext* context) {
   const int dst_depth =
       AlignByN(IntegralDivideRoundUp(weights.shape.o, 4), block_size_.z);
@@ -155,7 +155,7 @@ Status ConvolutionTransposed3D::UploadWeights(
     }
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <DataType S, typename T>
@@ -214,7 +214,7 @@ void ConvolutionTransposed3D::RearrangeWeightsData(
   }
 }
 
-Status CreateConvolutionTransposed3D(
+absl::Status CreateConvolutionTransposed3D(
     const CreationContext& creation_context, const OperationDef& definition,
     const ConvolutionTransposed3DAttributes& attr,
     ConvolutionTransposed3D* result);
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3.cc b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3.cc
index 7b19ac0ba38..4be593be57b 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3.cc
@@ -304,12 +304,11 @@ ConvolutionTransposed3x3& ConvolutionTransposed3x3::operator=(
   return *this;
 }
 
-Status ConvolutionTransposed3x3::Compile(
+absl::Status ConvolutionTransposed3x3::Compile(
     const CreationContext& creation_context) {
   const auto code = GenerateConvolutionTransposedCode(
       definition_, biases_, linked_operations_, weights_upload_type_, padding_,
       work_group_launch_order_);
-
   std::vector<CompilerOptions> options;
   if (definition_.precision == CalculationsPrecision::F16 &&
       creation_context.device->IsPowerVR()) {
@@ -318,11 +317,10 @@ Status ConvolutionTransposed3x3::Compile(
   RETURN_IF_ERROR(creation_context.cache->GetOrCreateCLKernel(
       code, "main_function", options, *creation_context.context,
       *creation_context.device, &kernel_));
-
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status ConvolutionTransposed3x3::BindArguments() {
+absl::Status ConvolutionTransposed3x3::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(weights_.GetMemoryPtr()));
@@ -337,10 +335,7 @@ Status ConvolutionTransposed3x3::BindArguments() {
       padding_.x >= 1 ? (padding_.x - 1) / 2 : (padding_.x - 2) / 2;
   const int padding_y =
       padding_.y >= 1 ? (padding_.y - 1) / 2 : (padding_.y - 2) / 2;
-  RETURN_IF_ERROR(
-      kernel_.SetBytesAuto(int2(padding_x * src_[0]->Batch(), padding_y)));
-
-  return OkStatus();
+  return kernel_.SetBytesAuto(int2(padding_x * src_[0]->Batch(), padding_y));
 }
 
 int3 ConvolutionTransposed3x3::GetGridSize() const {
@@ -358,7 +353,7 @@ int3 ConvolutionTransposed3x3::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status ConvolutionTransposed3x3::AddToQueue(CLCommandQueue* queue) {
+absl::Status ConvolutionTransposed3x3::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
@@ -370,13 +365,13 @@ bool IsConvolutionTransposed3x3Supported(
          attr.stride.w == 2 && attr.stride.h == 2;
 }
 
-Status CreateConvolutionTransposed3x3(
+absl::Status CreateConvolutionTransposed3x3(
     const CreationContext& creation_context, const OperationDef& definition,
     const ConvolutionTransposedAttributes& attr,
     ConvolutionTransposed3x3* result) {
   if (!IsConvolutionTransposed3x3Supported(*creation_context.device, definition,
                                            attr)) {
-    return InvalidArgumentError(
-        "ConvolutionTransposed3x3 doesn't support this attributes");
+    return absl::InvalidArgumentError(
+        "ConvolutionTransposed3x3 doesn't support these attributes");
   }
   const int2 padding = int2(attr.padding.prepended.w, attr.padding.prepended.h);
@@ -391,7 +386,7 @@ Status CreateConvolutionTransposed3x3(
   create_info.aligned_size = attr.weights.shape.o;
   RETURN_IF_ERROR(CreateLinearStorage(
       create_info, attr.bias, creation_context.context, &result->biases_));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
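
Beyond the rename, a few hunks in this file also collapse a trailing RETURN_IF_ERROR(...); return OkStatus(); pair into a single return statement. The two forms are equivalent, as this sketch with a placeholder call shows:

    absl::Status LastStep() { return absl::OkStatus(); }  // placeholder call

    // Old shape: propagate failure, then report success explicitly.
    absl::Status TailVerbose() {
      RETURN_IF_ERROR(LastStep());
      return absl::OkStatus();
    }

    // New shape: the final call's status is the function's result either way.
    absl::Status TailDirect() { return LastStep(); }
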
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3.h b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3.h
index 9e12d884719..5da112e19c0 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3.h
@@ -37,8 +37,8 @@ namespace cl {
 class ConvolutionTransposed3x3 : public GPUOperation {
  public:
   ConvolutionTransposed3x3() = default;
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   ConvolutionTransposed3x3(ConvolutionTransposed3x3&& operation);
@@ -56,19 +56,19 @@ class ConvolutionTransposed3x3 : public GPUOperation {
  private:
   ConvolutionTransposed3x3(const OperationDef& definition,
                            const CLDevice& device, int2 padding);
-  friend Status CreateConvolutionTransposed3x3(
+  friend absl::Status CreateConvolutionTransposed3x3(
       const CreationContext& creation_context, const OperationDef& definition,
       const ConvolutionTransposedAttributes& attr,
       ConvolutionTransposed3x3* result);
   template <DataType T>
-  Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
-                       CLContext* context);
+  absl::Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
+                             CLContext* context);
 
   template <DataType S, typename T>
   void RearrangeWeightsData(const ::tflite::gpu::Tensor<OHWI, S>& weights,
                             absl::Span<T> dst);
 
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   int2 padding_;
@@ -82,7 +82,7 @@ class ConvolutionTransposed3x3 : public GPUOperation {
 };
 
 template <DataType T>
-Status ConvolutionTransposed3x3::UploadWeights(
+absl::Status ConvolutionTransposed3x3::UploadWeights(
     const ::tflite::gpu::Tensor<OHWI, T>& weights, CLContext* context) {
   const int src_depth = IntegralDivideRoundUp(weights.shape.i, 4);
   const int dst_depth = IntegralDivideRoundUp(weights.shape.o, 4);
@@ -165,7 +165,7 @@ bool IsConvolutionTransposed3x3Supported(
     const CLDevice& device, const OperationDef& definition,
     const ConvolutionTransposedAttributes& attr);
 
-Status CreateConvolutionTransposed3x3(
+absl::Status CreateConvolutionTransposed3x3(
     const CreationContext& creation_context, const OperationDef& definition,
     const ConvolutionTransposedAttributes& attr,
     ConvolutionTransposed3x3* result);
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3_thin.cc b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3_thin.cc
index 40838d28eed..b8e4b25443e 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3_thin.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3_thin.cc
@@ -221,19 +221,18 @@ ConvolutionTransposed3x3Thin& ConvolutionTransposed3x3Thin::operator=(
   return *this;
 }
 
-Status ConvolutionTransposed3x3Thin::Compile(
+absl::Status ConvolutionTransposed3x3Thin::Compile(
     const CreationContext& creation_context) {
   const auto code = GenerateConvolutionTransposedCode(
       definition_, biases_, IntegralDivideRoundUp(src_channels_, 4),
       IntegralDivideRoundUp(dst_channels_, 4), *creation_context.device,
       linked_operations_);
-
   return creation_context.cache->GetOrCreateCLKernel(
       code, "main_function", *creation_context.context,
       *creation_context.device, &kernel_);
 }
 
-Status ConvolutionTransposed3x3Thin::BindArguments() {
+absl::Status ConvolutionTransposed3x3Thin::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(weights_.GetMemoryPtr()));
@@ -242,7 +241,7 @@ Status ConvolutionTransposed3x3Thin::BindArguments() {
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(dst_[0]->GetMemoryPtrForWriting()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(src_[0]->GetWHSB()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWHSB()));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 ConvolutionTransposed3x3Thin::GetGridSize() const {
@@ -252,12 +251,13 @@ int3 ConvolutionTransposed3x3Thin::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status ConvolutionTransposed3x3Thin::Tune(const TuningParameters& params) {
+absl::Status ConvolutionTransposed3x3Thin::Tune(
+    const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
 
-Status ConvolutionTransposed3x3Thin::AddToQueue(CLCommandQueue* queue) {
+absl::Status ConvolutionTransposed3x3Thin::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
@@ -271,13 +271,13 @@ bool IsConvolutionTransposed3x3ThinSupported(
          attr.padding.appended.h == 1;
 }
 
-Status CreateConvolutionTransposed3x3Thin(
+absl::Status CreateConvolutionTransposed3x3Thin(
     const CreationContext& creation_context, const OperationDef& definition,
     const ConvolutionTransposedAttributes& attr,
     ConvolutionTransposed3x3Thin* result) {
   if (!IsConvolutionTransposed3x3ThinSupported(*creation_context.device,
                                                attr)) {
-    return InvalidArgumentError(
-        "ConvolutionTransposed3x3Thin doesn't support this attributes");
+    return absl::InvalidArgumentError(
+        "ConvolutionTransposed3x3Thin doesn't support these attributes");
   }
   *result = ConvolutionTransposed3x3Thin(definition, attr);
@@ -291,8 +291,7 @@ Status CreateConvolutionTransposed3x3Thin(
   create_info.aligned_size = attr.weights.shape.o;
   RETURN_IF_ERROR(CreateLinearStorage(
       create_info, attr.bias, creation_context.context, &result->biases_));
-
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3_thin.h b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3_thin.h
index f8d10d6c6b8..f2a0d586bd1 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3_thin.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3_thin.h
@@ -37,10 +37,10 @@ namespace cl {
 class ConvolutionTransposed3x3Thin : public GPUOperation {
  public:
   ConvolutionTransposed3x3Thin() = default;
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   ConvolutionTransposed3x3Thin(ConvolutionTransposed3x3Thin&& operation);
@@ -51,7 +51,7 @@ class ConvolutionTransposed3x3Thin : public GPUOperation {
       delete;
 
  private:
-  friend Status CreateConvolutionTransposed3x3Thin(
+  friend absl::Status CreateConvolutionTransposed3x3Thin(
       const CreationContext& creation_context, const OperationDef& definition,
       const ConvolutionTransposedAttributes& attr,
       ConvolutionTransposed3x3Thin* result);
@@ -59,14 +59,14 @@ class ConvolutionTransposed3x3Thin : public GPUOperation {
       const OperationDef& definition,
       const ConvolutionTransposedAttributes& attr);
   template <DataType T>
-  Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
-                       CLContext* context);
+  absl::Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
+                             CLContext* context);
 
   template <DataType S, typename T>
   void RearrangeWeightsData(const ::tflite::gpu::Tensor<OHWI, S>& weights,
                             absl::Span<T> dst);
 
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   Buffer weights_;
@@ -80,7 +80,7 @@ class ConvolutionTransposed3x3Thin : public GPUOperation {
 };
 
 template <DataType T>
-Status ConvolutionTransposed3x3Thin::UploadWeights(
+absl::Status ConvolutionTransposed3x3Thin::UploadWeights(
     const ::tflite::gpu::Tensor<OHWI, T>& weights, CLContext* context) {
   const int src_depth = IntegralDivideRoundUp(src_channels_, 4);
   const int dst_depth = IntegralDivideRoundUp(dst_channels_, 4);
@@ -150,7 +150,7 @@ void ConvolutionTransposed3x3Thin::RearrangeWeightsData(
 bool IsConvolutionTransposed3x3ThinSupported(
     const CLDevice& device, const ConvolutionTransposedAttributes& attr);
 
-Status CreateConvolutionTransposed3x3Thin(
+absl::Status CreateConvolutionTransposed3x3Thin(
     const CreationContext& creation_context, const OperationDef& definition,
     const ConvolutionTransposedAttributes& attr,
     ConvolutionTransposed3x3Thin* result);
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_4x4.cc b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_4x4.cc
index 1e36be17778..a558fe6cb3c 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_4x4.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_4x4.cc
@@ -301,7 +301,7 @@ ConvolutionTransposed4x4& ConvolutionTransposed4x4::operator=(
   return *this;
 }
 
-Status ConvolutionTransposed4x4::Compile(
+absl::Status ConvolutionTransposed4x4::Compile(
     const CreationContext& creation_context) {
   const auto code = GenerateConvolutionTransposedCode(
       definition_, biases_, linked_operations_, weights_upload_type_);
@@ -314,11 +314,10 @@ Status ConvolutionTransposed4x4::Compile(
   RETURN_IF_ERROR(creation_context.cache->GetOrCreateCLKernel(
       code, "main_function", options, *creation_context.context,
       *creation_context.device, &kernel_));
-
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status ConvolutionTransposed4x4::BindArguments() {
+absl::Status ConvolutionTransposed4x4::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(weights_.GetMemoryPtr()));
@@ -329,8 +328,7 @@ Status ConvolutionTransposed4x4::BindArguments() {
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWBatchedHSB()));
   const int32_t filters_offset = 4 * 16 * src_[0]->Slices();
   RETURN_IF_ERROR(kernel_.SetBytesAuto(filters_offset));
-
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 ConvolutionTransposed4x4::GetGridSize() const {
@@ -341,7 +339,7 @@ int3 ConvolutionTransposed4x4::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status ConvolutionTransposed4x4::AddToQueue(CLCommandQueue* queue) {
+absl::Status ConvolutionTransposed4x4::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
@@ -354,13 +352,13 @@ bool IsConvolutionTransposed4x4Supported(
          attr.padding.prepended.w == 1 && attr.padding.prepended.h == 1;
 }
 
-Status CreateConvolutionTransposed4x4(
+absl::Status CreateConvolutionTransposed4x4(
     const CreationContext& creation_context, const OperationDef& definition,
     const ConvolutionTransposedAttributes& attr,
     ConvolutionTransposed4x4* result) {
   if (!IsConvolutionTransposed4x4Supported(*creation_context.device, definition,
                                            attr)) {
-    return InvalidArgumentError(
-        "ConvolutionTransposed4x4 doesn't support this attributes");
+    return absl::InvalidArgumentError(
+        "ConvolutionTransposed4x4 doesn't support these attributes");
   }
   *result = ConvolutionTransposed4x4(definition, *creation_context.device);
@@ -373,7 +371,7 @@ Status CreateConvolutionTransposed4x4(
   create_info.aligned_size = attr.weights.shape.o;
   RETURN_IF_ERROR(CreateLinearStorage(
       create_info, attr.bias, creation_context.context, &result->biases_));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
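
One practical gain from the qualified constructors: the unsupported-attributes failures above are canonical kInvalidArgument errors, so callers can branch on the status code rather than on the message text. A small sketch; UseGenericKernel is a hypothetical fallback, not part of the delegate:

    #include "absl/status/status.h"

    absl::Status UseGenericKernel() { return absl::OkStatus(); }  // hypothetical

    absl::Status CreateWithFallback(bool supported) {
      const absl::Status status =
          supported ? absl::OkStatus()
                    : absl::InvalidArgumentError("unsupported attributes");
      if (absl::IsInvalidArgument(status)) {
        return UseGenericKernel();  // recoverable: try a generic kernel path
      }
      return status;
    }
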
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_4x4.h b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_4x4.h
index 8d92542c908..7bf37c56119 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_4x4.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_4x4.h
@@ -37,8 +37,8 @@ namespace cl {
 class ConvolutionTransposed4x4 : public GPUOperation {
  public:
   ConvolutionTransposed4x4() = default;
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   ConvolutionTransposed4x4(ConvolutionTransposed4x4&& operation);
@@ -56,19 +56,19 @@ class ConvolutionTransposed4x4 : public GPUOperation {
  private:
   ConvolutionTransposed4x4(const OperationDef& definition,
                            const CLDevice& device);
-  friend Status CreateConvolutionTransposed4x4(
+  friend absl::Status CreateConvolutionTransposed4x4(
       const CreationContext& creation_context, const OperationDef& definition,
       const ConvolutionTransposedAttributes& attr,
       ConvolutionTransposed4x4* result);
   template <DataType T>
-  Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
-                       CLContext* context);
+  absl::Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
+                             CLContext* context);
 
   template <DataType S, typename T>
   void RearrangeWeightsData(const ::tflite::gpu::Tensor<OHWI, S>& weights,
                             absl::Span<T> dst);
 
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   Buffer weights_;
@@ -80,7 +80,7 @@ class ConvolutionTransposed4x4 : public GPUOperation {
 };
 
 template <DataType T>
-Status ConvolutionTransposed4x4::UploadWeights(
+absl::Status ConvolutionTransposed4x4::UploadWeights(
     const ::tflite::gpu::Tensor<OHWI, T>& weights, CLContext* context) {
   const int src_depth = IntegralDivideRoundUp(weights.shape.i, 4);
   const int dst_depth = IntegralDivideRoundUp(weights.shape.o, 4);
@@ -150,7 +150,7 @@ bool IsConvolutionTransposed4x4Supported(
     const CLDevice& device, const OperationDef& definition,
     const ConvolutionTransposedAttributes& attr);
 
-Status CreateConvolutionTransposed4x4(
+absl::Status CreateConvolutionTransposed4x4(
     const CreationContext& creation_context, const OperationDef& definition,
     const ConvolutionTransposedAttributes& attr,
     ConvolutionTransposed4x4* result);
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_thin.cc b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_thin.cc
index 03b9ab0eb6c..8ea40bedd7d 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_thin.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_thin.cc
@@ -184,7 +184,7 @@ ConvolutionTransposedThin& ConvolutionTransposedThin::operator=(
   return *this;
 }
 
-Status ConvolutionTransposedThin::Compile(
+absl::Status ConvolutionTransposedThin::Compile(
     const CreationContext& creation_context) {
   const auto code = GenerateConvolutionTransposedCode(
       definition_, IntegralDivideRoundUp(src_channels_, 4), dst_channels_,
@@ -201,7 +201,7 @@ Status ConvolutionTransposedThin::Compile(
       *creation_context.device, &kernel_);
 }
 
-Status ConvolutionTransposedThin::BindArguments() {
+absl::Status ConvolutionTransposedThin::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(weights_buf_.GetMemoryPtr()));
@@ -210,7 +210,7 @@ Status ConvolutionTransposedThin::BindArguments() {
   RETURN_IF_ERROR(kernel_.SetBytesAuto(src_[0]->GetWHSB()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWHSB()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(bias_value_));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 ConvolutionTransposedThin::GetGridSize() const {
@@ -220,12 +220,12 @@ int3 ConvolutionTransposedThin::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status ConvolutionTransposedThin::Tune(const TuningParameters& params) {
+absl::Status ConvolutionTransposedThin::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
 
-Status ConvolutionTransposedThin::AddToQueue(CLCommandQueue* queue) {
+absl::Status ConvolutionTransposedThin::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
@@ -238,18 +238,18 @@ bool IsConvolutionTransposedThinSupported(
          attr.padding.appended.w == 0 && attr.padding.appended.h == 0;
 }
 
-Status CreateConvolutionTransposedThin(
+absl::Status CreateConvolutionTransposedThin(
     const CreationContext& creation_context, const OperationDef& definition,
     const ConvolutionTransposedAttributes& attr,
     ConvolutionTransposedThin* result) {
   if (!IsConvolutionTransposedThinSupported(*creation_context.device, attr)) {
-    return InvalidArgumentError(
-        "ConvolutionTransposedThin doesn't support this attributes");
+    return absl::InvalidArgumentError(
+        "ConvolutionTransposedThin doesn't support these attributes");
   }
   *result = ConvolutionTransposedThin(definition, attr);
   RETURN_IF_ERROR(
       result->UploadWeights(attr.weights, creation_context.context));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_thin.h b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_thin.h
index 0642a7c928b..573772965ae 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_thin.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_thin.h
@@ -38,10 +38,10 @@ namespace cl {
 class ConvolutionTransposedThin : public GPUOperation {
  public:
   ConvolutionTransposedThin() = default;
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   ConvolutionTransposedThin(ConvolutionTransposedThin&& operation);
@@ -51,21 +51,21 @@ class ConvolutionTransposedThin : public GPUOperation {
       delete;
 
  private:
-  friend Status CreateConvolutionTransposedThin(
+  friend absl::Status CreateConvolutionTransposedThin(
       const CreationContext& creation_context, const OperationDef& definition,
       const ConvolutionTransposedAttributes& attr,
       ConvolutionTransposedThin* result);
   ConvolutionTransposedThin(const OperationDef& definition,
                             const ConvolutionTransposedAttributes& attr);
   template <DataType T>
-  Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
-                       CLContext* context);
+  absl::Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
+                             CLContext* context);
 
   template <DataType S, typename T>
   void RearrangeWeightsData(const ::tflite::gpu::Tensor<OHWI, S>& weights,
                             absl::Span<T> dst);
 
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   Buffer weights_buf_;
@@ -80,7 +80,7 @@ class ConvolutionTransposedThin : public GPUOperation {
 };
 
 template <DataType T>
-Status ConvolutionTransposedThin::UploadWeights(
+absl::Status ConvolutionTransposedThin::UploadWeights(
     const ::tflite::gpu::Tensor<OHWI, T>& weights, CLContext* context) {
   const int src_depth = IntegralDivideRoundUp(src_channels_, 4);
   const int elements_count =
@@ -136,7 +136,7 @@ void ConvolutionTransposedThin::RearrangeWeightsData(
 bool IsConvolutionTransposedThinSupported(
     const CLDevice& device, const ConvolutionTransposedAttributes& attr);
 
-Status CreateConvolutionTransposedThin(
+absl::Status CreateConvolutionTransposedThin(
     const CreationContext& creation_context, const OperationDef& definition,
     const ConvolutionTransposedAttributes& attr,
     ConvolutionTransposedThin* result);
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv.cc b/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv.cc
index e7bf31b0d37..99bec18c7f8 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv.cc
@@ -226,7 +226,8 @@ DepthWiseConvolution& DepthWiseConvolution::operator=(
   return *this;
 }
 
-Status DepthWiseConvolution::Compile(const CreationContext& creation_context) {
+absl::Status DepthWiseConvolution::Compile(
+    const CreationContext& creation_context) {
   const bool stride_correction =
       definition_.IsBatchSupported() && stride_.x != 1;
   const auto code = GenerateDepthWiseConvolutionCode(
@@ -237,7 +238,7 @@ Status DepthWiseConvolution::Compile(const CreationContext& creation_context) {
       *creation_context.device, &kernel_);
 }
 
-Status DepthWiseConvolution::BindArguments() {
+absl::Status DepthWiseConvolution::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(weights_));
@@ -255,7 +256,7 @@ Status DepthWiseConvolution::BindArguments() {
   }
   RETURN_IF_ERROR(kernel_.SetBytesAuto(src_[0]->GetWBatchedHSB()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWBatchedHSB()));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 DepthWiseConvolution::GetGridSize() const {
@@ -265,20 +266,20 @@ int3 DepthWiseConvolution::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status DepthWiseConvolution::Tune(const TuningParameters& params) {
+absl::Status DepthWiseConvolution::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
 
-Status DepthWiseConvolution::AddToQueue(CLCommandQueue* queue) {
+absl::Status DepthWiseConvolution::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
 
-Status CreateDepthWiseConvolution(const CreationContext& creation_context,
-                                  const OperationDef& definition,
-                                  const DepthwiseConvolution2DAttributes& attr,
-                                  DepthWiseConvolution* result) {
+absl::Status CreateDepthWiseConvolution(
+    const CreationContext& creation_context, const OperationDef& definition,
+    const DepthwiseConvolution2DAttributes& attr,
+    DepthWiseConvolution* result) {
   bool weights_are_buffer = creation_context.device->IsMali();
   *result = DepthWiseConvolution(definition, attr, weights_are_buffer);
   RETURN_IF_ERROR(
@@ -291,7 +292,7 @@ Status CreateDepthWiseConvolution(const CreationContext& creation_context,
   create_info.aligned_size = attr.weights.shape.o * attr.weights.shape.i;
   RETURN_IF_ERROR(CreateLinearStorage(
       create_info, attr.bias, creation_context.context, &result->biases_));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv.h b/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv.h
index 5915ed94502..8f3320ae57b 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv.h
@@ -38,10 +38,10 @@ namespace cl {
 class DepthWiseConvolution : public GPUOperation {
  public:
   DepthWiseConvolution() = default;
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   DepthWiseConvolution(DepthWiseConvolution&& operation);
@@ -50,7 +50,7 @@ class DepthWiseConvolution : public GPUOperation {
   DepthWiseConvolution& operator=(const DepthWiseConvolution&) = delete;
 
  private:
-  friend Status CreateDepthWiseConvolution(
+  friend absl::Status CreateDepthWiseConvolution(
       const CreationContext& creation_context, const OperationDef& definition,
       const DepthwiseConvolution2DAttributes& attr,
       DepthWiseConvolution* result);
@@ -58,14 +58,14 @@ class DepthWiseConvolution : public GPUOperation {
                        const DepthwiseConvolution2DAttributes& attr,
                        bool weights_are_buffer);
   template <DataType T>
-  Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
-                       CLContext* context);
+  absl::Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
+                             CLContext* context);
 
   template <DataType S, typename T>
   void RearrangeWeightsData(const ::tflite::gpu::Tensor<OHWI, S>& weights,
                             absl::Span<T> dst);
 
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   bool weights_are_buffer_;
@@ -86,7 +86,7 @@ class DepthWiseConvolution : public GPUOperation {
 };
 
 template <DataType T>
-Status DepthWiseConvolution::UploadWeights(
+absl::Status DepthWiseConvolution::UploadWeights(
     const ::tflite::gpu::Tensor<OHWI, T>& weights, CLContext* context) {
   const int dst_channels = weights.shape.i * weights.shape.o;
   const int dst_depth = IntegralDivideRoundUp(dst_channels, 4);
@@ -130,7 +130,7 @@ Status DepthWiseConvolution::UploadWeights(
     weights_ = weights_tex2d_.GetMemoryPtr();
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <DataType S, typename T>
@@ -162,10 +162,9 @@ void DepthWiseConvolution::RearrangeWeightsData(
   }
 }
 
-Status CreateDepthWiseConvolution(const CreationContext& creation_context,
-                                  const OperationDef& definition,
-                                  const DepthwiseConvolution2DAttributes& attr,
-                                  DepthWiseConvolution* result);
+absl::Status CreateDepthWiseConvolution(
+    const CreationContext& creation_context, const OperationDef& definition,
+    const DepthwiseConvolution2DAttributes& attr, DepthWiseConvolution* result);
 
 }  // namespace cl
 }  // namespace gpu
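
All of these operation classes share one shape: move-only, with a friend factory that returns absl::Status and move-assigns the finished object into a caller-provided out-parameter. A condensed sketch of the pattern with a made-up MyOp:

    #include "absl/status/status.h"

    class MyOp {
     public:
      MyOp() = default;
      MyOp(MyOp&&) = default;             // move only, like GPUOperation
      MyOp& operator=(MyOp&&) = default;
      MyOp(const MyOp&) = delete;
      MyOp& operator=(const MyOp&) = delete;
    };

    absl::Status CreateMyOp(bool supported, MyOp* result) {
      if (!supported) {
        return absl::InvalidArgumentError("unsupported attributes");
      }
      *result = MyOp();  // move-assign into the caller's slot
      return absl::OkStatus();
    }
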
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv_3d.cc b/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv_3d.cc
index e3297cb6814..57d30dd2734 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv_3d.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv_3d.cc
@@ -256,7 +256,7 @@ DepthWiseConvolution3D& DepthWiseConvolution3D::operator=(
   return *this;
 }
 
-Status DepthWiseConvolution3D::Compile(
+absl::Status DepthWiseConvolution3D::Compile(
     const CreationContext& creation_context) {
   const bool stride_correction =
       definition_.IsBatchSupported() && stride_.x != 1;
@@ -268,7 +268,7 @@ Status DepthWiseConvolution3D::Compile(
       *creation_context.device, &kernel_);
 }
 
-Status DepthWiseConvolution3D::BindArguments() {
+absl::Status DepthWiseConvolution3D::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   if (weights_are_buffer_) {
@@ -295,7 +295,7 @@ Status DepthWiseConvolution3D::BindArguments() {
   }
   RETURN_IF_ERROR(kernel_.SetBytesAuto(src_[0]->GetWBatchedHDS()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWBatchedHDS()));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 DepthWiseConvolution3D::GetGridSize() const {
@@ -305,17 +305,17 @@ int3 DepthWiseConvolution3D::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status DepthWiseConvolution3D::Tune(const TuningParameters& params) {
+absl::Status DepthWiseConvolution3D::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
 
-Status DepthWiseConvolution3D::AddToQueue(CLCommandQueue* queue) {
+absl::Status DepthWiseConvolution3D::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
 
-Status CreateDepthWiseConvolution3D(
+absl::Status CreateDepthWiseConvolution3D(
     const CreationContext& creation_context, const OperationDef& definition,
     const DepthwiseConvolution3DAttributes& attr,
     DepthWiseConvolution3D* result) {
@@ -330,7 +330,7 @@ Status CreateDepthWiseConvolution3D(
   create_info.aligned_size = attr.weights.shape.o * attr.weights.shape.i;
   RETURN_IF_ERROR(CreateLinearStorage(
       create_info, attr.bias, creation_context.context, &result->biases_));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv_3d.h b/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv_3d.h
index e3c565422af..78ca6862416 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv_3d.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv_3d.h
@@ -38,10 +38,10 @@ namespace cl {
 class DepthWiseConvolution3D : public GPUOperation {
  public:
   DepthWiseConvolution3D() = default;
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   DepthWiseConvolution3D(DepthWiseConvolution3D&& operation);
@@ -50,7 +50,7 @@ class DepthWiseConvolution3D : public GPUOperation {
   DepthWiseConvolution3D& operator=(const DepthWiseConvolution3D&) = delete;
 
  private:
-  friend Status CreateDepthWiseConvolution3D(
+  friend absl::Status CreateDepthWiseConvolution3D(
       const CreationContext& creation_context, const OperationDef& definition,
       const DepthwiseConvolution3DAttributes& attr,
       DepthWiseConvolution3D* result);
@@ -58,14 +58,14 @@ class DepthWiseConvolution3D : public GPUOperation {
                          const DepthwiseConvolution3DAttributes& attr,
                          const CLDevice& device);
   template <DataType T>
-  Status UploadWeights(const ::tflite::gpu::Tensor<OHWDI, T>& weights,
-                       CLContext* context);
+  absl::Status UploadWeights(const ::tflite::gpu::Tensor<OHWDI, T>& weights,
+                             CLContext* context);
 
   template <DataType S, typename T>
   void RearrangeWeightsData(const ::tflite::gpu::Tensor<OHWDI, S>& weights,
                             absl::Span<T> dst);
 
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   Texture2D weights_tex2d_;
@@ -85,7 +85,7 @@ class DepthWiseConvolution3D : public GPUOperation {
 };
 
 template <DataType T>
-Status DepthWiseConvolution3D::UploadWeights(
+absl::Status DepthWiseConvolution3D::UploadWeights(
     const ::tflite::gpu::Tensor<OHWDI, T>& weights, CLContext* context) {
   const int dst_channels = weights.shape.i * weights.shape.o;
   const int dst_slices = IntegralDivideRoundUp(dst_channels, 4);
@@ -123,7 +123,7 @@ Status DepthWiseConvolution3D::UploadWeights(
           gpu_data.data(), context, &weights_tex2d_));
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <DataType S, typename T>
@@ -158,7 +158,7 @@ void DepthWiseConvolution3D::RearrangeWeightsData(
   }
 }
 
-Status CreateDepthWiseConvolution3D(
+absl::Status CreateDepthWiseConvolution3D(
     const CreationContext& creation_context, const OperationDef& definition,
     const DepthwiseConvolution3DAttributes& attr,
     DepthWiseConvolution3D* result);
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv_3x3.cc b/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv_3x3.cc
index 704df26f2ba..3324adada3b 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv_3x3.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv_3x3.cc
@@ -297,7 +297,8 @@ DepthWiseConv3x3& DepthWiseConv3x3::operator=(DepthWiseConv3x3&& operation) {
   return *this;
 }
 
-Status DepthWiseConv3x3::Compile(const CreationContext& creation_context) {
+absl::Status DepthWiseConv3x3::Compile(
+    const CreationContext& creation_context) {
   std::string code = GenerateDepthWiseConvCode(
       definition_, linked_operations_, *creation_context.device,
       weights_are_buffer_, local_mem_uploads_);
@@ -311,15 +312,14 @@ Status DepthWiseConv3x3::Compile(const CreationContext& creation_context) {
       *creation_context.device, &kernel_);
 }
 
-Status DepthWiseConv3x3::BindArguments() {
+absl::Status DepthWiseConv3x3::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(weights_));
   RETURN_IF_ERROR(BindArgs(&kernel_, linked_operations_));
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(dst_[0]->GetMemoryPtrForWriting()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWHSB()));
-
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 DepthWiseConv3x3::GetGridSize() const {
@@ -329,15 +329,15 @@ int3 DepthWiseConv3x3::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status DepthWiseConv3x3::Tune(const TuningParameters& params) {
+absl::Status DepthWiseConv3x3::Tune(const TuningParameters& params) {
   if (local_mem_uploads_) {
-    return OkStatus();
+    return absl::OkStatus();
   }
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
 
-Status DepthWiseConv3x3::AddToQueue(CLCommandQueue* queue) {
+absl::Status DepthWiseConv3x3::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
@@ -351,12 +351,11 @@ bool IsDepthWiseConv3x3Supported(const DepthwiseConvolution2DAttributes& attr) {
          attr.padding.appended.h == 1;
 }
 
-Status CreateDepthWiseConv3x3(const CreationContext& creation_context,
-                              const OperationDef& definition,
-                              const DepthwiseConvolution2DAttributes& attr,
-                              DepthWiseConv3x3* result) {
+absl::Status CreateDepthWiseConv3x3(
+    const CreationContext& creation_context, const OperationDef& definition,
+    const DepthwiseConvolution2DAttributes& attr, DepthWiseConv3x3* result) {
   if (!IsDepthWiseConv3x3Supported(attr)) {
-    return InvalidArgumentError(
-        "DepthWiseConv3x3 doesn't support this attributes");
+    return absl::InvalidArgumentError(
+        "DepthWiseConv3x3 doesn't support these attributes");
   }
   bool weights_are_buffer =
@@ -364,9 +363,8 @@ Status CreateDepthWiseConv3x3(const CreationContext& creation_context,
   bool local_mem_uploads =
       weights_are_buffer && creation_context.device->IsPowerVR();
   *result = DepthWiseConv3x3(definition, weights_are_buffer, local_mem_uploads);
-  RETURN_IF_ERROR(result->UploadWeightsAndBiases(attr.weights, attr.bias,
-                                                 creation_context.context));
-  return OkStatus();
+  return result->UploadWeightsAndBiases(attr.weights, attr.bias,
+                                        creation_context.context);
 }
 
 }  // namespace cl
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv_3x3.h b/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv_3x3.h
index 1630557afc9..936ab773229 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv_3x3.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/depth_wise_conv_3x3.h
@@ -38,10 +38,10 @@ namespace cl {
 class DepthWiseConv3x3 : public GPUOperation {
  public:
   DepthWiseConv3x3() = default;
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   DepthWiseConv3x3(DepthWiseConv3x3&& operation);
@@ -53,11 +53,11 @@ class DepthWiseConv3x3 : public GPUOperation {
   explicit DepthWiseConv3x3(const OperationDef& definition,
                             bool weights_are_buffer, bool local_mem_uploads);
   template <DataType T>
-  Status UploadWeightsAndBiases(const ::tflite::gpu::Tensor<OHWI, T>& weights,
-                                const ::tflite::gpu::Tensor<Linear, T>& biases,
-                                CLContext* context);
+  absl::Status UploadWeightsAndBiases(
+      const ::tflite::gpu::Tensor<OHWI, T>& weights,
+      const ::tflite::gpu::Tensor<Linear, T>& biases, CLContext* context);
 
-  friend Status CreateDepthWiseConv3x3(
+  friend absl::Status CreateDepthWiseConv3x3(
       const CreationContext& creation_context, const OperationDef& definition,
       const DepthwiseConvolution2DAttributes& attr, DepthWiseConv3x3* result);
 
@@ -66,7 +66,7 @@ class DepthWiseConv3x3 : public GPUOperation {
       const ::tflite::gpu::Tensor<OHWI, S>& weights,
       const ::tflite::gpu::Tensor<Linear, S>& biases, absl::Span<T> dst);
 
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   bool weights_are_buffer_;
@@ -80,7 +80,7 @@ class DepthWiseConv3x3 : public GPUOperation {
 };
 
 template <DataType T>
-Status DepthWiseConv3x3::UploadWeightsAndBiases(
+absl::Status DepthWiseConv3x3::UploadWeightsAndBiases(
     const ::tflite::gpu::Tensor<OHWI, T>& weights,
     const ::tflite::gpu::Tensor<Linear, T>& biases, CLContext* context) {
   const int src_depth = IntegralDivideRoundUp(weights.shape.i, 4);
@@ -122,7 +122,7 @@ Status DepthWiseConv3x3::UploadWeightsAndBiases(
     weights_ = weights_tex2d_.GetMemoryPtr();
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <DataType S, typename T>
@@ -160,10 +160,9 @@ void DepthWiseConv3x3::RearrangeWeightsAndBiasesData(
 
 bool IsDepthWiseConv3x3Supported(const DepthwiseConvolution2DAttributes& attr);
 
-Status CreateDepthWiseConv3x3(const CreationContext& creation_context,
-                              const OperationDef& definition,
-                              const DepthwiseConvolution2DAttributes& attr,
-                              DepthWiseConv3x3* result);
+absl::Status CreateDepthWiseConv3x3(
+    const CreationContext& creation_context, const OperationDef& definition,
+    const DepthwiseConvolution2DAttributes& attr, DepthWiseConv3x3* result);
 
 }  // namespace cl
 }  // namespace gpu
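
For readers following the migration: RETURN_IF_ERROR, used on nearly every
line these hunks touch, is defined in
tensorflow/lite/delegates/gpu/common/status.h. A simplified stand-in that
works with absl::Status looks roughly like this (the real macro may differ in
detail, e.g. unique temporary names):

#include "absl/status/status.h"

// Simplified stand-in for the project's RETURN_IF_ERROR; illustrative only.
#define RETURN_IF_ERROR(expr)            \
  do {                                   \
    const absl::Status _status = (expr); \
    if (!_status.ok()) return _status;   \
  } while (0)

absl::Status Step(bool ok) {  // hypothetical step
  return ok ? absl::OkStatus()
            : absl::InvalidArgumentError("step failed");
}

absl::Status Pipeline() {
  RETURN_IF_ERROR(Step(true));   // ok: execution continues
  RETURN_IF_ERROR(Step(false));  // error: returned from here
  return absl::OkStatus();       // unreachable in this example
}
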
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/elementwise.cc b/tensorflow/lite/delegates/gpu/cl/kernels/elementwise.cc
index 7c394a45669..e435bccef03 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/elementwise.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/elementwise.cc
@@ -203,14 +203,14 @@ std::string ElementwiseTwoInput::GetArgsDeclaration() const {
   return args;
 }
 
-Status ElementwiseTwoInput::BindArguments(CLKernel* kernel) {
+absl::Status ElementwiseTwoInput::BindArguments(CLKernel* kernel) {
   if (use_scalar_para_) {
     RETURN_IF_ERROR(kernel->SetBytesAuto(scalar_para_));
   } else {
     RETURN_IF_ERROR(kernel->SetMemoryAuto(src_[1]->GetMemoryPtr()));
     RETURN_IF_ERROR(kernel->SetBytesAuto(src_[1]->GetWBatchedHSB()));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 ElementwiseTwoInput CreateElementwiseTwoInput(
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/elementwise.h b/tensorflow/lite/delegates/gpu/cl/kernels/elementwise.h
index 8bf33b0c128..4c85fee6071 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/elementwise.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/elementwise.h
@@ -75,7 +75,7 @@ class ElementwiseTwoInput : public ElementwiseOperation {
   void SetLinkIndex(int index) override;
   std::string GetCoreCode(const LinkingContext& context) const override;
   std::string GetArgsDeclaration() const override;
-  Status BindArguments(CLKernel* kernel) override;
+  absl::Status BindArguments(CLKernel* kernel) override;
   inline void SetScalarPara(FLT scalar) {
     scalar_para_ = scalar;
     use_scalar_para_ = true;
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/fully_connected.cc b/tensorflow/lite/delegates/gpu/cl/kernels/fully_connected.cc
index 44a3e97554c..f93648f82fc 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/fully_connected.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/fully_connected.cc
@@ -113,7 +113,7 @@ FullyConnected& FullyConnected::operator=(FullyConnected&& kernel) {
   return *this;
 }
 
-Status FullyConnected::Compile(const CreationContext& creation_context) {
+absl::Status FullyConnected::Compile(const CreationContext& creation_context) {
   int wg_width = 32;
   int wg_height = 4;
   int work_items;
@@ -134,10 +134,10 @@ Status FullyConnected::Compile(const CreationContext& creation_context) {
     }
     work_items = work_group_size_.x * work_group_size_.y * work_group_size_.z;
   } while (work_items > kernel_.GetMaxWorkGroupSize());
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status FullyConnected::AddToQueue(CLCommandQueue* queue) {
+absl::Status FullyConnected::AddToQueue(CLCommandQueue* queue) {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(weights_.GetMemoryPtr()));
@@ -146,15 +146,14 @@ Status FullyConnected::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(dst_[0]->GetMemoryPtrForWriting()));
   RETURN_IF_ERROR(
       kernel_.SetBytesAuto(int2(src_[0]->Slices(), dst_[0]->Slices())));
-
   return queue->DispatchImplicit(kernel_, {dst_[0]->Slices(), 1, 1},
                                  work_group_size_);
 }
 
-Status CreateFullyConnected(const CreationContext& creation_context,
-                            const OperationDef& definition,
-                            const FullyConnectedAttributes& attr,
-                            FullyConnected* result) {
+absl::Status CreateFullyConnected(const CreationContext& creation_context,
+                                  const OperationDef& definition,
+                                  const FullyConnectedAttributes& attr,
+                                  FullyConnected* result) {
   *result = FullyConnected(definition);
   RETURN_IF_ERROR(
       result->UploadWeights(attr.weights, creation_context.context));
@@ -165,7 +164,7 @@ Status CreateFullyConnected(const CreationContext& creation_context,
   create_info.aligned_size = attr.weights.shape.o;
   RETURN_IF_ERROR(CreateLinearStorage(
       create_info, attr.bias, creation_context.context, &result->biases_));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
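
The Create* factories keep their out-parameter shape; only the return type
changes. A self-contained sketch of the resulting call-site pattern (Widget
and CreateWidget are stand-ins, not the real FullyConnected API):

#include <iostream>
#include "absl/status/status.h"

struct Widget {};  // stand-in for an operation like FullyConnected

absl::Status CreateWidget(bool supported, Widget* result) {
  if (!supported) return absl::InvalidArgumentError("unsupported attributes");
  *result = Widget();
  return absl::OkStatus();
}

int main() {
  Widget w;
  const absl::Status status = CreateWidget(/*supported=*/false, &w);
  if (!status.ok()) {
    std::cerr << status.ToString() << "\n";  // code + message, human readable
    return 1;
  }
  return 0;
}
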
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/fully_connected.h b/tensorflow/lite/delegates/gpu/cl/kernels/fully_connected.h
index 83ac279a71b..bc7cbd32fb0 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/fully_connected.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/fully_connected.h
@@ -37,9 +37,9 @@ namespace cl {
 class FullyConnected : public GPUOperation {
  public:
   FullyConnected() = default;
-  Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   FullyConnected(FullyConnected&& kernel);
@@ -49,14 +49,13 @@ class FullyConnected : public GPUOperation {
 
  private:
   explicit FullyConnected(const OperationDef& definition);
-  friend Status CreateFullyConnected(const CreationContext& creation_context,
-                                     const OperationDef& definition,
-                                     const FullyConnectedAttributes& attr,
-                                     FullyConnected* result);
+  friend absl::Status CreateFullyConnected(
+      const CreationContext& creation_context, const OperationDef& definition,
+      const FullyConnectedAttributes& attr, FullyConnected* result);
 
   template <DataType T>
-  Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
-                       CLContext* context);
+  absl::Status UploadWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
+                             CLContext* context);
 
   template <DataType T, typename S>
   void RearrangeWeights(const ::tflite::gpu::Tensor<OHWI, T>& weights,
@@ -69,7 +68,7 @@ class FullyConnected : public GPUOperation {
 };
 
 template <DataType T>
-Status FullyConnected::UploadWeights(
+absl::Status FullyConnected::UploadWeights(
     const ::tflite::gpu::Tensor<OHWI, T>& weights, CLContext* context) {
   const int src_depth = IntegralDivideRoundUp(weights.shape.i, 4);
   const int dst_depth = IntegralDivideRoundUp(weights.shape.o, 4);
@@ -123,10 +122,10 @@ void FullyConnected::RearrangeWeights(
   }
 }
 
-Status CreateFullyConnected(const CreationContext& creation_context,
-                            const OperationDef& definition,
-                            const FullyConnectedAttributes& attr,
-                            FullyConnected* result);
+absl::Status CreateFullyConnected(const CreationContext& creation_context,
+                                  const OperationDef& definition,
+                                  const FullyConnectedAttributes& attr,
+                                  FullyConnected* result);
 
 }  // namespace cl
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/gpu_operation.cc b/tensorflow/lite/delegates/gpu/cl/kernels/gpu_operation.cc
index 4972bb9f737..9f4c9871123 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/gpu_operation.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/gpu_operation.cc
@@ -154,7 +154,7 @@ ElementwiseOperation& ElementwiseOperation::operator=(
   return *this;
 }
 
-Status ElementwiseOperation::BindArguments() {
+absl::Status ElementwiseOperation::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(BindArguments(&kernel_));
@@ -162,7 +162,7 @@ Status ElementwiseOperation::BindArguments() {
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(dst_[0]->GetMemoryPtrForWriting()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(src_[0]->GetWBatchedHSB()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWBatchedHSB()));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 ElementwiseOperation::GetGridSize() const {
@@ -172,19 +172,20 @@ int3 ElementwiseOperation::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status ElementwiseOperation::Compile(const CreationContext& creation_context) {
+absl::Status ElementwiseOperation::Compile(
+    const CreationContext& creation_context) {
   const auto code = GetElementWiseCode(definition_, *this, linked_operations_);
   return creation_context.cache->GetOrCreateCLKernel(
       code, "main_function", *creation_context.context,
       *creation_context.device, &kernel_);
 }
 
-Status ElementwiseOperation::AddToQueue(CLCommandQueue* queue) {
+absl::Status ElementwiseOperation::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
 
-Status ElementwiseOperation::Tune(const TuningParameters& params) {
+absl::Status ElementwiseOperation::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
@@ -209,12 +210,12 @@ std::string PostProcess(const std::vector<ElementwiseOperation*>& linked_ops,
   return code;
 }
 
-Status BindArgs(CLKernel* kernel,
-                const std::vector<ElementwiseOperation*>& linked_ops) {
+absl::Status BindArgs(CLKernel* kernel,
+                      const std::vector<ElementwiseOperation*>& linked_ops) {
   for (auto linked_op : linked_ops) {
     RETURN_IF_ERROR(linked_op->BindArguments(kernel));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
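
BindArgs above is the loop-propagation idiom: apply a fallible step to each
linked operation and surface the first failure. A reduced, self-contained
version, with ints standing in for ElementwiseOperation pointers:

#include <vector>
#include "absl/status/status.h"

absl::Status BindOne(int arg) {  // hypothetical fallible step
  return arg >= 0 ? absl::OkStatus()
                  : absl::InvalidArgumentError("negative argument");
}

absl::Status BindAll(const std::vector<int>& args) {
  for (int arg : args) {
    const absl::Status s = BindOne(arg);
    if (!s.ok()) return s;  // first failure wins, like RETURN_IF_ERROR
  }
  return absl::OkStatus();  // reached only if every element succeeded
}
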
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/gpu_operation.h b/tensorflow/lite/delegates/gpu/cl/kernels/gpu_operation.h
index 4507f0eb81d..17817682bce 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/gpu_operation.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/gpu_operation.h
@@ -96,11 +96,15 @@ class GPUOperation {
   void SetSrc(Tensor* ptr, int index = 0);
   void SetDst(Tensor* ptr, int index = 0);
 
-  virtual Status AddToQueue(CLCommandQueue* queue) { return OkStatus(); }
-  virtual Status Tune(const TuningParameters& params) { return OkStatus(); }
+  virtual absl::Status AddToQueue(CLCommandQueue* queue) {
+    return absl::OkStatus();
+  }
+  virtual absl::Status Tune(const TuningParameters& params) {
+    return absl::OkStatus();
+  }
 
-  virtual Status Compile(const CreationContext& creation_context) {
-    return OkStatus();
+  virtual absl::Status Compile(const CreationContext& creation_context) {
+    return absl::OkStatus();
   }
 
   const OperationDef& GetDefinition() const { return definition_; }
@@ -127,10 +131,10 @@ class ElementwiseOperation : public GPUOperation {
       : GPUOperation(definition) {}
 
   virtual ~ElementwiseOperation() {}
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   ElementwiseOperation(ElementwiseOperation&& operation);
@@ -150,10 +154,12 @@ class ElementwiseOperation : public GPUOperation {
 
   virtual std::string GetCoreCode(const LinkingContext& context) const = 0;
   virtual std::string GetArgsDeclaration() const { return ""; }
-  virtual Status BindArguments(CLKernel* kernel) { return OkStatus(); }
+  virtual absl::Status BindArguments(CLKernel* kernel) {
+    return absl::OkStatus();
+  }
 
  protected:
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
   CLKernel kernel_;
   int3 work_group_size_ = int3(8, 4, 1);
@@ -171,8 +177,8 @@ std::string PostProcess(const std::vector<ElementwiseOperation*>& linked_ops,
 // Binds arguments to the given kernel for the elementwise operations in
 // linked_ops.
 // Every ElementwiseOperation can bind its own arguments.
-Status BindArgs(CLKernel* kernel,
-                const std::vector<ElementwiseOperation*>& linked_ops);
+absl::Status BindArgs(CLKernel* kernel,
+                      const std::vector<ElementwiseOperation*>& linked_ops);
 
 }  // namespace cl
 }  // namespace gpu
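
GPUOperation now ships trivially-succeeding absl::Status defaults for
AddToQueue/Tune/Compile, and subclasses override only the stages they need. A
minimal sketch of that shape (stand-in names, not the TFLite GPU types):

#include "absl/status/status.h"

class Operation {
 public:
  virtual ~Operation() = default;
  // Default is a no-op that succeeds, matching GPUOperation's pattern.
  virtual absl::Status Tune() { return absl::OkStatus(); }
};

class TunedOperation : public Operation {
 public:
  absl::Status Tune() override {
    if (!prepared_) return absl::FailedPreconditionError("not prepared");
    return absl::OkStatus();
  }

 private:
  bool prepared_ = false;
};
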
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/lstm.cc b/tensorflow/lite/delegates/gpu/cl/kernels/lstm.cc
index f2e53a06908..77eea07f278 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/lstm.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/lstm.cc
@@ -121,14 +121,14 @@ LSTM& LSTM::operator=(LSTM&& kernel) {
   return *this;
 }
 
-Status LSTM::Compile(const CreationContext& creation_context) {
+absl::Status LSTM::Compile(const CreationContext& creation_context) {
   const auto code = GetLSTMCode(definition_, *creation_context.device);
   return creation_context.cache->GetOrCreateCLKernel(
       code, "main_function", *creation_context.context,
       *creation_context.device, &kernel_);
 }
 
-Status LSTM::BindArguments() {
+absl::Status LSTM::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[1]->GetMemoryPtr()));
@@ -137,8 +137,7 @@ Status LSTM::BindArguments() {
   RETURN_IF_ERROR(kernel_.SetBytesAuto(src_[0]->GetWHSB()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWHSB()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->Batch()));
-
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 LSTM::GetGridSize() const {
@@ -148,12 +147,12 @@ int3 LSTM::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status LSTM::Tune(const TuningParameters& params) {
+absl::Status LSTM::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
 
-Status LSTM::AddToQueue(CLCommandQueue* queue) {
+absl::Status LSTM::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
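
The BindArguments bodies all follow the same auto-indexed binding idiom:
kernel_.ResetBindingCounter() rewinds a counter, and each SetMemoryAuto or
SetBytesAuto call consumes the next argument slot. A reduced sketch of that
mechanism under stated assumptions (FakeKernel is illustrative; the real
CLKernel forwards to clSetKernelArg):

#include "absl/status/status.h"

class FakeKernel {
 public:
  void ResetBindingCounter() { next_arg_ = 0; }
  absl::Status SetBytesAuto(int value) {
    if (next_arg_ >= kMaxArgs) {
      return absl::OutOfRangeError("too many kernel arguments");
    }
    args_[next_arg_++] = value;  // real code would call clSetKernelArg here
    return absl::OkStatus();
  }

 private:
  static constexpr int kMaxArgs = 16;
  int next_arg_ = 0;
  int args_[kMaxArgs] = {};
};
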
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/lstm.h b/tensorflow/lite/delegates/gpu/cl/kernels/lstm.h
index 3e84887cdc2..27b072ed001 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/lstm.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/lstm.h
@@ -28,9 +28,9 @@ namespace cl {
 class LSTM : public GPUOperation {
  public:
   explicit LSTM(const OperationDef& definition);
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   LSTM(LSTM&& kernel);
@@ -39,7 +39,7 @@ class LSTM : public GPUOperation {
   LSTM& operator=(const LSTM&) = delete;
 
  private:
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   CLKernel kernel_;
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/max_unpooling.cc b/tensorflow/lite/delegates/gpu/cl/kernels/max_unpooling.cc
index 194daee5f1e..56109fc713b 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/max_unpooling.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/max_unpooling.cc
@@ -218,7 +218,7 @@ MaxUnpooling& MaxUnpooling::operator=(MaxUnpooling&& kernel) {
   return *this;
 }
 
-Status MaxUnpooling::Compile(const CreationContext& creation_context) {
+absl::Status MaxUnpooling::Compile(const CreationContext& creation_context) {
   const auto code = GetMaxUnpoolingKernelCode(
       definition_, *creation_context.device, linked_operations_);
   return creation_context.cache->GetOrCreateCLKernel(
@@ -226,7 +226,7 @@ Status MaxUnpooling::Compile(const CreationContext& creation_context) {
       *creation_context.device, &kernel_);
 }
 
-Status MaxUnpooling::BindArguments() {
+absl::Status MaxUnpooling::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[1]->GetMemoryPtr()));
@@ -237,8 +237,7 @@ Status MaxUnpooling::BindArguments() {
   RETURN_IF_ERROR(kernel_.SetBytesAuto(kernel_size_));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(padding_));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(stride_));
-
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 MaxUnpooling::GetGridSize() const {
@@ -248,12 +247,12 @@ int3 MaxUnpooling::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status MaxUnpooling::Tune(const TuningParameters& params) {
+absl::Status MaxUnpooling::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
 
-Status MaxUnpooling::AddToQueue(CLCommandQueue* queue) {
+absl::Status MaxUnpooling::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
@@ -291,7 +290,7 @@ MaxUnpooling3D& MaxUnpooling3D::operator=(MaxUnpooling3D&& kernel) {
   return *this;
 }
 
-Status MaxUnpooling3D::Compile(const CreationContext& creation_context) {
+absl::Status MaxUnpooling3D::Compile(const CreationContext& creation_context) {
   const auto code = GetMaxUnpooling3DKernelCode(
       definition_, *creation_context.device, linked_operations_);
   return creation_context.cache->GetOrCreateCLKernel(
@@ -299,7 +298,7 @@ Status MaxUnpooling3D::Compile(const CreationContext& creation_context) {
       *creation_context.device, &kernel_);
 }
 
-Status MaxUnpooling3D::BindArguments() {
+absl::Status MaxUnpooling3D::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[1]->GetMemoryPtr()));
@@ -316,8 +315,7 @@ Status MaxUnpooling3D::BindArguments() {
       kernel_.SetBytesAuto(int4(padding_.x, padding_.y, padding_.z, 1)));
   RETURN_IF_ERROR(
       kernel_.SetBytesAuto(int4(stride_.x, stride_.y, stride_.z, 1)));
-
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 MaxUnpooling3D::GetGridSize() const {
@@ -327,12 +325,12 @@ int3 MaxUnpooling3D::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status MaxUnpooling3D::Tune(const TuningParameters& params) {
+absl::Status MaxUnpooling3D::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
 
-Status MaxUnpooling3D::AddToQueue(CLCommandQueue* queue) {
+absl::Status MaxUnpooling3D::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/max_unpooling.h b/tensorflow/lite/delegates/gpu/cl/kernels/max_unpooling.h
index c7479acb728..19184ee1e89 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/max_unpooling.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/max_unpooling.h
@@ -29,10 +29,10 @@ class MaxUnpooling : public GPUOperation {
  public:
   MaxUnpooling(const OperationDef& definition,
                const MaxUnpooling2DAttributes& attr);
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   MaxUnpooling(MaxUnpooling&& kernel);
@@ -41,7 +41,7 @@ class MaxUnpooling : public GPUOperation {
   MaxUnpooling& operator=(const MaxUnpooling&) = delete;
 
  private:
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   int2 stride_;
@@ -59,10 +59,10 @@ class MaxUnpooling3D : public GPUOperation {
  public:
   MaxUnpooling3D(const OperationDef& definition,
                  const MaxUnpooling3DAttributes& attr);
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   MaxUnpooling3D(MaxUnpooling3D&& kernel);
@@ -71,7 +71,7 @@ class MaxUnpooling3D : public GPUOperation {
   MaxUnpooling3D& operator=(const MaxUnpooling3D&) = delete;
 
  private:
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   int3 stride_;
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/mean.cc b/tensorflow/lite/delegates/gpu/cl/kernels/mean.cc
index 9dd0546c059..f79a30e33dd 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/mean.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/mean.cc
@@ -103,7 +103,7 @@ Mean& Mean::operator=(Mean&& operation) {
   return *this;
 }
 
-Status Mean::Compile(const CreationContext& creation_context) {
+absl::Status Mean::Compile(const CreationContext& creation_context) {
   if (creation_context.device->IsAdreno3xx()) {
     work_group_size_ = int3(16, 8, 1);
   }
@@ -114,7 +114,7 @@ Status Mean::Compile(const CreationContext& creation_context) {
       *creation_context.device, &kernel_);
 }
 
-Status Mean::BindArguments() {
+absl::Status Mean::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(BindArgs(&kernel_, linked_operations_));
@@ -124,7 +124,7 @@ Status Mean::BindArguments() {
   const double size_0 = work_group_size_.x * work_group_size_.y;
   const double size_1 = total_size / size_0;
   RETURN_IF_ERROR(kernel_.SetBytesAuto(float2(1.0 / size_1, 1.0 / size_0)));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 Mean::GetGridSize() const {
@@ -134,7 +134,7 @@ int3 Mean::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status Mean::AddToQueue(CLCommandQueue* queue) {
+absl::Status Mean::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/mean.h b/tensorflow/lite/delegates/gpu/cl/kernels/mean.h
index 0c0d3fff81c..4525551b5f2 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/mean.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/mean.h
@@ -30,9 +30,9 @@ class Mean : public GPUOperation {
  public:
   Mean() = default;
   explicit Mean(const OperationDef& definition) : GPUOperation(definition) {}
-  Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   Mean(Mean&& operation);
@@ -41,7 +41,7 @@ class Mean : public GPUOperation {
   Mean& operator=(const Mean&) = delete;
 
  private:
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
   CLKernel kernel_;
 
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/multiply_add.cc b/tensorflow/lite/delegates/gpu/cl/kernels/multiply_add.cc
index 45f48246078..fde0712a412 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/multiply_add.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/multiply_add.cc
@@ -89,7 +89,7 @@ std::string MultiplyAdd::GetArgsDeclaration() const {
   return args;
 }
 
-Status MultiplyAdd::BindArguments(CLKernel* kernel) {
+absl::Status MultiplyAdd::BindArguments(CLKernel* kernel) {
   if (use_mul_vec_) {
     RETURN_IF_ERROR(kernel->SetMemoryAuto(mul_vec_.GetMemoryPtr()));
   }
@@ -102,12 +102,12 @@ Status MultiplyAdd::BindArguments(CLKernel* kernel) {
   if (scalar_add_.Active()) {
     RETURN_IF_ERROR(kernel->SetBytesAuto(scalar_add_));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status MultiplyAdd::UploadMul(const MultiplyAttributes& attr,
-                              CalculationsPrecision scalar_precision,
-                              CLContext* context) {
+absl::Status MultiplyAdd::UploadMul(const MultiplyAttributes& attr,
+                                    CalculationsPrecision scalar_precision,
+                                    CLContext* context) {
   auto mul = absl::get_if<::tflite::gpu::Tensor<Linear, DataType::FLOAT32>>(
       &attr.param);
   auto mul_scalar = absl::get_if<float>(&attr.param);
@@ -116,12 +116,12 @@ Status MultiplyAdd::UploadMul(const MultiplyAttributes& attr,
   } else {
     scalar_mul_ = FLT(scalar_precision, *mul_scalar);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status MultiplyAdd::UploadAdd(const AddAttributes& attr,
-                              CalculationsPrecision scalar_precision,
-                              CLContext* context) {
+absl::Status MultiplyAdd::UploadAdd(const AddAttributes& attr,
+                                    CalculationsPrecision scalar_precision,
+                                    CLContext* context) {
   auto add = absl::get_if<::tflite::gpu::Tensor<Linear, DataType::FLOAT32>>(
       &attr.param);
   auto add_scalar = absl::get_if<float>(&attr.param);
@@ -130,12 +130,13 @@ Status MultiplyAdd::UploadAdd(const AddAttributes& attr,
   } else {
     scalar_add_ = FLT(scalar_precision, *add_scalar);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CreateMultiplyAdd(const CreationContext& creation_context,
-                         const OperationDef& definition,
-                         const MultiplyAttributes& attr, MultiplyAdd* result) {
+absl::Status CreateMultiplyAdd(const CreationContext& creation_context,
+                               const OperationDef& definition,
+                               const MultiplyAttributes& attr,
+                               MultiplyAdd* result) {
   const auto scalar_precision = creation_context.device->IsPowerVR()
                                     ? CalculationsPrecision::F32
                                     : definition.precision;
@@ -143,12 +144,12 @@ Status CreateMultiplyAdd(const CreationContext& creation_context,
   RETURN_IF_ERROR(
       result->UploadMul(attr, scalar_precision, creation_context.context));
   result->SetLinkIndex(0);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CreateMultiplyAdd(const CreationContext& creation_context,
-                         const OperationDef& definition,
-                         const AddAttributes& attr, MultiplyAdd* result) {
+absl::Status CreateMultiplyAdd(const CreationContext& creation_context,
+                               const OperationDef& definition,
+                               const AddAttributes& attr, MultiplyAdd* result) {
   const auto scalar_precision = creation_context.device->IsPowerVR()
                                     ? CalculationsPrecision::F32
                                     : definition.precision;
@@ -156,13 +157,14 @@ Status CreateMultiplyAdd(const CreationContext& creation_context,
   RETURN_IF_ERROR(
       result->UploadAdd(attr, scalar_precision, creation_context.context));
   result->SetLinkIndex(0);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CreateMultiplyAdd(const CreationContext& creation_context,
-                         const OperationDef& definition,
-                         const MultiplyAttributes& mul_attr,
-                         const AddAttributes& add_attr, MultiplyAdd* result) {
+absl::Status CreateMultiplyAdd(const CreationContext& creation_context,
+                               const OperationDef& definition,
+                               const MultiplyAttributes& mul_attr,
+                               const AddAttributes& add_attr,
+                               MultiplyAdd* result) {
   const auto scalar_precision = creation_context.device->IsPowerVR()
                                     ? CalculationsPrecision::F32
                                     : definition.precision;
@@ -172,7 +174,7 @@ Status CreateMultiplyAdd(const CreationContext& creation_context,
   RETURN_IF_ERROR(
       result->UploadAdd(add_attr, scalar_precision, creation_context.context));
   result->SetLinkIndex(0);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
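
UploadMul/UploadAdd dispatch on a variant attribute: the parameter is either
a per-channel tensor or a single scalar, probed with absl::get_if. A reduced,
self-contained sketch (simplified types; the real attr.param holds a
::tflite::gpu::Tensor rather than a std::vector):

#include <vector>
#include "absl/status/status.h"
#include "absl/types/variant.h"

struct Params {
  absl::variant<std::vector<float>, float> param;  // tensor-or-scalar stand-in
};

absl::Status Upload(const Params& attr) {
  if (auto* vec = absl::get_if<std::vector<float>>(&attr.param)) {
    // per-channel path: would upload *vec to linear storage
    return absl::OkStatus();
  }
  if (auto* scalar = absl::get_if<float>(&attr.param)) {
    // scalar path: would stash *scalar as a kernel constant
    return absl::OkStatus();
  }
  return absl::InvalidArgumentError("unsupported parameter type");
}
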
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/multiply_add.h b/tensorflow/lite/delegates/gpu/cl/kernels/multiply_add.h
index 83bb6e11216..4047a7e5c1b 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/multiply_add.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/multiply_add.h
@@ -40,40 +40,42 @@ class MultiplyAdd : public ElementwiseOperation {
   MultiplyAdd(const MultiplyAdd&) = delete;
   MultiplyAdd& operator=(const MultiplyAdd&) = delete;
 
-  Status UploadMul(const MultiplyAttributes& attr,
-                   CalculationsPrecision scalar_precision, CLContext* context);
-  Status UploadAdd(const AddAttributes& attr,
-                   CalculationsPrecision scalar_precision, CLContext* context);
+  absl::Status UploadMul(const MultiplyAttributes& attr,
+                         CalculationsPrecision scalar_precision,
+                         CLContext* context);
+  absl::Status UploadAdd(const AddAttributes& attr,
+                         CalculationsPrecision scalar_precision,
+                         CLContext* context);
 
   template <DataType T>
-  Status UploadMul(const ::tflite::gpu::Tensor<Linear, T>& mul,
-                   CLContext* context);
+  absl::Status UploadMul(const ::tflite::gpu::Tensor<Linear, T>& mul,
+                         CLContext* context);
 
   template <DataType T>
-  Status UploadAdd(const ::tflite::gpu::Tensor<Linear, T>& add,
-                   CLContext* context);
+  absl::Status UploadAdd(const ::tflite::gpu::Tensor<Linear, T>& add,
+                         CLContext* context);
 
   void SetLinkIndex(int index) override;
   std::string GetCoreCode(const LinkingContext& context) const override;
 
   std::string GetArgsDeclaration() const override;
-  Status BindArguments(CLKernel* kernel) override;
+  absl::Status BindArguments(CLKernel* kernel) override;
 
-  friend Status CreateMultiplyAdd(const CreationContext& creation_context,
-                                  const OperationDef& definition,
-                                  const MultiplyAttributes& attr,
-                                  MultiplyAdd* result);
+  friend absl::Status CreateMultiplyAdd(const CreationContext& creation_context,
+                                        const OperationDef& definition,
+                                        const MultiplyAttributes& attr,
+                                        MultiplyAdd* result);
 
-  friend Status CreateMultiplyAdd(const CreationContext& creation_context,
-                                  const OperationDef& definition,
-                                  const AddAttributes& attr,
-                                  MultiplyAdd* result);
+  friend absl::Status CreateMultiplyAdd(const CreationContext& creation_context,
+                                        const OperationDef& definition,
+                                        const AddAttributes& attr,
+                                        MultiplyAdd* result);
 
-  friend Status CreateMultiplyAdd(const CreationContext& creation_context,
-                                  const OperationDef& definition,
-                                  const MultiplyAttributes& mul_attr,
-                                  const AddAttributes& add_attr,
-                                  MultiplyAdd* result);
+  friend absl::Status CreateMultiplyAdd(const CreationContext& creation_context,
+                                        const OperationDef& definition,
+                                        const MultiplyAttributes& mul_attr,
+                                        const AddAttributes& add_attr,
+                                        MultiplyAdd* result);
 
  private:
   explicit MultiplyAdd(const OperationDef& definition)
@@ -89,41 +91,43 @@ class MultiplyAdd : public ElementwiseOperation {
   FLT scalar_add_;
 };
 
-Status CreateMultiplyAdd(const CreationContext& creation_context,
-                         const OperationDef& definition,
-                         const MultiplyAttributes& attr, MultiplyAdd* result);
+absl::Status CreateMultiplyAdd(const CreationContext& creation_context,
+                               const OperationDef& definition,
+                               const MultiplyAttributes& attr,
+                               MultiplyAdd* result);
 
-Status CreateMultiplyAdd(const CreationContext& creation_context,
-                         const OperationDef& definition,
-                         const AddAttributes& attr, MultiplyAdd* result);
+absl::Status CreateMultiplyAdd(const CreationContext& creation_context,
+                               const OperationDef& definition,
+                               const AddAttributes& attr, MultiplyAdd* result);
 
-Status CreateMultiplyAdd(const CreationContext& creation_context,
-                         const OperationDef& definition,
-                         const MultiplyAttributes& mul_attr,
-                         const AddAttributes& add_attr, MultiplyAdd* result);
+absl::Status CreateMultiplyAdd(const CreationContext& creation_context,
+                               const OperationDef& definition,
+                               const MultiplyAttributes& mul_attr,
+                               const AddAttributes& add_attr,
+                               MultiplyAdd* result);
 
 template <DataType T>
-Status MultiplyAdd::UploadMul(const ::tflite::gpu::Tensor<Linear, T>& mul,
-                              CLContext* context) {
+absl::Status MultiplyAdd::UploadMul(const ::tflite::gpu::Tensor<Linear, T>& mul,
+                                    CLContext* context) {
   LinearStorageCreateInfo create_info;
   create_info.storage_type =
       DeduceLinearStorageType(definition_.GetPrimaryStorageType());
   create_info.data_type = definition_.GetDataType();
   RETURN_IF_ERROR(CreateLinearStorage(create_info, mul, context, &mul_vec_));
   use_mul_vec_ = true;
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <DataType T>
-Status MultiplyAdd::UploadAdd(const ::tflite::gpu::Tensor<Linear, T>& add,
-                              CLContext* context) {
+absl::Status MultiplyAdd::UploadAdd(const ::tflite::gpu::Tensor<Linear, T>& add,
+                                    CLContext* context) {
   LinearStorageCreateInfo create_info;
   create_info.storage_type =
       DeduceLinearStorageType(definition_.GetPrimaryStorageType());
   create_info.data_type = definition_.GetDataType();
   RETURN_IF_ERROR(CreateLinearStorage(create_info, add, context, &add_vec_));
   use_add_vec_ = true;
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
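
CreateMultiplyAdd is overloaded on the attribute set (mul-only, add-only,
both), and the combined overload can be phrased in terms of the
single-attribute ones. An illustrative sketch with stand-in types, assuming
the RETURN_IF_ERROR stand-in sketched earlier:

#include "absl/status/status.h"

struct MulAttr { float m; };
struct AddAttr { float a; };
struct FusedOp { float m = 1.0f, a = 0.0f; };

absl::Status Create(const MulAttr& mul, FusedOp* out) {
  out->m = mul.m;
  return absl::OkStatus();
}

absl::Status Create(const AddAttr& add, FusedOp* out) {
  out->a = add.a;
  return absl::OkStatus();
}

absl::Status Create(const MulAttr& mul, const AddAttr& add, FusedOp* out) {
  RETURN_IF_ERROR(Create(mul, out));  // reuse the single-attribute overloads
  return Create(add, out);
}
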
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/padding.cc b/tensorflow/lite/delegates/gpu/cl/kernels/padding.cc
index 1443f5958db..48edcb448a1 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/padding.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/padding.cc
@@ -169,7 +169,7 @@ Padding& Padding::operator=(Padding&& kernel) {
   return *this;
 }
 
-Status Padding::Compile(const CreationContext& creation_context) {
+absl::Status Padding::Compile(const CreationContext& creation_context) {
   const auto code =
       GetPaddingCode(definition_, linked_operations_, attributes_);
   return creation_context.cache->GetOrCreateCLKernel(
@@ -177,7 +177,7 @@ Status Padding::Compile(const CreationContext& creation_context) {
       *creation_context.device, &kernel_);
 }
 
-Status Padding::BindArguments() {
+absl::Status Padding::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(BindArgs(&kernel_, linked_operations_));
@@ -187,7 +187,7 @@ Status Padding::BindArguments() {
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWHSB()));
   const auto& prep = attributes_.prepended;
   RETURN_IF_ERROR(kernel_.SetBytesAuto(int4(prep.w, prep.h, prep.c, prep.b)));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 Padding::GetGridSize() const {
@@ -197,12 +197,12 @@ int3 Padding::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status Padding::Tune(const TuningParameters& params) {
+absl::Status Padding::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
 
-Status Padding::AddToQueue(CLCommandQueue* queue) {
+absl::Status Padding::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/padding.h b/tensorflow/lite/delegates/gpu/cl/kernels/padding.h
index 38e78d4a461..ddf9f9583be 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/padding.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/padding.h
@@ -28,10 +28,10 @@ namespace cl {
 class Padding : public GPUOperation {
  public:
   Padding(const OperationDef& definition, const PadAttributes& attr);
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   Padding(Padding&& kernel);
@@ -40,7 +40,7 @@ class Padding : public GPUOperation {
   Padding& operator=(const Padding&) = delete;
 
  private:
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   PadAttributes attributes_;
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/pooling.cc b/tensorflow/lite/delegates/gpu/cl/kernels/pooling.cc
index 17705782f93..fb985461c02 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/pooling.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/pooling.cc
@@ -408,7 +408,7 @@ Pooling& Pooling::operator=(Pooling&& kernel) {
   return *this;
 }
 
-Status Pooling::Compile(const CreationContext& creation_context) {
+absl::Status Pooling::Compile(const CreationContext& creation_context) {
   std::string code;
   const bool stride_correction =
       definition_.IsBatchSupported() && stride_.x != 1;
@@ -423,7 +423,7 @@ Status Pooling::Compile(const CreationContext& creation_context) {
                                      linked_operations_, output_indices_);
       break;
     default:
-      return InvalidArgumentError(
+      return absl::InvalidArgumentError(
           "You should create another kernel with this params");
       break;
   }
@@ -432,7 +432,7 @@ Status Pooling::Compile(const CreationContext& creation_context) {
       *creation_context.device, &kernel_);
 }
 
-Status Pooling::BindArguments() {
+absl::Status Pooling::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(BindArgs(&kernel_, linked_operations_));
@@ -447,7 +447,7 @@ Status Pooling::BindArguments() {
       kernel_.SetBytesAuto(int2(padding_.x * src_[0]->Batch(), padding_.y)));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(stride_));
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 Pooling::GetGridSize() const {
@@ -457,12 +457,12 @@ int3 Pooling::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status Pooling::Tune(const TuningParameters& params) {
+absl::Status Pooling::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
 
-Status Pooling::AddToQueue(CLCommandQueue* queue) {
+absl::Status Pooling::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
@@ -506,7 +506,7 @@ Pooling3D& Pooling3D::operator=(Pooling3D&& kernel) {
   return *this;
 }
 
-Status Pooling3D::Compile(const CreationContext& creation_context) {
+absl::Status Pooling3D::Compile(const CreationContext& creation_context) {
   std::string code;
   const bool stride_correction =
       definition_.IsBatchSupported() && stride_.x != 1;
@@ -521,7 +521,7 @@ Status Pooling3D::Compile(const CreationContext& creation_context) {
                                        linked_operations_, output_indices_);
       break;
     default:
-      return InvalidArgumentError(
+      return absl::InvalidArgumentError(
           "You should create another kernel with this params");
       break;
   }
@@ -530,7 +530,7 @@ Status Pooling3D::Compile(const CreationContext& creation_context) {
       *creation_context.device, &kernel_);
 }
 
-Status Pooling3D::BindArguments() {
+absl::Status Pooling3D::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(BindArgs(&kernel_, linked_operations_));
@@ -550,7 +550,7 @@ Status Pooling3D::BindArguments() {
   RETURN_IF_ERROR(
       kernel_.SetBytesAuto(int4(stride_.x, stride_.y, stride_.z, 1)));
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 Pooling3D::GetGridSize() const {
@@ -560,12 +560,12 @@ int3 Pooling3D::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status Pooling3D::Tune(const TuningParameters& params) {
+absl::Status Pooling3D::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
 
-Status Pooling3D::AddToQueue(CLCommandQueue* queue) {
+absl::Status Pooling3D::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
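
Pooling::Compile and Pooling3D::Compile funnel unknown pooling types into
absl::InvalidArgumentError from the switch's default branch, so an unsupported
enum value becomes a recoverable status instead of compiling the wrong kernel.
A minimal stand-alone version of that shape (PoolKind is a stand-in enum):

#include "absl/status/status.h"

enum class PoolKind { kAverage, kMax };

absl::Status SelectKernelCode(PoolKind kind) {
  switch (kind) {
    case PoolKind::kAverage:
      return absl::OkStatus();  // would pick the average-pooling source
    case PoolKind::kMax:
      return absl::OkStatus();  // would pick the max-pooling source
    default:
      return absl::InvalidArgumentError("unsupported pooling kind");
  }
}
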
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/pooling.h b/tensorflow/lite/delegates/gpu/cl/kernels/pooling.h
index eaeb188f19e..09d2d5260f7 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/pooling.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/pooling.h
@@ -30,10 +30,10 @@ namespace cl {
 class Pooling : public GPUOperation {
  public:
   Pooling(const OperationDef& definition, const Pooling2DAttributes& attr);
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   Pooling(Pooling&& kernel);
@@ -42,7 +42,7 @@ class Pooling : public GPUOperation {
   Pooling& operator=(const Pooling&) = delete;
 
  private:
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   int2 stride_;
@@ -62,10 +62,10 @@ Pooling CreatePooling(const OperationDef& definition,
 class Pooling3D : public GPUOperation {
  public:
   Pooling3D(const OperationDef& definition, const Pooling3DAttributes& attr);
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   Pooling3D(Pooling3D&& kernel);
@@ -74,7 +74,7 @@ class Pooling3D : public GPUOperation {
   Pooling3D& operator=(const Pooling3D&) = delete;
 
  private:
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   int3 stride_;
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/prelu.cc b/tensorflow/lite/delegates/gpu/cl/kernels/prelu.cc
index 8aa357b91b4..1879d390ad6 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/prelu.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/prelu.cc
@@ -73,21 +73,21 @@ std::string PReLU::GetArgsDeclaration() const {
   return args;
 }
 
-Status PReLU::BindArguments(CLKernel* kernel) {
+absl::Status PReLU::BindArguments(CLKernel* kernel) {
   RETURN_IF_ERROR(kernel->SetMemoryAuto(alpha_.GetMemoryPtr()));
   if (clip_.Active()) {
     RETURN_IF_ERROR(kernel->SetBytesAuto(clip_));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CreatePReLU(const CreationContext& creation_context,
-                   const OperationDef& definition, const PReLUAttributes& attr,
-                   PReLU* result) {
+absl::Status CreatePReLU(const CreationContext& creation_context,
+                         const OperationDef& definition,
+                         const PReLUAttributes& attr, PReLU* result) {
   auto alpha = absl::get_if<::tflite::gpu::Tensor<Linear, DataType::FLOAT32>>(
       &attr.alpha);
   if (!alpha) {
-    return InvalidArgumentError("Alpha is missing");
+    return absl::InvalidArgumentError("Alpha is missing");
   }
   const auto scalar_precision = creation_context.device->IsPowerVR()
                                     ? CalculationsPrecision::F32
@@ -95,7 +95,7 @@ Status CreatePReLU(const CreationContext& creation_context,
   *result = PReLU(definition, attr, scalar_precision);
   RETURN_IF_ERROR(result->UploadParameters(*alpha, creation_context.context));
   result->SetLinkIndex(0);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
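
One practical gain of absl::Status over the lighter Status type it replaces:
callers can branch on the canonical error code instead of string-matching
messages. A sketch using the same error CreatePReLU returns above when alpha
is absent (Create is a hypothetical stand-in for that check):

#include "absl/status/status.h"

absl::Status Create(bool has_alpha) {
  if (!has_alpha) return absl::InvalidArgumentError("Alpha is missing");
  return absl::OkStatus();
}

bool IsUserError(const absl::Status& s) {
  return s.code() == absl::StatusCode::kInvalidArgument;
}
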
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/prelu.h b/tensorflow/lite/delegates/gpu/cl/kernels/prelu.h
index 0feb387e644..4ba0a92158f 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/prelu.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/prelu.h
@@ -44,30 +44,30 @@ class PReLU : public ElementwiseOperation {
   void SetLinkIndex(int index) override;
   std::string GetCoreCode(const LinkingContext& context) const override;
   std::string GetArgsDeclaration() const override;
-  Status BindArguments(CLKernel* kernel) override;
+  absl::Status BindArguments(CLKernel* kernel) override;
 
-  friend Status CreatePReLU(const CreationContext& creation_context,
-                            const OperationDef& definition,
-                            const PReLUAttributes& attr, PReLU* result);
+  friend absl::Status CreatePReLU(const CreationContext& creation_context,
+                                  const OperationDef& definition,
+                                  const PReLUAttributes& attr, PReLU* result);
 
  private:
   PReLU(const OperationDef& definition, const PReLUAttributes& attr,
         CalculationsPrecision scalar_precision);
 
   template <DataType T>
-  Status UploadParameters(const ::tflite::gpu::Tensor<Linear, T>& parameters,
-                          CLContext* context);
+  absl::Status UploadParameters(
+      const ::tflite::gpu::Tensor<Linear, T>& parameters, CLContext* context);
 
   FLT clip_;
   LinearStorage alpha_;
 };
 
-Status CreatePReLU(const CreationContext& creation_context,
-                   const OperationDef& definition, const PReLUAttributes& attr,
-                   PReLU* result);
+absl::Status CreatePReLU(const CreationContext& creation_context,
+                         const OperationDef& definition,
+                         const PReLUAttributes& attr, PReLU* result);
 
 template <DataType T>
-Status PReLU::UploadParameters(
+absl::Status PReLU::UploadParameters(
     const ::tflite::gpu::Tensor<Linear, T>& parameters, CLContext* context) {
   LinearStorageCreateInfo create_info;
   create_info.storage_type =
@@ -75,7 +75,7 @@ Status PReLU::UploadParameters(
   create_info.data_type = definition_.GetPrimaryDataType();
   RETURN_IF_ERROR(
       CreateLinearStorage(create_info, parameters, context, &alpha_));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/quantize_and_dequantize.cc b/tensorflow/lite/delegates/gpu/cl/kernels/quantize_and_dequantize.cc
index f7751fac6ff..e0346a66ff9 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/quantize_and_dequantize.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/quantize_and_dequantize.cc
@@ -92,17 +92,17 @@ std::string QuantizeAndDequantize::GetArgsDeclaration() const {
                       scale_.GetDeclaration());
 }
 
-Status QuantizeAndDequantize::BindArguments(CLKernel* kernel) {
+absl::Status QuantizeAndDequantize::BindArguments(CLKernel* kernel) {
   RETURN_IF_ERROR(kernel->SetBytesAuto(min_));
   RETURN_IF_ERROR(kernel->SetBytesAuto(max_));
   RETURN_IF_ERROR(kernel->SetBytesAuto(scale_));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CreateQuantizeAndDequantize(const CreationContext& creation_context,
-                                   const OperationDef& definition,
-                                   const QuantizeAndDequantizeAttributes& attr,
-                                   QuantizeAndDequantize* result) {
+absl::Status CreateQuantizeAndDequantize(
+    const CreationContext& creation_context, const OperationDef& definition,
+    const QuantizeAndDequantizeAttributes& attr,
+    QuantizeAndDequantize* result) {
   const auto scalar_precision = creation_context.device->IsPowerVR()
                                     ? CalculationsPrecision::F32
                                     : definition.precision;
@@ -120,7 +120,7 @@ Status CreateQuantizeAndDequantize(const CreationContext& creation_context,
     *result = QuantizeAndDequantize(definition, attr, scalar_precision);
   }
   result->SetLinkIndex(0);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/quantize_and_dequantize.h b/tensorflow/lite/delegates/gpu/cl/kernels/quantize_and_dequantize.h
index 07fa8f21773..41c295e881d 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/quantize_and_dequantize.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/quantize_and_dequantize.h
@@ -57,9 +57,9 @@ class QuantizeAndDequantize : public ElementwiseOperation {
   void SetLinkIndex(int index) override;
   std::string GetCoreCode(const LinkingContext& context) const override;
   std::string GetArgsDeclaration() const override;
-  Status BindArguments(CLKernel* kernel) override;
+  absl::Status BindArguments(CLKernel* kernel) override;
 
-  friend Status CreateQuantizeAndDequantize(
+  friend absl::Status CreateQuantizeAndDequantize(
       const CreationContext& creation_context, const OperationDef& definition,
       const QuantizeAndDequantizeAttributes& attr,
       QuantizeAndDequantize* result);
@@ -70,27 +70,26 @@ class QuantizeAndDequantize : public ElementwiseOperation {
                         CalculationsPrecision scalar_precision);
 
   template <DataType T>
-  Status UploadParameters(const ::tflite::gpu::Tensor<Linear, T>& parameters,
-                          CLContext* context);
+  absl::Status UploadParameters(
+      const ::tflite::gpu::Tensor<Linear, T>& parameters, CLContext* context);
 
   FLT min_;
   FLT max_;
   FLT scale_;
 };
 
-Status CreateQuantizeAndDequantize(const CreationContext& creation_context,
-                                   const OperationDef& definition,
-                                   const QuantizeAndDequantizeAttributes& attr,
-                                   QuantizeAndDequantize* result);
+absl::Status CreateQuantizeAndDequantize(
+    const CreationContext& creation_context, const OperationDef& definition,
+    const QuantizeAndDequantizeAttributes& attr, QuantizeAndDequantize* result);
 
 template <DataType T>
-Status QuantizeAndDequantize::UploadParameters(
+absl::Status QuantizeAndDequantize::UploadParameters(
     const ::tflite::gpu::Tensor<Linear, T>& parameters, CLContext* context) {
   LinearStorageCreateInfo create_info;
   create_info.storage_type =
       DeduceLinearStorageType(definition_.GetPrimaryStorageType());
   create_info.data_type = definition_.GetPrimaryDataType();
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/relu.cc b/tensorflow/lite/delegates/gpu/cl/kernels/relu.cc
index ce903972c35..a96db2aa45e 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/relu.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/relu.cc
@@ -80,14 +80,14 @@ std::string ReLU::GetArgsDeclaration() const {
   return args;
 }
 
-Status ReLU::BindArguments(CLKernel* kernel) {
+absl::Status ReLU::BindArguments(CLKernel* kernel) {
   if (alpha_.Active()) {
     RETURN_IF_ERROR(kernel->SetBytesAuto(alpha_));
   }
   if (clip_.Active()) {
     RETURN_IF_ERROR(kernel->SetBytesAuto(clip_));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 ReLU CreateReLU(const CreationContext& creation_context,
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/relu.h b/tensorflow/lite/delegates/gpu/cl/kernels/relu.h
index c4fb68588d3..c8260a33faf 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/relu.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/relu.h
@@ -37,7 +37,7 @@ class ReLU : public ElementwiseOperation {
   void SetLinkIndex(int index) override;
   std::string GetCoreCode(const LinkingContext& context) const override;
   std::string GetArgsDeclaration() const override;
-  Status BindArguments(CLKernel* kernel) override;
+  absl::Status BindArguments(CLKernel* kernel) override;
 
   friend ReLU CreateReLU(const CreationContext& creation_context,
                          const OperationDef& definition,
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/reshape.cc b/tensorflow/lite/delegates/gpu/cl/kernels/reshape.cc
index 3bb3cdd5d22..e1589e9d682 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/reshape.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/reshape.cc
@@ -156,7 +156,7 @@ Reshape& Reshape::operator=(Reshape&& operation) {
   return *this;
 }
 
-Status Reshape::Compile(const CreationContext& creation_context) {
+absl::Status Reshape::Compile(const CreationContext& creation_context) {
   const auto code = definition_.IsBatchSupported()
                         ? GetReshapeBatchedCode(definition_, linked_operations_)
                         : GetReshapeCode(definition_, linked_operations_);
@@ -165,7 +165,7 @@ Status Reshape::Compile(const CreationContext& creation_context) {
       *creation_context.device, &kernel_);
 }
 
-Status Reshape::BindArguments() {
+absl::Status Reshape::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(BindArgs(&kernel_, linked_operations_));
@@ -174,8 +174,7 @@ Status Reshape::BindArguments() {
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWHSB()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(src_[0]->Channels()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->Channels()));
-
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 Reshape::GetGridSize() const {
@@ -185,12 +184,12 @@ int3 Reshape::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status Reshape::Tune(const TuningParameters& params) {
+absl::Status Reshape::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
 
-Status Reshape::AddToQueue(CLCommandQueue* queue) {
+absl::Status Reshape::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
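
Reshape follows the GPUOperation lifecycle this patch touches everywhere: Compile builds the CL kernel, Tune picks work_group_size_, and AddToQueue binds arguments and dispatches. A hypothetical caller-side flow using the types from this file (a sketch only, not an API from the codebase):

// Assumes the RETURN_IF_ERROR macro and the tflite::gpu::cl types above.
absl::Status RunReshape(Reshape* op, const CreationContext& creation_context,
                        const TuningParameters& tuning_params,
                        CLCommandQueue* queue) {
  RETURN_IF_ERROR(op->Compile(creation_context));  // build the CL kernel once
  RETURN_IF_ERROR(op->Tune(tuning_params));        // pick work_group_size_
  return op->AddToQueue(queue);                    // bind args and dispatch
}
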
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/reshape.h b/tensorflow/lite/delegates/gpu/cl/kernels/reshape.h
index 2117ef05907..e11c066ebd3 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/reshape.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/reshape.h
@@ -29,10 +29,10 @@ class Reshape : public GPUOperation {
  public:
   explicit Reshape(const OperationDef& definition)
       : GPUOperation(definition), work_group_size_(8, 4, 1) {}
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   Reshape(Reshape&& operation);
@@ -41,7 +41,7 @@ class Reshape : public GPUOperation {
   Reshape& operator=(const Reshape&) = delete;
 
  private:
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   CLKernel kernel_;
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/reshapex4.cc b/tensorflow/lite/delegates/gpu/cl/kernels/reshapex4.cc
index 3741a02aa5b..de6813e741f 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/reshapex4.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/reshapex4.cc
@@ -120,7 +120,7 @@ Reshapex4& Reshapex4::operator=(Reshapex4&& operation) {
   return *this;
 }
 
-Status Reshapex4::Compile(const CreationContext& creation_context) {
+absl::Status Reshapex4::Compile(const CreationContext& creation_context) {
   const auto code = definition_.IsBatchSupported()
                         ? GetReshapeBatchedCode(definition_, linked_operations_)
                         : GetReshapeCode(definition_, linked_operations_);
@@ -129,15 +129,14 @@ Status Reshapex4::Compile(const CreationContext& creation_context) {
       *creation_context.device, &kernel_);
 }
 
-Status Reshapex4::BindArguments() {
+absl::Status Reshapex4::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(BindArgs(&kernel_, linked_operations_));
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(dst_[0]->GetMemoryPtrForWriting()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(src_[0]->GetWHSB()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWHSB()));
-
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 Reshapex4::GetGridSize() const {
@@ -147,12 +146,12 @@ int3 Reshapex4::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status Reshapex4::Tune(const TuningParameters& params) {
+absl::Status Reshapex4::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
 
-Status Reshapex4::AddToQueue(CLCommandQueue* queue) {
+absl::Status Reshapex4::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/reshapex4.h b/tensorflow/lite/delegates/gpu/cl/kernels/reshapex4.h
index 656e299b547..d61224a7367 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/reshapex4.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/reshapex4.h
@@ -30,10 +30,10 @@ class Reshapex4 : public GPUOperation {
  public:
   explicit Reshapex4(const OperationDef& definition)
       : GPUOperation(definition), work_group_size_(8, 4, 1) {}
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   Reshapex4(Reshapex4&& operation);
@@ -42,7 +42,7 @@ class Reshapex4 : public GPUOperation {
   Reshapex4& operator=(const Reshapex4&) = delete;
 
  private:
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   CLKernel kernel_;
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/resize.cc b/tensorflow/lite/delegates/gpu/cl/kernels/resize.cc
index bd109020004..5d578fe6e09 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/resize.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/resize.cc
@@ -209,7 +209,7 @@ Resize& Resize::operator=(Resize&& operation) {
   return *this;
 }
 
-Status Resize::Compile(const CreationContext& creation_context) {
+absl::Status Resize::Compile(const CreationContext& creation_context) {
   const auto code = GetResizeCode(definition_, attr_.type,
                                   attr_.half_pixel_centers, linked_operations_);
   return creation_context.cache->GetOrCreateCLKernel(
@@ -217,7 +217,7 @@ Status Resize::Compile(const CreationContext& creation_context) {
       *creation_context.device, &kernel_);
 }
 
-Status Resize::BindArguments() {
+absl::Status Resize::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(BindArgs(&kernel_, linked_operations_));
@@ -230,7 +230,7 @@ Status Resize::BindArguments() {
       float2(CalculateResizeScale(src_[0]->Width(), dst_[0]->Width(), attr_),
              CalculateResizeScale(src_[0]->Height(), dst_[0]->Height(), attr_));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(scale_factor));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 Resize::GetGridSize() const {
@@ -240,12 +240,12 @@ int3 Resize::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status Resize::AddToQueue(CLCommandQueue* queue) {
+absl::Status Resize::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
 
-Status Resize::Tune(const TuningParameters& params) {
+absl::Status Resize::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
@@ -271,7 +271,7 @@ Resize3D& Resize3D::operator=(Resize3D&& operation) {
   return *this;
 }
 
-Status Resize3D::Compile(const CreationContext& creation_context) {
+absl::Status Resize3D::Compile(const CreationContext& creation_context) {
   const auto code =
       GetResize3DCode(definition_, attr_.type, linked_operations_);
   return creation_context.cache->GetOrCreateCLKernel(
@@ -279,7 +279,7 @@ Status Resize3D::Compile(const CreationContext& creation_context) {
       *creation_context.device, &kernel_);
 }
 
-Status Resize3D::BindArguments() {
+absl::Status Resize3D::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(BindArgs(&kernel_, linked_operations_));
@@ -296,7 +296,7 @@ Status Resize3D::BindArguments() {
       CalculateResizeScale(src_[0]->Height(), dst_[0]->Height(), attr_),
       CalculateResizeScale(src_[0]->Depth(), dst_[0]->Depth(), attr_), 1.0f);
   RETURN_IF_ERROR(kernel_.SetBytesAuto(scale_factor));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 Resize3D::GetGridSize() const {
@@ -306,12 +306,12 @@ int3 Resize3D::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status Resize3D::AddToQueue(CLCommandQueue* queue) {
+absl::Status Resize3D::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
 
-Status Resize3D::Tune(const TuningParameters& params) {
+absl::Status Resize3D::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
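
The scale_factor bound above packs the per-axis input/output ratios from CalculateResizeScale. A standalone sketch of the usual formula, assuming the common align_corners convention (the project's helper may differ in details):

// Assumed resize-scale rule: with align_corners, corner pixels of input and
// output coincide, so the ratio uses size - 1 on both axes.
float ResizeScale(int input_size, int output_size, bool align_corners) {
  if (align_corners && output_size > 1) {
    return static_cast<float>(input_size - 1) / (output_size - 1);
  }
  return static_cast<float>(input_size) / output_size;
}
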
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/resize.h b/tensorflow/lite/delegates/gpu/cl/kernels/resize.h
index a80f9a98382..04459e12ff9 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/resize.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/resize.h
@@ -27,10 +27,10 @@ namespace cl {
 
 class Resize : public GPUOperation {
  public:
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   Resize(Resize&& operation);
@@ -45,7 +45,7 @@ class Resize : public GPUOperation {
   Resize(const OperationDef& definition, const Resize2DAttributes& attr)
       : GPUOperation(definition), attr_(attr) {}
 
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   Resize2DAttributes attr_;
@@ -58,10 +58,10 @@ Resize CreateResize(const OperationDef& definition,
 
 class Resize3D : public GPUOperation {
  public:
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   Resize3D(Resize3D&& operation);
@@ -76,7 +76,7 @@ class Resize3D : public GPUOperation {
   Resize3D(const OperationDef& definition, const Resize3DAttributes& attr)
       : GPUOperation(definition), attr_(attr) {}
 
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   Resize3DAttributes attr_;
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/softmax.cc b/tensorflow/lite/delegates/gpu/cl/kernels/softmax.cc
index 350abf7f64e..0f9fcb03097 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/softmax.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/softmax.cc
@@ -79,14 +79,14 @@ Softmax& Softmax::operator=(Softmax&& kernel) {
   return *this;
 }
 
-Status Softmax::Compile(const CreationContext& creation_context) {
+absl::Status Softmax::Compile(const CreationContext& creation_context) {
   const auto code = GetSoftmaxKernelCode(definition_, linked_operations_);
   return creation_context.cache->GetOrCreateCLKernel(
       code, "main_function", *creation_context.context,
       *creation_context.device, &kernel_);
 }
 
-Status Softmax::BindArguments() {
+absl::Status Softmax::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(BindArgs(&kernel_, linked_operations_));
@@ -94,7 +94,7 @@ Status Softmax::BindArguments() {
   RETURN_IF_ERROR(kernel_.SetBytesAuto(src_[0]->GetWBatchedHSB()));
   RETURN_IF_ERROR(
       kernel_.SetBytesAuto(GetMaskForLastPlane(src_[0]->Channels())));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 Softmax::GetGridSize() const {
@@ -104,12 +104,12 @@ int3 Softmax::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status Softmax::Tune(const TuningParameters& params) {
+absl::Status Softmax::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
 
-Status Softmax::AddToQueue(CLCommandQueue* queue) {
+absl::Status Softmax::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
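
The GetMaskForLastPlane argument matters because channels are stored in groups of four: when Channels() % 4 != 0, the last depth slice carries padded lanes that must not contribute to the softmax sum. A sketch of the assumed masking rule (illustrative names):

#include <array>

// Lanes holding real channels get 1.0, padded lanes get 0.0 (assumed
// semantics of GetMaskForLastPlane; illustrative only).
std::array<float, 4> MaskForLastPlane(int channels) {
  const int remainder = channels % 4;  // 0 means the last plane is full
  const int valid = remainder == 0 ? 4 : remainder;
  std::array<float, 4> mask{0.0f, 0.0f, 0.0f, 0.0f};
  for (int i = 0; i < valid; ++i) mask[i] = 1.0f;
  return mask;
}
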
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/softmax.h b/tensorflow/lite/delegates/gpu/cl/kernels/softmax.h
index b8b7846e8de..703a40a4e89 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/softmax.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/softmax.h
@@ -30,10 +30,10 @@ class Softmax : public GPUOperation {
  public:
   Softmax() = default;
   explicit Softmax(const OperationDef& definition) : GPUOperation(definition) {}
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   Softmax(Softmax&& kernel);
@@ -44,7 +44,7 @@ class Softmax : public GPUOperation {
   friend Softmax CreateSoftmax();
 
  private:
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
   CLKernel kernel_;
   int3 work_group_size_ = int3(8, 4, 1);
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/softmax1x1.cc b/tensorflow/lite/delegates/gpu/cl/kernels/softmax1x1.cc
index 168dc6ce4a9..09e6c978026 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/softmax1x1.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/softmax1x1.cc
@@ -115,14 +115,14 @@ Softmax1x1& Softmax1x1::operator=(Softmax1x1&& kernel) {
   return *this;
 }
 
-Status Softmax1x1::Compile(const CreationContext& creation_context) {
+absl::Status Softmax1x1::Compile(const CreationContext& creation_context) {
   const auto code = GetSoftmaxKernelCode(definition_, linked_operations_);
   return creation_context.cache->GetOrCreateCLKernel(
       code, "main_function", *creation_context.context,
       *creation_context.device, &kernel_);
 }
 
-Status Softmax1x1::AddToQueue(CLCommandQueue* queue) {
+absl::Status Softmax1x1::AddToQueue(CLCommandQueue* queue) {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(BindArgs(&kernel_, linked_operations_));
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/softmax1x1.h b/tensorflow/lite/delegates/gpu/cl/kernels/softmax1x1.h
index 0fd5325a863..0d28145ca03 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/softmax1x1.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/softmax1x1.h
@@ -30,9 +30,9 @@ class Softmax1x1 : public GPUOperation {
   Softmax1x1() = default;
   explicit Softmax1x1(const OperationDef& definition)
       : GPUOperation(definition) {}
-  Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   Softmax1x1(Softmax1x1&& kernel);
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/space_to_depth.cc b/tensorflow/lite/delegates/gpu/cl/kernels/space_to_depth.cc
index db6882ce4f4..b763684516a 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/space_to_depth.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/space_to_depth.cc
@@ -96,14 +96,14 @@ SpaceToDepth& SpaceToDepth::operator=(SpaceToDepth&& operation) {
   return *this;
 }
 
-Status SpaceToDepth::Compile(const CreationContext& creation_context) {
+absl::Status SpaceToDepth::Compile(const CreationContext& creation_context) {
   const auto code = GetSpaceToDepthCode(definition_, linked_operations_);
   return creation_context.cache->GetOrCreateCLKernel(
       code, "main_function", *creation_context.context,
       *creation_context.device, &kernel_);
 }
 
-Status SpaceToDepth::BindArguments() {
+absl::Status SpaceToDepth::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(BindArgs(&kernel_, linked_operations_));
@@ -121,12 +121,12 @@ int3 SpaceToDepth::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status SpaceToDepth::Tune(const TuningParameters& params) {
+absl::Status SpaceToDepth::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
 
-Status SpaceToDepth::AddToQueue(CLCommandQueue* queue) {
+absl::Status SpaceToDepth::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
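
For reference, the rearrangement this kernel implements in standard TFLite SPACE_TO_DEPTH semantics: each block_size x block_size spatial block folds into the channel dimension, with the in-block offset forming the high-order part of the output channel index. A plain host-side version (HWC layout, illustrative):

#include <vector>

// Host reference for SPACE_TO_DEPTH; height and width are assumed to be
// divisible by block_size, as the op requires.
std::vector<float> SpaceToDepthRef(const std::vector<float>& src, int height,
                                   int width, int channels, int block_size) {
  const int out_w = width / block_size;
  const int out_c = channels * block_size * block_size;
  std::vector<float> dst(src.size());
  for (int y = 0; y < height; ++y) {
    for (int x = 0; x < width; ++x) {
      for (int c = 0; c < channels; ++c) {
        const int oy = y / block_size, ox = x / block_size;
        const int oc =
            ((y % block_size) * block_size + (x % block_size)) * channels + c;
        dst[(oy * out_w + ox) * out_c + oc] =
            src[(y * width + x) * channels + c];
      }
    }
  }
  return dst;
}
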
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/space_to_depth.h b/tensorflow/lite/delegates/gpu/cl/kernels/space_to_depth.h
index 3d316569fcb..9dd257a4c4d 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/space_to_depth.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/space_to_depth.h
@@ -30,9 +30,9 @@ class SpaceToDepth : public GPUOperation {
  public:
   SpaceToDepth(const OperationDef& op_def, const SpaceToDepthAttributes& attr)
       : GPUOperation(op_def), attr_(attr), work_group_size_(8, 4, 1) {}
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   SpaceToDepth(SpaceToDepth&& operation);
   SpaceToDepth& operator=(SpaceToDepth&& operation);
@@ -40,7 +40,7 @@ class SpaceToDepth : public GPUOperation {
   SpaceToDepth& operator=(const SpaceToDepth&) = delete;
 
  private:
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   SpaceToDepthAttributes attr_;
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/strided_slice.cc b/tensorflow/lite/delegates/gpu/cl/kernels/strided_slice.cc
index 4f5cf9b26c7..19f1b185d3c 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/strided_slice.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/strided_slice.cc
@@ -166,7 +166,7 @@ StridedSlice& StridedSlice::operator=(StridedSlice&& operation) {
   return *this;
 }
 
-Status StridedSlice::Compile(const CreationContext& creation_context) {
+absl::Status StridedSlice::Compile(const CreationContext& creation_context) {
   const auto code = GetStridedSliceCode(definition_, Is4Aligned(attributes_),
                                         linked_operations_);
   return creation_context.cache->GetOrCreateCLKernel(
@@ -174,7 +174,7 @@ Status StridedSlice::Compile(const CreationContext& creation_context) {
       *creation_context.device, &kernel_);
 }
 
-Status StridedSlice::BindArguments() {
+absl::Status StridedSlice::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(BindArgs(&kernel_, linked_operations_));
@@ -187,7 +187,7 @@ Status StridedSlice::BindArguments() {
                                 attributes_.strides.c, attributes_.strides.b)));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(src_[0]->GetWHSB()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWHSB()));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 StridedSlice::GetGridSize() const {
@@ -197,12 +197,12 @@ int3 StridedSlice::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status StridedSlice::Tune(const TuningParameters& params) {
+absl::Status StridedSlice::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
 
-Status StridedSlice::AddToQueue(CLCommandQueue* queue) {
+absl::Status StridedSlice::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/strided_slice.h b/tensorflow/lite/delegates/gpu/cl/kernels/strided_slice.h
index f30f6777134..ee6f18fdacb 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/strided_slice.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/strided_slice.h
@@ -27,10 +27,10 @@ namespace cl {
 class StridedSlice : public GPUOperation {
  public:
   StridedSlice(const OperationDef& definition, const SliceAttributes& attr);
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
 
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   StridedSlice(StridedSlice&& operation);
@@ -39,7 +39,7 @@ class StridedSlice : public GPUOperation {
   StridedSlice& operator=(const StridedSlice&) = delete;
 
  private:
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   SliceAttributes attributes_;
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/transpose.cc b/tensorflow/lite/delegates/gpu/cl/kernels/transpose.cc
index cab9b728866..66a272fa2da 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/transpose.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/transpose.cc
@@ -125,14 +125,14 @@ Transpose& Transpose::operator=(Transpose&& operation) {
   return *this;
 }
 
-Status Transpose::Compile(const CreationContext& creation_context) {
+absl::Status Transpose::Compile(const CreationContext& creation_context) {
   const auto code = GetTransposeCode(definition_, attr_, linked_operations_);
   return creation_context.cache->GetOrCreateCLKernel(
       code, "main_function", *creation_context.context,
       *creation_context.device, &kernel_);
 }
 
-Status Transpose::BindArguments() {
+absl::Status Transpose::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(BindArgs(&kernel_, linked_operations_));
@@ -141,8 +141,7 @@ Status Transpose::BindArguments() {
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWHSB()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(src_[0]->Channels()));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->Channels()));
-
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 Transpose::GetGridSize() const {
@@ -152,12 +151,12 @@ int3 Transpose::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status Transpose::Tune(const TuningParameters& params) {
+absl::Status Transpose::Tune(const TuningParameters& params) {
   RETURN_IF_ERROR(BindArguments());
   return GetBestWorkGroup(params, kernel_, GetGridSize(), &work_group_size_);
 }
 
-Status Transpose::AddToQueue(CLCommandQueue* queue) {
+absl::Status Transpose::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/transpose.h b/tensorflow/lite/delegates/gpu/cl/kernels/transpose.h
index 22c155a79ba..61038b1e0ca 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/transpose.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/transpose.h
@@ -28,9 +28,9 @@ class Transpose : public GPUOperation {
  public:
   Transpose(const OperationDef& definition, const TransposeAttributes& attr)
       : GPUOperation(definition), attr_(attr), work_group_size_(8, 4, 1) {}
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   Transpose(Transpose&& operation);
@@ -39,7 +39,7 @@ class Transpose : public GPUOperation {
   Transpose& operator=(const Transpose&) = delete;
 
  private:
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   TransposeAttributes attr_;
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/winograd.cc b/tensorflow/lite/delegates/gpu/cl/kernels/winograd.cc
index 9bb89874c3d..81a8fc690c4 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/winograd.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/winograd.cc
@@ -381,7 +381,7 @@ Winograd4x4To36& Winograd4x4To36::operator=(Winograd4x4To36&& operation) {
   return *this;
 }
 
-Status Winograd4x4To36::Compile(const CreationContext& creation_context) {
+absl::Status Winograd4x4To36::Compile(const CreationContext& creation_context) {
   std::vector<CompilerOptions> options;
   if (creation_context.device->IsAdreno()) {
     options.push_back(CompilerOptions::ADRENO_MORE_WAVES);
@@ -397,10 +397,10 @@ Status Winograd4x4To36::Compile(const CreationContext& creation_context) {
       code, "main_function", options, *creation_context.context,
       *creation_context.device, &kernel_));
   work_group_size_ = SelectBestWorkGroup();
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status Winograd4x4To36::UploadBt(CLContext* context) {
+absl::Status Winograd4x4To36::UploadBt(CLContext* context) {
   ::tflite::gpu::Tensor<Linear, DataType::FLOAT32> bt_aligned;
   bt_aligned.shape = Linear(6 * 8);
   bt_aligned.data.resize(6 * 8);
@@ -427,7 +427,7 @@ int3 Winograd4x4To36::SelectBestWorkGroup() {
   return GetFirstSuitableWorkGroup(wgs, kernel_.GetMaxWorkGroupSize());
 }
 
-Status Winograd4x4To36::BindArguments() {
+absl::Status Winograd4x4To36::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(bt_.GetMemoryPtr()));
@@ -444,8 +444,7 @@ Status Winograd4x4To36::BindArguments() {
       kernel_.SetBytesAuto(int2(-padding_.prepended.w, -padding_.prepended.h)));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(tiles_total));
   RETURN_IF_ERROR(kernel_.SetBytesAuto(tiles_x));
-
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 Winograd4x4To36::GetGridSize() const {
@@ -455,7 +454,7 @@ int3 Winograd4x4To36::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status Winograd4x4To36::Tune(const TuningParameters& params) {
+absl::Status Winograd4x4To36::Tune(const TuningParameters& params) {
   switch (params.tuning_type) {
     case TuningType::EXHAUSTIVE:
       RETURN_IF_ERROR(BindArguments());
@@ -464,19 +463,19 @@ Status Winograd4x4To36::Tune(const TuningParameters& params) {
     case TuningType::FAST:
     default:
       work_group_size_ = SelectBestWorkGroup();
-      return OkStatus();
+      return absl::OkStatus();
   }
 }
 
-Status Winograd4x4To36::AddToQueue(CLCommandQueue* queue) {
+absl::Status Winograd4x4To36::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
 
-Status CreateWinograd4x4To36(const CreationContext& creation_context,
-                             const OperationDef& definition,
-                             const Padding2D& padding,
-                             Winograd4x4To36* result) {
+absl::Status CreateWinograd4x4To36(const CreationContext& creation_context,
+                                   const OperationDef& definition,
+                                   const Padding2D& padding,
+                                   Winograd4x4To36* result) {
   *result = Winograd4x4To36(definition, padding);
   return result->UploadBt(creation_context.context);
 }
@@ -499,7 +498,7 @@ Winograd36To4x4& Winograd36To4x4::operator=(Winograd36To4x4&& operation) {
   return *this;
 }
 
-Status Winograd36To4x4::Compile(const CreationContext& creation_context) {
+absl::Status Winograd36To4x4::Compile(const CreationContext& creation_context) {
   std::vector<CompilerOptions> options;
   if (definition_.precision == CalculationsPrecision::F16 &&
       creation_context.device->IsPowerVR()) {
@@ -511,10 +510,10 @@ Status Winograd36To4x4::Compile(const CreationContext& creation_context) {
       code, "main_function", options, *creation_context.context,
       *creation_context.device, &kernel_));
   work_group_size_ = SelectBestWorkGroup();
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status Winograd36To4x4::UploadAt(CLContext* context) {
+absl::Status Winograd36To4x4::UploadAt(CLContext* context) {
   ::tflite::gpu::Tensor<Linear, DataType::FLOAT32> at_aligned;
   at_aligned.shape = Linear(4 * 8);
   at_aligned.data.resize(4 * 8);
@@ -541,7 +540,7 @@ int3 Winograd36To4x4::SelectBestWorkGroup() {
   return GetFirstSuitableWorkGroup(wgs, kernel_.GetMaxWorkGroupSize());
 }
 
-Status Winograd36To4x4::BindArguments() {
+absl::Status Winograd36To4x4::BindArguments() {
   kernel_.ResetBindingCounter();
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(src_[0]->GetMemoryPtr()));
   RETURN_IF_ERROR(kernel_.SetMemoryAuto(at_.GetMemoryPtr()));
@@ -552,8 +551,7 @@ Status Winograd36To4x4::BindArguments() {
   RETURN_IF_ERROR(kernel_.SetBytesAuto(dst_[0]->GetWHSB()));
   const int tiles_x = IntegralDivideRoundUp(dst_[0]->Width(), 4);
   RETURN_IF_ERROR(kernel_.SetBytesAuto(tiles_x));
-
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int3 Winograd36To4x4::GetGridSize() const {
@@ -565,7 +563,7 @@ int3 Winograd36To4x4::GetGridSize() const {
   return int3(grid_x, grid_y, grid_z);
 }
 
-Status Winograd36To4x4::Tune(const TuningParameters& params) {
+absl::Status Winograd36To4x4::Tune(const TuningParameters& params) {
   switch (params.tuning_type) {
     case TuningType::EXHAUSTIVE:
       RETURN_IF_ERROR(BindArguments());
@@ -574,16 +572,16 @@ Status Winograd36To4x4::Tune(const TuningParameters& params) {
     case TuningType::FAST:
     default:
       work_group_size_ = SelectBestWorkGroup();
-      return OkStatus();
+      return absl::OkStatus();
   }
 }
 
-Status Winograd36To4x4::AddToQueue(CLCommandQueue* queue) {
+absl::Status Winograd36To4x4::AddToQueue(CLCommandQueue* queue) {
   RETURN_IF_ERROR(BindArguments());
   return queue->DispatchImplicit(kernel_, GetGridSize(), work_group_size_);
 }
 
-Status CreateWinograd36To4x4(
+absl::Status CreateWinograd36To4x4(
     const CreationContext& creation_context, const OperationDef& definition,
     const ::tflite::gpu::Tensor<Linear, DataType::FLOAT32>& biases,
     Winograd36To4x4* result) {
@@ -594,7 +592,6 @@ Status CreateWinograd36To4x4(
   create_info.name = "biases";
   RETURN_IF_ERROR(CreateLinearStorage(
       create_info, biases, creation_context.context, &result->biases_));
-
   return result->UploadAt(creation_context.context);
 }
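
The tiles_x and tiles_total values bound in Winograd4x4To36::BindArguments come from the F(4x4, 3x3) tiling: output is produced in 4x4 tiles, each expanded to 36 intermediate values, so the counts are rounded-up divisions. A small sketch using the same IntegralDivideRoundUp convention as this codebase:

// ceil(n / divisor), as IntegralDivideRoundUp does in this codebase.
int IntegralDivideRoundUp(int n, int divisor) {
  return (n + divisor - 1) / divisor;
}

int TilesTotal(int dst_width, int dst_height) {
  const int tiles_x = IntegralDivideRoundUp(dst_width, 4);
  const int tiles_y = IntegralDivideRoundUp(dst_height, 4);
  return tiles_x * tiles_y;  // each tile expands to 36 intermediate values
}
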
 
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/winograd.h b/tensorflow/lite/delegates/gpu/cl/kernels/winograd.h
index f6b80b67f32..5a0444c4be5 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/winograd.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/winograd.h
@@ -36,9 +36,9 @@ class Winograd4x4To36 : public GPUOperation {
   Winograd4x4To36() = default;
   Winograd4x4To36(const OperationDef& definition, const Padding2D& padding)
       : GPUOperation(definition), padding_(padding) {}
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   Winograd4x4To36(Winograd4x4To36&& operation);
@@ -47,17 +47,16 @@ class Winograd4x4To36 : public GPUOperation {
   Winograd4x4To36& operator=(const Winograd4x4To36&) = delete;
 
  private:
-  friend Status CreateWinograd4x4To36(const CreationContext& creation_context,
-                                      const OperationDef& definition,
-                                      const Padding2D& padding,
-                                      Winograd4x4To36* result);
+  friend absl::Status CreateWinograd4x4To36(
+      const CreationContext& creation_context, const OperationDef& definition,
+      const Padding2D& padding, Winograd4x4To36* result);
 
-  Status UploadBt(CLContext* context);
+  absl::Status UploadBt(CLContext* context);
 
   // Must be called after kernel compilation
   int3 SelectBestWorkGroup();
 
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   LinearStorage bt_;
@@ -67,18 +66,19 @@ class Winograd4x4To36 : public GPUOperation {
   int3 work_group_size_ = int3(128, 1, 1);
 };
 
-Status CreateWinograd4x4To36(const CreationContext& creation_context,
-                             const OperationDef& definition,
-                             const Padding2D& padding, Winograd4x4To36* result);
+absl::Status CreateWinograd4x4To36(const CreationContext& creation_context,
+                                   const OperationDef& definition,
+                                   const Padding2D& padding,
+                                   Winograd4x4To36* result);
 
 class Winograd36To4x4 : public GPUOperation {
  public:
   Winograd36To4x4() = default;
   explicit Winograd36To4x4(const OperationDef& definition)
       : GPUOperation(definition) {}
-  Status AddToQueue(CLCommandQueue* queue) override;
-  Status Tune(const TuningParameters& params) override;
-  Status Compile(const CreationContext& creation_context) override;
+  absl::Status AddToQueue(CLCommandQueue* queue) override;
+  absl::Status Tune(const TuningParameters& params) override;
+  absl::Status Compile(const CreationContext& creation_context) override;
 
   // Move only
   Winograd36To4x4(Winograd36To4x4&& operation);
@@ -87,17 +87,17 @@ class Winograd36To4x4 : public GPUOperation {
   Winograd36To4x4& operator=(const Winograd36To4x4&) = delete;
 
  private:
-  friend Status CreateWinograd36To4x4(
+  friend absl::Status CreateWinograd36To4x4(
       const CreationContext& creation_context, const OperationDef& definition,
       const ::tflite::gpu::Tensor<Linear, DataType::FLOAT32>& biases,
       Winograd36To4x4* result);
 
-  Status UploadAt(CLContext* context);
+  absl::Status UploadAt(CLContext* context);
 
   // Must be called after kernel compilation
   int3 SelectBestWorkGroup();
 
-  Status BindArguments();
+  absl::Status BindArguments();
   int3 GetGridSize() const;
 
   LinearStorage at_;
@@ -107,7 +107,7 @@ class Winograd36To4x4 : public GPUOperation {
   int3 work_group_size_ = int3(128, 1, 1);
 };
 
-Status CreateWinograd36To4x4(
+absl::Status CreateWinograd36To4x4(
     const CreationContext& creation_context, const OperationDef& definition,
     const ::tflite::gpu::Tensor<Linear, DataType::FLOAT32>& biases,
     Winograd36To4x4* result);
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/work_group_picking.cc b/tensorflow/lite/delegates/gpu/cl/kernels/work_group_picking.cc
index 7a2e54840b9..683116091b8 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/work_group_picking.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/work_group_picking.cc
@@ -75,9 +75,10 @@ std::vector<int3> GenerateWorkGroupSizesXY128Linear(
   return work_groups;
 }
 
-Status GetBestWorkGroupAlignedToGrid(const TuningParameters& params,
-                                     const CLKernel& kernel, const int3& grid,
-                                     int3* best_work_group) {
+absl::Status GetBestWorkGroupAlignedToGrid(const TuningParameters& params,
+                                           const CLKernel& kernel,
+                                           const int3& grid,
+                                           int3* best_work_group) {
   std::vector<int3> work_groups;
   RETURN_IF_ERROR(GenerateWorkGroupSizesAlignedToGrid(
       grid, params.info->max_work_group_sizes, kernel.GetMaxWorkGroupSize(),
@@ -86,7 +87,7 @@ Status GetBestWorkGroupAlignedToGrid(const TuningParameters& params,
   RETURN_IF_ERROR(params.queue->GetBestWorkGroupIndex(
       kernel, *params.info, grid, work_groups, &best_work_group_index));
   *best_work_group = work_groups[best_work_group_index];
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int GetPenalty(int grid_size, int group_size) {
@@ -202,30 +203,31 @@ int3 GetWorkGroupConv(const int3& grid, int max_size, int max_z_size) {
   return int3(wg_x, wg_y, wg_z);
 }
 
-Status GetBestWorkGroupXY128(const TuningParameters& params,
-                             const CLKernel& kernel, const int3& grid,
-                             WorkGroupSizeAlignment z_alignment,
-                             int3* best_work_group) {
+absl::Status GetBestWorkGroupXY128(const TuningParameters& params,
+                                   const CLKernel& kernel, const int3& grid,
+                                   WorkGroupSizeAlignment z_alignment,
+                                   int3* best_work_group) {
   std::vector<int3> work_groups = GenerateWorkGroupSizesXY128(
       grid, kernel.GetMaxWorkGroupSize(), z_alignment);
   int best_work_group_index;
   RETURN_IF_ERROR(params.queue->GetBestWorkGroupIndex(
       kernel, *params.info, grid, work_groups, &best_work_group_index));
   *best_work_group = work_groups[best_work_group_index];
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status GetBestWorkGroupXY128Linear(const TuningParameters& params,
-                                   const CLKernel& kernel, const int3& grid,
-                                   WorkGroupSizeAlignment z_alignment,
-                                   int3* best_work_group) {
+absl::Status GetBestWorkGroupXY128Linear(const TuningParameters& params,
+                                         const CLKernel& kernel,
+                                         const int3& grid,
+                                         WorkGroupSizeAlignment z_alignment,
+                                         int3* best_work_group) {
   std::vector<int3> work_groups = GenerateWorkGroupSizesXY128Linear(
       grid, kernel.GetMaxWorkGroupSize(), z_alignment);
   int best_work_group_index;
   RETURN_IF_ERROR(params.queue->GetBestWorkGroupIndex(
       kernel, *params.info, grid, work_groups, &best_work_group_index));
   *best_work_group = work_groups[best_work_group_index];
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 bool XY128RequiresMoreWorkGroupsThenXY128Linear(int width, int height) {
@@ -244,24 +246,25 @@ bool XY128RequiresMoreWorkGroupsThenXY128Linear(int width, int height) {
   return !have_equal_work_groups;
 }
 
-Status GetBestWorkGroup(const TuningParameters& params, const CLKernel& kernel,
-                        const int3& grid, int3* best_work_group) {
+absl::Status GetBestWorkGroup(const TuningParameters& params,
+                              const CLKernel& kernel, const int3& grid,
+                              int3* best_work_group) {
   switch (params.tuning_type) {
     case TuningType::FAST:
       *best_work_group = GetWorkGroup(grid, kernel.GetMaxWorkGroupSize());
-      return OkStatus();
+      return absl::OkStatus();
     case TuningType::EXHAUSTIVE:
       return GetBestWorkGroupAlignedToGrid(params, kernel, grid,
                                            best_work_group);
     default:
       *best_work_group = {8, 4, 1};
-      return OkStatus();
+      return absl::OkStatus();
   }
 }
 
-Status GetBestWorkGroupConv(const TuningParameters& params,
-                            const CLKernel& kernel, const int3& grid,
-                            int3* best_work_group) {
+absl::Status GetBestWorkGroupConv(const TuningParameters& params,
+                                  const CLKernel& kernel, const int3& grid,
+                                  int3* best_work_group) {
   switch (params.tuning_type) {
     case TuningType::FAST: {
       int max_z_size = 16;
@@ -271,14 +274,14 @@ Status GetBestWorkGroupConv(const TuningParameters& params,
       max_z_size = std::min(max_z_size, params.info->max_work_group_sizes.z);
       *best_work_group =
           GetWorkGroupConv(grid, kernel.GetMaxWorkGroupSize(), max_z_size);
-      return OkStatus();
+      return absl::OkStatus();
     }
     case TuningType::EXHAUSTIVE:
       return GetBestWorkGroupAlignedToGrid(params, kernel, grid,
                                            best_work_group);
     default:
       *best_work_group = {8, 4, 1};
-      return OkStatus();
+      return absl::OkStatus();
   }
 }
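
GetPenalty (unchanged by this patch) feeds the heuristics above; one plausible reading is that a candidate group size is penalized by the idle invocations it launches past the grid. A sketch under that assumption (the real function may weight things differently):

// Assumed penalty: threads dispatched beyond the grid in the last group
// are wasted work.
int PenaltySketch(int grid_size, int group_size) {
  const int groups = (grid_size + group_size - 1) / group_size;
  return groups * group_size - grid_size;  // idle invocations
}
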
 
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/work_group_picking.h b/tensorflow/lite/delegates/gpu/cl/kernels/work_group_picking.h
index 4b9801e6009..7cc60f4723f 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/work_group_picking.h
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/work_group_picking.h
@@ -31,16 +31,17 @@ namespace cl {
 // Here and later you can find XY128, this is because 128 is SIMD width of A6xx
 // And XY128 means that work_group_size.x * work_group_size.y % 128 = 0
 // We need it to correctly work with constants uploading on A6xx
-Status GetBestWorkGroupXY128(const TuningParameters& params,
-                             const CLKernel& kernel, const int3& grid,
-                             WorkGroupSizeAlignment z_alignment,
-                             int3* best_work_group);
-
-Status GetBestWorkGroupXY128Linear(const TuningParameters& params,
+absl::Status GetBestWorkGroupXY128(const TuningParameters& params,
                                    const CLKernel& kernel, const int3& grid,
                                    WorkGroupSizeAlignment z_alignment,
                                    int3* best_work_group);
 
+absl::Status GetBestWorkGroupXY128Linear(const TuningParameters& params,
+                                         const CLKernel& kernel,
+                                         const int3& grid,
+                                         WorkGroupSizeAlignment z_alignment,
+                                         int3* best_work_group);
+
 int3 GetWorkGroupXY128ConvLinear(const int3& grid);
 
 int3 GetWorkGroupXY128Simple(const int3& grid);
@@ -48,12 +49,13 @@ int3 GetWorkGroupXY128Conv(const int3& grid);
 
 bool XY128RequiresMoreWorkGroupsThenXY128Linear(int width, int height);
 
-Status GetBestWorkGroup(const TuningParameters& params, const CLKernel& kernel,
-                        const int3& grid, int3* best_work_group);
+absl::Status GetBestWorkGroup(const TuningParameters& params,
+                              const CLKernel& kernel, const int3& grid,
+                              int3* best_work_group);
 
-Status GetBestWorkGroupConv(const TuningParameters& params,
-                            const CLKernel& kernel, const int3& grid,
-                            int3* best_work_group);
+absl::Status GetBestWorkGroupConv(const TuningParameters& params,
+                                  const CLKernel& kernel, const int3& grid,
+                                  int3* best_work_group);
 
 }  // namespace cl
 }  // namespace gpu
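
The XY128 comment in this header is the whole constraint: on Adreno A6xx the SIMD width is 128, so the product of the X and Y work-group dimensions must be a multiple of 128 for constants uploading to behave. Spelled out as a predicate (illustrative):

// The XY128 requirement: work_group_size.x * work_group_size.y % 128 == 0.
bool SatisfiesXY128(int wg_x, int wg_y) {
  return (wg_x * wg_y) % 128 == 0;
}
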
diff --git a/tensorflow/lite/delegates/gpu/cl/linear_storage.cc b/tensorflow/lite/delegates/gpu/cl/linear_storage.cc
index cd7fe729c7d..4fb21d0ec6a 100644
--- a/tensorflow/lite/delegates/gpu/cl/linear_storage.cc
+++ b/tensorflow/lite/delegates/gpu/cl/linear_storage.cc
@@ -15,6 +15,8 @@ limitations under the License.
 
 #include "tensorflow/lite/delegates/gpu/cl/linear_storage.h"
 
+#include "tensorflow/lite/delegates/gpu/common/status.h"
+
 namespace tflite {
 namespace gpu {
 namespace cl {
@@ -73,29 +75,31 @@ LinearStorageType DeduceLinearStorageType(
   }
 }
 
-Status CreateBufferLinearStorage(int size, DataType data_type, void* data,
-                                 CLContext* context, LinearStorage* result) {
+absl::Status CreateBufferLinearStorage(int size, DataType data_type, void* data,
+                                       CLContext* context,
+                                       LinearStorage* result) {
   const int float4_size =
       data_type == DataType::FLOAT32 ? sizeof(float4) : sizeof(half4);
   *result = LinearStorage(size, LinearStorageType::BUFFER, data_type);
   RETURN_IF_ERROR(CreateReadOnlyBuffer(float4_size * size, data, context,
                                        &result->buffer_storage_));
   result->memory_ = result->buffer_storage_.GetMemoryPtr();
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CreateTextureLinearStorage(int size, DataType data_type, void* data,
-                                  CLContext* context, LinearStorage* result) {
+absl::Status CreateTextureLinearStorage(int size, DataType data_type,
+                                        void* data, CLContext* context,
+                                        LinearStorage* result) {
   *result = LinearStorage(size, LinearStorageType::TEXTURE_2D, data_type);
   RETURN_IF_ERROR(CreateTexture2DRGBA(data_type, size, 1, data, context,
                                       &result->texture_storage_));
   result->memory_ = result->texture_storage_.GetMemoryPtr();
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CreateLinearStorage(const LinearStorageCreateInfo& creation_info,
-                           int size, void* data, CLContext* context,
-                           LinearStorage* result) {
+absl::Status CreateLinearStorage(const LinearStorageCreateInfo& creation_info,
+                                 int size, void* data, CLContext* context,
+                                 LinearStorage* result) {
   if (creation_info.storage_type == LinearStorageType::BUFFER) {
     return CreateBufferLinearStorage(size, creation_info.data_type, data,
                                      context, result);
diff --git a/tensorflow/lite/delegates/gpu/cl/linear_storage.h b/tensorflow/lite/delegates/gpu/cl/linear_storage.h
index 3d3d9d5222f..93aecd57854 100644
--- a/tensorflow/lite/delegates/gpu/cl/linear_storage.h
+++ b/tensorflow/lite/delegates/gpu/cl/linear_storage.h
@@ -64,12 +64,12 @@ class LinearStorage {
   std::string GetDeclaration() const;
 
  private:
-  friend Status CreateTextureLinearStorage(int size, DataType data_type,
-                                           void* data, CLContext* context,
-                                           LinearStorage* result);
-  friend Status CreateBufferLinearStorage(int size, DataType data_type,
-                                          void* data, CLContext* context,
-                                          LinearStorage* result);
+  friend absl::Status CreateTextureLinearStorage(int size, DataType data_type,
+                                                 void* data, CLContext* context,
+                                                 LinearStorage* result);
+  friend absl::Status CreateBufferLinearStorage(int size, DataType data_type,
+                                                void* data, CLContext* context,
+                                                LinearStorage* result);
 
   LinearStorage(int depth, LinearStorageType storage_type, DataType data_type);
 
@@ -83,20 +83,22 @@ class LinearStorage {
   DataType data_type_;
 };
 
-Status CreateBufferLinearStorage(int size, DataType data_type, void* data,
-                                 CLContext* context, LinearStorage* result);
+absl::Status CreateBufferLinearStorage(int size, DataType data_type, void* data,
+                                       CLContext* context,
+                                       LinearStorage* result);
 
-Status CreateTextureLinearStorage(int size, DataType data_type, void* data,
-                                  CLContext* context, LinearStorage* result);
+absl::Status CreateTextureLinearStorage(int size, DataType data_type,
+                                        void* data, CLContext* context,
+                                        LinearStorage* result);
 
-Status CreateLinearStorage(const LinearStorageCreateInfo& creation_info,
-                           int size, void* data, CLContext* context,
-                           LinearStorage* result);
+absl::Status CreateLinearStorage(const LinearStorageCreateInfo& creation_info,
+                                 int size, void* data, CLContext* context,
+                                 LinearStorage* result);
 
 template <DataType T>
-Status CreateLinearStorage(const LinearStorageCreateInfo& creation_info,
-                           const ::tflite::gpu::Tensor<Linear, T>& tensor,
-                           CLContext* context, LinearStorage* result) {
+absl::Status CreateLinearStorage(const LinearStorageCreateInfo& creation_info,
+                                 const ::tflite::gpu::Tensor<Linear, T>& tensor,
+                                 CLContext* context, LinearStorage* result) {
   int size = creation_info.aligned_size != 0 ? creation_info.aligned_size
                                              : tensor.shape.v;
   const int depth = IntegralDivideRoundUp(size, 4);
@@ -112,7 +114,7 @@ Status CreateLinearStorage(const LinearStorageCreateInfo& creation_info,
                                         context, result));
   }
   result->SetName(creation_info.name);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
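
The depth computation in the templated CreateLinearStorage reflects the storage model: values live in float4/half4 vectors, so a linear tensor of `size` scalars occupies IntegralDivideRoundUp(size, 4) vector elements. As plain arithmetic:

// ceil(size / 4): number of 4-wide vector elements holding `size` scalars.
int LinearStorageDepth(int size) { return (size + 3) / 4; }
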
diff --git a/tensorflow/lite/delegates/gpu/cl/opencl_wrapper.cc b/tensorflow/lite/delegates/gpu/cl/opencl_wrapper.cc
index 3b471ce816c..be551bc9973 100644
--- a/tensorflow/lite/delegates/gpu/cl/opencl_wrapper.cc
+++ b/tensorflow/lite/delegates/gpu/cl/opencl_wrapper.cc
@@ -31,11 +31,11 @@ namespace cl {
     function = reinterpret_cast<PFN_##function>(dlsym(libopencl, #function));  \
   }
 
-Status LoadOpenCL() {
+absl::Status LoadOpenCL() {
   void* libopencl = dlopen("libOpenCL.so", RTLD_NOW | RTLD_LOCAL);
   if (libopencl) {
     LoadOpenCLFunctions(libopencl, false);
-    return OkStatus();
+    return absl::OkStatus();
   } else {
     // Pixel phone?
     libopencl = dlopen("libOpenCL-pixel.so", RTLD_NOW | RTLD_LOCAL);
@@ -45,9 +45,9 @@ Status LoadOpenCL() {
           reinterpret_cast<enableOpenCL_t>(dlsym(libopencl, "enableOpenCL"));
       enableOpenCL();
       LoadOpenCLFunctions(libopencl, true);
-      return OkStatus();
+      return absl::OkStatus();
     } else {
-      return UnknownError(
+      return absl::UnknownError(
           absl::StrCat("OpenCL library not loaded - ", dlerror()));
     }
   }
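
LoadOpenCL above resolves the vendor library at runtime instead of linking against it. A standalone sketch of that dlopen/dlsym pattern with absl::Status (function names here are illustrative):

#include <dlfcn.h>

#include "absl/status/status.h"
#include "absl/strings/str_cat.h"

// Open the vendor library; dlerror() reports why dlopen failed.
absl::Status LoadLibrarySketch(void** handle) {
  *handle = dlopen("libOpenCL.so", RTLD_NOW | RTLD_LOCAL);
  if (*handle == nullptr) {
    return absl::UnknownError(absl::StrCat("dlopen failed - ", dlerror()));
  }
  return absl::OkStatus();
}

// Resolve one entry point; returns nullptr if the symbol is missing.
template <typename F>
F LoadFunction(void* handle, const char* name) {
  return reinterpret_cast<F>(dlsym(handle, name));
}
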
diff --git a/tensorflow/lite/delegates/gpu/cl/opencl_wrapper.h b/tensorflow/lite/delegates/gpu/cl/opencl_wrapper.h
index 16ae24437a3..2201b4c1e5d 100644
--- a/tensorflow/lite/delegates/gpu/cl/opencl_wrapper.h
+++ b/tensorflow/lite/delegates/gpu/cl/opencl_wrapper.h
@@ -27,7 +27,7 @@ namespace tflite {
 namespace gpu {
 namespace cl {
 
-Status LoadOpenCL();
+absl::Status LoadOpenCL();
 void LoadOpenCLFunctions(void *libopencl, bool is_pixel);
 
 typedef cl_int(CL_API_CALL *PFN_clGetPlatformIDs)(
diff --git a/tensorflow/lite/delegates/gpu/cl/program_cache.cc b/tensorflow/lite/delegates/gpu/cl/program_cache.cc
index e6735b448de..285aa06d99b 100644
--- a/tensorflow/lite/delegates/gpu/cl/program_cache.cc
+++ b/tensorflow/lite/delegates/gpu/cl/program_cache.cc
@@ -56,7 +56,7 @@ ProgramCache& ProgramCache::operator=(ProgramCache&& program_cache) {
   return *this;
 }
 
-Status ProgramCache::GetOrCreateCLKernel(
+absl::Status ProgramCache::GetOrCreateCLKernel(
     const std::string& code, const std::string& function_name,
     const std::vector<CompilerOptions>& compiler_options,
     const CLContext& context, const CLDevice& device, CLKernel* result) {
@@ -64,32 +64,31 @@ Status ProgramCache::GetOrCreateCLKernel(
   ProgramDescriptor desc{code, options, use_fingerprints_};
   auto it = programs_.find(desc);
   if (it != programs_.end()) {
-    RETURN_IF_ERROR(result->CreateFromProgram(it->second, function_name));
-    return OkStatus();
+    return result->CreateFromProgram(it->second, function_name);
   }
 
   CLProgram program;
   RETURN_IF_ERROR(CreateCLProgram(code, options, context, device, &program));
   RETURN_IF_ERROR(result->CreateFromProgram(program, function_name));
   programs_.insert(std::make_pair(std::move(desc), std::move(program)));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status ProgramCache::GetOrCreateCLKernel(const std::string& code,
-                                         const std::string& function_name,
-                                         const CLContext& context,
-                                         const CLDevice& device,
-                                         CLKernel* result) {
+absl::Status ProgramCache::GetOrCreateCLKernel(const std::string& code,
+                                               const std::string& function_name,
+                                               const CLContext& context,
+                                               const CLDevice& device,
+                                               CLKernel* result) {
   return GetOrCreateCLKernel(code, function_name, {}, context, device, result);
 }
 
-Status ProgramCache::AddSerializedCache(
+absl::Status ProgramCache::AddSerializedCache(
     const CLContext& context, const CLDevice& device,
     absl::Span<const uint8_t> serialized_cache) {
   flatbuffers::Verifier verifier(serialized_cache.data(),
                                  serialized_cache.size());
   if (!data::VerifyCompiledCacheBuffer(verifier)) {
-    return InvalidArgumentError("Serialized model is corrupted.");
+    return absl::InvalidArgumentError("Serialized model is corrupted.");
   }
 
   auto model = data::GetCompiledCache(serialized_cache.data());
@@ -97,7 +96,7 @@ Status ProgramCache::AddSerializedCache(
                                model->driver_version()->size());
 
   if (device.GetPlatformVersion() != platform_version) {
-    return InvalidArgumentError(
+    return absl::InvalidArgumentError(
         "OpenCL driver changed, cache invalid, should be regenerated");
   }
 
@@ -116,10 +115,10 @@ Status ProgramCache::AddSerializedCache(
       programs_.insert(std::make_pair(std::move(desc), std::move(program)));
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status ProgramCache::GetSerializedCache(
+absl::Status ProgramCache::GetSerializedCache(
     const CLDevice& device, std::vector<uint8_t>* serialized_cache) const {
   ::flatbuffers::FlatBufferBuilder builder;
   std::vector<flatbuffers::Offset<data::Program>> serialized_programs;
@@ -140,9 +139,9 @@ Status ProgramCache::GetSerializedCache(
   data::FinishCompiledCacheBuffer(builder, cache_builder.Finish());
   size_t next_element = serialized_cache->size();
   serialized_cache->resize(serialized_cache->size() + builder.GetSize());
-  memcpy(&(*serialized_cache)[next_element], builder.GetBufferPointer(),
-         builder.GetSize());
-  return OkStatus();
+  std::memcpy(&(*serialized_cache)[next_element], builder.GetBufferPointer(),
+              builder.GetSize());
+  return absl::OkStatus();
 }
 
 }  // namespace cl
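
Note on the pattern above: when a status-returning call is the last statement,
the migrated code returns its absl::Status directly instead of wrapping it in
RETURN_IF_ERROR and then returning absl::OkStatus(). A minimal, self-contained
sketch of the same cache-hit shape, assuming nothing from the CL wrappers
(Compile, MakeKernel, and the local macro are stand-ins, not the project API):

    #include <map>
    #include <string>
    #include <utility>

    #include "absl/status/status.h"

    // Local stand-in for the project's RETURN_IF_ERROR macro.
    #define RETURN_IF_ERROR(expr)            \
      do {                                   \
        const absl::Status _status = (expr); \
        if (!_status.ok()) return _status;   \
      } while (false)

    // Hypothetical helpers standing in for CreateCLProgram / CreateFromProgram.
    absl::Status Compile(const std::string& code, std::string* binary) {
      if (code.empty()) return absl::InvalidArgumentError("empty source");
      *binary = "bin:" + code;
      return absl::OkStatus();
    }
    absl::Status MakeKernel(const std::string& binary, std::string* kernel) {
      *kernel = "kernel:" + binary;
      return absl::OkStatus();
    }

    absl::Status GetOrCreate(std::map<std::string, std::string>* cache,
                             const std::string& code, std::string* kernel) {
      auto it = cache->find(code);
      if (it != cache->end()) {
        // Tail position: forward the sub-call's status directly.
        return MakeKernel(it->second, kernel);
      }
      std::string binary;
      RETURN_IF_ERROR(Compile(code, &binary));  // early exit on failure
      RETURN_IF_ERROR(MakeKernel(binary, kernel));
      cache->emplace(code, std::move(binary));
      return absl::OkStatus();
    }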
diff --git a/tensorflow/lite/delegates/gpu/cl/program_cache.h b/tensorflow/lite/delegates/gpu/cl/program_cache.h
index b8d019d3d47..21f9583a59a 100644
--- a/tensorflow/lite/delegates/gpu/cl/program_cache.h
+++ b/tensorflow/lite/delegates/gpu/cl/program_cache.h
@@ -41,20 +41,21 @@ class ProgramCache {
   ProgramCache(const ProgramCache&) = delete;
   ProgramCache& operator=(const ProgramCache&) = delete;
 
-  Status GetOrCreateCLKernel(
+  absl::Status GetOrCreateCLKernel(
       const std::string& code, const std::string& function_name,
       const std::vector<CompilerOptions>& compiler_options,
       const CLContext& context, const CLDevice& device, CLKernel* result);
 
-  Status GetOrCreateCLKernel(const std::string& code,
-                             const std::string& function_name,
-                             const CLContext& context, const CLDevice& device,
-                             CLKernel* result);
+  absl::Status GetOrCreateCLKernel(const std::string& code,
+                                   const std::string& function_name,
+                                   const CLContext& context,
+                                   const CLDevice& device, CLKernel* result);
 
-  Status AddSerializedCache(const CLContext& context, const CLDevice& device,
-                            absl::Span<const uint8_t> serialized_cache);
-  Status GetSerializedCache(const CLDevice& device,
-                            std::vector<uint8_t>* serialized_cache) const;
+  absl::Status AddSerializedCache(const CLContext& context,
+                                  const CLDevice& device,
+                                  absl::Span<const uint8_t> serialized_cache);
+  absl::Status GetSerializedCache(const CLDevice& device,
+                                  std::vector<uint8_t>* serialized_cache) const;
 
  private:
   struct ProgramDescriptor {
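
AddSerializedCache above gates deserialization twice before touching the
payload: a structural flatbuffer verification, then a driver-version
comparison, each failing with absl::InvalidArgumentError. A toy version of the
same guard shape, assuming an invented one-byte version header (the real cache
is a flatbuffer):

    #include <cstdint>
    #include <string>
    #include <vector>

    #include "absl/status/status.h"

    // Toy payload layout: [version byte][data...].
    absl::Status LoadCache(const std::vector<uint8_t>& blob,
                           uint8_t expected_version, std::string* out) {
      if (blob.empty()) {  // structural check, as in the flatbuffer verifier
        return absl::InvalidArgumentError("Serialized cache is corrupted.");
      }
      if (blob[0] != expected_version) {  // environment check, as the driver version
        return absl::InvalidArgumentError(
            "Driver changed; cache is invalid and must be regenerated.");
      }
      out->assign(blob.begin() + 1, blob.end());
      return absl::OkStatus();
    }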
diff --git a/tensorflow/lite/delegates/gpu/cl/selectors/convolution_selector.cc b/tensorflow/lite/delegates/gpu/cl/selectors/convolution_selector.cc
index a420373f50a..d2d775f819f 100644
--- a/tensorflow/lite/delegates/gpu/cl/selectors/convolution_selector.cc
+++ b/tensorflow/lite/delegates/gpu/cl/selectors/convolution_selector.cc
@@ -29,11 +29,12 @@ namespace gpu {
 namespace cl {
 namespace {
 
-Status SelectConvolutionAdreno(const Convolution2DAttributes& attr,
-                               const BHWC& dst_shape,
-                               const CreationContext& creation_context,
-                               const OperationDef& op_def, ModelHints hints,
-                               std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectConvolutionAdreno(const Convolution2DAttributes& attr,
+                                     const BHWC& dst_shape,
+                                     const CreationContext& creation_context,
+                                     const OperationDef& op_def,
+                                     ModelHints hints,
+                                     std::unique_ptr<GPUOperation>* ptr) {
   if (IsConvConstantsSupported(*creation_context.device, op_def, attr)) {
     ConvConstants conv;
     RETURN_IF_ERROR(CreateConvConstants(creation_context, op_def, attr, &conv));
@@ -43,28 +44,24 @@ Status SelectConvolutionAdreno(const Convolution2DAttributes& attr,
     RETURN_IF_ERROR(CreateConvTexture(creation_context, op_def, attr, &conv));
     *ptr = absl::make_unique<ConvTexture>(std::move(conv));
   }
-
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status SelectConvolutionWinogradAdreno(const Convolution2DAttributes& attr,
-                                       const BHWC& dst_shape,
-                                       const CreationContext& creation_context,
-                                       const OperationDef& op_def,
-                                       ModelHints hints,
-                                       std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectConvolutionWinogradAdreno(
+    const Convolution2DAttributes& attr, const BHWC& dst_shape,
+    const CreationContext& creation_context, const OperationDef& op_def,
+    ModelHints hints, std::unique_ptr<GPUOperation>* ptr) {
   ConvTexture conv;
   RETURN_IF_ERROR(
       CreateConvTextureWino4x4To6x6(creation_context, op_def, attr, &conv));
   *ptr = absl::make_unique<ConvTexture>(std::move(conv));
-
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status SelectConvolutionNVidia(const Convolution2DAttributes& attr,
-                               const CreationContext& creation_context,
-                               const OperationDef& op_def,
-                               std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectConvolutionNVidia(const Convolution2DAttributes& attr,
+                                     const CreationContext& creation_context,
+                                     const OperationDef& op_def,
+                                     std::unique_ptr<GPUOperation>* ptr) {
   if (IsConvConstantsSupported(*creation_context.device, op_def, attr)) {
     ConvConstants conv;
     RETURN_IF_ERROR(CreateConvConstants(creation_context, op_def, attr, &conv));
@@ -74,24 +71,24 @@ Status SelectConvolutionNVidia(const Convolution2DAttributes& attr,
     RETURN_IF_ERROR(CreateConvPowerVR(creation_context, op_def, attr, &conv));
     *ptr = absl::make_unique<ConvPowerVR>(std::move(conv));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status SelectConvolutionPowerVR(const Convolution2DAttributes& attr,
-                                const CreationContext& creation_context,
-                                const OperationDef& op_def,
-                                std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectConvolutionPowerVR(const Convolution2DAttributes& attr,
+                                      const CreationContext& creation_context,
+                                      const OperationDef& op_def,
+                                      std::unique_ptr<GPUOperation>* ptr) {
   ConvPowerVR conv;
   RETURN_IF_ERROR(CreateConvPowerVR(creation_context, op_def, attr, &conv));
   *ptr = absl::make_unique<ConvPowerVR>(std::move(conv));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status SelectConvolutionMali(const Convolution2DAttributes& attr,
-                             const BHWC& dst_shape,
-                             const CreationContext& creation_context,
-                             const OperationDef& op_def,
-                             std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectConvolutionMali(const Convolution2DAttributes& attr,
+                                   const BHWC& dst_shape,
+                                   const CreationContext& creation_context,
+                                   const OperationDef& op_def,
+                                   std::unique_ptr<GPUOperation>* ptr) {
   if (op_def.src_tensors[0].storage_type == TensorStorageType::BUFFER &&
       IsConvBuffer1x1Supported(op_def, attr)) {
     ConvBuffer1x1 conv;
@@ -104,14 +101,13 @@ Status SelectConvolutionMali(const Convolution2DAttributes& attr,
         CreateConvPowerVR(creation_context, op_def, attr, &conv, &dst_shape));
     *ptr = absl::make_unique<ConvPowerVR>(std::move(conv));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status SelectConvolutionWinogradMali(const Convolution2DAttributes& attr,
-                                     const BHWC& dst_shape,
-                                     const CreationContext& creation_context,
-                                     const OperationDef& op_def,
-                                     std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectConvolutionWinogradMali(
+    const Convolution2DAttributes& attr, const BHWC& dst_shape,
+    const CreationContext& creation_context, const OperationDef& op_def,
+    std::unique_ptr<GPUOperation>* ptr) {
   if (op_def.src_tensors[0].storage_type == TensorStorageType::BUFFER) {
     ConvBuffer1x1 conv;
     RETURN_IF_ERROR(CreateConvBuffer1x1Wino4x4To6x6(creation_context, op_def,
@@ -123,17 +119,16 @@ Status SelectConvolutionWinogradMali(const Convolution2DAttributes& attr,
                                                   attr, &conv, &dst_shape));
     *ptr = absl::make_unique<ConvPowerVR>(std::move(conv));
   }
-
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace
 
-Status SelectConvolution(const Convolution2DAttributes& attr,
-                         const BHWC& dst_shape,
-                         const CreationContext& creation_context,
-                         const OperationDef& op_def, ModelHints hints,
-                         std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectConvolution(const Convolution2DAttributes& attr,
+                               const BHWC& dst_shape,
+                               const CreationContext& creation_context,
+                               const OperationDef& op_def, ModelHints hints,
+                               std::unique_ptr<GPUOperation>* ptr) {
   switch (creation_context.device->vendor()) {
     case Vendor::QUALCOMM:
       return SelectConvolutionAdreno(attr, dst_shape, creation_context, op_def,
@@ -152,12 +147,10 @@ Status SelectConvolution(const Convolution2DAttributes& attr,
   }
 }
 
-Status SelectConvolutionForWinograd(const Convolution2DAttributes& attr,
-                                    const BHWC& dst_shape,
-                                    const CreationContext& creation_context,
-                                    const OperationDef& op_def,
-                                    ModelHints hints,
-                                    std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectConvolutionForWinograd(
+    const Convolution2DAttributes& attr, const BHWC& dst_shape,
+    const CreationContext& creation_context, const OperationDef& op_def,
+    ModelHints hints, std::unique_ptr<GPUOperation>* ptr) {
   switch (creation_context.device->vendor()) {
     case Vendor::QUALCOMM:
       return SelectConvolutionWinogradAdreno(attr, dst_shape, creation_context,
@@ -169,7 +162,7 @@ Status SelectConvolutionForWinograd(const Convolution2DAttributes& attr,
       RETURN_IF_ERROR(
           CreateConvPowerVRWino4x4To6x6(creation_context, op_def, attr, &conv));
       *ptr = absl::make_unique<ConvPowerVR>(std::move(conv));
-      return OkStatus();
+      return absl::OkStatus();
     }
     case Vendor::MALI:
       return SelectConvolutionWinogradMali(attr, dst_shape, creation_context,
diff --git a/tensorflow/lite/delegates/gpu/cl/selectors/convolution_selector.h b/tensorflow/lite/delegates/gpu/cl/selectors/convolution_selector.h
index dc0657ec47c..94723527ad5 100644
--- a/tensorflow/lite/delegates/gpu/cl/selectors/convolution_selector.h
+++ b/tensorflow/lite/delegates/gpu/cl/selectors/convolution_selector.h
@@ -28,18 +28,16 @@ namespace tflite {
 namespace gpu {
 namespace cl {
 
-Status SelectConvolution(const Convolution2DAttributes& attr,
-                         const BHWC& dst_shape,
-                         const CreationContext& creation_context,
-                         const OperationDef& op_def, ModelHints hints,
-                         std::unique_ptr<GPUOperation>* ptr);
+absl::Status SelectConvolution(const Convolution2DAttributes& attr,
+                               const BHWC& dst_shape,
+                               const CreationContext& creation_context,
+                               const OperationDef& op_def, ModelHints hints,
+                               std::unique_ptr<GPUOperation>* ptr);
 
-Status SelectConvolutionForWinograd(const Convolution2DAttributes& attr,
-                                    const BHWC& dst_shape,
-                                    const CreationContext& creation_context,
-                                    const OperationDef& op_def,
-                                    ModelHints hints,
-                                    std::unique_ptr<GPUOperation>* ptr);
+absl::Status SelectConvolutionForWinograd(
+    const Convolution2DAttributes& attr, const BHWC& dst_shape,
+    const CreationContext& creation_context, const OperationDef& op_def,
+    ModelHints hints, std::unique_ptr<GPUOperation>* ptr);
 
 }  // namespace cl
 }  // namespace gpu
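
Every selector in this file shares one shape: a switch over the device vendor
that forwards to a vendor-specific factory, returning the new operation through
a std::unique_ptr out-parameter plus an absl::Status. A reduced illustration of
that dispatch (the enum and operation types here are invented):

    #include <memory>

    #include "absl/status/status.h"

    enum class GpuVendor { kQualcomm, kMali, kOther };  // illustrative only

    struct Op { virtual ~Op() = default; };
    struct AdrenoConv : Op {};
    struct MaliConv : Op {};

    absl::Status SelectConvLike(GpuVendor vendor, std::unique_ptr<Op>* ptr) {
      switch (vendor) {
        case GpuVendor::kQualcomm:
          *ptr = std::make_unique<AdrenoConv>();  // Adreno-tuned kernel
          return absl::OkStatus();
        case GpuVendor::kMali:
          *ptr = std::make_unique<MaliConv>();  // Mali-tuned kernel
          return absl::OkStatus();
        default:  // illustrative; a real selector would pick a generic fallback
          return absl::UnimplementedError("No convolution for this vendor.");
      }
    }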
diff --git a/tensorflow/lite/delegates/gpu/cl/selectors/convolution_transposed_selector.cc b/tensorflow/lite/delegates/gpu/cl/selectors/convolution_transposed_selector.cc
index 8dd0ef6b3cb..12e99b57aa7 100644
--- a/tensorflow/lite/delegates/gpu/cl/selectors/convolution_transposed_selector.cc
+++ b/tensorflow/lite/delegates/gpu/cl/selectors/convolution_transposed_selector.cc
@@ -28,7 +28,7 @@ namespace gpu {
 namespace cl {
 namespace {
 
-Status SelectConvolutionTransposedAdreno(
+absl::Status SelectConvolutionTransposedAdreno(
     const ConvolutionTransposedAttributes& attr,
     const CreationContext& creation_context, const OperationDef& op_def,
     std::unique_ptr<GPUOperation>* ptr) {
@@ -49,10 +49,10 @@ Status SelectConvolutionTransposedAdreno(
         CreateConvolutionTransposed(creation_context, op_def, attr, &conv));
     *ptr = absl::make_unique<ConvolutionTransposed>(std::move(conv));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status SelectConvolutionTransposedPowerVR(
+absl::Status SelectConvolutionTransposedPowerVR(
     const ConvolutionTransposedAttributes& attr,
     const CreationContext& creation_context, const OperationDef& op_def,
     std::unique_ptr<GPUOperation>* ptr) {
@@ -85,10 +85,10 @@ Status SelectConvolutionTransposedPowerVR(
         CreateConvolutionTransposed(creation_context, op_def, attr, &conv));
     *ptr = absl::make_unique<ConvolutionTransposed>(std::move(conv));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status SelectConvolutionTransposedMali(
+absl::Status SelectConvolutionTransposedMali(
     const ConvolutionTransposedAttributes& attr,
     const CreationContext& creation_context, const OperationDef& op_def,
     std::unique_ptr<GPUOperation>* ptr) {
@@ -96,14 +96,15 @@ Status SelectConvolutionTransposedMali(
   RETURN_IF_ERROR(
       CreateConvolutionTransposed(creation_context, op_def, attr, &conv));
   *ptr = absl::make_unique<ConvolutionTransposed>(std::move(conv));
-  return OkStatus();
+  return absl::OkStatus();
 }
+
 }  // namespace
 
-Status SelectConvolutionTransposed(const ConvolutionTransposedAttributes& attr,
-                                   const CreationContext& creation_context,
-                                   const OperationDef& op_def,
-                                   std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectConvolutionTransposed(
+    const ConvolutionTransposedAttributes& attr,
+    const CreationContext& creation_context, const OperationDef& op_def,
+    std::unique_ptr<GPUOperation>* ptr) {
   switch (creation_context.device->vendor()) {
     case Vendor::QUALCOMM:
       return SelectConvolutionTransposedAdreno(attr, creation_context, op_def,
diff --git a/tensorflow/lite/delegates/gpu/cl/selectors/convolution_transposed_selector.h b/tensorflow/lite/delegates/gpu/cl/selectors/convolution_transposed_selector.h
index 50f5e5baad5..ff37c1024ad 100644
--- a/tensorflow/lite/delegates/gpu/cl/selectors/convolution_transposed_selector.h
+++ b/tensorflow/lite/delegates/gpu/cl/selectors/convolution_transposed_selector.h
@@ -26,10 +26,10 @@ namespace tflite {
 namespace gpu {
 namespace cl {
 
-Status SelectConvolutionTransposed(const ConvolutionTransposedAttributes& attr,
-                                   const CreationContext& creation_context,
-                                   const OperationDef& op_def,
-                                   std::unique_ptr<GPUOperation>* ptr);
+absl::Status SelectConvolutionTransposed(
+    const ConvolutionTransposedAttributes& attr,
+    const CreationContext& creation_context, const OperationDef& op_def,
+    std::unique_ptr<GPUOperation>* ptr);
 
 }  // namespace cl
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/cl/selectors/default/default_selector.cc b/tensorflow/lite/delegates/gpu/cl/selectors/default/default_selector.cc
index 9fe7aa9732e..e2a941870db 100644
--- a/tensorflow/lite/delegates/gpu/cl/selectors/default/default_selector.cc
+++ b/tensorflow/lite/delegates/gpu/cl/selectors/default/default_selector.cc
@@ -28,12 +28,13 @@ namespace tflite {
 namespace gpu {
 namespace cl {
 
-Status SelectDefault(const CreationContext& creation_context,
-                     const OperationDef& op_def, ModelHints hints,
-                     const std::vector<Value<TensorRef<BHWC>>*>& inputs,
-                     const std::vector<Value<TensorRef<BHWC>>*>& outputs,
-                     const Node& node, GPUOperationsSubgraph* gpu_subgraph) {
-  return UnimplementedError(
+absl::Status SelectDefault(const CreationContext& creation_context,
+                           const OperationDef& op_def, ModelHints hints,
+                           const std::vector<Value<TensorRef<BHWC>>*>& inputs,
+                           const std::vector<Value<TensorRef<BHWC>>*>& outputs,
+                           const Node& node,
+                           GPUOperationsSubgraph* gpu_subgraph) {
+  return absl::UnimplementedError(
       absl::StrCat("No selector for ", node.operation.type));
 }
 
diff --git a/tensorflow/lite/delegates/gpu/cl/selectors/default_selector.h b/tensorflow/lite/delegates/gpu/cl/selectors/default_selector.h
index b4b996cc4fb..05e33501cd4 100644
--- a/tensorflow/lite/delegates/gpu/cl/selectors/default_selector.h
+++ b/tensorflow/lite/delegates/gpu/cl/selectors/default_selector.h
@@ -29,11 +29,12 @@ namespace tflite {
 namespace gpu {
 namespace cl {
 
-Status SelectDefault(const CreationContext& creation_context,
-                     const OperationDef& op_def, ModelHints hints,
-                     const std::vector<Value<TensorRef<BHWC>>*>& inputs,
-                     const std::vector<Value<TensorRef<BHWC>>*>& outputs,
-                     const Node& node, GPUOperationsSubgraph* gpu_subgraph);
+absl::Status SelectDefault(const CreationContext& creation_context,
+                           const OperationDef& op_def, ModelHints hints,
+                           const std::vector<Value<TensorRef<BHWC>>*>& inputs,
+                           const std::vector<Value<TensorRef<BHWC>>*>& outputs,
+                           const Node& node,
+                           GPUOperationsSubgraph* gpu_subgraph);
 
 }  // namespace cl
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/cl/selectors/dw_convolution_selector.cc b/tensorflow/lite/delegates/gpu/cl/selectors/dw_convolution_selector.cc
index 85afa3fff43..0098117dea1 100644
--- a/tensorflow/lite/delegates/gpu/cl/selectors/dw_convolution_selector.cc
+++ b/tensorflow/lite/delegates/gpu/cl/selectors/dw_convolution_selector.cc
@@ -26,10 +26,10 @@ namespace gpu {
 namespace cl {
 namespace {
 
-Status SelectDWConvolutionAdreno(const DepthwiseConvolution2DAttributes& attr,
-                                 const CreationContext& creation_context,
-                                 const OperationDef& op_def,
-                                 std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectDWConvolutionAdreno(
+    const DepthwiseConvolution2DAttributes& attr,
+    const CreationContext& creation_context, const OperationDef& op_def,
+    std::unique_ptr<GPUOperation>* ptr) {
   if (!op_def.IsBatchSupported() && IsDepthWiseConv3x3Supported(attr)) {
     DepthWiseConv3x3 dw_conv;
     RETURN_IF_ERROR(
@@ -41,13 +41,13 @@ Status SelectDWConvolutionAdreno(const DepthwiseConvolution2DAttributes& attr,
         CreateDepthWiseConvolution(creation_context, op_def, attr, &dw_conv));
     *ptr = absl::make_unique<DepthWiseConvolution>(std::move(dw_conv));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status SelectDWConvolutionPowerVR(const DepthwiseConvolution2DAttributes& attr,
-                                  const CreationContext& creation_context,
-                                  const OperationDef& op_def,
-                                  std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectDWConvolutionPowerVR(
+    const DepthwiseConvolution2DAttributes& attr,
+    const CreationContext& creation_context, const OperationDef& op_def,
+    std::unique_ptr<GPUOperation>* ptr) {
   if (!op_def.IsBatchSupported() && IsDepthWiseConv3x3Supported(attr)) {
     DepthWiseConv3x3 dw_conv;
     RETURN_IF_ERROR(
@@ -59,13 +59,13 @@ Status SelectDWConvolutionPowerVR(const DepthwiseConvolution2DAttributes& attr,
         CreateDepthWiseConvolution(creation_context, op_def, attr, &dw_conv));
     *ptr = absl::make_unique<DepthWiseConvolution>(std::move(dw_conv));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status SelectDWConvolutionMali(const DepthwiseConvolution2DAttributes& attr,
-                               const CreationContext& creation_context,
-                               const OperationDef& op_def,
-                               std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectDWConvolutionMali(
+    const DepthwiseConvolution2DAttributes& attr,
+    const CreationContext& creation_context, const OperationDef& op_def,
+    std::unique_ptr<GPUOperation>* ptr) {
   const auto storage_type = op_def.src_tensors[0].storage_type;
   bool buffer_type = storage_type == TensorStorageType::BUFFER ||
                      storage_type == TensorStorageType::IMAGE_BUFFER;
@@ -83,14 +83,14 @@ Status SelectDWConvolutionMali(const DepthwiseConvolution2DAttributes& attr,
         CreateDepthWiseConvolution(creation_context, op_def, attr, &dw_conv));
     *ptr = absl::make_unique<DepthWiseConvolution>(std::move(dw_conv));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 }  // namespace
 
-Status SelectDWConvolution(const DepthwiseConvolution2DAttributes& attr,
-                           const CreationContext& creation_context,
-                           const OperationDef& op_def,
-                           std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectDWConvolution(const DepthwiseConvolution2DAttributes& attr,
+                                 const CreationContext& creation_context,
+                                 const OperationDef& op_def,
+                                 std::unique_ptr<GPUOperation>* ptr) {
   switch (creation_context.device->vendor()) {
     case Vendor::QUALCOMM:
       return SelectDWConvolutionAdreno(attr, creation_context, op_def, ptr);
diff --git a/tensorflow/lite/delegates/gpu/cl/selectors/dw_convolution_selector.h b/tensorflow/lite/delegates/gpu/cl/selectors/dw_convolution_selector.h
index c15f2946495..7f7cc6da604 100644
--- a/tensorflow/lite/delegates/gpu/cl/selectors/dw_convolution_selector.h
+++ b/tensorflow/lite/delegates/gpu/cl/selectors/dw_convolution_selector.h
@@ -26,10 +26,10 @@ namespace tflite {
 namespace gpu {
 namespace cl {
 
-Status SelectDWConvolution(const DepthwiseConvolution2DAttributes& attr,
-                           const CreationContext& creation_context,
-                           const OperationDef& op_def,
-                           std::unique_ptr<GPUOperation>* ptr);
+absl::Status SelectDWConvolution(const DepthwiseConvolution2DAttributes& attr,
+                                 const CreationContext& creation_context,
+                                 const OperationDef& op_def,
+                                 std::unique_ptr<GPUOperation>* ptr);
 
 }  // namespace cl
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/cl/selectors/fully_connected_selector.cc b/tensorflow/lite/delegates/gpu/cl/selectors/fully_connected_selector.cc
index 05d28b412ad..2a04a04460d 100644
--- a/tensorflow/lite/delegates/gpu/cl/selectors/fully_connected_selector.cc
+++ b/tensorflow/lite/delegates/gpu/cl/selectors/fully_connected_selector.cc
@@ -27,10 +27,11 @@ namespace tflite {
 namespace gpu {
 namespace cl {
 
-Status SelectFullyConnectedAdreno(const FullyConnectedAttributes& attr,
-                                  const CreationContext& creation_context,
-                                  const OperationDef& op_def, int batch_size,
-                                  std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectFullyConnectedAdreno(const FullyConnectedAttributes& attr,
+                                        const CreationContext& creation_context,
+                                        const OperationDef& op_def,
+                                        int batch_size,
+                                        std::unique_ptr<GPUOperation>* ptr) {
   if (op_def.IsBatchSupported()) {
     ConvTexture conv;
     RETURN_IF_ERROR(CreateConvTexture(creation_context, op_def, attr, &conv));
@@ -41,13 +42,13 @@ Status SelectFullyConnectedAdreno(const FullyConnectedAttributes& attr,
         CreateFullyConnected(creation_context, op_def, attr, &fc));
     *ptr = absl::make_unique<FullyConnected>(std::move(fc));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status SelectFullyConnectedPowerVR(const FullyConnectedAttributes& attr,
-                                   const CreationContext& creation_context,
-                                   const OperationDef& op_def, int batch_size,
-                                   std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectFullyConnectedPowerVR(
+    const FullyConnectedAttributes& attr,
+    const CreationContext& creation_context, const OperationDef& op_def,
+    int batch_size, std::unique_ptr<GPUOperation>* ptr) {
   if (op_def.IsBatchSupported()) {
     ConvPowerVR conv;
     RETURN_IF_ERROR(CreateConvPowerVR(creation_context, op_def, attr, &conv));
@@ -58,13 +59,14 @@ Status SelectFullyConnectedPowerVR(const FullyConnectedAttributes& attr,
         CreateFullyConnected(creation_context, op_def, attr, &fc));
     *ptr = absl::make_unique<FullyConnected>(std::move(fc));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status SelectFullyConnectedMali(const FullyConnectedAttributes& attr,
-                                const CreationContext& creation_context,
-                                const OperationDef& op_def, int batch_size,
-                                std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectFullyConnectedMali(const FullyConnectedAttributes& attr,
+                                      const CreationContext& creation_context,
+                                      const OperationDef& op_def,
+                                      int batch_size,
+                                      std::unique_ptr<GPUOperation>* ptr) {
   if (op_def.IsBatchSupported()) {
     if (op_def.src_tensors[0].storage_type == TensorStorageType::BUFFER) {
       ConvBuffer1x1 conv;
@@ -82,13 +84,13 @@ Status SelectFullyConnectedMali(const FullyConnectedAttributes& attr,
         CreateFullyConnected(creation_context, op_def, attr, &fc));
     *ptr = absl::make_unique<FullyConnected>(std::move(fc));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status SelectFullyConnected(const FullyConnectedAttributes& attr,
-                            const CreationContext& creation_context,
-                            const OperationDef& op_def, int batch_size,
-                            std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectFullyConnected(const FullyConnectedAttributes& attr,
+                                  const CreationContext& creation_context,
+                                  const OperationDef& op_def, int batch_size,
+                                  std::unique_ptr<GPUOperation>* ptr) {
   switch (creation_context.device->vendor()) {
     case Vendor::QUALCOMM:
       return SelectFullyConnectedAdreno(attr, creation_context, op_def,
diff --git a/tensorflow/lite/delegates/gpu/cl/selectors/fully_connected_selector.h b/tensorflow/lite/delegates/gpu/cl/selectors/fully_connected_selector.h
index 023020b6041..4ae44490996 100644
--- a/tensorflow/lite/delegates/gpu/cl/selectors/fully_connected_selector.h
+++ b/tensorflow/lite/delegates/gpu/cl/selectors/fully_connected_selector.h
@@ -26,10 +26,10 @@ namespace tflite {
 namespace gpu {
 namespace cl {
 
-Status SelectFullyConnected(const FullyConnectedAttributes& attr,
-                            const CreationContext& creation_context,
-                            const OperationDef& op_def, int batch_size,
-                            std::unique_ptr<GPUOperation>* ptr);
+absl::Status SelectFullyConnected(const FullyConnectedAttributes& attr,
+                                  const CreationContext& creation_context,
+                                  const OperationDef& op_def, int batch_size,
+                                  std::unique_ptr<GPUOperation>* ptr);
 
 }  // namespace cl
 }  // namespace gpu
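
The fully-connected selectors above branch on op_def.IsBatchSupported(): with
batching they reuse a convolution kernel (ConvTexture, ConvPowerVR, or
ConvBuffer1x1), otherwise they build the dedicated FullyConnected kernel.
Schematically, with the types reduced to empty tags:

    #include <memory>

    #include "absl/status/status.h"

    struct OpBase { virtual ~OpBase() = default; };
    struct ConvLike : OpBase {};            // batched path
    struct FullyConnectedLike : OpBase {};  // single-batch path

    absl::Status SelectFCLike(bool batch_supported,
                              std::unique_ptr<OpBase>* ptr) {
      if (batch_supported) {
        *ptr = std::make_unique<ConvLike>();
      } else {
        *ptr = std::make_unique<FullyConnectedLike>();
      }
      return absl::OkStatus();
    }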
diff --git a/tensorflow/lite/delegates/gpu/cl/selectors/operation_selector.cc b/tensorflow/lite/delegates/gpu/cl/selectors/operation_selector.cc
index 2fcb90fc8d1..b0996aa53ea 100644
--- a/tensorflow/lite/delegates/gpu/cl/selectors/operation_selector.cc
+++ b/tensorflow/lite/delegates/gpu/cl/selectors/operation_selector.cc
@@ -36,6 +36,7 @@ namespace tflite {
 namespace gpu {
 namespace cl {
 namespace {
+
 bool IsWidthBroadcastedForSecondInput(
     const std::vector<Value<TensorRef<BHWC>>*>& inputs) {
   return inputs.size() == 2 &&
@@ -74,14 +75,14 @@ bool IsSuitableForWinograd4x4To6x6(const Convolution2DAttributes& attr,
   return suitable_attributes && recommended_channels && recommended_hw;
 }
 
-Status WinogradFromNode(const CreationContext& creation_context,
-                        const OperationDef& op_def, ModelHints hints,
-                        const BHWC& input_shape, const BHWC& output_shape,
-                        const Convolution2DAttributes& attr,
-                        GPUOperationsSubgraph* gpu_subgraph) {
+absl::Status WinogradFromNode(const CreationContext& creation_context,
+                              const OperationDef& op_def, ModelHints hints,
+                              const BHWC& input_shape, const BHWC& output_shape,
+                              const Convolution2DAttributes& attr,
+                              GPUOperationsSubgraph* gpu_subgraph) {
   if (!IsSuitableForWinograd4x4To6x6(attr, *creation_context.device,
                                      output_shape)) {
-    return UnimplementedError("No implementation for this case.");
+    return absl::UnimplementedError("No implementation for this case.");
   }
 
   const int tiles_x = IntegralDivideRoundUp(output_shape.w, 4);
@@ -140,18 +141,16 @@ Status WinogradFromNode(const CreationContext& creation_context,
   }
   RETURN_IF_ERROR(SelectWinograd36To4x4(creation_context, winograd_down_def,
                                         bias_copy, &winograd_down.operation));
-
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace
 
-Status GPUOperationFromNode(const CreationContext& creation_context,
-                            const OperationDef& op_def, ModelHints hints,
-                            const std::vector<Value<TensorRef<BHWC>>*>& inputs,
-                            const std::vector<Value<TensorRef<BHWC>>*>& outputs,
-                            const Node& node,
-                            GPUOperationsSubgraph* gpu_subgraph) {
+absl::Status GPUOperationFromNode(
+    const CreationContext& creation_context, const OperationDef& op_def,
+    ModelHints hints, const std::vector<Value<TensorRef<BHWC>>*>& inputs,
+    const std::vector<Value<TensorRef<BHWC>>*>& outputs, const Node& node,
+    GPUOperationsSubgraph* gpu_subgraph) {
   std::unique_ptr<GPUOperation>* gpu_op =
       InitSingleOpSubgraph(inputs, outputs, gpu_subgraph);
   auto op_type = OperationTypeFromString(node.operation.type);
@@ -183,7 +182,7 @@ Status GPUOperationFromNode(const CreationContext& creation_context,
           }
           SelectAdd(op_def, channels, output->tensor.shape.c, gpu_op);
         }
-        return OkStatus();
+        return absl::OkStatus();
       }
     }
     case OperationType::CONCAT: {
@@ -202,7 +201,7 @@ Status GPUOperationFromNode(const CreationContext& creation_context,
       if (WinogradFromNode(creation_context, op_def, hints, input_shape,
                            output_shape, attr, gpu_subgraph)
               .ok()) {
-        return OkStatus();
+        return absl::OkStatus();
       } else {
         gpu_op = InitSingleOpSubgraph(inputs, outputs, gpu_subgraph);
         return SelectConvolution(attr, output_shape, creation_context, op_def,
@@ -228,13 +227,13 @@ Status GPUOperationFromNode(const CreationContext& creation_context,
     }
     case OperationType::LSTM: {
       SelectLSTM(op_def, gpu_op);
-      return OkStatus();
+      return absl::OkStatus();
     }
     case OperationType::MAX_UNPOOLING_2D: {
       auto attr =
           absl::any_cast<MaxUnpooling2DAttributes>(node.operation.attributes);
       SelectMaxUnpooling(attr, op_def, gpu_op);
-      return OkStatus();
+      return absl::OkStatus();
     }
     case OperationType::MEAN: {
       auto attr = absl::any_cast<MeanAttributes>(node.operation.attributes);
@@ -256,24 +255,24 @@ Status GPUOperationFromNode(const CreationContext& creation_context,
               CreateElementwiseTwoInput(op_def, op_type, broadcast);
           *gpu_op =
               absl::make_unique<ElementwiseTwoInput>(std::move(operation));
-          return OkStatus();
+          return absl::OkStatus();
         } else {
-          return UnimplementedError(
-              "No support of multiply with more than 2 inputs");
+          return absl::UnimplementedError(
+              "Multiply with more than 2 inputs is not supported.");
         }
-        return OkStatus();
       }
     }
     case OperationType::PAD: {
       auto attr = absl::any_cast<PadAttributes>(node.operation.attributes);
       SelectPadding(attr, op_def, gpu_op);
-      return OkStatus();
+      return absl::OkStatus();
     }
     case OperationType::POOLING_2D: {
       auto attr =
           absl::any_cast<Pooling2DAttributes>(node.operation.attributes);
       SelectPooling(attr, op_def, gpu_op);
-      return OkStatus();
+      return absl::OkStatus();
     }
     case OperationType::PRELU: {
       auto attr = absl::any_cast<PReLUAttributes>(node.operation.attributes);
@@ -288,13 +287,13 @@ Status GPUOperationFromNode(const CreationContext& creation_context,
     case OperationType::RELU: {
       auto attr = absl::any_cast<ReLUAttributes>(node.operation.attributes);
       SelectReLU(creation_context, attr, op_def, gpu_op);
-      return OkStatus();
+      return absl::OkStatus();
     }
     case OperationType::RESHAPE: {
       const int src_channels = inputs[0]->tensor.shape.c;
       auto attr = absl::any_cast<ReshapeAttributes>(node.operation.attributes);
       SelectReshape(src_channels, attr.new_shape.c, op_def, gpu_op);
-      return OkStatus();
+      return absl::OkStatus();
     }
     case OperationType::RESIZE: {
       auto attr = absl::any_cast<Resize2DAttributes>(node.operation.attributes);
@@ -303,23 +302,23 @@ Status GPUOperationFromNode(const CreationContext& creation_context,
     case OperationType::SLICE: {
       auto attr = absl::any_cast<SliceAttributes>(node.operation.attributes);
       SelectStridedSlice(attr, op_def, gpu_op);
-      return OkStatus();
+      return absl::OkStatus();
     }
     case OperationType::SOFTMAX: {
       SelectSoftmax(inputs[0]->tensor.shape, op_def, gpu_op);
-      return OkStatus();
+      return absl::OkStatus();
     }
     case OperationType::SPACE_TO_DEPTH: {
       auto attr =
           absl::any_cast<SpaceToDepthAttributes>(node.operation.attributes);
       SelectSpaceToDepth(attr, op_def, gpu_op);
-      return OkStatus();
+      return absl::OkStatus();
     }
     case OperationType::TRANSPOSE: {
       auto attr =
           absl::any_cast<TransposeAttributes>(node.operation.attributes);
       SelectTranspose(attr, op_def, gpu_op);
-      return OkStatus();
+      return absl::OkStatus();
     }
     case OperationType::ABS:
     case OperationType::COS:
@@ -335,7 +334,7 @@ Status GPUOperationFromNode(const CreationContext& creation_context,
       ElementwiseOneInput operation =
           CreateElementwiseOneInput(op_def, op_type);
       *gpu_op = absl::make_unique<ElementwiseOneInput>(std::move(operation));
-      return OkStatus();
+      return absl::OkStatus();
     }
     case OperationType::DIV:
     case OperationType::MAXIMUM:
@@ -352,7 +351,7 @@ Status GPUOperationFromNode(const CreationContext& creation_context,
       ElementwiseTwoInput operation = CreateElementwiseTwoInput(
           creation_context, op_def, op_type, broadcast, attr);
       *gpu_op = absl::make_unique<ElementwiseTwoInput>(std::move(operation));
-      return OkStatus();
+      return absl::OkStatus();
     }
     default:
       return SelectDefault(creation_context, op_def, hints, inputs, outputs,
diff --git a/tensorflow/lite/delegates/gpu/cl/selectors/operation_selector.h b/tensorflow/lite/delegates/gpu/cl/selectors/operation_selector.h
index bcb46c1e0c4..dd09c16dad0 100644
--- a/tensorflow/lite/delegates/gpu/cl/selectors/operation_selector.h
+++ b/tensorflow/lite/delegates/gpu/cl/selectors/operation_selector.h
@@ -29,12 +29,11 @@ namespace tflite {
 namespace gpu {
 namespace cl {
 
-Status GPUOperationFromNode(const CreationContext& creation_context,
-                            const OperationDef& op_def, ModelHints hints,
-                            const std::vector<Value<TensorRef<BHWC>>*>& inputs,
-                            const std::vector<Value<TensorRef<BHWC>>*>& outputs,
-                            const Node& node,
-                            GPUOperationsSubgraph* gpu_subgraph);
+absl::Status GPUOperationFromNode(
+    const CreationContext& creation_context, const OperationDef& op_def,
+    ModelHints hints, const std::vector<Value<TensorRef<BHWC>>*>& inputs,
+    const std::vector<Value<TensorRef<BHWC>>*>& outputs, const Node& node,
+    GPUOperationsSubgraph* gpu_subgraph);
 
 }  // namespace cl
 }  // namespace gpu
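
One control-flow detail of GPUOperationFromNode is worth calling out: the
Winograd path is attempted first and its status is inspected with .ok() rather
than propagated, so a non-ok result silently falls back to the generic
convolution selector. A condensed sketch of that flow (both helpers are
placeholders):

    #include "absl/status/status.h"

    // Placeholders for WinogradFromNode and SelectConvolution.
    absl::Status TryWinograd(bool suitable) {
      if (suitable) return absl::OkStatus();
      return absl::UnimplementedError("unsuitable");
    }
    absl::Status SelectGenericConv() { return absl::OkStatus(); }

    absl::Status SelectConv(bool winograd_suitable) {
      if (TryWinograd(winograd_suitable).ok()) {
        return absl::OkStatus();  // Winograd subgraph was built
      }
      // Swallow the Winograd status; propagate the fallback's status instead.
      return SelectGenericConv();
    }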
diff --git a/tensorflow/lite/delegates/gpu/cl/selectors/simple_selectors.cc b/tensorflow/lite/delegates/gpu/cl/selectors/simple_selectors.cc
index ff26a3be601..44a88165e4c 100644
--- a/tensorflow/lite/delegates/gpu/cl/selectors/simple_selectors.cc
+++ b/tensorflow/lite/delegates/gpu/cl/selectors/simple_selectors.cc
@@ -59,14 +59,14 @@ void SelectReLU(const CreationContext& creation_context,
   *ptr = absl::make_unique<ReLU>(std::move(relu));
 }
 
-Status SelectPReLU(const PReLUAttributes& attr,
-                   const CreationContext& creation_context,
-                   const OperationDef& op_def,
-                   std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectPReLU(const PReLUAttributes& attr,
+                         const CreationContext& creation_context,
+                         const OperationDef& op_def,
+                         std::unique_ptr<GPUOperation>* ptr) {
   PReLU operation;
   RETURN_IF_ERROR(CreatePReLU(creation_context, op_def, attr, &operation));
   *ptr = absl::make_unique<PReLU>(std::move(operation));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 void SelectPooling(const Pooling2DAttributes& attr, const OperationDef& op_def,
@@ -88,31 +88,32 @@ void SelectAdd(const OperationDef& op_def, const std::vector<int>& channels,
   *ptr = absl::make_unique<Add>(std::move(operation));
 }
 
-Status SelectResize(const Resize2DAttributes& attr, const OperationDef& op_def,
-                    std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectResize(const Resize2DAttributes& attr,
+                          const OperationDef& op_def,
+                          std::unique_ptr<GPUOperation>* ptr) {
   Resize operation = CreateResize(op_def, attr);
   *ptr = absl::make_unique<Resize>(std::move(operation));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status SelectConcat(const ConcatAttributes& attr,
-                    const std::vector<int>& channels,
-                    const OperationDef& op_def,
-                    std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectConcat(const ConcatAttributes& attr,
+                          const std::vector<int>& channels,
+                          const OperationDef& op_def,
+                          std::unique_ptr<GPUOperation>* ptr) {
   switch (attr.axis) {
     case Axis::CHANNELS: {
       ConcatZ operation = CreateConcatZ(op_def, channels);
       *ptr = absl::make_unique<ConcatZ>(std::move(operation));
-      return OkStatus();
+      return absl::OkStatus();
     }
     case Axis::WIDTH:
     case Axis::HEIGHT: {
       ConcatXY operation = CreateConcatXY(op_def, attr, channels.size());
       *ptr = absl::make_unique<ConcatXY>(std::move(operation));
-      return OkStatus();
+      return absl::OkStatus();
     }
     default:
-      return UnimplementedError("No concat for this axis.");
+      return absl::UnimplementedError("No concat for this axis.");
   }
 }
 
@@ -147,36 +148,36 @@ void SelectStridedSlice(const SliceAttributes& attr, const OperationDef& op_def,
   *ptr = absl::make_unique<StridedSlice>(std::move(operation));
 }
 
-Status SelectMean(const MeanAttributes& attr, const OperationDef& op_def,
-                  std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectMean(const MeanAttributes& attr, const OperationDef& op_def,
+                        std::unique_ptr<GPUOperation>* ptr) {
   if (attr.dims != std::set<Axis>({Axis::HEIGHT, Axis::WIDTH})) {
-    return UnimplementedError("Mean operation supports only HW plane");
+    return absl::UnimplementedError("Mean operation supports only HW plane");
   }
   Mean operation = CreateMean(op_def);
   *ptr = absl::make_unique<Mean>(std::move(operation));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status SelectMultiplyScalar(const MultiplyAttributes& attr,
-                            const CreationContext& creation_context,
-                            const OperationDef& op_def,
-                            std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectMultiplyScalar(const MultiplyAttributes& attr,
+                                  const CreationContext& creation_context,
+                                  const OperationDef& op_def,
+                                  std::unique_ptr<GPUOperation>* ptr) {
   MultiplyAdd operation;
   RETURN_IF_ERROR(
       CreateMultiplyAdd(creation_context, op_def, attr, &operation));
   *ptr = absl::make_unique<MultiplyAdd>(std::move(operation));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status SelectBroadcastAdd(const AddAttributes& attr,
-                          const CreationContext& creation_context,
-                          const OperationDef& op_def,
-                          std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectBroadcastAdd(const AddAttributes& attr,
+                                const CreationContext& creation_context,
+                                const OperationDef& op_def,
+                                std::unique_ptr<GPUOperation>* ptr) {
   MultiplyAdd operation;
   RETURN_IF_ERROR(
       CreateMultiplyAdd(creation_context, op_def, attr, &operation));
   *ptr = absl::make_unique<MultiplyAdd>(std::move(operation));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 void SelectSoftmax(const BHWC& shape, const OperationDef& op_def,
@@ -197,18 +198,18 @@ void SelectTranspose(const TransposeAttributes& attr,
   *ptr = absl::make_unique<Transpose>(std::move(operation));
 }
 
-Status SelectWinograd4x4To36(const CreationContext& creation_context,
-                             const Padding2D& padding,
-                             const OperationDef& op_def,
-                             std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectWinograd4x4To36(const CreationContext& creation_context,
+                                   const Padding2D& padding,
+                                   const OperationDef& op_def,
+                                   std::unique_ptr<GPUOperation>* ptr) {
   Winograd4x4To36 operation;
   RETURN_IF_ERROR(
       CreateWinograd4x4To36(creation_context, op_def, padding, &operation));
   *ptr = absl::make_unique<Winograd4x4To36>(std::move(operation));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status SelectWinograd36To4x4(
+absl::Status SelectWinograd36To4x4(
     const CreationContext& creation_context, const OperationDef& op_def,
     const ::tflite::gpu::Tensor<Linear, DataType::FLOAT32>& biases,
     std::unique_ptr<GPUOperation>* ptr) {
@@ -216,18 +217,18 @@ Status SelectWinograd36To4x4(
   RETURN_IF_ERROR(
       CreateWinograd36To4x4(creation_context, op_def, biases, &operation));
   *ptr = absl::make_unique<Winograd36To4x4>(std::move(operation));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status SelectQuantizeAndDequantize(const QuantizeAndDequantizeAttributes& attr,
-                                   const CreationContext& creation_context,
-                                   const OperationDef& op_def,
-                                   std::unique_ptr<GPUOperation>* ptr) {
+absl::Status SelectQuantizeAndDequantize(
+    const QuantizeAndDequantizeAttributes& attr,
+    const CreationContext& creation_context, const OperationDef& op_def,
+    std::unique_ptr<GPUOperation>* ptr) {
   QuantizeAndDequantize operation;
   RETURN_IF_ERROR(
       CreateQuantizeAndDequantize(creation_context, op_def, attr, &operation));
   *ptr = absl::make_unique<QuantizeAndDequantize>(std::move(operation));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
diff --git a/tensorflow/lite/delegates/gpu/cl/selectors/simple_selectors.h b/tensorflow/lite/delegates/gpu/cl/selectors/simple_selectors.h
index d9a5365fc9e..118701fe9b0 100644
--- a/tensorflow/lite/delegates/gpu/cl/selectors/simple_selectors.h
+++ b/tensorflow/lite/delegates/gpu/cl/selectors/simple_selectors.h
@@ -33,10 +33,10 @@ void SelectReLU(const CreationContext& creation_context,
                 const ReLUAttributes& attr, const OperationDef& op_def,
                 std::unique_ptr<GPUOperation>* ptr);
 
-Status SelectPReLU(const PReLUAttributes& attr,
-                   const CreationContext& creation_context,
-                   const OperationDef& op_def,
-                   std::unique_ptr<GPUOperation>* ptr);
+absl::Status SelectPReLU(const PReLUAttributes& attr,
+                         const CreationContext& creation_context,
+                         const OperationDef& op_def,
+                         std::unique_ptr<GPUOperation>* ptr);
 
 void SelectPooling(const Pooling2DAttributes& attr, const OperationDef& op_def,
                    std::unique_ptr<GPUOperation>* ptr);
@@ -48,13 +48,14 @@ void SelectMaxUnpooling(const MaxUnpooling2DAttributes& attr,
 void SelectAdd(const OperationDef& op_def, const std::vector<int>& channels,
                int dst_channels, std::unique_ptr<GPUOperation>* ptr);
 
-Status SelectResize(const Resize2DAttributes& attr, const OperationDef& op_def,
-                    std::unique_ptr<GPUOperation>* ptr);
+absl::Status SelectResize(const Resize2DAttributes& attr,
+                          const OperationDef& op_def,
+                          std::unique_ptr<GPUOperation>* ptr);
 
-Status SelectConcat(const ConcatAttributes& attr,
-                    const std::vector<int>& channels,
-                    const OperationDef& op_def,
-                    std::unique_ptr<GPUOperation>* ptr);
+absl::Status SelectConcat(const ConcatAttributes& attr,
+                          const std::vector<int>& channels,
+                          const OperationDef& op_def,
+                          std::unique_ptr<GPUOperation>* ptr);
 
 void SelectReshape(int src_channels, int dst_channels,
                    const OperationDef& op_def,
@@ -66,18 +67,18 @@ void SelectPadding(const PadAttributes& attr, const OperationDef& op_def,
 void SelectStridedSlice(const SliceAttributes& attr, const OperationDef& op_def,
                         std::unique_ptr<GPUOperation>* ptr);
 
-Status SelectMean(const MeanAttributes& attr, const OperationDef& op_def,
-                  std::unique_ptr<GPUOperation>* ptr);
+absl::Status SelectMean(const MeanAttributes& attr, const OperationDef& op_def,
+                        std::unique_ptr<GPUOperation>* ptr);
 
-Status SelectMultiplyScalar(const MultiplyAttributes& attr,
-                            const CreationContext& creation_context,
-                            const OperationDef& op_def,
-                            std::unique_ptr<GPUOperation>* ptr);
+absl::Status SelectMultiplyScalar(const MultiplyAttributes& attr,
+                                  const CreationContext& creation_context,
+                                  const OperationDef& op_def,
+                                  std::unique_ptr<GPUOperation>* ptr);
 
-Status SelectBroadcastAdd(const AddAttributes& attr,
-                          const CreationContext& creation_context,
-                          const OperationDef& op_def,
-                          std::unique_ptr<GPUOperation>* ptr);
+absl::Status SelectBroadcastAdd(const AddAttributes& attr,
+                                const CreationContext& creation_context,
+                                const OperationDef& op_def,
+                                std::unique_ptr<GPUOperation>* ptr);
 
 void SelectSoftmax(const BHWC& shape, const OperationDef& op_def,
                    std::unique_ptr<GPUOperation>* ptr);
@@ -90,20 +91,20 @@ void SelectTranspose(const TransposeAttributes& attr,
                      const OperationDef& op_def,
                      std::unique_ptr<GPUOperation>* ptr);
 
-Status SelectWinograd4x4To36(const CreationContext& creation_context,
-                             const Padding2D& padding,
-                             const OperationDef& op_def,
-                             std::unique_ptr<GPUOperation>* ptr);
+absl::Status SelectWinograd4x4To36(const CreationContext& creation_context,
+                                   const Padding2D& padding,
+                                   const OperationDef& op_def,
+                                   std::unique_ptr<GPUOperation>* ptr);
 
-Status SelectWinograd36To4x4(
+absl::Status SelectWinograd36To4x4(
     const CreationContext& creation_context, const OperationDef& op_def,
     const ::tflite::gpu::Tensor<Linear, DataType::FLOAT32>& biases,
     std::unique_ptr<GPUOperation>* ptr);
 
-Status SelectQuantizeAndDequantize(const QuantizeAndDequantizeAttributes& attr,
-                                   const CreationContext& creation_context,
-                                   const OperationDef& op_def,
-                                   std::unique_ptr<GPUOperation>* ptr);
+absl::Status SelectQuantizeAndDequantize(
+    const QuantizeAndDequantizeAttributes& attr,
+    const CreationContext& creation_context, const OperationDef& op_def,
+    std::unique_ptr<GPUOperation>* ptr);
 
 }  // namespace cl
 }  // namespace gpu
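
A detail of simple_selectors.h worth noting: selectors that cannot fail
(SelectPooling, SelectSoftmax, SelectReLU, and so on) keep a void signature,
while those that validate attributes or call a fallible Create* helper
generally return absl::Status (SelectMean, SelectConcat, SelectPReLU, and so
on). A sketch of the fallible variant, with an invented Mean-like tag type:

    #include <memory>

    #include "absl/status/status.h"

    struct OpBase { virtual ~OpBase() = default; };
    struct MeanLike : OpBase {};

    // Fallible selector: validate first, then construct and hand off ownership.
    absl::Status SelectMeanLike(bool reduces_hw_only,
                                std::unique_ptr<OpBase>* ptr) {
      if (!reduces_hw_only) {
        return absl::UnimplementedError("Mean is only supported over HW plane");
      }
      *ptr = std::make_unique<MeanLike>();
      return absl::OkStatus();
    }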
diff --git a/tensorflow/lite/delegates/gpu/cl/storage_type_util.cc b/tensorflow/lite/delegates/gpu/cl/storage_type_util.cc
index 26eb3ad3538..f6201fa92ca 100644
--- a/tensorflow/lite/delegates/gpu/cl/storage_type_util.cc
+++ b/tensorflow/lite/delegates/gpu/cl/storage_type_util.cc
@@ -24,6 +24,7 @@ limitations under the License.
 namespace tflite {
 namespace gpu {
 namespace cl {
+
 bool CanCreateTensorWithShape(const CLContext& context, const CLDevice& device,
                               const BHWDC& shape,
                               const TensorDescriptor& descriptor) {
diff --git a/tensorflow/lite/delegates/gpu/cl/tensor.cc b/tensorflow/lite/delegates/gpu/cl/tensor.cc
index e9de22c6dc0..308e1b69205 100644
--- a/tensorflow/lite/delegates/gpu/cl/tensor.cc
+++ b/tensorflow/lite/delegates/gpu/cl/tensor.cc
@@ -27,9 +27,10 @@ namespace tflite {
 namespace gpu {
 namespace cl {
 namespace {
-Status CreateImageBufferFromBuffer(const CLContext& context, cl_mem memory,
-                                   enum DataType data_type, int width,
-                                   cl_mem* result) {
+
+absl::Status CreateImageBufferFromBuffer(const CLContext& context,
+                                         cl_mem memory, enum DataType data_type,
+                                         int width, cl_mem* result) {
   cl_image_format format;
   cl_image_desc desc;
   std::memset(&desc, 0, sizeof(desc));
@@ -44,16 +45,17 @@ Status CreateImageBufferFromBuffer(const CLContext& context, cl_mem memory,
   *result = clCreateImage(context.context(), CL_MEM_READ_WRITE, &format, &desc,
                           nullptr, &error);
   if (error != CL_SUCCESS) {
-    return UnknownError(
+    return absl::UnknownError(
         absl::StrCat("Failed to create Texture2D (clCreateImage)",
                      CLErrorCodeToString(error)));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CreateTensor(const CLContext& context, const CLDevice& device,
-                    const BHWDC& shape, const TensorDescriptor& descriptor,
-                    cl_mem memory, Tensor* result) {
+absl::Status CreateTensor(const CLContext& context, const CLDevice& device,
+                          const BHWDC& shape,
+                          const TensorDescriptor& descriptor, cl_mem memory,
+                          Tensor* result) {
   const bool memory_owner = memory == nullptr;
   if (memory_owner) {
     CLMemory mem;
@@ -72,8 +74,9 @@ Status CreateTensor(const CLContext& context, const CLDevice& device,
   } else {
     *result = Tensor(memory, memory_owner, shape, descriptor);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
+
 }  // namespace
 
 Tensor::Tensor(cl_mem memory, bool memory_owner, const BHWC& shape,
@@ -156,41 +159,48 @@ int3 Tensor::GetFullTensorRegion() const {
   }
 }
 
-Status Tensor::IsValid(const BHWC& shape) const {
+absl::Status Tensor::IsValid(const BHWC& shape) const {
   if (shape.b != shape_.b) {
-    return InvalidArgumentError("Shape batch does not match tensor batch");
+    return absl::InvalidArgumentError(
+        "Shape batch does not match tensor batch");
   }
   if (shape.w != shape_.w) {
-    return InvalidArgumentError("Shape width does not match tensor width");
+    return absl::InvalidArgumentError(
+        "Shape width does not match tensor width");
   }
   if (shape.h != shape_.h) {
-    return InvalidArgumentError("Shape height does not match tensor height");
+    return absl::InvalidArgumentError(
+        "Shape height does not match tensor height");
   }
   if (shape.c != shape_.c) {
-    return InvalidArgumentError(
+    return absl::InvalidArgumentError(
         "Shape channels does not match tensor channels");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status Tensor::IsValid(const BHWDC& shape) const {
+absl::Status Tensor::IsValid(const BHWDC& shape) const {
   if (shape.b != shape_.b) {
-    return InvalidArgumentError("Shape batch does not match tensor batch");
+    return absl::InvalidArgumentError(
+        "Shape batch does not match tensor batch");
   }
   if (shape.w != shape_.w) {
-    return InvalidArgumentError("Shape width does not match tensor width");
+    return absl::InvalidArgumentError(
+        "Shape width does not match tensor width");
   }
   if (shape.h != shape_.h) {
-    return InvalidArgumentError("Shape height does not match tensor height");
+    return absl::InvalidArgumentError(
+        "Shape height does not match tensor height");
   }
   if (shape.d != shape_.d) {
-    return InvalidArgumentError("Shape depth does not match tensor depth");
+    return absl::InvalidArgumentError(
+        "Shape depth does not match tensor depth");
   }
   if (shape.c != shape_.c) {
-    return InvalidArgumentError(
+    return absl::InvalidArgumentError(
         "Shape channels does not match tensor channels");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 int Tensor::GetChannelsAlignment() const {
@@ -230,8 +240,8 @@ cl_mem Tensor::GetMemoryPtr() const {
 
 cl_mem Tensor::GetMemoryPtrForWriting() const { return memory_; }
 
-Status Tensor::WriteDataBHWDC(absl::Span<const float> in,
-                              CLCommandQueue* queue) {
+absl::Status Tensor::WriteDataBHWDC(absl::Span<const float> in,
+                                    CLCommandQueue* queue) {
   void* data_ptr = nullptr;
   const int aligned_channels = GetAlignedChannels();
   const int elements_count =
@@ -263,24 +273,26 @@ Status Tensor::WriteDataBHWDC(absl::Span<const float> in,
           queue->EnqueueWriteImage(memory_, GetFullTensorRegion(), data_ptr));
       break;
     default:
-      return InternalError("Unsupported tensor storage type");
+      return absl::InternalError("Unsupported tensor storage type");
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status Tensor::WriteData(CLCommandQueue* queue, const TensorFloat32& src) {
+absl::Status Tensor::WriteData(CLCommandQueue* queue,
+                               const TensorFloat32& src) {
   RETURN_IF_ERROR(IsValid(src.shape));
   return WriteDataBHWDC(absl::MakeConstSpan(src.data), queue);
 }
 
-Status Tensor::WriteData(CLCommandQueue* queue, const Tensor5DFloat32& src) {
+absl::Status Tensor::WriteData(CLCommandQueue* queue,
+                               const Tensor5DFloat32& src) {
   RETURN_IF_ERROR(IsValid(src.shape));
   return WriteDataBHWDC(absl::MakeConstSpan(src.data), queue);
 }
 
-Status Tensor::ReadDataBHWDC(absl::Span<float> out,
-                             CLCommandQueue* queue) const {
+absl::Status Tensor::ReadDataBHWDC(absl::Span<float> out,
+                                   CLCommandQueue* queue) const {
   void* data_ptr = nullptr;
   const int aligned_channels = GetAlignedChannels();
   const int elements_count =
@@ -309,7 +321,7 @@ Status Tensor::ReadDataBHWDC(absl::Span<float> out,
           queue->EnqueueReadImage(memory_, GetFullTensorRegion(), data_ptr));
       break;
     default:
-      return InternalError("Unsupported tensor storage type");
+      return absl::InternalError("Unsupported tensor storage type");
   }
 
   if (descriptor_.data_type == DataType::FLOAT32) {
@@ -318,57 +330,62 @@ Status Tensor::ReadDataBHWDC(absl::Span<float> out,
     DataToBHWDC(absl::MakeConstSpan(data_h.data(), data_h.size()), out);
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status Tensor::ReadData(CLCommandQueue* queue, TensorFloat32* dst) const {
+absl::Status Tensor::ReadData(CLCommandQueue* queue, TensorFloat32* dst) const {
   RETURN_IF_ERROR(IsValid(dst->shape));
   return ReadDataBHWDC(absl::MakeSpan(dst->data), queue);
 }
 
-Status Tensor::ReadData(CLCommandQueue* queue, Tensor5DFloat32* dst) const {
+absl::Status Tensor::ReadData(CLCommandQueue* queue,
+                              Tensor5DFloat32* dst) const {
   RETURN_IF_ERROR(IsValid(dst->shape));
   return ReadDataBHWDC(absl::MakeSpan(dst->data), queue);
 }
 
-Status CreateTensor(const CLContext& context, const CLDevice& device,
-                    const BHWC& shape, const TensorDescriptor& descriptor,
-                    Tensor* result) {
+absl::Status CreateTensor(const CLContext& context, const CLDevice& device,
+                          const BHWC& shape, const TensorDescriptor& descriptor,
+                          Tensor* result) {
   const BHWDC shape5D(shape.b, shape.h, shape.w, 1, shape.c);
   return CreateTensor(context, device, shape5D, descriptor, nullptr, result);
 }
 
-Status CreateTensor(const CLContext& context, const CLDevice& device,
-                    const BHWDC& shape, const TensorDescriptor& descriptor,
-                    Tensor* result) {
+absl::Status CreateTensor(const CLContext& context, const CLDevice& device,
+                          const BHWDC& shape,
+                          const TensorDescriptor& descriptor, Tensor* result) {
   return CreateTensor(context, device, shape, descriptor, nullptr, result);
 }
 
-Status CreateSharedTensor(const CLContext& context, const CLDevice& device,
-                          cl_mem memory, const BHWC& shape,
-                          const TensorDescriptor& descriptor, Tensor* result) {
+absl::Status CreateSharedTensor(const CLContext& context,
+                                const CLDevice& device, cl_mem memory,
+                                const BHWC& shape,
+                                const TensorDescriptor& descriptor,
+                                Tensor* result) {
   const BHWDC shape5D(shape.b, shape.h, shape.w, 1, shape.c);
   return CreateTensor(context, device, shape5D, descriptor, memory, result);
 }
 
-Status CreateSharedTensor(const CLContext& context, const CLDevice& device,
-                          cl_mem memory, const BHWDC& shape,
-                          const TensorDescriptor& descriptor, Tensor* result) {
+absl::Status CreateSharedTensor(const CLContext& context,
+                                const CLDevice& device, cl_mem memory,
+                                const BHWDC& shape,
+                                const TensorDescriptor& descriptor,
+                                Tensor* result) {
   return CreateTensor(context, device, shape, descriptor, memory, result);
 }
 
-Status AllocateTensorMemory(const CLContext& context, const CLDevice& device,
-                            const BHWC& shape,
-                            const TensorDescriptor& descriptor,
-                            CLMemory* result) {
+absl::Status AllocateTensorMemory(const CLContext& context,
+                                  const CLDevice& device, const BHWC& shape,
+                                  const TensorDescriptor& descriptor,
+                                  CLMemory* result) {
   const BHWDC shape5D(shape.b, shape.h, shape.w, 1, shape.c);
   return AllocateTensorMemory(context, device, shape5D, descriptor, result);
 }
 
-Status AllocateTensorMemory(const CLContext& context, const CLDevice& device,
-                            const BHWDC& shape,
-                            const TensorDescriptor& descriptor,
-                            CLMemory* result) {
+absl::Status AllocateTensorMemory(const CLContext& context,
+                                  const CLDevice& device, const BHWDC& shape,
+                                  const TensorDescriptor& descriptor,
+                                  CLMemory* result) {
   const int slices = IntegralDivideRoundUp(shape.c, 4);
   switch (descriptor.storage_type) {
     case TensorStorageType::BUFFER:
@@ -379,12 +396,12 @@ Status AllocateTensorMemory(const CLContext& context, const CLDevice& device,
       cl_mem memory = clCreateBuffer(context.context(), CL_MEM_READ_WRITE,
                                      data_size, nullptr, &error_code);
       if (!memory) {
-        return UnknownError(
+        return absl::UnknownError(
             absl::StrCat("Failed to allocate device memory with clCreateBuffer",
                          CLErrorCodeToString(error_code)));
       }
       *result = CLMemory(memory, true);
-      return OkStatus();
+      return absl::OkStatus();
     }
     case TensorStorageType::TEXTURE_2D: {
       cl_image_desc desc;
@@ -406,13 +423,13 @@ Status AllocateTensorMemory(const CLContext& context, const CLDevice& device,
       cl_mem memory = CreateImage2DLegacy(context.context(), CL_MEM_READ_WRITE,
                                           &format, &desc, nullptr, &error_code);
       if (error_code != CL_SUCCESS) {
-        return UnknownError(
+        return absl::UnknownError(
             absl::StrCat("Failed to create Texture2D (clCreateImage)",
                          CLErrorCodeToString(error_code)));
       }
 
       *result = CLMemory(memory, true);
-      return OkStatus();
+      return absl::OkStatus();
     }
     case TensorStorageType::TEXTURE_3D: {
       cl_image_desc desc;
@@ -434,13 +451,13 @@ Status AllocateTensorMemory(const CLContext& context, const CLDevice& device,
       cl_mem memory = CreateImage3DLegacy(context.context(), CL_MEM_READ_WRITE,
                                           &format, &desc, nullptr, &error_code);
       if (error_code != CL_SUCCESS) {
-        return UnknownError(
+        return absl::UnknownError(
             absl::StrCat("Failed to create Texture3D (clCreateImage)",
                          CLErrorCodeToString(error_code)));
       }
 
       *result = CLMemory(memory, true);
-      return OkStatus();
+      return absl::OkStatus();
     }
     case TensorStorageType::TEXTURE_ARRAY: {
       cl_image_desc desc;
@@ -463,18 +480,18 @@ Status AllocateTensorMemory(const CLContext& context, const CLDevice& device,
       cl_mem memory = clCreateImage(context.context(), CL_MEM_READ_WRITE,
                                     &format, &desc, nullptr, &error_code);
       if (error_code != CL_SUCCESS) {
-        return UnknownError(
+        return absl::UnknownError(
             absl::StrCat("Failed to create TextureArray (clCreateImage)",
                          CLErrorCodeToString(error_code)));
       }
 
       *result = CLMemory(memory, true);
-      return OkStatus();
+      return absl::OkStatus();
     }
 
     case TensorStorageType::SINGLE_TEXTURE_2D: {
       if (slices != 1) {
-        return InvalidArgumentError(absl::StrCat(
+        return absl::InvalidArgumentError(absl::StrCat(
             "SINGLE_TEXTURE_2D support only channels in range [1-4], but ",
             shape.c, "was provided"));
       }
@@ -495,7 +512,7 @@ Status AllocateTensorMemory(const CLContext& context, const CLDevice& device,
         format.image_channel_data_type =
             ToImageChannelType(descriptor.data_type);
       } else {
-        return InvalidArgumentError(absl::StrCat(
+        return absl::InvalidArgumentError(absl::StrCat(
             "This device doesn't support ", shape.c, "-channel textures."));
       }
 
@@ -503,17 +520,17 @@ Status AllocateTensorMemory(const CLContext& context, const CLDevice& device,
       cl_mem memory = CreateImage2DLegacy(context.context(), CL_MEM_READ_WRITE,
                                           &format, &desc, nullptr, &error_code);
       if (error_code != CL_SUCCESS) {
-        return UnknownError(
+        return absl::UnknownError(
             absl::StrCat("Failed to create Texture2D (clCreateImage)",
                          CLErrorCodeToString(error_code)));
       }
 
       *result = CLMemory(memory, true);
-      return OkStatus();
+      return absl::OkStatus();
     }
 
     default:
-      return InternalError("Unsupported tensor storage type");
+      return absl::InternalError("Unsupported tensor storage type");
   }
 }
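
Note on the conversion pattern in tensor.cc above: every hunk applies the same three-part recipe — return absl::Status, propagate failures with RETURN_IF_ERROR, and finish with absl::OkStatus(). A minimal self-contained sketch of that recipe follows; the RETURN_IF_ERROR definition here is an illustrative stand-in for the macro in tensorflow/lite/delegates/gpu/common/status.h:

#include <iostream>

#include "absl/status/status.h"

// Illustrative stand-in for the macro defined in
// tensorflow/lite/delegates/gpu/common/status.h.
#define RETURN_IF_ERROR(expr)            \
  do {                                   \
    const absl::Status _status = (expr); \
    if (!_status.ok()) return _status;   \
  } while (0)

absl::Status ValidateChannels(int channels) {
  if (channels < 1 || channels > 4) {
    return absl::InvalidArgumentError("channels must be in [1, 4]");
  }
  return absl::OkStatus();
}

absl::Status CreateObject(int channels) {
  RETURN_IF_ERROR(ValidateChannels(channels));  // early-returns on failure
  return absl::OkStatus();
}

int main() {
  std::cout << CreateObject(5).message() << "\n";  // channels must be in [1, 4]
}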
 
diff --git a/tensorflow/lite/delegates/gpu/cl/tensor.h b/tensorflow/lite/delegates/gpu/cl/tensor.h
index 34a45436386..a27c54a74e5 100644
--- a/tensorflow/lite/delegates/gpu/cl/tensor.h
+++ b/tensorflow/lite/delegates/gpu/cl/tensor.h
@@ -87,20 +87,22 @@ class Tensor {
   // memory ptr.
   cl_mem GetMemoryPtrForWriting() const;
 
-  Status WriteData(CLCommandQueue* queue, const TensorFloat32& src);
-  Status WriteData(CLCommandQueue* queue, const Tensor5DFloat32& src);
-  Status ReadData(CLCommandQueue* queue, TensorFloat32* dst) const;
-  Status ReadData(CLCommandQueue* queue, Tensor5DFloat32* dst) const;
+  absl::Status WriteData(CLCommandQueue* queue, const TensorFloat32& src);
+  absl::Status WriteData(CLCommandQueue* queue, const Tensor5DFloat32& src);
+  absl::Status ReadData(CLCommandQueue* queue, TensorFloat32* dst) const;
+  absl::Status ReadData(CLCommandQueue* queue, Tensor5DFloat32* dst) const;
 
  private:
-  Status IsValid(const BHWC& shape) const;
-  Status IsValid(const BHWDC& shape) const;
+  absl::Status IsValid(const BHWC& shape) const;
+  absl::Status IsValid(const BHWDC& shape) const;
 
   int GetChannelsAlignment() const;
   int GetAlignedChannels() const;
 
-  Status WriteDataBHWDC(absl::Span<const float> in, CLCommandQueue* queue);
-  Status ReadDataBHWDC(absl::Span<float> out, CLCommandQueue* queue) const;
+  absl::Status WriteDataBHWDC(absl::Span<const float> in,
+                              CLCommandQueue* queue);
+  absl::Status ReadDataBHWDC(absl::Span<float> out,
+                             CLCommandQueue* queue) const;
 
   template <typename T>
   void DataFromBHWDC(absl::Span<const float> src, absl::Span<T> dst) const;
@@ -145,31 +147,35 @@ class Tensor {
 
 using TensorPtr = std::shared_ptr<Tensor>;
 
-Status AllocateTensorMemory(const CLContext& context, const CLDevice& device,
-                            const BHWC& shape,
-                            const TensorDescriptor& descriptor,
-                            CLMemory* result);
+absl::Status AllocateTensorMemory(const CLContext& context,
+                                  const CLDevice& device, const BHWC& shape,
+                                  const TensorDescriptor& descriptor,
+                                  CLMemory* result);
 
-Status AllocateTensorMemory(const CLContext& context, const CLDevice& device,
-                            const BHWDC& shape,
-                            const TensorDescriptor& descriptor,
-                            CLMemory* result);
+absl::Status AllocateTensorMemory(const CLContext& context,
+                                  const CLDevice& device, const BHWDC& shape,
+                                  const TensorDescriptor& descriptor,
+                                  CLMemory* result);
 
-Status CreateTensor(const CLContext& context, const CLDevice& device,
-                    const BHWC& shape, const TensorDescriptor& descriptor,
-                    Tensor* result);
+absl::Status CreateTensor(const CLContext& context, const CLDevice& device,
+                          const BHWC& shape, const TensorDescriptor& descriptor,
+                          Tensor* result);
 
-Status CreateTensor(const CLContext& context, const CLDevice& device,
-                    const BHWDC& shape, const TensorDescriptor& descriptor,
-                    Tensor* result);
-
-Status CreateSharedTensor(const CLContext& context, const CLDevice& device,
-                          cl_mem memory, const BHWC& shape,
+absl::Status CreateTensor(const CLContext& context, const CLDevice& device,
+                          const BHWDC& shape,
                           const TensorDescriptor& descriptor, Tensor* result);
 
-Status CreateSharedTensor(const CLContext& context, const CLDevice& device,
-                          cl_mem memory, const BHWDC& shape,
-                          const TensorDescriptor& descriptor, Tensor* result);
+absl::Status CreateSharedTensor(const CLContext& context,
+                                const CLDevice& device, cl_mem memory,
+                                const BHWC& shape,
+                                const TensorDescriptor& descriptor,
+                                Tensor* result);
+
+absl::Status CreateSharedTensor(const CLContext& context,
+                                const CLDevice& device, cl_mem memory,
+                                const BHWDC& shape,
+                                const TensorDescriptor& descriptor,
+                                Tensor* result);
 
 }  // namespace cl
 }  // namespace gpu
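
The CreateTensor/CreateSharedTensor pairs declared above differ only in who owns the underlying cl_mem: a null handle makes the tensor allocate and own its memory, while CreateSharedTensor wraps a caller-owned handle without taking ownership. A hypothetical sketch of that ownership rule, with plain heap memory standing in for cl_mem (a real wrapper would also delete copying and define a move, as the CL types do):

#include <cstdlib>

// owns == true:  the wrapper allocated the buffer and must release it.
// owns == false: the buffer is caller-owned (the shared case).
struct Buffer {
  void* mem = nullptr;
  bool owns = false;
  ~Buffer() {
    if (owns) std::free(mem);  // the CL analogue would be clReleaseMemObject
  }
};

Buffer CreateOwned(std::size_t bytes) {
  return Buffer{std::malloc(bytes), /*owns=*/true};
}

Buffer WrapShared(void* external) {
  return Buffer{external, /*owns=*/false};
}
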
diff --git a/tensorflow/lite/delegates/gpu/cl/tensor_test.cc b/tensorflow/lite/delegates/gpu/cl/tensor_test.cc
index 7c859c43e6e..99ba269cf60 100644
--- a/tensorflow/lite/delegates/gpu/cl/tensor_test.cc
+++ b/tensorflow/lite/delegates/gpu/cl/tensor_test.cc
@@ -30,8 +30,9 @@ namespace gpu {
 namespace cl {
 namespace {
 
-Status TensorGenericTest(const BHWC& shape, const TensorDescriptor& descriptor,
-                         Environment* env) {
+absl::Status TensorGenericTest(const BHWC& shape,
+                               const TensorDescriptor& descriptor,
+                               Environment* env) {
   TensorFloat32 tensor_cpu;
   tensor_cpu.shape = shape;
   tensor_cpu.data.resize(shape.DimensionsProduct());
@@ -53,15 +54,15 @@ Status TensorGenericTest(const BHWC& shape, const TensorDescriptor& descriptor,
 
   for (int i = 0; i < tensor_gpu.data.size(); ++i) {
     if (tensor_gpu.data[i] != tensor_cpu.data[i]) {
-      return InternalError("Wrong value.");
+      return absl::InternalError("Wrong value.");
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status Tensor5DGenericTest(const BHWDC& shape,
-                           const TensorDescriptor& descriptor,
-                           Environment* env) {
+absl::Status Tensor5DGenericTest(const BHWDC& shape,
+                                 const TensorDescriptor& descriptor,
+                                 Environment* env) {
   Tensor5DFloat32 tensor_cpu;
   tensor_cpu.shape = shape;
   tensor_cpu.data.resize(shape.DimensionsProduct());
@@ -83,14 +84,14 @@ Status Tensor5DGenericTest(const BHWDC& shape,
 
   for (int i = 0; i < tensor_gpu.data.size(); ++i) {
     if (tensor_gpu.data[i] != tensor_cpu.data[i]) {
-      return InternalError("Wrong value.");
+      return absl::InternalError("Wrong value.");
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status TensorTests(DataType data_type, TensorStorageType storage_type,
-                   Environment* env) {
+absl::Status TensorTests(DataType data_type, TensorStorageType storage_type,
+                         Environment* env) {
   RETURN_IF_ERROR(TensorGenericTest(
       BHWC(1, 6, 7, 3), {data_type, storage_type, Layout::HWC}, env));
   RETURN_IF_ERROR(TensorGenericTest(
@@ -125,7 +126,7 @@ Status TensorTests(DataType data_type, TensorStorageType storage_type,
       BHWDC(7, 6, 1, 3, 7), {data_type, storage_type, Layout::BHWDC}, env));
   RETURN_IF_ERROR(Tensor5DGenericTest(
       BHWDC(13, 7, 3, 4, 3), {data_type, storage_type, Layout::BHWDC}, env));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 TEST_F(OpenCLTest, BufferF32) {
diff --git a/tensorflow/lite/delegates/gpu/cl/testing/performance_profiling.cc b/tensorflow/lite/delegates/gpu/cl/testing/performance_profiling.cc
index f231cf3143a..151924197c2 100644
--- a/tensorflow/lite/delegates/gpu/cl/testing/performance_profiling.cc
+++ b/tensorflow/lite/delegates/gpu/cl/testing/performance_profiling.cc
@@ -45,10 +45,11 @@ class DelegateContext {
             const TfLiteDelegateParams* delegate_params) {
     auto denormalized_graph =
         reinterpret_cast<GraphFloat32*>(delegate_params->delegate->data_);
-    Status status = BuildModel(context, delegate_params, denormalized_graph);
+    absl::Status status =
+        BuildModel(context, delegate_params, denormalized_graph);
     if (!status.ok()) {
       context->ReportError(context, "Failed to convert a model: %s",
-                           status.error_message().c_str());
+                           std::string(status.message()).c_str());
     }
     return status.ok();
   }
@@ -82,14 +83,14 @@ TfLiteStatus DelegatePrepare(TfLiteContext* context, TfLiteDelegate* delegate) {
   return status;
 }
 
-Status FlatBufferToGPUGraph(
+absl::Status FlatBufferToGPUGraph(
     const std::unique_ptr<tflite::FlatBufferModel>& flatbuffer,
     GraphFloat32* graph) {
   tflite::ops::builtin::BuiltinOpResolver op_resolver;
   std::unique_ptr<tflite::Interpreter> interpreter;
   tflite::InterpreterBuilder interpreter_builder(*flatbuffer, op_resolver);
   if (interpreter_builder(&interpreter) != kTfLiteOk || !interpreter) {
-    return InternalError("Unable to prepare TfLite interpreter.");
+    return absl::InternalError("Unable to prepare TfLite interpreter.");
   }
   interpreter->UseNNAPI(false);
   TfLiteDelegate delegate;
@@ -101,20 +102,20 @@ Status FlatBufferToGPUGraph(
   delegate.FreeBufferHandle = nullptr;
 
   if (interpreter->ModifyGraphWithDelegate(&delegate) != kTfLiteOk) {
-    return InternalError("Conversion from TfLite model failed.");
+    return absl::InternalError("Conversion from TfLite model failed.");
   }
 
   NullTransformationReporter reporter;
   ModelTransformer transformer(graph, &reporter);
   if (!ApplyGeneralTransformations(&transformer)) {
-    return InternalError("Graph general transformations failed");
+    return absl::InternalError("Graph general transformations failed");
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 }  // namespace
 
-Status RunModelSample(const std::string& model_name) {
+absl::Status RunModelSample(const std::string& model_name) {
   auto flatbuffer = tflite::FlatBufferModel::BuildFromFile(model_name.c_str());
   GraphFloat32 graph_cl;
   RETURN_IF_ERROR(FlatBufferToGPUGraph(flatbuffer, &graph_cl));
@@ -160,7 +161,7 @@ Status RunModelSample(const std::string& model_name) {
     std::cout << "Total time - " << average_inference_time << "ms" << std::endl;
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace cl
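
The one behavioral subtlety in this file is the ReportError call: absl::Status::message() returns an absl::string_view, which is not guaranteed to be NUL-terminated, unlike the std::string returned by the old error_message(). The explicit std::string copy before the printf-style formatter is therefore required, as this small sketch shows:

#include <cstdio>
#include <string>

#include "absl/status/status.h"

int main() {
  const absl::Status status = absl::InternalError("conversion failed");
  // message() yields an absl::string_view; copy it into a std::string so the
  // "%s" formatter receives a NUL-terminated buffer.
  std::printf("Failed to convert a model: %s\n",
              std::string(status.message()).c_str());
}
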
diff --git a/tensorflow/lite/delegates/gpu/cl/texture2d.cc b/tensorflow/lite/delegates/gpu/cl/texture2d.cc
index 907721dad8c..022c15660ce 100644
--- a/tensorflow/lite/delegates/gpu/cl/texture2d.cc
+++ b/tensorflow/lite/delegates/gpu/cl/texture2d.cc
@@ -21,8 +21,9 @@ namespace cl {
 namespace {
 
 // Creates new 4-channel 2D texture with cl_channel_type elements
-Status CreateTexture2D(int width, int height, cl_channel_type type, void* data,
-                       CLContext* context, Texture2D* result) {
+absl::Status CreateTexture2D(int width, int height, cl_channel_type type,
+                             void* data, CLContext* context,
+                             Texture2D* result) {
   cl_image_desc desc;
   desc.image_type = CL_MEM_OBJECT_IMAGE2D;
   desc.image_width = width;
@@ -47,14 +48,14 @@ Status CreateTexture2D(int width, int height, cl_channel_type type, void* data,
   cl_mem texture = CreateImage2DLegacy(context->context(), flags, &format,
                                        &desc, data, &error_code);
   if (error_code != CL_SUCCESS) {
-    return UnknownError(
+    return absl::UnknownError(
         absl::StrCat("Failed to create Texture2D (clCreateImage)",
                      CLErrorCodeToString(error_code)));
   }
 
   *result = Texture2D(texture, width, height, type);
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 }  // namespace
 
@@ -95,20 +96,20 @@ void Texture2D::Release() {
 }
 
 // Creates new 4-channel 2D texture with f32 elements
-Status CreateTexture2DRGBA32F(int width, int height, CLContext* context,
-                              Texture2D* result) {
+absl::Status CreateTexture2DRGBA32F(int width, int height, CLContext* context,
+                                    Texture2D* result) {
   return CreateTexture2D(width, height, CL_FLOAT, nullptr, context, result);
 }
 
 // Creates new 4-channel 2D texture with f16 elements
-Status CreateTexture2DRGBA16F(int width, int height, CLContext* context,
-                              Texture2D* result) {
+absl::Status CreateTexture2DRGBA16F(int width, int height, CLContext* context,
+                                    Texture2D* result) {
   return CreateTexture2D(width, height, CL_HALF_FLOAT, nullptr, context,
                          result);
 }
 
-Status CreateTexture2DRGBA(DataType type, int width, int height,
-                           CLContext* context, Texture2D* result) {
+absl::Status CreateTexture2DRGBA(DataType type, int width, int height,
+                                 CLContext* context, Texture2D* result) {
   if (type == DataType::FLOAT32) {
     return CreateTexture2D(width, height, CL_FLOAT, nullptr, context, result);
   } else {
@@ -117,8 +118,9 @@ Status CreateTexture2DRGBA(DataType type, int width, int height,
   }
 }
 
-Status CreateTexture2DRGBA(DataType type, int width, int height, void* data,
-                           CLContext* context, Texture2D* result) {
+absl::Status CreateTexture2DRGBA(DataType type, int width, int height,
+                                 void* data, CLContext* context,
+                                 Texture2D* result) {
   if (type == DataType::FLOAT32) {
     return CreateTexture2D(width, height, CL_FLOAT, data, context, result);
   } else {
diff --git a/tensorflow/lite/delegates/gpu/cl/texture2d.h b/tensorflow/lite/delegates/gpu/cl/texture2d.h
index bdac984a2db..c12d8a2836c 100644
--- a/tensorflow/lite/delegates/gpu/cl/texture2d.h
+++ b/tensorflow/lite/delegates/gpu/cl/texture2d.h
@@ -50,11 +50,11 @@ class Texture2D {
   // Writes data to a texture. Data should point to a region that
   // has exactly width * height * sizeof(pixel) bytes.
   template <typename T>
-  Status WriteData(CLCommandQueue* queue, const absl::Span<T> data);
+  absl::Status WriteData(CLCommandQueue* queue, const absl::Span<T> data);
 
   // Reads data from Texture2D into CPU memory.
   template <typename T>
-  Status ReadData(CLCommandQueue* queue, std::vector<T>* result) const;
+  absl::Status ReadData(CLCommandQueue* queue, std::vector<T>* result) const;
 
  private:
   void Release();
@@ -68,43 +68,45 @@ class Texture2D {
 using Texture2DPtr = std::shared_ptr<Texture2D>;
 
 // Creates new 4-channel 2D texture with f32 elements
-Status CreateTexture2DRGBA32F(int width, int height, CLContext* context,
-                              Texture2D* result);
+absl::Status CreateTexture2DRGBA32F(int width, int height, CLContext* context,
+                                    Texture2D* result);
 
 // Creates new 4-channel 2D texture with f16 elements
-Status CreateTexture2DRGBA16F(int width, int height, CLContext* context,
-                              Texture2D* result);
+absl::Status CreateTexture2DRGBA16F(int width, int height, CLContext* context,
+                                    Texture2D* result);
 
-Status CreateTexture2DRGBA(DataType type, int width, int height,
-                           CLContext* context, Texture2D* result);
+absl::Status CreateTexture2DRGBA(DataType type, int width, int height,
+                                 CLContext* context, Texture2D* result);
 
-Status CreateTexture2DRGBA(DataType type, int width, int height, void* data,
-                           CLContext* context, Texture2D* result);
+absl::Status CreateTexture2DRGBA(DataType type, int width, int height,
+                                 void* data, CLContext* context,
+                                 Texture2D* result);
 
 template <typename T>
-Status Texture2D::WriteData(CLCommandQueue* queue, const absl::Span<T> data) {
+absl::Status Texture2D::WriteData(CLCommandQueue* queue,
+                                  const absl::Span<T> data) {
   const int element_size = ChannelTypeToSizeInBytes(channel_type_);
   if (sizeof(T) % element_size != 0) {
-    return InvalidArgumentError(
+    return absl::InvalidArgumentError(
         "Template type T has not suitable element type for created texture.");
   }
   if (4 * width_ * height_ * element_size != data.size() * sizeof(T)) {
-    return InvalidArgumentError(
+    return absl::InvalidArgumentError(
         "absl::Span<T> data size is different from texture allocated size.");
   }
 
   RETURN_IF_ERROR(queue->EnqueueWriteImage(texture_, int3(width_, height_, 1),
                                            data.data()));
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <typename T>
-Status Texture2D::ReadData(CLCommandQueue* queue,
-                           std::vector<T>* result) const {
+absl::Status Texture2D::ReadData(CLCommandQueue* queue,
+                                 std::vector<T>* result) const {
   const int element_size = ChannelTypeToSizeInBytes(channel_type_);
   if (sizeof(T) != element_size) {
-    return InvalidArgumentError("Pixel format is different.");
+    return absl::InvalidArgumentError("Pixel format is different.");
   }
 
   const int elements_count = width_ * height_ * 4;
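
Texture2D::WriteData above guards the copy with two size checks: the span's element type must be a whole multiple of the texture's per-channel element size, and the total byte counts must match for a four-channel image. A standalone restatement of those checks (hypothetical helper name, simplified parameters):

#include <cstddef>

#include "absl/status/status.h"
#include "absl/types/span.h"

// Hypothetical helper restating the WriteData checks for a 4-channel texture
// with element_size bytes per channel.
template <typename T>
absl::Status CheckWriteSize(int width, int height, int element_size,
                            absl::Span<const T> data) {
  if (sizeof(T) % element_size != 0) {
    return absl::InvalidArgumentError(
        "T is not a multiple of the per-channel element size.");
  }
  const std::size_t texture_bytes =
      std::size_t{4} * width * height * element_size;
  if (texture_bytes != data.size() * sizeof(T)) {
    return absl::InvalidArgumentError(
        "Span byte size does not match the texture allocation.");
  }
  return absl::OkStatus();
}
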
diff --git a/tensorflow/lite/delegates/gpu/common/BUILD b/tensorflow/lite/delegates/gpu/common/BUILD
index 08612e37b3e..30ac016ff83 100644
--- a/tensorflow/lite/delegates/gpu/common/BUILD
+++ b/tensorflow/lite/delegates/gpu/common/BUILD
@@ -24,8 +24,8 @@ cc_library(
     srcs = ["custom_parsers.cc"],
     hdrs = ["custom_parsers.h"],
     deps = [
-        "//tensorflow/lite/delegates/gpu/common:shape",
-        "//tensorflow/lite/delegates/gpu/common:status",
+        ":shape",
+        ":status",
         "@com_google_absl//absl/strings",
         "@com_google_absl//absl/types:any",
         "@flatbuffers",
@@ -193,6 +193,7 @@ cc_test(
 cc_library(
     name = "status",
     hdrs = ["status.h"],
+    deps = ["@com_google_absl//absl/status"],
 )
 
 cc_library(
diff --git a/tensorflow/lite/delegates/gpu/common/convert.cc b/tensorflow/lite/delegates/gpu/common/convert.cc
index 81d09b2797e..cee2e8f0e60 100644
--- a/tensorflow/lite/delegates/gpu/common/convert.cc
+++ b/tensorflow/lite/delegates/gpu/common/convert.cc
@@ -30,15 +30,15 @@ constexpr int kPhwo4i4ChannelsInPlane = 4;
 constexpr int kPiohw4ChannelsInPlane = 4;
 
 // Layout is Po,H,W,OI4x4.
-Status ConvertToPHWO4I4(absl::Span<const float> in, const OHWI& shape,
-                        absl::Span<float> out, bool reverse_space) {
+absl::Status ConvertToPHWO4I4(absl::Span<const float> in, const OHWI& shape,
+                              absl::Span<float> out, bool reverse_space) {
   if (in.size() != shape.DimensionsProduct()) {
-    return InvalidArgumentError(absl::StrCat(
+    return absl::InvalidArgumentError(absl::StrCat(
         "ConvertToPHWO4I4: Input data size does not match expected size: ",
         in.size(), " != ", shape.DimensionsProduct()));
   }
   if (out.size() != GetElementsSizeForPHWO4I4(shape)) {
-    return InvalidArgumentError(absl::StrCat(
+    return absl::InvalidArgumentError(absl::StrCat(
         "ConvertToPHWO4I4: Output data size does not match expected size: ",
         out.size(), " != ", GetElementsSizeForPHWO4I4(shape)));
   }
@@ -69,7 +69,7 @@ Status ConvertToPHWO4I4(absl::Span<const float> in, const OHWI& shape,
       }
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace
@@ -110,15 +110,15 @@ uint3 Get3DSizeForPHWO4I4(const OHWI& shape) {
 }
 
 // Layout is Po,H,W,OI4x4.
-Status ConvertToPHWO4I4(absl::Span<const float> in, const IHWO& shape,
-                        absl::Span<float> out) {
+absl::Status ConvertToPHWO4I4(absl::Span<const float> in, const IHWO& shape,
+                              absl::Span<float> out) {
   if (in.size() != shape.DimensionsProduct()) {
-    return InvalidArgumentError(absl::StrCat(
+    return absl::InvalidArgumentError(absl::StrCat(
         "ConvertToPHWO4I4: Input data size does not match expected size: ",
         in.size(), " != ", shape.DimensionsProduct()));
   }
   if (out.size() != GetElementsSizeForPHWO4I4(shape)) {
-    return InvalidArgumentError(absl::StrCat(
+    return absl::InvalidArgumentError(absl::StrCat(
         "ConvertToPHWO4I4: Output data size does not match expected size: ",
         out.size(), " != ", GetElementsSizeForPHWO4I4(shape)));
   }
@@ -147,7 +147,7 @@ Status ConvertToPHWO4I4(absl::Span<const float> in, const IHWO& shape,
       }
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 std::vector<float> ConvertToPHWO4I4(
@@ -164,15 +164,15 @@ uint32_t GetElementsSizeForPIOHW4(const OHWI& shape) {
          shape.w;
 }
 
-Status ConvertToPIOHW4(absl::Span<const float> in, const OHWI& shape,
-                       absl::Span<float> out) {
+absl::Status ConvertToPIOHW4(absl::Span<const float> in, const OHWI& shape,
+                             absl::Span<float> out) {
   if (in.size() != shape.DimensionsProduct()) {
-    return InvalidArgumentError(absl::StrCat(
+    return absl::InvalidArgumentError(absl::StrCat(
         "ConvertToPIOHW4: Input data size does not match expected size: ",
         in.size(), " != ", shape.DimensionsProduct()));
   }
   if (out.size() != GetElementsSizeForPIOHW4(shape)) {
-    return InvalidArgumentError(absl::StrCat(
+    return absl::InvalidArgumentError(absl::StrCat(
         "ConvertToPIOHW4: Output data size does not match expected size: ",
         out.size(), " != ", GetElementsSizeForPIOHW4(shape)));
   }
@@ -194,7 +194,7 @@ Status ConvertToPIOHW4(absl::Span<const float> in, const OHWI& shape,
       }
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 std::vector<float> ConvertToPIOHW4(
@@ -207,29 +207,29 @@ std::vector<float> ConvertToPIOHW4(
 }
 
 template <typename T>
-Status ValidateConvertToPHWC4(absl::Span<const float> in, const BHWC& shape,
-                              absl::Span<T> out) {
+absl::Status ValidateConvertToPHWC4(absl::Span<const float> in,
+                                    const BHWC& shape, absl::Span<T> out) {
   if (in.size() != shape.DimensionsProduct()) {
-    return InvalidArgumentError(absl::StrCat(
+    return absl::InvalidArgumentError(absl::StrCat(
         "ConvertToPHWC4: Input data size does not match expected size: ",
         in.size(), " != ", shape.DimensionsProduct()));
   }
   if (out.size() != GetElementsSizeForPHWC4(shape)) {
-    return InvalidArgumentError(absl::StrCat(
+    return absl::InvalidArgumentError(absl::StrCat(
         "ConvertToPHWC4: Output data size does not match expected size: ",
         out.size(), " != ", GetElementsSizeForPHWC4(shape)));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // Layout is Pc,H,W,C4, where P is a plane based on the channels.
-Status ConvertToPHWC4(absl::Span<const float> in, const BHWC& shape,
-                      absl::Span<float> out) {
+absl::Status ConvertToPHWC4(absl::Span<const float> in, const BHWC& shape,
+                            absl::Span<float> out) {
   RETURN_IF_ERROR(ValidateConvertToPHWC4(in, shape, out));
   if (shape.c == 4) {
     std::memcpy(out.data(), in.data(),
                 shape.DimensionsProduct() * sizeof(float));
-    return OkStatus();
+    return absl::OkStatus();
   }
   // Layout is Pc,H,W,C4, where P is a plane based on the channels.
   int num_planes = IntegralDivideRoundUp(shape.c, kPhwc4ChannelsInPlane);
@@ -256,7 +256,7 @@ Status ConvertToPHWC4(absl::Span<const float> in, const BHWC& shape,
   const int remaining_channels =
       shape.c - num_full_planes * kPhwc4ChannelsInPlane;
   if (remaining_channels == 0) {
-    return OkStatus();
+    return absl::OkStatus();
   }
   for (int b = 0; b < shape.b; b++) {
     const float* src =
@@ -272,12 +272,12 @@ Status ConvertToPHWC4(absl::Span<const float> in, const BHWC& shape,
       dest += kPhwc4ChannelsInPlane;
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // Layout is Pc,H,W,C4, where P is a plane based on the channels.
-Status ConvertToPHWC4Half(absl::Span<const float> in, const BHWC& shape,
-                          absl::Span<HalfBits> out) {
+absl::Status ConvertToPHWC4Half(absl::Span<const float> in, const BHWC& shape,
+                                absl::Span<HalfBits> out) {
   RETURN_IF_ERROR(ValidateConvertToPHWC4(in, shape, out));
 
   // Layout is Pc,H,W,C4, where P is a plane based on the channels.
@@ -308,7 +308,7 @@ Status ConvertToPHWC4Half(absl::Span<const float> in, const BHWC& shape,
   const int remaining_channels =
       shape.c - num_full_planes * kPhwc4ChannelsInPlane;
   if (remaining_channels == 0) {
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   for (int b = 0; b < shape.b; b++) {
@@ -349,11 +349,11 @@ Status ConvertToPHWC4Half(absl::Span<const float> in, const BHWC& shape,
         }
         break;
       default:
-        return UnimplementedError(
+        return absl::UnimplementedError(
             "ConvertToPHWC4Half: Unsupported channels per planes count.");
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 std::vector<float> ConvertToPHWC4(
@@ -383,28 +383,28 @@ uint32_t GetElementsSizeForPHWC4(const BHWC& shape) {
 }
 
 template <typename T>
-Status ValidateConvertFromPHWC4(absl::Span<const T> in, const BHWC& shape,
-                                absl::Span<float> out) {
+absl::Status ValidateConvertFromPHWC4(absl::Span<const T> in, const BHWC& shape,
+                                      absl::Span<float> out) {
   if (in.size() != GetElementsSizeForPHWC4(shape)) {
-    return InvalidArgumentError(absl::StrCat(
+    return absl::InvalidArgumentError(absl::StrCat(
         "ConvertFromPHWC4: Input data size does not match expected size: ",
         in.size(), " != ", GetElementsSizeForPHWC4(shape)));
   }
   if (out.size() != shape.DimensionsProduct()) {
-    return InvalidArgumentError(absl::StrCat(
+    return absl::InvalidArgumentError(absl::StrCat(
         "ConvertFromPHWC4: Output data size does not match expected size: ",
         out.size(), " != ", shape.DimensionsProduct()));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status ConvertFromPHWC4(absl::Span<const float> in, const BHWC& shape,
-                        absl::Span<float> out) {
+absl::Status ConvertFromPHWC4(absl::Span<const float> in, const BHWC& shape,
+                              absl::Span<float> out) {
   RETURN_IF_ERROR(ValidateConvertFromPHWC4(in, shape, out));
   if (shape.c == 4) {
     std::memcpy(out.data(), in.data(),
                 shape.DimensionsProduct() * sizeof(float));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   int num_planes = IntegralDivideRoundUp(shape.c, kPhwc4ChannelsInPlane);
@@ -429,7 +429,7 @@ Status ConvertFromPHWC4(absl::Span<const float> in, const BHWC& shape,
   const int remaining_channels =
       shape.c - num_full_planes * kPhwc4ChannelsInPlane;
   if (remaining_channels == 0) {
-    return OkStatus();
+    return absl::OkStatus();
   }
   for (int b = 0; b < shape.b; b++) {
     const float* src = in.data() + b * padded_size +
@@ -443,11 +443,11 @@ Status ConvertFromPHWC4(absl::Span<const float> in, const BHWC& shape,
       dest += shape.c;
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status ConvertFromPHWC4Half(absl::Span<const HalfBits> in, const BHWC& shape,
-                            absl::Span<float> out) {
+absl::Status ConvertFromPHWC4Half(absl::Span<const HalfBits> in,
+                                  const BHWC& shape, absl::Span<float> out) {
   RETURN_IF_ERROR(ValidateConvertFromPHWC4(in, shape, out));
   int num_planes = IntegralDivideRoundUp(shape.c, kPhwc4ChannelsInPlane);
   const int num_pixels = shape.h * shape.w;
@@ -474,7 +474,7 @@ Status ConvertFromPHWC4Half(absl::Span<const HalfBits> in, const BHWC& shape,
   const int remaining_channels =
       shape.c - num_full_planes * kPhwc4ChannelsInPlane;
   if (remaining_channels == 0) {
-    return OkStatus();
+    return absl::OkStatus();
   }
   for (int b = 0; b < shape.b; b++) {
     const HalfBits* src = in.data() + b * padded_size +
@@ -508,11 +508,11 @@ Status ConvertFromPHWC4Half(absl::Span<const HalfBits> in, const BHWC& shape,
         }
         break;
       default:
-        return UnimplementedError(
+        return absl::UnimplementedError(
             "ConvertToPHWC4Half: Unsupported channels per planes count.");
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gpu
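
All of these converters share the same plane arithmetic: channels are padded up to a multiple of 4, giving IntegralDivideRoundUp(c, 4) planes of four channels each, and the c == 4 case degenerates to a plain memcpy. A tiny worked example with made-up dimensions:

#include <iostream>

int main() {
  const int b = 1, h = 2, w = 2, c = 6;  // made-up shape for illustration
  const int planes = (c + 3) / 4;        // IntegralDivideRoundUp(c, 4) == 2
  const int phwc4_elements = b * h * w * planes * 4;
  std::cout << phwc4_elements << "\n";   // 32, versus b * h * w * c == 24
}
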
diff --git a/tensorflow/lite/delegates/gpu/common/convert.h b/tensorflow/lite/delegates/gpu/common/convert.h
index 30a0a5f3183..3aba9c913c5 100644
--- a/tensorflow/lite/delegates/gpu/common/convert.h
+++ b/tensorflow/lite/delegates/gpu/common/convert.h
@@ -29,19 +29,19 @@ namespace gpu {
 
 // PHWC4 is a layout where channels are grouped by 4 in a row; P stands for
 // a plane derived by dividing the channels by 4.
-Status ConvertToPHWC4(absl::Span<const float> in, const BHWC& shape,
-                      absl::Span<float> out);
-Status ConvertToPHWC4Half(absl::Span<const float> in, const BHWC& shape,
-                          absl::Span<HalfBits> out);
+absl::Status ConvertToPHWC4(absl::Span<const float> in, const BHWC& shape,
+                            absl::Span<float> out);
+absl::Status ConvertToPHWC4Half(absl::Span<const float> in, const BHWC& shape,
+                                absl::Span<HalfBits> out);
 
 // @return number of elements when shape is converted into PHWC4.
 uint32_t GetElementsSizeForPHWC4(const BHWC& shape);
 
 // Operation is opposite to ConvertToPHWC4.
-Status ConvertFromPHWC4(absl::Span<const float> in, const BHWC& shape,
-                        absl::Span<float> out);
-Status ConvertFromPHWC4Half(absl::Span<const HalfBits> in, const BHWC& shape,
-                            absl::Span<float> out);
+absl::Status ConvertFromPHWC4(absl::Span<const float> in, const BHWC& shape,
+                              absl::Span<float> out);
+absl::Status ConvertFromPHWC4Half(absl::Span<const HalfBits> in,
+                                  const BHWC& shape, absl::Span<float> out);
 
 // Convenience wrapper around a method above.
 std::vector<float> ConvertToPHWC4(
@@ -53,8 +53,8 @@ uint32_t GetElementsSizeForPIOHW4(const OHWI& shape);
 
 // PIOHW4 layout re-arranges weights in groups of 4, where the outer dimension
 // is P, which is OxI/4.
-Status ConvertToPIOHW4(absl::Span<const float> in, const OHWI& shape,
-                       absl::Span<float> out);
+absl::Status ConvertToPIOHW4(absl::Span<const float> in, const OHWI& shape,
+                             absl::Span<float> out);
 
 // Convenience wrapper around a method above.
 std::vector<float> ConvertToPIOHW4(
@@ -79,8 +79,8 @@ uint3 Get3DSizeForPHWO4I4(const OHWI& shape);
 uint32_t GetElementsSizeForPHWO4I4(const IHWO& shape);
 
 // Layout is Po,H,W,OI4x4.
-Status ConvertToPHWO4I4(absl::Span<const float> in, const IHWO& shape,
-                        absl::Span<float> out);
+absl::Status ConvertToPHWO4I4(absl::Span<const float> in, const IHWO& shape,
+                              absl::Span<float> out);
 
 // Convenience wrapper around a method above.
 std::vector<float> ConvertToPHWO4I4(
diff --git a/tensorflow/lite/delegates/gpu/common/custom_parsers.cc b/tensorflow/lite/delegates/gpu/common/custom_parsers.cc
index d46a9247c81..e43cba05525 100644
--- a/tensorflow/lite/delegates/gpu/common/custom_parsers.cc
+++ b/tensorflow/lite/delegates/gpu/common/custom_parsers.cc
@@ -25,10 +25,10 @@ limitations under the License.
 namespace tflite {
 namespace gpu {
 
-Status ParseCustomAttributes(absl::string_view op_name, const void* data,
-                             uint32_t data_size, absl::any* attr,
-                             BHWC* output_shape) {
-  return UnimplementedError(absl::StrCat(
+absl::Status ParseCustomAttributes(absl::string_view op_name, const void* data,
+                                   uint32_t data_size, absl::any* attr,
+                                   BHWC* output_shape) {
+  return absl::UnimplementedError(absl::StrCat(
       "Attributes parsing is not enabled for ", op_name, " operation"));
 }
 
diff --git a/tensorflow/lite/delegates/gpu/common/custom_parsers.h b/tensorflow/lite/delegates/gpu/common/custom_parsers.h
index e9a191d46cb..707087e6fdb 100644
--- a/tensorflow/lite/delegates/gpu/common/custom_parsers.h
+++ b/tensorflow/lite/delegates/gpu/common/custom_parsers.h
@@ -27,9 +27,9 @@ namespace gpu {
 
 // Matches the custom operation by the string name and parses attributes stored
 // as flexbuffers.
-Status ParseCustomAttributes(absl::string_view op_name, const void* data,
-                             uint32_t data_size, absl::any* attr,
-                             BHWC* output_shape);
+absl::Status ParseCustomAttributes(absl::string_view op_name, const void* data,
+                                   uint32_t data_size, absl::any* attr,
+                                   BHWC* output_shape);
 
 }  // namespace gpu
 }  // namespace tflite
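
Because ParseCustomAttributes now returns a canonical absl::Status code, callers can detect the unimplemented case without matching on the message text. A hypothetical caller (the op name is invented):

#include <iostream>

#include "absl/status/status.h"

int main() {
  const absl::Status status = absl::UnimplementedError(
      "Attribute parsing is not enabled for the FOO operation");
  if (absl::IsUnimplemented(status)) {
    std::cout << "falling back to a default parser\n";
  }
}
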
diff --git a/tensorflow/lite/delegates/gpu/common/memory_management.cc b/tensorflow/lite/delegates/gpu/common/memory_management.cc
index 5cfd26b1832..d7e6a060eb2 100644
--- a/tensorflow/lite/delegates/gpu/common/memory_management.cc
+++ b/tensorflow/lite/delegates/gpu/common/memory_management.cc
@@ -55,8 +55,9 @@ OffsetsAssignment ObjectsToOffsets(
   return result;
 }
 
-Status BestGreedy(const std::vector<TensorUsageRecord<size_t>>& usage_records,
-                  ObjectsAssignment<size_t>* assignment) {
+absl::Status BestGreedy(
+    const std::vector<TensorUsageRecord<size_t>>& usage_records,
+    ObjectsAssignment<size_t>* assignment) {
   RETURN_IF_ERROR(
       GreedyBySizeDistPriorityAssignment(usage_records, assignment));
   ObjectsAssignment<size_t> assignment_by_breadth;
@@ -64,11 +65,11 @@ Status BestGreedy(const std::vector<TensorUsageRecord<size_t>>& usage_records,
       TotalSize(assignment_by_breadth) < TotalSize(*assignment)) {
     std::swap(*assignment, assignment_by_breadth);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <>
-Status AssignObjectsToTensors(
+absl::Status AssignObjectsToTensors(
     const std::vector<TensorUsageRecord<size_t>>& usage_records,
     MemoryStrategy strategy, ObjectsAssignment<size_t>* assignment,
     const UsageGraph* reallocation_graph) {
@@ -89,14 +90,14 @@ Status AssignObjectsToTensors(
     case MemoryStrategy::MINCOSTFLOW:
       return MinCostFlowAssignment(usage_records, assignment);
     default:
-      return InternalError(
+      return absl::InternalError(
           "MemoryStrategy is not supported with current tensor size type.");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <>
-Status AssignObjectsToTensors(
+absl::Status AssignObjectsToTensors(
     const std::vector<TensorUsageRecord<BHWC>>& usage_records,
     MemoryStrategy strategy, ObjectsAssignment<BHWC>* assignment,
     const UsageGraph* reallocation_graph) {
@@ -106,14 +107,14 @@ Status AssignObjectsToTensors(
     case MemoryStrategy::EQUALITY:
       return EqualityAssignmentWithHash(usage_records, assignment);
     default:
-      return InternalError(
+      return absl::InternalError(
           "MemoryStrategy is not supported with current tensor size type.");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <>
-Status AssignObjectsToTensors(
+absl::Status AssignObjectsToTensors(
     const std::vector<TensorUsageRecord<uint2>>& usage_records,
     MemoryStrategy strategy, ObjectsAssignment<uint2>* assignment,
     const UsageGraph* reallocation_graph) {
@@ -125,14 +126,14 @@ Status AssignObjectsToTensors(
     case MemoryStrategy::GREEDY_IN_ORDER:
       return GreedyInOrderAssignmentMultidimensional(usage_records, assignment);
     default:
-      return InternalError(
+      return absl::InternalError(
           "MemoryStrategy is not supported with current tensor size type.");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <>
-Status AssignObjectsToTensors(
+absl::Status AssignObjectsToTensors(
     const std::vector<TensorUsageRecord<uint3>>& usage_records,
     MemoryStrategy strategy, ObjectsAssignment<uint3>* assignment,
     const UsageGraph* reallocation_graph) {
@@ -144,13 +145,13 @@ Status AssignObjectsToTensors(
     case MemoryStrategy::GREEDY_IN_ORDER:
       return GreedyInOrderAssignmentMultidimensional(usage_records, assignment);
     default:
-      return InternalError(
+      return absl::InternalError(
           "MemoryStrategy is not supported with current tensor size type.");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status AssignOffsetsToTensors(
+absl::Status AssignOffsetsToTensors(
     const std::vector<TensorUsageRecord<size_t>>& usage_records,
     const MemoryStrategy& strategy, OffsetsAssignment* assignment,
     const UsageGraph* reallocation_graph) {
@@ -161,7 +162,7 @@ Status AssignOffsetsToTensors(
   RETURN_IF_ERROR(AssignObjectsToTensors(
       usage_records, strategy, &objects_assignment, reallocation_graph));
   *assignment = ObjectsToOffsets(objects_assignment);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gpu
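
BestGreedy above runs two greedy strategies and keeps whichever yields the smaller total allocation; if the second strategy fails, the first result is kept as-is. A self-contained sketch of that selection logic with stand-in strategies:

#include <algorithm>
#include <cstddef>

#include "absl/status/status.h"

struct Assignment {
  std::size_t total_size = 0;
};

// Stand-ins for the two greedy strategies; the values are arbitrary.
absl::Status StrategyA(Assignment* out) {
  out->total_size = 96;
  return absl::OkStatus();
}
absl::Status StrategyB(Assignment* out) {
  out->total_size = 80;
  return absl::OkStatus();
}

absl::Status BestOfTwo(Assignment* result) {
  absl::Status status = StrategyA(result);
  if (!status.ok()) return status;  // the first strategy must succeed
  Assignment alternative;
  if (StrategyB(&alternative).ok() &&
      alternative.total_size < result->total_size) {
    std::swap(*result, alternative);  // keep the smaller footprint
  }
  return absl::OkStatus();
}
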
diff --git a/tensorflow/lite/delegates/gpu/common/memory_management.h b/tensorflow/lite/delegates/gpu/common/memory_management.h
index e45c361d955..7df4947ee3d 100644
--- a/tensorflow/lite/delegates/gpu/common/memory_management.h
+++ b/tensorflow/lite/delegates/gpu/common/memory_management.h
@@ -79,8 +79,9 @@ enum class MemoryStrategy {
 
 // Chooses greedy algorithm with the lowest memory consumption for given usage
 // records and returns corresponding shared objects assignment.
-Status BestGreedy(const std::vector<TensorUsageRecord<size_t>>& usage_records,
-                  ObjectsAssignment<size_t>* assignment);
+absl::Status BestGreedy(
+    const std::vector<TensorUsageRecord<size_t>>& usage_records,
+    ObjectsAssignment<size_t>* assignment);
 
 // Calculates the assignment of shared objects to given tensors, including
 // objects' sizes. Below there are specializations for different types, that
@@ -90,7 +91,7 @@ Status BestGreedy(const std::vector<TensorUsageRecord<size_t>>& usage_records,
 // can be larger. Currently only GREEDY_IN_ORDER strategy can use this
 // reallocation_graph.
 template <typename TensorSizeT>
-Status AssignObjectsToTensors(
+absl::Status AssignObjectsToTensors(
     const std::vector<TensorUsageRecord<TensorSizeT>>& usage_records,
     MemoryStrategy strategy, ObjectsAssignment<TensorSizeT>* assignment,
     const UsageGraph* reallocation_graph = nullptr) {
@@ -100,39 +101,39 @@ Status AssignObjectsToTensors(
     case MemoryStrategy::EQUALITY:
       return EqualityAssignment(usage_records, assignment);
     default:
-      return InternalError(
+      return absl::InternalError(
           "MemoryStrategy is not supported with current tensor size type.");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <>
-Status AssignObjectsToTensors(
+absl::Status AssignObjectsToTensors(
     const std::vector<TensorUsageRecord<size_t>>& usage_records,
     MemoryStrategy strategy, ObjectsAssignment<size_t>* assignment,
     const UsageGraph* reallocation_graph);
 
 template <>
-Status AssignObjectsToTensors(
+absl::Status AssignObjectsToTensors(
     const std::vector<TensorUsageRecord<BHWC>>& usage_records,
     MemoryStrategy strategy, ObjectsAssignment<BHWC>* assignment,
     const UsageGraph* reallocation_graph);
 
 template <>
-Status AssignObjectsToTensors(
+absl::Status AssignObjectsToTensors(
     const std::vector<TensorUsageRecord<uint2>>& usage_records,
     MemoryStrategy strategy, ObjectsAssignment<uint2>* assignment,
     const UsageGraph* reallocation_graph);
 
 template <>
-Status AssignObjectsToTensors(
+absl::Status AssignObjectsToTensors(
     const std::vector<TensorUsageRecord<uint3>>& usage_records,
     MemoryStrategy strategy, ObjectsAssignment<uint3>* assignment,
     const UsageGraph* reallocation_graph);
 
 // Calculates the assignment of tensors to offsets, considering those tensors
 // are going to be allocated in one continuous memory block.
-Status AssignOffsetsToTensors(
+absl::Status AssignOffsetsToTensors(
     const std::vector<TensorUsageRecord<size_t>>& usage_records,
     const MemoryStrategy& strategy, OffsetsAssignment* assignment,
     const UsageGraph* reallocation_graph = nullptr);
diff --git a/tensorflow/lite/delegates/gpu/common/memory_management/equality_assignment.h b/tensorflow/lite/delegates/gpu/common/memory_management/equality_assignment.h
index 0955393e00c..fdccce5159f 100644
--- a/tensorflow/lite/delegates/gpu/common/memory_management/equality_assignment.h
+++ b/tensorflow/lite/delegates/gpu/common/memory_management/equality_assignment.h
@@ -29,7 +29,7 @@ namespace gpu {
 
 // Fast version of Equality Assignments for hashable types.
 template <typename TensorSizeT>
-Status EqualityAssignmentWithHash(
+absl::Status EqualityAssignmentWithHash(
     const std::vector<TensorUsageRecord<TensorSizeT>>& usage_records,
     ObjectsAssignment<TensorSizeT>* assignment) {
   size_t num_records = usage_records.size();
@@ -69,12 +69,12 @@ Status EqualityAssignmentWithHash(
           {usage_records[i].last_task, assignment->object_ids[i]});
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // Slower version of Equality Assignments for unhashable types.
 template <typename TensorSizeT>
-Status EqualityAssignment(
+absl::Status EqualityAssignment(
     const std::vector<TensorUsageRecord<TensorSizeT>>& usage_records,
     ObjectsAssignment<TensorSizeT>* assignment) {
   size_t num_records = usage_records.size();
@@ -109,7 +109,7 @@ Status EqualityAssignment(
       dealloc_task[best_obj] = usage_records[i].last_task;
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gpu
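
Both EqualityAssignment variants above implement the same idea: reuse an object only for a tensor of exactly equal size whose lifetime does not overlap with the previous user's. A compact sketch of the hashable-size case, with simplified stand-in types:

#include <cstddef>
#include <map>
#include <utility>
#include <vector>

// Simplified stand-in for TensorUsageRecord: a size plus the first and last
// tasks during which the tensor is alive.
struct Record {
  std::size_t size;
  std::size_t first_task;
  std::size_t last_task;
};

std::vector<std::size_t> AssignByEquality(const std::vector<Record>& records) {
  std::vector<std::size_t> object_ids(records.size());
  // size -> (dealloc task, object id) for every object of that size.
  std::map<std::size_t, std::vector<std::pair<std::size_t, std::size_t>>> pool;
  std::size_t next_id = 0;
  for (std::size_t i = 0; i < records.size(); ++i) {
    auto& candidates = pool[records[i].size];
    bool reused = false;
    for (auto& [dealloc_task, id] : candidates) {
      if (dealloc_task < records[i].first_task) {  // lifetimes do not overlap
        object_ids[i] = id;
        dealloc_task = records[i].last_task;
        reused = true;
        break;
      }
    }
    if (!reused) {  // no equal-sized free object: create a new one
      object_ids[i] = next_id;
      candidates.push_back({records[i].last_task, next_id});
      ++next_id;
    }
  }
  return object_ids;
}
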
diff --git a/tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_breadth_assignment.cc b/tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_breadth_assignment.cc
index 5d0f6b620b0..2c138b4c14c 100644
--- a/tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_breadth_assignment.cc
+++ b/tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_breadth_assignment.cc
@@ -46,7 +46,7 @@ struct TaskBreadthWithId {
 
 }  // namespace
 
-Status GreedyByBreadthAssignment(
+absl::Status GreedyByBreadthAssignment(
     const std::vector<TensorUsageRecord<size_t>>& usage_records,
     ObjectsAssignment<size_t>* assignment) {
   std::vector<TaskProfile> task_profiles = CalculateTaskProfiles(usage_records);
@@ -133,10 +133,10 @@ Status GreedyByBreadthAssignment(
   // In the end all tensors must be assigned to some objects.
   for (const auto& obj_id : assignment->object_ids) {
     if (obj_id == kNotAssigned) {
-      return InternalError("Error while calculating the assignment.");
+      return absl::InternalError("Error while calculating the assignment.");
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_breadth_assignment.h b/tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_breadth_assignment.h
index c139ba0fe0f..47035229920 100644
--- a/tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_breadth_assignment.h
+++ b/tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_breadth_assignment.h
@@ -44,7 +44,7 @@ namespace gpu {
 // tensor’s size, assign current tensor to the smallest of them;
 // - If the only suitable objects have size less than the current tensor’s
 // size, assign current tensor to the largest of them and increase its size.
-Status GreedyByBreadthAssignment(
+absl::Status GreedyByBreadthAssignment(
     const std::vector<TensorUsageRecord<size_t>>& usage_records,
     ObjectsAssignment<size_t>* assignment);
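
The selection rule described in the header comment above — prefer the smallest object that already fits, otherwise grow the largest one — can be restated as a small standalone helper (hypothetical name, scalar sizes only, non-empty pool assumed):

#include <cstddef>
#include <vector>

// Prefer the smallest object that already fits `need`; if none fits, pick
// the largest object and grow it. `objects` must be non-empty.
std::size_t ChooseObject(std::vector<std::size_t>& objects, std::size_t need) {
  std::size_t best = 0;
  for (std::size_t i = 1; i < objects.size(); ++i) {
    const bool i_fits = objects[i] >= need;
    const bool best_fits = objects[best] >= need;
    if (i_fits && (!best_fits || objects[i] < objects[best])) best = i;
    if (!i_fits && !best_fits && objects[i] > objects[best]) best = i;
  }
  if (objects[best] < need) objects[best] = need;  // grow the largest object
  return best;
}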
 
diff --git a/tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_size_assignment.cc b/tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_size_assignment.cc
index bf56c6d92dd..76309ce8f1b 100644
--- a/tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_size_assignment.cc
+++ b/tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_size_assignment.cc
@@ -60,7 +60,7 @@ struct SizeDistPriorityInfo {
 
 }  // namespace
 
-Status GreedyBySizeAssignment(
+absl::Status GreedyBySizeAssignment(
     const std::vector<TensorUsageRecord<size_t>>& usage_records,
     OffsetsAssignment* assignment) {
   const size_t num_tensors = usage_records.size();
@@ -104,7 +104,7 @@ Status GreedyBySizeAssignment(
           prev_offset, cur_offset + usage_records[allocated_id].tensor_size);
     }
     if (assignment->total_size < prev_offset) {
-      return InternalError("Total size is wrong.");
+      return absl::InternalError("Total size is wrong.");
     }
 
     // If no suitable gap is found, we should allocate current tensor after the
@@ -125,7 +125,7 @@ Status GreedyBySizeAssignment(
     assignment->total_size =
         std::max(assignment->total_size, best_offset + rec->tensor_size);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // Assigns given tensors to shared objects, using the following greedy
@@ -152,7 +152,7 @@ Status GreedyBySizeAssignment(
 // object with size equal to current tensor's size;
 // - Modify SizeDistPriority records of tensors that haven't been assigned yet,
 // to reflect distance changes after that assignment.
-Status GreedyBySizeDistPriorityAssignment(
+absl::Status GreedyBySizeDistPriorityAssignment(
     const std::vector<TensorUsageRecord<size_t>>& usage_records,
     ObjectsAssignment<size_t>* assignment) {
   std::vector<size_t> positional_max =
@@ -175,7 +175,7 @@ Status GreedyBySizeDistPriorityAssignment(
       ++pos;
     }
     if (pos == 0) {
-      return InternalError("Variable pos must be positive.");
+      return absl::InternalError("Variable pos must be positive.");
     }
     priority_info[rec_id].position = pos - 1;
   }
@@ -198,7 +198,7 @@ Status GreedyBySizeDistPriorityAssignment(
     if (best_info_id == kNotAssigned) {
       // During each iteration we assign exactly one of the tensors, so some not
       // yet assigned tensors must exist.
-      return InternalError("Invalid value for variable best_info_id.");
+      return absl::InternalError("Invalid value for variable best_info_id.");
     }
 
     size_t best_rec_id = priority_info[best_info_id].tensor_usage_id;
@@ -271,7 +271,7 @@ Status GreedyBySizeDistPriorityAssignment(
       }
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_size_assignment.h b/tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_size_assignment.h
index fb875fd0920..b0ad9d18911 100644
--- a/tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_size_assignment.h
+++ b/tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_size_assignment.h
@@ -38,7 +38,7 @@ namespace gpu {
 // gap. Otherwise we can allocate it after the rightmost tensor whose usage
 // interval intersects with the usage interval of the current tensor. So we
 // assign the corresponding offset to the current tensor and the tensor
 // becomes assigned.
-Status GreedyBySizeAssignment(
+absl::Status GreedyBySizeAssignment(
     const std::vector<TensorUsageRecord<size_t>>& usage_records,
     OffsetsAssignment* assignment);
 
@@ -66,7 +66,7 @@ Status GreedyBySizeAssignment(
 // object with size equal to current tensor's size;
 // - Modify SizeDistPriority records of tensors that haven't been assigned yet,
 // to reflect distance changes after that assignment.
-Status GreedyBySizeDistPriorityAssignment(
+absl::Status GreedyBySizeDistPriorityAssignment(
     const std::vector<TensorUsageRecord<size_t>>& usage_records,
     ObjectsAssignment<size_t>* assignment);
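
Note that, unlike the object-based strategies, GreedyBySizeAssignment above fills an OffsetsAssignment: byte offsets into one shared arena rather than object ids. A hedged sketch of consuming such a result, with a hypothetical Offsets mirror type:

#include <cstddef>
#include <vector>

// Hypothetical mirror of OffsetsAssignment: one byte offset per tensor into
// a single arena, plus the arena's total size.
struct Offsets {
  std::vector<size_t> offsets;
  size_t total_size = 0;
};

// Consuming the result is one allocation plus pointer arithmetic; tensors
// with disjoint usage intervals may legally map to overlapping byte ranges.
std::vector<char*> BindTensors(char* arena, const Offsets& a) {
  std::vector<char*> ptrs;
  ptrs.reserve(a.offsets.size());
  for (size_t offset : a.offsets) ptrs.push_back(arena + offset);
  return ptrs;
}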
 
diff --git a/tensorflow/lite/delegates/gpu/common/memory_management/greedy_in_order_assignment.h b/tensorflow/lite/delegates/gpu/common/memory_management/greedy_in_order_assignment.h
index b454920ffcb..8c3719e4a8b 100644
--- a/tensorflow/lite/delegates/gpu/common/memory_management/greedy_in_order_assignment.h
+++ b/tensorflow/lite/delegates/gpu/common/memory_management/greedy_in_order_assignment.h
@@ -46,7 +46,7 @@ namespace gpu {
 //
 //   3. Shared object size may increase when tensor requests larger size.
 template <typename TensorSizeT>
-Status GreedyInOrderAssignment(
+absl::Status GreedyInOrderAssignment(
     const std::vector<TensorUsageRecord<TensorSizeT>>& usage_records,
     ObjectsAssignment<TensorSizeT>* assignment,
     const UsageGraph* reallocation_graph = nullptr) {
@@ -111,7 +111,7 @@ Status GreedyInOrderAssignment(
       }
       // best_it can't be equal to pool.end(), because pool is not empty
       if (best_it == pool.end()) {
-        return InternalError(
+        return absl::InternalError(
             "No shared object is found in non-empty pool in "
             "GreedyInOrderAssignment.");
       }
@@ -135,14 +135,14 @@ Status GreedyInOrderAssignment(
           {usage_records[i].last_task, assignment->object_ids[i]});
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // The same algorithm as above, but for the multidimensional case. The only
 // difference is that shared object dimensions can't be increased to be reused
 // for a tensor that is larger (at least in one dimension).
 template <typename TensorSizeT>
-Status GreedyInOrderAssignmentMultidimensional(
+absl::Status GreedyInOrderAssignmentMultidimensional(
     const std::vector<TensorUsageRecord<TensorSizeT>>& usage_records,
     ObjectsAssignment<TensorSizeT>* assignment) {
   size_t num_records = usage_records.size();
@@ -198,7 +198,7 @@ Status GreedyInOrderAssignmentMultidimensional(
           {usage_records[i].last_task, assignment->object_ids[i]});
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gpu
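
The in-order strategy above parks objects in a size-keyed pool once their owners' usage intervals end and reuses them for later tensors. A simplified, self-contained model of that mechanic (hypothetical types; the real routine can also grow a slightly-too-small object, which this sketch omits):

#include <cstddef>
#include <functional>
#include <queue>
#include <set>
#include <utility>
#include <vector>

struct Record { size_t size, first_task, last_task; };

// Returns how many shared objects this simplified policy creates.
size_t CountObjects(const std::vector<Record>& records) {
  using Freeing = std::pair<size_t, size_t>;  // {last_task, object_size}
  std::priority_queue<Freeing, std::vector<Freeing>, std::greater<Freeing>>
      in_flight;
  std::multiset<size_t> pool;  // Sizes of objects currently free for reuse.
  size_t num_objects = 0;
  for (const Record& r : records) {  // Assumed sorted by first_task.
    // Release every object whose owner finished before this tensor starts.
    while (!in_flight.empty() && in_flight.top().first < r.first_task) {
      pool.insert(in_flight.top().second);
      in_flight.pop();
    }
    auto it = pool.lower_bound(r.size);  // Smallest free object that fits.
    size_t object_size = r.size;
    if (it != pool.end()) {
      object_size = *it;
      pool.erase(it);
    } else {
      ++num_objects;  // No fit: create a new shared object.
    }
    in_flight.emplace(r.last_task, object_size);
  }
  return num_objects;
}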
diff --git a/tensorflow/lite/delegates/gpu/common/memory_management/min_cost_flow_assignment.cc b/tensorflow/lite/delegates/gpu/common/memory_management/min_cost_flow_assignment.cc
index ab15af88429..059c23fab33 100644
--- a/tensorflow/lite/delegates/gpu/common/memory_management/min_cost_flow_assignment.cc
+++ b/tensorflow/lite/delegates/gpu/common/memory_management/min_cost_flow_assignment.cc
@@ -211,14 +211,14 @@ class MinCostFlowSolver {
 // auxiliary flow graph, finds a minimum-cost flow in it, and calculates the
 // assignment of shared objects to tensors, using the result of the flow
 // algorithm.
-Status MinCostFlowAssignment(
+absl::Status MinCostFlowAssignment(
     const std::vector<TensorUsageRecord<size_t>>& usage_records,
     ObjectsAssignment<size_t>* assignment) {
   MinCostFlowSolver solver;
   solver.Build(usage_records);
   solver.Solve();
   solver.CalculateAssignment(assignment);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/common/memory_management/min_cost_flow_assignment.h b/tensorflow/lite/delegates/gpu/common/memory_management/min_cost_flow_assignment.h
index 7e45f83c79e..1284c12c5c2 100644
--- a/tensorflow/lite/delegates/gpu/common/memory_management/min_cost_flow_assignment.h
+++ b/tensorflow/lite/delegates/gpu/common/memory_management/min_cost_flow_assignment.h
@@ -30,7 +30,7 @@ namespace gpu {
 // auxiliary flow graph, finds a minimum-cost flow in it, and calculates the
 // assignment of shared objects to tensors, using the result of the flow
 // algorithm.
-Status MinCostFlowAssignment(
+absl::Status MinCostFlowAssignment(
     const std::vector<TensorUsageRecord<size_t>>& usage_records,
     ObjectsAssignment<size_t>* assignment);
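
Every strategy in this directory shares one calling convention: usage records in, an assignment out, wrapped in absl::Status. A hypothetical wrapper showing that swapping solvers is a one-line change (assuming the delegate headers; min-cost flow is intended to trade solve time for tighter sharing than the greedy strategies):

#include <vector>

#include "absl/status/status.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/min_cost_flow_assignment.h"

absl::Status PickAssignment(
    const std::vector<tflite::gpu::TensorUsageRecord<size_t>>& records,
    tflite::gpu::ObjectsAssignment<size_t>* result) {
  // Any other *Assignment function from this directory drops in here.
  return tflite::gpu::MinCostFlowAssignment(records, result);
}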
 
diff --git a/tensorflow/lite/delegates/gpu/common/memory_management/naive_assignment.h b/tensorflow/lite/delegates/gpu/common/memory_management/naive_assignment.h
index 94cd41ed9a5..8a00c67d853 100644
--- a/tensorflow/lite/delegates/gpu/common/memory_management/naive_assignment.h
+++ b/tensorflow/lite/delegates/gpu/common/memory_management/naive_assignment.h
@@ -30,7 +30,7 @@ namespace gpu {
 // The problem of memory management is NP-complete. This implements a
 // naive algorithm that assigns each tensor to a separate object in memory.
 template <typename TensorSizeT>
-Status NaiveAssignment(
+absl::Status NaiveAssignment(
     const std::vector<TensorUsageRecord<TensorSizeT>>& usage_records,
     ObjectsAssignment<TensorSizeT>* assignment) {
   assignment->object_sizes.resize(usage_records.size());
@@ -40,7 +40,7 @@ Status NaiveAssignment(
     assignment->object_ids[i] = i;
     assignment->object_sizes[i] = record.tensor_size;
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gpu
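
Because NaiveAssignment maps tensor i to object i, its footprint is exactly the sum of tensor sizes, i.e. the upper bound the sharing strategies above try to undercut. A standalone analogue (hypothetical name):

#include <cstddef>
#include <numeric>
#include <vector>

// With records of sizes {32, 16, 64}, the naive footprint is 112 bytes even
// if the three usage intervals never overlap.
size_t NaiveFootprint(const std::vector<size_t>& tensor_sizes) {
  return std::accumulate(tensor_sizes.begin(), tensor_sizes.end(), size_t{0});
}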
diff --git a/tensorflow/lite/delegates/gpu/common/model.h b/tensorflow/lite/delegates/gpu/common/model.h
index 6989584a24c..2e38bcc5f3f 100644
--- a/tensorflow/lite/delegates/gpu/common/model.h
+++ b/tensorflow/lite/delegates/gpu/common/model.h
@@ -136,33 +136,33 @@ class Graph {
   // for a value. If the value had another producer, the producer is
   // reassigned appropriately. If the value didn't have a producer, it is
   // removed from the graph's inputs.
-  virtual Status SetProducer(NodeId producer, ValueId value) = 0;
+  virtual absl::Status SetProducer(NodeId producer, ValueId value) = 0;
 
   // Removes a producer for the given value. The value becomes producer-less
   // and therefore becomes a graph input.
-  virtual Status RemoveProducer(ValueId value) = 0;
+  virtual absl::Status RemoveProducer(ValueId value) = 0;
 
   // Sets a consumer for the given value. There could be multiple consumers
   // for a value.
-  virtual Status AddConsumer(NodeId consumer, ValueId value) = 0;
+  virtual absl::Status AddConsumer(NodeId consumer, ValueId value) = 0;
 
   // Replaces an input value for the given node.
-  virtual Status ReplaceInput(NodeId node, ValueId old_value,
-                              ValueId new_value) = 0;
+  virtual absl::Status ReplaceInput(NodeId node, ValueId old_value,
+                                    ValueId new_value) = 0;
 
   // Removes a consumer for the given value. If the value does not have any
   // consumers, it becomes a graph output.
-  virtual Status RemoveConsumer(NodeId consumer, ValueId value) = 0;
+  virtual absl::Status RemoveConsumer(NodeId consumer, ValueId value) = 0;
 
   // Removes node from this graph. For all input values this node will be
   // removed from consumers and for all output values a producer will be
   // removed.
-  virtual Status DeleteNode(NodeId id) = 0;
+  virtual absl::Status DeleteNode(NodeId id) = 0;
 
   // Removes value from this graph. It will be removed from the inputs of all
   // dependent nodes. A node that was a producer of this value will lose its
   // output.
-  virtual Status DeleteValue(ValueId id) = 0;
+  virtual absl::Status DeleteValue(ValueId id) = 0;
 };
 
 // Implementation of a Graph interface. It keeps values and nodes referenced by
@@ -268,7 +268,7 @@ class Model : public Graph<TensorT> {
     return values_[id].consumers;
   }
 
-  Status SetProducer(NodeId producer, ValueId value) final {
+  absl::Status SetProducer(NodeId producer, ValueId value) final {
     ValueDef* v;
     RETURN_IF_ERROR(LookupValue(value, &v));
     Value<TensorT>* value_ptr = v->value.get();
@@ -278,12 +278,13 @@ class Model : public Graph<TensorT> {
 
     // check if this value has the same producer already
     if (node_ptr == v->producer) {
-      return InvalidArgumentError("Node is already a producer of the value");
+      return absl::InvalidArgumentError(
+          "Node is already a producer of the value");
     }
 
     // Check if the node is a consumer of this value.
     if (IsInput(producer, value)) {
-      return InvalidArgumentError("Node is a consumer of the value");
+      return absl::InvalidArgumentError("Node is a consumer of the value");
     }
     // TODO(akulik): detect circular dependency?
 
@@ -293,22 +294,23 @@ class Model : public Graph<TensorT> {
     }
     v->producer = node_ptr;
     n->outputs.push_back(value_ptr);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status RemoveProducer(ValueId value) final {
+  absl::Status RemoveProducer(ValueId value) final {
     ValueDef* v;
     RETURN_IF_ERROR(LookupValue(value, &v));
     Value<TensorT>* value_ptr = v->value.get();
     if (v->producer == nullptr) {
-      return InvalidArgumentError("Value does not have a producer");
+      return absl::InvalidArgumentError("Value does not have a producer");
     }
     Erase(&nodes_[v->producer->id].outputs, value_ptr);
     v->producer = nullptr;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status ReplaceInput(NodeId node, ValueId old_value, ValueId new_value) final {
+  absl::Status ReplaceInput(NodeId node, ValueId old_value,
+                            ValueId new_value) final {
     ValueDef* v_old;
     RETURN_IF_ERROR(LookupValue(old_value, &v_old));
     Value<TensorT>* value_old_ptr = v_old->value.get();
@@ -321,17 +323,17 @@ class Model : public Graph<TensorT> {
 
     // Check if the node is a consumer of old_value.
     if (!IsInput(node, old_value)) {
-      return InvalidArgumentError("old_value must be input of node.");
+      return absl::InvalidArgumentError("old_value must be input of node.");
     }
 
     // Check if the node is not a consumer of new_value.
     if (IsInput(node, new_value)) {
-      return InvalidArgumentError("new_value can not be input of node.");
+      return absl::InvalidArgumentError("new_value can not be input of node.");
     }
 
     // Check if this value has the same producer already
     if (node_ptr == v_new->producer) {
-      return InvalidArgumentError("new_value can not be output of node.");
+      return absl::InvalidArgumentError("new_value can not be output of node.");
     }
 
     for (int i = 0; i < n->inputs.size(); ++i) {
@@ -342,10 +344,10 @@ class Model : public Graph<TensorT> {
     }
     v_new->consumers.push_back(node_ptr);
     Erase(&v_old->consumers, node_ptr);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status AddConsumer(NodeId consumer, ValueId value) final {
+  absl::Status AddConsumer(NodeId consumer, ValueId value) final {
     ValueDef* v;
     RETURN_IF_ERROR(LookupValue(value, &v));
     Value<TensorT>* value_ptr = v->value.get();
@@ -355,20 +357,21 @@ class Model : public Graph<TensorT> {
 
     // check if this value has the same producer already
     if (node_ptr == v->producer) {
-      return InvalidArgumentError("Node is a producer of the value");
+      return absl::InvalidArgumentError("Node is a producer of the value");
     }
 
     // check if this value has the same consumer already
     if (IsInput(consumer, value)) {
-      return InvalidArgumentError("Node is already a consumer of the value");
+      return absl::InvalidArgumentError(
+          "Node is already a consumer of the value");
     }
 
     n->inputs.push_back(value_ptr);
     v->consumers.push_back(node_ptr);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status RemoveConsumer(NodeId consumer, ValueId value) final {
+  absl::Status RemoveConsumer(NodeId consumer, ValueId value) final {
     ValueDef* v;
     RETURN_IF_ERROR(LookupValue(value, &v));
     Value<TensorT>* value_ptr = v->value.get();
@@ -376,14 +379,14 @@ class Model : public Graph<TensorT> {
     RETURN_IF_ERROR(LookupNode(consumer, &n));
     Node* node_ptr = n->node.get();
     if (!IsInput(consumer, value)) {
-      return InvalidArgumentError("Node is not a consumer of the value");
+      return absl::InvalidArgumentError("Node is not a consumer of the value");
     }
     Erase(&n->inputs, value_ptr);
     Erase(&v->consumers, node_ptr);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status DeleteNode(NodeId id) final {
+  absl::Status DeleteNode(NodeId id) final {
     NodeDef* n;
     RETURN_IF_ERROR(LookupNode(id, &n));
     Node* node_ptr = n->node.get();
@@ -396,10 +399,10 @@ class Model : public Graph<TensorT> {
     n->inputs.clear();
     n->outputs.clear();
     n->node.reset();
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status DeleteValue(ValueId id) final {
+  absl::Status DeleteValue(ValueId id) final {
     ValueDef* v;
     RETURN_IF_ERROR(LookupValue(id, &v));
     Value<TensorT>* value_ptr = v->value.get();
@@ -414,10 +417,10 @@ class Model : public Graph<TensorT> {
     v->producer = nullptr;
     v->consumers.clear();
     v->value.reset();
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status MakeExactCopy(Model<TensorT>* model) const {
+  absl::Status MakeExactCopy(Model<TensorT>* model) const {
     model->nodes_.clear();
     model->values_.clear();
     model->name_ = name_;
@@ -440,7 +443,7 @@ class Model : public Graph<TensorT> {
         }
       }
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
@@ -475,29 +478,29 @@ class Model : public Graph<TensorT> {
   }
 
   // @return non-nullptr NodeDef that has a valid Node, or an error
-  Status LookupNode(NodeId id, NodeDef** node_def) {
+  absl::Status LookupNode(NodeId id, NodeDef** node_def) {
     if (id >= nodes_.size()) {
-      return OutOfRangeError("NodeId is out of range");
+      return absl::OutOfRangeError("NodeId is out of range");
     }
     auto& n = nodes_[id];
     if (!n.node) {
-      return OutOfRangeError("Node is already deleted");
+      return absl::OutOfRangeError("Node is already deleted");
     }
     *node_def = &n;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   // @return non-nullptr ValueDef that has a valid Value, or an error
-  Status LookupValue(ValueId id, ValueDef** value_def) {
+  absl::Status LookupValue(ValueId id, ValueDef** value_def) {
     if (id >= values_.size()) {
-      return OutOfRangeError("ValueId is out of range");
+      return absl::OutOfRangeError("ValueId is out of range");
     }
     auto& v = values_[id];
     if (!v.value) {
-      return OutOfRangeError("Value is already deleted");
+      return absl::OutOfRangeError("Value is already deleted");
     }
     *value_def = &v;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   template <typename Pred>
@@ -537,14 +540,14 @@ class Model : public Graph<TensorT> {
 // outputs that are consumed only by to_keep. In that case, to_keep inherits
 // all of to_remove's inputs.
 template <typename TensorT>
-Status RemovePrecedingNode(Graph<TensorT>* graph, const Node* to_remove,
-                           const Node* to_keep) {
+absl::Status RemovePrecedingNode(Graph<TensorT>* graph, const Node* to_remove,
+                                 const Node* to_keep) {
   // Make sure all outputs from to_remove are consumed by to_keep.
   for (auto output : graph->FindOutputs(to_remove->id)) {
     auto consumers = graph->FindConsumers(output->id);
     if (consumers.size() > 1 ||
         (consumers.size() == 1 && consumers[0] != to_keep)) {
-      return InvalidArgumentError(
+      return absl::InvalidArgumentError(
           "Output from to_remove node has other consumers");
     }
   }
@@ -562,13 +565,13 @@ Status RemovePrecedingNode(Graph<TensorT>* graph, const Node* to_remove,
 // Removes to_remove node that follows to_keep node only if to_remove has inputs
 // that are produced by to_keep. to_keep inherits all to_remove inputs.
 template <typename TensorT>
-Status RemoveFollowingNode(Graph<TensorT>* graph, const Node* to_remove,
-                           const Node* to_keep) {
+absl::Status RemoveFollowingNode(Graph<TensorT>* graph, const Node* to_remove,
+                                 const Node* to_keep) {
   // Make sure all inputs to to_remove are produced by to_keep.
   for (auto input : graph->FindInputs(to_remove->id)) {
     Node* producer = graph->FindProducer(input->id);
     if (producer->id != to_keep->id) {
-      return InvalidArgumentError("To_remove node has other inputs");
+      return absl::InvalidArgumentError("To_remove node has other inputs");
     }
   }
 
@@ -584,12 +587,12 @@ Status RemoveFollowingNode(Graph<TensorT>* graph, const Node* to_remove,
 // Removes to_remove node.
 // Requires that the node has one input and one output.
 template <typename TensorT>
-Status RemoveOneInputOneOutputNode(Graph<TensorT>* graph,
-                                   const Node* to_remove) {
+absl::Status RemoveOneInputOneOutputNode(Graph<TensorT>* graph,
+                                         const Node* to_remove) {
   auto inputs = graph->FindInputs(to_remove->id);
   auto outputs = graph->FindOutputs(to_remove->id);
   if (inputs.size() != 1 || outputs.size() != 1) {
-    return InvalidArgumentError(
+    return absl::InvalidArgumentError(
         "To_remove node must have 1 input and 1 output");
   }
   auto input_id = inputs[0]->id;
@@ -604,26 +607,26 @@ Status RemoveOneInputOneOutputNode(Graph<TensorT>* graph,
   if (!producer && consumers.empty()) {
     RETURN_IF_ERROR(graph->DeleteValue(input_id));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <typename TensorT>
-Status AddOutput(Graph<TensorT>* graph, const Node* from_node,
-                 Value<TensorT>** output) {
+absl::Status AddOutput(Graph<TensorT>* graph, const Node* from_node,
+                       Value<TensorT>** output) {
   auto link = graph->NewValue();
   RETURN_IF_ERROR(graph->SetProducer(from_node->id, link->id));
   *output = link;
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <typename TensorT>
-Status ConnectTwoNodes(Graph<TensorT>* graph, const Node* from_node,
-                       const Node* to_node, Value<TensorT>** output) {
+absl::Status ConnectTwoNodes(Graph<TensorT>* graph, const Node* from_node,
+                             const Node* to_node, Value<TensorT>** output) {
   Value<TensorT>* link;
   RETURN_IF_ERROR(AddOutput(graph, from_node, &link));
   RETURN_IF_ERROR(graph->AddConsumer(to_node->id, link->id));
   *output = link;
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 using GraphFloat32 = Model<TensorRef<BHWC>>;
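
A sketch of the helpers above in use, assuming the delegate's status.h for RETURN_IF_ERROR: chain two operations through a fresh value and surface every failure as absl::Status:

#include "absl/status/status.h"
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"

absl::Status BuildChain(tflite::gpu::GraphFloat32* graph) {
  tflite::gpu::Node* producer = graph->NewNode();
  tflite::gpu::Node* consumer = graph->NewNode();
  // ConnectTwoNodes creates the intermediate value, sets its producer, and
  // registers the consumer.
  tflite::gpu::Value<tflite::gpu::TensorRef<tflite::gpu::BHWC>>* link = nullptr;
  RETURN_IF_ERROR(
      tflite::gpu::ConnectTwoNodes(graph, producer, consumer, &link));
  // A value with no producer is implicitly a graph input.
  auto* input = graph->NewValue();
  RETURN_IF_ERROR(graph->AddConsumer(producer->id, input->id));
  return absl::OkStatus();
}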
diff --git a/tensorflow/lite/delegates/gpu/common/model_builder.cc b/tensorflow/lite/delegates/gpu/common/model_builder.cc
index b37c3542413..94899efe91e 100644
--- a/tensorflow/lite/delegates/gpu/common/model_builder.cc
+++ b/tensorflow/lite/delegates/gpu/common/model_builder.cc
@@ -65,9 +65,9 @@ namespace {
 //   node(output)
 // will turn into:
 //   node(copy(output)) <- passthrough_node(output)
-Status NewPassthroughNode(GraphFloat32* graph, Node* node,
-                          const Value<TensorRef<BHWC>>* output,
-                          Node** passthru_node) {
+absl::Status NewPassthroughNode(GraphFloat32* graph, Node* node,
+                                const Value<TensorRef<BHWC>>* output,
+                                Node** passthru_node) {
   *passthru_node = graph->NewNode();
   // Make copies for every output in the original node.
   RETURN_IF_ERROR(graph->SetProducer((*passthru_node)->id, output->id));
@@ -76,18 +76,18 @@ Status NewPassthroughNode(GraphFloat32* graph, Node* node,
   RETURN_IF_ERROR(graph->AddConsumer((*passthru_node)->id, copy_output->id));
   copy_output->tensor = output->tensor;
   copy_output->tensor.ref = -1;
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <typename T>
-Status CreateVectorCopyData(const TfLiteTensor& tensor, T* tensor_data) {
+absl::Status CreateVectorCopyData(const TfLiteTensor& tensor, T* tensor_data) {
   if (tensor.bytes % sizeof(T) != 0) {
-    return InvalidArgumentError(
+    return absl::InvalidArgumentError(
         absl::StrCat("Input data size ", tensor.bytes,
                      " is not aligned to expected type: ", sizeof(T)));
   }
   std::memcpy(tensor_data, tensor.data.uint8, tensor.bytes);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 void ConvertFloat16ToFloat32(size_t num_elements, const uint16_t* src,
@@ -98,8 +98,8 @@ void ConvertFloat16ToFloat32(size_t num_elements, const uint16_t* src,
 }
 
 template <>
-Status CreateVectorCopyData<float>(const TfLiteTensor& tensor,
-                                   float* tensor_data) {
+absl::Status CreateVectorCopyData<float>(const TfLiteTensor& tensor,
+                                         float* tensor_data) {
   switch (tensor.type) {
     case kTfLiteFloat32:
       std::memcpy(tensor_data, tensor.data.f, tensor.bytes);
@@ -110,104 +110,97 @@ Status CreateVectorCopyData<float>(const TfLiteTensor& tensor,
           reinterpret_cast<uint16_t const*>(tensor.data.f16), tensor_data);
       break;
     default:
-      return InvalidArgumentError("Unsupported data type for float32 tensor");
+      return absl::InvalidArgumentError(
+          "Unsupported data type for float32 tensor");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <typename ShapeT>
-Status SetAllDimensions(const TfLiteIntArray* dimensions, ShapeT* shape);
+absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, ShapeT* shape);
 
 template <>
-Status SetAllDimensions<Scalar>(const TfLiteIntArray* dimensions,
-                                Scalar* shape) {
+absl::Status SetAllDimensions<Scalar>(const TfLiteIntArray* dimensions,
+                                      Scalar* shape) {
   if (dimensions->size < 0) {
-    return InvalidArgumentError("Invalid Scalar dimensions");
+    return absl::InvalidArgumentError("Invalid Scalar dimensions");
   }
   for (int i = 0; i < dimensions->size; ++i) {
     if (dimensions->data[i] != 1) {
-      return InvalidArgumentError("Dimension can not be reduced to scalar.");
+      return absl::InvalidArgumentError(
+          "Dimension can not be reduced to scalar.");
     }
   }
   shape->v = 1;
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <>
-Status SetAllDimensions<Linear>(const TfLiteIntArray* dimensions,
-                                Linear* shape) {
+absl::Status SetAllDimensions<Linear>(const TfLiteIntArray* dimensions,
+                                      Linear* shape) {
   if (dimensions->size <= 0) {
-    return InvalidArgumentError("Dimension is empty.");
+    return absl::InvalidArgumentError("Dimension is empty.");
   }
   for (int i = 0; i < dimensions->size - 1; ++i) {
     if (dimensions->data[i] != 1) {
-      return InvalidArgumentError("Dimension can not be reduced to linear.");
+      return absl::InvalidArgumentError(
+          "Dimension can not be reduced to linear.");
     }
   }
   shape->v = dimensions->data[dimensions->size - 1];
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <>
-Status SetAllDimensions<HWC>(const TfLiteIntArray* dimensions, HWC* shape) {
+absl::Status SetAllDimensions<HWC>(const TfLiteIntArray* dimensions,
+                                   HWC* shape) {
   if (dimensions->size != 4) {
-    return InvalidArgumentError("Dimensions are not HWC");
+    return absl::InvalidArgumentError("Dimensions are not HWC");
   }
   if (dimensions->data[0] != 1) {
-    return UnimplementedError("Batch size is not equal to 1.");
+    return absl::UnimplementedError("Batch size is not equal to 1.");
   }
   shape->h = dimensions->data[1];
   shape->w = dimensions->data[2];
   shape->c = dimensions->data[3];
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <>
-Status SetAllDimensions<HW>(const TfLiteIntArray* dimensions, HW* shape) {
+absl::Status SetAllDimensions<HW>(const TfLiteIntArray* dimensions, HW* shape) {
   if (dimensions->size != 2) {
-    return InvalidArgumentError("Dimensions are not HW");
+    return absl::InvalidArgumentError("Dimensions are not HW");
   }
   shape->h = dimensions->data[0];
   shape->w = dimensions->data[1];
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <>
-Status SetAllDimensions<OHWI>(const TfLiteIntArray* dimensions, OHWI* shape) {
+absl::Status SetAllDimensions<OHWI>(const TfLiteIntArray* dimensions,
+                                    OHWI* shape) {
   if (dimensions->size != 4) {
-    return InvalidArgumentError(
+    return absl::InvalidArgumentError(
         absl::StrCat("Dimensions are not OHWI: ", dimensions->size));
   }
   shape->o = dimensions->data[0];
   shape->h = dimensions->data[1];
   shape->w = dimensions->data[2];
   shape->i = dimensions->data[3];
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <>
-Status SetAllDimensions<IHWO>(const TfLiteIntArray* dimensions, IHWO* shape) {
+absl::Status SetAllDimensions<BHWC>(const TfLiteIntArray* dimensions,
+                                    BHWC* shape) {
   if (dimensions->size != 4) {
-    return InvalidArgumentError(
-        absl::StrCat("Dimensions are not IHWO: ", dimensions->size));
-  }
-  shape->i = dimensions->data[0];
-  shape->h = dimensions->data[1];
-  shape->w = dimensions->data[2];
-  shape->o = dimensions->data[3];
-  return OkStatus();
-}
-
-template <>
-Status SetAllDimensions<BHWC>(const TfLiteIntArray* dimensions, BHWC* shape) {
-  if (dimensions->size != 4) {
-    return InvalidArgumentError("Dimensions are not BHWC");
+    return absl::InvalidArgumentError("Dimensions are not BHWC");
   }
   shape->b = dimensions->data[0];
   shape->h = dimensions->data[1];
   shape->w = dimensions->data[2];
   shape->c = dimensions->data[3];
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 DataType ToDataType(TfLiteType type) {
@@ -253,46 +246,46 @@ int GetNumberOfRuntimeOutputsForNode(const TfLiteContext* context,
   return number_of_runtime_outputs;
 }
 
-Status CheckTensorIsAvailable(const TfLiteContext* context,
-                              const TfLiteNode* tflite_node, int idx) {
+absl::Status CheckTensorIsAvailable(const TfLiteContext* context,
+                                    const TfLiteNode* tflite_node, int idx) {
   // If tensor id is in range, it's guaranteed that it'll be available.
   if (idx >= tflite_node->inputs->size) {
-    return OutOfRangeError(
+    return absl::OutOfRangeError(
         absl::StrFormat("Requested index goes beyond array size (%d vs %d).",
                         idx, tflite_node->inputs->size));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CheckInputsOutputs(const TfLiteContext* context,
-                          const TfLiteNode* tflite_node, int runtime_inputs,
-                          int outputs) {
+absl::Status CheckInputsOutputs(const TfLiteContext* context,
+                                const TfLiteNode* tflite_node,
+                                int runtime_inputs, int outputs) {
   int runtime_inputs_from_model =
       GetNumberOfRuntimeInputsForNode(context, tflite_node);
   if (runtime_inputs_from_model != runtime_inputs) {
-    return InternalError(absl::StrFormat(
+    return absl::InternalError(absl::StrFormat(
         "Expected %d runtime input tensor(s), but node has %d runtime "
         "input(s).",
         runtime_inputs, runtime_inputs_from_model));
   }
   int runtime_outputs = GetNumberOfRuntimeOutputsForNode(context, tflite_node);
   if (runtime_outputs != outputs) {
-    return InternalError(
+    return absl::InternalError(
         absl::StrFormat("Expected %d output tensor(s), but node has %d "
                         "output(s).",
                         outputs, runtime_outputs));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CheckInputsConstsOutputs(const TfLiteContext* context,
-                                const TfLiteNode* tflite_node,
-                                int runtime_inputs, int const_inputs,
-                                int outputs) {
+absl::Status CheckInputsConstsOutputs(const TfLiteContext* context,
+                                      const TfLiteNode* tflite_node,
+                                      int runtime_inputs, int const_inputs,
+                                      int outputs) {
   int const_inputs_from_model =
       GetNumberOfConstInputsForNode(context, tflite_node);
   if (const_inputs_from_model != const_inputs) {
-    return InternalError(absl::StrFormat(
+    return absl::InternalError(absl::StrFormat(
         "Expected %d const input tensor(s), but node has %d const "
         "input(s).",
         const_inputs, const_inputs_from_model));
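
These Check* helpers are built to compose: RETURN_IF_ERROR hands the first non-OK absl::Status back to the caller, so an IsSupported implementation reads as a linear checklist that reports the earliest violated precondition. A hypothetical validator in that style, as it would read inside this file (not any specific parser below):

absl::Status ValidateSomeConvNode(const TfLiteContext* context,
                                  const TfLiteNode* tflite_node,
                                  const TfLiteRegistration* registration) {
  RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
  RETURN_IF_ERROR(CheckInputsOutputs(context, tflite_node,
                                     /*runtime_inputs=*/1, /*outputs=*/1));
  RETURN_IF_ERROR(CheckTensorIsAvailable(context, tflite_node, 1));
  return absl::OkStatus();  // Reached only when every precondition held.
}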
@@ -310,9 +303,9 @@ class ObjectReader {
         tflite_node_(tflite_node),
         tensor_to_value_(tensor_to_value) {}
 
-  Status ReadValue(uint32_t idx, Value<TensorRef<BHWC>>** value) const {
+  absl::Status ReadValue(uint32_t idx, Value<TensorRef<BHWC>>** value) const {
     if (idx >= tflite_node_->inputs->size) {
-      return OutOfRangeError(
+      return absl::OutOfRangeError(
           absl::StrCat("ReadValue: input tensor index: ", idx));
     }
     return ReadValueByTensorIdx(tflite_node_->inputs->data[idx], value);
@@ -322,21 +315,21 @@ class ObjectReader {
     return GetNumberOfRuntimeInputsForNode(context_, tflite_node_);
   }
 
-  Status GetTensorDims(uint32_t idx, TfLiteIntArray* dimensions) const {
+  absl::Status GetTensorDims(uint32_t idx, TfLiteIntArray* dimensions) const {
     if (idx >= tflite_node_->inputs->size) {
-      return OutOfRangeError(absl::StrCat("Input tensor index: ", idx));
+      return absl::OutOfRangeError(absl::StrCat("Input tensor index: ", idx));
     }
     const int tensor_idx = tflite_node_->inputs->data[idx];
     if (tensor_idx < 0 || tensor_idx >= context_->tensors_size) {
-      return OutOfRangeError(absl::StrCat("Tensor index: ", tensor_idx));
+      return absl::OutOfRangeError(absl::StrCat("Tensor index: ", tensor_idx));
     }
     const TfLiteTensor& tflite_tensor = context_->tensors[tensor_idx];
     *dimensions = *tflite_tensor.dims;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   template <typename TensorT>
-  Status ReadTensor(uint32_t idx, TensorT* t) const {
+  absl::Status ReadTensor(uint32_t idx, TensorT* t) const {
     RETURN_IF_ERROR(CheckTensorIsAvailable(context_, tflite_node_, idx));
     const int32_t tensor_idx = tflite_node_->inputs->data[idx];
     const TfLiteTensor* tflite_tensor = context_->tensors + tensor_idx;
@@ -349,9 +342,9 @@ class ObjectReader {
     return SetAllDimensions(tflite_tensor->dims, &t->shape);
   }
 
-  Status AddOutput(const Node* node, int id) {
+  absl::Status AddOutput(const Node* node, int id) {
     if (tflite_node_->outputs->size <= id) {
-      return InvalidArgumentError(absl::StrCat(
+      return absl::InvalidArgumentError(absl::StrCat(
           "Data id ", id, " must be less than tflite node outputs size ",
           tflite_node_->outputs->size));
     }
@@ -359,32 +352,32 @@ class ObjectReader {
     Value<TensorRef<BHWC>>* value;
     RETURN_IF_ERROR(ReadValueByTensorIdx(output_tensor_idx, &value));
     RETURN_IF_ERROR(graph_->SetProducer(node->id, value->id));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status AddOutputs(const Node* node) {
+  absl::Status AddOutputs(const Node* node) {
     for (int i = 0; i < tflite_node_->outputs->size; ++i) {
       RETURN_IF_ERROR(AddOutput(node, i));
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status AddInput(const Node* node, uint32_t idx) {
+  absl::Status AddInput(const Node* node, uint32_t idx) {
     Value<TensorRef<BHWC>>* input;
     RETURN_IF_ERROR(ReadValue(idx, &input));
     return graph_->AddConsumer(node->id, input->id);
   }
 
-  Status ReadValueByTensorIdx(uint32_t tensor_idx,
-                              Value<TensorRef<BHWC>>** value) const {
+  absl::Status ReadValueByTensorIdx(uint32_t tensor_idx,
+                                    Value<TensorRef<BHWC>>** value) const {
     if (tensor_idx >= tensor_to_value_->size()) {
-      return OutOfRangeError(
+      return absl::OutOfRangeError(
           absl::StrCat("ReadValue: input tensor index: ", tensor_idx));
     }
     if ((*tensor_to_value_)[tensor_idx] == nullptr) {
       const TfLiteTensor& tflite_tensor = context_->tensors[tensor_idx];
       if (tflite::IsConstantTensor(&tflite_tensor)) {
-        return NotFoundError(absl::StrCat(
+        return absl::NotFoundError(absl::StrCat(
             "ReadValue: value is a constant tensor: ", tensor_idx));
       }
       Value<TensorRef<BHWC>>* value = graph_->NewValue();
@@ -394,7 +387,7 @@ class ObjectReader {
       (*tensor_to_value_)[tensor_idx] = value;
     }
     *value = (*tensor_to_value_)[tensor_idx];
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   TfLiteTensor* GetInputTensor(int index) const {
@@ -409,9 +402,9 @@ class ObjectReader {
                : nullptr;
   }
 
-  Status VerifyInputsConstsOutputs(const TfLiteNode* tflite_node,
-                                   int runtime_inputs, int const_inputs,
-                                   int outputs) {
+  absl::Status VerifyInputsConstsOutputs(const TfLiteNode* tflite_node,
+                                         int runtime_inputs, int const_inputs,
+                                         int outputs) {
     return CheckInputsConstsOutputs(context_, tflite_node, runtime_inputs,
                                     const_inputs, outputs);
   }
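
ObjectReader hides all tensor-index bookkeeping from the parsers that follow. A sketch of a typical Parse() body for a hypothetical one-input op, as it would read inside this file:

absl::Status ParseUnaryOp(GraphFloat32* graph, ObjectReader* reader) {
  Node* node = graph->NewNode();
  // node->operation.type would be set to the operation being parsed.
  RETURN_IF_ERROR(reader->AddInput(node, 0));  // Runtime input 0.
  RETURN_IF_ERROR(reader->AddOutputs(node));   // Every TFLite output.
  Tensor<Linear, DataType::FLOAT32> bias;      // Optional constant input.
  reader->ReadTensor(1, &bias).IgnoreError();  // Same pattern as above.
  return absl::OkStatus();
}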
@@ -430,28 +423,30 @@ class TFLiteOperationParser {
 
   // Parses TFLite operation. This method allows expanding fused operations
   // into more than one node.
-  virtual Status Parse(const TfLiteNode* tflite_node,
-                       const TfLiteRegistration* registration,
-                       GraphFloat32* graph, ObjectReader* reader) = 0;
+  virtual absl::Status Parse(const TfLiteNode* tflite_node,
+                             const TfLiteRegistration* registration,
+                             GraphFloat32* graph, ObjectReader* reader) = 0;
 
   // Verifies whether the passed tflite node may be built by the GPU delegate.
-  virtual Status IsSupported(const TfLiteContext* context,
-                             const TfLiteNode* tflite_node,
-                             const TfLiteRegistration* registration) = 0;
+  virtual absl::Status IsSupported(const TfLiteContext* context,
+                                   const TfLiteNode* tflite_node,
+                                   const TfLiteRegistration* registration) = 0;
 };
 
-Status IsActivationSupported(TfLiteFusedActivation fused_activation) {
+absl::Status IsActivationSupported(TfLiteFusedActivation fused_activation) {
   switch (fused_activation) {
     case kTfLiteActNone:
     case kTfLiteActRelu:
     case kTfLiteActRelu1:
     case kTfLiteActRelu6:
     case kTfLiteActTanh:
-      return OkStatus();
+      return absl::OkStatus();
     case kTfLiteActSignBit:
-      return UnimplementedError("TfLiteFusedActivation.kTfLiteActSignBit");
+      return absl::UnimplementedError(
+          "TfLiteFusedActivation.kTfLiteActSignBit");
     case kTfLiteActSigmoid:
-      return UnimplementedError("TfLiteFusedActivation.kTfLiteActSigmoid");
+      return absl::UnimplementedError(
+          "TfLiteFusedActivation.kTfLiteActSigmoid");
 
       // Do not add default; we want compilation error rather than run-time
       // error.
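
The no-default convention above deserves a spelled-out example: with exhaustive-switch warnings treated as errors, adding a TfLiteFusedActivation enumerator forces every such switch to be revisited at compile time instead of falling through at run time. The same guard in miniature:

enum class Mode { kA, kB };

const char* Name(Mode mode) {
  switch (mode) {
    case Mode::kA:
      return "A";
    case Mode::kB:
      return "B";
      // No default on purpose: a new Mode enumerator must be handled here.
  }
  return "unreachable";
}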
@@ -461,15 +456,15 @@ Status IsActivationSupported(TfLiteFusedActivation fused_activation) {
 // If a fused activation is present, another node is created whose output is
 // identical to the given node's output. The new activation node then depends
 // on the given node's output.
-Status MaybeFuseActivation(TfLiteFusedActivation fused_activation,
-                           const std::vector<uint32_t>& output_indices,
-                           GraphFloat32* graph, Node* node) {
+absl::Status MaybeFuseActivation(TfLiteFusedActivation fused_activation,
+                                 const std::vector<uint32_t>& output_indices,
+                                 GraphFloat32* graph, Node* node) {
   if (fused_activation == kTfLiteActNone) {
-    return OkStatus();
+    return absl::OkStatus();
   }
   const auto& outputs = graph->FindOutputs(node->id);
   if (outputs.empty()) {
-    return InternalError("Empty outputs in fused node");
+    return absl::InternalError("Empty outputs in fused node");
   }
   switch (fused_activation) {
     case kTfLiteActRelu:
@@ -497,16 +492,16 @@ Status MaybeFuseActivation(TfLiteFusedActivation fused_activation,
       }
       break;
     default:
-      return NotFoundError(
+      return absl::NotFoundError(
           absl::StrCat("Unsupported fused activation: ", fused_activation));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status MaybeFuseActivationToTheSingleOutput(
+absl::Status MaybeFuseActivationToTheSingleOutput(
     TfLiteFusedActivation fused_activation, GraphFloat32* graph, Node* node) {
   if (graph->FindOutputs(node->id).size() != 1) {
-    return InternalError("Number of outputs exceeds 1");
+    return absl::InternalError("Number of outputs exceeds 1");
   }
   return MaybeFuseActivation(fused_activation, {0}, graph, node);
 }
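
The expansion performed here turns a fused activation into a second graph node: NewPassthroughNode re-points the original output to the activation node and inserts a copy value between the two. A hedged sketch of the RELU branch in isolation (attribute fields as used elsewhere in this file, not a verbatim excerpt):

absl::Status AppendRelu(GraphFloat32* graph, Node* node) {
  const auto& outputs = graph->FindOutputs(node->id);
  if (outputs.empty()) {
    return absl::InternalError("Empty outputs in fused node");
  }
  Node* activation = nullptr;
  RETURN_IF_ERROR(NewPassthroughNode(graph, node, outputs[0], &activation));
  activation->operation.type = ToString(OperationType::RELU);
  ReLUAttributes attr;
  attr.clip = 0;  // Plain RELU; a fused Relu6 would set clip = 6.
  activation->operation.attributes = attr;
  return absl::OkStatus();
}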
@@ -524,9 +519,10 @@ void UpdatePadding(const TfLitePadding& padding, const BHWC& input_shape,
   }
 }
 
-Status GetFullyConnectedAttributes(int weights_tensor_id, int bias_tensor_id,
-                                   ObjectReader* reader,
-                                   FullyConnectedAttributes* attr) {
+absl::Status GetFullyConnectedAttributes(int weights_tensor_id,
+                                         int bias_tensor_id,
+                                         ObjectReader* reader,
+                                         FullyConnectedAttributes* attr) {
   Tensor<HW, DataType::FLOAT32> weights;
   RETURN_IF_ERROR(reader->ReadTensor(weights_tensor_id, &weights));
   attr->weights.data = std::move(weights.data);
@@ -537,100 +533,100 @@ Status GetFullyConnectedAttributes(int weights_tensor_id, int bias_tensor_id,
   attr->weights.shape.i = weights.shape.w;
   reader->ReadTensor(bias_tensor_id, &attr->bias).IgnoreError();  // optional
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <typename ParamsT>
-Status RetrieveBuiltinData(const TfLiteNode* tflite_node,
-                           ParamsT** tf_options) {
+absl::Status RetrieveBuiltinData(const TfLiteNode* tflite_node,
+                                 ParamsT** tf_options) {
   const auto* params =
       reinterpret_cast<const ParamsT*>(tflite_node->builtin_data);
   if (!params) {
-    return InternalError("Unable to retrieve builtin_data.");
+    return absl::InternalError("Unable to retrieve builtin_data.");
   }
   *tf_options = const_cast<ParamsT*>(params);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <typename ParamsType>
-Status RetrieveCustomInitialData(const TfLiteNode* tflite_node,
-                                 ParamsType** tf_options) {
+absl::Status RetrieveCustomInitialData(const TfLiteNode* tflite_node,
+                                       ParamsType** tf_options) {
   const auto* params =
       reinterpret_cast<const ParamsType*>(tflite_node->custom_initial_data);
   if (!params) {
-    return InternalError("Unable to retrieve custom_initial_data.");
+    return absl::InternalError("Unable to retrieve custom_initial_data.");
   }
   *tf_options = const_cast<ParamsType*>(params);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CheckMaxSupportedOpVersion(const TfLiteRegistration* registration,
-                                  int max_version) {
+absl::Status CheckMaxSupportedOpVersion(const TfLiteRegistration* registration,
+                                        int max_version) {
   const int op_version = registration->version;
   if (op_version > max_version) {
-    return UnimplementedError(
+    return absl::UnimplementedError(
         absl::StrFormat("Max version supported: %d. Requested version %d.",
                         max_version, op_version));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CheckExactSupportedOpVersion(const TfLiteRegistration* registration,
-                                    int expected_version) {
+absl::Status CheckExactSupportedOpVersion(
+    const TfLiteRegistration* registration, int expected_version) {
   int op_version = registration->version;
   if (op_version != expected_version) {
-    return UnimplementedError(
+    return absl::UnimplementedError(
         absl::StrFormat("Only version %d is supported. Requested version %d.",
                         expected_version, op_version));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CheckKernels(int kernel_h, int kernel_w) {
+absl::Status CheckKernels(int kernel_h, int kernel_w) {
   if (kernel_h <= 0 || kernel_w <= 0) {
-    return InvalidArgumentError(absl::StrFormat(
+    return absl::InvalidArgumentError(absl::StrFormat(
         "Incorrect kernel values: kernel_height = %d, kernel_width = %d.",
         kernel_h, kernel_w));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CheckStrides(int strides_h, int strides_w) {
+absl::Status CheckStrides(int strides_h, int strides_w) {
   if (strides_h <= 0 || strides_w <= 0) {
-    return InvalidArgumentError(absl::StrFormat(
+    return absl::InvalidArgumentError(absl::StrFormat(
         "Incorrect stride values: stride_height = %d, stride_width = %d.",
         strides_h, strides_w));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CheckDilation(int dilation_h, int dilation_w) {
+absl::Status CheckDilation(int dilation_h, int dilation_w) {
   if (dilation_h <= 0 || dilation_w <= 0) {
-    return InvalidArgumentError(
+    return absl::InvalidArgumentError(
         absl::StrFormat("Incorrect dilation values: dilation_factor = %d, "
                         "dilation_factor = %d.",
                         dilation_h, dilation_w));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CheckStridesAndDilation(int strides_h, int strides_w, int dilation_h,
-                               int dilation_w) {
+absl::Status CheckStridesAndDilation(int strides_h, int strides_w,
+                                     int dilation_h, int dilation_w) {
   RETURN_IF_ERROR(CheckStrides(strides_h, strides_w));
   RETURN_IF_ERROR(CheckDilation(dilation_h, dilation_w));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CheckKernelsAndStrides(int kernel_h, int kernel_w, int strides_h,
-                              int strides_w) {
+absl::Status CheckKernelsAndStrides(int kernel_h, int kernel_w, int strides_h,
+                                    int strides_w) {
   RETURN_IF_ERROR(CheckKernels(kernel_h, kernel_w));
   RETURN_IF_ERROR(CheckStrides(strides_h, strides_w));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // Creates a simple node that holds a tensor value.
-Status NewConstNode(TensorFloat32 t, GraphFloat32* graph,
-                    Value<TensorRef<BHWC>>** value) {
+absl::Status NewConstNode(TensorFloat32 t, GraphFloat32* graph,
+                          Value<TensorRef<BHWC>>** value) {
   ConstTensorAttributes attr;
   attr.tensor = std::move(t);
   Node* node = graph->NewNode();
@@ -642,59 +638,59 @@ Status NewConstNode(TensorFloat32 t, GraphFloat32* graph,
   (*value)->tensor.ref = attr.tensor.id;
   (*value)->tensor.type = attr.tensor.kType;
   (*value)->tensor.shape = attr.tensor.shape;
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status ParsePoolingAttributes(const TfLitePoolParams* tf_options,
-                              const BHWC& input_shape,
-                              Pooling2DAttributes* attr) {
+absl::Status ParsePoolingAttributes(const TfLitePoolParams* tf_options,
+                                    const BHWC& input_shape,
+                                    Pooling2DAttributes* attr) {
   attr->kernel = ToHW(tf_options->filter_height, tf_options->filter_width);
   attr->strides = ToHW(tf_options->stride_height, tf_options->stride_width);
   UpdatePadding(tf_options->padding, input_shape, attr);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status ExtractTensorShape(const TfLiteTensor& tflite_tensor, BHWC* bhwc) {
+absl::Status ExtractTensorShape(const TfLiteTensor& tflite_tensor, BHWC* bhwc) {
   const TfLiteIntArray* dims = tflite_tensor.dims;
   switch (dims->size) {
     case 1:
       *bhwc = BHWC(dims->data[0], 1, 1, 1);
-      return OkStatus();
+      return absl::OkStatus();
     case 2:
       *bhwc = BHWC(dims->data[0], 1, 1, dims->data[1]);
-      return OkStatus();
+      return absl::OkStatus();
     case 3:
       *bhwc = BHWC(dims->data[0], 1, dims->data[1], dims->data[2]);
-      return OkStatus();
+      return absl::OkStatus();
     case 4:
       *bhwc = BHWC(dims->data[0], dims->data[1], dims->data[2], dims->data[3]);
-      return OkStatus();
+      return absl::OkStatus();
     default:
-      return InvalidArgumentError(absl::StrCat(
+      return absl::InvalidArgumentError(absl::StrCat(
           "Tensor \"", tflite_tensor.name ? tflite_tensor.name : "nullptr",
           "\" has bad input dims size: ", dims->size, "."));
   }
 }
 
-Status ParseInputsWithConstTensor(Node* node, ObjectReader* reader,
-                                  TensorOrScalar* tensor_or_scalar) {
+absl::Status ParseInputsWithConstTensor(Node* node, ObjectReader* reader,
+                                        TensorOrScalar* tensor_or_scalar) {
   const std::string& opname = node->operation.type;
 
   // Determine runtime/constant tensors.
   const TfLiteTensor* input0 = reader->GetInputTensor(0);
   if (!input0) {
-    return InvalidArgumentError("Couldn't get the 1st input tensor for " +
-                                opname);
+    return absl::InvalidArgumentError("Couldn't get the 1st input tensor for " +
+                                      opname);
   }
   const TfLiteTensor* input1 = reader->GetInputTensor(1);
   if (!input1) {
-    return InvalidArgumentError("Couldn't get the 2nd input tensor for " +
-                                opname);
+    return absl::InvalidArgumentError("Couldn't get the 2nd input tensor for " +
+                                      opname);
   }
   const bool constant_tensor0 = IsConstantTensor(input0);
   const bool constant_tensor1 = IsConstantTensor(input1);
   if (constant_tensor0 && constant_tensor1) {
-    return InvalidArgumentError("No runtime input tensors for " + opname);
+    return absl::InvalidArgumentError("No runtime input tensors for " + opname);
   }
   const bool runtime_tensor0 = !constant_tensor0;
   const bool runtime_tensor1 = !constant_tensor1;
@@ -722,26 +718,26 @@ Status ParseInputsWithConstTensor(Node* node, ObjectReader* reader,
       *tensor_or_scalar = std::move(tensor);
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 class AddOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
     if (tflite_node->inputs->size != 2) {
-      return UnimplementedError("ADD requires two input tensors.");
+      return absl::UnimplementedError("ADD requires two input tensors.");
     }
     // TODO(eignasheva): Add shapes check.
     TfLiteAddParams* tf_options = nullptr;
     return RetrieveBuiltinData(tflite_node, &tf_options);
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     // TFLite currently only supports 2 input ADDs.  Thus, the logic below only
     // considers 2 input cases.  The underlying GPU shader programs can accept
     // more inputs, but the logic below would have to be expanded.
@@ -755,7 +751,7 @@ class AddOperationParser : public TFLiteOperationParser {
     const auto* tf_options =
         reinterpret_cast<const TfLiteAddParams*>(tflite_node->builtin_data);
     if (!tf_options) {
-      return InternalError("Missing tflite params");
+      return absl::InternalError("Missing tflite params");
     }
     return MaybeFuseActivationToTheSingleOutput(tf_options->activation, graph,
                                                 node);
@@ -764,9 +760,9 @@ class AddOperationParser : public TFLiteOperationParser {
 
 class ConcatenationOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
 
     // TODO(eignasheva): add proper tensor availability checking
@@ -776,12 +772,12 @@ class ConcatenationOperationParser : public TFLiteOperationParser {
     // TODO(eignasheva): add axis checking.
     TfLiteConcatenationParams* tf_options = nullptr;
     RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     ConcatAttributes attr;
     // Read inputs first to make sure the const node is added to the graph
     // before the concat node, preserving topological order.
@@ -832,16 +828,16 @@ class ConcatenationOperationParser : public TFLiteOperationParser {
     const auto* tf_options = reinterpret_cast<const TfLiteConcatenationParams*>(
         tflite_node->builtin_data);
     if (!tf_options) {
-      return InternalError("Missing tflite params");
+      return absl::InternalError("Missing tflite params");
     }
     RETURN_IF_ERROR(MaybeFuseActivationToTheSingleOutput(tf_options->activation,
                                                          graph, node));
     node->operation.attributes = attr;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
-  Status SetAxis(const std::vector<BHWC>& input_shapes, Axis* axis) {
+  absl::Status SetAxis(const std::vector<BHWC>& input_shapes, Axis* axis) {
     *axis = Axis::BATCH;
     for (int i = 1; i < input_shapes.size(); i++) {
       if (input_shapes[0].h != input_shapes[i].h &&
@@ -851,7 +847,7 @@ class ConcatenationOperationParser : public TFLiteOperationParser {
         break;
       }
     }
-    if (*axis == Axis::BATCH) return OkStatus();
+    if (*axis == Axis::BATCH) return absl::OkStatus();
     for (int i = 1; i < input_shapes.size(); i++) {
       if (input_shapes[0].b != input_shapes[i].b &&
           input_shapes[0].w != input_shapes[i].w &&
@@ -860,7 +856,7 @@ class ConcatenationOperationParser : public TFLiteOperationParser {
         break;
       }
     }
-    if (*axis == Axis::HEIGHT) return OkStatus();
+    if (*axis == Axis::HEIGHT) return absl::OkStatus();
     for (int i = 1; i < input_shapes.size(); i++) {
       if (input_shapes[0].b != input_shapes[i].b &&
           input_shapes[0].h != input_shapes[i].h &&
@@ -869,25 +865,25 @@ class ConcatenationOperationParser : public TFLiteOperationParser {
         break;
       }
     }
-    if (*axis == Axis::WIDTH) return OkStatus();
+    if (*axis == Axis::WIDTH) return absl::OkStatus();
     for (int i = 1; i < input_shapes.size(); i++) {
       if (input_shapes[0].b != input_shapes[i].b &&
           input_shapes[0].w != input_shapes[i].w &&
           input_shapes[0].h != input_shapes[i].h) {
-        return UnimplementedError(
+        return absl::UnimplementedError(
             "Can concatenate tensors only by batch, height, width, or "
             "channels.");
       }
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
 class Conv2DOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
     RETURN_IF_ERROR(CheckInputsOutputs(context, tflite_node,
                                        /*runtime_inputs=*/1, /*outputs=*/1));
@@ -900,9 +896,9 @@ class Conv2DOperationParser : public TFLiteOperationParser {
     return IsActivationSupported(tf_options->activation);
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     node->operation.type = ToString(OperationType::CONVOLUTION_2D);
     RETURN_IF_ERROR(reader->AddInput(node, 0));
@@ -915,7 +911,7 @@ class Conv2DOperationParser : public TFLiteOperationParser {
     const auto* tf_options =
         reinterpret_cast<const TfLiteConvParams*>(tflite_node->builtin_data);
     if (!tf_options) {
-      return InternalError("Missing tflite params");
+      return absl::InternalError("Missing tflite params");
     }
     attr.strides = ToHW(tf_options->stride_height, tf_options->stride_width);
     attr.dilations = HW(tf_options->dilation_height_factor,
@@ -925,26 +921,26 @@ class Conv2DOperationParser : public TFLiteOperationParser {
     RETURN_IF_ERROR(MaybeFuseActivationToTheSingleOutput(tf_options->activation,
                                                          graph, node));
     node->operation.attributes = std::move(attr);
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
 class Convolution2DTransposeBiasParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckTensorIsAvailable(context, tflite_node, 1));
     TfLiteTransposeConvParams* tf_options = nullptr;
     RETURN_IF_ERROR(RetrieveCustomInitialData(tflite_node, &tf_options));
     RETURN_IF_ERROR(
         CheckStrides(tf_options->stride_height, tf_options->stride_width));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     auto* node = graph->NewNode();
     node->operation.type = ToString(OperationType::CONVOLUTION_TRANSPOSED);
     RETURN_IF_ERROR(reader->AddInput(node, 0));
@@ -963,15 +959,15 @@ class Convolution2DTransposeBiasParser : public TFLiteOperationParser {
                   &attr);
 
     node->operation.attributes = std::move(attr);
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
 class DepthwiseConvolutionOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 2));
     RETURN_IF_ERROR(CheckInputsOutputs(context, tflite_node,
                                        /*runtime_inputs=*/1, /*outputs=*/1));
@@ -991,37 +987,38 @@ class DepthwiseConvolutionOperationParser : public TFLiteOperationParser {
                            : nullptr;
     const auto* output = context->tensors + tflite_node->outputs->data[0];
     if (!input->dims || input->dims->size != 4) {
-      return InvalidArgumentError("input.dims.size != 4");
+      return absl::InvalidArgumentError("input.dims.size != 4");
     }
     if (!filter->dims || filter->dims->size != 4) {
-      return InvalidArgumentError("filter.dims.size != 4");
+      return absl::InvalidArgumentError("filter.dims.size != 4");
     }
     if (!output->dims || output->dims->size != 4) {
-      return InvalidArgumentError("output.dims.size != 4");
+      return absl::InvalidArgumentError("output.dims.size != 4");
     }
     if (input->dims->data[0] != output->dims->data[0]) {
-      return InvalidArgumentError("input.b != output.b");
+      return absl::InvalidArgumentError("input.b != output.b");
     }
     const int input_depth = input->dims->data[3];
     const int output_depth = output->dims->data[3];
     if (filter->dims->data[3] != output_depth) {
-      return InvalidArgumentError("filter.i != output.c");
+      return absl::InvalidArgumentError("filter.i != output.c");
     }
     if (output_depth != input_depth * depth_multiplier) {
-      return InvalidArgumentError("output.c != input.c * depth_multiplier");
+      return absl::InvalidArgumentError(
+          "output.c != input.c * depth_multiplier");
     }
     if (bias && NumElements(bias) != output_depth) {
-      return InvalidArgumentError("bias.size != output.c");
+      return absl::InvalidArgumentError("bias.size != output.c");
     }
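+    // The GPU path only handles depth_multiplier != 1 when the input has a
+    // single channel (see the TransposeWeights handling in Parse below).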
     if (depth_multiplier != 1 && input_depth != 1) {
-      return UnimplementedError("depth_multiplier != 1 && input.c != 1");
+      return absl::UnimplementedError("depth_multiplier != 1 && input.c != 1");
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     node->operation.type = ToString(OperationType::DEPTHWISE_CONVOLUTION);
     RETURN_IF_ERROR(reader->AddInput(node, 0));
@@ -1047,7 +1044,7 @@ class DepthwiseConvolutionOperationParser : public TFLiteOperationParser {
       TransposeWeights(input, filter, output, depth_multiplier, &attr);
     }
     node->operation.attributes = std::move(attr);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
@@ -1086,9 +1083,9 @@ class ElementwiseOperationParser : public TFLiteOperationParser {
   explicit ElementwiseOperationParser(OperationType operation_type)
       : operation_type_(operation_type) {}
 
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
     if (IsOneArgumentOperation()) {
       RETURN_IF_ERROR(CheckInputsConstsOutputs(context, tflite_node,
@@ -1106,16 +1103,17 @@ class ElementwiseOperationParser : public TFLiteOperationParser {
                                                /*const_inputs=*/1,
                                                /*outputs=*/1));
     } else {
-      return InvalidArgumentError("Op can only handle 1 or 2 operand(s).");
+      return absl::InvalidArgumentError(
+          "Op can only handle 1 or 2 operand(s).");
     }
     TfLiteFusedActivation activation;
     RETURN_IF_ERROR(GetActivation(tflite_node, &activation));
     return IsActivationSupported(activation);
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     node->operation.type = ToString(operation_type_);
 
@@ -1132,7 +1130,7 @@ class ElementwiseOperationParser : public TFLiteOperationParser {
                                                         /*const_inputs=*/0,
                                                         /*outputs=*/1));
       if (tflite_node->inputs->size != 2) {
-        return InvalidArgumentError("Applies only two input tensors");
+        return absl::InvalidArgumentError("Applies only to two input tensors");
       }
       RETURN_IF_ERROR(reader->AddInput(node, 0));
       RETURN_IF_ERROR(reader->AddInput(node, 1));
@@ -1173,32 +1171,32 @@ class ElementwiseOperationParser : public TFLiteOperationParser {
       RETURN_IF_ERROR(ParseInputsWithConstTensor(node, reader, &attr.param));
       node->operation.attributes = std::move(attr);
     } else {
-      return InvalidArgumentError("Incorrect operation type passed");
+      return absl::InvalidArgumentError("Incorrect operation type passed");
     }
 
     return reader->AddOutputs(node);
   }
 
  private:
-  Status GetActivation(const TfLiteNode* tflite_node,
-                       TfLiteFusedActivation* activation) const {
+  absl::Status GetActivation(const TfLiteNode* tflite_node,
+                             TfLiteFusedActivation* activation) const {
     if (operation_type_ == OperationType::DIV) {
       TfLiteDivParams* tf_options;
       RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
       *activation = tf_options ? tf_options->activation : kTfLiteActNone;
-      return OkStatus();
+      return absl::OkStatus();
     }
     if (operation_type_ == OperationType::SUB) {
       TfLiteSubParams* tf_options;
       RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
       *activation = tf_options ? tf_options->activation : kTfLiteActNone;
-      return OkStatus();
+      return absl::OkStatus();
     }
 
    // Return kTfLiteActNone because other ops either have no TfLiteXxxParams
    // at all or no TfLiteXxxParams.activation field.
     *activation = kTfLiteActNone;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   bool IsOneArgumentOperation() const {
@@ -1247,23 +1245,24 @@ class ElementwiseOperationParser : public TFLiteOperationParser {
 
 class FullyConnectedOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
     TfLiteFullyConnectedParams* tf_options = nullptr;
     RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
     if (tf_options->weights_format !=
         kTfLiteFullyConnectedWeightsFormatDefault) {
-      return UnimplementedError("Unsupported FullyConnected weights format.");
+      return absl::UnimplementedError(
+          "Unsupported FullyConnected weights format.");
     }
     // TODO(eignasheva): check input shape
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     RETURN_IF_ERROR(reader->AddInput(node, 0));
 
@@ -1272,7 +1271,8 @@ class FullyConnectedOperationParser : public TFLiteOperationParser {
             tflite_node->builtin_data);
     if (tf_options->weights_format !=
         kTfLiteFullyConnectedWeightsFormatDefault) {
-      return UnimplementedError("Unsupported FullyConnected weights format.");
+      return absl::UnimplementedError(
+          "Unsupported FullyConnected weights format.");
     }
 
     FullyConnectedAttributes attr;
@@ -1284,7 +1284,7 @@ class FullyConnectedOperationParser : public TFLiteOperationParser {
     int batch_size = input->tensor.shape.b;
     if (input->tensor.shape.DimensionsProduct() / batch_size !=
         weights.shape.w) {
-      return UnimplementedError(
+      return absl::UnimplementedError(
           "Amount of input data should match weights width");
     }
 
@@ -1306,7 +1306,7 @@ class FullyConnectedOperationParser : public TFLiteOperationParser {
 
     conv->operation.type = ToString(OperationType::FULLY_CONNECTED);
     conv->operation.attributes = std::move(attr);
-    Status result = reader->AddOutputs(conv);
+    absl::Status result = reader->AddOutputs(conv);
     RETURN_IF_ERROR(MaybeFuseActivationToTheSingleOutput(tf_options->activation,
                                                          graph, conv));
 
@@ -1316,15 +1316,15 @@ class FullyConnectedOperationParser : public TFLiteOperationParser {
 
 class HardSwishOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration*) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration*) final {
     return CheckInputsOutputs(context, tflite_node, /*runtime_inputs=*/1,
                               /*outputs=*/1);
   }
 
-  Status Parse(const TfLiteNode*, const TfLiteRegistration*,
-               GraphFloat32* graph, ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode*, const TfLiteRegistration*,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     node->operation.type = ToString(OperationType::HARD_SWISH);
     RETURN_IF_ERROR(reader->AddInput(node, 0));
@@ -1353,9 +1353,9 @@ class HardSwishOperationParser : public TFLiteOperationParser {
 //
 class LSTMOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckExactSupportedOpVersion(registration, 2));
     // TODO(eignasheva): Fix bad check.
     // RETURN_IF_ERROR(CheckInputsOutputs(context, tflite_node,
@@ -1364,23 +1364,23 @@ class LSTMOperationParser : public TFLiteOperationParser {
     TfLiteLSTMParams* tf_options = nullptr;
     RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
     RETURN_IF_ERROR(CheckParameters(tf_options));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     if (tflite_node->inputs->size != 5) {
-      return InvalidArgumentError("LSTM should have 5 input tensors");
+      return absl::InvalidArgumentError("LSTM should have 5 input tensors");
     }
     if (tflite_node->outputs->size != 4) {
-      return InvalidArgumentError("LSTM should have 4 output tensors");
+      return absl::InvalidArgumentError("LSTM should have 4 output tensors");
     }
 
     const auto* params =
         reinterpret_cast<const TfLiteLSTMParams*>(tflite_node->builtin_data);
     if (!params) {
-      return InternalError("Missing tflite params");
+      return absl::InternalError("Missing tflite params");
     }
     RETURN_IF_ERROR(CheckParameters(params));
 
@@ -1423,58 +1423,61 @@ class LSTMOperationParser : public TFLiteOperationParser {
     RETURN_IF_ERROR(reader->AddOutput(lstm_node, 1));  // new_state
     RETURN_IF_ERROR(reader->AddOutput(lstm_node, 0));  // activation
 
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
-  Status CheckParameters(const TfLiteLSTMParams* tf_options) {
+  absl::Status CheckParameters(const TfLiteLSTMParams* tf_options) {
     if (tf_options->kernel_type !=
         TfLiteLSTMKernelType::kTfLiteLSTMBasicKernel) {
-      return UnimplementedError("Only kTfLiteLSTMBasicKernel is supported.");
+      return absl::UnimplementedError(
+          "Only kTfLiteLSTMBasicKernel is supported.");
     }
     if (tf_options->activation != kTfLiteActTanh) {
-      return UnimplementedError("Only TANH activation is supported.");
+      return absl::UnimplementedError("Only TANH activation is supported.");
     }
     if (tf_options->cell_clip != 0.0f) {
-      return UnimplementedError("cell_clip is not supported.");
+      return absl::UnimplementedError("cell_clip is not supported.");
     }
     if (tf_options->proj_clip != 0.0f) {
-      return UnimplementedError("proj_clip is not supported.");
+      return absl::UnimplementedError("proj_clip is not supported.");
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
 class MulOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
     if (tflite_node->inputs->size != 2) {
-      return UnimplementedError("MUL requires two input tensors.");
+      return absl::UnimplementedError("MUL requires two input tensors.");
     }
     TfLiteMulParams* tf_options;
     RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
     return IsActivationSupported(tf_options->activation);
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     // Determine runtime/constant tensors.
     const TfLiteTensor* input0 = reader->GetInputTensor(0);
     if (!input0) {
-      return InvalidArgumentError("Couldn't get the 1st input tensor for MUL.");
+      return absl::InvalidArgumentError(
+          "Couldn't get the 1st input tensor for MUL.");
     }
     const TfLiteTensor* input1 = reader->GetInputTensor(1);
     if (!input1) {
-      return InvalidArgumentError("Couldn't get the 2nd input tensor for MUL.");
+      return absl::InvalidArgumentError(
+          "Couldn't get the 2nd input tensor for MUL.");
     }
     const bool constant_tensor0 = IsConstantTensor(input0);
     const bool constant_tensor1 = IsConstantTensor(input1);
     if (constant_tensor0 && constant_tensor1) {
-      return InvalidArgumentError("No runtime input tensors for MUL.");
+      return absl::InvalidArgumentError("No runtime input tensors for MUL.");
     }
     const bool runtime_tensor0 = !constant_tensor0;
     const bool runtime_tensor1 = !constant_tensor1;
@@ -1516,24 +1519,24 @@ class MulOperationParser : public TFLiteOperationParser {
     const auto* tf_options =
         reinterpret_cast<const TfLiteMulParams*>(tflite_node->builtin_data);
     if (!tf_options) {
-      return InternalError("Missing TfLiteMulParams");
+      return absl::InternalError("Missing TfLiteMulParams");
     }
     return MaybeFuseActivationToTheSingleOutput(tf_options->activation, graph,
                                                 node);
   }
 
  private:
-  Status ParseApplyMask(Node* node, int input_tensor0, int input_tensor1,
-                        GraphFloat32* graph, ObjectReader* reader) {
+  absl::Status ParseApplyMask(Node* node, int input_tensor0, int input_tensor1,
+                              GraphFloat32* graph, ObjectReader* reader) {
     RETURN_IF_ERROR(reader->AddInput(node, input_tensor0));
     RETURN_IF_ERROR(reader->AddInput(node, input_tensor1));
     return reader->AddOutputs(node);
   }
 
-  Status ParseMultiplyScalar(Node* node, int runtime_tensor,
-                             int constant_tensor,
-                             const TfLiteIntArray* constant_dims,
-                             GraphFloat32* graph, ObjectReader* reader) {
+  absl::Status ParseMultiplyScalar(Node* node, int runtime_tensor,
+                                   int constant_tensor,
+                                   const TfLiteIntArray* constant_dims,
+                                   GraphFloat32* graph, ObjectReader* reader) {
     RETURN_IF_ERROR(reader->AddInput(node, runtime_tensor));
     MultiplyAttributes attr;
     if (constant_dims->size <= 0) {
@@ -1552,16 +1555,16 @@ class MulOperationParser : public TFLiteOperationParser {
 
 class PReLUOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
     // TODO(eignasheva): add params check
-    return OkStatus();
+    return absl::OkStatus();
   }
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     node->operation.type = ToString(OperationType::PRELU);
     RETURN_IF_ERROR(reader->AddInput(node, 0));
@@ -1569,10 +1572,10 @@ class PReLUOperationParser : public TFLiteOperationParser {
 
     PReLUAttributes attr;
     Tensor<Linear, DataType::FLOAT32> linear_alpha;
-    Status status = reader->ReadTensor(1, &linear_alpha);
+    absl::Status status = reader->ReadTensor(1, &linear_alpha);
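+    // Alpha may be per-channel (Linear) or a full HWC tensor; try the Linear
+    // read first and fall back to HWC below.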
     if (status.ok()) {
       if (linear_alpha.shape.v != input_shape.c) {
-        return InvalidArgumentError(
+        return absl::InvalidArgumentError(
             "Linear alpha shape does not match the number of input channels.");
       }
       attr.alpha = std::move(linear_alpha);
@@ -1582,7 +1585,8 @@ class PReLUOperationParser : public TFLiteOperationParser {
       if (hwc_alpha.shape.h != input_shape.h ||
           hwc_alpha.shape.w != input_shape.w ||
           hwc_alpha.shape.c != input_shape.c) {
-        return InvalidArgumentError("Alpha shape does not match input shape.");
+        return absl::InvalidArgumentError(
+            "Alpha shape does not match input shape.");
       }
       attr.alpha = std::move(hwc_alpha);
     }
@@ -1595,15 +1599,15 @@ class PadOperationParser : public TFLiteOperationParser {
  public:
   explicit PadOperationParser(bool mirror_pad) : mirror_pad_(mirror_pad) {}
 
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     if (mirror_pad_) {
       auto* tf_options = reinterpret_cast<const TfLiteMirrorPaddingParams*>(
           tflite_node->builtin_data);
       if (tf_options->mode !=
           TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect) {
-        return InvalidArgumentError(
+        return absl::InvalidArgumentError(
             "Only Reflective padding is supported for Mirror Pad operation.");
       }
     }
@@ -1611,12 +1615,12 @@ class PadOperationParser : public TFLiteOperationParser {
     RETURN_IF_ERROR(CheckInputsOutputs(context, tflite_node,
                                        /*runtime_inputs=*/1, /*outputs=*/1));
     RETURN_IF_ERROR(CheckTensorIsAvailable(context, tflite_node, 1));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     node->operation.type = ToString(OperationType::PAD);
     RETURN_IF_ERROR(reader->AddInput(node, 0));
@@ -1634,14 +1638,15 @@ class PadOperationParser : public TFLiteOperationParser {
 
     // 4x2 tensor with paddings.
     if (paddings.shape.h != 4 || paddings.shape.w != 2) {
-      return InvalidArgumentError("Paddings tensor has unexpected shape.");
+      return absl::InvalidArgumentError(
+          "Paddings tensor has unexpected shape.");
     }
     attr.prepended = BHWC(paddings.data[0], paddings.data[2], paddings.data[4],
                           paddings.data[6]);
     attr.appended = BHWC(paddings.data[1], paddings.data[3], paddings.data[5],
                          paddings.data[7]);
     node->operation.attributes = attr;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
@@ -1650,9 +1655,9 @@ class PadOperationParser : public TFLiteOperationParser {
 
 class Pooling2DOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
     TfLitePoolParams* tf_options = nullptr;
     auto status = RetrieveCustomInitialData(tflite_node, &tf_options);
@@ -1675,9 +1680,9 @@ class Pooling2DOperationParser : public TFLiteOperationParser {
  public:
   explicit Pooling2DOperationParser(PoolingType type) : type_(type) {}
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     node->operation.type = ToString(OperationType::POOLING_2D);
     RETURN_IF_ERROR(reader->AddInput(node, 0));
@@ -1699,7 +1704,7 @@ class Pooling2DOperationParser : public TFLiteOperationParser {
           reinterpret_cast<const TfLitePoolParams*>(tflite_node->builtin_data);
     }
     if (!tf_options) {
-      return InternalError("Missing tflite params");
+      return absl::InternalError("Missing tflite params");
     }
 
     std::vector<uint32_t> max_tensor_id{0};
@@ -1719,7 +1724,7 @@ class Pooling2DOperationParser : public TFLiteOperationParser {
     }
     RETURN_IF_ERROR(ParsePoolingAttributes(tf_options, input_shape, &attr));
     node->operation.attributes = attr;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
@@ -1730,16 +1735,16 @@ class ReLUOperationParser : public TFLiteOperationParser {
  public:
   explicit ReLUOperationParser(int clip) : clip_(clip) {}
 
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     node->operation.type = ToString(OperationType::RELU);
     RETURN_IF_ERROR(reader->AddInput(node, 0));
@@ -1759,19 +1764,19 @@ class ReLUOperationParser : public TFLiteOperationParser {
 
 class ReshapeOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
     RETURN_IF_ERROR(CheckInputsOutputs(context, tflite_node,
                                        /*runtime_inputs=*/1, /*outputs=*/1));
     // TODO(eignasheva): add shape checking
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     node->operation.type = ToString(OperationType::RESHAPE);
     RETURN_IF_ERROR(reader->AddInput(node, 0));
@@ -1784,7 +1789,7 @@ class ReshapeOperationParser : public TFLiteOperationParser {
     ReshapeAttributes attr;
     attr.new_shape = graph->FindOutputs(node->id)[0]->tensor.shape;
     node->operation.attributes = attr;
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
@@ -1793,9 +1798,9 @@ class Resize2DOperationParser : public TFLiteOperationParser {
   explicit Resize2DOperationParser(SamplingType sampling_type)
       : sampling_type_(sampling_type) {}
 
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 3));
     RETURN_IF_ERROR(CheckInputsOutputs(context, tflite_node,
                                        /*runtime_inputs=*/1, /*outputs=*/1));
@@ -1805,12 +1810,12 @@ class Resize2DOperationParser : public TFLiteOperationParser {
     RETURN_IF_ERROR(GetAlignCornersValue(tflite_node, &align_corners));
     bool half_pixel_centers;
     RETURN_IF_ERROR(GetHalfPixelCentersValue(tflite_node, &half_pixel_centers));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     node->operation.type = ToString(OperationType::RESIZE);
     RETURN_IF_ERROR(reader->AddInput(node, 0));
@@ -1826,12 +1831,12 @@ class Resize2DOperationParser : public TFLiteOperationParser {
     attr.new_shape.CopyAllDefinedAxis(
         graph->FindOutputs(node->id)[0]->tensor.shape);
     node->operation.attributes = attr;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
-  Status GetAlignCornersValue(const TfLiteNode* tflite_node,
-                              bool* align_corners) {
+  absl::Status GetAlignCornersValue(const TfLiteNode* tflite_node,
+                                    bool* align_corners) {
     switch (sampling_type_) {
       case SamplingType::BILINEAR:
         return GetAlignCornersValueForType<TfLiteResizeBilinearParams>(
@@ -1840,61 +1845,62 @@ class Resize2DOperationParser : public TFLiteOperationParser {
         return GetAlignCornersValueForType<TfLiteResizeNearestNeighborParams>(
             tflite_node, align_corners);
       case SamplingType::UNKNOWN:
-        return InternalError("Sampling type is not specified");
+        return absl::InternalError("Sampling type is not specified");
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   template <class T>
-  Status GetAlignCornersValueForType(const TfLiteNode* tflite_node,
-                                     bool* align_corners) {
+  absl::Status GetAlignCornersValueForType(const TfLiteNode* tflite_node,
+                                           bool* align_corners) {
     const auto* tf_options =
         reinterpret_cast<const T*>(tflite_node->builtin_data);
     if (!tf_options) {
-      return InternalError("Missing tflite params");
+      return absl::InternalError("Missing tflite params");
     }
     *align_corners = tf_options->align_corners;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status GetHalfPixelCentersValue(const TfLiteNode* tflite_node,
-                                  bool* half_pixel_centers) {
+  absl::Status GetHalfPixelCentersValue(const TfLiteNode* tflite_node,
+                                        bool* half_pixel_centers) {
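+    // Only the bilinear params carry half_pixel_centers here; other sampling
+    // types report false.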
     if (sampling_type_ == SamplingType::BILINEAR) {
       const auto* tf_options = reinterpret_cast<TfLiteResizeBilinearParams*>(
           tflite_node->builtin_data);
       if (!tf_options) {
-        return InternalError("Missing tflite params for ResizeBilinear op");
+        return absl::InternalError(
+            "Missing tflite params for ResizeBilinear op");
       }
       if (tf_options->align_corners && tf_options->half_pixel_centers) {
-        return InternalError(
+        return absl::InternalError(
             "If half_pixel_centers is True, align_corners must be False.");
       }
       *half_pixel_centers = tf_options->half_pixel_centers;
     } else {
       *half_pixel_centers = false;
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status CheckOnlyUpsamplingIsSupported(const TfLiteContext* context,
-                                        const TfLiteNode* tflite_node) {
+  absl::Status CheckOnlyUpsamplingIsSupported(const TfLiteContext* context,
+                                              const TfLiteNode* tflite_node) {
     const auto* input = context->tensors + tflite_node->inputs->data[0];
     const auto* output = context->tensors + tflite_node->outputs->data[0];
 
     if (!input->dims || input->dims->size != 4) {
-      return InvalidArgumentError("input.dims.size != 4");
+      return absl::InvalidArgumentError("input.dims.size != 4");
     }
     if (!output->dims || output->dims->size != 4) {
-      return InvalidArgumentError("output.dims.size != 4");
+      return absl::InvalidArgumentError("output.dims.size != 4");
     }
     if (output->dims->data[1] < input->dims->data[1] ||
         output->dims->data[2] < input->dims->data[2]) {
-      return InvalidArgumentError(absl::StrCat(
+      return absl::InvalidArgumentError(absl::StrCat(
           "Only upsampling is supported, received output h,w = ",
           output->dims->data[1], ",", output->dims->data[2],
           " input h,w = ", input->dims->data[1], ",", input->dims->data[2]));
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   SamplingType sampling_type_ = SamplingType::UNKNOWN;
@@ -1902,16 +1908,16 @@ class Resize2DOperationParser : public TFLiteOperationParser {
 
 class SliceOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     node->operation.type = ToString(OperationType::SLICE);
     RETURN_IF_ERROR(reader->AddOutputs(node));
@@ -1925,7 +1931,7 @@ class SliceOperationParser : public TFLiteOperationParser {
     RETURN_IF_ERROR(reader->ReadTensor(1, &starts));
     RETURN_IF_ERROR(reader->ReadTensor(2, &sizes));
     if (starts.data.size() != sizes.data.size()) {
-      return InvalidArgumentError("Starts amount != sizes amount.");
+      return absl::InvalidArgumentError("Starts amount != sizes amount.");
     }
     if (starts.data.size() == 4) {
       attr.starts =
@@ -1939,30 +1945,31 @@ class SliceOperationParser : public TFLiteOperationParser {
           BHWC(input->tensor.shape.b, starts.data[0] + sizes.data[0],
                starts.data[1] + sizes.data[1], starts.data[2] + sizes.data[2]);
     } else {
-      return UnimplementedError(
+      return absl::UnimplementedError(
           "Slicing is supported for 3 or 4 dimensional tensors only.");
     }
     RETURN_IF_ERROR(UpdateIfNegative(input->tensor.shape, &attr));
 
     auto out_shape = graph->FindOutputs(node->id)[0]->tensor.shape;
     if ((attr.ends.b - attr.starts.b) != out_shape.b) {
-      return UnimplementedError("Output batch don't match");
+      return absl::UnimplementedError("Output batch doesn't match");
     }
     if ((attr.ends.h - attr.starts.h) != out_shape.h) {
-      return UnimplementedError("Output height doesn't match");
+      return absl::UnimplementedError("Output height doesn't match");
     }
     if ((attr.ends.w - attr.starts.w) != out_shape.w) {
-      return UnimplementedError("Output width doesn't match");
+      return absl::UnimplementedError("Output width doesn't match");
     }
     if ((attr.ends.c - attr.starts.c) != out_shape.c) {
-      return UnimplementedError("Output channels don't match");
+      return absl::UnimplementedError("Output channels don't match");
     }
     node->operation.attributes = attr;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
-  Status UpdateIfNegative(const BHWC& input_shape, SliceAttributes* attr) {
+  absl::Status UpdateIfNegative(const BHWC& input_shape,
+                                SliceAttributes* attr) {
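+    // Negative ends are relative to the axis size (Python-style indexing).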
     if (attr->ends.h < 0) {
       attr->ends.h = input_shape.h + attr->ends.h;
     }
@@ -1975,15 +1982,15 @@ class SliceOperationParser : public TFLiteOperationParser {
     if (attr->ends.b < 0) {
       attr->ends.b = input_shape.b + attr->ends.b;
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
 class SoftmaxOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
     RETURN_IF_ERROR(CheckInputsOutputs(context, tflite_node,
                                        /*runtime_inputs=*/1, /*outputs=*/1));
@@ -1991,14 +1998,14 @@ class SoftmaxOperationParser : public TFLiteOperationParser {
     RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
     if (tf_options->beta != 1) {
       // TODO(eignasheva): figure out, what's wrong with softmax.
-      return UnimplementedError("Softmax.beta != 1 is not supported.");
+      return absl::UnimplementedError("Softmax.beta != 1 is not supported.");
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     node->operation.type = ToString(OperationType::SOFTMAX);
     RETURN_IF_ERROR(reader->AddInput(node, 0));
@@ -2007,27 +2014,27 @@ class SoftmaxOperationParser : public TFLiteOperationParser {
     const auto* tf_options =
         reinterpret_cast<const TfLiteSoftmaxParams*>(tflite_node->builtin_data);
     if (!tf_options) {
-      return InternalError("Missing tflite params");
+      return absl::InternalError("Missing tflite params");
     }
     if (tf_options->beta != 1) {
      // There is a multiply-by-scalar operation fused into this softmax. Make
      // a separate layer out of it before the softmax.
-      return UnimplementedError("Softmax.beta != 1 is not supported.");
+      return absl::UnimplementedError("Softmax.beta != 1 is not supported.");
       // auto mul_node = reader->NewPassthroughNode(node);
       // mul_node->operation.type = ToString(OperationType::MUL);
     }
     SoftmaxAttributes attr;
     attr.axis = Axis::CHANNELS;  // always by channels
     node->operation.attributes = attr;
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
 class SpaceToDepthOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
     RETURN_IF_ERROR(CheckInputsOutputs(context, tflite_node,
                                        /*runtime_inputs=*/1, /*outputs=*/1));
@@ -2035,17 +2042,19 @@ class SpaceToDepthOperationParser : public TFLiteOperationParser {
     TfLiteSpaceToDepthParams* s2d_params = nullptr;
     RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &s2d_params));
     if (s2d_params->block_size == 1) {
-      return InvalidArgumentError("SPACE_TO_DEPTH block_size = 1 is a no-op.");
+      return absl::InvalidArgumentError(
+          "SPACE_TO_DEPTH block_size = 1 is a no-op.");
     }
     if (s2d_params->block_size < 1) {
-      return InvalidArgumentError("SPACE_TO_DEPTH block_size must be > 1.");
+      return absl::InvalidArgumentError(
+          "SPACE_TO_DEPTH block_size must be > 1.");
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     node->operation.type = ToString(OperationType::SPACE_TO_DEPTH);
     RETURN_IF_ERROR(reader->AddInput(node, 0));
@@ -2055,25 +2064,25 @@ class SpaceToDepthOperationParser : public TFLiteOperationParser {
     SpaceToDepthAttributes attr;
     attr.block_size = tf_options->block_size;
     node->operation.attributes = attr;
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
 class StridedSliceOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
     TfLiteStridedSliceParams* tf_options = nullptr;
     RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
     RETURN_IF_ERROR(CheckOptionsSupport(tf_options));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     node->operation.type = ToString(OperationType::SLICE);
     RETURN_IF_ERROR(reader->AddOutputs(node));
@@ -2087,7 +2096,7 @@ class StridedSliceOperationParser : public TFLiteOperationParser {
     bool read_without_batch = tmp.data.size() == 3;
     bool read_with_batch = tmp.data.size() == 4;
     if (!read_without_batch && !read_with_batch) {
-      return UnimplementedError(
+      return absl::UnimplementedError(
           "Slicing is supported for 3 or 4 dimensional tensors only.");
     }
 
@@ -2095,7 +2104,7 @@ class StridedSliceOperationParser : public TFLiteOperationParser {
         tflite_node->builtin_data);
     auto out_shape = graph->FindOutputs(node->id)[0]->tensor.shape;
     if (!tf_options) {
-      return InternalError("Missing tflite params");
+      return absl::InternalError("Missing tflite params");
     }
     RETURN_IF_ERROR(CheckOptionsSupport(tf_options));
 
@@ -2110,36 +2119,37 @@ class StridedSliceOperationParser : public TFLiteOperationParser {
     }
     if (attr.strides.b == 0 || attr.strides.h == 0 || attr.strides.w == 0 ||
         attr.strides.c == 0) {
-      return InvalidArgumentError("stride values must be non-zero");
+      return absl::InvalidArgumentError("stride values must be non-zero");
     }
     if (attr.strides.b < 0 || attr.strides.h < 0 || attr.strides.w < 0 ||
         attr.strides.c < 0) {
-      return UnimplementedError("Reverse slices are not supported.");
+      return absl::UnimplementedError("Reverse slices are not supported.");
     }
     if ((attr.ends.b - attr.starts.b + attr.strides.b - 1) / attr.strides.b !=
         out_shape.b) {
-      return UnimplementedError("Output batch don't match");
+      return absl::UnimplementedError("Output batch doesn't match");
     }
     if ((attr.ends.h - attr.starts.h + attr.strides.h - 1) / attr.strides.h !=
         out_shape.h) {
-      return UnimplementedError("Output height doesn't match");
+      return absl::UnimplementedError("Output height doesn't match");
     }
     if ((attr.ends.w - attr.starts.w + attr.strides.w - 1) / attr.strides.w !=
         out_shape.w) {
-      return UnimplementedError("Output width doesn't match");
+      return absl::UnimplementedError("Output width doesn't match");
     }
     if ((attr.ends.c - attr.starts.c + attr.strides.c - 1) / attr.strides.c !=
         out_shape.c) {
-      return UnimplementedError("Output channels don't match");
+      return absl::UnimplementedError("Output channels don't match");
     }
     node->operation.attributes = attr;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
-  Status UpdateWithMask(const TfLiteStridedSliceParams* tf_options,
-                        const BHWC& input_shape, int ignore_b, int ignore_h,
-                        int ignore_w, int ignore_c, SliceAttributes* attr) {
+  absl::Status UpdateWithMask(const TfLiteStridedSliceParams* tf_options,
+                              const BHWC& input_shape, int ignore_b,
+                              int ignore_h, int ignore_w, int ignore_c,
+                              SliceAttributes* attr) {
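+    // ignore_* are single-bit masks (the callers pass 1, 2, 4, 8 for b, h, w,
+    // c); a set bit in begin_mask/end_mask resets that axis to its full range.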
     if (tf_options->begin_mask & ignore_h) {
       attr->starts.h = 0;
     }
@@ -2165,10 +2175,11 @@ class StridedSliceOperationParser : public TFLiteOperationParser {
     if (tf_options->end_mask & ignore_b) {
       attr->ends.b = input_shape.b;
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status UpdateIfNegative(const BHWC& input_shape, SliceAttributes* attr) {
+  absl::Status UpdateIfNegative(const BHWC& input_shape,
+                                SliceAttributes* attr) {
     if (attr->ends.h < 0) {
       attr->ends.h = input_shape.h + attr->ends.h;
     }
@@ -2181,17 +2192,18 @@ class StridedSliceOperationParser : public TFLiteOperationParser {
     if (attr->ends.b < 0) {
       attr->ends.b = input_shape.b + attr->ends.b;
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status ReadAttribsWithBatch(const ObjectReader* reader,
-                              const TfLiteStridedSliceParams* tf_options,
-                              const BHWC& input_shape, SliceAttributes* attr) {
-    auto read_bhwc = [&](int tensor_index, BHWC* bhwc) -> Status {
+  absl::Status ReadAttribsWithBatch(const ObjectReader* reader,
+                                    const TfLiteStridedSliceParams* tf_options,
+                                    const BHWC& input_shape,
+                                    SliceAttributes* attr) {
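+    // STRIDED_SLICE inputs 1..3 hold begin, end, and strides as 1-D INT32
+    // tensors; in the 4-D case each carries all of B, H, W, C.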
+    auto read_bhwc = [&](int tensor_index, BHWC* bhwc) -> absl::Status {
       Tensor<Linear, DataType::INT32> t;
       RETURN_IF_ERROR(reader->ReadTensor(tensor_index, &t));
       *bhwc = BHWC(t.data[0], t.data[1], t.data[2], t.data[3]);
-      return OkStatus();
+      return absl::OkStatus();
     };
 
     RETURN_IF_ERROR(read_bhwc(1, &attr->starts));
@@ -2199,18 +2211,17 @@ class StridedSliceOperationParser : public TFLiteOperationParser {
     RETURN_IF_ERROR(read_bhwc(3, &attr->strides));
     RETURN_IF_ERROR(UpdateIfNegative(input_shape, attr));
     RETURN_IF_ERROR(UpdateWithMask(tf_options, input_shape, 1, 2, 4, 8, attr));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status ReadAttribsWithoutBatch(const ObjectReader* reader,
-                                 const TfLiteStridedSliceParams* tf_options,
-                                 const BHWC& input_shape,
-                                 SliceAttributes* attr) {
-    auto read_hwc = [&](int tensor_index, BHWC* bhwc) -> Status {
+  absl::Status ReadAttribsWithoutBatch(
+      const ObjectReader* reader, const TfLiteStridedSliceParams* tf_options,
+      const BHWC& input_shape, SliceAttributes* attr) {
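+    // 3-D variant: tensors 1..3 carry only H, W, C; the batch axis is
+    // synthesized after the reads (starts.b = 0, ends.b = input_shape.b).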
+    auto read_hwc = [&](int tensor_index, BHWC* bhwc) -> absl::Status {
       Tensor<Linear, DataType::INT32> t;
       RETURN_IF_ERROR(reader->ReadTensor(tensor_index, &t));
       *bhwc = BHWC(0, t.data[0], t.data[1], t.data[2]);
-      return OkStatus();
+      return absl::OkStatus();
     };
 
     RETURN_IF_ERROR(read_hwc(1, &attr->starts));
@@ -2221,43 +2232,43 @@ class StridedSliceOperationParser : public TFLiteOperationParser {
     attr->starts.b = 0;
     attr->ends.b = input_shape.b;
     attr->strides.b = 1;
-    return OkStatus();
+    return absl::OkStatus();
   }
-  Status CheckOptionsSupport(const TfLiteStridedSliceParams* tf_options) {
+  absl::Status CheckOptionsSupport(const TfLiteStridedSliceParams* tf_options) {
     if (tf_options->ellipsis_mask) {
-      return UnimplementedError("Slice does not support ellipsis_mask.");
+      return absl::UnimplementedError("Slice does not support ellipsis_mask.");
     }
     if (tf_options->new_axis_mask) {
-      return UnimplementedError("Slice does not support new_axis_mask.");
+      return absl::UnimplementedError("Slice does not support new_axis_mask.");
     }
     if (tf_options->shrink_axis_mask) {
-      return UnimplementedError(
+      return absl::UnimplementedError(
           "Slice does not support shrink_axis_mask parameter. ");
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
 class TransposeConvOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
     RETURN_IF_ERROR(CheckTensorIsAvailable(context, tflite_node, 1));
     TfLiteTransposeConvParams* tf_options = nullptr;
     RETURN_IF_ERROR(RetrieveBuiltinData(tflite_node, &tf_options));
     RETURN_IF_ERROR(
         CheckStrides(tf_options->stride_height, tf_options->stride_width));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  // TFLite's TRANSPOSE_CONV expects 3 inputs (output shape, weights, and
  // input) and allows configurable padding & stride.
   // TODO(impjdi): Translate output_shape to attr.adjacent.
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     auto* node = graph->NewNode();
     node->operation.type = ToString(OperationType::CONVOLUTION_TRANSPOSED);
     Value<TensorRef<BHWC>>* input;
@@ -2268,7 +2279,7 @@ class TransposeConvOperationParser : public TFLiteOperationParser {
     const auto* tf_options = reinterpret_cast<const TfLiteTransposeConvParams*>(
         tflite_node->builtin_data);
     if (!tf_options) {
-      return InternalError("Missing tflite options.");
+      return absl::InternalError("Missing tflite options.");
     }
     ConvolutionTransposedAttributes attr;
     attr.stride = tf_options
@@ -2281,24 +2292,24 @@ class TransposeConvOperationParser : public TFLiteOperationParser {
     UpdatePadding(tf_options->padding,
                   graph->FindInputs(node->id)[0]->tensor.shape, &attr);
     node->operation.attributes = std::move(attr);
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
 class TransposeOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckMaxSupportedOpVersion(registration, 1));
     RETURN_IF_ERROR(CheckInputsOutputs(context, tflite_node,
                                        /*runtime_inputs=*/1, /*outputs=*/1));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     node->operation.type = ToString(OperationType::TRANSPOSE);
     RETURN_IF_ERROR(reader->AddInput(node, 0));
@@ -2314,19 +2325,20 @@ class TransposeOperationParser : public TFLiteOperationParser {
     } else if (perm.data.size() == 2) {
       attr.perm = BHWC(0, 1, perm.data[0] + 2, perm.data[1] + 2);
     } else {
-      return InvalidArgumentError("Permutation for transpose is invalid.");
+      return absl::InvalidArgumentError(
+          "Permutation for transpose is invalid.");
     }
 
     node->operation.attributes = attr;
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
 class Unpooling2DOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     TfLitePoolParams* tf_options = nullptr;
     RETURN_IF_ERROR(CheckInputsOutputs(context, tflite_node,
                                        /*runtime_inputs=*/2, /*outputs=*/1));
@@ -2334,12 +2346,12 @@ class Unpooling2DOperationParser : public TFLiteOperationParser {
     RETURN_IF_ERROR(CheckKernelsAndStrides(
         tf_options->filter_height, tf_options->filter_width,
         tf_options->stride_height, tf_options->stride_width));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     node->operation.type = ToString(OperationType::MAX_UNPOOLING_2D);
     RETURN_IF_ERROR(reader->AddInput(node, 0));
@@ -2350,7 +2362,7 @@ class Unpooling2DOperationParser : public TFLiteOperationParser {
     const auto* tf_options = reinterpret_cast<const TfLitePoolParams*>(
         tflite_node->custom_initial_data);
     if (!tf_options) {
-      return InternalError("Missing tflite params");
+      return absl::InternalError("Missing tflite params");
     }
     attr.kernel = ToHW(tf_options->filter_height, tf_options->filter_width);
     attr.strides = ToHW(tf_options->stride_height, tf_options->stride_width);
@@ -2360,22 +2372,22 @@ class Unpooling2DOperationParser : public TFLiteOperationParser {
 
     auto output_value = graph->FindOutputs(node->id)[0];
     output_value->tensor.shape = CalculateOutputShape(input_shape, attr);
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
 // TODO(impjdi): BATCH_TO_SPACE/SPACE_TO_BATCH shouldn't be supported.
 class BatchToSpaceOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
-    return OkStatus();
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
+    return absl::OkStatus();
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     auto* node = graph->NewNode();
     node->operation.type = ToString(OperationType::BATCH_TO_SPACE);
     RETURN_IF_ERROR(reader->AddInput(node, 0));
@@ -2385,7 +2397,7 @@ class BatchToSpaceOperationParser : public TFLiteOperationParser {
     Tensor<Linear, DataType::INT32> block;
     RETURN_IF_ERROR(reader->ReadTensor(1, &block));
     if (block.shape.v != 2) {
-      return InternalError("Space has to be HxW.");
+      return absl::InternalError("Space has to be HxW.");
     }
     bs_attr.block.h = block.data[0];
     bs_attr.block.w = block.data[1];
@@ -2394,7 +2406,7 @@ class BatchToSpaceOperationParser : public TFLiteOperationParser {
     RETURN_IF_ERROR(reader->ReadTensor(2, &crop));
     auto crop_shape = crop.shape;
     if (crop_shape.h != 2 && crop_shape.w != 2) {
-      return InternalError("Space has to be HxW.");
+      return absl::InternalError("Space has to be HxW.");
     }
 
     bs_attr.crop.prepended.h = crop.data[0];
@@ -2404,21 +2416,21 @@ class BatchToSpaceOperationParser : public TFLiteOperationParser {
     bs_attr.crop.appended.w = crop.data[3];
 
     node->operation.attributes = std::move(bs_attr);
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
 class SpaceToBatchOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
-    return OkStatus();
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
+    return absl::OkStatus();
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     auto* node = graph->NewNode();
     node->operation.type = ToString(OperationType::SPACE_TO_BATCH);
     RETURN_IF_ERROR(reader->AddInput(node, 0));
@@ -2427,7 +2439,7 @@ class SpaceToBatchOperationParser : public TFLiteOperationParser {
     Tensor<Linear, DataType::INT32> block;
     RETURN_IF_ERROR(reader->ReadTensor(1, &block));
     if (block.shape.v != 2) {
-      return InternalError("Space has to be HxW.");
+      return absl::InternalError("Space has to be HxW.");
     }
     sb_attr.block.h = block.data[0];
     sb_attr.block.w = block.data[1];
@@ -2437,7 +2449,7 @@ class SpaceToBatchOperationParser : public TFLiteOperationParser {
     auto padding_shape = padding.shape;
 
     if (padding_shape.h != 2 && padding_shape.w != 2) {
-      return InternalError("Space has to be HxW.");
+      return absl::InternalError("Space has to be HxW.");
     }
 
     sb_attr.padding.prepended.h = padding.data[0];
@@ -2447,23 +2459,23 @@ class SpaceToBatchOperationParser : public TFLiteOperationParser {
     sb_attr.padding.appended.w = padding.data[3];
 
     node->operation.attributes = std::move(sb_attr);
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
 class RoIToTransformMatrixOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckInputsOutputs(context, tflite_node,
                                        /*runtime_inputs=*/1, /*outputs=*/1));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     RETURN_IF_ERROR(reader->AddInput(node, 0));  // bbox
     RETURN_IF_ERROR(reader->AddOutputs(node));
@@ -2478,7 +2490,7 @@ class RoIToTransformMatrixOperationParser : public TFLiteOperationParser {
 
     auto output_value = graph->FindOutputs(node->id)[0];
     output_value->tensor.shape = output_shape;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
@@ -2486,17 +2498,17 @@ class RoIToTransformMatrixOperationParser : public TFLiteOperationParser {
 
 class TransformTensorOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckInputsOutputs(context, tflite_node,
                                        /*runtime_inputs=*/2, /*outputs=*/1));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     RETURN_IF_ERROR(reader->AddInput(node, 0));  // data
     RETURN_IF_ERROR(reader->AddInput(node, 1));  // bbox
@@ -2515,7 +2527,7 @@ class TransformTensorOperationParser : public TFLiteOperationParser {
     output_value->tensor.shape =
         BHWC(1, output_shape.h, output_shape.w,
              graph->FindInputs(node->id)[0]->tensor.shape.c);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
@@ -2523,17 +2535,17 @@ class TransformTensorOperationParser : public TFLiteOperationParser {
 
 class TransformLandmarksOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     RETURN_IF_ERROR(CheckInputsOutputs(context, tflite_node,
                                        /*runtime_inputs=*/2, /*outputs=*/1));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     RETURN_IF_ERROR(reader->AddInput(node, 0));  // data
     RETURN_IF_ERROR(reader->AddInput(node, 1));  // bbox
@@ -2549,7 +2561,7 @@ class TransformLandmarksOperationParser : public TFLiteOperationParser {
     auto output_value = graph->FindOutputs(node->id)[0];
 
     output_value->tensor.shape = graph->FindInputs(node->id)[0]->tensor.shape;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
@@ -2557,16 +2569,16 @@ class TransformLandmarksOperationParser : public TFLiteOperationParser {
 
 class Landmarks2TransformMatrixOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     return CheckInputsOutputs(context, tflite_node, /*runtime_inputs=*/1,
                               /*outputs=*/1);
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     Node* node = graph->NewNode();
     RETURN_IF_ERROR(reader->AddInput(node, 0));  // landmarks
     RETURN_IF_ERROR(reader->AddOutputs(node));   // transform matrix
@@ -2581,7 +2593,7 @@ class Landmarks2TransformMatrixOperationParser : public TFLiteOperationParser {
 
     auto output_value = graph->FindOutputs(node->id)[0];
     output_value->tensor.shape = output_shape;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
@@ -2589,16 +2601,16 @@ class Landmarks2TransformMatrixOperationParser : public TFLiteOperationParser {
 
 class MeanOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
     return CheckInputsOutputs(context, tflite_node, /*runtime_inputs=*/1,
                               /*outputs=*/1);
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
     auto* node = graph->NewNode();
     node->operation.type = ToString(OperationType::MEAN);
     RETURN_IF_ERROR(reader->AddInput(node, 0));
@@ -2623,27 +2635,27 @@ class MeanOperationParser : public TFLiteOperationParser {
           unsupported = unsupported.empty() ? "channels" : unsupported;
           ABSL_FALLTHROUGH_INTENDED;
         default:
-          return UnimplementedError(
+          return absl::UnimplementedError(
               absl::StrCat("Unsupported mean dimension: ", unsupported));
       }
     }
     node->operation.attributes = attr;
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
 class UnsupportedOperationParser : public TFLiteOperationParser {
  public:
-  Status IsSupported(const TfLiteContext* context,
-                     const TfLiteNode* tflite_node,
-                     const TfLiteRegistration* registration) final {
-    return UnimplementedError("Operation is not supported.");
+  absl::Status IsSupported(const TfLiteContext* context,
+                           const TfLiteNode* tflite_node,
+                           const TfLiteRegistration* registration) final {
+    return absl::UnimplementedError("Operation is not supported.");
   }
 
-  Status Parse(const TfLiteNode* tflite_node,
-               const TfLiteRegistration* registration, GraphFloat32* graph,
-               ObjectReader* reader) final {
-    return UnimplementedError("Operation is not supported.");
+  absl::Status Parse(const TfLiteNode* tflite_node,
+                     const TfLiteRegistration* registration,
+                     GraphFloat32* graph, ObjectReader* reader) final {
+    return absl::UnimplementedError("Operation is not supported.");
   }
 };
 
@@ -2772,15 +2784,15 @@ std::unique_ptr<TFLiteOperationParser> NewOperationParser(
   return absl::make_unique<UnsupportedOperationParser>();
 }
 
-Status GetNodeAndRegistration(TfLiteContext* context, int node_id,
-                              TfLiteNode** tflite_node,
-                              TfLiteRegistration** registration) {
+absl::Status GetNodeAndRegistration(TfLiteContext* context, int node_id,
+                                    TfLiteNode** tflite_node,
+                                    TfLiteRegistration** registration) {
   if (context->GetNodeAndRegistration(context, node_id, tflite_node,
                                       registration) != kTfLiteOk) {
-    return InvalidArgumentError(absl::StrCat(
+    return absl::InvalidArgumentError(absl::StrCat(
         "Couldn't get node and registration info for op: ", node_id));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 using IsNodeSupportedFn = tflite::delegates::IsNodeSupportedFn;
@@ -2963,8 +2975,8 @@ class GraphWithDequantPartitionHelper
   std::set<int> dequant_nodes_to_save_;
 };
 
-Status IsSupported(const TfLiteContext* context, TfLiteNode* node,
-                   const TfLiteRegistration* registration) {
+absl::Status IsSupported(const TfLiteContext* context, TfLiteNode* node,
+                         const TfLiteRegistration* registration) {
   return NewOperationParser(registration)
       ->IsSupported(context, node, registration);
 }
@@ -2983,8 +2995,8 @@ bool IsAllFloatTensors(const TfLiteContext* context,
 }
 }  // namespace
 
-Status ConvertTfLiteTensorToTensorRef(const TfLiteTensor& tflite_tensor,
-                                      TensorRef<BHWC>* tensor_ref) {
+absl::Status ConvertTfLiteTensorToTensorRef(const TfLiteTensor& tflite_tensor,
+                                            TensorRef<BHWC>* tensor_ref) {
   tensor_ref->type = ToDataType(tflite_tensor.type);
   return ExtractTensorShape(tflite_tensor, &tensor_ref->shape);
 }
@@ -2998,7 +3010,9 @@ TfLiteIntArray* GetOpsToReplace(TfLiteContext* context) {
           std::string* unsupported_details) -> bool {
     const auto status = IsSupported(context, node, registration);
     if (!status.ok()) {
-      if (unsupported_details) *unsupported_details = status.error_message();
+      if (unsupported_details) {
+        *unsupported_details = std::string(status.message());
+      }
       return false;
     }
 
@@ -3048,9 +3062,9 @@ TfLiteIntArray* GetOpsToReplace(TfLiteContext* context) {
   return ConvertVectorToTfLiteIntArray(ops_to_replace);
 }
 
-Status BuildModel(TfLiteContext* context,
-                  const TfLiteDelegateParams* delegate_params,
-                  GraphFloat32* graph) {
+absl::Status BuildModel(TfLiteContext* context,
+                        const TfLiteDelegateParams* delegate_params,
+                        GraphFloat32* graph) {
   std::vector<std::unique_ptr<TFLiteOperationParser>> operations;
   std::vector<int> tflite_nodes;
   for (int i = 0; i < delegate_params->nodes_to_replace->size; ++i) {
@@ -3065,7 +3079,7 @@ Status BuildModel(TfLiteContext* context,
     }
     auto op_parser = NewOperationParser(registration);
     if (!op_parser) {
-      return UnimplementedError(
+      return absl::UnimplementedError(
           absl::StrCat("Operation ", registration->builtin_code, "(",
                        registration->custom_name,
                        ") is not supported by TFLite GPU Delegate."));
@@ -3085,25 +3099,25 @@ Status BuildModel(TfLiteContext* context,
     const auto status =
         operations[i]->Parse(tflite_node, registration, graph, &reader);
     if (!status.ok()) {
-      return InternalError(absl::StrCat(GetOpNameByRegistration(*registration),
-                                        ": ", status.error_message()));
+      return absl::InternalError(absl::StrCat(
+          GetOpNameByRegistration(*registration), ": ", status.message()));
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status BuildFinalModel(TfLiteContext* context,
-                       const TfLiteDelegateParams* delegate_params,
-                       GraphFloat32* graph) {
+absl::Status BuildFinalModel(TfLiteContext* context,
+                             const TfLiteDelegateParams* delegate_params,
+                             GraphFloat32* graph) {
   RETURN_IF_ERROR(BuildModel(context, delegate_params, graph));
 
   // Apply general transformations on the graph.
   NullTransformationReporter reporter;
   ModelTransformer transformer(graph, &reporter);
   if (!ApplyGeneralTransformations(&transformer)) {
-    return InternalError("Graph general transformations failed");
+    return absl::InternalError("Graph general transformations failed");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/common/model_builder.h b/tensorflow/lite/delegates/gpu/common/model_builder.h
index f81dd90933c..b8fcab0c5c8 100644
--- a/tensorflow/lite/delegates/gpu/common/model_builder.h
+++ b/tensorflow/lite/delegates/gpu/common/model_builder.h
@@ -32,19 +32,19 @@ TfLiteIntArray* GetOpsToReplace(TfLiteContext* context);
 
 // Extracts TFLite delegate execution plan from the input TFLite context and
 // converts it into generic graph format.
-Status BuildModel(TfLiteContext* context,
-                  const TfLiteDelegateParams* delegate_params,
-                  GraphFloat32* graph);
+absl::Status BuildModel(TfLiteContext* context,
+                        const TfLiteDelegateParams* delegate_params,
+                        GraphFloat32* graph);
 
 // Same as above, but also applies all transformations to the final graph.
 // Prefer using this method instead of BuildModel.
-Status BuildFinalModel(TfLiteContext* context,
-                       const TfLiteDelegateParams* delegate_params,
-                       GraphFloat32* graph);
+absl::Status BuildFinalModel(TfLiteContext* context,
+                             const TfLiteDelegateParams* delegate_params,
+                             GraphFloat32* graph);
 
 // Module-internal converter, exposed for unit testing purpose only.
-Status ConvertTfLiteTensorToTensorRef(const TfLiteTensor& tflite_tensor,
-                                      TensorRef<BHWC>* tensor_ref);
+absl::Status ConvertTfLiteTensorToTensorRef(const TfLiteTensor& tflite_tensor,
+                                            TensorRef<BHWC>* tensor_ref);
 
 }  // namespace gpu
 }  // namespace tflite
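
With the header migrated, callers get an `absl::Status` back from `BuildModel`/`BuildFinalModel` and read failures through `message()`. A minimal delegate-side sketch, assuming the `context` and `delegate_params` handed over by the TFLite delegate framework (`PrepareGraph` is a hypothetical wrapper, not part of this patch):

```cpp
#include "tensorflow/lite/delegates/gpu/common/model_builder.h"

// Sketch only: build the transformed graph inside a delegate's Prepare step.
TfLiteStatus PrepareGraph(TfLiteContext* context,
                          const TfLiteDelegateParams* delegate_params) {
  tflite::gpu::GraphFloat32 graph;
  const absl::Status status =
      tflite::gpu::BuildFinalModel(context, delegate_params, &graph);
  if (!status.ok()) {
    // message() is an absl::string_view now, so materialize a std::string
    // before handing it to a printf-style logger.
    TF_LITE_KERNEL_LOG(context, "BuildFinalModel: %s",
                       std::string(status.message()).c_str());
    return kTfLiteError;
  }
  return kTfLiteOk;
}
```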
diff --git a/tensorflow/lite/delegates/gpu/common/operations.cc b/tensorflow/lite/delegates/gpu/common/operations.cc
index b20b24d28c3..771ed7378b9 100644
--- a/tensorflow/lite/delegates/gpu/common/operations.cc
+++ b/tensorflow/lite/delegates/gpu/common/operations.cc
@@ -519,14 +519,15 @@ BHWC CalculateOutputShape(const BHWC& input, const MeanAttributes& attr) {
   return BHWC(b, h, w, c);
 }
 
-Status CalculateOutputShape(const std::vector<BHWC>& input,
-                            const ConcatAttributes& attr, BHWC* output_shape) {
+absl::Status CalculateOutputShape(const std::vector<BHWC>& input,
+                                  const ConcatAttributes& attr,
+                                  BHWC* output_shape) {
   BHWC new_shape = input[0];
   switch (attr.axis) {
     case Axis::CHANNELS:
       for (int i = 1; i < input.size(); i++) {
         if (input[i].h != new_shape.h || input[i].w != new_shape.w) {
-          return InvalidArgumentError(
+          return absl::InvalidArgumentError(
               "Height and Width must be the same when concatenating "
               "by channels axis");
         }
@@ -536,7 +537,7 @@ Status CalculateOutputShape(const std::vector<BHWC>& input,
     case Axis::HEIGHT:
       for (int i = 1; i < input.size(); i++) {
         if (input[i].w != new_shape.w || input[i].c != new_shape.c) {
-          return InvalidArgumentError(
+          return absl::InvalidArgumentError(
               "Channels and Width must be the same when concatenating "
               "by height axis");
         }
@@ -546,7 +547,7 @@ Status CalculateOutputShape(const std::vector<BHWC>& input,
     case Axis::WIDTH:
       for (int i = 1; i < input.size(); i++) {
         if (input[i].h != new_shape.h || input[i].c != new_shape.c) {
-          return InvalidArgumentError(
+          return absl::InvalidArgumentError(
               "Height and Channels must be the same when concatenating "
               "by width axis");
         }
@@ -554,11 +555,11 @@ Status CalculateOutputShape(const std::vector<BHWC>& input,
       }
       break;
     default:
-      return InvalidArgumentError("Invalid axis");
+      return absl::InvalidArgumentError("Invalid axis");
       break;
   }
   *output_shape = new_shape;
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 Padding2D CalculateSamePadding(const BHWC& input,
diff --git a/tensorflow/lite/delegates/gpu/common/operations.h b/tensorflow/lite/delegates/gpu/common/operations.h
index 16016d334cf..4eb41dfe1a3 100644
--- a/tensorflow/lite/delegates/gpu/common/operations.h
+++ b/tensorflow/lite/delegates/gpu/common/operations.h
@@ -202,8 +202,9 @@ BHWDC CalculateOutputShape(const BHWDC& input, const Pooling3DAttributes& attr);
 
 // @return shape of a tensor after Concat operation is applied to the given
 //         input.
-Status CalculateOutputShape(const std::vector<BHWC>& input,
-                            const ConcatAttributes& attr, BHWC* output_shape);
+absl::Status CalculateOutputShape(const std::vector<BHWC>& input,
+                                  const ConcatAttributes& attr,
+                                  BHWC* output_shape);
 
 // @return padding for pooling operation to make sure output keeps the same
 // shape
 // as the given input.
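
The concat overload is the one shape helper here that can fail, which is why it keeps the status-plus-out-parameter form. A short usage sketch with made-up shapes:

```cpp
// Sketch: concatenating two 8x8 tensors along the channel axis.
std::vector<tflite::gpu::BHWC> inputs = {tflite::gpu::BHWC(1, 8, 8, 16),
                                         tflite::gpu::BHWC(1, 8, 8, 32)};
tflite::gpu::ConcatAttributes attr;
attr.axis = tflite::gpu::Axis::CHANNELS;

tflite::gpu::BHWC output;
const absl::Status status =
    tflite::gpu::CalculateOutputShape(inputs, attr, &output);
// On success output is (1, 8, 8, 48); inputs with mismatched H or W produce
// absl::InvalidArgumentError instead.
```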
diff --git a/tensorflow/lite/delegates/gpu/common/status.h b/tensorflow/lite/delegates/gpu/common/status.h
index 250a3b5e3eb..d6b5dd8a94a 100644
--- a/tensorflow/lite/delegates/gpu/common/status.h
+++ b/tensorflow/lite/delegates/gpu/common/status.h
@@ -1,4 +1,4 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
+/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -16,109 +16,7 @@ limitations under the License.
 #ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_STATUS_H_
 #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_STATUS_H_
 
-#include <string>
-
-namespace tflite {
-namespace gpu {
-
-enum class StatusCode {
-  kOk = 0,
-  kCancelled = 1,
-  kUnknown = 2,
-  kInvalidArgument = 3,
-  kDeadlineExceeded = 4,
-  kNotFound = 5,
-  kAlreadyExists = 6,
-  kPermissionDenied = 7,
-  kResourceExhausted = 8,
-  kFailedPrecondition = 9,
-  kAborted = 10,
-  kOutOfRange = 11,
-  kUnimplemented = 12,
-  kInternal = 13,
-  kUnavailable = 14,
-  kDataLoss = 15,
-  kUnauthenticated = 16,
-  kDoNotUseReservedForFutureExpansionUseDefaultInSwitchInstead_ = 20
-};
-
-// Lite version of Status without dependency on protobuf.
-// TODO(b/128867901): Migrate to absl::Status.
-class Status {
- public:
-  Status() = default;
-  Status(StatusCode code) : code_(code) {}
-  Status(StatusCode code, const std::string& error_message)
-      : code_(code), error_message_(error_message) {}
-
-  const std::string& error_message() const { return error_message_; }
-  StatusCode code() const { return code_; }
-  bool ok() const { return code_ == StatusCode::kOk; }
-
-  void IgnoreError() const {}
-
- private:
-  StatusCode code_ = StatusCode::kOk;
-  std::string error_message_;
-};
-
-#define RETURN_IF_ERROR(status)        \
-  {                                    \
-    const auto status2 = (status);     \
-    if (!status2.ok()) return status2; \
-  }
-
-inline Status OkStatus() { return Status(); }
-
-inline Status AlreadyExistsError(const std::string& message) {
-  return Status(StatusCode::kAlreadyExists, message);
-}
-
-inline Status DeadlineExceededError(const std::string& message) {
-  return Status(StatusCode::kDeadlineExceeded, message);
-}
-
-inline Status FailedPreconditionError(const std::string& message) {
-  return Status(StatusCode::kFailedPrecondition, message);
-}
-
-inline Status InternalError(const std::string& message) {
-  return Status(StatusCode::kInternal, message);
-}
-
-inline Status InvalidArgumentError(const std::string& message) {
-  return Status(StatusCode::kInvalidArgument, message);
-}
-
-inline Status NotFoundError(const std::string& message) {
-  return Status(StatusCode::kNotFound, message);
-}
-
-inline Status OutOfRangeError(const std::string& message) {
-  return Status(StatusCode::kOutOfRange, message);
-}
-
-inline Status PermissionDeniedError(const std::string& message) {
-  return Status(StatusCode::kPermissionDenied, message);
-}
-
-inline Status ResourceExhaustedError(const std::string& message) {
-  return Status(StatusCode::kResourceExhausted, message);
-}
-
-inline Status UnavailableError(const std::string& message) {
-  return Status(StatusCode::kUnavailable, message);
-}
-
-inline Status UnimplementedError(const std::string& message) {
-  return Status(StatusCode::kUnimplemented, message);
-}
-
-inline Status UnknownError(const std::string& message) {
-  return Status(StatusCode::kUnknown, message);
-}
-
-}  // namespace gpu
-}  // namespace tflite
+#include "absl/status/status.h"
+#define RETURN_IF_ERROR(s) { auto c = (s); if (!c.ok()) return c; }
 
 #endif  // TENSORFLOW_LITE_DELEGATES_GPU_COMMON_STATUS_H_
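
The entire hand-rolled `Status`/`StatusCode` surface collapses into one include plus the retained `RETURN_IF_ERROR` macro, and every factory (`OkStatus`, `InternalError`, ...) moves to its `absl::` counterpart. A sketch of a post-migration call site (`CheckPositive` and `Pipeline` are hypothetical):

```cpp
#include "tensorflow/lite/delegates/gpu/common/status.h"

absl::Status CheckPositive(int value) {
  if (value <= 0) {
    return absl::InvalidArgumentError("value must be positive");
  }
  return absl::OkStatus();
}

absl::Status Pipeline(int value) {
  // Evaluates its argument once and early-returns on error, exactly like
  // the deleted macro.
  RETURN_IF_ERROR(CheckPositive(value));
  return absl::OkStatus();
}
```

The one visible difference for callers is the message accessor: the old `error_message()` returned `const std::string&`, while `absl::Status::message()` returns `absl::string_view`, which is why call sites throughout this patch wrap it in `std::string(...)`.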
diff --git a/tensorflow/lite/delegates/gpu/common/testing/interpreter_utils.cc b/tensorflow/lite/delegates/gpu/common/testing/interpreter_utils.cc
index cbd62fa6853..08d9448f7e5 100644
--- a/tensorflow/lite/delegates/gpu/common/testing/interpreter_utils.cc
+++ b/tensorflow/lite/delegates/gpu/common/testing/interpreter_utils.cc
@@ -30,21 +30,21 @@ namespace tflite {
 namespace gpu {
 namespace testing {
 
-Status InterpreterInvokeWithOpResolver(const ::tflite::Model* model,
-                                       TfLiteDelegate* delegate,
-                                       const OpResolver& op_resolver,
-                                       const std::vector<TensorFloat32>& inputs,
-                                       std::vector<TensorFloat32>* outputs) {
+absl::Status InterpreterInvokeWithOpResolver(
+    const ::tflite::Model* model, TfLiteDelegate* delegate,
+    const OpResolver& op_resolver, const std::vector<TensorFloat32>& inputs,
+    std::vector<TensorFloat32>* outputs) {
   auto interpreter = absl::make_unique<Interpreter>();
   if (InterpreterBuilder(model, op_resolver)(&interpreter) != kTfLiteOk) {
-    return InternalError("Unable to create TfLite InterpreterBuilder");
+    return absl::InternalError("Unable to create TfLite InterpreterBuilder");
   }
   if (delegate && interpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk) {
-    return InternalError("Unable to modify TfLite graph with the delegate");
+    return absl::InternalError(
+        "Unable to modify TfLite graph with the delegate");
   }
   interpreter->SetNumThreads(1);
   if (interpreter->AllocateTensors() != kTfLiteOk) {
-    return InternalError("Unable to allocate TfLite tensors");
+    return absl::InternalError("Unable to allocate TfLite tensors");
   }
   for (int i = 0; i < inputs.size(); ++i) {
     DCHECK_EQ(interpreter->tensor(interpreter->inputs()[i])->type,
@@ -57,10 +57,10 @@ Status InterpreterInvokeWithOpResolver(const ::tflite::Model* model,
                 inputs[i].data.size() * sizeof(float));
   }
   if (interpreter->Invoke() != kTfLiteOk) {
-    return InternalError("Unable to invoke TfLite interpreter");
+    return absl::InternalError("Unable to invoke TfLite interpreter");
   }
   if (!outputs || !outputs->empty()) {
-    return InternalError("Invalid outputs pointer");
+    return absl::InternalError("Invalid outputs pointer");
   }
   outputs->reserve(interpreter->outputs().size());
   for (auto t : interpreter->outputs()) {
@@ -69,7 +69,7 @@ Status InterpreterInvokeWithOpResolver(const ::tflite::Model* model,
     bhwc.id = t;
     // TODO(impjdi) Relax this condition to arbitrary batch size.
     if (out_tensor->dims->data[0] != 1) {
-      return InternalError("Batch dimension is expected to be 1");
+      return absl::InternalError("Batch dimension is expected to be 1");
     }
     bhwc.shape.b = out_tensor->dims->data[0];
     switch (out_tensor->dims->size) {
@@ -89,20 +89,21 @@ Status InterpreterInvokeWithOpResolver(const ::tflite::Model* model,
         bhwc.shape.c = out_tensor->dims->data[3];
         break;
       default:
-        return InternalError("Unsupported dimensions size " +
-                             std::to_string(out_tensor->dims->size));
+        return absl::InternalError("Unsupported dimensions size " +
+                                   std::to_string(out_tensor->dims->size));
     }
     bhwc.data = std::vector<float>(
         out_tensor->data.f,
         out_tensor->data.f + out_tensor->bytes / sizeof(float));
     outputs->push_back(bhwc);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status InterpreterInvoke(const ::tflite::Model* model, TfLiteDelegate* delegate,
-                         const std::vector<TensorFloat32>& inputs,
-                         std::vector<TensorFloat32>* outputs) {
+absl::Status InterpreterInvoke(const ::tflite::Model* model,
+                               TfLiteDelegate* delegate,
+                               const std::vector<TensorFloat32>& inputs,
+                               std::vector<TensorFloat32>* outputs) {
   ops::builtin::BuiltinOpResolver builtin_op_resolver;
   return InterpreterInvokeWithOpResolver(model, delegate, builtin_op_resolver,
                                          inputs, outputs);
diff --git a/tensorflow/lite/delegates/gpu/common/testing/interpreter_utils.h b/tensorflow/lite/delegates/gpu/common/testing/interpreter_utils.h
index a38a5d1363a..ca2825b7563 100644
--- a/tensorflow/lite/delegates/gpu/common/testing/interpreter_utils.h
+++ b/tensorflow/lite/delegates/gpu/common/testing/interpreter_utils.h
@@ -31,18 +31,18 @@ namespace testing {
 // Runs a TensorFlow Lite model with the given delegate and an appropriate
 // operations resolver. If delegate is nullptr, inference runs on CPU only.
-Status InterpreterInvokeWithOpResolver(const ::tflite::Model* model,
-                                       TfLiteDelegate* delegate,
-                                       const OpResolver& op_resolver,
-                                       const std::vector<TensorFloat32>& inputs,
-                                       std::vector<TensorFloat32>* outputs);
+absl::Status InterpreterInvokeWithOpResolver(
+    const ::tflite::Model* model, TfLiteDelegate* delegate,
+    const OpResolver& op_resolver, const std::vector<TensorFloat32>& inputs,
+    std::vector<TensorFloat32>* outputs);
 
 // Runs a TensorFlow Lite model with the given delegate and the builtin
 // operations resolver. If delegate is nullptr, inference runs on CPU only.
-Status InterpreterInvoke(const ::tflite::Model* model, TfLiteDelegate* delegate,
-                         const std::vector<TensorFloat32>& inputs,
-                         std::vector<TensorFloat32>* outputs);
+absl::Status InterpreterInvoke(const ::tflite::Model* model,
+                               TfLiteDelegate* delegate,
+                               const std::vector<TensorFloat32>& inputs,
+                               std::vector<TensorFloat32>* outputs);
 
 }  // namespace testing
 }  // namespace gpu
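
Per the header comment, a null delegate yields a CPU-only run, which is how tests obtain a reference result to compare a delegate against. A sketch, assuming a flatbuffer-backed `model` and test-provided inputs:

```cpp
// Sketch: CPU reference invocation in a test. The outputs vector must be
// empty on entry; the implementation returns InternalError otherwise.
std::vector<tflite::gpu::TensorFloat32> inputs;  // filled by the test
std::vector<tflite::gpu::TensorFloat32> outputs;
const absl::Status status = tflite::gpu::testing::InterpreterInvoke(
    model, /*delegate=*/nullptr, inputs, &outputs);
ASSERT_TRUE(status.ok()) << status.message();
```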
diff --git a/tensorflow/lite/delegates/gpu/common/transformations/add_quant_adjustments.cc b/tensorflow/lite/delegates/gpu/common/transformations/add_quant_adjustments.cc
index 872c4bcd903..0011cc24dfa 100644
--- a/tensorflow/lite/delegates/gpu/common/transformations/add_quant_adjustments.cc
+++ b/tensorflow/lite/delegates/gpu/common/transformations/add_quant_adjustments.cc
@@ -61,7 +61,7 @@ class AddQuantAdjustments : public NodeTransformation {
       // The tensor information should remain the same.
       Value<TensorRef<BHWC>>* adjusted_value = graph->NewValue();
       adjusted_value->tensor = output_value->tensor;
-      Status status =
+      absl::Status status =
           graph->SetProducer(quant_and_dequant_node->id, adjusted_value->id);
       if (!status.ok()) {
         return {TransformStatus::INVALID,
diff --git a/tensorflow/lite/delegates/gpu/common/transformations/fuse_add_to_conv.cc b/tensorflow/lite/delegates/gpu/common/transformations/fuse_add_to_conv.cc
index 586c7a34a37..4efb98a6847 100644
--- a/tensorflow/lite/delegates/gpu/common/transformations/fuse_add_to_conv.cc
+++ b/tensorflow/lite/delegates/gpu/common/transformations/fuse_add_to_conv.cc
@@ -81,11 +81,11 @@ class MergeConvolutionWithAdd : public SequenceTransformation {
       return {TransformStatus::SKIPPED, ""};
     }
 
-    Status status = RemoveFollowingNode(graph, &add_node, &conv_node);
+    absl::Status status = RemoveFollowingNode(graph, &add_node, &conv_node);
     if (!status.ok()) {
       return {TransformStatus::INVALID,
               "Unable to remove add node after convolution: " +
-                  status.error_message()};
+                  std::string(status.message())};
     }
     return {TransformStatus::APPLIED, ""};
   }
@@ -131,11 +131,11 @@ class MergeAddWithConvolution : public SequenceTransformation {
       return {TransformStatus::SKIPPED, ""};
     }
 
-    Status status = RemovePrecedingNode(graph, &add_node, &conv_node);
+    absl::Status status = RemovePrecedingNode(graph, &add_node, &conv_node);
     if (!status.ok()) {
       return {TransformStatus::INVALID,
               "Unable to remove add node after convolution: " +
-                  status.error_message()};
+                  std::string(status.message())};
     }
     return {TransformStatus::APPLIED, ""};
   }
diff --git a/tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv.cc b/tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv.cc
index 6b106a4be62..055327d3534 100644
--- a/tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv.cc
+++ b/tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv.cc
@@ -74,11 +74,11 @@ class MergeConvolutionWithMul : public SequenceTransformation {
       return {TransformStatus::SKIPPED, ""};
     }
 
-    Status status = RemoveFollowingNode(graph, &mul_node, &conv_node);
+    absl::Status status = RemoveFollowingNode(graph, &mul_node, &conv_node);
     if (!status.ok()) {
       return {TransformStatus::INVALID,
               "Unable to remove mul node after convolution: " +
-                  status.error_message()};
+                  std::string(status.message())};
     }
     return {TransformStatus::APPLIED, ""};
   }
@@ -134,11 +134,11 @@ class MergeMulWithConvolution : public SequenceTransformation {
       return {TransformStatus::SKIPPED, ""};
     }
 
-    Status status = RemovePrecedingNode(graph, &mul_node, &conv_node);
+    absl::Status status = RemovePrecedingNode(graph, &mul_node, &conv_node);
     if (!status.ok()) {
       return {TransformStatus::INVALID,
               "Unable to remove mul node after convolution: " +
-                  status.error_message()};
+                  std::string(status.message())};
     }
     return {TransformStatus::APPLIED, ""};
   }
diff --git a/tensorflow/lite/delegates/gpu/common/transformations/make_padding.cc b/tensorflow/lite/delegates/gpu/common/transformations/make_padding.cc
index 5e98edac943..17aac83baf7 100644
--- a/tensorflow/lite/delegates/gpu/common/transformations/make_padding.cc
+++ b/tensorflow/lite/delegates/gpu/common/transformations/make_padding.cc
@@ -76,10 +76,10 @@ class MakePaddingFromZerosConcat : public NodeTransformation {
                     "Padding for concat axis is unsupported: " +
                         ToString(concat_attr.axis)};
         }
-        Status status = RemovePrecedingNode(graph, dep, node);
+        absl::Status status = RemovePrecedingNode(graph, dep, node);
         if (!status.ok()) {
-          return {TransformStatus::INVALID,
-                  "Unable to remove const node: " + status.error_message()};
+          return {TransformStatus::INVALID, "Unable to remove const node: " +
+                                                std::string(status.message())};
         }
         node->operation.attributes = pad_attr;
         node->operation.type = ToString(OperationType::PAD);
diff --git a/tensorflow/lite/delegates/gpu/common/transformations/match_dilated_convolution.cc b/tensorflow/lite/delegates/gpu/common/transformations/match_dilated_convolution.cc
index 5257ba44f0e..f1c56477834 100644
--- a/tensorflow/lite/delegates/gpu/common/transformations/match_dilated_convolution.cc
+++ b/tensorflow/lite/delegates/gpu/common/transformations/match_dilated_convolution.cc
@@ -72,7 +72,7 @@ class MatchDilatedConvolution : public SequenceTransformation {
       conv_node.operation.attributes = std::move(conv2d_attr);
     }
 
-    Status status = RemoveFollowingNode(graph, &bs_node, &conv_node);
+    absl::Status status = RemoveFollowingNode(graph, &bs_node, &conv_node);
     if (!status.ok()) {
       return {TransformStatus::INVALID,
               "Unable to remove batch_to_space node after convolution."};
diff --git a/tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with.cc b/tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with.cc
index 5e2f1e17f54..23e99bc3305 100644
--- a/tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with.cc
+++ b/tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with.cc
@@ -62,11 +62,11 @@ class MergePaddingWith2DOperation : public SequenceTransformation {
     }
 
     Attr* node_attr = absl::any_cast<Attr>(&op_node->operation.attributes);
-    Status status = RemovePrecedingNode(graph, pad_node, op_node);
+    absl::Status status = RemovePrecedingNode(graph, pad_node, op_node);
     if (!status.ok()) {
       return {TransformStatus::INVALID,
               "Unable to remove Pad node with Operation node: " +
-                  status.error_message()};
+                  std::string(status.message())};
     }
 
     node_attr->padding.appended.h += pad_attr.appended.h;
@@ -154,10 +154,10 @@ class MergePaddingWithAddOperation : public NodeTransformation {
               "Cannot remove padding when this broadcast/scalar ADD"};
     }
 
-    Status status = RemovePrecedingNode(graph, node, add_node);
+    absl::Status status = RemovePrecedingNode(graph, node, add_node);
     if (!status.ok()) {
       return {TransformStatus::INVALID,
-              "Unable to remove Pad node " + status.error_message()};
+              "Unable to remove Pad node " + std::string(status.message())};
     }
 
     return {TransformStatus::APPLIED,
diff --git a/tensorflow/lite/delegates/gpu/common/transformations/remove_noop.cc b/tensorflow/lite/delegates/gpu/common/transformations/remove_noop.cc
index 64779990178..e80b244b34f 100644
--- a/tensorflow/lite/delegates/gpu/common/transformations/remove_noop.cc
+++ b/tensorflow/lite/delegates/gpu/common/transformations/remove_noop.cc
@@ -44,10 +44,10 @@ class RemoveOperation : public SequenceTransformation {
     if (!remove_predicate_(graph, op_node)) {
       return {TransformStatus::SKIPPED, ""};
     }
-    Status status = RemoveFollowingNode(graph, op_node, prev_op_node);
+    absl::Status status = RemoveFollowingNode(graph, op_node, prev_op_node);
     if (!status.ok()) {
       return {TransformStatus::INVALID,
-              "Unable to remove a node: " + status.error_message()};
+              "Unable to remove a node: " + std::string(status.message())};
     }
     return {TransformStatus::APPLIED, ""};
   }
@@ -116,10 +116,10 @@ class RemoveIdentityReshape : public NodeTransformation {
       return {TransformStatus::SKIPPED,
               "Can not apply transformation when node output is graph output"};
     }
-    Status status = RemoveOneInputOneOutputNode(graph, node);
+    absl::Status status = RemoveOneInputOneOutputNode(graph, node);
     if (!status.ok()) {
       return {TransformStatus::INVALID,
-              "Unable to remove a node: " + status.error_message()};
+              "Unable to remove a node: " + std::string(status.message())};
     }
     return {TransformStatus::APPLIED,
             "Removed reshape with input_shape == output_shape."};
diff --git a/tensorflow/lite/delegates/gpu/common/workgroup_selection.cc b/tensorflow/lite/delegates/gpu/common/workgroup_selection.cc
index d6d22aa6a62..d18e3726a1c 100644
--- a/tensorflow/lite/delegates/gpu/common/workgroup_selection.cc
+++ b/tensorflow/lite/delegates/gpu/common/workgroup_selection.cc
@@ -184,10 +184,9 @@ template std::vector<uint3> GenerateWorkGroupSizes(
     WorkGroupSizeAlignment z_alignment);
 
 template <typename T>
-Status GenerateWorkGroupSizesAlignedToGrid(const T& grid,
-                                           const T& max_work_group_size,
-                                           const int max_work_group_invocations,
-                                           std::vector<T>* work_groups) {
+absl::Status GenerateWorkGroupSizesAlignedToGrid(
+    const T& grid, const T& max_work_group_size,
+    const int max_work_group_invocations, std::vector<T>* work_groups) {
   auto alignment = WorkGroupSizeAlignment::PRECISE;
   *work_groups = GenerateWorkGroupSizes<T>(
       grid, /*min_work_group_total_size = */ 32, max_work_group_invocations,
@@ -197,16 +196,16 @@ Status GenerateWorkGroupSizesAlignedToGrid(const T& grid,
     AddCornerCases(grid, max_work_group_invocations, max_work_group_size,
                    alignment, alignment, alignment, work_groups);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // Specializations of GenerateWorkGroupSizesAlignedToGrid for int3 and uint3
 
-template Status GenerateWorkGroupSizesAlignedToGrid(
+template absl::Status GenerateWorkGroupSizesAlignedToGrid(
     const int3& grid, const int3& max_work_group_size,
     const int max_work_group_invocations, std::vector<int3>* work_groups);
 
-template Status GenerateWorkGroupSizesAlignedToGrid(
+template absl::Status GenerateWorkGroupSizesAlignedToGrid(
     const uint3& grid, const uint3& max_work_group_size,
     const int max_work_group_invocations, std::vector<uint3>* work_groups);
 
diff --git a/tensorflow/lite/delegates/gpu/common/workgroup_selection.h b/tensorflow/lite/delegates/gpu/common/workgroup_selection.h
index 80915ff5c95..75967cb04df 100644
--- a/tensorflow/lite/delegates/gpu/common/workgroup_selection.h
+++ b/tensorflow/lite/delegates/gpu/common/workgroup_selection.h
@@ -42,10 +42,9 @@ std::vector<T> GenerateWorkGroupSizes(
     WorkGroupSizeAlignment y_alignment, WorkGroupSizeAlignment z_alignment);
 
 template <typename T>
-Status GenerateWorkGroupSizesAlignedToGrid(const T& grid,
-                                           const T& max_work_group_size,
-                                           const int max_work_group_invocations,
-                                           std::vector<T>* work_groups);
+absl::Status GenerateWorkGroupSizesAlignedToGrid(
+    const T& grid, const T& max_work_group_size,
+    const int max_work_group_invocations, std::vector<T>* work_groups);
 
 }  // namespace gpu
 }  // namespace tflite
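
Only the `int3` and `uint3` instantiations are emitted, so callers pass one of those. A sketch with illustrative limits (real values come from the queried GPU capabilities):

```cpp
// Sketch: enumerate candidate workgroup sizes aligned to a dispatch grid.
tflite::gpu::int3 grid(128, 128, 4);
tflite::gpu::int3 max_work_group_size(256, 256, 64);
std::vector<tflite::gpu::int3> work_groups;
const absl::Status status = tflite::gpu::GenerateWorkGroupSizesAlignedToGrid(
    grid, max_work_group_size, /*max_work_group_invocations=*/256,
    &work_groups);
```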
diff --git a/tensorflow/lite/delegates/gpu/delegate.cc b/tensorflow/lite/delegates/gpu/delegate.cc
index 452f81f536d..3451119c71d 100644
--- a/tensorflow/lite/delegates/gpu/delegate.cc
+++ b/tensorflow/lite/delegates/gpu/delegate.cc
@@ -70,8 +70,8 @@ class Delegate {
     options_ = options ? *options : TfLiteGpuDelegateOptionsV2Default();
   }
 
-  Status Prepare(TfLiteContext* context,
-                 const TfLiteDelegateParams* delegate_params) {
+  absl::Status Prepare(TfLiteContext* context,
+                       const TfLiteDelegateParams* delegate_params) {
     thread_id_prepare_ = std::this_thread::get_id();
 
     // Extract TFLite delegate execution plan from the context and convert it
@@ -98,9 +98,10 @@ class Delegate {
 
     std::unique_ptr<InferenceBuilder> builder;
     bool graph_is_destroyed;
-    Status status = InitializeOpenClApi(&graph, &builder, &graph_is_destroyed);
+    absl::Status status =
+        InitializeOpenClApi(&graph, &builder, &graph_is_destroyed);
     if (!status.ok()) {
-      context->ReportError(context, "%s", status.error_message().c_str());
+      TF_LITE_KERNEL_LOG(context, "%s", std::string(status.message()).c_str());
       context->ReportError(context, "Falling back to OpenGL");
 
      // Graph needs to be re-created because it was moved above.
@@ -132,7 +133,7 @@ class Delegate {
     return builder->Build(&runner_);
   }
 
-  Status SetInputsAndOutputs(TfLiteContext* context) {
+  absl::Status SetInputsAndOutputs(TfLiteContext* context) {
     int i = 0;
     for (auto index : input_indices_) {
       RETURN_IF_ERROR(
@@ -143,15 +144,15 @@ class Delegate {
       RETURN_IF_ERROR(
           runner_->SetOutputObject(i++, GetTensorObject(index, context)));
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Invoke(TfLiteContext* context) {
+  absl::Status Invoke(TfLiteContext* context) {
     if (thread_id_prepare_ != std::this_thread::get_id()) {
       TFLITE_LOG(tflite::TFLITE_LOG_WARNING,
                  "GpuDelegate invoke thread != prepare thread");
       if (enforce_same_thread_) {
-        return FailedPreconditionError(
+        return absl::FailedPreconditionError(
             "GpuDelegate must run on the same thread where it was "
             "initialized.");
       }
@@ -178,9 +179,9 @@ class Delegate {
   TfLiteDelegate* tflite_delegate() { return &delegate_; }
 
  private:
-  Status InitializeOpenClApi(GraphFloat32* graph,
-                             std::unique_ptr<InferenceBuilder>* builder,
-                             bool* graph_is_destroyed) {
+  absl::Status InitializeOpenClApi(GraphFloat32* graph,
+                                   std::unique_ptr<InferenceBuilder>* builder,
+                                   bool* graph_is_destroyed) {
     *graph_is_destroyed = false;
     cl::InferenceEnvironmentOptions env_options;
     cl::InferenceEnvironmentProperties properties;
@@ -207,11 +208,11 @@ class Delegate {
         options, std::move(*graph), builder));
     TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO,
                          "Initialized OpenCL-based API.");
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status InitializeOpenGlApi(GraphFloat32* graph,
-                             std::unique_ptr<InferenceBuilder>* builder) {
+  absl::Status InitializeOpenGlApi(GraphFloat32* graph,
+                                   std::unique_ptr<InferenceBuilder>* builder) {
     gl::InferenceEnvironmentOptions env_options;
     gl::InferenceEnvironmentProperties properties;
     RETURN_IF_ERROR(
@@ -226,7 +227,7 @@ class Delegate {
     enforce_same_thread_ = true;
     TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO,
                          "Initialized OpenGL-based API.");
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   TfLiteDelegate delegate_ = {
@@ -269,7 +270,7 @@ TfLiteStatus DelegatePrepare(TfLiteContext* context, TfLiteDelegate* delegate) {
         const auto status = gpu_delegate->Prepare(context, params);
         if (!status.ok()) {
           context->ReportError(context, "TfLiteGpuDelegate Init: %s",
-                               status.error_message().c_str());
+                               std::string(status.message()).c_str());
           return nullptr;
         }
         return gpu_delegate;
@@ -294,7 +295,7 @@ TfLiteStatus DelegatePrepare(TfLiteContext* context, TfLiteDelegate* delegate) {
         const auto status = GetDelegate(node)->Invoke(context);
         if (!status.ok()) {
           context->ReportError(context, "TfLiteGpuDelegate Invoke: %s",
-                               status.error_message().c_str());
+                               std::string(status.message()).c_str());
           return kTfLiteError;
         }
         return kTfLiteOk;
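
Both delegate callbacks end with the same status-to-`TfLiteStatus` conversion. A hypothetical helper, not part of this patch, that names the repeated pattern:

```cpp
// Hypothetical: centralizes the report-and-convert step used by the Init
// and Invoke callbacks above.
TfLiteStatus ReportOnError(TfLiteContext* context, const char* stage,
                           const absl::Status& status) {
  if (status.ok()) return kTfLiteOk;
  context->ReportError(context, "%s: %s", stage,
                       std::string(status.message()).c_str());
  return kTfLiteError;
}
```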
diff --git a/tensorflow/lite/delegates/gpu/gl/api.cc b/tensorflow/lite/delegates/gpu/gl/api.cc
index f9adbf253c1..f50f3458a8f 100644
--- a/tensorflow/lite/delegates/gpu/gl/api.cc
+++ b/tensorflow/lite/delegates/gpu/gl/api.cc
@@ -58,20 +58,20 @@ class InferenceContextImpl : public InferenceContext {
   explicit InferenceContextImpl(std::unique_ptr<Runtime> runtime)
       : runtime_(std::move(runtime)) {}
 
-  Status Execute() final {
+  absl::Status Execute() final {
     std::lock_guard<std::mutex> lock(guard_);
     if (state_ != InferenceContextState::NOT_STARTED) {
-      return FailedPreconditionError("InferenceContext is not reset");
+      return absl::FailedPreconditionError("InferenceContext is not reset");
     }
     state_ = InferenceContextState::IN_PROGRESS;
     return runtime_->Execute();
   }
 
-  Status Reset() final {
+  absl::Status Reset() final {
     std::lock_guard<std::mutex> lock(guard_);
     // TODO(akulik): should Reset not return Status?
     state_ = InferenceContextState::NOT_STARTED;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   RuntimeStats stats() const final { return runtime_->stats(); }
@@ -94,10 +94,10 @@ class InferenceContextWithBatchImpl : public InferenceContext {
         refs_(std::move(refs)),
         runtime_(std::move(runtime)) {}
 
-  Status Execute() final {
+  absl::Status Execute() final {
     std::lock_guard<std::mutex> lock(guard_);
     if (state_ != InferenceContextState::NOT_STARTED) {
-      return FailedPreconditionError("InferenceContext is not reset");
+      return absl::FailedPreconditionError("InferenceContext is not reset");
     }
     state_ = InferenceContextState::IN_PROGRESS;
 
@@ -112,7 +112,7 @@ class InferenceContextWithBatchImpl : public InferenceContext {
       if (!buffer) continue;
 
       if (buffer->bytes_size() % byte_size) {
-        return InvalidArgumentError(absl::StrCat(
+        return absl::InvalidArgumentError(absl::StrCat(
             "Object ", id, " does not match expected byte size: ", byte_size));
       }
 
@@ -120,7 +120,7 @@ class InferenceContextWithBatchImpl : public InferenceContext {
       if (num_batches == 0) {
         num_batches = b;
       } else if (num_batches != b) {
-        return InvalidArgumentError(absl::StrCat(
+        return absl::InvalidArgumentError(absl::StrCat(
             "Object ", id, " size does not match expected batch size: ", b,
             " vs ", num_batches));
       }
@@ -135,7 +135,7 @@ class InferenceContextWithBatchImpl : public InferenceContext {
         if (buffer) {
           auto ref = refs_->FindBuffer(id);
           if (!ref) {
-            return InvalidArgumentError(
+            return absl::InvalidArgumentError(
                 absl::StrCat("Reference to ", id, " is not found"));
           }
           RETURN_IF_ERROR(buffer->MakeView(b * byte_size, byte_size, ref));
@@ -143,14 +143,14 @@ class InferenceContextWithBatchImpl : public InferenceContext {
       }
       RETURN_IF_ERROR(runtime_->Execute());
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Reset() final {
+  absl::Status Reset() final {
     std::lock_guard<std::mutex> lock(guard_);
     state_ = InferenceContextState::NOT_STARTED;
     // TODO(akulik): should Reset not return Status?
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   RuntimeStats stats() const final { return runtime_->stats(); }
@@ -197,8 +197,8 @@ class CompiledModelImpl
   explicit CompiledModelImpl(const GpuInfo& gpu_info) : gpu_info_(gpu_info) {}
 
   // Called while compiling shaders from scratch
-  Status Add(const WorkgroupsCalculator& workgroup_calculator,
-             ShaderCode code) {
+  absl::Status Add(const WorkgroupsCalculator& workgroup_calculator,
+                   ShaderCode code) {
     // Calculate workgroup size.
     uint3 workgroup_size = workgroup_calculator.Calculate(code);
     uint3 num_workgroups = IntegralDivideRoundUp(code.workload, workgroup_size);
@@ -220,13 +220,13 @@ class CompiledModelImpl
         num_workgroups,
         shader_idx,
     });
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   // Store full shader and compile it if necessary.
   // Returns the full shader index via |size|.
-  Status AddFullShader(const std::string& partial_shader,
-                       const uint3& workgroup_size, size_t* size) {
+  absl::Status AddFullShader(const std::string& partial_shader,
+                             const uint3& workgroup_size, size_t* size) {
     std::string shader_src = GetShaderHeader(workgroup_size) + partial_shader;
     auto it = shader_to_index_.find(shader_src);
     if (it == shader_to_index_.end()) {
@@ -239,10 +239,10 @@ class CompiledModelImpl
     } else {
       *size = it->second;
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status NewRun(
+  absl::Status NewRun(
       const RuntimeOptions& options, const ObjectManager* objects,
       CommandQueue* command_queue,
       std::unique_ptr<InferenceContext>* inference_context) const final {
@@ -273,15 +273,16 @@ class CompiledModelImpl
       *inference_context =
           absl::make_unique<InferenceContextImpl>(std::move(runtime));
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
 #ifndef TFLITE_GPU_BINARY_RELEASE
   // Called on deserialization
-  Status OnProgram(const std::vector<Variable>& parameters,
-                   const std::vector<Object>& objects,
-                   const uint3& workgroup_size, const uint3& num_workgroups,
-                   size_t partial_shader_index) final {
+  absl::Status OnProgram(const std::vector<Variable>& parameters,
+                         const std::vector<Object>& objects,
+                         const uint3& workgroup_size,
+                         const uint3& num_workgroups,
+                         size_t partial_shader_index) final {
     for (auto& object : objects) {
       if (IsRef(object)) {
         object_sizes_[GetRef(object)] = ByteSizeOf(object);
@@ -298,10 +299,10 @@ class CompiledModelImpl
         num_workgroups,
         shader_idx,
     });
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Serialize(
+  absl::Status Serialize(
       std::vector<uint8_t>* serialized_compiled_model) const final {
     SerializedCompiledModelBuilder builder;
 
@@ -338,13 +339,13 @@ class CompiledModelImpl
     auto data = builder.Finalize(options);
     serialized_compiled_model->insert(serialized_compiled_model->end(),
                                       data.begin(), data.end());
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status OnShader(absl::Span<const char> shader_src) final {
+  absl::Status OnShader(absl::Span<const char> shader_src) final {
     std::string source(shader_src.data(), shader_src.size());
     partial_shaders_.push_back(source);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   void OnOptions(const CompiledModelOptions& options) final {
@@ -371,45 +372,48 @@ class CompiledModelImpl
 };
 }  // namespace
 
-Status Compile(const CompilationOptions& options, const GraphFloat32& model,
-               const std::unordered_set<int>& tflite_graph_io,
-               const NodeShader& node_shader,
-               const WorkgroupsCalculator& workgroup_calculator,
-               std::unique_ptr<CompiledModel>* compiled_model) {
+absl::Status Compile(const CompilationOptions& options,
+                     const GraphFloat32& model,
+                     const std::unordered_set<int>& tflite_graph_io,
+                     const NodeShader& node_shader,
+                     const WorkgroupsCalculator& workgroup_calculator,
+                     std::unique_ptr<CompiledModel>* compiled_model) {
   if (!IsBatchMatchesForAllValues(model)) {
-    return InvalidArgumentError("Only identical batch dimension is supported");
+    return absl::InvalidArgumentError(
+        "Only identical batch dimension is supported");
   }
   GpuInfo gpu_info;
   RETURN_IF_ERROR(RequestGpuInfo(&gpu_info));
   if (!IsOpenGl31OrAbove(gpu_info)) {
-    return InternalError(
+    return absl::InternalError(
         "OpenGL ES 3.1 or above is required to use OpenGL inference.");
   }
   auto compiled_model_impl = absl::make_unique<CompiledModelImpl>(gpu_info);
   compiled_model_impl->set_dynamic_batch(options.dynamic_batch);
   auto compiler = NewCompiler(&node_shader, &gpu_info, options);
-  RETURN_IF_ERROR(
-      compiler->Compile(model, tflite_graph_io, [&](ShaderCode code) -> Status {
+  RETURN_IF_ERROR(compiler->Compile(
+      model, tflite_graph_io, [&](ShaderCode code) -> absl::Status {
         return compiled_model_impl->Add(workgroup_calculator, std::move(code));
       }));
   *compiled_model = std::move(compiled_model_impl);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 #ifndef TFLITE_GPU_BINARY_RELEASE
-Status ReadSerializedModel(const std::vector<uint8_t>& serialized_model,
-                           std::unique_ptr<CompiledModel>* compiled_model) {
+absl::Status ReadSerializedModel(
+    const std::vector<uint8_t>& serialized_model,
+    std::unique_ptr<CompiledModel>* compiled_model) {
   GpuInfo gpu_info;
   RETURN_IF_ERROR(RequestGpuInfo(&gpu_info));
   if (!IsOpenGl31OrAbove(gpu_info)) {
-    return InternalError(
+    return absl::InternalError(
         "OpenGL ES 3.1 or above is required to use OpenGL inference.");
   }
   auto compiled_model_impl = absl::make_unique<CompiledModelImpl>(gpu_info);
   RETURN_IF_ERROR(DeserializeCompiledModel(
       absl::MakeConstSpan(serialized_model), compiled_model_impl.get()));
   *compiled_model = std::move(compiled_model_impl);
-  return OkStatus();
+  return absl::OkStatus();
 }
 #endif  // TFLITE_GPU_BINARY_RELEASE
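
For reference, the early-return pattern used throughout these hunks: every
helper now returns absl::Status, and RETURN_IF_ERROR propagates failures up
the call chain. A minimal sketch of the idiom (the macro below is a simplified
stand-in for illustration, not the exact TFLite definition):

    #include "absl/status/status.h"

    // Simplified stand-in for the RETURN_IF_ERROR macro used in this patch.
    #define RETURN_IF_ERROR(expr)            \
      do {                                   \
        const absl::Status _status = (expr); \
        if (!_status.ok()) return _status;   \
      } while (false)

    absl::Status ParsePositive(int value) {
      if (value <= 0) {
        return absl::InvalidArgumentError("value must be positive");
      }
      return absl::OkStatus();
    }

    absl::Status Pipeline(int value) {
      RETURN_IF_ERROR(ParsePositive(value));  // Error propagates unchanged.
      return absl::OkStatus();
    }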
 
diff --git a/tensorflow/lite/delegates/gpu/gl/api.h b/tensorflow/lite/delegates/gpu/gl/api.h
index 78b277852d0..c37eb9b7772 100644
--- a/tensorflow/lite/delegates/gpu/gl/api.h
+++ b/tensorflow/lite/delegates/gpu/gl/api.h
@@ -51,7 +51,7 @@ class CompiledModel {
   //
   // The NewRun call, as well as subsequent calls to InferenceContext methods,
   // should be done from the same EGL context.
-  virtual Status NewRun(
+  virtual absl::Status NewRun(
       const RuntimeOptions& options, const ObjectManager* objects,
       CommandQueue* command_queue,
       std::unique_ptr<InferenceContext>* inference_context) const = 0;
@@ -59,23 +59,25 @@ class CompiledModel {
 #ifndef TFLITE_GPU_BINARY_RELEASE
   // Serializes the compiled model into a byte buffer.
   // @return OK status if serialization finished successfully.
-  virtual Status Serialize(
+  virtual absl::Status Serialize(
       std::vector<uint8_t>* serialized_compiled_model) const = 0;
 #endif  // TFLITE_GPU_BINARY_RELEASE
 };
 
 // Turns the given model into a "compiled" form that is suitable for inference.
-Status Compile(const CompilationOptions& options, const GraphFloat32& model,
-               const std::unordered_set<int>& tflite_graph_io,
-               const NodeShader& node_shader,
-               const WorkgroupsCalculator& workgroup_calculator,
-               std::unique_ptr<CompiledModel>* compiled_model);
+absl::Status Compile(const CompilationOptions& options,
+                     const GraphFloat32& model,
+                     const std::unordered_set<int>& tflite_graph_io,
+                     const NodeShader& node_shader,
+                     const WorkgroupsCalculator& workgroup_calculator,
+                     std::unique_ptr<CompiledModel>* compiled_model);
 
 #ifndef TFLITE_GPU_BINARY_RELEASE
 // Reads a serialized representation previously created with a
 // CompiledModel::Serialize call.
-Status ReadSerializedModel(const std::vector<uint8_t>& serialized_model,
-                           std::unique_ptr<CompiledModel>* compiled_model);
+absl::Status ReadSerializedModel(
+    const std::vector<uint8_t>& serialized_model,
+    std::unique_ptr<CompiledModel>* compiled_model);
 #endif  // TFLITE_GPU_BINARY_RELEASE
 
 // Encapsulates everything needed for one or more inference executions done
@@ -89,13 +91,13 @@ class InferenceContext {
   virtual RuntimeStats stats() const = 0;
 
   // Executes inference.
-  virtual Status Execute() = 0;
+  virtual absl::Status Execute() = 0;
 
   // Asks the context to reset itself for another round. Keep in mind that
   // this does not clear inputs or outputs, so it is possible to
   // re-use them.
   // It is an error to call Reset while the previous run is still in progress.
-  virtual Status Reset() = 0;
+  virtual absl::Status Reset() = 0;
 };
 
 }  // namespace gl
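
The header above implies the following call sequence. A hedged usage sketch,
assuming default-constructible option structs and an ObjectManager already
populated with input/output buffers (that setup is not shown in this patch):

    absl::Status RunOnce(const GraphFloat32& model, const NodeShader& shaders,
                         const WorkgroupsCalculator& workgroups,
                         const ObjectManager* objects,
                         CommandQueue* command_queue) {
      std::unique_ptr<CompiledModel> compiled;
      RETURN_IF_ERROR(Compile(CompilationOptions(), model,
                              /*tflite_graph_io=*/{}, shaders, workgroups,
                              &compiled));
      std::unique_ptr<InferenceContext> context;
      RETURN_IF_ERROR(compiled->NewRun(RuntimeOptions(), objects,
                                       command_queue, &context));
      RETURN_IF_ERROR(context->Execute());
      return context->Reset();  // Makes the context ready for another round.
    }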
diff --git a/tensorflow/lite/delegates/gpu/gl/api2.cc b/tensorflow/lite/delegates/gpu/gl/api2.cc
index 68bfa42411f..64e301338e1 100644
--- a/tensorflow/lite/delegates/gpu/gl/api2.cc
+++ b/tensorflow/lite/delegates/gpu/gl/api2.cc
@@ -50,16 +50,16 @@ std::string GetShaderHeader(uint3 localsize) {
 }
 
 // Wraps the given SSBO into a GlBuffer object that does not take ownership.
-Status WrapSSBO(OpenGlBuffer ssbo, GlBuffer* buffer) {
+absl::Status WrapSSBO(OpenGlBuffer ssbo, GlBuffer* buffer) {
   int64_t size_bytes;
   RETURN_IF_ERROR(GetSSBOSize(ssbo.id, &size_bytes));
   *buffer = GlBuffer(GL_SHADER_STORAGE_BUFFER, ssbo.id, size_bytes, 0, false);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status MaybeAllocateGlBuffer(const TensorObjectDef& def, GlBuffer* ssbo) {
+absl::Status MaybeAllocateGlBuffer(const TensorObjectDef& def, GlBuffer* ssbo) {
   if (def.object_def.object_type != gpu::ObjectType::OPENGL_SSBO) {
-    return InvalidArgumentError("Tensor object is not GL SSBO");
+    return absl::InvalidArgumentError("Tensor object is not GL SSBO");
   }
   const uint32_t num_elements = NumElements(def);
   switch (def.object_def.data_type) {
@@ -68,10 +68,10 @@ Status MaybeAllocateGlBuffer(const TensorObjectDef& def, GlBuffer* ssbo) {
     case DataType::FLOAT16:
       return CreateReadWriteShaderStorageBuffer<uint16_t>(num_elements, ssbo);
     default:
-      return InternalError(
+      return absl::InternalError(
           "Unable to create new GL SSBO. Unsupported data type.");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // Does one-step conversion between internal and external objects.
@@ -89,58 +89,59 @@ class DefaultTensorTie : public TensorTie {
            converter_builder.IsSupported(def.external_def, def.internal_def);
   }
 
-  static Status New(const TensorTieDef& def,
-                    TensorObjectConverterBuilder* converter_builder,
-                    ObjectManager* objects, std::unique_ptr<TensorTie>* tie) {
+  static absl::Status New(const TensorTieDef& def,
+                          TensorObjectConverterBuilder* converter_builder,
+                          ObjectManager* objects,
+                          std::unique_ptr<TensorTie>* tie) {
     auto tie_impl =
         absl::make_unique<DefaultTensorTie>(def, TensorObject{}, objects);
     RETURN_IF_ERROR(tie_impl->Init(converter_builder));
     *tie = std::move(tie_impl);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  static Status New(const TensorTieDef& def,
-                    TensorObjectConverterBuilder* converter_builder,
-                    TensorObject internal_object,
-                    std::unique_ptr<TensorTie>* tie) {
+  static absl::Status New(const TensorTieDef& def,
+                          TensorObjectConverterBuilder* converter_builder,
+                          TensorObject internal_object,
+                          std::unique_ptr<TensorTie>* tie) {
     if (!IsValid(def.internal_def, internal_object)) {
-      return InternalError("Internal object does not match definition.");
+      return absl::InternalError("Internal object does not match definition.");
     }
 
     auto tie_impl =
         absl::make_unique<DefaultTensorTie>(def, internal_object, nullptr);
     RETURN_IF_ERROR(tie_impl->Init(converter_builder));
     *tie = std::move(tie_impl);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status CopyToExternalObject() final {
+  absl::Status CopyToExternalObject() final {
     if (!converter_to_) {
-      return OkStatus();
+      return absl::OkStatus();
     }
     return converter_to_->Convert(internal_obj_, GetExternalObject());
   }
 
-  Status CopyFromExternalObject() final {
+  absl::Status CopyFromExternalObject() final {
     if (!converter_from_) {
-      return OkStatus();
+      return absl::OkStatus();
     }
     return converter_from_->Convert(GetExternalObject(), internal_obj_);
   }
 
-  Status SetExternalObject(TensorObject obj) final {
+  absl::Status SetExternalObject(TensorObject obj) final {
     if (!def().external_def.object_def.user_provided) {
-      return InvalidArgumentError("External object is read-only");
+      return absl::InvalidArgumentError("External object is read-only");
     }
     if (!IsValid(def().external_def, obj)) {
-      return InvalidArgumentError("Given object is not valid");
+      return absl::InvalidArgumentError("Given object is not valid");
     }
     // TODO(akulik): external object should propagate to internal.
     if (IsSameDef()) {
-      return UnimplementedError("Not supported");
+      return absl::UnimplementedError("Not supported");
     }
     external_obj_ = obj;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   TensorObject GetExternalObject() final { return external_obj_; }
@@ -159,7 +160,8 @@ class DefaultTensorTie : public TensorTie {
             internal_def.data_layout == DataLayout::DHWC4 &&
             def().external_def.dimensions.c == 4);
   }
-  Status Init(TensorObjectConverterBuilder* converter_builder) {
+
+  absl::Status Init(TensorObjectConverterBuilder* converter_builder) {
     // First, check whether the object is user-provided.
     const auto& external_def = def().external_def.object_def;
 
@@ -174,7 +176,7 @@ class DefaultTensorTie : public TensorTie {
 
     if (external_def.user_provided) {
       if (is_same_def) {
-        return OkStatus();
+        return absl::OkStatus();
       }
       // Object is provided by a user, but the runtime expects a different object
       // type. Therefore, we have to allocate an internal object and convert.
@@ -186,19 +188,19 @@ class DefaultTensorTie : public TensorTie {
         // Object is NOT provided by a user, but it matches the definition
         // expected by the runtime. Conversion is not needed.
         external_obj_ = internal_obj_;
-        return OkStatus();
+        return absl::OkStatus();
       }
 
       // Object is NOT provided by a user.
       return MaybeAllocateExternalObject();
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status MaybeAllocateInternalObject() {
+  absl::Status MaybeAllocateInternalObject() {
     const TensorObjectDef& d = def().internal_def;
     if (d.object_def.user_provided) {
-      return OkStatus();
+      return absl::OkStatus();
     }
     switch (d.object_def.object_type) {
       case gpu::ObjectType::OPENGL_SSBO: {
@@ -210,12 +212,12 @@ class DefaultTensorTie : public TensorTie {
       }
       // TODO(akulik): support textures as internal object when compiler permits
       default:
-        return InternalError("Unexpected object type");
+        return absl::InternalError("Unexpected object type");
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status MaybeAllocateExternalObject() {
+  absl::Status MaybeAllocateExternalObject() {
     const TensorObjectDef& d = def().external_def;
     switch (d.object_def.object_type) {
       case gpu::ObjectType::CPU_MEMORY: {
@@ -232,9 +234,9 @@ class DefaultTensorTie : public TensorTie {
         break;
       }
       default:
-        return InternalError("Unexpected object type");
+        return absl::InternalError("Unexpected object type");
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   ObjectManager* objects_;
@@ -266,26 +268,27 @@ class TwoStepTensorTie : public TensorTie {
            DefaultTensorTie::IsSupported(defs.second, converter_builder);
   }
 
-  static Status New(const TensorTieDef& def,
-                    TensorObjectConverterBuilder* converter_builder,
-                    ObjectManager* objects, std::unique_ptr<TensorTie>* tie) {
+  static absl::Status New(const TensorTieDef& def,
+                          TensorObjectConverterBuilder* converter_builder,
+                          ObjectManager* objects,
+                          std::unique_ptr<TensorTie>* tie) {
     auto tie_impl = absl::make_unique<TwoStepTensorTie>(def);
     RETURN_IF_ERROR(tie_impl->Init(converter_builder, objects));
     *tie = std::move(tie_impl);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status CopyToExternalObject() final {
+  absl::Status CopyToExternalObject() final {
     RETURN_IF_ERROR(inner_tie_->CopyToExternalObject());
     return outer_tie_->CopyToExternalObject();
   }
 
-  Status CopyFromExternalObject() final {
+  absl::Status CopyFromExternalObject() final {
     RETURN_IF_ERROR(outer_tie_->CopyFromExternalObject());
     return inner_tie_->CopyFromExternalObject();
   }
 
-  Status SetExternalObject(TensorObject obj) final {
+  absl::Status SetExternalObject(TensorObject obj) final {
     return outer_tie_->SetExternalObject(obj);
   }
 
@@ -321,8 +324,8 @@ class TwoStepTensorTie : public TensorTie {
     return std::make_pair(outer_def, inner_def);
   }
 
-  Status Init(TensorObjectConverterBuilder* converter_builder,
-              ObjectManager* objects) {
+  absl::Status Init(TensorObjectConverterBuilder* converter_builder,
+                    ObjectManager* objects) {
     auto defs = MakeOuterInnerDefs(def());
     RETURN_IF_ERROR(DefaultTensorTie::New(defs.second, converter_builder,
                                           objects, &inner_tie_));
@@ -346,8 +349,8 @@ class TensorTieFactory {
             TwoStepTensorTie::IsSupported(def, *converter_builder_));
   }
 
-  Status NewTensorTie(const TensorTieDef& def, ObjectManager* objects,
-                      std::unique_ptr<TensorTie>* tie) {
+  absl::Status NewTensorTie(const TensorTieDef& def, ObjectManager* objects,
+                            std::unique_ptr<TensorTie>* tie) {
     auto converter = converter_builder_.get();
     if (DefaultTensorTie::IsSupported(def, *converter)) {
       return DefaultTensorTie::New(def, converter, objects, tie);
@@ -355,7 +358,7 @@ class TensorTieFactory {
     if (TwoStepTensorTie::IsSupported(def, *converter)) {
       return TwoStepTensorTie::New(def, converter, objects, tie);
     }
-    return UnimplementedError("Unsupported tensor tie definition.");
+    return absl::UnimplementedError("Unsupported tensor tie definition.");
   }
 
  private:
@@ -368,16 +371,16 @@ class InferenceRunnerImpl : public InferenceRunner {
                       std::unique_ptr<ObjectManager> objects)
       : runtime_(std::move(runtime)), objects_(std::move(objects)) {}
 
-  Status Initialize(const std::vector<TensorTieDef>& inputs,
-                    const std::vector<TensorTieDef>& outputs,
-                    TensorTieFactory* tie_factory) {
+  absl::Status Initialize(const std::vector<TensorTieDef>& inputs,
+                          const std::vector<TensorTieDef>& outputs,
+                          TensorTieFactory* tie_factory) {
     RETURN_IF_ERROR(LinkTensors(inputs, tie_factory, &inputs_));
     RETURN_IF_ERROR(LinkTensors(outputs, tie_factory, &outputs_));
     for (const auto& def : outputs) {
       output_to_cpu_ |= def.external_def.object_def.object_type ==
                         gpu::ObjectType::CPU_MEMORY;
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   std::vector<TensorObjectDef> inputs() const override {
@@ -388,37 +391,37 @@ class InferenceRunnerImpl : public InferenceRunner {
     return GetExternalDefinitions(outputs_);
   }
 
-  Status GetInputObject(int index, TensorObject* object) override {
+  absl::Status GetInputObject(int index, TensorObject* object) override {
     if (index < 0 || index >= inputs_.size()) {
-      return OutOfRangeError("Index is out of range");
+      return absl::OutOfRangeError("Index is out of range");
     }
     *object = inputs_[index]->GetExternalObject();
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status GetOutputObject(int index, TensorObject* object) override {
+  absl::Status GetOutputObject(int index, TensorObject* object) override {
     if (index < 0 || index >= outputs_.size()) {
-      return OutOfRangeError("Index is out of range");
+      return absl::OutOfRangeError("Index is out of range");
     }
     *object = outputs_[index]->GetExternalObject();
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status SetInputObject(int index, TensorObject object) override {
+  absl::Status SetInputObject(int index, TensorObject object) override {
     if (index < 0 || index >= inputs_.size()) {
-      return OutOfRangeError("Index is out of range");
+      return absl::OutOfRangeError("Index is out of range");
     }
     return inputs_[index]->SetExternalObject(object);
   }
 
-  Status SetOutputObject(int index, TensorObject object) override {
+  absl::Status SetOutputObject(int index, TensorObject object) override {
     if (index < 0 || index >= outputs_.size()) {
-      return OutOfRangeError("Index is out of range");
+      return absl::OutOfRangeError("Index is out of range");
     }
     return outputs_[index]->SetExternalObject(object);
   }
 
-  Status Run() override {
+  absl::Status Run() override {
     for (auto& obj : inputs_) {
       RETURN_IF_ERROR(obj->CopyFromExternalObject());
     }
@@ -430,20 +433,20 @@ class InferenceRunnerImpl : public InferenceRunner {
     if (output_to_cpu_) {
       RETURN_IF_ERROR(runtime_->command_queue()->WaitForCompletion());
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
-  Status LinkTensors(const std::vector<TensorTieDef>& defs,
-                     TensorTieFactory* tie_factory,
-                     std::vector<std::unique_ptr<TensorTie>>* objects) {
+  absl::Status LinkTensors(const std::vector<TensorTieDef>& defs,
+                           TensorTieFactory* tie_factory,
+                           std::vector<std::unique_ptr<TensorTie>>* objects) {
     objects->reserve(defs.size());
     for (auto& def : defs) {
       std::unique_ptr<TensorTie> object;
       RETURN_IF_ERROR(tie_factory->NewTensorTie(def, objects_.get(), &object));
       objects->push_back(std::move(object));
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   static std::vector<TensorObjectDef> GetExternalDefinitions(
@@ -474,10 +477,10 @@ class InferenceBuilderImpl : public InferenceBuilder {
         gpu_info_(gpu_info),
         tie_factory_(env_options_) {}
 
-  Status Initialize() {
+  absl::Status Initialize() {
     inputs_ = LinkTensors(graph_.inputs());
     outputs_ = LinkTensors(graph_.outputs());
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   std::vector<TensorObjectDef> inputs() const final {
@@ -488,40 +491,42 @@ class InferenceBuilderImpl : public InferenceBuilder {
     return GetExternalDefinitions(outputs_);
   }
 
-  Status SetInputShape(int index, const Dimensions& dimensions) final {
+  absl::Status SetInputShape(int index, const Dimensions& dimensions) final {
     if (index < 0 || index >= inputs_.size()) {
-      return OutOfRangeError("Index is out of range");
+      return absl::OutOfRangeError("Index is out of range");
     }
-    return UnimplementedError("Changing input shapes is not supported");
+    return absl::UnimplementedError("Changing input shapes is not supported");
   }
 
-  Status SetInputObjectDef(int index, ObjectDef new_def) final {
+  absl::Status SetInputObjectDef(int index, ObjectDef new_def) final {
     if (index < 0 || index >= inputs_.size()) {
-      return OutOfRangeError("Index is out of range");
+      return absl::OutOfRangeError("Index is out of range");
     }
     auto def = inputs_[index];
     def.external_def.object_def = new_def;
     if (!tie_factory_.IsSupported(def)) {
-      return InvalidArgumentError("New object definition is not supported.");
+      return absl::InvalidArgumentError(
+          "New object definition is not supported.");
     }
     inputs_[index] = def;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status SetOutputObjectDef(int index, ObjectDef new_def) final {
+  absl::Status SetOutputObjectDef(int index, ObjectDef new_def) final {
     if (index < 0 || index >= outputs_.size()) {
-      return OutOfRangeError("Index is out of range");
+      return absl::OutOfRangeError("Index is out of range");
     }
     auto def = outputs_[index];
     def.external_def.object_def = new_def;
     if (!tie_factory_.IsSupported(def)) {
-      return InvalidArgumentError("New object definition is not supported.");
+      return absl::InvalidArgumentError(
+          "New object definition is not supported.");
     }
     outputs_[index] = def;
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Build(std::unique_ptr<InferenceRunner>* runner) final {
+  absl::Status Build(std::unique_ptr<InferenceRunner>* runner) final {
     auto kernels = NewNodeShaderRegistry();
     CompilationOptions compiler_options;
     compiler_options.allow_precision_loss =
@@ -551,7 +556,7 @@ class InferenceBuilderImpl : public InferenceBuilder {
         std::move(runtime), std::move(external_objects));
     RETURN_IF_ERROR(runner_impl->Initialize(inputs_, outputs_, &tie_factory_));
     RETURN_IF_ERROR(
-        compiler->Compile(graph_, {}, [&](ShaderCode code) -> Status {
+        compiler->Compile(graph_, {}, [&](ShaderCode code) -> absl::Status {
           auto workgroup = workgroup_calculator->Calculate(code);
           size_t shader_index;
           std::string shader_src =
@@ -574,7 +579,7 @@ class InferenceBuilderImpl : public InferenceBuilder {
         }));
     RETURN_IF_ERROR(runtime_ptr->PrepareForExecution());
     *runner = std::move(runner_impl);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
@@ -624,39 +629,39 @@ class InferenceEnvironmentImpl : public InferenceEnvironment {
   explicit InferenceEnvironmentImpl(const InferenceEnvironmentOptions& options)
       : env_options_(options) {}
 
-  Status Init() {
+  absl::Status Init() {
     RETURN_IF_ERROR(EglEnvironment::NewEglEnvironment(&egl_env_));
 
     RETURN_IF_ERROR(RequestGpuInfo(&gpu_info_));
     properties_.is_opengl_available = IsOpenGl31OrAbove(gpu_info_);
     if (!properties_.is_opengl_available) {
-      return InternalError(
+      return absl::InternalError(
           "OpenGL ES 3.1 or above is required to use OpenGL inference.");
     }
     if (!env_options_.queue) {
       queue_ = NewCommandQueue(gpu_info_);
       env_options_.queue = queue_.get();
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status NewInferenceBuilder(GraphFloat32&& model,
-                             const InferenceOptions& options,
-                             std::unique_ptr<InferenceBuilder>* builder) final {
+  absl::Status NewInferenceBuilder(
+      GraphFloat32&& model, const InferenceOptions& options,
+      std::unique_ptr<InferenceBuilder>* builder) final {
     if (!IsValid(options)) {
-      return InvalidArgumentError("InferenceOptions are invalid.");
+      return absl::InvalidArgumentError("InferenceOptions are invalid.");
     }
     InferenceOptions resolved_options = options;
     ResolveAutoPriority(&resolved_options);
     if (!IsBatchMatchesForAllValues(model)) {
-      return InvalidArgumentError(
+      return absl::InvalidArgumentError(
           "Only identical batch dimension is supported");
     }
     auto builder_impl = absl::make_unique<InferenceBuilderImpl>(
         env_options_, resolved_options, std::move(model), &gpu_info_);
     RETURN_IF_ERROR(builder_impl->Initialize());
     *builder = std::move(builder_impl);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   const InferenceEnvironmentProperties& properties() const {
@@ -673,18 +678,18 @@ class InferenceEnvironmentImpl : public InferenceEnvironment {
 
 }  // namespace
 
-Status NewInferenceEnvironment(
+absl::Status NewInferenceEnvironment(
     const InferenceEnvironmentOptions& options,
     std::unique_ptr<InferenceEnvironment>* environment,
     InferenceEnvironmentProperties* properties) {
   auto env_impl = absl::make_unique<InferenceEnvironmentImpl>(options);
-  Status status = env_impl->Init();
+  absl::Status status = env_impl->Init();
   if (properties) {
     *properties = env_impl->properties();
   }
   RETURN_IF_ERROR(status);
   *environment = std::move(env_impl);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gl
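
One detail worth calling out in TwoStepTensorTie above: the copy direction
determines the order in which the two ties run. A hedged sketch of that
chaining against the TensorTie interface from this file (RoundTrip is a
hypothetical helper, shown only to make the ordering explicit):

    absl::Status RoundTrip(TensorTie* inner, TensorTie* outer) {
      // Towards the user: internal -> intermediate -> external.
      RETURN_IF_ERROR(inner->CopyToExternalObject());
      RETURN_IF_ERROR(outer->CopyToExternalObject());
      // Back from the user: external -> intermediate -> internal.
      RETURN_IF_ERROR(outer->CopyFromExternalObject());
      return inner->CopyFromExternalObject();
    }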
diff --git a/tensorflow/lite/delegates/gpu/gl/api2.h b/tensorflow/lite/delegates/gpu/gl/api2.h
index ac58fef0ffa..05062064dd6 100644
--- a/tensorflow/lite/delegates/gpu/gl/api2.h
+++ b/tensorflow/lite/delegates/gpu/gl/api2.h
@@ -41,7 +41,7 @@ class InferenceEnvironment {
  public:
   virtual ~InferenceEnvironment() = default;
 
-  virtual Status NewInferenceBuilder(
+  virtual absl::Status NewInferenceBuilder(
       GraphFloat32&& model, const InferenceOptions& options,
       std::unique_ptr<InferenceBuilder>* builder) = 0;
 };
@@ -52,7 +52,7 @@ struct InferenceEnvironmentOptions {
 
 // Creates a new OpenGL environment that needs to stay around until all
 // inference runners are destroyed.
-Status NewInferenceEnvironment(
+absl::Status NewInferenceEnvironment(
     const InferenceEnvironmentOptions& options,
     std::unique_ptr<InferenceEnvironment>* environment,
     InferenceEnvironmentProperties* properties /* optional */);
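
A hedged usage sketch for the entry point above. Note that the implementation
in api2.cc fills in properties before checking the status of Init, so the
caller learns is_opengl_available even when environment creation fails:

    absl::Status MakeEnvironment(std::unique_ptr<InferenceEnvironment>* env) {
      InferenceEnvironmentOptions options;
      InferenceEnvironmentProperties properties;
      absl::Status status = NewInferenceEnvironment(options, env, &properties);
      if (!properties.is_opengl_available) {
        return absl::FailedPreconditionError(
            "OpenGL ES 3.1+ inference is unavailable on this device");
      }
      return status;
    }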
diff --git a/tensorflow/lite/delegates/gpu/gl/command_queue.cc b/tensorflow/lite/delegates/gpu/gl/command_queue.cc
index 87823761127..8500a50859c 100644
--- a/tensorflow/lite/delegates/gpu/gl/command_queue.cc
+++ b/tensorflow/lite/delegates/gpu/gl/command_queue.cc
@@ -30,17 +30,18 @@ namespace {
 
 class DefaultCommandQueue : public CommandQueue {
  public:
-  Status Dispatch(const GlProgram& program, const uint3& workgroups) override {
+  absl::Status Dispatch(const GlProgram& program,
+                        const uint3& workgroups) override {
     RETURN_IF_ERROR(program.Dispatch(workgroups));
     return TFLITE_GPU_CALL_GL(glMemoryBarrier, GL_ALL_BARRIER_BITS);
   }
 
-  Status WaitForCompletion() override {
+  absl::Status WaitForCompletion() override {
     // TODO(akulik): Maybe let the user choose which wait method to use.
     return GlActiveSyncWait();
   }
 
-  Status Flush() override { return OkStatus(); }
+  absl::Status Flush() override { return absl::OkStatus(); }
 };
 
 // On Adreno, do flush periodically as this affects performance. Command queue
@@ -54,26 +55,27 @@ class AdrenoCommandQueue : public DefaultCommandQueue {
   explicit AdrenoCommandQueue(int flush_every_n)
       : flush_every_n_(flush_every_n) {}
 
-  Status Dispatch(const GlProgram& program, const uint3& workgroups) final {
+  absl::Status Dispatch(const GlProgram& program,
+                        const uint3& workgroups) final {
     RETURN_IF_ERROR(DefaultCommandQueue::Dispatch(program, workgroups));
     if ((++program_counter_ % flush_every_n_) == 0) {
       glFlush();
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status WaitForCompletion() override {
+  absl::Status WaitForCompletion() override {
     program_counter_ = 0;
     return DefaultCommandQueue::WaitForCompletion();
   }
 
-  Status Flush() final {
+  absl::Status Flush() final {
     // Flush exactly once after the last dispatch.
     if (program_counter_ != 0) {
       program_counter_ = 0;
       glFlush();
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
diff --git a/tensorflow/lite/delegates/gpu/gl/command_queue.h b/tensorflow/lite/delegates/gpu/gl/command_queue.h
index 6695852fc86..d9bff04a837 100644
--- a/tensorflow/lite/delegates/gpu/gl/command_queue.h
+++ b/tensorflow/lite/delegates/gpu/gl/command_queue.h
@@ -35,14 +35,14 @@ class CommandQueue {
   virtual ~CommandQueue() = default;
 
   // Dispatches a program. It may or may not call glFlush.
-  virtual Status Dispatch(const GlProgram& program,
-                          const uint3& workgroups) = 0;
+  virtual absl::Status Dispatch(const GlProgram& program,
+                                const uint3& workgroups) = 0;
 
   // Called after all programs have been dispatched.
-  virtual Status Flush() = 0;
+  virtual absl::Status Flush() = 0;
 
   // Waits until all programs dispatched prior to this call are completed.
-  virtual Status WaitForCompletion() = 0;
+  virtual absl::Status WaitForCompletion() = 0;
 };
 
 // By default, a memory barrier is inserted after every dispatch.
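
The Adreno queue above flushes every flush_every_n dispatches and exactly once
after the last one. A distilled, GL-free sketch of that counter logic (the
class name here is hypothetical):

    class PeriodicFlusher {
     public:
      explicit PeriodicFlusher(int flush_every_n)
          : flush_every_n_(flush_every_n) {}

      void OnDispatch() {
        if ((++counter_ % flush_every_n_) == 0) Flush();
      }

      void OnFinish() {
        // Flush exactly once after the last dispatch.
        if (counter_ != 0) {
          counter_ = 0;
          Flush();
        }
      }

     private:
      void Flush() { /* glFlush() in the real queue */ }
      const int flush_every_n_;
      int counter_ = 0;
    };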
diff --git a/tensorflow/lite/delegates/gpu/gl/compiler.cc b/tensorflow/lite/delegates/gpu/gl/compiler.cc
index cef8139fe1e..a5f5b35f2d2 100644
--- a/tensorflow/lite/delegates/gpu/gl/compiler.cc
+++ b/tensorflow/lite/delegates/gpu/gl/compiler.cc
@@ -102,9 +102,9 @@ class CompilerImpl : public Compiler {
     }
   }
 
-  Status Compile(const GraphFloat32& graph,
-                 const std::unordered_set<int>& tflite_graph_io,
-                 const ShaderCodeCallback& callback) final {
+  absl::Status Compile(const GraphFloat32& graph,
+                       const std::unordered_set<int>& tflite_graph_io,
+                       const ShaderCodeCallback& callback) final {
     // It is important that the ids in the compiled graph are identical to
     // those in the given graph.
     RETURN_IF_ERROR(graph.MakeExactCopy(&compiled_graph_));
@@ -129,22 +129,22 @@ class CompilerImpl : public Compiler {
     if (options_.fuse_operations) {
       FuseAutoOutputWithInline fuse_inline;
       if (!transformer.Apply("fuse_auto_with_inline", &fuse_inline)) {
-        return InternalError("fuse_auto_with_inline failed");
+        return absl::InternalError("fuse_auto_with_inline failed");
       }
       FuseInplaceUpdate fuse_inplace;
       if (!transformer.Apply("fuse_inplace_update", &fuse_inplace)) {
-        return InternalError("fuse_inplace failed");
+        return absl::InternalError("fuse_inplace failed");
       }
       if (options_.auto_input_fusion) {
         FuseAutoInput fuse_auto_input;
         if (!transformer.Apply("fuse_auto_input", &fuse_auto_input)) {
-          return InternalError("fuse_auto_input failed");
+          return absl::InternalError("fuse_auto_input failed");
         }
       }
     }
     RemoveUnusedInplaceUpdates remove_inplace_updates;
     if (!transformer.Apply("remove_inplace_updates", &remove_inplace_updates)) {
-      return InternalError("remove_inplace_updates failed");
+      return absl::InternalError("remove_inplace_updates failed");
     }
 
     // Prepare internal objects.
@@ -176,7 +176,7 @@ class CompilerImpl : public Compiler {
         auto shape = outputs[0]->tensor.shape;
         for (auto output : outputs) {
           if (shape != output->tensor.shape) {
-            return FailedPreconditionError(
+            return absl::FailedPreconditionError(
                 "Workload uint3() requires all output sizes to match");
           }
         }
@@ -274,7 +274,7 @@ class CompilerImpl : public Compiler {
       RETURN_IF_ERROR(codegen.Build(std::move(attr), &shader_code));
       RETURN_IF_ERROR(callback(std::move(shader_code)));
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
diff --git a/tensorflow/lite/delegates/gpu/gl/compiler.h b/tensorflow/lite/delegates/gpu/gl/compiler.h
index e8b434869e2..7769890b769 100644
--- a/tensorflow/lite/delegates/gpu/gl/compiler.h
+++ b/tensorflow/lite/delegates/gpu/gl/compiler.h
@@ -31,7 +31,7 @@ namespace tflite {
 namespace gpu {
 namespace gl {
 
-using ShaderCodeCallback = std::function<Status(ShaderCode code)>;
+using ShaderCodeCallback = std::function<absl::Status(ShaderCode code)>;
 
 class Compiler {
  public:
@@ -40,9 +40,9 @@ class Compiler {
   // Goes over the given graph and generates OpenGL shaders for it.
   // The callback is called for every generated shader and may execute shaders
   // as they come or store them elsewhere to execute later.
-  virtual Status Compile(const GraphFloat32& graph,
-                         const std::unordered_set<int>& tflite_graph_io,
-                         const ShaderCodeCallback& callback) = 0;
+  virtual absl::Status Compile(const GraphFloat32& graph,
+                               const std::unordered_set<int>& tflite_graph_io,
+                               const ShaderCodeCallback& callback) = 0;
 };
 
 std::unique_ptr<Compiler> NewCompiler(
diff --git a/tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.cc b/tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.cc
index 923b0bd47ec..4048a07d087 100644
--- a/tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.cc
+++ b/tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.cc
@@ -25,8 +25,8 @@ namespace tflite {
 namespace gpu {
 namespace gl {
 
-Status MergeCode(CompiledNodeAttributes* attr,
-                 CompiledNodeAttributes* merged_attr) {
+absl::Status MergeCode(CompiledNodeAttributes* attr,
+                       CompiledNodeAttributes* merged_attr) {
   // Build a set of known names.
   std::unordered_set<std::string> known_names;
   for (const auto& parameter : merged_attr->code.parameters) {
@@ -56,7 +56,7 @@ Status MergeCode(CompiledNodeAttributes* attr,
             std::back_inserter(merged_attr->code.parameters));
   std::move(attr->node_indices.begin(), attr->node_indices.end(),
             std::back_inserter(merged_attr->node_indices));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gl
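
MergeCode above renames parameters and objects whose names already exist in
the destination attributes. A hedged sketch of collision-free renaming in the
same spirit (MakeUnique is a hypothetical helper, not from this patch):

    #include <string>
    #include <unordered_set>

    std::string MakeUnique(const std::string& name,
                           std::unordered_set<std::string>* known_names) {
      std::string candidate = name;
      int suffix = 0;
      // insert().second is false while the candidate collides.
      while (!known_names->insert(candidate).second) {
        candidate = name + "_" + std::to_string(++suffix);
      }
      return candidate;
    }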
diff --git a/tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h b/tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h
index d41a734f4e2..8d36504d0c3 100644
--- a/tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h
+++ b/tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h
@@ -42,8 +42,8 @@ struct CompiledNodeAttributes {
 // Moves all code objects, parameters and node indices from attr to merged_attr.
 // Parameters and objects in attr.code.source_code are renamed to ensure
 // uniqueness.
-Status MergeCode(CompiledNodeAttributes* attr,
-                 CompiledNodeAttributes* merged_attr);
+absl::Status MergeCode(CompiledNodeAttributes* attr,
+                       CompiledNodeAttributes* merged_attr);
 
 }  // namespace gl
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.cc b/tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.cc
index 01ea764b0b0..55e6d94eb7d 100644
--- a/tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.cc
+++ b/tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.cc
@@ -46,8 +46,8 @@ absl::string_view PastSubstr(absl::string_view s, absl::string_view subs) {
 
 }  // namespace
 
-Status TextPreprocessor::Rewrite(const std::string& input,
-                                 std::string* output) {
+absl::Status TextPreprocessor::Rewrite(const std::string& input,
+                                       std::string* output) {
   absl::string_view s = input;
   std::string result;
   while (true) {
@@ -57,7 +57,7 @@ Status TextPreprocessor::Rewrite(const std::string& input,
       break;
     }
     if (inline_block.size() == 1) {
-      return NotFoundError("Unable to find end of inline block");
+      return absl::NotFoundError("Unable to find end of inline block");
     }
     s = PastSubstr(s, inline_block);
     bool processed = false;
@@ -74,20 +74,20 @@ Status TextPreprocessor::Rewrite(const std::string& input,
           processed = true;
           break;
         case RewriteStatus::ERROR:
-          return InternalError(absl::StrCat("Error while rewriting '",
-                                            inline_block, "': ", result));
+          return absl::InternalError(absl::StrCat("Error while rewriting '",
+                                                  inline_block, "': ", result));
       }
     }
     if (!processed) {
       if (!keep_unknown_rewrites_) {
-        return NotFoundError(absl::StrCat("Didn't find inline rewrite for '",
-                                          inline_block, "'"));
+        return absl::NotFoundError(absl::StrCat(
+            "Didn't find inline rewrite for '", inline_block, "'"));
       }
       absl::StrAppend(&result, inline_block);
     }
   }
   *output = std::move(result);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gl
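
Rewrite above scans for inline blocks between pairs of a single delimiter
character and hands each block to a chain of rewriters. A stripped-down sketch
of that scanning loop, assuming a single hard-coded rewrite (the real code
dispatches to InlineRewrite instances and may keep unknown rewrites):

    #include <string>

    #include "absl/status/status.h"
    #include "absl/strings/str_cat.h"
    #include "absl/strings/string_view.h"

    absl::Status RewriteInline(absl::string_view input, char delimiter,
                               std::string* output) {
      std::string result;
      while (true) {
        size_t open = input.find(delimiter);
        if (open == absl::string_view::npos) {
          absl::StrAppend(&result, input);  // No more inline blocks.
          break;
        }
        size_t close = input.find(delimiter, open + 1);
        if (close == absl::string_view::npos) {
          return absl::NotFoundError("Unable to find end of inline block");
        }
        // Copy the text before the block, then the rewritten block.
        absl::StrAppend(&result, input.substr(0, open), "<rewritten>");
        input = input.substr(close + 1);
      }
      *output = std::move(result);
      return absl::OkStatus();
    }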
diff --git a/tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.h b/tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.h
index f01698e784f..29fad004d3c 100644
--- a/tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.h
+++ b/tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.h
@@ -58,7 +58,7 @@ class TextPreprocessor {
   }
 
   // Input and output may point to the same object.
-  Status Rewrite(const std::string& input, std::string* output);
+  absl::Status Rewrite(const std::string& input, std::string* output);
 
  private:
   const char inline_delimiter_;
diff --git a/tensorflow/lite/delegates/gpu/gl/compiler/rename.cc b/tensorflow/lite/delegates/gpu/gl/compiler/rename.cc
index 674002b74b2..956f6afae28 100644
--- a/tensorflow/lite/delegates/gpu/gl/compiler/rename.cc
+++ b/tensorflow/lite/delegates/gpu/gl/compiler/rename.cc
@@ -174,17 +174,17 @@ class ObjectRewriter : public InlineRewrite {
 
 }  // namespace
 
-Status Rename(const NameFunctor& name_func, GeneratedCode* code) {
+absl::Status Rename(const NameFunctor& name_func, GeneratedCode* code) {
   VariableRewriter variable_rewriter("$", name_func);
   ObjectRewriter object_rewriter("$", name_func);
   for (auto&& uniform_parameter : code->parameters) {
     if (!variable_rewriter.AddVariable(std::move(uniform_parameter))) {
-      return InternalError("Variable name already exists");
+      return absl::InternalError("Variable name already exists");
     }
   }
   for (auto&& object : code->objects) {
     if (!object_rewriter.AddObject(object.first, std::move(object.second))) {
-      return InternalError("Object name already exists");
+      return absl::InternalError("Object name already exists");
     }
   }
   TextPreprocessor preprocessor('$', /*keep_unknown_rewrites=*/true);
@@ -195,7 +195,7 @@ Status Rename(const NameFunctor& name_func, GeneratedCode* code) {
   code->source_code = source_code;
   code->parameters = variable_rewriter.GetUniformParameters();
   code->objects = object_rewriter.GetObjects();
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gl
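
A hedged usage sketch for Rename above: the functor receives every existing
parameter/object name and returns its replacement, e.g. a per-shader prefix
(PrefixNames and the "shader7_" prefix are illustrative, not from this patch):

    absl::Status PrefixNames(GeneratedCode* code) {
      return Rename(
          [](absl::string_view name) {
            return absl::StrCat("shader7_", name);
          },
          code);
    }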
diff --git a/tensorflow/lite/delegates/gpu/gl/compiler/rename.h b/tensorflow/lite/delegates/gpu/gl/compiler/rename.h
index 06921dbe3da..e38ade1a3b9 100644
--- a/tensorflow/lite/delegates/gpu/gl/compiler/rename.h
+++ b/tensorflow/lite/delegates/gpu/gl/compiler/rename.h
@@ -32,7 +32,7 @@ using NameFunctor = std::function<std::string(absl::string_view name)>;
 
 // Rewrites source code, objects and parameters with the new names supplied
 // by the given functor.
-Status Rename(const NameFunctor& name_func, GeneratedCode* code);
+absl::Status Rename(const NameFunctor& name_func, GeneratedCode* code);
 
 }  // namespace gl
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/gl/compiler/shader_codegen.cc b/tensorflow/lite/delegates/gpu/gl/compiler/shader_codegen.cc
index e6100919097..e473f9e77ff 100644
--- a/tensorflow/lite/delegates/gpu/gl/compiler/shader_codegen.cc
+++ b/tensorflow/lite/delegates/gpu/gl/compiler/shader_codegen.cc
@@ -32,8 +32,8 @@ ShaderCodegen::ShaderCodegen(const CompilationOptions& options,
                              const GpuInfo& gpu_info)
     : options_(options), gpu_type_(gpu_info.type) {}
 
-Status ShaderCodegen::Build(CompiledNodeAttributes attr,
-                            ShaderCode* shader_code) const {
+absl::Status ShaderCodegen::Build(CompiledNodeAttributes attr,
+                                  ShaderCode* shader_code) const {
   VariableAccessor variable_accessor(options_.inline_parameters,
                                      options_.vulkan_support);
   ObjectAccessor object_accessor(gpu_type_ == GpuType::MALI,
@@ -41,18 +41,18 @@ Status ShaderCodegen::Build(CompiledNodeAttributes attr,
 
   const auto add_object = [&](const std::string& name, Object&& object) {
     if (!object_accessor.AddObject(name, std::forward<Object>(object))) {
-      return AlreadyExistsError(absl::StrCat("Object \"", name, "\""));
+      return absl::AlreadyExistsError(absl::StrCat("Object \"", name, "\""));
     }
-    return OkStatus();
+    return absl::OkStatus();
   };
 
   const auto add_uniform_parameter = [&](Variable&& variable) {
     const std::string name = variable.name;
     if (!variable_accessor.AddUniformParameter(std::move(variable))) {
-      return AlreadyExistsError(
+      return absl::AlreadyExistsError(
           absl::StrCat("Uniform parameter \"", name, "\""));
     }
-    return OkStatus();
+    return absl::OkStatus();
   };
 
   for (auto&& object : attr.code.objects) {
@@ -62,7 +62,8 @@ Status ShaderCodegen::Build(CompiledNodeAttributes attr,
   for (auto&& variable : attr.code.shared_variables) {
     const std::string name = variable.name;
     if (!variable_accessor.AddSharedVariable(std::move(variable))) {
-      return AlreadyExistsError(absl::StrCat("Shared variable \"", name, "\""));
+      return absl::AlreadyExistsError(
+          absl::StrCat("Shared variable \"", name, "\""));
     }
   }
 
@@ -169,7 +170,7 @@ Status ShaderCodegen::Build(CompiledNodeAttributes attr,
       ShaderCode(variable_accessor.GetUniformParameters(),
                  object_accessor.GetObjects(), attr.code.workload,
                  attr.code.workgroup, partial_source_code, attr.node_indices);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gl
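
The add_object and add_uniform_parameter helpers above follow one pattern:
attempt an insert, and turn a duplicate into AlreadyExistsError. A generic
sketch of that insert-or-fail shape (AddUnique is a hypothetical helper):

    #include <string>
    #include <unordered_set>

    #include "absl/status/status.h"
    #include "absl/strings/str_cat.h"

    absl::Status AddUnique(const std::string& name,
                           std::unordered_set<std::string>* registry) {
      if (!registry->insert(name).second) {
        return absl::AlreadyExistsError(absl::StrCat("Object \"", name, "\""));
      }
      return absl::OkStatus();
    }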
diff --git a/tensorflow/lite/delegates/gpu/gl/compiler/shader_codegen.h b/tensorflow/lite/delegates/gpu/gl/compiler/shader_codegen.h
index c4f09a3b6b9..12d2708d221 100644
--- a/tensorflow/lite/delegates/gpu/gl/compiler/shader_codegen.h
+++ b/tensorflow/lite/delegates/gpu/gl/compiler/shader_codegen.h
@@ -39,7 +39,8 @@ class ShaderCodegen {
   ShaderCodegen(const CompilationOptions& options, const GpuInfo& gpu_info);
 
   // Builds the final program representation.
-  Status Build(CompiledNodeAttributes attr, ShaderCode* shader_code) const;
+  absl::Status Build(CompiledNodeAttributes attr,
+                     ShaderCode* shader_code) const;
 
  private:
   const CompilationOptions options_;
diff --git a/tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4.cc b/tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4.cc
index 3b37ba26058..fc86b0f3cb1 100644
--- a/tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4.cc
+++ b/tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4.cc
@@ -31,7 +31,7 @@ namespace tflite {
 namespace gpu {
 namespace gl {
 
-Status ConverterBhwcToPhwc4::Create(ConverterBhwcToPhwc4* converter) {
+absl::Status ConverterBhwcToPhwc4::Create(ConverterBhwcToPhwc4* converter) {
   uint3 workgroup_size = uint3(4, 4, 4);
   std::string shader_source = GetShaderHeader(workgroup_size) + R"(
     layout(std430) buffer;
@@ -69,22 +69,24 @@ Status ConverterBhwcToPhwc4::Create(ConverterBhwcToPhwc4* converter) {
   GlProgram program;
   RETURN_IF_ERROR(GlProgram::CreateWithShader(shader, &program));
   *converter = ConverterBhwcToPhwc4(std::move(program), workgroup_size);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status ConverterBhwcToPhwc4::Convert(const BHWC& shape, const GlBuffer& source,
-                                     CommandQueue* command_queue,
-                                     GlBuffer* destination) {
+absl::Status ConverterBhwcToPhwc4::Convert(const BHWC& shape,
+                                           const GlBuffer& source,
+                                           CommandQueue* command_queue,
+                                           GlBuffer* destination) {
   if (source.bytes_size() < BytesForBHWC(shape)) {
-    return InvalidArgumentError(
+    return absl::InvalidArgumentError(
         "BhwcToPhwc4: Input data size does not match expected size.");
   }
   if (destination->bytes_size() < BytesForPHWC4(shape)) {
-    return InvalidArgumentError(
+    return absl::InvalidArgumentError(
         "BhwcToPhwc4: output data size does not match expected size.");
   }
   if (shape.b != 1) {
-    return UnimplementedError("BhwcToPhwc4: Batch size is not equal to 1.");
+    return absl::UnimplementedError(
+        "BhwcToPhwc4: Batch size is not equal to 1.");
   }
   uint3 workload = uint3(shape.w, shape.h, IntegralDivideRoundUp(shape.c, 4));
   uint3 num_workgroups = IntegralDivideRoundUp(workload, workgroup_size_);
diff --git a/tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4.h b/tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4.h
index 9d9e6402ffa..9f699433a50 100644
--- a/tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4.h
+++ b/tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4.h
@@ -32,11 +32,11 @@ class ConverterBhwcToPhwc4 {
   // Creates an invalid object.
   ConverterBhwcToPhwc4() : program_(), workgroup_size_() {}
 
-  static Status Create(ConverterBhwcToPhwc4* converter);
+  static absl::Status Create(ConverterBhwcToPhwc4* converter);
 
-  Status Convert(const BHWC& shape, const GlBuffer& source,
-                 CommandQueue* command_queue /* optional */,
-                 GlBuffer* destination);
+  absl::Status Convert(const BHWC& shape, const GlBuffer& source,
+                       CommandQueue* command_queue /* optional */,
+                       GlBuffer* destination);
 
  private:
   explicit ConverterBhwcToPhwc4(GlProgram program, const uint3& workgroup_size)
diff --git a/tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4_test.cc b/tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4_test.cc
index 6fc424047a1..73ab9f67d94 100644
--- a/tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4_test.cc
+++ b/tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4_test.cc
@@ -41,7 +41,7 @@ inline std::vector<float> GenerateFloats(float multiplier, int size) {
   return v;
 }
 
-Status RunTest(const BHWC& shape) {
+absl::Status RunTest(const BHWC& shape) {
   // Create random input and calculate expected output for it.
   std::vector<float> input = GenerateFloats(0.01, shape.DimensionsProduct());
   std::vector<float> output(GetElementsSizeForPHWC4(shape), 0);
@@ -71,9 +71,9 @@ Status RunTest(const BHWC& shape) {
   RETURN_IF_ERROR(output_buffer.Read(
       absl::MakeSpan(converted_output.data(), converted_output.size())));
   if (output != converted_output) {
-    return InternalError("Outputs don't match");
+    return absl::InternalError("Outputs don't match");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 TEST(HwcToPhwc4, Smoke) {
diff --git a/tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc.cc b/tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc.cc
index c63fee9f8bd..5a9f51c0425 100644
--- a/tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc.cc
+++ b/tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc.cc
@@ -31,7 +31,7 @@ namespace tflite {
 namespace gpu {
 namespace gl {
 
-Status ConverterPhwc4ToBhwc::Create(ConverterPhwc4ToBhwc* converter) {
+absl::Status ConverterPhwc4ToBhwc::Create(ConverterPhwc4ToBhwc* converter) {
   uint3 workgroup_size = uint3(4, 4, 4);
   std::string shader_source = GetShaderHeader(workgroup_size) + R"(
     layout(std430) buffer;
@@ -62,22 +62,24 @@ Status ConverterPhwc4ToBhwc::Create(ConverterPhwc4ToBhwc* converter) {
   GlProgram program;
   RETURN_IF_ERROR(GlProgram::CreateWithShader(shader, &program));
   *converter = ConverterPhwc4ToBhwc(std::move(program), workgroup_size);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status ConverterPhwc4ToBhwc::Convert(const BHWC& shape, const GlBuffer& source,
-                                     CommandQueue* command_queue,
-                                     GlBuffer* destination) {
+absl::Status ConverterPhwc4ToBhwc::Convert(const BHWC& shape,
+                                           const GlBuffer& source,
+                                           CommandQueue* command_queue,
+                                           GlBuffer* destination) {
   if (source.bytes_size() < BytesForPHWC4(shape)) {
-    return InvalidArgumentError(
+    return absl::InvalidArgumentError(
         "Phwc4ToBhwc: Input data size does not match expected size.");
   }
   if (destination->bytes_size() < BytesForBHWC(shape)) {
-    return InvalidArgumentError(
+    return absl::InvalidArgumentError(
         "Phwc4ToBhwc: output data size does not match expected size.");
   }
   if (shape.b != 1) {
-    return UnimplementedError("Phwc4ToBhwc: Batch size is not equal to 1.");
+    return absl::UnimplementedError(
+        "Phwc4ToBhwc: Batch size is not equal to 1.");
   }
 
   uint3 workload = uint3(shape.w, shape.h, shape.c);
diff --git a/tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc.h b/tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc.h
index c8b181223ae..d9a4dd34ee8 100644
--- a/tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc.h
+++ b/tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc.h
@@ -32,11 +32,11 @@ class ConverterPhwc4ToBhwc {
   // Creates an invalid object.
   ConverterPhwc4ToBhwc() : program_(), workgroup_size_() {}
 
-  static Status Create(ConverterPhwc4ToBhwc* converter);
+  static absl::Status Create(ConverterPhwc4ToBhwc* converter);
 
-  Status Convert(const BHWC& shape, const GlBuffer& source,
-                 CommandQueue* command_queue /* optional */,
-                 GlBuffer* destination);
+  absl::Status Convert(const BHWC& shape, const GlBuffer& source,
+                       CommandQueue* command_queue /* optional */,
+                       GlBuffer* destination);
 
  private:
   explicit ConverterPhwc4ToBhwc(GlProgram program, const uint3& workgroup_size)
diff --git a/tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc_test.cc b/tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc_test.cc
index 6f969bb7801..34346e3ce9d 100644
--- a/tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc_test.cc
+++ b/tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc_test.cc
@@ -41,7 +41,7 @@ inline std::vector<float> GenerateFloats(float multiplier, int size) {
   return v;
 }
 
-Status RunTest(const BHWC& shape) {
+absl::Status RunTest(const BHWC& shape) {
   // Create random input and calculate expected output for it.
   std::vector<float> input =
       GenerateFloats(0.01, GetElementsSizeForPHWC4(shape));
@@ -72,9 +72,9 @@ Status RunTest(const BHWC& shape) {
   RETURN_IF_ERROR(output_buffer.Read(
       absl::MakeSpan(converted_output.data(), converted_output.size())));
   if (output != converted_output) {
-    return InternalError("Outputs don't match");
+    return absl::InternalError("Outputs don't match");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 TEST(Phwc4ToHwc, Smoke) {
diff --git a/tensorflow/lite/delegates/gpu/gl/egl_context.cc b/tensorflow/lite/delegates/gpu/gl/egl_context.cc
index 46fbed24291..f01bafcacff 100644
--- a/tensorflow/lite/delegates/gpu/gl/egl_context.cc
+++ b/tensorflow/lite/delegates/gpu/gl/egl_context.cc
@@ -26,19 +26,19 @@ namespace gpu {
 namespace gl {
 namespace {
 
-Status GetConfig(EGLDisplay display, const EGLint* attributes,
-                 EGLConfig* config) {
+absl::Status GetConfig(EGLDisplay display, const EGLint* attributes,
+                       EGLConfig* config) {
   EGLint config_count;
   bool chosen = eglChooseConfig(display, attributes, config, 1, &config_count);
   RETURN_IF_ERROR(GetOpenGlErrors());
   if (!chosen || config_count == 0) {
-    return InternalError("No EGL error, but eglChooseConfig failed.");
+    return absl::InternalError("No EGL error, but eglChooseConfig failed.");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CreateContext(EGLDisplay display, EGLContext shared_context,
-                     EGLConfig config, EglContext* egl_context) {
+absl::Status CreateContext(EGLDisplay display, EGLContext shared_context,
+                           EGLConfig config, EglContext* egl_context) {
   static const EGLint attributes[] = {EGL_CONTEXT_CLIENT_VERSION, 3,
 #ifdef _DEBUG  // Add debugging bit
                                       EGL_CONTEXT_FLAGS_KHR,
@@ -49,10 +49,10 @@ Status CreateContext(EGLDisplay display, EGLContext shared_context,
       eglCreateContext(display, config, shared_context, attributes);
   RETURN_IF_ERROR(GetOpenGlErrors());
   if (context == EGL_NO_CONTEXT) {
-    return InternalError("No EGL error, but eglCreateContext failed.");
+    return absl::InternalError("No EGL error, but eglCreateContext failed.");
   }
   *egl_context = EglContext(context, display, config, true);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 bool HasExtension(EGLDisplay display, const char* name) {
@@ -93,34 +93,36 @@ EglContext& EglContext::operator=(EglContext&& other) {
   return *this;
 }
 
-Status EglContext::MakeCurrent(EGLSurface read, EGLSurface write) {
+absl::Status EglContext::MakeCurrent(EGLSurface read, EGLSurface write) {
   bool is_made_current = eglMakeCurrent(display_, write, read, context_);
   RETURN_IF_ERROR(GetOpenGlErrors());
   if (!is_made_current) {
-    return InternalError("No EGL error, but eglMakeCurrent failed.");
+    return absl::InternalError("No EGL error, but eglMakeCurrent failed.");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 bool EglContext::IsCurrent() const {
   return context_ == eglGetCurrentContext();
 }
 
-Status CreateConfiglessContext(EGLDisplay display, EGLContext shared_context,
-                               EglContext* egl_context) {
+absl::Status CreateConfiglessContext(EGLDisplay display,
+                                     EGLContext shared_context,
+                                     EglContext* egl_context) {
   if (!HasExtension(display, "EGL_KHR_no_config_context")) {
-    return UnavailableError("EGL_KHR_no_config_context not supported");
+    return absl::UnavailableError("EGL_KHR_no_config_context not supported");
   }
   return CreateContext(display, shared_context, EGL_NO_CONFIG_KHR, egl_context);
 }
 
-Status CreateSurfacelessContext(EGLDisplay display, EGLContext shared_context,
-                                EglContext* egl_context) {
+absl::Status CreateSurfacelessContext(EGLDisplay display,
+                                      EGLContext shared_context,
+                                      EglContext* egl_context) {
   if (!HasExtension(display, "EGL_KHR_create_context")) {
-    return UnavailableError("EGL_KHR_create_context not supported");
+    return absl::UnavailableError("EGL_KHR_create_context not supported");
   }
   if (!HasExtension(display, "EGL_KHR_surfaceless_context")) {
-    return UnavailableError("EGL_KHR_surfaceless_context not supported");
+    return absl::UnavailableError("EGL_KHR_surfaceless_context not supported");
   }
   const EGLint attributes[] = {EGL_RENDERABLE_TYPE, EGL_OPENGL_ES3_BIT_KHR,
                                EGL_NONE};
@@ -129,8 +131,8 @@ Status CreateSurfacelessContext(EGLDisplay display, EGLContext shared_context,
   return CreateContext(display, shared_context, config, egl_context);
 }
 
-Status CreatePBufferContext(EGLDisplay display, EGLContext shared_context,
-                            EglContext* egl_context) {
+absl::Status CreatePBufferContext(EGLDisplay display, EGLContext shared_context,
+                                  EglContext* egl_context) {
   const EGLint attributes[] = {
       EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,     EGL_BIND_TO_TEXTURE_RGB,
       EGL_TRUE,         EGL_RENDERABLE_TYPE, EGL_OPENGL_ES3_BIT_KHR,
diff --git a/tensorflow/lite/delegates/gpu/gl/egl_context.h b/tensorflow/lite/delegates/gpu/gl/egl_context.h
index 72c53d2dd2e..a93f1fdc4c4 100644
--- a/tensorflow/lite/delegates/gpu/gl/egl_context.h
+++ b/tensorflow/lite/delegates/gpu/gl/egl_context.h
@@ -61,9 +61,9 @@ class EglContext {
 
   // Make this EglContext the current EGL context on this thread, replacing
   // the existing current context.
-  Status MakeCurrent(EGLSurface read, EGLSurface write);
+  absl::Status MakeCurrent(EGLSurface read, EGLSurface write);
 
-  Status MakeCurrentSurfaceless() {
+  absl::Status MakeCurrentSurfaceless() {
     return MakeCurrent(EGL_NO_SURFACE, EGL_NO_SURFACE);
   }
 
@@ -86,14 +86,16 @@ class EglContext {
 
 // Uses the EGL_KHR_no_config_context extension to create a no-config context,
 // since most modern hardware supports the extension.
-Status CreateConfiglessContext(EGLDisplay display, EGLContext shared_context,
-                               EglContext* egl_context);
+absl::Status CreateConfiglessContext(EGLDisplay display,
+                                     EGLContext shared_context,
+                                     EglContext* egl_context);
 
-Status CreateSurfacelessContext(EGLDisplay display, EGLContext shared_context,
-                                EglContext* egl_context);
+absl::Status CreateSurfacelessContext(EGLDisplay display,
+                                      EGLContext shared_context,
+                                      EglContext* egl_context);
 
-Status CreatePBufferContext(EGLDisplay display, EGLContext shared_context,
-                            EglContext* egl_context);
+absl::Status CreatePBufferContext(EGLDisplay display, EGLContext shared_context,
+                                  EglContext* egl_context);
 
 }  // namespace gl
 }  // namespace gpu
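
For reference, a minimal usage sketch of the factory functions declared above (illustrative only; the helper name is hypothetical). It applies the same configless-to-surfaceless fallback that EglEnvironment::Init uses further below, and assumes `display` is an already-initialized EGLDisplay:

#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/gl/egl_context.h"

namespace tflite {
namespace gpu {
namespace gl {

absl::Status MakeContextSketch(EGLDisplay display, EglContext* context) {
  // Prefer a no-config context; fall back on drivers without the extension.
  absl::Status status =
      CreateConfiglessContext(display, EGL_NO_CONTEXT, context);
  if (!status.ok()) {
    status = CreateSurfacelessContext(display, EGL_NO_CONTEXT, context);
  }
  RETURN_IF_ERROR(status);
  return context->MakeCurrentSurfaceless();
}

}  // namespace gl
}  // namespace gpu
}  // namespace tflite
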
diff --git a/tensorflow/lite/delegates/gpu/gl/egl_environment.cc b/tensorflow/lite/delegates/gpu/gl/egl_environment.cc
index baf6002e6c1..8ae75acd933 100644
--- a/tensorflow/lite/delegates/gpu/gl/egl_environment.cc
+++ b/tensorflow/lite/delegates/gpu/gl/egl_environment.cc
@@ -28,28 +28,28 @@ namespace {
 // TODO(akulik): detect power management event when all contexts are destroyed
 // and OpenGL ES is reinitialized. See eglMakeCurrent
 
-Status InitDisplay(EGLDisplay* egl_display) {
+absl::Status InitDisplay(EGLDisplay* egl_display) {
   RETURN_IF_ERROR(
       TFLITE_GPU_CALL_EGL(eglGetDisplay, egl_display, EGL_DEFAULT_DISPLAY));
   if (*egl_display == EGL_NO_DISPLAY) {
-    return UnavailableError("eglGetDisplay returned nullptr");
+    return absl::UnavailableError("eglGetDisplay returned nullptr");
   }
   bool is_initialized;
   RETURN_IF_ERROR(TFLITE_GPU_CALL_EGL(eglInitialize, &is_initialized,
                                       *egl_display, nullptr, nullptr));
   if (!is_initialized) {
-    return InternalError("No EGL error, but eglInitialize failed");
+    return absl::InternalError("No EGL error, but eglInitialize failed");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace
 
-Status EglEnvironment::NewEglEnvironment(
+absl::Status EglEnvironment::NewEglEnvironment(
     std::unique_ptr<EglEnvironment>* egl_environment) {
   *egl_environment = absl::make_unique<EglEnvironment>();
   RETURN_IF_ERROR((*egl_environment)->Init());
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 EglEnvironment::~EglEnvironment() {
@@ -61,12 +61,12 @@ EglEnvironment::~EglEnvironment() {
   }
 }
 
-Status EglEnvironment::Init() {
+absl::Status EglEnvironment::Init() {
   bool is_bound;
   RETURN_IF_ERROR(
       TFLITE_GPU_CALL_EGL(eglBindAPI, &is_bound, EGL_OPENGL_ES_API));
   if (!is_bound) {
-    return InternalError("No EGL error, but eglBindAPI failed");
+    return absl::InternalError("No EGL error, but eglBindAPI failed");
   }
 
   // Re-use the context and display if they were created on this thread.
@@ -77,7 +77,7 @@ Status EglEnvironment::Init() {
   } else {
     RETURN_IF_ERROR(InitDisplay(&display_));
 
-    Status status = InitConfiglessContext();
+    absl::Status status = InitConfiglessContext();
     if (!status.ok()) {
       status = InitSurfacelessContext();
     }
@@ -94,33 +94,30 @@ Status EglEnvironment::Init() {
   }
   // TODO(akulik): when do we need ForceSyncTurning?
   ForceSyncTurning();
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status EglEnvironment::InitConfiglessContext() {
+absl::Status EglEnvironment::InitConfiglessContext() {
   RETURN_IF_ERROR(CreateConfiglessContext(display_, EGL_NO_CONTEXT, &context_));
   return context_.MakeCurrentSurfaceless();
 }
 
-Status EglEnvironment::InitSurfacelessContext() {
+absl::Status EglEnvironment::InitSurfacelessContext() {
   RETURN_IF_ERROR(
       CreateSurfacelessContext(display_, EGL_NO_CONTEXT, &context_));
-  Status status = context_.MakeCurrentSurfaceless();
-  if (!status.ok()) {
-    return status;
-  }
+  RETURN_IF_ERROR(context_.MakeCurrentSurfaceless());
 
   // PowerVR supports EGL_KHR_surfaceless_context, but glFenceSync crashes on
   // PowerVR when the context is surfaceless.
   RETURN_IF_ERROR(RequestGpuInfo(&gpu_info_));
   if (gpu_info_.type == GpuType::POWERVR) {
-    return UnavailableError(
+    return absl::UnavailableError(
         "Surface-less context is not properly supported on powervr.");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status EglEnvironment::InitPBufferContext() {
+absl::Status EglEnvironment::InitPBufferContext() {
   RETURN_IF_ERROR(CreatePBufferContext(display_, EGL_NO_CONTEXT, &context_));
   RETURN_IF_ERROR(CreatePbufferRGBSurface(context_.config(), display_, 1, 1,
                                           &surface_read_));
diff --git a/tensorflow/lite/delegates/gpu/gl/egl_environment.h b/tensorflow/lite/delegates/gpu/gl/egl_environment.h
index fa7ca047b6e..cb6616496dd 100644
--- a/tensorflow/lite/delegates/gpu/gl/egl_environment.h
+++ b/tensorflow/lite/delegates/gpu/gl/egl_environment.h
@@ -36,7 +36,7 @@ namespace gl {
 // EGL environment needs to be created once per thread.
 class EglEnvironment {
  public:
-  static Status NewEglEnvironment(
+  static absl::Status NewEglEnvironment(
       std::unique_ptr<EglEnvironment>* egl_environment);
 
   EglEnvironment() = default;
@@ -47,10 +47,10 @@ class EglEnvironment {
   const GpuInfo& gpu_info() const { return gpu_info_; }
 
  private:
-  Status Init();
-  Status InitConfiglessContext();
-  Status InitSurfacelessContext();
-  Status InitPBufferContext();
+  absl::Status Init();
+  absl::Status InitConfiglessContext();
+  absl::Status InitSurfacelessContext();
+  absl::Status InitPBufferContext();
 
   EGLDisplay display_ = EGL_NO_DISPLAY;
   EglSurface surface_draw_;
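
The class above is the usual entry point for the GL backend; a minimal usage sketch (hypothetical helper name, assuming it runs on the thread that will later issue GL calls):

#include <memory>

#include "tensorflow/lite/delegates/gpu/gl/egl_environment.h"

absl::Status InitGpuSketch() {
  std::unique_ptr<tflite::gpu::gl::EglEnvironment> env;
  RETURN_IF_ERROR(tflite::gpu::gl::EglEnvironment::NewEglEnvironment(&env));
  // env->gpu_info() is populated after Init; keep `env` alive while using GL.
  return absl::OkStatus();
}
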
diff --git a/tensorflow/lite/delegates/gpu/gl/egl_surface.cc b/tensorflow/lite/delegates/gpu/gl/egl_surface.cc
index eaccea6411e..d0f062af392 100644
--- a/tensorflow/lite/delegates/gpu/gl/egl_surface.cc
+++ b/tensorflow/lite/delegates/gpu/gl/egl_surface.cc
@@ -44,9 +44,9 @@ void EglSurface::Invalidate() {
   }
 }
 
-Status CreatePbufferRGBSurface(EGLConfig config, EGLDisplay display,
-                               uint32_t height, uint32_t width,
-                               EglSurface* egl_surface) {
+absl::Status CreatePbufferRGBSurface(EGLConfig config, EGLDisplay display,
+                                     uint32_t height, uint32_t width,
+                                     EglSurface* egl_surface) {
   const EGLint pbuffer_attributes[] = {EGL_WIDTH,
                                        static_cast<EGLint>(width),
                                        EGL_HEIGHT,
@@ -60,10 +60,11 @@ Status CreatePbufferRGBSurface(EGLConfig config, EGLDisplay display,
       eglCreatePbufferSurface(display, config, pbuffer_attributes);
   RETURN_IF_ERROR(GetOpenGlErrors());
   if (surface == EGL_NO_SURFACE) {
-    return InternalError("No EGL error, but eglCreatePbufferSurface failed");
+    return absl::InternalError(
+        "No EGL error, but eglCreatePbufferSurface failed");
   }
   *egl_surface = EglSurface(surface, display);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gl
diff --git a/tensorflow/lite/delegates/gpu/gl/egl_surface.h b/tensorflow/lite/delegates/gpu/gl/egl_surface.h
index 793dc7a9dc6..5d39aed33fb 100644
--- a/tensorflow/lite/delegates/gpu/gl/egl_surface.h
+++ b/tensorflow/lite/delegates/gpu/gl/egl_surface.h
@@ -56,9 +56,9 @@ class EglSurface {
 };
 
 // Creates off-screen pbuffer-based surface of the given height and width.
-Status CreatePbufferRGBSurface(EGLConfig config, EGLDisplay display,
-                               uint32_t height, uint32_t width,
-                               EglSurface* egl_surface);
+absl::Status CreatePbufferRGBSurface(EGLConfig config, EGLDisplay display,
+                                     uint32_t height, uint32_t width,
+                                     EglSurface* egl_surface);
 
 }  // namespace gl
 }  // namespace gpu
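
A usage sketch for the declaration above (hypothetical helper name). Note that the signature takes height before width, mirroring the call in EglEnvironment::InitPBufferContext:

absl::Status MakeDummySurfaceSketch(EGLConfig config, EGLDisplay display,
                                    tflite::gpu::gl::EglSurface* surface) {
  return tflite::gpu::gl::CreatePbufferRGBSurface(
      config, display, /*height=*/1, /*width=*/1, surface);
}
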
diff --git a/tensorflow/lite/delegates/gpu/gl/gl_buffer.cc b/tensorflow/lite/delegates/gpu/gl/gl_buffer.cc
index 509cadca60d..1de49676219 100644
--- a/tensorflow/lite/delegates/gpu/gl/gl_buffer.cc
+++ b/tensorflow/lite/delegates/gpu/gl/gl_buffer.cc
@@ -21,9 +21,10 @@ namespace tflite {
 namespace gpu {
 namespace gl {
 
-Status CopyBuffer(const GlBuffer& read_buffer, const GlBuffer& write_buffer) {
+absl::Status CopyBuffer(const GlBuffer& read_buffer,
+                        const GlBuffer& write_buffer) {
   if (read_buffer.bytes_size() != write_buffer.bytes_size()) {
-    return InvalidArgumentError(
+    return absl::InvalidArgumentError(
         "Read buffer does not match write buffer size.");
   }
   gl_buffer_internal::BufferBinder read_buffer_binder(GL_COPY_READ_BUFFER,
@@ -35,7 +36,7 @@ Status CopyBuffer(const GlBuffer& read_buffer, const GlBuffer& write_buffer) {
                             write_buffer.offset(), read_buffer.bytes_size());
 }
 
-Status GetSSBOSize(GLuint id, int64_t* size_bytes) {
+absl::Status GetSSBOSize(GLuint id, int64_t* size_bytes) {
   GLuint prev_id;
   RETURN_IF_ERROR(TFLITE_GPU_CALL_GL(glGetIntegerv,
                                      GL_SHADER_STORAGE_BUFFER_BINDING,
@@ -75,19 +76,19 @@ void GlBuffer::Invalidate() {
   }
 }
 
-Status GlBuffer::BindToIndex(uint32_t index) const {
+absl::Status GlBuffer::BindToIndex(uint32_t index) const {
   return TFLITE_GPU_CALL_GL(glBindBufferRange, target_, index, id_, offset_,
                             bytes_size_);
 }
 
-Status GlBuffer::MakeView(size_t offset, size_t bytes_size,
-                          GlBuffer* gl_buffer) {
+absl::Status GlBuffer::MakeView(size_t offset, size_t bytes_size,
+                                GlBuffer* gl_buffer) {
   if (offset + bytes_size > bytes_size_) {
-    return OutOfRangeError("GlBuffer view is out of range.");
+    return absl::OutOfRangeError("GlBuffer view is out of range.");
   }
   *gl_buffer = GlBuffer(target_, id_, bytes_size, offset_ + offset,
                         /*has_ownership=*/false);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 GlBuffer GlBuffer::MakeRef() {
@@ -121,12 +122,13 @@ GlPersistentBuffer::~GlPersistentBuffer() {
   glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
 }
 
-Status CreatePersistentBuffer(size_t size, GlPersistentBuffer* gl_buffer) {
+absl::Status CreatePersistentBuffer(size_t size,
+                                    GlPersistentBuffer* gl_buffer) {
   PFNGLBUFFERSTORAGEEXTPROC glBufferStorageEXT = nullptr;
   glBufferStorageEXT = reinterpret_cast<PFNGLBUFFERSTORAGEEXTPROC>(
       eglGetProcAddress("glBufferStorageEXT"));
   if (!glBufferStorageEXT) {
-    return UnavailableError("glBufferStorageEXT is not supported");
+    return absl::UnavailableError("glBufferStorageEXT is not supported");
   }
   gl_buffer_internal::BufferId id;
   gl_buffer_internal::BufferBinder binder(GL_SHADER_STORAGE_BUFFER, id.id());
@@ -140,7 +142,7 @@ Status CreatePersistentBuffer(size_t size, GlPersistentBuffer* gl_buffer) {
       GL_MAP_READ_BIT | GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT_EXT));
   *gl_buffer = GlPersistentBuffer{
       GL_SHADER_STORAGE_BUFFER, id.Release(), size, 0, true, data};
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gl
diff --git a/tensorflow/lite/delegates/gpu/gl/gl_buffer.h b/tensorflow/lite/delegates/gpu/gl/gl_buffer.h
index a7e19abde70..3225679ec5a 100644
--- a/tensorflow/lite/delegates/gpu/gl/gl_buffer.h
+++ b/tensorflow/lite/delegates/gpu/gl/gl_buffer.h
@@ -60,30 +60,31 @@ class GlBuffer {
   // Reads data from buffer into CPU memory. Data should point to a region that
   // has at least bytes_size available.
   template <typename T>
-  Status Read(absl::Span<T> data) const;
+  absl::Status Read(absl::Span<T> data) const;
 
   // Writes data to a buffer.
   template <typename T>
-  Status Write(absl::Span<const T> data);
+  absl::Status Write(absl::Span<const T> data);
 
   // Maps GPU memory to CPU address space and calls reader that may read from
   // that memory.
   template <typename T>
-  Status MappedRead(
-      const std::function<Status(absl::Span<const T>)>& reader) const;
+  absl::Status MappedRead(
+      const std::function<absl::Status(absl::Span<const T>)>& reader) const;
 
   // Maps GPU memory to CPU address space and calls writer that may write into
   // that memory.
   template <typename T>
-  Status MappedWrite(const std::function<Status(absl::Span<T>)>& writer);
+  absl::Status MappedWrite(
+      const std::function<absl::Status(absl::Span<T>)>& writer);
 
-  Status MakeView(size_t offset, size_t bytes_size, GlBuffer* gl_buffer);
+  absl::Status MakeView(size_t offset, size_t bytes_size, GlBuffer* gl_buffer);
 
   // Makes a copy without ownership of the buffer.
   GlBuffer MakeRef();
 
   // Binds a buffer to an index.
-  Status BindToIndex(uint32_t index) const;
+  absl::Status BindToIndex(uint32_t index) const;
 
   // Releases the ownership of the buffer object.
   void Release() { has_ownership_ = false; }
@@ -112,9 +113,10 @@ class GlBuffer {
   bool has_ownership_;
 };
 
-Status CopyBuffer(const GlBuffer& read_buffer, const GlBuffer& write_buffer);
+absl::Status CopyBuffer(const GlBuffer& read_buffer,
+                        const GlBuffer& write_buffer);
 
-Status GetSSBOSize(GLuint id, int64_t* size_bytes);
+absl::Status GetSSBOSize(GLuint id, int64_t* size_bytes);
 
 // Creates new shader storage buffer that will be modified and used many
 // times.
@@ -122,20 +124,20 @@ Status GetSSBOSize(GLuint id, int64_t* size_bytes);
 // See https://www.khronos.org/opengl/wiki/Shader_Storage_Buffer_Object for
 // details.
 template <typename T>
-Status CreateReadWriteShaderStorageBuffer(uint32_t num_elements,
-                                          GlBuffer* gl_buffer);
+absl::Status CreateReadWriteShaderStorageBuffer(uint32_t num_elements,
+                                                GlBuffer* gl_buffer);
 
 // Creates new shader storage buffer that will be filled with data once which
 // will be used many times.
 template <typename T>
-Status CreateReadOnlyShaderStorageBuffer(absl::Span<const T> data,
-                                         GlBuffer* gl_buffer);
+absl::Status CreateReadOnlyShaderStorageBuffer(absl::Span<const T> data,
+                                               GlBuffer* gl_buffer);
 
 // Adapts raw Buffer::Read method to read data into a vector.
 template <typename T>
-Status AppendFromBuffer(const GlBuffer& buffer, std::vector<T>* data) {
+absl::Status AppendFromBuffer(const GlBuffer& buffer, std::vector<T>* data) {
   if (buffer.bytes_size() % sizeof(T) != 0) {
-    return InvalidArgumentError("Buffer is not aligned");
+    return absl::InvalidArgumentError("Buffer is not aligned");
   }
   size_t num_elements = buffer.bytes_size() / sizeof(T);
   data->resize(data->size() + num_elements);
@@ -167,7 +169,7 @@ class GlPersistentBuffer : public GlBuffer {
 };
 
 // Creates read-write persistent buffer with valid CPU pointer
-Status CreatePersistentBuffer(size_t size, GlPersistentBuffer* gl_buffer);
+absl::Status CreatePersistentBuffer(size_t size, GlPersistentBuffer* gl_buffer);
 
 ////////////////////////////////////////////////////////////////////////////////
 // Implementation details are below.
@@ -243,8 +245,8 @@ class BufferMapper {
 }  // namespace gl_buffer_internal
 
 template <typename T>
-Status CreateReadWriteShaderStorageBuffer(uint32_t num_elements,
-                                          GlBuffer* gl_buffer) {
+absl::Status CreateReadWriteShaderStorageBuffer(uint32_t num_elements,
+                                                GlBuffer* gl_buffer) {
   gl_buffer_internal::BufferId id;
   gl_buffer_internal::BufferBinder binder(GL_SHADER_STORAGE_BUFFER, id.id());
   // TODO(akulik): benchmark DYNAMIC vs STREAM buffer
@@ -253,12 +255,12 @@ Status CreateReadWriteShaderStorageBuffer(uint32_t num_elements,
                                      GL_STREAM_COPY));
   *gl_buffer = GlBuffer{GL_SHADER_STORAGE_BUFFER, id.Release(),
                         num_elements * sizeof(T), 0, true};
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <typename T>
-Status CreateReadOnlyShaderStorageBuffer(absl::Span<const T> data,
-                                         GlBuffer* gl_buffer) {
+absl::Status CreateReadOnlyShaderStorageBuffer(absl::Span<const T> data,
+                                               GlBuffer* gl_buffer) {
   gl_buffer_internal::BufferId id;
   gl_buffer_internal::BufferBinder binder(GL_SHADER_STORAGE_BUFFER, id.id());
   RETURN_IF_ERROR(TFLITE_GPU_CALL_GL(glBufferData, GL_SHADER_STORAGE_BUFFER,
@@ -266,26 +268,26 @@ Status CreateReadOnlyShaderStorageBuffer(absl::Span<const T> data,
                                      GL_STATIC_READ));
   *gl_buffer = GlBuffer{GL_SHADER_STORAGE_BUFFER, id.Release(),
                         data.size() * sizeof(T), 0, true};
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <typename T>
-Status GlBuffer::Read(absl::Span<T> data) const {
+absl::Status GlBuffer::Read(absl::Span<T> data) const {
   if (data.size() * sizeof(T) < bytes_size()) {
-    return InvalidArgumentError(
+    return absl::InvalidArgumentError(
         "Read from buffer failed. Destination data is shorter than buffer.");
   }
   // TODO(akulik): glCopyBufferSubData is actually available in ES 3.1, try it.
   return MappedRead<T>([this, data](absl::Span<const T> src) {
     std::memcpy(data.data(), src.data(), bytes_size());
-    return OkStatus();
+    return absl::OkStatus();
   });
 }
 
 template <typename T>
-Status GlBuffer::Write(absl::Span<const T> data) {
+absl::Status GlBuffer::Write(absl::Span<const T> data) {
   if (data.size() * sizeof(T) > bytes_size_) {
-    return InvalidArgumentError(
+    return absl::InvalidArgumentError(
         "Write to buffer failed. Source data is larger than buffer.");
   }
   gl_buffer_internal::BufferBinder binder(target_, id_);
@@ -294,10 +296,10 @@ Status GlBuffer::Write(absl::Span<const T> data) {
 }
 
 template <typename T>
-Status GlBuffer::MappedRead(
-    const std::function<Status(absl::Span<const T> d)>& reader) const {
+absl::Status GlBuffer::MappedRead(
+    const std::function<absl::Status(absl::Span<const T> d)>& reader) const {
   if (bytes_size_ % sizeof(T) != 0) {
-    return InvalidArgumentError("Buffer is not aligned");
+    return absl::InvalidArgumentError("Buffer is not aligned");
   }
   gl_buffer_internal::BufferBinder binder(target_, id_);
   gl_buffer_internal::BufferMapper mapper(target_, offset_, bytes_size_,
@@ -310,10 +312,10 @@ Status GlBuffer::MappedRead(
 }
 
 template <typename T>
-Status GlBuffer::MappedWrite(
-    const std::function<Status(absl::Span<T> d)>& writer) {
+absl::Status GlBuffer::MappedWrite(
+    const std::function<absl::Status(absl::Span<T> d)>& writer) {
   if (bytes_size_ % sizeof(T) != 0) {
-    return InvalidArgumentError("Buffer is not aligned");
+    return absl::InvalidArgumentError("Buffer is not aligned");
   }
   gl_buffer_internal::BufferBinder binder(target_, id_);
   gl_buffer_internal::BufferMapper mapper(target_, offset_, bytes_size_,
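
A round-trip sketch of the templated buffer API above (hypothetical helper name, assuming a current GL context, e.g. one created via EglEnvironment):

#include <vector>

#include "absl/types/span.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h"

absl::Status RoundTripSketch() {
  tflite::gpu::gl::GlBuffer buffer;
  RETURN_IF_ERROR(
      tflite::gpu::gl::CreateReadWriteShaderStorageBuffer<float>(4, &buffer));
  const std::vector<float> input = {1.f, 2.f, 3.f, 4.f};
  RETURN_IF_ERROR(buffer.Write<float>(absl::MakeConstSpan(input)));
  std::vector<float> output(4);
  return buffer.Read<float>(absl::MakeSpan(output));
}
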
diff --git a/tensorflow/lite/delegates/gpu/gl/gl_buffer_test.cc b/tensorflow/lite/delegates/gpu/gl/gl_buffer_test.cc
index 1d8031fcf39..863f5ec6020 100644
--- a/tensorflow/lite/delegates/gpu/gl/gl_buffer_test.cc
+++ b/tensorflow/lite/delegates/gpu/gl/gl_buffer_test.cc
@@ -89,7 +89,7 @@ TEST(Buffer, SubView) {
   GlBuffer view1;
   ASSERT_TRUE(buffer.MakeView(4, 16, &view1).ok());
   GlBuffer view2;
-  EXPECT_NE(view1.MakeView(1, 16, &view2), OkStatus());
+  EXPECT_FALSE(view1.MakeView(1, 16, &view2).ok());
   ASSERT_TRUE(view1.MakeView(2, 2, &view2).ok());
 
   EXPECT_FALSE(view2.has_ownership());
diff --git a/tensorflow/lite/delegates/gpu/gl/gl_call.h b/tensorflow/lite/delegates/gpu/gl/gl_call.h
index a8a81bae608..1a392d6aca3 100644
--- a/tensorflow/lite/delegates/gpu/gl/gl_call.h
+++ b/tensorflow/lite/delegates/gpu/gl/gl_call.h
@@ -53,12 +53,13 @@ namespace gl_call_internal {
 template <typename T>
 struct Caller {
   template <typename F, typename ErrorF, typename... Params>
-  Status operator()(const std::string& context, F func, ErrorF error_func,
-                    T* result, Params&&... params) {
+  absl::Status operator()(const std::string& context, F func, ErrorF error_func,
+                          T* result, Params&&... params) {
     *result = func(std::forward<Params>(params)...);
     const auto status = error_func();
-    if (status.ok()) return OkStatus();
-    return Status(status.code(), status.error_message() + ": " + context);
+    if (status.ok()) return absl::OkStatus();
+    return absl::Status(status.code(),
+                        std::string(status.message()) + ": " + context);
   }
 };
 
@@ -66,25 +67,27 @@ struct Caller {
 template<>
 struct Caller<void> {
   template <typename F, typename ErrorF, typename... Params>
-  Status operator()(const std::string& context, F func, ErrorF error_func,
-                    Params&&... params) {
+  absl::Status operator()(const std::string& context, F func, ErrorF error_func,
+                          Params&&... params) {
     func(std::forward<Params>(params)...);
     const auto status = error_func();
-    if (status.ok()) return OkStatus();
-    return Status(status.code(), status.error_message() + ": " + context);
+    if (status.ok()) return absl::OkStatus();
+    return absl::Status(status.code(),
+                        std::string(status.message()) + ": " + context);
   }
 };
 
 template <typename F, typename ErrorF, typename ResultT, typename... ParamsT>
-Status CallAndCheckError(const std::string& context, F func, ErrorF error_func,
-                         ResultT* result, ParamsT&&... params) {
+absl::Status CallAndCheckError(const std::string& context, F func,
+                               ErrorF error_func, ResultT* result,
+                               ParamsT&&... params) {
   return Caller<ResultT>()(context, func, error_func, result,
                            std::forward<ParamsT>(params)...);
 }
 
 template <typename F, typename ErrorF, typename... Params>
-Status CallAndCheckError(const std::string& context, F func, ErrorF error_func,
-                         Params&&... params) {
+absl::Status CallAndCheckError(const std::string& context, F func,
+                               ErrorF error_func, Params&&... params) {
   return Caller<void>()(context, func, error_func,
                         std::forward<Params>(params)...);
 }
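
Call sites reach these helpers through the TFLITE_GPU_CALL_GL macro, picking the overload with a result pointer for GL functions that return a value. A minimal sketch (hypothetical helper name):

absl::Status UseProgramSketch(GLuint* shader_id) {
  // glCreateShader returns a value, so a result pointer comes first.
  RETURN_IF_ERROR(
      TFLITE_GPU_CALL_GL(glCreateShader, shader_id, GL_COMPUTE_SHADER));
  // glUseProgram returns void, so only the arguments follow.
  return TFLITE_GPU_CALL_GL(glUseProgram, 0);
}
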
diff --git a/tensorflow/lite/delegates/gpu/gl/gl_errors.cc b/tensorflow/lite/delegates/gpu/gl/gl_errors.cc
index 1a40e38ea9c..3ad6be8a25e 100644
--- a/tensorflow/lite/delegates/gpu/gl/gl_errors.cc
+++ b/tensorflow/lite/delegates/gpu/gl/gl_errors.cc
@@ -58,83 +58,83 @@ struct ErrorFormatter {
 
 // TODO(akulik): create new error space for GL error.
 
-Status GetOpenGlErrors() {
+absl::Status GetOpenGlErrors() {
   auto error = glGetError();
   if (error == GL_NO_ERROR) {
-    return OkStatus();
+    return absl::OkStatus();
   }
   auto error2 = glGetError();
   if (error2 == GL_NO_ERROR) {
-    return InternalError(ErrorToString(error));
+    return absl::InternalError(ErrorToString(error));
   }
   std::vector<GLenum> errors = {error, error2};
   for (error = glGetError(); error != GL_NO_ERROR; error = glGetError()) {
     errors.push_back(error);
   }
-  return InternalError(absl::StrJoin(errors, ",", ErrorFormatter()));
+  return absl::InternalError(absl::StrJoin(errors, ",", ErrorFormatter()));
 }
 
-Status GetEglError() {
+absl::Status GetEglError() {
   EGLint error = eglGetError();
   switch (error) {
     case EGL_SUCCESS:
-      return OkStatus();
+      return absl::OkStatus();
     case EGL_NOT_INITIALIZED:
-      return InternalError(
+      return absl::InternalError(
           "EGL is not initialized, or could not be initialized, for the "
           "specified EGL display connection.");
     case EGL_BAD_ACCESS:
-      return InternalError(
+      return absl::InternalError(
           "EGL cannot access a requested resource (for example a context is "
           "bound in another thread).");
     case EGL_BAD_ALLOC:
-      return InternalError(
+      return absl::InternalError(
           "EGL failed to allocate resources for the requested operation.");
     case EGL_BAD_ATTRIBUTE:
-      return InternalError(
+      return absl::InternalError(
           "An unrecognized attribute or attribute value was passed in the "
           "attribute list.");
     case EGL_BAD_CONTEXT:
-      return InternalError(
+      return absl::InternalError(
           "An EGLContext argument does not name a valid EGL rendering "
           "context.");
     case EGL_BAD_CONFIG:
-      return InternalError(
+      return absl::InternalError(
           "An EGLConfig argument does not name a valid EGL frame buffer "
           "configuration.");
     case EGL_BAD_CURRENT_SURFACE:
-      return InternalError(
+      return absl::InternalError(
           "The current surface of the calling thread is a window, pixel buffer "
           "or pixmap that is no longer valid.");
     case EGL_BAD_DISPLAY:
-      return InternalError(
+      return absl::InternalError(
           "An EGLDisplay argument does not name a valid EGL display "
           "connection.");
     case EGL_BAD_SURFACE:
-      return InternalError(
+      return absl::InternalError(
           "An EGLSurface argument does not name a valid surface (window, pixel "
           "buffer or pixmap) configured for GL rendering.");
     case EGL_BAD_MATCH:
-      return InternalError(
+      return absl::InternalError(
           "Arguments are inconsistent (for example, a valid context requires "
           "buffers not supplied by a valid surface).");
     case EGL_BAD_PARAMETER:
-      return InternalError("One or more argument values are invalid.");
+      return absl::InternalError("One or more argument values are invalid.");
     case EGL_BAD_NATIVE_PIXMAP:
-      return InternalError(
+      return absl::InternalError(
           "A NativePixmapType argument does not refer to a valid native "
           "pixmap.");
     case EGL_BAD_NATIVE_WINDOW:
-      return InternalError(
+      return absl::InternalError(
           "A NativeWindowType argument does not refer to a valid native "
           "window.");
     case EGL_CONTEXT_LOST:
-      return InternalError(
+      return absl::InternalError(
           "A power management event has occurred. The application must destroy "
           "all contexts and reinitialize OpenGL ES state and objects to "
           "continue rendering.");
   }
-  return UnknownError("EGL error: " + std::to_string(error));
+  return absl::UnknownError("EGL error: " + std::to_string(error));
 }
 
 }  // namespace gl
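
EGL entry points follow the same pattern through TFLITE_GPU_CALL_EGL, which reports failures via GetEglError() above. A sketch mirroring InitDisplay in egl_environment.cc (hypothetical helper name):

absl::Status GetDisplaySketch(EGLDisplay* display) {
  RETURN_IF_ERROR(
      TFLITE_GPU_CALL_EGL(eglGetDisplay, display, EGL_DEFAULT_DISPLAY));
  if (*display == EGL_NO_DISPLAY) {
    return absl::UnavailableError("eglGetDisplay returned nullptr");
  }
  return absl::OkStatus();
}
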
diff --git a/tensorflow/lite/delegates/gpu/gl/gl_errors.h b/tensorflow/lite/delegates/gpu/gl/gl_errors.h
index 978e642abaa..761eddd8901 100644
--- a/tensorflow/lite/delegates/gpu/gl/gl_errors.h
+++ b/tensorflow/lite/delegates/gpu/gl/gl_errors.h
@@ -23,10 +23,10 @@ namespace gpu {
 namespace gl {
 
 // @return recent OpenGL errors packed into a Status.
-Status GetOpenGlErrors();
+absl::Status GetOpenGlErrors();
 
 // @return the error of the last called EGL function in the current thread.
-Status GetEglError();
+absl::Status GetEglError();
 
 }  // namespace gl
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/gl/gl_program.cc b/tensorflow/lite/delegates/gpu/gl/gl_program.cc
index def82357a6a..d6e56ca64c4 100644
--- a/tensorflow/lite/delegates/gpu/gl/gl_program.cc
+++ b/tensorflow/lite/delegates/gpu/gl/gl_program.cc
@@ -29,19 +29,19 @@ namespace gpu {
 namespace gl {
 namespace {
 
-Status CreateNewProgramId(GLuint* program_id) {
+absl::Status CreateNewProgramId(GLuint* program_id) {
   RETURN_IF_ERROR(TFLITE_GPU_CALL_GL(glCreateProgram, program_id));
   if (!*program_id) {
-    return UnknownError("Can't create opengl program: 0 program_id");
+    return absl::UnknownError("Can't create opengl program: 0 program_id");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CheckProgramLinked(GLuint program_id) {
+absl::Status CheckProgramLinked(GLuint program_id) {
   GLint linked;
   glGetProgramiv(program_id, GL_LINK_STATUS, &linked);
   if (linked == GL_TRUE) {
-    return OkStatus();
+    return absl::OkStatus();
   }
   GLint info_size;
   glGetProgramiv(program_id, GL_INFO_LOG_LENGTH, &info_size);
@@ -49,26 +49,26 @@ Status CheckProgramLinked(GLuint program_id) {
   errors.resize(info_size + 1 /* plus \0 */);
   glGetProgramInfoLog(program_id, info_size + 1, nullptr, &errors[0]);
   // TODO(akulik): use glValidateProgram to gather more info.
-  return UnavailableError("Program is not properly linked: " + errors);
+  return absl::UnavailableError("Program is not properly linked: " + errors);
 }
 
 struct ParameterSetter {
-  Status operator()(int value) {
+  absl::Status operator()(int value) {
     return TFLITE_GPU_CALL_GL(glProgramUniform1i, program_id, uniform_id,
                               value);
   }
 
-  Status operator()(const int2& value) {
+  absl::Status operator()(const int2& value) {
     return TFLITE_GPU_CALL_GL(glProgramUniform2i, program_id, uniform_id,
                               value.x, value.y);
   }
 
-  Status operator()(const int4& value) {
+  absl::Status operator()(const int4& value) {
     return TFLITE_GPU_CALL_GL(glProgramUniform4i, program_id, uniform_id,
                               value.x, value.y, value.z, value.w);
   }
 
-  Status operator()(const std::vector<int2>& value) {
+  absl::Status operator()(const std::vector<int2>& value) {
     std::vector<GLint> ints(value.size() * 2, 0);
     for (int i = 0; i < value.size(); ++i) {
       ints[i * 2] = value[i].x;
@@ -78,32 +78,32 @@ struct ParameterSetter {
                               ints.size(), ints.data());
   }
 
-  Status operator()(unsigned int value) {
+  absl::Status operator()(unsigned int value) {
     return TFLITE_GPU_CALL_GL(glProgramUniform1ui, program_id, uniform_id,
                               value);
   }
 
-  Status operator()(const uint4& value) {
+  absl::Status operator()(const uint4& value) {
     return TFLITE_GPU_CALL_GL(glProgramUniform4ui, program_id, uniform_id,
                               value.x, value.y, value.z, value.w);
   }
 
-  Status operator()(float value) {
+  absl::Status operator()(float value) {
     return TFLITE_GPU_CALL_GL(glProgramUniform1f, program_id, uniform_id,
                               value);
   }
 
-  Status operator()(const float2& value) {
+  absl::Status operator()(const float2& value) {
     return TFLITE_GPU_CALL_GL(glProgramUniform2f, program_id, uniform_id,
                               value.x, value.y);
   }
 
-  Status operator()(const float4& value) {
+  absl::Status operator()(const float4& value) {
     return TFLITE_GPU_CALL_GL(glProgramUniform4f, program_id, uniform_id,
                               value.x, value.y, value.z, value.w);
   }
 
-  Status operator()(const std::vector<float4>& value) {
+  absl::Status operator()(const std::vector<float4>& value) {
     std::vector<GLfloat> floats(value.size() * 4, 0);
     for (int i = 0; i < value.size(); ++i) {
       floats[i * 4] = value[i].x;
@@ -121,8 +121,8 @@ struct ParameterSetter {
 
 }  // namespace
 
-Status GlProgram::CreateWithShader(const GlShader& shader,
-                                   GlProgram* gl_program) {
+absl::Status GlProgram::CreateWithShader(const GlShader& shader,
+                                         GlProgram* gl_program) {
   GLuint program_id;
   RETURN_IF_ERROR(CreateNewProgramId(&program_id));
 
@@ -136,11 +136,11 @@ Status GlProgram::CreateWithShader(const GlShader& shader,
   RETURN_IF_ERROR(CheckProgramLinked(program.id()));
 
   *gl_program = std::move(program);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status GlProgram::CreateWithBinaryShader(const BinaryShader& shader,
-                                         GlProgram* gl_program) {
+absl::Status GlProgram::CreateWithBinaryShader(const BinaryShader& shader,
+                                               GlProgram* gl_program) {
   GLuint program_id;
   RETURN_IF_ERROR(CreateNewProgramId(&program_id));
 
@@ -154,15 +154,15 @@ Status GlProgram::CreateWithBinaryShader(const BinaryShader& shader,
   RETURN_IF_ERROR(CheckProgramLinked(program.id()));
 
   *gl_program = std::move(program);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status GlProgram::GetBinary(BinaryShader* binary_shader) {
+absl::Status GlProgram::GetBinary(BinaryShader* binary_shader) {
   GLint size = 0;
   RETURN_IF_ERROR(
       TFLITE_GPU_CALL_GL(glGetProgramiv, id_, GL_PROGRAM_BINARY_LENGTH, &size));
   if (!size) {
-    return InternalError("Getting binary size failed.");
+    return absl::InternalError("Getting binary size failed.");
   }
   // TODO(akulik): call
   // glProgramParameteri(id_, GL_PROGRAM_BINARY_RETRIEVABLE_HINT, GL_TRUE)
@@ -174,10 +174,10 @@ Status GlProgram::GetBinary(BinaryShader* binary_shader) {
                                      &returned_size, &format,
                                      reinterpret_cast<void*>(&binary[0])));
   if (size != returned_size) {
-    return InternalError("Getting binary is failed.");
+    return absl::InternalError("Getting binary is failed.");
   }
   *binary_shader = BinaryShader(format, std::move(binary));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 GlProgram::GlProgram(GlProgram&& program) : id_(program.id_) {
@@ -201,16 +201,16 @@ GlProgram& GlProgram::operator=(GlProgram&& program) {
 
 GlProgram::~GlProgram() { Invalidate(); }
 
-Status GlProgram::SetParameter(const Variable& param) {
+absl::Status GlProgram::SetParameter(const Variable& param) {
   GLint uniform_location;
   RETURN_IF_ERROR(TFLITE_GPU_CALL_GL(glGetUniformLocation, &uniform_location,
                                      id_, param.name.c_str()));
   return absl::visit(ParameterSetter{id_, uniform_location}, param.value);
 }
 
-Status GlProgram::Dispatch(const uint3& workgroups) const {
+absl::Status GlProgram::Dispatch(const uint3& workgroups) const {
   if (workgroups.x == 0 || workgroups.y == 0 || workgroups.z == 0) {
-    return InvalidArgumentError("Invalid workgroups");
+    return absl::InvalidArgumentError("Invalid workgroups");
   }
   RETURN_IF_ERROR(TFLITE_GPU_CALL_GL(glUseProgram, id_));
   return TFLITE_GPU_CALL_GL(glDispatchCompute, workgroups.x, workgroups.y,
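
A minimal end-to-end sketch of the updated pipeline (hypothetical helper name; `source` is assumed to be a valid compute shader, and "counter" an assumed uniform declared in it):

absl::Status RunProgramSketch(const std::string& source) {
  tflite::gpu::gl::GlShader shader;
  RETURN_IF_ERROR(tflite::gpu::gl::GlShader::CompileShader(
      GL_COMPUTE_SHADER, source, &shader));
  tflite::gpu::gl::GlProgram program;
  RETURN_IF_ERROR(
      tflite::gpu::gl::GlProgram::CreateWithShader(shader, &program));
  RETURN_IF_ERROR(program.SetParameter({"counter", 1}));
  return program.Dispatch(tflite::gpu::uint3(8, 8, 1));
}
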
diff --git a/tensorflow/lite/delegates/gpu/gl/gl_program.h b/tensorflow/lite/delegates/gpu/gl/gl_program.h
index dfd6bde4c59..892cb8e0850 100644
--- a/tensorflow/lite/delegates/gpu/gl/gl_program.h
+++ b/tensorflow/lite/delegates/gpu/gl/gl_program.h
@@ -40,12 +40,13 @@ class GlProgram {
   // a program. Thus, if this call returns a program, one may set parameters and
   // finally execute a program.
   // therefore it needs to be handled elsewhere.
-  static Status CreateWithShader(const GlShader& shader, GlProgram* gl_program);
+  static absl::Status CreateWithShader(const GlShader& shader,
+                                       GlProgram* gl_program);
 
   // Same as CreateWithShader but takes compiled shader in a binary form,
   // therefore compilation step is avoided.
-  static Status CreateWithBinaryShader(const BinaryShader& shader,
-                                       GlProgram* gl_program);
+  static absl::Status CreateWithBinaryShader(const BinaryShader& shader,
+                                             GlProgram* gl_program);
 
   // move-only
   GlProgram(GlProgram&& program);
@@ -59,12 +60,12 @@ class GlProgram {
 
   // Returns a binary representation for a shader currently attached and linked
   // into this program.
-  Status GetBinary(BinaryShader* binary_shader);
+  absl::Status GetBinary(BinaryShader* binary_shader);
 
-  Status SetParameter(const Variable& param);
+  absl::Status SetParameter(const Variable& param);
 
   // Executes program
-  Status Dispatch(const uint3& workgroups) const;
+  absl::Status Dispatch(const uint3& workgroups) const;
 
   bool is_valid() const { return id_ != 0; }
 
diff --git a/tensorflow/lite/delegates/gpu/gl/gl_shader.cc b/tensorflow/lite/delegates/gpu/gl/gl_shader.cc
index 32391749985..e3823a24d93 100644
--- a/tensorflow/lite/delegates/gpu/gl/gl_shader.cc
+++ b/tensorflow/lite/delegates/gpu/gl/gl_shader.cc
@@ -42,9 +42,9 @@ GlShader& GlShader::operator=(GlShader&& shader) {
 
 GlShader::~GlShader() { Invalidate(); }
 
-Status GlShader::CompileShader(GLenum shader_type,
-                               const std::string& shader_source,
-                               GlShader* gl_shader) {
+absl::Status GlShader::CompileShader(GLenum shader_type,
+                                     const std::string& shader_source,
+                                     GlShader* gl_shader) {
   // NOTE: shader compilation can fail due to GL errors that happened earlier.
   GLuint shader_id;
   RETURN_IF_ERROR(TFLITE_GPU_CALL_GL(glCreateShader, &shader_id, shader_type));
@@ -64,12 +64,12 @@ Status GlShader::CompileShader(GLenum shader_type,
     glGetShaderiv(shader.id(), GL_INFO_LOG_LENGTH, &info_log_len);
     std::string errors(info_log_len, 0);
     glGetShaderInfoLog(shader.id(), info_log_len, nullptr, &errors[0]);
-    return InternalError("Shader compilation failed: " + errors +
-                         "\nProblem shader is:\n" + shader_source);
+    return absl::InternalError("Shader compilation failed: " + errors +
+                               "\nProblem shader is:\n" + shader_source);
   }
 
   *gl_shader = std::move(shader);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gl
diff --git a/tensorflow/lite/delegates/gpu/gl/gl_shader.h b/tensorflow/lite/delegates/gpu/gl/gl_shader.h
index d0ec421bb16..45adc59207b 100644
--- a/tensorflow/lite/delegates/gpu/gl/gl_shader.h
+++ b/tensorflow/lite/delegates/gpu/gl/gl_shader.h
@@ -33,9 +33,9 @@ class GlShader {
   //
   // @param shader_type is one of GL_VERTEX_SHADER, GL_FRAGMENT_SHADER, or
   // GL_COMPUTE_SHADER.
-  static Status CompileShader(GLenum shader_type,
-                              const std::string& shader_source,
-                              GlShader* gl_shader);
+  static absl::Status CompileShader(GLenum shader_type,
+                                    const std::string& shader_source,
+                                    GlShader* gl_shader);
 
   GlShader() : id_(0) {}
 
diff --git a/tensorflow/lite/delegates/gpu/gl/gl_sync.cc b/tensorflow/lite/delegates/gpu/gl/gl_sync.cc
index 92caaa5c78a..89d3a88d16f 100644
--- a/tensorflow/lite/delegates/gpu/gl/gl_sync.cc
+++ b/tensorflow/lite/delegates/gpu/gl/gl_sync.cc
@@ -25,7 +25,7 @@ namespace tflite {
 namespace gpu {
 namespace gl {
 
-Status GlSyncWait() {
+absl::Status GlSyncWait() {
   GlSync sync;
   RETURN_IF_ERROR(GlSync::NewSync(&sync));
   // Flush sync and loop afterwards without it.
@@ -37,16 +37,16 @@ Status GlSyncWait() {
         break;
       case GL_CONDITION_SATISFIED:
       case GL_ALREADY_SIGNALED:
-        return OkStatus();
+        return absl::OkStatus();
       case GL_WAIT_FAILED:
         return GetOpenGlErrors();
     }
     status = glClientWaitSync(sync.sync(), 0, /* timeout ns = */ 10000000);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status GlActiveSyncWait() {
+absl::Status GlActiveSyncWait() {
   GlSync sync;
   RETURN_IF_ERROR(GlSync::NewSync(&sync));
   // Since creating a Sync object is itself a GL command it *must* be flushed.
@@ -59,7 +59,7 @@ Status GlActiveSyncWait() {
       break;
     case GL_CONDITION_SATISFIED:
     case GL_ALREADY_SIGNALED:
-      return OkStatus();
+      return absl::OkStatus();
     case GL_WAIT_FAILED:
       return GetOpenGlErrors();
   }
@@ -69,7 +69,7 @@ Status GlActiveSyncWait() {
   while (true) {
     glGetSynciv(sync.sync(), GL_SYNC_STATUS, sizeof(GLint), nullptr, &result);
     if (result == GL_SIGNALED) {
-      return OkStatus();
+      return absl::OkStatus();
     }
 #ifdef __ARM_ACLE
     // Try to save CPU power by yielding CPU to another thread.
@@ -78,7 +78,7 @@ Status GlActiveSyncWait() {
   }
 }
 
-Status GlShaderSync::NewSync(GlShaderSync* gl_sync) {
+absl::Status GlShaderSync::NewSync(GlShaderSync* gl_sync) {
   GlShaderSync sync;
   RETURN_IF_ERROR(CreatePersistentBuffer(sizeof(int), &sync.flag_buffer_));
   static const std::string* kCode = new std::string(R"(#version 310 es
@@ -94,16 +94,16 @@ Status GlShaderSync::NewSync(GlShaderSync* gl_sync) {
   RETURN_IF_ERROR(GlShader::CompileShader(GL_COMPUTE_SHADER, *kCode, &shader));
   RETURN_IF_ERROR(GlProgram::CreateWithShader(shader, &sync.flag_program_));
   *gl_sync = std::move(sync);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 // How it works: the GPU writes a buffer and the CPU polls the buffer value
 // until it changes. The buffer is accessible for writing by the GPU and
 // reading by the CPU simultaneously; a persistent buffer or a buffer shared
 // across contexts can be used for that.
-Status GlShaderSync::Wait() {
+absl::Status GlShaderSync::Wait() {
   if (!flag_buffer_.is_valid()) {
-    return UnavailableError("GlShaderSync is not initialized.");
+    return absl::UnavailableError("GlShaderSync is not initialized.");
   }
   RETURN_IF_ERROR(flag_buffer_.BindToIndex(0));
   volatile int* flag_ptr_ = reinterpret_cast<int*>(flag_buffer_.data());
@@ -115,7 +115,7 @@ Status GlShaderSync::Wait() {
   // Wait for the value to be updated by the shader.
   while (*flag_ptr_ != 1) {
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gl
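
The waiting strategies above trade CPU for latency; a sketch of choosing between them (hypothetical helper name):

absl::Status WaitForGpuSketch(bool low_latency) {
  if (low_latency) {
    // Spins while checking sync status; returns sooner but burns CPU.
    return tflite::gpu::gl::GlActiveSyncWait();
  }
  // Blocks in glClientWaitSync with a timeout loop.
  return tflite::gpu::gl::GlSyncWait();
}
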
diff --git a/tensorflow/lite/delegates/gpu/gl/gl_sync.h b/tensorflow/lite/delegates/gpu/gl/gl_sync.h
index dadb4b1192f..8b5d910910d 100644
--- a/tensorflow/lite/delegates/gpu/gl/gl_sync.h
+++ b/tensorflow/lite/delegates/gpu/gl/gl_sync.h
@@ -32,12 +32,12 @@ namespace gl {
 // GlSync is moveable but not copyable.
 class GlSync {
  public:
-  static Status NewSync(GlSync* gl_sync) {
+  static absl::Status NewSync(GlSync* gl_sync) {
     GLsync sync;
     RETURN_IF_ERROR(TFLITE_GPU_CALL_GL(glFenceSync, &sync,
                                        GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
     *gl_sync = GlSync(sync);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   // Creates invalid object.
@@ -75,12 +75,12 @@ class GlSync {
 };
 
 // Waits until GPU is done with processing.
-Status GlSyncWait();
+absl::Status GlSyncWait();
 
 // Waits until all commands are flushed and then performs active waiting by
 // spinning a thread and checking sync status. It leads to shorter wait time
 // (up to tens of ms) but consumes more CPU.
-Status GlActiveSyncWait();
+absl::Status GlActiveSyncWait();
 
 // CPU checks the value in the buffer that is going to be written by GPU. The
 // persistent buffer is used for the simultaneous access to the buffer by GPU
@@ -88,9 +88,9 @@ Status GlActiveSyncWait();
 // is not supported by the device.
 class GlShaderSync {
  public:
-  static Status NewSync(GlShaderSync* gl_sync);
+  static absl::Status NewSync(GlShaderSync* gl_sync);
   GlShaderSync() {}
-  Status Wait();
+  absl::Status Wait();
 
  private:
   GlProgram flag_program_;
diff --git a/tensorflow/lite/delegates/gpu/gl/gl_texture.cc b/tensorflow/lite/delegates/gpu/gl/gl_texture.cc
index eb20deca758..0267a52e44f 100644
--- a/tensorflow/lite/delegates/gpu/gl/gl_texture.cc
+++ b/tensorflow/lite/delegates/gpu/gl/gl_texture.cc
@@ -120,31 +120,31 @@ void GlTexture::Invalidate() {
   }
 }
 
-Status GlTexture::BindImage(uint32_t index, GLenum access) const {
+absl::Status GlTexture::BindImage(uint32_t index, GLenum access) const {
   return TFLITE_GPU_CALL_GL(glBindImageTexture, index, id_, /* level = */ 0,
                             /* layered = */ GL_TRUE, layer_, access, format_);
 }
 
-Status GlTexture::BindAsReadonlyImage(uint32_t index) const {
+absl::Status GlTexture::BindAsReadonlyImage(uint32_t index) const {
   return BindImage(index, GL_READ_ONLY);
 }
 
-Status GlTexture::BindAsWriteonlyImage(uint32_t index) const {
+absl::Status GlTexture::BindAsWriteonlyImage(uint32_t index) const {
   return BindImage(index, GL_WRITE_ONLY);
 }
 
-Status GlTexture::BindAsReadWriteImage(uint32_t index) const {
+absl::Status GlTexture::BindAsReadWriteImage(uint32_t index) const {
   return BindImage(index, GL_READ_WRITE);
 }
 
-Status GlTexture::BindAsSampler2D(uint32_t index) const {
+absl::Status GlTexture::BindAsSampler2D(uint32_t index) const {
   RETURN_IF_ERROR(TFLITE_GPU_CALL_GL(glActiveTexture, GL_TEXTURE0 + index));
   return TFLITE_GPU_CALL_GL(glBindTexture, GL_TEXTURE_2D, id_);
 }
 
 namespace {
 
-Status SetTextureWrapAndFilter(GLenum target, GLenum texture_format) {
+absl::Status SetTextureWrapAndFilter(GLenum target, GLenum texture_format) {
   if (texture_format == GL_RGBA32F) {
     RETURN_IF_ERROR(TFLITE_GPU_CALL_GL(glTexParameteri, target,
                                        GL_TEXTURE_WRAP_S, GL_REPEAT));
@@ -177,14 +177,16 @@ Status SetTextureWrapAndFilter(GLenum target, GLenum texture_format) {
     RETURN_IF_ERROR(TFLITE_GPU_CALL_GL(glTexParameteri, target,
                                        GL_TEXTURE_MIN_FILTER, GL_LINEAR));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CreateReadOnlyRgba2dImageTexture(DataType data_type, const uint2& size,
-                                        const void* data, size_t byte_size,
-                                        GlTexture* gl_texture) {
+absl::Status CreateReadOnlyRgba2dImageTexture(DataType data_type,
+                                              const uint2& size,
+                                              const void* data,
+                                              size_t byte_size,
+                                              GlTexture* gl_texture) {
   if (byte_size != /* RGBA=*/4 * SizeOf(data_type) * size.x * size.y) {
-    return InvalidArgumentError(
+    return absl::InvalidArgumentError(
         "Creating image texture failed. Source data size is not matching "
         "expected dimensions.");
   }
@@ -202,14 +204,16 @@ Status CreateReadOnlyRgba2dImageTexture(DataType data_type, const uint2& size,
                                      0, 0, size.x, size.y, format, type, data));
   *gl_texture = GlTexture(kTarget, id.Release(), internal_format, byte_size, 0,
                           /*owned=*/true);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CreateReadOnlyRgba3dImageTexture(DataType data_type, const uint3& size,
-                                        const void* data, size_t byte_size,
-                                        GlTexture* gl_texture) {
+absl::Status CreateReadOnlyRgba3dImageTexture(DataType data_type,
+                                              const uint3& size,
+                                              const void* data,
+                                              size_t byte_size,
+                                              GlTexture* gl_texture) {
   if (byte_size != /* RGBA=*/4 * SizeOf(data_type) * size.x * size.y * size.z) {
-    return InvalidArgumentError(
+    return absl::InvalidArgumentError(
         "Creating image texture failed. Source data is larger than dimensions "
         "product.");
   }
@@ -228,53 +232,54 @@ Status CreateReadOnlyRgba3dImageTexture(DataType data_type, const uint3& size,
                                      type, data));
   *gl_texture = GlTexture(kTarget, id.Release(), internal_format, byte_size, 0,
                           /*owned=*/true);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace
 
-Status CreateReadOnlyImageTexture(const uint2& size,
-                                  absl::Span<const float> data,
-                                  GlTexture* gl_texture) {
+absl::Status CreateReadOnlyImageTexture(const uint2& size,
+                                        absl::Span<const float> data,
+                                        GlTexture* gl_texture) {
   return CreateReadOnlyRgba2dImageTexture(DataType::FLOAT32, size, data.data(),
                                           data.size() * sizeof(float),
                                           gl_texture);
 }
 
-Status CreateReadOnlyImageTexture(const uint3& size,
-                                  absl::Span<const float> data,
-                                  GlTexture* gl_texture) {
+absl::Status CreateReadOnlyImageTexture(const uint3& size,
+                                        absl::Span<const float> data,
+                                        GlTexture* gl_texture) {
   return CreateReadOnlyRgba3dImageTexture(DataType::FLOAT32, size, data.data(),
                                           data.size() * sizeof(float),
                                           gl_texture);
 }
 
-Status CreateReadOnlyImageTextureU8(const uint2& size,
-                                    absl::Span<const uint8_t> data,
-                                    GlTexture* gl_texture) {
+absl::Status CreateReadOnlyImageTextureU8(const uint2& size,
+                                          absl::Span<const uint8_t> data,
+                                          GlTexture* gl_texture) {
   return CreateReadOnlyRgba2dImageTexture(DataType::UINT8, size, data.data(),
                                           data.size() * sizeof(uint8_t),
                                           gl_texture);
 }
 
-Status CreateReadOnlyImageTextureF16(const uint2& size,
-                                     absl::Span<const uint16_t> data,
-                                     GlTexture* gl_texture) {
+absl::Status CreateReadOnlyImageTextureF16(const uint2& size,
+                                           absl::Span<const uint16_t> data,
+                                           GlTexture* gl_texture) {
   return CreateReadOnlyRgba2dImageTexture(DataType::FLOAT16, size, data.data(),
                                           data.size() * sizeof(uint16_t),
                                           gl_texture);
 }
 
-Status CreateReadOnlyImageTextureF16(const uint3& size,
-                                     absl::Span<const uint16_t> data,
-                                     GlTexture* gl_texture) {
+absl::Status CreateReadOnlyImageTextureF16(const uint3& size,
+                                           absl::Span<const uint16_t> data,
+                                           GlTexture* gl_texture) {
   return CreateReadOnlyRgba3dImageTexture(DataType::FLOAT16, size, data.data(),
                                           data.size() * sizeof(uint16_t),
                                           gl_texture);
 }
 
-Status CreateReadWriteRgbaImageTexture(DataType data_type, const uint2& size,
-                                       GlTexture* gl_texture) {
+absl::Status CreateReadWriteRgbaImageTexture(DataType data_type,
+                                             const uint2& size,
+                                             GlTexture* gl_texture) {
   const GLenum kTarget = GL_TEXTURE_2D;
   const GLenum internal_format = ToTextureInternalFormat(data_type);
   gl_texture_internal::TextureId id;
@@ -287,11 +292,12 @@ Status CreateReadWriteRgbaImageTexture(DataType data_type, const uint2& size,
   *gl_texture = GlTexture(kTarget, id.Release(), internal_format, byte_size,
                           /* layer = */ 0,
                           /* owned = */ true);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CreateReadWriteRgbaImageTexture(DataType data_type, const uint3& size,
-                                       GlTexture* gl_texture) {
+absl::Status CreateReadWriteRgbaImageTexture(DataType data_type,
+                                             const uint3& size,
+                                             GlTexture* gl_texture) {
   const GLenum kTarget = GL_TEXTURE_2D_ARRAY;
   GLenum internal_format = ToTextureInternalFormat(data_type);
   gl_texture_internal::TextureId id;
@@ -305,7 +311,7 @@ Status CreateReadWriteRgbaImageTexture(DataType data_type, const uint3& size,
   *gl_texture = GlTexture(kTarget, id.Release(), internal_format, byte_size,
                           /* layer = */ 0,
                           /* owned = */ true);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gl
diff --git a/tensorflow/lite/delegates/gpu/gl/gl_texture.h b/tensorflow/lite/delegates/gpu/gl/gl_texture.h
index 951b22f23f1..60e22b47229 100644
--- a/tensorflow/lite/delegates/gpu/gl/gl_texture.h
+++ b/tensorflow/lite/delegates/gpu/gl/gl_texture.h
@@ -57,16 +57,16 @@ class GlTexture {
   ~GlTexture();
 
   // Binds a texture as an image to the given index.
-  Status BindAsReadonlyImage(uint32_t index) const;
+  absl::Status BindAsReadonlyImage(uint32_t index) const;
 
   // Binds a texture as an image for write access at the given index.
-  Status BindAsWriteonlyImage(uint32_t index) const;
+  absl::Status BindAsWriteonlyImage(uint32_t index) const;
 
   // Binds a texture as an image for read-write access at the given index.
-  Status BindAsReadWriteImage(uint32_t index) const;
+  absl::Status BindAsReadWriteImage(uint32_t index) const;
 
   // Binds a texture as a sampler to the given index.
-  Status BindAsSampler2D(uint32_t index) const;
+  absl::Status BindAsSampler2D(uint32_t index) const;
 
   GLenum target() const { return target_; }
 
@@ -87,7 +87,7 @@ class GlTexture {
  private:
   void Invalidate();
 
-  Status BindImage(uint32_t index, GLenum access) const;
+  absl::Status BindImage(uint32_t index, GLenum access) const;
 
   GLuint id_;
   GLenum target_;
@@ -101,53 +101,55 @@ class GlTexture {
 // will be used for reading.
 //
 // @param size defines 2D image texture size where each pixel is RGBA.
-Status CreateReadOnlyImageTexture(const uint2& size,
-                                  absl::Span<const float> data,
-                                  GlTexture* gl_texture);
+absl::Status CreateReadOnlyImageTexture(const uint2& size,
+                                        absl::Span<const float> data,
+                                        GlTexture* gl_texture);
 
 // Creates a new 2D image texture that is filled once with float16 data and
 // then used for reading.
 //
 // @param size defines 2D image texture size where each pixel is RGBA.
-Status CreateReadOnlyImageTextureF16(const uint2& size,
-                                     absl::Span<const uint16_t> data,
-                                     GlTexture* gl_texture);
+absl::Status CreateReadOnlyImageTextureF16(const uint2& size,
+                                           absl::Span<const uint16_t> data,
+                                           GlTexture* gl_texture);
 
 // Creates a new 2D image texture that is filled once with uint8 data and
 // then used for reading.
 //
 // @param size defines 2D image texture size where each pixel is RGBA.
-Status CreateReadOnlyImageTextureU8(const uint2& size,
-                                    absl::Span<const uint8_t> data,
-                                    GlTexture* gl_texture);
+absl::Status CreateReadOnlyImageTextureU8(const uint2& size,
+                                          absl::Span<const uint8_t> data,
+                                          GlTexture* gl_texture);
 
 // Creates a new 3D RGBA image texture that is filled once with float32 data
 // and then used for reading.
 //
 // @param size defines 3D image texture size where each pixel is RGBA.
-Status CreateReadOnlyImageTexture(const uint3& size,
-                                  absl::Span<const float> data,
-                                  GlTexture* gl_texture);
+absl::Status CreateReadOnlyImageTexture(const uint3& size,
+                                        absl::Span<const float> data,
+                                        GlTexture* gl_texture);
 
 // Creates a new 3D RGBA image texture that is filled once with float16 data
 // and then used for reading.
 //
 // @param size defines 3D image texture size where each pixel is RGBA.
-Status CreateReadOnlyImageTextureF16(const uint3& size,
-                                     absl::Span<const uint16_t> data,
-                                     GlTexture* gl_texture);
+absl::Status CreateReadOnlyImageTextureF16(const uint3& size,
+                                           absl::Span<const uint16_t> data,
+                                           GlTexture* gl_texture);
 
 // Creates a new RGBA 2D image texture.
 //
 // @param size defines 2D image texture size where each pixel is RGBA.
-Status CreateReadWriteRgbaImageTexture(DataType data_type, const uint2& size,
-                                       GlTexture* gl_texture);
+absl::Status CreateReadWriteRgbaImageTexture(DataType data_type,
+                                             const uint2& size,
+                                             GlTexture* gl_texture);
 
 // Creates a new RGBA 3D image texture.
 //
 // @param size defines 3D image texture size where each pixel is RGBA.
-Status CreateReadWriteRgbaImageTexture(DataType data_type, const uint3& size,
-                                       GlTexture* gl_texture);
+absl::Status CreateReadWriteRgbaImageTexture(DataType data_type,
+                                             const uint3& size,
+                                             GlTexture* gl_texture);
 
 GLenum ToTextureFormat(DataType type);
 
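All of the texture helpers above now return `absl::Status` rather than the delegate's old `Status` alias. A minimal caller sketch of the checking pattern the rest of the delegate relies on (names here are hypothetical stand-ins; the real helpers also require a live GL context and a `GlTexture*` out-parameter):

```cpp
#include <iostream>
#include <string>

#include "absl/status/status.h"

// Hypothetical stand-in for a gl_texture.h helper: absl::OkStatus() on
// success, a typed error otherwise.
absl::Status CreateTextureOrFail(bool context_ready) {
  if (!context_ready) {
    return absl::FailedPreconditionError("no EGL/GL context is bound");
  }
  return absl::OkStatus();
}

int main() {
  const absl::Status status = CreateTextureOrFail(/*context_ready=*/false);
  if (!status.ok()) {
    // message() returns an absl::string_view; copy it for printing.
    std::cerr << "texture creation failed: " << std::string(status.message())
              << "\n";
    return 1;
  }
  return 0;
}
```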
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/add.cc b/tensorflow/lite/delegates/gpu/gl/kernels/add.cc
index 12124a8cc57..135253112ba 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/add.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/add.cc
@@ -34,8 +34,8 @@ namespace {
 
 class Add : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     auto attr = absl::any_cast<AddAttributes>(ctx.node->operation.attributes);
     auto adds = absl::get_if<Tensor<Linear, DataType::FLOAT32>>(&attr.param);
     auto scalar = absl::get_if<float>(&attr.param);
@@ -60,13 +60,13 @@ class Add : public NodeShader {
             /*input=*/IOStructure::ONLY_DEFINITIONS,
             /*output=*/IOStructure::AUTO,
         };
-        return OkStatus();
+        return absl::OkStatus();
       }
 
       std::string code = "value_0 = value_0";
       for (int index = 1; index < inputs.size(); ++index) {
         if (inputs[index]->tensor.shape != inputs[0]->tensor.shape) {
-          return InvalidArgumentError("Shapes are not equal");
+          return absl::InvalidArgumentError("Shapes are not equal");
         }
         absl::StrAppend(&code, " + value_", index);
       }
@@ -81,7 +81,7 @@ class Add : public NodeShader {
           /*input=*/IOStructure::AUTO,
           /*output=*/IOStructure::AUTO,
       };
-      return OkStatus();
+      return absl::OkStatus();
     }
 
     if (scalar) {
@@ -111,7 +111,7 @@ class Add : public NodeShader {
       };
     }
 
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
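`Add::GenerateCode` follows the pattern repeated throughout these kernels: validate the inputs, return a typed absl error on mismatch, and finish with `absl::OkStatus()`. A compressed sketch of its shape check (`Shape` is a hypothetical stand-in for the delegate's BHWC type):

```cpp
#include <cstddef>
#include <vector>

#include "absl/status/status.h"
#include "absl/strings/str_cat.h"

// Hypothetical stand-in for the delegate's tensor shape type.
struct Shape {
  int h = 0, w = 0, c = 0;
};
bool operator!=(const Shape& a, const Shape& b) {
  return a.h != b.h || a.w != b.w || a.c != b.c;
}

// Mirrors the multi-input check in Add::GenerateCode: every input must match
// the first one before "value_0 = value_0 + value_1 + ..." can be emitted.
absl::Status CheckShapesMatch(const std::vector<Shape>& inputs) {
  for (size_t i = 1; i < inputs.size(); ++i) {
    if (inputs[i] != inputs[0]) {
      return absl::InvalidArgumentError(
          absl::StrCat("Shapes are not equal at input ", i));
    }
  }
  return absl::OkStatus();
}
```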
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/concat.cc b/tensorflow/lite/delegates/gpu/gl/kernels/concat.cc
index a97d618e0b6..43afab2922e 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/concat.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/concat.cc
@@ -67,10 +67,10 @@ class AlignedConcatByChannels : public NodeShader {
     return true;
   }
 
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     if (!IsSupported(ctx)) {
-      return InvalidArgumentError(
+      return absl::InvalidArgumentError(
           "This case is not supported by aligned concat");
     }
     auto inputs = ctx.graph->FindInputs(ctx.node->id);
@@ -94,7 +94,7 @@ class AlignedConcatByChannels : public NodeShader {
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::AUTO,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
@@ -127,10 +127,10 @@ class ConcatByAnyChannel : public NodeShader {
     return true;
   }
 
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     if (!IsSupported(ctx)) {
-      return UnimplementedError("This case is not supported by concat");
+      return absl::UnimplementedError("This case is not supported by concat");
     }
 
     auto inputs = ctx.graph->FindInputs(ctx.node->id);
@@ -182,7 +182,7 @@ class ConcatByAnyChannel : public NodeShader {
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::ONLY_DEFINITIONS,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
@@ -348,8 +348,8 @@ class FlatConcatByHeight : public NodeShader {
     return true;
   }
 
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     auto inputs = ctx.graph->FindInputs(ctx.node->id);
     std::string code;
     std::vector<Variable> params;
@@ -382,7 +382,7 @@ class FlatConcatByHeight : public NodeShader {
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::AUTO,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
@@ -415,8 +415,8 @@ class FlatConcatByWidth : public NodeShader {
     return true;
   }
 
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     auto inputs = ctx.graph->FindInputs(ctx.node->id);
     std::string code;
     std::vector<Variable> params;
@@ -449,21 +449,22 @@ class FlatConcatByWidth : public NodeShader {
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::AUTO,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
 class FlatConcat : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     if (FlatConcatByHeight::IsSupported(ctx)) {
       return flat_concat_by_height_.GenerateCode(ctx, generated_code);
     }
     if (FlatConcatByWidth::IsSupported(ctx)) {
       return flat_concat_by_width_.GenerateCode(ctx, generated_code);
     }
-    return InvalidArgumentError("This case is not supported by flat concat");
+    return absl::InvalidArgumentError(
+        "This case is not supported by flat concat");
   }
 
  private:
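`FlatConcat` above is a thin dispatcher: it forwards to the first specialized shader whose `IsSupported` predicate accepts the context, and only errors out when none does. The shape of that logic, reduced to a sketch with hypothetical stand-ins for the two specialized shaders:

```cpp
#include "absl/status/status.h"

// Hypothetical stand-ins for FlatConcatByHeight / FlatConcatByWidth.
bool ByHeightSupported() { return false; }
bool ByWidthSupported() { return true; }
absl::Status GenerateByHeight() { return absl::OkStatus(); }
absl::Status GenerateByWidth() { return absl::OkStatus(); }

// Mirrors FlatConcat::GenerateCode: delegate to the first shader whose
// predicate accepts the context; error out only if none does.
absl::Status DispatchFlatConcat() {
  if (ByHeightSupported()) return GenerateByHeight();
  if (ByWidthSupported()) return GenerateByWidth();
  return absl::InvalidArgumentError(
      "This case is not supported by flat concat");
}
```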
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/conv.cc b/tensorflow/lite/delegates/gpu/gl/kernels/conv.cc
index 0b18a4c4246..5c88402c1d1 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/conv.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/conv.cc
@@ -37,8 +37,8 @@ namespace {
 
 class Convolution : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     auto input = ctx.graph->FindInputs(ctx.node->id)[0];
     auto attr = absl::any_cast<const Convolution2DAttributes&>(
         ctx.node->operation.attributes);
@@ -139,7 +139,7 @@ class Convolution : public NodeShader {
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::AUTO,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
@@ -160,24 +160,24 @@ int SelectMultiplier(int32_t input_width,
 
 class Convolution1x1 : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     auto input = ctx.graph->FindInputs(ctx.node->id)[0];
     auto output = ctx.graph->FindOutputs(ctx.node->id)[0];
     auto attr = absl::any_cast<const Convolution2DAttributes&>(
         ctx.node->operation.attributes);
     if (attr.weights.shape.h != 1 || attr.weights.shape.w != 1) {
-      return UnimplementedError("Height and width should be 1.");
+      return absl::UnimplementedError("Height and width should be 1.");
     }
     if (attr.dilations.h != 1 || attr.dilations.w != 1) {
-      return UnimplementedError("Dilations are not supported.");
+      return absl::UnimplementedError("Dilations are not supported.");
     }
     if (attr.strides.h != 1 || attr.strides.w != 1) {
-      return UnimplementedError("Strides are not supported.");
+      return absl::UnimplementedError("Strides are not supported.");
     }
     if (attr.padding.appended.h != 0 || attr.padding.appended.w != 0 ||
         attr.padding.prepended.h != 0 || attr.padding.prepended.w != 0) {
-      return UnimplementedError("Padding is not supported.");
+      return absl::UnimplementedError("Padding is not supported.");
     }
 
     int multiplier = SelectMultiplier(input->tensor.shape.w, ctx);
@@ -280,7 +280,7 @@ class Convolution1x1 : public NodeShader {
         /*output=*/multiplier == 1 ? IOStructure::AUTO
                                    : IOStructure::ONLY_DEFINITIONS,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
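`Convolution1x1` front-loads its constraints as a chain of `absl::UnimplementedError` returns before any code generation happens, so unsupported graphs fall back cleanly. A sketch of that guard style (`Attr` is a hypothetical slice of `Convolution2DAttributes` with just the checked fields):

```cpp
#include "absl/status/status.h"

// Hypothetical slice of Convolution2DAttributes.
struct Attr {
  int kernel_h = 1, kernel_w = 1;
  int dilation_h = 1, dilation_w = 1;
  int stride_h = 1, stride_w = 1;
};

absl::Status CheckIs1x1(const Attr& attr) {
  if (attr.kernel_h != 1 || attr.kernel_w != 1) {
    return absl::UnimplementedError("Height and width should be 1.");
  }
  if (attr.dilation_h != 1 || attr.dilation_w != 1) {
    return absl::UnimplementedError("Dilations are not supported.");
  }
  if (attr.stride_h != 1 || attr.stride_w != 1) {
    return absl::UnimplementedError("Strides are not supported.");
  }
  return absl::OkStatus();  // safe to emit the specialized 1x1 shader
}
```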
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/converter.cc b/tensorflow/lite/delegates/gpu/gl/kernels/converter.cc
index 189beedf815..bc4c61075a3 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/converter.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/converter.cc
@@ -31,11 +31,11 @@ namespace gl {
 namespace {
 
 // Wraps the given SSBO into a non-owning GlBuffer object.
-Status WrapSSBO(OpenGlBuffer ssbo, GlBuffer* buffer) {
+absl::Status WrapSSBO(OpenGlBuffer ssbo, GlBuffer* buffer) {
   int64_t size_bytes;
   RETURN_IF_ERROR(GetSSBOSize(ssbo.id, &size_bytes));
   *buffer = GlBuffer(GL_SHADER_STORAGE_BUFFER, ssbo.id, size_bytes, 0, false);
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 std::string GetShaderHeader(const uint3& localsize) {
@@ -49,12 +49,12 @@ class OpenGlConverterImpl : public TensorObjectConverter {
   explicit OpenGlConverterImpl(CommandQueue* command_queue)
       : command_queue_(command_queue) {}
 
-  virtual Status Init(const TensorObjectDef& input_def,
-                      const TensorObjectDef& output_def) = 0;
+  virtual absl::Status Init(const TensorObjectDef& input_def,
+                            const TensorObjectDef& output_def) = 0;
 
  protected:
-  Status InitializeProgram(const uint3& workgroup_size,
-                           const std::string& shader_source) {
+  absl::Status InitializeProgram(const uint3& workgroup_size,
+                                 const std::string& shader_source) {
     workgroup_size_ = workgroup_size;
     GlShader shader;
     RETURN_IF_ERROR(GlShader::CompileShader(
@@ -63,7 +63,7 @@ class OpenGlConverterImpl : public TensorObjectConverter {
     return GlProgram::CreateWithShader(shader, &program_);
   }
 
-  Status Dispatch(const uint3& workload) {
+  absl::Status Dispatch(const uint3& workload) {
     uint3 num_workgroups = IntegralDivideRoundUp(workload, workgroup_size_);
     if (command_queue_) {
       return command_queue_->Dispatch(program_, num_workgroups);
@@ -103,12 +103,12 @@ class FromTensorConverter : public OpenGlConverterImpl {
            input.data_layout == DataLayout::DHWC4;
   }
 
-  Status Init(const TensorObjectDef& input_def,
-              const TensorObjectDef& output_def) final {
+  absl::Status Init(const TensorObjectDef& input_def,
+                    const TensorObjectDef& output_def) final {
     shape_ = BHWC(output_def.dimensions.b, output_def.dimensions.h,
                   output_def.dimensions.w, output_def.dimensions.c);
     if (shape_.b != 1) {
-      return UnimplementedError(
+      return absl::UnimplementedError(
           "FromTensorConverter: Batch size != 1 is not supported.");
     }
 
@@ -135,18 +135,18 @@ class FromTensorConverter : public OpenGlConverterImpl {
     })");
   }
 
-  Status Convert(const TensorObject& input_obj,
-                 const TensorObject& output_obj) override {
+  absl::Status Convert(const TensorObject& input_obj,
+                       const TensorObject& output_obj) override {
     auto output = absl::get_if<OpenGlBuffer>(&output_obj);
     if (!output || !output->id) {
-      return InvalidArgumentError("Missing output in converter");
+      return absl::InvalidArgumentError("Missing output in converter");
     }
     auto input = absl::get_if<OpenGlBuffer>(&input_obj);
     if (!input || !input->id) {
-      return InvalidArgumentError("Missing input in converter");
+      return absl::InvalidArgumentError("Missing input in converter");
     }
     if (input->id == output->id) {
-      return InvalidArgumentError("Can not execute inplace conversion");
+      return absl::InvalidArgumentError("Can not execute inplace conversion");
     }
     GlBuffer input_ssbo;
     RETURN_IF_ERROR(WrapSSBO(*input, &input_ssbo));
@@ -154,11 +154,11 @@ class FromTensorConverter : public OpenGlConverterImpl {
     RETURN_IF_ERROR(WrapSSBO(*output, &output_ssbo));
 
     if (input_ssbo.bytes_size() != SizeInBytesDHWC4(shape_)) {
-      return InvalidArgumentError(
+      return absl::InvalidArgumentError(
           "FromTensorConverter: input data size does not match expected size.");
     }
     if (output_ssbo.bytes_size() != SizeInBytesBHWC(shape_)) {
-      return InvalidArgumentError(
+      return absl::InvalidArgumentError(
           "FromTensorConverter: output data size does not match expected "
           "size.");
     }
@@ -191,12 +191,12 @@ class ToTensorConverter : public OpenGlConverterImpl {
            output.data_layout == DataLayout::DHWC4;
   }
 
-  Status Init(const TensorObjectDef& input_def,
-              const TensorObjectDef& output_def) final {
+  absl::Status Init(const TensorObjectDef& input_def,
+                    const TensorObjectDef& output_def) final {
     shape_ = BHWC(output_def.dimensions.b, output_def.dimensions.h,
                   output_def.dimensions.w, output_def.dimensions.c);
     if (shape_.b != 1) {
-      return UnimplementedError(
+      return absl::UnimplementedError(
           "FromTensorConverter: Batch size != 1 is not supported.");
     }
 
@@ -230,18 +230,18 @@ class ToTensorConverter : public OpenGlConverterImpl {
     })");
   }
 
-  Status Convert(const TensorObject& input_obj,
-                 const TensorObject& output_obj) override {
+  absl::Status Convert(const TensorObject& input_obj,
+                       const TensorObject& output_obj) override {
     auto output = absl::get_if<OpenGlBuffer>(&output_obj);
     if (!output || !output->id) {
-      return InvalidArgumentError("Missing output in converter");
+      return absl::InvalidArgumentError("Missing output in converter");
     }
     auto input = absl::get_if<OpenGlBuffer>(&input_obj);
     if (!input || !input->id) {
-      return InvalidArgumentError("Missing input in converter");
+      return absl::InvalidArgumentError("Missing input in converter");
     }
     if (input->id == output->id) {
-      return InvalidArgumentError("Can not execute inplace conversion");
+      return absl::InvalidArgumentError("Can not execute inplace conversion");
     }
     GlBuffer input_ssbo;
     RETURN_IF_ERROR(WrapSSBO(*input, &input_ssbo));
@@ -249,11 +249,11 @@ class ToTensorConverter : public OpenGlConverterImpl {
     RETURN_IF_ERROR(WrapSSBO(*output, &output_ssbo));
 
     if (input_ssbo.bytes_size() != SizeInBytesBHWC(shape_)) {
-      return InvalidArgumentError(
+      return absl::InvalidArgumentError(
           "ToTensorConverter: input data size does not match expected size.");
     }
     if (output_ssbo.bytes_size() != SizeInBytesDHWC4(shape_)) {
-      return InvalidArgumentError(
+      return absl::InvalidArgumentError(
           "ToTensorConverter: output data size does not match expected size.");
     }
     auto d = IntegralDivideRoundUp(shape_.c, 4);
@@ -279,19 +279,19 @@ class TrivialCopier : public TensorObjectConverter {
            input.data_layout == output.data_layout;
   }
 
-  Status Convert(const TensorObject& input_obj,
-                 const TensorObject& output_obj) override {
+  absl::Status Convert(const TensorObject& input_obj,
+                       const TensorObject& output_obj) override {
     auto ssbo_input = absl::get_if<OpenGlBuffer>(&input_obj);
     auto ssbo_output = absl::get_if<OpenGlBuffer>(&output_obj);
     if (ssbo_input && ssbo_output) {
       return Copy(*ssbo_input, *ssbo_output);
     }
-    return InternalError("Unexpected object");
+    return absl::InternalError("Unexpected object");
   }
 
-  Status Copy(OpenGlBuffer input, OpenGlBuffer output) {
+  absl::Status Copy(OpenGlBuffer input, OpenGlBuffer output) {
     if (input.id == output.id) {
-      return OkStatus();
+      return absl::OkStatus();
     }
     GlBuffer input_obj;
     RETURN_IF_ERROR(WrapSSBO(input, &input_obj));
@@ -313,8 +313,8 @@ class CpuCopier : public TensorObjectConverter {
              input.object_type == ObjectType::OPENGL_SSBO));
   }
 
-  Status Convert(const TensorObject& input_obj,
-                 const TensorObject& output_obj) override {
+  absl::Status Convert(const TensorObject& input_obj,
+                       const TensorObject& output_obj) override {
     auto cpu_input = absl::get_if<CpuMemory>(&input_obj);
     auto cpu_output = absl::get_if<CpuMemory>(&output_obj);
     if (cpu_input) {
@@ -335,7 +335,7 @@ class CpuCopier : public TensorObjectConverter {
             static_cast<uint8_t*>(cpu_output->data), cpu_output->size_bytes));
       }
     }
-    return InternalError("Unexpected object");
+    return absl::InternalError("Unexpected object");
   }
 };
 
@@ -355,7 +355,7 @@ class TensorConverterBuilderImpl : public TensorObjectConverterBuilder {
             ToTensorConverter::IsSupported(input_def, output_def));
   }
 
-  Status MakeConverter(
+  absl::Status MakeConverter(
       const TensorObjectDef& input, const TensorObjectDef& output,
       std::unique_ptr<TensorObjectConverter>* converter) final {
     std::unique_ptr<OpenGlConverterImpl> impl;
@@ -363,20 +363,22 @@ class TensorConverterBuilderImpl : public TensorObjectConverterBuilder {
     const auto& output_def = output.object_def;
     if (TrivialCopier::IsSupported(input_def, output_def)) {
       *converter = absl::make_unique<TrivialCopier>();
-      return OkStatus();
-    } else if (CpuCopier::IsSupported(input_def, output_def)) {
+      return absl::OkStatus();
+    }
+    if (CpuCopier::IsSupported(input_def, output_def)) {
       *converter = absl::make_unique<CpuCopier>();
-      return OkStatus();
-    } else if (FromTensorConverter::IsSupported(input_def, output_def)) {
+      return absl::OkStatus();
+    }
+    if (FromTensorConverter::IsSupported(input_def, output_def)) {
       impl = absl::make_unique<FromTensorConverter>(command_queue_);
     } else if (ToTensorConverter::IsSupported(input_def, output_def)) {
       impl = absl::make_unique<ToTensorConverter>(command_queue_);
     } else {
-      return UnimplementedError("Unsupported conversion");
+      return absl::UnimplementedError("Unsupported conversion");
     }
     RETURN_IF_ERROR(impl->Init(input, output));
     *converter = std::move(impl);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
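The converter code keeps these pipelines flat with `RETURN_IF_ERROR`, which after this migration must operate on `absl::Status`. The real macro lives in tensorflow/lite/delegates/gpu/common/status.h and may differ in detail; a minimal equivalent, for illustration only:

```cpp
#include "absl/status/status.h"

// Minimal illustration of an early-return macro over absl::Status.
#define RETURN_IF_ERROR(expr)            \
  do {                                   \
    const absl::Status _status = (expr); \
    if (!_status.ok()) return _status;   \
  } while (0)

absl::Status Step(bool ok) {
  return ok ? absl::OkStatus() : absl::InternalError("step failed");
}

absl::Status Pipeline() {
  RETURN_IF_ERROR(Step(true));   // ok: execution continues
  RETURN_IF_ERROR(Step(false));  // propagates the InternalError to the caller
  return absl::OkStatus();
}
```

The same hunk also flattens the `else if` ladder in `MakeConverter` into early returns, which reads more naturally once every branch ends in a `return absl::...` anyway.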
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/converter_test.cc b/tensorflow/lite/delegates/gpu/gl/kernels/converter_test.cc
index daba2f6d9ef..5f14f093c55 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/converter_test.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/converter_test.cc
@@ -45,7 +45,7 @@ Dimensions ToDimensions(const BHWC& shape) {
   return Dimensions(shape.b, shape.h, shape.w, shape.c);
 }
 
-Status RunFromTensorTest(const BHWC& shape) {
+absl::Status RunFromTensorTest(const BHWC& shape) {
   // Create random input and calculate expected output for it.
   std::vector<float> input =
       GenerateFloats(0.01, GetElementsSizeForPHWC4(shape));
@@ -85,9 +85,9 @@ Status RunFromTensorTest(const BHWC& shape) {
   RETURN_IF_ERROR(output_buffer.Read(
       absl::MakeSpan(converted_output.data(), converted_output.size())));
   if (output != converted_output) {
-    return InternalError("Outputs don't match");
+    return absl::InternalError("Outputs don't match");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 TEST(FromTensor, Smoke) {
@@ -103,7 +103,7 @@ TEST(FromTensor, Smoke) {
   }
 }
 
-Status RunToTensorTest(const BHWC& shape) {
+absl::Status RunToTensorTest(const BHWC& shape) {
   // Create random input and calculate expected output for it.
   std::vector<float> input = GenerateFloats(0.01, shape.DimensionsProduct());
   std::vector<float> output(GetElementsSizeForPHWC4(shape), 0);
@@ -142,9 +142,9 @@ Status RunToTensorTest(const BHWC& shape) {
   RETURN_IF_ERROR(output_buffer.Read(
       absl::MakeSpan(converted_output.data(), converted_output.size())));
   if (output != converted_output) {
-    return InternalError("Outputs don't match");
+    return absl::InternalError("Outputs don't match");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 TEST(ToTensor, Smoke) {
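The tests consume these helpers by asserting on the returned status. A hedged sketch of that pattern with googletest (`RunCheck` is a hypothetical stand-in for `RunFromTensorTest` / `RunToTensorTest`):

```cpp
#include <gtest/gtest.h>

#include "absl/status/status.h"

// Hypothetical stand-in for the Run*TensorTest helpers.
absl::Status RunCheck(bool outputs_match) {
  if (!outputs_match) return absl::InternalError("Outputs don't match");
  return absl::OkStatus();
}

TEST(StatusMigration, SurfacesMessageOnFailure) {
  const absl::Status status = RunCheck(true);
  // On failure, message() would carry "Outputs don't match".
  EXPECT_TRUE(status.ok()) << status.message();
}
```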
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/depthwise_conv.cc b/tensorflow/lite/delegates/gpu/gl/kernels/depthwise_conv.cc
index a8d71a943b7..38ddbf361b4 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/depthwise_conv.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/depthwise_conv.cc
@@ -36,8 +36,8 @@ namespace {
 
 class DepthwiseConvolution : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     auto input = ctx.graph->FindInputs(ctx.node->id)[0];
     auto attr = absl::any_cast<const DepthwiseConvolution2DAttributes&>(
         ctx.node->operation.attributes);
@@ -146,7 +146,7 @@ class DepthwiseConvolution : public NodeShader {
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::AUTO,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/elementwise.cc b/tensorflow/lite/delegates/gpu/gl/kernels/elementwise.cc
index 35b233cbdcc..aa254770535 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/elementwise.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/elementwise.cc
@@ -31,8 +31,8 @@ class ElementwiseOneArgument : public NodeShader {
  public:
   explicit ElementwiseOneArgument(OperationType operation_type)
       : operation_type_(operation_type) {}
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     std::string source;
     switch (operation_type_) {
       case OperationType::ABS:
@@ -89,7 +89,8 @@ class ElementwiseOneArgument : public NodeShader {
         source = "value_0 = tanh(value_0);";
         break;
       default:
-        return InvalidArgumentError("Incorrect elementwise operation type.");
+        return absl::InvalidArgumentError(
+            "Incorrect elementwise operation type.");
     }
     *generated_code = {
         /*parameters=*/{},
@@ -101,7 +102,7 @@ class ElementwiseOneArgument : public NodeShader {
         /*input=*/IOStructure::AUTO,
         /*output=*/IOStructure::AUTO,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
@@ -144,8 +145,8 @@ class ElementwiseTwoArguments : public NodeShader {
     return true;
   }
 
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     std::vector<Variable> parameters;
     std::vector<std::pair<std::string, Object>> objects;
     std::string argument0, argument1;
@@ -159,7 +160,7 @@ class ElementwiseTwoArguments : public NodeShader {
       const ElementwiseAttributes* attr = absl::any_cast<ElementwiseAttributes>(
           &ctx.node->operation.attributes);
       if (!attr) {
-        return InvalidArgumentError(
+        return absl::InvalidArgumentError(
             "Couldn't read attributes for the scalar of const vector case.");
       }
       auto* tensor =
@@ -167,7 +168,7 @@ class ElementwiseTwoArguments : public NodeShader {
               &attr->param);
       auto* scalar = absl::get_if<float>(&attr->param);
       if (!tensor && !scalar) {
-        return InvalidArgumentError(
+        return absl::InvalidArgumentError(
             "Couldn't read scalar of const vector data from the attributes.");
       }
 
@@ -208,7 +209,7 @@ class ElementwiseTwoArguments : public NodeShader {
         break;
       }
       default:
-        return InvalidArgumentError(
+        return absl::InvalidArgumentError(
             "Incorrect elementwise with scalar operation type.");
     }
     source = absl::Substitute(source, argument0, argument1);
@@ -222,7 +223,7 @@ class ElementwiseTwoArguments : public NodeShader {
         /*input=*/IOStructure::AUTO,
         /*output=*/IOStructure::AUTO,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/fully_connected.cc b/tensorflow/lite/delegates/gpu/gl/kernels/fully_connected.cc
index f4ad5b8cc0a..a8246515247 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/fully_connected.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/fully_connected.cc
@@ -34,8 +34,8 @@ namespace {
 
 class FullyConnectedBuffers : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     auto attr = absl::any_cast<const FullyConnectedAttributes&>(
         ctx.node->operation.attributes);
 
@@ -106,7 +106,7 @@ class FullyConnectedBuffers : public NodeShader {
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::ONLY_DEFINITIONS,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/lstm.cc b/tensorflow/lite/delegates/gpu/gl/kernels/lstm.cc
index e248cdfb31a..7179ba00581 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/lstm.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/lstm.cc
@@ -43,8 +43,8 @@ namespace {
 //
 class LstmNodeShader : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     std::string code = R"(
       vec4 prev_state  = $input_data_1[gid.x, gid.y, gid.z]$;
 
@@ -80,7 +80,7 @@ class LstmNodeShader : public NodeShader {
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::AUTO,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/max_unpooling.cc b/tensorflow/lite/delegates/gpu/gl/kernels/max_unpooling.cc
index 2e977625489..c8961eee087 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/max_unpooling.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/max_unpooling.cc
@@ -33,8 +33,8 @@ namespace {
 
 class MaxUnpooling : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     auto attr = absl::any_cast<MaxUnpooling2DAttributes>(
         ctx.node->operation.attributes);
     std::vector<Variable> parameters = {
@@ -66,7 +66,7 @@ class MaxUnpooling : public NodeShader {
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::AUTO,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/mean.cc b/tensorflow/lite/delegates/gpu/gl/kernels/mean.cc
index 9328351f169..e94c952ffaa 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/mean.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/mean.cc
@@ -32,11 +32,11 @@ namespace {
 
 class Mean : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     auto attr = absl::any_cast<MeanAttributes>(ctx.node->operation.attributes);
     if (attr.dims != std::set<Axis>({Axis::HEIGHT, Axis::WIDTH})) {
-      return InvalidArgumentError(
+      return absl::InvalidArgumentError(
           "Mean calculation is supported only for height and width.");
     }
 
@@ -72,7 +72,7 @@ class Mean : public NodeShader {
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::AUTO,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/mul.cc b/tensorflow/lite/delegates/gpu/gl/kernels/mul.cc
index 7de4caea81d..6e825dc862d 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/mul.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/mul.cc
@@ -52,8 +52,8 @@ bool IsApplyMaskSupported(const NodeShader::GenerationContext& ctx) {
   return shape1.h == 1 && shape1.w == 1 && shape0.c == shape1.c;
 }
 
-Status GenerateApplyMaskCode(const NodeShader::GenerationContext& ctx,
-                             GeneratedCode* generated_code) {
+absl::Status GenerateApplyMaskCode(const NodeShader::GenerationContext& ctx,
+                                   GeneratedCode* generated_code) {
   const auto inputs = ctx.graph->FindInputs(ctx.node->id);
   const auto& shape0 = inputs[0]->tensor.shape;
   const auto& shape1 = inputs[1]->tensor.shape;
@@ -80,11 +80,11 @@ Status GenerateApplyMaskCode(const NodeShader::GenerationContext& ctx,
       /*input=*/IOStructure::ONLY_DEFINITIONS,
       /*output=*/IOStructure::AUTO,
   };
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status GenerateMultiplyScalarCode(const NodeShader::GenerationContext& ctx,
-                                  GeneratedCode* generated_code) {
+absl::Status GenerateMultiplyScalarCode(
+    const NodeShader::GenerationContext& ctx, GeneratedCode* generated_code) {
   auto attr =
       absl::any_cast<MultiplyAttributes>(ctx.node->operation.attributes);
   auto muls = absl::get_if<Tensor<Linear, DataType::FLOAT32>>(&attr.param);
@@ -103,7 +103,7 @@ Status GenerateMultiplyScalarCode(const NodeShader::GenerationContext& ctx,
     };
   } else {
     if (!muls) {
-      return InvalidArgumentError("Empty parameters for Multiplication.");
+      return absl::InvalidArgumentError("Empty parameters for Multiplication.");
     }
     auto shape = ctx.graph->FindInputs(ctx.node->id)[0]->tensor.shape;
     *generated_code = {
@@ -120,13 +120,13 @@ Status GenerateMultiplyScalarCode(const NodeShader::GenerationContext& ctx,
     };
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 class Multiply : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     if (IsApplyMaskSupported(ctx)) {
       return GenerateApplyMaskCode(ctx, generated_code);
     } else {
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/pad.cc b/tensorflow/lite/delegates/gpu/gl/kernels/pad.cc
index 14fe55d943a..3fc84aa675e 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/pad.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/pad.cc
@@ -34,22 +34,22 @@ namespace {
 
 class Pad : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     auto input = ctx.graph->FindInputs(ctx.node->id)[0];
     auto attr = absl::any_cast<PadAttributes>(ctx.node->operation.attributes);
 
     if (attr.type != PaddingContentType::ZEROS &&
         attr.type != PaddingContentType::REFLECT) {
-      return UnimplementedError(
+      return absl::UnimplementedError(
           "Only ZERO and REFLECT padding types are supported.");
     }
     if (attr.appended.h < 0 || attr.appended.w < 0 || attr.appended.c < 0 ||
         attr.prepended.h < 0 || attr.prepended.w < 0 || attr.prepended.c < 0) {
-      return UnimplementedError("Negative padding is not supported.");
+      return absl::UnimplementedError("Negative padding is not supported.");
     }
     if (attr.appended.b != 0 || attr.prepended.b != 0) {
-      return UnimplementedError("Padding for BATCH is not supported.");
+      return absl::UnimplementedError("Padding for BATCH is not supported.");
     }
     std::vector<Variable> parameters = {
         {"input_data_0_h", input->tensor.shape.h},
@@ -130,7 +130,7 @@ class Pad : public NodeShader {
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::AUTO,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/pooling.cc b/tensorflow/lite/delegates/gpu/gl/kernels/pooling.cc
index 8f140c33fca..5c6aefcde1c 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/pooling.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/pooling.cc
@@ -31,14 +31,14 @@ namespace gpu {
 namespace gl {
 namespace {
 
-Status GenerateMaxPoolingCode(const Pooling2DAttributes& attr,
-                              const NodeShader::GenerationContext& ctx,
-                              GeneratedCode* generated_code) {
+absl::Status GenerateMaxPoolingCode(const Pooling2DAttributes& attr,
+                                    const NodeShader::GenerationContext& ctx,
+                                    GeneratedCode* generated_code) {
   auto input = ctx.graph->FindInputs(ctx.node->id)[0];
 
   if (attr.padding.prepended.h > attr.kernel.h ||
       attr.padding.prepended.w > attr.kernel.w) {
-    return InvalidArgumentError("Padding is bigger than kernel.");
+    return absl::InvalidArgumentError("Padding is bigger than kernel.");
   }
 
   std::vector<Variable> parameters = {
@@ -94,12 +94,12 @@ Status GenerateMaxPoolingCode(const Pooling2DAttributes& attr,
       /*input=*/IOStructure::ONLY_DEFINITIONS,
       /*output=*/IOStructure::AUTO,
   };
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status GenerateAveragePoolingCode(const Pooling2DAttributes& attr,
-                                  const NodeShader::GenerationContext& ctx,
-                                  GeneratedCode* generated_code) {
+absl::Status GenerateAveragePoolingCode(
+    const Pooling2DAttributes& attr, const NodeShader::GenerationContext& ctx,
+    GeneratedCode* generated_code) {
   auto input = ctx.graph->FindInputs(ctx.node->id)[0];
 
   std::vector<Variable> parameters = {
@@ -136,13 +136,13 @@ Status GenerateAveragePoolingCode(const Pooling2DAttributes& attr,
       /*input=*/IOStructure::ONLY_DEFINITIONS,
       /*output=*/IOStructure::AUTO,
   };
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 class Pooling : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     const auto& attr =
         absl::any_cast<Pooling2DAttributes>(ctx.node->operation.attributes);
     switch (attr.type) {
@@ -151,7 +151,7 @@ class Pooling : public NodeShader {
       case PoolingType::MAX:
         return GenerateMaxPoolingCode(attr, ctx, generated_code);
       default:
-        return InvalidArgumentError("Incorrect attributes' type.");
+        return absl::InvalidArgumentError("Incorrect attributes' type.");
     }
   }
 };
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/prelu.cc b/tensorflow/lite/delegates/gpu/gl/kernels/prelu.cc
index 88078935ee2..28f8551f530 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/prelu.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/prelu.cc
@@ -35,17 +35,17 @@ namespace {
 
 class PReLULinearAlpha : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     auto output = ctx.graph->FindOutputs(ctx.node->id)[0];
     auto attr =
         absl::any_cast<const PReLUAttributes&>(ctx.node->operation.attributes);
     auto alpha = absl::get_if<Tensor<Linear, DataType::FLOAT32>>(&attr.alpha);
     if (!alpha) {
-      return InvalidArgumentError("Alpha is missing");
+      return absl::InvalidArgumentError("Alpha is missing");
     }
     if (alpha->shape.v != output->tensor.shape.c) {
-      return InvalidArgumentError(
+      return absl::InvalidArgumentError(
           "Alpha shape does not match the number of channels.");
     }
 
@@ -79,25 +79,26 @@ class PReLULinearAlpha : public NodeShader {
                   /*input=*/IOStructure::AUTO,
                   /*output=*/IOStructure::AUTO,
               };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
 class PReLUFull : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     auto output = ctx.graph->FindOutputs(ctx.node->id)[0];
     auto attr =
         absl::any_cast<const PReLUAttributes&>(ctx.node->operation.attributes);
     auto alpha = absl::get_if<Tensor<HWC, DataType::FLOAT32>>(&attr.alpha);
     if (!alpha) {
-      return InvalidArgumentError("Alpha is missing");
+      return absl::InvalidArgumentError("Alpha is missing");
     }
     if (alpha->shape.h != output->tensor.shape.h ||
         alpha->shape.w != output->tensor.shape.w ||
         alpha->shape.c != output->tensor.shape.c) {
-      return InvalidArgumentError("Alpha shape does not match input shape.");
+      return absl::InvalidArgumentError(
+          "Alpha shape does not match input shape.");
     }
 
     auto shape = output->tensor.shape;
@@ -141,14 +142,14 @@ class PReLUFull : public NodeShader {
                   /*input=*/IOStructure::AUTO,
                   /*output=*/IOStructure::AUTO,
               };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
 class PReLU : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     auto attr =
         absl::any_cast<const PReLUAttributes&>(ctx.node->operation.attributes);
     auto alpha = absl::get_if<Tensor<HWC, DataType::FLOAT32>>(&attr.alpha);
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize.cc b/tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize.cc
index 3f21124aee9..1d45e07aeee 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize.cc
@@ -31,8 +31,8 @@ namespace {
 
 class QuantizeAndDequantize : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     std::string code;
     // Constants
     code += "vec4 scale = vec4($quant_scale$);";
@@ -59,7 +59,7 @@ class QuantizeAndDequantize : public NodeShader {
         /*input=*/IOStructure::AUTO,
         /*output=*/IOStructure::AUTO,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/registry.cc b/tensorflow/lite/delegates/gpu/gl/kernels/registry.cc
index 6903abc0b26..8f6de92acd8 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/registry.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/registry.cc
@@ -120,19 +120,19 @@ class Registry : public NodeShader {
 
   ~Registry() final = default;
 
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     std::vector<std::string> errors;
     auto it = shaders_.find(ctx.node->operation.type);
     if (it != shaders_.end()) {
       for (auto& shader : it->second) {
         const auto status = shader->GenerateCode(ctx, generated_code);
         if (status.ok()) return status;
-        errors.push_back(status.error_message());
+        errors.push_back(std::string(status.message()));
       }
     }
-    return NotFoundError(absl::StrCat("Suitable node shader is not found: ",
-                                      absl::StrJoin(errors, ", ")));
+    return absl::NotFoundError(absl::StrCat(
+        "Suitable node shader is not found: ", absl::StrJoin(errors, ", ")));
   }
 
  private:
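One behavioral wrinkle in this hunk: the old `Status::error_message()` handed back a `std::string`, while `absl::Status::message()` returns an `absl::string_view`, so storing it in a `std::vector<std::string>` needs an explicit copy, exactly as the registry now does. Demonstrated in isolation:

```cpp
#include <string>
#include <vector>

#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"

absl::Status AggregateShaderErrors() {
  std::vector<std::string> errors;
  // message() is a view into the status; copy it before the status goes away.
  errors.push_back(std::string(
      absl::InvalidArgumentError("shader A rejected node").message()));
  errors.push_back(std::string(
      absl::UnimplementedError("shader B rejected node").message()));
  return absl::NotFoundError(absl::StrCat(
      "Suitable node shader is not found: ", absl::StrJoin(errors, ", ")));
}
```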
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/relu.cc b/tensorflow/lite/delegates/gpu/gl/kernels/relu.cc
index a8e006ed151..a9357968a90 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/relu.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/relu.cc
@@ -33,8 +33,8 @@ namespace {
 
 class ReLU : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     auto attr = absl::any_cast<ReLUAttributes>(ctx.node->operation.attributes);
     // clamp(value, min(0, alpha * value), clip)
     std::vector<Variable> params;
@@ -62,7 +62,7 @@ class ReLU : public NodeShader {
         /*input=*/IOStructure::AUTO,
         /*output=*/IOStructure::AUTO,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/reshape.cc b/tensorflow/lite/delegates/gpu/gl/kernels/reshape.cc
index cd01417cff5..9734ff14a1e 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/reshape.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/reshape.cc
@@ -32,19 +32,19 @@ namespace {
 
 class Reshape : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     auto input = ctx.graph->FindInputs(ctx.node->id)[0];
     auto output = ctx.graph->FindOutputs(ctx.node->id)[0];
     if (input->tensor.shape.DimensionsProduct() !=
         output->tensor.shape.DimensionsProduct()) {
-      return InvalidArgumentError(
+      return absl::InvalidArgumentError(
           "Number of elements in input & output tensors don't match.");
     }
     auto attr =
         absl::any_cast<ReshapeAttributes>(ctx.node->operation.attributes);
     if (attr.new_shape != output->tensor.shape) {
-      return InvalidArgumentError(
+      return absl::InvalidArgumentError(
           "Dimensions for output does not match new_shape attribute");
     }
 
@@ -80,7 +80,7 @@ class Reshape : public NodeShader {
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::AUTO,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/resize.cc b/tensorflow/lite/delegates/gpu/gl/kernels/resize.cc
index 33d59518987..004ae14fe8b 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/resize.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/resize.cc
@@ -33,10 +33,8 @@ namespace {
 
 class Resize : public NodeShader {
  public:
-  Resize() {}
-
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     auto input = ctx.graph->FindInputs(ctx.node->id)[0];
     auto output = ctx.graph->FindOutputs(ctx.node->id)[0];
     auto attr =
@@ -44,15 +42,15 @@ class Resize : public NodeShader {
 
     if (input->tensor.shape.w > output->tensor.shape.w ||
         input->tensor.shape.h > output->tensor.shape.h) {
-      return InvalidArgumentError("Output size is less than input size.");
+      return absl::InvalidArgumentError("Output size is less than input size.");
     }
     if (output->tensor.shape.w != attr.new_shape.w ||
         output->tensor.shape.h != attr.new_shape.h) {
-      return InvalidArgumentError(
+      return absl::InvalidArgumentError(
           "Output size does not match new_size in attributes.");
     }
     if (input->tensor.shape.c != output->tensor.shape.c) {
-      return InvalidArgumentError("Input/output channels mismatch.");
+      return absl::InvalidArgumentError("Input/output channels mismatch.");
     }
     if (input->tensor.shape.h == 1 && input->tensor.shape.w == 1) {
       // Copy a single element from input.
@@ -66,7 +64,7 @@ class Resize : public NodeShader {
           /*input=*/IOStructure::ONLY_DEFINITIONS,
           /*output=*/IOStructure::AUTO,
       };
-      return OkStatus();
+      return absl::OkStatus();
     }
     std::vector<Variable> parameters = {
         {"input_data_0_h", input->tensor.shape.h},
@@ -107,7 +105,7 @@ class Resize : public NodeShader {
       value_0 = $input_data_0[coord.x, coord.y, gid.z]$;
       )";
     } else {
-      return InvalidArgumentError("Unknown sampling type");
+      return absl::InvalidArgumentError("Unknown sampling type");
     }
     *generated_code = {
         /*parameters=*/std::move(parameters),
@@ -119,7 +117,7 @@ class Resize : public NodeShader {
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::AUTO,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/slice.cc b/tensorflow/lite/delegates/gpu/gl/kernels/slice.cc
index d0fe1923d4e..ab4497c4b62 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/slice.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/slice.cc
@@ -33,8 +33,8 @@ namespace {
 
 class Slice : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     auto output = ctx.graph->FindOutputs(ctx.node->id)[0];
 
     auto attr =
@@ -107,7 +107,7 @@ class Slice : public NodeShader {
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::AUTO,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/softmax.cc b/tensorflow/lite/delegates/gpu/gl/kernels/softmax.cc
index e59343df7b6..b6c8e144a09 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/softmax.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/softmax.cc
@@ -41,17 +41,19 @@ float4 GetMask(int num_channels) {
 
 class Softmax : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     const auto* input = ctx.graph->FindInputs(ctx.node->id)[0];
     const auto* output = ctx.graph->FindOutputs(ctx.node->id)[0];
     const auto& attr = absl::any_cast<const SoftmaxAttributes&>(
         ctx.node->operation.attributes);
     if (input->tensor.shape != output->tensor.shape) {
-      return InvalidArgumentError("Input and output shapes do not match.");
+      return absl::InvalidArgumentError(
+          "Input and output shapes do not match.");
     }
     if (attr.axis != Axis::CHANNELS) {
-      return UnimplementedError("Softmax is only supported for channels axis.");
+      return absl::UnimplementedError(
+          "Softmax is only supported for channels axis.");
     }
     return input->tensor.shape.h == 1 && input->tensor.shape.w == 1
                ? GenerateCodeFor1x1(ctx, generated_code)
@@ -59,8 +61,8 @@ class Softmax : public NodeShader {
   }
 
  private:
-  Status GenerateCodeFor1x1(const GenerationContext& ctx,
-                            GeneratedCode* generated_code) const {
+  absl::Status GenerateCodeFor1x1(const GenerationContext& ctx,
+                                  GeneratedCode* generated_code) const {
     const auto* output = ctx.graph->FindOutputs(ctx.node->id)[0];
     const int depth = IntegralDivideRoundUp(output->tensor.shape.c, 4);
     std::vector<Variable> shared_variables = {
@@ -133,11 +135,11 @@ class Softmax : public NodeShader {
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::ONLY_DEFINITIONS,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status GenerateCodeGeneral(const GenerationContext& ctx,
-                             GeneratedCode* generated_code) const {
+  absl::Status GenerateCodeGeneral(const GenerationContext& ctx,
+                                   GeneratedCode* generated_code) const {
     const auto* output = ctx.graph->FindOutputs(ctx.node->id)[0];
     std::vector<Variable> parameters = {
         {"src_depth", IntegralDivideRoundUp(output->tensor.shape.c, 4)},
@@ -172,7 +174,7 @@ class Softmax : public NodeShader {
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::ONLY_DEFINITIONS,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/space_to_depth.cc b/tensorflow/lite/delegates/gpu/gl/kernels/space_to_depth.cc
index 1d49da0e3fa..b1e650a1ffc 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/space_to_depth.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/space_to_depth.cc
@@ -31,8 +31,8 @@ namespace {
 
 class SpaceToDepth : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     const auto attr =
         absl::any_cast<SpaceToDepthAttributes>(ctx.node->operation.attributes);
     const auto& input_data_0 = ctx.graph->FindInputs(ctx.node->id)[0]->tensor;
@@ -60,7 +60,7 @@ class SpaceToDepth : public NodeShader {
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::AUTO,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 }  // namespace
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/test_util.cc b/tensorflow/lite/delegates/gpu/gl/kernels/test_util.cc
index de6e324017d..e9abec7eec6 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/test_util.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/test_util.cc
@@ -68,9 +68,9 @@ bool SingleOpModel::PopulateTensor(int index, std::vector<float>&& data) {
   return true;
 }
 
-Status SingleOpModel::Invoke(const CompilationOptions& compile_options,
-                             const RuntimeOptions& runtime_options,
-                             const NodeShader& shader) {
+absl::Status SingleOpModel::Invoke(const CompilationOptions& compile_options,
+                                   const RuntimeOptions& runtime_options,
+                                   const NodeShader& shader) {
   std::unique_ptr<EglEnvironment> env;
   RETURN_IF_ERROR(EglEnvironment::NewEglEnvironment(&env));
 
@@ -125,10 +125,10 @@ Status SingleOpModel::Invoke(const CompilationOptions& compile_options,
         CopyFromPHWC4Buffer(*objects.FindBuffer(output->id), &tensor));
     outputs_.push_back(std::move(tensor));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status SingleOpModel::Invoke(const NodeShader& shader) {
+absl::Status SingleOpModel::Invoke(const NodeShader& shader) {
   return Invoke(CompilationOptions(), RuntimeOptions(), shader);
 }
 
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/test_util.h b/tensorflow/lite/delegates/gpu/gl/kernels/test_util.h
index c917220d075..42a789020df 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/test_util.h
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/test_util.h
@@ -48,10 +48,10 @@ class SingleOpModel {
 
   bool PopulateTensor(int index, std::vector<float>&& data);
 
-  Status Invoke(const NodeShader& shader);
-  Status Invoke(const CompilationOptions& compile_options,
-                const RuntimeOptions& runtime_options,
-                const NodeShader& shader);
+  absl::Status Invoke(const NodeShader& shader);
+  absl::Status Invoke(const CompilationOptions& compile_options,
+                      const RuntimeOptions& runtime_options,
+                      const NodeShader& shader);
 
   const std::vector<float>& GetOutput(int index) const {
     return outputs_[index].data;
diff --git a/tensorflow/lite/delegates/gpu/gl/kernels/transpose_conv.cc b/tensorflow/lite/delegates/gpu/gl/kernels/transpose_conv.cc
index 7fcfde4f92a..eb28672d49f 100644
--- a/tensorflow/lite/delegates/gpu/gl/kernels/transpose_conv.cc
+++ b/tensorflow/lite/delegates/gpu/gl/kernels/transpose_conv.cc
@@ -35,8 +35,8 @@ namespace {
 
 class ConvolutionTransposedBuffers : public NodeShader {
  public:
-  Status GenerateCode(const GenerationContext& ctx,
-                      GeneratedCode* generated_code) const final {
+  absl::Status GenerateCode(const GenerationContext& ctx,
+                            GeneratedCode* generated_code) const final {
     auto input = ctx.graph->FindInputs(ctx.node->id)[0];
     auto attr = absl::any_cast<const ConvolutionTransposedAttributes&>(
         ctx.node->operation.attributes);
@@ -63,10 +63,10 @@ class ConvolutionTransposedBuffers : public NodeShader {
     ivec2 p0 = ($padding$ + $stride$ - gid.xy % $stride$) % $stride$;
     for (int y = p0.y; y < $kernel_size.y$; y += $stride.y$) {
       for (int x = p0.x; x < $kernel_size.x$; x += $stride.x$) {
-      
-        int i = int(float(y * $kernel_size.x$) + float(x));        
+
+        int i = int(float(y * $kernel_size.x$) + float(x));
         ivec2 idx = ivec2(vec2(gid.xy + ivec2(x, y)) - vec2($padding$));
-        
+
         if (IN_BOUNDS(idx, ivec2(0), ivec2($input_data_0_w$, $input_data_0_h$) * $stride$)) {
           ivec2 coord = idx / $stride$;
           for (int l = 0; l < $src_depth$; ++l) {
@@ -94,7 +94,7 @@ class ConvolutionTransposedBuffers : public NodeShader {
         /*input=*/IOStructure::ONLY_DEFINITIONS,
         /*output=*/IOStructure::AUTO,
     };
-    return OkStatus();
+    return absl::OkStatus();
   }
 };
 
diff --git a/tensorflow/lite/delegates/gpu/gl/node_shader.h b/tensorflow/lite/delegates/gpu/gl/node_shader.h
index 38364656b7a..d98bdbf8914 100644
--- a/tensorflow/lite/delegates/gpu/gl/node_shader.h
+++ b/tensorflow/lite/delegates/gpu/gl/node_shader.h
@@ -101,8 +101,8 @@ class NodeShader {
   };
 
   // Generates shader code for a node. The code should be just a function body.
-  virtual Status GenerateCode(const GenerationContext& ctx,
-                              GeneratedCode* generated_code) const = 0;
+  virtual absl::Status GenerateCode(const GenerationContext& ctx,
+                                    GeneratedCode* generated_code) const = 0;
 
   // Limit the size of the const offsets array
   static constexpr int kMaxConstArraySize = 9;
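
Note: GenerateCode is the single hook every kernel implements, as ConvolutionTransposedBuffers shows above. A bare-bones implementation under the new signature could look like the sketch below; the GeneratedCode field name is assumed for illustration only:

    class PassThroughShader : public NodeShader {
     public:
      absl::Status GenerateCode(const GenerationContext& ctx,
                                GeneratedCode* generated_code) const final {
        // Emit only a function body; the shader compiler wraps it.
        generated_code->source_code = "value_0 = value_0;";  // field name assumed
        return absl::OkStatus();
      }
    };
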
diff --git a/tensorflow/lite/delegates/gpu/gl/object_manager.cc b/tensorflow/lite/delegates/gpu/gl/object_manager.cc
index 4eca794a20a..c37be507b2b 100644
--- a/tensorflow/lite/delegates/gpu/gl/object_manager.cc
+++ b/tensorflow/lite/delegates/gpu/gl/object_manager.cc
@@ -24,21 +24,22 @@ namespace tflite {
 namespace gpu {
 namespace gl {
 
-Status CreatePHWC4BufferFromTensor(const TensorFloat32& tensor,
-                                   GlBuffer* gl_buffer) {
+absl::Status CreatePHWC4BufferFromTensor(const TensorFloat32& tensor,
+                                         GlBuffer* gl_buffer) {
   std::vector<float> transposed(GetElementsSizeForPHWC4(tensor.shape));
   RETURN_IF_ERROR(
       ConvertToPHWC4(tensor.data, tensor.shape, absl::MakeSpan(transposed)));
   return CreateReadOnlyShaderStorageBuffer<float>(transposed, gl_buffer);
 }
 
-Status CreatePHWC4BufferFromTensorRef(const TensorRef<BHWC>& tensor_ref,
-                                      GlBuffer* gl_buffer) {
+absl::Status CreatePHWC4BufferFromTensorRef(const TensorRef<BHWC>& tensor_ref,
+                                            GlBuffer* gl_buffer) {
   return CreateReadWriteShaderStorageBuffer<float>(
       GetElementsSizeForPHWC4(tensor_ref.shape), gl_buffer);
 }
 
-Status CopyFromPHWC4Buffer(const GlBuffer& buffer, TensorFloat32* tensor) {
+absl::Status CopyFromPHWC4Buffer(const GlBuffer& buffer,
+                                 TensorFloat32* tensor) {
   return buffer.MappedRead<float>(
       [tensor, &buffer](absl::Span<const float> data) {
         tensor->data.resize(tensor->shape.DimensionsProduct());
@@ -47,12 +48,12 @@ Status CopyFromPHWC4Buffer(const GlBuffer& buffer, TensorFloat32* tensor) {
       });
 }
 
-Status ObjectManager::RegisterBuffer(uint32_t id, GlBuffer buffer) {
+absl::Status ObjectManager::RegisterBuffer(uint32_t id, GlBuffer buffer) {
   if (id >= buffers_.size()) {
     buffers_.resize(id + 1);
   }
   buffers_[id] = absl::make_unique<GlBuffer>(std::move(buffer));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 void ObjectManager::RemoveBuffer(uint32_t id) {
@@ -65,12 +66,12 @@ GlBuffer* ObjectManager::FindBuffer(uint32_t id) const {
   return id >= buffers_.size() ? nullptr : buffers_[id].get();
 }
 
-Status ObjectManager::RegisterTexture(uint32_t id, GlTexture texture) {
+absl::Status ObjectManager::RegisterTexture(uint32_t id, GlTexture texture) {
   if (id >= textures_.size()) {
     textures_.resize(id + 1);
   }
   textures_[id] = absl::make_unique<GlTexture>(std::move(texture));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 void ObjectManager::RemoveTexture(uint32_t id) {
diff --git a/tensorflow/lite/delegates/gpu/gl/object_manager.h b/tensorflow/lite/delegates/gpu/gl/object_manager.h
index 8fa82871b50..0a7de28e1dc 100644
--- a/tensorflow/lite/delegates/gpu/gl/object_manager.h
+++ b/tensorflow/lite/delegates/gpu/gl/object_manager.h
@@ -41,7 +41,7 @@ namespace gl {
 class ObjectManager {
  public:
   // Moves ownership over the given buffer to the manager.
-  Status RegisterBuffer(uint32_t id, GlBuffer buffer);
+  absl::Status RegisterBuffer(uint32_t id, GlBuffer buffer);
 
   void RemoveBuffer(uint32_t id);
 
@@ -49,7 +49,7 @@ class ObjectManager {
   GlBuffer* FindBuffer(uint32_t id) const;
 
   // Moves ownership over the given texture to the manager.
-  Status RegisterTexture(uint32_t id, GlTexture texture);
+  absl::Status RegisterTexture(uint32_t id, GlTexture texture);
 
   void RemoveTexture(uint32_t id);
 
@@ -67,17 +67,17 @@ class ObjectManager {
 
 // Creates read-only buffer from the given tensor. Tensor data is converted to
 // PHWC4 layout.
-Status CreatePHWC4BufferFromTensor(const TensorFloat32& tensor,
-                                   GlBuffer* gl_buffer);
+absl::Status CreatePHWC4BufferFromTensor(const TensorFloat32& tensor,
+                                         GlBuffer* gl_buffer);
 
 // Creates read-write buffer for the given tensor shape, where data layout is
 // supposed to be PHWC4.
-Status CreatePHWC4BufferFromTensorRef(const TensorRef<BHWC>& tensor_ref,
-                                      GlBuffer* gl_buffer);
+absl::Status CreatePHWC4BufferFromTensorRef(const TensorRef<BHWC>& tensor_ref,
+                                            GlBuffer* gl_buffer);
 
 // Copies data from a buffer that holds data in PHWC4 layout to the given
 // tensor.
-Status CopyFromPHWC4Buffer(const GlBuffer& buffer, TensorFloat32* tensor);
+absl::Status CopyFromPHWC4Buffer(const GlBuffer& buffer, TensorFloat32* tensor);
 
 }  // namespace gl
 }  // namespace gpu
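
Note: together with ObjectManager, these three helpers cover the full CPU-to-GPU round trip that SingleOpModel::Invoke performs earlier in this diff. A usage sketch, with the buffer ids and the elided dispatch step purely illustrative:

    absl::Status RoundTrip(const TensorFloat32& input,
                           const TensorRef<BHWC>& output_ref,
                           TensorFloat32* output, ObjectManager* objects) {
      GlBuffer in_buffer;
      // Converts the tensor into PHWC4 layout inside a read-only SSBO.
      RETURN_IF_ERROR(CreatePHWC4BufferFromTensor(input, &in_buffer));
      RETURN_IF_ERROR(objects->RegisterBuffer(/*id=*/0, std::move(in_buffer)));

      GlBuffer out_buffer;  // read-write, sized from the output shape
      RETURN_IF_ERROR(CreatePHWC4BufferFromTensorRef(output_ref, &out_buffer));
      RETURN_IF_ERROR(objects->RegisterBuffer(/*id=*/1, std::move(out_buffer)));

      // ... dispatch the compiled program against buffers 0 and 1 ...
      return CopyFromPHWC4Buffer(*objects->FindBuffer(1), output);
    }
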
diff --git a/tensorflow/lite/delegates/gpu/gl/request_gpu_info.cc b/tensorflow/lite/delegates/gpu/gl/request_gpu_info.cc
index 7134fc010d0..0769a5014b4 100644
--- a/tensorflow/lite/delegates/gpu/gl/request_gpu_info.cc
+++ b/tensorflow/lite/delegates/gpu/gl/request_gpu_info.cc
@@ -28,7 +28,7 @@ namespace tflite {
 namespace gpu {
 namespace gl {
 
-Status RequestGpuInfo(GpuInfo* gpu_info) {
+absl::Status RequestGpuInfo(GpuInfo* gpu_info) {
   GpuInfo info;
 
   const GLubyte* renderer_name = glGetString(GL_RENDERER);
@@ -73,7 +73,7 @@ Status RequestGpuInfo(GpuInfo* gpu_info) {
   glGetIntegerv(GL_MAX_ARRAY_TEXTURE_LAYERS, &info.max_array_texture_layers);
   RETURN_IF_ERROR(GetOpenGlErrors());
   *gpu_info = info;
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gl
diff --git a/tensorflow/lite/delegates/gpu/gl/request_gpu_info.h b/tensorflow/lite/delegates/gpu/gl/request_gpu_info.h
index 4eba7a55c2a..f9d203e2325 100644
--- a/tensorflow/lite/delegates/gpu/gl/request_gpu_info.h
+++ b/tensorflow/lite/delegates/gpu/gl/request_gpu_info.h
@@ -28,7 +28,7 @@ namespace gl {
 
 // This method performs multiple GL calls; therefore, an EGL context must be
 // created upfront.
-Status RequestGpuInfo(GpuInfo* gpu_info);
+absl::Status RequestGpuInfo(GpuInfo* gpu_info);
 
 }  // namespace gl
 }  // namespace gpu
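
Note: the precondition above is why callers such as the test harness create an EglEnvironment first. A minimal caller, as a sketch:

    absl::Status QueryGpu(GpuInfo* gpu_info) {
      std::unique_ptr<EglEnvironment> env;
      RETURN_IF_ERROR(EglEnvironment::NewEglEnvironment(&env));
      return RequestGpuInfo(gpu_info);  // GL calls are now legal
    }
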
diff --git a/tensorflow/lite/delegates/gpu/gl/runtime.cc b/tensorflow/lite/delegates/gpu/gl/runtime.cc
index 14e30389cf0..2a48b59c8d9 100644
--- a/tensorflow/lite/delegates/gpu/gl/runtime.cc
+++ b/tensorflow/lite/delegates/gpu/gl/runtime.cc
@@ -41,13 +41,13 @@ namespace gl {
 namespace {
 
 struct TextureF16Maker {
-  Status operator()(const uint3& size) const {
+  absl::Status operator()(const uint3& size) const {
     return CreateReadOnlyImageTextureF16(size, data, gl_texture);
   }
-  Status operator()(const uint2& size) const {
+  absl::Status operator()(const uint2& size) const {
     return CreateReadOnlyImageTextureF16(size, data, gl_texture);
   }
-  Status operator()(const size_t& size) const {
+  absl::Status operator()(const size_t& size) const {
     return CreateReadOnlyImageTextureF16(uint2(static_cast<uint32_t>(size), 1U),
                                          data, gl_texture);
   }
@@ -56,13 +56,13 @@ struct TextureF16Maker {
 };
 
 struct TextureF32Maker {
-  Status operator()(const uint3& size) const {
+  absl::Status operator()(const uint3& size) const {
     return CreateReadOnlyImageTexture(size, data, gl_texture);
   }
-  Status operator()(const uint2& size) const {
+  absl::Status operator()(const uint2& size) const {
     return CreateReadOnlyImageTexture(size, data, gl_texture);
   }
-  Status operator()(const size_t& size) const {
+  absl::Status operator()(const size_t& size) const {
     return CreateReadOnlyImageTexture(uint2(static_cast<uint32_t>(size), 1U),
                                       data, gl_texture);
   }
@@ -70,20 +70,21 @@ struct TextureF32Maker {
   GlTexture* gl_texture;
 };
 
-Status MakeGlTexture(const Object& object, const ObjectData& data,
-                     GlTexture* gl_texture) {
+absl::Status MakeGlTexture(const Object& object, const ObjectData& data,
+                           GlTexture* gl_texture) {
   if (object.access == AccessType::READ_WRITE ||
       object.access == AccessType::WRITE) {
-    return InvalidArgumentError("Read-write textures are not supported");
+    return absl::InvalidArgumentError("Read-write textures are not supported");
   }
   if (object.data_type != DataType::FLOAT16 &&
       object.data_type != DataType::FLOAT32) {
-    return InvalidArgumentError("Textures support float16 or float32 only.");
+    return absl::InvalidArgumentError(
+        "Textures support float16 or float32 only.");
   }
   switch (object.data_type) {
     case DataType::FLOAT16: {
       if (data.size() % 2 != 0) {
-        return InvalidArgumentError("Texture size is not aligned");
+        return absl::InvalidArgumentError("Texture size is not aligned");
       }
       return absl::visit(
           TextureF16Maker{
@@ -96,7 +97,7 @@ Status MakeGlTexture(const Object& object, const ObjectData& data,
     }
     case DataType::FLOAT32: {
       if (data.size() % sizeof(float) != 0) {
-        return InvalidArgumentError("Texture size is not aligned");
+        return absl::InvalidArgumentError("Texture size is not aligned");
       }
       return absl::visit(
           TextureF32Maker{
@@ -108,18 +109,18 @@ Status MakeGlTexture(const Object& object, const ObjectData& data,
           object.size);
     }
     default:
-      return InvalidArgumentError("Unsupported textures data type.");
+      return absl::InvalidArgumentError("Unsupported textures data type.");
   }
 }
 
 struct TextureRefMaker {
-  Status operator()(const uint3& size) const {
+  absl::Status operator()(const uint3& size) const {
     return CreateReadWriteRgbaImageTexture(type, size, gl_texture);
   }
-  Status operator()(const uint2& size) const {
+  absl::Status operator()(const uint2& size) const {
     return CreateReadWriteRgbaImageTexture(type, size, gl_texture);
   }
-  Status operator()(const size_t& size) const {
+  absl::Status operator()(const size_t& size) const {
     return CreateReadWriteRgbaImageTexture(
         type, uint2(static_cast<uint32_t>(size), 1U), gl_texture);
   }
@@ -128,37 +129,38 @@ struct TextureRefMaker {
 };
 
 // Makes read-write gl texture
-Status MakeGlTextureRef(const Object& object, GlTexture* gl_texture) {
+absl::Status MakeGlTextureRef(const Object& object, GlTexture* gl_texture) {
   return absl::visit(TextureRefMaker{object.data_type, gl_texture},
                      object.size);
 }
 
-Status MakeGlBuffer(const Object& object, const ObjectData& data,
-                    GlBuffer* gl_buffer) {
+absl::Status MakeGlBuffer(const Object& object, const ObjectData& data,
+                          GlBuffer* gl_buffer) {
   if (data.size() % SizeOf(object.data_type) != 0) {
-    return InvalidArgumentError("Buffer size is not aligned");
+    return absl::InvalidArgumentError("Buffer size is not aligned");
   }
   return CreateReadOnlyShaderStorageBuffer(absl::MakeConstSpan(data),
                                            gl_buffer);
 }
 
 // Looks up an object with the given id. If found, makes a binding function.
-Status MakeBindingFunc(const Object& object, uint32_t id,
-                       const ObjectManager& objects,
-                       std::function<Status()>* binding_func) {
+absl::Status MakeBindingFunc(const Object& object, uint32_t id,
+                             const ObjectManager& objects,
+                             std::function<absl::Status()>* binding_func) {
   const uint32_t binding = object.binding;
   switch (object.object_type) {
     case ObjectType::BUFFER: {
       auto ptr = objects.FindBuffer(id);
       if (!ptr) {
-        return NotFoundError(absl::StrCat("Buffer ", id, " is not found"));
+        return absl::NotFoundError(
+            absl::StrCat("Buffer ", id, " is not found"));
       }
 
       // Validate buffer.
       size_t size_in_bytes = ByteSizeOf(object);
       // TODO(akulik): make comparison != instead of <
       if (ptr->bytes_size() < size_in_bytes) {
-        return FailedPreconditionError(
+        return absl::FailedPreconditionError(
             absl::StrCat("Buffer ", id, " size in bytes ", ptr->bytes_size(),
                          " < requested size_in_bytes ", size_in_bytes));
       }
@@ -168,15 +170,16 @@ Status MakeBindingFunc(const Object& object, uint32_t id,
     case ObjectType::TEXTURE: {
       auto ptr = objects.FindTexture(id);
       if (!ptr) {
-        return NotFoundError(absl::StrCat("Texture ", id, " is not found"));
+        return absl::NotFoundError(
+            absl::StrCat("Texture ", id, " is not found"));
       }
       *binding_func = [=]() { return ptr->BindAsReadWriteImage(binding); };
       break;
     }
     case ObjectType::UNKNOWN:
-      return InvalidArgumentError("Unknown object type");
+      return absl::InvalidArgumentError("Unknown object type");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace
@@ -194,10 +197,10 @@ Runtime::Runtime(const RuntimeOptions& options, const GpuInfo& gpu_info,
   }
 }
 
-Status Runtime::AddProgram(const GlShader& shader,
-                           const std::vector<Variable>& parameters,
-                           const std::vector<Object>& objects,
-                           const uint3& num_workgroups) {
+absl::Status Runtime::AddProgram(const GlShader& shader,
+                                 const std::vector<Variable>& parameters,
+                                 const std::vector<Object>& objects,
+                                 const uint3& num_workgroups) {
   GlProgram program;
   RETURN_IF_ERROR(GlProgram::CreateWithShader(shader, &program));
 
@@ -217,10 +220,10 @@ Status Runtime::AddProgram(const GlShader& shader,
       // Reference object could be provided externally as a model input/output
       // but also for debugging purposes. Otherwise all references are collected
       // and allocated later.
-      Status status = MakeBindingFunc(object, GetRef(object),
-                                      *external_objects_, &binding_func);
+      absl::Status status = MakeBindingFunc(object, GetRef(object),
+                                            *external_objects_, &binding_func);
       if (!status.ok()) {
-        if (status.code() == StatusCode::kNotFound) {
+        if (absl::IsNotFound(status)) {
           program.refs.push_back(object);
           continue;  // don't add to binding.
         }
@@ -238,10 +241,10 @@ Status Runtime::AddProgram(const GlShader& shader,
 
   // All parameters once set stay with program, therefore, we only need to keep
   // program and bindings for execution.
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status Runtime::AllocateInternalObject(const Object& object) {
+absl::Status Runtime::AllocateInternalObject(const Object& object) {
   const ObjectRef ref = GetRef(object);
   switch (object.object_type) {
     case ObjectType::BUFFER: {
@@ -260,15 +263,16 @@ Status Runtime::AllocateInternalObject(const Object& object) {
       break;
     }
     default:
-      return InternalError("Unexpected internal object type");
+      return absl::InternalError("Unexpected internal object type");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status Runtime::AllocateConstObject(const Object& object, uint32_t* id) {
+absl::Status Runtime::AllocateConstObject(const Object& object, uint32_t* id) {
   const ObjectData* data = GetData(object);
   if (data == nullptr) {
-    return InternalError("Unable to allocate reference as a const object");
+    return absl::InternalError(
+        "Unable to allocate reference as a const object");
   }
   *id = next_const_id_++;
   switch (object.object_type) {
@@ -289,12 +293,12 @@ Status Runtime::AllocateConstObject(const Object& object, uint32_t* id) {
       break;
     }
     case ObjectType::UNKNOWN:
-      return InternalError("Unknown object type");
+      return absl::InternalError("Unknown object type");
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status Runtime::PrepareForExecution() {
+absl::Status Runtime::PrepareForExecution() {
   if (shared_readonly_buffer_ && !shared_readonly_buffer_->empty()) {
     GlBuffer shared_buffer;
     RETURN_IF_ERROR(
@@ -320,11 +324,10 @@ Status Runtime::PrepareForExecution() {
       // Check whether it is created already.
       BindFunc binding;
       ObjectRef ref = GetRef(object);
-      Status status = MakeBindingFunc(object, ref, internal_objects_, &binding);
+      absl::Status status =
+          MakeBindingFunc(object, ref, internal_objects_, &binding);
       if (!status.ok()) {
-        if (status.code() != StatusCode::kNotFound) {
-          return status;
-        }
+        if (!absl::IsNotFound(status)) return status;
         RETURN_IF_ERROR(AllocateInternalObject(object));
         RETURN_IF_ERROR(
             MakeBindingFunc(object, ref, internal_objects_, &binding));
@@ -333,7 +336,7 @@ Status Runtime::PrepareForExecution() {
     }
     program.refs.clear();
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 namespace {
@@ -399,8 +402,8 @@ struct AddUsageRecordForTextureFunc {
 
 // We assume that AddUsageRecord for different objects is called in order of
 // program_id.
-Status AddUsageRecord(CombinedUsageRecords* usage_records, const Object& object,
-                      const size_t program_id) {
+absl::Status AddUsageRecord(CombinedUsageRecords* usage_records,
+                            const Object& object, const size_t program_id) {
   auto ref = GetRef(object);
   if (ref >= usage_records->usage_refs.size()) {
     usage_records->usage_refs.resize(ref + 1, kNotAssigned);
@@ -416,17 +419,17 @@ Status AddUsageRecord(CombinedUsageRecords* usage_records, const Object& object,
     } else {
       UpdateUsageRecord(&usage_records->buffers[usage_ref], program_id);
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
   if (object.object_type == ObjectType::TEXTURE) {
     absl::visit(AddUsageRecordForTextureFunc{usage_records, ref, program_id},
                 object.size);
-    return OkStatus();
+    return absl::OkStatus();
   }
-  return InternalError("Unexpected object type");
+  return absl::InternalError("Unexpected object type");
 }
 
-Status ApplyBuffersAssignment(
+absl::Status ApplyBuffersAssignment(
     const ObjectsAssignment<size_t>& assignment,
     const std::vector<size_t>& global_ref_to_usage_rec,
     const std::vector<Object*>& global_ref_to_object_ptr,
@@ -462,11 +465,11 @@ Status ApplyBuffersAssignment(
     }
     (*global_ref_to_shared_ref)[global_ref] = shared_ref;
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 template <typename ObjectSizeT>
-Status ApplyTexturesAssignment(
+absl::Status ApplyTexturesAssignment(
     const ObjectsAssignment<ObjectSizeT>& assignment,
     const std::vector<size_t>& global_ref_to_usage_rec,
     const std::vector<Object*>& global_ref_to_object_ptr,
@@ -504,7 +507,7 @@ Status ApplyTexturesAssignment(
     }
     (*global_ref_to_shared_ref)[global_ref] = shared_ref;
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace
@@ -512,7 +515,8 @@ Status ApplyTexturesAssignment(
 // Assign shared objects to internal objects, using memory allocation
 // algorithms. Usage records for the algorithms are calculated separately for
 // each data type and object type.
-Status Runtime::AssignInternalObjects(std::vector<Object>* shared_objects) {
+absl::Status Runtime::AssignInternalObjects(
+    std::vector<Object>* shared_objects) {
   // Build tensor usage records, clusterized by object type and data type.
   std::map<DataType, CombinedUsageRecords> usage_records_by_data_type;
   std::vector<Object*> global_ref_to_object_ptr;
@@ -579,10 +583,10 @@ Status Runtime::AssignInternalObjects(std::vector<Object>* shared_objects) {
       object.object = global_ref_to_shared_ref[GetRef(object)];
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status Runtime::Execute() {
+absl::Status Runtime::Execute() {
   for (const auto& descriptor : programs_) {
     for (auto& b : descriptor.bindings) {
       RETURN_IF_ERROR(b());
@@ -590,7 +594,7 @@ Status Runtime::Execute() {
     RETURN_IF_ERROR(command_queue_->Dispatch(descriptor.program,
                                              descriptor.num_workgroups));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gl
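
Note: error-code checks in this file move from comparing status.code() against StatusCode constants to absl's predicate helpers. In PrepareForExecution only a NotFound error is recoverable (the object simply has not been allocated yet), so every other error must be returned early. The two spellings below are equivalent; the recovery branch is illustrative:

    #include "absl/status/status.h"

    void NotFoundCheckExamples() {
      absl::Status s = absl::NotFoundError("Buffer 3 is not found");
      if (absl::IsNotFound(s)) {
        // Recoverable: allocate the missing object, then retry the binding.
      }
      if (s.code() == absl::StatusCode::kNotFound) {
        // The same check, spelled without the helper.
      }
    }
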
diff --git a/tensorflow/lite/delegates/gpu/gl/runtime.h b/tensorflow/lite/delegates/gpu/gl/runtime.h
index b66a7fdfaa4..97f0f732834 100644
--- a/tensorflow/lite/delegates/gpu/gl/runtime.h
+++ b/tensorflow/lite/delegates/gpu/gl/runtime.h
@@ -44,17 +44,17 @@ class Runtime {
           CommandQueue* command_queue, const ObjectManager* external_objects);
 
   // Takes parameters and objects and prepares GL program.
-  Status AddProgram(const GlShader& shader,
-                    const std::vector<Variable>& parameters,
-                    const std::vector<Object>& objects,
-                    const uint3& num_workgroups);
+  absl::Status AddProgram(const GlShader& shader,
+                          const std::vector<Variable>& parameters,
+                          const std::vector<Object>& objects,
+                          const uint3& num_workgroups);
 
   // Needs to be called once all programs and shaders have been added to runtime.
-  Status PrepareForExecution();
+  absl::Status PrepareForExecution();
 
   // Executes all compiled programs.
   // TODO(akulik): add more controls over execution. Execution policy?
-  Status Execute();
+  absl::Status Execute();
 
   // Gets access to objects created while executing generated code.
   const ObjectManager* internal_objects() const { return &internal_objects_; }
@@ -72,14 +72,14 @@ class Runtime {
   }
 
  private:
-  Status AllocateInternalObject(const Object& object);
+  absl::Status AllocateInternalObject(const Object& object);
 
-  Status AllocateConstObject(const Object& object, uint32_t* id);
+  absl::Status AllocateConstObject(const Object& object, uint32_t* id);
 
   // Goes over objects in programs and decides how to allocate them to
   // minimize total allocated memory. Returns a collection of objects to be
   // allocated and shared by internal objects.
-  Status AssignInternalObjects(std::vector<Object>* objects);
+  absl::Status AssignInternalObjects(std::vector<Object>* objects);
 
   const RuntimeOptions options_;
   const GpuInfo gpu_info_;
@@ -92,7 +92,7 @@ class Runtime {
 
   std::unique_ptr<SharedBufferData> shared_readonly_buffer_;
 
-  using BindFunc = std::function<Status()>;
+  using BindFunc = std::function<absl::Status()>;
 
   // Encapsulates a program and all object to bind before dispatch.
   struct CompiledProgramDescriptor {
diff --git a/tensorflow/lite/delegates/gpu/gl/runtime/shared_buffer.h b/tensorflow/lite/delegates/gpu/gl/runtime/shared_buffer.h
index d4f49d1952c..11b094637f2 100644
--- a/tensorflow/lite/delegates/gpu/gl/runtime/shared_buffer.h
+++ b/tensorflow/lite/delegates/gpu/gl/runtime/shared_buffer.h
@@ -55,7 +55,7 @@ class SharedBufferData {
   bool empty() const { return shared_data_.empty(); }
 
   // Returns a single GlBuffer that owns entire shared data.
-  Status CreateSharedGlBuffer(GlBuffer* gl_buffer) {
+  absl::Status CreateSharedGlBuffer(GlBuffer* gl_buffer) {
     // Upload data to a buffer
     gl_buffer_internal::BufferBinder binder(GL_SHADER_STORAGE_BUFFER,
                                             buffer_id_.id());
@@ -64,7 +64,7 @@ class SharedBufferData {
                                        GL_STATIC_READ));
     *gl_buffer = GlBuffer(GL_SHADER_STORAGE_BUFFER, buffer_id_.Release(),
                           shared_data_.size(), 0, /*has_ownership=*/true);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
  private:
diff --git a/tensorflow/lite/delegates/gpu/gl/serialization.cc b/tensorflow/lite/delegates/gpu/gl/serialization.cc
index 17db339fa98..7e15cf2d271 100644
--- a/tensorflow/lite/delegates/gpu/gl/serialization.cc
+++ b/tensorflow/lite/delegates/gpu/gl/serialization.cc
@@ -390,15 +390,15 @@ absl::Span<const uint8_t> SerializedCompiledModelBuilder::Finalize(
 
 namespace {
 
-Status ParseParameter(const data::UniformParameter& fb_parameter,
-                      Variable* parameter) {
+absl::Status ParseParameter(const data::UniformParameter& fb_parameter,
+                            Variable* parameter) {
   parameter->name = fb_parameter.name()->str();
   switch (fb_parameter.type()) {
     case data::ParameterType::INT32: {
       auto* ptr = fb_parameter.data_as_DataInt32();
       if (ptr == nullptr) {
-        return InvalidArgumentError("Unexpected data type '" + parameter->name +
-                                    "'");
+        return absl::InvalidArgumentError("Unexpected data type '" +
+                                          parameter->name + "'");
       }
       switch (ptr->data()->size()) {
         case 1:
@@ -412,16 +412,16 @@ Status ParseParameter(const data::UniformParameter& fb_parameter,
                                   (*ptr->data())[2], (*ptr->data())[3]);
           break;
         default:
-          return InvalidArgumentError("Unexpected size for parameter '" +
-                                      parameter->name + "'");
+          return absl::InvalidArgumentError("Unexpected size for parameter '" +
+                                            parameter->name + "'");
       }
       break;
     }
     case data::ParameterType::UINT32: {
       auto* ptr = fb_parameter.data_as_DataUint32();
       if (ptr == nullptr) {
-        return InvalidArgumentError("Unexpected data type '" + parameter->name +
-                                    "'");
+        return absl::InvalidArgumentError("Unexpected data type '" +
+                                          parameter->name + "'");
       }
       switch (ptr->data()->size()) {
         case 1:
@@ -432,16 +432,16 @@ Status ParseParameter(const data::UniformParameter& fb_parameter,
                                    (*ptr->data())[2], (*ptr->data())[3]);
           break;
         default:
-          return InvalidArgumentError("Unexpected size for parameter '" +
-                                      parameter->name + "'");
+          return absl::InvalidArgumentError("Unexpected size for parameter '" +
+                                            parameter->name + "'");
       }
       break;
     }
     case data::ParameterType::FLOAT32: {
       auto* ptr = fb_parameter.data_as_DataFloat();
       if (ptr == nullptr) {
-        return InvalidArgumentError("Unexpected data type '" + parameter->name +
-                                    "'");
+        return absl::InvalidArgumentError("Unexpected data type '" +
+                                          parameter->name + "'");
       }
       switch (ptr->data()->size()) {
         case 1:
@@ -455,21 +455,21 @@ Status ParseParameter(const data::UniformParameter& fb_parameter,
                                     (*ptr->data())[2], (*ptr->data())[3]);
           break;
         default:
-          return InvalidArgumentError("Unexpected size for parameter '" +
-                                      parameter->name + "'");
+          return absl::InvalidArgumentError("Unexpected size for parameter '" +
+                                            parameter->name + "'");
       }
       break;
     }
     case data::ParameterType::INT32_2: {
       auto* ptr = fb_parameter.data_as_DataInt32();
       if (ptr == nullptr) {
-        return InvalidArgumentError("Unexpected data type '" + parameter->name +
-                                    "'");
+        return absl::InvalidArgumentError("Unexpected data type '" +
+                                          parameter->name + "'");
       }
 
       if (ptr->data()->size() % 2 != 0) {
-        return InvalidArgumentError("Unexpected size for parameter '" +
-                                    parameter->name + "'");
+        return absl::InvalidArgumentError("Unexpected size for parameter '" +
+                                          parameter->name + "'");
       }
 
       std::vector<int2> values(ptr->data()->size() / 2);
@@ -480,7 +480,7 @@ Status ParseParameter(const data::UniformParameter& fb_parameter,
       break;
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 DataType ToEnum(data::DataType type) {
@@ -520,7 +520,7 @@ AccessType ToEnum(data::AccessType type) {
   }
 }
 
-Status ParseObject(const data::Object& fb_object, Object* object) {
+absl::Status ParseObject(const data::Object& fb_object, Object* object) {
   object->access = ToEnum(fb_object.access());
   object->binding = fb_object.binding();
   object->object_type = ToEnum(fb_object.type());
@@ -543,7 +543,7 @@ Status ParseObject(const data::Object& fb_object, Object* object) {
       break;
     }
     case data::ObjectSize::NONE:
-      return InvalidArgumentError("Texture size is not set");
+      return absl::InvalidArgumentError("Texture size is not set");
   }
 
   switch (fb_object.object_type()) {
@@ -560,10 +560,10 @@ Status ParseObject(const data::Object& fb_object, Object* object) {
       break;
     }
     case data::ObjectVariant::NONE: {
-      return InvalidArgumentError("Object is not set");
+      return absl::InvalidArgumentError("Object is not set");
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 CompiledModelOptions ParseParameters(const data::Parameters& fb_parameters) {
@@ -574,11 +574,11 @@ CompiledModelOptions ParseParameters(const data::Parameters& fb_parameters) {
 
 }  // namespace
 
-Status DeserializeCompiledModel(absl::Span<const uint8_t> serialized,
-                                DeserializationHandler* handler) {
+absl::Status DeserializeCompiledModel(absl::Span<const uint8_t> serialized,
+                                      DeserializationHandler* handler) {
   flatbuffers::Verifier verifier(serialized.data(), serialized.size());
   if (!data::VerifyCompiledModelBuffer(verifier)) {
-    return InvalidArgumentError("Serialized model is corrupted.");
+    return absl::InvalidArgumentError("Serialized model is corrupted.");
   }
 
   auto model = data::GetCompiledModel(serialized.data());
@@ -612,7 +612,7 @@ Status DeserializeCompiledModel(absl::Span<const uint8_t> serialized,
                                        program->shader_index()));
   }
   handler->OnOptions(ParseParameters(*model->parameters()));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace gl
diff --git a/tensorflow/lite/delegates/gpu/gl/serialization.h b/tensorflow/lite/delegates/gpu/gl/serialization.h
index c3c88b4c462..82b76a475f5 100644
--- a/tensorflow/lite/delegates/gpu/gl/serialization.h
+++ b/tensorflow/lite/delegates/gpu/gl/serialization.h
@@ -67,19 +67,19 @@ class DeserializationHandler {
  public:
   virtual ~DeserializationHandler() = default;
 
-  virtual Status OnShader(absl::Span<const char> shader_src) = 0;
+  virtual absl::Status OnShader(absl::Span<const char> shader_src) = 0;
 
-  virtual Status OnProgram(const std::vector<Variable>& parameters,
-                           const std::vector<Object>& objects,
-                           const uint3& workgroup_size,
-                           const uint3& num_workgroups,
-                           size_t shader_index) = 0;
+  virtual absl::Status OnProgram(const std::vector<Variable>& parameters,
+                                 const std::vector<Object>& objects,
+                                 const uint3& workgroup_size,
+                                 const uint3& num_workgroups,
+                                 size_t shader_index) = 0;
 
   virtual void OnOptions(const CompiledModelOptions& options) = 0;
 };
 
-Status DeserializeCompiledModel(absl::Span<const uint8_t> serialized,
-                                DeserializationHandler* handler);
+absl::Status DeserializeCompiledModel(absl::Span<const uint8_t> serialized,
+                                      DeserializationHandler* handler);
 
 }  // namespace gl
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/gl/serialization_test.cc b/tensorflow/lite/delegates/gpu/gl/serialization_test.cc
index 25aa9be73b2..37c08129139 100644
--- a/tensorflow/lite/delegates/gpu/gl/serialization_test.cc
+++ b/tensorflow/lite/delegates/gpu/gl/serialization_test.cc
@@ -45,18 +45,19 @@ struct ProgramDesc {
 };
 
 struct Handler : public DeserializationHandler {
-  Status OnShader(absl::Span<const char> shader_src) final {
+  absl::Status OnShader(absl::Span<const char> shader_src) final {
     shaders.push_back(std::string(shader_src.data(), shader_src.size()));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status OnProgram(const std::vector<Variable>& parameters,
-                   const std::vector<Object>& objects,
-                   const uint3& workgroup_size, const uint3& num_workgroups,
-                   size_t shader_index) final {
+  absl::Status OnProgram(const std::vector<Variable>& parameters,
+                         const std::vector<Object>& objects,
+                         const uint3& workgroup_size,
+                         const uint3& num_workgroups,
+                         size_t shader_index) final {
     programs.push_back(
         {parameters, objects, workgroup_size, num_workgroups, shader_index});
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   void OnOptions(const CompiledModelOptions& o) final { options = o; }
diff --git a/tensorflow/lite/delegates/gpu/gl_delegate.cc b/tensorflow/lite/delegates/gpu/gl_delegate.cc
index 16aaafa5c94..5ebefb4a6eb 100644
--- a/tensorflow/lite/delegates/gpu/gl_delegate.cc
+++ b/tensorflow/lite/delegates/gpu/gl_delegate.cc
@@ -93,7 +93,8 @@ class Delegate {
     }
   }
 
-  Status CopyFromBufferHandle(TfLiteBufferHandle handle, TfLiteTensor* tensor) {
+  absl::Status CopyFromBufferHandle(TfLiteBufferHandle handle,
+                                    TfLiteTensor* tensor) {
     ValueRef ref;
     RETURN_IF_ERROR(FindObject(handle, &ref));
     auto buffer = phwc4_objects_.FindBuffer(handle);
@@ -105,8 +106,8 @@ class Delegate {
     });
   }
 
-  Status CopyToBufferHandle(TfLiteBufferHandle handle,
-                            TfLiteTensor* tensor) const {
+  absl::Status CopyToBufferHandle(TfLiteBufferHandle handle,
+                                  TfLiteTensor* tensor) const {
     ValueRef ref;
     RETURN_IF_ERROR(FindObject(handle, &ref));
     auto buffer = phwc4_objects_.FindBuffer(handle);
@@ -117,7 +118,7 @@ class Delegate {
     });
   }
 
-  Status BindBufferToTensor(GLuint ssbo, int tensor_index) {
+  absl::Status BindBufferToTensor(GLuint ssbo, int tensor_index) {
     int64_t bytes_size;
     RETURN_IF_ERROR(GetSSBOSize(ssbo, &bytes_size));
     return bhwc_objects_.RegisterBuffer(
@@ -126,8 +127,8 @@ class Delegate {
                                /* has_ownership = */ false));
   }
 
-  Status Prepare(TfLiteContext* context,
-                 const TfLiteDelegateParams* delegate_params) {
+  absl::Status Prepare(TfLiteContext* context,
+                       const TfLiteDelegateParams* delegate_params) {
     // Extract TFLite delegate execution plan from the context and convert it
     // into GraphFloat32.
     GraphFloat32 graph;
@@ -137,7 +138,7 @@ class Delegate {
     NullTransformationReporter reporter;
     ModelTransformer transformer(&graph, &reporter);
     if (!ApplyGeneralTransformations(&transformer)) {
-      return InternalError("Graph general transformations failed");
+      return absl::InternalError("Graph general transformations failed");
     }
 
     if (!env_) RETURN_IF_ERROR(EglEnvironment::NewEglEnvironment(&env_));
@@ -176,7 +177,7 @@ class Delegate {
         tflite_graph_io.insert(tensor_index);
         const auto* input = find_value(tensor_index);
         if (!input || tensor->type != TfLiteType::kTfLiteFloat32) {
-          return NotFoundError("Input tensor is not found in the graph.");
+          return absl::NotFoundError("Input tensor is not found in the graph.");
         }
 
         inputs_.push_back(input->id);
@@ -215,7 +216,8 @@ class Delegate {
         tflite_graph_io.insert(tensor_index);
         const auto* output = find_value(tensor_index);
         if (!output || tensor->type != TfLiteType::kTfLiteFloat32) {
-          return NotFoundError("Output tensor is not found in the graph.");
+          return absl::NotFoundError(
+              "Output tensor is not found in the graph.");
         }
 
         outputs_.push_back(output->id);
@@ -270,14 +272,14 @@ class Delegate {
     RETURN_IF_ERROR(compiled_model->NewRun(runtime_options, &phwc4_objects_,
                                            command_queue_.get(),
                                            &inference_context_));
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Invoke(TfLiteContext* context) {
+  absl::Status Invoke(TfLiteContext* context) {
     const EGLContext egl_context_at_delegate_init = env_->context().context();
     const EGLContext egl_context_at_delegate_invoke = eglGetCurrentContext();
     if (egl_context_at_delegate_init != egl_context_at_delegate_invoke) {
-      return FailedPreconditionError(
+      return absl::FailedPreconditionError(
           "Delegate should run on the same thread where it was initialized.");
     }
 
@@ -330,18 +332,18 @@ class Delegate {
         RETURN_IF_ERROR(CopyFromBufferHandle(id, &tensor));
       }
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   TfLiteDelegate* tflite_delegate() { return &delegate_; }
 
  private:
-  Status FindObject(ValueId id, ValueRef* ref) const {
+  absl::Status FindObject(ValueId id, ValueRef* ref) const {
     if (id >= tensors_.size()) {
-      return InvalidArgumentError("Invalid buffer id");
+      return absl::InvalidArgumentError("Invalid buffer id");
     }
     *ref = tensors_[id];
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   TfLiteDelegate delegate_ = {
@@ -387,7 +389,7 @@ TfLiteStatus DelegatePrepare(TfLiteContext* context, TfLiteDelegate* delegate) {
         const auto status = gpu_delegate->Prepare(context, params);
         if (status.ok()) return gpu_delegate;
         context->ReportError(context, "TfLiteGpuDelegate Prepare: %s",
-                             status.error_message().c_str());
+                             std::string(status.message()).c_str());
         return nullptr;
       },
       // .free
@@ -401,7 +403,7 @@ TfLiteStatus DelegatePrepare(TfLiteContext* context, TfLiteDelegate* delegate) {
         const auto status = GetGpuDelegate(node)->Invoke(context);
         if (status.ok()) return kTfLiteOk;
         context->ReportError(context, "TfLiteGpuDelegate Invoke: %s",
-                             status.error_message().c_str());
+                             std::string(status.message()).c_str());
         return kTfLiteError;
       },
       nullptr,              // .profiling_string
@@ -425,7 +427,7 @@ TfLiteStatus DelegateCopyFromBufferHandle(TfLiteContext* context,
   const auto status = gpu_delegate->CopyFromBufferHandle(buffer_handle, tensor);
   if (status.ok()) return kTfLiteOk;
   context->ReportError(context, "TfLiteGpuDelegate CopyFromBufferHandle: %s",
-                       status.error_message().c_str());
+                       std::string(status.message()).c_str());
   return kTfLiteError;
 }
 
@@ -438,7 +440,7 @@ TfLiteStatus DelegateCopyToBufferHandle(TfLiteContext* context,
   const auto status = gpu_delegate->CopyToBufferHandle(buffer_handle, tensor);
   if (status.ok()) return kTfLiteOk;
   context->ReportError(context, "TfLiteGpuDelegate CopyToBufferHandle: %s",
-                       status.error_message().c_str());
+                       std::string(status.message()).c_str());
   return kTfLiteError;
 }
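
Note: absl::Status::message() returns an absl::string_view, which is not guaranteed to be null-terminated, so each printf-style "%s" call site above gains a std::string copy. A sketch of the rule:

    #include <cstdio>
    #include <string>
    #include "absl/status/status.h"

    void Report(const absl::Status& status) {
      // Copy first: a string_view's data need not end with '\0'.
      std::printf("TfLiteGpuDelegate: %s\n",
                  std::string(status.message()).c_str());
    }
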
 
diff --git a/tensorflow/lite/delegates/gpu/metal/BUILD b/tensorflow/lite/delegates/gpu/metal/BUILD
index b3fbf179293..b407083d8d2 100644
--- a/tensorflow/lite/delegates/gpu/metal/BUILD
+++ b/tensorflow/lite/delegates/gpu/metal/BUILD
@@ -163,11 +163,12 @@ objc_library(
     srcs = ["environment.mm"],
     hdrs = ["environment.h"],
     copts = DEFAULT_COPTS,
-    sdk_frameworks = [
-        "Metal",
-    ],
+    sdk_frameworks = ["Metal"],
     deps = [
         ":common",
+        # TODO(b/152322289): The following dependency is not needed, but a Bazel
+        # bug causes a build failure without an additional dummy dependency.
+        "//tensorflow/lite/delegates/gpu/common:convert",
     ],
 )
 
diff --git a/tensorflow/lite/delegates/gpu/metal/api.cc b/tensorflow/lite/delegates/gpu/metal/api.cc
index 9232b527af3..744094c8c03 100644
--- a/tensorflow/lite/delegates/gpu/metal/api.cc
+++ b/tensorflow/lite/delegates/gpu/metal/api.cc
@@ -142,13 +142,14 @@ std::vector<ComputeTaskDescriptorPtr> SelectSpaceToDepth(
   return SpaceToDepth(id, input_id, output_id, attr);
 }
 
-Status RegisterPrimaryOps(const GraphFloat32& graph, const Node* node,
-                          const std::vector<ValueId>& inputs,
-                          const std::vector<ValueId>& outputs,
-                          const RuntimeOptions& options,
-                          std::vector<ComputeTaskDescriptorPtr>* tasks) {
+absl::Status RegisterPrimaryOps(const GraphFloat32& graph, const Node* node,
+                                const std::vector<ValueId>& inputs,
+                                const std::vector<ValueId>& outputs,
+                                const RuntimeOptions& options,
+                                std::vector<ComputeTaskDescriptorPtr>* tasks) {
   if (!IsBatchMatchesForAllValues(graph)) {
-    return InvalidArgumentError("Only identical batch dimension is supported");
+    return absl::InvalidArgumentError(
+        "Only identical batch dimension is supported");
   }
   int node_id = static_cast<int>(node->id);
   auto op_type = OperationTypeFromString(node->operation.type);
@@ -233,7 +234,7 @@ Status RegisterPrimaryOps(const GraphFloat32& graph, const Node* node,
           *tasks = ElementwiseWithTwoInputs(node_id, inputs, outputs[0],
                                             op_type, broadcast);
         } else {
-          return UnimplementedError(
+          return absl::UnimplementedError(
               "No support of multiply with more than 2 inputs");
         }
       }
@@ -241,7 +242,7 @@ Status RegisterPrimaryOps(const GraphFloat32& graph, const Node* node,
     case OperationType::PAD: {
       auto attr = absl::any_cast<PadAttributes>(node->operation.attributes);
       if (attr.appended.b != 0 || attr.prepended.b != 0) {
-        return UnimplementedError("Padding for BATCH is not supported.");
+        return absl::UnimplementedError("Padding for BATCH is not supported.");
       }
       *tasks = Padding(node_id, inputs[0], outputs[0], attr);
       break;
@@ -278,7 +279,8 @@ Status RegisterPrimaryOps(const GraphFloat32& graph, const Node* node,
     case OperationType::SOFTMAX: {
       auto attr = absl::any_cast<SoftmaxAttributes>(node->operation.attributes);
       if (attr.axis != Axis::CHANNELS) {
-        return UnimplementedError("Softmax supports only CHANNELS dimension");
+        return absl::UnimplementedError(
+            "Softmax supports only CHANNELS dimension");
       }
       *tasks = SelectSoftmax(graph, node_id, inputs[0], outputs[0]);
       break;
@@ -330,15 +332,16 @@ Status RegisterPrimaryOps(const GraphFloat32& graph, const Node* node,
     case OperationType::SPACE_TO_BATCH:
     case OperationType::TRANSPOSE:
     case OperationType::UNKNOWN:
-      return UnimplementedError("Unsupported op: " + node->operation.type);
+      return absl::UnimplementedError("Unsupported op: " +
+                                      node->operation.type);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace
 
-Status Compile(const GraphFloat32& graph, const RuntimeOptions& options,
-               CompiledModel* compiled_model) {
+absl::Status Compile(const GraphFloat32& graph, const RuntimeOptions& options,
+                     CompiledModel* compiled_model) {
   for (const auto& node : graph.nodes()) {
     std::vector<ValueId> inputs;
     for (auto& input : graph.FindInputs(node->id)) {
@@ -355,11 +358,11 @@ Status Compile(const GraphFloat32& graph, const RuntimeOptions& options,
       auto primary_status =
           RegisterPrimaryOps(graph, node, inputs, outputs, options, &tasks);
       if (!primary_status.ok()) {
-        return UnimplementedError(absl::Substitute(
-            "Unsupported op type: $0; custom registry error: "
-            "$1; primary registry error: $2;",
-            node->operation.type, custom_status.error_message(),
-            primary_status.error_message()));
+        return absl::UnimplementedError(
+            absl::Substitute("Unsupported op type: $0; custom registry error: "
+                             "$1; primary registry error: $2;",
+                             node->operation.type, custom_status.message(),
+                             primary_status.message()));
       }
     }
     for (auto task : tasks) {
@@ -367,7 +370,7 @@ Status Compile(const GraphFloat32& graph, const RuntimeOptions& options,
     }
     compiled_model->insert(compiled_model->end(), tasks.begin(), tasks.end());
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace metal
diff --git a/tensorflow/lite/delegates/gpu/metal/api.h b/tensorflow/lite/delegates/gpu/metal/api.h
index dd3c423a612..c1c7648638c 100644
--- a/tensorflow/lite/delegates/gpu/metal/api.h
+++ b/tensorflow/lite/delegates/gpu/metal/api.h
@@ -26,8 +26,8 @@ namespace gpu {
 namespace metal {
 
 // Builds CompiledModel out of GraphFloat32 graph using provided RuntimeOptions.
-Status Compile(const GraphFloat32& graph, const RuntimeOptions& options,
-               CompiledModel* compiled_model);
+absl::Status Compile(const GraphFloat32& graph, const RuntimeOptions& options,
+                     CompiledModel* compiled_model);
 
 }  // namespace metal
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/metal/common.h b/tensorflow/lite/delegates/gpu/metal/common.h
index 9d7d66176f6..6f4e94ed2e7 100644
--- a/tensorflow/lite/delegates/gpu/metal/common.h
+++ b/tensorflow/lite/delegates/gpu/metal/common.h
@@ -39,10 +39,9 @@ id<MTLDevice> GetBestSupportedMetalDevice();
 ///     both.
 /// @discussion The function autoselects the maximum shader language version supported by the target
 ///     OS. FastMath is enabled.
-::tflite::gpu::Status CreateComputeProgram(id<MTLDevice> device, NSString* code,
-                                           NSString* functionName,
-                                           NSDictionary<NSString*, NSString*>* macros,
-                                           id<MTLComputePipelineState>* program);
+absl::Status CreateComputeProgram(id<MTLDevice> device, NSString* code, NSString* functionName,
+                                  NSDictionary<NSString*, NSString*>* macros,
+                                  id<MTLComputePipelineState>* program);
 
 }  // namespace metal
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/metal/common.mm b/tensorflow/lite/delegates/gpu/metal/common.mm
index 7167430a343..cc5a98dfffc 100644
--- a/tensorflow/lite/delegates/gpu/metal/common.mm
+++ b/tensorflow/lite/delegates/gpu/metal/common.mm
@@ -34,9 +34,9 @@ namespace metal {
 
 id<MTLDevice> GetBestSupportedMetalDevice() { return MTLCreateSystemDefaultDevice(); }
 
-Status CreateComputeProgram(id<MTLDevice> device, NSString* code, NSString* functionName,
-                            NSDictionary<NSString*, NSString*>* macros,
-                            id<MTLComputePipelineState>* program) {
+absl::Status CreateComputeProgram(id<MTLDevice> device, NSString* code, NSString* functionName,
+                                  NSDictionary<NSString*, NSString*>* macros,
+                                  id<MTLComputePipelineState>* program) {
   MTLCompileOptions* options = [[MTLCompileOptions alloc] init];
 
   // Runtime checks for the iOS version independently of minimum target iOS.
@@ -70,14 +70,14 @@ Status CreateComputeProgram(id<MTLDevice> device, NSString* code, NSString* func
   if (!library) {
     NSString* errorString =
         [NSString stringWithFormat:@"newLibraryWithSource: %@", [error localizedDescription]];
-    return InternalError([errorString UTF8String]);
+    return absl::InternalError([errorString UTF8String]);
   }
 
   id<MTLFunction> function = [library newFunctionWithName:functionName];
   if (!function) {
     NSString* errorString =
         [NSString stringWithFormat:@"newFunctionWithName: %@", [error localizedDescription]];
-    return InternalError([errorString UTF8String]);
+    return absl::InternalError([errorString UTF8String]);
   }
 
   *program = [device newComputePipelineStateWithFunction:function error:&error];
@@ -85,9 +85,9 @@ Status CreateComputeProgram(id<MTLDevice> device, NSString* code, NSString* func
     NSString* errorString =
         [NSString stringWithFormat:@"newComputePipelineStateWithFunction error: %@",
                                    [error localizedDescription]];
-    return InternalError([errorString UTF8String]);
+    return absl::InternalError([errorString UTF8String]);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace metal
diff --git a/tensorflow/lite/delegates/gpu/metal/common_test.mm b/tensorflow/lite/delegates/gpu/metal/common_test.mm
index 18a495ebd18..7cedac0f799 100644
--- a/tensorflow/lite/delegates/gpu/metal/common_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/common_test.mm
@@ -17,6 +17,7 @@ limitations under the License.
 
 #import <XCTest/XCTest.h>
 
+#include <string>
 #include <tuple>
 #include <vector>
 
@@ -25,7 +26,6 @@ limitations under the License.
 
 using ::tflite::gpu::metal::GetBestSupportedMetalDevice;
 using ::tflite::gpu::metal::CreateComputeProgram;
-using ::tflite::gpu::Status;
 
 @interface CommonTest : XCTestCase
 
@@ -53,16 +53,16 @@ kernel void FunctionName(device TYPE* const src_buffer[[buffer(0)]],
   XCTAssertNotNil(device, @"The Metal device must exists on real device");
   NSString* functionName = @"FunctionName";
   id<MTLComputePipelineState> program;
-  Status status;
+  absl::Status status;
 
   NSDictionary* macrosFloat4 = @{@"TYPE" : @"float4"};
   status = CreateComputeProgram(device, code, functionName, macrosFloat4, &program);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.messasge()).c_str());
   XCTAssertNotNil(program);
 
   NSDictionary* macrosHalf4 = @{@"TYPE" : @"half4"};
   status = CreateComputeProgram(device, code, functionName, macrosHalf4, &program);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.messasge()).c_str());
   XCTAssertNotNil(program);
 
   // This compilation is intended to be incorrect
diff --git a/tensorflow/lite/delegates/gpu/metal/compiled_model.cc b/tensorflow/lite/delegates/gpu/metal/compiled_model.cc
index 5545ad39161..06cc10a0520 100644
--- a/tensorflow/lite/delegates/gpu/metal/compiled_model.cc
+++ b/tensorflow/lite/delegates/gpu/metal/compiled_model.cc
@@ -564,10 +564,10 @@ ComputeTaskDescriptorPtr FuseChain(const FusionSequence& chain) {
 
 }  // namespace
 
-Status ValidateOptimizeModel(const std::vector<ValueId>& input_buffers,
-                             const std::vector<ValueId>& output_buffers,
-                             const CompiledModel& input_vector,
-                             CompiledModel* output) {
+absl::Status ValidateOptimizeModel(const std::vector<ValueId>& input_buffers,
+                                   const std::vector<ValueId>& output_buffers,
+                                   const CompiledModel& input_vector,
+                                   CompiledModel* output) {
   std::list<ComputeTaskDescriptorPtr> input;
   input.insert(input.end(), input_vector.begin(), input_vector.end());
   OptimizationInfo info;
@@ -606,10 +606,10 @@ Status ValidateOptimizeModel(const std::vector<ValueId>& input_buffers,
         std::to_string(info.unused_input_buffer_ids.size()) +
         "\nMissing output buffers " +
         std::to_string(info.missing_output_buffer_ids.size());
-    return InternalError(message);
+    return absl::InternalError(message);
   }
   for (const auto& chain : sorted_chains) output->push_back(FuseChain(chain));
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace metal
diff --git a/tensorflow/lite/delegates/gpu/metal/compiled_model.h b/tensorflow/lite/delegates/gpu/metal/compiled_model.h
index 5f9982d0a66..222534402d9 100644
--- a/tensorflow/lite/delegates/gpu/metal/compiled_model.h
+++ b/tensorflow/lite/delegates/gpu/metal/compiled_model.h
@@ -31,9 +31,10 @@ using CompiledModel = std::vector<ComputeTaskDescriptorPtr>;
 // Receives input CompiledModel, validates, optimizes it and returns output
 // CompiledModel. No shader compilation or memory allocation happen here, this
 // function just does high-level operations fusion.
-Status ValidateOptimizeModel(const std::vector<ValueId>& input_buffers,
-                             const std::vector<ValueId>& output_buffers,
-                             const CompiledModel& input, CompiledModel* output);
+absl::Status ValidateOptimizeModel(const std::vector<ValueId>& input_buffers,
+                                   const std::vector<ValueId>& output_buffers,
+                                   const CompiledModel& input,
+                                   CompiledModel* output);
 
 }  // namespace metal
 }  // namespace gpu
diff --git a/tensorflow/lite/delegates/gpu/metal/compiled_model_test.mm b/tensorflow/lite/delegates/gpu/metal/compiled_model_test.mm
index 59827ce2c08..83870123321 100644
--- a/tensorflow/lite/delegates/gpu/metal/compiled_model_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/compiled_model_test.mm
@@ -183,7 +183,7 @@ static std::vector<ComputeTaskDescriptorPtr> Add2Linkable(int id, ValueId input_
   auto nodes = MulLinkable(1, 1, 2);
   std::vector<ComputeTaskDescriptorPtr> model;
   auto status = ValidateOptimizeModel({1}, {2}, nodes, &model);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 // Outputs: one missing, one unused.
@@ -195,8 +195,8 @@ static std::vector<ComputeTaskDescriptorPtr> Add2Linkable(int id, ValueId input_
   std::vector<std::string> errorMessages = {"Input operations count 1", "Unused operations 1",
                                             "Unused inputs 1", "Missing output buffers 1"};
   for (const std::string& message : errorMessages) {
-    bool doesContainMessage = status.error_message().find(message) != std::string::npos;
-    XCTAssertTrue(doesContainMessage, @"%s", status.error_message().c_str());
+    bool doesContainMessage = std::string(status.message()).find(message) != std::string::npos;
+    XCTAssertTrue(doesContainMessage, @"%s", std::string(status.message()).c_str());
   }
 }
 
@@ -205,7 +205,7 @@ static std::vector<ComputeTaskDescriptorPtr> Add2Linkable(int id, ValueId input_
   auto nodes = MulLinkable(1, 1, 2);
   std::vector<ComputeTaskDescriptorPtr> model;
   auto status = ValidateOptimizeModel({1}, {2, 3}, nodes, &model);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 // Unused input => empty graph, missing output.
@@ -216,8 +216,8 @@ static std::vector<ComputeTaskDescriptorPtr> Add2Linkable(int id, ValueId input_
   std::vector<std::string> errorMessages = {"Input operations count 1", "Unused operations 0",
                                             "Unused inputs 1", "Missing output buffers 1"};
   for (const std::string& message : errorMessages) {
-    bool doesContainMessage = status.error_message().find(message) != std::string::npos;
-    XCTAssertTrue(doesContainMessage, @"%s", status.error_message().c_str());
+    bool doesContainMessage = std::string(status.message()).find(message) != std::string::npos;
+    XCTAssertTrue(doesContainMessage, @"%s", std::string(status.message()).c_str());
   }
 }
 
@@ -228,7 +228,7 @@ static std::vector<ComputeTaskDescriptorPtr> Add2Linkable(int id, ValueId input_
   nodes.insert(nodes.end(), nodes2.begin(), nodes2.end());
   std::vector<ComputeTaskDescriptorPtr> model;
   auto status = ValidateOptimizeModel({1}, {3}, nodes, &model);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 // Two sequential operations. Not fused.
@@ -238,14 +238,14 @@ static std::vector<ComputeTaskDescriptorPtr> Add2Linkable(int id, ValueId input_
   nodes.insert(nodes.end(), nodes2.begin(), nodes2.end());
   std::vector<ComputeTaskDescriptorPtr> model;
   auto status = ValidateOptimizeModel({1}, {3}, nodes, &model);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testAddOperationSuccess {
   auto nodes = Add2(1, 1, 2, 3);
   std::vector<ComputeTaskDescriptorPtr> model;
   auto status = ValidateOptimizeModel({1, 2}, {3}, nodes, &model);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testAddOperationFused {
@@ -254,7 +254,7 @@ static std::vector<ComputeTaskDescriptorPtr> Add2Linkable(int id, ValueId input_
   graph.insert(graph.end(), graph2.begin(), graph2.end());
   std::vector<ComputeTaskDescriptorPtr> model;
   auto status = ValidateOptimizeModel({1, 2}, {4}, graph, &model);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   XCTAssertTrue(model.size() == 1, @"Not fused, more than one task descriptor.");
 }
 
@@ -266,7 +266,7 @@ static std::vector<ComputeTaskDescriptorPtr> Add2Linkable(int id, ValueId input_
   graph.insert(graph.end(), graph3.begin(), graph3.end());
   std::vector<ComputeTaskDescriptorPtr> model;
   auto status = ValidateOptimizeModel({1, 2}, {5}, graph, &model);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 @end
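
The tests wrap status.message() in std::string before calling c_str() because absl::Status::message() returns an absl::string_view, which is not guaranteed to be NUL-terminated, whereas the old error_message() exposed a std::string. A minimal illustration of the pattern:

    absl::Status status = absl::InternalError("shader failed");
    absl::string_view view = status.message();  // not NUL-terminated in general
    std::string copy(view);                     // owns a NUL-terminated buffer
    const char* c_message = copy.c_str();       // valid while `copy` is alive

In the XCTAssertTrue calls, the temporary std::string lives until the end of the full expression, so passing its c_str() to the %s format is safe.
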
diff --git a/tensorflow/lite/delegates/gpu/metal/compute_task.h b/tensorflow/lite/delegates/gpu/metal/compute_task.h
index 611185b8fc1..b03a8436077 100644
--- a/tensorflow/lite/delegates/gpu/metal/compute_task.h
+++ b/tensorflow/lite/delegates/gpu/metal/compute_task.h
@@ -31,12 +31,12 @@ limitations under the License.
 @interface TFLComputeTask : NSObject
 
 /// Returns an ok status, or an error if the shader can't be compiled.
-- (::tflite::gpu::Status)compileWithDevice:(id<MTLDevice>)device
-                            taskDescriptor:(::tflite::gpu::metal::ComputeTaskDescriptorPtr)desc
-                            runtimeOptions:(const ::tflite::gpu::metal::RuntimeOptions&)options;
+- (absl::Status)compileWithDevice:(id<MTLDevice>)device
+                   taskDescriptor:(::tflite::gpu::metal::ComputeTaskDescriptorPtr)desc
+                   runtimeOptions:(const ::tflite::gpu::metal::RuntimeOptions&)options;
 
 /// Updates dimensions for inputs/outputs/intermediate tensors
-- (::tflite::gpu::Status)
+- (absl::Status)
     setInputDimensionsWithDevice:(id<MTLDevice>)device
                       dimensions:(std::map<::tflite::gpu::ValueId, ::tflite::gpu::BHWC>*)dimensions;
 
@@ -50,12 +50,11 @@ limitations under the License.
 /// @param sharedBufferIds contains the shared buffer id for each tensor usage record id.
 /// @param sharedBuffers contains Metal handles to the allocated buffers for each shared buffer id.
 /// TODO(ypisarchyk): probably we can decrease the number of parameters here
-- (::tflite::gpu::Status)assignBuffers:(std::map<::tflite::gpu::ValueId, id<MTLBuffer>>*)buffers
-                             outputIds:(const std::vector<::tflite::gpu::ValueId>&)outputIds
-                        usageRecordIds:
-                            (const std::map<::tflite::gpu::ValueId, size_t>&)usageRecordIds
-                       sharedBufferIds:(const std::vector<size_t>&)sharedBufferIds
-                         sharedBuffers:(const std::vector<id<MTLBuffer>>&)sharedBuffers;
+- (absl::Status)assignBuffers:(std::map<::tflite::gpu::ValueId, id<MTLBuffer>>*)buffers
+                    outputIds:(const std::vector<::tflite::gpu::ValueId>&)outputIds
+               usageRecordIds:(const std::map<::tflite::gpu::ValueId, size_t>&)usageRecordIds
+              sharedBufferIds:(const std::vector<size_t>&)sharedBufferIds
+                sharedBuffers:(const std::vector<id<MTLBuffer>>&)sharedBuffers;
 
 - (void)encodeWithEncoder:(id<MTLComputeCommandEncoder>)encoder
        inputOutputBuffers:
diff --git a/tensorflow/lite/delegates/gpu/metal/compute_task.mm b/tensorflow/lite/delegates/gpu/metal/compute_task.mm
index 24b89c1b11c..d3e3466ca6f 100644
--- a/tensorflow/lite/delegates/gpu/metal/compute_task.mm
+++ b/tensorflow/lite/delegates/gpu/metal/compute_task.mm
@@ -29,8 +29,6 @@ limitations under the License.
 
 using ::tflite::gpu::AlignByN;
 using ::tflite::gpu::BHWC;
-using ::tflite::gpu::InternalError;
-using ::tflite::gpu::InvalidArgumentError;
 using ::tflite::gpu::HalfBits;
 using ::tflite::gpu::metal::ComputeTaskDescriptorPtr;
 using ::tflite::gpu::metal::CreateComputeProgram;
@@ -38,8 +36,6 @@ using ::tflite::gpu::metal::DispatchParamsFunction;
 using ::tflite::gpu::metal::OutputDimensions;
 using ::tflite::gpu::metal::RuntimeOptions;
 using ::tflite::gpu::metal::UniformsFunction;
-using ::tflite::gpu::OkStatus;
-using ::tflite::gpu::Status;
 using ::tflite::gpu::uint3;
 using ::tflite::gpu::ValueId;
 
@@ -70,9 +66,9 @@ using ::tflite::gpu::ValueId;
   std::string _description;
 }
 
-- (Status)compileWithDevice:(id<MTLDevice>)device
-             taskDescriptor:(ComputeTaskDescriptorPtr)desc
-             runtimeOptions:(const RuntimeOptions&)options {
+- (absl::Status)compileWithDevice:(id<MTLDevice>)device
+                   taskDescriptor:(ComputeTaskDescriptorPtr)desc
+                   runtimeOptions:(const RuntimeOptions&)options {
   NSString* barrier;
   // simdgroup_barrier is supported on macOS 10.13+ and Metal Shading Language version 2.0
   if (@available(macOS 10.13, iOS 10.0, tvOS 10.0, *)) {
@@ -123,7 +119,7 @@ using ::tflite::gpu::ValueId;
   id<MTLComputePipelineState> program;
   RETURN_IF_ERROR(CreateComputeProgram(device, code, @"ComputeFunction", macros, &program));
   if (!program) {
-    return InternalError("Unknown shader compilation error");
+    return absl::InternalError("Unknown shader compilation error");
   }
   for (auto& buffer : desc->input_buffers) {
     _inputBuffers.emplace_back(InputBuffer{buffer.id, nil});
@@ -148,12 +144,13 @@ using ::tflite::gpu::ValueId;
   _resizeFunction = desc->resize_function;
   _program = program;
   _description = desc->description;
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-- (Status)setInputDimensionsWithDevice:(id<MTLDevice>)device
-                            dimensions:
-                                (std::map<::tflite::gpu::ValueId, ::tflite::gpu::BHWC>*)dimensions {
+- (absl::Status)setInputDimensionsWithDevice:(id<MTLDevice>)device
+                                  dimensions:
+                                      (std::map<::tflite::gpu::ValueId, ::tflite::gpu::BHWC>*)
+                                          dimensions {
   // Recalculate output buffer dimensions
   for (auto& buffer : _outputBuffers) {
     auto outputDimensions = buffer.dimensionsFunction(*dimensions);
@@ -180,23 +177,23 @@ using ::tflite::gpu::ValueId;
     error += "is larger than the MTLDevice can support: ";
     error += std::to_string(threadsPerGroup.width) + ", " + std::to_string(threadsPerGroup.height) +
              ", " + std::to_string(threadsPerGroup.depth);
-    return InvalidArgumentError(error);
+    return absl::InvalidArgumentError(error);
   }
   _groupsCount = workGroups.second;
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-- (Status)assignBuffers:(std::map<::tflite::gpu::ValueId, id<MTLBuffer>>*)buffers
-              outputIds:(const std::vector<::tflite::gpu::ValueId>&)outputIds
-         usageRecordIds:(const std::map<ValueId, size_t>&)usageRecordIds
-        sharedBufferIds:(const std::vector<size_t>&)sharedBufferIds
-          sharedBuffers:(const std::vector<id<MTLBuffer>>&)sharedBuffers {
+- (absl::Status)assignBuffers:(std::map<::tflite::gpu::ValueId, id<MTLBuffer>>*)buffers
+                    outputIds:(const std::vector<::tflite::gpu::ValueId>&)outputIds
+               usageRecordIds:(const std::map<ValueId, size_t>&)usageRecordIds
+              sharedBufferIds:(const std::vector<size_t>&)sharedBufferIds
+                sharedBuffers:(const std::vector<id<MTLBuffer>>&)sharedBuffers {
   for (auto& buffer : _outputBuffers) {
     // If the buffer is intermediate: set its metalHandle from sharedBuffers
     if (std::find(outputIds.begin(), outputIds.end(), buffer.uid) == outputIds.end()) {
       auto usageRecordIt = usageRecordIds.find(buffer.uid);
       if (usageRecordIt == usageRecordIds.end()) {
-        return InternalError("TensorUsageRecord for intermediate tensor is not found.");
+        return absl::InternalError("TensorUsageRecord for intermediate tensor is not found.");
       }
       buffer.metalHandle = sharedBuffers.at(sharedBufferIds.at(usageRecordIt->second));
       (*buffers)[buffer.uid] = buffer.metalHandle;
@@ -207,7 +204,7 @@ using ::tflite::gpu::ValueId;
   for (auto& buffer : _inputBuffers) {
     buffer.metalHandle = (*buffers)[buffer.uid];
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 - (void)encodeWithEncoder:(id<MTLComputeCommandEncoder>)encoder
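
RETURN_IF_ERROR keeps working across the migration because it only relies on ok() and on returning the status object, both of which absl::Status provides. The project defines its own version of this macro in the delegate's status headers; a sketch of the usual shape, for illustration only:

    #define RETURN_IF_ERROR(expr)              \
      do {                                     \
        const absl::Status _status = (expr);   \
        if (!_status.ok()) return _status;     \
      } while (0)

Any function whose body uses the macro must itself return absl::Status, which is why compileWithDevice, setInputDimensionsWithDevice, and assignBuffers all change their return types together.
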
diff --git a/tensorflow/lite/delegates/gpu/metal/inference_context.h b/tensorflow/lite/delegates/gpu/metal/inference_context.h
index 8569a4ed009..97a6f3b3b18 100644
--- a/tensorflow/lite/delegates/gpu/metal/inference_context.h
+++ b/tensorflow/lite/delegates/gpu/metal/inference_context.h
@@ -50,12 +50,12 @@ limitations under the License.
 /// @return Status signaling whether the model was compiled successfully.
 /// @discussion Previously added operations are distilled into a sorted list of sets of
 ///             ComputeTaskDescriptors; each set can be fused into a single GPU task.
-- (::tflite::gpu::Status)
-    compileModelWithDevice:(id<MTLDevice>)device
-           taskDescriptors:
-               (const std::vector<::tflite::gpu::metal::ComputeTaskDescriptorPtr>&)taskDescriptors
-           outputBufferIDs:(const std::vector<::tflite::gpu::ValueId>&)outputBufferIDs
-            runtimeOptions:(const ::tflite::gpu::metal::RuntimeOptions&)options;
+- (absl::Status)compileModelWithDevice:(id<MTLDevice>)device
+                       taskDescriptors:
+                           (const std::vector<::tflite::gpu::metal::ComputeTaskDescriptorPtr>&)
+                               taskDescriptors
+                       outputBufferIDs:(const std::vector<::tflite::gpu::ValueId>&)outputBufferIDs
+                        runtimeOptions:(const ::tflite::gpu::metal::RuntimeOptions&)options;
 
 /// Creates intermediate buffers. The model is ready to be used after this call.
 /// @param inputDimensions Used to create resources: shaders, buffers.
@@ -63,7 +63,7 @@ limitations under the License.
 /// @return Status signaling whether the intermediate buffers were created successfully.
 /// @discussion The operation is intended to be lightweight with minimal overhead. A preceding
 ///             call to compileModelWithDevice() must be made with the proper device parameter set.
-- (::tflite::gpu::Status)
+- (absl::Status)
     setInputDimensions:(const std::map<::tflite::gpu::ValueId, ::tflite::gpu::BHWC>&)inputDimensions
       outputDimensions:(std::map<::tflite::gpu::ValueId, ::tflite::gpu::BHWC>*)outputDimensions
        taskDescriptors:
diff --git a/tensorflow/lite/delegates/gpu/metal/inference_context.mm b/tensorflow/lite/delegates/gpu/metal/inference_context.mm
index fb3a51f4694..d5589ae8ab4 100644
--- a/tensorflow/lite/delegates/gpu/metal/inference_context.mm
+++ b/tensorflow/lite/delegates/gpu/metal/inference_context.mm
@@ -32,9 +32,6 @@ limitations under the License.
 using ::tflite::gpu::BHWC;
 using ::tflite::gpu::metal::ComputeTaskDescriptorPtr;
 using ::tflite::gpu::metal::RuntimeOptions;
-using ::tflite::gpu::InternalError;
-using ::tflite::gpu::OkStatus;
-using ::tflite::gpu::Status;
 using ::tflite::gpu::ValueId;
 using ::tflite::gpu::AlignByN;
 using ::tflite::gpu::HalfBits;
@@ -48,10 +45,10 @@ using ::tflite::gpu::TensorUsageRecord;
   RuntimeOptions _options;
 }
 
-- (Status)compileModelWithDevice:(id<MTLDevice>)device
-                 taskDescriptors:(const std::vector<ComputeTaskDescriptorPtr>&)taskDescriptors
-                 outputBufferIDs:(const std::vector<ValueId>&)requestedOutputBufferIDs
-                  runtimeOptions:(const RuntimeOptions&)options {
+- (absl::Status)compileModelWithDevice:(id<MTLDevice>)device
+                       taskDescriptors:(const std::vector<ComputeTaskDescriptorPtr>&)taskDescriptors
+                       outputBufferIDs:(const std::vector<ValueId>&)requestedOutputBufferIDs
+                        runtimeOptions:(const RuntimeOptions&)options {
   _device = device;
   _outputIds = requestedOutputBufferIDs;
   _options = options;
@@ -61,12 +58,12 @@ using ::tflite::gpu::TensorUsageRecord;
     RETURN_IF_ERROR([task compileWithDevice:_device taskDescriptor:node runtimeOptions:_options]);
     _computeTasks.emplace_back(task);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-- (Status)setInputDimensions:(const std::map<ValueId, BHWC>&)inputDimensions
-            outputDimensions:(std::map<ValueId, BHWC>*)outputDimensions
-             taskDescriptors:(const std::vector<ComputeTaskDescriptorPtr>&)taskDescriptors {
+- (absl::Status)setInputDimensions:(const std::map<ValueId, BHWC>&)inputDimensions
+                  outputDimensions:(std::map<ValueId, BHWC>*)outputDimensions
+                   taskDescriptors:(const std::vector<ComputeTaskDescriptorPtr>&)taskDescriptors {
   // These maps contain all input/output/intermediate buffers shared across the model.
   std::map<ValueId, BHWC> dimensions = inputDimensions;
   std::map<ValueId, id<MTLBuffer>> buffers;
@@ -97,7 +94,7 @@ using ::tflite::gpu::TensorUsageRecord;
       if (!usageRecordIds.count(outputId)) {
         const auto it = dimensions.find(outputId);
         if (it == dimensions.end()) {
-          return InternalError("Dimensions for intermediate tensor not found.");
+          return absl::InternalError("Dimensions for intermediate tensor not found.");
         }
         usageRecordIds[outputId] = usageRecords.size();
         usageRecords.emplace_back(it->second.w * it->second.h * AlignByN(it->second.c, 4), i, i);
@@ -133,14 +130,14 @@ using ::tflite::gpu::TensorUsageRecord;
       error += std::to_string(assignment.object_ids[i]) +
                " with size: " + std::to_string(bufferSize) +
                " exceeds MTLDevice maxBufferLength: " + std::to_string([_device maxBufferLength]);
-      return ::tflite::gpu::ResourceExhaustedError(error);
+      return absl::ResourceExhaustedError(error);
     }
 #endif
 #if defined(__MAC_10_12) && __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_12
     if ([_device currentAllocatedSize] + bufferSize > [_device recommendedMaxWorkingSetSize]) {
       std::string error("Out of memory in MTLBuffer allocation. Currently allocated: ");
       error += std::to_string([_device currentAllocatedSize]);
-      return ::tflite::gpu::ResourceExhaustedError(error);
+      return absl::ResourceExhaustedError(error);
     }
 #endif
 
@@ -154,7 +151,7 @@ using ::tflite::gpu::TensorUsageRecord;
                         sharedBufferIds:assignment.object_ids
                           sharedBuffers:sharedBuffers]);
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 - (void)encodeWithEncoder:(id<MTLComputeCommandEncoder>)commandEncoder
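
The error paths above build messages with repeated string concatenation; with the switch to absl, absl::StrCat from absl/strings/str_cat.h would be an equivalent, arguably tidier option. A hypothetical sketch, not part of this change, where currently_allocated_bytes stands in for [_device currentAllocatedSize]:

    #include "absl/strings/str_cat.h"

    return absl::ResourceExhaustedError(absl::StrCat(
        "Out of memory in MTLBuffer allocation. Currently allocated: ",
        currently_allocated_bytes));
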
diff --git a/tensorflow/lite/delegates/gpu/metal/inference_context_test.mm b/tensorflow/lite/delegates/gpu/metal/inference_context_test.mm
index 14ea40c68b4..4d9e54a0ca0 100644
--- a/tensorflow/lite/delegates/gpu/metal/inference_context_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/inference_context_test.mm
@@ -17,6 +17,8 @@ limitations under the License.
 
 #import <XCTest/XCTest.h>
 
+#include <string>
+
 #include "tensorflow/lite/delegates/gpu/common/shape.h"
 #include "tensorflow/lite/delegates/gpu/common/types.h"
 #include "tensorflow/lite/delegates/gpu/common/util.h"
@@ -170,9 +172,9 @@ static std::vector<ComputeTaskDescriptorPtr> MulArrayLinkable(
   std::map<ValueId, TensorFloat32> inputs{{inputBufferID, input}};
   std::map<ValueId, TensorFloat32> outputs{{outputBufferID, {}}};
   auto status = RunGraph(graph, _device, inputs, &outputs);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({2.2f, 3.3f, 4.4f}, outputs[outputBufferID].data, 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testImmutableShaderOutput {
@@ -187,9 +189,9 @@ static std::vector<ComputeTaskDescriptorPtr> MulArrayLinkable(
   std::map<ValueId, TensorFloat32> inputs{{inputBufferID, input}};
   std::map<ValueId, TensorFloat32> outputs{{outputBufferID, {}}};
   auto status = RunGraph(graph, _device, inputs, &outputs);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1, 4, 9, 16, 25, 36, 49}, outputs[outputBufferID].data, 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testUniformShaderOutput {
@@ -203,9 +205,9 @@ static std::vector<ComputeTaskDescriptorPtr> MulArrayLinkable(
   std::map<ValueId, TensorFloat32> inputs{{inputBufferID, input}};
   std::map<ValueId, TensorFloat32> outputs{{outputBufferID, {}}};
   auto status = RunGraph(graph, _device, inputs, &outputs);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({2, 4, 6}, outputs[outputBufferID].data, 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testUniformAndImmutableShaderOutput {
@@ -222,9 +224,9 @@ static std::vector<ComputeTaskDescriptorPtr> MulArrayLinkable(
   std::map<ValueId, TensorFloat32> inputs{{inputBufferID, input}};
   std::map<ValueId, TensorFloat32> outputs{{outputBufferID, {}}};
   auto status = RunGraph(graph, _device, inputs, &outputs);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({2, 6, 12, 20, 26, 38, 52}, outputs[outputBufferID].data, 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 @end
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/add_test.mm b/tensorflow/lite/delegates/gpu/metal/kernels/add_test.mm
index 10481b2a867..540308f23b4 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/add_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/add_test.mm
@@ -17,6 +17,7 @@ limitations under the License.
 
 #import <XCTest/XCTest.h>
 
+#include <string>
 #include <vector>
 
 #include "tensorflow/lite/delegates/gpu/common/operations.h"
@@ -65,9 +66,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   XCTAssertTrue(model.PopulateTensor(0, {-2.0, 0.2, 0.7, 0.8}));
   XCTAssertTrue(model.PopulateTensor(1, {0.1, 0.2, 0.3, 0.5}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({-1.9, 0.4, 1.0, 1.3}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testInputTensorAndScalar {
@@ -85,9 +86,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::ADD), std::move(attr)}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {-2.0, 0.2, 0.7, 0.8, 1.1, 2.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({-1.9, 0.3, 0.8, 0.9, 1.2, 2.1}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testInputTensorWithConstantBroadcast {
@@ -112,10 +113,10 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::ADD), std::move(attr)}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status =
       CompareVectors({11.0, 22.0, 13.0, 24.0, 15.0, 26.0, 17.0, 28.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 @end
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/concat_test.mm b/tensorflow/lite/delegates/gpu/metal/kernels/concat_test.mm
index b67c1ca839c..195a2986628 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/concat_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/concat_test.mm
@@ -17,6 +17,7 @@ limitations under the License.
 
 #import <XCTest/XCTest.h>
 
+#include <string>
 #include <vector>
 
 #include "tensorflow/lite/delegates/gpu/common/operations.h"
@@ -66,9 +67,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   XCTAssertTrue(model.PopulateTensor(0, {1, 3, 5, 7}));
   XCTAssertTrue(model.PopulateTensor(1, {2, 4, 6, 8}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1, 2, 3, 4, 5, 6, 7, 8}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testTwoInputTensorsByAlignedChannel {
@@ -92,9 +93,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   XCTAssertTrue(model.PopulateTensor(0, {1, 2, 3, 4}));
   XCTAssertTrue(model.PopulateTensor(1, {5, 6, 7, 8}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1, 2, 3, 4, 5, 6, 7, 8}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testTwoInputTensorsByHeight {
@@ -118,9 +119,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   XCTAssertTrue(model.PopulateTensor(0, {1, 2}));
   XCTAssertTrue(model.PopulateTensor(1, {3, 4, 5, 6}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1, 2, 3, 4, 5, 6}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testTwoInputTensorsByWidth {
@@ -144,8 +145,8 @@ using ::tflite::gpu::metal::SingleOpModel;
   XCTAssertTrue(model.PopulateTensor(0, {1, 4}));
   XCTAssertTrue(model.PopulateTensor(1, {2, 3, 5, 6}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1, 2, 3, 4, 5, 6}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 @end
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/conv_test.mm b/tensorflow/lite/delegates/gpu/metal/kernels/conv_test.mm
index 8f1b24a4735..a74b22cf13e 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/conv_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/conv_test.mm
@@ -17,6 +17,7 @@ limitations under the License.
 
 #import <XCTest/XCTest.h>
 
+#include <string>
 #include <vector>
 
 #include "tensorflow/lite/delegates/gpu/common/operations.h"
@@ -82,9 +83,9 @@ using ::tflite::gpu::metal::SingleOpModel;
                       {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 1, 1, 1}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({4, 8, 4, 8, 2, 4, 2, 4}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testO1H2W2I1Stride1x1Dilation2x2 {
@@ -120,9 +121,9 @@ using ::tflite::gpu::metal::SingleOpModel;
                       {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 1, 1, 1, 1, 1, 1, 1, 1}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({10}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testO1H3W3I1Stride1x1Dilation1x1 {
@@ -158,9 +159,9 @@ using ::tflite::gpu::metal::SingleOpModel;
                       {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 1, 1, 1}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({11}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testO2H1W1I2Stride1x1Dilation1x1 {
@@ -196,9 +197,9 @@ using ::tflite::gpu::metal::SingleOpModel;
                       {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 1, 1, 1}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({4, 8, 4, 8}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testO1H1W1I1Stride2x2Dilation1x1 {
@@ -235,9 +236,9 @@ using ::tflite::gpu::metal::SingleOpModel;
                       {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 0, 2, 0, 0, 0, 4, 0, 8}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({2, 4, 8, 16}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 @end
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/custom_registry.cc b/tensorflow/lite/delegates/gpu/metal/kernels/custom_registry.cc
index 228583c6e30..620a4581c52 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/custom_registry.cc
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/custom_registry.cc
@@ -26,12 +26,12 @@ namespace tflite {
 namespace gpu {
 namespace metal {
 
-Status RegisterCustomOps(const GraphFloat32& graph, const Node* node,
-                         const std::vector<ValueId>& inputs,
-                         const std::vector<ValueId>& outputs,
-                         const RuntimeOptions& options,
-                         std::vector<ComputeTaskDescriptorPtr>* tasks) {
-  return UnimplementedError("Unsupported op: " + node->operation.type);
+absl::Status RegisterCustomOps(const GraphFloat32& graph, const Node* node,
+                               const std::vector<ValueId>& inputs,
+                               const std::vector<ValueId>& outputs,
+                               const RuntimeOptions& options,
+                               std::vector<ComputeTaskDescriptorPtr>* tasks) {
+  return absl::UnimplementedError("Unsupported op: " + node->operation.type);
 }
 
 }  // namespace metal
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/custom_registry.h b/tensorflow/lite/delegates/gpu/metal/kernels/custom_registry.h
index bef2ba20def..eee1632a644 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/custom_registry.h
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/custom_registry.h
@@ -28,11 +28,11 @@ namespace gpu {
 namespace metal {
 
 // Registers custom operations.
-Status RegisterCustomOps(const GraphFloat32& graph, const Node* node,
-                         const std::vector<ValueId>& inputs,
-                         const std::vector<ValueId>& outputs,
-                         const RuntimeOptions& options,
-                         std::vector<ComputeTaskDescriptorPtr>* tasks);
+absl::Status RegisterCustomOps(const GraphFloat32& graph, const Node* node,
+                               const std::vector<ValueId>& inputs,
+                               const std::vector<ValueId>& outputs,
+                               const RuntimeOptions& options,
+                               std::vector<ComputeTaskDescriptorPtr>* tasks);
 
 }  // namespace metal
 }  // namespace gpu
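
Because the stub registry reports absl::UnimplementedError for every op, a caller can distinguish "no custom kernel available" from a genuine failure using the canonical-code helpers in absl/status/status.h. A hypothetical fallback sketch (the fallback path itself is illustrative, not part of this change):

    absl::Status status =
        RegisterCustomOps(graph, node, inputs, outputs, options, &tasks);
    if (absl::IsUnimplemented(status)) {
      // No custom kernel for this op; fall back to the built-in kernels.
    } else if (!status.ok()) {
      return status;  // A real registration error: propagate it.
    }
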
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/depthwise_conv_test.mm b/tensorflow/lite/delegates/gpu/metal/kernels/depthwise_conv_test.mm
index 5f262238464..d76507253a9 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/depthwise_conv_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/depthwise_conv_test.mm
@@ -17,6 +17,7 @@ limitations under the License.
 
 #import <XCTest/XCTest.h>
 
+#include <string>
 #include <vector>
 
 #include "tensorflow/lite/delegates/gpu/common/operations.h"
@@ -83,9 +84,9 @@ using ::tflite::gpu::metal::SingleOpModel;
                       {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 3}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({2, 4, 12, 16}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testO2H1W1I1Strides2x2Dilation1x1 {
@@ -122,9 +123,9 @@ using ::tflite::gpu::metal::SingleOpModel;
                       {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 0, 1, 1, 0, 1, 1, 0, 1}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1, 3, 1, 3, 1, 3, 1, 3}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testO2H2W2I1Strides1x1Dilation2x2 {
@@ -161,9 +162,9 @@ using ::tflite::gpu::metal::SingleOpModel;
                       {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 0, 1, 1, 0, 1, 1, 0, 1}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({10, 26}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 @end
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/elementwise_test.mm b/tensorflow/lite/delegates/gpu/metal/kernels/elementwise_test.mm
index 2c3f6b942ac..6b30bc5c703 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/elementwise_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/elementwise_test.mm
@@ -17,6 +17,7 @@ limitations under the License.
 
 #import <XCTest/XCTest.h>
 
+#include <string>
 #include <vector>
 
 #include "tensorflow/lite/delegates/gpu/common/operations.h"
@@ -59,9 +60,9 @@ TensorRef<BHWC> GetTensorRef(int ref, const BHWC& shape) {
                       /*outputs=*/{GetTensorRef(1, shape)});
   XCTAssertTrue(model.PopulateTensor(0, {0.0, -6.2, 2.0, 4.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({0.0, 6.2, 2.0, 4.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testCos {
@@ -72,9 +73,9 @@ TensorRef<BHWC> GetTensorRef(int ref, const BHWC& shape) {
                       /*outputs=*/{GetTensorRef(1, shape)});
   XCTAssertTrue(model.PopulateTensor(0, {0.0, 3.1415926, -3.1415926, 1}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1.0, -1.0, -1.0, 0.540302}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testDiv {
@@ -86,9 +87,9 @@ TensorRef<BHWC> GetTensorRef(int ref, const BHWC& shape) {
   XCTAssertTrue(model.PopulateTensor(0, {0.0, -6.2, 2.0, 4.0}));
   XCTAssertTrue(model.PopulateTensor(1, {1.0, 2.0, -0.5, 4.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({0.0, -3.1, -4.0, 1.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testExp {
@@ -99,11 +100,11 @@ TensorRef<BHWC> GetTensorRef(int ref, const BHWC& shape) {
                       /*outputs=*/{GetTensorRef(1, shape)});
   XCTAssertTrue(model.PopulateTensor(0, {0.0f, 1.0f, -1.0f, 100.0f, -100.0f, 0.01f, -0.01f}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({std::exp(0.0f), std::exp(1.0f), std::exp(-1.0f), std::exp(100.0f),
                            std::exp(-100.0f), std::exp(0.01f), std::exp(-0.01f)},
                           model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testHardSwish {
@@ -114,10 +115,10 @@ TensorRef<BHWC> GetTensorRef(int ref, const BHWC& shape) {
                       /*outputs=*/{GetTensorRef(1, shape)});
   XCTAssertTrue(model.PopulateTensor(0, {-4.5f, -3.0f, -1.5f, 0.0f, 1.5f, 3.0f, 4.5f}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status =
       CompareVectors({0.0f, 0.0f, -0.375f, 0.0f, 1.125f, 3.f, 4.5f}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testLog {
@@ -128,9 +129,9 @@ TensorRef<BHWC> GetTensorRef(int ref, const BHWC& shape) {
                       /*outputs=*/{GetTensorRef(1, shape)});
   XCTAssertTrue(model.PopulateTensor(0, {1.0, 3.1415926, 1.0, 1.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({0.0, 1.14473, 0.0, 0.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testMaximum {
@@ -142,9 +143,9 @@ TensorRef<BHWC> GetTensorRef(int ref, const BHWC& shape) {
   XCTAssertTrue(model.PopulateTensor(0, {0.0, -6.2, 2.0, -3.0}));
   XCTAssertTrue(model.PopulateTensor(1, {1.0, 2.0, 3.0, -2.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1.0, 2.0, 3.0, -2.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testMaximumWithScalar {
@@ -157,9 +158,9 @@ TensorRef<BHWC> GetTensorRef(int ref, const BHWC& shape) {
                       /*outputs=*/{GetTensorRef(1, shape)});
   XCTAssertTrue(model.PopulateTensor(0, {0.0, -6.2, 2.0, -3.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({0.0, -1.0, 2.0, -1.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testMinimum {
@@ -171,9 +172,9 @@ TensorRef<BHWC> GetTensorRef(int ref, const BHWC& shape) {
   XCTAssertTrue(model.PopulateTensor(0, {0.0, -6.2, 2.0, -3.0}));
   XCTAssertTrue(model.PopulateTensor(1, {1.0, 2.0, 3.0, -2.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({0.0, -6.2, 2.0, -3.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testMinimumWithScalar {
@@ -186,9 +187,9 @@ TensorRef<BHWC> GetTensorRef(int ref, const BHWC& shape) {
                       /*outputs=*/{GetTensorRef(1, shape)});
   XCTAssertTrue(model.PopulateTensor(0, {0.0, -6.2, 2.0, -3.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({-1.0, -6.2, -1.0, -3.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testPow {
@@ -200,9 +201,9 @@ TensorRef<BHWC> GetTensorRef(int ref, const BHWC& shape) {
   XCTAssertTrue(model.PopulateTensor(0, {0.0, 1.0, 2.0, 4.0}));
   XCTAssertTrue(model.PopulateTensor(1, {1.0, 2.0, 3.0, 4.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({0.0, 1.0, 8.0, 256.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testRsqrt {
@@ -213,9 +214,9 @@ TensorRef<BHWC> GetTensorRef(int ref, const BHWC& shape) {
                       /*outputs=*/{GetTensorRef(1, shape)});
   XCTAssertTrue(model.PopulateTensor(0, {1.0, 2.0, 4.0, 9.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1.0, 0.707106, 0.5, 0.333333}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testSigmoid {
@@ -226,9 +227,9 @@ TensorRef<BHWC> GetTensorRef(int ref, const BHWC& shape) {
                       /*outputs=*/{GetTensorRef(1, shape)});
   XCTAssertTrue(model.PopulateTensor(0, {0.0, -6.0, 2.0, 4.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({0.5, 0.002473, 0.880797, 0.982014}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testSin {
@@ -239,9 +240,9 @@ TensorRef<BHWC> GetTensorRef(int ref, const BHWC& shape) {
                       /*outputs=*/{GetTensorRef(1, shape)});
   XCTAssertTrue(model.PopulateTensor(0, {0.0, 3.1415926, -3.1415926, 1.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({0.0, 0.0, 0.0, 0.841471}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testSqrt {
@@ -252,9 +253,9 @@ TensorRef<BHWC> GetTensorRef(int ref, const BHWC& shape) {
                       /*outputs=*/{GetTensorRef(1, shape)});
   XCTAssertTrue(model.PopulateTensor(0, {0.0, 1.0, 2.0, 4.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({0.0, 1.0, 1.414213, 2.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testSquare {
@@ -265,9 +266,9 @@ TensorRef<BHWC> GetTensorRef(int ref, const BHWC& shape) {
                       /*outputs=*/{GetTensorRef(1, shape)});
   XCTAssertTrue(model.PopulateTensor(0, {1.0, 2.0, 0.5, -3.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1.0, 4.0, 0.25, 9.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testSquaredDiff {
@@ -279,9 +280,9 @@ TensorRef<BHWC> GetTensorRef(int ref, const BHWC& shape) {
   XCTAssertTrue(model.PopulateTensor(0, {0.0, 2.0, 2.0, 4.0}));
   XCTAssertTrue(model.PopulateTensor(1, {1.0, 1.0, 5.0, 4.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1.0, 1.0, 9.0, 0.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testSub {
@@ -293,9 +294,9 @@ TensorRef<BHWC> GetTensorRef(int ref, const BHWC& shape) {
   XCTAssertTrue(model.PopulateTensor(0, {0.0, -6.2, 2.0, 4.0}));
   XCTAssertTrue(model.PopulateTensor(1, {1.0, 2.0, 3.0, 4.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({-1.0, -8.2, -1.0, 0.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testTanh {
@@ -306,9 +307,9 @@ TensorRef<BHWC> GetTensorRef(int ref, const BHWC& shape) {
                       /*outputs=*/{GetTensorRef(1, shape)});
   XCTAssertTrue(model.PopulateTensor(0, {0.0, -6.0, 2.0, 4.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({0.0, -0.999987, 0.964027, 0.999329}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testMulBroadcastChannels {
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/fully_connected_test.mm b/tensorflow/lite/delegates/gpu/metal/kernels/fully_connected_test.mm
index 6d3a3e697b8..e57f9aa84e2 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/fully_connected_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/fully_connected_test.mm
@@ -17,6 +17,7 @@ limitations under the License.
 
 #import <XCTest/XCTest.h>
 
+#include <string>
 #include <vector>
 
 #include "tensorflow/lite/delegates/gpu/common/operations.h"
@@ -75,9 +76,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::FULLY_CONNECTED), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 2}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({6, 13, 20, 27}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 @end
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/max_unpooling_test.mm b/tensorflow/lite/delegates/gpu/metal/kernels/max_unpooling_test.mm
index cacd501f0bd..cf4aacf724f 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/max_unpooling_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/max_unpooling_test.mm
@@ -17,6 +17,7 @@ limitations under the License.
 
 #import <XCTest/XCTest.h>
 
+#include <string>
 #include <vector>
 
 #include "tensorflow/lite/delegates/gpu/common/operations.h"
@@ -72,10 +73,10 @@ using ::tflite::gpu::metal::SingleOpModel;
   XCTAssertTrue(model.PopulateTensor(0, {1, 2, 3, 4}));
   XCTAssertTrue(model.PopulateTensor(1, {0, 0, 0, 0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status =
       CompareVectors({1, 0, 2, 0, 0, 0, 0, 0, 3, 0, 4, 0, 0, 0, 0, 0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 @end
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/mean_test.mm b/tensorflow/lite/delegates/gpu/metal/kernels/mean_test.mm
index 69eed7d86b0..67325c1adb7 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/mean_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/mean_test.mm
@@ -17,6 +17,7 @@ limitations under the License.
 
 #import <XCTest/XCTest.h>
 
+#include <string>
 #include <vector>
 
 #include "tensorflow/lite/delegates/gpu/common/operations.h"
@@ -62,9 +63,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::MEAN), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({2.5}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 @end
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/mul_test.mm b/tensorflow/lite/delegates/gpu/metal/kernels/mul_test.mm
index 2a1054d73eb..d881950c831 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/mul_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/mul_test.mm
@@ -17,6 +17,7 @@ limitations under the License.
 
 #import <XCTest/XCTest.h>
 
+#include <string>
 #include <vector>
 
 #include "tensorflow/lite/delegates/gpu/common/operations.h"
@@ -63,9 +64,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::MUL), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 2, 3, 4}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({2, 4, 6, 8}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testMulLinear {
@@ -89,9 +90,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::MUL), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 2, 3, 4}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({2, 6, 6, 12}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 @end
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/padding_test.mm b/tensorflow/lite/delegates/gpu/metal/kernels/padding_test.mm
index 22fa11a89fb..9c55cfc45b0 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/padding_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/padding_test.mm
@@ -17,6 +17,7 @@ limitations under the License.
 
 #import <XCTest/XCTest.h>
 
+#include <string>
 #include <vector>
 
 #include "tensorflow/lite/delegates/gpu/common/operations.h"
@@ -78,9 +79,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::PAD), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors(expected, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)runPrepending:(const HWC&)prepend
@@ -164,9 +165,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::PAD), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1.0, 2.0, 3.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testMirrorPadChannelsOperation {
@@ -188,9 +189,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::PAD), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1.0, 2.0, 3.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/pooling_test.mm b/tensorflow/lite/delegates/gpu/metal/kernels/pooling_test.mm
index f79d53c7bd3..d2d95b30af2 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/pooling_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/pooling_test.mm
@@ -17,6 +17,7 @@ limitations under the License.
 
 #import <XCTest/XCTest.h>
 
+#include <string>
 #include <vector>
 
 #include "tensorflow/lite/delegates/gpu/common/operations.h"
@@ -73,11 +74,11 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::POOLING_2D), attr}, {input}, {output, indices});
   XCTAssertTrue(model.PopulateTensor(0, {1, 2, 1, 2, 3, 4, 3, 4, 7, 8, 7, 8, 5, 6, 5, 6}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({4, 4, 8, 8}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({3, 3, 1, 1}, model.GetOutput(1), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testPoolingMaxKernel2x2Stride2x2WithoutIndices {
@@ -101,9 +102,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::POOLING_2D), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 2, 1, 2, 3, 4, 3, 4, 7, 8, 7, 8, 5, 6, 5, 6}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({4, 4, 8, 8}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testPoolingAverageKernel2x2Stride2x2 {
@@ -127,9 +128,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::POOLING_2D), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 1, 2, 2, 1, 1, 2, 2, 3, 3, 4, 4, 3, 3, 4, 4}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1, 2, 3, 4}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 @end
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/prelu_test.mm b/tensorflow/lite/delegates/gpu/metal/kernels/prelu_test.mm
index b805ed81c76..1df08be61db 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/prelu_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/prelu_test.mm
@@ -17,6 +17,7 @@ limitations under the License.
 
 #import <XCTest/XCTest.h>
 
+#include <string>
 #include <vector>
 
 #include "tensorflow/lite/delegates/gpu/common/operations.h"
@@ -69,9 +70,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::PRELU), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {-1.0, -2.0, 1.0, 2.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({-2, -4, 1, 2}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testPReluLinearAlphaWithClip {
@@ -96,9 +97,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::PRELU), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {-1.0, -2.0, 1.0, 2.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({-2, -4, 1, 1}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testPRelu3DAlphaNoClip {
@@ -124,9 +125,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(op_type), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {0.0, -1.0, 2.0, -3.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({0, -2, 2, -6}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testPRelu3DAlphaWithClip {
@@ -152,9 +153,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(op_type), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {0.0, -1.0, 2.0, -3.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({0, -2, 1, -6}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 @end
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/relu_test.mm b/tensorflow/lite/delegates/gpu/metal/kernels/relu_test.mm
index 3687c0ecd65..52de77e0ee4 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/relu_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/relu_test.mm
@@ -17,6 +17,7 @@ limitations under the License.
 
 #import <XCTest/XCTest.h>
 
+#include <string>
 #include <vector>
 
 #include "tensorflow/lite/delegates/gpu/common/operations.h"
@@ -60,9 +61,9 @@ TensorRef<BHWC> GetTensorRef(int ref) {
   SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)}, {GetTensorRef(1)});
   XCTAssertTrue(model.PopulateTensor(0, {-6.0, 0.0, 2.0, 8.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({0.0, 0.0, 2.0, 8.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testReluClipOnly {
@@ -73,9 +74,9 @@ TensorRef<BHWC> GetTensorRef(int ref) {
   SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)}, {GetTensorRef(1)});
   XCTAssertTrue(model.PopulateTensor(0, {-6.0, 0.0, 2.0, 8.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({0.0, 0.0, 2.0, 6.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testReluAlphaOnly {
@@ -86,9 +87,9 @@ TensorRef<BHWC> GetTensorRef(int ref) {
   SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)}, {GetTensorRef(1)});
   XCTAssertTrue(model.PopulateTensor(0, {-6.0, 0.0, 2.0, 8.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({-3.0, 0.0, 2.0, 8.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testReluClipAndAlpha {
@@ -99,9 +100,9 @@ TensorRef<BHWC> GetTensorRef(int ref) {
   SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)}, {GetTensorRef(1)});
   XCTAssertTrue(model.PopulateTensor(0, {-6.0, 0.0, 2.0, 8.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({-3.0, 0.0, 2.0, 6.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 @end
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/reshape_test.mm b/tensorflow/lite/delegates/gpu/metal/kernels/reshape_test.mm
index 48d292e2a1b..684e83b2db1 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/reshape_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/reshape_test.mm
@@ -62,9 +62,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::RESHAPE), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 2, 3, 4, 5, 6}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1, 2, 3, 4, 5, 6}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testReshape3x1x2To2x1x3 {
@@ -84,9 +84,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::RESHAPE), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 2, 3, 4, 5, 6}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1, 2, 3, 4, 5, 6}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testReshape1x1x4To2x2x1 {
@@ -106,9 +106,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::RESHAPE), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 2, 3, 4}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1, 2, 3, 4}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testReshapeBatchIsUnsupported {
@@ -128,9 +128,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::RESHAPE), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 2, 3, 4}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.error_message().find("Only identical batch dimension is supported") !=
+  XCTAssertTrue(std::string(status.message()).find("Only identical batch dimension is supported") !=
                     std::string::npos,
-                @"%s", status.error_message().c_str());
+                @"%s", std::string(status.message()).c_str());
 }
 
 @end
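
Editor's note: the reshape hunk above also migrates a substring check on the error text. absl::string_view supports find() directly, so the std::string copy there is strictly needed only for the %s argument; note also that this file gets no #include <string> hunk, presumably because the header is already reachable there. A hedged alternative for the same check, using absl::StrContains from "absl/strings/match.h" (not used by this commit):

    #include "absl/status/status.h"
    #include "absl/strings/match.h"

    // Sketch only: expresses the reshape test's check without copying the
    // message into a std::string.
    bool MentionsBatchRestriction(const absl::Status& status) {
      return absl::StrContains(status.message(),
                               "Only identical batch dimension is supported");
    }
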
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/resize_test.mm b/tensorflow/lite/delegates/gpu/metal/kernels/resize_test.mm
index 49febc1d4c6..f00b2766bdc 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/resize_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/resize_test.mm
@@ -17,6 +17,7 @@ limitations under the License.
 
 #import <XCTest/XCTest.h>
 
+#include <string>
 #include <vector>
 
 #include "tensorflow/lite/delegates/gpu/common/operations.h"
@@ -65,9 +66,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1.0, 2.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 2.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testResizeBilinear1x2x1To1x4x1 {
@@ -89,9 +90,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1.0, 4.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1.0, 2.5, 4.0, 4.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testResizeBilinear2x2x1To4x4x1 {
@@ -113,11 +114,11 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1.0, 4.0, 6.0, 8.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors(
       {1.0, 2.5, 4.0, 4.0, 3.5, 4.75, 6.0, 6.0, 6.0, 7.0, 8.0, 8.0, 6.0, 7.0, 8.0, 8.0},
       model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testResizeBilinear2x2x1To3x3x1WithoutHalfPixel {
@@ -140,10 +141,10 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1.0, 1.666666, 2.0, 2.333333, 3.0, 3.333333, 3.0, 3.666666, 4.0},
                           model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testResizeBilinear2x2x1To3x3x1WithHalfPixel {
@@ -166,9 +167,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1.0, 1.5, 2.0, 2.0, 2.5, 3.0, 3.0, 3.5, 4.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testResizeNearest1x2x1To2x4x1 {
@@ -190,9 +191,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::RESIZE), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1.0, 2.0}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 2.0, 2.0}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 @end
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/slice_test.mm b/tensorflow/lite/delegates/gpu/metal/kernels/slice_test.mm
index 827f85fe00a..e0c29561f9b 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/slice_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/slice_test.mm
@@ -17,6 +17,7 @@ limitations under the License.
 
 #import <XCTest/XCTest.h>
 
+#include <string>
 #include <vector>
 
 #include "tensorflow/lite/delegates/gpu/common/operations.h"
@@ -64,9 +65,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::SLICE), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 2, 3, 4}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1, 2, 3, 4}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testSliceNoStrides {
@@ -88,9 +89,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::SLICE), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 2, 3, 4}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1, 3}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testSliceNoStridesStartOffset {
@@ -112,9 +113,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::SLICE), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 2, 3, 4}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({3, 4}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testSliceStridesByHeight {
@@ -136,9 +137,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::SLICE), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 2, 3, 4}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1, 3}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testSliceStridesByWidth {
@@ -160,9 +161,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::SLICE), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 2, 3, 4}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({2, 4}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testSliceStridesByChannels {
@@ -184,9 +185,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::SLICE), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 2, 3, 4}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({2, 4}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 @end
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/softmax_test.mm b/tensorflow/lite/delegates/gpu/metal/kernels/softmax_test.mm
index f5c4770bd8b..9196e9fe094 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/softmax_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/softmax_test.mm
@@ -17,6 +17,7 @@ limitations under the License.
 
 #import <XCTest/XCTest.h>
 
+#include <string>
 #include <vector>
 
 #include "tensorflow/lite/delegates/gpu/common/operations.h"
@@ -62,9 +63,9 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {0.1, 0.2, 0.1, 0.2}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1, 1, 1, 1}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testSoftmaxDoesNotWorkForHeightAxis {
@@ -84,7 +85,7 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {0.1, 0.2, 0.3, 0.4}));
   auto status = model.Invoke();
-  XCTAssertFalse(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertFalse(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testSoftmaxDoesNotWorkForWidthAxis {
@@ -104,7 +105,7 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {0.1, 0.2, 0.3, 0.4}));
   auto status = model.Invoke();
-  XCTAssertFalse(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertFalse(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testSoftmax1x1 {
@@ -126,11 +127,11 @@ using ::tflite::gpu::metal::SingleOpModel;
   SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input}, {output});
   XCTAssertTrue(model.PopulateTensor(0, {0.1f, 0.2f, 0.3f, 0.4f}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors(
       {std::exp(0.1f) / sum, std::exp(0.2f) / sum, std::exp(0.3f) / sum, std::exp(0.4f) / sum},
       model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 @end
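
Editor's note: the two negative softmax tests above assert only that status.ok() is false. absl::Status also carries a canonical code that a test could pin down; which code Invoke() yields for an unsupported axis is not visible in this diff, so the sketch below is an illustration rather than the commit's behavior:

    #include "absl/status/status.h"

    // Hypothetical tightening of the failure check; kUnimplemented is an
    // assumption, not taken from this commit.
    bool IsExpectedAxisFailure(const absl::Status& status) {
      return !status.ok() &&
             status.code() == absl::StatusCode::kUnimplemented;
    }
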
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/space_to_depth_test.mm b/tensorflow/lite/delegates/gpu/metal/kernels/space_to_depth_test.mm
index 6e82ebe0361..17e398817b2 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/space_to_depth_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/space_to_depth_test.mm
@@ -51,7 +51,7 @@ using ::tflite::gpu::metal::SingleOpModel;
     XCTFail(@"PopulateTensor()");
   }
   const auto status = model.Invoke();
-  if (!status.ok()) XCTFail(@"%s", status.error_message().c_str());
+  if (!status.ok()) XCTFail(@"%s", std::string(status.message()).c_str());
   const std::vector<float>& actual = model.GetOutput(0);
   const std::vector<float> expected = {1.0f, 2.0f, 3.0f, 4.0f};
   XCTAssertEqual(actual[0], expected[0]);
@@ -69,7 +69,7 @@ using ::tflite::gpu::metal::SingleOpModel;
     XCTFail(@"PopulateTensor()");
   }
   const auto status = model.Invoke();
-  if (!status.ok()) XCTFail(@"%s", status.error_message().c_str());
+  if (!status.ok()) XCTFail(@"%s", std::string(status.message()).c_str());
   const std::vector<float>& actual = model.GetOutput(0);
   const std::vector<float> expected = {1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f};
   XCTAssertEqual(actual[0], expected[0]);
@@ -94,7 +94,7 @@ using ::tflite::gpu::metal::SingleOpModel;
     XCTFail(@"PopulateTensor()");
   }
   const auto status = model.Invoke();
-  if (!status.ok()) XCTFail(@"%s", status.error_message().c_str());
+  if (!status.ok()) XCTFail(@"%s", std::string(status.message()).c_str());
   const std::vector<float>& actual = model.GetOutput(0);
   const std::vector<float> expected = {1.0f,  2.0f,  3.0f,  //
                                        4.0f,  5.0f,  6.0f,  //
@@ -126,7 +126,7 @@ using ::tflite::gpu::metal::SingleOpModel;
     XCTFail(@"PopulateTensor()");
   }
   const auto status = model.Invoke();
-  if (!status.ok()) XCTFail(@"%s", status.error_message().c_str());
+  if (!status.ok()) XCTFail(@"%s", std::string(status.message()).c_str());
   const std::vector<float>& actual = model.GetOutput(0);
   const std::vector<float> expected = {1.0f,  2.0f,  3.0f,  4.0f,   //
                                        5.0f,  6.0f,  7.0f,  8.0f,   //
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/test_util.h b/tensorflow/lite/delegates/gpu/metal/kernels/test_util.h
index 7a4066fea0a..ffa567a5a9d 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/test_util.h
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/test_util.h
@@ -45,7 +45,7 @@ class SingleOpModel {
     return true;
   }
 
-  Status Invoke();
+  absl::Status Invoke();
 
   const std::vector<float>& GetOutput(int index) const {
     return outputs_[index].data;
@@ -57,16 +57,16 @@ class SingleOpModel {
   std::vector<TensorFloat32> outputs_;
 };
 
-Status CompareVectors(const std::vector<float>& reference,
-                      const std::vector<float>& output, float max_error);
+absl::Status CompareVectors(const std::vector<float>& reference,
+                            const std::vector<float>& output, float max_error);
 
 /// Helper function that compiles previously configured graph (with added
 /// tasks), initializes graph with specified inputs, invokes and fills specified
 /// outputs
-Status RunGraph(const std::vector<ComputeTaskDescriptorPtr>& graph,
-                id<MTLDevice> device,
-                const std::map<ValueId, TensorFloat32>& inputs,
-                std::map<ValueId, TensorFloat32>* outputs);
+absl::Status RunGraph(const std::vector<ComputeTaskDescriptorPtr>& graph,
+                      id<MTLDevice> device,
+                      const std::map<ValueId, TensorFloat32>& inputs,
+                      std::map<ValueId, TensorFloat32>* outputs);
 
 }  // namespace metal
 }  // namespace gpu
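
Editor's note: with both Invoke() and CompareVectors() now returning absl::Status, they still compose through the tree's RETURN_IF_ERROR macro exactly as before the migration, since the macro only relies on ok(). A sketch, assuming the test_util.h declarations above and the RETURN_IF_ERROR macro from the delegate's common status header are in scope (the wrapper itself is illustrative):

    #include <vector>

    #include "tensorflow/lite/delegates/gpu/metal/kernels/test_util.h"

    // Illustrative wrapper: any non-OK absl::Status from Invoke() is
    // propagated by RETURN_IF_ERROR; otherwise the outputs are compared.
    absl::Status InvokeAndCompare(tflite::gpu::metal::SingleOpModel* model,
                                  const std::vector<float>& expected) {
      RETURN_IF_ERROR(model->Invoke());
      return tflite::gpu::metal::CompareVectors(expected, model->GetOutput(0),
                                                1e-6f);
    }
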
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/test_util.mm b/tensorflow/lite/delegates/gpu/metal/kernels/test_util.mm
index 3edc8669f2c..80c0e2457af 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/test_util.mm
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/test_util.mm
@@ -65,7 +65,7 @@ SingleOpModel::SingleOpModel(Operation&& operation, const std::vector<TensorRef<
   }
 }
 
-Status SingleOpModel::Invoke() {
+absl::Status SingleOpModel::Invoke() {
   std::vector<ValueId> input_ids;
   input_ids.reserve(inputs_.size());
   for (const auto& input : inputs_) {
@@ -143,16 +143,16 @@ Status SingleOpModel::Invoke() {
     RETURN_IF_ERROR(ConvertFromPHWC4(absl::MakeConstSpan(output_pointer, elements_count),
                                      output.shape, absl::MakeSpan(output.data)));
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status CompareVectors(const std::vector<float>& reference, const std::vector<float>& output,
-                      float max_error) {
+absl::Status CompareVectors(const std::vector<float>& reference, const std::vector<float>& output,
+                            float max_error) {
   if (reference.size() != output.size()) {
     const std::string message = "CompareVectors: vectors size does not match for reference: " +
                                 std::to_string(reference.size()) +
                                 " vs. output: " + std::to_string(output.size());
-    return tflite::gpu::InternalError(message);
+    return absl::InternalError(message);
   }
   for (int i = 0; i < reference.size(); i++) {
     float error = std::abs(reference[i] - output[i]);
@@ -160,15 +160,15 @@ Status CompareVectors(const std::vector<float>& reference, const std::vector<flo
       const std::string message =
           "Reference: " + std::to_string(reference[i]) + ", output: " + std::to_string(output[i]) +
           ", error: " + std::to_string(error) + ", max allowed error: " + std::to_string(max_error);
-      return tflite::gpu::InternalError(message);
+      return absl::InternalError(message);
     }
   }
-  return OkStatus();
+  return absl::OkStatus();
 }
 
-Status RunGraph(const std::vector<ComputeTaskDescriptorPtr>& nodes, id<MTLDevice> device,
-                const std::map<ValueId, TensorFloat32>& inputs,
-                std::map<ValueId, TensorFloat32>* outputs) {
+absl::Status RunGraph(const std::vector<ComputeTaskDescriptorPtr>& nodes, id<MTLDevice> device,
+                      const std::map<ValueId, TensorFloat32>& inputs,
+                      std::map<ValueId, TensorFloat32>* outputs) {
   std::vector<ValueId> inputBufferIDs;
   inputBufferIDs.reserve(inputs.size());
   for (const auto& input : inputs) {
@@ -251,7 +251,7 @@ Status RunGraph(const std::vector<ComputeTaskDescriptorPtr>& nodes, id<MTLDevice
                                      absl::MakeSpan(dst.data)));
   }
 
-  return OkStatus();
+  return absl::OkStatus();
 }
 
 }  // namespace metal
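
Editor's note: the test_util.mm hunks also swap the status constructors. The mapping used throughout the commit is one-to-one, from the old tflite::gpu helpers to the absl factory functions carrying the same canonical codes. A compact sketch of the pattern (the function itself is illustrative):

    #include <cstddef>
    #include <string>

    #include "absl/status/status.h"

    // Mapping applied throughout this commit:
    //   OkStatus()                     -> absl::OkStatus()
    //   tflite::gpu::InternalError(m)  -> absl::InternalError(m)
    //   tflite::gpu::NotFoundError(m)  -> absl::NotFoundError(m)
    //   UnimplementedError and friends follow the same scheme.
    absl::Status CheckSizesMatch(std::size_t reference_size,
                                 std::size_t output_size) {
      if (reference_size != output_size) {
        return absl::InternalError(
            "CompareVectors: vectors size does not match for reference: " +
            std::to_string(reference_size) +
            " vs. output: " + std::to_string(output_size));
      }
      return absl::OkStatus();
    }
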
diff --git a/tensorflow/lite/delegates/gpu/metal/kernels/transpose_conv_test.mm b/tensorflow/lite/delegates/gpu/metal/kernels/transpose_conv_test.mm
index c1c1193fe0e..aa9d936e455 100644
--- a/tensorflow/lite/delegates/gpu/metal/kernels/transpose_conv_test.mm
+++ b/tensorflow/lite/delegates/gpu/metal/kernels/transpose_conv_test.mm
@@ -17,6 +17,7 @@ limitations under the License.
 
 #import <XCTest/XCTest.h>
 
+#include <string>
 #include <vector>
 
 #include "tensorflow/lite/delegates/gpu/common/operations.h"
@@ -81,10 +82,10 @@ using ::tflite::gpu::metal::SingleOpModel;
                       {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 1, 1, 1}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({2, 4, 2, 4, 1, 1, 4, 8, 4, 8, 1, 1, 3, 5, 3, 5, 1, 1},
                           model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testTransposeConvO1H2W2I1Stride1x1Adjacent2x2 {
@@ -120,11 +121,11 @@ using ::tflite::gpu::metal::SingleOpModel;
                       {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 1, 1, 1, 1, 1, 1, 1, 1}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({1, 3, 3, 2, 0, 0, 4, 10, 10, 6, 0, 0, 4, 10, 10, 6, 0, 0,
                            3, 7, 7, 4, 0, 0, 0, 0,  0,  0, 0, 0, 0, 0,  0,  0, 0, 0},
                           model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testTransposeConvO1H3W3I1Stride1x1Adjacent1x1 {
@@ -160,10 +161,10 @@ using ::tflite::gpu::metal::SingleOpModel;
                       {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 1, 1, 1}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status =
       CompareVectors({7, 11, 7, 1, 7, 11, 7, 1, 4, 6, 4, 1, 1, 1, 1, 1}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testTransposeConvO2H1W1I2Stride1x1Dilation1x1 {
@@ -199,9 +200,9 @@ using ::tflite::gpu::metal::SingleOpModel;
                       {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 1, 1, 1}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({4, 8, 1, 1, 4, 8, 1, 1, 1, 1, 1, 1}, model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testTransposeConvO1H1W1I1Stride2x2Dilation1x1 {
@@ -238,11 +239,11 @@ using ::tflite::gpu::metal::SingleOpModel;
                       {output});
   XCTAssertTrue(model.PopulateTensor(0, {1, 0, 2, 0, 0, 0, 4, 0, 8}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({2, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0,  0, 0, 0, 0, 0, 0, 0,
                            0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0},
                           model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 - (void)testTransposeConv4x4 {
@@ -277,13 +278,13 @@ using ::tflite::gpu::metal::SingleOpModel;
                       {output});
   XCTAssertTrue(model.PopulateTensor(0, {0.0f, 1.0f, 2.0f, 3.0f}));
   auto status = model.Invoke();
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
   status = CompareVectors({0.0f, 0.0f, 1.0f, 2.0f, 1.0f, 2.0f, 1.0f, 2.0f,
                            2.0f, 4.0f, 6.0f, 12.0f, 6.0f, 12.0f, 4.0f, 8.0f,
                            2.0f, 4.0f, 6.0f, 12.0f, 6.0f, 12.0f, 4.0f, 8.0f,
                            2.0f, 4.0f, 5.0f, 10.0f, 5.0f, 10.0f, 3.0f, 6.0f},
                           model.GetOutput(0), 1e-6f);
-  XCTAssertTrue(status.ok(), @"%s", status.error_message().c_str());
+  XCTAssertTrue(status.ok(), @"%s", std::string(status.message()).c_str());
 }
 
 @end
diff --git a/tensorflow/lite/delegates/gpu/metal_delegate.mm b/tensorflow/lite/delegates/gpu/metal_delegate.mm
index f7f08b273ae..4c6bb140a96 100644
--- a/tensorflow/lite/delegates/gpu/metal_delegate.mm
+++ b/tensorflow/lite/delegates/gpu/metal_delegate.mm
@@ -198,13 +198,13 @@ class Delegate {
     }
   }
 
-  Status BindBufferToTensor(id<MTLBuffer> buffer, int tensor_index) {
+  absl::Status BindBufferToTensor(id<MTLBuffer> buffer, int tensor_index) {
     for (auto& input : graph_inputs_) {
       if (input.tensor_id == tensor_index) {
         input_output_buffers_[input.id] = buffer;
         bphwc4_buffers_[input.id] = buffer;
         input.set_externally = true;
-        return OkStatus();
+        return absl::OkStatus();
       }
     }
     for (auto& output : graph_outputs_) {
@@ -212,10 +212,10 @@ class Delegate {
         input_output_buffers_[output.id] = buffer;
         bphwc4_buffers_[output.id] = buffer;
         output.set_externally = true;
-        return OkStatus();
+        return absl::OkStatus();
       }
     }
-    return NotFoundError("Couldn't find tensor: " + std::to_string(tensor_index));
+    return absl::NotFoundError("Couldn't find tensor: " + std::to_string(tensor_index));
   }
 
   void SetCommandEncoder(
@@ -225,7 +225,7 @@ class Delegate {
     external_command_encoder_ = encoder;
   }
 
-  Status Prepare(TfLiteContext* context, const TfLiteDelegateParams* delegate_params) {
+  absl::Status Prepare(TfLiteContext* context, const TfLiteDelegateParams* delegate_params) {
     // Extract TFLite delegate execution plan from the context and convert it into FlowGraph32.
     GraphFloat32 graph;
     RETURN_IF_ERROR(BuildModel(context, delegate_params, &graph));
@@ -234,7 +234,7 @@ class Delegate {
     NullTransformationReporter reporter;
     ModelTransformer transformer(&graph, &reporter);
     if (!ApplyGeneralTransformations(&transformer)) {
-      return InternalError("Graph general transformations failed");
+      return absl::InternalError("Graph general transformations failed");
     }
 
     // TODO(impjdi): Remove code duplication.
@@ -265,7 +265,7 @@ class Delegate {
       if (tensor->allocation_type == TfLiteAllocationType::kTfLiteMmapRo) continue;
       const auto* input = find_value(tensor_index);
       if (!input || tensor->type != TfLiteType::kTfLiteFloat32) {
-        return NotFoundError("Input tensor is not found in the graph.");
+        return absl::NotFoundError("Input tensor is not found in the graph.");
       }
 
       inputs_.push_back(input->id);
@@ -283,7 +283,7 @@ class Delegate {
       auto* tensor = context->tensors + tensor_index;
       const auto* output = find_value(tensor_index);
       if (!output || tensor->type != TfLiteType::kTfLiteFloat32) {
-        return NotFoundError("Output tensor is not found in the graph.");
+        return absl::NotFoundError("Output tensor is not found in the graph.");
       }
 
       outputs_.push_back(output->id);
@@ -323,7 +323,9 @@ class Delegate {
       const auto& input_tensor = tensors_[input];
       const auto tensor_id = input_tensor.tensor_id;
       input_ids.push_back(input);
-      if (input_tensor.shape.b != 1) return UnimplementedError("Batching is not supported yet.");
+      if (input_tensor.shape.b != 1) {
+        return absl::UnimplementedError("Batching is not supported yet.");
+      }
       input_dimensions[input] = input_tensor.shape;
       graph_inputs_.push_back({
           input,               // .id
@@ -346,7 +348,7 @@ class Delegate {
                                              isFloat16:options_.allow_precision_loss
                                        convertToPBHWC4:true];
           if (converter_to_BPHWC4_ == nil) {
-            return InternalError("Error initialization of input buffer converter");
+            return absl::InternalError("Error initialization of input buffer converter");
           }
         }
       } else {
@@ -383,7 +385,7 @@ class Delegate {
                                              isFloat16:options_.allow_precision_loss
                                        convertToPBHWC4:false];
           if (converter_from_BPHWC4_ == nil) {
-            return InternalError("Error initialization of output buffer converter");
+            return absl::InternalError("Error initialization of output buffer converter");
           }
         }
       } else {
@@ -406,10 +408,10 @@ class Delegate {
     RETURN_IF_ERROR([inference_context_ setInputDimensions:input_dimensions
                                           outputDimensions:&output_dimensions
                                            taskDescriptors:optimized_model]);
-    return OkStatus();
+    return absl::OkStatus();
   }
 
-  Status Invoke(TfLiteContext* context) {
+  absl::Status Invoke(TfLiteContext* context) {
     if (options_.wait_type == TFLGpuDelegateWaitType::TFLGpuDelegateWaitTypeAggressive)
       gpu_alarm_clock_->Stop();
     // We need only synchronization so volatile works better than atomic which reads from global
@@ -514,11 +516,11 @@ class Delegate {
       // External command encoder is assigned so all output buffers are controlled by a user.
       for (const auto& output : graph_outputs_) {
         if (!output.set_externally) {
-          return InternalError(
+          return absl::InternalError(
               "External command encoder is used, but not all output buffers are bound.");
         }
       }
-      return OkStatus();
+      return absl::OkStatus();
     }
 
     // Retrieve data from GPU and convert from PHWC4 to HWC.
@@ -529,7 +531,7 @@ class Delegate {
       const void* gpu_ptr = [input_output_buffers_[output.id] contents];
       std::memcpy(tensor->data.f, gpu_ptr, output.shape.DimensionsProduct() * sizeof(float));
     }
-    return OkStatus();
+    return absl::OkStatus();
   }
 
   TfLiteDelegate* tflite_delegate() { return &delegate_; }
@@ -596,7 +598,7 @@ TfLiteStatus DelegatePrepare(TfLiteContext* context, TfLiteDelegate* delegate) {
         const auto status = metal_delegate->Prepare(context, params);
         if (status.ok()) return metal_delegate;
         context->ReportError(context, "TfLiteGpuDelegate Prepare: %s",
-                             status.error_message().c_str());
+                             std::string(status.message()).c_str());
         return nullptr;
       },
       // .free
@@ -610,7 +612,7 @@ TfLiteStatus DelegatePrepare(TfLiteContext* context, TfLiteDelegate* delegate) {
         const auto status = GetMetalDelegate(node)->Invoke(context);
         if (status.ok()) return kTfLiteOk;
         context->ReportError(context, "TfLiteMetalDelegate Invoke: %s",
-                             status.error_message().c_str());
+                             std::string(status.message()).c_str());
         return kTfLiteError;
       },
       nullptr,                // .profiling_string
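
Editor's note: in metal_delegate.mm the absl::Status values stop at the TfLiteRegistration boundary: the hunks above fold them into kTfLiteOk / kTfLiteError and surface the message through context->ReportError, again copying the string_view into a std::string before taking c_str(). A sketch of that bridge as a standalone helper (the helper is not part of the commit; names are illustrative):

    #include <string>

    #include "absl/status/status.h"
    #include "tensorflow/lite/c/common.h"

    // Illustrative bridge: absl::Status stays internal to the delegate and
    // collapses to TFLite's coarse status at the registration boundary.
    TfLiteStatus ToTfLiteStatus(TfLiteContext* context,
                                const absl::Status& status,
                                const char* where) {
      if (status.ok()) return kTfLiteOk;
      context->ReportError(context, "%s: %s", where,
                           std::string(status.message()).c_str());
      return kTfLiteError;
    }
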
diff --git a/tensorflow/lite/delegates/gpu/spi.h b/tensorflow/lite/delegates/gpu/spi.h
index c7f041f3db1..a70f8dbb326 100644
--- a/tensorflow/lite/delegates/gpu/spi.h
+++ b/tensorflow/lite/delegates/gpu/spi.h
@@ -33,8 +33,8 @@ class TensorObjectConverter {
  public:
   virtual ~TensorObjectConverter() = default;
 
-  virtual Status Convert(const TensorObject& input,
-                         const TensorObject& output) = 0;
+  virtual absl::Status Convert(const TensorObject& input,
+                               const TensorObject& output) = 0;
 };
 
 class TensorObjectConverterBuilder {
@@ -44,7 +44,7 @@ class TensorObjectConverterBuilder {
   virtual bool IsSupported(const TensorObjectDef& input,
                            const TensorObjectDef& output) const = 0;
 
-  virtual Status MakeConverter(
+  virtual absl::Status MakeConverter(
       const TensorObjectDef& input, const TensorObjectDef& output,
       std::unique_ptr<TensorObjectConverter>* converter) = 0;
 };
@@ -66,13 +66,13 @@ class TensorTie {
 
   virtual ~TensorTie() = default;
 
-  virtual Status SetExternalObject(TensorObject obj) = 0;
+  virtual absl::Status SetExternalObject(TensorObject obj) = 0;
 
   virtual TensorObject GetExternalObject() = 0;
 
-  virtual Status CopyToExternalObject() = 0;
+  virtual absl::Status CopyToExternalObject() = 0;
 
-  virtual Status CopyFromExternalObject() = 0;
+  virtual absl::Status CopyFromExternalObject() = 0;
 
   const TensorTieDef& def() const { return def_; }