diff --git a/tensorflow/lite/g3doc/performance/benchmarks.md b/tensorflow/lite/g3doc/performance/benchmarks.md
index e825f7c41c3..b310d0fb8e1 100644
--- a/tensorflow/lite/g3doc/performance/benchmarks.md
+++ b/tensorflow/lite/g3doc/performance/benchmarks.md
@@ -141,8 +141,7 @@ To run iOS benchmarks, the
[benchmark app](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/tools/benchmark/ios)
was modified to include the appropriate model and `benchmark_params.json` was
modified to set `num_threads` to 2. For GPU delegate, `"use_gpu" : "1"` and
-`"gpu_wait_type" : "aggressive"` options were also added to
-`benchmark_params.json`.
+`"wait_type" : "aggressive"` options were also added to `benchmark_params.json`.
diff --git a/tensorflow/lite/tools/benchmark/BUILD b/tensorflow/lite/tools/benchmark/BUILD
index cd173a76f00..b7f4383f27f 100644
--- a/tensorflow/lite/tools/benchmark/BUILD
+++ b/tensorflow/lite/tools/benchmark/BUILD
@@ -106,12 +106,7 @@ cc_library(
name = "benchmark_tflite_model_lib",
srcs = ["benchmark_tflite_model.cc"],
hdrs = ["benchmark_tflite_model.h"],
- copts = common_copts + select({
- "//tensorflow:ios": [
- "-xobjective-c++",
- ],
- "//conditions:default": [],
- }),
+ copts = common_copts,
deps = [
":benchmark_model_lib",
":benchmark_utils",
@@ -130,9 +125,6 @@ cc_library(
"//tensorflow:android": [
"//tensorflow/lite/delegates/gpu:delegate",
],
- "//tensorflow:ios": [
- "//tensorflow/lite/delegates/gpu:metal_delegate",
- ],
"//conditions:default": [],
}),
)
diff --git a/tensorflow/lite/tools/benchmark/README.md b/tensorflow/lite/tools/benchmark/README.md
index b9655aab25a..3741a958c2a 100644
--- a/tensorflow/lite/tools/benchmark/README.md
+++ b/tensorflow/lite/tools/benchmark/README.md
@@ -58,11 +58,7 @@ and the following optional parameters:
benchmark tool will not correctly use NNAPI.
* `use_gpu`: `bool` (default=false) \
Whether to use the [GPU accelerator delegate](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/delegates/gpu).
- This option is currently only available on Android and iOS devices.
-* `gpu_wait_type`: `str` (default="") \
- Which GPU wait_type option to use, when using GPU delegate on iOS. Should be
- one of the following: passive, active, do_not_wait, aggressive. When left
- blank, passive mode is used by default.
+ This option is currently only available on Android devices.
* `enable_op_profiling`: `bool` (default=false) \
Whether to enable per-operator profiling measurement.
diff --git a/tensorflow/lite/tools/benchmark/benchmark_tflite_model.cc b/tensorflow/lite/tools/benchmark/benchmark_tflite_model.cc
index 3448dde8ce1..aae90b18d87 100644
--- a/tensorflow/lite/tools/benchmark/benchmark_tflite_model.cc
+++ b/tensorflow/lite/tools/benchmark/benchmark_tflite_model.cc
@@ -31,13 +31,6 @@ limitations under the License.
#if defined(__ANDROID__)
#include "tensorflow/lite/delegates/gpu/delegate.h"
#include "tensorflow/lite/nnapi/nnapi_util.h"
-#elif defined(__APPLE__)
-#include "TargetConditionals.h"
-#if TARGET_OS_IPHONE && !TARGET_IPHONE_SIMULATOR
-// Only enable metal delegate when using a real iPhone device.
-#define REAL_IPHONE_DEVICE
-#include "tensorflow/lite/delegates/gpu/metal_delegate.h"
-#endif
#endif
#include "tensorflow/lite/kernels/register.h"
@@ -271,13 +264,9 @@ BenchmarkParams BenchmarkTfLiteModel::DefaultParams() {
default_params.AddParam("nnapi_accelerator_name",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("use_gpu", BenchmarkParam::Create<bool>(false));
-#if defined(__ANDROID__) || defined(REAL_IPHONE_DEVICE)
+#if defined(__ANDROID__)
default_params.AddParam("gpu_precision_loss_allowed",
BenchmarkParam::Create<bool>(true));
-#endif
-#if defined(REAL_IPHONE_DEVICE)
- default_params.AddParam("gpu_wait_type",
- BenchmarkParam::Create(""));
#endif
default_params.AddParam("allow_fp16", BenchmarkParam::Create<bool>(false));
default_params.AddParam("require_full_delegation",
@@ -325,16 +314,10 @@ std::vector<Flag> BenchmarkTfLiteModel::GetFlags() {
         "nnapi_accelerator_name", &params_,
"the name of the nnapi accelerator to use (requires Android Q+)"),
CreateFlag<bool>("use_gpu", &params_, "use gpu"),
-#if defined(__ANDROID__) || defined(REAL_IPHONE_DEVICE)
+#if defined(__ANDROID__)
CreateFlag<bool>("gpu_precision_loss_allowed", &params_,
"Allow to process computation in lower precision than "
"FP32 in GPU. By default, it's enabled."),
-#endif
-#if defined(REAL_IPHONE_DEVICE)
- CreateFlag<std::string>(
- "gpu_wait_type", &params_,
- "GPU wait type. Should be one of the following: passive, active, "
- "do_not_wait, aggressive"),
#endif
CreateFlag<bool>("allow_fp16", &params_, "allow fp16"),
CreateFlag<bool>("require_full_delegation", &params_,
@@ -380,13 +363,9 @@ void BenchmarkTfLiteModel::LogParams() {
}
#endif
TFLITE_LOG(INFO) << "Use gpu : [" << params_.Get<bool>("use_gpu") << "]";
-#if defined(__ANDROID__) || defined(REAL_IPHONE_DEVICE)
+#if defined(__ANDROID__)
TFLITE_LOG(INFO) << "Allow lower precision in gpu : ["
<< params_.Get<bool>("gpu_precision_loss_allowed") << "]";
-#endif
-#if defined(REAL_IPHONE_DEVICE)
- TFLITE_LOG(INFO) << "GPU delegate wait type : ["
- << params_.Get<std::string>("gpu_wait_type") << "]";
#endif
TFLITE_LOG(INFO) << "Allow fp16 : [" << params_.Get<bool>("allow_fp16")
<< "]";
@@ -669,31 +648,9 @@ BenchmarkTfLiteModel::TfLiteDelegatePtrMap BenchmarkTfLiteModel::GetDelegates()
}
Interpreter::TfLiteDelegatePtr delegate =
evaluation::CreateGPUDelegate(model_.get(), &gpu_opts);
-#elif defined(REAL_IPHONE_DEVICE)
- TFLGpuDelegateOptions gpu_opts = {0};
- gpu_opts.allow_precision_loss =
- params_.Get<bool>("gpu_precision_loss_allowed");
-
- std::string string_gpu_wait_type =
- params_.Get<std::string>("gpu_wait_type");
- if (!string_gpu_wait_type.empty()) {
- TFLGpuDelegateWaitType wait_type = TFLGpuDelegateWaitTypePassive;
- if (string_gpu_wait_type == "passive") {
- wait_type = TFLGpuDelegateWaitTypePassive;
- } else if (string_gpu_wait_type == "active") {
- wait_type = TFLGpuDelegateWaitTypeActive;
- } else if (string_gpu_wait_type == "do_not_wait") {
- wait_type = TFLGpuDelegateWaitTypeDoNotWait;
- } else if (string_gpu_wait_type == "aggressive") {
- wait_type = TFLGpuDelegateWaitTypeAggressive;
- }
- gpu_opts.wait_type = wait_type;
- }
- Interpreter::TfLiteDelegatePtr delegate(TFLGpuDelegateCreate(&gpu_opts),
- &TFLGpuDelegateDelete);
#else
- TFLITE_LOG(WARN) << "The GPU delegate compile options are only supported "
- "to be benchmarked on Android or iOS platforms.";
+ TFLITE_LOG(WARN) << "The GPU delegate compile options aren't supported to "
+ "be benchmarked on non-Android platforms.";
Interpreter::TfLiteDelegatePtr delegate =
evaluation::CreateGPUDelegate(model_.get());
#endif