diff --git a/tensorflow/lite/delegates/gpu/gl/api.cc b/tensorflow/lite/delegates/gpu/gl/api.cc
index c2800bb2b59..60c29abb2e5 100644
--- a/tensorflow/lite/delegates/gpu/gl/api.cc
+++ b/tensorflow/lite/delegates/gpu/gl/api.cc
@@ -380,6 +380,11 @@ bool IsBatchMatchesForAllValues(const GraphFloat32& model) {
   return true;
 }
 
+// True when the reported context is OpenGL ES 3.1+ (compute shaders require
+// ES 3.1, so inference cannot run on anything older).
+bool IsOpenGl31OrAbove(const GpuInfo& gpu_info) {
+  return (gpu_info.major_version == 3 && gpu_info.minor_version >= 1) ||
+         gpu_info.major_version > 3;
+}
+
 }  // namespace
 
 Status Compile(const CompilationOptions& options, const GraphFloat32& model,
@@ -392,6 +397,10 @@ Status Compile(const CompilationOptions& options, const GraphFloat32& model,
   }
   GpuInfo gpu_info;
   RETURN_IF_ERROR(RequestGpuInfo(&gpu_info));
+  if (!IsOpenGl31OrAbove(gpu_info)) {
+    return InternalError(
+        "OpenGL ES 3.1 or above is required to use OpenGL inference.");
+  }
   auto compiled_model_impl = absl::make_unique<CompiledModelImpl>(gpu_info);
   compiled_model_impl->set_dynamic_batch(options.dynamic_batch);
   auto compiler = NewCompiler(&node_shader, &gpu_info, options);
@@ -408,6 +417,10 @@ Status ReadSerializedModel(const std::vector<uint8_t>& serialized_model,
                            std::unique_ptr<CompiledModel>* compiled_model) {
   GpuInfo gpu_info;
   RETURN_IF_ERROR(RequestGpuInfo(&gpu_info));
+  if (!IsOpenGl31OrAbove(gpu_info)) {
+    return InternalError(
+        "OpenGL ES 3.1 or above is required to use OpenGL inference.");
+  }
   auto compiled_model_impl = absl::make_unique<CompiledModelImpl>(gpu_info);
   RETURN_IF_ERROR(DeserializeCompiledModel(
       absl::MakeConstSpan(serialized_model), compiled_model_impl.get()));