[tfls.util] Remove tensorflow-lite-gpu from dependencies.

PiperOrigin-RevId: 311760392
Change-Id: Ia8fe0682cfda037589f7546f1e70974c1be439c5
This commit is contained in:
Xunkai Zhang 2020-05-15 10:46:10 -07:00 committed by TensorFlower Gardener
parent bd010a095e
commit 262e92804b
4 changed files with 110 additions and 17 deletions

View File

@ -17,18 +17,19 @@ package org.tensorflow.lite.gpu;
import java.io.Closeable;
import org.tensorflow.lite.Delegate;
import org.tensorflow.lite.annotations.UsedByReflection;
/**
* {@link Delegate} for GPU inference.
*
* <p>Note: When calling {@code Interpreter.modifyGraphWithDelegate()}/
* {@code Interpreter.Options.addDelegate()} and {@code Interpreter.run()}, the caller must have an
* {@code EGLContext} in the <b>current thread</b> and {@code Interpreter.run()} must be called from
* the same {@code EGLContext}. If an {@code EGLContext} does not exist, the delegate will
* internally create one, but then the developer must ensure that {@code Interpreter.run()} is
* always called from the same thread in which {@code Interpreter.modifyGraphWithDelegate()} was
* called.
* <p>Note: When calling {@code Interpreter.modifyGraphWithDelegate()}/ {@code
* Interpreter.Options.addDelegate()} and {@code Interpreter.run()}, the caller must have an {@code
* EGLContext} in the <b>current thread</b> and {@code Interpreter.run()} must be called from the
* same {@code EGLContext}. If an {@code EGLContext} does not exist, the delegate will internally
* create one, but then the developer must ensure that {@code Interpreter.run()} is always called
* from the same thread in which {@code Interpreter.modifyGraphWithDelegate()} was called.
*/
@UsedByReflection("TFLiteSupport/model/GpuDelegateProxy")
public class GpuDelegate implements Delegate, Closeable {
private static final long INVALID_DELEGATE_HANDLE = 0;
@ -98,6 +99,7 @@ public class GpuDelegate implements Delegate, Closeable {
options.inferencePreference);
}
/**
 * Creates a {@code GpuDelegate} with default {@link Options}.
 *
 * <p>Kept reflectively constructible (see the {@code @UsedByReflection} tag) so the support
 * library can instantiate the delegate without a compile-time dependency on this class.
 */
@UsedByReflection("TFLiteSupport/model/GpuDelegateProxy")
public GpuDelegate() {
this(new Options());
}

View File

@ -9,7 +9,24 @@ package(
licenses = ["notice"], # Apache 2.0
)
# TODO(b/156482505): The NOGPU target is a temporary target. Internally, people
# may already depend on "tensorflow-lite-support" so we shouldn't remove GPU
# from its dependency. We will have CLs to help users migrate. After migration
# is done, the "NOGPU" target will be removed.
# Same sources as "tensorflow-lite-support" below, but without the GPU delegate
# in deps; apps that need GPU inference add "tensorflowlite_gpu" themselves.
android_library(
name = "tensorflow-lite-support-nogpu",
srcs = glob(["src/java/org/tensorflow/lite/support/**/*.java"]),
javacopts = JAVACOPTS,
manifest = "AndroidManifest.xml",
deps = [
"//tensorflow/lite/java:tensorflowlite",
"@org_checkerframework_qual",
],
)
# TODO(b/138904786): Split Java part and Android part to make the support library usable by pure Java.
# For new users: Please use "tensorflow-lite-support-nogpu" if possible, and
# additionally depend on "tensorflowlite_gpu" if needed.
android_library(
name = "tensorflow-lite-support",
srcs = glob(["src/java/org/tensorflow/lite/support/**/*.java"]),
@ -17,7 +34,7 @@ android_library(
manifest = "AndroidManifest.xml",
deps = [
"//tensorflow/lite/java:tensorflowlite",
"//tensorflow/lite/java:tensorflowlite_gpu",
"//tensorflow/lite/java:tensorflowlite_gpu", # unuseddeps: keep
"@org_checkerframework_qual",
],
)

View File

@ -0,0 +1,69 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
package org.tensorflow.lite.support.model;
import android.util.Log;
import java.io.Closeable;
import java.io.IOException;
import org.checkerframework.checker.nullness.qual.Nullable;
import org.tensorflow.lite.Delegate;
/**
 * Helper class to create and call necessary methods of {@code GpuDelegate} which is not a strict
 * dependency.
 */
class GpuDelegateProxy implements Delegate, Closeable {

  private static final String TAG = "GpuDelegateProxy";

  private final Delegate proxiedDelegate;
  private final Closeable proxiedCloseable;

  private GpuDelegateProxy(Object delegateInstance) {
    // The real GpuDelegate implements both interfaces, so one object backs both views.
    this.proxiedCloseable = (Closeable) delegateInstance;
    this.proxiedDelegate = (Delegate) delegateInstance;
  }

  /**
   * Reflectively instantiates {@code org.tensorflow.lite.gpu.GpuDelegate}, returning {@code null}
   * (after logging) when the GPU delegate library is not on the classpath.
   */
  @Nullable
  public static GpuDelegateProxy maybeNewInstance() {
    try {
      Class<?> gpuDelegateClass = Class.forName("org.tensorflow.lite.gpu.GpuDelegate");
      Object delegateInstance = gpuDelegateClass.getDeclaredConstructor().newInstance();
      return new GpuDelegateProxy(delegateInstance);
    } catch (ReflectiveOperationException exception) {
      Log.e(TAG, "Failed to create the GpuDelegate dynamically.", exception);
      return null;
    }
  }

  /** Calls {@code close()} method of the delegate. */
  @Override
  public void close() {
    try {
      proxiedCloseable.close();
    } catch (IOException ioException) {
      // Should not trigger, because GpuDelegate#close never throws. The catch is required because
      // of Closeable#close.
      Log.e(TAG, "Failed to close the GpuDelegate.", ioException);
    }
  }

  /** Calls {@code getNativeHandle()} method of the delegate. */
  @Override
  public long getNativeHandle() {
    return proxiedDelegate.getNativeHandle();
  }
}

View File

@ -22,7 +22,6 @@ import java.util.Map;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import org.tensorflow.lite.Interpreter;
import org.tensorflow.lite.gpu.GpuDelegate;
import org.tensorflow.lite.support.common.FileUtil;
import org.tensorflow.lite.support.common.SupportPreconditions;
@ -91,7 +90,7 @@ public class Model {
/** The memory-mapped model data. */
private final MappedByteBuffer byteModel;
private final GpuDelegate gpuDelegate;
private final GpuDelegateProxy gpuDelegateProxy;
/**
* Builder for {@link Model}.
@ -181,24 +180,30 @@ public class Model {
* @param modelPath The original path of the model. It can be fetched later by {@link
* Model#getPath()}.
* @param options The options for running the model.
* @throws IllegalArgumentException if {@code options.device} is {@link Device#GPU} but
* "tensorflow-lite-gpu" is not linked to the project.
*/
public static Model createModel(
@NonNull MappedByteBuffer byteModel, @NonNull String modelPath, @NonNull Options options) {
Interpreter.Options interpreterOptions = new Interpreter.Options();
// Stays null unless Device.GPU is requested AND the GPU delegate can be loaded reflectively.
GpuDelegateProxy gpuDelegateProxy = null;
switch (options.device) {
case NNAPI:
interpreterOptions.setUseNNAPI(true);
break;
case GPU:
// The GPU delegate is an optional dependency: maybeNewInstance() returns null when
// "tensorflow-lite-gpu" is not linked into the app, which we surface as an
// IllegalArgumentException via checkArgument below.
gpuDelegateProxy = GpuDelegateProxy.maybeNewInstance();
SupportPreconditions.checkArgument(
gpuDelegateProxy != null,
"Cannot inference with GPU. Did you add \"tensorflow-lite-gpu\" as dependency?");
interpreterOptions.addDelegate(gpuDelegateProxy);
break;
case CPU:
// Plain CPU inference needs no extra interpreter configuration.
break;
}
interpreterOptions.setNumThreads(options.numThreads);
Interpreter interpreter = new Interpreter(byteModel, interpreterOptions);
// The proxy (possibly null) is retained so Model#close() can release the native delegate.
return new Model(modelPath, byteModel, interpreter, gpuDelegateProxy);
}
/** Returns the memory-mapped model data. */
@ -243,8 +248,8 @@ public class Model {
if (interpreter != null) {
interpreter.close();
}
if (gpuDelegate != null) {
gpuDelegate.close();
if (gpuDelegateProxy != null) {
gpuDelegateProxy.close();
}
}
@ -252,10 +257,10 @@ public class Model {
@NonNull String modelPath,
@NonNull MappedByteBuffer byteModel,
@NonNull Interpreter interpreter,
@Nullable GpuDelegate gpuDelegate) {
@Nullable GpuDelegateProxy gpuDelegateProxy) {
this.modelPath = modelPath;
this.byteModel = byteModel;
this.interpreter = interpreter;
this.gpuDelegate = gpuDelegate;
this.gpuDelegateProxy = gpuDelegateProxy;
}
}