diff --git a/tensorflow/lite/g3doc/guide/ios.md b/tensorflow/lite/g3doc/guide/ios.md
index 7fb109bf374..2b82fbe8b69 100644
--- a/tensorflow/lite/g3doc/guide/ios.md
+++ b/tensorflow/lite/g3doc/guide/ios.md
@@ -72,7 +72,7 @@ builds, you can write:
 pod 'TensorFlowLiteSwift', '~> 0.0.1-nightly'
 ```
 
-For nightly version, by default
+Starting from version 2.4.0 and the latest nightly releases, by default
 [GPU](https://www.tensorflow.org/lite/performance/gpu) and
 [Core ML delegates](https://www.tensorflow.org/lite/performance/coreml_delegate)
 are excluded from the pod to reduce the binary size. You can include them by
diff --git a/tensorflow/lite/g3doc/performance/coreml_delegate.md b/tensorflow/lite/g3doc/performance/coreml_delegate.md
index f9d40e1fab1..91ae96ecbce 100644
--- a/tensorflow/lite/g3doc/performance/coreml_delegate.md
+++ b/tensorflow/lite/g3doc/performance/coreml_delegate.md
@@ -4,7 +4,8 @@ The TensorFlow Lite Core ML delegate enables running TensorFlow Lite models on
 [Core ML framework](https://developer.apple.com/documentation/coreml),
 which results in faster model inference on iOS devices.
 
-Note: This delegate is in experimental (beta) phase.
+Note: This delegate is in the experimental (beta) phase. It is available from
+TensorFlow Lite 2.4.0 and the latest nightly releases.
 
 Note: Core ML delegate supports Core ML version 2 and later.
 
@@ -24,88 +25,108 @@ The Core ML delegate currently supports float (FP32 and FP16) models.
 ## Trying the Core ML delegate on your own model
 
 The Core ML delegate is already included in nightly release of TensorFlow lite
-CocoaPods. To use Core ML delegate, change your TensorFlow lite pod
-(`TensorflowLiteC` for C API, and `TensorFlowLiteSwift` for Swift) version to
-`0.0.1-nightly` in your `Podfile`, and include subspec `CoreML`
+CocoaPods. To use the Core ML delegate, change your TensorFlow Lite pod to
+include the `CoreML` subspec in your `Podfile`.
+
+Note: If you want to use the C API instead of the Objective-C API, you can
+include the `TensorFlowLiteC/CoreML` pod to do so.
 
 ```
 target 'YourProjectName'
-  # pod 'TensorFlowLiteSwift'
-  pod 'TensorFlowLiteSwift/CoreML', '~> 0.0.1-nightly'
+  pod 'TensorFlowLiteSwift/CoreML', '~> 2.4.0'  # Or TensorFlowLiteObjC/CoreML
 ```
 
 OR
 
 ```
+# Particularly useful when you also want to include the 'Metal' subspec.
 target 'YourProjectName'
-  # pod 'TensorFlowLiteSwift'
-  pod 'TensorFlowLiteSwift', '~> 0.0.1-nightly', :subspecs => ['CoreML']
+  pod 'TensorFlowLiteSwift', '~> 2.4.0', :subspecs => ['CoreML']
 ```
 
-Note: After updating `Podfile`, you should run `pod update` to reflect changes.
-If you can't see the latest `CoreMLDelegate.swift` file, try running `pod cache
-clean TensorFlowLiteSwift`.
+Note: The Core ML delegate can also use the C API for Objective-C code. Prior
+to the TensorFlow Lite 2.4.0 release, this was the only option.
 
-### Swift
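+For example, a minimal `Podfile` sketch for the C API variant (the
+`TensorFlowLiteC/CoreML` pod is named in the note above; the version
+constraint is assumed to match the examples above):
+
+```
+target 'YourProjectName'
+  pod 'TensorFlowLiteC/CoreML', '~> 2.4.0'
+```
+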
+ let coreMLDelegate = CoreMLDelegate()
+ var interpreter: Interpreter
-Initialize TensorFlow Lite interpreter with the Core ML delegate.
+ // Core ML delegate will only be created for devices with Neural Engine
+ if coreMLDelegate != nil {
+ interpreter = try Interpreter(modelPath: modelPath,
+ delegates: [coreMLDelegate!])
+ } else {
+ interpreter = try Interpreter(modelPath: modelPath)
+ }
+
+
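+ // A minimal sketch of running inference with the interpreter created above;
+ // `inputData` is a placeholder for your model's input bytes.
+ try interpreter.allocateTensors()
+ try interpreter.copy(inputData, toInputAt: 0)
+ try interpreter.invoke()
+ let output = try interpreter.output(at: 0)
+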
-```swift
-let coreMLDelegate = CoreMLDelegate()
-var interpreter: Interpreter
+ // Import module when using CocoaPods with module support
+ @import TFLTensorFlowLite;
-// Core ML delegate will only be created for devices with Neural Engine
-if coreMLDelegate != nil {
- interpreter = try Interpreter(modelPath: modelPath,
- delegates: [coreMLDelegate!])
-} else {
- interpreter = try Interpreter(modelPath: modelPath)
-}
-```
+ // Or import following headers manually
+ #import "tensorflow/lite/objc/apis/TFLCoreMLDelegate.h"
+ #import "tensorflow/lite/objc/apis/TFLTensorFlowLite.h"
-### Objective-C
+ // Initialize Core ML delegate
+ TFLCoreMLDelegate* coreMLDelegate = [[TFLCoreMLDelegate alloc] init];
-The Core ML delegate uses C API for Objective-C codes.
+ // Initialize interpreter with model path and Core ML delegate
+ TFLInterpreterOptions* options = [[TFLInterpreterOptions alloc] init];
+ NSError* error = nil;
+ TFLInterpreter* interpreter = [[TFLInterpreter alloc]
+ initWithModelPath:modelPath
+ options:options
+ delegates:@[ coreMLDelegate ]
+ error:&error];
+ if (error != nil) { /* Error handling... */ }
-#### Step 1. Include `coreml_delegate.h`.
+ if (![interpreter allocateTensorsWithError:&error]) { /* Error handling... */ }
+ if (error != nil) { /* Error handling... */ }
-```c
-#include "tensorflow/lite/delegates/coreml/coreml_delegate.h"
-```
+ // Run inference ...
+
+
+ #include "tensorflow/lite/delegates/coreml/coreml_delegate.h"
 
-#### Step 2. Create a delegate and initialize a TensorFlow Lite Interpreter
+ // Initialize interpreter with model
+ TfLiteModel* model = TfLiteModelCreateFromFile(model_path);
 
-After initializing the interpreter options, call
-`TfLiteInterpreterOptionsAddDelegate` with initialized Core ML delegate to apply
-the delegate. Then initialize the interpreter with the created option.
+ // Initialize interpreter with Core ML delegate
+ TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
+ TfLiteDelegate* delegate = TfLiteCoreMlDelegateCreate(NULL);  // default config
+ TfLiteInterpreterOptionsAddDelegate(options, delegate);
 
-```c
-// Initialize interpreter with model
-TfLiteModel* model = TfLiteModelCreateFromFile(model_path);
+ TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
+ // The interpreter keeps its own copy of the options, so they can be
+ // deleted right after the interpreter is created.
+ TfLiteInterpreterOptionsDelete(options);
 
-// Initialize interpreter with Core ML delegate
-TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
-TfLiteDelegate* delegate = TfLiteCoreMlDelegateCreate(NULL); // default config
-TfLiteInterpreterOptionsAddDelegate(options, delegate);
-TfLiteInterpreterOptionsDelete(options);
+ TfLiteInterpreterAllocateTensors(interpreter);
 
-TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
+ // Run inference ...
 
-TfLiteInterpreterAllocateTensors(interpreter);
+ /* ... */
 
-// Run inference ...
-```
+ // Dispose resources when they are no longer used.
+ // Add the following code to the section where you dispose of the delegate
+ // (e.g. `dealloc` of the class).
 
-#### Step 3. Dispose resources when it is no longer used.
-
-Add this code to the section where you dispose of the delegate (e.g. `dealloc`
-of class).
-
-```c
-TfLiteInterpreterDelete(interpreter);
-TfLiteCoreMlDelegateDelete(delegate);
-TfLiteModelDelete(model);
-```
+ TfLiteInterpreterDelete(interpreter);
+ TfLiteCoreMlDelegateDelete(delegate);
+ TfLiteModelDelete(model);
+
+
+ var options = CoreMLDelegate.Options()
+ options.enabledDevices = .all
+ let coreMLDelegate = CoreMLDelegate(options: options)!
+ let interpreter = try Interpreter(modelPath: modelPath,
+                                   delegates: [coreMLDelegate])
+
+ TFLCoreMLDelegateOptions* coreMLOptions = [[TFLCoreMLDelegateOptions alloc] init];
+ coreMLOptions.enabledDevices = TFLCoreMLDelegateEnabledDevicesAll;
+ TFLCoreMLDelegate* coreMLDelegate = [[TFLCoreMLDelegate alloc]
+                                       initWithOptions:coreMLOptions];
 
-```swift
-var options = CoreMLDelegate.Options()
-options.enabledDevices = .all
-let coreMLDelegate = CoreMLDelegate(options: options)!
-let interpreter = try Interpreter(modelPath: modelPath,
-                                  delegates: [coreMLDelegate])
-```
-
-#### Objective-C
-
-```c
-TfLiteCoreMlDelegateOptions options;
-options.enabled_devices = TfLiteCoreMlDelegateAllDevices;
-TfLiteDelegate* delegate = TfLiteCoreMlDelegateCreate(&options);
-// Initialize interpreter with delegate
-```
+ // Initialize interpreter with delegate
+
+ TfLiteCoreMlDelegateOptions options = {};  // zero-initialize remaining fields
+ options.enabled_devices = TfLiteCoreMlDelegateAllDevices;
+ TfLiteDelegate* delegate = TfLiteCoreMlDelegateCreate(&options);
+ // Initialize interpreter with delegate
+
+ var delegate = CoreMLDelegate()
+ if delegate == nil {
+ delegate = MetalDelegate() // Add Metal delegate options if necessary.
+ }
-```swift
-var delegate = CoreMLDelegate()
-if delegate == nil {
- delegate = MetalDelegate() // Add Metal delegate options if necessary.
-}
-
-let interpreter = try Interpreter(modelPath: modelPath,
- delegates: [delegate!])
-```
-
-#### Objective-C
-
-```c
-TfLiteCoreMlDelegateOptions options = {};
-delegate = TfLiteCoreMlDelegateCreate(&options);
-if (delegate == NULL) {
- // Add Metal delegate options if necessary
- delegate = TFLGpuDelegateCreate(NULL);
-}
-// Initialize interpreter with delegate
-```
+ let interpreter = try Interpreter(modelPath: modelPath,
+ delegates: [delegate!])
+
+
+ TFLDelegate* delegate = [[TFLCoreMLDelegate alloc] init];
+ if (!delegate) {
+ // Add Metal delegate options if necessary
+ delegate = [[TFLMetalDelegate alloc] init];
+ }
+ // Initialize interpreter with delegate
+
+
+ TfLiteCoreMlDelegateOptions options = {};
+ TfLiteDelegate* delegate = TfLiteCoreMlDelegateCreate(&options);
+ if (delegate == NULL) {
+ // Add Metal delegate options if necessary
+ delegate = TFLGpuDelegateCreate(NULL);
+ }
+ // Initialize interpreter with delegate
+
+
+ We have built a binary CocoaPod that includes the GPU delegate. To switch
+ the project to use it, modify the
+ `tensorflow/tensorflow/lite/examples/ios/camera/Podfile` file to use the
+ `TensorFlowLiteGpuExperimental` pod instead of `TensorFlowLite`.
+
+
+ target 'YourProjectName'
+ # pod 'TensorFlowLite', '1.12.0'
+ pod 'TensorFlowLiteGpuExperimental'
+
+
+ From TensorFlow Lite 2.1.0 to 2.2.0, the GPU delegate is included in the
+ `TensorFlowLiteC` pod. You can choose between `TensorFlowLiteC` and
+ `TensorFlowLiteSwift` depending on the language.
+
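+ For example, a minimal `Podfile` sketch for those versions (pod name as
+ above; the exact version constraint is an assumption for illustration):
+
+ ```
+ target 'YourProjectName'
+   pod 'TensorFlowLiteSwift', '~> 2.2.0'
+ ```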
+
+ import TensorFlowLite
-```swift
-import TensorFlowLite
+ // Load model ...
-// Load model ...
+ // Initialize TensorFlow Lite interpreter with the GPU delegate.
+ let delegate = MetalDelegate()
+ if let interpreter = try? Interpreter(modelPath: modelPath,
+ delegates: [delegate]) {
+ // Run inference ...
+ }
+
+
+ // Import module when using CocoaPods with module support
+ @import TFLTensorFlowLite;
-let delegate = MetalDelegate()
+ // Or import following headers manually
+ #import "tensorflow/lite/objc/apis/TFLMetalDelegate.h"
+ #import "tensorflow/lite/objc/apis/TFLTensorFlowLite.h"
-if let interpreter = try Interpreter(modelPath: modelPath,
- delegates: [delegate]) {
- // Run inference ...
-}
+ // Initialize GPU delegate
+ TFLMetalDelegate* metalDelegate = [[TFLMetalDelegate alloc] init];
-```
+ // Initialize interpreter with model path and GPU delegate
+ TFLInterpreterOptions* options = [[TFLInterpreterOptions alloc] init];
+ NSError* error = nil;
+ TFLInterpreter* interpreter = [[TFLInterpreter alloc]
+ initWithModelPath:modelPath
+ options:options
+ delegates:@[ metalDelegate ]
+ error:&error];
+ if (error != nil) { /* Error handling... */ }
-#### Objective-C
+ if (![interpreter allocateTensorsWithError:&error]) { /* Error handling... */ }
+ if (error != nil) { /* Error handling... */ }
-Note: For Objective-C, GPU delegate is provided via C API.
+ // Run inference ...
+
+
+ #include "tensorflow/lite/c/c_api.h"
+ #include "tensorflow/lite/delegates/gpu/metal_delegate.h"
 
-In your application code, include the GPU delegate header and call the
-`Interpreter::ModifyGraphWithDelegate` function to register the GPU delegate to
-the interpreter:
+ // Initialize model
+ TfLiteModel* model = TfLiteModelCreateFromFile(model_path);
 
-```objc
-#include "tensorflow/lite/c/c_api.h"
-#include "tensorflow/lite/delegates/gpu/metal_delegate.h"
+ // Initialize interpreter with GPU delegate
+ TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
+ TfLiteDelegate* metal_delegate = TFLGpuDelegateCreate(NULL);  // default config
+ TfLiteInterpreterOptionsAddDelegate(options, metal_delegate);
+ TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
+ TfLiteInterpreterOptionsDelete(options);
 
-// Initialize model
-TfLiteModel* model = TfLiteModelCreateFromFile(model_path);
+ TfLiteInterpreterAllocateTensors(interpreter);
 
-// Initialize interpreter with GPU delegate
-TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
-TfLiteDelegate* delegate = TFLGPUDelegateCreate(nil); // default config
-TfLiteInterpreterOptionsAddDelegate(options, metal_delegate);
-TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
-TfLiteInterpreterOptionsDelete(options);
+ NSMutableData *input_data = [NSMutableData dataWithLength:input_size * sizeof(float)];
+ NSMutableData *output_data = [NSMutableData dataWithLength:output_size * sizeof(float)];
+ TfLiteTensor* input = TfLiteInterpreterGetInputTensor(interpreter, 0);
+ const TfLiteTensor* output = TfLiteInterpreterGetOutputTensor(interpreter, 0);
 
-TfLiteInterpreterAllocateTensors(interpreter);
+ // Run inference
+ TfLiteTensorCopyFromBuffer(input, input_data.bytes, input_data.length);
+ TfLiteInterpreterInvoke(interpreter);
+ TfLiteTensorCopyToBuffer(output, output_data.mutableBytes, output_data.length);
 
-NSMutableData *input_data = [NSMutableData dataWithLength:input_size * sizeof(float)];
-NSMutableData *output_data = [NSMutableData dataWithLength:output_size * sizeof(float)];
-TfLiteTensor* input = TfLiteInterpreterGetInputTensor(interpreter, 0);
-const TfLiteTensor* output = TfLiteInterpreterGetOutputTensor(interpreter, 0);
-
-// Run inference
-TfLiteTensorCopyFromBuffer(input, inputData.bytes, inputData.length);
-TfLiteInterpreterInvoke(interpreter);
-TfLiteTensorCopyToBuffer(output, outputData.mutableBytes, outputData.length);
-
-// Clean up
-TfLiteInterpreterDelete(interpreter);
-TFLGpuDelegateDelete(metal_delegate);
-TfLiteModelDelete(model);
-```
+ // Clean up
+ TfLiteInterpreterDelete(interpreter);
+ TFLGpuDelegateDelete(metal_delegate);
+ TfLiteModelDelete(model);
+
+ // THIS:
+ var options = MetalDelegate.Options()
+ options.isPrecisionLossAllowed = false
+ options.waitType = .passive
+ options.isQuantizationEnabled = true
+ let delegate = MetalDelegate(options: options)
 
-```swift
+ // IS THE SAME AS THIS:
+ let delegate = MetalDelegate()
+
+ // THIS:
+ TFLMetalDelegateOptions* options = [[TFLMetalDelegateOptions alloc] init];
+ options.precisionLossAllowed = false;
+ options.waitType = TFLMetalDelegateThreadWaitTypePassive;
+ options.quantizationEnabled = true;
 
-// THIS:
-var options = MetalDelegate.Options()
-options.isPrecisionLossAllowed = false
-options.waitType = .passive
-options.isQuantizationEnabled = false
-let delegate = MetalDelegate(options: options)
+ TFLMetalDelegate* delegate = [[TFLMetalDelegate alloc] initWithOptions:options];
 
-// IS THE SAME AS THIS:
-let delegate = MetalDelegate()
+ // IS THE SAME AS THIS:
+ TFLMetalDelegate* delegate = [[TFLMetalDelegate alloc] init];
+
+ // THIS:
+ const TFLGpuDelegateOptions options = {
+ .allow_precision_loss = false,
+ .wait_type = TFLGpuDelegateWaitTypePassive,
+ .enable_quantization = true,
+ };
-```
+ TfLiteDelegate* delegate = TFLGpuDelegateCreate(&options);
-**C API (also used for Objective-C)**
+ // IS THE SAME AS THIS:
+ TfLiteDelegate* delegate = TFLGpuDelegateCreate(NULL);
+
+
+ var options = MetalDelegate.Options()
+ options.isQuantizationEnabled = false
+ let delegate = MetalDelegate(options: options)
+
+ TFLMetalDelegateOptions* options = [[TFLMetalDelegateOptions alloc] init];
+ options.quantizationEnabled = false;
+ TFLMetalDelegate* delegate = [[TFLMetalDelegate alloc] initWithOptions:options];
+
+ TFLGpuDelegateOptions options = TFLGpuDelegateOptionsDefault();
+ options.enable_quantization = false;
 
-```swift
-var options = MetalDelegate.Options()
-options.isQuantizationEnabled = false
-let delegate = MetalDelegate(options: options)
-```
+ TfLiteDelegate* delegate = TFLGpuDelegateCreate(&options);
+