From 1b8f5bc8011a1e85d7a110125c852a4f431d0f59 Mon Sep 17 00:00:00 2001
From: Khanh LeViet
Date: Wed, 25 Sep 2019 18:02:44 -0700
Subject: [PATCH] Add new TF Lite models to doc

PiperOrigin-RevId: 271248192
---
 tensorflow/lite/g3doc/_book.yaml            | 19 +++---
 .../g3doc/models/face_detection/overview.md | 66 +++++++++++++++++++
 .../models/hair_segmentation/overview.md    | 42 ++++++++++++
 3 files changed, 117 insertions(+), 10 deletions(-)
 create mode 100644 tensorflow/lite/g3doc/models/face_detection/overview.md
 create mode 100644 tensorflow/lite/g3doc/models/hair_segmentation/overview.md

diff --git a/tensorflow/lite/g3doc/_book.yaml b/tensorflow/lite/g3doc/_book.yaml
index 5e0eeb6bd6d..f81527e82d8 100644
--- a/tensorflow/lite/g3doc/_book.yaml
+++ b/tensorflow/lite/g3doc/_book.yaml
@@ -110,25 +110,24 @@ upper_tabs:
       contents:
       - title: "Overview"
         path: /lite/models/
-      - heading: "Image classification"
-      - title: "Overview"
+      - heading: "Vision"
+      - title: "Image classification"
         path: /lite/models/image_classification/overview
-      - title: "Android"
-        path: /lite/models/image_classification/android
-      - title: "iOS"
-        path: /lite/models/image_classification/ios
-      - heading: "Other techniques"
+      - title: "Image segmentation"
+        path: /lite/models/segmentation/overview
       - title: "Object detection"
         path: /lite/models/object_detection/overview
       - title: "Pose estimation"
         path: /lite/models/pose_estimation/overview
-      - title: "Segmentation"
-        path: /lite/models/segmentation/overview
       - title: "Style transfer"
         path: /lite/models/style_transfer/overview
+      - title: "Face detection"
+        path: /lite/models/face_detection/overview
+      - title: "Hair segmentation"
+        path: /lite/models/hair_segmentation/overview
+      - heading: "Language"
       - title: "Smart reply"
         path: /lite/models/smart_reply/overview
-
     - name: "Python API"
       skip_translation: true
       contents:

diff --git a/tensorflow/lite/g3doc/models/face_detection/overview.md b/tensorflow/lite/g3doc/models/face_detection/overview.md
new file mode 100644
index 00000000000..e7ddcfc8c70
--- /dev/null
+++ b/tensorflow/lite/g3doc/models/face_detection/overview.md
@@ -0,0 +1,66 @@

# Face detection

## Get started

BlazeFace is a lightweight face detection model designed specifically for the
selfie use case on mobile devices. It works for faces up to 2 meters from the
camera and provides 6 additional facial keypoints, which allow you to estimate
face rotation angles and apply basic AR effects on top of the detection.

For a working demo of an ultra-fast, real-time face detection Android app using
the model, check out this example by
[MediaPipe](https://mediapipe.readthedocs.io/en/latest/):

Android example
Download starter model

### How it works

BlazeFace is a lightweight, well-performing face detector tailored for mobile
GPU inference. It runs at 200–1000+ FPS on flagship devices. This
super-real-time performance enables it to be applied to any augmented reality
pipeline that requires an accurate facial region of interest as an input for
task-specific models, such as 2D/3D facial keypoint or geometry estimation,
facial features or expression classification, and face region segmentation.

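To make the model's interface concrete, the following is a minimal Python
sketch of invoking the downloadable starter model with the TensorFlow Lite
`Interpreter`. The file name `face_detection_front.tflite`, the 128x128 RGB
input, and the [-1, 1] input range are assumptions rather than guarantees about
the starter model, and turning the raw outputs into final boxes and keypoints
still requires the anchor decoding and tie-resolution steps described below.

```python
import numpy as np
import tensorflow as tf

# Hypothetical file name for the downloaded starter model.
interpreter = tf.lite.Interpreter(model_path="face_detection_front.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()

# Assumed preprocessing: a camera frame resized to 128x128 RGB, scaled to [-1, 1].
frame = np.zeros((128, 128, 3), dtype=np.uint8)  # stand-in for a real camera frame
inputs = (frame.astype(np.float32) / 127.5 - 1.0)[np.newaxis, ...]

interpreter.set_tensor(input_details["index"], inputs)
interpreter.invoke()

# The raw outputs encode candidate boxes plus the 6 keypoints per anchor; the
# anchor decoding and tie-resolution steps listed below are omitted here.
for out in output_details:
    print(out["name"], interpreter.get_tensor(out["index"]).shape)
```
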
The new techniques implemented in the model are:

* a lightweight feature extraction network
* a GPU-friendly anchor scheme
* an improved tie resolution strategy as an alternative to non-maximum
  suppression

For each detection, BlazeFace predicts a face bounding box and the 2D
coordinates of 6 facial keypoints:

Id | Part
--- | -----------------
0 | left_eye
1 | right_eye
2 | nose_tip
3 | mouth_center
4 | left_ear_tragion
5 | right_ear_tragion

### Examples of face detection

![Demo](https://storage.googleapis.com/download.tensorflow.org/models/tflite/face_detection/demo.gif)

### How it performs

The model works at several predefined, recommended resolutions, depending on
the input screen aspect ratio: **128x96**, **128x128**, and **96x128**.
Inference times for the **128x96** resolution are shown below:

Device | Inference time (ms)
------------------------------ | -------------------
Apple iPhone 7 | 1.8
Apple iPhone XS | 0.6
Google Pixel 3 | 3.4
Huawei P20 | 5.8
Samsung Galaxy S9+ (SM-G965U1) | 3.7

### Read more about BlazeFace

* [Paper: BlazeFace: Sub-millisecond Neural Face Detection on Mobile GPUs](https://sites.google.com/corp/view/perception-cv4arvr/blazeface)

diff --git a/tensorflow/lite/g3doc/models/hair_segmentation/overview.md b/tensorflow/lite/g3doc/models/hair_segmentation/overview.md
new file mode 100644
index 00000000000..82d2f15d3ee
--- /dev/null
+++ b/tensorflow/lite/g3doc/models/hair_segmentation/overview.md
@@ -0,0 +1,42 @@

# Hair segmentation

## Get started

The hair segmentation model produces a high-quality hair segmentation mask that
is well suited for AR effects, e.g. virtual hair recoloring.

For a working demo of a live hair recoloring Android app using the model, check
out this example by [MediaPipe](https://mediapipe.readthedocs.io/en/latest/):

Android example
Download starter model

### How it works

Hair segmentation refers to computer vision techniques that detect human hair
in images and videos. To be clear, this technology does not recognize who is in
an image; the algorithm only estimates which parts of an image are hair and
which are everything else.

The model takes a video frame as input and returns a mask that indicates, for
each pixel, whether it is hair. For better results, the resulting mask is also
used as an additional input when processing the next frame.

### Model architecture

A standard hourglass segmentation network architecture with skip connections is
used for this model. The input is a 512x512x4 tensor: the channels are red,
green, blue, and the previous frame's mask (zeros for the first frame). The
output is a 512x512x2 tensor, with a background mask in the first channel and a
hair mask in the second (see the sketch at the end of this page).

![Model architecture](https://storage.googleapis.com/download.tensorflow.org/models/tflite/hair_segmentation/model_architecture.png)

### Examples of hair recoloring

![Sample](https://storage.googleapis.com/download.tensorflow.org/models/tflite/hair_segmentation/demo.gif)

### Read more about hair segmentation

* [Paper: Real-time Hair segmentation and recoloring on Mobile GPUs](https://sites.google.com/corp/view/perception-cv4arvr/hair-segmentation)
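
To make the input/output contract described under "Model architecture"
concrete, here is a minimal, illustrative Python sketch of a frame-by-frame
inference loop with the TensorFlow Lite `Interpreter` (this is not the
MediaPipe pipeline). The file name `hair_segmentation.tflite`, the [0, 1] input
range, and the channel-wise comparison used to pick hair pixels are
assumptions:

```python
import numpy as np
import tensorflow as tf

# Hypothetical file name for the downloaded starter model.
interpreter = tf.lite.Interpreter(model_path="hair_segmentation.tflite")
interpreter.allocate_tensors()
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]


def segment_hair(frame_rgb, prev_mask):
    """frame_rgb: 512x512x3 uint8 frame. prev_mask: 512x512x1 float32 mask
    from the previous frame (zeros for the first frame)."""
    rgb = frame_rgb.astype(np.float32) / 255.0  # assumed [0, 1] input range
    # Build the 512x512x4 input: red, green, blue, previous mask.
    inputs = np.concatenate([rgb, prev_mask], axis=-1)[np.newaxis, ...]
    interpreter.set_tensor(input_index, inputs)
    interpreter.invoke()
    # 1x512x512x2 output: background in the first channel, hair in the second.
    scores = interpreter.get_tensor(output_index)[0]
    return (scores[..., 1] > scores[..., 0]).astype(np.float32)


# Frame-by-frame loop: the mask from frame N becomes the fourth input channel
# of frame N+1.
mask = np.zeros((512, 512, 1), dtype=np.float32)
for frame in []:  # replace with your 512x512 RGB video frames
    mask = segment_hair(frame, mask)[..., np.newaxis]
```

Feeding the mask back as the fourth channel is the "additional input to the
next frame" step described in the "How it works" section above.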