Skip to content

Commit 01a16d0

Browse files
committed
Add some more MLKit snippets
Change-Id: I6f4292995d103572bef3d972cbc62b26f371278f
1 parent 489cd1f commit 01a16d0

File tree

6 files changed: +323 additions, -5 deletions

mlkit/app/src/main/java/com/google/firebase/example/mlkit/FaceDetectionActivity.java

Lines changed: 55 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
import com.google.firebase.ml.vision.common.FirebaseVisionImage;
1313
import com.google.firebase.ml.vision.common.FirebaseVisionPoint;
1414
import com.google.firebase.ml.vision.face.FirebaseVisionFace;
15+
import com.google.firebase.ml.vision.face.FirebaseVisionFaceContour;
1516
import com.google.firebase.ml.vision.face.FirebaseVisionFaceDetector;
1617
import com.google.firebase.ml.vision.face.FirebaseVisionFaceDetectorOptions;
1718
import com.google.firebase.ml.vision.face.FirebaseVisionFaceLandmark;
@@ -91,4 +92,58 @@ public void onFailure(@NonNull Exception e) {
9192
});
9293
// [END run_detector]
9394
}
95+
96+
private void faceOptionsExamples() {
97+
// [START mlkit_face_options_examples]
98+
// High-accuracy landmark detection and face classification
99+
FirebaseVisionFaceDetectorOptions highAccuracyOpts =
100+
new FirebaseVisionFaceDetectorOptions.Builder()
101+
.setPerformanceMode(FirebaseVisionFaceDetectorOptions.ACCURATE)
102+
.setLandmarkMode(FirebaseVisionFaceDetectorOptions.ALL_LANDMARKS)
103+
.setClassificationMode(FirebaseVisionFaceDetectorOptions.ALL_CLASSIFICATIONS)
104+
.build();
105+
106+
// Real-time contour detection of multiple faces
107+
FirebaseVisionFaceDetectorOptions realTimeOpts =
108+
new FirebaseVisionFaceDetectorOptions.Builder()
109+
.setContourMode(FirebaseVisionFaceDetectorOptions.ALL_CONTOURS)
110+
.build();
111+
// [END mlkit_face_options_examples]
112+
}
113+
114+
private void processFaceList(List<FirebaseVisionFace> faces) {
115+
// [START mlkit_face_list]
116+
for (FirebaseVisionFace face : faces) {
117+
Rect bounds = face.getBoundingBox();
118+
float rotY = face.getHeadEulerAngleY(); // Head is rotated to the right rotY degrees
119+
float rotZ = face.getHeadEulerAngleZ(); // Head is tilted sideways rotZ degrees
120+
121+
// If landmark detection was enabled (mouth, ears, eyes, cheeks, and
122+
// nose available):
123+
FirebaseVisionFaceLandmark leftEar = face.getLandmark(FirebaseVisionFaceLandmark.LEFT_EAR);
124+
if (leftEar != null) {
125+
FirebaseVisionPoint leftEarPos = leftEar.getPosition();
126+
}
127+
128+
// If contour detection was enabled:
129+
List<FirebaseVisionPoint> leftEyeContour =
130+
face.getContour(FirebaseVisionFaceContour.LEFT_EYE).getPoints();
131+
List<FirebaseVisionPoint> upperLipBottomContour =
132+
face.getContour(FirebaseVisionFaceContour.UPPER_LIP_BOTTOM).getPoints();
133+
134+
// If classification was enabled:
135+
if (face.getSmilingProbability() != FirebaseVisionFace.UNCOMPUTED_PROBABILITY) {
136+
float smileProb = face.getSmilingProbability();
137+
}
138+
if (face.getRightEyeOpenProbability() != FirebaseVisionFace.UNCOMPUTED_PROBABILITY) {
139+
float rightEyeOpenProb = face.getRightEyeOpenProbability();
140+
}
141+
142+
// If face tracking was enabled:
143+
if (face.getTrackingId() != FirebaseVisionFace.INVALID_ID) {
144+
int id = face.getTrackingId();
145+
}
146+
}
147+
// [END mlkit_face_list]
148+
}
94149
}

mlkit/app/src/main/java/com/google/firebase/example/mlkit/MainActivity.java

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,12 @@
22

33
import android.support.v7.app.AppCompatActivity;
44

5+
import com.google.firebase.ml.vision.FirebaseVision;
56
import com.google.firebase.ml.vision.cloud.FirebaseVisionCloudDetectorOptions;
7+
import com.google.firebase.ml.vision.common.FirebaseVisionImage;
8+
import com.google.firebase.ml.vision.common.FirebaseVisionImageMetadata;
9+
10+
import devrel.firebase.google.com.firebaseoptions.BuildConfig;
611

712
public class MainActivity extends AppCompatActivity {
813

@@ -16,4 +21,27 @@ public void buildCloudVisionOptions() {
1621
// [END ml_build_cloud_vision_options]
1722
}
1823

24+
public void enforceCertificateMatching() {
25+
// Dummy variable
26+
FirebaseVisionImage myImage = FirebaseVisionImage.fromByteArray(new byte[]{},
27+
new FirebaseVisionImageMetadata.Builder().build());
28+
29+
// [START mlkit_certificate_matching]
30+
FirebaseVisionCloudDetectorOptions.Builder optionsBuilder =
31+
new FirebaseVisionCloudDetectorOptions.Builder();
32+
if (!BuildConfig.DEBUG) {
33+
// Requires physical, non-rooted device:
34+
optionsBuilder.enforceCertFingerprintMatch();
35+
}
36+
37+
// Set other options. For example:
38+
optionsBuilder.setModelType(FirebaseVisionCloudDetectorOptions.STABLE_MODEL);
39+
// ...
40+
41+
// And lastly:
42+
FirebaseVisionCloudDetectorOptions options = optionsBuilder.build();
43+
FirebaseVision.getInstance().getVisionCloudLabelDetector(options).detectInImage(myImage);
44+
// [END mlkit_certificate_matching]
45+
}
46+
1947
}

mlkit/app/src/main/java/com/google/firebase/example/mlkit/TextRecognitionActivity.java

Lines changed: 86 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,16 @@
1212
import com.google.firebase.ml.vision.FirebaseVision;
1313
import com.google.firebase.ml.vision.cloud.FirebaseVisionCloudDetectorOptions;
1414
import com.google.firebase.ml.vision.common.FirebaseVisionImage;
15+
import com.google.firebase.ml.vision.common.FirebaseVisionImageMetadata;
16+
import com.google.firebase.ml.vision.document.FirebaseVisionCloudDocumentRecognizerOptions;
17+
import com.google.firebase.ml.vision.document.FirebaseVisionDocumentText;
18+
import com.google.firebase.ml.vision.document.FirebaseVisionDocumentTextRecognizer;
1519
import com.google.firebase.ml.vision.text.FirebaseVisionText;
1620
import com.google.firebase.ml.vision.text.FirebaseVisionTextRecognizer;
21+
import com.google.firebase.ml.vision.text.RecognizedLanguage;
22+
23+
import java.util.Arrays;
24+
import java.util.List;
1725

1826
public class TextRecognitionActivity extends AppCompatActivity {
1927

@@ -85,11 +93,11 @@ private void recognizeTextCloud(FirebaseVisionImage image) {
8593
Task<FirebaseVisionText> result = detector.processImage(image)
8694
.addOnSuccessListener(new OnSuccessListener<FirebaseVisionText>() {
8795
@Override
88-
public void onSuccess(FirebaseVisionText firebaseVisionText) {
96+
public void onSuccess(FirebaseVisionText result) {
8997
// Task completed successfully
9098
// [START_EXCLUDE]
9199
// [START get_text_cloud]
92-
for (FirebaseVisionText.TextBlock block : firebaseVisionText.getTextBlocks()) {
100+
for (FirebaseVisionText.TextBlock block : result.getTextBlocks()) {
93101
Rect boundingBox = block.getBoundingBox();
94102
Point[] cornerPoints = block.getCornerPoints();
95103
String text = block.getText();
@@ -114,4 +122,80 @@ public void onFailure(@NonNull Exception e) {
114122
});
115123
// [END run_detector_cloud]
116124
}
125+
126+
private void processTextBlock(FirebaseVisionText result) {
127+
// [START mlkit_process_text_block]
128+
String resultText = result.getText();
129+
for (FirebaseVisionText.TextBlock block: result.getTextBlocks()) {
130+
String blockText = block.getText();
131+
Float blockConfidence = block.getConfidence();
132+
List<RecognizedLanguage> blockLanguages = block.getRecognizedLanguages();
133+
Point[] blockCornerPoints = block.getCornerPoints();
134+
Rect blockFrame = block.getBoundingBox();
135+
for (FirebaseVisionText.Line line: block.getLines()) {
136+
String lineText = line.getText();
137+
Float lineConfidence = line.getConfidence();
138+
List<RecognizedLanguage> lineLanguages = line.getRecognizedLanguages();
139+
Point[] lineCornerPoints = line.getCornerPoints();
140+
Rect lineFrame = line.getBoundingBox();
141+
for (FirebaseVisionText.Element element: line.getElements()) {
142+
String elementText = element.getText();
143+
Float elementConfidence = element.getConfidence();
144+
List<RecognizedLanguage> elementLanguages = element.getRecognizedLanguages();
145+
Point[] elementCornerPoints = element.getCornerPoints();
146+
Rect elementFrame = element.getBoundingBox();
147+
}
148+
}
149+
}
150+
// [END mlkit_process_text_block]
151+
}
152+
153+
private FirebaseVisionDocumentTextRecognizer getLocalDocumentRecognizer() {
154+
// [START mlkit_local_doc_recognizer]
155+
FirebaseVisionDocumentTextRecognizer textRecognizer = FirebaseVision.getInstance()
156+
.getCloudDocumentTextRecognizer();
157+
// [END mlkit_local_doc_recognizer]
158+
159+
return textRecognizer;
160+
}
161+
162+
private FirebaseVisionDocumentTextRecognizer getCloudDocumentRecognizer() {
163+
// [START mlkit_cloud_doc_recognizer]
164+
// Or, to provide language hints to assist with language detection:
165+
// See https://cloud.google.com/vision/docs/languages for supported languages
166+
FirebaseVisionCloudDocumentRecognizerOptions options =
167+
new FirebaseVisionCloudDocumentRecognizerOptions.Builder()
168+
.setLanguageHints(Arrays.asList("en", "hi"))
169+
.build();
170+
FirebaseVisionDocumentTextRecognizer textRecognizer = FirebaseVision.getInstance()
171+
.getCloudDocumentTextRecognizer(options);
172+
// [END mlkit_cloud_doc_recognizer]
173+
174+
return textRecognizer;
175+
}
176+
177+
private void processDocumentImage() {
178+
// Dummy variables
179+
FirebaseVisionDocumentTextRecognizer textRecognizer = getLocalDocumentRecognizer();
180+
FirebaseVisionImage myImage = FirebaseVisionImage.fromByteArray(new byte[]{},
181+
new FirebaseVisionImageMetadata.Builder().build());
182+
183+
// [START mlkit_process_doc_image]
184+
textRecognizer.processImage(myImage)
185+
.addOnSuccessListener(new OnSuccessListener<FirebaseVisionDocumentText>() {
186+
@Override
187+
public void onSuccess(FirebaseVisionDocumentText result) {
188+
// Task completed successfully
189+
// ...
190+
}
191+
})
192+
.addOnFailureListener(new OnFailureListener() {
193+
@Override
194+
public void onFailure(@NonNull Exception e) {
195+
// Task failed with an exception
196+
// ...
197+
}
198+
});
199+
// [END mlkit_process_doc_image]
200+
}
117201
}

mlkit/app/src/main/java/com/google/firebase/example/mlkit/kotlin/FaceDetectionActivity.kt

Lines changed: 53 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ import com.google.android.gms.tasks.OnFailureListener
55
import com.google.firebase.ml.vision.FirebaseVision
66
import com.google.firebase.ml.vision.common.FirebaseVisionImage
77
import com.google.firebase.ml.vision.face.FirebaseVisionFace
8+
import com.google.firebase.ml.vision.face.FirebaseVisionFaceContour
89
import com.google.firebase.ml.vision.face.FirebaseVisionFaceDetectorOptions
910
import com.google.firebase.ml.vision.face.FirebaseVisionFaceLandmark
1011

@@ -40,8 +41,8 @@ class FaceDetectionActivity : AppCompatActivity() {
4041
// If landmark detection was enabled (mouth, ears, eyes, cheeks, and
4142
// nose available):
4243
val leftEar = face.getLandmark(FirebaseVisionFaceLandmark.LEFT_EAR)
43-
if (leftEar != null) {
44-
val leftEarPos = leftEar!!.position
44+
leftEar?.let {
45+
val leftEarPos = leftEar.position
4546
}
4647

4748
// If classification was enabled:
@@ -69,4 +70,54 @@ class FaceDetectionActivity : AppCompatActivity() {
6970
})
7071
// [END run_detector]
7172
}
73+
74+
private fun faceOptionsExamples() {
75+
// [START mlkit_face_options_examples]
76+
// High-accuracy landmark detection and face classification
77+
val highAccuracyOpts = FirebaseVisionFaceDetectorOptions.Builder()
78+
.setPerformanceMode(FirebaseVisionFaceDetectorOptions.ACCURATE)
79+
.setLandmarkMode(FirebaseVisionFaceDetectorOptions.ALL_LANDMARKS)
80+
.setClassificationMode(FirebaseVisionFaceDetectorOptions.ALL_CLASSIFICATIONS)
81+
.build()
82+
83+
// Real-time contour detection of multiple faces
84+
val realTimeOpts = FirebaseVisionFaceDetectorOptions.Builder()
85+
.setContourMode(FirebaseVisionFaceDetectorOptions.ALL_CONTOURS)
86+
.build()
87+
// [END mlkit_face_options_examples]
88+
}
89+
90+
private fun processFaceList(faces: List<FirebaseVisionFace>) {
91+
// [START mlkit_face_list]
92+
for (face in faces) {
93+
val bounds = face.boundingBox
94+
val rotY = face.headEulerAngleY // Head is rotated to the right rotY degrees
95+
val rotZ = face.headEulerAngleZ // Head is tilted sideways rotZ degrees
96+
97+
// If landmark detection was enabled (mouth, ears, eyes, cheeks, and
98+
// nose available):
99+
val leftEar = face.getLandmark(FirebaseVisionFaceLandmark.LEFT_EAR)
100+
leftEar?.let {
101+
val leftEarPos = leftEar.position
102+
}
103+
104+
// If contour detection was enabled:
105+
val leftEyeContour = face.getContour(FirebaseVisionFaceContour.LEFT_EYE).points
106+
val upperLipBottomContour = face.getContour(FirebaseVisionFaceContour.UPPER_LIP_BOTTOM).points
107+
108+
// If classification was enabled:
109+
if (face.smilingProbability != FirebaseVisionFace.UNCOMPUTED_PROBABILITY) {
110+
val smileProb = face.smilingProbability
111+
}
112+
if (face.rightEyeOpenProbability != FirebaseVisionFace.UNCOMPUTED_PROBABILITY) {
113+
val rightEyeOpenProb = face.rightEyeOpenProbability
114+
}
115+
116+
// If face tracking was enabled:
117+
if (face.trackingId != FirebaseVisionFace.INVALID_ID) {
118+
val id = face.trackingId
119+
}
120+
}
121+
// [END mlkit_face_list]
122+
}
72123
}

mlkit/app/src/main/java/com/google/firebase/example/mlkit/kotlin/MainActivity.kt

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,11 @@
11
package com.google.firebase.example.mlkit.kotlin
22

33
import android.support.v7.app.AppCompatActivity
4+
import com.google.firebase.ml.vision.FirebaseVision
45
import com.google.firebase.ml.vision.cloud.FirebaseVisionCloudDetectorOptions
6+
import com.google.firebase.ml.vision.common.FirebaseVisionImage
7+
import com.google.firebase.ml.vision.common.FirebaseVisionImageMetadata
8+
import devrel.firebase.google.com.firebaseoptions.BuildConfig
59

610
class MainActivity : AppCompatActivity() {
711

@@ -13,4 +17,26 @@ class MainActivity : AppCompatActivity() {
1317
.build()
1418
// [END ml_build_cloud_vision_options]
1519
}
20+
21+
fun enforceCertificateMatching() {
22+
// Dummy variable
23+
val myImage = FirebaseVisionImage.fromByteArray(byteArrayOf(),
24+
FirebaseVisionImageMetadata.Builder().build())
25+
26+
// [START mlkit_certificate_matching]
27+
val optionsBuilder = FirebaseVisionCloudDetectorOptions.Builder()
28+
if (!BuildConfig.DEBUG) {
29+
// Requires physical, non-rooted device:
30+
optionsBuilder.enforceCertFingerprintMatch()
31+
}
32+
33+
// Set other options. For example:
34+
optionsBuilder.setModelType(FirebaseVisionCloudDetectorOptions.STABLE_MODEL)
35+
// ...
36+
37+
// And lastly:
38+
val options = optionsBuilder.build()
39+
FirebaseVision.getInstance().getVisionCloudLabelDetector(options).detectInImage(myImage)
40+
// [END mlkit_certificate_matching]
41+
}
1642
}

Comments (0)