kristyc committed
Commit c1d8844
1 Parent(s): dd45905

Add a demo with images

Files changed (4)
  1. .gitattributes +1 -0
  2. app.py +80 -1
  3. examples/01.jpg +3 -0
  4. examples/02.jpg +3 -0
.gitattributes CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
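This new .gitattributes entry routes the example JPEGs through Git LFS, so the repository stores small pointer files instead of the multi-megabyte images themselves. Running the standard command git lfs track "*.jpg" would append the same line.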
app.py CHANGED
@@ -6,11 +6,90 @@ from functools import lru_cache
 import cv2
 from google.protobuf.json_format import MessageToDict
 
+mp_drawing = mp.solutions.drawing_utils
+mp_drawing_styles = mp.solutions.drawing_styles
+mp_face_mesh = mp.solutions.face_mesh
+
 logger = log_utils.get_logger()
 
+def process(image, static_image_mode, max_num_faces, refine_landmarks, min_detection_confidence, min_tracking_confidence):
+    with mp_face_mesh.FaceMesh(
+        static_image_mode=static_image_mode,
+        max_num_faces=max_num_faces,
+        refine_landmarks=refine_landmarks,
+        min_detection_confidence=min_detection_confidence,
+        min_tracking_confidence=min_tracking_confidence) as face_mesh:
+        results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
+    if not results.multi_face_landmarks:
+        logger.info("No face landmarks detected")
+        return image
+    annotated_image = image.copy()
+    for face_landmarks in results.multi_face_landmarks:
+        print('face_landmarks:', face_landmarks)
+        mp_drawing.draw_landmarks(
+            image=annotated_image,
+            landmark_list=face_landmarks,
+            connections=mp_face_mesh.FACEMESH_TESSELATION,
+            landmark_drawing_spec=None,
+            connection_drawing_spec=mp_drawing_styles
+                .get_default_face_mesh_tesselation_style())
+        mp_drawing.draw_landmarks(
+            image=annotated_image,
+            landmark_list=face_landmarks,
+            connections=mp_face_mesh.FACEMESH_CONTOURS,
+            landmark_drawing_spec=None,
+            connection_drawing_spec=mp_drawing_styles
+                .get_default_face_mesh_contours_style())
+        mp_drawing.draw_landmarks(
+            image=annotated_image,
+            landmark_list=face_landmarks,
+            connections=mp_face_mesh.FACEMESH_IRISES,
+            landmark_drawing_spec=None,
+            connection_drawing_spec=mp_drawing_styles
+                .get_default_face_mesh_iris_connections_style())
+    return annotated_image
+
 demo = gr.Blocks()
 
 with demo:
-    gr.Markdown("Hello, world!")
+    gr.Markdown(
+        """
+        # MediaPipe Face Mesh
+        A demo of Face Mesh using [Google's MediaPipe](https://google.github.io/mediapipe/solutions/face_mesh.html)
+        """)
+
+    with gr.Row():
+        with gr.Column():
+            gr.Markdown("## Step 1: Configure the model")
+            with gr.Group():
+                static_image_mode = gr.Checkbox(label="Static image mode", value=False)
+                gr.Textbox(show_label=False, value="If unchecked, the solution treats the input images as a video stream. It will try to detect faces in the first input images, and upon a successful detection further localizes the face landmarks. In subsequent images, once all max_num_faces faces are detected and the corresponding face landmarks are localized, it simply tracks those landmarks without invoking another detection until it loses track of any of the faces. This reduces latency and is ideal for processing video frames. If checked, face detection runs on every input image, ideal for processing a batch of static, possibly unrelated, images.")
+                max_num_faces = gr.Slider(label="Max number of faces to detect", value=1, minimum=1, maximum=10, step=1)
+            with gr.Group():
+                refine_landmarks = gr.Checkbox(label="Refine landmarks", value=True)
+                gr.Textbox(show_label=False, value="Whether to further refine the landmark coordinates around the eyes and lips, and output additional landmarks around the irises by applying the Attention Mesh Model.")
+            with gr.Group():
+                min_detection_confidence = gr.Slider(label="Min detection confidence", value=0.5, minimum=0.0, maximum=1.0, step=0.01)
+                gr.Textbox(show_label=False, value="Minimum confidence value ([0.0, 1.0]) from the face detection model for the detection to be considered successful.")
+            with gr.Group():
+                min_tracking_confidence = gr.Slider(label="Min tracking confidence", value=0.5, minimum=0.0, maximum=1.0, step=0.01)
+                gr.Textbox(show_label=False, value="Minimum confidence value ([0.0, 1.0]) from the landmark-tracking model for the face landmarks to be considered tracked successfully, or otherwise face detection will be invoked automatically on the next input image. Setting it to a higher value can increase robustness of the solution, at the expense of a higher latency. Ignored if static_image_mode is true, where face detection simply runs on every image.")
+
+            gr.Markdown("## Step 2: Select an image")
+            with gr.Tabs():
+                with gr.TabItem("Take a picture"):
+                    webcam_image = gr.Image(label="Input image", source="webcam")
+                    gr.Examples(examples=[["examples/01.jpg"], ["examples/02.jpg"]], inputs=[webcam_image])
+                    webcam_image_btn = gr.Button(value="Process")
+                with gr.TabItem("Upload an image"):
+                    upload_image = gr.Image(label="Input image")
+                    upload_image_btn = gr.Button(value="Process")
+        with gr.Column():
+            gr.Markdown("## Step 3: View the results")
+            output_image = gr.Image(label="Output image")
+    config_inputs = [static_image_mode, max_num_faces, refine_landmarks, min_detection_confidence, min_tracking_confidence]
+    outputs = [output_image]
+    upload_image_btn.click(fn=process, inputs=[upload_image, *config_inputs], outputs=outputs)
+    webcam_image_btn.click(fn=process, inputs=[webcam_image, *config_inputs], outputs=outputs)
 
 demo.launch()
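For context, a minimal sketch of exercising the new process() function outside the Gradio UI, assuming it runs in app.py's scope (where the mp_* aliases and logger above are defined); the image path is just one of the examples bundled in this commit:

# Sketch: run the commit's process() on one of the bundled example images.
# cv2.imread returns a BGR array, which matches the BGR2RGB conversion
# that process() performs before handing the frame to MediaPipe.
import cv2

image = cv2.imread("examples/01.jpg")
annotated = process(
    image,
    static_image_mode=True,        # a lone photo, so detect rather than track
    max_num_faces=1,
    refine_landmarks=True,         # also draws the iris landmarks
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5,   # ignored when static_image_mode is True
)
cv2.imwrite("annotated.jpg", annotated)  # BGR output, ready for cv2.imwrite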
examples/01.jpg ADDED

Git LFS Details

  • SHA256: 5508e958d886613c626f069ca99646d9d1179180a31b03942d230e6114e2dadd
  • Pointer size: 132 Bytes
  • Size of remote file: 2.65 MB
examples/02.jpg ADDED

Git LFS Details

  • SHA256: 765694280a3199a45443bd52a32b15631fd5d90c02849381748fc4ee15712f36
  • Pointer size: 132 Bytes
  • Size of remote file: 2.75 MB
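These "Git LFS Details" entries indicate that the JPEGs themselves live on the LFS server; the repository commits only a small pointer file (hence the 132-byte pointer size) recording each image's SHA256 and byte size, which is what the *.jpg rule added to .gitattributes above enables.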