innat committed on
Commit
8478e62
β€’
1 Parent(s): edb3cf2
Files changed (14)
  1. README.md +4 -4
  2. app.py +59 -0
  3. examples/0.jpg +0 -0
  4. examples/1.jpg +0 -0
  5. examples/2.png +0 -0
  6. examples/3.jpg +0 -0
  7. examples/4.jpg +0 -0
  8. facedetect.py +23 -0
  9. facemesh.py +46 -0
  10. handposedetect.py +31 -0
  11. holistic.py +34 -0
  12. posestimate.py +26 -0
  13. requirements.txt +2 -0
  14. utils.py +22 -0
README.md CHANGED
@@ -1,8 +1,8 @@
 ---
-title: Google MediaPipe
-emoji: 🐠
-colorFrom: gray
-colorTo: gray
+title: Test Mediapipe
+emoji: 👍
+colorFrom: yellow
+colorTo: indigo
 sdk: gradio
 sdk_version: 3.1.4
 app_file: app.py
app.py ADDED
@@ -0,0 +1,59 @@
+import gradio as gr
+from facemesh import mp_face_mesh_fn
+from facedetect import mp_face_detect_fn
+from handposedetect import mp_hand_pose_detect_fn
+from posestimate import mp_pose_estimation_fn
+from holistic import mp_holistic_fn
+
+
+def run_mediapipe(image, soln_type):
+    if soln_type == 'facemesh':
+        annotated_image = mp_face_mesh_fn(image)
+    elif soln_type == 'facedetect':
+        annotated_image = mp_face_detect_fn(image)
+    elif soln_type == 'handpose':
+        annotated_image = mp_hand_pose_detect_fn(image)
+    elif soln_type == 'pose estimate':
+        annotated_image = mp_pose_estimation_fn(image)
+    elif soln_type == 'holistic':
+        annotated_image = mp_holistic_fn(image)
+    return annotated_image
+
+
+def main():
+    solutions = [
+        'facedetect',
+        'facemesh',
+        'handpose',
+        'pose estimate',
+        'holistic'
+    ]
+
+    sample_images = [
+        ["examples/0.jpg", solutions[0]],
+        ["examples/1.jpg", solutions[1]],
+        ["examples/2.png", solutions[2]],
+        ["examples/3.jpg", solutions[3]],
+        ["examples/4.jpg", solutions[4]],
+    ]
+
+    iface = gr.Interface(
+        fn=run_mediapipe,
+        inputs=[
+            gr.inputs.Image(label="Input Image"),
+            gr.inputs.Radio(
+                solutions,
+                type='value',
+                default=solutions[0],
+                label='Solutions'
+            ),
+        ],
+
+        outputs=gr.outputs.Image(label="MediaPipe"),
+        examples=sample_images
+    )
+
+    iface.launch()
+
+if __name__ == '__main__':
+    main()
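Note on run_mediapipe: annotated_image is assigned only inside the if/elif chain, so an unrecognized soln_type would hit the return with an UnboundLocalError. The gr.inputs.Radio component constrains the value to the five known solutions, but a dict dispatch avoids the failure mode and the repetition. A minimal sketch (hypothetical, not part of this commit; it reuses the imports at the top of app.py):

    SOLUTION_FNS = {
        'facedetect': mp_face_detect_fn,
        'facemesh': mp_face_mesh_fn,
        'handpose': mp_hand_pose_detect_fn,
        'pose estimate': mp_pose_estimation_fn,
        'holistic': mp_holistic_fn,
    }

    def run_mediapipe(image, soln_type):
        # A KeyError naming the bad key beats an UnboundLocalError.
        return SOLUTION_FNS[soln_type](image)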
examples/0.jpg ADDED
examples/1.jpg ADDED
examples/2.png ADDED
examples/3.jpg ADDED
examples/4.jpg ADDED
facedetect.py ADDED
@@ -0,0 +1,23 @@
+import mediapipe as mp
+from utils import read_n_resize
+
+def mp_face_detect_fn(image):
+    mp_face_detection = mp.solutions.face_detection
+    mp_drawing = mp.solutions.drawing_utils
+
+    with mp_face_detection.FaceDetection(
+        min_detection_confidence=0.5, model_selection=0
+    ) as face_detection:
+
+        resized_image_array = read_n_resize(image, read=False)
+
+        # Process the RGB image array with MediaPipe Face Detection.
+        results = face_detection.process(resized_image_array)
+
+
+        annotated_image = resized_image_array.copy()
+        for detection in results.detections:
+            mp_drawing.draw_detection(annotated_image, detection)
+        resized_annotated_image = read_n_resize(annotated_image, read=False)
+        return resized_annotated_image
+
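Caveat: FaceDetection.process sets results.detections to None when no face is found, so the for loop above raises a TypeError on face-free inputs. A guarded variant, as a sketch under the same assumptions (Gradio passes an RGB array; utils.read_n_resize as in this commit):

    import mediapipe as mp
    from utils import read_n_resize

    def mp_face_detect_fn(image):
        mp_face_detection = mp.solutions.face_detection
        mp_drawing = mp.solutions.drawing_utils
        with mp_face_detection.FaceDetection(
            min_detection_confidence=0.5, model_selection=0
        ) as face_detection:
            resized = read_n_resize(image, read=False)
            results = face_detection.process(resized)
            annotated = resized.copy()
            if results.detections:  # None when no face is detected
                for detection in results.detections:
                    mp_drawing.draw_detection(annotated, detection)
            return annotated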
facemesh.py ADDED
@@ -0,0 +1,46 @@
+
+import mediapipe as mp
+from utils import read_n_resize
+
+def mp_face_mesh_fn(image):
+    mp_drawing = mp.solutions.drawing_utils
+    mp_drawing_styles = mp.solutions.drawing_styles
+    mp_face_mesh = mp.solutions.face_mesh
+
+    with mp_face_mesh.FaceMesh(
+        static_image_mode=True,
+        max_num_faces=1,
+        refine_landmarks=True,
+        min_detection_confidence=0.5
+    ) as face_mesh:
+        resized_image = read_n_resize(image, read=False)
+        results = face_mesh.process(resized_image)
+
+        annotated_image = resized_image.copy()
+        for face_landmarks in results.multi_face_landmarks:
+            mp_drawing.draw_landmarks(
+                image=annotated_image,
+                landmark_list=face_landmarks,
+                connections=mp_face_mesh.FACEMESH_TESSELATION,
+                landmark_drawing_spec=None,
+                connection_drawing_spec=mp_drawing_styles
+                .get_default_face_mesh_tesselation_style()
+            )
+            mp_drawing.draw_landmarks(
+                image=annotated_image,
+                landmark_list=face_landmarks,
+                connections=mp_face_mesh.FACEMESH_CONTOURS,
+                landmark_drawing_spec=None,
+                connection_drawing_spec=mp_drawing_styles
+                .get_default_face_mesh_contours_style()
+            )
+            mp_drawing.draw_landmarks(
+                image=annotated_image,
+                landmark_list=face_landmarks,
+                connections=mp_face_mesh.FACEMESH_IRISES,
+                landmark_drawing_spec=None,
+                connection_drawing_spec=mp_drawing_styles
+                .get_default_face_mesh_iris_connections_style()
+            )
+        return annotated_image
+
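The same caveat applies here: results.multi_face_landmarks is None when no face is detected, and iterating over None raises a TypeError. A drop-in guard for the loop, shown for the tesselation pass (the contour and iris passes would sit inside the same if):

    if results.multi_face_landmarks:  # None when no face is found
        for face_landmarks in results.multi_face_landmarks:
            mp_drawing.draw_landmarks(
                image=annotated_image,
                landmark_list=face_landmarks,
                connections=mp_face_mesh.FACEMESH_TESSELATION,
                landmark_drawing_spec=None,
                connection_drawing_spec=mp_drawing_styles
                .get_default_face_mesh_tesselation_style()
            )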
handposedetect.py ADDED
@@ -0,0 +1,31 @@
+import mediapipe as mp
+
+import cv2
+from utils import read_n_resize
+
+def mp_hand_pose_detect_fn(image):
+    mp_drawing = mp.solutions.drawing_utils
+    mp_drawing_styles = mp.solutions.drawing_styles
+    mp_hands = mp.solutions.hands
+
+    with mp_hands.Hands(
+        static_image_mode=True,
+        max_num_hands=5,
+        min_detection_confidence=0.5
+    ) as hands:
+        image = cv2.flip(read_n_resize(image, read=False), 1)
+        results = hands.process(image)
+
+        image_height, image_width, _ = image.shape
+        annotated_image = image.copy()
+
+        for hand_landmarks in results.multi_hand_landmarks:
+            mp_drawing.draw_landmarks(
+                annotated_image,
+                hand_landmarks,
+                mp_hands.HAND_CONNECTIONS,
+                mp_drawing_styles.get_default_hand_landmarks_style(),
+                mp_drawing_styles.get_default_hand_connections_style())
+
+        return cv2.flip(annotated_image, 1)
+
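Two notes: the first cv2.flip mirrors the input into a selfie view (the orientation MediaPipe's hand examples process) and the second flip restores the original orientation; image_height and image_width are computed but never used. And as with the face solutions, results.multi_hand_landmarks is None when no hand is found, so the loop would need a guard along these lines:

    if results.multi_hand_landmarks:  # None when no hand is detected
        for hand_landmarks in results.multi_hand_landmarks:
            mp_drawing.draw_landmarks(
                annotated_image,
                hand_landmarks,
                mp_hands.HAND_CONNECTIONS,
                mp_drawing_styles.get_default_hand_landmarks_style(),
                mp_drawing_styles.get_default_hand_connections_style())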
holistic.py ADDED
@@ -0,0 +1,34 @@
+import mediapipe as mp
+from utils import read_n_resize
+
+def mp_holistic_fn(image):
+    mp_drawing = mp.solutions.drawing_utils
+    mp_drawing_styles = mp.solutions.drawing_styles
+    mp_holistic = mp.solutions.holistic
+
+    with mp_holistic.Holistic(
+        static_image_mode=True,
+        model_complexity=2,
+        enable_segmentation=True,
+        refine_face_landmarks=True
+    ) as holistic:
+        image = read_n_resize(image, read=False)
+        results = holistic.process(image)
+        annotated_image = image.copy()
+
+        mp_drawing.draw_landmarks(
+            annotated_image,
+            results.face_landmarks,
+            mp_holistic.FACEMESH_TESSELATION,
+            landmark_drawing_spec=None,
+            connection_drawing_spec=mp_drawing_styles
+            .get_default_face_mesh_tesselation_style())
+        mp_drawing.draw_landmarks(
+            annotated_image,
+            results.pose_landmarks,
+            mp_holistic.POSE_CONNECTIONS,
+            landmark_drawing_spec=mp_drawing_styles.
+            get_default_pose_landmarks_style())
+
+        return annotated_image
+
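enable_segmentation=True makes Holistic also return results.segmentation_mask (a float mask with the input's height and width), which this function never reads; dropping the flag would save compute. If the mask is wanted, a common use is replacing the background, sketched here assuming image and results as inside mp_holistic_fn:

    import numpy as np

    BG_COLOR = (192, 192, 192)  # flat gray background
    # Broadcast the soft mask to 3 channels and threshold it.
    condition = np.stack((results.segmentation_mask,) * 3, axis=-1) > 0.1
    bg_image = np.zeros(image.shape, dtype=np.uint8)
    bg_image[:] = BG_COLOR
    composited = np.where(condition, image, bg_image)

Unlike the per-landmark loops in facemesh.py and handposedetect.py, draw_landmarks itself returns quietly when its landmark list is None, so no extra guard is needed here when nothing is detected.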
posestimate.py ADDED
@@ -0,0 +1,26 @@
+import mediapipe as mp
+from utils import read_n_resize
+
+def mp_pose_estimation_fn(image):
+    mp_drawing = mp.solutions.drawing_utils
+    mp_drawing_styles = mp.solutions.drawing_styles
+    mp_pose = mp.solutions.pose
+
+    with mp_pose.Pose(
+        static_image_mode=True,
+        model_complexity=2,
+        enable_segmentation=True,
+        min_detection_confidence=0.5
+    ) as pose:
+
+        image = read_n_resize(image, read=False)
+        results = pose.process(image)
+        annotated_image = image.copy()
+        # Draw pose landmarks on the image.
+        mp_drawing.draw_landmarks(
+            annotated_image,
+            results.pose_landmarks,
+            mp_pose.POSE_CONNECTIONS,
+            landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style()
+        )
+        return annotated_image
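Landmark coordinates come back normalized to [0, 1] in x and y; to use them numerically, scale by the image size. A sketch, assuming results and annotated_image as inside mp_pose_estimation_fn (results.pose_landmarks is None when no person is found, hence the guard):

    h, w, _ = annotated_image.shape
    if results.pose_landmarks:
        nose = results.pose_landmarks.landmark[mp_pose.PoseLandmark.NOSE]
        nose_px = (int(nose.x * w), int(nose.y * h))  # pixel coordinates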
requirements.txt ADDED
@@ -0,0 +1,2 @@
+mediapipe
+opencv-python
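gradio itself is not listed: on Hugging Face Spaces the sdk: gradio / sdk_version: 3.1.4 fields in README.md provision it, so requirements.txt only needs the two extra packages. Running app.py outside Spaces would also need a matching pip install gradio==3.1.4.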
utils.py ADDED
@@ -0,0 +1,22 @@
+
+import cv2, math
+
+DESIRED_HEIGHT = 480
+DESIRED_WIDTH = 480
+
+def read_n_resize(image_file, read=True):
+    image = cv2.imread(image_file) if read else image_file
+    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) if read else image
+
+    h, w = image.shape[:2]
+
+    if h < w:
+        img = cv2.resize(
+            image, (DESIRED_WIDTH, math.floor(h/(w/DESIRED_WIDTH)))
+        )
+    else:
+        img = cv2.resize(
+            image, (math.floor(w/(h/DESIRED_HEIGHT)), DESIRED_HEIGHT)
+        )
+
+    return img
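read_n_resize scales whichever side is longer to 480 and the other side proportionally, so aspect ratio is preserved (note cv2.resize takes (width, height)). For a 1280x720 source, h < w, giving a 480-wide result that is floor(720 / (1280 / 480)) = 270 pixels tall. A usage sketch (the path is illustrative):

    from utils import read_n_resize

    # read=True: load from disk (BGR), convert to RGB, then resize.
    img = read_n_resize("examples/0.jpg")
    print(img.shape)  # (270, 480, 3) for a 1280x720 source

    # read=False: the input is already an RGB array, as Gradio provides.
    img = read_n_resize(img, read=False)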