Commit 4c6423c
Parent(s): 854db63
Create helpers.py
Files changed: helpers.py (+95, -0)
helpers.py
ADDED
@@ -0,0 +1,95 @@
# import the necessary packages
from collections import OrderedDict
import numpy as np
import cv2

# define a dictionary that maps the indexes of the facial
# landmarks to specific face regions

# for dlib's 68-point facial landmark detector:
FACIAL_LANDMARKS_68_IDXS = OrderedDict([
    ("mouth", (48, 68)),
    ("inner_mouth", (60, 68)),
    ("right_eyebrow", (17, 22)),
    ("left_eyebrow", (22, 27)),
    ("right_eye", (36, 42)),
    ("left_eye", (42, 48)),
    ("nose", (27, 36)),
    ("jaw", (0, 17))
])

# for dlib's 5-point facial landmark detector:
FACIAL_LANDMARKS_5_IDXS = OrderedDict([
    ("right_eye", (2, 3)),
    ("left_eye", (0, 1)),
    ("nose", (4, 5))  # (start, end) range covering the single nose point
])

# in order to support legacy code, we'll default the indexes to the
# 68-point model
FACIAL_LANDMARKS_IDXS = FACIAL_LANDMARKS_68_IDXS

def rect_to_bb(rect):
    # take a bounding box predicted by dlib and convert it
    # to the format (x, y, w, h) as we would normally do
    # with OpenCV
    x = rect.left()
    y = rect.top()
    w = rect.right() - x
    h = rect.bottom() - y

    # return a tuple of (x, y, w, h)
    return (x, y, w, h)

def shape_to_np(shape, dtype="int"):
    # initialize the array of (x, y)-coordinates
    coords = np.zeros((shape.num_parts, 2), dtype=dtype)

    # loop over all facial landmarks and convert them
    # to a 2-tuple of (x, y)-coordinates
    for i in range(0, shape.num_parts):
        coords[i] = (shape.part(i).x, shape.part(i).y)

    # return the array of (x, y)-coordinates
    return coords

def visualize_facial_landmarks(image, shape, colors=None, alpha=0.75):
    # create two copies of the input image -- one for the
    # overlay and one for the final output image
    overlay = image.copy()
    output = image.copy()

    # if the colors list is None, initialize it with a unique
    # color for each facial landmark region
    if colors is None:
        colors = [(19, 199, 109), (79, 76, 240), (230, 159, 23),
            (168, 100, 168), (158, 163, 32),
            (163, 38, 32), (180, 42, 220), (0, 0, 255)]

    # loop over the facial landmark regions individually
    for (i, name) in enumerate(FACIAL_LANDMARKS_IDXS.keys()):
        # grab the (x, y)-coordinates associated with the
        # face landmark
        (j, k) = FACIAL_LANDMARKS_IDXS[name]
        pts = shape[j:k]

        # check if we are supposed to draw the jawline
        if name == "jaw":
            # since the jawline is a non-enclosed facial region,
            # just draw lines between the (x, y)-coordinates
            for l in range(1, len(pts)):
                ptA = tuple(pts[l - 1])
                ptB = tuple(pts[l])
                cv2.line(overlay, ptA, ptB, colors[i], 2)

        # otherwise, compute the convex hull of the facial
        # landmark coordinates and display it
        else:
            hull = cv2.convexHull(pts)
            cv2.drawContours(overlay, [hull], -1, colors[i], -1)

    # apply the transparent overlay
    cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)

    # return the output image
    return output
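
Below is a minimal usage sketch, not part of the committed file, showing how these helpers are typically wired together with dlib and OpenCV: detect faces, convert each dlib rectangle and landmark prediction into plain NumPy/OpenCV types, and draw the overlay. The model filename shape_predictor_68_face_landmarks.dat, the input path example.jpg, and the output name landmarks.jpg are placeholder assumptions, and dlib must be installed separately.

    # usage sketch: assumes dlib + OpenCV are installed and the file
    # paths below exist (all paths here are placeholders)
    import cv2
    import dlib

    from helpers import rect_to_bb, shape_to_np, visualize_facial_landmarks

    # load dlib's frontal face detector and the 68-point landmark predictor
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

    # read the image and convert it to grayscale for detection
    image = cv2.imread("example.jpg")
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # detect faces, then predict and visualize landmarks for each one
    for rect in detector(gray, 1):
        (x, y, w, h) = rect_to_bb(rect)             # dlib rectangle -> (x, y, w, h)
        shape = shape_to_np(predictor(gray, rect))  # dlib shape -> (68, 2) array
        image = visualize_facial_landmarks(image, shape)

    cv2.imwrite("landmarks.jpg", image)

Here shape_to_np turns dlib's full_object_detection into the (num_parts, 2) array that visualize_facial_landmarks slices with the region ranges defined in FACIAL_LANDMARKS_IDXS.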