add files
Browse files- .gitattributes +1 -0
- .gitignore +2 -0
- app.py +439 -0
- demo_footer.html +3 -0
- demo_header.html +18 -0
- demo_tools.html +11 -0
- examples/00002062.jpg +0 -0
- examples/00002200.jpg +0 -0
- examples/00003245_00.jpg +0 -0
- examples/00005259.jpg +0 -0
- examples/00018022.jpg +0 -0
- examples/00100265.jpg +0 -0
- examples/00824006.jpg +0 -0
- examples/00824008.jpg +0 -0
- examples/00825000.jpg +0 -0
- examples/00826007.jpg +0 -0
- examples/00827009.jpg +0 -0
- examples/00828003.jpg +0 -0
- examples/02316230.jpg +0 -0
- examples/img-above.jpg +0 -0
- face_landmarker.task +3 -0
- face_landmarker.task.txt +8 -0
- glibvision/common_utils.py +112 -0
- glibvision/cv2_utils.py +138 -0
- glibvision/draw_utils.py +39 -0
- glibvision/glandmark_utils.py +48 -0
- glibvision/numpy_utils.py +110 -0
- glibvision/pil_utils.py +30 -0
- gradio_utils.py +60 -0
- mp_box.py +133 -0
- mp_constants.py +331 -0
- mp_utils.py +125 -0
- opencvinpaint.py +106 -0
- requirements.txt +5 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
*.task filter=lfs diff=lfs merge=lfs -text
|
.gitignore
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
__pycache__
|
2 |
+
files
|
app.py
ADDED
@@ -0,0 +1,439 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import spaces
|
2 |
+
import gradio as gr
|
3 |
+
import subprocess
|
4 |
+
from PIL import Image,ImageOps,ImageDraw,ImageFilter
|
5 |
+
import json
|
6 |
+
import os
|
7 |
+
import time
|
8 |
+
|
9 |
+
from mp_utils import get_pixel_cordinate_list,extract_landmark,get_pixel_cordinate
|
10 |
+
from glibvision.draw_utils import points_to_box,box_to_xy,plus_point
|
11 |
+
import mp_constants
|
12 |
+
import mp_box
|
13 |
+
import io
|
14 |
+
import numpy as np
|
15 |
+
from glibvision.pil_utils import fill_points,create_color_image,draw_points,draw_box
|
16 |
+
|
17 |
+
from gradio_utils import save_image,save_buffer,clear_old_files ,read_file
|
18 |
+
import opencvinpaint
|
19 |
+
|
20 |
+
'''
|
21 |
+
Face landmark detection based Face Detection.
|
22 |
+
https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker
|
23 |
+
from model card
|
24 |
+
https://storage.googleapis.com/mediapipe-assets/MediaPipe%20BlazeFace%20Model%20Card%20(Short%20Range).pdf
|
25 |
+
Licensed Apache License, Version 2.0
|
26 |
+
Train with google's dataset(more detail see model card)
|
27 |
+
|
28 |
+
'''
|
29 |
+
|
30 |
+
def picker_color_to_rgba(picker_color):
    """Convert a CSS-style color string from gr.ColorPicker to an RGBA int list.

    Accepts "rgba(r,g,b,a)" (alpha in 0..1) or "rgb(r,g,b)"; whitespace around
    components is tolerated.

    Args:
        picker_color (str): e.g. "rgba(20,20,20,1)" or "rgb(20, 20, 20)".

    Returns:
        list[int]: [r, g, b, a] with every channel in 0..255
        (alpha is scaled from 0..1 to 0..255; defaults to 255 when absent).
    """
    # str.strip with a char set removes the "rgba(" prefix and ")" suffix
    components = picker_color.strip().strip("rgba()").split(",")
    color_value = [int(float(c)) for c in components[:3]]
    # original crashed on 3-component "rgb(...)" input; default alpha to fully opaque
    alpha = float(components[3]) if len(components) > 3 else 1.0
    color_value.append(int(alpha * 255))
    return color_value
39 |
+
|
40 |
+
#@spaces.GPU(duration=120)
|
41 |
+
'''
|
42 |
+
|
43 |
+
innner_eyes_blur - inner eyes blur
|
44 |
+
iris_mask_blur - final iris edge blur
|
45 |
+
'''
|
46 |
+
def process_images(image,eyes_slide_x_ratio,eyes_slide_y_ratio,innner_eyes_blur_ratio=0.1,iris_mask_blur_ratio=0.1,pupil_offset_ratio=0.08,draw_eye_pupil_ratio=1.1,iris_color_value="rgba(20,20,20,255)",eyes_white_erode_ratio=0.2,eyes_ball_mask_ratio=0.9,
                   output_important_only=True,progress=gr.Progress(track_tqdm=True)):
    """Slide the irises of a face photo and return the retouched image.

    Pipeline: detect MediaPipe face landmarks, cut out the eye whites,
    classic-inpaint "blank" eyes (irises removed) and the parts of each iris
    hidden by the eyelid, then paste the completed eyeballs back shifted by
    (eyes_slide_x_ratio, eyes_slide_y_ratio).

    All *_ratio parameters are relative to the detected iris size.
    ``iris_color_value`` is an "rgba(...)" string from gr.ColorPicker.

    Returns:
        tuple: (final PIL image, list of (file_path, label) gallery entries).

    Raises:
        gr.Error: when no input image is supplied.
    """
    clear_old_files()
    if image == None:
        raise gr.Error("Need Image")

    iris_color = tuple(picker_color_to_rgba(iris_color_value))
    #print(iris_color)
    #return None,None

    # TODO resize max 2048

    white_image = create_color_image(image.width,image.height,(255,255,255))
    ## Mediapipe landmark
    progress(0, desc="Start Making Animation")
    mp_image,face_landmarker_result = extract_landmark(image)
    larndmarks=face_landmarker_result.face_landmarks

    ## eyes cordinates
    left_iris_points = get_pixel_cordinate_list(larndmarks,mp_constants.LINE_LEFT_IRIS,image.width,image.height)
    right_iris_points = get_pixel_cordinate_list(larndmarks,mp_constants.LINE_RIGHT_IRIS,image.width,image.height)

    left_box = points_to_box(left_iris_points)
    right_box = points_to_box(right_iris_points)
    # iris radius = half the larger side of the iris bounding box (w or h)
    left_eye_radius = (left_box[2] if left_box[2]>left_box[3] else left_box[3])/2
    right_eye_radius = (right_box[2] if right_box[2]>right_box[3] else right_box[3])/2

    # blur strengths scale with iris size (left iris is used for both eyes)
    innner_eyes_blur = int(left_eye_radius*innner_eyes_blur_ratio)
    iris_mask_blur = int(left_eye_radius*iris_mask_blur_ratio)

    # slide amounts are relative to the iris diameter
    eyes_slide_x = int(left_eye_radius*2 * eyes_slide_x_ratio)
    eyes_slide_y = int(left_eye_radius*2 * eyes_slide_y_ratio)

    # MediaPipe's pupil landmark is not centered; nudge it vertically
    pupil_offset_y = right_eye_radius * pupil_offset_ratio

    point_right_pupil = get_pixel_cordinate(larndmarks,mp_constants.POINT_RIGHT_PUPIL,image.width,image.height)
    point_right_pupil = plus_point(point_right_pupil,[0,pupil_offset_y])

    point_left_pupil = get_pixel_cordinate(larndmarks,mp_constants.POINT_LEFT_PUPIL,image.width,image.height)
    point_left_pupil = plus_point(point_left_pupil,[0,pupil_offset_y])

    # NOTE(review): left_* uses LINE_RIGHT_* constants (and vice versa) —
    # presumably mirrored viewer/subject naming; confirm against mp_constants
    left_inner_eyes = get_pixel_cordinate_list(larndmarks,mp_constants.LINE_RIGHT_UPPER_INNER_EYE+mp_constants.LINE_RIGHT_LOWER_INNER_EYE,image.width,image.height)
    right_inner_eyes = get_pixel_cordinate_list(larndmarks,mp_constants.LINE_LEFT_UPPER_INNER_EYE+mp_constants.LINE_LEFT_LOWER_INNER_EYE ,image.width,image.height)

    # punch the eye-white polygons out of the white canvas (black holes)
    left_white_eyes = get_pixel_cordinate_list(larndmarks,mp_constants.LINE_LEFT_EYES_WHITE,image.width,image.height)
    fill_points(white_image,left_white_eyes,(0,0,0))

    right_white_eyes = get_pixel_cordinate_list(larndmarks,mp_constants.LINE_RIGHT_EYES_WHITE ,image.width,image.height)
    fill_points(white_image,right_white_eyes,(0,0,0))

    left_eyes_box = points_to_box(left_white_eyes)
    right_eyes_box = points_to_box(right_white_eyes)

    # rectangle mask covering both eye bounding boxes
    black_image = create_color_image(image.width,image.height,(0,0,0))
    draw_box(black_image,left_eyes_box,fill=(255,255,255))
    draw_box(black_image,right_eyes_box,fill=(255,255,255))

    eyes_mask_area_image = black_image.convert("L")
    eyes_mask_image = white_image.convert("L") #eyes-white-area-hole painted black

    galleries = []
    def add_webp(add_image,label,important=False):
        # Save an intermediate image for the output gallery; non-important
        # images are skipped when "output important only" is checked.
        if important ==False and output_important_only == True:
            return

        file_path = save_image(add_image,"webp")
        galleries.append((file_path,label))

    # Create EYE LINE IMAGE (debug visualization of detected landmarks)
    eyes_line_image = image.copy()
    draw_points(eyes_line_image,left_inner_eyes,outline=(200,200,255),fill=None,width=3)
    draw_points(eyes_line_image,right_inner_eyes,outline=(200,200,255),fill=None,width=3)

    draw_points(eyes_line_image,left_white_eyes,outline=(255,0,0),fill=None,width=4)
    draw_points(eyes_line_image,right_white_eyes,outline=(255,0,0),fill=None,width=4)
    draw_points(eyes_line_image,left_iris_points,outline=(0,255,0),fill=None,width=4)
    draw_points(eyes_line_image,right_iris_points,outline=(0,255,0),fill=None,width=4)
    add_webp(eyes_line_image,"eyes-line",True)

    # eyes socket(face) image: everything except the eye whites
    rgba_image = image.convert("RGBA")
    rgba_image.putalpha(eyes_mask_image)
    eyes_socket = rgba_image
    add_webp(eyes_socket,"eyes-socket",True)
    eyes_socket_mask = eyes_mask_image

    # Save Eyes mask and area
    eyes_white_mask = ImageOps.invert(eyes_mask_image)
    add_webp(eyes_white_mask,"eyes-mask")
    add_webp(eyes_mask_area_image,"eyes-box")

    # Remove Edge, shrink the white mask so pinkish eyelid edges are excluded
    erode_size = int(left_box[3]*eyes_white_erode_ratio) # eyes-height base #TODO take care right eyes
    if erode_size%2==0:
        erode_size+=1  # MinFilter requires an odd kernel size
    eyes_white_mask=eyes_white_mask.filter(ImageFilter.MinFilter(erode_size))

    # eyes_only_image inner-white-eyes - erode
    rgba_image = image.convert("RGBA")
    rgba_image.putalpha(eyes_white_mask)
    eyes_only_image = rgba_image
    add_webp(eyes_only_image,"eyes-only")
    eyes_only_image_mask = eyes_white_mask.copy()

    # black-out circles where the irises are, leaving eye-white to inpaint
    draw = ImageDraw.Draw(eyes_white_mask)
    draw.circle(point_right_pupil,left_eye_radius*draw_eye_pupil_ratio,fill=(0))
    draw.circle(point_left_pupil,left_eye_radius*draw_eye_pupil_ratio,fill=(0))

    rgba_image = image.convert("RGBA")
    rgba_image.putalpha(eyes_white_mask)
    add_webp(rgba_image,"white-inapint-image",True)

    eyes_mask_area_image.paste(ImageOps.invert(eyes_white_mask),None,mask=eyes_white_mask)
    add_webp(eyes_mask_area_image,"white-inapint-mask")

    # crop each eye region (image + mask) for per-eye inpainting
    cropped_right_eye = rgba_image.crop(box_to_xy(right_eyes_box))
    add_webp(cropped_right_eye,"right-eye")
    cropped_right_eye_mask = eyes_mask_area_image.crop(box_to_xy(right_eyes_box))
    add_webp(cropped_right_eye_mask,"right-eye-mask")

    cropped_left_eye = rgba_image.crop(box_to_xy(left_eyes_box))
    add_webp(cropped_left_eye,"left-eye")
    cropped_left_eye_mask = eyes_mask_area_image.crop(box_to_xy(left_eyes_box))
    add_webp(cropped_left_eye_mask,"left-eye-mask")

    # classic-inpaint settings for filling the "blank eye" holes
    inpaint_radius = 20
    blur_radius = 15
    edge_expand = 4

    inpaint_mode = "Telea"
    inner_eyes_image = create_color_image(image.width,image.height,color=(0,0,0,0))
    inpaint_right,tmp_mask=opencvinpaint.process_cvinpaint(cropped_right_eye,cropped_right_eye_mask.convert("RGB"),inpaint_radius,blur_radius,edge_expand,inpaint_mode)
    add_webp(inpaint_right,"right-eye")
    inpaint_left,tmp_mask=opencvinpaint.process_cvinpaint(cropped_left_eye,cropped_left_eye_mask.convert("RGB"),inpaint_radius,blur_radius,edge_expand,inpaint_mode)
    add_webp(inpaint_left,"left-eye")

    inner_eyes_image.paste(inpaint_right,box_to_xy(right_eyes_box))
    inner_eyes_image.paste(inpaint_left,box_to_xy(left_eyes_box))
    add_webp(inner_eyes_image,"inpainted-eyes",True)
    eyes_blank = inner_eyes_image.copy()
    eyes_blank.paste(eyes_socket,eyes_socket_mask)
    add_webp(eyes_blank,"eyes_blank",True)

    eyes_move_pt = (eyes_slide_x,eyes_slide_y)
    draw_pupil_border=2

    draw_left_pupil_radius = int(left_eye_radius*draw_eye_pupil_ratio)
    draw_right_pupil_radius = int(right_eye_radius*draw_eye_pupil_ratio)

    # outline the (slightly enlarged) iris circles on the eyes-only layer
    eyes_ball_image = eyes_only_image.convert("RGBA")#create_color_image(image.width,image.height,color=(0,0,0,0))
    draw = ImageDraw.Draw(eyes_ball_image)

    draw.circle(point_right_pupil,draw_right_pupil_radius,outline=iris_color,width=draw_pupil_border)
    draw.circle(point_left_pupil,draw_left_pupil_radius,outline=iris_color,width=draw_pupil_border)
    add_webp(eyes_ball_image,"eyes-ball-inpaint-base",True)

    #draw mask too

    eyes_ball_image_mask = create_color_image(image.width,image.height,color=(0,0,0))
    draw = ImageDraw.Draw(eyes_ball_image_mask)
    draw.circle(point_right_pupil,draw_right_pupil_radius-draw_pupil_border,fill=(255,255,255))
    draw.circle(point_left_pupil,draw_left_pupil_radius-draw_pupil_border,fill=(255,255,255))
    add_webp(eyes_ball_image_mask,"eyes-ball-image-mask")
    eyes_ball_image_mask = eyes_ball_image_mask.convert("L")

    eyes_ball_image_inpaint_mask = eyes_ball_image_mask.copy()
    eyes_ball_image_inpaint_mask.paste(ImageOps.invert(eyes_only_image_mask),mask=eyes_only_image_mask)
    add_webp(eyes_ball_image_inpaint_mask,"eyes_ball_image_inpaint_mask")

    ### create inpaint and replace: complete iris parts hidden by the eyelid
    pupil_inpaint_radius = 5
    pupil_blur_radius = 2
    pupil_edge_expand = 5

    inpaint_eyes_ball_image,tmp_mask=opencvinpaint.process_cvinpaint(eyes_ball_image,eyes_ball_image_inpaint_mask.convert("RGB"),pupil_inpaint_radius,pupil_blur_radius,pupil_edge_expand,inpaint_mode)
    inpaint_eyes_ball_image.putalpha(eyes_ball_image_mask)
    add_webp(inpaint_eyes_ball_image,"inpaint_eyes_ball_image")
    eyes_ball_image = inpaint_eyes_ball_image

    eyes_and_ball_mask = eyes_only_image_mask.copy()
    eyes_and_ball_mask.paste(eyes_ball_image_mask,mask=eyes_ball_image_mask)
    add_webp(eyes_and_ball_mask,"eyes-ball-mask")

    eyes_ball_image.paste(eyes_only_image,mask=eyes_only_image_mask)
    add_webp(eyes_ball_image,"eyes-ball",True)

    # paste the completed eyeballs back, shifted by the slide offset
    inner_eyes_image.paste(eyes_ball_image,eyes_move_pt,mask=eyes_and_ball_mask)
    add_webp(inner_eyes_image,"inner-eyes")

    inner_eyes_image.paste(eyes_socket,None,mask=eyes_socket_mask)

    #ImageFilter.BLUR,"Smooth More":ImageFilter.SMOOTH_MORE,"Smooth":ImageFilter.SMOOTH
    filtered_image = inner_eyes_image.filter(ImageFilter.GaussianBlur(radius=innner_eyes_blur))
    add_webp(filtered_image,"bluerd_inner_face",True)

    #filtered_image.paste(eyes_only_image,eyes_move_pt,mask=eyes_ball_image_mask.convert("L"))

    ### create innner mask minus eyeballs
    white_image = create_color_image(image.width,image.height,color=(255,255,255))
    draw = ImageDraw.Draw(white_image)
    right_eyes_xy = get_pixel_cordinate(larndmarks,mp_constants.POINT_RIGHT_PUPIL,image.width,image.height)
    left_eyes_xy = get_pixel_cordinate(larndmarks,mp_constants.POINT_LEFT_PUPIL,image.width,image.height)
    draw.circle(plus_point(left_eyes_xy,eyes_move_pt),left_eye_radius*eyes_ball_mask_ratio,fill=(0,0,0,255))
    draw.circle(plus_point(right_eyes_xy,eyes_move_pt),right_eye_radius*eyes_ball_mask_ratio,fill=(0,0,0,255))
    add_webp(white_image,"eyes_ball_mask")

    eyes_socket_mask_invert = ImageOps.invert(eyes_socket_mask)
    eyes_socket_mask_invert.paste(white_image,eyes_socket_mask_invert)
    add_webp(eyes_socket_mask_invert,"inner_mask_without_eyesball")

    ### final paste eyes-ball and outer-faces on blured inner
    eyes_socket_mask_invert = eyes_socket_mask_invert.filter(ImageFilter.GaussianBlur(radius=iris_mask_blur))
    add_webp(eyes_socket_mask_invert,"inner_mask_without_eyesball-blur",True)

    filtered_image.paste(inner_eyes_image,None,mask=ImageOps.invert(eyes_socket_mask_invert))
    filtered_image.paste(eyes_socket,None,mask=eyes_socket_mask)
    file_path = save_image(filtered_image,"webp")

    return filtered_image,galleries
|
296 |
+
|
297 |
+
|
298 |
+
|
299 |
+
|
300 |
+
# Page-level CSS injected into the Gradio Blocks app.
css="""
#col-left {
    margin: 0 auto;
    max-width: 640px;
}
#col-right {
    margin: 0 auto;
    max-width: 640px;
}
.grid-container {
    display: flex;
    align-items: center;
    justify-content: center;
    gap:10px
}

.image {
    width: 128px;
    height: 128px;
    object-fit: cover;
}

.text {
    font-size: 16px;
}
"""

#css=css,


# Gradio UI: upload a face image, choose slide ratios and advanced tuning
# parameters, then run process_images via the button.
with gr.Blocks(css=css, elem_id="demo-container") as demo:
    with gr.Column():
        gr.HTML(read_file("demo_header.html"))
        gr.HTML(read_file("demo_tools.html"))
        with gr.Row():
            with gr.Column():
                image = gr.Image(height=800,sources=['upload','clipboard'],image_mode='RGB',elem_id="image_upload", type="pil", label="Image")
                with gr.Row(elem_id="prompt-container", equal_height=False):
                    with gr.Row():
                        btn = gr.Button("Slide Eyes Direction", elem_id="run_button",variant="primary")

                with gr.Accordion(label="Eyes Slide", open=True):
                    with gr.Row(equal_height=False):
                        eyes_slide_x_ratio = gr.Slider(
                            label="Horizontal Slide (Iris based size)",
                            minimum=-2,
                            maximum=2,
                            step=0.01,
                            value=0,info="Based on iris size: minus to left, plus to right")
                        eyes_slide_y_ratio = gr.Slider(
                            label="Vertical Slide (Iris based size)",
                            minimum=-1.5,
                            maximum=1.5,
                            step=0.01,
                            value=0.25,info="Based on iris size: minus to up, plus to down")

                with gr.Accordion(label="Advanced Settings", open=False):

                    with gr.Row( equal_height=True):
                        innner_eyes_blur_ratio = gr.Slider(
                            label="Inner Eyes Blur Ratio",
                            minimum=0,
                            maximum=1,
                            step=0.01,
                            value=0.2,info="increase value to make inner eyes flatter")
                        iris_mask_blur_ratio = gr.Slider(
                            label="Iris Mask Blur Ratio",
                            minimum=0,
                            maximum=1,
                            step=0.01,
                            value=0.15,info="mask edge smooth")
                    with gr.Row( equal_height=True):
                        pupil_offset_ratio = gr.Slider(
                            label="Pupil center Offset Y",
                            minimum=-0.5,
                            maximum=0.5,
                            step=0.01,
                            value=-0.08,info="mediapipe detection is not middle")
                        draw_eye_pupil_ratio = gr.Slider(
                            label="Draw Pupil radius ratio",
                            minimum=0.5,
                            maximum=1.5,
                            step=0.01,
                            value=1.1,info="mediapipe detection is usually small")
                        iris_color_value = gr.ColorPicker(value="rgba(20,20,20,1)",label="Iris Border Color")
                    with gr.Row( equal_height=True):
                        eyes_white_erode_ratio = gr.Slider(
                            label="Eye White Erode ratio",
                            minimum=0,
                            maximum=0.5,
                            step=0.01,
                            value=0.1,info="eyes edge is pink")
                        eyes_ball_mask_ratio = gr.Slider(
                            label="Eye Ball Mask ratio",
                            minimum=0,
                            maximum=1,
                            step=0.01,
                            value=0.9,info="iris blur and mask for img2img")
                    with gr.Row( equal_height=True):
                        output_important_only=gr.Checkbox(label="output important image only",value=True)


            with gr.Column():
                animation_out = gr.Image(height=760,label="Result", elem_id="output-animation")
                image_out = gr.Gallery(label="Output", elem_id="output-img",preview=True)


        # input order must match the process_images signature
        btn.click(fn=process_images, inputs=[image,eyes_slide_x_ratio,eyes_slide_y_ratio,innner_eyes_blur_ratio,iris_mask_blur_ratio,
                                             pupil_offset_ratio,draw_eye_pupil_ratio,iris_color_value,eyes_white_erode_ratio,eyes_ball_mask_ratio,output_important_only
                                             ],outputs=[animation_out,image_out] ,api_name='infer')
        gr.Examples(
            examples =[
                ["examples/02316230.jpg"],
                ["examples/00003245_00.jpg"],
                ["examples/00827009.jpg"],
                ["examples/00002062.jpg"],
                ["examples/00824008.jpg"],
                ["examples/00825000.jpg"],
                ["examples/00826007.jpg"],
                ["examples/00824006.jpg"],
                ["examples/00828003.jpg"],
                ["examples/00002200.jpg"],
                ["examples/00005259.jpg"],
                ["examples/00018022.jpg"],
                ["examples/img-above.jpg"],
                ["examples/00100265.jpg"],
            ],
            #examples =["examples/00003245_00.jpg","examples/00002062.jpg","examples/00100265.jpg","examples/00824006.jpg","examples/00824008.jpg",
            #           "examples/00825000.jpg","examples/00826007.jpg","examples/00827009.jpg","examples/00828003.jpg",],
            inputs=[image],examples_per_page=5
        )
        gr.HTML(read_file("demo_footer.html"))

if __name__ == "__main__":
    demo.launch()
|
demo_footer.html
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
<div>
|
2 |
+
<P> Images are generated with <a href="https://huggingface.co/black-forest-labs/FLUX.1-schnell">FLUX.1-schnell</a> and licensed under <a href="http://www.apache.org/licenses/LICENSE-2.0">the Apache 2.0 License</a>
|
3 |
+
</div>
|
demo_header.html
ADDED
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<div style="text-align: center;">
|
2 |
+
<h1>
|
3 |
+
Mediapipe Change Eyes Direction
|
4 |
+
</h1>
|
5 |
+
<div class="grid-container">
|
6 |
+
<img src="https://akjava.github.io/AIDiagramChatWithVoice-FaceCharacter/webp/128/00544245.webp" alt="Mediapipe Face Detection" class="image">
|
7 |
+
|
8 |
+
<p class="text">
|
9 |
+
This Space use <a href="http://www.apache.org/licenses/LICENSE-2.0">the Apache 2.0</a> Licensed <a href="https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker">Mediapipe FaceLandmarker</a> <br>
|
10 |
+
<a href="https://huggingface.co/blog/Akjava/eyes-slide-move/">[Article>]</a>Eyes Slide-Move:Classic-Inpainting fill hole and complete missing iris<br>
|
11 |
+
(1) Make "blank eyes" (remove irises) using classic-inpaint<br>
|
12 |
+
(2) Complete the missing iris hidden by the eyelid using classic-inpaint<br>
|
13 |
+
(3) Move the iris to the desired (x, y) coordinates<br>
|
14 |
+
The result is not so bad. If the face is big, you can improve it with the below Flux.1 schnell img2img/inpaint tools<br>
|
15 |
+
</p>
|
16 |
+
</div>
|
17 |
+
|
18 |
+
</div>
|
demo_tools.html
ADDED
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<div style="text-align: center;">
|
2 |
+
<p>
|
3 |
+
<a href="https://huggingface.co/spaces/Akjava/flux1-schnell-img2img">Flux1-Img2Img(GPU)</a> |
|
4 |
+
<a href="https://huggingface.co/spaces/Akjava/flux1-schnell-mask-inpaint">Flux1-Inpaint(GPU)</a> |
|
5 |
+
<a href="https://huggingface.co/spaces/Akjava/mediapipe-68-points-facial-mask">Create 68 points Parts Mask</a> |
|
6 |
+
<a href="https://huggingface.co/spaces/Akjava/histgram-color-matching">Histgram Color Matching</a> |
|
7 |
+
<a href="https://huggingface.co/spaces/Akjava/WebPTalkHead">WebP anime with 3 images</a> |
|
8 |
+
<a href="https://huggingface.co/spaces/Akjava/WebP-Resize-Convert">WebP Resize Animation</a>
|
9 |
+
</p>
|
10 |
+
<p></p>
|
11 |
+
</div>
|
examples/00002062.jpg
ADDED
examples/00002200.jpg
ADDED
examples/00003245_00.jpg
ADDED
examples/00005259.jpg
ADDED
examples/00018022.jpg
ADDED
examples/00100265.jpg
ADDED
examples/00824006.jpg
ADDED
examples/00824008.jpg
ADDED
examples/00825000.jpg
ADDED
examples/00826007.jpg
ADDED
examples/00827009.jpg
ADDED
examples/00828003.jpg
ADDED
examples/02316230.jpg
ADDED
examples/img-above.jpg
ADDED
face_landmarker.task
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:64184e229b263107bc2b804c6625db1341ff2bb731874b0bcc2fe6544e0bc9ff
|
3 |
+
size 3758596
|
face_landmarker.task.txt
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Face landmark detection
|
2 |
+
https://ai.google.dev/edge/mediapipe/solutions/vision/face_landmarker
|
3 |
+
|
4 |
+
model card page is
|
5 |
+
https://storage.googleapis.com/mediapipe-assets/MediaPipe%20BlazeFace%20Model%20Card%20(Short%20Range).pdf
|
6 |
+
|
7 |
+
license is Apache2.0
|
8 |
+
https://www.apache.org/licenses/LICENSE-2.0.html
|
glibvision/common_utils.py
ADDED
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
def check_exists_files(files,dirs,exit_on_error=True):
    """Verify that the given files and directories exist on disk.

    Args:
        files: a path string, a list of path strings, or None to skip.
        dirs: a path string, a list of path strings, or None to skip.
        exit_on_error: when True, terminate the process on the first
            missing path; when False, return 1 instead.

    Returns:
        int: 0 when everything exists, 1 on the first missing path
        (only reachable with exit_on_error=False).
    """
    if files is not None:
        file_list = [files] if isinstance(files, str) else files
        for path in file_list:
            if os.path.isfile(path):
                continue
            print(f"File {path} not found")
            if exit_on_error:
                exit(1)
            return 1
    if dirs is not None:
        dir_list = [dirs] if isinstance(dirs, str) else dirs
        for path in dir_list:
            if os.path.isdir(path):
                continue
            print(f"Dir {path} not found")
            if exit_on_error:
                exit(1)
            return 1
    return 0
|
24 |
+
|
25 |
+
# Recognized image extensions. NOTE(review): the listing helpers below still
# hard-code ".jpg" instead of consulting this list (see their TODOs).
image_extensions =[".jpg"]
|
26 |
+
|
27 |
+
def add_name_suffix(file_name,suffix,replace_suffix=False):
    """Insert a suffix before the file extension.

    A leading underscore is forced onto the suffix. With replace_suffix=True,
    everything from the last "_" in the stem (if any) is replaced instead of
    appended, e.g. ("a_old.jpg", "new", True) -> "a_new.jpg".
    """
    normalized = suffix if suffix.startswith("_") else "_" + suffix  # force add

    stem, ext = os.path.splitext(file_name)
    if replace_suffix:
        cut = stem.rfind("_")
        if cut != -1:
            return f"{stem[0:cut]}{normalized}{ext}"

    return f"{stem}{normalized}{ext}"
|
38 |
+
|
39 |
+
def replace_extension(file_name,new_extension,suffix=None,replace_suffix=False):
    """Swap the file extension, optionally inserting a name suffix.

    A leading dot is forced onto new_extension. When suffix is truthy the
    result is routed through add_name_suffix (honoring replace_suffix).
    """
    dotted = new_extension if new_extension.startswith(".") else "." + new_extension

    stem, _old_ext = os.path.splitext(file_name)
    if suffix:
        return add_name_suffix(stem + dotted, suffix, replace_suffix)
    return f"{stem}{dotted}"
|
48 |
+
|
49 |
+
def list_digit_images(input_dir,sort=True):
    """List ``*.jpg`` files in input_dir whose stem is purely digits.

    Returns file names (not full paths), alphabetically sorted unless
    sort=False.
    """
    global image_extensions
    matches = []
    for entry in os.listdir(input_dir):
        if not entry.endswith(".jpg"):  # TODO check image
            continue
        stem, _ext = os.path.splitext(entry)
        if stem.isdigit():
            matches.append(entry)

    if sort:
        matches.sort()

    return matches
|
64 |
+
def list_suffix_images(input_dir,suffix,is_digit=True,sort=True):
    """List ``*.jpg`` files whose stem ends with ``suffix``.

    With is_digit=True (default) the stem with all occurrences of suffix
    removed must be purely digits. Returns file names, sorted unless
    sort=False.
    """
    global image_extensions
    matches = []
    for entry in os.listdir(input_dir):
        if not entry.endswith(".jpg"):  # TODO check image
            continue
        stem, _ext = os.path.splitext(entry)
        if not stem.endswith(suffix):
            continue
        if is_digit and not stem.replace(suffix, "").isdigit():
            continue
        matches.append(entry)

    if sort:
        matches.sort()

    return matches
|
81 |
+
|
82 |
+
import time
|
83 |
+
|
84 |
+
class ProgressTracker:
    """Tracks progress of a batch job, printing elapsed and estimated remaining time."""

    def __init__(self,key, total_target):
        """
        Args:
            key: label printed with every progress line.
            total_target (int): total number of items to process.
        """
        self.key = key
        self.total_target = total_target
        self.complete_target = 0  # items finished so far
        self.start_time = time.time()

    def update(self):
        """Advance progress by one item and print elapsed/remaining minutes."""
        self.complete_target += 1
        consumed_time = time.time() - self.start_time
        if self.complete_target > 0:
            # linear extrapolation from the average time per completed item
            remain_time = (consumed_time / self.complete_target) * (self.total_target - self.complete_target)
        else:
            remain_time = 0
        print(f"stepped {self.key} {self.total_target} of {self.complete_target}, consumed {(consumed_time / 60):.1f} min, remain {(remain_time / 60):.1f} min")
|
111 |
+
|
112 |
+
|
glibvision/cv2_utils.py
ADDED
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
|
2 |
+
import numpy as np
|
3 |
+
|
4 |
+
|
5 |
+
|
6 |
+
def draw_bbox(image,box,color=(255,0,0),thickness=1):
    """Draw the outline of box=(x, y, w, h) onto a cv2/numpy image in place."""
    if thickness==0:
        # thickness 0 means "skip drawing" (cv2 would reject it)
        return

    left = int(box[0])
    top = int(box[1])
    right = int(box[0]+box[2])
    bottom = int(box[1]+box[3])
    box_points =[(left,top),(right,top),(right,bottom),(left,bottom)]

    # isClosed=True joins the last corner back to the first
    cv2.polylines(image, [np.array(box_points)], isClosed=True, color=color, thickness=thickness)
|
17 |
+
|
18 |
+
|
19 |
+
def to_int_points(points):
    """Truncate each (x, y) pair to integer pixel coordinates.

    Returns a new list of [x, y] int lists; the input is not modified.
    """
    return [[int(pt[0]), int(pt[1])] for pt in points]
|
24 |
+
|
25 |
+
def draw_text(img, text, point, font_scale=0.5, color=(200, 200, 200), thickness=1):
    """Render ``text`` at ``point`` on a cv2 image in place (anti-aliased simplex font)."""
    font = cv2.FONT_HERSHEY_SIMPLEX
    # str() so numeric labels (e.g. point indices) are accepted too
    cv2.putText(img, str(text), point, font, font_scale, color, thickness, cv2.LINE_AA)
|
28 |
+
|
29 |
+
# Shared drawing state for plot_points, reconfigured via set_plot_text.
plot_text_color = (200, 200, 200)  # label color
plot_text_font_scale = 0.5
plot_index = 1  # running point-label counter (increments per drawn point)
plot_text = True  # draw index labels next to points when True
|
33 |
+
|
34 |
+
def set_plot_text(is_plot,text_font_scale,text_color):
    """Configure point-index labelling for plot_points and reset the counter.

    Updates the module-level plot_text / plot_text_font_scale /
    plot_text_color state and restarts plot_index at 1.
    """
    global plot_index,plot_text,plot_text_font_scale,plot_text_color
    plot_text = is_plot
    plot_index = 1
    plot_text_font_scale = text_font_scale
    plot_text_color = text_color
|
40 |
+
|
41 |
+
def plot_points(image,points,isClosed=False,circle_size=3,circle_color=(255,0,0),line_size=1,line_color=(0,0,255)):
    """Draw filled circles (optionally numbered via module plot state) at each
    point and connect the points with a polyline. Mutates `image` in place."""
    global plot_index,plot_text
    pts = to_int_points(points)
    if circle_size>0:
        for pt in pts:
            cv2.circle(image,pt,circle_size,circle_color,-1)
            if plot_text:
                draw_text(image,plot_index,pt,plot_text_font_scale,plot_text_color)
            plot_index+=1
    if line_size>0:
        cv2.polylines(image, [np.array(pts)], isClosed=isClosed, color=line_color, thickness=line_size)
|
52 |
+
|
53 |
+
def fill_points(image,points,thickness=1,line_color=(255,255,255),fill_color = (255,255,255)):
    """Fill the polygon described by `points`, then stroke its outline."""
    contour = np.array(points, dtype=np.int32)
    cv2.fillPoly(image, [contour], fill_color)
    cv2.polylines(image, [contour], isClosed=True, color=line_color, thickness=thickness)
|
57 |
+
|
58 |
+
def get_image_size(cv2_image):
    """Return (height, width) of a cv2/numpy image."""
    height, width = cv2_image.shape[0], cv2_image.shape[1]
    return (height, width)
|
60 |
+
|
61 |
+
def get_channel(np_array):
    """Return the channel count: shape[2] for 3-D arrays, otherwise 1."""
    if np_array.ndim == 3:
        return np_array.shape[2]
    return 1
|
63 |
+
|
64 |
+
def get_numpy_text(np_array,key=""):
    """Return a one-line debug description (shape/channel/ndim/size) of an ndarray."""
    return (f"{key} shape = {np_array.shape} channel = {get_channel(np_array)}"
            f" ndim = {np_array.ndim} size = {np_array.size}")
|
67 |
+
|
68 |
+
|
69 |
+
def gray3d_to_2d(grayscale: np.ndarray) -> np.ndarray:
    """Convert a 3-D grayscale image (channel count 1) to a 2-D array.

    Args:
        grayscale (np.ndarray): 3-D grayscale image with a single channel,
            or an already 2-D image (returned unchanged).

    Returns:
        np.ndarray: 2-D grayscale image.

    Raises:
        ValueError: if the input has more than one channel (RGB/RGBA).
    """
    # Fixed: the documentation string previously sat AFTER executable code,
    # so it was a dead statement rather than the function docstring.
    channel = get_channel(grayscale)
    if channel != 1:
        raise ValueError(f"color maybe rgb or rgba {get_numpy_text(grayscale)}")

    if grayscale.ndim == 2:
        return grayscale
    # (h, w, 1) -> (h, w)
    return np.squeeze(grayscale)
|
86 |
+
|
87 |
+
def blend_rgb_images(image1: np.ndarray, image2: np.ndarray, mask: np.ndarray) -> np.ndarray:
    """Alpha-blend two RGB images using a grayscale mask.

    Args:
        image1 (np.ndarray): first image (RGB); selected where the mask is 0.
        image2 (np.ndarray): second image (RGB); selected where the mask is 255.
        mask (np.ndarray): grayscale mask, same height/width as the images.

    Returns:
        np.ndarray: the blended image (RGB, uint8).

    Raises:
        ValueError: when the input shapes do not match.
    """
    if image1.shape != image2.shape or image1.shape[:2] != mask.shape:
        raise ValueError("入力画像の形状が一致しません。")

    # Work in float to avoid uint8 overflow during the weighted sum.
    src1 = image1.astype(float)
    src2 = image2.astype(float)

    # Expand the mask to 3 channels and scale into [0, 1].
    weight = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR).astype(float) / 255.0

    blended = src1 * (1.0 - weight) + src2 * weight
    return blended.astype(np.uint8)
|
117 |
+
|
118 |
+
def create_color_image(img,color=(255,255,255)):
    """Return a new image shaped like `img`, filled entirely with `color`."""
    canvas = np.zeros_like(img)
    height, width = img.shape[:2]
    cv2.rectangle(canvas, (0, 0), (width, height), color, -1)
    return canvas
|
124 |
+
|
125 |
+
def pil_to_bgr_image(image):
    """Convert a PIL RGB/RGBA image into a cv2 BGR/BGRA uint8 ndarray."""
    rgb = np.array(image, dtype=np.uint8)
    code = cv2.COLOR_RGBA2BGRA if rgb.shape[2] == 4 else cv2.COLOR_RGB2BGR
    return cv2.cvtColor(rgb, code)
|
132 |
+
|
133 |
+
def bgr_to_rgb(np_image):
    """Convert a cv2 BGR/BGRA ndarray to RGB/RGBA.

    Args:
        np_image: H x W x 3 (BGR) or H x W x 4 (BGRA) array.

    Returns:
        np.ndarray: the converted RGB or RGBA array.
    """
    if np_image.shape[2] == 4:
        # Fixed: cv2.COLOR_RBGRA2RGBA does not exist and raised AttributeError
        # for every 4-channel input; the intended conversion is BGRA -> RGBA.
        return cv2.cvtColor(np_image, cv2.COLOR_BGRA2RGBA)
    return cv2.cvtColor(np_image, cv2.COLOR_BGR2RGB)
|
glibvision/draw_utils.py
ADDED
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# DrawUtils
|
2 |
+
# not PIL,CV2,Numpy drawing method
|
3 |
+
|
4 |
+
|
5 |
+
def points_to_box(points):
    """Return the XYWH bounding box [x, y, w, h] of a non-empty point sequence.

    Args:
        points: iterable of (x, y) pairs.

    Returns:
        list: [left, top, width, height].
    """
    xs = [p[0] for p in points]
    ys = [p[1] for p in points]
    x1, y1 = min(xs), min(ys)
    # Fixed: the max accumulators were seeded with 0 while the min side used
    # +inf, so boxes whose coordinates were all negative came out clamped.
    x2, y2 = max(xs), max(ys)
    return [x1, y1, x2 - x1, y2 - y1]
|
20 |
+
|
21 |
+
def box_to_point(box):
    """Return the four corners of an XYWH box, clockwise from top-left."""
    x, y, w, h = box[0], box[1], box[2], box[3]
    return [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
|
28 |
+
|
29 |
+
def plus_point(base_pt,add_pt):
    """Return the component-wise sum of two 2-D points as a new list."""
    x = base_pt[0] + add_pt[0]
    y = base_pt[1] + add_pt[1]
    return [x, y]
|
31 |
+
|
32 |
+
def box_to_xy(box):
    """Convert an XYWH box into [x1, y1, x2, y2] corner form."""
    left, top = box[0], box[1]
    return [left, top, box[2] + left, box[3] + top]
|
34 |
+
|
35 |
+
def to_int_points(points):
    """Truncate each point's first two coordinates to ints, returning a new list."""
    return [[int(p[0]), int(p[1])] for p in points]
|
glibvision/glandmark_utils.py
ADDED
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
import os
|
3 |
+
|
4 |
+
#simple single version
|
5 |
+
def bbox_to_glandmarks(file_name,bbox,points = None):
    """Build a single-box g-landmark dict from a file name, XYWH bbox and
    optional landmark points.

    The numeric part of the file name (stem) becomes the integer id;
    image width/height are intentionally omitted.
    """
    stem, _ = os.path.splitext(file_name)
    box = {
        "left": int(bbox[0]), "top": int(bbox[1]),
        "width": int(bbox[2]), "height": int(bbox[3]),
    }
    if points is not None:
        box["parts"] = [{"x": int(p[0]), "y": int(p[1])} for p in points]
    return {"image": {
        "boxes": [box],
        "file": file_name,
        "id": int(stem),
    }}
|
22 |
+
|
23 |
+
#technically this is not g-landmark/dlib ,
|
24 |
+
def convert_to_landmark_group_json(points):
    """Group 68 dlib-style landmark points into named facial features.

    Returns a one-element list (shape leaves room for multi-person support)
    of a dict mapping feature names to point slices, or None when the input
    does not contain exactly 68 points.
    """
    if len(points)!=68:
        print(f"points must be 68 but {len(points)}")
        return None
    pts = list(points)

    # lip point ordering follows the customized structure used by the
    # MIT-licensed face_recognition project:
    # https://github.com/ageitgey/face_recognition
    top_lip = pts[48:55] + [pts[i] for i in (64, 63, 62, 61, 60)]
    bottom_lip = pts[54:60] + [pts[48]] + [pts[i] for i in (60, 67, 66, 65, 64)]

    person = {  # index start 0 but index-number start 1
        "chin": pts[0:17],
        "left_eyebrow": pts[17:22],
        "right_eyebrow": pts[22:27],
        "nose_bridge": pts[27:31],
        "nose_tip": pts[31:36],
        "left_eye": pts[36:42],
        "right_eye": pts[42:48],
        "top_lip": top_lip,
        "bottom_lip": bottom_lip,
    }
    return [person]
|
glibvision/numpy_utils.py
ADDED
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
|
3 |
+
|
4 |
+
def apply_binary_mask_to_color(base_image,color,mask):
    """Paint `color` onto base_image wherever the binary mask equals 255.

    Args:
        base_image (np.ndarray): image modified in place.
        color: color value written at masked pixels.
        mask (np.ndarray): binary mask; 2-D, or 3-D where channel 0 is used.

    Returns:
        np.ndarray: base_image with the color applied.
    """
    # TODO check all shape
    selected = (mask if mask.ndim == 2 else mask[:, :, 0]) == 255
    base_image[selected] = color
    return base_image
|
28 |
+
|
29 |
+
def apply_binary_mask_to_image(base_image,paste_image,mask):
    """Copy pixels of paste_image into base_image wherever the mask equals 255.

    Args:
        base_image (np.ndarray): destination, modified in place.
        paste_image (np.ndarray): source image, same shape as base_image.
        mask (np.ndarray): binary mask; 2-D, or 3-D where channel 0 is used.

    Returns:
        np.ndarray: base_image with the masked region replaced.
    """
    # TODO check all shape
    selected = (mask if mask.ndim == 2 else mask[:, :, 0]) == 255
    base_image[selected] = paste_image[selected]
    return base_image
|
53 |
+
|
54 |
+
def pil_to_numpy(image):
    """Return the image contents as a uint8 numpy array (always a copy)."""
    return np.array(image, dtype=np.uint8)
|
56 |
+
|
57 |
+
def extruce_points(points,index,ratio=1.5):
    """Push the point at `index` outward from the centroid of `points` by `ratio`.

    Args:
        points: array-like of 2-D points.
        index: index of the point to displace.
        ratio: scale applied to the centroid->point vector.

    Returns:
        np.ndarray: the displaced point.

    Raises:
        ValueError: when index is out of range.
    """
    center_point = np.mean(points, axis=0)
    # Fixed off-by-one: `index > len(points)` let index == len(points) through,
    # which then raised IndexError below instead of the intended ValueError.
    if index < 0 or index >= len(points):
        raise ValueError(f"index must be range(0,{len(points)} but value = {index})")
    point1 = points[index]
    print(f"center = {center_point}")
    vec_to_center = point1 - center_point
    return vec_to_center * ratio + center_point
|
68 |
+
|
69 |
+
|
70 |
+
def bulge_polygon(points, bulge_factor=0.1,isClosed=True):
    """Insert an outward-bulged midpoint on every edge of a polygon.

    Each edge midpoint is pushed away from the polygon centroid by
    `bulge_factor` times its centroid-to-midpoint vector. The final wrapping
    edge is skipped when isClosed is False. Note: returns an np.ndarray.
    """
    vertices = np.array(points)

    # Centroid of the whole polygon.
    centroid = np.mean(vertices, axis=0)
    expanded = []
    count = len(vertices)
    for idx in range(count):
        if idx == count - 1 and not isClosed:
            break
        vertex = vertices[idx]
        # Midpoint of the edge to the (wrapping) next vertex.
        midpoint = vertex + (vertices[(idx + 1) % count] - vertex) / 2
        outward = midpoint - centroid
        bulged = midpoint + outward * bulge_factor
        expanded.append(vertex)
        expanded.append(bulged.astype(np.int32))

    return np.array(expanded)
|
105 |
+
|
106 |
+
|
107 |
+
# image.shape rgb are (1024,1024,3) use 1024,1024 as 2-dimensional
|
108 |
+
def create_2d_image(shape):
    """Return a zeroed uint8 image using only the first two dims of `shape`
    (so an RGB (h, w, 3) shape yields an (h, w) grayscale canvas)."""
    height, width = shape[:2]
    return np.zeros((height, width), dtype=np.uint8)
|
glibvision/pil_utils.py
ADDED
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from PIL import Image,ImageDraw
|
2 |
+
from .draw_utils import box_to_xy,to_int_points,box_to_point
|
3 |
+
#ver-2024-11-18
|
4 |
+
def create_color_image(width, height, color=(255,255,255)):
    """Create a solid-color PIL image.

    Args:
        width, height: image size in pixels.
        color: RGB (3-tuple) or RGBA (4-tuple); None means opaque black.

    Returns:
        PIL.Image.Image: the filled image.

    Raises:
        ValueError: when color has neither 3 nor 4 components.
    """
    if color is None:
        color = (0, 0, 0)

    if len(color) == 3:
        mode = "RGB"
    elif len(color) == 4:
        mode = "RGBA"
    else:
        # Previously fell through with `mode` unbound and raised NameError.
        raise ValueError(f"color must have 3 or 4 components but got {len(color)}")

    return Image.new(mode, (width, height), color)
|
15 |
+
|
16 |
+
def fill_points(image,points,color=(255,255,255)):
    """Fill the polygon `points` on a PIL image (thin wrapper over draw_points)."""
    return draw_points(image, points, None, color)
|
18 |
+
|
19 |
+
def draw_points(image,points,outline=None,fill=None,width=1):
    """Draw (and/or fill) a polygon through `points` on a PIL image, returning it."""
    xy = [(int(px), int(py)) for px, py in points]
    artist = ImageDraw.Draw(image)
    artist.polygon(xy, outline=outline, fill=fill, width=width)
    return image
|
24 |
+
|
25 |
+
def draw_box(image,box,outline=None,fill=None):
    """Outline and/or fill an XYWH box on a PIL image."""
    corner_points = to_int_points(box_to_point(box))
    return draw_points(image, corner_points, outline, fill)
|
28 |
+
|
29 |
+
def from_numpy(numpy_array):
    """Convert a numpy image array into a PIL Image."""
    return Image.fromarray(numpy_array)
|
gradio_utils.py
ADDED
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
|
3 |
+
import os
|
4 |
+
import time
|
5 |
+
import io
|
6 |
+
import hashlib
|
7 |
+
|
8 |
+
def clear_old_files(dir="files",passed_time=60*60):
    """Best-effort deletion of files in `dir` older than `passed_time` seconds.

    Args:
        dir: directory scanned (non-recursive).
        passed_time: age threshold in seconds, judged by the file's st_ctime.
    """
    try:
        current_time = time.time()
        for name in os.listdir(dir):
            file_path = os.path.join(dir, name)
            # Age by ctime (creation / metadata-change time of the entry).
            if current_time - os.stat(file_path).st_ctime > passed_time:
                os.remove(file_path)
    except OSError:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit); deletion can race with files that the
        # gradio gallery is still serving.
        print("maybe still gallery using error")
|
22 |
+
|
23 |
+
def get_buffer_id(buffer):
    """Return a 32-character id derived from the sha256 of a BytesIO's contents."""
    digest = hashlib.sha256(buffer.getvalue()).hexdigest()
    return digest[:32]
|
28 |
+
|
29 |
+
def get_image_id(image):
    """Return a content-hash id for a PIL image (hashed over its PNG encoding)."""
    buffer = io.BytesIO()
    image.save(buffer, format='PNG')
    return get_buffer_id(buffer)
|
33 |
+
|
34 |
+
def save_image(image,extension="jpg",dir_name="files"):
    """Save a PIL image under a content-hash filename and return the path."""
    os.makedirs(dir_name, exist_ok=True)
    file_path = f"{dir_name}/{get_image_id(image)}.{extension}"
    image.save(file_path)
    return file_path
|
41 |
+
|
42 |
+
def save_buffer(buffer,extension="webp",dir_name="files"):
    """Persist a BytesIO buffer under a content-hash filename and return the path."""
    os.makedirs(dir_name, exist_ok=True)
    file_path = f"{dir_name}/{get_buffer_id(buffer)}.{extension}"
    with open(file_path, "wb") as out:
        out.write(buffer.getvalue())
    return file_path
|
50 |
+
|
51 |
+
def write_file(file_path,text):
    """Write `text` to file_path as UTF-8, replacing any existing content."""
    with open(file_path, "w", encoding="utf-8") as handle:
        handle.write(text)
|
54 |
+
|
55 |
+
def read_file(file_path):
    """Read and return the full UTF-8 text of the target file."""
    with open(file_path, "r", encoding="utf-8") as handle:
        return handle.read()
|
mp_box.py
ADDED
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import mediapipe as mp
|
2 |
+
from mediapipe.tasks import python
|
3 |
+
from mediapipe.tasks.python import vision
|
4 |
+
from mediapipe.framework.formats import landmark_pb2
|
5 |
+
from mediapipe import solutions
|
6 |
+
import numpy as np
|
7 |
+
|
8 |
+
# for X,Y,W,H to x1,y1,x2,y2(Left-top,right-bottom style)
|
9 |
+
def xywh_to_xyxy(box):
    """Convert [x, y, w, h] into left-top / right-bottom [x1, y1, x2, y2]."""
    left, top, w, h = box[0], box[1], box[2], box[3]
    return [left, top, left + w, top + h]
|
11 |
+
|
12 |
+
def convert_to_box(face_landmarks_list,indices,w=1024,h=1024):
    """Return the XYWH bounding box (ints) of the chosen landmark indices.

    Landmark coordinates are normalized [0..1]; they are scaled by w/h and
    clamped to the image bounds before the min/max scan. Only the first
    face (face_landmarks_list[0]) is considered.
    """
    landmarks = face_landmarks_list[0]
    min_x, min_y = w, h
    max_x = max_y = 0
    for index in indices:
        px = min(w, max(0, landmarks[index].x * w))
        py = min(h, max(0, landmarks[index].y * h))
        min_x = min(min_x, px)
        min_y = min(min_y, py)
        max_x = max(max_x, px)
        max_y = max(max_y, py)

    return [int(min_x), int(min_y), int(max_x - min_x), int(max_y - min_y)]
|
33 |
+
|
34 |
+
|
35 |
+
def box_to_square(bbox):
    """Return a copy of the XYWH box expanded (centered) into a square."""
    left, top, width, height = list(bbox)
    if width > height:
        pad = width - height
        height += pad
        top -= pad / 2
    elif height > width:
        pad = height - width
        width += pad
        left -= pad / 2
    return [left, top, width, height]
|
46 |
+
|
47 |
+
|
48 |
+
def face_landmark_result_to_box(face_landmarker_result,width=1024,height=1024):
    """Derive candidate face bounding boxes from a FaceLandmarker result.

    Returns six XYWH boxes: [inner-face, chin-to-brow, full-mesh] followed by
    the same three squared via box_to_square().
    """
    face_landmarks_list = face_landmarker_result.face_landmarks

    full_indices = list(range(456))

    MIDDLE_FOREHEAD = 151
    BOTTOM_CHIN_EX = 152
    BOTTOM_CHIN = 175
    CHIN_TO_MIDDLE_FOREHEAD = [200, 14, 1, 6, 18, 9]
    MOUTH_BOTTOM = [202, 200, 422]
    EYEBROW_CHEEK_LEFT_RIGHT = [46, 226, 50, 1, 280, 446, 276]

    LEFT_HEAD_OUTER_EX = 251  # on side face almost same as full
    LEFT_HEAD_OUTER = 301
    LEFT_EYE_OUTER_EX = 356
    LEFT_EYE_OUTER = 264
    LEFT_MOUTH_OUTER_EX = 288
    LEFT_MOUTH_OUTER = 288
    LEFT_CHIN_OUTER = 435
    RIGHT_HEAD_OUTER_EX = 21
    RIGHT_HEAD_OUTER = 71
    RIGHT_EYE_OUTER_EX = 127
    RIGHT_EYE_OUTER = 34
    RIGHT_MOUTH_OUTER_EX = 58
    RIGHT_MOUTH_OUTER = 215
    RIGHT_CHIN_OUTER = 150

    # TODO naming line
    min_indices = CHIN_TO_MIDDLE_FOREHEAD + EYEBROW_CHEEK_LEFT_RIGHT + MOUTH_BOTTOM

    chin_to_brow_indices = [
        LEFT_CHIN_OUTER, LEFT_MOUTH_OUTER, LEFT_EYE_OUTER, LEFT_HEAD_OUTER,
        MIDDLE_FOREHEAD, RIGHT_HEAD_OUTER, RIGHT_EYE_OUTER, RIGHT_MOUTH_OUTER,
        RIGHT_CHIN_OUTER, BOTTOM_CHIN,
    ] + min_indices

    boxes = [
        convert_to_box(face_landmarks_list, min_indices, width, height),
        convert_to_box(face_landmarks_list, chin_to_brow_indices, width, height),
        convert_to_box(face_landmarks_list, full_indices, width, height),
    ]
    return boxes + [box_to_square(b) for b in boxes]
|
87 |
+
|
88 |
+
|
89 |
+
def draw_landmarks_on_image(detection_result,rgb_image):
    """Return a copy of rgb_image with the detected face-mesh tesselation drawn
    (using mediapipe's default tesselation style) for every detected face."""
    face_landmarks_list = detection_result.face_landmarks
    annotated_image = np.copy(rgb_image)

    # Loop through the detected faces to visualize.
    for idx in range(len(face_landmarks_list)):
        face_landmarks = face_landmarks_list[idx]

        # Draw the face landmarks.
        # mediapipe's drawing utils require a NormalizedLandmarkList proto,
        # so repackage the task-API landmarks first.
        face_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
        face_landmarks_proto.landmark.extend([
            landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z) for landmark in face_landmarks
        ])

        solutions.drawing_utils.draw_landmarks(
            image=annotated_image,
            landmark_list=face_landmarks_proto,
            connections=mp.solutions.face_mesh.FACEMESH_TESSELATION,
            landmark_drawing_spec=None,
            connection_drawing_spec=mp.solutions.drawing_styles
            .get_default_face_mesh_tesselation_style())

    return annotated_image
|
112 |
+
|
113 |
+
def mediapipe_to_box(image_data,model_path="face_landmarker.task"):
    """Run mediapipe FaceLandmarker on an image and derive bounding boxes.

    Args:
        image_data: image file path (str) or an RGB array-like.
        model_path: path to the FaceLandmarker .task model file.

    Returns:
        tuple: (boxes, mp_image, face_landmarker_result), where boxes is the
        list produced by face_landmark_result_to_box().
    """
    BaseOptions = mp.tasks.BaseOptions
    FaceLandmarker = mp.tasks.vision.FaceLandmarker
    FaceLandmarkerOptions = mp.tasks.vision.FaceLandmarkerOptions
    VisionRunningMode = mp.tasks.vision.RunningMode

    # Confidence thresholds are set to 0, i.e. any detection is accepted;
    # callers are expected to validate the result downstream.
    options = FaceLandmarkerOptions(
        base_options=BaseOptions(model_asset_path=model_path),
        running_mode=VisionRunningMode.IMAGE
        ,min_face_detection_confidence=0, min_face_presence_confidence=0
    )

    with FaceLandmarker.create_from_options(options) as landmarker:
        if isinstance(image_data,str):
            mp_image = mp.Image.create_from_file(image_data)
        else:
            mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=np.asarray(image_data))
        face_landmarker_result = landmarker.detect(mp_image)
        boxes = face_landmark_result_to_box(face_landmarker_result,mp_image.width,mp_image.height)
    return boxes,mp_image,face_landmarker_result
|
mp_constants.py
ADDED
@@ -0,0 +1,331 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#ver 2024-11-21
|
2 |
+
# contour
|
3 |
+
POINT_LEFT_HEAD_OUTER_EX = 251 #on side face almost same as full
|
4 |
+
POINT_LEFT_HEAD_OUTER = 301
|
5 |
+
POINT_LEFT_EYE_OUTER_EX = 356
|
6 |
+
POINT_LEFT_EYE_OUTER = 264
|
7 |
+
POINT_LEFT_MOUTH_OUTER_EX = 288
|
8 |
+
POINT_LEFT_MOUTH_OUTER = 435
|
9 |
+
POINT_LEFT_CHIN_OUTER = 379
|
10 |
+
POINT_RIGHT_HEAD_OUTER_EX = 21
|
11 |
+
POINT_RIGHT_HEAD_OUTER = 71
|
12 |
+
POINT_RIGHT_EYE_OUTER_EX = 127
|
13 |
+
POINT_RIGHT_EYE_OUTER = 34
|
14 |
+
POINT_RIGHT_MOUTH_OUTER_EX = 58
|
15 |
+
POINT_RIGHT_MOUTH_OUTER = 215
|
16 |
+
POINT_RIGHT_CHIN_OUTER = 150
|
17 |
+
POINT_CHIN_BOTTOM = 152
|
18 |
+
|
19 |
+
POINT_FOREHEAD_TOP = 10
|
20 |
+
|
21 |
+
POINT_UPPER_LIP_CENTER_BOTTOM=13
|
22 |
+
POINT_LOWER_LIP_CENTER_TOP=14
|
23 |
+
POINT_LOWER_LIP_CENTER_BOTTOM=17
|
24 |
+
POINT_NOSE_CENTER_MIDDLE=5
|
25 |
+
|
26 |
+
LINE_RIGHT_CONTOUR_OUTER_EYE_TO_CHIN =[127,234,93,132,58,172,136,150,149,176,148,152]
|
27 |
+
LINE_RIGHT_CONTOUR_EYE_TO_CHIN = [34,227,137,177,215,138,135,169,170,140,171,175]
|
28 |
+
LINE_RIGHT_CONTOUR_INNER_EYE_TO_CHIN =[143,116,123,147,213,192,214,210,211,32,208,199]
|
29 |
+
|
30 |
+
|
31 |
+
LINE_RIGHT_CONTOUR_0 = [152,175,199]
|
32 |
+
LINE_RIGHT_CONTOUR_1 = [148,171,208]
|
33 |
+
LINE_RIGHT_CONTOUR_2 = [176,140,32]
|
34 |
+
LINE_RIGHT_CONTOUR_3 = [149,170,211]
|
35 |
+
LINE_RIGHT_CONTOUR_4 = [150,169,210]
|
36 |
+
LINE_RIGHT_CONTOUR_5 = [136,135,214]
|
37 |
+
LINE_RIGHT_CONTOUR_6 = [172,138,192]
|
38 |
+
LINE_RIGHT_CONTOUR_7 = [58,215,213]
|
39 |
+
LINE_RIGHT_CONTOUR_8 = [132,177,147]
|
40 |
+
LINE_RIGHT_CONTOUR_9 = [93,137,123]
|
41 |
+
LINE_RIGHT_CONTOUR_10 = [234,227,116]
|
42 |
+
LINE_RIGHT_CONTOUR_11 = [127,34,143]
|
43 |
+
|
44 |
+
LANDMARK_68_CONTOUR_1 = LINE_RIGHT_CONTOUR_11
|
45 |
+
LANDMARK_68_CONTOUR_2_PART1 = LINE_RIGHT_CONTOUR_10
|
46 |
+
LANDMARK_68_CONTOUR_2_PART2 = LINE_RIGHT_CONTOUR_9
|
47 |
+
LANDMARK_68_CONTOUR_3 = LINE_RIGHT_CONTOUR_8
|
48 |
+
LANDMARK_68_CONTOUR_4 = LINE_RIGHT_CONTOUR_7
|
49 |
+
LANDMARK_68_CONTOUR_5 = LINE_RIGHT_CONTOUR_6
|
50 |
+
LANDMARK_68_CONTOUR_6_PART1 = LINE_RIGHT_CONTOUR_5
|
51 |
+
LANDMARK_68_CONTOUR_6_PART2 = LINE_RIGHT_CONTOUR_4
|
52 |
+
|
53 |
+
LANDMARK_68_CONTOUR_7 = LINE_RIGHT_CONTOUR_3
|
54 |
+
LANDMARK_68_CONTOUR_8_PART1 = LINE_RIGHT_CONTOUR_2
|
55 |
+
LANDMARK_68_CONTOUR_8_PART2 = LINE_RIGHT_CONTOUR_1
|
56 |
+
LANDMARK_68_CONTOUR_9 = LINE_RIGHT_CONTOUR_0
|
57 |
+
|
58 |
+
|
59 |
+
LINE_LEFT_CONTOUR_1 = [377,396,428]
|
60 |
+
LINE_LEFT_CONTOUR_2 = [400,369,262]
|
61 |
+
LINE_LEFT_CONTOUR_3 = [378,395,431]
|
62 |
+
LINE_LEFT_CONTOUR_4 = [379,394,430]
|
63 |
+
LINE_LEFT_CONTOUR_5 = [365,364,434]
|
64 |
+
LINE_LEFT_CONTOUR_6 = [397,367,416]
|
65 |
+
LINE_LEFT_CONTOUR_7 = [288,435,433]
|
66 |
+
LINE_LEFT_CONTOUR_8 = [361,401,376]
|
67 |
+
LINE_LEFT_CONTOUR_9 = [323,366,352]
|
68 |
+
LINE_LEFT_CONTOUR_10 = [454,447,345]
|
69 |
+
LINE_LEFT_CONTOUR_11 = [356,264,372]
|
70 |
+
LINE_LEFT_CONTOUR_12 = [389,368,383]
|
71 |
+
|
72 |
+
LANDMARK_68_CONTOUR_10 = LINE_LEFT_CONTOUR_1
|
73 |
+
LANDMARK_68_CONTOUR_11_PART1 = LINE_LEFT_CONTOUR_2
|
74 |
+
LANDMARK_68_CONTOUR_11_PART2 = LINE_LEFT_CONTOUR_3
|
75 |
+
LANDMARK_68_CONTOUR_12 = LINE_LEFT_CONTOUR_4
|
76 |
+
LANDMARK_68_CONTOUR_13 = LINE_LEFT_CONTOUR_5
|
77 |
+
LANDMARK_68_CONTOUR_14 = LINE_LEFT_CONTOUR_6
|
78 |
+
LANDMARK_68_CONTOUR_15_PART1 = LINE_LEFT_CONTOUR_7
|
79 |
+
LANDMARK_68_CONTOUR_15_PART2 = LINE_LEFT_CONTOUR_8
|
80 |
+
|
81 |
+
LANDMARK_68_CONTOUR_16 = LINE_LEFT_CONTOUR_9
|
82 |
+
LANDMARK_68_CONTOUR_17_PART1 = LINE_LEFT_CONTOUR_10
|
83 |
+
LANDMARK_68_CONTOUR_17_PART2 = LINE_LEFT_CONTOUR_11
|
84 |
+
|
85 |
+
LANDMARK_68_RIGHT_EYEBROW_18 = [70,46] #upper,lower
|
86 |
+
LANDMARK_68_RIGHT_EYEBROW_19 = [63,53]
|
87 |
+
LANDMARK_68_RIGHT_EYEBROW_20 = [105,52]
|
88 |
+
LANDMARK_68_RIGHT_EYEBROW_21 = [66,65]
|
89 |
+
LANDMARK_68_RIGHT_EYEBROW_22 = [107,55]
|
90 |
+
|
91 |
+
LANDMARK_68_LEFT_EYEBROW_23 = [336,285] #upper,lower
|
92 |
+
LANDMARK_68_LEFT_EYEBROW_24 = [296,295]
|
93 |
+
LANDMARK_68_LEFT_EYEBROW_25 = [334,282]
|
94 |
+
LANDMARK_68_LEFT_EYEBROW_26 = [293,283]
|
95 |
+
LANDMARK_68_LEFT_EYEBROW_27 = [300,276]
|
96 |
+
|
97 |
+
POINT_NOSE_0 = 8
|
98 |
+
POINT_NOSE_1 = 168
|
99 |
+
POINT_NOSE_2 = 6
|
100 |
+
POINT_NOSE_3 = 197
|
101 |
+
POINT_NOSE_4 = 195
|
102 |
+
POINT_NOSE_5 = 5
|
103 |
+
POINT_NOSE_6 = 4
|
104 |
+
POINT_NOSE_7 = 19
|
105 |
+
POINT_NOSE_8 = 94
|
106 |
+
POINT_NOSE_9 = 2
|
107 |
+
|
108 |
+
#side
|
109 |
+
POINT_NOSE_10 = 98
|
110 |
+
POINT_NOSE_11 = 97
|
111 |
+
POINT_NOSE_12 = 326
|
112 |
+
POINT_NOSE_13 = 327
|
113 |
+
|
114 |
+
LANDMARK_68_VERTICAL_NOSE_28 =[8,168]
|
115 |
+
LANDMARK_68_VERTICAL_NOSE_29 = [6]
|
116 |
+
LANDMARK_68_VERTICAL_NOSE_30=[197,195]
|
117 |
+
LANDMARK_68_VERTICAL_NOSE_31=[5,4]
|
118 |
+
|
119 |
+
LANDMARK_68_HORIZONTAL_NOSE_32 =[POINT_NOSE_10]
|
120 |
+
LANDMARK_68_HORIZONTAL_NOSE_33 = [POINT_NOSE_11]
|
121 |
+
LANDMARK_68_HORIZONTAL_NOSE_34=[POINT_NOSE_9]
|
122 |
+
LANDMARK_68_HORIZONTAL_NOSE_35=[POINT_NOSE_12]
|
123 |
+
LANDMARK_68_HORIZONTAL_NOSE_36=[POINT_NOSE_13]
|
124 |
+
|
125 |
+
|
126 |
+
LINE_VERTICAL_NOSE = [POINT_NOSE_0,POINT_NOSE_1,POINT_NOSE_2,POINT_NOSE_3,POINT_NOSE_4,POINT_NOSE_5,POINT_NOSE_6,POINT_NOSE_7,POINT_NOSE_8,POINT_NOSE_9]
|
127 |
+
LINE_HORIZONTAL_NOSE =[POINT_NOSE_10,POINT_NOSE_11,POINT_NOSE_9,POINT_NOSE_12,POINT_NOSE_13]
|
128 |
+
|
129 |
+
### EYES
|
130 |
+
POINT_RIGHT_UPPER_INNER_EYE_1 = 33
|
131 |
+
POINT_RIGHT_UPPER_INNER_EYE_2 = 246
|
132 |
+
POINT_RIGHT_UPPER_INNER_EYE_3 = 161
|
133 |
+
POINT_RIGHT_UPPER_INNER_EYE_4 = 160
|
134 |
+
POINT_RIGHT_UPPER_INNER_EYE_5 = 159
|
135 |
+
POINT_RIGHT_UPPER_INNER_EYE_6 = 158
|
136 |
+
POINT_RIGHT_UPPER_INNER_EYE_7 = 157
|
137 |
+
POINT_RIGHT_UPPER_INNER_EYE_8 = 173
|
138 |
+
POINT_RIGHT_UPPER_INNER_EYE_9 = 133
|
139 |
+
|
140 |
+
LINE_RIGHT_UPPER_INNER_EYE=[POINT_RIGHT_UPPER_INNER_EYE_1,POINT_RIGHT_UPPER_INNER_EYE_2,POINT_RIGHT_UPPER_INNER_EYE_3,POINT_RIGHT_UPPER_INNER_EYE_4,POINT_RIGHT_UPPER_INNER_EYE_5,POINT_RIGHT_UPPER_INNER_EYE_6,POINT_RIGHT_UPPER_INNER_EYE_7,POINT_RIGHT_UPPER_INNER_EYE_8,POINT_RIGHT_UPPER_INNER_EYE_9]
|
141 |
+
|
142 |
+
POINT_RIGHT_LOWER_INNER_EYE_1 = 155
|
143 |
+
POINT_RIGHT_LOWER_INNER_EYE_2 = 154
|
144 |
+
POINT_RIGHT_LOWER_INNER_EYE_3 = 153
|
145 |
+
POINT_RIGHT_LOWER_INNER_EYE_4 = 145
|
146 |
+
POINT_RIGHT_LOWER_INNER_EYE_5 = 144
|
147 |
+
POINT_RIGHT_LOWER_INNER_EYE_6 = 163
|
148 |
+
POINT_RIGHT_LOWER_INNER_EYE_7 = 7
|
149 |
+
|
150 |
+
LINE_RIGHT_LOWER_INNER_EYE=[POINT_RIGHT_UPPER_INNER_EYE_9,POINT_RIGHT_LOWER_INNER_EYE_1,POINT_RIGHT_LOWER_INNER_EYE_2,POINT_RIGHT_LOWER_INNER_EYE_3,POINT_RIGHT_LOWER_INNER_EYE_4,POINT_RIGHT_LOWER_INNER_EYE_5,POINT_RIGHT_LOWER_INNER_EYE_6,POINT_RIGHT_LOWER_INNER_EYE_7,POINT_RIGHT_UPPER_INNER_EYE_1]
|
151 |
+
|
152 |
+
|
153 |
+
POINT_RIGHT_UPPER_OUTER_EYE_1 = 130
|
154 |
+
POINT_RIGHT_UPPER_OUTER_EYE_2 = 247
|
155 |
+
POINT_RIGHT_UPPER_OUTER_EYE_3 = 30
|
156 |
+
POINT_RIGHT_UPPER_OUTER_EYE_4 = 29
|
157 |
+
POINT_RIGHT_UPPER_OUTER_EYE_5 = 27
|
158 |
+
POINT_RIGHT_UPPER_OUTER_EYE_6 = 28
|
159 |
+
POINT_RIGHT_UPPER_OUTER_EYE_7 = 56
|
160 |
+
POINT_RIGHT_UPPER_OUTER_EYE_8 = 190
|
161 |
+
POINT_RIGHT_UPPER_OUTER_EYE_9 = 243
|
162 |
+
|
163 |
+
LINE_RIGHT_UPPER_OUTER_EYE=[POINT_RIGHT_UPPER_OUTER_EYE_1,POINT_RIGHT_UPPER_OUTER_EYE_2,POINT_RIGHT_UPPER_OUTER_EYE_3,POINT_RIGHT_UPPER_OUTER_EYE_4,POINT_RIGHT_UPPER_OUTER_EYE_5,POINT_RIGHT_UPPER_OUTER_EYE_6,POINT_RIGHT_UPPER_OUTER_EYE_7,POINT_RIGHT_UPPER_OUTER_EYE_8,POINT_RIGHT_UPPER_OUTER_EYE_9]
|
164 |
+
|
165 |
+
LINE_RIGHT_UPPER_MIXED_EYE =[#firs eye1 and eye2 is intesionaly for moveup
|
166 |
+
[POINT_RIGHT_UPPER_INNER_EYE_1,POINT_RIGHT_UPPER_OUTER_EYE_2], [POINT_RIGHT_UPPER_INNER_EYE_2,POINT_RIGHT_UPPER_OUTER_EYE_2], [POINT_RIGHT_UPPER_INNER_EYE_3,POINT_RIGHT_UPPER_OUTER_EYE_3], [POINT_RIGHT_UPPER_INNER_EYE_4,POINT_RIGHT_UPPER_OUTER_EYE_4], [POINT_RIGHT_UPPER_INNER_EYE_5,POINT_RIGHT_UPPER_OUTER_EYE_5], [POINT_RIGHT_UPPER_INNER_EYE_6,POINT_RIGHT_UPPER_OUTER_EYE_6]
|
167 |
+
,[POINT_RIGHT_UPPER_INNER_EYE_8],[POINT_RIGHT_UPPER_INNER_EYE_8,POINT_RIGHT_UPPER_INNER_EYE_9] #I'm not sure need this one or not POINT_RIGHT_LOWER_INNER_EYE_1
|
168 |
+
]
|
169 |
+
|
170 |
+
LINE_RIGHT_UPPER_MIXED_EYE2 =[#firs eye1 and eye2 is intesionaly for moveup
|
171 |
+
[POINT_RIGHT_UPPER_INNER_EYE_1,POINT_RIGHT_UPPER_INNER_EYE_1,POINT_RIGHT_UPPER_OUTER_EYE_2],
|
172 |
+
[POINT_RIGHT_UPPER_INNER_EYE_2,POINT_RIGHT_UPPER_INNER_EYE_2,POINT_RIGHT_UPPER_OUTER_EYE_2],
|
173 |
+
[POINT_RIGHT_UPPER_INNER_EYE_3,POINT_RIGHT_UPPER_INNER_EYE_3,POINT_RIGHT_UPPER_OUTER_EYE_3],
|
174 |
+
[POINT_RIGHT_UPPER_INNER_EYE_4,POINT_RIGHT_UPPER_INNER_EYE_4,POINT_RIGHT_UPPER_OUTER_EYE_4],
|
175 |
+
[POINT_RIGHT_UPPER_INNER_EYE_5,POINT_RIGHT_UPPER_INNER_EYE_5,POINT_RIGHT_UPPER_OUTER_EYE_5],
|
176 |
+
[POINT_RIGHT_UPPER_INNER_EYE_6,POINT_RIGHT_UPPER_INNER_EYE_6,POINT_RIGHT_UPPER_OUTER_EYE_6]
|
177 |
+
,[POINT_RIGHT_UPPER_INNER_EYE_8],
|
178 |
+
[POINT_RIGHT_UPPER_INNER_EYE_8,POINT_RIGHT_UPPER_INNER_EYE_9] #I'm not sure need this one or not POINT_RIGHT_LOWER_INNER_EYE_1
|
179 |
+
]
|
180 |
+
|
181 |
+
# LEFT AND RIGHT IS DIFF
|
182 |
+
LINE_RIGHT_EYES_WHITE = LINE_RIGHT_UPPER_INNER_EYE[1:-1] + LINE_RIGHT_LOWER_INNER_EYE[2:-1]
|
183 |
+
|
184 |
+
|
185 |
+
POINT_RIGHT_LOWER_OUTER_EYE_1 = 112
|
186 |
+
POINT_RIGHT_LOWER_OUTER_EYE_2 = 26
|
187 |
+
POINT_RIGHT_LOWER_OUTER_EYE_3 = 22
|
188 |
+
POINT_RIGHT_LOWER_OUTER_EYE_4 = 23
|
189 |
+
POINT_RIGHT_LOWER_OUTER_EYE_5 = 24
|
190 |
+
POINT_RIGHT_LOWER_OUTER_EYE_6 = 110
|
191 |
+
POINT_RIGHT_LOWER_OUTER_EYE_7 = 25
|
192 |
+
|
193 |
+
LINE_RIGHT_LOWER_OUTER_EYE=[POINT_RIGHT_UPPER_OUTER_EYE_9,POINT_RIGHT_LOWER_OUTER_EYE_1,POINT_RIGHT_LOWER_OUTER_EYE_2,POINT_RIGHT_LOWER_OUTER_EYE_3,POINT_RIGHT_LOWER_OUTER_EYE_4,POINT_RIGHT_LOWER_OUTER_EYE_5,POINT_RIGHT_LOWER_OUTER_EYE_6,POINT_RIGHT_LOWER_OUTER_EYE_7,POINT_RIGHT_UPPER_OUTER_EYE_1]
|
194 |
+
|
195 |
+
LINE_RIGHT_LOWER_MIXED_EYE =[
|
196 |
+
[POINT_RIGHT_UPPER_INNER_EYE_8,POINT_RIGHT_UPPER_INNER_EYE_9,POINT_RIGHT_LOWER_INNER_EYE_1]
|
197 |
+
,[POINT_RIGHT_LOWER_INNER_EYE_2]
|
198 |
+
,POINT_RIGHT_LOWER_INNER_EYE_3,POINT_RIGHT_LOWER_INNER_EYE_4,POINT_RIGHT_LOWER_INNER_EYE_5,POINT_RIGHT_LOWER_INNER_EYE_6,POINT_RIGHT_LOWER_INNER_EYE_7
|
199 |
+
,[POINT_RIGHT_UPPER_INNER_EYE_1,POINT_RIGHT_UPPER_OUTER_EYE_2] #combine 1 and 2 for move up
|
200 |
+
]
|
201 |
+
|
202 |
+
|
203 |
+
# FaceMesh landmark indices — left eye, inner (eyelid margin) upper contour.
POINT_LEFT_UPPER_INNER_EYE_1 = 362
POINT_LEFT_UPPER_INNER_EYE_2 = 398
POINT_LEFT_UPPER_INNER_EYE_3 = 384
POINT_LEFT_UPPER_INNER_EYE_4 = 385
POINT_LEFT_UPPER_INNER_EYE_5 = 386
POINT_LEFT_UPPER_INNER_EYE_6 = 387
POINT_LEFT_UPPER_INNER_EYE_7 = 388
POINT_LEFT_UPPER_INNER_EYE_8 = 466
POINT_LEFT_UPPER_INNER_EYE_9 = 263

LINE_LEFT_UPPER_INNER_EYE = [
    POINT_LEFT_UPPER_INNER_EYE_1,
    POINT_LEFT_UPPER_INNER_EYE_2,
    POINT_LEFT_UPPER_INNER_EYE_3,
    POINT_LEFT_UPPER_INNER_EYE_4,
    POINT_LEFT_UPPER_INNER_EYE_5,
    POINT_LEFT_UPPER_INNER_EYE_6,
    POINT_LEFT_UPPER_INNER_EYE_7,
    POINT_LEFT_UPPER_INNER_EYE_8,
    POINT_LEFT_UPPER_INNER_EYE_9,
]

# Left eye, inner lower contour.
POINT_LEFT_LOWER_INNER_EYE_1 = 249
POINT_LEFT_LOWER_INNER_EYE_2 = 390
POINT_LEFT_LOWER_INNER_EYE_3 = 373
POINT_LEFT_LOWER_INNER_EYE_4 = 374
POINT_LEFT_LOWER_INNER_EYE_5 = 380
POINT_LEFT_LOWER_INNER_EYE_6 = 381
POINT_LEFT_LOWER_INNER_EYE_7 = 382

# Closed against the upper-inner contour's end points.
LINE_LEFT_LOWER_INNER_EYE = [
    POINT_LEFT_UPPER_INNER_EYE_9,
    POINT_LEFT_LOWER_INNER_EYE_1,
    POINT_LEFT_LOWER_INNER_EYE_2,
    POINT_LEFT_LOWER_INNER_EYE_3,
    POINT_LEFT_LOWER_INNER_EYE_4,
    POINT_LEFT_LOWER_INNER_EYE_5,
    POINT_LEFT_LOWER_INNER_EYE_6,
    POINT_LEFT_LOWER_INNER_EYE_7,
    POINT_LEFT_UPPER_INNER_EYE_1,
]

# Left eye, outer contour.
POINT_LEFT_UPPER_OUTER_EYE_1 = 463
POINT_LEFT_UPPER_OUTER_EYE_2 = 414
POINT_LEFT_UPPER_OUTER_EYE_3 = 286
POINT_LEFT_UPPER_OUTER_EYE_4 = 258
POINT_LEFT_UPPER_OUTER_EYE_5 = 257
POINT_LEFT_UPPER_OUTER_EYE_6 = 259
POINT_LEFT_UPPER_OUTER_EYE_7 = 260
POINT_LEFT_UPPER_OUTER_EYE_8 = 467
POINT_LEFT_UPPER_OUTER_EYE_9 = 359

LINE_LEFT_UPPER_OUTER_EYE = [
    POINT_LEFT_UPPER_OUTER_EYE_1,
    POINT_LEFT_UPPER_OUTER_EYE_2,
    POINT_LEFT_UPPER_OUTER_EYE_3,
    POINT_LEFT_UPPER_OUTER_EYE_4,
    POINT_LEFT_UPPER_OUTER_EYE_5,
    POINT_LEFT_UPPER_OUTER_EYE_6,
    POINT_LEFT_UPPER_OUTER_EYE_7,
    POINT_LEFT_UPPER_OUTER_EYE_8,
    POINT_LEFT_UPPER_OUTER_EYE_9,
]

POINT_LEFT_LOWER_OUTER_EYE_1 = 255
POINT_LEFT_LOWER_OUTER_EYE_2 = 339
POINT_LEFT_LOWER_OUTER_EYE_3 = 254
POINT_LEFT_LOWER_OUTER_EYE_4 = 253
POINT_LEFT_LOWER_OUTER_EYE_5 = 252
POINT_LEFT_LOWER_OUTER_EYE_6 = 256
POINT_LEFT_LOWER_OUTER_EYE_7 = 341

LINE_LEFT_LOWER_OUTER_EYE = [
    POINT_LEFT_UPPER_OUTER_EYE_9,
    POINT_LEFT_LOWER_OUTER_EYE_1,
    POINT_LEFT_LOWER_OUTER_EYE_2,
    POINT_LEFT_LOWER_OUTER_EYE_3,
    POINT_LEFT_LOWER_OUTER_EYE_4,
    POINT_LEFT_LOWER_OUTER_EYE_5,
    POINT_LEFT_LOWER_OUTER_EYE_6,
    POINT_LEFT_LOWER_OUTER_EYE_7,
    POINT_LEFT_UPPER_OUTER_EYE_1,
]

# Mixed contours: first entry intentionally groups eye points 1 and 2
# ("for move up" per the original note). Duplicated indices inside a group
# presumably weight that point more heavily — verify against the consumer.
LINE_LEFT_UPPER_MIXED_EYE = [
    [POINT_LEFT_UPPER_INNER_EYE_1, POINT_LEFT_UPPER_INNER_EYE_2, POINT_LEFT_LOWER_INNER_EYE_7],
    [POINT_LEFT_UPPER_INNER_EYE_2, POINT_LEFT_UPPER_OUTER_EYE_2],
    [POINT_LEFT_UPPER_INNER_EYE_3, POINT_LEFT_UPPER_INNER_EYE_3, POINT_LEFT_UPPER_OUTER_EYE_3],
    [POINT_LEFT_UPPER_INNER_EYE_4, POINT_LEFT_UPPER_OUTER_EYE_4],
    [POINT_LEFT_UPPER_INNER_EYE_5, POINT_LEFT_UPPER_OUTER_EYE_5],
    [POINT_LEFT_UPPER_INNER_EYE_6, POINT_LEFT_UPPER_OUTER_EYE_6],
    [POINT_LEFT_UPPER_INNER_EYE_8],
    [POINT_LEFT_UPPER_OUTER_EYE_8, POINT_LEFT_UPPER_INNER_EYE_9],
]

LINE_LEFT_UPPER_MIXED_EYE2 = [
    [POINT_LEFT_UPPER_INNER_EYE_1, POINT_LEFT_UPPER_INNER_EYE_1, POINT_LEFT_UPPER_INNER_EYE_2, POINT_LEFT_LOWER_INNER_EYE_7],
    [POINT_LEFT_UPPER_INNER_EYE_2, POINT_LEFT_UPPER_INNER_EYE_2, POINT_LEFT_UPPER_OUTER_EYE_2],
    [POINT_LEFT_UPPER_INNER_EYE_3, POINT_LEFT_UPPER_INNER_EYE_3, POINT_LEFT_UPPER_INNER_EYE_3, POINT_LEFT_UPPER_OUTER_EYE_3],
    [POINT_LEFT_UPPER_INNER_EYE_4, POINT_LEFT_UPPER_INNER_EYE_4, POINT_LEFT_UPPER_OUTER_EYE_4],
    [POINT_LEFT_UPPER_INNER_EYE_5, POINT_LEFT_UPPER_INNER_EYE_5, POINT_LEFT_UPPER_OUTER_EYE_5],
    [POINT_LEFT_UPPER_INNER_EYE_6, POINT_LEFT_UPPER_INNER_EYE_6, POINT_LEFT_UPPER_OUTER_EYE_6],
    [POINT_LEFT_UPPER_INNER_EYE_8],
    [POINT_LEFT_UPPER_OUTER_EYE_8, POINT_LEFT_UPPER_INNER_EYE_9],
]

LINE_LEFT_LOWER_MIXED_EYE = [
    [POINT_LEFT_UPPER_OUTER_EYE_8, POINT_LEFT_UPPER_INNER_EYE_9],
    [POINT_LEFT_LOWER_INNER_EYE_2],
    POINT_LEFT_LOWER_INNER_EYE_3,
    POINT_LEFT_LOWER_INNER_EYE_4,
    POINT_LEFT_LOWER_INNER_EYE_5,
    POINT_LEFT_LOWER_INNER_EYE_6,
    POINT_LEFT_LOWER_INNER_EYE_7,
    [POINT_LEFT_UPPER_INNER_EYE_1, POINT_LEFT_UPPER_INNER_EYE_2, POINT_LEFT_LOWER_INNER_EYE_7],  # combine 1 and 2 for move up
]

# NOTE: slice bounds differ from the right-eye version on purpose.
LINE_LEFT_EYES_WHITE = LINE_LEFT_UPPER_INNER_EYE[1:-1] + LINE_LEFT_LOWER_INNER_EYE[1:-2]
|
280 |
+
|
281 |
+
# LIP contours (FaceMesh landmark indices).
LINE_RIGHT_UPPER_OUTER_LIP = [61, 185, 40, 39, 37, 0]
LINE_LEFT_UPPER_OUTER_LIP = [0, 267, 269, 270, 409, 291]

LINE_LOWER_OUTER_LIP = [
    291,  # mouth corner shared with the upper lip
    375, 321, 405, 314, 17, 84, 181, 91, 146,
    61,   # mouth corner shared with the upper lip
]

# NOTE(review): these "inner" lines carry the same indices as the outer-lip
# lines above — looks like a copy; confirm against the 68-landmark inner-lip
# constants below (78/81/13/311/308...) before relying on them.
LINE_UPPER_INNER_LIP = [61, 185, 40, 39, 37, 0, 267, 269, 270, 409, 291]

LINE_LOWER_INNER_LIP = [
    291,  # mouth corner shared with the upper lip
    375, 321, 405, 314, 17, 84, 181, 91, 146,
    61,   # mouth corner shared with the upper lip
]

# 68-landmark-style lip points (names suggest the dlib 68-point numbering)
# expressed as FaceMesh index groups.
LANDMARK_68_UPPER_OUTER_LIP_49 = [61]
LANDMARK_68_UPPER_OUTER_LIP_50 = [40, 39]
LANDMARK_68_UPPER_OUTER_LIP_51 = [37]
LANDMARK_68_UPPER_OUTER_LIP_52 = [0]
LANDMARK_68_UPPER_OUTER_LIP_53 = [267]
LANDMARK_68_UPPER_OUTER_LIP_54 = [270, 269]
LANDMARK_68_UPPER_OUTER_LIP_55 = [291]

LANDMARK_68_LOWER_OUTER_LIP_56 = [375, 321]
LANDMARK_68_LOWER_OUTER_LIP_57 = [405, 314]
LANDMARK_68_LOWER_OUTER_LIP_58 = [17]
LANDMARK_68_LOWER_OUTER_LIP_59 = [84, 181]
LANDMARK_68_LOWER_OUTER_LIP_60 = [146, 91]

LANDMARK_68_UPPER_INNER_LIP_61 = [78]
LANDMARK_68_UPPER_INNER_LIP_62 = [81]
LANDMARK_68_UPPER_INNER_LIP_63 = [13]
LANDMARK_68_UPPER_INNER_LIP_64 = [311]
LANDMARK_68_UPPER_INNER_LIP_65 = [308]

LANDMARK_68_LOWER_INNER_LIP_66 = [402]
LANDMARK_68_LOWER_INNER_LIP_67 = [14]
LANDMARK_68_LOWER_INNER_LIP_68 = [178]

# Iris / pupil landmarks (FaceMesh refined-landmark indices 468-477).
POINT_LEFT_PUPIL = 473
LINE_LEFT_IRIS = [474, 475, 476, 477]
POINT_RIGHT_PUPIL = 468
LINE_RIGHT_IRIS = [469, 470, 471, 472]
|
mp_utils.py
ADDED
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import math
|
2 |
+
|
3 |
+
import mediapipe as mp
|
4 |
+
from mediapipe.tasks import python
|
5 |
+
from mediapipe.tasks.python import vision
|
6 |
+
from mediapipe.framework.formats import landmark_pb2
|
7 |
+
from mediapipe import solutions
|
8 |
+
import numpy as np
|
9 |
+
|
10 |
+
|
11 |
+
def calculate_distance(p1, p2):
    """Return the Euclidean distance between 2-D points ``p1`` and ``p2``.

    Args:
        p1, p2: indexable (x, y) pairs (lists, tuples, ...).

    Returns:
        float distance.
    """
    # math.hypot is the idiomatic form and is numerically safer than
    # sqrt(dx*dx + dy*dy) for very large/small components.
    return math.hypot(p2[0] - p1[0], p2[1] - p1[1])
16 |
+
def to_int_points(points):
    """Truncate a sequence of (x, y) pairs into a list of [int(x), int(y)] lists."""
    return [[int(p[0]), int(p[1])] for p in points]
|
24 |
+
|
25 |
+
# Module-level switch for verbose tracing in divide_line_to_points.
debug = False
def divide_line_to_points(points,divided): # return divided + 1
    """Resample a polyline into `divided` equal-length segments.

    Walks the polyline defined by `points`, computes its total length, and
    inserts `divided - 1` evenly spaced points by linear interpolation along
    the original segments. Returns the original first and last points plus
    the interpolated ones (i.e. divided + 1 points), truncated to int
    coordinates via to_int_points.
    """
    # Pass 1: accumulate total length and remember each segment's length.
    total_length = 0
    line_length_list = []
    for i in range(len(points)-1):
        pt_length = calculate_distance(points[i],points[i+1])
        total_length += pt_length
        line_length_list.append(pt_length)

    # Target arc-length spacing between resampled points.
    splited_length = total_length/divided

    def get_new_point(index,lerp):
        # Linear interpolation at fraction `lerp` along segment `index`.
        pt1 = points[index]
        pt2 = points[index+1]
        diff = [pt2[0] - pt1[0], pt2[1]-pt1[1]]
        new_point = [pt1[0]+diff[0]*lerp,pt1[1]+diff[1]*lerp]
        if debug:
            print(f"pt1 ={pt1} pt2 ={pt2} diff={diff} new_point={new_point}")

        return new_point

    if debug:
        print(f"{total_length} splitted = {splited_length} line-length-list = {len(line_length_list)}")
    # Pass 2: for each target arc length, find the segment containing it and
    # interpolate. The first original point is always kept.
    splited_points=[points[0]]
    for i in range(1,divided):
        need_length = splited_length*i
        if debug:
            print(f"{i} need length = {need_length}")
        current_length = 0
        for j in range(len(line_length_list)):
            line_length = line_length_list[j]
            current_length+=line_length
            # NOTE(review): strict '>' means a target that lands exactly on a
            # segment boundary produces no point for this i — confirm whether
            # '>=' was intended (only matters for exact float equality).
            if current_length>need_length:
                if debug:
                    print(f"over need length index = {j} current={current_length}")
                # Overshoot past the target, converted to a 0..1 fraction
                # measured from the start of segment j.
                diff = current_length - need_length

                lerp_point = 1.0 - (diff/line_length)
                if debug:
                    print(f"over = {diff} lerp ={lerp_point}")
                new_point = get_new_point(j,lerp_point)

                splited_points.append(new_point)
                break

    splited_points.append(points[-1]) # last one
    splited_points=to_int_points(splited_points)

    if debug:
        print(f"sp={len(splited_points)}")
    return splited_points
|
76 |
+
|
77 |
+
|
78 |
+
|
79 |
+
def expand_bbox(bbox,left=5,top=5,right=5,bottom=5):
    """Grow an [x, y, w, h] box by per-side percentages of its own size.

    Each side argument is a percentage (default 5%) of the box width (left,
    right) or height (top, bottom). Returns a new list; any extra elements
    beyond the first four are carried over unchanged.
    """
    pad_left = bbox[2] * (float(left) / 100)
    pad_top = bbox[3] * (float(top) / 100)
    pad_right = bbox[2] * (float(right) / 100)
    pad_bottom = bbox[3] * (float(bottom) / 100)

    expanded = list(bbox)
    expanded[0] -= pad_left
    expanded[1] -= pad_top
    expanded[2] += pad_left + pad_right
    expanded[3] += pad_top + pad_bottom
    return expanded
|
90 |
+
|
91 |
+
#normalized value index see mp_constants
|
92 |
+
def get_normalized_cordinate(face_landmarks_list,index):
    """Return the normalized (x, y) of landmark `index` on the first face.

    `face_landmarks_list` is indexed as [face][landmark]; landmark index
    meanings are listed in mp_constants.
    """
    landmark = face_landmarks_list[0][index]
    return landmark.x, landmark.y
|
96 |
+
|
97 |
+
def get_pixel_cordinate(face_landmarks_list,landmark,width,height):
    """Return landmark `landmark` of the first face scaled to pixel space as (int x, int y)."""
    nx, ny = get_normalized_cordinate(face_landmarks_list, landmark)
    return int(nx * width), int(ny * height)
|
100 |
+
|
101 |
+
def get_pixel_cordinate_list(face_landmarks_list,indices,width,height):
    """Return pixel (x, y) tuples for every landmark index in `indices`."""
    return [
        get_pixel_cordinate(face_landmarks_list, idx, width, height)
        for idx in indices
    ]
|
106 |
+
|
107 |
+
def extract_landmark(image_data,model_path="face_landmarker.task"):
    """Run MediaPipe FaceLandmarker on a single image and return (mp_image, result).

    Args:
        image_data: either a file path string (loaded via mp.Image.create_from_file)
            or an array-like / PIL image converted with np.asarray — assumed to be
            RGB data since it is tagged SRGB (TODO confirm callers always pass RGB).
        model_path: path to the FaceLandmarker .task model bundle.

    Returns:
        Tuple of the mp.Image actually analyzed and the FaceLandmarkerResult.
    """
    BaseOptions = mp.tasks.BaseOptions
    FaceLandmarker = mp.tasks.vision.FaceLandmarker
    FaceLandmarkerOptions = mp.tasks.vision.FaceLandmarkerOptions
    VisionRunningMode = mp.tasks.vision.RunningMode

    # Confidence thresholds are deliberately 0 so even weak detections yield
    # a result (NOTE(review): this accepts every candidate face).
    options = FaceLandmarkerOptions(
        base_options=BaseOptions(model_asset_path=model_path),
        running_mode=VisionRunningMode.IMAGE
        ,min_face_detection_confidence=0, min_face_presence_confidence=0
    )

    # A fresh landmarker is created per call; fine for one-shot use, but
    # repeated calls reload the model each time.
    with FaceLandmarker.create_from_options(options) as landmarker:
        if isinstance(image_data,str):
            mp_image = mp.Image.create_from_file(image_data)
        else:
            mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=np.asarray(image_data))
        face_landmarker_result = landmarker.detect(mp_image)
        return mp_image,face_landmarker_result
|
opencvinpaint.py
ADDED
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import sys
|
3 |
+
|
4 |
+
import cv2
|
5 |
+
import numpy as np
|
6 |
+
from PIL import Image
|
7 |
+
|
8 |
+
|
9 |
+
# Enables debug image dumps in process_cvinpaint.
debug = False

def gray3d_to_2d(grayscale: np.ndarray) -> np.ndarray:
    """Collapse an (H, W, 1) grayscale array to (H, W); pass (H, W) through as-is.

    Raises:
        ValueError: when the last axis of a 3-D input has more than one
            channel (i.e. the image is actually RGB/RGBA).
    """
    channel = grayscale.shape[2] if grayscale.ndim == 3 else 1
    if channel != 1:
        text = f"grayscale shape = {grayscale.shape} channel = {channel} ndim = {grayscale.ndim} size = {grayscale.size}"
        raise ValueError(f"color maybe rgb or rgba {text}")

    # 2-D input is returned unchanged (same object); (H, W, 1) is squeezed.
    return grayscale if grayscale.ndim == 2 else np.squeeze(grayscale)
|
20 |
+
|
21 |
+
def pil_to_cv(image):
    """Convert a PIL image (RGB, RGBA, or grayscale) to an OpenCV BGR/gray ndarray.

    Args:
        image: PIL.Image or anything np.array accepts.

    Returns:
        np.uint8 array in BGR channel order (alpha dropped), or a 2-D array
        if the input was single-channel.
    """
    cv_image = np.array(image, dtype=np.uint8)
    # Fix: the original indexed shape[2] unconditionally, which raised
    # IndexError for grayscale (2-D) input; such input needs no reordering.
    if cv_image.ndim == 2:
        return cv_image
    if cv_image.shape[2] == 3:  # color (RGB)
        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)
    elif cv_image.shape[2] == 4:  # RGBA; alpha is discarded
        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGBA2BGR)
    return cv_image
|
28 |
+
|
29 |
+
def blend_rgb_images(image1: np.ndarray, image2: np.ndarray, mask: np.ndarray) -> np.ndarray:
    """Alpha-blend two same-shape BGR images using a single-channel 0-255 mask.

    mask == 0 keeps image1, mask == 255 takes image2, in-between values mix
    proportionally.

    Raises:
        ValueError: when image shapes or the mask's (H, W) do not match.
    """
    shapes_ok = image1.shape == image2.shape and image1.shape[:2] == mask.shape
    if not shapes_ok:
        raise ValueError("not same shape")

    # Work in float so the weighted sum does not wrap uint8 values.
    base = image1.astype(float)
    overlay = image2.astype(float)

    # Replicate the mask across 3 channels and normalize to 0.0-1.0.
    alpha = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR).astype(float) / 255.0

    mixed = base * (1 - alpha) + overlay * alpha
    return mixed.astype(np.uint8)
|
45 |
+
|
46 |
+
def process_cvinpaint(image,mask_image,inpaint_radius,blur_radius,edge_expand,inpaint_mode,dilate=0):
    """Inpaint the masked region of a PIL image with OpenCV and feather the seam.

    Args:
        image: PIL source image.
        mask_image: PIL mask; non-zero pixels mark the area to inpaint.
        inpaint_radius: radius passed to cv2.inpaint.
        blur_radius: Gaussian kernel size for post-blur (forced odd; 0 disables).
        edge_expand: dilation kernel size used to widen the blend region.
        inpaint_mode: "Telea" selects cv2.INPAINT_TELEA, anything else INPAINT_NS.
        dilate: optional pre-dilation of the mask in pixels (0 = off).

    Returns:
        Tuple of (inpainted PIL image in RGB, PIL image of the mask used).
    """
    #print("process cvinpaint")
    #print(blur_radius,",",edge_expand)
    cv_image = pil_to_cv(image)

    cv_mask = pil_to_cv(mask_image)




    # Mask must be single-channel for cv2.inpaint.
    cv_gray = cv2.cvtColor(cv_mask,cv2.COLOR_BGR2GRAY)


    mask = gray3d_to_2d(cv_gray)
    if dilate>0:
        kernel = np.ones((dilate, dilate), np.uint8)
        mask = cv2.dilate(mask, kernel, iterations=1)

    #cv2.imwrite("_mask.jpg",mask)
    #cv2.imwrite("_image.jpg",cv_image)
    mode = cv2.INPAINT_TELEA if inpaint_mode == "Telea" else cv2.INPAINT_NS
    img_inpainted = cv2.inpaint(cv_image, mask,inpaint_radius, mode)
    if debug:
        cv2.imwrite("close_eye_inpaint.jpg",img_inpainted)


    ## blur
    if blur_radius > 0:
        # GaussianBlur requires an odd kernel size; bump even values by one.
        if blur_radius%2==0:
            blur_radius += 1
        #print(blur_radius)
        blurred_image = cv2.GaussianBlur(img_inpainted, (blur_radius, blur_radius), 0) #should be odd
        if debug:
            cv2.imwrite("close_eye_inpaint_burred.jpg",blurred_image)
    else:
        blurred_image = img_inpainted

    # expand edge and blur
    # NOTE(review): when edge_expand == 0 this builds a 0x0 kernel; the
    # resulting cv2.dilate behavior depends on OpenCV's empty-kernel
    # handling — confirm it degrades to a no-op as intended.
    kernel = np.ones((edge_expand, edge_expand), np.uint8)
    extend_mask = cv2.dilate(mask, kernel, iterations=1)

    if edge_expand > 0 and blur_radius > 0:
        extend_burred_mask = cv2.GaussianBlur(extend_mask, (blur_radius, blur_radius), 0)
    else:
        extend_burred_mask = extend_mask

    # Blend the blurred version back over the sharp inpaint using the
    # (expanded, softened) mask so the seam is feathered.
    img_inpainted = blend_rgb_images(img_inpainted,blurred_image,extend_burred_mask)

    output_image = img_inpainted.copy()

    if output_image.shape[2] == 3: # color
        output_image = cv2.cvtColor(output_image, cv2.COLOR_BGR2RGB)

    return Image.fromarray(output_image),Image.fromarray(mask)
|
101 |
+
|
102 |
+
if __name__ == "__main__":
    # CLI usage: python opencvinpaint.py <image> <mask> <output>
    image = Image.open(sys.argv[1])
    mask = Image.open(sys.argv[2])
    # Fix: the original called process_cvinpaint(image, mask), omitting the
    # four required positional arguments (TypeError), and then called .save()
    # on the returned (image, mask) tuple. Defaults below are modest values
    # for a CLI smoke run — tune as needed.
    inpainted, _used_mask = process_cvinpaint(image, mask, 3, 3, 4, "Telea")
    inpainted.save(sys.argv[3])
|
requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
numpy
|
2 |
+
torch
|
3 |
+
spaces
|
4 |
+
mediapipe
|
5 |
+
opencv-python
|