shuhulhandoo committed
Commit: 5664dc5
Parent(s): 3c30086

Upload 6 files

Files changed:
- README.md +55 -12
- face_detection.py +95 -0
- face_swap.py +238 -0
- main.py +38 -0
- main_video.py +58 -0
- requirements.txt +3 -0
README.md
CHANGED
@@ -1,12 +1,55 @@
# FaceSwap

Swap faces between two photos in Python 3 with OpenCV and dlib.

## Get Started

```sh
python main.py --src imgs/test6.jpg --dst imgs/test7.jpg --out results/output6_7.jpg --correct_color
```

| Source | Destination | Result |
| --- | --- | --- |
| ![](imgs/test6.jpg) | ![](imgs/test7.jpg) | ![](results/output6_7.jpg) |

```sh
python main.py --src imgs/test6.jpg --dst imgs/test7.jpg --out results/output6_7_2d.jpg --correct_color --warp_2d
```

| Source | Destination | Result |
| --- | --- | --- |
| ![](imgs/test6.jpg) | ![](imgs/test7.jpg) | ![](results/output6_7_2d.jpg) |

## Install

### Requirements

* `pip install -r requirements.txt`
* OpenCV 3: `conda install opencv` (if you use conda/Anaconda)

Note: See [requirements.txt](requirements.txt) for more details.
+
### Git Clone
|
29 |
+
```sh
|
30 |
+
git clone https://github.com/wuhuikai/FaceSwap.git
|
31 |
+
```
|
32 |
+
### Swap Your Face
|
33 |
+
```sh
|
34 |
+
python main.py ...
|
35 |
+
```
|
36 |
+
Note: Run **python main.py -h** for more details.
|
37 |
+
|
38 |
+
|
39 |
+
### Real-time camera
|
40 |
+
```sh
|
41 |
+
python main_video.py --src_img imgs/test7.jpg --show --correct_color --save_path {*.avi}
|
42 |
+
```
|
43 |
+
### Video
|
44 |
+
```sh
|
45 |
+
python main_video.py --src_img imgs/test7.jpg --video_path {video_path} --show --correct_color --save_path {*.avi}
|
46 |
+
```
|
47 |
+
|
48 |
+
## More Results
|
49 |
+
| From | To |
|
50 |
+
| --- | --- |
|
51 |
+
| ![](imgs/test4.jpg) | ![](results/output6_4.jpg) |
|
52 |
+
| ![](imgs/test3.jpg) | ![](results/output6_3.jpg) |
|
53 |
+
| ![](imgs/test2.jpg) | ![](results/output6_2_2d.jpg) |
|
54 |
+
| ![](imgs/test1.jpg) | ![](results/output6_1.jpg) |
|
55 |
+
| ![](imgs/test4.jpg) | ![](results/output7_4.jpg) |
|
face_detection.py
ADDED
@@ -0,0 +1,95 @@
import cv2
import dlib
import numpy as np

## Face detection
def face_detection(img, upsample_times=1):
    # Ask the detector to find the bounding boxes of each face. Upsampling
    # the image makes everything bigger and allows the detector to find
    # smaller faces.
    detector = dlib.get_frontal_face_detector()
    faces = detector(img, upsample_times)

    return faces

PREDICTOR_PATH = 'models/shape_predictor_68_face_landmarks.dat'
predictor = dlib.shape_predictor(PREDICTOR_PATH)

## Face and points detection
def face_points_detection(img, bbox: dlib.rectangle):
    # Get the landmarks/parts for the face in box bbox
    shape = predictor(img, bbox)

    # loop over the 68 facial landmarks and convert them
    # to a 2-tuple of (x, y)-coordinates
    coords = np.asarray([[p.x, p.y] for p in shape.parts()], dtype=int)

    # return the array of (x, y)-coordinates
    return coords

def select_face(im, r=10, choose=True):
    faces = face_detection(im)

    if len(faces) == 0:
        return None, None, None

    if len(faces) == 1 or not choose:
        # pick the largest detected face
        idx = np.argmax([(face.right() - face.left()) * (face.bottom() - face.top()) for face in faces])
        bbox = faces[idx]
    else:
        bbox = []

        def click_on_face(event, x, y, flags, params):
            if event != cv2.EVENT_LBUTTONDOWN:
                return

            for face in faces:
                if face.left() < x < face.right() and face.top() < y < face.bottom():
                    bbox.append(face)
                    break

        im_copy = im.copy()
        for face in faces:
            # draw the face bounding box
            cv2.rectangle(im_copy, (face.left(), face.top()), (face.right(), face.bottom()), (0, 0, 255), 1)
        cv2.imshow('Click the Face:', im_copy)
        cv2.setMouseCallback('Click the Face:', click_on_face)
        while len(bbox) == 0:
            cv2.waitKey(1)
        cv2.destroyAllWindows()
        bbox = bbox[0]

    points = np.asarray(face_points_detection(im, bbox))

    im_h, im_w = im.shape[:2]  # shape is (rows, cols) = (height, width)
    left, top = np.min(points, 0)
    right, bottom = np.max(points, 0)

    # expand the landmark bounding box by r, clamped to the image borders
    x, y = max(0, left - r), max(0, top - r)
    w, h = min(right + r, im_w) - x, min(bottom + r, im_h) - y

    return points - np.asarray([[x, y]]), (x, y, w, h), im[y:y + h, x:x + w]


def select_all_faces(im, r=10):
    faces = face_detection(im)

    if len(faces) == 0:
        return None

    faceBoxes = {k: {"points": None,
                     "shape": None,
                     "face": None} for k in range(len(faces))}
    for i, bbox in enumerate(faces):
        points = np.asarray(face_points_detection(im, bbox))

        im_h, im_w = im.shape[:2]
        left, top = np.min(points, 0)
        right, bottom = np.max(points, 0)

        x, y = max(0, left - r), max(0, top - r)
        w, h = min(right + r, im_w) - x, min(bottom + r, im_h) - y
        faceBoxes[i]["points"] = points - np.asarray([[x, y]])
        faceBoxes[i]["shape"] = (x, y, w, h)
        faceBoxes[i]["face"] = im[y:y + h, x:x + w]

    return faceBoxes
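A minimal usage sketch for these helpers (illustrative only; assumes the landmark model is in `models/` and that `imgs/test6.jpg` exists):

```python
import cv2
from face_detection import select_face, select_all_faces

im = cv2.imread('imgs/test6.jpg')
# Largest face, skipping the interactive click-to-choose window
points, shape, face = select_face(im, choose=False)
# Every detected face, keyed by index
boxes = select_all_faces(im)
print(shape, None if boxes is None else len(boxes))
```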
face_swap.py
ADDED
@@ -0,0 +1,238 @@
#! /usr/bin/env python
import cv2
import numpy as np
import scipy.spatial as spatial
import logging


## 3D Transform
def bilinear_interpolate(img, coords):
    """ Interpolates over every image channel
    http://en.wikipedia.org/wiki/Bilinear_interpolation
    :param img: max 3 channel image
    :param coords: 2 x _m_ array. 1st row = xcoords, 2nd row = ycoords
    :returns: array of interpolated pixels with same shape as coords
    """
    int_coords = np.int32(coords)
    x0, y0 = int_coords
    dx, dy = coords - int_coords

    # 4 neighbouring pixels
    q11 = img[y0, x0]
    q21 = img[y0, x0 + 1]
    q12 = img[y0 + 1, x0]
    q22 = img[y0 + 1, x0 + 1]

    btm = q21.T * dx + q11.T * (1 - dx)
    top = q22.T * dx + q12.T * (1 - dx)
    inter_pixel = top * dy + btm * (1 - dy)

    return inter_pixel.T


def grid_coordinates(points):
    """ x,y grid coordinates within the ROI of supplied points
    :param points: points to generate grid coordinates
    :returns: array of (x, y) coordinates
    """
    xmin = np.min(points[:, 0])
    xmax = np.max(points[:, 0]) + 1
    ymin = np.min(points[:, 1])
    ymax = np.max(points[:, 1]) + 1

    return np.asarray([(x, y) for y in range(ymin, ymax)
                       for x in range(xmin, xmax)], np.uint32)


def process_warp(src_img, result_img, tri_affines, dst_points, delaunay):
    """
    Warp each triangle from the src_image only within the
    ROI of the destination image (points in dst_points).
    result_img is modified in place.
    """
    roi_coords = grid_coordinates(dst_points)
    # indices to vertices. -1 if pixel is not in any triangle
    roi_tri_indices = delaunay.find_simplex(roi_coords)

    for simplex_index in range(len(delaunay.simplices)):
        coords = roi_coords[roi_tri_indices == simplex_index]
        num_coords = len(coords)
        out_coords = np.dot(tri_affines[simplex_index],
                            np.vstack((coords.T, np.ones(num_coords))))
        x, y = coords.T
        result_img[y, x] = bilinear_interpolate(src_img, out_coords)


def triangular_affine_matrices(vertices, src_points, dst_points):
    """
    Calculate the affine transformation matrix for each
    triangle (x,y) vertex from dst_points to src_points
    :param vertices: array of triplet indices to corners of triangle
    :param src_points: array of [x, y] points to landmarks for source image
    :param dst_points: array of [x, y] points to landmarks for destination image
    :returns: 2 x 3 affine matrix transformation for a triangle
    """
    ones = [1, 1, 1]
    for tri_indices in vertices:
        src_tri = np.vstack((src_points[tri_indices, :].T, ones))
        dst_tri = np.vstack((dst_points[tri_indices, :].T, ones))
        mat = np.dot(src_tri, np.linalg.inv(dst_tri))[:2, :]
        yield mat


def warp_image_3d(src_img, src_points, dst_points, dst_shape, dtype=np.uint8):
    rows, cols = dst_shape[:2]
    result_img = np.zeros((rows, cols, 3), dtype=dtype)

    delaunay = spatial.Delaunay(dst_points)
    tri_affines = np.asarray(list(triangular_affine_matrices(
        delaunay.simplices, src_points, dst_points)))

    process_warp(src_img, result_img, tri_affines, dst_points, delaunay)

    return result_img


## 2D Transform
def transformation_from_points(points1, points2):
    points1 = points1.astype(np.float64)
    points2 = points2.astype(np.float64)

    c1 = np.mean(points1, axis=0)
    c2 = np.mean(points2, axis=0)
    points1 -= c1
    points2 -= c2

    s1 = np.std(points1)
    s2 = np.std(points2)
    points1 /= s1
    points2 /= s2

    U, S, Vt = np.linalg.svd(np.dot(points1.T, points2))
    R = (np.dot(U, Vt)).T

    return np.vstack([np.hstack([s2 / s1 * R,
                                 (c2.T - np.dot(s2 / s1 * R, c1.T))[:, np.newaxis]]),
                      np.array([[0., 0., 1.]])])


def warp_image_2d(im, M, dshape):
    output_im = np.zeros(dshape, dtype=im.dtype)
    cv2.warpAffine(im,
                   M[:2],
                   (dshape[1], dshape[0]),
                   dst=output_im,
                   borderMode=cv2.BORDER_TRANSPARENT,
                   flags=cv2.WARP_INVERSE_MAP)

    return output_im


## Generate Mask
def mask_from_points(size, points, erode_flag=1):
    radius = 10  # kernel size
    kernel = np.ones((radius, radius), np.uint8)

    mask = np.zeros(size, np.uint8)
    cv2.fillConvexPoly(mask, cv2.convexHull(points), 255)
    if erode_flag:
        mask = cv2.erode(mask, kernel, iterations=1)

    return mask


## Color Correction
def correct_colours(im1, im2, landmarks1):
    COLOUR_CORRECT_BLUR_FRAC = 0.75
    LEFT_EYE_POINTS = list(range(42, 48))
    RIGHT_EYE_POINTS = list(range(36, 42))

    blur_amount = COLOUR_CORRECT_BLUR_FRAC * np.linalg.norm(
        np.mean(landmarks1[LEFT_EYE_POINTS], axis=0) -
        np.mean(landmarks1[RIGHT_EYE_POINTS], axis=0))
    blur_amount = int(blur_amount)
    if blur_amount % 2 == 0:
        blur_amount += 1
    im1_blur = cv2.GaussianBlur(im1, (blur_amount, blur_amount), 0)
    im2_blur = cv2.GaussianBlur(im2, (blur_amount, blur_amount), 0)

    # Avoid divide-by-zero errors.
    im2_blur = im2_blur.astype(int)
    im2_blur += 128 * (im2_blur <= 1)

    result = im2.astype(np.float64) * im1_blur.astype(np.float64) / im2_blur.astype(np.float64)
    result = np.clip(result, 0, 255).astype(np.uint8)

    return result


## Copy-and-paste
def apply_mask(img, mask):
    """ Apply mask to supplied image
    :param img: max 3 channel image
    :param mask: [0-255] values in mask
    :returns: new image with mask applied
    """
    masked_img = cv2.bitwise_and(img, img, mask=mask)

    return masked_img


## Alpha blending
def alpha_feathering(src_img, dest_img, img_mask, blur_radius=15):
    mask = cv2.blur(img_mask, (blur_radius, blur_radius))
    mask = mask / 255.0

    result_img = np.empty(src_img.shape, np.uint8)
    for i in range(3):
        result_img[..., i] = src_img[..., i] * mask + dest_img[..., i] * (1 - mask)

    return result_img


def check_points(img, points):
    # TODO: only the jaw-below-image case is handled so far.
    if points[8, 1] > img.shape[0]:
        logging.error("Jaw part out of image")
    else:
        return True
    return False


def face_swap(src_face, dst_face, src_points, dst_points, dst_shape, dst_img, args, end=48):
    h, w = dst_face.shape[:2]

    ## 3d warp
    warped_src_face = warp_image_3d(src_face, src_points[:end], dst_points[:end], (h, w))
    ## Mask for blending
    mask = mask_from_points((h, w), dst_points)
    mask_src = np.mean(warped_src_face, axis=2) > 0
    mask = np.asarray(mask * mask_src, dtype=np.uint8)
    ## Correct color (args is a plain string flag in this version)
    if args == "correct color":
        warped_src_face = apply_mask(warped_src_face, mask)
        dst_face_masked = apply_mask(dst_face, mask)
        warped_src_face = correct_colours(dst_face_masked, warped_src_face, dst_points)
    ## 2d warp
    if args == "warp_2d":
        unwarped_src_face = warp_image_3d(warped_src_face, dst_points[:end], src_points[:end], src_face.shape[:2])
        warped_src_face = warp_image_2d(unwarped_src_face, transformation_from_points(dst_points, src_points),
                                        (h, w, 3))

        mask = mask_from_points((h, w), dst_points)
        mask_src = np.mean(warped_src_face, axis=2) > 0
        mask = np.asarray(mask * mask_src, dtype=np.uint8)

    ## Shrink the mask
    kernel = np.ones((10, 10), np.uint8)
    mask = cv2.erode(mask, kernel, iterations=1)
    ## Poisson Blending
    r = cv2.boundingRect(mask)
    center = (r[0] + int(r[2] / 2), r[1] + int(r[3] / 2))
    output = cv2.seamlessClone(warped_src_face, dst_face, mask, center, cv2.NORMAL_CLONE)

    x, y, w, h = dst_shape
    dst_img_cp = dst_img.copy()
    dst_img_cp[y:y + h, x:x + w] = output

    return dst_img_cp
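For reference, `transformation_from_points` above computes the standard least-squares similarity alignment (orthogonal Procrustes) between the two landmark sets: with means $c_1, c_2$, standard deviations $s_1, s_2$, and $\hat{p}_1, \hat{p}_2$ the centered, scale-normalized point sets, it returns

```latex
U \Sigma V^T = \hat{p}_1^{\,T} \hat{p}_2, \qquad
R = (U V^T)^T, \qquad
T = \begin{bmatrix} \tfrac{s_2}{s_1} R & \; c_2 - \tfrac{s_2}{s_1} R\, c_1 \\ 0 & 1 \end{bmatrix}
```

which `warp_image_2d` then applies in inverse form via `cv2.warpAffine` with `WARP_INVERSE_MAP`.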
main.py
ADDED
@@ -0,0 +1,38 @@
#! /usr/bin/env python
import cv2
import numpy as np
import streamlit as st
from face_detection import select_face, select_all_faces
from face_swap import face_swap


def swap():
    src_upload = st.file_uploader("Source Image", type=["jpg", "png", "jpeg"])
    dst_upload = st.file_uploader("Destination Image", type=["jpg", "png", "jpeg"])
    if src_upload is None or dst_upload is None:
        return None

    # Decode the uploaded bytes in memory into OpenCV BGR images
    src_img = cv2.imdecode(np.frombuffer(src_upload.read(), np.uint8), cv2.IMREAD_COLOR)
    dst_img = cv2.imdecode(np.frombuffer(dst_upload.read(), np.uint8), cv2.IMREAD_COLOR)

    src_points, src_shape, src_face = select_face(src_img)

    dst_faceBoxes = select_all_faces(dst_img)

    if dst_faceBoxes is None:
        print('Detect 0 Face !!')
        exit(-1)

    # Swap the source face onto every detected destination face
    output = dst_img
    for k, dst_face in dst_faceBoxes.items():
        output = face_swap(src_face, dst_face["face"], src_points,
                           dst_face["points"], dst_face["shape"],
                           output, "correct color")

    return output


if __name__ == '__main__':
    swap()
    # demo = gr.Interface(fn=main, inputs=["image", "image"], outputs="image")
    # demo.launch()
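Note: this uploaded main.py replaces the argparse CLI that the README commands assume with Streamlit file uploaders, so it is presumably meant to be launched with `streamlit run main.py`; run as plain `python main.py`, the uploaders return no data and `swap()` exits early.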
main_video.py
ADDED
@@ -0,0 +1,58 @@
import os
import cv2
import logging
import argparse

from face_detection import select_face
from face_swap import face_swap


class VideoHandler(object):
    def __init__(self, video_path=0, img_path=None, args=None):
        self.src_points, self.src_shape, self.src_face = select_face(cv2.imread(img_path))
        if self.src_points is None:
            print('No face detected in the source image !!!')
            exit(-1)
        self.args = args
        self.video = cv2.VideoCapture(video_path)
        self.writer = cv2.VideoWriter(args.save_path, cv2.VideoWriter_fourcc(*'MJPG'), self.video.get(cv2.CAP_PROP_FPS),
                                      (int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT))))

    def start(self):
        # face_swap switches on a single string flag in this version, so
        # translate the CLI options (only one mode can apply per frame)
        mode = "warp_2d" if self.args.warp_2d else ("correct color" if self.args.correct_color else "")
        while self.video.isOpened():
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            ret, dst_img = self.video.read()
            if not ret:  # end of stream
                break
            dst_points, dst_shape, dst_face = select_face(dst_img, choose=False)
            if dst_points is not None:
                dst_img = face_swap(self.src_face, dst_face, self.src_points, dst_points, dst_shape, dst_img, mode, 68)
            self.writer.write(dst_img)
            if self.args.show:
                cv2.imshow("Video", dst_img)

        self.video.release()
        self.writer.release()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO,
                        format="%(levelname)s:%(lineno)d:%(message)s")

    parser = argparse.ArgumentParser(description='FaceSwap Video')
    parser.add_argument('--src_img', required=True,
                        help='Path for source image')
    parser.add_argument('--video_path', default=0,
                        help='Path for video')
    parser.add_argument('--warp_2d', default=False, action='store_true', help='2d or 3d warp')
    parser.add_argument('--correct_color', default=False, action='store_true', help='Correct color')
    parser.add_argument('--show', default=False, action='store_true', help='Show')
    parser.add_argument('--save_path', required=True, help='Path for storing output video')
    args = parser.parse_args()

    dir_path = os.path.dirname(args.save_path)
    if dir_path and not os.path.isdir(dir_path):
        os.makedirs(dir_path)

    VideoHandler(args.video_path, args.src_img, args).start()
requirements.txt
ADDED
@@ -0,0 +1,3 @@
dlib >= 19.9.0
numpy >= 1.13.1
scipy >= 0.18.0
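Note: the scripts also import packages not pinned here, at least OpenCV (`opencv-python`, or the conda `opencv` build mentioned in the README) and, for this version of main.py, `streamlit`.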