felixrosberg committed
Commit 87d3df0
Parent(s): 1b7cc25
Update app.py

app.py CHANGED
@@ -43,65 +43,69 @@ blend_mask_base = gaussian_filter(blend_mask_base, sigma=7)
 
 
 def run_inference(target, source):
-    source = np.array(source)
-    target = np.array(target)
-
-    # prepare the source face and extract its identity embedding
-    source_a = RetinaFace(np.expand_dims(source, axis=0)).numpy()[0]
-    source_h, source_w, _ = source.shape
-    source_lm = get_lm(source_a, source_w, source_h)
-    source_aligned = norm_crop(source, source_lm, image_size=256)
-    source_z = ArcFace.predict(np.expand_dims(tf.image.resize(source_aligned, [112, 112]) / 255.0, axis=0))
-
-    # read frame
-    im = target
-    im_h, im_w, _ = im.shape
-    im_shape = (im_w, im_h)
-
-    detection_scale = im_w // 640 if im_w > 640 else 1
-
-    faces = RetinaFace(np.expand_dims(cv2.resize(im,
-                                                 (im_w // detection_scale,
-                                                  im_h // detection_scale)), axis=0)).numpy()
-
-    total_img = im / 255.0
-    for annotation in faces:
-        lm_align = np.array([[annotation[4] * im_w, annotation[5] * im_h],
-                             [annotation[6] * im_w, annotation[7] * im_h],
-                             [annotation[8] * im_w, annotation[9] * im_h],
-                             [annotation[10] * im_w, annotation[11] * im_h],
-                             [annotation[12] * im_w, annotation[13] * im_h]],
-                            dtype=np.float32)
-
-        # align the detected face
-        M, pose_index = estimate_norm(lm_align, 256, "arcface", shrink_factor=1.0)
-        im_aligned = cv2.warpAffine(im, M, (256, 256), borderValue=0.0)
-
-        # face swap
-        changed_face_cage = G.predict([np.expand_dims((im_aligned - 127.5) / 127.5, axis=0),
-                                       source_z])
-        changed_face = (changed_face_cage[0] + 1) / 2
-
-        # get inverse transformation landmarks
-        transformed_lmk = transform_landmark_points(M, lm_align)
-
-        # warp image back
-        iM, _ = inverse_estimate_norm(lm_align, transformed_lmk, 256, "arcface", shrink_factor=1.0)
-        iim_aligned = cv2.warpAffine(changed_face, iM, im_shape, borderValue=0.0)
-
-        # blend swapped face with target image
-        blend_mask = cv2.warpAffine(blend_mask_base, iM, im_shape, borderValue=0.0)
-        blend_mask = np.expand_dims(blend_mask, axis=-1)
-        total_img = (iim_aligned * blend_mask + total_img * (1 - blend_mask))
-
-    if opt.compare:
-        total_img = np.concatenate((im / 255.0, total_img), axis=1)
-
-    total_img = np.clip(total_img, 0, 1)
-    total_img *= 255.0
-    total_img = total_img.astype('uint8')
-
-    return total_img
+    try:
+        source = np.array(source)
+        target = np.array(target)
+
+        # prepare the source face and extract its identity embedding
+        source_a = RetinaFace(np.expand_dims(source, axis=0)).numpy()[0]
+        source_h, source_w, _ = source.shape
+        source_lm = get_lm(source_a, source_w, source_h)
+        source_aligned = norm_crop(source, source_lm, image_size=256)
+        source_z = ArcFace.predict(np.expand_dims(tf.image.resize(source_aligned, [112, 112]) / 255.0, axis=0))
+
+        # read frame
+        im = target
+        im_h, im_w, _ = im.shape
+        im_shape = (im_w, im_h)
+
+        detection_scale = im_w // 640 if im_w > 640 else 1
+
+        faces = RetinaFace(np.expand_dims(cv2.resize(im,
+                                                     (im_w // detection_scale,
+                                                      im_h // detection_scale)), axis=0)).numpy()
+
+        total_img = im / 255.0
+        for annotation in faces:
+            lm_align = np.array([[annotation[4] * im_w, annotation[5] * im_h],
+                                 [annotation[6] * im_w, annotation[7] * im_h],
+                                 [annotation[8] * im_w, annotation[9] * im_h],
+                                 [annotation[10] * im_w, annotation[11] * im_h],
+                                 [annotation[12] * im_w, annotation[13] * im_h]],
+                                dtype=np.float32)
+
+            # align the detected face
+            M, pose_index = estimate_norm(lm_align, 256, "arcface", shrink_factor=1.0)
+            im_aligned = cv2.warpAffine(im, M, (256, 256), borderValue=0.0)
+
+            # face swap
+            changed_face_cage = G.predict([np.expand_dims((im_aligned - 127.5) / 127.5, axis=0),
+                                           source_z])
+            changed_face = (changed_face_cage[0] + 1) / 2
+
+            # get inverse transformation landmarks
+            transformed_lmk = transform_landmark_points(M, lm_align)
+
+            # warp image back
+            iM, _ = inverse_estimate_norm(lm_align, transformed_lmk, 256, "arcface", shrink_factor=1.0)
+            iim_aligned = cv2.warpAffine(changed_face, iM, im_shape, borderValue=0.0)
+
+            # blend swapped face with target image
+            blend_mask = cv2.warpAffine(blend_mask_base, iM, im_shape, borderValue=0.0)
+            blend_mask = np.expand_dims(blend_mask, axis=-1)
+            total_img = (iim_aligned * blend_mask + total_img * (1 - blend_mask))
+
+        if opt.compare:
+            total_img = np.concatenate((im / 255.0, total_img), axis=1)
+
+        total_img = np.clip(total_img, 0, 1)
+        total_img *= 255.0
+        total_img = total_img.astype('uint8')
+
+        return total_img
+    except Exception as e:
+        print(e)
+        return None
 
 description = "Performs subject agnostic identity transfer from a source face to all target faces."
 examples = [["elon_musk_example.jpg", "rick_astely_example.jpg"], ["10017.png", "9538.png"]]
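
For context: app.py here is the entry point of a Hugging Face Space, and description and examples read like typical gradio.Interface keyword arguments. The interface definition itself lives outside this hunk, so the following is only a minimal sketch of how run_inference might be wired up, assuming a recent Gradio API; the variable name demo and the input/output labels are illustrative, not taken from the repository.

import gradio as gr

# Hypothetical wiring: run_inference(target, source) returns a uint8 image,
# or None if detection or swapping raised an exception.
demo = gr.Interface(fn=run_inference,
                    inputs=[gr.Image(label="Target"), gr.Image(label="Source")],
                    outputs=gr.Image(label="Result"),
                    description=description,
                    examples=examples)
demo.launch()

One design note on the commit itself: the blanket except Exception means any failure inside run_inference, including a NameError from the if opt.compare: branch should opt not be defined elsewhere in app.py, is merely printed to the log, and the caller sees an empty output rather than an error.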