victorisgeek
committed
Upload 3 files
- dofaker/face_swap/__init__.py +8 -0
- dofaker/face_swap/base_swapper.py +13 -0
- dofaker/face_swap/inswapper.py +135 -0
dofaker/face_swap/__init__.py
ADDED
@@ -0,0 +1,8 @@
from .inswapper import InSwapper


def get_swapper_model(name='', root=None, **kwargs):
    if name.lower() == 'inswapper':
        return InSwapper(name=name, root=root, **kwargs)
    else:
        raise UserWarning('The swapper model {} is not supported.'.format(name))
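A minimal usage sketch of this factory (not part of the commit), assuming the dofaker package and its model-weight URLs are set up; 'simswap' below is just an arbitrary name used to show the unsupported branch:

from dofaker.face_swap import get_swapper_model

# The root path mirrors InSwapper's default weights directory.
swapper = get_swapper_model(name='inswapper', root='weights/models')

# Any name other than 'inswapper' raises UserWarning.
try:
    get_swapper_model(name='simswap')
except UserWarning as err:
    print(err)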
dofaker/face_swap/base_swapper.py
ADDED
@@ -0,0 +1,13 @@
class BaseSwapper:

    def forward(self, img, latent, *args, **kwargs):
        raise NotImplementedError

    def get(self,
            img,
            target_face,
            source_face,
            paste_back=True,
            *args,
            **kwargs):
        raise NotImplementedError
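For illustration only, a toy subclass sketch (hypothetical, not in this commit) showing the contract these stubs define; a real swapper would run a model instead of returning the input unchanged:

from dofaker.face_swap.base_swapper import BaseSwapper


class IdentitySwapper(BaseSwapper):
    # Hypothetical no-op swapper used only to illustrate the interface.

    def forward(self, img, latent, *args, **kwargs):
        # A real implementation runs its model on the aligned crop and latent.
        return img

    def get(self, img, target_face, source_face, paste_back=True, *args, **kwargs):
        # A real implementation aligns target_face, calls forward(), and
        # optionally pastes the swapped crop back into img.
        return img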
dofaker/face_swap/inswapper.py
ADDED
@@ -0,0 +1,135 @@
import numpy as np
import cv2
import onnx
from onnx import numpy_helper

from insightface import model_zoo
from insightface.utils import face_align
from .base_swapper import BaseSwapper

from dofaker.utils import download_file, get_model_url


class InSwapper(BaseSwapper):

    def __init__(self, name='inswapper', root='weights/models'):
        _, model_file = download_file(get_model_url(name),
                                      save_dir=root,
                                      overwrite=False)
        providers = model_zoo.model_zoo.get_default_providers()
        self.session = model_zoo.model_zoo.PickableInferenceSession(
            model_file, providers=providers)

        model = onnx.load(model_file)
        graph = model.graph
        self.emap = numpy_helper.to_array(graph.initializer[-1])
        self.input_mean = 0.0
        self.input_std = 255.0

        inputs = self.session.get_inputs()
        self.input_names = []
        for inp in inputs:
            self.input_names.append(inp.name)
        outputs = self.session.get_outputs()
        output_names = []
        for out in outputs:
            output_names.append(out.name)
        self.output_names = output_names
        assert len(
            self.output_names
        ) == 1, "The output number of inswapper model should be 1, but got {}, please check your model.".format(
            len(self.output_names))
        output_shape = outputs[0].shape
        input_cfg = inputs[0]
        input_shape = input_cfg.shape
        self.input_shape = input_shape
        print('inswapper-shape:', self.input_shape)
        self.input_size = tuple(input_shape[2:4][::-1])

    def forward(self, img, latent):
        img = (img - self.input_mean) / self.input_std
        pred = self.session.run(self.output_names, {
            self.input_names[0]: img,
            self.input_names[1]: latent
        })[0]
        return pred

    def get(self, img, target_face, source_face, paste_back=True):
        aimg, M = face_align.norm_crop2(img, target_face.kps,
                                        self.input_size[0])
        blob = cv2.dnn.blobFromImage(
            aimg,
            1.0 / self.input_std,
            self.input_size,
            (self.input_mean, self.input_mean, self.input_mean),
            swapRB=True)
        latent = source_face.normed_embedding.reshape((1, -1))
        latent = np.dot(latent, self.emap)
        latent /= np.linalg.norm(latent)
        pred = self.session.run(self.output_names, {
            self.input_names[0]: blob,
            self.input_names[1]: latent
        })[0]
        img_fake = pred.transpose((0, 2, 3, 1))[0]
        bgr_fake = np.clip(255 * img_fake, 0, 255).astype(np.uint8)[:, :, ::-1]
        if not paste_back:
            return bgr_fake, M
        else:
            target_img = img
            fake_diff = bgr_fake.astype(np.float32) - aimg.astype(np.float32)
            fake_diff = np.abs(fake_diff).mean(axis=2)
            fake_diff[:2, :] = 0
            fake_diff[-2:, :] = 0
            fake_diff[:, :2] = 0
            fake_diff[:, -2:] = 0
            IM = cv2.invertAffineTransform(M)
            img_white = np.full((aimg.shape[0], aimg.shape[1]),
                                255,
                                dtype=np.float32)
            bgr_fake = cv2.warpAffine(
                bgr_fake,
                IM, (target_img.shape[1], target_img.shape[0]),
                borderValue=0.0)
            img_white = cv2.warpAffine(
                img_white,
                IM, (target_img.shape[1], target_img.shape[0]),
                borderValue=0.0)
            fake_diff = cv2.warpAffine(
                fake_diff,
                IM, (target_img.shape[1], target_img.shape[0]),
                borderValue=0.0)
            img_white[img_white > 20] = 255
            fthresh = 10
            fake_diff[fake_diff < fthresh] = 0
            fake_diff[fake_diff >= fthresh] = 255
            img_mask = img_white
            mask_h_inds, mask_w_inds = np.where(img_mask == 255)
            mask_h = np.max(mask_h_inds) - np.min(mask_h_inds)
            mask_w = np.max(mask_w_inds) - np.min(mask_w_inds)
            mask_size = int(np.sqrt(mask_h * mask_w))
            k = max(mask_size // 10, 10)
            #k = max(mask_size//20, 6)
            #k = 6
            kernel = np.ones((k, k), np.uint8)
            img_mask = cv2.erode(img_mask, kernel, iterations=1)
            kernel = np.ones((2, 2), np.uint8)
            fake_diff = cv2.dilate(fake_diff, kernel, iterations=1)
            k = max(mask_size // 20, 5)
            #k = 3
            #k = 3
            kernel_size = (k, k)
            blur_size = tuple(2 * i + 1 for i in kernel_size)
            img_mask = cv2.GaussianBlur(img_mask, blur_size, 0)
            k = 5
            kernel_size = (k, k)
            blur_size = tuple(2 * i + 1 for i in kernel_size)
            fake_diff = cv2.GaussianBlur(fake_diff, blur_size, 0)
            img_mask /= 255
            fake_diff /= 255
            #img_mask = fake_diff
            img_mask = np.reshape(img_mask,
                                  [img_mask.shape[0], img_mask.shape[1], 1])
            fake_merged = img_mask * bgr_fake + (
                1 - img_mask) * target_img.astype(np.float32)
            fake_merged = fake_merged.astype(np.uint8)
            return fake_merged
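An end-to-end usage sketch (not part of the commit): InSwapper.get aligns the target face, runs the ONNX swapper on the crop with the projected source embedding, and with paste_back=True blends the result back into the full frame. The sketch assumes insightface's FaceAnalysis detector supplies faces with kps and normed_embedding, that get_model_url knows the 'inswapper' weights, and uses placeholder image paths:

import cv2
from insightface.app import FaceAnalysis

from dofaker.face_swap import InSwapper

# Detect and embed faces with insightface (buffalo_l is a common model pack).
app = FaceAnalysis(name='buffalo_l')
app.prepare(ctx_id=0, det_size=(640, 640))

source_img = cv2.imread('source.jpg')   # placeholder path: identity to transfer
target_img = cv2.imread('target.jpg')   # placeholder path: face to be replaced
source_face = app.get(source_img)[0]
target_face = app.get(target_img)[0]

swapper = InSwapper(name='inswapper', root='weights/models')
# paste_back=True returns the full target image with the swapped face blended in.
result = swapper.get(target_img, target_face, source_face, paste_back=True)
cv2.imwrite('swapped.jpg', result)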