Charbel Malo committed on
Commit 1118f2d · verified · 1 Parent(s): be02ec2

Update face_swapper.py

Files changed (1)
  1. face_swapper.py +11 -3
face_swapper.py CHANGED
@@ -20,6 +20,7 @@ arcface_dst = np.array(
     dtype=np.float32)
 
 
+@spaces.GPU(enable_queue=True)
 def estimate_norm(lmk, image_size=112, mode='arcface'):
     assert lmk.shape == (5, 2)
     assert image_size % 112 == 0 or image_size % 128 == 0
@@ -37,14 +38,16 @@ def estimate_norm(lmk, image_size=112, mode='arcface'):
     return M
 
 
+@spaces.GPU(enable_queue=True)
 def norm_crop2(img, landmark, image_size=112, mode='arcface'):
     M = estimate_norm(landmark, image_size, mode)
     warped = cv2.warpAffine(img, M, (image_size, image_size), borderValue=0.0)
     return warped, M
 
 
-@spaces.GPU(enable_queue=True)
 class Inswapper():
+
+    @spaces.GPU(enable_queue=True)
     def __init__(self, model_file=None, batch_size=32, providers=['CUDAExecutionProvider,CPUExecutionProvider']):
         self.model_file = model_file
         self.batch_size = batch_size
@@ -56,13 +59,15 @@ class Inswapper():
         self.session_options = onnxruntime.SessionOptions()
         self.session = onnxruntime.InferenceSession(self.model_file, sess_options=self.session_options, providers=providers)
 
+    @spaces.GPU(enable_queue=True)
     def forward(self, imgs, latents):
         preds = []
         for img, latent in zip(imgs, latents):
             img = img / 255
             pred = self.session.run(['output'], {'target': img, 'source': latent})[0]
             preds.append(pred)
-
+
+    @spaces.GPU(enable_queue=True)
     def get(self, imgs, target_faces, source_faces):
         imgs = list(imgs)
 
@@ -79,7 +84,8 @@ class Inswapper():
             matrs[idx] = matrix
 
         return (preds, matrs)
-
+
+    @spaces.GPU(enable_queue=True)
     def prepare_data(self, img, target_face, source_face):
         if isinstance(img, str):
             img = cv2.imread(img)
@@ -94,6 +100,7 @@ class Inswapper():
 
         return (matrix, blob, latent)
 
+    @spaces.GPU(enable_queue=True)
     def batch_forward(self, img_list, target_f_list, source_f_list):
         num_samples = len(img_list)
         num_batches = (num_samples + self.batch_size - 1) // self.batch_size
@@ -111,6 +118,7 @@ class Inswapper():
         yield batch_pred, batch_matr
 
 
+@spaces.GPU(enable_queue=True)
 def paste_to_whole(foreground, background, matrix, mask=None, crop_mask=(0,0,0,0), blur_amount=0.1, erode_amount = 0.15, blend_method='linear'):
     inv_matrix = cv2.invertAffineTransform(matrix)
     fg_shape = foreground.shape[:2]
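
For context, @spaces.GPU is the Hugging Face ZeroGPU decorator, which requests a GPU only for the duration of each decorated call. The commit removes the decorator from the class Inswapper statement and instead applies it to the individual module-level functions and Inswapper methods, so each call acquires and releases the GPU on its own. Below is a minimal calling sketch, assuming the module runs inside a ZeroGPU Space with the spaces package installed; the model path and the swap_one helper are hypothetical and not part of this commit.

# Minimal sketch, assuming a Hugging Face ZeroGPU Space with the `spaces`
# package installed; the model path and swap_one() are hypothetical examples.
from face_swapper import Inswapper, paste_to_whole

swapper = Inswapper(model_file="inswapper_128.onnx")  # hypothetical model path

def swap_one(frame, target_face, source_face):
    # get() is now wrapped by @spaces.GPU(enable_queue=True), so the GPU is
    # attached only while this call runs on the Space.
    preds, matrs = swapper.get([frame], [target_face], [source_face])
    # paste_to_whole() warps the swapped crop back into the original frame.
    return paste_to_whole(preds[0], frame, matrs[0])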