Ganrt committed
Commit 9ddf813
1 Parent(s): da6d14f
Files changed (1)
  1. inference.py +13 -11
inference.py CHANGED

@@ -33,7 +33,7 @@ def main():
     print('[Info] Using {} for inference.'.format(device))
     os.makedirs(os.path.join('temp', args.tmp_dir), exist_ok=True)
 
-    enhancer = FaceEnhancement(base_dir='checkpoints', size=512, model='GPEN-BFR-512', use_sr=False, \
+    enhancer = FaceEnhancement(base_dir='checkpoints', size=1024, model='GPEN-BFR-1024', use_sr=False, \
         sr_model='rrdb_realesrnet_psnr', channel_multiplier=2, narrow=1, device=device)
     restorer = GFPGANer(model_path='checkpoints/GFPGANv1.3.pth', upscale=1, arch='clean', \
         channel_multiplier=2, bg_upsampler=None)
@@ -149,7 +149,7 @@ def main():
 
     if not os.path.isfile('temp/'+base_name+'_stablized.npy') or args.re_preprocess:
         imgs = []
-        for idx in tqdm(range(len(frames_pil)), desc="[Step 3] Stablize the expression In Video:"):
+        for idx in tqdm(range(len(frames_pil)), desc="[Step 3] Stabilize the expression In Video:"):
             if args.one_shot:
                 source_img = trans_image(frames_pil[0]).unsqueeze(0).to(device)
                 semantic_source_numpy = semantic_npy[0:1]
@@ -168,7 +168,7 @@ def main():
         np.save('temp/'+base_name+'_stablized.npy', imgs)
         del D_Net
     else:
-        print('[Step 3] Using saved stablized video.')
+        print('[Step 3] Using saved stabilized video.')
         imgs = np.load('temp/'+base_name+'_stablized.npy')
     torch.cuda.empty_cache()
 
@@ -249,22 +249,24 @@ def main():
 
         ff = xf.copy()
         ff[y1:y2, x1:x2] = p
-
+        height, width = ff.shape[:2]
+        pp = np.uint8(cv2.resize(np.clip(ff, 0, 512), (width, height)))
+
+        pp, orig_faces, enhanced_faces = enhancer.process(pp, xf, bbox=c, face_enhance=True, possion_blending=False)
         # month region enhancement by GFPGAN
         cropped_faces, restored_faces, restored_img = restorer.enhance(
-            ff, has_aligned=False, only_center_face=True, paste_back=True)
+            pp, has_aligned=False, only_center_face=True, paste_back=True)
         # 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
         mm = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0]
+        #mm = [0, 255, 255, 255, 255, 255, 255, 255, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0]
         mouse_mask = np.zeros_like(restored_img)
         tmp_mask = enhancer.faceparser.process(restored_img[y1:y2, x1:x2], mm)[0]
         mouse_mask[y1:y2, x1:x2] = cv2.resize(tmp_mask, (x2 - x1, y2 - y1))[:, :, np.newaxis] / 255.
 
-        height, width = ff.shape[:2]
-        restored_img, ff, full_mask = [cv2.resize(x, (512, 512)) for x in (restored_img, ff, np.float32(mouse_mask))]
+
+        restored_img, ff, full_mask = [cv2.resize(x, (1024, 1024)) for x in (restored_img, ff, np.float32(mouse_mask))]
         img = Laplacian_Pyramid_Blending_with_mask(restored_img, ff, full_mask[:, :, 0], 10)
-        pp = np.uint8(cv2.resize(np.clip(img, 0, 255), (width, height)))
-
-        pp, orig_faces, enhanced_faces = enhancer.process(pp, xf, bbox=c, face_enhance=False, possion_blending=True)
+        pp = np.uint8(cv2.resize(np.clip(img, 0, 1024), (width, height)))
         out.write(pp)
     out.release()
 
@@ -342,4 +344,4 @@ def datagen(frames, mels, full_frames, frames_pil, cox):
 
 
 if __name__ == '__main__':
-    main()
+    main()
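
For context, the first hunk swaps the GPEN face enhancer from the 512-px model to GPEN-BFR-1024, so the matching 1024 weights need to sit under checkpoints/ alongside GFPGANv1.3.pth. Below is a minimal sketch of the two enhancers as configured after this commit; the constructor arguments are taken from the hunk itself, while the import paths are assumptions that depend on how GPEN and GFPGAN are vendored in this repository.

# Sketch only: mirrors the enhancer setup from the first hunk of this commit.
# Import paths are assumptions; point them at wherever GPEN/GFPGAN live in the repo.
import torch
from gfpgan import GFPGANer                    # GFPGAN package
from face_enhancement import FaceEnhancement   # hypothetical path to the GPEN wrapper

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# After this commit: the 1024-px GPEN model replaces GPEN-BFR-512.
enhancer = FaceEnhancement(base_dir='checkpoints', size=1024, model='GPEN-BFR-1024',
                           use_sr=False, sr_model='rrdb_realesrnet_psnr',
                           channel_multiplier=2, narrow=1, device=device)

# Unchanged: GFPGAN v1.3 restorer used later for the mouth region.
restorer = GFPGANer(model_path='checkpoints/GFPGANv1.3.pth', upscale=1,
                    arch='clean', channel_multiplier=2, bg_upsampler=None)

If the GPEN-BFR-1024 weights are missing, construction will presumably fail before any frame is processed, so this is worth checking before running long clips.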
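
The large hunk near line 249 reorders the per-frame post-processing: the full frame now goes through the GPEN pass (face_enhance=True, possion_blending=False) before GFPGAN restores it, and the Laplacian pyramid blend runs at 1024x1024 instead of 512x512. The sketch below condenses that order into a hypothetical helper, enhance_frame; enhancer, restorer, and Laplacian_Pyramid_Blending_with_mask (passed as blend_fn) are the objects already used in inference.py, and xf (full frame), p (generated mouth patch), c (face bbox), and the x1/y1/x2/y2 crop box are assumed to come from the surrounding loop. Clipping bounds are normalized to 255 here and the same-size resize from the commit is dropped for readability.

import cv2
import numpy as np

def enhance_frame(enhancer, restorer, blend_fn, xf, p, c, x1, y1, x2, y2):
    """Hypothetical helper mirroring the post-commit order of operations."""
    ff = xf.copy()
    ff[y1:y2, x1:x2] = p                      # paste the generated mouth patch
    height, width = ff.shape[:2]

    # 1) GPEN full-frame pass first (new order), Poisson blending disabled.
    pp = np.uint8(np.clip(ff, 0, 255))
    pp, orig_faces, enhanced_faces = enhancer.process(
        pp, xf, bbox=c, face_enhance=True, possion_blending=False)

    # 2) GFPGAN then restores the already-enhanced frame, center face only.
    cropped_faces, restored_faces, restored_img = restorer.enhance(
        pp, has_aligned=False, only_center_face=True, paste_back=True)

    # 3) Parsing-based mask over the region selected by mm (indices 10-12 kept,
    #    per the mouth-region comment in inference.py).
    mm = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 0, 0, 0, 0, 0, 0]
    mouth_mask = np.zeros_like(restored_img)
    tmp_mask = enhancer.faceparser.process(restored_img[y1:y2, x1:x2], mm)[0]
    mouth_mask[y1:y2, x1:x2] = cv2.resize(tmp_mask, (x2 - x1, y2 - y1))[:, :, np.newaxis] / 255.

    # 4) Blend the restored and pasted frames at 1024x1024 (was 512x512),
    #    then resize back to the original frame size.
    restored_img, ff, full_mask = [cv2.resize(x, (1024, 1024))
                                   for x in (restored_img, ff, np.float32(mouth_mask))]
    img = blend_fn(restored_img, ff, full_mask[:, :, 0], 10)
    return np.uint8(cv2.resize(np.clip(img, 0, 255), (width, height)))

In inference.py itself this sequence runs inside the frame loop, with the result written straight to the output video via out.write(pp).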