victorisgeek committed
Commit 39732fd
1 Parent(s): 5e52400

Upload 3 files

Files changed (3):
  1. image_test.py +107 -0
  2. image_test_multi_face.py +146 -0
  3. video_test.py +90 -0
image_test.py ADDED
@@ -0,0 +1,107 @@
import paddle
import argparse
import cv2
import numpy as np
import os
from models.model import FaceSwap, l2_norm
from models.arcface import IRBlock, ResNet
from utils.align_face import back_matrix, dealign, align_img
from utils.util import paddle2cv, cv2paddle
from utils.prepare_data import LandmarkModel


def get_id_emb(id_net, id_img_path):
    # Load the aligned source face, normalize it with ImageNet statistics,
    # and extract the ArcFace identity embedding.
    id_img = cv2.imread(id_img_path)
    id_img = cv2.resize(id_img, (112, 112))
    id_img = cv2paddle(id_img)
    mean = paddle.to_tensor([[0.485, 0.456, 0.406]]).reshape((1, 3, 1, 1))
    std = paddle.to_tensor([[0.229, 0.224, 0.225]]).reshape((1, 3, 1, 1))
    id_img = (id_img - mean) / std

    id_emb, id_feature = id_net(id_img)
    id_emb = l2_norm(id_emb)

    return id_emb, id_feature


def image_test(args):
    paddle.set_device('gpu' if args.use_gpu else 'cpu')
    faceswap_model = FaceSwap(args.use_gpu)

    # ArcFace identity encoder.
    id_net = ResNet(block=IRBlock, layers=[3, 4, 23, 3])
    id_net.set_dict(paddle.load('./checkpoints/arcface.pdparams'))
    id_net.eval()

    weight = paddle.load('./checkpoints/MobileFaceSwap_224.pdparams')

    base_path = args.source_img_path.replace('.png', '').replace('.jpg', '').replace('.jpeg', '')
    id_emb, id_feature = get_id_emb(id_net, base_path + '_aligned.png')

    faceswap_model.set_model_param(id_emb, id_feature, model_weight=weight)
    faceswap_model.eval()

    if os.path.isfile(args.target_img_path):
        img_list = [args.target_img_path]
    else:
        img_list = [os.path.join(args.target_img_path, x) for x in os.listdir(args.target_img_path)
                    if x.endswith(('png', 'jpg', 'jpeg'))]

    for img_path in img_list:
        origin_att_img = cv2.imread(img_path)
        base_path = img_path.replace('.png', '').replace('.jpg', '').replace('.jpeg', '')
        att_img = cv2.imread(base_path + '_aligned.png')
        att_img = cv2paddle(att_img)

        res, mask = faceswap_model(att_img)
        res = paddle2cv(res)

        if args.merge_result:
            # Paste the swapped crop back into the full target image.
            back_matrix = np.load(base_path + '_back.npy')
            mask = np.transpose(mask[0].numpy(), (1, 2, 0))
            res = dealign(res, origin_att_img, back_matrix, mask)
        cv2.imwrite(os.path.join(args.output_dir, os.path.basename(img_path)), res)


def face_align(landmarkModel, image_path, merge_result=False, image_size=224):
    # Detect one face per image, save the aligned crop and, if requested,
    # the back matrix needed to paste the result into the original image.
    if os.path.isfile(image_path):
        img_list = [image_path]
    else:
        img_list = [os.path.join(image_path, x) for x in os.listdir(image_path)
                    if x.endswith(('png', 'jpg', 'jpeg'))]
    for path in img_list:
        img = cv2.imread(path)
        landmark = landmarkModel.get(img)
        if landmark is not None:
            base_path = path.replace('.png', '').replace('.jpg', '').replace('.jpeg', '')
            aligned_img, back_matrix = align_img(img, landmark, image_size)
            cv2.imwrite(base_path + '_aligned.png', aligned_img)
            if merge_result:
                np.save(base_path + '_back.npy', back_matrix)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="MobileFaceSwap Test")
    parser.add_argument('--source_img_path', type=str, help='path to the source image')
    parser.add_argument('--target_img_path', type=str, help='path to the target images')
    parser.add_argument('--output_dir', type=str, default='results', help='path to the output dir')
    parser.add_argument('--image_size', type=int, default=224, help='size of the test images (224 SimSwap | 256 FaceShifter)')
    parser.add_argument('--merge_result', type=bool, default=True, help='output the whole merged image')
    parser.add_argument('--need_align', type=bool, default=True, help='align the images before swapping')
    parser.add_argument('--use_gpu', type=bool, default=False)

    args = parser.parse_args()
    if args.need_align:
        landmarkModel = LandmarkModel(name='landmarks')
        landmarkModel.prepare(ctx_id=0, det_thresh=0.6, det_size=(640, 640))
        face_align(landmarkModel, args.source_img_path)
        face_align(landmarkModel, args.target_img_path, args.merge_result, args.image_size)
    os.makedirs(args.output_dir, exist_ok=True)
    image_test(args)
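For reference, a minimal programmatic driver that mirrors the script's __main__ block, assuming the repo root is on the Python path, the checkpoints live under ./checkpoints/, and the image paths below are placeholders:

# Hypothetical driver for image_test.py; mirrors its __main__ block.
import os
from argparse import Namespace

from image_test import face_align, image_test
from utils.prepare_data import LandmarkModel

args = Namespace(
    source_img_path='data/source.jpg',   # placeholder source face
    target_img_path='data/targets',      # placeholder: single file or folder
    output_dir='results', image_size=224,
    merge_result=True, need_align=True, use_gpu=False)

landmark_model = LandmarkModel(name='landmarks')
landmark_model.prepare(ctx_id=0, det_thresh=0.6, det_size=(640, 640))
face_align(landmark_model, args.source_img_path)                                       # writes *_aligned.png
face_align(landmark_model, args.target_img_path, args.merge_result, args.image_size)   # also writes *_back.npy
os.makedirs(args.output_dir, exist_ok=True)
image_test(args)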
image_test_multi_face.py ADDED
@@ -0,0 +1,146 @@
import paddle
import argparse
import cv2
import numpy as np
import os
from models.model import FaceSwap, l2_norm
from models.arcface import IRBlock, ResNet
from utils.align_face import back_matrix, dealign, align_img
from utils.util import paddle2cv, cv2paddle
from utils.prepare_data import LandmarkModel


def get_id_emb(id_net, id_img_path):
    # Kept for parity with image_test.py; delegates to the in-memory variant below.
    id_img = cv2.imread(id_img_path)
    return get_id_emb_from_image(id_net, id_img)


def get_id_emb_from_image(id_net, id_img):
    # Normalize an already-loaded aligned face and extract the ArcFace identity embedding.
    id_img = cv2.resize(id_img, (112, 112))
    id_img = cv2paddle(id_img)
    mean = paddle.to_tensor([[0.485, 0.456, 0.406]]).reshape((1, 3, 1, 1))
    std = paddle.to_tensor([[0.229, 0.224, 0.225]]).reshape((1, 3, 1, 1))
    id_img = (id_img - mean) / std
    id_emb, id_feature = id_net(id_img)
    id_emb = l2_norm(id_emb)

    return id_emb, id_feature


def image_test_multi_face(args, source_aligned_images, target_aligned_images):
    paddle.set_device('gpu' if args.use_gpu else 'cpu')
    faceswap_model = FaceSwap(args.use_gpu)

    id_net = ResNet(block=IRBlock, layers=[3, 4, 23, 3])
    id_net.set_dict(paddle.load('./checkpoints/arcface.pdparams'))
    id_net.eval()

    weight = paddle.load('./checkpoints/MobileFaceSwap_224.pdparams')

    start_idx = args.target_img_path.rfind('/')
    if start_idx > 0:
        target_name = args.target_img_path[start_idx:]
    else:
        target_name = args.target_img_path
    origin_att_img = cv2.imread(args.target_img_path)

    for idx, target_aligned_image in enumerate(target_aligned_images):
        # Source faces are reused round-robin when there are more target faces than sources.
        id_emb, id_feature = get_id_emb_from_image(id_net, source_aligned_images[idx % len(source_aligned_images)][0])
        faceswap_model.set_model_param(id_emb, id_feature, model_weight=weight)
        faceswap_model.eval()

        att_img = cv2paddle(target_aligned_image[0])
        res, mask = faceswap_model(att_img)
        res = paddle2cv(res)

        # Paste each swapped face back into the full target image.
        back_matrix = target_aligned_images[idx % len(target_aligned_images)][1]
        mask = np.transpose(mask[0].numpy(), (1, 2, 0))
        origin_att_img = dealign(res, origin_att_img, back_matrix, mask)

    cv2.imwrite(os.path.join(args.output_dir, os.path.basename(target_name)), origin_att_img)


def face_align(landmarkModel, image_path, merge_result=False, image_size=224):
    # Detect one face per image and save the aligned crop (plus the back matrix if requested).
    if os.path.isfile(image_path):
        img_list = [image_path]
    else:
        img_list = [os.path.join(image_path, x) for x in os.listdir(image_path)
                    if x.endswith(('png', 'jpg', 'jpeg'))]
    for path in img_list:
        img = cv2.imread(path)
        landmark = landmarkModel.get(img)
        if landmark is not None:
            base_path = path.replace('.png', '').replace('.jpg', '').replace('.jpeg', '')
            aligned_img, back_matrix = align_img(img, landmark, image_size)
            cv2.imwrite(base_path + '_aligned.png', aligned_img)
            if merge_result:
                np.save(base_path + '_back.npy', back_matrix)


def faces_align(landmarkModel, image_path, image_size=224):
    # Align every detected face in each image and return [aligned_img, back_matrix] pairs.
    aligned_imgs = []
    if os.path.isfile(image_path):
        img_list = [image_path]
    else:
        img_list = [os.path.join(image_path, x) for x in os.listdir(image_path)
                    if x.endswith(('png', 'jpg', 'jpeg'))]
    for path in img_list:
        img = cv2.imread(path)
        landmarks = landmarkModel.gets(img)
        for landmark in landmarks:
            if landmark is not None:
                aligned_img, back_matrix = align_img(img, landmark, image_size)
                aligned_imgs.append([aligned_img, back_matrix])
    return aligned_imgs


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="MobileFaceSwap Test")
    parser.add_argument('--source_img_path', type=str, help='path to the source image')
    parser.add_argument('--target_img_path', type=str, help='path to the target images')
    parser.add_argument('--output_dir', type=str, default='results', help='path to the output dir')
    parser.add_argument('--image_size', type=int, default=224, help='size of the test images (224 SimSwap | 256 FaceShifter)')
    parser.add_argument('--merge_result', type=bool, default=True, help='output the whole merged image')
    parser.add_argument('--need_align', type=bool, default=True, help='align the images before swapping')
    parser.add_argument('--use_gpu', type=bool, default=False)

    args = parser.parse_args()
    if args.need_align:
        landmarkModel = LandmarkModel(name='landmarks')
        landmarkModel.prepare(ctx_id=0, det_thresh=0.6, det_size=(640, 640))
        source_aligned_images = faces_align(landmarkModel, args.source_img_path)
        target_aligned_images = faces_align(landmarkModel, args.target_img_path, args.image_size)
    os.makedirs(args.output_dir, exist_ok=True)
    image_test_multi_face(args, source_aligned_images, target_aligned_images)
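The swap loop above assigns source faces to target faces round-robin through idx % len(source_aligned_images), so a single source is reused for every detected target face and multiple sources alternate. A toy illustration of that pairing rule (plain strings, no model involved):

# Round-robin pairing as used in image_test_multi_face (toy data).
source_ids = ['src_A', 'src_B']                        # two aligned source faces
target_faces = ['tgt_0', 'tgt_1', 'tgt_2', 'tgt_3']    # four faces found in the target image

for idx, tgt in enumerate(target_faces):
    src = source_ids[idx % len(source_ids)]
    print(f'{tgt} <- {src}')
# tgt_0 <- src_A, tgt_1 <- src_B, tgt_2 <- src_A, tgt_3 <- src_B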
video_test.py ADDED
@@ -0,0 +1,90 @@
import paddle
import argparse
import cv2
import numpy as np
import os
from models.model import FaceSwap, l2_norm
from models.arcface import IRBlock, ResNet
from utils.align_face import back_matrix, dealign, align_img
from utils.util import paddle2cv, cv2paddle
from utils.prepare_data import LandmarkModel
from tqdm import tqdm


def get_id_emb(id_net, id_img):
    # Normalize the aligned source face and extract the ArcFace identity embedding.
    id_img = cv2.resize(id_img, (112, 112))
    id_img = cv2paddle(id_img)
    mean = paddle.to_tensor([[0.485, 0.456, 0.406]]).reshape((1, 3, 1, 1))
    std = paddle.to_tensor([[0.229, 0.224, 0.225]]).reshape((1, 3, 1, 1))
    id_img = (id_img - mean) / std

    id_emb, id_feature = id_net(id_img)
    id_emb = l2_norm(id_emb)

    return id_emb, id_feature


def video_test(args):
    paddle.set_device('gpu' if args.use_gpu else 'cpu')
    faceswap_model = FaceSwap(args.use_gpu)

    id_net = ResNet(block=IRBlock, layers=[3, 4, 23, 3])
    id_net.set_dict(paddle.load('./checkpoints/arcface.pdparams'))
    id_net.eval()

    weight = paddle.load('./checkpoints/MobileFaceSwap_224.pdparams')

    landmarkModel = LandmarkModel(name='landmarks')
    landmarkModel.prepare(ctx_id=0, det_thresh=0.6, det_size=(640, 640))

    # Face detection and alignment for the source image.
    id_img = cv2.imread(args.source_img_path)
    landmark = landmarkModel.get(id_img)
    if landmark is None:
        print('**** No Face Detect Error ****')
        exit()
    aligned_id_img, _ = align_img(id_img, landmark)

    id_emb, id_feature = get_id_emb(id_net, aligned_id_img)

    faceswap_model.set_model_param(id_emb, id_feature, model_weight=weight)
    faceswap_model.eval()

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    cap = cv2.VideoCapture()
    cap.open(args.target_video_path)
    videoWriter = cv2.VideoWriter(
        os.path.join(args.output_path, os.path.basename(args.target_video_path)),
        fourcc,
        int(cap.get(cv2.CAP_PROP_FPS)),
        (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))
    all_f = cap.get(cv2.CAP_PROP_FRAME_COUNT)

    for i in tqdm(range(int(all_f))):
        ret, frame = cap.read()
        landmark = landmarkModel.get(frame)
        if landmark is not None:
            # Swap the detected face and paste it back into the frame.
            att_img, back_matrix = align_img(frame, landmark)
            att_img = cv2paddle(att_img)
            res, mask = faceswap_model(att_img)
            res = paddle2cv(res)
            mask = np.transpose(mask[0].numpy(), (1, 2, 0))
            res = dealign(res, frame, back_matrix, mask)
            frame = res
        else:
            print('**** No Face Detect Error ****')
        videoWriter.write(frame)
    cap.release()
    videoWriter.release()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="MobileFaceSwap Test")
    parser.add_argument('--source_img_path', type=str, help='path to the source image')
    parser.add_argument('--target_video_path', type=str, help='path to the target video')
    parser.add_argument('--output_path', type=str, default='results', help='path to the output videos')
    parser.add_argument('--image_size', type=int, default=224, help='size of the test images (224 SimSwap | 256 FaceShifter)')
    parser.add_argument('--merge_result', type=bool, default=True, help='output the whole merged image')
    parser.add_argument('--use_gpu', type=bool, default=False)

    args = parser.parse_args()
    os.makedirs(args.output_path, exist_ok=True)
    video_test(args)
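A minimal programmatic invocation of video_test, assuming the repo root is on the Python path and the checkpoints are downloaded; the paths below are placeholders. The output video reuses the target's basename with the mp4v codec, and OpenCV's VideoWriter does not carry over the audio track.

# Hypothetical driver for video_test.py; mirrors its __main__ block.
import os
from argparse import Namespace

from video_test import video_test

args = Namespace(
    source_img_path='data/source.jpg',     # placeholder source face
    target_video_path='data/target.mp4',   # placeholder target video
    output_path='results', image_size=224,
    merge_result=True, use_gpu=False)

os.makedirs(args.output_path, exist_ok=True)
video_test(args)                           # writes results/target.mp4 (video only, no audio)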