LuisFlow committed on
Commit f3236f9 • 1 Parent(s): db36ede

Upload 27 files

.gitattributes CHANGED
@@ -1,35 +1,27 @@
  *.7z filter=lfs diff=lfs merge=lfs -text
  *.arrow filter=lfs diff=lfs merge=lfs -text
  *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
  *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
  *.ftz filter=lfs diff=lfs merge=lfs -text
  *.gz filter=lfs diff=lfs merge=lfs -text
  *.h5 filter=lfs diff=lfs merge=lfs -text
  *.joblib filter=lfs diff=lfs merge=lfs -text
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
  *.model filter=lfs diff=lfs merge=lfs -text
  *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
  *.onnx filter=lfs diff=lfs merge=lfs -text
  *.ot filter=lfs diff=lfs merge=lfs -text
  *.parquet filter=lfs diff=lfs merge=lfs -text
  *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
  *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
  *.tflite filter=lfs diff=lfs merge=lfs -text
  *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
  *.xz filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore.txt ADDED
@@ -0,0 +1,4 @@
+ g_model
+ flagged
+ arcface_model
+ retina_model
.idea/.gitignore ADDED
@@ -0,0 +1,3 @@
+ # Default ignored files
+ /shelf/
+ /workspace.xml
.idea/AFFA-face-swap.iml ADDED
@@ -0,0 +1,15 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <module type="PYTHON_MODULE" version="4">
+   <component name="NewModuleRootManager">
+     <content url="file://$MODULE_DIR$" />
+     <orderEntry type="jdk" jdkName="Python 3.8 (py38)" jdkType="Python SDK" />
+     <orderEntry type="sourceFolder" forTests="false" />
+   </component>
+   <component name="PyDocumentationSettings">
+     <option name="format" value="PLAIN" />
+     <option name="myDocStringFormat" value="Plain" />
+   </component>
+   <component name="TestRunnerService">
+     <option name="PROJECT_TEST_RUNNER" value="pytest" />
+   </component>
+ </module>
.idea/inspectionProfiles/Project_Default.xml ADDED
@@ -0,0 +1,38 @@
+ <component name="InspectionProjectProfileManager">
+   <profile version="1.0">
+     <option name="myName" value="Project Default" />
+     <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
+       <option name="ignoredPackages">
+         <value>
+           <list size="3">
+             <item index="0" class="java.lang.String" itemvalue="ipython" />
+             <item index="1" class="java.lang.String" itemvalue="Cython" />
+             <item index="2" class="java.lang.String" itemvalue="tensorflow-gpu" />
+           </list>
+         </value>
+       </option>
+     </inspection_tool>
+     <inspection_tool class="PyPep8Inspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
+       <option name="ignoredErrors">
+         <list>
+           <option value="E402" />
+         </list>
+       </option>
+     </inspection_tool>
+     <inspection_tool class="PyPep8NamingInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
+       <option name="ignoredErrors">
+         <list>
+           <option value="N806" />
+           <option value="N812" />
+         </list>
+       </option>
+     </inspection_tool>
+     <inspection_tool class="PyUnresolvedReferencesInspection" enabled="true" level="WARNING" enabled_by_default="true">
+       <option name="ignoredIdentifiers">
+         <list>
+           <option value="torch.backends.cudnn" />
+         </list>
+       </option>
+     </inspection_tool>
+   </profile>
+ </component>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
+ <component name="InspectionProjectProfileManager">
+   <settings>
+     <option name="USE_PROJECT_PROFILE" value="false" />
+     <version value="1.0" />
+   </settings>
+ </component>
.idea/misc.xml ADDED
@@ -0,0 +1,4 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8 (py38)" project-jdk-type="Python SDK" />
+ </project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectModuleManager">
+     <modules>
+       <module fileurl="file://$PROJECT_DIR$/.idea/AFFA-face-swap.iml" filepath="$PROJECT_DIR$/.idea/AFFA-face-swap.iml" />
+     </modules>
+   </component>
+ </project>
.idea/vcs.xml ADDED
@@ -0,0 +1,9 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="VcsDirectoryMappings">
+     <mapping directory="$PROJECT_DIR$" vcs="Git" />
+     <mapping directory="$PROJECT_DIR$/arcface_model" vcs="Git" />
+     <mapping directory="$PROJECT_DIR$/g_model" vcs="Git" />
+     <mapping directory="$PROJECT_DIR$/retina_model" vcs="Git" />
+   </component>
+ </project>
README.md CHANGED
@@ -1,13 +1,47 @@
  ---
- title: Caracara
- emoji: 📈
- colorFrom: pink
- colorTo: gray
+ title: Face Swap
+ emoji: 🧙🧙🧙🧙🧙🧙🧙🧙
+ colorFrom: purple
+ colorTo: green
  sdk: gradio
- sdk_version: 3.43.2
  app_file: app.py
  pinned: false
  license: cc-by-nc-sa-4.0
+ duplicated_from: felixrosberg/face-swap
  ---
 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Configuration
+
+ `title`: _string_
+ Display title for the Space
+
+ `emoji`: _string_
+ Space emoji (emoji-only character allowed)
+
+ `colorFrom`: _string_
+ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+ `colorTo`: _string_
+ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+ `sdk`: _string_
+ Can be either `gradio`, `streamlit`, or `static`
+
+ `sdk_version` : _string_
+ Only applicable for `streamlit` SDK.
+ See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
+
+ `app_file`: _string_
+ Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
+ Path is relative to the root of the repository.
+
+ `models`: _List[string]_
+ HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space.
+ Will be parsed automatically from your code if not specified here.
+
+ `datasets`: _List[string]_
+ HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space.
+ Will be parsed automatically from your code if not specified here.
+
+ `pinned`: _boolean_
+ Whether the Space stays on top of your list.
app.py ADDED
@@ -0,0 +1,215 @@
1
+ import gradio
+ import tensorflow as tf  # used below for tf.image.resize, tf.convert_to_tensor and tf.GradientTape
2
+ from huggingface_hub import Repository
3
+ import os
4
+
5
+ from utils.utils import norm_crop, estimate_norm, inverse_estimate_norm, transform_landmark_points, get_lm
6
+ from networks.layers import AdaIN, AdaptiveAttention
7
+ from tensorflow_addons.layers import InstanceNormalization
8
+ import numpy as np
9
+ import cv2
10
+ from scipy.ndimage import gaussian_filter
11
+
12
+ from tensorflow.keras.models import load_model
13
+ from options.swap_options import SwapOptions
14
+
15
+ # .
16
+ # token = os.environ['model_fetch']
17
+
18
+ opt = SwapOptions().parse()
19
+ token = os.environ['token']
20
+
21
+ retina_repo = Repository(local_dir="retina_models", clone_from="felixrosberg/RetinaFace")
22
+
23
+ from retinaface.models import *
24
+
25
+ RetinaFace = load_model("retina_models/RetinaFace-Res50.h5",
26
+ custom_objects={"FPN": FPN,
27
+ "SSH": SSH,
28
+ "BboxHead": BboxHead,
29
+ "LandmarkHead": LandmarkHead,
30
+ "ClassHead": ClassHead}
31
+ )
32
+
33
+ arc_repo = Repository(local_dir="arcface_model", clone_from="felixrosberg/ArcFace")
34
+ ArcFace = load_model("arcface_model/ArcFace-Res50.h5")
35
+ ArcFaceE = load_model("arcface_model/ArcFacePerceptual-Res50.h5")
36
+
37
+ g_repo = Repository(local_dir="g_model_c_hq", clone_from="felixrosberg/FaceDancer",use_auth_token=token)
38
+ G = load_model("g_model_c_hq/FaceDancer_config_c_HQ.h5", custom_objects={"AdaIN": AdaIN,
39
+ "AdaptiveAttention": AdaptiveAttention,
40
+ "InstanceNormalization": InstanceNormalization})
41
+
42
+ # r_repo = Repository(local_dir="reconstruction_attack", clone_from="felixrosberg/reconstruction_attack",
43
+ # private=True, use_auth_token=token)
44
+ # R = load_model("reconstruction_attack/reconstructor_42.h5", custom_objects={"AdaIN": AdaIN,
45
+ # "AdaptiveAttention": AdaptiveAttention,
46
+ # "InstanceNormalization": InstanceNormalization})
47
+
48
+ # permuter_repo = Repository(local_dir="identity_permuter", clone_from="felixrosberg/identitypermuter",
49
+ # private=True, use_auth_token=token, git_user="felixrosberg")
50
+
51
+ # from identity_permuter.id_permuter import identity_permuter
52
+
53
+ # IDP = identity_permuter(emb_size=32, min_arg=False)
54
+ # IDP.load_weights("identity_permuter/id_permuter.h5")
55
+
56
+ blend_mask_base = np.zeros(shape=(256, 256, 1))
57
+ blend_mask_base[80:244, 32:224] = 1
58
+ blend_mask_base = gaussian_filter(blend_mask_base, sigma=7)
59
+
60
+
61
+ def run_inference(target, source, slider, adv_slider, settings):
62
+ try:
63
+ source = np.array(source)
64
+ target = np.array(target)
65
+
66
+ # Prepare to load video
67
+ if "anonymize" not in settings:
68
+ source_a = RetinaFace(np.expand_dims(source, axis=0)).numpy()[0]
69
+ source_h, source_w, _ = source.shape
70
+ source_lm = get_lm(source_a, source_w, source_h)
71
+ source_aligned = norm_crop(source, source_lm, image_size=256)
72
+ source_z = ArcFace.predict(np.expand_dims(tf.image.resize(source_aligned, [112, 112]) / 255.0, axis=0))
73
+ else:
74
+ source_z = None
75
+
76
+ # read frame
77
+ im = target
78
+ im_h, im_w, _ = im.shape
79
+ im_shape = (im_w, im_h)
80
+
81
+ detection_scale = im_w // 640 if im_w > 640 else 1
82
+
83
+ faces = RetinaFace(np.expand_dims(cv2.resize(im,
84
+ (im_w // detection_scale,
85
+ im_h // detection_scale)), axis=0)).numpy()
86
+
87
+ total_img = im / 255.0
88
+ for annotation in faces:
89
+ lm_align = np.array([[annotation[4] * im_w, annotation[5] * im_h],
90
+ [annotation[6] * im_w, annotation[7] * im_h],
91
+ [annotation[8] * im_w, annotation[9] * im_h],
92
+ [annotation[10] * im_w, annotation[11] * im_h],
93
+ [annotation[12] * im_w, annotation[13] * im_h]],
94
+ dtype=np.float32)
95
+
96
+ # align the detected face
97
+ M, pose_index = estimate_norm(lm_align, 256, "arcface", shrink_factor=1.0)
98
+ im_aligned = (cv2.warpAffine(im, M, (256, 256), borderValue=0.0) - 127.5) / 127.5
99
+
100
+ if "adversarial defense" in settings:
101
+ eps = adv_slider / 200
102
+ X = tf.convert_to_tensor(np.expand_dims(im_aligned, axis=0))
103
+ with tf.GradientTape() as tape:
104
+ tape.watch(X)
105
+
106
+ X_z = ArcFaceE(tf.image.resize(X * 0.5 + 0.5, [112, 112]))
107
+ output = R([X, X_z])
108
+
109
+ loss = tf.reduce_mean(tf.abs(0 - output))
110
+
111
+ gradient = tf.sign(tape.gradient(loss, X))
112
+
113
+ adv_x = X + eps * gradient
114
+ im_aligned = tf.clip_by_value(adv_x, -1, 1)[0]
115
+
116
+ if "anonymize" in settings and "reconstruction attack" not in settings:
117
+ """source_z = ArcFace.predict(np.expand_dims(tf.image.resize(im_aligned, [112, 112]) / 255.0, axis=0))
118
+ anon_ratio = int(512 * (slider / 100))
119
+ anon_vector = np.ones(shape=(1, 512))
120
+ anon_vector[:, :anon_ratio] = -1
121
+ np.random.shuffle(anon_vector)
122
+ source_z *= anon_vector"""
123
+
124
+ slider_weight = slider / 100
125
+
126
+ target_z = ArcFace.predict(np.expand_dims(tf.image.resize(im_aligned, [112, 112]) * 0.5 + 0.5, axis=0))
127
+ # source_z = IDP.predict(target_z)
128
+
129
+ source_z = slider_weight * source_z + (1 - slider_weight) * target_z
130
+
131
+ if "reconstruction attack" in settings:
132
+ source_z = ArcFaceE.predict(np.expand_dims(tf.image.resize(im_aligned, [112, 112]) * 0.5 + 0.5, axis=0))
133
+
134
+ # face swap
135
+ if "reconstruction attack" not in settings:
136
+ changed_face_cage = G.predict([np.expand_dims(im_aligned, axis=0),
137
+ source_z])
138
+ changed_face = changed_face_cage[0] * 0.5 + 0.5
139
+
140
+ # get inverse transformation landmarks
141
+ transformed_lmk = transform_landmark_points(M, lm_align)
142
+
143
+ # warp image back
144
+ iM, _ = inverse_estimate_norm(lm_align, transformed_lmk, 256, "arcface", shrink_factor=1.0)
145
+ iim_aligned = cv2.warpAffine(changed_face, iM, im_shape, borderValue=0.0)
146
+
147
+ # blend swapped face with target image
148
+ blend_mask = cv2.warpAffine(blend_mask_base, iM, im_shape, borderValue=0.0)
149
+ blend_mask = np.expand_dims(blend_mask, axis=-1)
150
+ total_img = (iim_aligned * blend_mask + total_img * (1 - blend_mask))
151
+ else:
152
+ changed_face_cage = R.predict([np.expand_dims(im_aligned, axis=0),
153
+ source_z])
154
+ changed_face = changed_face_cage[0] * 0.5 + 0.5
155
+
156
+ # get inverse transformation landmarks
157
+ transformed_lmk = transform_landmark_points(M, lm_align)
158
+
159
+ # warp image back
160
+ iM, _ = inverse_estimate_norm(lm_align, transformed_lmk, 256, "arcface", shrink_factor=1.0)
161
+ iim_aligned = cv2.warpAffine(changed_face, iM, im_shape, borderValue=0.0)
162
+
163
+ # blend swapped face with target image
164
+ blend_mask = cv2.warpAffine(blend_mask_base, iM, im_shape, borderValue=0.0)
165
+ blend_mask = np.expand_dims(blend_mask, axis=-1)
166
+ total_img = (iim_aligned * blend_mask + total_img * (1 - blend_mask))
167
+
168
+ if "compare" in settings:
169
+ total_img = np.concatenate((im / 255.0, total_img), axis=1)
170
+
171
+ total_img = np.clip(total_img, 0, 1)
172
+ total_img *= 255.0
173
+ total_img = total_img.astype('uint8')
174
+
175
+ return total_img
176
+ except Exception as e:
177
+ print(e)
178
+ return None
179
+
180
+
181
+ description = "Performs subject agnostic identity transfer from a source face to all target faces. \n\n" \
182
+ "Implementation and demo of FaceDancer, accepted to WACV 2023. \n\n" \
183
+ "Pre-print: https://arxiv.org/abs/2210.10473 \n\n" \
184
+ "Code: https://github.com/felixrosberg/FaceDancer \n\n" \
185
+ "\n\n" \
186
+ "Options:\n\n" \
187
+ "-Compare returns the target image concatenated with the results.\n\n" \
188
+ "-Anonymize will ignore the source image and perform an identity permutation of target faces.\n\n" \
189
+ "-Reconstruction attack will attempt to invert the face swap or the anonymization.\n\n" \
190
+ "-Adversarial defense will add a permutation noise that disrupts the reconstruction attack.\n\n" \
191
+ "NOTE: There is no guarantees with the anonymization process currently.\n\n" \
192
+ "NOTE: source image with too high resolution may not work properly!"
193
+ examples = [["assets/rick.jpg", "assets/musk.jpg", 100, 10, ["compare"]],
194
+ ["assets/musk.jpg", "assets/musk.jpg", 100, 10, ["anonymize"]]]
195
+ article = """
196
+ The demo is based on recent research from my Ph.D. work. The results are expected to be published in the coming months.
197
+ """
198
+
199
+ iface = gradio.Interface(run_inference,
200
+ [gradio.Image(shape=None, type="pil", label='Target'),
201
+ gradio.Image(shape=None, type="pil", label='Source'),
202
+ gradio.Slider(0, 100, default=100, label="Anonymization ratio (%)"),
203
+ gradio.Slider(0, 100, default=100, label="Adversarial defense ratio (%)"),
204
+ gradio.CheckboxGroup(["compare",
205
+ "anonymize",
206
+ "reconstruction attack",
207
+ "adversarial defense"],
208
+ label='Options')],
209
+ "image",
210
+ title="Face Swap",
211
+ description=description,
212
+ examples=examples,
213
+ article=article,
214
+ layout="vertical")
215
+ iface.launch()
assets/girl_0.png ADDED
assets/girl_1.png ADDED
assets/musk.jpg ADDED
assets/rick.jpg ADDED
networks/__pycache__/layers.cpython-37.pyc ADDED
Binary file (69.1 kB).
 
networks/__pycache__/layers.cpython-38.pyc ADDED
Binary file (2.12 kB).
 
networks/layers.py ADDED
@@ -0,0 +1,49 @@
+ import tensorflow as tf
+ from tensorflow.keras.layers import Layer, Dense
+
+
+ def sin_activation(x, omega=30):
+     return tf.math.sin(omega * x)
+
+
+ class AdaIN(Layer):
+     def __init__(self, **kwargs):
+         super(AdaIN, self).__init__(**kwargs)
+
+     def build(self, input_shapes):
+         x_shape = input_shapes[0]
+         w_shape = input_shapes[1]
+
+         self.w_channels = w_shape[-1]
+         self.x_channels = x_shape[-1]
+
+         self.dense_1 = Dense(self.x_channels)
+         self.dense_2 = Dense(self.x_channels)
+
+     def call(self, inputs):
+         x, w = inputs
+         ys = tf.reshape(self.dense_1(w), (-1, 1, 1, self.x_channels))
+         yb = tf.reshape(self.dense_2(w), (-1, 1, 1, self.x_channels))
+         return ys * x + yb
+
+     def get_config(self):
+         config = {
+             # 'w_channels': self.w_channels,
+             # 'x_channels': self.x_channels
+         }
+         base_config = super(AdaIN, self).get_config()
+         return dict(list(base_config.items()) + list(config.items()))
+
+
+ class AdaptiveAttention(Layer):
+
+     def __init__(self, **kwargs):
+         super(AdaptiveAttention, self).__init__(**kwargs)
+
+     def call(self, inputs):
+         m, a, i = inputs
+         return (1 - m) * a + m * i
+
+     def get_config(self):
+         base_config = super(AdaptiveAttention, self).get_config()
+         return base_config
options/__pycache__/swap_options.cpython-37.pyc ADDED
Binary file (6.21 kB).
 
options/__pycache__/swap_options.cpython-38.pyc ADDED
Binary file (1.65 kB).
 
options/swap_options.py ADDED
@@ -0,0 +1,43 @@
+ import argparse
+
+
+ class SwapOptions():
+     def __init__(self):
+         self.parser = argparse.ArgumentParser()
+         self.initialized = False
+
+     def initialize(self):
+         # paths (data, models, etc...)
+         self.parser.add_argument('--arcface_path', type=str,
+                                  default="arcface_model/arcface/arc_res50.h5",
+                                  help='path to arcface model. Used to extract identity from source.')
+
+         # Video/Image necessary models
+         self.parser.add_argument('--retina_path', type=str,
+                                  default="retinaface/retinaface_res50.h5",
+                                  help='path to retinaface model.')
+         self.parser.add_argument('--compare', type=bool,
+                                  default=True,
+                                  help='If true, concatenates the frame with the manipulated frame')
+
+         self.parser.add_argument('--load', type=int,
+                                  default=30,
+                                  help='int of number to load checkpoint weights.')
+         self.parser.add_argument('--device_id', type=int, default=0,
+                                  help='which device to use')
+
+         # logging and checkpointing
+         self.parser.add_argument('--log_dir', type=str, default='logs/runs/',
+                                  help='logging directory')
+         self.parser.add_argument('--log_name', type=str, default='affa_f',
+                                  help='name of the run, change this to track several experiments')
+
+         self.parser.add_argument('--chkp_dir', type=str, default='checkpoints/',
+                                  help='checkpoint directory (will use same name as log_name!)')
+         self.initialized = True
+
+     def parse(self):
+         if not self.initialized:
+             self.initialize()
+         self.opt = self.parser.parse_args()
+         return self.opt
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ tensorflow
+ tensorflow-addons
+ opencv-python-headless
+ scipy
+ pillow
+ scikit-image
+ huggingface_hub
retinaface/anchor.py ADDED
@@ -0,0 +1,296 @@
1
+ """Anchor utils modified from https://github.com/biubug6/Pytorch_Retinaface"""
2
+ import math
3
+ import tensorflow as tf
4
+ import numpy as np
5
+ from itertools import product as product
6
+
7
+
8
+ ###############################################################################
9
+ # Tensorflow / Numpy Priors #
10
+ ###############################################################################
11
+ def prior_box(image_sizes, min_sizes, steps, clip=False):
12
+ """prior box"""
13
+ feature_maps = [
14
+ [math.ceil(image_sizes[0] / step), math.ceil(image_sizes[1] / step)]
15
+ for step in steps]
16
+
17
+ anchors = []
18
+ for k, f in enumerate(feature_maps):
19
+ for i, j in product(range(f[0]), range(f[1])):
20
+ for min_size in min_sizes[k]:
21
+ s_kx = min_size / image_sizes[1]
22
+ s_ky = min_size / image_sizes[0]
23
+ cx = (j + 0.5) * steps[k] / image_sizes[1]
24
+ cy = (i + 0.5) * steps[k] / image_sizes[0]
25
+ anchors += [cx, cy, s_kx, s_ky]
26
+
27
+ output = np.asarray(anchors).reshape([-1, 4])
28
+
29
+ if clip:
30
+ output = np.clip(output, 0, 1)
31
+
32
+ return output
33
+
34
+
35
+ def prior_box_tf(image_sizes, min_sizes, steps, clip=False):
36
+ """prior box"""
37
+ image_sizes = tf.cast(tf.convert_to_tensor(image_sizes), tf.float32)
38
+ feature_maps = tf.math.ceil(
39
+ tf.reshape(image_sizes, [1, 2]) /
40
+ tf.reshape(tf.cast(steps, tf.float32), [-1, 1]))
41
+
42
+ anchors = []
43
+ for k in range(len(min_sizes)):
44
+ grid_x, grid_y = _meshgrid_tf(tf.range(feature_maps[k][1]),
45
+ tf.range(feature_maps[k][0]))
46
+ cx = (grid_x + 0.5) * steps[k] / image_sizes[1]
47
+ cy = (grid_y + 0.5) * steps[k] / image_sizes[0]
48
+ cxcy = tf.stack([cx, cy], axis=-1)
49
+ cxcy = tf.reshape(cxcy, [-1, 2])
50
+ cxcy = tf.repeat(cxcy, repeats=tf.shape(min_sizes[k])[0], axis=0)
51
+
52
+ sx = min_sizes[k] / image_sizes[1]
53
+ sy = min_sizes[k] / image_sizes[0]
54
+ sxsy = tf.stack([sx, sy], 1)
55
+ sxsy = tf.repeat(sxsy[tf.newaxis],
56
+ repeats=tf.shape(grid_x)[0] * tf.shape(grid_x)[1],
57
+ axis=0)
58
+ sxsy = tf.reshape(sxsy, [-1, 2])
59
+
60
+ anchors.append(tf.concat([cxcy, sxsy], 1))
61
+
62
+ output = tf.concat(anchors, axis=0)
63
+
64
+ if clip:
65
+ output = tf.clip_by_value(output, 0, 1)
66
+
67
+ return output
68
+
69
+
70
+ def _meshgrid_tf(x, y):
71
+ """ workaround solution of the tf.meshgrid() issue:
72
+ https://github.com/tensorflow/tensorflow/issues/34470"""
73
+ grid_shape = [tf.shape(y)[0], tf.shape(x)[0]]
74
+ grid_x = tf.broadcast_to(tf.reshape(x, [1, -1]), grid_shape)
75
+ grid_y = tf.broadcast_to(tf.reshape(y, [-1, 1]), grid_shape)
76
+ return grid_x, grid_y
77
+
78
+
79
+ ###############################################################################
80
+ # Tensorflow Encoding #
81
+ ###############################################################################
82
+ def encode_tf(labels, priors, match_thresh, ignore_thresh,
83
+ variances=[0.1, 0.2]):
84
+ """tensorflow encoding"""
85
+ assert ignore_thresh <= match_thresh
86
+ priors = tf.cast(priors, tf.float32)
87
+ bbox = labels[:, :4]
88
+ landm = labels[:, 4:-1]
89
+ landm_valid = labels[:, -1] # 1: with landm, 0: w/o landm.
90
+
91
+ # jaccard index
92
+ overlaps = _jaccard(bbox, _point_form(priors))
93
+
94
+ # (Bipartite Matching)
95
+ # [num_objects] best prior for each ground truth
96
+ best_prior_overlap, best_prior_idx = tf.math.top_k(overlaps, k=1)
97
+ best_prior_overlap = best_prior_overlap[:, 0]
98
+ best_prior_idx = best_prior_idx[:, 0]
99
+
100
+ # [num_priors] best ground truth for each prior
101
+ overlaps_t = tf.transpose(overlaps)
102
+ best_truth_overlap, best_truth_idx = tf.math.top_k(overlaps_t, k=1)
103
+ best_truth_overlap = best_truth_overlap[:, 0]
104
+ best_truth_idx = best_truth_idx[:, 0]
105
+
106
+ # ensure best prior
107
+ def _loop_body(i, bt_idx, bt_overlap):
108
+ bp_mask = tf.one_hot(best_prior_idx[i], tf.shape(bt_idx)[0])
109
+ bp_mask_int = tf.cast(bp_mask, tf.int32)
110
+ new_bt_idx = bt_idx * (1 - bp_mask_int) + bp_mask_int * i
111
+ bp_mask_float = tf.cast(bp_mask, tf.float32)
112
+ new_bt_overlap = bt_overlap * (1 - bp_mask_float) + bp_mask_float * 2
113
+ return tf.cond(best_prior_overlap[i] > match_thresh,
114
+ lambda: (i + 1, new_bt_idx, new_bt_overlap),
115
+ lambda: (i + 1, bt_idx, bt_overlap))
116
+ _, best_truth_idx, best_truth_overlap = tf.while_loop(
117
+ lambda i, bt_idx, bt_overlap: tf.less(i, tf.shape(best_prior_idx)[0]),
118
+ _loop_body, [tf.constant(0), best_truth_idx, best_truth_overlap])
119
+
120
+ matches_bbox = tf.gather(bbox, best_truth_idx) # [num_priors, 4]
121
+ matches_landm = tf.gather(landm, best_truth_idx) # [num_priors, 10]
122
+ matches_landm_v = tf.gather(landm_valid, best_truth_idx) # [num_priors]
123
+
124
+ loc_t = _encode_bbox(matches_bbox, priors, variances)
125
+ landm_t = _encode_landm(matches_landm, priors, variances)
126
+ landm_valid_t = tf.cast(matches_landm_v > 0, tf.float32)
127
+ conf_t = tf.cast(best_truth_overlap > match_thresh, tf.float32)
128
+ conf_t = tf.where(
129
+ tf.logical_and(best_truth_overlap < match_thresh,
130
+ best_truth_overlap > ignore_thresh),
131
+ tf.ones_like(conf_t) * -1, conf_t) # 1: pos, 0: neg, -1: ignore
132
+
133
+ return tf.concat([loc_t, landm_t, landm_valid_t[..., tf.newaxis],
134
+ conf_t[..., tf.newaxis]], axis=1)
135
+
136
+
137
+ def _encode_bbox(matched, priors, variances):
138
+ """Encode the variances from the priorbox layers into the ground truth
139
+ boxes we have matched (based on jaccard overlap) with the prior boxes.
140
+ Args:
141
+ matched: (tensor) Coords of ground truth for each prior in point-form
142
+ Shape: [num_priors, 4].
143
+ priors: (tensor) Prior boxes in center-offset form
144
+ Shape: [num_priors,4].
145
+ variances: (list[float]) Variances of priorboxes
146
+ Return:
147
+ encoded boxes (tensor), Shape: [num_priors, 4]
148
+ """
149
+
150
+ # dist b/t match center and prior's center
151
+ g_cxcy = (matched[:, :2] + matched[:, 2:]) / 2 - priors[:, :2]
152
+ # encode variance
153
+ g_cxcy /= (variances[0] * priors[:, 2:])
154
+ # match wh / prior wh
155
+ g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
156
+ g_wh = tf.math.log(g_wh) / variances[1]
157
+ # return target for smooth_l1_loss
158
+ return tf.concat([g_cxcy, g_wh], 1) # [num_priors,4]
159
+
160
+
161
+ def _encode_landm(matched, priors, variances):
162
+ """Encode the variances from the priorbox layers into the ground truth
163
+ boxes we have matched (based on jaccard overlap) with the prior boxes.
164
+ Args:
165
+ matched: (tensor) Coords of ground truth for each prior in point-form
166
+ Shape: [num_priors, 10].
167
+ priors: (tensor) Prior boxes in center-offset form
168
+ Shape: [num_priors,4].
169
+ variances: (list[float]) Variances of priorboxes
170
+ Return:
171
+ encoded landm (tensor), Shape: [num_priors, 10]
172
+ """
173
+
174
+ # dist b/t match center and prior's center
175
+ matched = tf.reshape(matched, [tf.shape(matched)[0], 5, 2])
176
+ priors = tf.broadcast_to(
177
+ tf.expand_dims(priors, 1), [tf.shape(matched)[0], 5, 4])
178
+ g_cxcy = matched[:, :, :2] - priors[:, :, :2]
179
+ # encode variance
180
+ g_cxcy /= (variances[0] * priors[:, :, 2:])
181
+ # g_cxcy /= priors[:, :, 2:]
182
+ g_cxcy = tf.reshape(g_cxcy, [tf.shape(g_cxcy)[0], -1])
183
+ # return target for smooth_l1_loss
184
+ return g_cxcy
185
+
186
+
187
+ def _point_form(boxes):
188
+ """ Convert prior_boxes to (xmin, ymin, xmax, ymax)
189
+ representation for comparison to point form ground truth data.
190
+ Args:
191
+ boxes: (tensor) center-size default boxes from priorbox layers.
192
+ Return:
193
+ boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
194
+ """
195
+ return tf.concat((boxes[:, :2] - boxes[:, 2:] / 2,
196
+ boxes[:, :2] + boxes[:, 2:] / 2), axis=1)
197
+
198
+
199
+ def _intersect(box_a, box_b):
200
+ """ We resize both tensors to [A,B,2]:
201
+ [A,2] -> [A,1,2] -> [A,B,2]
202
+ [B,2] -> [1,B,2] -> [A,B,2]
203
+ Then we compute the area of intersect between box_a and box_b.
204
+ Args:
205
+ box_a: (tensor) bounding boxes, Shape: [A,4].
206
+ box_b: (tensor) bounding boxes, Shape: [B,4].
207
+ Return:
208
+ (tensor) intersection area, Shape: [A,B].
209
+ """
210
+ A = tf.shape(box_a)[0]
211
+ B = tf.shape(box_b)[0]
212
+ max_xy = tf.minimum(
213
+ tf.broadcast_to(tf.expand_dims(box_a[:, 2:], 1), [A, B, 2]),
214
+ tf.broadcast_to(tf.expand_dims(box_b[:, 2:], 0), [A, B, 2]))
215
+ min_xy = tf.maximum(
216
+ tf.broadcast_to(tf.expand_dims(box_a[:, :2], 1), [A, B, 2]),
217
+ tf.broadcast_to(tf.expand_dims(box_b[:, :2], 0), [A, B, 2]))
218
+ inter = tf.maximum((max_xy - min_xy), tf.zeros_like(max_xy - min_xy))
219
+ return inter[:, :, 0] * inter[:, :, 1]
220
+
221
+
222
+ def _jaccard(box_a, box_b):
223
+ """Compute the jaccard overlap of two sets of boxes. The jaccard overlap
224
+ is simply the intersection over union of two boxes. Here we operate on
225
+ ground truth boxes and default boxes.
226
+ E.g.:
227
+ A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
228
+ Args:
229
+ box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
230
+ box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
231
+ Return:
232
+ jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
233
+ """
234
+ inter = _intersect(box_a, box_b)
235
+ area_a = tf.broadcast_to(
236
+ tf.expand_dims(
237
+ (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1]), 1),
238
+ tf.shape(inter)) # [A,B]
239
+ area_b = tf.broadcast_to(
240
+ tf.expand_dims(
241
+ (box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1]), 0),
242
+ tf.shape(inter)) # [A,B]
243
+ union = area_a + area_b - inter
244
+ return inter / union # [A,B]
245
+
246
+
247
+ ###############################################################################
248
+ # Tensorflow Decoding #
249
+ ###############################################################################
250
+ def decode_tf(labels, priors, variances=[0.1, 0.2]):
251
+ """tensorflow decoding"""
252
+ bbox = _decode_bbox(labels[:, :4], priors, variances)
253
+ landm = _decode_landm(labels[:, 4:14], priors, variances)
254
+ landm_valid = labels[:, 14][:, tf.newaxis]
255
+ conf = labels[:, 15][:, tf.newaxis]
256
+
257
+ return tf.concat([bbox, landm, landm_valid, conf], axis=1)
258
+
259
+
260
+ def _decode_bbox(pre, priors, variances=[0.1, 0.2]):
261
+ """Decode locations from predictions using priors to undo
262
+ the encoding we did for offset regression at train time.
263
+ Args:
264
+ pre (tensor): location predictions for loc layers,
265
+ Shape: [num_priors,4]
266
+ priors (tensor): Prior boxes in center-offset form.
267
+ Shape: [num_priors,4].
268
+ variances: (list[float]) Variances of priorboxes
269
+ Return:
270
+ decoded bounding box predictions
271
+ """
272
+ centers = priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:]
273
+ sides = priors[:, 2:] * tf.math.exp(pre[:, 2:] * variances[1])
274
+
275
+ return tf.concat([centers - sides / 2, centers + sides / 2], axis=1)
276
+
277
+
278
+ def _decode_landm(pre, priors, variances=[0.1, 0.2]):
279
+ """Decode landm from predictions using priors to undo
280
+ the encoding we did for offset regression at train time.
281
+ Args:
282
+ pre (tensor): landm predictions for loc layers,
283
+ Shape: [num_priors,10]
284
+ priors (tensor): Prior boxes in center-offset form.
285
+ Shape: [num_priors,4].
286
+ variances: (list[float]) Variances of priorboxes
287
+ Return:
288
+ decoded landm predictions
289
+ """
290
+ landms = tf.concat(
291
+ [priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
292
+ priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],
293
+ priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],
294
+ priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],
295
+ priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:]], axis=1)
296
+ return landms
retinaface/models.py ADDED
@@ -0,0 +1,301 @@
1
+ import tensorflow as tf
2
+ from tensorflow.keras import Model
3
+ from tensorflow.keras.applications import MobileNetV2, ResNet50
4
+ from tensorflow.keras.layers import Input, Conv2D, ReLU, LeakyReLU
5
+ from retinaface.anchor import decode_tf, prior_box_tf
6
+
7
+
8
+ def _regularizer(weights_decay):
9
+ """l2 regularizer"""
10
+ return tf.keras.regularizers.l2(weights_decay)
11
+
12
+
13
+ def _kernel_init(scale=1.0, seed=None):
14
+ """He normal initializer"""
15
+ return tf.keras.initializers.he_normal()
16
+
17
+
18
+ class BatchNormalization(tf.keras.layers.BatchNormalization):
19
+ """Make trainable=False freeze BN for real (the og version is sad).
20
+ ref: https://github.com/zzh8829/yolov3-tf2
21
+ """
22
+ def __init__(self, axis=-1, momentum=0.9, epsilon=1e-5, center=True,
23
+ scale=True, name=None, **kwargs):
24
+ super(BatchNormalization, self).__init__(
25
+ axis=axis, momentum=momentum, epsilon=epsilon, center=center,
26
+ scale=scale, name=name, **kwargs)
27
+
28
+ def call(self, x, training=False):
29
+ if training is None:
30
+ training = tf.constant(False)
31
+ training = tf.logical_and(training, self.trainable)
32
+
33
+ return super().call(x, training)
34
+
35
+
36
+ def Backbone(backbone_type='ResNet50', use_pretrain=True):
37
+ """Backbone Model"""
38
+ weights = None
39
+ if use_pretrain:
40
+ weights = 'imagenet'
41
+
42
+ def backbone(x):
43
+ if backbone_type == 'ResNet50':
44
+ extractor = ResNet50(
45
+ input_shape=x.shape[1:], include_top=False, weights=weights)
46
+ pick_layer1 = 80 # [80, 80, 512]
47
+ pick_layer2 = 142 # [40, 40, 1024]
48
+ pick_layer3 = 174 # [20, 20, 2048]
49
+ preprocess = tf.keras.applications.resnet.preprocess_input
50
+ elif backbone_type == 'MobileNetV2':
51
+ extractor = MobileNetV2(
52
+ input_shape=x.shape[1:], include_top=False, weights=weights)
53
+ pick_layer1 = 54 # [80, 80, 32]
54
+ pick_layer2 = 116 # [40, 40, 96]
55
+ pick_layer3 = 143 # [20, 20, 160]
56
+ preprocess = tf.keras.applications.mobilenet_v2.preprocess_input
57
+ else:
58
+ raise NotImplementedError(
59
+ 'Backbone type {} is not recognized.'.format(backbone_type))
60
+
61
+ return Model(extractor.input,
62
+ (extractor.layers[pick_layer1].output,
63
+ extractor.layers[pick_layer2].output,
64
+ extractor.layers[pick_layer3].output),
65
+ name=backbone_type + '_extrator')(preprocess(x))
66
+
67
+ return backbone
68
+
69
+
70
+ class ConvUnit(tf.keras.layers.Layer):
71
+ """Conv + BN + Act"""
72
+ def __init__(self, f, k, s, wd, act=None, **kwargs):
73
+ super(ConvUnit, self).__init__(**kwargs)
74
+ self.conv = Conv2D(filters=f, kernel_size=k, strides=s, padding='same',
75
+ kernel_initializer=_kernel_init(),
76
+ kernel_regularizer=_regularizer(wd),
77
+ use_bias=False)
78
+ self.bn = BatchNormalization()
79
+
80
+ if act is None:
81
+ self.act_fn = tf.identity
82
+ elif act == 'relu':
83
+ self.act_fn = ReLU()
84
+ elif act == 'lrelu':
85
+ self.act_fn = LeakyReLU(0.1)
86
+ else:
87
+ raise NotImplementedError(
88
+ 'Activation function type {} is not recognized.'.format(act))
89
+
90
+ def call(self, x):
91
+ return self.act_fn(self.bn(self.conv(x)))
92
+
93
+
94
+ class FPN(tf.keras.layers.Layer):
95
+ """Feature Pyramid Network"""
96
+ def __init__(self, out_ch, wd, **kwargs):
97
+ super(FPN, self).__init__(**kwargs)
98
+ act = 'relu'
99
+ self.out_ch = out_ch
100
+ self.wd = wd
101
+ if (out_ch <= 64):
102
+ act = 'lrelu'
103
+
104
+ self.output1 = ConvUnit(f=out_ch, k=1, s=1, wd=wd, act=act)
105
+ self.output2 = ConvUnit(f=out_ch, k=1, s=1, wd=wd, act=act)
106
+ self.output3 = ConvUnit(f=out_ch, k=1, s=1, wd=wd, act=act)
107
+ self.merge1 = ConvUnit(f=out_ch, k=3, s=1, wd=wd, act=act)
108
+ self.merge2 = ConvUnit(f=out_ch, k=3, s=1, wd=wd, act=act)
109
+
110
+ def call(self, x):
111
+ output1 = self.output1(x[0]) # [80, 80, out_ch]
112
+ output2 = self.output2(x[1]) # [40, 40, out_ch]
113
+ output3 = self.output3(x[2]) # [20, 20, out_ch]
114
+
115
+ up_h, up_w = tf.shape(output2)[1], tf.shape(output2)[2]
116
+ up3 = tf.image.resize(output3, [up_h, up_w], method='nearest')
117
+ output2 = output2 + up3
118
+ output2 = self.merge2(output2)
119
+
120
+ up_h, up_w = tf.shape(output1)[1], tf.shape(output1)[2]
121
+ up2 = tf.image.resize(output2, [up_h, up_w], method='nearest')
122
+ output1 = output1 + up2
123
+ output1 = self.merge1(output1)
124
+
125
+ return output1, output2, output3
126
+
127
+ def get_config(self):
128
+ config = {
129
+ 'out_ch': self.out_ch,
130
+ 'wd': self.wd,
131
+ }
132
+ base_config = super(FPN, self).get_config()
133
+ return dict(list(base_config.items()) + list(config.items()))
134
+
135
+
136
+ class SSH(tf.keras.layers.Layer):
137
+ """Single Stage Headless Layer"""
138
+ def __init__(self, out_ch, wd, **kwargs):
139
+ super(SSH, self).__init__(**kwargs)
140
+ assert out_ch % 4 == 0
141
+ self.out_ch = out_ch
142
+ self.wd = wd
143
+ act = 'relu'
144
+ if (out_ch <= 64):
145
+ act = 'lrelu'
146
+
147
+ self.conv_3x3 = ConvUnit(f=out_ch // 2, k=3, s=1, wd=wd, act=None)
148
+
149
+ self.conv_5x5_1 = ConvUnit(f=out_ch // 4, k=3, s=1, wd=wd, act=act)
150
+ self.conv_5x5_2 = ConvUnit(f=out_ch // 4, k=3, s=1, wd=wd, act=None)
151
+
152
+ self.conv_7x7_2 = ConvUnit(f=out_ch // 4, k=3, s=1, wd=wd, act=act)
153
+ self.conv_7x7_3 = ConvUnit(f=out_ch // 4, k=3, s=1, wd=wd, act=None)
154
+
155
+ self.relu = ReLU()
156
+
157
+ def call(self, x):
158
+ conv_3x3 = self.conv_3x3(x)
159
+
160
+ conv_5x5_1 = self.conv_5x5_1(x)
161
+ conv_5x5 = self.conv_5x5_2(conv_5x5_1)
162
+
163
+ conv_7x7_2 = self.conv_7x7_2(conv_5x5_1)
164
+ conv_7x7 = self.conv_7x7_3(conv_7x7_2)
165
+
166
+ output = tf.concat([conv_3x3, conv_5x5, conv_7x7], axis=3)
167
+ output = self.relu(output)
168
+
169
+ return output
170
+
171
+ def get_config(self):
172
+ config = {
173
+ 'out_ch': self.out_ch,
174
+ 'wd': self.wd,
175
+ }
176
+ base_config = super(SSH, self).get_config()
177
+ return dict(list(base_config.items()) + list(config.items()))
178
+
179
+
180
+ class BboxHead(tf.keras.layers.Layer):
181
+ """Bbox Head Layer"""
182
+ def __init__(self, num_anchor, wd, **kwargs):
183
+ super(BboxHead, self).__init__(**kwargs)
184
+ self.num_anchor = num_anchor
185
+ self.wd = wd
186
+ self.conv = Conv2D(filters=num_anchor * 4, kernel_size=1, strides=1)
187
+
188
+ def call(self, x):
189
+ h, w = tf.shape(x)[1], tf.shape(x)[2]
190
+ x = self.conv(x)
191
+
192
+ return tf.reshape(x, [-1, h * w * self.num_anchor, 4])
193
+
194
+ def get_config(self):
195
+ config = {
196
+ 'num_anchor': self.num_anchor,
197
+ 'wd': self.wd,
198
+ }
199
+ base_config = super(BboxHead, self).get_config()
200
+ return dict(list(base_config.items()) + list(config.items()))
201
+
202
+
203
+ class LandmarkHead(tf.keras.layers.Layer):
204
+ """Landmark Head Layer"""
205
+ def __init__(self, num_anchor, wd, name='LandmarkHead', **kwargs):
206
+ super(LandmarkHead, self).__init__(name=name, **kwargs)
207
+ self.num_anchor = num_anchor
208
+ self.wd = wd
209
+ self.conv = Conv2D(filters=num_anchor * 10, kernel_size=1, strides=1)
210
+
211
+ def call(self, x):
212
+ h, w = tf.shape(x)[1], tf.shape(x)[2]
213
+ x = self.conv(x)
214
+
215
+ return tf.reshape(x, [-1, h * w * self.num_anchor, 10])
216
+
217
+ def get_config(self):
218
+ config = {
219
+ 'num_anchor': self.num_anchor,
220
+ 'wd': self.wd,
221
+ }
222
+ base_config = super(LandmarkHead, self).get_config()
223
+ return dict(list(base_config.items()) + list(config.items()))
224
+
225
+
226
+ class ClassHead(tf.keras.layers.Layer):
227
+ """Class Head Layer"""
228
+ def __init__(self, num_anchor, wd, name='ClassHead', **kwargs):
229
+ super(ClassHead, self).__init__(name=name, **kwargs)
230
+ self.num_anchor = num_anchor
231
+ self.wd = wd
232
+ self.conv = Conv2D(filters=num_anchor * 2, kernel_size=1, strides=1)
233
+
234
+ def call(self, x):
235
+ h, w = tf.shape(x)[1], tf.shape(x)[2]
236
+ x = self.conv(x)
237
+
238
+ return tf.reshape(x, [-1, h * w * self.num_anchor, 2])
239
+
240
+ def get_config(self):
241
+ config = {
242
+ 'num_anchor': self.num_anchor,
243
+ 'wd': self.wd,
244
+ }
245
+ base_config = super(ClassHead, self).get_config()
246
+ return dict(list(base_config.items()) + list(config.items()))
247
+
248
+
249
+ def RetinaFaceModel(cfg, training=False, iou_th=0.4, score_th=0.02,
250
+ name='RetinaFaceModel'):
251
+ """Retina Face Model"""
252
+ input_size = cfg['input_size'] if training else None
253
+ wd = cfg['weights_decay']
254
+ out_ch = cfg['out_channel']
255
+ num_anchor = len(cfg['min_sizes'][0])
256
+ backbone_type = cfg['backbone_type']
257
+
258
+ # define model
259
+ x = inputs = Input([input_size, input_size, 3], name='input_image')
260
+
261
+ x = Backbone(backbone_type=backbone_type)(x)
262
+
263
+ fpn = FPN(out_ch=out_ch, wd=wd)(x)
264
+
265
+ features = [SSH(out_ch=out_ch, wd=wd)(f)
266
+ for i, f in enumerate(fpn)]
267
+
268
+ bbox_regressions = tf.concat(
269
+ [BboxHead(num_anchor, wd=wd)(f)
270
+ for i, f in enumerate(features)], axis=1)
271
+ landm_regressions = tf.concat(
272
+ [LandmarkHead(num_anchor, wd=wd, name=f'LandmarkHead_{i}')(f)
273
+ for i, f in enumerate(features)], axis=1)
274
+ classifications = tf.concat(
275
+ [ClassHead(num_anchor, wd=wd, name=f'ClassHead_{i}')(f)
276
+ for i, f in enumerate(features)], axis=1)
277
+
278
+ classifications = tf.keras.layers.Softmax(axis=-1)(classifications)
279
+
280
+ if training:
281
+ out = (bbox_regressions, landm_regressions, classifications)
282
+ else:
283
+ # only for batch size 1
284
+ preds = tf.concat( # [bboxes, landms, landms_valid, conf]
285
+ [bbox_regressions[0],
286
+ landm_regressions[0],
287
+ tf.ones_like(classifications[0, :, 0][..., tf.newaxis]),
288
+ classifications[0, :, 1][..., tf.newaxis]], 1)
289
+ priors = prior_box_tf((tf.shape(inputs)[1], tf.shape(inputs)[2]), cfg['min_sizes'], cfg['steps'], cfg['clip'])
290
+ decode_preds = decode_tf(preds, priors, cfg['variances'])
291
+
292
+ selected_indices = tf.image.non_max_suppression(
293
+ boxes=decode_preds[:, :4],
294
+ scores=decode_preds[:, -1],
295
+ max_output_size=tf.shape(decode_preds)[0],
296
+ iou_threshold=iou_th,
297
+ score_threshold=score_th)
298
+
299
+ out = tf.gather(decode_preds, selected_indices)
300
+
301
+ return Model(inputs, out, name=name), Model(inputs, [bbox_regressions, landm_regressions, classifications], name=name + '_bb_only')
retinaface/ops.py ADDED
@@ -0,0 +1,26 @@
+ from retinaface.anchor import decode_tf, prior_box_tf
+ import tensorflow as tf
+
+
+ def extract_detections(bbox_regressions, landm_regressions, classifications, image_sizes, iou_th=0.4, score_th=0.02):
+     min_sizes = [[16, 32], [64, 128], [256, 512]]
+     steps = [8, 16, 32]
+     variances = [0.1, 0.2]
+     preds = tf.concat(  # [bboxes, landms, landms_valid, conf]
+         [bbox_regressions,
+          landm_regressions,
+          tf.ones_like(classifications[:, 0][..., tf.newaxis]),
+          classifications[:, 1][..., tf.newaxis]], 1)
+     priors = prior_box_tf(image_sizes, min_sizes, steps, False)
+     decode_preds = decode_tf(preds, priors, variances)
+
+     selected_indices = tf.image.non_max_suppression(
+         boxes=decode_preds[:, :4],
+         scores=decode_preds[:, -1],
+         max_output_size=tf.shape(decode_preds)[0],
+         iou_threshold=iou_th,
+         score_threshold=score_th)
+
+     out = tf.gather(decode_preds, selected_indices)
+
+     return out
utils/__pycache__/utils.cpython-38.pyc ADDED
Binary file (11.6 kB).
 
utils/utils.py ADDED
@@ -0,0 +1,377 @@
1
+ import json
2
+ from tensorflow.keras.models import model_from_json
3
+ from networks.layers import AdaIN, AdaptiveAttention
4
+ import tensorflow as tf
5
+
6
+ import numpy as np
7
+ import cv2
8
+ import math
9
+ from skimage import transform as trans
10
+ from scipy.signal import convolve2d
11
+ from skimage.color import rgb2yuv, yuv2rgb
12
+
13
+ from PIL import Image
14
+
15
+
16
+ def save_model_internal(model, path, name, num):
17
+ json_model = model.to_json()
18
+ with open(path + name + '.json', "w") as json_file:
19
+ json_file.write(json_model)
20
+
21
+ model.save_weights(path + name + '_' + str(num) + '.h5')
22
+
23
+
24
+ def load_model_internal(path, name, num):
25
+ with open(path + name + '.json', 'r') as json_file:
26
+ model_dict = json_file.read()
27
+
28
+ mod = model_from_json(model_dict, custom_objects={'AdaIN': AdaIN, 'AdaptiveAttention': AdaptiveAttention})
29
+ mod.load_weights(path + name + '_' + str(num) + '.h5')
30
+
31
+ return mod
32
+
33
+
34
+ def save_training_meta(state_dict, path, num):
35
+ with open(path + str(num) + '.json', 'w') as json_file:
36
+ json.dump(state_dict, json_file, indent=2)
37
+
38
+
39
+ def load_training_meta(path, num):
40
+ with open(path + str(num) + '.json', 'r') as json_file:
41
+ state_dict = json.load(json_file)
42
+ return state_dict
43
+
44
+
45
+ def log_info(sw, results_dict, iteration):
46
+ with sw.as_default():
47
+ for key in results_dict.keys():
48
+ tf.summary.scalar(key, results_dict[key], step=iteration)
49
+
50
+
51
+ src1 = np.array([[51.642, 50.115], [57.617, 49.990], [35.740, 69.007],
52
+ [51.157, 89.050], [57.025, 89.702]],
53
+ dtype=np.float32)
54
+ # <--left
55
+ src2 = np.array([[45.031, 50.118], [65.568, 50.872], [39.677, 68.111],
56
+ [45.177, 86.190], [64.246, 86.758]],
57
+ dtype=np.float32)
58
+
59
+ # ---frontal
60
+ src3 = np.array([[39.730, 51.138], [72.270, 51.138], [56.000, 68.493],
61
+ [42.463, 87.010], [69.537, 87.010]],
62
+ dtype=np.float32)
63
+
64
+ # -->right
65
+ src4 = np.array([[46.845, 50.872], [67.382, 50.118], [72.737, 68.111],
66
+ [48.167, 86.758], [67.236, 86.190]],
67
+ dtype=np.float32)
68
+
69
+ # -->right profile
70
+ src5 = np.array([[54.796, 49.990], [60.771, 50.115], [76.673, 69.007],
71
+ [55.388, 89.702], [61.257, 89.050]],
72
+ dtype=np.float32)
73
+
74
+ src = np.array([src1, src2, src3, src4, src5])
75
+ src_map = {112: src, 224: src * 2}
76
+
77
+ # Left eye, right eye, nose, left mouth, right mouth
78
+ arcface_src = np.array(
79
+ [[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366],
80
+ [41.5493, 92.3655], [70.7299, 92.2041]],
81
+ dtype=np.float32)
82
+
83
+ arcface_src = np.expand_dims(arcface_src, axis=0)
84
+
85
+
86
+ def extract_face(img, bb, absolute_center, mode='arcface', extention_rate=0.05, debug=False):
87
+ """Extract face from image given a bounding box"""
88
+ # bbox
89
+ x1, y1, x2, y2 = bb + 60
90
+ adjusted_absolute_center = (absolute_center[0] + 60, absolute_center[1] + 60)
91
+ if debug:
92
+ print(bb + 60)
93
+ x1, y1, x2, y2 = bb
94
+ cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 3)
95
+ cv2.circle(img, absolute_center, 1, (255, 0, 255), 2)
96
+ Image.fromarray(img).show()
97
+ x1, y1, x2, y2 = bb + 60
98
+ # Pad image in case face is out of frame
99
+ padded_img = np.zeros(shape=(248, 248, 3), dtype=np.uint8)
100
+ padded_img[60:-60, 60:-60, :] = img
101
+
102
+ if debug:
103
+ cv2.rectangle(padded_img, (x1, y1), (x2, y2), (0, 255, 255), 3)
104
+ cv2.circle(padded_img, adjusted_absolute_center, 1, (255, 255, 255), 2)
105
+ Image.fromarray(padded_img).show()
106
+
107
+ y_len = abs(y1 - y2)
108
+ x_len = abs(x1 - x2)
109
+
110
+ new_len = (y_len + x_len) // 2
111
+
112
+ extension = int(new_len * extention_rate)
113
+
114
+ x_adjust = (x_len - new_len) // 2
115
+ y_adjust = (y_len - new_len) // 2
116
+
117
+ x_1_adjusted = x1 + x_adjust - extension
118
+ x_2_adjusted = x2 - x_adjust + extension
119
+
120
+ if mode == 'arcface':
121
+ y_1_adjusted = y1 - extension
122
+ y_2_adjusted = y2 - 2 * y_adjust + extension
123
+ else:
124
+ y_1_adjusted = y1 + 2 * y_adjust - extension
125
+ y_2_adjusted = y2 + extension
126
+
127
+ move_x = adjusted_absolute_center[0] - (x_1_adjusted + x_2_adjusted) // 2
128
+ move_y = adjusted_absolute_center[1] - (y_1_adjusted + y_2_adjusted) // 2
129
+
130
+ x_1_adjusted = x_1_adjusted + move_x
131
+ x_2_adjusted = x_2_adjusted + move_x
132
+ y_1_adjusted = y_1_adjusted + move_y
133
+ y_2_adjusted = y_2_adjusted + move_y
134
+
135
+ # print(y_1_adjusted, y_2_adjusted, x_1_adjusted, x_2_adjusted)
136
+
137
+ return padded_img[y_1_adjusted:y_2_adjusted, x_1_adjusted:x_2_adjusted]
138
+
139
+
140
+ def distance(a, b):
141
+ return np.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)
142
+
143
+
144
+ def euclidean_distance(a, b):
145
+ x1 = a[0]; y1 = a[1]
146
+ x2 = b[0]; y2 = b[1]
147
+ return np.sqrt(((x2 - x1) * (x2 - x1)) + ((y2 - y1) * (y2 - y1)))
148
+
149
+
150
+ def align_face(img, landmarks, debug=False):
151
+ nose, right_eye, left_eye = landmarks
152
+
153
+ left_eye_x = left_eye[0]
154
+ left_eye_y = left_eye[1]
155
+
156
+ right_eye_x = right_eye[0]
157
+ right_eye_y = right_eye[1]
158
+
159
+ center_eye = ((left_eye[0] + right_eye[0]) // 2, (left_eye[1] + right_eye[1]) // 2)
160
+
161
+ if left_eye_y < right_eye_y:
162
+ point_3rd = (right_eye_x, left_eye_y)
163
+ direction = -1
164
+ else:
165
+ point_3rd = (left_eye_x, right_eye_y)
166
+ direction = 1
167
+
168
+ if debug:
169
+ cv2.circle(img, point_3rd, 1, (255, 0, 0), 1)
170
+ cv2.circle(img, center_eye, 1, (255, 0, 0), 1)
171
+
172
+ cv2.line(img, right_eye, left_eye, (0, 0, 0), 1)
173
+ cv2.line(img, left_eye, point_3rd, (0, 0, 0), 1)
174
+ cv2.line(img, right_eye, point_3rd, (0, 0, 0), 1)
175
+
176
+ a = euclidean_distance(left_eye, point_3rd)
177
+ b = euclidean_distance(right_eye, left_eye)
178
+ c = euclidean_distance(right_eye, point_3rd)
179
+
180
+ cos_a = (b * b + c * c - a * a) / (2 * b * c)
181
+
182
+ angle = np.arccos(cos_a)
183
+
184
+ angle = (angle * 180) / np.pi
185
+
186
+ if direction == -1:
187
+ angle = 90 - angle
188
+ ang = math.radians(direction * angle)
189
+ else:
190
+ ang = math.radians(direction * angle)
191
+ angle = 0 - angle
192
+
193
+ M = cv2.getRotationMatrix2D((64, 64), angle, 1)
194
+ new_img = cv2.warpAffine(img, M, (128, 128),
195
+ flags=cv2.INTER_CUBIC)
196
+
197
+ rotated_nose = (int((nose[0] - 64) * np.cos(ang) - (nose[1] - 64) * np.sin(ang) + 64),
198
+ int((nose[0] - 64) * np.sin(ang) + (nose[1] - 64) * np.cos(ang) + 64))
199
+
200
+ rotated_center_eye = (int((center_eye[0] - 64) * np.cos(ang) - (center_eye[1] - 64) * np.sin(ang) + 64),
201
+ int((center_eye[0] - 64) * np.sin(ang) + (center_eye[1] - 64) * np.cos(ang) + 64))
202
+
203
+ abolute_center = (rotated_center_eye[0], (rotated_nose[1] + rotated_center_eye[1]) // 2)
204
+
205
+ if debug:
206
+ cv2.circle(new_img, rotated_nose, 1, (0, 0, 255), 1)
207
+ cv2.circle(new_img, rotated_center_eye, 1, (0, 0, 255), 1)
208
+ cv2.circle(new_img, abolute_center, 1, (0, 0, 255), 1)
209
+
210
+ return new_img, abolute_center
211
+
212
+
213
+ def estimate_norm(lmk, image_size=112, mode='arcface', shrink_factor=1.0):
214
+ assert lmk.shape == (5, 2)
215
+ tform = trans.SimilarityTransform()
216
+ lmk_tran = np.insert(lmk, 2, values=np.ones(5), axis=1)
217
+ min_M = []
218
+ min_index = []
219
+ min_error = float('inf')
220
+ src_factor = image_size / 112
221
+ if mode == 'arcface':
222
+ src = arcface_src * shrink_factor + (1 - shrink_factor) * 56
223
+ src = src * src_factor
224
+ else:
225
+ src = src_map[image_size] * src_factor
226
+ for i in np.arange(src.shape[0]):
227
+ tform.estimate(lmk, src[i])
228
+ M = tform.params[0:2, :]
229
+ results = np.dot(M, lmk_tran.T)
230
+ results = results.T
231
+ error = np.sum(np.sqrt(np.sum((results - src[i])**2, axis=1)))
232
+ # print(error)
233
+ if error < min_error:
234
+ min_error = error
235
+ min_M = M
236
+ min_index = i
237
+ return min_M, min_index
238
+
239
+
240
+ def inverse_estimate_norm(lmk, t_lmk, image_size=112, mode='arcface', shrink_factor=1.0):
241
+ assert lmk.shape == (5, 2)
242
+ tform = trans.SimilarityTransform()
243
+ lmk_tran = np.insert(lmk, 2, values=np.ones(5), axis=1)
244
+ min_M = []
245
+ min_index = []
246
+ min_error = float('inf')
247
+ src_factor = image_size / 112
248
+ if mode == 'arcface':
249
+ src = arcface_src * shrink_factor + (1 - shrink_factor) * 56
250
+ src = src * src_factor
251
+ else:
252
+ src = src_map[image_size] * src_factor
253
+ for i in np.arange(src.shape[0]):
254
+ tform.estimate(t_lmk, lmk)
255
+ M = tform.params[0:2, :]
256
+ results = np.dot(M, lmk_tran.T)
257
+ results = results.T
258
+ error = np.sum(np.sqrt(np.sum((results - src[i])**2, axis=1)))
259
+ # print(error)
260
+ if error < min_error:
261
+ min_error = error
262
+ min_M = M
263
+ min_index = i
264
+ return min_M, min_index
265
+
266
+
267
+ def norm_crop(img, landmark, image_size=112, mode='arcface', shrink_factor=1.0):
268
+ """
269
+ Align and crop the image based of the facial landmarks in the image. The alignment is done with
270
+ a similarity transformation based of source coordinates.
271
+ :param img: Image to transform.
272
+ :param landmark: Five landmark coordinates in the image.
273
+ :param image_size: Desired output size after transformation.
274
+ :param mode: 'arcface' aligns the face for the use of Arcface facial recognition model. Useful for
275
+ both facial recognition tasks and face swapping tasks.
276
+ :param shrink_factor: Shrink factor that shrinks the source landmark coordinates. This will include more border
277
+ information around the face. Useful when you want to include more background information when performing face swaps.
278
+ The lower the shrink factor the more of the face is included. Default value 1.0 will align the image to be ready
279
+ for the Arcface recognition model, but usually omits part of the chin. Value of 0.0 would transform all source points
280
+ to the middle of the image, probably rendering the alignment procedure useless.
281
+
282
+ If you process the image with a shrink factor of 0.85 and then want to extract the identity embedding with arcface,
283
+ you simply do a central crop of factor 0.85 to yield same cropped result as using shrink factor 1.0. This will
284
+ reduce the resolution, the recommendation is to processed images to output resolutions higher than 112 is using
285
+ Arcface. This will make sure no information is lost by resampling the image after central crop.
286
+ :return: Returns the transformed image.
287
+ """
288
+ M, pose_index = estimate_norm(landmark, image_size, mode, shrink_factor=shrink_factor)
289
+ warped = cv2.warpAffine(img, M, (image_size, image_size), borderValue=0.0)
290
+ return warped
291
+
292
+
293
+ def transform_landmark_points(M, points):
294
+ lmk_tran = np.insert(points, 2, values=np.ones(5), axis=1)
295
+ transformed_lmk = np.dot(M, lmk_tran.T)
296
+ transformed_lmk = transformed_lmk.T
297
+
298
+ return transformed_lmk
299
+
300
+
301
+ def multi_convolver(image, kernel, iterations):
302
+ if kernel == "Sharpen":
303
+ kernel = np.array([[0, -1, 0],
304
+ [-1, 5, -1],
305
+ [0, -1, 0]])
306
+ elif kernel == "Unsharp_mask":
307
+ kernel = np.array([[1, 4, 6, 4, 1],
308
+ [4, 16, 24, 16, 1],
309
+ [6, 24, -476, 24, 1],
310
+ [4, 16, 24, 16, 1],
311
+ [1, 4, 6, 4, 1]]) * (-1 / 256)
312
+ elif kernel == "Blur":
313
+ kernel = (1 / 16.0) * np.array([[1., 2., 1.],
314
+ [2., 4., 2.],
315
+ [1., 2., 1.]])
316
+ for i in range(iterations):
317
+ image = convolve2d(image, kernel, 'same', boundary='fill', fillvalue = 0)
318
+ return image
319
+
320
+
321
+ def convolve_rgb(image, kernel, iterations=1):
322
+ img_yuv = rgb2yuv(image)
323
+ img_yuv[:, :, 0] = multi_convolver(img_yuv[:, :, 0], kernel,
324
+ iterations)
325
+ final_image = yuv2rgb(img_yuv)
326
+
327
+ return final_image.astype('float32')
328
+
329
+
330
+ def generate_mask_from_landmarks(lms, im_size):
331
+ blend_mask_lm = np.zeros(shape=(im_size, im_size, 3), dtype='float32')
332
+
333
+ # EYES
334
+ blend_mask_lm = cv2.circle(blend_mask_lm,
335
+ (int(lms[0][0]), int(lms[0][1])), 12, (255, 255, 255), 30)
336
+ blend_mask_lm = cv2.circle(blend_mask_lm,
337
+ (int(lms[1][0]), int(lms[1][1])), 12, (255, 255, 255), 30)
338
+ blend_mask_lm = cv2.circle(blend_mask_lm,
339
+ (int((lms[0][0] + lms[1][0]) / 2), int((lms[0][1] + lms[1][1]) / 2)),
340
+ 16, (255, 255, 255), 65)
341
+
342
+ # NOSE
343
+ blend_mask_lm = cv2.circle(blend_mask_lm,
344
+ (int(lms[2][0]), int(lms[2][1])), 5, (255, 255, 255), 5)
345
+ blend_mask_lm = cv2.circle(blend_mask_lm,
346
+ (int((lms[0][0] + lms[1][0]) / 2), int(lms[2][1])), 16, (255, 255, 255), 100)
347
+
348
+ # MOUTH
349
+ blend_mask_lm = cv2.circle(blend_mask_lm,
350
+ (int(lms[3][0]), int(lms[3][1])), 6, (255, 255, 255), 30)
351
+ blend_mask_lm = cv2.circle(blend_mask_lm,
352
+ (int(lms[4][0]), int(lms[4][1])), 6, (255, 255, 255), 30)
353
+
354
+ blend_mask_lm = cv2.circle(blend_mask_lm,
355
+ (int((lms[3][0] + lms[4][0]) / 2), int((lms[3][1] + lms[4][1]) / 2)),
356
+ 16, (255, 255, 255), 40)
357
+ return blend_mask_lm
358
+
359
+
360
+ def display_distance_text(im, distance, lms, im_w, im_h, scale=2):
361
+ blended_insert = cv2.putText(im, str(distance)[:4],
362
+ (int(lms[4] * im_w * 0.5), int(lms[5] * im_h * 0.8)),
363
+ cv2.FONT_HERSHEY_SIMPLEX, scale * 0.5, (0.08, 0.16, 0.08), int(scale * 2))
364
+ blended_insert = cv2.putText(blended_insert, str(distance)[:4],
365
+ (int(lms[4] * im_w * 0.5), int(lms[5] * im_h * 0.8)),
366
+ cv2.FONT_HERSHEY_SIMPLEX, scale* 0.5, (0.3, 0.7, 0.32), int(scale * 1))
367
+ return blended_insert
368
+
369
+
370
+ def get_lm(annotation, im_w, im_h):
371
+ lm_align = np.array([[annotation[4] * im_w, annotation[5] * im_h],
372
+ [annotation[6] * im_w, annotation[7] * im_h],
373
+ [annotation[8] * im_w, annotation[9] * im_h],
374
+ [annotation[10] * im_w, annotation[11] * im_h],
375
+ [annotation[12] * im_w, annotation[13] * im_h]],
376
+ dtype=np.float32)
377
+ return lm_align
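
The norm_crop docstring above explains that a shrink_factor below 1.0 keeps more border context around the face, and that a central crop of the same factor then approximately recovers the tight ArcFace-style alignment. A minimal, illustrative sketch of that workflow follows; it assumes this repository's utils.utils module and its dependencies (TensorFlow, OpenCV, scikit-image) are installed, and the image and landmark values below are dummy placeholders, not data from the commit.

import numpy as np
from utils.utils import norm_crop

# Dummy inputs for illustration: a blank 512x512 RGB image and five (x, y)
# landmarks ordered as left eye, right eye, nose, left mouth, right mouth.
img = np.zeros((512, 512, 3), dtype=np.uint8)
landmarks = np.array([[190.0, 220.0], [320.0, 220.0], [256.0, 300.0],
                      [210.0, 370.0], [300.0, 370.0]], dtype=np.float32)

shrink = 0.85
# Aligned 256x256 crop that keeps extra border context around the face.
aligned = norm_crop(img, landmarks, image_size=256, mode='arcface', shrink_factor=shrink)

# A central crop of the same factor approximately reproduces the tight
# shrink_factor=1.0 alignment that ArcFace expects.
size = aligned.shape[0]
crop = int(size * shrink)
offset = (size - crop) // 2
arcface_input = aligned[offset:offset + crop, offset:offset + crop]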