felixrosberg committed
Commit • 81695aa
1 Parent(s): de22594
Update app.py

app.py CHANGED
@@ -96,22 +96,23 @@ def run_inference(target, source, slider, adv_slider, settings):
 
     # align the detected face
     M, pose_index = estimate_norm(lm_align, 256, "arcface", shrink_factor=1.0)
-    im_aligned = cv2.warpAffine(im, M, (256, 256), borderValue=0.0)
+    im_aligned = (cv2.warpAffine(im, M, (256, 256), borderValue=0.0) - 127.5) / 127.5
 
     if "adversarial defense" in settings:
         eps = adv_slider / 200
+        X = tf.convert_to_tensor(np.expand_dims(im_aligned, axis=0))
         with tf.GradientTape() as tape:
-            tape.watch(
+            tape.watch(X)
 
-            X_z = ArcFaceE(tf.image.resize(
-            output = R([
+            X_z = ArcFaceE(tf.image.resize(X * 0.5 + 0.5, [112, 112]))
+            output = R([X, X_z])
 
-            loss = tf.reduce_mean(tf.abs(
+            loss = tf.reduce_mean(tf.abs(0 - output))
 
-        gradient = tf.sign(tape.gradient(loss,
+        gradient = tf.sign(tape.gradient(loss, X))
 
-        adv_x =
-        im_aligned = tf.clip_by_value(adv_x, -1, 1)
+        adv_x = X + eps * gradient
+        im_aligned = tf.clip_by_value(adv_x, -1, 1)[0]
 
     if "anonymize" in settings and "reconstruction attack" not in settings:
         """source_z = ArcFace.predict(np.expand_dims(tf.image.resize(im_aligned, [112, 112]) / 255.0, axis=0))
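Note: the block added in this hunk is a single-step, FGSM-style perturbation. The aligned face (now in [-1, 1]) is watched by a GradientTape, an L1 loss is taken between a blank target and the reconstructor output, and the input is nudged by eps times the gradient sign; eps = adv_slider / 200 keeps the step small relative to the [-1, 1] pixel range. A self-contained sketch of the same step, with encoder and reconstructor as hypothetical stand-ins for ArcFaceE and R:

# Minimal sketch of the FGSM-style perturbation step introduced above.
# `encoder` and `reconstructor` are stand-ins for ArcFaceE and R, and
# `im_aligned` is assumed to be a float HxWx3 array already scaled to [-1, 1].
import numpy as np
import tensorflow as tf

def adversarial_perturb(im_aligned, encoder, reconstructor, eps):
    X = tf.convert_to_tensor(np.expand_dims(im_aligned, axis=0), dtype=tf.float32)
    with tf.GradientTape() as tape:
        tape.watch(X)
        # ArcFace-style encoders expect 112x112 inputs in [0, 1]
        X_z = encoder(tf.image.resize(X * 0.5 + 0.5, [112, 112]))
        output = reconstructor([X, X_z])
        # push the reconstruction toward a blank target (L1 loss against zeros)
        loss = tf.reduce_mean(tf.abs(0 - output))
    gradient = tf.sign(tape.gradient(loss, X))  # FGSM: keep only the gradient sign
    adv_x = X + eps * gradient                  # single fixed-size step
    return tf.clip_by_value(adv_x, -1, 1)[0]    # stay in the valid pixel range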
@@ -123,19 +124,19 @@ def run_inference(target, source, slider, adv_slider, settings):
 
         slider_weight = slider / 100
 
-        target_z = ArcFace.predict(np.expand_dims(tf.image.resize(im_aligned, [112, 112])
+        target_z = ArcFace.predict(np.expand_dims(tf.image.resize(im_aligned, [112, 112]) * 0.5 + 0.5, axis=0))
         source_z = IDP.predict(target_z)
 
         source_z = slider_weight * source_z + (1 - slider_weight) * target_z
 
     if "reconstruction attack" in settings:
-        source_z = ArcFaceE.predict(np.expand_dims(tf.image.resize(im_aligned, [112, 112])
+        source_z = ArcFaceE.predict(np.expand_dims(tf.image.resize(im_aligned, [112, 112]) * 0.5 + 0.5, axis=0))
 
     # face swap
     if "reconstruction attack" not in settings:
-        changed_face_cage = G.predict([np.expand_dims(
+        changed_face_cage = G.predict([np.expand_dims(im_aligned, axis=0),
                                        source_z])
-        changed_face =
+        changed_face = changed_face_cage[0] * 0.5 + 0.5
 
         # get inverse transformation landmarks
         transformed_lmk = transform_landmark_points(M, lm_align)
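Note: both embedding calls now rescale the aligned face from [-1, 1] back to [0, 1] (* 0.5 + 0.5) before the 112x112 ArcFace resize, matching the new normalization of im_aligned. In the anonymize path, the slider then linearly blends the IDP-predicted identity with the original one. A minimal sketch of that blending, with arcface and id_predictor as hypothetical stand-ins for ArcFace and IDP:

# Minimal sketch of the slider-controlled identity blending used above.
# `arcface` and `id_predictor` stand in for ArcFace and IDP; `im_aligned`
# is assumed to be in [-1, 1] as produced by the alignment step.
import numpy as np
import tensorflow as tf

def anonymized_identity(im_aligned, arcface, id_predictor, slider):
    slider_weight = slider / 100  # 0 keeps the original identity, 1 uses only the IDP output
    face_01 = tf.image.resize(im_aligned, [112, 112]) * 0.5 + 0.5  # back to [0, 1]
    target_z = arcface.predict(np.expand_dims(face_01, axis=0))
    source_z = id_predictor.predict(target_z)
    # linear interpolation between the predicted and the original embedding
    return slider_weight * source_z + (1 - slider_weight) * target_z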
@@ -149,9 +150,9 @@ def run_inference(target, source, slider, adv_slider, settings):
         blend_mask = np.expand_dims(blend_mask, axis=-1)
         total_img = (iim_aligned * blend_mask + total_img * (1 - blend_mask))
     else:
-        changed_face_cage = R.predict([np.expand_dims(
+        changed_face_cage = R.predict([np.expand_dims(im_aligned, axis=0),
                                        source_z])
-        changed_face =
+        changed_face = changed_face_cage[0] * 0.5 + 0.5
 
         # get inverse transformation landmarks
         transformed_lmk = transform_landmark_points(M, lm_align)
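Note: with inputs in [-1, 1], the generator/reconstructor output is mapped back to [0, 1] via changed_face_cage[0] * 0.5 + 0.5 before being warped into the original frame and blended with the mask. A rough standalone sketch of that composite step, assuming the frame is also kept in [0, 1] and using cv2.invertAffineTransform for the inverse warp; the helper name and the frame scaling are assumptions, the hunks above only show the blending line:

# Sketch of the denormalize-and-composite step. `changed_face_cage` is assumed
# to be a batch of generator outputs in [-1, 1], `blend_mask` a HxW float mask
# for the face region, and `frame` the destination image scaled to [0, 1].
import cv2
import numpy as np

def composite_back(changed_face_cage, M, frame, blend_mask):
    changed_face = (changed_face_cage[0] * 0.5 + 0.5).astype(np.float32)  # [-1, 1] -> [0, 1]
    iM = cv2.invertAffineTransform(M)                 # undo the alignment warp
    h, w = frame.shape[:2]
    iim_aligned = cv2.warpAffine(changed_face, iM, (w, h), borderValue=0.0)
    blend_mask = np.expand_dims(blend_mask, axis=-1)  # broadcast over channels
    return iim_aligned * blend_mask + frame * (1 - blend_mask)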