PKUWilliamYang committed
Commit ac1883f
1 Parent(s): ac4ce84
Upload 8 files
Files changed:
- utils/__init__.py +0 -0
- utils/common.py +87 -0
- utils/data_utils.py +25 -0
- utils/inference_utils.py +182 -0
- utils/train_utils.py +13 -0
- utils/wandb_utils.py +47 -0
- webUI/app_task.py +305 -0
- webUI/styleganex_model.py +492 -0
utils/__init__.py
ADDED
File without changes
utils/common.py
ADDED
@@ -0,0 +1,87 @@
import cv2
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt


# Log images
def log_input_image(x, opts):
    if opts.label_nc == 0:
        return tensor2im(x)
    elif opts.label_nc == 1:
        return tensor2sketch(x)
    else:
        return tensor2map(x)


def tensor2im(var):
    var = var.cpu().detach().transpose(0, 2).transpose(0, 1).numpy()
    var = ((var + 1) / 2)
    var[var < 0] = 0
    var[var > 1] = 1
    var = var * 255
    return Image.fromarray(var.astype('uint8'))


def tensor2map(var):
    mask = np.argmax(var.data.cpu().numpy(), axis=0)
    colors = get_colors()
    mask_image = np.ones(shape=(mask.shape[0], mask.shape[1], 3))
    for class_idx in np.unique(mask):
        mask_image[mask == class_idx] = colors[class_idx]
    mask_image = mask_image.astype('uint8')
    return Image.fromarray(mask_image)


def tensor2sketch(var):
    im = var[0].cpu().detach().numpy()
    im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
    im = (im * 255).astype(np.uint8)
    return Image.fromarray(im)


# Visualization utils
def get_colors():
    # currently support up to 19 classes (for the celebs-hq-mask dataset)
    colors = [[0, 0, 0], [204, 0, 0], [76, 153, 0], [204, 204, 0], [51, 51, 255], [204, 0, 204], [0, 255, 255],
              [255, 204, 204], [102, 51, 0], [255, 0, 0], [102, 204, 0], [255, 255, 0], [0, 0, 153], [0, 0, 204],
              [255, 51, 153], [0, 204, 204], [0, 51, 0], [255, 153, 51], [0, 204, 0]]
    return colors


def vis_faces(log_hooks):
    display_count = len(log_hooks)
    fig = plt.figure(figsize=(8, 4 * display_count))
    gs = fig.add_gridspec(display_count, 3)
    for i in range(display_count):
        hooks_dict = log_hooks[i]
        fig.add_subplot(gs[i, 0])
        if 'diff_input' in hooks_dict:
            vis_faces_with_id(hooks_dict, fig, gs, i)
        else:
            vis_faces_no_id(hooks_dict, fig, gs, i)
    plt.tight_layout()
    return fig


def vis_faces_with_id(hooks_dict, fig, gs, i):
    plt.imshow(hooks_dict['input_face'])
    plt.title('Input\nOut Sim={:.2f}'.format(float(hooks_dict['diff_input'])))
    fig.add_subplot(gs[i, 1])
    plt.imshow(hooks_dict['target_face'])
    plt.title('Target\nIn={:.2f}, Out={:.2f}'.format(float(hooks_dict['diff_views']),
                                                     float(hooks_dict['diff_target'])))
    fig.add_subplot(gs[i, 2])
    plt.imshow(hooks_dict['output_face'])
    plt.title('Output\n Target Sim={:.2f}'.format(float(hooks_dict['diff_target'])))


def vis_faces_no_id(hooks_dict, fig, gs, i):
    plt.imshow(hooks_dict['input_face'], cmap="gray")
    plt.title('Input')
    fig.add_subplot(gs[i, 1])
    plt.imshow(hooks_dict['target_face'])
    plt.title('Target')
    fig.add_subplot(gs[i, 2])
    plt.imshow(hooks_dict['output_face'])
    plt.title('Output')
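
A quick usage sketch for the conversion helpers above (not part of the uploaded files; the dummy tensor and output filename are illustrative):

import torch
from utils.common import tensor2im

# Dummy decoder output: a 3x256x256 tensor in [-1, 1].
fake_output = torch.rand(3, 256, 256) * 2 - 1
pil_image = tensor2im(fake_output)  # clamps to [0, 1], scales to [0, 255], returns a PIL.Image
pil_image.save('preview.png')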
utils/data_utils.py
ADDED
@@ -0,0 +1,25 @@
"""
Code adopted from pix2pixHD:
https://github.com/NVIDIA/pix2pixHD/blob/master/data/image_folder.py
"""
import os

IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff'
]


def is_image_file(filename):
    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)


def make_dataset(dir):
    images = []
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    for root, _, fnames in sorted(os.walk(dir)):
        for fname in fnames:
            if is_image_file(fname):
                path = os.path.join(root, fname)
                images.append(path)
    return images
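
For reference, make_dataset is typically called like this (not part of the commit; the directory path below is a placeholder):

from utils.data_utils import make_dataset

# Recursively collect every image path under a (placeholder) folder.
image_paths = sorted(make_dataset('./data/faces'))
print('%d images found' % len(image_paths))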
utils/inference_utils.py
ADDED
@@ -0,0 +1,182 @@
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import cv2
import dlib  # needed by get_video_crop_parameter when a file path is passed
import random
import math
import argparse
import torch
from torch.utils import data
from torch.nn import functional as F
from torch import autograd
from torch.nn import init
import torchvision.transforms as transforms
from scripts.align_all_parallel import get_landmark

def visualize(img_arr, dpi):
    plt.figure(figsize=(10,10), dpi=dpi)
    plt.imshow(((img_arr.detach().cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8))
    plt.axis('off')
    plt.show()

def save_image(img, filename):
    tmp = ((img.detach().cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8)
    cv2.imwrite(filename, cv2.cvtColor(tmp, cv2.COLOR_RGB2BGR))

def load_image(filename):
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])

    img = Image.open(filename)
    img = transform(img)
    return img.unsqueeze(dim=0)

def get_video_crop_parameter(filepath, predictor, padding=[256,256,256,256]):
    if type(filepath) == str:
        img = dlib.load_rgb_image(filepath)
    else:
        img = filepath
    lm = get_landmark(img, predictor)
    if lm is None:
        return None
    lm_chin = lm[0 : 17]  # left-right
    lm_eyebrow_left = lm[17 : 22]  # left-right
    lm_eyebrow_right = lm[22 : 27]  # left-right
    lm_nose = lm[27 : 31]  # top-down
    lm_nostrils = lm[31 : 36]  # top-down
    lm_eye_left = lm[36 : 42]  # left-clockwise
    lm_eye_right = lm[42 : 48]  # left-clockwise
    lm_mouth_outer = lm[48 : 60]  # left-clockwise
    lm_mouth_inner = lm[60 : 68]  # left-clockwise

    scale = 64. / (np.mean(lm_eye_right[:,0]) - np.mean(lm_eye_left[:,0]))
    center = ((np.mean(lm_eye_right, axis=0) + np.mean(lm_eye_left, axis=0)) / 2) * scale
    h, w = round(img.shape[0] * scale), round(img.shape[1] * scale)
    left = max(round(center[0] - padding[0]), 0) // 8 * 8
    right = min(round(center[0] + padding[1]), w) // 8 * 8
    top = max(round(center[1] - padding[2]), 0) // 8 * 8
    bottom = min(round(center[1] + padding[3]), h) // 8 * 8
    return h, w, top, bottom, left, right, scale

def tensor2cv2(img):
    tmp = ((img.cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8)
    return cv2.cvtColor(tmp, cv2.COLOR_RGB2BGR)

def noise_regularize(noises):
    loss = 0

    for noise in noises:
        size = noise.shape[2]

        while True:
            loss = (
                loss
                + (noise * torch.roll(noise, shifts=1, dims=3)).mean().pow(2)
                + (noise * torch.roll(noise, shifts=1, dims=2)).mean().pow(2)
            )

            if size <= 8:
                break

            #noise = noise.reshape([-1, 1, size // 2, 2, size // 2, 2])
            #noise = noise.mean([3, 5])
            noise = F.interpolate(noise, scale_factor=0.5, mode='bilinear')
            size //= 2

    return loss


def noise_normalize_(noises):
    for noise in noises:
        mean = noise.mean()
        std = noise.std()

        noise.data.add_(-mean).div_(std)


def get_lr(t, initial_lr, rampdown=0.25, rampup=0.05):
    lr_ramp = min(1, (1 - t) / rampdown)
    lr_ramp = 0.5 - 0.5 * math.cos(lr_ramp * math.pi)
    lr_ramp = lr_ramp * min(1, t / rampup)

    return initial_lr * lr_ramp


def latent_noise(latent, strength):
    noise = torch.randn_like(latent) * strength

    return latent + noise


def make_image(tensor):
    return (
        tensor.detach()
        .clamp_(min=-1, max=1)
        .add(1)
        .div_(2)
        .mul(255)
        .type(torch.uint8)
        .permute(0, 2, 3, 1)
        .to("cpu")
        .numpy()
    )


# from pix2pixHD
# Converts a one-hot tensor into a colorful label map
def tensor2label(label_tensor, n_label, imtype=np.uint8):
    if n_label == 0:
        return tensor2im(label_tensor, imtype)  # pix2pixHD helper; callers in this repo always pass n_label > 0
    label_tensor = label_tensor.cpu().float()
    if label_tensor.size()[0] > 1:
        label_tensor = label_tensor.max(0, keepdim=True)[1]
    label_tensor = Colorize(n_label)(label_tensor)
    label_numpy = np.transpose(label_tensor.numpy(), (1, 2, 0))
    return label_numpy.astype(imtype)

def uint82bin(n, count=8):
    """returns the binary of integer n, count refers to amount of bits"""
    return ''.join([str((n >> y) & 1) for y in range(count-1, -1, -1)])

def labelcolormap(N):
    if N == 35:  # cityscape
        cmap = np.array([(  0,  0,  0), (  0,  0,  0), (  0,  0,  0), (  0,  0,  0), (  0,  0,  0), (111, 74,  0), ( 81,  0, 81),
                         (128, 64,128), (244, 35,232), (250,170,160), (230,150,140), ( 70, 70, 70), (102,102,156), (190,153,153),
                         (180,165,180), (150,100,100), (150,120, 90), (153,153,153), (153,153,153), (250,170, 30), (220,220,  0),
                         (107,142, 35), (152,251,152), ( 70,130,180), (220, 20, 60), (255,  0,  0), (  0,  0,142), (  0,  0, 70),
                         (  0, 60,100), (  0,  0, 90), (  0,  0,110), (  0, 80,100), (  0,  0,230), (119, 11, 32), (  0,  0,142)],
                        dtype=np.uint8)
    else:
        cmap = np.zeros((N, 3), dtype=np.uint8)
        for i in range(N):
            r, g, b = 0, 0, 0
            id = i
            for j in range(7):
                str_id = uint82bin(id)
                r = r ^ (np.uint8(str_id[-1]) << (7-j))
                g = g ^ (np.uint8(str_id[-2]) << (7-j))
                b = b ^ (np.uint8(str_id[-3]) << (7-j))
                id = id >> 3
            cmap[i, 0] = r
            cmap[i, 1] = g
            cmap[i, 2] = b
    return cmap

class Colorize(object):
    def __init__(self, n=35):
        self.cmap = labelcolormap(n)
        self.cmap = torch.from_numpy(self.cmap[:n])

    def __call__(self, gray_image):
        size = gray_image.size()
        color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0)

        for label in range(0, len(self.cmap)):
            mask = (label == gray_image[0]).cpu()
            color_image[0][mask] = self.cmap[label][0]
            color_image[1][mask] = self.cmap[label][1]
            color_image[2][mask] = self.cmap[label][2]

        return color_image
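
A minimal sketch of the cropping helper in isolation (not part of the commit; it assumes a local copy of dlib's 68-point landmark model and a test image, both paths are placeholders):

import cv2
import dlib
from utils.inference_utils import get_video_crop_parameter

predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')  # placeholder path
frame = cv2.cvtColor(cv2.imread('face.jpg'), cv2.COLOR_BGR2RGB)            # placeholder image

paras = get_video_crop_parameter(frame, predictor)
if paras is not None:
    h, w, top, bottom, left, right, scale = paras
    # Rescale so the eye distance is ~64 px, then take the 8-pixel-aligned face crop.
    crop = cv2.resize(frame, (w, h))[top:bottom, left:right]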
utils/train_utils.py
ADDED
@@ -0,0 +1,13 @@
def aggregate_loss_dict(agg_loss_dict):
    mean_vals = {}
    for output in agg_loss_dict:
        for key in output:
            mean_vals[key] = mean_vals.setdefault(key, []) + [output[key]]
    for key in mean_vals:
        if len(mean_vals[key]) > 0:
            mean_vals[key] = sum(mean_vals[key]) / len(mean_vals[key])
        else:
            print('{} has no value'.format(key))
            mean_vals[key] = 0
    return mean_vals
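
aggregate_loss_dict simply averages each key across a list of per-batch loss dicts; a tiny illustration with made-up numbers (not part of the commit):

from utils.train_utils import aggregate_loss_dict

batch_losses = [{'loss': 0.8, 'loss_id': 0.2}, {'loss': 0.6, 'loss_id': 0.4}]
print(aggregate_loss_dict(batch_losses))  # averages each key: roughly {'loss': 0.7, 'loss_id': 0.3}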
utils/wandb_utils.py
ADDED
@@ -0,0 +1,47 @@
import datetime
import os
import numpy as np
import wandb

from utils import common


class WBLogger:

    def __init__(self, opts):
        wandb_run_name = os.path.basename(opts.exp_dir)
        wandb.init(project="pixel2style2pixel", config=vars(opts), name=wandb_run_name)

    @staticmethod
    def log_best_model():
        wandb.run.summary["best-model-save-time"] = datetime.datetime.now()

    @staticmethod
    def log(prefix, metrics_dict, global_step):
        log_dict = {f'{prefix}_{key}': value for key, value in metrics_dict.items()}
        log_dict["global_step"] = global_step
        wandb.log(log_dict)

    @staticmethod
    def log_dataset_wandb(dataset, dataset_name, n_images=16):
        idxs = np.random.choice(a=range(len(dataset)), size=n_images, replace=False)
        data = [wandb.Image(dataset.source_paths[idx]) for idx in idxs]
        wandb.log({f"{dataset_name} Data Samples": data})

    @staticmethod
    def log_images_to_wandb(x, y, y_hat, id_logs, prefix, step, opts):
        im_data = []
        column_names = ["Source", "Target", "Output"]
        if id_logs is not None:
            column_names.append("ID Diff Output to Target")
        for i in range(len(x)):
            cur_im_data = [
                wandb.Image(common.log_input_image(x[i], opts)),
                wandb.Image(common.tensor2im(y[i])),
                wandb.Image(common.tensor2im(y_hat[i])),
            ]
            if id_logs is not None:
                cur_im_data.append(id_logs[i]["diff_target"])
            im_data.append(cur_im_data)
        outputs_table = wandb.Table(data=im_data, columns=column_names)
        wandb.log({f"{prefix.title()} Step {step} Output Samples": outputs_table})
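
A hypothetical training-side call pattern (not part of the commit; it requires a configured wandb account, and the option names below are illustrative):

from argparse import Namespace
from utils.wandb_utils import WBLogger

opts = Namespace(exp_dir='experiments/styleganex_run', learning_rate=1e-4)  # illustrative options
logger = WBLogger(opts)  # calls wandb.init under the hood
logger.log(prefix='train', metrics_dict={'loss': 0.42}, global_step=100)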
webUI/app_task.py
ADDED
@@ -0,0 +1,305 @@
from __future__ import annotations
from huggingface_hub import hf_hub_download
import numpy as np
import gradio as gr


def create_demo_sr(process):
    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown('## Face Super Resolution')
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(source='upload', type='filepath')
                model_type = gr.Radio(label='Model Type', choices=['SR for 32x', 'SR for 4x-48x'], value='SR for 32x')
                resize_scale = gr.Slider(label='Resize Scale',
                                         minimum=4,
                                         maximum=48,
                                         value=32,
                                         step=4)
                run_button = gr.Button(label='Run')
                gr.Examples(
                    examples=[['pexels-daniel-xavier-1239291.jpg', 'SR for 32x', 32],
                              ['ILip77SbmOE.png', 'SR for 32x', 32],
                              ['ILip77SbmOE.png', 'SR for 4x-48x', 48],
                              ],
                    inputs=[input_image, model_type, resize_scale],
                )
            with gr.Column():
                #lrinput = gr.Image(label='Low-resolution input',type='numpy', interactive=False)
                #result = gr.Image(label='Output',type='numpy', interactive=False)
                result = gr.Gallery(label='LR input and Output',
                                    elem_id='gallery').style(grid=2,
                                                             height='auto')

        inputs = [
            input_image,
            resize_scale,
            model_type,
        ]
        run_button.click(fn=process,
                         inputs=inputs,
                         outputs=[result],
                         api_name='sr')
    return demo

def create_demo_s2f(process):
    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown('## Sketch-to-Face Translation')
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(source='upload', type='filepath')
                gr.Markdown("""Note: Input will be cropped if larger than 512x512.""")
                seed = gr.Slider(label='Seed for appearance',
                                 minimum=0,
                                 maximum=2147483647,
                                 step=1,
                                 randomize=True)
                #input_info = gr.Textbox(label='Process Information', interactive=False, value='n.a.')
                run_button = gr.Button(label='Run')
                gr.Examples(
                    examples=[['234_sketch.jpg', 1024]],
                    inputs=[input_image, seed],
                )
            with gr.Column():
                result = gr.Image(label='Output', type='numpy', interactive=False)

        inputs = [
            input_image, seed
        ]
        run_button.click(fn=process,
                         inputs=inputs,
                         outputs=[result],
                         api_name='s2f')
    return demo


def create_demo_m2f(process):
    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown('## Mask-to-Face Translation')
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(source='upload', type='filepath')
                input_type = gr.Radio(label='Input Type', choices=['color image', 'parsing mask'], value='color image')
                seed = gr.Slider(label='Seed for appearance',
                                 minimum=0,
                                 maximum=2147483647,
                                 step=1,
                                 randomize=True)
                #input_info = gr.Textbox(label='Process Information', interactive=False, value='n.a.')
                run_button = gr.Button(label='Run')
                gr.Examples(
                    examples=[['ILip77SbmOE.png', 'color image', 4], ['ILip77SbmOE_mask.png', 'parsing mask', 4]],
                    inputs=[input_image, input_type, seed],
                )
            with gr.Column():
                #vizmask = gr.Image(label='Visualized mask',type='numpy', interactive=False)
                #result = gr.Image(label='Output',type='numpy', interactive=False)
                result = gr.Gallery(label='Visualized mask and Output',
                                    elem_id='gallery').style(grid=2,
                                                             height='auto')

        inputs = [
            input_image, input_type, seed
        ]
        run_button.click(fn=process,
                         inputs=inputs,
                         outputs=[result],
                         api_name='m2f')
    return demo

def create_demo_editing(process):
    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown('## Video Face Editing (for image input)')
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(source='upload', type='filepath')
                model_type = gr.Radio(label='Editing Type', choices=['reduce age', 'light hair color'], value='reduce age')
                scale_factor = gr.Slider(label='editing degree (-2~2)',
                                         minimum=-2,
                                         maximum=2,
                                         value=1,
                                         step=0.1)
                #input_info = gr.Textbox(label='Process Information', interactive=False, value='n.a.')
                run_button = gr.Button(label='Run')
                gr.Examples(
                    examples=[['ILip77SbmOE.png', 'reduce age', -2],
                              ['ILip77SbmOE.png', 'light hair color', 1]],
                    inputs=[input_image, model_type, scale_factor],
                )
            with gr.Column():
                result = gr.Image(label='Output', type='numpy', interactive=False)

        inputs = [
            input_image, scale_factor, model_type
        ]
        run_button.click(fn=process,
                         inputs=inputs,
                         outputs=[result],
                         api_name='editing')
    return demo

def create_demo_toonify(process):
    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown('## Video Face Toonification (for image input)')
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(source='upload', type='filepath')
                style_type = gr.Radio(label='Style Type', choices=['Pixar', 'Cartoon', 'Arcane'], value='Pixar')
                #input_info = gr.Textbox(label='Process Information', interactive=False, value='n.a.')
                run_button = gr.Button(label='Run')
                gr.Examples(
                    examples=[['ILip77SbmOE.png', 'Pixar'], ['ILip77SbmOE.png', 'Cartoon'], ['ILip77SbmOE.png', 'Arcane']],
                    inputs=[input_image, style_type],
                )
            with gr.Column():
                result = gr.Image(label='Output', type='numpy', interactive=False)

        inputs = [
            input_image, style_type
        ]
        run_button.click(fn=process,
                         inputs=inputs,
                         outputs=[result],
                         api_name='toonify')
    return demo


def create_demo_vediting(process, max_frame_num=4):
    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown('## Video Face Editing (for video input)')
        with gr.Row():
            with gr.Column():
                input_video = gr.Video(source='upload', mirror_webcam=False, type='filepath')
                model_type = gr.Radio(label='Editing Type', choices=['reduce age', 'light hair color'], value='reduce age')
                scale_factor = gr.Slider(label='editing degree (-2~2)',
                                         minimum=-2,
                                         maximum=2,
                                         value=1,
                                         step=0.1)
                frame_num = gr.Slider(label='Number of frames to edit (full video editing is not allowed so as not to slow down the demo, \
                                      but you can duplicate the Space to modify the number limit from 4 to a large value)',
                                      minimum=1,
                                      maximum=max_frame_num,
                                      value=2,
                                      step=1)
                #input_info = gr.Textbox(label='Process Information', interactive=False, value='n.a.')
                run_button = gr.Button(label='Run')
                gr.Examples(
                    examples=[['684.mp4', 'reduce age', 1.5, 2],
                              ['684.mp4', 'light hair color', 0.7, 2]],
                    inputs=[input_video, model_type, scale_factor, frame_num],
                )
            with gr.Column():
                viz_result = gr.Gallery(label='Several edited frames', elem_id='gallery').style(grid=2, height='auto')
                result = gr.Video(label='Output', type='mp4', interactive=False)

        inputs = [
            input_video, scale_factor, model_type, frame_num
        ]
        run_button.click(fn=process,
                         inputs=inputs,
                         outputs=[viz_result, result],
                         api_name='vediting')
    return demo

def create_demo_vtoonify(process, max_frame_num=4):
    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown('## Video Face Toonification (for video input)')
        with gr.Row():
            with gr.Column():
                input_video = gr.Video(source='upload', mirror_webcam=False, type='filepath')
                style_type = gr.Radio(label='Style Type', choices=['Pixar', 'Cartoon', 'Arcane'], value='Pixar')
                frame_num = gr.Slider(label='Number of frames to toonify (full video toonification is not allowed so as not to slow down the demo, \
                                      but you can duplicate the Space to modify the number limit from 4 to a large value)',
                                      minimum=1,
                                      maximum=max_frame_num,
                                      value=2,
                                      step=1)
                #input_info = gr.Textbox(label='Process Information', interactive=False, value='n.a.')
                run_button = gr.Button(label='Run')
                gr.Examples(
                    examples=[['529_2.mp4', 'Arcane'],
                              ['pexels-anthony-shkraba-production-8136210.mp4', 'Pixar'],
                              ['684.mp4', 'Cartoon']],
                    inputs=[input_video, style_type],
                )
            with gr.Column():
                viz_result = gr.Gallery(label='Several toonified frames', elem_id='gallery').style(grid=2, height='auto')
                result = gr.Video(label='Output', type='mp4', interactive=False)

        inputs = [
            input_video, style_type, frame_num
        ]
        run_button.click(fn=process,
                         inputs=inputs,
                         outputs=[viz_result, result],
                         api_name='vtoonify')
    return demo

def create_demo_inversion(process, allow_optimization=False):
    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown('## StyleGANEX Inversion for Editing')
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(source='upload', type='filepath')
                optimize = gr.Radio(label='Whether optimize latent (latent optimization is not allowed so as not to slow down the demo, \
                                    but you can duplicate the Space to modify the option or directly upload an optimized latent file. \
                                    The file can be computed by inversion.py from the github page or colab)',
                                    choices=['No optimization', 'Latent optimization'],
                                    value='No optimization', interactive=allow_optimization)
                input_latent = gr.File(label='Optimized latent code (optional)', file_types=[".pt"])
                editing_options = gr.Dropdown(['None', 'Style Mixing',
                                               'Attribute Editing: smile',
                                               'Attribute Editing: open_eye',
                                               'Attribute Editing: open_mouth',
                                               'Attribute Editing: pose',
                                               'Attribute Editing: reduce_age',
                                               'Attribute Editing: glasses',
                                               'Attribute Editing: light_hair_color',
                                               'Attribute Editing: slender',
                                               'Domain Transfer: disney_princess',
                                               'Domain Transfer: vintage_comics',
                                               'Domain Transfer: pixar',
                                               'Domain Transfer: edvard_munch',
                                               'Domain Transfer: modigliani',
                                               ],
                                              label="editing options",
                                              value='None')
                scale_factor = gr.Slider(label='editing degree (-2~2) for Attribute Editing',
                                         minimum=-2,
                                         maximum=2,
                                         value=2,
                                         step=0.1)
                seed = gr.Slider(label='Appearance Seed for Style Mixing',
                                 minimum=0,
                                 maximum=2147483647,
                                 step=1,
                                 randomize=True)
                #input_info = gr.Textbox(label='Process Information', interactive=False, value='n.a.')
                run_button = gr.Button(label='Run')
                gr.Examples(
                    examples=[['ILip77SbmOE.png', 'ILip77SbmOE_inversion.pt', 'Domain Transfer: vintage_comics'],
                              ['ILip77SbmOE.png', 'ILip77SbmOE_inversion.pt', 'Attribute Editing: smile'],
                              ['ILip77SbmOE.png', 'ILip77SbmOE_inversion.pt', 'Style Mixing'],
                              ],
                    inputs=[input_image, input_latent, editing_options],
                )
            with gr.Column():
                result = gr.Image(label='Inversion output', type='numpy', interactive=False)
                editing_result = gr.Image(label='Editing output', type='numpy', interactive=False)

        inputs = [
            input_image, optimize, input_latent, editing_options, scale_factor, seed
        ]
        run_button.click(fn=process,
                         inputs=inputs,
                         outputs=[result, editing_result],
                         api_name='inversion')
    return demo
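
These factories only build the UI; each expects a callable that does the actual processing. A plausible wiring sketch, not part of the uploaded files, assuming the Space's app.py combines the tabs roughly like this (tab titles are illustrative):

import torch
import gradio as gr

from webUI.app_task import create_demo_sr, create_demo_toonify
from webUI.styleganex_model import Model

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = Model(device)

with gr.Blocks() as app:
    with gr.Tabs():
        with gr.TabItem('Face Super Resolution'):
            create_demo_sr(model.process_sr)
        with gr.TabItem('Toonification (image)'):
            create_demo_toonify(model.process_toonify)

app.launch()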
webUI/styleganex_model.py
ADDED
@@ -0,0 +1,492 @@
from __future__ import annotations
import numpy as np
import gradio as gr

import os
import pathlib
import gc
import torch
import dlib
import cv2
import PIL
from tqdm import tqdm
import numpy as np
import torch.nn.functional as F
import torchvision
from torchvision import transforms, utils
from argparse import Namespace
from datasets import augmentations
from huggingface_hub import hf_hub_download
from scripts.align_all_parallel import align_face
from latent_optimization import latent_optimization
from utils.inference_utils import save_image, load_image, visualize, get_video_crop_parameter, tensor2cv2, tensor2label, labelcolormap
from models.psp import pSp
from models.bisenet.model import BiSeNet
from models.stylegan2.model import Generator

class Model():
    def __init__(self, device):
        super().__init__()

        self.device = device
        self.task_name = None
        self.editing_w = None
        self.pspex = None
        self.landmarkpredictor = dlib.shape_predictor(hf_hub_download('PKUWilliamYang/VToonify', 'models/shape_predictor_68_face_landmarks.dat'))
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
        ])
        self.to_tensor = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ])
        self.maskpredictor = BiSeNet(n_classes=19)
        self.maskpredictor.load_state_dict(torch.load(hf_hub_download('PKUWilliamYang/VToonify', 'models/faceparsing.pth'), map_location='cpu'))
        self.maskpredictor.to(self.device).eval()
        self.parameters = {}
        self.parameters['inversion'] = {'path':'pretrained_models/styleganex_inversion.pt', 'image_path':'./data/ILip77SbmOE.png'}
        self.parameters['sr-32'] = {'path':'pretrained_models/styleganex_sr32.pt', 'image_path':'./data/pexels-daniel-xavier-1239291.jpg'}
        self.parameters['sr'] = {'path':'pretrained_models/styleganex_sr.pt', 'image_path':'./data/pexels-daniel-xavier-1239291.jpg'}
        self.parameters['sketch2face'] = {'path':'pretrained_models/styleganex_sketch2face.pt', 'image_path':'./data/234_sketch.jpg'}
        self.parameters['mask2face'] = {'path':'pretrained_models/styleganex_mask2face.pt', 'image_path':'./data/540.jpg'}
        self.parameters['edit_age'] = {'path':'pretrained_models/styleganex_edit_age.pt', 'image_path':'./data/390.mp4'}
        self.parameters['edit_hair'] = {'path':'pretrained_models/styleganex_edit_hair.pt', 'image_path':'./data/390.mp4'}
        self.parameters['toonify_pixar'] = {'path':'pretrained_models/styleganex_toonify_pixar.pt', 'image_path':'./data/pexels-anthony-shkraba-production-8136210.mp4'}
        self.parameters['toonify_cartoon'] = {'path':'pretrained_models/styleganex_toonify_cartoon.pt', 'image_path':'./data/pexels-anthony-shkraba-production-8136210.mp4'}
        self.parameters['toonify_arcane'] = {'path':'pretrained_models/styleganex_toonify_arcane.pt', 'image_path':'./data/pexels-anthony-shkraba-production-8136210.mp4'}
        self.print_log = True
        self.editing_dicts = torch.load(hf_hub_download('PKUWilliamYang/StyleGANEX', 'direction_dics.pt'))
        self.generator = Generator(1024, 512, 8)
        self.model_type = None
        self.error_info = 'Error: no face detected! \
            StyleGANEX uses dlib.get_frontal_face_detector but sometimes it fails to detect a face. \
            You can try several times or use other images until a face is detected, \
            then switch back to the original image.'

    def load_model(self, task_name: str) -> None:
        if task_name == self.task_name:
            return
        if self.pspex is not None:
            del self.pspex
            torch.cuda.empty_cache()
            gc.collect()
        path = self.parameters[task_name]['path']
        local_path = hf_hub_download('PKUWilliamYang/StyleGANEX', path)
        ckpt = torch.load(local_path, map_location='cpu')
        opts = ckpt['opts']
        opts['checkpoint_path'] = local_path
        opts['device'] = self.device
        opts = Namespace(**opts)
        self.pspex = pSp(opts, ckpt).to(self.device).eval()
        self.pspex.latent_avg = self.pspex.latent_avg.to(self.device)
        if 'editing_w' in ckpt.keys():
            self.editing_w = ckpt['editing_w'].clone().to(self.device)
        self.task_name = task_name
        torch.cuda.empty_cache()
        gc.collect()

    def load_G_model(self, model_type: str) -> None:
        if model_type == self.model_type:
            return
        torch.cuda.empty_cache()
        gc.collect()
        local_path = hf_hub_download('rinong/stylegan-nada-models', model_type+'.pt')
        self.generator.load_state_dict(torch.load(local_path, map_location='cpu')['g_ema'], strict=False)
        self.generator.to(self.device).eval()
        self.model_type = model_type
        torch.cuda.empty_cache()
        gc.collect()

    def tensor2np(self, img):
        tmp = ((img.cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8)
        return tmp

    def process_sr(self, input_image: str, resize_scale: int, model: str) -> list[np.ndarray]:
        #false_image = np.zeros((256,256,3), np.uint8)
        #info = 'Error: no face detected! Please retry or change the photo.'

        if input_image is None:
            #return [false_image, false_image], 'Error: fail to load empty file.'
            raise gr.Error("Error: fail to load empty file.")
        frame = cv2.imread(input_image)
        if frame is None:
            #return [false_image, false_image], 'Error: fail to load the image.'
            raise gr.Error("Error: fail to load the image.")
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        if model is None or model == 'SR for 32x':
            task_name = 'sr-32'
            resize_scale = 32
        else:
            task_name = 'sr'

        with torch.no_grad():
            paras = get_video_crop_parameter(frame, self.landmarkpredictor)
            if paras is None:
                #return [false_image, false_image], info
                raise gr.Error(self.error_info)
            h, w, top, bottom, left, right, scale = paras
            H, W = int(bottom-top), int(right-left)
            frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
            x1 = PIL.Image.fromarray(np.uint8(frame))
            x1 = augmentations.BilinearResize(factors=[resize_scale//4])(x1)
            x1_up = x1.resize((W, H))
            x2_up = align_face(np.array(x1_up), self.landmarkpredictor)
            if x2_up is None:
                #return [false_image, false_image], 'Error: no face detected! Please retry or change the photo.'
                raise gr.Error(self.error_info)
            x1_up = transforms.ToTensor()(x1_up).unsqueeze(dim=0).to(self.device) * 2 - 1
            x2_up = self.transform(x2_up).unsqueeze(dim=0).to(self.device)
            if self.print_log: print('image loaded')
            self.load_model(task_name)
            if self.print_log: print('model %s loaded'%(task_name))
            y_hat = torch.clamp(self.pspex(x1=x1_up, x2=x2_up, use_skip=self.pspex.opts.use_skip, resize=False), -1, 1)

        return [self.tensor2np(x1_up[0]), self.tensor2np(y_hat[0])]


    def process_s2f(self, input_image: str, seed: int) -> np.ndarray:
        task_name = 'sketch2face'
        with torch.no_grad():
            x1 = transforms.ToTensor()(PIL.Image.open(input_image)).unsqueeze(0).to(self.device)
            if x1.shape[2] > 513:
                x1 = x1[:,:,(x1.shape[2]//2-256)//8*8:(x1.shape[2]//2+256)//8*8]
            if x1.shape[3] > 513:
                x1 = x1[:,:,:,(x1.shape[3]//2-256)//8*8:(x1.shape[3]//2+256)//8*8]
            x1 = x1[:,0:1]  # uploaded files will be transformed to 3-channel RGB image!
            if self.print_log: print('image loaded')
            self.load_model(task_name)
            if self.print_log: print('model %s loaded'%(task_name))
            self.pspex.train()
            torch.manual_seed(seed)
            y_hat = self.pspex(x1=x1, resize=False, latent_mask=[8,9,10,11,12,13,14,15,16,17], use_skip=self.pspex.opts.use_skip,
                               inject_latent=self.pspex.decoder.style(torch.randn(1, 512).to(self.device)).unsqueeze(1).repeat(1,18,1) * 0.7)
            y_hat = torch.clamp(y_hat, -1, 1)
            self.pspex.eval()
            return self.tensor2np(y_hat[0])

    def process_m2f(self, input_image: str, input_type: str, seed: int) -> list[np.ndarray]:
        #false_image = np.zeros((256,256,3), np.uint8)
        if input_image is None:
            raise gr.Error('Error: fail to load empty file.')
            #return [false_image, false_image], 'Error: fail to load empty file.'
        task_name = 'mask2face'
        with torch.no_grad():
            if input_type == 'parsing mask':
                x1 = PIL.Image.open(input_image).getchannel(0)  # uploaded files will be transformed to 3-channel RGB image!
                x1 = augmentations.ToOneHot(19)(x1)
                x1 = transforms.ToTensor()(x1).unsqueeze(dim=0).float().to(self.device)
                #print(x1.shape)
            else:
                frame = cv2.imread(input_image)
                if frame is None:
                    #return [false_image, false_image], 'Error: fail to load the image.'
                    raise gr.Error('Error: fail to load the image.')
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                paras = get_video_crop_parameter(frame, self.landmarkpredictor)
                if paras is None:
                    #return [false_image, false_image], 'Error: no face detected! Please retry or change the photo.'
                    raise gr.Error(self.error_info)
                h, w, top, bottom, left, right, scale = paras
                H, W = int(bottom-top), int(right-left)
                frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
                # convert face image to segmentation mask
                x1 = self.to_tensor(frame).unsqueeze(0).to(self.device)
                # upsample image for precise segmentation
                x1 = F.interpolate(x1, scale_factor=2, mode='bilinear')
                x1 = self.maskpredictor(x1)[0]
                x1 = F.interpolate(x1, scale_factor=0.5).argmax(dim=1)
                x1 = F.one_hot(x1, num_classes=19).permute(0, 3, 1, 2).float().to(self.device)

            if x1.shape[2] > 513:
                x1 = x1[:,:,(x1.shape[2]//2-256)//8*8:(x1.shape[2]//2+256)//8*8]
            if x1.shape[3] > 513:
                x1 = x1[:,:,:,(x1.shape[3]//2-256)//8*8:(x1.shape[3]//2+256)//8*8]

            x1_viz = (tensor2label(x1[0], 19) / 192 * 256).astype(np.uint8)

            if self.print_log: print('image loaded')
            self.load_model(task_name)
            if self.print_log: print('model %s loaded'%(task_name))
            self.pspex.train()
            torch.manual_seed(seed)
            y_hat = self.pspex(x1=x1, resize=False, latent_mask=[8,9,10,11,12,13,14,15,16,17], use_skip=self.pspex.opts.use_skip,
                               inject_latent=self.pspex.decoder.style(torch.randn(1, 512).to(self.device)).unsqueeze(1).repeat(1,18,1) * 0.7)
            y_hat = torch.clamp(y_hat, -1, 1)
            self.pspex.eval()
            return [x1_viz, self.tensor2np(y_hat[0])]


    def process_editing(self, input_image: str, scale_factor: float, model_type: str) -> np.ndarray:
        #false_image = np.zeros((256,256,3), np.uint8)
        #info = 'Error: no face detected! Please retry or change the photo.'

        if input_image is None:
            #return false_image, false_image, 'Error: fail to load empty file.'
            raise gr.Error('Error: fail to load empty file.')
        frame = cv2.imread(input_image)
        if frame is None:
            #return false_image, false_image, 'Error: fail to load the image.'
            raise gr.Error('Error: fail to load the image.')
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        if model_type is None or model_type == 'reduce age':
            task_name = 'edit_age'
        else:
            task_name = 'edit_hair'

        with torch.no_grad():
            paras = get_video_crop_parameter(frame, self.landmarkpredictor)
            if paras is None:
                #return false_image, false_image, info
                raise gr.Error(self.error_info)
            h, w, top, bottom, left, right, scale = paras
            H, W = int(bottom-top), int(right-left)
            frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
            x1 = self.transform(frame).unsqueeze(0).to(self.device)
            x2 = align_face(frame, self.landmarkpredictor)
            if x2 is None:
                #return false_image, 'Error: no face detected! Please retry or change the photo.'
                raise gr.Error(self.error_info)
            x2 = self.transform(x2).unsqueeze(dim=0).to(self.device)
            if self.print_log: print('image loaded')
            self.load_model(task_name)
            if self.print_log: print('model %s loaded'%(task_name))
            y_hat = self.pspex(x1=x1, x2=x2, use_skip=self.pspex.opts.use_skip, zero_noise=True,
                               resize=False, editing_w=-scale_factor * self.editing_w[0:1])
            y_hat = torch.clamp(y_hat, -1, 1)

        return self.tensor2np(y_hat[0])

    def process_vediting(self, input_video: str, scale_factor: float, model_type: str, frame_num: int) -> tuple[list[np.ndarray], str]:
        #false_image = np.zeros((256,256,3), np.uint8)
        #info = 'Error: no face detected! Please retry or change the video.'

        if input_video is None:
            #return [false_image], 'default.mp4', 'Error: fail to load empty file.'
            raise gr.Error('Error: fail to load empty file.')
        video_cap = cv2.VideoCapture(input_video)
        success, frame = video_cap.read()
        if success is False:
            #return [false_image], 'default.mp4', 'Error: fail to load the video.'
            raise gr.Error('Error: fail to load the video.')
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        if model_type is None or model_type == 'reduce age':
            task_name = 'edit_age'
        else:
            task_name = 'edit_hair'

        with torch.no_grad():
            paras = get_video_crop_parameter(frame, self.landmarkpredictor)
            if paras is None:
                #return [false_image], 'default.mp4', info
                raise gr.Error(self.error_info)
            h, w, top, bottom, left, right, scale = paras
            H, W = int(bottom-top), int(right-left)
            frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
            x1 = self.transform(frame).unsqueeze(0).to(self.device)
            x2 = align_face(frame, self.landmarkpredictor)
            if x2 is None:
                #return [false_image], 'default.mp4', info
                raise gr.Error(self.error_info)
            x2 = self.transform(x2).unsqueeze(dim=0).to(self.device)
            if self.print_log: print('first frame loaded')
            self.load_model(task_name)
            if self.print_log: print('model %s loaded'%(task_name))

            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            videoWriter = cv2.VideoWriter('output.mp4', fourcc, video_cap.get(5), (4*W, 4*H))

            viz_frames = []
            for i in range(frame_num):
                if i > 0:
                    success, frame = video_cap.read()
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
                    x1 = self.transform(frame).unsqueeze(0).to(self.device)
                y_hat = self.pspex(x1=x1, x2=x2, use_skip=self.pspex.opts.use_skip, zero_noise=True,
                                   resize=False, editing_w=-scale_factor * self.editing_w[0:1])
                y_hat = torch.clamp(y_hat, -1, 1)
                videoWriter.write(tensor2cv2(y_hat[0].cpu()))
                if i < min(frame_num, 4):
                    viz_frames += [self.tensor2np(y_hat[0])]

            videoWriter.release()

        return viz_frames, 'output.mp4'


    def process_toonify(self, input_image: str, style_type: str) -> np.ndarray:
        #false_image = np.zeros((256,256,3), np.uint8)
        #info = 'Error: no face detected! Please retry or change the photo.'

        if input_image is None:
            raise gr.Error('Error: fail to load empty file.')
            #return false_image, false_image, 'Error: fail to load empty file.'
        frame = cv2.imread(input_image)
        if frame is None:
            raise gr.Error('Error: fail to load the image.')
            #return false_image, false_image, 'Error: fail to load the image.'
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        if style_type is None or style_type == 'Pixar':
            task_name = 'toonify_pixar'
        elif style_type == 'Cartoon':
            task_name = 'toonify_cartoon'
        else:
            task_name = 'toonify_arcane'

        with torch.no_grad():
            paras = get_video_crop_parameter(frame, self.landmarkpredictor)
            if paras is None:
                raise gr.Error(self.error_info)
                #return false_image, false_image, info
            h, w, top, bottom, left, right, scale = paras
            H, W = int(bottom-top), int(right-left)
            frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
            x1 = self.transform(frame).unsqueeze(0).to(self.device)
            x2 = align_face(frame, self.landmarkpredictor)
            if x2 is None:
                raise gr.Error(self.error_info)
                #return false_image, 'Error: no face detected! Please retry or change the photo.'
            x2 = self.transform(x2).unsqueeze(dim=0).to(self.device)
            if self.print_log: print('image loaded')
            self.load_model(task_name)
            if self.print_log: print('model %s loaded'%(task_name))
            y_hat = self.pspex(x1=x1, x2=x2, use_skip=self.pspex.opts.use_skip, zero_noise=True, resize=False)
            y_hat = torch.clamp(y_hat, -1, 1)

        # single image output, matching the annotation and the single output wired in create_demo_toonify
        return self.tensor2np(y_hat[0])


    def process_vtoonify(self, input_video: str, style_type: str, frame_num: int) -> tuple[list[np.ndarray], str]:
        #false_image = np.zeros((256,256,3), np.uint8)
        #info = 'Error: no face detected! Please retry or change the video.'

        if input_video is None:
            raise gr.Error('Error: fail to load empty file.')
            #return [false_image], 'default.mp4', 'Error: fail to load empty file.'
        video_cap = cv2.VideoCapture(input_video)
        success, frame = video_cap.read()
        if success is False:
            raise gr.Error('Error: fail to load the video.')
            #return [false_image], 'default.mp4', 'Error: fail to load the video.'
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        if style_type is None or style_type == 'Pixar':
            task_name = 'toonify_pixar'
        elif style_type == 'Cartoon':
            task_name = 'toonify_cartoon'
        else:
            task_name = 'toonify_arcane'

        with torch.no_grad():
            paras = get_video_crop_parameter(frame, self.landmarkpredictor)
            if paras is None:
                raise gr.Error(self.error_info)
                #return [false_image], 'default.mp4', info
            h, w, top, bottom, left, right, scale = paras
            H, W = int(bottom-top), int(right-left)
            frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
            x1 = self.transform(frame).unsqueeze(0).to(self.device)
            x2 = align_face(frame, self.landmarkpredictor)
            if x2 is None:
                raise gr.Error(self.error_info)
                #return [false_image], 'default.mp4', info
            x2 = self.transform(x2).unsqueeze(dim=0).to(self.device)
            if self.print_log: print('first frame loaded')
            self.load_model(task_name)
            if self.print_log: print('model %s loaded'%(task_name))

            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            videoWriter = cv2.VideoWriter('output.mp4', fourcc, video_cap.get(5), (4*W, 4*H))

            viz_frames = []
            for i in range(frame_num):
                if i > 0:
                    success, frame = video_cap.read()
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
                    x1 = self.transform(frame).unsqueeze(0).to(self.device)
                y_hat = self.pspex(x1=x1, x2=x2, use_skip=self.pspex.opts.use_skip, zero_noise=True, resize=False)
                y_hat = torch.clamp(y_hat, -1, 1)
                videoWriter.write(tensor2cv2(y_hat[0].cpu()))
                if i < min(frame_num, 4):
                    viz_frames += [self.tensor2np(y_hat[0])]

            videoWriter.release()

        return viz_frames, 'output.mp4'


    def process_inversion(self, input_image: str, optimize: str, input_latent, editing_options: str,
                          scale_factor: float, seed: int) -> tuple[np.ndarray, np.ndarray]:
        # input_latent is the temporary file object returned by gr.File (or None)
        #false_image = np.zeros((256,256,3), np.uint8)
        #info = 'Error: no face detected! Please retry or change the photo.'

        if input_image is None:
            raise gr.Error('Error: fail to load empty file.')
            #return false_image, false_image, 'Error: fail to load empty file.'
        frame = cv2.imread(input_image)
        if frame is None:
            raise gr.Error('Error: fail to load the image.')
            #return false_image, false_image, 'Error: fail to load the image.'
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        task_name = 'inversion'
        self.load_model(task_name)
        if self.print_log: print('model %s loaded'%(task_name))
        if input_latent is not None:
            if '.pt' not in input_latent.name:
                raise gr.Error('Error: the latent format is wrong')
                #return false_image, false_image, 'Error: the latent format is wrong'
            latents = torch.load(input_latent.name)
            if 'wplus' not in latents.keys() or 'f' not in latents.keys():
                raise gr.Error('Error: the latent format is wrong')
                #return false_image, false_image, 'Error: the latent format is wrong'
            wplus = latents['wplus'].to(self.device)  # w+
            f = [latents['f'][0].to(self.device)]  # f
        elif optimize == 'Latent optimization':
            wplus, f, _, _, _ = latent_optimization(frame, self.pspex, self.landmarkpredictor,
                                                    step=500, device=self.device)
        else:
            with torch.no_grad():
                paras = get_video_crop_parameter(frame, self.landmarkpredictor)
                if paras is None:
                    raise gr.Error(self.error_info)
                    #return false_image, false_image, info
                h, w, top, bottom, left, right, scale = paras
                H, W = int(bottom-top), int(right-left)
                frame = cv2.resize(frame, (w, h))[top:bottom, left:right]
                x1 = self.transform(frame).unsqueeze(0).to(self.device)
                x2 = align_face(frame, self.landmarkpredictor)
                if x2 is None:
                    raise gr.Error(self.error_info)
                    #return false_image, false_image, 'Error: no face detected! Please retry or change the photo.'
                x2 = self.transform(x2).unsqueeze(dim=0).to(self.device)
                if self.print_log: print('image loaded')
                wplus = self.pspex.encoder(x2) + self.pspex.latent_avg.unsqueeze(0)
                _, f = self.pspex.encoder(x1, return_feat=True)

        with torch.no_grad():
            y_hat, _ = self.pspex.decoder([wplus], input_is_latent=True, first_layer_feature=f)
            y_hat = torch.clamp(y_hat, -1, 1)

            if 'Style Mixing' in editing_options:
                torch.manual_seed(seed)
                wplus[:, 8:] = self.pspex.decoder.style(torch.randn(1, 512).to(self.device)).unsqueeze(1).repeat(1,10,1) * 0.7
                y_hat_edit, _ = self.pspex.decoder([wplus], input_is_latent=True, first_layer_feature=f)
            elif 'Attribute Editing' in editing_options:
                editing_w = self.editing_dicts[editing_options[19:]].to(self.device)
                y_hat_edit, _ = self.pspex.decoder([wplus + scale_factor*editing_w], input_is_latent=True, first_layer_feature=f)
            elif 'Domain Transfer' in editing_options:
                self.load_G_model(editing_options[17:])
                if self.print_log: print('model %s loaded'%(editing_options[17:]))
                y_hat_edit, _ = self.generator([wplus], input_is_latent=True, first_layer_feature=f)
            else:
                y_hat_edit = y_hat
            y_hat_edit = torch.clamp(y_hat_edit, -1, 1)

        return self.tensor2np(y_hat[0]), self.tensor2np(y_hat_edit[0])