Ahsen Khaliq committed on
Commit dbe4fe6
1 Parent(s): e63ba22

Create app.py

Files changed (1): app.py +150 -0
app.py ADDED
@@ -0,0 +1,150 @@
import os

# fetch the pretrained TorchScript model and install the face detector
os.system("wget https://github.com/Sxela/ArcaneGAN/releases/download/v0.2/ArcaneGANv0.2.jit")
os.system("pip -qq install facenet_pytorch")


from facenet_pytorch import MTCNN
from torchvision import transforms
import torch, PIL

import gradio as gr

mtcnn = MTCNN(image_size=256, margin=80)

# simplest ye olde trustworthy MTCNN for face detection with landmarks
def detect(img):
    # Detect faces
    batch_boxes, batch_probs, batch_points = mtcnn.detect(img, landmarks=True)
    # Select faces
    if not mtcnn.keep_all:
        batch_boxes, batch_probs, batch_points = mtcnn.select_boxes(
            batch_boxes, batch_probs, batch_points, img, method=mtcnn.selection_method
        )

    return batch_boxes, batch_points
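
# Usage sketch (not part of the app flow; 'portrait.jpg' is a hypothetical file).
# `detect` returns the face bounding boxes as an array of [x1, y1, x2, y2] pixel
# coordinates (or None if no face is found), plus the facial landmark points:
#
#   img = PIL.Image.open('portrait.jpg')
#   boxes, points = detect(img)
#   if boxes is not None:
#       print('face box:', boxes[0], 'landmarks:', points[0])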

# my version of isOdd, should make a separate repo for it :D
def makeEven(_x):
    return int(_x) if (_x % 2 == 0) else int(_x + 1)

# the actual scaler function
def scale(boxes, _img, max_res=1_500_000, target_face=256, fixed_ratio=0, max_upscale=2, VERBOSE=False):
    """
    A useful scaler algorithm, based on face detection.
    Takes a PIL.Image, returns a uniformly scaled PIL.Image.

    boxes: a list of detected bboxes
    _img: PIL.Image
    max_res: maximum pixel area to fit into; use it to stay below the VRAM limits of your GPU
    target_face: desired face size; the whole image is upscaled or downscaled so the detected face fits that dimension
    fixed_ratio: fixed scale; ignores the face size, but still respects the max_res limit
    max_upscale: maximum upscale ratio; prevents images with tiny faces from being scaled up into a blurry mess
    """
    x, y = _img.size

    ratio = 2  # initial ratio

    # scale to the desired face size
    if boxes is not None and len(boxes) > 0:
        ratio = target_face / max(boxes[0][2:] - boxes[0][:2])
        ratio = min(ratio, max_upscale)
        if VERBOSE: print('up by', ratio)

    if fixed_ratio > 0:
        if VERBOSE: print('fixed ratio')
        ratio = fixed_ratio

    x *= ratio
    y *= ratio

    # downscale to fit into max_res
    res = x * y
    if res > max_res:
        ratio = pow(res / max_res, 1 / 2)
        if VERBOSE: print(ratio)
        x = int(x / ratio)
        y = int(y / ratio)

    # make dimensions even, because NNs usually fail on odd dimensions due to skip-connection size mismatches
    x = makeEven(int(x))
    y = makeEven(int(y))

    size = (x, y)

    return _img.resize(size)

def scale_by_face_size(_img, max_res=1_500_000, target_face=256, fix_ratio=0, max_upscale=2, VERBOSE=False):
    boxes, _ = detect(_img)
    if VERBOSE: print('boxes', boxes)
    img_resized = scale(boxes, _img, max_res, target_face, fix_ratio, max_upscale, VERBOSE)
    return img_resized
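
# A minimal sketch of the scaler in use ('photo.jpg' is hypothetical). The image is
# resized so the detected face is about target_face pixels, capped by max_res and
# max_upscale; both output dimensions come out even, courtesy of makeEven:
#
#   img = PIL.Image.open('photo.jpg')
#   img = scale_by_face_size(img, target_face=256, max_res=1_500_000, max_upscale=2)
#   print(img.size)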


size = 256

# standard ImageNet normalization statistics, also used to de-normalize the output
means = [0.485, 0.456, 0.406]
stds = [0.229, 0.224, 0.225]

t_stds = torch.tensor(stds).cpu()[:, None, None]
t_means = torch.tensor(means).cpu()[:, None, None]

img_transforms = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(means, stds)])

def tensor2im(var):
    # de-normalize, scale to [0, 255], and convert CHW -> HWC
    return var.mul(t_stds).add(t_means).mul(255.).clamp(0, 255).permute(1, 2, 0)
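
# Sanity-check sketch (illustrative only): tensor2im is the inverse of img_transforms
# up to rounding, so normalizing an image and de-normalizing it should round-trip:
#
#   t = img_transforms(img)                       # CHW, normalized float tensor
#   arr = tensor2im(t).numpy().astype('uint8')    # HWC, original pixel values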

def proc_pil_img(input_image, model):
    transformed_image = img_transforms(input_image)[None, ...].cpu()

    with torch.no_grad():
        result_image = model(transformed_image)[0]
        output_image = tensor2im(result_image)
        output_image = output_image.detach().cpu().numpy().astype('uint8')
        output_image = PIL.Image.fromarray(output_image)
    return output_image
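
# Direct stylization sketch ('face.jpg' is hypothetical), assuming `model` below is
# already loaded. Pre-scaling keeps both dimensions even, which the jit model's
# skip connections expect:
#
#   out = proc_pil_img(scale_by_face_size(PIL.Image.open('face.jpg')), model)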


model_path = './ArcaneGANv0.2.jit'

model = torch.jit.load(model_path, map_location='cpu').float().eval()
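
# Everything here is pinned to CPU. A hedged sketch of a GPU variant, assuming CUDA
# is available (proc_pil_img's .cpu() call would need the same device treatment):
#
#   device = 'cuda' if torch.cuda.is_available() else 'cpu'
#   model = torch.jit.load(model_path, map_location=device).to(device).float().eval()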

def fit(img, maxsize=512):
    # shrink so the longest side is at most maxsize, keeping the aspect ratio
    # (helper; not called by process() below)
    maxdim = max(*img.size)
    if maxdim > maxsize:
        ratio = maxsize / maxdim
        x, y = img.size
        size = (int(x * ratio), int(y * ratio))
        img = img.resize(size)
    return img


def process(img):
    im = scale_by_face_size(img, target_face=300, max_res=1_500_000, max_upscale=2)
    res = proc_pil_img(im, model)
    return res
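
# End-to-end sketch of what the Gradio callback does per upload ('selfie.jpg' is
# hypothetical): scale around the detected face, then apply the ArcaneGAN filter:
#
#   out = process(PIL.Image.open('selfie.jpg'))
#   out.save('selfie_arcane.jpg')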

title = "ArcaneGAN"
description = "Gradio demo for ArcaneGAN. To use it, simply upload your image. Read more at the link below."
article = "<p style='text-align: center'><a href='https://github.com/Sxela/ArcaneGAN'>ArcaneGAN Github Repo</a></p>"

gr.Interface(
    process,
    gr.inputs.Image(type="pil", label="Input"),
    gr.outputs.Image(type="pil", label="Output"),
    title=title,
    description=description,
    article=article,
).launch(debug=True)