Ahsen Khaliq committed
Commit e984b5c • 1 Parent(s): 8ce726b

Create app.py

Files changed (1)
  1. app.py +422 -0
app.py ADDED
@@ -0,0 +1,422 @@
import torch
torch.hub.download_url_to_file('http://mirror.io.community/blob/vqgan/vqgan_imagenet_f16_16384.yaml', 'vqgan_imagenet_f16_16384.yaml')
torch.hub.download_url_to_file('http://mirror.io.community/blob/vqgan/vqgan_imagenet_f16_16384.ckpt', 'vqgan_imagenet_f16_16384.ckpt')
import argparse
import math
from pathlib import Path
import sys

sys.path.insert(1, './taming-transformers')
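# The VQGAN config and checkpoint are fetched at import time, and the script expects a
# local ./taming-transformers checkout on sys.path for the taming.* imports below.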
from IPython import display
from base64 import b64encode
from omegaconf import OmegaConf
from PIL import Image
from taming.models import cond_transformer, vqgan
import taming.modules
from torch import nn, optim
from torch.nn import functional as F
from torchvision import transforms
from torchvision.transforms import functional as TF
from tqdm.notebook import tqdm

from CLIP import clip
import kornia.augmentation as K
import numpy as np
import imageio
from PIL import ImageFile, Image
ImageFile.LOAD_TRUNCATED_IMAGES = True
import gradio as gr

def sinc(x):
    return torch.where(x != 0, torch.sin(math.pi * x) / (math.pi * x), x.new_ones([]))


def lanczos(x, a):
    cond = torch.logical_and(-a < x, x < a)
    out = torch.where(cond, sinc(x) * sinc(x/a), x.new_zeros([]))
    return out / out.sum()


def ramp(ratio, width):
    n = math.ceil(width / ratio + 1)
    out = torch.empty([n])
    cur = 0
    for i in range(out.shape[0]):
        out[i] = cur
        cur += ratio
    return torch.cat([-out[1:].flip([0]), out])[1:-1]


def resample(input, size, align_corners=True):
    n, c, h, w = input.shape
    dh, dw = size

    input = input.view([n * c, 1, h, w])

    if dh < h:
        kernel_h = lanczos(ramp(dh / h, 2), 2).to(input.device, input.dtype)
        pad_h = (kernel_h.shape[0] - 1) // 2
        input = F.pad(input, (0, 0, pad_h, pad_h), 'reflect')
        input = F.conv2d(input, kernel_h[None, None, :, None])

    if dw < w:
        kernel_w = lanczos(ramp(dw / w, 2), 2).to(input.device, input.dtype)
        pad_w = (kernel_w.shape[0] - 1) // 2
        input = F.pad(input, (pad_w, pad_w, 0, 0), 'reflect')
        input = F.conv2d(input, kernel_w[None, None, None, :])

    input = input.view([n, c, h, w])
    return F.interpolate(input, size, mode='bicubic', align_corners=align_corners)

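# Straight-through gradient helpers: ReplaceGrad returns x_forward in the forward
# pass but routes gradients to x_backward; ClampWithGrad clamps values while
# zeroing gradients that would push them further outside the allowed range.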
class ReplaceGrad(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x_forward, x_backward):
        ctx.shape = x_backward.shape
        return x_forward

    @staticmethod
    def backward(ctx, grad_in):
        return None, grad_in.sum_to_size(ctx.shape)


replace_grad = ReplaceGrad.apply


class ClampWithGrad(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input, min, max):
        ctx.min = min
        ctx.max = max
        ctx.save_for_backward(input)
        return input.clamp(min, max)

    @staticmethod
    def backward(ctx, grad_in):
        input, = ctx.saved_tensors
        return grad_in * (grad_in * (input - input.clamp(ctx.min, ctx.max)) >= 0), None, None


clamp_with_grad = ClampWithGrad.apply

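# Snap each latent vector to its nearest codebook entry, with gradients passed
# straight through to the unquantized input via replace_grad.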
def vector_quantize(x, codebook):
    d = x.pow(2).sum(dim=-1, keepdim=True) + codebook.pow(2).sum(dim=1) - 2 * x @ codebook.T
    indices = d.argmin(-1)
    x_q = F.one_hot(indices, codebook.shape[0]).to(d.dtype) @ codebook
    return replace_grad(x_q, x)

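# A Prompt holds a target CLIP embedding; its forward pass returns a weighted
# squared spherical distance between the input embeddings and that target
# (a negative weight turns the term into a repulsion loss).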
class Prompt(nn.Module):
    def __init__(self, embed, weight=1., stop=float('-inf')):
        super().__init__()
        self.register_buffer('embed', embed)
        self.register_buffer('weight', torch.as_tensor(weight))
        self.register_buffer('stop', torch.as_tensor(stop))

    def forward(self, input):
        input_normed = F.normalize(input.unsqueeze(1), dim=2)
        embed_normed = F.normalize(self.embed.unsqueeze(0), dim=2)
        dists = input_normed.sub(embed_normed).norm(dim=2).div(2).arcsin().pow(2).mul(2)
        dists = dists * self.weight.sign()
        return self.weight.abs() * replace_grad(dists, torch.maximum(dists, self.stop)).mean()


def parse_prompt(prompt):
    vals = prompt.rsplit(':', 2)
    vals = vals + ['', '1', '-inf'][len(vals):]
    return vals[0], float(vals[1]), float(vals[2])

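# parse_prompt("text:weight:stop") -> (text, weight, stop); missing fields default to
# weight 1 and stop -inf, e.g. "a watercolor harbor:0.5" -> ("a watercolor harbor", 0.5, -inf).

# MakeCutouts feeds CLIP cutn views of the image, built with the pooling trick
# (mean of adaptive average- and max-pooling) plus kornia augmentations and noise.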
class MakeCutouts(nn.Module):
    def __init__(self, cut_size, cutn, cut_pow=1.):
        super().__init__()
        self.cut_size = cut_size
        self.cutn = cutn
        self.cut_pow = cut_pow

        self.augs = nn.Sequential(
            # K.RandomHorizontalFlip(p=0.5),
            # K.RandomVerticalFlip(p=0.5),
            # K.RandomSolarize(0.01, 0.01, p=0.7),
            # K.RandomSharpness(0.3, p=0.4),
            # K.RandomResizedCrop(size=(self.cut_size, self.cut_size), scale=(0.1, 1), ratio=(0.75, 1.333), cropping_mode='resample', p=0.5),
            # K.RandomCrop(size=(self.cut_size, self.cut_size), p=0.5),
            K.RandomAffine(degrees=15, translate=0.1, p=0.7, padding_mode='border'),
            K.RandomPerspective(0.7, p=0.7),
            K.ColorJitter(hue=0.1, saturation=0.1, p=0.7),
            K.RandomErasing((.1, .4), (.3, 1/.3), same_on_batch=True, p=0.7),
        )
        self.noise_fac = 0.1
        self.av_pool = nn.AdaptiveAvgPool2d((self.cut_size, self.cut_size))
        self.max_pool = nn.AdaptiveMaxPool2d((self.cut_size, self.cut_size))

    def forward(self, input):
        sideY, sideX = input.shape[2:4]
        max_size = min(sideX, sideY)
        min_size = min(sideX, sideY, self.cut_size)
        cutouts = []

        for _ in range(self.cutn):
            # size = int(torch.rand([])**self.cut_pow * (max_size - min_size) + min_size)
            # offsetx = torch.randint(0, sideX - size + 1, ())
            # offsety = torch.randint(0, sideY - size + 1, ())
            # cutout = input[:, :, offsety:offsety + size, offsetx:offsetx + size]
            # cutouts.append(resample(cutout, (self.cut_size, self.cut_size)))

            # cutout = transforms.Resize(size=(self.cut_size, self.cut_size))(input)

            cutout = (self.av_pool(input) + self.max_pool(input)) / 2
            cutouts.append(cutout)
        batch = self.augs(torch.cat(cutouts, dim=0))
        if self.noise_fac:
            facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)
            batch = batch + facs * torch.randn_like(batch)
        return batch

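# Instantiate whichever model class the OmegaConf config names (VQModel, GumbelVQ,
# or the first-stage model of a Net2NetTransformer) and drop its training-only loss head.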
def load_vqgan_model(config_path, checkpoint_path):
    config = OmegaConf.load(config_path)
    if config.model.target == 'taming.models.vqgan.VQModel':
        model = vqgan.VQModel(**config.model.params)
        model.eval().requires_grad_(False)
        model.init_from_ckpt(checkpoint_path)
    elif config.model.target == 'taming.models.vqgan.GumbelVQ':
        model = vqgan.GumbelVQ(**config.model.params)
        model.eval().requires_grad_(False)
        model.init_from_ckpt(checkpoint_path)
    elif config.model.target == 'taming.models.cond_transformer.Net2NetTransformer':
        parent_model = cond_transformer.Net2NetTransformer(**config.model.params)
        parent_model.eval().requires_grad_(False)
        parent_model.init_from_ckpt(checkpoint_path)
        model = parent_model.first_stage_model
    else:
        raise ValueError(f'unknown model type: {config.model.target}')
    del model.loss
    return model

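# Resize while keeping the aspect ratio and capping the pixel area at out_size.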
def resize_image(image, out_size):
    ratio = image.size[0] / image.size[1]
    area = min(image.size[0] * image.size[1], out_size[0] * out_size[1])
    size = round((area * ratio)**0.5), round((area / ratio)**0.5)
    return image.resize(size, Image.LANCZOS)

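# Gradio entry point: all settings are fixed here (300x300 output, the ImageNet 16384
# checkpoint, 300 iterations, seed 42, progress reported every 50 steps).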
def inference(text):
    texts = text
    width = 300
    height = 300
    model = "vqgan_imagenet_f16_16384"
    images_interval = 50
    init_image = ""
    target_images = ""
    seed = 42
    max_iterations = 300

    model_names = {"vqgan_imagenet_f16_16384": 'ImageNet 16384', "vqgan_imagenet_f16_1024": "ImageNet 1024", 'vqgan_openimages_f16_8192': 'OpenImages 8192',
                   "wikiart_1024": "WikiArt 1024", "wikiart_16384": "WikiArt 16384", "coco": "COCO-Stuff", "faceshq": "FacesHQ", "sflckr": "S-FLCKR"}
    name_model = model_names[model]

    if seed == -1:
        seed = None
    if init_image == "None":
        init_image = None
    if target_images == "None" or not target_images:
        target_images = []
    else:
        target_images = target_images.split("|")
        target_images = [image.strip() for image in target_images]

    texts = [phrase.strip() for phrase in texts.split("|")]
    if texts == ['']:
        texts = []

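    # Pack the run configuration into a Namespace: ViT-B/32 CLIP, Adam step size 0.1,
    # 32 cutouts per iteration, no init-image weight.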
    args = argparse.Namespace(
        prompts=texts,
        image_prompts=target_images,
        noise_prompt_seeds=[],
        noise_prompt_weights=[],
        size=[width, height],
        init_image=init_image,
        init_weight=0.,
        clip_model='ViT-B/32',
        vqgan_config=f'{model}.yaml',
        vqgan_checkpoint=f'{model}.ckpt',
        step_size=0.1,
        cutn=32,
        cut_pow=1.,
        display_freq=images_interval,
        seed=seed,
    )
    from urllib.request import urlopen

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print('Using device:', device)
    if texts:
        print('Using texts:', texts)
    if target_images:
        print('Using image prompts:', target_images)
    if args.seed is None:
        seed = torch.seed()
    else:
        seed = args.seed
    torch.manual_seed(seed)
    print('Using seed:', seed)

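    # Load the VQGAN and CLIP. cut_size is CLIP's input resolution, f is the VQGAN
    # downsampling factor, so the optimized latent grid is (width // f) x (height // f).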
    model = load_vqgan_model(args.vqgan_config, args.vqgan_checkpoint).to(device)
    perceptor = clip.load(args.clip_model, jit=False)[0].eval().requires_grad_(False).to(device)
    # clock=deepcopy(perceptor.visual.positional_embedding.data)
    # perceptor.visual.positional_embedding.data = clock/clock.max()
    # perceptor.visual.positional_embedding.data=clamp_with_grad(clock,0,1)

    cut_size = perceptor.visual.input_resolution

    f = 2**(model.decoder.num_resolutions - 1)
    make_cutouts = MakeCutouts(cut_size, args.cutn, cut_pow=args.cut_pow)

    toksX, toksY = args.size[0] // f, args.size[1] // f
    sideX, sideY = toksX * f, toksY * f

    if args.vqgan_checkpoint == 'vqgan_openimages_f16_8192.ckpt':
        e_dim = 256
        n_toks = model.quantize.n_embed
        z_min = model.quantize.embed.weight.min(dim=0).values[None, :, None, None]
        z_max = model.quantize.embed.weight.max(dim=0).values[None, :, None, None]
    else:
        e_dim = model.quantize.e_dim
        n_toks = model.quantize.n_e
        z_min = model.quantize.embedding.weight.min(dim=0).values[None, :, None, None]
        z_max = model.quantize.embedding.weight.max(dim=0).values[None, :, None, None]
    # z_min = model.quantize.embedding.weight.min(dim=0).values[None, :, None, None]
    # z_max = model.quantize.embedding.weight.max(dim=0).values[None, :, None, None]

    # normalize_imagenet = transforms.Normalize(mean=[0.485, 0.456, 0.406],
    #                                           std=[0.229, 0.224, 0.225])

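    # Initialize the latent z from the init image if one is given; otherwise sample random
    # codebook indices (note that the result is then overwritten with uniform noise below).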
    if args.init_image:
        if 'http' in args.init_image:
            img = Image.open(urlopen(args.init_image))
        else:
            img = Image.open(args.init_image)
        pil_image = img.convert('RGB')
        pil_image = pil_image.resize((sideX, sideY), Image.LANCZOS)
        pil_tensor = TF.to_tensor(pil_image)
        z, *_ = model.encode(pil_tensor.to(device).unsqueeze(0) * 2 - 1)
    else:
        one_hot = F.one_hot(torch.randint(n_toks, [toksY * toksX], device=device), n_toks).float()
        # z = one_hot @ model.quantize.embedding.weight
        if args.vqgan_checkpoint == 'vqgan_openimages_f16_8192.ckpt':
            z = one_hot @ model.quantize.embed.weight
        else:
            z = one_hot @ model.quantize.embedding.weight
        z = z.view([-1, toksY, toksX, e_dim]).permute(0, 3, 1, 2)
        z = torch.rand_like(z)*2
    z_orig = z.clone()
    z.requires_grad_(True)
    opt = optim.Adam([z], lr=args.step_size)

    normalize = transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
                                     std=[0.26862954, 0.26130258, 0.27577711])

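    # pMs collects the weighted Prompt losses built from text prompts, image prompts,
    # and noise prompts.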
    pMs = []

    for prompt in args.prompts:
        txt, weight, stop = parse_prompt(prompt)
        embed = perceptor.encode_text(clip.tokenize(txt).to(device)).float()
        pMs.append(Prompt(embed, weight, stop).to(device))

    for prompt in args.image_prompts:
        path, weight, stop = parse_prompt(prompt)
        img = Image.open(path)
        pil_image = img.convert('RGB')
        img = resize_image(pil_image, (sideX, sideY))
        batch = make_cutouts(TF.to_tensor(img).unsqueeze(0).to(device))
        embed = perceptor.encode_image(normalize(batch)).float()
        pMs.append(Prompt(embed, weight, stop).to(device))

    for seed, weight in zip(args.noise_prompt_seeds, args.noise_prompt_weights):
        gen = torch.Generator().manual_seed(seed)
        embed = torch.empty([1, perceptor.visual.output_dim]).normal_(generator=gen)
        pMs.append(Prompt(embed, weight).to(device))

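    # synth() decodes z through the quantizer and the VQGAN decoder into an RGB image;
    # checkin() logs the losses and saves a progress frame.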
    def synth(z):
        if args.vqgan_checkpoint == 'vqgan_openimages_f16_8192.ckpt':
            z_q = vector_quantize(z.movedim(1, 3), model.quantize.embed.weight).movedim(3, 1)
        else:
            z_q = vector_quantize(z.movedim(1, 3), model.quantize.embedding.weight).movedim(3, 1)
        return clamp_with_grad(model.decode(z_q).add(1).div(2), 0, 1)

    @torch.no_grad()
    def checkin(i, losses):
        losses_str = ', '.join(f'{loss.item():g}' for loss in losses)
        tqdm.write(f'i: {i}, loss: {sum(losses).item():g}, losses: {losses_str}')
        out = synth(z)
        TF.to_pil_image(out[0].cpu()).save('progress.png')
        display.display(display.Image('progress.png'))

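    # ascend_txt() computes the loss terms and writes the current frame to
    # ./steps/<i>.png (the directory is assumed to exist already); train() runs one
    # Adam step and clamps z to the codebook's per-channel value range.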
    def ascend_txt():
        # global i
        out = synth(z)
        iii = perceptor.encode_image(normalize(make_cutouts(out))).float()

        result = []

        if args.init_weight:
            # result.append(F.mse_loss(z, z_orig) * args.init_weight / 2)
            result.append(F.mse_loss(z, torch.zeros_like(z_orig)) * ((1/torch.tensor(i*2 + 1))*args.init_weight) / 2)
        for prompt in pMs:
            result.append(prompt(iii))
        img = np.array(out.mul(255).clamp(0, 255)[0].cpu().detach().numpy().astype(np.uint8))[:,:,:]
        img = np.transpose(img, (1, 2, 0))
        imageio.imwrite('./steps/' + str(i) + '.png', np.array(img))

        return result

    def train(i):
        opt.zero_grad()
        lossAll = ascend_txt()
        if i % args.display_freq == 0:
            checkin(i, lossAll)

        loss = sum(lossAll)
        loss.backward()
        opt.step()
        with torch.no_grad():
            z.copy_(z.maximum(z_min).minimum(z_max))

    i = 0
    try:
        with tqdm() as pbar:
            while True:
                train(i)
                if i == max_iterations:
                    break
                i += 1
                pbar.update()
    except KeyboardInterrupt:
        pass
    return "./steps/300.png"

title = "VQGAN + CLIP"
description = "Gradio demo for VQGAN + CLIP. To use it, simply add your text, or click one of the examples to load them. Read more at the links below."
article = "<p style='text-align: center'>Originally made by Katherine Crowson (https://github.com/crowsonkb, https://twitter.com/RiversHaveWings). The original BigGAN+CLIP method was by https://twitter.com/advadnoun. Added some explanations and modifications by Eleiber#8347, pooling trick by Crimeacs#8222 (https://twitter.com/EarthML1) and the GUI was made with the help of Abulafia#3734. | <a href='https://colab.research.google.com/drive/1ZAus_gn2RhTZWzOWUpPERNC0Q8OhZRTZ'>Colab</a> | <a href='https://github.com/CompVis/taming-transformers'>Taming Transformers Github Repo</a> | <a href='https://github.com/openai/CLIP'>CLIP Github Repo</a></p>"

gr.Interface(
    inference,
    gr.inputs.Textbox(label="Input"),
    gr.outputs.Image(type="file", label="Output"),
    title=title,
    description=description,
    article=article,
    examples=[
        ['a garden by james gurney'],
        ['coral reef city artstationHQ'],
        ['a cabin in the mountains unreal engine']
    ]
).launch(debug=True)