liuhaotian committed on
Commit
f9a674e
1 Parent(s): 6c8fcd4

Upload app

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitignore +112 -0
  2. DejaVuSansMono.ttf +0 -0
  3. __init__.py +0 -0
  4. app.py +758 -0
  5. dataset/__init__.py +0 -0
  6. dataset/base_dataset.py +220 -0
  7. dataset/catalog.py +72 -0
  8. dataset/cd_dataset.py +250 -0
  9. dataset/concat_dataset.py +65 -0
  10. dataset/grounding_dataset.py +205 -0
  11. dataset/layout_dataset.py +237 -0
  12. dataset/tsv.py +212 -0
  13. dataset/tsv_dataset.py +326 -0
  14. dataset/utils.py +116 -0
  15. environment.yaml +29 -0
  16. gligen/__init__.py +10 -0
  17. gligen/create_meta.py +170 -0
  18. gligen/distributed.py +122 -0
  19. gligen/evaluator.py +225 -0
  20. gligen/ldm/__init__.py +3 -0
  21. gligen/ldm/data/__init__.py +0 -0
  22. gligen/ldm/data/base.py +23 -0
  23. gligen/ldm/data/imagenet.py +394 -0
  24. gligen/ldm/data/imagenet_clsidx_to_label.txt +1000 -0
  25. gligen/ldm/data/index_synset.yaml +1000 -0
  26. gligen/ldm/data/lsun.py +92 -0
  27. gligen/ldm/lr_scheduler.py +98 -0
  28. gligen/ldm/models/autoencoder.py +52 -0
  29. gligen/ldm/models/diffusion/__init__.py +0 -0
  30. gligen/ldm/models/diffusion/classifier.py +267 -0
  31. gligen/ldm/models/diffusion/ddim.py +134 -0
  32. gligen/ldm/models/diffusion/ddpm.py +72 -0
  33. gligen/ldm/models/diffusion/ldm.py +88 -0
  34. gligen/ldm/models/diffusion/plms.py +162 -0
  35. gligen/ldm/modules/attention.py +387 -0
  36. gligen/ldm/modules/diffusionmodules/__init__.py +0 -0
  37. gligen/ldm/modules/diffusionmodules/model.py +835 -0
  38. gligen/ldm/modules/diffusionmodules/openaimodel.py +489 -0
  39. gligen/ldm/modules/diffusionmodules/positionnet.py +50 -0
  40. gligen/ldm/modules/diffusionmodules/positionnet_with_image.py +68 -0
  41. gligen/ldm/modules/diffusionmodules/util.py +277 -0
  42. gligen/ldm/modules/distributions/__init__.py +0 -0
  43. gligen/ldm/modules/distributions/distributions.py +92 -0
  44. gligen/ldm/modules/ema.py +76 -0
  45. gligen/ldm/modules/encoders/__init__.py +0 -0
  46. gligen/ldm/modules/encoders/modules.py +245 -0
  47. gligen/ldm/modules/encoders/modules_backup.py +234 -0
  48. gligen/ldm/modules/image_degradation/__init__.py +2 -0
  49. gligen/ldm/modules/image_degradation/bsrgan.py +730 -0
  50. gligen/ldm/modules/image_degradation/bsrgan_light.py +650 -0
.gitignore ADDED
@@ -0,0 +1,112 @@
1
+ # IntelliJ project files
2
+ .idea
3
+ *.iml
4
+ out
5
+ gen
6
+
7
+ ### Vim template
8
+ [._]*.s[a-w][a-z]
9
+ [._]s[a-w][a-z]
10
+ *.un~
11
+ Session.vim
12
+ .netrwhist
13
+ *~
14
+
15
+ ### IPythonNotebook template
16
+ # Temporary data
17
+ .ipynb_checkpoints/
18
+
19
+ ### Python template
20
+ # Byte-compiled / optimized / DLL files
21
+ __pycache__/
22
+ *.py[cod]
23
+ *$py.class
24
+
25
+ # C extensions
26
+ *.so
27
+
28
+ # Distribution / packaging
29
+ .Python
30
+ env/
31
+ build/
32
+ develop-eggs/
33
+ dist/
34
+ downloads/
35
+ eggs/
36
+ .eggs/
37
+ #lib/
38
+ #lib64/
39
+ parts/
40
+ sdist/
41
+ var/
42
+ *.egg-info/
43
+ .installed.cfg
44
+ *.egg
45
+
46
+ # PyInstaller
47
+ # Usually these files are written by a python script from a template
48
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
49
+ *.manifest
50
+ *.spec
51
+
52
+ # Installer logs
53
+ pip-log.txt
54
+ pip-delete-this-directory.txt
55
+
56
+ # Unit test / coverage reports
57
+ htmlcov/
58
+ .tox/
59
+ .coverage
60
+ .coverage.*
61
+ .cache
62
+ nosetests.xml
63
+ coverage.xml
64
+ *,cover
65
+
66
+ # Translations
67
+ *.mo
68
+ *.pot
69
+
70
+ # Django stuff:
71
+ *.log
72
+
73
+ # Sphinx documentation
74
+ docs/_build/
75
+
76
+ # PyBuilder
77
+ target/
78
+
79
+ *.ipynb
80
+ *.params
81
+ # *.json
82
+ .vscode/
83
+ *.code-workspace/
84
+
85
+ lib/pycocotools/_mask.c
86
+ lib/nms/cpu_nms.c
87
+
88
+ OUTPUT
89
+ OUTPUT/*
90
+ models/*
91
+ DATASET
92
+ DATASET/*
93
+ external/
94
+ MODELS
95
+ MODELS/*
96
+ gradio_cached_examples/*
97
+
98
+ kill.sh
99
+
100
+ draws/
101
+ #:wq
102
+ #plot/figs
103
+
104
+ *venv/*
105
+
106
+ # images
107
+ # images/*
108
+
109
+ create_samples/
110
+ create_samples/*
111
+
112
+ ckpts/*
DejaVuSansMono.ttf ADDED
Binary file (341 kB).
__init__.py ADDED
File without changes
app.py ADDED
@@ -0,0 +1,758 @@
1
+ import gradio as gr
2
+ import torch
3
+ import argparse
4
+ from omegaconf import OmegaConf
5
+ from gligen.task_grounded_generation import grounded_generation_box, load_ckpt
6
+
7
+ import json
8
+ import numpy as np
9
+ from PIL import Image, ImageDraw, ImageFont
10
+ from functools import partial
11
+ import math
12
+
13
+ from gradio import processing_utils
14
+ from typing import Optional
15
+
16
+ from huggingface_hub import hf_hub_download
17
+ hf_hub_download = partial(hf_hub_download, library_name="gligen_demo")
18
+
19
+
20
+ arg_bool = lambda x: x.lower() == 'true'
21
+
22
+
23
+ def parse_option():
24
+ parser = argparse.ArgumentParser('GLIGen Demo', add_help=False)
25
+ parser.add_argument("--folder", type=str, default="create_samples", help="path to OUTPUT")
26
+ parser.add_argument("--official_ckpt", type=str, default='ckpts/sd-v1-4.ckpt', help="")
27
+ parser.add_argument("--guidance_scale", type=float, default=5, help="")
28
+ parser.add_argument("--alpha_scale", type=float, default=1, help="scale tanh(alpha). If 0, the behaviour is same as original model")
29
+ parser.add_argument("--load-text-box-generation", type=arg_bool, default=True, help="Load text-box generation pipeline.")
30
+ parser.add_argument("--load-text-box-inpainting", type=arg_bool, default=True, help="Load text-box inpainting pipeline.")
31
+ parser.add_argument("--load-text-image-box-generation", type=arg_bool, default=True, help="Load text-image-box generation pipeline.")
32
+ args = parser.parse_args()
33
+ return args
34
+ args = parse_option()
35
+
36
+
37
+ def load_from_hf(repo_id, filename='diffusion_pytorch_model.bin'):
38
+ cache_file = hf_hub_download(repo_id=repo_id, filename=filename)
39
+ return torch.load(cache_file, map_location='cpu')
40
+
41
+ def load_ckpt_config_from_hf(modality):
42
+ ckpt = load_from_hf(f'gligen/{modality}')
43
+ config = load_from_hf('gligen/demo_config_legacy', filename=f'{modality}.pth')
44
+ return ckpt, config
45
+
46
+
47
+ if args.load_text_box_generation:
48
+ pretrained_ckpt_gligen, config = load_ckpt_config_from_hf('gligen-generation-text-box')
49
+ config = OmegaConf.create( config["_content"] ) # config used in training
50
+ config.update( vars(args) )
51
+ config.model['params']['is_inpaint'] = False
52
+ config.model['params']['is_style'] = False
53
+ loaded_model_list = load_ckpt(config, pretrained_ckpt_gligen)
54
+
55
+
56
+ if args.load_text_box_inpainting:
57
+ pretrained_ckpt_gligen_inpaint, config = load_ckpt_config_from_hf('gligen-inpainting-text-box')
58
+ config = OmegaConf.create( config["_content"] ) # config used in training
59
+ config.update( vars(args) )
60
+ config.model['params']['is_inpaint'] = True
61
+ config.model['params']['is_style'] = False
62
+ loaded_model_list_inpaint = load_ckpt(config, pretrained_ckpt_gligen_inpaint)
63
+
64
+
65
+ if args.load_text_image_box_generation:
66
+ pretrained_ckpt_gligen_style, config = load_ckpt_config_from_hf('gligen-generation-text-image-box')
67
+ config = OmegaConf.create( config["_content"] ) # config used in training
68
+ config.update( vars(args) )
69
+ config.model['params']['is_inpaint'] = False
70
+ config.model['params']['is_style'] = True
71
+ loaded_model_list_style = load_ckpt(config, pretrained_ckpt_gligen_style)
72
+
73
+
74
+ def load_clip_model():
75
+ from transformers import CLIPProcessor, CLIPModel
76
+ version = "openai/clip-vit-large-patch14"
77
+ model = CLIPModel.from_pretrained(version).cuda()
78
+ processor = CLIPProcessor.from_pretrained(version)
79
+
80
+ return {
81
+ 'version': version,
82
+ 'model': model,
83
+ 'processor': processor,
84
+ }
85
+
86
+ clip_model = load_clip_model()
87
+
88
+
89
+ class ImageMask(gr.components.Image):
90
+ """
91
+ Sets: source="upload", tool="sketch"
92
+ """
93
+
94
+ is_template = True
95
+
96
+ def __init__(self, **kwargs):
97
+ super().__init__(source="upload", tool="sketch", interactive=True, **kwargs)
98
+
99
+ def preprocess(self, x):
100
+ if x is None:
101
+ return x
102
+ if self.tool == "sketch" and self.source in ["upload", "webcam"] and type(x) != dict:
103
+ decode_image = processing_utils.decode_base64_to_image(x)
104
+ width, height = decode_image.size
105
+ mask = np.zeros((height, width, 4), dtype=np.uint8)
106
+ mask[..., -1] = 255
107
+ mask = self.postprocess(mask)
108
+ x = {'image': x, 'mask': mask}
109
+ return super().preprocess(x)
110
+
111
+
112
+ class Blocks(gr.Blocks):
113
+
114
+ def __init__(
115
+ self,
116
+ theme: str = "default",
117
+ analytics_enabled: Optional[bool] = None,
118
+ mode: str = "blocks",
119
+ title: str = "Gradio",
120
+ css: Optional[str] = None,
121
+ **kwargs,
122
+ ):
123
+
124
+ self.extra_configs = {
125
+ 'thumbnail': kwargs.pop('thumbnail', ''),
126
+ 'url': kwargs.pop('url', 'https://gradio.app/'),
127
+ 'creator': kwargs.pop('creator', '@teamGradio'),
128
+ }
129
+
130
+ super(Blocks, self).__init__(theme, analytics_enabled, mode, title, css, **kwargs)
131
+
132
+ def get_config_file(self):
133
+ config = super(Blocks, self).get_config_file()
134
+
135
+ for k, v in self.extra_configs.items():
136
+ config[k] = v
137
+
138
+ return config
139
+
140
+ '''
141
+ inference model
142
+ '''
143
+
144
+ @torch.no_grad()
145
+ def inference(task, language_instruction, grounding_instruction, inpainting_boxes_nodrop, image,
146
+ alpha_sample, guidance_scale, batch_size,
147
+ fix_seed, rand_seed, actual_mask, style_image,
148
+ *args, **kwargs):
149
+ grounding_instruction = json.loads(grounding_instruction)
150
+ phrase_list, location_list = [], []
151
+ for k, v in grounding_instruction.items():
152
+ phrase_list.append(k)
153
+ location_list.append(v)
154
+
155
+ placeholder_image = Image.open('images/teddy.jpg').convert("RGB")
156
+ image_list = [placeholder_image] * len(phrase_list) # placeholder input for visual prompt, which is disabled
157
+
158
+ batch_size = int(batch_size)
159
+ if not 1 <= batch_size <= 2:
160
+ batch_size = 2
161
+
162
+ if style_image is None:
163
+ has_text_mask = 1
164
+ has_image_mask = 0 # then we hack above 'image_list'
165
+ else:
166
+ valid_phrase_len = len(phrase_list)
167
+
168
+ phrase_list += ['placeholder']
169
+ has_text_mask = [1]*valid_phrase_len + [0]
170
+
171
+ image_list = [placeholder_image]*valid_phrase_len + [style_image]
172
+ has_image_mask = [0]*valid_phrase_len + [1]
173
+
174
+ location_list += [ [0.0, 0.0, 1, 0.01] ] # style image grounding location
175
+
176
+ if task == 'Grounded Inpainting':
177
+ alpha_sample = 1.0
178
+
179
+ instruction = dict(
180
+ prompt = language_instruction,
181
+ phrases = phrase_list,
182
+ images = image_list,
183
+ locations = location_list,
184
+ alpha_type = [alpha_sample, 0, 1.0 - alpha_sample],
185
+ has_text_mask = has_text_mask,
186
+ has_image_mask = has_image_mask,
187
+ save_folder_name = language_instruction,
188
+ guidance_scale = guidance_scale,
189
+ batch_size = batch_size,
190
+ fix_seed = bool(fix_seed),
191
+ rand_seed = int(rand_seed),
192
+ actual_mask = actual_mask,
193
+ inpainting_boxes_nodrop = inpainting_boxes_nodrop,
194
+ )
195
+
196
+ with torch.autocast(device_type='cuda', dtype=torch.float16):
197
+ if task == 'Grounded Generation':
198
+ if style_image is None:
199
+ return grounded_generation_box(loaded_model_list, instruction, *args, **kwargs)
200
+ else:
201
+ return grounded_generation_box(loaded_model_list_style, instruction, *args, **kwargs)
202
+ elif task == 'Grounded Inpainting':
203
+ assert image is not None
204
+ instruction['input_image'] = image.convert("RGB")
205
+ return grounded_generation_box(loaded_model_list_inpaint, instruction, *args, **kwargs)
206
+
207
+
208
+ def draw_box(boxes=[], texts=[], img=None):
209
+ if len(boxes) == 0 and img is None:
210
+ return None
211
+
212
+ if img is None:
213
+ img = Image.new('RGB', (512, 512), (255, 255, 255))
214
+ colors = ["red", "olive", "blue", "green", "orange", "brown", "cyan", "purple"]
215
+ draw = ImageDraw.Draw(img)
216
+ font = ImageFont.truetype("DejaVuSansMono.ttf", size=18)
217
+ for bid, box in enumerate(boxes):
218
+ draw.rectangle([box[0], box[1], box[2], box[3]], outline=colors[bid % len(colors)], width=4)
219
+ anno_text = texts[bid]
220
+ draw.rectangle([box[0], box[3] - int(font.size * 1.2), box[0] + int((len(anno_text) + 0.8) * font.size * 0.6), box[3]], outline=colors[bid % len(colors)], fill=colors[bid % len(colors)], width=4)
221
+ draw.text([box[0] + int(font.size * 0.2), box[3] - int(font.size*1.2)], anno_text, font=font, fill=(255,255,255))
222
+ return img
223
+
224
+ def get_concat(ims):
225
+ if len(ims) == 1:
226
+ n_col = 1
227
+ else:
228
+ n_col = 2
229
+ n_row = math.ceil(len(ims) / 2)
230
+ dst = Image.new('RGB', (ims[0].width * n_col, ims[0].height * n_row), color="white")
231
+ for i, im in enumerate(ims):
232
+ row_id = i // n_col
233
+ col_id = i % n_col
234
+ dst.paste(im, (im.width * col_id, im.height * row_id))
235
+ return dst
236
+
237
+
238
+ def auto_append_grounding(language_instruction, grounding_texts):
239
+ for grounding_text in grounding_texts:
240
+ if grounding_text not in language_instruction and grounding_text != 'auto':
241
+ language_instruction += "; " + grounding_text
242
+ print(language_instruction)
243
+ return language_instruction
244
+
245
+
246
+
247
+
248
+ def generate(task, language_instruction, grounding_texts, sketch_pad,
249
+ alpha_sample, guidance_scale, batch_size,
250
+ fix_seed, rand_seed, use_actual_mask, append_grounding, style_cond_image,
251
+ state):
252
+ if 'boxes' not in state:
253
+ state['boxes'] = []
254
+
255
+ boxes = state['boxes']
256
+ grounding_texts = [x.strip() for x in grounding_texts.split(';')]
257
+ assert len(boxes) == len(grounding_texts)
258
+ boxes = (np.asarray(boxes) / 512).tolist()
259
+ grounding_instruction = json.dumps({obj: box for obj,box in zip(grounding_texts, boxes)})
260
+
261
+ image = None
262
+ actual_mask = None
263
+ if task == 'Grounded Inpainting':
264
+ image = state.get('original_image', sketch_pad['image']).copy()
265
+ image = center_crop(image)
266
+ image = Image.fromarray(image)
267
+
268
+ if use_actual_mask:
269
+ actual_mask = sketch_pad['mask'].copy()
270
+ if actual_mask.ndim == 3:
271
+ actual_mask = actual_mask[..., 0]
272
+ actual_mask = center_crop(actual_mask, tgt_size=(64, 64))
273
+ actual_mask = torch.from_numpy(actual_mask == 0).float()
274
+
275
+ if state.get('inpaint_hw', None):
276
+ boxes = np.asarray(boxes) * 0.9 + 0.05
277
+ boxes = boxes.tolist()
278
+ grounding_instruction = json.dumps({obj: box for obj,box in zip(grounding_texts, boxes) if obj != 'auto'})
279
+
280
+ if append_grounding:
281
+ language_instruction = auto_append_grounding(language_instruction, grounding_texts)
282
+
283
+ gen_images, gen_overlays = inference(
284
+ task, language_instruction, grounding_instruction, boxes, image,
285
+ alpha_sample, guidance_scale, batch_size,
286
+ fix_seed, rand_seed, actual_mask, style_cond_image, clip_model=clip_model,
287
+ )
288
+
289
+ for idx, gen_image in enumerate(gen_images):
290
+
291
+ if task == 'Grounded Inpainting' and state.get('inpaint_hw', None):
292
+ hw = min(*state['original_image'].shape[:2])
293
+ gen_image = sized_center_fill(state['original_image'].copy(), np.array(gen_image.resize((hw, hw))), hw, hw)
294
+ gen_image = Image.fromarray(gen_image)
295
+
296
+ gen_images[idx] = gen_image
297
+
298
+ blank_samples = batch_size % 2 if batch_size > 1 else 0
299
+ gen_images = [gr.Image.update(value=x, visible=True) for i,x in enumerate(gen_images)] \
300
+ + [gr.Image.update(value=None, visible=True) for _ in range(blank_samples)] \
301
+ + [gr.Image.update(value=None, visible=False) for _ in range(4 - batch_size - blank_samples)]
302
+
303
+ return gen_images + [state]
304
+
305
+
306
+ def binarize(x):
307
+ return (x != 0).astype('uint8') * 255
308
+
309
+ def sized_center_crop(img, cropx, cropy):
310
+ y, x = img.shape[:2]
311
+ startx = x // 2 - (cropx // 2)
312
+ starty = y // 2 - (cropy // 2)
313
+ return img[starty:starty+cropy, startx:startx+cropx]
314
+
315
+ def sized_center_fill(img, fill, cropx, cropy):
316
+ y, x = img.shape[:2]
317
+ startx = x // 2 - (cropx // 2)
318
+ starty = y // 2 - (cropy // 2)
319
+ img[starty:starty+cropy, startx:startx+cropx] = fill
320
+ return img
321
+
322
+ def sized_center_mask(img, cropx, cropy):
323
+ y, x = img.shape[:2]
324
+ startx = x // 2 - (cropx // 2)
325
+ starty = y // 2 - (cropy // 2)
326
+ center_region = img[starty:starty+cropy, startx:startx+cropx].copy()
327
+ img = (img * 0.2).astype('uint8')
328
+ img[starty:starty+cropy, startx:startx+cropx] = center_region
329
+ return img
330
+
331
+ def center_crop(img, HW=None, tgt_size=(512, 512)):
332
+ if HW is None:
333
+ H, W = img.shape[:2]
334
+ HW = min(H, W)
335
+ img = sized_center_crop(img, HW, HW)
336
+ img = Image.fromarray(img)
337
+ img = img.resize(tgt_size)
338
+ return np.array(img)
339
+
340
+ def draw(task, input, grounding_texts, new_image_trigger, state):
341
+ if type(input) == dict:
342
+ image = input['image']
343
+ mask = input['mask']
344
+ else:
345
+ mask = input
346
+
347
+ if mask.ndim == 3:
348
+ mask = mask[..., 0]
349
+
350
+ image_scale = 1.0
351
+
352
+ # resize trigger
353
+ if task == "Grounded Inpainting":
354
+ mask_cond = mask.sum() == 0
355
+ # size_cond = mask.shape != (512, 512)
356
+ if mask_cond and 'original_image' not in state:
357
+ image = Image.fromarray(image)
358
+ width, height = image.size
359
+ scale = 600 / min(width, height)
360
+ image = image.resize((int(width * scale), int(height * scale)))
361
+ state['original_image'] = np.array(image).copy()
362
+ image_scale = float(height / width)
363
+ return [None, new_image_trigger + 1, image_scale, state]
364
+ else:
365
+ original_image = state['original_image']
366
+ H, W = original_image.shape[:2]
367
+ image_scale = float(H / W)
368
+
369
+ mask = binarize(mask)
370
+ if mask.shape != (512, 512):
371
+ # assert False, "should not receive any non- 512x512 masks."
372
+ if 'original_image' in state and state['original_image'].shape[:2] == mask.shape:
373
+ mask = center_crop(mask, state['inpaint_hw'])
374
+ image = center_crop(state['original_image'], state['inpaint_hw'])
375
+ else:
376
+ mask = np.zeros((512, 512), dtype=np.uint8)
377
+ # mask = center_crop(mask)
378
+ mask = binarize(mask)
379
+
380
+ if type(mask) != np.ndarray:
381
+ mask = np.array(mask)
382
+
383
+ if mask.sum() == 0 and task != "Grounded Inpainting":
384
+ state = {}
385
+
386
+ if task != 'Grounded Inpainting':
387
+ image = None
388
+ else:
389
+ image = Image.fromarray(image)
390
+
391
+ if 'boxes' not in state:
392
+ state['boxes'] = []
393
+
394
+ if 'masks' not in state or len(state['masks']) == 0:
395
+ state['masks'] = []
396
+ last_mask = np.zeros_like(mask)
397
+ else:
398
+ last_mask = state['masks'][-1]
399
+
400
+ if type(mask) == np.ndarray and mask.size > 1:
401
+ diff_mask = mask - last_mask
402
+ else:
403
+ diff_mask = np.zeros([])
404
+
405
+ if diff_mask.sum() > 0:
406
+ x1x2 = np.where(diff_mask.max(0) != 0)[0]
407
+ y1y2 = np.where(diff_mask.max(1) != 0)[0]
408
+ y1, y2 = y1y2.min(), y1y2.max()
409
+ x1, x2 = x1x2.min(), x1x2.max()
410
+
411
+ if (x2 - x1 > 5) and (y2 - y1 > 5):
412
+ state['masks'].append(mask.copy())
413
+ state['boxes'].append((x1, y1, x2, y2))
414
+
415
+ grounding_texts = [x.strip() for x in grounding_texts.split(';')]
416
+ grounding_texts = [x for x in grounding_texts if len(x) > 0]
417
+ if len(grounding_texts) < len(state['boxes']):
418
+ grounding_texts += [f'Obj. {bid+1}' for bid in range(len(grounding_texts), len(state['boxes']))]
419
+
420
+ box_image = draw_box(state['boxes'], grounding_texts, image)
421
+
422
+ if box_image is not None and state.get('inpaint_hw', None):
423
+ inpaint_hw = state['inpaint_hw']
424
+ box_image_resize = np.array(box_image.resize((inpaint_hw, inpaint_hw)))
425
+ original_image = state['original_image'].copy()
426
+ box_image = sized_center_fill(original_image, box_image_resize, inpaint_hw, inpaint_hw)
427
+
428
+ return [box_image, new_image_trigger, image_scale, state]
429
+
430
+ def clear(task, sketch_pad_trigger, batch_size, state, switch_task=False):
431
+ if task != 'Grounded Inpainting':
432
+ sketch_pad_trigger = sketch_pad_trigger + 1
433
+ blank_samples = batch_size % 2 if batch_size > 1 else 0
434
+ out_images = [gr.Image.update(value=None, visible=True) for i in range(batch_size)] \
435
+ + [gr.Image.update(value=None, visible=True) for _ in range(blank_samples)] \
436
+ + [gr.Image.update(value=None, visible=False) for _ in range(4 - batch_size - blank_samples)]
437
+ state = {}
438
+ return [None, sketch_pad_trigger, None, 1.0] + out_images + [state]
439
+
440
+ css = """
441
+ #generate-btn {
442
+ --tw-border-opacity: 1;
443
+ border-color: rgb(255 216 180 / var(--tw-border-opacity));
444
+ --tw-gradient-from: rgb(255 216 180 / .7);
445
+ --tw-gradient-to: rgb(255 216 180 / 0);
446
+ --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to);
447
+ --tw-gradient-to: rgb(255 176 102 / .8);
448
+ --tw-text-opacity: 1;
449
+ color: rgb(238 116 0 / var(--tw-text-opacity));
450
+ }
451
+ #img2img_image, #img2img_image > .h-60, #img2img_image > .h-60 > div, #img2img_image > .h-60 > div > img
452
+ {
453
+ height: var(--height) !important;
454
+ max-height: var(--height) !important;
455
+ min-height: var(--height) !important;
456
+ }
457
+ #mirrors a:hover {
458
+ cursor:pointer;
459
+ }
460
+ #paper-info a {
461
+ color:#008AD7;
462
+ }
463
+ #paper-info a:hover {
464
+ cursor: pointer;
465
+ }
466
+ """
467
+
468
+ rescale_js = """
469
+ function(x) {
470
+ const root = document.querySelector('gradio-app').shadowRoot || document.querySelector('gradio-app');
471
+ let image_scale = parseFloat(root.querySelector('#image_scale input').value) || 1.0;
472
+ const image_width = root.querySelector('#img2img_image').clientWidth;
473
+ const target_height = parseInt(image_width * image_scale);
474
+ document.body.style.setProperty('--height', `${target_height}px`);
475
+ root.querySelectorAll('button.justify-center.rounded')[0].style.display='none';
476
+ root.querySelectorAll('button.justify-center.rounded')[1].style.display='none';
477
+ return x;
478
+ }
479
+ """
480
+
481
+ mirror_js = """
482
+ function () {
483
+ const root = document.querySelector('gradio-app').shadowRoot || document.querySelector('gradio-app');
484
+ const mirrors_div = root.querySelector('#mirrors');
485
+ const current_url = window.location.href;
486
+ const mirrors = [
487
+ 'https://dev.hliu.cc/gligen_mirror1/',
488
+ 'https://dev.hliu.cc/gligen_mirror2/',
489
+ ];
490
+
491
+ let mirror_html = '';
492
+ mirror_html += '[<a href="https://gligen.github.io" target="_blank" style="">Project Page</a>]';
493
+ mirror_html += '[<a href="https://arxiv.org/abs/2301.07093" target="_blank" style="">Paper</a>]';
494
+ mirror_html += '[<a href="https://github.com/gligen/GLIGEN" target="_blank" style="">GitHub Repo</a>]';
495
+ mirror_html += '&nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;&nbsp;';
496
+ mirror_html += 'Mirrors: ';
497
+
498
+ mirrors.forEach((e, index) => {
499
+ let cur_index = index + 1;
500
+ if (current_url.includes(e)) {
501
+ mirror_html += `[Mirror ${cur_index}] `;
502
+ } else {
503
+ mirror_html += `[<a onclick="window.location.href = '${e}'">Mirror ${cur_index}</a>] `;
504
+ }
505
+ });
506
+
507
+ mirror_html = `<div class="output-markdown gr-prose" style="max-width: 100%;"><h3 style="text-align: center" id="paper-info">${mirror_html}</h3></div>`;
508
+
509
+ mirrors_div.innerHTML = mirror_html;
510
+ }
511
+ """
512
+
513
+ with Blocks(
514
+ css=css,
515
+ analytics_enabled=False,
516
+ title="GLIGen demo",
517
+ ) as main:
518
+ gr.Markdown('<h1 style="text-align: center;">GLIGen: Open-Set Grounded Text-to-Image Generation</h1>')
519
+ gr.Markdown("""<h3 style="text-align: center" id="paper-info">
520
+ [<a href="https://gligen.github.io" target="_blank" style="">Project Page</a>]
521
+ [<a href="https://arxiv.org/abs/2301.07093" target="_blank" style="">Paper</a>]
522
+ [<a href="https://github.com/gligen/GLIGEN" target="_blank" style="">GitHub Repo</a>]
523
+ </h3>""")
524
+ # gr.HTML("", elem_id="mirrors")
525
+ gr.Markdown("To ground concepts of interest with desired spatial specification, please (1) &#9000;&#65039; enter the concept names in <em> Grounding Instruction</em>, and (2) &#128433;&#65039; draw their corresponding bounding boxes one by one using <em> Sketch Pad</em> -- the parsed boxes will be displayed automatically.")
526
+ with gr.Row():
527
+ with gr.Column(scale=4):
528
+ sketch_pad_trigger = gr.Number(value=0, visible=False)
529
+ sketch_pad_resize_trigger = gr.Number(value=0, visible=False)
530
+ init_white_trigger = gr.Number(value=0, visible=False)
531
+ image_scale = gr.Number(value=0, elem_id="image_scale", visible=False)
532
+ new_image_trigger = gr.Number(value=0, visible=False)
533
+
534
+ task = gr.Radio(
535
+ choices=["Grounded Generation", 'Grounded Inpainting'],
536
+ type="value",
537
+ value="Grounded Generation",
538
+ label="Task",
539
+ )
540
+ language_instruction = gr.Textbox(
541
+ label="Language instruction",
542
+ )
543
+ grounding_instruction = gr.Textbox(
544
+ label="Grounding instruction (Separated by semicolon)",
545
+ )
546
+ with gr.Row():
547
+ sketch_pad = ImageMask(label="Sketch Pad", elem_id="img2img_image")
548
+ out_imagebox = gr.Image(type="pil", label="Parsed Sketch Pad")
549
+ with gr.Row():
550
+ clear_btn = gr.Button(value='Clear')
551
+ gen_btn = gr.Button(value='Generate', elem_id="generate-btn")
552
+ with gr.Accordion("Advanced Options", open=False):
553
+ with gr.Column():
554
+ alpha_sample = gr.Slider(minimum=0, maximum=1.0, step=0.1, value=0.3, label="Scheduled Sampling (τ)")
555
+ guidance_scale = gr.Slider(minimum=0, maximum=50, step=0.5, value=7.5, label="Guidance Scale")
556
+ batch_size = gr.Slider(minimum=1, maximum=2, step=1, value=2, label="Number of Samples")
557
+ append_grounding = gr.Checkbox(value=True, label="Append grounding instructions to the caption")
558
+ use_actual_mask = gr.Checkbox(value=False, label="Use actual mask for inpainting", visible=False)
559
+ with gr.Row():
560
+ fix_seed = gr.Checkbox(value=True, label="Fixed seed")
561
+ rand_seed = gr.Slider(minimum=0, maximum=1000, step=1, value=0, label="Seed")
562
+ with gr.Row():
563
+ use_style_cond = gr.Checkbox(value=False, label="Enable Style Condition")
564
+ style_cond_image = gr.Image(type="pil", label="Style Condition", visible=False, interactive=True)
565
+ with gr.Column(scale=4):
566
+ gr.Markdown("### Generated Images")
567
+ with gr.Row():
568
+ out_gen_1 = gr.Image(type="pil", visible=True, show_label=False)
569
+ out_gen_2 = gr.Image(type="pil", visible=True, show_label=False)
570
+ with gr.Row():
571
+ out_gen_3 = gr.Image(type="pil", visible=False, show_label=False)
572
+ out_gen_4 = gr.Image(type="pil", visible=False, show_label=False)
573
+
574
+ state = gr.State({})
575
+
576
+ class Controller:
577
+ def __init__(self):
578
+ self.calls = 0
579
+ self.tracks = 0
580
+ self.resizes = 0
581
+ self.scales = 0
582
+
583
+ def init_white(self, init_white_trigger):
584
+ self.calls += 1
585
+ return np.ones((512, 512), dtype='uint8') * 255, 1.0, init_white_trigger+1
586
+
587
+ def change_n_samples(self, n_samples):
588
+ blank_samples = n_samples % 2 if n_samples > 1 else 0
589
+ return [gr.Image.update(visible=True) for _ in range(n_samples + blank_samples)] \
590
+ + [gr.Image.update(visible=False) for _ in range(4 - n_samples - blank_samples)]
591
+
592
+ def resize_centercrop(self, state):
593
+ self.resizes += 1
594
+ image = state['original_image'].copy()
595
+ inpaint_hw = int(0.9 * min(*image.shape[:2]))
596
+ state['inpaint_hw'] = inpaint_hw
597
+ image_cc = center_crop(image, inpaint_hw)
598
+ # print(f'resize triggered {self.resizes}', image.shape, '->', image_cc.shape)
599
+ return image_cc, state
600
+
601
+ def resize_masked(self, state):
602
+ self.resizes += 1
603
+ image = state['original_image'].copy()
604
+ inpaint_hw = int(0.9 * min(*image.shape[:2]))
605
+ state['inpaint_hw'] = inpaint_hw
606
+ image_mask = sized_center_mask(image, inpaint_hw, inpaint_hw)
607
+ state['masked_image'] = image_mask.copy()
608
+ # print(f'mask triggered {self.resizes}')
609
+ return image_mask, state
610
+
611
+ def switch_task_hide_cond(self, task):
612
+ cond = False
613
+ if task == "Grounded Generation":
614
+ cond = True
615
+
616
+ return gr.Checkbox.update(visible=cond, value=False), gr.Image.update(value=None, visible=False), gr.Slider.update(visible=cond), gr.Checkbox.update(visible=(not cond), value=False)
617
+
618
+ controller = Controller()
619
+ main.load(
620
+ lambda x:x+1,
621
+ inputs=sketch_pad_trigger,
622
+ outputs=sketch_pad_trigger,
623
+ queue=False)
624
+ sketch_pad.edit(
625
+ draw,
626
+ inputs=[task, sketch_pad, grounding_instruction, sketch_pad_resize_trigger, state],
627
+ outputs=[out_imagebox, sketch_pad_resize_trigger, image_scale, state],
628
+ queue=False,
629
+ )
630
+ grounding_instruction.change(
631
+ draw,
632
+ inputs=[task, sketch_pad, grounding_instruction, sketch_pad_resize_trigger, state],
633
+ outputs=[out_imagebox, sketch_pad_resize_trigger, image_scale, state],
634
+ queue=False,
635
+ )
636
+ clear_btn.click(
637
+ clear,
638
+ inputs=[task, sketch_pad_trigger, batch_size, state],
639
+ outputs=[sketch_pad, sketch_pad_trigger, out_imagebox, image_scale, out_gen_1, out_gen_2, out_gen_3, out_gen_4, state],
640
+ queue=False)
641
+ task.change(
642
+ partial(clear, switch_task=True),
643
+ inputs=[task, sketch_pad_trigger, batch_size, state],
644
+ outputs=[sketch_pad, sketch_pad_trigger, out_imagebox, image_scale, out_gen_1, out_gen_2, out_gen_3, out_gen_4, state],
645
+ queue=False)
646
+ sketch_pad_trigger.change(
647
+ controller.init_white,
648
+ inputs=[init_white_trigger],
649
+ outputs=[sketch_pad, image_scale, init_white_trigger],
650
+ queue=False)
651
+ sketch_pad_resize_trigger.change(
652
+ controller.resize_masked,
653
+ inputs=[state],
654
+ outputs=[sketch_pad, state],
655
+ queue=False)
656
+ batch_size.change(
657
+ controller.change_n_samples,
658
+ inputs=[batch_size],
659
+ outputs=[out_gen_1, out_gen_2, out_gen_3, out_gen_4],
660
+ queue=False)
661
+ gen_btn.click(
662
+ generate,
663
+ inputs=[
664
+ task, language_instruction, grounding_instruction, sketch_pad,
665
+ alpha_sample, guidance_scale, batch_size,
666
+ fix_seed, rand_seed,
667
+ use_actual_mask,
668
+ append_grounding, style_cond_image,
669
+ state,
670
+ ],
671
+ outputs=[out_gen_1, out_gen_2, out_gen_3, out_gen_4, state],
672
+ queue=True
673
+ )
674
+ sketch_pad_resize_trigger.change(
675
+ None,
676
+ None,
677
+ sketch_pad_resize_trigger,
678
+ _js=rescale_js,
679
+ queue=False)
680
+ init_white_trigger.change(
681
+ None,
682
+ None,
683
+ init_white_trigger,
684
+ _js=rescale_js,
685
+ queue=False)
686
+ use_style_cond.change(
687
+ lambda cond: gr.Image.update(visible=cond),
688
+ use_style_cond,
689
+ style_cond_image,
690
+ queue=False)
691
+ task.change(
692
+ controller.switch_task_hide_cond,
693
+ inputs=task,
694
+ outputs=[use_style_cond, style_cond_image, alpha_sample, use_actual_mask],
695
+ queue=False)
696
+
697
+ with gr.Column():
698
+ gr.Examples(
699
+ examples=[
700
+ [
701
+ "images/blank.png",
702
+ "Grounded Generation",
703
+ "a dog and an apple",
704
+ "a dog;an apple",
705
+ ],
706
+ [
707
+ "images/blank.png",
708
+ "Grounded Generation",
709
+ "John Lennon is using a pc",
710
+ "John Lennon;a pc",
711
+ ],
712
+ [
713
+ "images/blank.png",
714
+ "Grounded Generation",
715
+ "a painting of a fox sitting in a field at sunrise in the style of Claude Monet",
716
+ "fox;sunrise",
717
+ ],
718
+ [
719
+ "images/blank.png",
720
+ "Grounded Generation",
721
+ "a beautiful painting of hot dog by studio ghibli, octane render, brilliantly coloured",
722
+ "hot dog",
723
+ ],
724
+ [
725
+ "images/blank.png",
726
+ "Grounded Generation",
727
+ "a sport car, unreal engine, global illumination, ray tracing",
728
+ "a sport car",
729
+ ],
730
+ [
731
+ "images/flower_beach.jpg",
732
+ "Grounded Inpainting",
733
+ "a squirrel and the space needle",
734
+ "a squirrel;the space needle",
735
+ ],
736
+ [
737
+ "images/arg_corgis.jpeg",
738
+ "Grounded Inpainting",
739
+ "a dog and a birthday cake",
740
+ "a dog; a birthday cake",
741
+ ],
742
+ [
743
+ "images/teddy.jpg",
744
+ "Grounded Inpainting",
745
+ "a teddy bear wearing a santa claus red shirt; holding a Christmas gift box on hand",
746
+ "a santa claus shirt; a Christmas gift box",
747
+ ],
748
+ ],
749
+ inputs=[sketch_pad, task, language_instruction, grounding_instruction],
750
+ outputs=None,
751
+ fn=None,
752
+ cache_examples=False,
753
+ )
754
+
755
+ main.queue(concurrency_count=1, api_open=False)
756
+ main.launch(share=False, show_api=False)
757
+
758
+
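A minimal sketch of how the grounding inputs above fit together outside the UI: generate() normalizes the 512x512 sketch-pad boxes to [0, 1] and serializes them as JSON, which inference() parses back into phrase and location lists. The boxes and phrases below are illustrative values only.

import json
import numpy as np

# Hypothetical example: two boxes drawn on the 512x512 sketch pad, in pixels.
pixel_boxes = [(60, 200, 250, 480), (280, 120, 470, 360)]
phrases = ["a dog", "an apple"]

# generate() divides pixel coordinates by 512 before serializing.
norm_boxes = (np.asarray(pixel_boxes) / 512).tolist()
grounding_instruction = json.dumps({p: b for p, b in zip(phrases, norm_boxes)})
print(grounding_instruction)
# inference() then json.loads() this string and splits it into the
# phrase_list / location_list passed to grounded_generation_box().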
dataset/__init__.py ADDED
File without changes
dataset/base_dataset.py ADDED
@@ -0,0 +1,220 @@
1
+ import torch
2
+ from PIL import Image, ImageDraw
3
+ import torchvision.transforms as transforms
4
+ import torchvision
5
+ from zipfile import ZipFile
6
+ import os
7
+ import multiprocessing
8
+ import math
9
+ import numpy as np
10
+ import random
11
+ from io import BytesIO
12
+
13
+ VALID_IMAGE_TYPES = ['.jpg', '.jpeg', '.tiff', '.bmp', '.png']
14
+
15
+
16
+ def check_filenames_in_zipdata(filenames, ziproot):
17
+ samples = []
18
+ for fst in ZipFile(ziproot).infolist():
19
+ fname = fst.filename
20
+ if fname.endswith('/') or fname.startswith('.') or fst.file_size == 0:
21
+ continue
22
+ if os.path.splitext(fname)[1].lower() in VALID_IMAGE_TYPES:
23
+ samples.append((fname))
24
+ filenames = set(filenames)
25
+ samples = set(samples)
26
+ assert filenames.issubset(samples), 'Something wrong with your zip data'
27
+
28
+
29
+
30
+ def draw_box(img, boxes):
31
+ colors = ["red", "olive", "blue", "green", "orange", "brown", "cyan", "purple"]
32
+ draw = ImageDraw.Draw(img)
33
+ for bid, box in enumerate(boxes):
34
+ draw.rectangle([box[0], box[1], box[2], box[3]], outline =colors[bid % len(colors)], width=4)
35
+ # draw.rectangle([box[0], box[1], box[2], box[3]], outline ="red", width=2) # x0 y0 x1 y1
36
+ return img
37
+
38
+
39
+
40
+ def to_valid(x0, y0, x1, y1, image_size, min_box_size):
41
+ valid = True
42
+
43
+ if x0>image_size or y0>image_size or x1<0 or y1<0:
44
+ valid = False # no way to make this box valid, it is completely cropped out
45
+ return valid, (None, None, None, None)
46
+
47
+ x0 = max(x0, 0)
48
+ y0 = max(y0, 0)
49
+ x1 = min(x1, image_size)
50
+ y1 = min(y1, image_size)
51
+
52
+ if (x1-x0)*(y1-y0) / (image_size*image_size) < min_box_size:
53
+ valid = False
54
+ return valid, (None, None, None, None)
55
+
56
+ return valid, (x0, y0, x1, y1)
57
+
58
+
59
+
60
+
61
+
62
+ def recalculate_box_and_verify_if_valid(x, y, w, h, trans_info, image_size, min_box_size):
63
+ """
64
+ x,y,w,h: the original annotation corresponding to the raw image size.
65
+ trans_info: what resizing and cropping have been applied to the raw image
66
+ image_size: what is the final image size
67
+ """
68
+
69
+ x0 = x * trans_info["performed_scale"] - trans_info['crop_x']
70
+ y0 = y * trans_info["performed_scale"] - trans_info['crop_y']
71
+ x1 = (x + w) * trans_info["performed_scale"] - trans_info['crop_x']
72
+ y1 = (y + h) * trans_info["performed_scale"] - trans_info['crop_y']
73
+
74
+
75
+ # at this point, box annotation has been recalculated based on scaling and cropping
76
+ # but some points may fall outside the image_size region (e.g., become negative), thus we
77
+ # need to clamp them into [0, image_size]. But if all points fall outside the image
78
+ # region, then we consider this an invalid box.
79
+ valid, (x0, y0, x1, y1) = to_valid(x0, y0, x1, y1, image_size, min_box_size)
80
+
81
+ if valid:
82
+ # we also perform random flip.
83
+ # Here boxes are valid, and are based on image_size
84
+ if trans_info["performed_flip"]:
85
+ x0, x1 = image_size-x1, image_size-x0
86
+
87
+ return valid, (x0, y0, x1, y1)
88
+
89
+
90
+
91
+ class BaseDataset(torch.utils.data.Dataset):
92
+ def __init__(self, image_root, random_crop, random_flip, image_size):
93
+ super().__init__()
94
+ self.image_root = image_root
95
+ self.random_crop = random_crop
96
+ self.random_flip = random_flip
97
+ self.image_size = image_size
98
+ self.use_zip = False
99
+
100
+ if image_root[-4::] == 'zip':
101
+ self.use_zip = True
102
+ self.zip_dict = {}
103
+
104
+ if self.random_crop:
105
+ assert False, 'NOT IMPLEMENTED'
106
+
107
+
108
+ def fetch_zipfile(self, ziproot):
109
+ pid = multiprocessing.current_process().pid # get pid of this process.
110
+ if pid not in self.zip_dict:
111
+ self.zip_dict[pid] = ZipFile(ziproot)
112
+ zip_file = self.zip_dict[pid]
113
+ return zip_file
114
+
115
+ def fetch_image(self, filename):
116
+ if self.use_zip:
117
+ zip_file = self.fetch_zipfile(self.image_root)
118
+ image = Image.open( BytesIO(zip_file.read(filename)) ).convert('RGB')
119
+ return image
120
+ else:
121
+ image = Image.open( os.path.join(self.image_root,filename) ).convert('RGB')
122
+ return image
123
+
124
+
125
+ def vis_getitem_data(self, index=None, out=None, return_tensor=False, name="res.jpg", print_caption=True):
126
+
127
+ if out is None:
128
+ out = self[index]
129
+
130
+ img = torchvision.transforms.functional.to_pil_image( out["image"]*0.5+0.5 )
131
+ canvas = torchvision.transforms.functional.to_pil_image( torch.ones_like(out["image"]) )
132
+ W, H = img.size
133
+
134
+ if print_caption:
135
+ caption = out["caption"]
136
+ print(caption)
137
+ print(" ")
138
+
139
+ boxes = []
140
+ for box in out["boxes"]:
141
+ x0,y0,x1,y1 = box
142
+ boxes.append( [float(x0*W), float(y0*H), float(x1*W), float(y1*H)] )
143
+ img = draw_box(img, boxes)
144
+
145
+ if return_tensor:
146
+ return torchvision.transforms.functional.to_tensor(img)
147
+ else:
148
+ img.save(name)
149
+
150
+
151
+ def transform_image(self, pil_image):
152
+ if self.random_crop:
153
+ assert False
154
+ arr = random_crop_arr(pil_image, self.image_size)
155
+ else:
156
+ arr, info = center_crop_arr(pil_image, self.image_size)
157
+
158
+ info["performed_flip"] = False
159
+ if self.random_flip and random.random()<0.5:
160
+ arr = arr[:, ::-1]
161
+ info["performed_flip"] = True
162
+
163
+ arr = arr.astype(np.float32) / 127.5 - 1
164
+ arr = np.transpose(arr, [2,0,1])
165
+
166
+ return torch.tensor(arr), info
167
+
168
+
169
+
170
+ def center_crop_arr(pil_image, image_size):
171
+ # We are not on a new enough PIL to support the `reducing_gap`
172
+ # argument, which uses BOX downsampling at powers of two first.
173
+ # Thus, we do it by hand to improve downsample quality.
174
+ WW, HH = pil_image.size
175
+
176
+ while min(*pil_image.size) >= 2 * image_size:
177
+ pil_image = pil_image.resize(
178
+ tuple(x // 2 for x in pil_image.size), resample=Image.BOX
179
+ )
180
+
181
+ scale = image_size / min(*pil_image.size)
182
+
183
+ pil_image = pil_image.resize(
184
+ tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
185
+ )
186
+
187
+ # at this point, the min of pil_image side is desired image_size
188
+ performed_scale = image_size / min(WW, HH)
189
+
190
+ arr = np.array(pil_image)
191
+ crop_y = (arr.shape[0] - image_size) // 2
192
+ crop_x = (arr.shape[1] - image_size) // 2
193
+
194
+ info = {"performed_scale":performed_scale, 'crop_y':crop_y, 'crop_x':crop_x, "WW":WW, 'HH':HH}
195
+
196
+ return arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size], info
197
+
198
+
199
+ def random_crop_arr(pil_image, image_size, min_crop_frac=0.8, max_crop_frac=1.0):
200
+ min_smaller_dim_size = math.ceil(image_size / max_crop_frac)
201
+ max_smaller_dim_size = math.ceil(image_size / min_crop_frac)
202
+ smaller_dim_size = random.randrange(min_smaller_dim_size, max_smaller_dim_size + 1)
203
+
204
+ # We are not on a new enough PIL to support the `reducing_gap`
205
+ # argument, which uses BOX downsampling at powers of two first.
206
+ # Thus, we do it by hand to improve downsample quality.
207
+ while min(*pil_image.size) >= 2 * smaller_dim_size:
208
+ pil_image = pil_image.resize(
209
+ tuple(x // 2 for x in pil_image.size), resample=Image.BOX
210
+ )
211
+
212
+ scale = smaller_dim_size / min(*pil_image.size)
213
+ pil_image = pil_image.resize(
214
+ tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
215
+ )
216
+
217
+ arr = np.array(pil_image)
218
+ crop_y = random.randrange(arr.shape[0] - image_size + 1)
219
+ crop_x = random.randrange(arr.shape[1] - image_size + 1)
220
+ return arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size]
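A small worked example of the box bookkeeping above, with illustrative numbers only: center_crop_arr records the scale and crop it applied, and recalculate_box_and_verify_if_valid replays that transform on a COCO-style (x, y, w, h) annotation.

# Assumed input: a 640x480 source image center-cropped to 256x256 by center_crop_arr,
# so scale = 256/480, resized size = (341, 256), crop_x = (341-256)//2 = 42, crop_y = 0.
image_size = 256
trans_info = {"performed_scale": 256 / 480, "crop_x": 42, "crop_y": 0,
              "WW": 640, "HH": 480, "performed_flip": False}
x, y, w, h = 100, 50, 200, 150  # raw-pixel annotation

# Same arithmetic as recalculate_box_and_verify_if_valid:
x0 = x * trans_info["performed_scale"] - trans_info["crop_x"]
y0 = y * trans_info["performed_scale"] - trans_info["crop_y"]
x1 = (x + w) * trans_info["performed_scale"] - trans_info["crop_x"]
y1 = (y + h) * trans_info["performed_scale"] - trans_info["crop_y"]
# to_valid() then clamps these into [0, image_size] and rejects boxes whose remaining
# area is below min_box_size of the image; a horizontal flip, if performed, simply
# mirrors x0/x1 around image_size.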
dataset/catalog.py ADDED
@@ -0,0 +1,72 @@
1
+ import os
2
+
3
+ class DatasetCatalog:
4
+ def __init__(self, ROOT, which_embedder):
5
+ assert which_embedder in ['clip', 'bert']
6
+
7
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
8
+
9
+
10
+ self.VGGrounding = {
11
+ "target": "dataset.tsv_dataset.TSVDataset",
12
+ "train_params": dict(
13
+ tsv_path=os.path.join(ROOT,'GROUNDING/gqa/tsv/train-00.tsv'),
14
+ )
15
+ }
16
+
17
+
18
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
19
+
20
+
21
+ self.FlickrGrounding = {
22
+ "target": "dataset.tsv_dataset.TSVDataset",
23
+ "train_params":dict(
24
+ tsv_path=os.path.join(ROOT,'GROUNDING/flickr30k/tsv/train-00.tsv'),
25
+ )
26
+ }
27
+
28
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
29
+
30
+ self.SBUGrounding = {
31
+ "target": "dataset.tsv_dataset.TSVDataset",
32
+ "train_params":dict(
33
+ tsv_path=os.path.join(ROOT,'GROUNDING/SBU/tsv/train-00.tsv'),
34
+ )
35
+ }
36
+
37
+
38
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
39
+
40
+
41
+ self.CC3MGrounding = {
42
+ "target": "dataset.tsv_dataset.TSVDataset",
43
+ "train_params":dict(
44
+ tsv_path=os.path.join(ROOT,'GROUNDING/CC3M/tsv/train-00.tsv'),
45
+ )
46
+ }
47
+
48
+
49
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
50
+
51
+
52
+ self.CC12MGrounding = {
53
+ "target": "dataset.tsv_dataset.TSVDataset",
54
+ "train_params":dict(
55
+ tsv_path=os.path.join(ROOT,'GROUNDING/CC12M/tsv/train-00.tsv'),
56
+ )
57
+ }
58
+
59
+
60
+ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
61
+
62
+ # temp = 'category_embedding_clip.pth' if which_embedder == 'clip' else 'category_embedding_bert.pth'
63
+ # obj365_category_embedding_path = os.path.join(ROOT, 'OBJECTS365', temp)
64
+
65
+ self.Obj365Detection = {
66
+ "target": "dataset.tsv_dataset.TSVDataset",
67
+ "train_params":dict(
68
+ tsv_path=os.path.join(ROOT,'OBJECTS365/tsv/train-00.tsv'),
69
+ ),
70
+ }
71
+
72
+
dataset/cd_dataset.py ADDED
@@ -0,0 +1,250 @@
1
+ import json, os, random, math
2
+ from collections import defaultdict
3
+ from copy import deepcopy
4
+
5
+ import torch
6
+ from torch.utils.data import Dataset
7
+ import torchvision.transforms as transforms
8
+
9
+ import numpy as np
10
+ from PIL import Image
11
+ from .base_dataset import BaseDataset, check_filenames_in_zipdata, recalculate_box_and_verify_if_valid
12
+ from io import BytesIO
13
+
14
+
15
+
16
+ def not_in_at_all(list1, list2):
17
+ for a in list1:
18
+ if a in list2:
19
+ return False
20
+ return True
21
+
22
+
23
+ def clean_annotations(annotations):
24
+ for anno in annotations:
25
+ anno.pop("segmentation", None)
26
+ anno.pop("area", None)
27
+ anno.pop("iscrowd", None)
28
+ # anno.pop("id", None)
29
+
30
+
31
+ def make_a_sentence(obj_names, clean=False):
32
+
33
+ if clean:
34
+ obj_names = [ name[:-6] if ("-other" in name) else name for name in obj_names]
35
+
36
+ caption = ""
37
+ tokens_positive = []
38
+ for obj_name in obj_names:
39
+ start_len = len(caption)
40
+ caption += obj_name
41
+ end_len = len(caption)
42
+ caption += ", "
43
+ tokens_positive.append(
44
+ [[start_len, end_len]] # in real caption, positive tokens can be disjoint, thus using list of list
45
+ )
46
+ caption = caption[:-2] # remove last ", "
47
+
48
+ return caption #, tokens_positive
49
+
50
+
51
+ def check_all_have_same_images(instances_data, stuff_data, caption_data):
52
+ if stuff_data is not None:
53
+ assert instances_data["images"] == stuff_data["images"]
54
+ if caption_data is not None:
55
+ assert instances_data["images"] == caption_data["images"]
56
+
57
+
58
+ class CDDataset(BaseDataset):
59
+ "CD: Caption Detection"
60
+ def __init__(self,
61
+ image_root,
62
+ category_embedding_path,
63
+ instances_json_path = None,
64
+ stuff_json_path = None,
65
+ caption_json_path = None,
66
+ prob_real_caption = 0,
67
+ fake_caption_type = 'empty',
68
+ image_size=256,
69
+ max_images=None,
70
+ min_box_size=0.01,
71
+ max_boxes_per_image=8,
72
+ include_other=False,
73
+ random_crop = False,
74
+ random_flip = True,
75
+ ):
76
+ super().__init__(random_crop, random_flip, image_size)
77
+
78
+ self.image_root = image_root
79
+ self.category_embedding_path = category_embedding_path
80
+ self.instances_json_path = instances_json_path
81
+ self.stuff_json_path = stuff_json_path
82
+ self.caption_json_path = caption_json_path
83
+ self.prob_real_caption = prob_real_caption
84
+ self.fake_caption_type = fake_caption_type
85
+ self.max_images = max_images
86
+ self.min_box_size = min_box_size
87
+ self.max_boxes_per_image = max_boxes_per_image
88
+ self.include_other = include_other
89
+
90
+
91
+ assert fake_caption_type in ["empty", "made"]
92
+ if prob_real_caption > 0:
93
+ assert caption_json_path is not None, "caption json must be given"
94
+
95
+
96
+ # Load all jsons
97
+ with open(instances_json_path, 'r') as f:
98
+ instances_data = json.load(f) # keys: 'info', 'images', 'licenses', 'categories', 'annotations'
99
+ clean_annotations(instances_data["annotations"])
100
+ self.instances_data = instances_data
101
+
102
+ self.stuff_data = None
103
+ if stuff_json_path is not None:
104
+ with open(stuff_json_path, 'r') as f:
105
+ stuff_data = json.load(f) # keys: 'info', 'images', 'licenses', 'categories', 'annotations'
106
+ clean_annotations(stuff_data["annotations"])
107
+ self.stuff_data = stuff_data
108
+
109
+ self.captions_data = None
110
+ if caption_json_path is not None:
111
+ with open(caption_json_path, 'r') as f:
112
+ captions_data = json.load(f) # keys: 'info', 'images', 'licenses', 'categories', 'annotations'
113
+ clean_annotations(captions_data["annotations"])
114
+ self.captions_data = captions_data
115
+
116
+
117
+ # Load preprocessed name embedding
118
+ self.category_embeddings = torch.load(category_embedding_path)
119
+ self.embedding_len = list( self.category_embeddings.values() )[0].shape[0]
120
+
121
+
122
+ # Misc
123
+ self.image_ids = [] # main list for selecting images
124
+ self.image_id_to_filename = {} # file names used to read image
125
+ check_all_have_same_images(self.instances_data, self.stuff_data, self.captions_data)
126
+ for image_data in self.instances_data['images']:
127
+ image_id = image_data['id']
128
+ filename = image_data['file_name']
129
+ self.image_ids.append(image_id)
130
+ self.image_id_to_filename[image_id] = filename
131
+
132
+
133
+ # All category names (including things and stuff)
134
+ self.object_idx_to_name = {}
135
+ for category_data in self.instances_data['categories']:
136
+ self.object_idx_to_name[category_data['id']] = category_data['name']
137
+ if self.stuff_data is not None:
138
+ for category_data in self.stuff_data['categories']:
139
+ self.object_idx_to_name[category_data['id']] = category_data['name']
140
+
141
+
142
+ # Add object data from instances and stuff
143
+ self.image_id_to_objects = defaultdict(list)
144
+ self.select_objects( self.instances_data['annotations'] )
145
+ if self.stuff_data is not None:
146
+ self.select_objects( self.stuff_data['annotations'] )
147
+
148
+ # Add caption data
149
+ if self.captions_data is not None:
150
+ self.image_id_to_captions = defaultdict(list)
151
+ self.select_captions( self.captions_data['annotations'] )
152
+
153
+ # Check if all filenames can be found in the zip file
154
+ # all_filenames = [self.image_id_to_filename[idx] for idx in self.image_ids]
155
+ # check_filenames_in_zipdata(all_filenames, image_root)
156
+
157
+
158
+ def select_objects(self, annotations):
159
+ for object_anno in annotations:
160
+ image_id = object_anno['image_id']
161
+ object_name = self.object_idx_to_name[object_anno['category_id']]
162
+ other_ok = object_name != 'other' or self.include_other
163
+ if other_ok:
164
+ self.image_id_to_objects[image_id].append(object_anno)
165
+
166
+
167
+ def select_captions(self, annotations):
168
+ for caption_data in annotations:
169
+ image_id = caption_data['image_id']
170
+ self.image_id_to_captions[image_id].append(caption_data)
171
+
172
+
173
+ def total_images(self):
174
+ return len(self)
175
+
176
+
177
+ def __getitem__(self, index):
178
+ if self.max_boxes_per_image > 99:
179
+ assert False, "Are you sure you want such a large number of boxes?"
180
+
181
+ out = {}
182
+
183
+ image_id = self.image_ids[index]
184
+ out['id'] = image_id
185
+
186
+ # Image
187
+ filename = self.image_id_to_filename[image_id]
188
+ image = self.fetch_image(filename)
189
+ #WW, HH = image.size
190
+ image_tensor, trans_info = self.transform_image(image)
191
+ out["image"] = image_tensor
192
+
193
+
194
+ # Select valid boxes after cropping (center or random)
195
+ this_image_obj_annos = deepcopy(self.image_id_to_objects[image_id])
196
+ areas = []
197
+ all_obj_names = []
198
+ all_boxes = []
199
+ all_masks = []
200
+ all_positive_embeddings = []
201
+ for object_anno in this_image_obj_annos:
202
+
203
+ x, y, w, h = object_anno['bbox']
204
+ valid, (x0, y0, x1, y1) = recalculate_box_and_verify_if_valid(x, y, w, h, trans_info, self.image_size, self.min_box_size)
205
+
206
+ if valid:
207
+ areas.append( (x1-x0)*(y1-y0) )
208
+ obj_name = self.object_idx_to_name[ object_anno['category_id'] ]
209
+ all_obj_names.append(obj_name)
210
+ all_boxes.append( torch.tensor([x0,y0,x1,y1]) / self.image_size ) # scale to 0-1
211
+ all_masks.append(1)
212
+ all_positive_embeddings.append( self.category_embeddings[obj_name] )
213
+
214
+ wanted_idxs = torch.tensor(areas).sort(descending=True)[1]
215
+ wanted_idxs = wanted_idxs[0:self.max_boxes_per_image]
216
+ obj_names = [] # used for making a sentence
217
+ boxes = torch.zeros(self.max_boxes_per_image, 4)
218
+ masks = torch.zeros(self.max_boxes_per_image)
219
+ positive_embeddings = torch.zeros(self.max_boxes_per_image, self.embedding_len)
220
+ for i, idx in enumerate(wanted_idxs):
221
+ obj_names.append( all_obj_names[idx] )
222
+ boxes[i] = all_boxes[idx]
223
+ masks[i] = all_masks[idx]
224
+ positive_embeddings[i] = all_positive_embeddings[idx]
225
+
226
+ # Caption
227
+ if random.uniform(0, 1) < self.prob_real_caption:
228
+ caption_data = self.image_id_to_captions[image_id]
229
+ idx = random.randint(0, len(caption_data)-1 )
230
+ caption = caption_data[idx]["caption"]
231
+ else:
232
+ if self.fake_caption_type == "empty":
233
+ caption = ""
234
+ else:
235
+ caption = make_a_sentence(obj_names, clean=True)
236
+
237
+
238
+ out["caption"] = caption
239
+ out["boxes"] = boxes
240
+ out["masks"] = masks
241
+ out["positive_embeddings"] = positive_embeddings
242
+
243
+ return out
244
+
245
+
246
+ def __len__(self):
247
+ if self.max_images is None:
248
+ return len(self.image_ids)
249
+ return min(len(self.image_ids), self.max_images)
250
+
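A brief sketch of the fixed-size outputs produced by __getitem__ above, assuming the default max_boxes_per_image=8; the embedding length used here (768) is an assumption and in practice comes from the category-embedding .pth file.

import torch

max_boxes_per_image = 8   # default above
embedding_len = 768       # assumed CLIP-sized; read from the embedding file in practice

# Every sample is padded to the same number of box slots so samples batch cleanly:
boxes = torch.zeros(max_boxes_per_image, 4)                    # x0, y0, x1, y1 scaled to [0, 1]
masks = torch.zeros(max_boxes_per_image)                       # 1 for real boxes, 0 for padding
positive_embeddings = torch.zeros(max_boxes_per_image, embedding_len)
# Valid boxes are sorted by area (largest first) and written into the leading slots.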
dataset/concat_dataset.py ADDED
@@ -0,0 +1,65 @@
1
+ from .catalog import DatasetCatalog
2
+ from ldm.util import instantiate_from_config
3
+ import torch
4
+
5
+
6
+
7
+
8
+ class ConCatDataset():
9
+ def __init__(self, dataset_name_list, ROOT, which_embedder, train=True, repeats=None):
10
+ self.datasets = []
11
+ cul_previous_dataset_length = 0
12
+ offset_map = []
13
+ which_dataset = []
14
+
15
+ if repeats is None:
16
+ repeats = [1] * len(dataset_name_list)
17
+ else:
18
+ assert len(repeats) == len(dataset_name_list)
19
+
20
+
21
+ Catalog = DatasetCatalog(ROOT, which_embedder)
22
+ for dataset_idx, (dataset_name, yaml_params) in enumerate(dataset_name_list.items()):
23
+ repeat = repeats[dataset_idx]
24
+
25
+ dataset_dict = getattr(Catalog, dataset_name)
26
+
27
+ target = dataset_dict['target']
28
+ params = dataset_dict['train_params'] if train else dataset_dict['val_params']
29
+ if yaml_params is not None:
30
+ params.update(yaml_params)
31
+ dataset = instantiate_from_config( dict(target=target, params=params) )
32
+
33
+ self.datasets.append(dataset)
34
+ for _ in range(repeat):
35
+ offset_map.append( torch.ones(len(dataset))*cul_previous_dataset_length )
36
+ which_dataset.append( torch.ones(len(dataset))*dataset_idx )
37
+ cul_previous_dataset_length += len(dataset)
38
+ offset_map = torch.cat(offset_map, dim=0).long()
39
+ self.total_length = cul_previous_dataset_length
40
+
41
+ self.mapping = torch.arange(self.total_length) - offset_map
42
+ self.which_dataset = torch.cat(which_dataset, dim=0).long()
43
+
44
+
45
+ def total_images(self):
46
+ count = 0
47
+ for dataset in self.datasets:
48
+ print(dataset.total_images())
49
+ count += dataset.total_images()
50
+ return count
51
+
52
+
53
+
54
+ def __getitem__(self, idx):
55
+ dataset = self.datasets[ self.which_dataset[idx] ]
56
+ return dataset[ self.mapping[idx] ]
57
+
58
+
59
+ def __len__(self):
60
+ return self.total_length
61
+
62
+
63
+
64
+
65
+
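The index bookkeeping in ConCatDataset above is easy to misread, so here is a minimal, self-contained sketch (with made-up dataset lengths and repeats, none of which come from the repo) of how a global index resolves to a (dataset id, local index) pair via the offset table.

import torch

lengths = [3, 5, 2]      # hypothetical per-dataset lengths
repeats = [1, 2, 1]      # the second dataset is repeated twice

offset_map, which_dataset, total = [], [], 0
for ds_id, (n, rep) in enumerate(zip(lengths, repeats)):
    for _ in range(rep):
        offset_map.append(torch.ones(n) * total)      # global offset of this copy
        which_dataset.append(torch.ones(n) * ds_id)   # which dataset this copy points to
        total += n

offset_map = torch.cat(offset_map).long()
which_dataset = torch.cat(which_dataset).long()
mapping = torch.arange(total) - offset_map            # local index inside the chosen dataset

assert total == 15
assert which_dataset[4].item() == 1 and mapping[4].item() == 1   # 5th sample -> dataset 1, local index 1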
dataset/grounding_dataset.py ADDED
@@ -0,0 +1,205 @@
1
+ import torch
3
+ import json
4
+ from collections import defaultdict
5
+ from PIL import Image, ImageDraw
6
+ from copy import deepcopy
7
+ import os
8
+ import torchvision.transforms as transforms
9
+ import torchvision
10
+ from .base_dataset import BaseDataset, check_filenames_in_zipdata, recalculate_box_and_verify_if_valid
11
+ from io import BytesIO
12
+ import random
13
+
14
+ def check_unique(images, fields):
15
+ for field in fields:
16
+ temp_list = []
17
+ for img_info in images:
18
+ temp_list.append(img_info[field])
19
+ assert len(set(temp_list)) == len(temp_list), field
20
+
21
+ def clean_data(data):
22
+ for data_info in data:
23
+ data_info.pop("original_img_id", None)
24
+ data_info.pop("original_id", None)
25
+ data_info.pop("sentence_id", None) # sentence id for each image (multiple sentences for one image)
26
+ data_info.pop("dataset_name", None)
27
+ data_info.pop("data_source", None)
28
+ data_info["data_id"] = data_info.pop("id")
29
+
30
+
31
+ def clean_annotations(annotations):
32
+ for anno_info in annotations:
33
+ anno_info.pop("iscrowd", None) # I have checked that all 0 for flickr, vg, coco
34
+ anno_info.pop("category_id", None) # I have checked that all 1 for flickr vg. This is not always 1 for coco, but I do not think we need this annotation
35
+ anno_info.pop("area", None)
36
+ # anno_info.pop("id", None)
37
+ anno_info["data_id"] = anno_info.pop("image_id")
38
+
39
+
40
+ def draw_box(img, boxes):
41
+ draw = ImageDraw.Draw(img)
42
+ for box in boxes:
43
+ draw.rectangle([box[0], box[1], box[2], box[3]], outline ="red", width=2) # x0 y0 x1 y1
44
+ return img
45
+
46
+
47
+ def xyhw2xyxy(box):
48
+ x0, y0, w, h = box
49
+ return [ x0, y0, x0+w, y0+h ]
50
+
51
+
52
+
53
+ class GroundingDataset(BaseDataset):
54
+ def __init__(self,
55
+ image_root,
56
+ json_path,
57
+ annotation_embedding_path,
58
+ prob_real_caption=1,
59
+ image_size=256,
60
+ min_box_size=0.01,
61
+ max_boxes_per_data=8,
62
+ max_images=None, # set as 30K used to eval
63
+ random_crop = False,
64
+ random_flip = True,
65
+ ):
66
+ super().__init__(image_root, random_crop, random_flip, image_size)
67
+ self.image_root = image_root
68
+ self.json_path = json_path
69
+ self.annotation_embedding_path = annotation_embedding_path
70
+ self.prob_real_caption = prob_real_caption
71
+ self.min_box_size = min_box_size
72
+ self.max_boxes_per_data = max_boxes_per_data
73
+ self.max_images = max_images
74
+
75
+
76
+ # Load raw data
77
+ with open(json_path, 'r') as f:
78
+ json_raw = json.load(f) # keys: 'info', 'images', 'licenses', 'categories', 'annotations'
79
+ self.data = json_raw["images"] # donot name it images, which is misleading
80
+ self.annotations = json_raw["annotations"]
81
+
82
+
83
+ # Load preprocessed name embedding
84
+ if 'bert' in annotation_embedding_path:
85
+ self.embedding_len = 1280
86
+ elif 'clip' in annotation_embedding_path:
87
+ self.embedding_len = 768
88
+ else:
89
+ assert False
90
+
91
+
92
+ # clean data and annotation
93
+ check_unique( self.data, ['id'] )
94
+ check_unique( self.annotations, ['id'] )
95
+ clean_data(self.data)
96
+ clean_annotations(self.annotations)
97
+ self.data_id_list = [ datum['data_id'] for datum in self.data ]
98
+ self.data = { datum['data_id']:datum for datum in self.data } # map self.data from a list into a dict
99
+
100
+
101
+ # data point to its annotation mapping
102
+ self.data_id_to_annos = defaultdict(list)
103
+ for anno in self.annotations:
104
+ self.data_id_to_annos[ anno["data_id"] ].append(anno)
105
+
106
+
107
+
108
+ # These are not used that often, but are useful in some cases
109
+ self.file_names = [] # all training images
110
+ self.file_name_to_data_ids = defaultdict(list) # for each image, there are multiple data points (captions)
111
+ for data_id in self.data_id_list:
112
+ file_name = self.data[data_id]["file_name"]
113
+ self.file_names.append(file_name)
114
+ self.file_name_to_data_ids[file_name].append(data_id)
115
+ self.file_names = list(set(self.file_names))
116
+
117
+
118
+ if self.max_images is not None:
119
+ "This is only used as COCO2017P evulation, when we set max_images as 30k"
120
+ assert False, 'I have commented out the following code to save cpu memory'
121
+ # new_data_id_list = []
122
+ # new_file_name_to_data_ids = defaultdict(list)
123
+ # self.file_names = self.file_names[0:self.max_images]
124
+ # for file_name in self.file_names:
125
+ # data_id = self.file_name_to_data_ids[file_name][0]
126
+ # new_data_id_list.append(data_id)
127
+ # new_file_name_to_data_ids[file_name].append(data_id)
128
+ # self.data_id_list = new_data_id_list
129
+ # self.file_name_to_data_ids = new_file_name_to_data_ids
130
+
131
+
132
+ # Check if all filenames can be found in the zip file
133
+ # all_filenames = [self.data[idx]['file_name'] for idx in self.data_id_list ]
134
+ # check_filenames_in_zipdata(all_filenames, image_root)
135
+
136
+
137
+ def total_images(self):
138
+ return len(self.file_names)
139
+
140
+
141
+ def __getitem__(self, index):
142
+ if self.max_boxes_per_data > 99:
143
+ assert False, "Are you sure setting such large number of boxes?"
144
+
145
+ out = {}
146
+
147
+ data_id = self.data_id_list[index]
148
+ out['id'] = data_id
149
+
150
+
151
+ # Image and caption
152
+ file_name = self.data[data_id]['file_name']
153
+ image = self.fetch_image(file_name)
154
+ image_tensor, trans_info = self.transform_image(image)
155
+ out["image"] = image_tensor
156
+
157
+ if random.uniform(0, 1) < self.prob_real_caption:
158
+ out["caption"] = self.data[data_id]["caption"]
159
+ else:
160
+ out["caption"] = ""
161
+
162
+
163
+
164
+ annos = deepcopy(self.data_id_to_annos[data_id])
165
+ areas = []
166
+ all_boxes = []
167
+ all_masks = []
168
+ all_positive_embeddings = []
169
+
170
+
171
+ for anno in annos:
172
+
173
+ x, y, w, h = anno['bbox']
174
+ valid, (x0, y0, x1, y1) = recalculate_box_and_verify_if_valid(x, y, w, h, trans_info, self.image_size, self.min_box_size)
175
+
176
+ if valid:
177
+ areas.append( (x1-x0)*(y1-y0) )
178
+ all_boxes.append( torch.tensor([x0,y0,x1,y1]) / self.image_size ) # scale to 0-1
179
+ all_masks.append(1)
180
+ all_positive_embeddings.append( torch.load(os.path.join(self.annotation_embedding_path,str(anno["id"])), map_location='cpu' ) )
181
+
182
+ wanted_idxs = torch.tensor(areas).sort(descending=True)[1]
183
+ wanted_idxs = wanted_idxs[0:self.max_boxes_per_data]
184
+
185
+ boxes = torch.zeros(self.max_boxes_per_data, 4)
186
+ masks = torch.zeros(self.max_boxes_per_data)
187
+ positive_embeddings = torch.zeros(self.max_boxes_per_data, self.embedding_len)
188
+ for i, idx in enumerate(wanted_idxs):
189
+ boxes[i] = all_boxes[idx]
190
+ masks[i] = all_masks[idx]
191
+ positive_embeddings[i] = all_positive_embeddings[idx]
192
+
193
+
194
+ out["boxes"] = boxes
195
+ out["masks"] = masks
196
+ out["positive_embeddings"] = positive_embeddings
197
+
198
+ return out
199
+
200
+
201
+
202
+ def __len__(self):
203
+ return len(self.data_id_list)
204
+
205
+
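As a reading aid for __getitem__ above: valid boxes are ranked by area, truncated to max_boxes_per_data, and written into fixed-size zero tensors so every sample collates to the same shape. The sketch below reproduces just that padding step with random stand-in data (the sizes and the fake embeddings are assumptions, not values from the repo).

import torch

max_boxes, emb_len = 4, 8                       # small stand-in sizes
all_boxes = [torch.rand(4) for _ in range(6)]   # pretend 6 boxes passed the validity check
areas = torch.rand(6)                           # pretend areas of those boxes

wanted_idxs = areas.sort(descending=True)[1][:max_boxes]   # keep the largest boxes

boxes = torch.zeros(max_boxes, 4)
masks = torch.zeros(max_boxes)
positive_embeddings = torch.zeros(max_boxes, emb_len)
for i, idx in enumerate(wanted_idxs):
    boxes[i] = all_boxes[idx]
    masks[i] = 1
    positive_embeddings[i] = torch.rand(emb_len)   # stands in for the cached phrase embedding

print(masks)   # ones for filled slots; trailing zeros mark padding when fewer boxes survive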
dataset/layout_dataset.py ADDED
@@ -0,0 +1,237 @@
1
+ import json, os, random, math
2
+ from collections import defaultdict
3
+ from copy import deepcopy
4
+
5
+ import torch
6
+ from torch.utils.data import Dataset
7
+ import torchvision.transforms as transforms
8
+
9
+ import numpy as np
10
+ from PIL import Image, ImageOps
11
+ from .base_dataset import BaseDataset, check_filenames_in_zipdata
12
+ from io import BytesIO
13
+
14
+
15
+
16
+
17
+ def clean_annotations(annotations):
18
+ for anno in annotations:
19
+ anno.pop("segmentation", None)
20
+ anno.pop("area", None)
21
+ anno.pop("iscrowd", None)
22
+ anno.pop("id", None)
23
+
24
+
25
+ def make_a_sentence(obj_names, clean=False):
26
+
27
+ if clean:
28
+ obj_names = [ name[:-6] if ("-other" in name) else name for name in obj_names]
29
+
30
+ caption = ""
31
+ tokens_positive = []
32
+ for obj_name in obj_names:
33
+ start_len = len(caption)
34
+ caption += obj_name
35
+ end_len = len(caption)
36
+ caption += ", "
37
+ tokens_positive.append(
38
+ [[start_len, end_len]] # in real caption, positive tokens can be disjoint, thus using list of list
39
+ )
40
+ caption = caption[:-2] # remove last ", "
41
+
42
+ return caption #, tokens_positive
43
+
44
+
45
+ class LayoutDataset(BaseDataset):
46
+ """
47
+ Note: this dataset could largely be reproduced with cd_dataset.CDDataset,
48
+ since, if you do not set prob_real_caption=0 in CDDataset, that dataset
49
+ will use only detection annotations. However, that dataset removes
50
+ boxes rather than removing images.
51
+
52
+ In layout2img works, by contrast, the raw images are simply resized to 256*256,
53
+ so box sizes are pre-computed and min_box_size is applied before the
54
+ min/max_boxes_per_image filter, and images that violate these rules are removed.
55
+
56
+ These two approaches lead to a different number of training/val images,
57
+ so this dataset is only used for layout2img.
58
+
59
+ """
60
+ def __init__(self,
61
+ image_root,
62
+ instances_json_path,
63
+ stuff_json_path,
64
+ category_embedding_path,
65
+ fake_caption_type = 'empty',
66
+ image_size=256,
67
+ max_samples=None,
68
+ min_box_size=0.02,
69
+ min_boxes_per_image=3,
70
+ max_boxes_per_image=8,
71
+ include_other=False,
72
+ random_flip=True
73
+ ):
74
+ super().__init__(random_crop=None, random_flip=None, image_size=None) # we only use the vis_getitem func in BaseDataset, do not use the others.
75
+
76
+ assert fake_caption_type in ['empty', 'made']
77
+ self.image_root = image_root
78
+ self.instances_json_path = instances_json_path
79
+ self.stuff_json_path = stuff_json_path
80
+ self.category_embedding_path = category_embedding_path
81
+ self.fake_caption_type = fake_caption_type
82
+ self.image_size = image_size
83
+ self.max_samples = max_samples
84
+ self.min_box_size = min_box_size
85
+ self.min_boxes_per_image = min_boxes_per_image
86
+ self.max_boxes_per_image = max_boxes_per_image
87
+ self.include_other = include_other
88
+ self.random_flip = random_flip
89
+
90
+
91
+ self.transform = transforms.Compose([transforms.Resize( (image_size, image_size) ),
92
+ transforms.ToTensor(),
93
+ transforms.Lambda(lambda t: (t * 2) - 1) ])
94
+
95
+ # Load all jsons
96
+ with open(instances_json_path, 'r') as f:
97
+ instances_data = json.load(f) # keys: 'info', 'images', 'licenses', 'categories', 'annotations'
98
+ clean_annotations(instances_data["annotations"])
99
+ self.instances_data = instances_data
100
+
101
+ with open(stuff_json_path, 'r') as f:
102
+ stuff_data = json.load(f) # keys: 'info', 'images', 'licenses', 'categories', 'annotations'
103
+ clean_annotations(stuff_data["annotations"])
104
+ self.stuff_data = stuff_data
105
+
106
+
107
+ # Load preprocessed name embedding
108
+ self.category_embeddings = torch.load(category_embedding_path)
109
+ self.embedding_len = list( self.category_embeddings.values() )[0].shape[0]
110
+
111
+
112
+ # Misc
113
+ self.image_ids = [] # main list for selecting images
114
+ self.image_id_to_filename = {} # file names used to read image
115
+ self.image_id_to_size = {} # original size of this image
116
+ assert instances_data['images'] == stuff_data["images"]
117
+ for image_data in instances_data['images']:
118
+ image_id = image_data['id']
119
+ filename = image_data['file_name']
120
+ width = image_data['width']
121
+ height = image_data['height']
122
+ self.image_ids.append(image_id)
123
+ self.image_id_to_filename[image_id] = filename
124
+ self.image_id_to_size[image_id] = (width, height)
125
+
126
+ # All category names (including things and stuff)
127
+ self.things_id_list = []
128
+ self.stuff_id_list = []
129
+ self.object_idx_to_name = {}
130
+ for category_data in instances_data['categories']:
131
+ self.things_id_list.append( category_data['id'] )
132
+ self.object_idx_to_name[category_data['id']] = category_data['name']
133
+ for category_data in stuff_data['categories']:
134
+ self.stuff_id_list.append( category_data['id'] )
135
+ self.object_idx_to_name[category_data['id']] = category_data['name']
136
+ self.all_categories = [ self.object_idx_to_name.get(k, None) for k in range(183+1) ]
137
+
138
+
139
+ # Add object data from instances and stuff
140
+ self.image_id_to_objects = defaultdict(list)
141
+ self.select_objects( instances_data['annotations'] )
142
+ self.select_objects( stuff_data['annotations'] )
143
+
144
+
145
+ # Prune images that have too few or too many objects
146
+ new_image_ids = []
147
+ for image_id in self.image_ids:
148
+ num_objs = len(self.image_id_to_objects[image_id])
149
+ if self.min_boxes_per_image <= num_objs <= self.max_boxes_per_image:
150
+ new_image_ids.append(image_id)
151
+ self.image_ids = new_image_ids
152
+
153
+
154
+ # Check if all filenames can be found in the zip file
155
+ all_filenames = [self.image_id_to_filename[idx] for idx in self.image_ids]
156
+ check_filenames_in_zipdata(all_filenames, image_root)
157
+
158
+
159
+
160
+ def select_objects(self, annotations):
161
+ for object_anno in annotations:
162
+ image_id = object_anno['image_id']
163
+ _, _, w, h = object_anno['bbox']
164
+ W, H = self.image_id_to_size[image_id]
165
+ box_area = (w * h) / (W * H)
166
+ box_ok = box_area > self.min_box_size
167
+ object_name = self.object_idx_to_name[object_anno['category_id']]
168
+ other_ok = object_name != 'other' or self.include_other
169
+ if box_ok and other_ok:
170
+ self.image_id_to_objects[image_id].append(object_anno)
171
+
172
+
173
+ def total_images(self):
174
+ return len(self)
175
+
176
+
177
+ def __getitem__(self, index):
178
+ if self.max_boxes_per_image > 99:
179
+ assert False, "Are you sure setting such large number of boxes?"
180
+
181
+ out = {}
182
+
183
+ image_id = self.image_ids[index]
184
+ out['id'] = image_id
185
+
186
+ flip = self.random_flip and random.random()<0.5
187
+
188
+ # Image
189
+ filename = self.image_id_to_filename[image_id]
190
+ zip_file = self.fetch_zipfile(self.image_root)
191
+ image = Image.open(BytesIO(zip_file.read(filename))).convert('RGB')
192
+ WW, HH = image.size
193
+ if flip:
194
+ image = ImageOps.mirror(image)
195
+ out["image"] = self.transform(image)
196
+
197
+ this_image_obj_annos = deepcopy(self.image_id_to_objects[image_id])
198
+
199
+ # Make a sentence
200
+ obj_names = [] # used for making a sentence
201
+ boxes = torch.zeros(self.max_boxes_per_image, 4)
202
+ masks = torch.zeros(self.max_boxes_per_image)
203
+ positive_embeddings = torch.zeros(self.max_boxes_per_image, self.embedding_len)
204
+ for idx, object_anno in enumerate(this_image_obj_annos):
205
+ obj_name = self.object_idx_to_name[ object_anno['category_id'] ]
206
+ obj_names.append(obj_name)
207
+ x, y, w, h = object_anno['bbox']
208
+ x0 = x / WW
209
+ y0 = y / HH
210
+ x1 = (x + w) / WW
211
+ y1 = (y + h) / HH
212
+ if flip:
213
+ x0, x1 = 1-x1, 1-x0
214
+ boxes[idx] = torch.tensor([x0,y0,x1,y1])
215
+ masks[idx] = 1
216
+ positive_embeddings[idx] = self.category_embeddings[obj_name]
217
+
218
+ if self.fake_caption_type == 'empty':
219
+ caption = ""
220
+ else:
221
+ caption = make_a_sentence(obj_names, clean=True)
222
+
223
+ out["caption"] = caption
224
+ out["boxes"] = boxes
225
+ out["masks"] = masks
226
+ out["positive_embeddings"] = positive_embeddings
227
+
228
+
229
+ return out
230
+
231
+
232
+ def __len__(self):
233
+ if self.max_samples is None:
234
+ return len(self.image_ids)
235
+ return min(len(self.image_ids), self.max_samples)
236
+
237
+
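A quick usage note for make_a_sentence defined above: names are joined with ", " and the "-other" suffix is stripped when clean=True. The import path assumes the repo root is on sys.path.

from dataset.layout_dataset import make_a_sentence

print(make_a_sentence(["wall-other", "person", "pizza"], clean=True))
# -> "wall, person, pizza"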
dataset/tsv.py ADDED
@@ -0,0 +1,212 @@
1
+ import os
2
+ import os.path as op
3
+ import gc
4
+ import json
5
+ from typing import List
6
+ import logging
7
+
8
+ try:
9
+ from .blob_storage import BlobStorage, disk_usage
10
+ except ImportError:
11
+ class BlobStorage:
12
+ pass
+ def disk_usage(path):  # fallback stub so the purge logging in TSVFile.__del__ still works
+ return 0.0
13
+
14
+
15
+ def generate_lineidx(filein: str, idxout: str) -> None:
16
+ idxout_tmp = idxout + '.tmp'
17
+ with open(filein, 'r') as tsvin, open(idxout_tmp, 'w') as tsvout:
18
+ fsize = os.fstat(tsvin.fileno()).st_size
19
+ fpos = 0
20
+ while fpos != fsize:
21
+ tsvout.write(str(fpos) + "\n")
22
+ tsvin.readline()
23
+ fpos = tsvin.tell()
24
+ os.rename(idxout_tmp, idxout)
25
+
26
+
27
+ def read_to_character(fp, c):
28
+ result = []
29
+ while True:
30
+ s = fp.read(32)
31
+ assert s != ''
32
+ if c in s:
33
+ result.append(s[: s.index(c)])
34
+ break
35
+ else:
36
+ result.append(s)
37
+ return ''.join(result)
38
+
39
+
40
+ class TSVFile(object):
41
+ def __init__(self,
42
+ tsv_file: str,
43
+ if_generate_lineidx: bool = False,
44
+ lineidx: str = None,
45
+ class_selector: List[str] = None,
46
+ blob_storage: BlobStorage = None):
47
+ self.tsv_file = tsv_file
48
+ self.lineidx = op.splitext(tsv_file)[0] + '.lineidx' \
49
+ if not lineidx else lineidx
50
+ self.linelist = op.splitext(tsv_file)[0] + '.linelist'
51
+ self.chunks = op.splitext(tsv_file)[0] + '.chunks'
52
+ self._fp = None
53
+ self._lineidx = None
54
+ self._sample_indices = None
55
+ self._class_boundaries = None
56
+ self._class_selector = class_selector
57
+ self._blob_storage = blob_storage
58
+ self._len = None
59
+ # remember the pid of the process that opened the file.
61
+ # If the stored pid is not equal to the current pid, we will re-open the file.
61
+ self.pid = None
62
+ # generate lineidx if not exist
63
+ if not op.isfile(self.lineidx) and if_generate_lineidx:
64
+ generate_lineidx(self.tsv_file, self.lineidx)
65
+
66
+ def __del__(self):
67
+ self.gcidx()
68
+ if self._fp:
69
+ self._fp.close()
70
+ # physically remove the tsv file if it is retrieved by BlobStorage
71
+ if self._blob_storage and 'azcopy' in self.tsv_file and os.path.exists(self.tsv_file):
72
+ try:
73
+ original_usage = disk_usage('/')
74
+ os.remove(self.tsv_file)
75
+ logging.info("Purged %s (disk usage: %.2f%% => %.2f%%)" %
76
+ (self.tsv_file, original_usage, disk_usage('/') * 100))
77
+ except:
78
+ # Known issue: multiple threads attempting to delete the file will raise a FileNotFound error.
79
+ # TODO: try threading.Lock to better handle the race condition
80
+ pass
81
+
82
+ def __str__(self):
83
+ return "TSVFile(tsv_file='{}')".format(self.tsv_file)
84
+
85
+ def __repr__(self):
86
+ return str(self)
87
+
88
+ def gcidx(self):
89
+ logging.debug('Run gc collect')
90
+ self._lineidx = None
91
+ self._sample_indices = None
92
+ #self._class_boundaries = None
93
+ return gc.collect()
94
+
95
+ def get_class_boundaries(self):
96
+ return self._class_boundaries
97
+
98
+ def num_rows(self, gcf=False):
99
+ if (self._len is None):
100
+ self._ensure_lineidx_loaded()
101
+ retval = len(self._sample_indices)
102
+
103
+ if (gcf):
104
+ self.gcidx()
105
+
106
+ self._len = retval
107
+
108
+ return self._len
109
+
110
+ def seek(self, idx: int):
111
+ self._ensure_tsv_opened()
112
+ self._ensure_lineidx_loaded()
113
+ try:
114
+ pos = self._lineidx[self._sample_indices[idx]]
115
+ except:
116
+ logging.info('=> {}-{}'.format(self.tsv_file, idx))
117
+ raise
118
+ self._fp.seek(pos)
119
+ return [s.strip() for s in self._fp.readline().split('\t')]
120
+
121
+ def seek_first_column(self, idx: int):
122
+ self._ensure_tsv_opened()
123
+ self._ensure_lineidx_loaded()
124
+ pos = self._lineidx[idx]
125
+ self._fp.seek(pos)
126
+ return read_to_character(self._fp, '\t')
127
+
128
+ def get_key(self, idx: int):
129
+ return self.seek_first_column(idx)
130
+
131
+ def __getitem__(self, index: int):
132
+ return self.seek(index)
133
+
134
+ def __len__(self):
135
+ return self.num_rows()
136
+
137
+ def _ensure_lineidx_loaded(self):
138
+ if self._lineidx is None:
139
+ logging.debug('=> loading lineidx: {}'.format(self.lineidx))
140
+ with open(self.lineidx, 'r') as fp:
141
+ lines = fp.readlines()
142
+ lines = [line.strip() for line in lines]
143
+ self._lineidx = [int(line) for line in lines]
144
+
145
+ # read the line list if exists
146
+ linelist = None
147
+ if op.isfile(self.linelist):
148
+ with open(self.linelist, 'r') as fp:
149
+ linelist = sorted(
150
+ [
151
+ int(line.strip())
152
+ for line in fp.readlines()
153
+ ]
154
+ )
155
+
156
+ if op.isfile(self.chunks):
157
+ self._sample_indices = []
158
+ self._class_boundaries = []
159
+ class_boundaries = json.load(open(self.chunks, 'r'))
160
+ for class_name, boundary in class_boundaries.items():
161
+ start = len(self._sample_indices)
162
+ if class_name in self._class_selector:
163
+ for idx in range(boundary[0], boundary[1] + 1):
164
+ # NOTE: potentially slow when linelist is long, try to speed it up
165
+ if linelist and idx not in linelist:
166
+ continue
167
+ self._sample_indices.append(idx)
168
+ end = len(self._sample_indices)
169
+ self._class_boundaries.append((start, end))
170
+ else:
171
+ if linelist:
172
+ self._sample_indices = linelist
173
+ else:
174
+ self._sample_indices = list(range(len(self._lineidx)))
175
+
176
+ def _ensure_tsv_opened(self):
177
+ if self._fp is None:
178
+ if self._blob_storage:
179
+ self._fp = self._blob_storage.open(self.tsv_file)
180
+ else:
181
+ self._fp = open(self.tsv_file, 'r')
182
+ self.pid = os.getpid()
183
+
184
+ if self.pid != os.getpid():
185
+ logging.debug('=> re-open {} because the process id changed'.format(self.tsv_file))
186
+ self._fp = open(self.tsv_file, 'r')
187
+ self.pid = os.getpid()
188
+
189
+
190
+ class TSVWriter(object):
191
+ def __init__(self, tsv_file):
192
+ self.tsv_file = tsv_file
193
+ self.lineidx_file = op.splitext(tsv_file)[0] + '.lineidx'
194
+ self.tsv_file_tmp = self.tsv_file + '.tmp'
195
+ self.lineidx_file_tmp = self.lineidx_file + '.tmp'
196
+
197
+ self.tsv_fp = open(self.tsv_file_tmp, 'w')
198
+ self.lineidx_fp = open(self.lineidx_file_tmp, 'w')
199
+
200
+ self.idx = 0
201
+
202
+ def write(self, values, sep='\t'):
203
+ v = '{0}\n'.format(sep.join(map(str, values)))
204
+ self.tsv_fp.write(v)
205
+ self.lineidx_fp.write(str(self.idx) + '\n')
206
+ self.idx = self.idx + len(v)
207
+
208
+ def close(self):
209
+ self.tsv_fp.close()
210
+ self.lineidx_fp.close()
211
+ os.rename(self.tsv_file_tmp, self.tsv_file)
212
+ os.rename(self.lineidx_file_tmp, self.lineidx_file)
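A hedged round-trip sketch for the helpers above: TSVWriter records one byte offset per row in the .lineidx file, and TSVFile seeks to that offset to read a row back without scanning the whole file. The file name "toy.tsv" is made up for illustration, and the import path assumes the repo root is on sys.path.

from dataset.tsv import TSVFile, TSVWriter

writer = TSVWriter("toy.tsv")
writer.write(["img_0", "payload-for-row-0"])
writer.write(["img_1", "payload-for-row-1"])
writer.close()                    # renames the .tmp files into place

tsv = TSVFile("toy.tsv")
print(len(tsv))                   # 2
print(tsv.seek(1))                # ['img_1', 'payload-for-row-1']
print(tsv.get_key(0))             # 'img_0'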
dataset/tsv_dataset.py ADDED
@@ -0,0 +1,326 @@
1
+ import torch
3
+ import json
4
+ from collections import defaultdict
5
+ from PIL import Image, ImageDraw
6
+ from copy import deepcopy
7
+ import os
8
+ import torchvision.transforms as transforms
9
+ import torchvision
10
+ from .base_dataset import BaseDataset, check_filenames_in_zipdata, recalculate_box_and_verify_if_valid
11
+ from io import BytesIO
12
+ import random
13
+
14
+ from .tsv import TSVFile
15
+
16
+ import base64
18
+ import numpy as np
20
+
21
+
22
+ def decode_base64_to_pillow(image_b64):
23
+ return Image.open(BytesIO(base64.b64decode(image_b64))).convert('RGB')
24
+
25
+ def decode_tensor_from_string(arr_str, use_tensor=True):
26
+ arr = np.frombuffer(base64.b64decode(arr_str), dtype='float32')
27
+ if use_tensor:
28
+ arr = torch.from_numpy(arr)
29
+ return arr
30
+
31
+ def decode_item(item):
32
+ item = json.loads(item)
33
+ item['image'] = decode_base64_to_pillow(item['image'])
34
+
35
+ for anno in item['annos']:
36
+ anno['image_embedding_before'] = decode_tensor_from_string(anno['image_embedding_before'])
37
+ anno['text_embedding_before'] = decode_tensor_from_string(anno['text_embedding_before'])
38
+ anno['image_embedding_after'] = decode_tensor_from_string(anno['image_embedding_after'])
39
+ anno['text_embedding_after'] = decode_tensor_from_string(anno['text_embedding_after'])
40
+ return item
41
+
42
+ def check_unique(images, fields):
43
+ for field in fields:
44
+ temp_list = []
45
+ for img_info in images:
46
+ temp_list.append(img_info[field])
47
+ assert len(set(temp_list)) == len(temp_list), field
48
+
49
+ def clean_data(data):
50
+ for data_info in data:
51
+ data_info.pop("original_img_id", None)
52
+ data_info.pop("original_id", None)
53
+ data_info.pop("sentence_id", None) # sentence id for each image (multiple sentences for one image)
54
+ data_info.pop("dataset_name", None)
55
+ data_info.pop("data_source", None)
56
+ data_info["data_id"] = data_info.pop("id")
57
+
58
+
59
+ def clean_annotations(annotations):
60
+ for anno_info in annotations:
61
+ anno_info.pop("iscrowd", None) # I have checked that all 0 for flickr, vg, coco
62
+ anno_info.pop("category_id", None) # I have checked that all 1 for flickr vg. This is not always 1 for coco, but I do not think we need this annotation
63
+ anno_info.pop("area", None)
64
+ # anno_info.pop("id", None)
65
+ anno_info["data_id"] = anno_info.pop("image_id")
66
+
67
+
68
+ def draw_box(img, boxes):
69
+ draw = ImageDraw.Draw(img)
70
+ for box in boxes:
71
+ draw.rectangle([box[0], box[1], box[2], box[3]], outline ="red", width=2) # x0 y0 x1 y1
72
+ return img
73
+
74
+
75
+ def xyhw2xyxy(box):
76
+ x0, y0, w, h = box
77
+ return [ x0, y0, x0+w, y0+h ]
78
+
79
+
80
+ def make_a_sentence(obj_names, clean=False):
81
+
82
+ if clean:
83
+ obj_names = [ name[:-6] if ("-other" in name) else name for name in obj_names]
84
+
85
+ caption = ""
86
+ tokens_positive = []
87
+ for obj_name in obj_names:
88
+ start_len = len(caption)
89
+ caption += obj_name
90
+ end_len = len(caption)
91
+ caption += ", "
92
+ tokens_positive.append(
93
+ [[start_len, end_len]] # in real caption, positive tokens can be disjoint, thus using list of list
94
+ )
95
+ caption = caption[:-2] # remove last ", "
96
+
97
+ return caption #, tokens_positive
98
+
99
+
100
+ def mask_for_random_drop_text_or_image_feature(masks, random_drop_embedding):
101
+ """
102
+ input masks tell how many valid grounding tokens for this image
103
+ e.g., 1,1,1,1,0,0,0,0,0,0...
104
+
105
+ If random_drop_embedding=both. we will random drop either image or
106
+ text feature for each token,
107
+ but we always make sure there is at least one feature used.
108
+ In other words, the following masks are not valid
109
+ (because for the second obj, no feature at all):
110
+ image: 1,0,1,1,0,0,0,0,0
111
+ text: 1,0,0,0,0,0,0,0,0
112
+
113
+ if random_drop_embedding=image. we will random drop image feature
114
+ and always keep the text one.
115
+
116
+ """
117
+ N = masks.shape[0]
118
+
119
+ if random_drop_embedding=='both':
120
+ temp_mask = torch.ones(2,N)
121
+ for i in range(N):
122
+ if random.uniform(0, 1) < 0.5: # else keep both features
123
+ idx = random.sample([0,1], 1)[0] # randomly choose to drop image or text feature
124
+ temp_mask[idx,i] = 0
125
+ image_masks = temp_mask[0]*masks
126
+ text_masks = temp_mask[1]*masks
127
+
128
+ if random_drop_embedding=='image':
129
+ image_masks = masks*(torch.rand(N)>0.5)*1
130
+ text_masks = masks
131
+
132
+ return image_masks, text_masks
133
+
134
+
135
+
136
+
137
+
138
+ def project(x, projection_matrix):
139
+ """
140
+ x (Batch*768) should be the penultimate feature of CLIP (before projection)
141
+ projection_matrix (768*768) is the CLIP projection matrix, which should be weight.data of Linear layer
142
+ defined in CLIP (out_dim, in_dim), thus we need to apply transpose below.
143
+ this function will return the CLIP feature (without normalization)
144
+ """
145
+ return x@torch.transpose(projection_matrix, 0, 1)
146
+
147
+
148
+ def inv_project(y, projection_matrix):
149
+ """
150
+ y (Batch*768) should be the CLIP feature (after projection)
151
+ projection_matrix (768*768) is the CLIP projection matrix, which should be weight.data of Linear layer
152
+ defined in CLIP (out_dim, in_dim).
153
+ this function will return the CLIP penultimate feature.
154
+
155
+ Note: to make sure getting the correct penultimate feature, the input y should not be normalized.
156
+ If it is normalized, then the result will be scaled by CLIP feature norm, which is unknown.
157
+ """
158
+ return y@torch.transpose(torch.linalg.inv(projection_matrix), 0, 1)
159
+
160
+
161
+
162
+
163
+ class TSVDataset(BaseDataset):
164
+ def __init__(self,
165
+ tsv_path,
166
+ which_embedder='clip',
167
+ which_layer=['after','after'], # text and image
168
+ prob_use_caption=1,
169
+ random_drop_embedding='none',
170
+ image_size=256,
171
+ min_box_size=0.01,
172
+ max_boxes_per_data=8,
173
+ max_images=None, # set as 30K used to eval
174
+ random_crop = False,
175
+ random_flip = True,
176
+ ):
177
+ image_root = "a placeholder path as we are using tsv here"
178
+ super().__init__(image_root, random_crop, random_flip, image_size)
179
+ self.tsv_path = tsv_path
180
+ self.which_embedder = which_embedder
181
+ self.prob_use_caption = prob_use_caption
182
+ self.random_drop_embedding = random_drop_embedding
183
+ self.min_box_size = min_box_size
184
+ self.max_boxes_per_data = max_boxes_per_data
185
+ self.max_images = max_images
186
+
187
+ assert which_layer in [ ['after','after'], ['before','after_renorm'], ['before','after_reproject'] ]
188
+ assert random_drop_embedding in ['none', 'both', 'image']
189
+ self.which_layer_text = which_layer[0]
190
+ self.which_layer_image = which_layer[1]
191
+
192
+ #self.projection_matrix = torch.load(os.path.join(os.path.dirname(__file__), 'projection_matrix') )
193
+ self.projection_matrix = torch.load('projection_matrix')
194
+
195
+ # Load tsv data
196
+ self.tsv_file = TSVFile(self.tsv_path)
197
+
198
+
199
+ # Load preprocessed name embedding
200
+ if which_embedder == 'bert':
201
+ self.embedding_len = 1280
202
+ elif which_embedder == 'clip':
203
+ self.embedding_len = 768
204
+ else:
205
+ assert False
206
+
207
+ def total_images(self):
208
+ return len(self)
209
+
210
+ def get_item_from_tsv(self, index):
211
+ _, item = self.tsv_file[index]
212
+ item = decode_item(item)
213
+ return item
214
+
215
+
216
+ def mapping(self, image_embedding):
217
+ if self.which_layer_image == 'after':
218
+ # both use CLIP aligned feature
219
+ return image_embedding
220
+ elif self.which_layer_image == 'after_renorm':
221
+ # text uses the 'before' feature, while the image uses the after-projection feature rescaled to norm 28.7
222
+ return image_embedding*28.7
223
+ elif self.which_layer_image == 'after_reproject':
224
+ image_embedding = project( image_embedding.unsqueeze(0), self.projection_matrix.T )
225
+ image_embedding = image_embedding.squeeze(0)
226
+ image_embedding = image_embedding / image_embedding.norm()
227
+ image_embedding = image_embedding * 28.7
228
+ return image_embedding
229
+
230
+
231
+
232
+ def __getitem__(self, index):
233
+ if self.max_boxes_per_data > 99:
234
+ assert False, "Are you sure setting such large number of boxes?"
235
+
236
+ raw_item = self.get_item_from_tsv(index)
237
+ is_det = raw_item.get('is_det', False) # if it is from detection (such as o365), then we will make a caption
238
+
239
+ out = {}
240
+
241
+ # -------------------- id and image ------------------- #
242
+ out['id'] = raw_item['data_id']
243
+ image = raw_item['image']
244
+ image_tensor, trans_info = self.transform_image(image)
245
+ out["image"] = image_tensor
246
+
247
+
248
+
249
+ # -------------------- grounding token ------------------- #
250
+ annos = raw_item['annos']
251
+
252
+ areas = []
253
+ all_boxes = []
254
+ all_masks = []
255
+ all_text_embeddings = []
256
+ all_image_embeddings = []
257
+ if is_det:
258
+ all_category_names = []
259
+
260
+ text_embedding_name = 'text_embedding_before' if self.which_layer_text == 'before' else 'text_embedding_after'
261
+ image_embedding_name = 'image_embedding_after'
262
+
263
+ for anno in annos:
264
+ x, y, w, h = anno['bbox']
265
+ valid, (x0, y0, x1, y1) = recalculate_box_and_verify_if_valid(x, y, w, h, trans_info, self.image_size, self.min_box_size)
266
+
267
+ if valid:
268
+ areas.append( (x1-x0)*(y1-y0) )
269
+ all_boxes.append( torch.tensor([x0,y0,x1,y1]) / self.image_size ) # scale to 0-1
270
+ all_masks.append(1)
271
+ all_text_embeddings.append(anno[text_embedding_name])
272
+ all_image_embeddings.append( self.mapping(anno[image_embedding_name]) )
273
+ if is_det:
274
+ all_category_names.append(anno["category_name"])
275
+
276
+
277
+ wanted_idxs = torch.tensor(areas).sort(descending=True)[1]
278
+ wanted_idxs = wanted_idxs[0:self.max_boxes_per_data]
279
+
280
+ boxes = torch.zeros(self.max_boxes_per_data, 4)
281
+ masks = torch.zeros(self.max_boxes_per_data)
282
+ text_embeddings = torch.zeros(self.max_boxes_per_data, self.embedding_len)
283
+ image_embeddings = torch.zeros(self.max_boxes_per_data, self.embedding_len)
284
+ if is_det:
285
+ category_names = []
286
+ for i, idx in enumerate(wanted_idxs):
287
+ boxes[i] = all_boxes[idx]
288
+ masks[i] = all_masks[idx]
289
+ text_embeddings[i] = all_text_embeddings[idx]
290
+ image_embeddings[i] = all_image_embeddings[idx]
291
+ if is_det:
292
+ category_names.append(all_category_names[idx])
293
+
294
+ if self.random_drop_embedding != 'none':
295
+ image_masks, text_masks = mask_for_random_drop_text_or_image_feature(masks, self.random_drop_embedding)
296
+ else:
297
+ image_masks = masks
298
+ text_masks = masks
299
+
300
+
301
+ out["boxes"] = boxes
302
+ out["masks"] = masks
303
+ out["image_masks"] = image_masks
304
+ out["text_masks"] = text_masks
305
+ out["text_embeddings"] = text_embeddings
306
+ out["image_embeddings"] = image_embeddings
307
+
308
+
309
+
310
+ # -------------------- caption ------------------- #
311
+ if random.uniform(0, 1) < self.prob_use_caption:
312
+ if is_det:
313
+ out["caption"] = make_a_sentence(category_names)
314
+ else:
315
+ out["caption"] = raw_item["caption"]
316
+ else:
317
+ out["caption"] = ""
318
+
319
+ return out
320
+
321
+
322
+
323
+ def __len__(self):
324
+ return len(self.tsv_file)
325
+
326
+
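The project / inv_project pair above is just multiplication by the CLIP projection matrix and by its inverse, so they should undo each other; the sketch below checks that with a random invertible matrix standing in for the real CLIP weights (a placeholder, not the 'projection_matrix' file loaded above).

import torch

proj = torch.randn(768, 768, dtype=torch.float64)   # stand-in for CLIP's projection weight
x = torch.randn(2, 768, dtype=torch.float64)        # pretend penultimate CLIP features

y = x @ proj.t()                                    # what project(x, proj) computes
x_rec = y @ torch.linalg.inv(proj).t()              # what inv_project(y, proj) computes

print(torch.allclose(x, x_rec, atol=1e-6))          # True up to floating point error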
dataset/utils.py ADDED
@@ -0,0 +1,116 @@
1
+ #!/usr/bin/python
2
+ #
3
+ # Copyright 2018 Google LLC
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import PIL
18
+ import torch
19
+ import torchvision.transforms as T
20
+
21
+
22
+ IMAGENET_MEAN = [0.485, 0.456, 0.406]
23
+ IMAGENET_STD = [0.229, 0.224, 0.225]
24
+
25
+ INV_IMAGENET_MEAN = [-m for m in IMAGENET_MEAN]
26
+ INV_IMAGENET_STD = [1.0 / s for s in IMAGENET_STD]
27
+
28
+
29
+ def imagenet_preprocess():
30
+ return T.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD)
31
+
32
+
33
+ def rescale(x):
34
+ lo, hi = x.min(), x.max()
35
+ return x.sub(lo).div(hi - lo)
36
+
37
+
38
+ def imagenet_deprocess(rescale_image=True):
39
+ transforms = [
40
+ T.Normalize(mean=[0, 0, 0], std=INV_IMAGENET_STD),
41
+ T.Normalize(mean=INV_IMAGENET_MEAN, std=[1.0, 1.0, 1.0]),
42
+ ]
43
+ if rescale_image:
44
+ transforms.append(rescale)
45
+ return T.Compose(transforms)
46
+
47
+
48
+ def imagenet_deprocess_batch(imgs, rescale=True):
49
+ """
50
+ Input:
51
+ - imgs: FloatTensor of shape (N, C, H, W) giving preprocessed images
52
+
53
+ Output:
54
+ - imgs_de: ByteTensor of shape (N, C, H, W) giving deprocessed images
55
+ in the range [0, 255]
56
+ """
57
+ if isinstance(imgs, torch.autograd.Variable):
58
+ imgs = imgs.data
59
+ imgs = imgs.cpu().clone()
60
+ deprocess_fn = imagenet_deprocess(rescale_image=rescale)
61
+ imgs_de = []
62
+ for i in range(imgs.size(0)):
63
+ img_de = deprocess_fn(imgs[i])[None]
64
+ img_de = img_de.mul(255).clamp(0, 255).byte()
65
+ imgs_de.append(img_de)
66
+ imgs_de = torch.cat(imgs_de, dim=0)
67
+ return imgs_de
68
+
69
+
70
+ class Resize(object):
71
+ def __init__(self, size, interp=PIL.Image.BILINEAR):
72
+ if isinstance(size, tuple):
73
+ H, W = size
74
+ self.size = (W, H)
75
+ else:
76
+ self.size = (size, size)
77
+ self.interp = interp
78
+
79
+ def __call__(self, img):
80
+ return img.resize(self.size, self.interp)
81
+
82
+
83
+ def unpack_var(v):
84
+ if isinstance(v, torch.autograd.Variable):
85
+ return v.data
86
+ return v
87
+
88
+
89
+ def split_graph_batch(triples, obj_data, obj_to_img, triple_to_img):
90
+ triples = unpack_var(triples)
91
+ obj_data = [unpack_var(o) for o in obj_data]
92
+ obj_to_img = unpack_var(obj_to_img)
93
+ triple_to_img = unpack_var(triple_to_img)
94
+
95
+ triples_out = []
96
+ obj_data_out = [[] for _ in obj_data]
97
+ obj_offset = 0
98
+ N = obj_to_img.max() + 1
99
+ for i in range(N):
100
+ o_idxs = (obj_to_img == i).nonzero().view(-1)
101
+ t_idxs = (triple_to_img == i).nonzero().view(-1)
102
+
103
+ cur_triples = triples[t_idxs].clone()
104
+ cur_triples[:, 0] -= obj_offset
105
+ cur_triples[:, 2] -= obj_offset
106
+ triples_out.append(cur_triples)
107
+
108
+ for j, o_data in enumerate(obj_data):
109
+ cur_o_data = None
110
+ if o_data is not None:
111
+ cur_o_data = o_data[o_idxs]
112
+ obj_data_out[j].append(cur_o_data)
113
+
114
+ obj_offset += o_idxs.size(0)
115
+
116
+ return triples_out, obj_data_out
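For the normalization helpers above, a small sanity check: ImageNet-normalizing a fake image batch and then running imagenet_deprocess_batch should give back uint8 images of the same shape. The tensor sizes are arbitrary, and the import path assumes the repo root is on sys.path.

import torch
from dataset.utils import imagenet_preprocess, imagenet_deprocess_batch

imgs = torch.rand(2, 3, 64, 64)                                  # fake images in [0, 1]
pre = torch.stack([imagenet_preprocess()(im) for im in imgs])    # normalized copies
back = imagenet_deprocess_batch(pre, rescale=False)              # ByteTensor in [0, 255]
print(back.shape, back.dtype)                                    # torch.Size([2, 3, 64, 64]) torch.uint8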
environment.yaml ADDED
@@ -0,0 +1,29 @@
1
+ name: gligen_demo
2
+ channels:
3
+ - xformers/label/dev
4
+ - pytorch
5
+ - defaults
6
+ dependencies:
7
+ - python=3.10.8
8
+ - pip=22.2.2
9
+ - cudatoolkit=11.3
10
+ - pytorch=1.12.1
11
+ - torchvision=0.13.1
12
+ - numpy=1.23.1
13
+ - xformers
14
+ - pip:
15
+ - omegaconf==2.1.1
16
+ - albumentations==1.3.0
17
+ - opencv-python
18
+ - imageio==2.9.0
19
+ - imageio-ffmpeg==0.4.2
20
+ - pytorch-lightning==1.4.2
21
+ - test-tube>=0.7.5
22
+ - streamlit==1.12.1
23
+ - einops==0.3.0
24
+ - git+https://github.com/openai/CLIP.git
25
+ - protobuf~=3.20.1
26
+ - torchmetrics==0.6.0
27
+ - transformers==4.19.2
28
+ - kornia==0.6.0
29
+ - gradio==3.16.0
gligen/__init__.py ADDED
@@ -0,0 +1,10 @@
1
+
2
+ import os, sys
3
+ sys.path.append(os.path.dirname(__file__))
4
+ sys.path.append(os.path.join(os.path.dirname(__file__), "ldm"))
5
+
6
+ import gligen.evaluator as evaluator
7
+ import gligen.trainer as trainer
8
+
9
+
10
+ # import gligen.ldm as ldm
gligen/create_meta.py ADDED
@@ -0,0 +1,170 @@
1
+ CKPTS = [
2
+
3
+ dict(
4
+ path="/home/chunyl/azure_mount/yuhengdb/fine_tune_ldm/version5_branch6_output/GoldG+SBU+CC3M+CC12M+O365/second_stage_drop_both/tag01/checkpoint_00450001.pth",
5
+ feature_type=['before','after_reproject'],
6
+ save_folder_name="v5b6_drop_both",
7
+ ),
8
+
9
+
10
+ # dict(
11
+ # path="/home/v-yuhengli/blobfuse/output/fine_tune_ldm/version5_branch6_output/GoldG+SBU+CC3M+CC12M+O365/second_stage_drop_none/tag00/checkpoint_00165001.pth",
12
+ # feature_type=['before','after_reproject'],
13
+ # save_folder_name="v5b6_drop_none",
14
+ # ),
15
+
16
+
17
+
18
+
19
+
20
+ ]
21
+
22
+
23
+
24
+ # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
25
+
26
+
27
+
28
+
29
+
30
+
31
+
32
+
33
+ # if meta["has_image_mask"] == 0:
34
+ # image_embeddings = text_embeddings
35
+ # if meta["has_text_mask"] == 0:
36
+ # text_embeddings = image_embeddings
37
+
38
+ # out = {
39
+ # "boxes" : boxes.unsqueeze(0).repeat(batch,1,1),
40
+ # "masks" : masks.unsqueeze(0).repeat(batch,1),
41
+ # "text_masks" : masks.unsqueeze(0).repeat(batch,1),
42
+ # "image_masks" : masks.unsqueeze(0).repeat(batch,1),
43
+ # "text_embeddings" : text_embeddings.unsqueeze(0).repeat(batch,1,1),
44
+ # "image_embeddings" : image_embeddings.unsqueeze(0).repeat(batch,1,1)
45
+ # }
46
+
47
+
48
+
49
+
50
+
51
+
52
+
53
+ META = [
54
+
55
+
56
+ dict(
57
+ prompt = "a teddy bear sitting next to a red bird",
58
+ phrases = ['a teddy bear', 'a red bird'],
59
+ images = ['images/teddy.jpg', 'images/red_bird.jpg'],
60
+ locations = [ [0.0,0.09,0.33,0.76], [0.55,0.11,1.0,0.8] ],
61
+ alpha_type = [1.0, 0, 0.0],
62
+ has_text_mask = 1,
63
+ has_image_mask = 0,
64
+ save_folder_name="teddy_bird_1_1"
65
+ ),
66
+
67
+
68
+ # dict(
69
+ # prompt = "a teddy bear sitting next to a bird",
70
+ # phrases = ['a teddy bear', 'a bird'],
71
+ # images = ['images/teddy.jpg', 'images/red_bird.jpg'],
72
+ # locations = [ [0.0,0.09,0.33,0.76], [0.55,0.11,1.0,0.8] ],
73
+ # alpha_type = [1.0, 0, 0.0],
74
+ # has_text_mask = 1,
75
+ # has_image_mask = 1,
76
+ # save_folder_name="teddy_bird_1_1"
77
+ # ),
78
+
79
+
80
+ # dict(
81
+ # prompt = "a teddy bear sitting next to a bird",
82
+ # phrases = ['a teddy bear', 'a bird'],
83
+ # images = ['images/teddy.jpg', 'images/red_bird.jpg'],
84
+ # locations = [ [0.0,0.09,0.33,0.76], [0.55,0.11,1.0,0.8] ],
85
+ # alpha_type = [0.5, 0, 0.5],
86
+ # has_text_mask = 1,
87
+ # has_image_mask = 0,
88
+ # save_folder_name="teddy_bird_1_0"
89
+ # ),
90
+
91
+ # dict(
92
+ # prompt = "",
93
+ # phrases = ['a teddy bear', 'an umbrella'],
94
+ # images = ['images/teddy.jpg', 'images/umbrella.png'],
95
+ # locations = [ [0.0,0.09,0.33,0.76], [0.55,0.11,1.0,0.8] ],
96
+ # alpha_type = [1.0, 0, 0.0],
97
+ # has_text_mask = 1,
98
+ # has_image_mask = 1,
99
+ # save_folder_name="empty_teddy_umbrella_1_1"
100
+ # ),
101
+
102
+ # dict(
103
+ # prompt = "hello kitty and bird hybrid",
104
+ # phrases = ['a hello kitty', 'a hello kitty'],
105
+ # images = ['images/red_bird.jpg', 'images/red_bird.jpg'],
106
+ # locations = [ [0.0,0.09,0.33,0.76], [0.55,0.11,1.0,0.8] ],
107
+ # has_text_mask = 1,
108
+ # has_image_mask = 1,
109
+ # save_folder_name="hello+bird_1_1"
110
+ # ),
111
+
112
+ # dict(
113
+ # prompt = "hello kitty and teddy bear hybrid",
114
+ # phrases = ['a hello kitty', 'a hello kitty'],
115
+ # images = ['images/teddy.jpg', 'images/teddy.jpg'],
116
+ # locations = [ [0.0,0.09,0.33,0.76], [0.55,0.11,1.0,0.8] ],
117
+ # has_text_mask = 1,
118
+ # has_image_mask = 1,
119
+ # save_folder_name="hello+teddy_1_1"
120
+ # ),
121
+
122
+ # dict(
123
+ # prompt = "bird and hello kitty hybrid",
124
+ # phrases = ['a bird', 'a bird'],
125
+ # images = ['images/hello.jpg', 'images/hello.jpg'],
126
+ # locations = [ [0.0,0.09,0.33,0.76], [0.55,0.11,1.0,0.8] ],
127
+ # alpha_type = [1.0, 0, 0.0],
128
+ # has_text_mask = 1,
129
+ # has_image_mask = 0.5,
130
+ # save_folder_name="bird+hello_1_1"
131
+ # ),
132
+
133
+
134
+
135
+ # dict(
136
+ # prompt = "a deer standing in front of a brick house in the woods, anime, oil painting, high resolution, cottagecore, ghibli inspired, 4k",
137
+ # phrases = ['a deer'],
138
+ # images = ['images/sky.jpg'],
139
+ # locations = [ [0.0,0.5,0.5,0.9] ],
140
+ # alpha_type = [1, 0, 0],
141
+ # has_text_mask = 1,
142
+ # has_image_mask = 1,
143
+ # save_folder_name="deer_sky"
144
+ # ),
145
+
146
+
147
+ # dict(
148
+ # prompt = "A woman sitting in a restaurant with a slice of pizza in front of her",
149
+ # phrases = ['dining table', 'pizza', 'person', 'wall', 'car', 'paper', 'chair', 'window', 'bottle', 'cup'],
150
+ # images = ['images/hello.jpg','images/hello.jpg','images/hello.jpg','images/hello.jpg','images/hello.jpg','images/hello.jpg','images/hello.jpg','images/hello.jpg','images/hello.jpg','images/hello.jpg'],
151
+ # locations = [ [0.0030, 0.3589, 1.0000, 1.0000],
152
+ # [0.0779, 0.6744, 0.9768, 1.0000],
153
+ # [0.2236, 0.0000, 0.7809, 0.4352],
154
+ # [0.0000, 0.0000, 0.4313, 0.4505],
155
+ # [0.6275, 0.1050, 0.9444, 0.2497],
156
+ # [0.0000, 0.3859, 0.1250, 0.6922],
157
+ # [0.7137, 0.2389, 0.8540, 0.4549],
158
+ # [0.0000, 0.0000, 0.4667, 0.0630],
159
+ # [0.3822, 0.4235, 0.4932, 0.6575],
160
+ # [0.6616, 0.3617, 0.7880, 0.5165] ],
161
+ # alpha_type = [0.0, 0, 1.0],
162
+ # has_text_mask = 1,
163
+ # has_image_mask = 0,
164
+ # save_folder_name="pizza_1_0"
165
+ # ),
166
+
167
+
168
+
169
+
170
+ ]
gligen/distributed.py ADDED
@@ -0,0 +1,122 @@
1
+ import math
2
+ import pickle
3
+
4
+ import torch
5
+ from torch import distributed as dist
6
+ from torch.utils.data.sampler import Sampler
7
+
8
+
9
+ def get_rank():
10
+ if not dist.is_available():
11
+ return 0
12
+
13
+ if not dist.is_initialized():
14
+ return 0
15
+
16
+ return dist.get_rank()
17
+
18
+
19
+ def synchronize():
20
+ if not dist.is_available():
21
+ return
22
+ if not dist.is_initialized():
23
+ return
24
+
25
+ world_size = dist.get_world_size()
26
+ if world_size == 1:
27
+ return
28
+
29
+ dist.barrier()
30
+
31
+
32
+ def get_world_size():
33
+ if not dist.is_available():
34
+ return 1
35
+ if not dist.is_initialized():
36
+ return 1
37
+ return dist.get_world_size()
38
+
39
+
40
+ def reduce_sum(tensor):
41
+ if not dist.is_available():
42
+ return tensor
43
+
44
+ if not dist.is_initialized():
45
+ return tensor
46
+
47
+ tensor = tensor.clone()
48
+ dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
49
+
50
+ return tensor
51
+
52
+
53
+ def gather_grad(params):
54
+ world_size = get_world_size()
55
+
56
+ if world_size == 1:
57
+ return
58
+
59
+ for param in params:
60
+ if param.grad is not None:
61
+ dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
62
+ param.grad.data.div_(world_size)
63
+
64
+
65
+ def all_gather(data):
66
+ world_size = get_world_size()
67
+
68
+ if world_size == 1:
69
+ return [data]
70
+
71
+ buffer = pickle.dumps(data)
72
+ storage = torch.ByteStorage.from_buffer(buffer)
73
+ tensor = torch.ByteTensor(storage).to('cuda')
74
+
75
+ local_size = torch.IntTensor([tensor.numel()]).to('cuda')
76
+ size_list = [torch.IntTensor([0]).to('cuda') for _ in range(world_size)]
77
+ dist.all_gather(size_list, local_size)
78
+ size_list = [int(size.item()) for size in size_list]
79
+ max_size = max(size_list)
80
+
81
+ tensor_list = []
82
+ for _ in size_list:
83
+ tensor_list.append(torch.ByteTensor(size=(max_size,)).to('cuda'))
84
+
85
+ if local_size != max_size:
86
+ padding = torch.ByteTensor(size=(max_size - local_size,)).to('cuda')
87
+ tensor = torch.cat((tensor, padding), 0)
88
+
89
+ dist.all_gather(tensor_list, tensor)
90
+
91
+ data_list = []
92
+
93
+ for size, tensor in zip(size_list, tensor_list):
94
+ buffer = tensor.cpu().numpy().tobytes()[:size]
95
+ data_list.append(pickle.loads(buffer))
96
+
97
+ return data_list
98
+
99
+
100
+ def reduce_loss_dict(loss_dict):
101
+ world_size = get_world_size()
102
+
103
+ if world_size < 2:
104
+ return loss_dict
105
+
106
+ with torch.no_grad():
107
+ keys = []
108
+ losses = []
109
+
110
+ for k in sorted(loss_dict.keys()):
111
+ keys.append(k)
112
+ losses.append(loss_dict[k])
113
+
114
+ losses = torch.stack(losses, 0)
115
+ dist.reduce(losses, dst=0)
116
+
117
+ if dist.get_rank() == 0:
118
+ losses /= world_size
119
+
120
+ reduced_losses = {k: v for k, v in zip(keys, losses)}
121
+
122
+ return reduced_losses
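All of the helpers above fall back to single-process behaviour when torch.distributed is not initialized, so the sketch below runs as-is on one process; under torchrun the same calls would aggregate across ranks. It assumes the gligen/ directory (or the repo root) is on sys.path, which is how the repo's own modules import this file.

import torch
from distributed import get_rank, get_world_size, reduce_loss_dict

print(get_rank(), get_world_size())                 # 0 1 on a single process
losses = {"loss_a": torch.tensor(0.5), "loss_b": torch.tensor(1.5)}
print(reduce_loss_dict(losses))                     # returned unchanged when world_size < 2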
gligen/evaluator.py ADDED
@@ -0,0 +1,225 @@
1
+ import torch
2
+ from ldm.models.diffusion.ddim import DDIMSampler
3
+ from ldm.models.diffusion.plms import PLMSSampler
4
+ from ldm.util import instantiate_from_config
5
+ import numpy as np
6
+ import random
7
+ from dataset.concat_dataset import ConCatDataset #, collate_fn
8
+ from torch.utils.data import DataLoader
9
+ from torch.utils.data.distributed import DistributedSampler
10
+ import os
11
+ from tqdm import tqdm
12
+ from distributed import get_rank, synchronize, get_world_size
13
+ from trainer import read_official_ckpt, batch_to_device, ImageCaptionSaver, wrap_loader #, get_padded_boxes
14
+ from PIL import Image
15
+ import math
16
+ import json
17
+
18
+
19
+ def draw_masks_from_boxes(boxes,size):
20
+
21
+ image_masks = []
22
+ for box in boxes:
23
+ image_mask = torch.ones(size[0],size[1])
24
+ for bx in box:
25
+ x0, x1 = bx[0]*size[0], bx[2]*size[0]
26
+ y0, y1 = bx[1]*size[1], bx[3]*size[1]
27
+ image_mask[int(y0):int(y1), int(x0):int(x1)] = 0
28
+ image_masks.append(image_mask)
29
+ return torch.stack(image_masks).unsqueeze(1)
30
+
31
+
32
+
33
+ def set_alpha_scale(model, alpha_scale):
34
+ from ldm.modules.attention import GatedCrossAttentionDense, GatedSelfAttentionDense
35
+ for module in model.modules():
36
+ if type(module) == GatedCrossAttentionDense or type(module) == GatedSelfAttentionDense:
37
+ module.scale = alpha_scale
38
+ # print("scale: ", alpha_scale)
39
+ # print("attn: ", module.alpha_attn)
40
+ # print("dense: ", module.alpha_dense)
41
+ # print(' ')
42
+ # print(' ')
43
+
44
+
45
+ def save_images(samples, image_ids, folder, to256):
46
+ for sample, image_id in zip(samples, image_ids):
47
+ sample = torch.clamp(sample, min=-1, max=1) * 0.5 + 0.5
48
+ sample = sample.cpu().numpy().transpose(1,2,0) * 255
49
+ img_name = str(int(image_id))+'.png'
50
+ img = Image.fromarray(sample.astype(np.uint8))
51
+ if to256:
52
+ img = img.resize( (256,256), Image.BICUBIC)
53
+ img.save(os.path.join(folder,img_name))
54
+
55
+
56
+ def ckpt_to_folder_name(basename):
57
+ name=""
58
+ for s in basename:
59
+ if s.isdigit():
60
+ name+=s
61
+ seen = round( int(name)/1000, 1 )
62
+ return str(seen).ljust(4,'0')+'k'
63
+
64
+
65
+ class Evaluator:
66
+ def __init__(self, config):
67
+
68
+ self.config = config
69
+ self.device = torch.device("cuda")
70
+
71
+
72
+ # = = = = = create model and diffusion = = = = = #
73
+ if self.config.ckpt != "real":
74
+
75
+ self.model = instantiate_from_config(config.model).to(self.device)
76
+ self.autoencoder = instantiate_from_config(config.autoencoder).to(self.device)
77
+ self.text_encoder = instantiate_from_config(config.text_encoder).to(self.device)
78
+ self.diffusion = instantiate_from_config(config.diffusion).to(self.device)
79
+
80
+ # do not need to load official_ckpt for self.model here, since we will load from our ckpt
81
+ state_dict = read_official_ckpt( os.path.join(config.DATA_ROOT, config.official_ckpt_name) )
82
+ self.autoencoder.load_state_dict( state_dict["autoencoder"] )
83
+ self.text_encoder.load_state_dict( state_dict["text_encoder"] )
84
+ self.diffusion.load_state_dict( state_dict["diffusion"] )
85
+
86
+
87
+ # = = = = = load from our ckpt = = = = = #
88
+ if self.config.ckpt == "real":
89
+ print("Saving all real images...")
90
+ self.just_save_real = True
91
+ else:
92
+ checkpoint = torch.load(self.config.ckpt, map_location="cpu")
93
+ which_state = 'ema' if 'ema' in checkpoint else "model"
94
+ which_state = which_state if config.which_state is None else config.which_state
95
+ self.model.load_state_dict(checkpoint[which_state])
96
+ print("ckpt is loaded")
97
+ self.just_save_real = False
98
+ set_alpha_scale(self.model, self.config.alpha_scale)
99
+
100
+ self.autoencoder.eval()
101
+ self.model.eval()
102
+ self.text_encoder.eval()
103
+
104
+
105
+ # = = = = = create data = = = = = #
106
+ self.dataset_eval = ConCatDataset(config.val_dataset_names, config.DATA_ROOT, config.which_embedder, train=False)
107
+ print("total eval images: ", len(self.dataset_eval))
108
+ sampler = DistributedSampler(self.dataset_eval,shuffle=False) if config.distributed else None
109
+ loader_eval = DataLoader( self.dataset_eval,batch_size=config.batch_size,
110
+ num_workers=config.workers,
111
+ pin_memory=True,
112
+ sampler=sampler,
113
+ drop_last=False) # shuffle default is False
114
+ self.loader_eval = loader_eval
115
+
116
+
117
+ # = = = = = create output folder = = = = = #
118
+ folder_name = ckpt_to_folder_name(os.path.basename(config.ckpt))
119
+ self.outdir = os.path.join(config.OUTPUT_ROOT, folder_name)
120
+ self.outdir_real = os.path.join(self.outdir,'real')
121
+ self.outdir_fake = os.path.join(self.outdir,'fake')
122
+ if config.to256:
123
+ self.outdir_real256 = os.path.join(self.outdir,'real256')
124
+ self.outdir_fake256 = os.path.join(self.outdir,'fake256')
125
+ synchronize() # if rank0 is faster, it may mkdir before the other ranks call os.listdir()
126
+ if get_rank() == 0:
127
+ os.makedirs(self.outdir, exist_ok=True)
128
+ os.makedirs(self.outdir_real, exist_ok=True)
129
+ os.makedirs(self.outdir_fake, exist_ok=True)
130
+ if config.to256:
131
+ os.makedirs(self.outdir_real256, exist_ok=True)
132
+ os.makedirs(self.outdir_fake256, exist_ok=True)
133
+ print(self.outdir) # double check
134
+
135
+ self.evaluation_finished = False
136
+ if os.path.exists( os.path.join(self.outdir,'score.txt') ):
137
+ self.evaluation_finished = True
138
+
139
+
140
+ def alread_saved_this_batch(self, batch):
141
+ existing_real_files = os.listdir( self.outdir_real )
142
+ existing_fake_files = os.listdir( self.outdir_fake )
143
+ status = []
144
+ for image_id in batch["id"]:
145
+ img_name = str(int(image_id))+'.png'
146
+ status.append(img_name in existing_real_files)
147
+ status.append(img_name in existing_fake_files)
148
+ return all(status)
149
+
150
+
151
+ @torch.no_grad()
152
+ def start_evaluating(self):
153
+
154
+ iterator = tqdm( self.loader_eval, desc='Evaluating progress')
155
+ for batch in iterator:
156
+
157
+ #if not self.already_saved_this_batch(batch):
158
+ if True:
159
+
160
+ batch_to_device(batch, self.device)
161
+ batch_size = batch["image"].shape[0]
162
+ samples_real = batch["image"]
163
+
164
+ if self.just_save_real:
165
+ samples_fake = None
166
+ else:
167
+ uc = self.text_encoder.encode( batch_size*[""] )
168
+ context = self.text_encoder.encode( batch["caption"] )
169
+
170
+ image_mask = x0 = None
171
+ if self.config.inpaint:
172
+ image_mask = draw_masks_from_boxes( batch['boxes'], self.model.image_size ).cuda()
173
+ x0 = self.autoencoder.encode( batch["image"] )
174
+
175
+ shape = (batch_size, self.model.in_channels, self.model.image_size, self.model.image_size)
176
+ if self.config.no_plms:
177
+ sampler = DDIMSampler(self.diffusion, self.model)
178
+ steps = 250
179
+ else:
180
+ sampler = PLMSSampler(self.diffusion, self.model)
181
+ steps = 50
182
+
183
+ input = dict( x=None, timesteps=None, context=context, boxes=batch['boxes'], masks=batch['masks'], positive_embeddings=batch["positive_embeddings"] )
184
+ samples_fake = sampler.sample(S=steps, shape=shape, input=input, uc=uc, guidance_scale=self.config.guidance_scale, mask=image_mask, x0=x0)
185
+ samples_fake = self.autoencoder.decode(samples_fake)
186
+
187
+
188
+ save_images(samples_real, batch['id'], self.outdir_real, to256=False )
189
+ if self.config.to256:
190
+ save_images(samples_real, batch['id'], self.outdir_real256, to256=True )
191
+
192
+ if samples_fake is not None:
193
+ save_images(samples_fake, batch['id'], self.outdir_fake, to256=False )
194
+ if self.config.to256:
195
+ save_images(samples_fake, batch['id'], self.outdir_fake256, to256=True )
196
+
197
+
198
+ def fire_fid(self):
199
+ paths = [self.outdir_real, self.outdir_fake]
200
+ if self.config.to256:
201
+ paths = [self.outdir_real256, self.outdir_fake256]
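Note: fire_fid above only collects the real/fake output folders; the metric call itself is not part of this upload. A minimal sketch of how FID could be computed from those two folders, assuming the third-party pytorch-fid package (an assumption, not necessarily what the original script used):

import torch
from pytorch_fid import fid_score  # assumption: pip install pytorch-fid

def compute_fid(real_dir, fake_dir, batch_size=50, dims=2048):
    # Compares InceptionV3 features of all images in the two folders and returns the FID value.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return fid_score.calculate_fid_given_paths(
        [real_dir, fake_dir], batch_size=batch_size, device=device, dims=dims)

Writing the returned value into score.txt inside self.outdir would also let the evaluation_finished check in __init__ pick it up on the next run.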
gligen/ldm/__init__.py ADDED
@@ -0,0 +1,3 @@
1
+ import gligen.evaluator as evaluator
2
+ import gligen.trainer as trainer
3
+ import gligen.ldm as ldm
gligen/ldm/data/__init__.py ADDED
File without changes
gligen/ldm/data/base.py ADDED
@@ -0,0 +1,23 @@
1
+ from abc import abstractmethod
2
+ from torch.utils.data import Dataset, ConcatDataset, ChainDataset, IterableDataset
3
+
4
+
5
+ class Txt2ImgIterableBaseDataset(IterableDataset):
6
+ '''
7
+ Define an interface to make the IterableDatasets for text2img data chainable
8
+ '''
9
+ def __init__(self, num_records=0, valid_ids=None, size=256):
10
+ super().__init__()
11
+ self.num_records = num_records
12
+ self.valid_ids = valid_ids
13
+ self.sample_ids = valid_ids
14
+ self.size = size
15
+
16
+ print(f'{self.__class__.__name__} dataset contains {self.__len__()} examples.')
17
+
18
+ def __len__(self):
19
+ return self.num_records
20
+
21
+ @abstractmethod
22
+ def __iter__(self):
23
+ pass
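Txt2ImgIterableBaseDataset only fixes the chainable-iterable interface; a minimal sketch of a concrete subclass (illustrative only, not part of this repo) that yields synthetic caption/image records:

import numpy as np

class DummyTxt2ImgIterableDataset(Txt2ImgIterableBaseDataset):
    # Yields `num_records` synthetic records so the interface can be exercised
    # with torch.utils.data.ChainDataset or a DataLoader.
    def __iter__(self):
        for i in range(self.num_records):
            yield {
                "image": np.zeros((self.size, self.size, 3), dtype=np.float32),
                "caption": f"sample {i}",
            }

# e.g. list(DummyTxt2ImgIterableDataset(num_records=2)) returns two such records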
gligen/ldm/data/imagenet.py ADDED
@@ -0,0 +1,394 @@
1
+ import os, yaml, pickle, shutil, tarfile, glob
2
+ import cv2
3
+ import albumentations
4
+ import PIL
5
+ import numpy as np
6
+ import torchvision.transforms.functional as TF
7
+ from omegaconf import OmegaConf
8
+ from functools import partial
9
+ from PIL import Image
10
+ from tqdm import tqdm
11
+ from torch.utils.data import Dataset, Subset
12
+
13
+ import taming.data.utils as tdu
14
+ from taming.data.imagenet import str_to_indices, give_synsets_from_indices, download, retrieve
15
+ from taming.data.imagenet import ImagePaths
16
+
17
+ from ldm.modules.image_degradation import degradation_fn_bsr, degradation_fn_bsr_light
18
+
19
+
20
+ def synset2idx(path_to_yaml="ldm/data/index_synset.yaml"):
21
+ with open(path_to_yaml) as f:
22
+ di2s = yaml.load(f, Loader=yaml.SafeLoader)
23
+ return dict((v,k) for k,v in di2s.items())
24
+
25
+
26
+ class ImageNetBase(Dataset):
27
+ def __init__(self, config=None):
28
+ self.config = config or OmegaConf.create()
29
+ if not isinstance(self.config, dict):
30
+ self.config = OmegaConf.to_container(self.config)
31
+ self.keep_orig_class_label = self.config.get("keep_orig_class_label", False)
32
+ self.process_images = True # if False we skip loading & processing images and self.data contains filepaths
33
+ self._prepare()
34
+ self._prepare_synset_to_human()
35
+ self._prepare_idx_to_synset()
36
+ self._prepare_human_to_integer_label()
37
+ self._load()
38
+
39
+ def __len__(self):
40
+ return len(self.data)
41
+
42
+ def __getitem__(self, i):
43
+ return self.data[i]
44
+
45
+ def _prepare(self):
46
+ raise NotImplementedError()
47
+
48
+ def _filter_relpaths(self, relpaths):
49
+ ignore = set([
50
+ "n06596364_9591.JPEG",
51
+ ])
52
+ relpaths = [rpath for rpath in relpaths if not rpath.split("/")[-1] in ignore]
53
+ if "sub_indices" in self.config:
54
+ indices = str_to_indices(self.config["sub_indices"])
55
+ synsets = give_synsets_from_indices(indices, path_to_yaml=self.idx2syn) # returns a list of strings
56
+ self.synset2idx = synset2idx(path_to_yaml=self.idx2syn)
57
+ files = []
58
+ for rpath in relpaths:
59
+ syn = rpath.split("/")[0]
60
+ if syn in synsets:
61
+ files.append(rpath)
62
+ return files
63
+ else:
64
+ return relpaths
65
+
66
+ def _prepare_synset_to_human(self):
67
+ SIZE = 2655750
68
+ URL = "https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1"
69
+ self.human_dict = os.path.join(self.root, "synset_human.txt")
70
+ if (not os.path.exists(self.human_dict) or
71
+ not os.path.getsize(self.human_dict)==SIZE):
72
+ download(URL, self.human_dict)
73
+
74
+ def _prepare_idx_to_synset(self):
75
+ URL = "https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1"
76
+ self.idx2syn = os.path.join(self.root, "index_synset.yaml")
77
+ if (not os.path.exists(self.idx2syn)):
78
+ download(URL, self.idx2syn)
79
+
80
+ def _prepare_human_to_integer_label(self):
81
+ URL = "https://heibox.uni-heidelberg.de/f/2362b797d5be43b883f6/?dl=1"
82
+ self.human2integer = os.path.join(self.root, "imagenet1000_clsidx_to_labels.txt")
83
+ if (not os.path.exists(self.human2integer)):
84
+ download(URL, self.human2integer)
85
+ with open(self.human2integer, "r") as f:
86
+ lines = f.read().splitlines()
87
+ assert len(lines) == 1000
88
+ self.human2integer_dict = dict()
89
+ for line in lines:
90
+ value, key = line.split(":")
91
+ self.human2integer_dict[key] = int(value)
92
+
93
+ def _load(self):
94
+ with open(self.txt_filelist, "r") as f:
95
+ self.relpaths = f.read().splitlines()
96
+ l1 = len(self.relpaths)
97
+ self.relpaths = self._filter_relpaths(self.relpaths)
98
+ print("Removed {} files from filelist during filtering.".format(l1 - len(self.relpaths)))
99
+
100
+ self.synsets = [p.split("/")[0] for p in self.relpaths]
101
+ self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths]
102
+
103
+ unique_synsets = np.unique(self.synsets)
104
+ class_dict = dict((synset, i) for i, synset in enumerate(unique_synsets))
105
+ if not self.keep_orig_class_label:
106
+ self.class_labels = [class_dict[s] for s in self.synsets]
107
+ else:
108
+ self.class_labels = [self.synset2idx[s] for s in self.synsets]
109
+
110
+ with open(self.human_dict, "r") as f:
111
+ human_dict = f.read().splitlines()
112
+ human_dict = dict(line.split(maxsplit=1) for line in human_dict)
113
+
114
+ self.human_labels = [human_dict[s] for s in self.synsets]
115
+
116
+ labels = {
117
+ "relpath": np.array(self.relpaths),
118
+ "synsets": np.array(self.synsets),
119
+ "class_label": np.array(self.class_labels),
120
+ "human_label": np.array(self.human_labels),
121
+ }
122
+
123
+ if self.process_images:
124
+ self.size = retrieve(self.config, "size", default=256)
125
+ self.data = ImagePaths(self.abspaths,
126
+ labels=labels,
127
+ size=self.size,
128
+ random_crop=self.random_crop,
129
+ )
130
+ else:
131
+ self.data = self.abspaths
132
+
133
+
134
+ class ImageNetTrain(ImageNetBase):
135
+ NAME = "ILSVRC2012_train"
136
+ URL = "http://www.image-net.org/challenges/LSVRC/2012/"
137
+ AT_HASH = "a306397ccf9c2ead27155983c254227c0fd938e2"
138
+ FILES = [
139
+ "ILSVRC2012_img_train.tar",
140
+ ]
141
+ SIZES = [
142
+ 147897477120,
143
+ ]
144
+
145
+ def __init__(self, process_images=True, data_root=None, **kwargs):
146
+ self.process_images = process_images
147
+ self.data_root = data_root
148
+ super().__init__(**kwargs)
149
+
150
+ def _prepare(self):
151
+ if self.data_root:
152
+ self.root = os.path.join(self.data_root, self.NAME)
153
+ else:
154
+ cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
155
+ self.root = os.path.join(cachedir, "autoencoders/data", self.NAME)
156
+
157
+ self.datadir = os.path.join(self.root, "data")
158
+ self.txt_filelist = os.path.join(self.root, "filelist.txt")
159
+ self.expected_length = 1281167
160
+ self.random_crop = retrieve(self.config, "ImageNetTrain/random_crop",
161
+ default=True)
162
+ if not tdu.is_prepared(self.root):
163
+ # prep
164
+ print("Preparing dataset {} in {}".format(self.NAME, self.root))
165
+
166
+ datadir = self.datadir
167
+ if not os.path.exists(datadir):
168
+ path = os.path.join(self.root, self.FILES[0])
169
+ if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]:
170
+ import academictorrents as at
171
+ atpath = at.get(self.AT_HASH, datastore=self.root)
172
+ assert atpath == path
173
+
174
+ print("Extracting {} to {}".format(path, datadir))
175
+ os.makedirs(datadir, exist_ok=True)
176
+ with tarfile.open(path, "r:") as tar:
177
+ tar.extractall(path=datadir)
178
+
179
+ print("Extracting sub-tars.")
180
+ subpaths = sorted(glob.glob(os.path.join(datadir, "*.tar")))
181
+ for subpath in tqdm(subpaths):
182
+ subdir = subpath[:-len(".tar")]
183
+ os.makedirs(subdir, exist_ok=True)
184
+ with tarfile.open(subpath, "r:") as tar:
185
+ tar.extractall(path=subdir)
186
+
187
+ filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG"))
188
+ filelist = [os.path.relpath(p, start=datadir) for p in filelist]
189
+ filelist = sorted(filelist)
190
+ filelist = "\n".join(filelist)+"\n"
191
+ with open(self.txt_filelist, "w") as f:
192
+ f.write(filelist)
193
+
194
+ tdu.mark_prepared(self.root)
195
+
196
+
197
+ class ImageNetValidation(ImageNetBase):
198
+ NAME = "ILSVRC2012_validation"
199
+ URL = "http://www.image-net.org/challenges/LSVRC/2012/"
200
+ AT_HASH = "5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5"
201
+ VS_URL = "https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1"
202
+ FILES = [
203
+ "ILSVRC2012_img_val.tar",
204
+ "validation_synset.txt",
205
+ ]
206
+ SIZES = [
207
+ 6744924160,
208
+ 1950000,
209
+ ]
210
+
211
+ def __init__(self, process_images=True, data_root=None, **kwargs):
212
+ self.data_root = data_root
213
+ self.process_images = process_images
214
+ super().__init__(**kwargs)
215
+
216
+ def _prepare(self):
217
+ if self.data_root:
218
+ self.root = os.path.join(self.data_root, self.NAME)
219
+ else:
220
+ cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
221
+ self.root = os.path.join(cachedir, "autoencoders/data", self.NAME)
222
+ self.datadir = os.path.join(self.root, "data")
223
+ self.txt_filelist = os.path.join(self.root, "filelist.txt")
224
+ self.expected_length = 50000
225
+ self.random_crop = retrieve(self.config, "ImageNetValidation/random_crop",
226
+ default=False)
227
+ if not tdu.is_prepared(self.root):
228
+ # prep
229
+ print("Preparing dataset {} in {}".format(self.NAME, self.root))
230
+
231
+ datadir = self.datadir
232
+ if not os.path.exists(datadir):
233
+ path = os.path.join(self.root, self.FILES[0])
234
+ if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]:
235
+ import academictorrents as at
236
+ atpath = at.get(self.AT_HASH, datastore=self.root)
237
+ assert atpath == path
238
+
239
+ print("Extracting {} to {}".format(path, datadir))
240
+ os.makedirs(datadir, exist_ok=True)
241
+ with tarfile.open(path, "r:") as tar:
242
+ tar.extractall(path=datadir)
243
+
244
+ vspath = os.path.join(self.root, self.FILES[1])
245
+ if not os.path.exists(vspath) or not os.path.getsize(vspath)==self.SIZES[1]:
246
+ download(self.VS_URL, vspath)
247
+
248
+ with open(vspath, "r") as f:
249
+ synset_dict = f.read().splitlines()
250
+ synset_dict = dict(line.split() for line in synset_dict)
251
+
252
+ print("Reorganizing into synset folders")
253
+ synsets = np.unique(list(synset_dict.values()))
254
+ for s in synsets:
255
+ os.makedirs(os.path.join(datadir, s), exist_ok=True)
256
+ for k, v in synset_dict.items():
257
+ src = os.path.join(datadir, k)
258
+ dst = os.path.join(datadir, v)
259
+ shutil.move(src, dst)
260
+
261
+ filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG"))
262
+ filelist = [os.path.relpath(p, start=datadir) for p in filelist]
263
+ filelist = sorted(filelist)
264
+ filelist = "\n".join(filelist)+"\n"
265
+ with open(self.txt_filelist, "w") as f:
266
+ f.write(filelist)
267
+
268
+ tdu.mark_prepared(self.root)
269
+
270
+
271
+
272
+ class ImageNetSR(Dataset):
273
+ def __init__(self, size=None,
274
+ degradation=None, downscale_f=4, min_crop_f=0.5, max_crop_f=1.,
275
+ random_crop=True):
276
+ """
277
+ Imagenet Superresolution Dataloader
278
+ Performs the following ops in order:
279
+ 1. crops a crop of size s from image either as random or center crop
280
+ 2. resizes crop to size with cv2.area_interpolation
281
+ 3. degrades resized crop with degradation_fn
282
+
283
+ :param size: resizing to size after cropping
284
+ :param degradation: degradation_fn, e.g. cv_bicubic or bsrgan_light
285
+ :param downscale_f: Low Resolution Downsample factor
286
+ :param min_crop_f: determines crop size s,
287
+ where s = c * min_img_side_len with c sampled from interval (min_crop_f, max_crop_f)
288
+ :param max_crop_f: upper bound of the crop-factor interval c (see min_crop_f)
289
+ :param data_root:
290
+ :param random_crop:
291
+ """
292
+ self.base = self.get_base()
293
+ assert size
294
+ assert (size / downscale_f).is_integer()
295
+ self.size = size
296
+ self.LR_size = int(size / downscale_f)
297
+ self.min_crop_f = min_crop_f
298
+ self.max_crop_f = max_crop_f
299
+ assert(max_crop_f <= 1.)
300
+ self.center_crop = not random_crop
301
+
302
+ self.image_rescaler = albumentations.SmallestMaxSize(max_size=size, interpolation=cv2.INTER_AREA)
303
+
304
+ self.pil_interpolation = False # gets reset later in case interp_op is from pillow
305
+
306
+ if degradation == "bsrgan":
307
+ self.degradation_process = partial(degradation_fn_bsr, sf=downscale_f)
308
+
309
+ elif degradation == "bsrgan_light":
310
+ self.degradation_process = partial(degradation_fn_bsr_light, sf=downscale_f)
311
+
312
+ else:
313
+ interpolation_fn = {
314
+ "cv_nearest": cv2.INTER_NEAREST,
315
+ "cv_bilinear": cv2.INTER_LINEAR,
316
+ "cv_bicubic": cv2.INTER_CUBIC,
317
+ "cv_area": cv2.INTER_AREA,
318
+ "cv_lanczos": cv2.INTER_LANCZOS4,
319
+ "pil_nearest": PIL.Image.NEAREST,
320
+ "pil_bilinear": PIL.Image.BILINEAR,
321
+ "pil_bicubic": PIL.Image.BICUBIC,
322
+ "pil_box": PIL.Image.BOX,
323
+ "pil_hamming": PIL.Image.HAMMING,
324
+ "pil_lanczos": PIL.Image.LANCZOS,
325
+ }[degradation]
326
+
327
+ self.pil_interpolation = degradation.startswith("pil_")
328
+
329
+ if self.pil_interpolation:
330
+ self.degradation_process = partial(TF.resize, size=self.LR_size, interpolation=interpolation_fn)
331
+
332
+ else:
333
+ self.degradation_process = albumentations.SmallestMaxSize(max_size=self.LR_size,
334
+ interpolation=interpolation_fn)
335
+
336
+ def __len__(self):
337
+ return len(self.base)
338
+
339
+ def __getitem__(self, i):
340
+ example = self.base[i]
341
+ image = Image.open(example["file_path_"])
342
+
343
+ if not image.mode == "RGB":
344
+ image = image.convert("RGB")
345
+
346
+ image = np.array(image).astype(np.uint8)
347
+
348
+ min_side_len = min(image.shape[:2])
349
+ crop_side_len = min_side_len * np.random.uniform(self.min_crop_f, self.max_crop_f, size=None)
350
+ crop_side_len = int(crop_side_len)
351
+
352
+ if self.center_crop:
353
+ self.cropper = albumentations.CenterCrop(height=crop_side_len, width=crop_side_len)
354
+
355
+ else:
356
+ self.cropper = albumentations.RandomCrop(height=crop_side_len, width=crop_side_len)
357
+
358
+ image = self.cropper(image=image)["image"]
359
+ image = self.image_rescaler(image=image)["image"]
360
+
361
+ if self.pil_interpolation:
362
+ image_pil = PIL.Image.fromarray(image)
363
+ LR_image = self.degradation_process(image_pil)
364
+ LR_image = np.array(LR_image).astype(np.uint8)
365
+
366
+ else:
367
+ LR_image = self.degradation_process(image=image)["image"]
368
+
369
+ example["image"] = (image/127.5 - 1.0).astype(np.float32)
370
+ example["LR_image"] = (LR_image/127.5 - 1.0).astype(np.float32)
371
+
372
+ return example
373
+
374
+
375
+ class ImageNetSRTrain(ImageNetSR):
376
+ def __init__(self, **kwargs):
377
+ super().__init__(**kwargs)
378
+
379
+ def get_base(self):
380
+ with open("ldm/data/imagenet_train_hr_indices.p", "rb") as f:
381
+ indices = pickle.load(f)
382
+ dset = ImageNetTrain(process_images=False,)
383
+ return Subset(dset, indices)
384
+
385
+
386
+ class ImageNetSRValidation(ImageNetSR):
387
+ def __init__(self, **kwargs):
388
+ super().__init__(**kwargs)
389
+
390
+ def get_base(self):
391
+ with open("ldm/data/imagenet_val_hr_indices.p", "rb") as f:
392
+ indices = pickle.load(f)
393
+ dset = ImageNetValidation(process_images=False,)
394
+ return Subset(dset, indices)
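The SR datasets above follow the crop / resize / degrade order described in the ImageNetSR docstring. An illustrative usage sketch, assuming the prepared ImageNet validation data and the imagenet_val_hr_indices.p file referenced in get_base are in place:

ds = ImageNetSRValidation(size=256, degradation="pil_bicubic", downscale_f=4)
example = ds[0]
print(example["image"].shape)     # (256, 256, 3), float32 in [-1, 1]
print(example["LR_image"].shape)  # (64, 64, 3), bicubic-degraded copy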
gligen/ldm/data/imagenet_clsidx_to_label.txt ADDED
@@ -0,0 +1,1000 @@
1
+ 0: 'tench, Tinca tinca',
2
+ 1: 'goldfish, Carassius auratus',
3
+ 2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias',
4
+ 3: 'tiger shark, Galeocerdo cuvieri',
5
+ 4: 'hammerhead, hammerhead shark',
6
+ 5: 'electric ray, crampfish, numbfish, torpedo',
7
+ 6: 'stingray',
8
+ 7: 'cock',
9
+ 8: 'hen',
10
+ 9: 'ostrich, Struthio camelus',
11
+ 10: 'brambling, Fringilla montifringilla',
12
+ 11: 'goldfinch, Carduelis carduelis',
13
+ 12: 'house finch, linnet, Carpodacus mexicanus',
14
+ 13: 'junco, snowbird',
15
+ 14: 'indigo bunting, indigo finch, indigo bird, Passerina cyanea',
16
+ 15: 'robin, American robin, Turdus migratorius',
17
+ 16: 'bulbul',
18
+ 17: 'jay',
19
+ 18: 'magpie',
20
+ 19: 'chickadee',
21
+ 20: 'water ouzel, dipper',
22
+ 21: 'kite',
23
+ 22: 'bald eagle, American eagle, Haliaeetus leucocephalus',
24
+ 23: 'vulture',
25
+ 24: 'great grey owl, great gray owl, Strix nebulosa',
26
+ 25: 'European fire salamander, Salamandra salamandra',
27
+ 26: 'common newt, Triturus vulgaris',
28
+ 27: 'eft',
29
+ 28: 'spotted salamander, Ambystoma maculatum',
30
+ 29: 'axolotl, mud puppy, Ambystoma mexicanum',
31
+ 30: 'bullfrog, Rana catesbeiana',
32
+ 31: 'tree frog, tree-frog',
33
+ 32: 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui',
34
+ 33: 'loggerhead, loggerhead turtle, Caretta caretta',
35
+ 34: 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea',
36
+ 35: 'mud turtle',
37
+ 36: 'terrapin',
38
+ 37: 'box turtle, box tortoise',
39
+ 38: 'banded gecko',
40
+ 39: 'common iguana, iguana, Iguana iguana',
41
+ 40: 'American chameleon, anole, Anolis carolinensis',
42
+ 41: 'whiptail, whiptail lizard',
43
+ 42: 'agama',
44
+ 43: 'frilled lizard, Chlamydosaurus kingi',
45
+ 44: 'alligator lizard',
46
+ 45: 'Gila monster, Heloderma suspectum',
47
+ 46: 'green lizard, Lacerta viridis',
48
+ 47: 'African chameleon, Chamaeleo chamaeleon',
49
+ 48: 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis',
50
+ 49: 'African crocodile, Nile crocodile, Crocodylus niloticus',
51
+ 50: 'American alligator, Alligator mississipiensis',
52
+ 51: 'triceratops',
53
+ 52: 'thunder snake, worm snake, Carphophis amoenus',
54
+ 53: 'ringneck snake, ring-necked snake, ring snake',
55
+ 54: 'hognose snake, puff adder, sand viper',
56
+ 55: 'green snake, grass snake',
57
+ 56: 'king snake, kingsnake',
58
+ 57: 'garter snake, grass snake',
59
+ 58: 'water snake',
60
+ 59: 'vine snake',
61
+ 60: 'night snake, Hypsiglena torquata',
62
+ 61: 'boa constrictor, Constrictor constrictor',
63
+ 62: 'rock python, rock snake, Python sebae',
64
+ 63: 'Indian cobra, Naja naja',
65
+ 64: 'green mamba',
66
+ 65: 'sea snake',
67
+ 66: 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus',
68
+ 67: 'diamondback, diamondback rattlesnake, Crotalus adamanteus',
69
+ 68: 'sidewinder, horned rattlesnake, Crotalus cerastes',
70
+ 69: 'trilobite',
71
+ 70: 'harvestman, daddy longlegs, Phalangium opilio',
72
+ 71: 'scorpion',
73
+ 72: 'black and gold garden spider, Argiope aurantia',
74
+ 73: 'barn spider, Araneus cavaticus',
75
+ 74: 'garden spider, Aranea diademata',
76
+ 75: 'black widow, Latrodectus mactans',
77
+ 76: 'tarantula',
78
+ 77: 'wolf spider, hunting spider',
79
+ 78: 'tick',
80
+ 79: 'centipede',
81
+ 80: 'black grouse',
82
+ 81: 'ptarmigan',
83
+ 82: 'ruffed grouse, partridge, Bonasa umbellus',
84
+ 83: 'prairie chicken, prairie grouse, prairie fowl',
85
+ 84: 'peacock',
86
+ 85: 'quail',
87
+ 86: 'partridge',
88
+ 87: 'African grey, African gray, Psittacus erithacus',
89
+ 88: 'macaw',
90
+ 89: 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita',
91
+ 90: 'lorikeet',
92
+ 91: 'coucal',
93
+ 92: 'bee eater',
94
+ 93: 'hornbill',
95
+ 94: 'hummingbird',
96
+ 95: 'jacamar',
97
+ 96: 'toucan',
98
+ 97: 'drake',
99
+ 98: 'red-breasted merganser, Mergus serrator',
100
+ 99: 'goose',
101
+ 100: 'black swan, Cygnus atratus',
102
+ 101: 'tusker',
103
+ 102: 'echidna, spiny anteater, anteater',
104
+ 103: 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus',
105
+ 104: 'wallaby, brush kangaroo',
106
+ 105: 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus',
107
+ 106: 'wombat',
108
+ 107: 'jellyfish',
109
+ 108: 'sea anemone, anemone',
110
+ 109: 'brain coral',
111
+ 110: 'flatworm, platyhelminth',
112
+ 111: 'nematode, nematode worm, roundworm',
113
+ 112: 'conch',
114
+ 113: 'snail',
115
+ 114: 'slug',
116
+ 115: 'sea slug, nudibranch',
117
+ 116: 'chiton, coat-of-mail shell, sea cradle, polyplacophore',
118
+ 117: 'chambered nautilus, pearly nautilus, nautilus',
119
+ 118: 'Dungeness crab, Cancer magister',
120
+ 119: 'rock crab, Cancer irroratus',
121
+ 120: 'fiddler crab',
122
+ 121: 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica',
123
+ 122: 'American lobster, Northern lobster, Maine lobster, Homarus americanus',
124
+ 123: 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish',
125
+ 124: 'crayfish, crawfish, crawdad, crawdaddy',
126
+ 125: 'hermit crab',
127
+ 126: 'isopod',
128
+ 127: 'white stork, Ciconia ciconia',
129
+ 128: 'black stork, Ciconia nigra',
130
+ 129: 'spoonbill',
131
+ 130: 'flamingo',
132
+ 131: 'little blue heron, Egretta caerulea',
133
+ 132: 'American egret, great white heron, Egretta albus',
134
+ 133: 'bittern',
135
+ 134: 'crane',
136
+ 135: 'limpkin, Aramus pictus',
137
+ 136: 'European gallinule, Porphyrio porphyrio',
138
+ 137: 'American coot, marsh hen, mud hen, water hen, Fulica americana',
139
+ 138: 'bustard',
140
+ 139: 'ruddy turnstone, Arenaria interpres',
141
+ 140: 'red-backed sandpiper, dunlin, Erolia alpina',
142
+ 141: 'redshank, Tringa totanus',
143
+ 142: 'dowitcher',
144
+ 143: 'oystercatcher, oyster catcher',
145
+ 144: 'pelican',
146
+ 145: 'king penguin, Aptenodytes patagonica',
147
+ 146: 'albatross, mollymawk',
148
+ 147: 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus',
149
+ 148: 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca',
150
+ 149: 'dugong, Dugong dugon',
151
+ 150: 'sea lion',
152
+ 151: 'Chihuahua',
153
+ 152: 'Japanese spaniel',
154
+ 153: 'Maltese dog, Maltese terrier, Maltese',
155
+ 154: 'Pekinese, Pekingese, Peke',
156
+ 155: 'Shih-Tzu',
157
+ 156: 'Blenheim spaniel',
158
+ 157: 'papillon',
159
+ 158: 'toy terrier',
160
+ 159: 'Rhodesian ridgeback',
161
+ 160: 'Afghan hound, Afghan',
162
+ 161: 'basset, basset hound',
163
+ 162: 'beagle',
164
+ 163: 'bloodhound, sleuthhound',
165
+ 164: 'bluetick',
166
+ 165: 'black-and-tan coonhound',
167
+ 166: 'Walker hound, Walker foxhound',
168
+ 167: 'English foxhound',
169
+ 168: 'redbone',
170
+ 169: 'borzoi, Russian wolfhound',
171
+ 170: 'Irish wolfhound',
172
+ 171: 'Italian greyhound',
173
+ 172: 'whippet',
174
+ 173: 'Ibizan hound, Ibizan Podenco',
175
+ 174: 'Norwegian elkhound, elkhound',
176
+ 175: 'otterhound, otter hound',
177
+ 176: 'Saluki, gazelle hound',
178
+ 177: 'Scottish deerhound, deerhound',
179
+ 178: 'Weimaraner',
180
+ 179: 'Staffordshire bullterrier, Staffordshire bull terrier',
181
+ 180: 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier',
182
+ 181: 'Bedlington terrier',
183
+ 182: 'Border terrier',
184
+ 183: 'Kerry blue terrier',
185
+ 184: 'Irish terrier',
186
+ 185: 'Norfolk terrier',
187
+ 186: 'Norwich terrier',
188
+ 187: 'Yorkshire terrier',
189
+ 188: 'wire-haired fox terrier',
190
+ 189: 'Lakeland terrier',
191
+ 190: 'Sealyham terrier, Sealyham',
192
+ 191: 'Airedale, Airedale terrier',
193
+ 192: 'cairn, cairn terrier',
194
+ 193: 'Australian terrier',
195
+ 194: 'Dandie Dinmont, Dandie Dinmont terrier',
196
+ 195: 'Boston bull, Boston terrier',
197
+ 196: 'miniature schnauzer',
198
+ 197: 'giant schnauzer',
199
+ 198: 'standard schnauzer',
200
+ 199: 'Scotch terrier, Scottish terrier, Scottie',
201
+ 200: 'Tibetan terrier, chrysanthemum dog',
202
+ 201: 'silky terrier, Sydney silky',
203
+ 202: 'soft-coated wheaten terrier',
204
+ 203: 'West Highland white terrier',
205
+ 204: 'Lhasa, Lhasa apso',
206
+ 205: 'flat-coated retriever',
207
+ 206: 'curly-coated retriever',
208
+ 207: 'golden retriever',
209
+ 208: 'Labrador retriever',
210
+ 209: 'Chesapeake Bay retriever',
211
+ 210: 'German short-haired pointer',
212
+ 211: 'vizsla, Hungarian pointer',
213
+ 212: 'English setter',
214
+ 213: 'Irish setter, red setter',
215
+ 214: 'Gordon setter',
216
+ 215: 'Brittany spaniel',
217
+ 216: 'clumber, clumber spaniel',
218
+ 217: 'English springer, English springer spaniel',
219
+ 218: 'Welsh springer spaniel',
220
+ 219: 'cocker spaniel, English cocker spaniel, cocker',
221
+ 220: 'Sussex spaniel',
222
+ 221: 'Irish water spaniel',
223
+ 222: 'kuvasz',
224
+ 223: 'schipperke',
225
+ 224: 'groenendael',
226
+ 225: 'malinois',
227
+ 226: 'briard',
228
+ 227: 'kelpie',
229
+ 228: 'komondor',
230
+ 229: 'Old English sheepdog, bobtail',
231
+ 230: 'Shetland sheepdog, Shetland sheep dog, Shetland',
232
+ 231: 'collie',
233
+ 232: 'Border collie',
234
+ 233: 'Bouvier des Flandres, Bouviers des Flandres',
235
+ 234: 'Rottweiler',
236
+ 235: 'German shepherd, German shepherd dog, German police dog, alsatian',
237
+ 236: 'Doberman, Doberman pinscher',
238
+ 237: 'miniature pinscher',
239
+ 238: 'Greater Swiss Mountain dog',
240
+ 239: 'Bernese mountain dog',
241
+ 240: 'Appenzeller',
242
+ 241: 'EntleBucher',
243
+ 242: 'boxer',
244
+ 243: 'bull mastiff',
245
+ 244: 'Tibetan mastiff',
246
+ 245: 'French bulldog',
247
+ 246: 'Great Dane',
248
+ 247: 'Saint Bernard, St Bernard',
249
+ 248: 'Eskimo dog, husky',
250
+ 249: 'malamute, malemute, Alaskan malamute',
251
+ 250: 'Siberian husky',
252
+ 251: 'dalmatian, coach dog, carriage dog',
253
+ 252: 'affenpinscher, monkey pinscher, monkey dog',
254
+ 253: 'basenji',
255
+ 254: 'pug, pug-dog',
256
+ 255: 'Leonberg',
257
+ 256: 'Newfoundland, Newfoundland dog',
258
+ 257: 'Great Pyrenees',
259
+ 258: 'Samoyed, Samoyede',
260
+ 259: 'Pomeranian',
261
+ 260: 'chow, chow chow',
262
+ 261: 'keeshond',
263
+ 262: 'Brabancon griffon',
264
+ 263: 'Pembroke, Pembroke Welsh corgi',
265
+ 264: 'Cardigan, Cardigan Welsh corgi',
266
+ 265: 'toy poodle',
267
+ 266: 'miniature poodle',
268
+ 267: 'standard poodle',
269
+ 268: 'Mexican hairless',
270
+ 269: 'timber wolf, grey wolf, gray wolf, Canis lupus',
271
+ 270: 'white wolf, Arctic wolf, Canis lupus tundrarum',
272
+ 271: 'red wolf, maned wolf, Canis rufus, Canis niger',
273
+ 272: 'coyote, prairie wolf, brush wolf, Canis latrans',
274
+ 273: 'dingo, warrigal, warragal, Canis dingo',
275
+ 274: 'dhole, Cuon alpinus',
276
+ 275: 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus',
277
+ 276: 'hyena, hyaena',
278
+ 277: 'red fox, Vulpes vulpes',
279
+ 278: 'kit fox, Vulpes macrotis',
280
+ 279: 'Arctic fox, white fox, Alopex lagopus',
281
+ 280: 'grey fox, gray fox, Urocyon cinereoargenteus',
282
+ 281: 'tabby, tabby cat',
283
+ 282: 'tiger cat',
284
+ 283: 'Persian cat',
285
+ 284: 'Siamese cat, Siamese',
286
+ 285: 'Egyptian cat',
287
+ 286: 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor',
288
+ 287: 'lynx, catamount',
289
+ 288: 'leopard, Panthera pardus',
290
+ 289: 'snow leopard, ounce, Panthera uncia',
291
+ 290: 'jaguar, panther, Panthera onca, Felis onca',
292
+ 291: 'lion, king of beasts, Panthera leo',
293
+ 292: 'tiger, Panthera tigris',
294
+ 293: 'cheetah, chetah, Acinonyx jubatus',
295
+ 294: 'brown bear, bruin, Ursus arctos',
296
+ 295: 'American black bear, black bear, Ursus americanus, Euarctos americanus',
297
+ 296: 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus',
298
+ 297: 'sloth bear, Melursus ursinus, Ursus ursinus',
299
+ 298: 'mongoose',
300
+ 299: 'meerkat, mierkat',
301
+ 300: 'tiger beetle',
302
+ 301: 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle',
303
+ 302: 'ground beetle, carabid beetle',
304
+ 303: 'long-horned beetle, longicorn, longicorn beetle',
305
+ 304: 'leaf beetle, chrysomelid',
306
+ 305: 'dung beetle',
307
+ 306: 'rhinoceros beetle',
308
+ 307: 'weevil',
309
+ 308: 'fly',
310
+ 309: 'bee',
311
+ 310: 'ant, emmet, pismire',
312
+ 311: 'grasshopper, hopper',
313
+ 312: 'cricket',
314
+ 313: 'walking stick, walkingstick, stick insect',
315
+ 314: 'cockroach, roach',
316
+ 315: 'mantis, mantid',
317
+ 316: 'cicada, cicala',
318
+ 317: 'leafhopper',
319
+ 318: 'lacewing, lacewing fly',
320
+ 319: "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk",
321
+ 320: 'damselfly',
322
+ 321: 'admiral',
323
+ 322: 'ringlet, ringlet butterfly',
324
+ 323: 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus',
325
+ 324: 'cabbage butterfly',
326
+ 325: 'sulphur butterfly, sulfur butterfly',
327
+ 326: 'lycaenid, lycaenid butterfly',
328
+ 327: 'starfish, sea star',
329
+ 328: 'sea urchin',
330
+ 329: 'sea cucumber, holothurian',
331
+ 330: 'wood rabbit, cottontail, cottontail rabbit',
332
+ 331: 'hare',
333
+ 332: 'Angora, Angora rabbit',
334
+ 333: 'hamster',
335
+ 334: 'porcupine, hedgehog',
336
+ 335: 'fox squirrel, eastern fox squirrel, Sciurus niger',
337
+ 336: 'marmot',
338
+ 337: 'beaver',
339
+ 338: 'guinea pig, Cavia cobaya',
340
+ 339: 'sorrel',
341
+ 340: 'zebra',
342
+ 341: 'hog, pig, grunter, squealer, Sus scrofa',
343
+ 342: 'wild boar, boar, Sus scrofa',
344
+ 343: 'warthog',
345
+ 344: 'hippopotamus, hippo, river horse, Hippopotamus amphibius',
346
+ 345: 'ox',
347
+ 346: 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis',
348
+ 347: 'bison',
349
+ 348: 'ram, tup',
350
+ 349: 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis',
351
+ 350: 'ibex, Capra ibex',
352
+ 351: 'hartebeest',
353
+ 352: 'impala, Aepyceros melampus',
354
+ 353: 'gazelle',
355
+ 354: 'Arabian camel, dromedary, Camelus dromedarius',
356
+ 355: 'llama',
357
+ 356: 'weasel',
358
+ 357: 'mink',
359
+ 358: 'polecat, fitch, foulmart, foumart, Mustela putorius',
360
+ 359: 'black-footed ferret, ferret, Mustela nigripes',
361
+ 360: 'otter',
362
+ 361: 'skunk, polecat, wood pussy',
363
+ 362: 'badger',
364
+ 363: 'armadillo',
365
+ 364: 'three-toed sloth, ai, Bradypus tridactylus',
366
+ 365: 'orangutan, orang, orangutang, Pongo pygmaeus',
367
+ 366: 'gorilla, Gorilla gorilla',
368
+ 367: 'chimpanzee, chimp, Pan troglodytes',
369
+ 368: 'gibbon, Hylobates lar',
370
+ 369: 'siamang, Hylobates syndactylus, Symphalangus syndactylus',
371
+ 370: 'guenon, guenon monkey',
372
+ 371: 'patas, hussar monkey, Erythrocebus patas',
373
+ 372: 'baboon',
374
+ 373: 'macaque',
375
+ 374: 'langur',
376
+ 375: 'colobus, colobus monkey',
377
+ 376: 'proboscis monkey, Nasalis larvatus',
378
+ 377: 'marmoset',
379
+ 378: 'capuchin, ringtail, Cebus capucinus',
380
+ 379: 'howler monkey, howler',
381
+ 380: 'titi, titi monkey',
382
+ 381: 'spider monkey, Ateles geoffroyi',
383
+ 382: 'squirrel monkey, Saimiri sciureus',
384
+ 383: 'Madagascar cat, ring-tailed lemur, Lemur catta',
385
+ 384: 'indri, indris, Indri indri, Indri brevicaudatus',
386
+ 385: 'Indian elephant, Elephas maximus',
387
+ 386: 'African elephant, Loxodonta africana',
388
+ 387: 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens',
389
+ 388: 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca',
390
+ 389: 'barracouta, snoek',
391
+ 390: 'eel',
392
+ 391: 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch',
393
+ 392: 'rock beauty, Holocanthus tricolor',
394
+ 393: 'anemone fish',
395
+ 394: 'sturgeon',
396
+ 395: 'gar, garfish, garpike, billfish, Lepisosteus osseus',
397
+ 396: 'lionfish',
398
+ 397: 'puffer, pufferfish, blowfish, globefish',
399
+ 398: 'abacus',
400
+ 399: 'abaya',
401
+ 400: "academic gown, academic robe, judge's robe",
402
+ 401: 'accordion, piano accordion, squeeze box',
403
+ 402: 'acoustic guitar',
404
+ 403: 'aircraft carrier, carrier, flattop, attack aircraft carrier',
405
+ 404: 'airliner',
406
+ 405: 'airship, dirigible',
407
+ 406: 'altar',
408
+ 407: 'ambulance',
409
+ 408: 'amphibian, amphibious vehicle',
410
+ 409: 'analog clock',
411
+ 410: 'apiary, bee house',
412
+ 411: 'apron',
413
+ 412: 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin',
414
+ 413: 'assault rifle, assault gun',
415
+ 414: 'backpack, back pack, knapsack, packsack, rucksack, haversack',
416
+ 415: 'bakery, bakeshop, bakehouse',
417
+ 416: 'balance beam, beam',
418
+ 417: 'balloon',
419
+ 418: 'ballpoint, ballpoint pen, ballpen, Biro',
420
+ 419: 'Band Aid',
421
+ 420: 'banjo',
422
+ 421: 'bannister, banister, balustrade, balusters, handrail',
423
+ 422: 'barbell',
424
+ 423: 'barber chair',
425
+ 424: 'barbershop',
426
+ 425: 'barn',
427
+ 426: 'barometer',
428
+ 427: 'barrel, cask',
429
+ 428: 'barrow, garden cart, lawn cart, wheelbarrow',
430
+ 429: 'baseball',
431
+ 430: 'basketball',
432
+ 431: 'bassinet',
433
+ 432: 'bassoon',
434
+ 433: 'bathing cap, swimming cap',
435
+ 434: 'bath towel',
436
+ 435: 'bathtub, bathing tub, bath, tub',
437
+ 436: 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon',
438
+ 437: 'beacon, lighthouse, beacon light, pharos',
439
+ 438: 'beaker',
440
+ 439: 'bearskin, busby, shako',
441
+ 440: 'beer bottle',
442
+ 441: 'beer glass',
443
+ 442: 'bell cote, bell cot',
444
+ 443: 'bib',
445
+ 444: 'bicycle-built-for-two, tandem bicycle, tandem',
446
+ 445: 'bikini, two-piece',
447
+ 446: 'binder, ring-binder',
448
+ 447: 'binoculars, field glasses, opera glasses',
449
+ 448: 'birdhouse',
450
+ 449: 'boathouse',
451
+ 450: 'bobsled, bobsleigh, bob',
452
+ 451: 'bolo tie, bolo, bola tie, bola',
453
+ 452: 'bonnet, poke bonnet',
454
+ 453: 'bookcase',
455
+ 454: 'bookshop, bookstore, bookstall',
456
+ 455: 'bottlecap',
457
+ 456: 'bow',
458
+ 457: 'bow tie, bow-tie, bowtie',
459
+ 458: 'brass, memorial tablet, plaque',
460
+ 459: 'brassiere, bra, bandeau',
461
+ 460: 'breakwater, groin, groyne, mole, bulwark, seawall, jetty',
462
+ 461: 'breastplate, aegis, egis',
463
+ 462: 'broom',
464
+ 463: 'bucket, pail',
465
+ 464: 'buckle',
466
+ 465: 'bulletproof vest',
467
+ 466: 'bullet train, bullet',
468
+ 467: 'butcher shop, meat market',
469
+ 468: 'cab, hack, taxi, taxicab',
470
+ 469: 'caldron, cauldron',
471
+ 470: 'candle, taper, wax light',
472
+ 471: 'cannon',
473
+ 472: 'canoe',
474
+ 473: 'can opener, tin opener',
475
+ 474: 'cardigan',
476
+ 475: 'car mirror',
477
+ 476: 'carousel, carrousel, merry-go-round, roundabout, whirligig',
478
+ 477: "carpenter's kit, tool kit",
479
+ 478: 'carton',
480
+ 479: 'car wheel',
481
+ 480: 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM',
482
+ 481: 'cassette',
483
+ 482: 'cassette player',
484
+ 483: 'castle',
485
+ 484: 'catamaran',
486
+ 485: 'CD player',
487
+ 486: 'cello, violoncello',
488
+ 487: 'cellular telephone, cellular phone, cellphone, cell, mobile phone',
489
+ 488: 'chain',
490
+ 489: 'chainlink fence',
491
+ 490: 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour',
492
+ 491: 'chain saw, chainsaw',
493
+ 492: 'chest',
494
+ 493: 'chiffonier, commode',
495
+ 494: 'chime, bell, gong',
496
+ 495: 'china cabinet, china closet',
497
+ 496: 'Christmas stocking',
498
+ 497: 'church, church building',
499
+ 498: 'cinema, movie theater, movie theatre, movie house, picture palace',
500
+ 499: 'cleaver, meat cleaver, chopper',
501
+ 500: 'cliff dwelling',
502
+ 501: 'cloak',
503
+ 502: 'clog, geta, patten, sabot',
504
+ 503: 'cocktail shaker',
505
+ 504: 'coffee mug',
506
+ 505: 'coffeepot',
507
+ 506: 'coil, spiral, volute, whorl, helix',
508
+ 507: 'combination lock',
509
+ 508: 'computer keyboard, keypad',
510
+ 509: 'confectionery, confectionary, candy store',
511
+ 510: 'container ship, containership, container vessel',
512
+ 511: 'convertible',
513
+ 512: 'corkscrew, bottle screw',
514
+ 513: 'cornet, horn, trumpet, trump',
515
+ 514: 'cowboy boot',
516
+ 515: 'cowboy hat, ten-gallon hat',
517
+ 516: 'cradle',
518
+ 517: 'crane',
519
+ 518: 'crash helmet',
520
+ 519: 'crate',
521
+ 520: 'crib, cot',
522
+ 521: 'Crock Pot',
523
+ 522: 'croquet ball',
524
+ 523: 'crutch',
525
+ 524: 'cuirass',
526
+ 525: 'dam, dike, dyke',
527
+ 526: 'desk',
528
+ 527: 'desktop computer',
529
+ 528: 'dial telephone, dial phone',
530
+ 529: 'diaper, nappy, napkin',
531
+ 530: 'digital clock',
532
+ 531: 'digital watch',
533
+ 532: 'dining table, board',
534
+ 533: 'dishrag, dishcloth',
535
+ 534: 'dishwasher, dish washer, dishwashing machine',
536
+ 535: 'disk brake, disc brake',
537
+ 536: 'dock, dockage, docking facility',
538
+ 537: 'dogsled, dog sled, dog sleigh',
539
+ 538: 'dome',
540
+ 539: 'doormat, welcome mat',
541
+ 540: 'drilling platform, offshore rig',
542
+ 541: 'drum, membranophone, tympan',
543
+ 542: 'drumstick',
544
+ 543: 'dumbbell',
545
+ 544: 'Dutch oven',
546
+ 545: 'electric fan, blower',
547
+ 546: 'electric guitar',
548
+ 547: 'electric locomotive',
549
+ 548: 'entertainment center',
550
+ 549: 'envelope',
551
+ 550: 'espresso maker',
552
+ 551: 'face powder',
553
+ 552: 'feather boa, boa',
554
+ 553: 'file, file cabinet, filing cabinet',
555
+ 554: 'fireboat',
556
+ 555: 'fire engine, fire truck',
557
+ 556: 'fire screen, fireguard',
558
+ 557: 'flagpole, flagstaff',
559
+ 558: 'flute, transverse flute',
560
+ 559: 'folding chair',
561
+ 560: 'football helmet',
562
+ 561: 'forklift',
563
+ 562: 'fountain',
564
+ 563: 'fountain pen',
565
+ 564: 'four-poster',
566
+ 565: 'freight car',
567
+ 566: 'French horn, horn',
568
+ 567: 'frying pan, frypan, skillet',
569
+ 568: 'fur coat',
570
+ 569: 'garbage truck, dustcart',
571
+ 570: 'gasmask, respirator, gas helmet',
572
+ 571: 'gas pump, gasoline pump, petrol pump, island dispenser',
573
+ 572: 'goblet',
574
+ 573: 'go-kart',
575
+ 574: 'golf ball',
576
+ 575: 'golfcart, golf cart',
577
+ 576: 'gondola',
578
+ 577: 'gong, tam-tam',
579
+ 578: 'gown',
580
+ 579: 'grand piano, grand',
581
+ 580: 'greenhouse, nursery, glasshouse',
582
+ 581: 'grille, radiator grille',
583
+ 582: 'grocery store, grocery, food market, market',
584
+ 583: 'guillotine',
585
+ 584: 'hair slide',
586
+ 585: 'hair spray',
587
+ 586: 'half track',
588
+ 587: 'hammer',
589
+ 588: 'hamper',
590
+ 589: 'hand blower, blow dryer, blow drier, hair dryer, hair drier',
591
+ 590: 'hand-held computer, hand-held microcomputer',
592
+ 591: 'handkerchief, hankie, hanky, hankey',
593
+ 592: 'hard disc, hard disk, fixed disk',
594
+ 593: 'harmonica, mouth organ, harp, mouth harp',
595
+ 594: 'harp',
596
+ 595: 'harvester, reaper',
597
+ 596: 'hatchet',
598
+ 597: 'holster',
599
+ 598: 'home theater, home theatre',
600
+ 599: 'honeycomb',
601
+ 600: 'hook, claw',
602
+ 601: 'hoopskirt, crinoline',
603
+ 602: 'horizontal bar, high bar',
604
+ 603: 'horse cart, horse-cart',
605
+ 604: 'hourglass',
606
+ 605: 'iPod',
607
+ 606: 'iron, smoothing iron',
608
+ 607: "jack-o'-lantern",
609
+ 608: 'jean, blue jean, denim',
610
+ 609: 'jeep, landrover',
611
+ 610: 'jersey, T-shirt, tee shirt',
612
+ 611: 'jigsaw puzzle',
613
+ 612: 'jinrikisha, ricksha, rickshaw',
614
+ 613: 'joystick',
615
+ 614: 'kimono',
616
+ 615: 'knee pad',
617
+ 616: 'knot',
618
+ 617: 'lab coat, laboratory coat',
619
+ 618: 'ladle',
620
+ 619: 'lampshade, lamp shade',
621
+ 620: 'laptop, laptop computer',
622
+ 621: 'lawn mower, mower',
623
+ 622: 'lens cap, lens cover',
624
+ 623: 'letter opener, paper knife, paperknife',
625
+ 624: 'library',
626
+ 625: 'lifeboat',
627
+ 626: 'lighter, light, igniter, ignitor',
628
+ 627: 'limousine, limo',
629
+ 628: 'liner, ocean liner',
630
+ 629: 'lipstick, lip rouge',
631
+ 630: 'Loafer',
632
+ 631: 'lotion',
633
+ 632: 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system',
634
+ 633: "loupe, jeweler's loupe",
635
+ 634: 'lumbermill, sawmill',
636
+ 635: 'magnetic compass',
637
+ 636: 'mailbag, postbag',
638
+ 637: 'mailbox, letter box',
639
+ 638: 'maillot',
640
+ 639: 'maillot, tank suit',
641
+ 640: 'manhole cover',
642
+ 641: 'maraca',
643
+ 642: 'marimba, xylophone',
644
+ 643: 'mask',
645
+ 644: 'matchstick',
646
+ 645: 'maypole',
647
+ 646: 'maze, labyrinth',
648
+ 647: 'measuring cup',
649
+ 648: 'medicine chest, medicine cabinet',
650
+ 649: 'megalith, megalithic structure',
651
+ 650: 'microphone, mike',
652
+ 651: 'microwave, microwave oven',
653
+ 652: 'military uniform',
654
+ 653: 'milk can',
655
+ 654: 'minibus',
656
+ 655: 'miniskirt, mini',
657
+ 656: 'minivan',
658
+ 657: 'missile',
659
+ 658: 'mitten',
660
+ 659: 'mixing bowl',
661
+ 660: 'mobile home, manufactured home',
662
+ 661: 'Model T',
663
+ 662: 'modem',
664
+ 663: 'monastery',
665
+ 664: 'monitor',
666
+ 665: 'moped',
667
+ 666: 'mortar',
668
+ 667: 'mortarboard',
669
+ 668: 'mosque',
670
+ 669: 'mosquito net',
671
+ 670: 'motor scooter, scooter',
672
+ 671: 'mountain bike, all-terrain bike, off-roader',
673
+ 672: 'mountain tent',
674
+ 673: 'mouse, computer mouse',
675
+ 674: 'mousetrap',
676
+ 675: 'moving van',
677
+ 676: 'muzzle',
678
+ 677: 'nail',
679
+ 678: 'neck brace',
680
+ 679: 'necklace',
681
+ 680: 'nipple',
682
+ 681: 'notebook, notebook computer',
683
+ 682: 'obelisk',
684
+ 683: 'oboe, hautboy, hautbois',
685
+ 684: 'ocarina, sweet potato',
686
+ 685: 'odometer, hodometer, mileometer, milometer',
687
+ 686: 'oil filter',
688
+ 687: 'organ, pipe organ',
689
+ 688: 'oscilloscope, scope, cathode-ray oscilloscope, CRO',
690
+ 689: 'overskirt',
691
+ 690: 'oxcart',
692
+ 691: 'oxygen mask',
693
+ 692: 'packet',
694
+ 693: 'paddle, boat paddle',
695
+ 694: 'paddlewheel, paddle wheel',
696
+ 695: 'padlock',
697
+ 696: 'paintbrush',
698
+ 697: "pajama, pyjama, pj's, jammies",
699
+ 698: 'palace',
700
+ 699: 'panpipe, pandean pipe, syrinx',
701
+ 700: 'paper towel',
702
+ 701: 'parachute, chute',
703
+ 702: 'parallel bars, bars',
704
+ 703: 'park bench',
705
+ 704: 'parking meter',
706
+ 705: 'passenger car, coach, carriage',
707
+ 706: 'patio, terrace',
708
+ 707: 'pay-phone, pay-station',
709
+ 708: 'pedestal, plinth, footstall',
710
+ 709: 'pencil box, pencil case',
711
+ 710: 'pencil sharpener',
712
+ 711: 'perfume, essence',
713
+ 712: 'Petri dish',
714
+ 713: 'photocopier',
715
+ 714: 'pick, plectrum, plectron',
716
+ 715: 'pickelhaube',
717
+ 716: 'picket fence, paling',
718
+ 717: 'pickup, pickup truck',
719
+ 718: 'pier',
720
+ 719: 'piggy bank, penny bank',
721
+ 720: 'pill bottle',
722
+ 721: 'pillow',
723
+ 722: 'ping-pong ball',
724
+ 723: 'pinwheel',
725
+ 724: 'pirate, pirate ship',
726
+ 725: 'pitcher, ewer',
727
+ 726: "plane, carpenter's plane, woodworking plane",
728
+ 727: 'planetarium',
729
+ 728: 'plastic bag',
730
+ 729: 'plate rack',
731
+ 730: 'plow, plough',
732
+ 731: "plunger, plumber's helper",
733
+ 732: 'Polaroid camera, Polaroid Land camera',
734
+ 733: 'pole',
735
+ 734: 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria',
736
+ 735: 'poncho',
737
+ 736: 'pool table, billiard table, snooker table',
738
+ 737: 'pop bottle, soda bottle',
739
+ 738: 'pot, flowerpot',
740
+ 739: "potter's wheel",
741
+ 740: 'power drill',
742
+ 741: 'prayer rug, prayer mat',
743
+ 742: 'printer',
744
+ 743: 'prison, prison house',
745
+ 744: 'projectile, missile',
746
+ 745: 'projector',
747
+ 746: 'puck, hockey puck',
748
+ 747: 'punching bag, punch bag, punching ball, punchball',
749
+ 748: 'purse',
750
+ 749: 'quill, quill pen',
751
+ 750: 'quilt, comforter, comfort, puff',
752
+ 751: 'racer, race car, racing car',
753
+ 752: 'racket, racquet',
754
+ 753: 'radiator',
755
+ 754: 'radio, wireless',
756
+ 755: 'radio telescope, radio reflector',
757
+ 756: 'rain barrel',
758
+ 757: 'recreational vehicle, RV, R.V.',
759
+ 758: 'reel',
760
+ 759: 'reflex camera',
761
+ 760: 'refrigerator, icebox',
762
+ 761: 'remote control, remote',
763
+ 762: 'restaurant, eating house, eating place, eatery',
764
+ 763: 'revolver, six-gun, six-shooter',
765
+ 764: 'rifle',
766
+ 765: 'rocking chair, rocker',
767
+ 766: 'rotisserie',
768
+ 767: 'rubber eraser, rubber, pencil eraser',
769
+ 768: 'rugby ball',
770
+ 769: 'rule, ruler',
771
+ 770: 'running shoe',
772
+ 771: 'safe',
773
+ 772: 'safety pin',
774
+ 773: 'saltshaker, salt shaker',
775
+ 774: 'sandal',
776
+ 775: 'sarong',
777
+ 776: 'sax, saxophone',
778
+ 777: 'scabbard',
779
+ 778: 'scale, weighing machine',
780
+ 779: 'school bus',
781
+ 780: 'schooner',
782
+ 781: 'scoreboard',
783
+ 782: 'screen, CRT screen',
784
+ 783: 'screw',
785
+ 784: 'screwdriver',
786
+ 785: 'seat belt, seatbelt',
787
+ 786: 'sewing machine',
788
+ 787: 'shield, buckler',
789
+ 788: 'shoe shop, shoe-shop, shoe store',
790
+ 789: 'shoji',
791
+ 790: 'shopping basket',
792
+ 791: 'shopping cart',
793
+ 792: 'shovel',
794
+ 793: 'shower cap',
795
+ 794: 'shower curtain',
796
+ 795: 'ski',
797
+ 796: 'ski mask',
798
+ 797: 'sleeping bag',
799
+ 798: 'slide rule, slipstick',
800
+ 799: 'sliding door',
801
+ 800: 'slot, one-armed bandit',
802
+ 801: 'snorkel',
803
+ 802: 'snowmobile',
804
+ 803: 'snowplow, snowplough',
805
+ 804: 'soap dispenser',
806
+ 805: 'soccer ball',
807
+ 806: 'sock',
808
+ 807: 'solar dish, solar collector, solar furnace',
809
+ 808: 'sombrero',
810
+ 809: 'soup bowl',
811
+ 810: 'space bar',
812
+ 811: 'space heater',
813
+ 812: 'space shuttle',
814
+ 813: 'spatula',
815
+ 814: 'speedboat',
816
+ 815: "spider web, spider's web",
817
+ 816: 'spindle',
818
+ 817: 'sports car, sport car',
819
+ 818: 'spotlight, spot',
820
+ 819: 'stage',
821
+ 820: 'steam locomotive',
822
+ 821: 'steel arch bridge',
823
+ 822: 'steel drum',
824
+ 823: 'stethoscope',
825
+ 824: 'stole',
826
+ 825: 'stone wall',
827
+ 826: 'stopwatch, stop watch',
828
+ 827: 'stove',
829
+ 828: 'strainer',
830
+ 829: 'streetcar, tram, tramcar, trolley, trolley car',
831
+ 830: 'stretcher',
832
+ 831: 'studio couch, day bed',
833
+ 832: 'stupa, tope',
834
+ 833: 'submarine, pigboat, sub, U-boat',
835
+ 834: 'suit, suit of clothes',
836
+ 835: 'sundial',
837
+ 836: 'sunglass',
838
+ 837: 'sunglasses, dark glasses, shades',
839
+ 838: 'sunscreen, sunblock, sun blocker',
840
+ 839: 'suspension bridge',
841
+ 840: 'swab, swob, mop',
842
+ 841: 'sweatshirt',
843
+ 842: 'swimming trunks, bathing trunks',
844
+ 843: 'swing',
845
+ 844: 'switch, electric switch, electrical switch',
846
+ 845: 'syringe',
847
+ 846: 'table lamp',
848
+ 847: 'tank, army tank, armored combat vehicle, armoured combat vehicle',
849
+ 848: 'tape player',
850
+ 849: 'teapot',
851
+ 850: 'teddy, teddy bear',
852
+ 851: 'television, television system',
853
+ 852: 'tennis ball',
854
+ 853: 'thatch, thatched roof',
855
+ 854: 'theater curtain, theatre curtain',
856
+ 855: 'thimble',
857
+ 856: 'thresher, thrasher, threshing machine',
858
+ 857: 'throne',
859
+ 858: 'tile roof',
860
+ 859: 'toaster',
861
+ 860: 'tobacco shop, tobacconist shop, tobacconist',
862
+ 861: 'toilet seat',
863
+ 862: 'torch',
864
+ 863: 'totem pole',
865
+ 864: 'tow truck, tow car, wrecker',
866
+ 865: 'toyshop',
867
+ 866: 'tractor',
868
+ 867: 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi',
869
+ 868: 'tray',
870
+ 869: 'trench coat',
871
+ 870: 'tricycle, trike, velocipede',
872
+ 871: 'trimaran',
873
+ 872: 'tripod',
874
+ 873: 'triumphal arch',
875
+ 874: 'trolleybus, trolley coach, trackless trolley',
876
+ 875: 'trombone',
877
+ 876: 'tub, vat',
878
+ 877: 'turnstile',
879
+ 878: 'typewriter keyboard',
880
+ 879: 'umbrella',
881
+ 880: 'unicycle, monocycle',
882
+ 881: 'upright, upright piano',
883
+ 882: 'vacuum, vacuum cleaner',
884
+ 883: 'vase',
885
+ 884: 'vault',
886
+ 885: 'velvet',
887
+ 886: 'vending machine',
888
+ 887: 'vestment',
889
+ 888: 'viaduct',
890
+ 889: 'violin, fiddle',
891
+ 890: 'volleyball',
892
+ 891: 'waffle iron',
893
+ 892: 'wall clock',
894
+ 893: 'wallet, billfold, notecase, pocketbook',
895
+ 894: 'wardrobe, closet, press',
896
+ 895: 'warplane, military plane',
897
+ 896: 'washbasin, handbasin, washbowl, lavabo, wash-hand basin',
898
+ 897: 'washer, automatic washer, washing machine',
899
+ 898: 'water bottle',
900
+ 899: 'water jug',
901
+ 900: 'water tower',
902
+ 901: 'whiskey jug',
903
+ 902: 'whistle',
904
+ 903: 'wig',
905
+ 904: 'window screen',
906
+ 905: 'window shade',
907
+ 906: 'Windsor tie',
908
+ 907: 'wine bottle',
909
+ 908: 'wing',
910
+ 909: 'wok',
911
+ 910: 'wooden spoon',
912
+ 911: 'wool, woolen, woollen',
913
+ 912: 'worm fence, snake fence, snake-rail fence, Virginia fence',
914
+ 913: 'wreck',
915
+ 914: 'yawl',
916
+ 915: 'yurt',
917
+ 916: 'web site, website, internet site, site',
918
+ 917: 'comic book',
919
+ 918: 'crossword puzzle, crossword',
920
+ 919: 'street sign',
921
+ 920: 'traffic light, traffic signal, stoplight',
922
+ 921: 'book jacket, dust cover, dust jacket, dust wrapper',
923
+ 922: 'menu',
924
+ 923: 'plate',
925
+ 924: 'guacamole',
926
+ 925: 'consomme',
927
+ 926: 'hot pot, hotpot',
928
+ 927: 'trifle',
929
+ 928: 'ice cream, icecream',
930
+ 929: 'ice lolly, lolly, lollipop, popsicle',
931
+ 930: 'French loaf',
932
+ 931: 'bagel, beigel',
933
+ 932: 'pretzel',
934
+ 933: 'cheeseburger',
935
+ 934: 'hotdog, hot dog, red hot',
936
+ 935: 'mashed potato',
937
+ 936: 'head cabbage',
938
+ 937: 'broccoli',
939
+ 938: 'cauliflower',
940
+ 939: 'zucchini, courgette',
941
+ 940: 'spaghetti squash',
942
+ 941: 'acorn squash',
943
+ 942: 'butternut squash',
944
+ 943: 'cucumber, cuke',
945
+ 944: 'artichoke, globe artichoke',
946
+ 945: 'bell pepper',
947
+ 946: 'cardoon',
948
+ 947: 'mushroom',
949
+ 948: 'Granny Smith',
950
+ 949: 'strawberry',
951
+ 950: 'orange',
952
+ 951: 'lemon',
953
+ 952: 'fig',
954
+ 953: 'pineapple, ananas',
955
+ 954: 'banana',
956
+ 955: 'jackfruit, jak, jack',
957
+ 956: 'custard apple',
958
+ 957: 'pomegranate',
959
+ 958: 'hay',
960
+ 959: 'carbonara',
961
+ 960: 'chocolate sauce, chocolate syrup',
962
+ 961: 'dough',
963
+ 962: 'meat loaf, meatloaf',
964
+ 963: 'pizza, pizza pie',
965
+ 964: 'potpie',
966
+ 965: 'burrito',
967
+ 966: 'red wine',
968
+ 967: 'espresso',
969
+ 968: 'cup',
970
+ 969: 'eggnog',
971
+ 970: 'alp',
972
+ 971: 'bubble',
973
+ 972: 'cliff, drop, drop-off',
974
+ 973: 'coral reef',
975
+ 974: 'geyser',
976
+ 975: 'lakeside, lakeshore',
977
+ 976: 'promontory, headland, head, foreland',
978
+ 977: 'sandbar, sand bar',
979
+ 978: 'seashore, coast, seacoast, sea-coast',
980
+ 979: 'valley, vale',
981
+ 980: 'volcano',
982
+ 981: 'ballplayer, baseball player',
983
+ 982: 'groom, bridegroom',
984
+ 983: 'scuba diver',
985
+ 984: 'rapeseed',
986
+ 985: 'daisy',
987
+ 986: "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum",
988
+ 987: 'corn',
989
+ 988: 'acorn',
990
+ 989: 'hip, rose hip, rosehip',
991
+ 990: 'buckeye, horse chestnut, conker',
992
+ 991: 'coral fungus',
993
+ 992: 'agaric',
994
+ 993: 'gyromitra',
995
+ 994: 'stinkhorn, carrion fungus',
996
+ 995: 'earthstar',
997
+ 996: 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa',
998
+ 997: 'bolete',
999
+ 998: 'ear, spike, capitulum',
1000
+ 999: 'toilet tissue, toilet paper, bathroom tissue'
gligen/ldm/data/index_synset.yaml ADDED
@@ -0,0 +1,1000 @@
1
+ 0: n01440764
2
+ 1: n01443537
3
+ 2: n01484850
4
+ 3: n01491361
5
+ 4: n01494475
6
+ 5: n01496331
7
+ 6: n01498041
8
+ 7: n01514668
9
+ 8: n07646067
10
+ 9: n01518878
11
+ 10: n01530575
12
+ 11: n01531178
13
+ 12: n01532829
14
+ 13: n01534433
15
+ 14: n01537544
16
+ 15: n01558993
17
+ 16: n01560419
18
+ 17: n01580077
19
+ 18: n01582220
20
+ 19: n01592084
21
+ 20: n01601694
22
+ 21: n13382471
23
+ 22: n01614925
24
+ 23: n01616318
25
+ 24: n01622779
26
+ 25: n01629819
27
+ 26: n01630670
28
+ 27: n01631663
29
+ 28: n01632458
30
+ 29: n01632777
31
+ 30: n01641577
32
+ 31: n01644373
33
+ 32: n01644900
34
+ 33: n01664065
35
+ 34: n01665541
36
+ 35: n01667114
37
+ 36: n01667778
38
+ 37: n01669191
39
+ 38: n01675722
40
+ 39: n01677366
41
+ 40: n01682714
42
+ 41: n01685808
43
+ 42: n01687978
44
+ 43: n01688243
45
+ 44: n01689811
46
+ 45: n01692333
47
+ 46: n01693334
48
+ 47: n01694178
49
+ 48: n01695060
50
+ 49: n01697457
51
+ 50: n01698640
52
+ 51: n01704323
53
+ 52: n01728572
54
+ 53: n01728920
55
+ 54: n01729322
56
+ 55: n01729977
57
+ 56: n01734418
58
+ 57: n01735189
59
+ 58: n01737021
60
+ 59: n01739381
61
+ 60: n01740131
62
+ 61: n01742172
63
+ 62: n01744401
64
+ 63: n01748264
65
+ 64: n01749939
66
+ 65: n01751748
67
+ 66: n01753488
68
+ 67: n01755581
69
+ 68: n01756291
70
+ 69: n01768244
71
+ 70: n01770081
72
+ 71: n01770393
73
+ 72: n01773157
74
+ 73: n01773549
75
+ 74: n01773797
76
+ 75: n01774384
77
+ 76: n01774750
78
+ 77: n01775062
79
+ 78: n04432308
80
+ 79: n01784675
81
+ 80: n01795545
82
+ 81: n01796340
83
+ 82: n01797886
84
+ 83: n01798484
85
+ 84: n01806143
86
+ 85: n07647321
87
+ 86: n07647496
88
+ 87: n01817953
89
+ 88: n01818515
90
+ 89: n01819313
91
+ 90: n01820546
92
+ 91: n01824575
93
+ 92: n01828970
94
+ 93: n01829413
95
+ 94: n01833805
96
+ 95: n01843065
97
+ 96: n01843383
98
+ 97: n01847000
99
+ 98: n01855032
100
+ 99: n07646821
101
+ 100: n01860187
102
+ 101: n01871265
103
+ 102: n01872772
104
+ 103: n01873310
105
+ 104: n01877812
106
+ 105: n01882714
107
+ 106: n01883070
108
+ 107: n01910747
109
+ 108: n01914609
110
+ 109: n01917289
111
+ 110: n01924916
112
+ 111: n01930112
113
+ 112: n01943899
114
+ 113: n01944390
115
+ 114: n13719102
116
+ 115: n01950731
117
+ 116: n01955084
118
+ 117: n01968897
119
+ 118: n01978287
120
+ 119: n01978455
121
+ 120: n01980166
122
+ 121: n01981276
123
+ 122: n01983481
124
+ 123: n01984695
125
+ 124: n01985128
126
+ 125: n01986214
127
+ 126: n01990800
128
+ 127: n02002556
129
+ 128: n02002724
130
+ 129: n02006656
131
+ 130: n02007558
132
+ 131: n02009229
133
+ 132: n02009912
134
+ 133: n02011460
135
+ 134: n03126707
136
+ 135: n02013706
137
+ 136: n02017213
138
+ 137: n02018207
139
+ 138: n02018795
140
+ 139: n02025239
141
+ 140: n02027492
142
+ 141: n02028035
143
+ 142: n02033041
144
+ 143: n02037110
145
+ 144: n02051845
146
+ 145: n02056570
147
+ 146: n02058221
148
+ 147: n02066245
149
+ 148: n02071294
150
+ 149: n02074367
151
+ 150: n02077923
152
+ 151: n08742578
153
+ 152: n02085782
154
+ 153: n02085936
155
+ 154: n02086079
156
+ 155: n02086240
157
+ 156: n02086646
158
+ 157: n02086910
159
+ 158: n02087046
160
+ 159: n02087394
161
+ 160: n02088094
162
+ 161: n02088238
163
+ 162: n02088364
164
+ 163: n02088466
165
+ 164: n02088632
166
+ 165: n02089078
167
+ 166: n02089867
168
+ 167: n02089973
169
+ 168: n02090379
170
+ 169: n02090622
171
+ 170: n02090721
172
+ 171: n02091032
173
+ 172: n02091134
174
+ 173: n02091244
175
+ 174: n02091467
176
+ 175: n02091635
177
+ 176: n02091831
178
+ 177: n02092002
179
+ 178: n02092339
180
+ 179: n02093256
181
+ 180: n02093428
182
+ 181: n02093647
183
+ 182: n02093754
184
+ 183: n02093859
185
+ 184: n02093991
186
+ 185: n02094114
187
+ 186: n02094258
188
+ 187: n02094433
189
+ 188: n02095314
190
+ 189: n02095570
191
+ 190: n02095889
192
+ 191: n02096051
193
+ 192: n02096177
194
+ 193: n02096294
195
+ 194: n02096437
196
+ 195: n02096585
197
+ 196: n02097047
198
+ 197: n02097130
199
+ 198: n02097209
200
+ 199: n02097298
201
+ 200: n02097474
202
+ 201: n02097658
203
+ 202: n02098105
204
+ 203: n02098286
205
+ 204: n02098413
206
+ 205: n02099267
207
+ 206: n02099429
208
+ 207: n02099601
209
+ 208: n02099712
210
+ 209: n02099849
211
+ 210: n02100236
212
+ 211: n02100583
213
+ 212: n02100735
214
+ 213: n02100877
215
+ 214: n02101006
216
+ 215: n02101388
217
+ 216: n02101556
218
+ 217: n02102040
219
+ 218: n02102177
220
+ 219: n02102318
221
+ 220: n02102480
222
+ 221: n02102973
223
+ 222: n02104029
224
+ 223: n02104365
225
+ 224: n02105056
226
+ 225: n02105162
227
+ 226: n02105251
228
+ 227: n02105412
229
+ 228: n02105505
230
+ 229: n02105641
231
+ 230: n02105855
232
+ 231: n02106030
233
+ 232: n02106166
234
+ 233: n02106382
235
+ 234: n02106550
236
+ 235: n02106662
237
+ 236: n02107142
238
+ 237: n02107312
239
+ 238: n02107574
240
+ 239: n02107683
241
+ 240: n02107908
242
+ 241: n02108000
243
+ 242: n02108089
244
+ 243: n02108422
245
+ 244: n02108551
246
+ 245: n02108915
247
+ 246: n02109047
248
+ 247: n02109525
249
+ 248: n02109961
250
+ 249: n02110063
251
+ 250: n02110185
252
+ 251: n02110341
253
+ 252: n02110627
254
+ 253: n02110806
255
+ 254: n02110958
256
+ 255: n02111129
257
+ 256: n02111277
258
+ 257: n02111500
259
+ 258: n02111889
260
+ 259: n02112018
261
+ 260: n02112137
262
+ 261: n02112350
263
+ 262: n02112706
264
+ 263: n02113023
265
+ 264: n02113186
266
+ 265: n02113624
267
+ 266: n02113712
268
+ 267: n02113799
269
+ 268: n02113978
270
+ 269: n02114367
271
+ 270: n02114548
272
+ 271: n02114712
273
+ 272: n02114855
274
+ 273: n02115641
275
+ 274: n02115913
276
+ 275: n02116738
277
+ 276: n02117135
278
+ 277: n02119022
279
+ 278: n02119789
280
+ 279: n02120079
281
+ 280: n02120505
282
+ 281: n02123045
283
+ 282: n02123159
284
+ 283: n02123394
285
+ 284: n02123597
286
+ 285: n02124075
287
+ 286: n02125311
288
+ 287: n02127052
289
+ 288: n02128385
290
+ 289: n02128757
291
+ 290: n02128925
292
+ 291: n02129165
293
+ 292: n02129604
294
+ 293: n02130308
295
+ 294: n02132136
296
+ 295: n02133161
297
+ 296: n02134084
298
+ 297: n02134418
299
+ 298: n02137549
300
+ 299: n02138441
301
+ 300: n02165105
302
+ 301: n02165456
303
+ 302: n02167151
304
+ 303: n02168699
305
+ 304: n02169497
306
+ 305: n02172182
307
+ 306: n02174001
308
+ 307: n02177972
309
+ 308: n03373237
310
+ 309: n07975909
311
+ 310: n02219486
312
+ 311: n02226429
313
+ 312: n02229544
314
+ 313: n02231487
315
+ 314: n02233338
316
+ 315: n02236044
317
+ 316: n02256656
318
+ 317: n02259212
319
+ 318: n02264363
320
+ 319: n02268443
321
+ 320: n02268853
322
+ 321: n02276258
323
+ 322: n02277742
324
+ 323: n02279972
325
+ 324: n02280649
326
+ 325: n02281406
327
+ 326: n02281787
328
+ 327: n02317335
329
+ 328: n02319095
330
+ 329: n02321529
331
+ 330: n02325366
332
+ 331: n02326432
333
+ 332: n02328150
334
+ 333: n02342885
335
+ 334: n02346627
336
+ 335: n02356798
337
+ 336: n02361337
338
+ 337: n05262120
339
+ 338: n02364673
340
+ 339: n02389026
341
+ 340: n02391049
342
+ 341: n02395406
343
+ 342: n02396427
344
+ 343: n02397096
345
+ 344: n02398521
346
+ 345: n02403003
347
+ 346: n02408429
348
+ 347: n02410509
349
+ 348: n02412080
350
+ 349: n02415577
351
+ 350: n02417914
352
+ 351: n02422106
353
+ 352: n02422699
354
+ 353: n02423022
355
+ 354: n02437312
356
+ 355: n02437616
357
+ 356: n10771990
358
+ 357: n14765497
359
+ 358: n02443114
360
+ 359: n02443484
361
+ 360: n14765785
362
+ 361: n02445715
363
+ 362: n02447366
364
+ 363: n02454379
365
+ 364: n02457408
366
+ 365: n02480495
367
+ 366: n02480855
368
+ 367: n02481823
369
+ 368: n02483362
370
+ 369: n02483708
371
+ 370: n02484975
372
+ 371: n02486261
373
+ 372: n02486410
374
+ 373: n02487347
375
+ 374: n02488291
376
+ 375: n02488702
377
+ 376: n02489166
378
+ 377: n02490219
379
+ 378: n02492035
380
+ 379: n02492660
381
+ 380: n02493509
382
+ 381: n02493793
383
+ 382: n02494079
384
+ 383: n02497673
385
+ 384: n02500267
386
+ 385: n02504013
387
+ 386: n02504458
388
+ 387: n02509815
389
+ 388: n02510455
390
+ 389: n02514041
391
+ 390: n07783967
392
+ 391: n02536864
393
+ 392: n02606052
394
+ 393: n02607072
395
+ 394: n02640242
396
+ 395: n02641379
397
+ 396: n02643566
398
+ 397: n02655020
399
+ 398: n02666347
400
+ 399: n02667093
401
+ 400: n02669723
402
+ 401: n02672831
403
+ 402: n02676566
404
+ 403: n02687172
405
+ 404: n02690373
406
+ 405: n02692877
407
+ 406: n02699494
408
+ 407: n02701002
409
+ 408: n02704792
410
+ 409: n02708093
411
+ 410: n02727426
412
+ 411: n08496334
413
+ 412: n02747177
414
+ 413: n02749479
415
+ 414: n02769748
416
+ 415: n02776631
417
+ 416: n02777292
418
+ 417: n02782329
419
+ 418: n02783161
420
+ 419: n02786058
421
+ 420: n02787622
422
+ 421: n02788148
423
+ 422: n02790996
424
+ 423: n02791124
425
+ 424: n02791270
426
+ 425: n02793495
427
+ 426: n02794156
428
+ 427: n02795169
429
+ 428: n02797295
430
+ 429: n02799071
431
+ 430: n02802426
432
+ 431: n02804515
433
+ 432: n02804610
434
+ 433: n02807133
435
+ 434: n02808304
436
+ 435: n02808440
437
+ 436: n02814533
438
+ 437: n02814860
439
+ 438: n02815834
440
+ 439: n02817516
441
+ 440: n02823428
442
+ 441: n02823750
443
+ 442: n02825657
444
+ 443: n02834397
445
+ 444: n02835271
446
+ 445: n02837789
447
+ 446: n02840245
448
+ 447: n02841315
449
+ 448: n02843684
450
+ 449: n02859443
451
+ 450: n02860847
452
+ 451: n02865351
453
+ 452: n02869837
454
+ 453: n02870880
455
+ 454: n02871525
456
+ 455: n02877765
457
+ 456: n02880308
458
+ 457: n02883205
459
+ 458: n02892201
460
+ 459: n02892767
461
+ 460: n02894605
462
+ 461: n02895154
463
+ 462: n12520864
464
+ 463: n02909870
465
+ 464: n02910353
466
+ 465: n02916936
467
+ 466: n02917067
468
+ 467: n02927161
469
+ 468: n02930766
470
+ 469: n02939185
471
+ 470: n02948072
472
+ 471: n02950826
473
+ 472: n02951358
474
+ 473: n02951585
475
+ 474: n02963159
476
+ 475: n02965783
477
+ 476: n02966193
478
+ 477: n02966687
479
+ 478: n02971356
480
+ 479: n02974003
481
+ 480: n02977058
482
+ 481: n02978881
483
+ 482: n02979186
484
+ 483: n02980441
485
+ 484: n02981792
486
+ 485: n02988304
487
+ 486: n02992211
488
+ 487: n02992529
489
+ 488: n13652994
490
+ 489: n03000134
491
+ 490: n03000247
492
+ 491: n03000684
493
+ 492: n03014705
494
+ 493: n03016953
495
+ 494: n03017168
496
+ 495: n03018349
497
+ 496: n03026506
498
+ 497: n03028079
499
+ 498: n03032252
500
+ 499: n03041632
501
+ 500: n03042490
502
+ 501: n03045698
503
+ 502: n03047690
504
+ 503: n03062245
505
+ 504: n03063599
506
+ 505: n03063689
507
+ 506: n03065424
508
+ 507: n03075370
509
+ 508: n03085013
510
+ 509: n03089624
511
+ 510: n03095699
512
+ 511: n03100240
513
+ 512: n03109150
514
+ 513: n03110669
515
+ 514: n03124043
516
+ 515: n03124170
517
+ 516: n15142452
518
+ 517: n03126707
519
+ 518: n03127747
520
+ 519: n03127925
521
+ 520: n03131574
522
+ 521: n03133878
523
+ 522: n03134739
524
+ 523: n03141823
525
+ 524: n03146219
526
+ 525: n03160309
527
+ 526: n03179701
528
+ 527: n03180011
529
+ 528: n03187595
530
+ 529: n03188531
531
+ 530: n03196217
532
+ 531: n03197337
533
+ 532: n03201208
534
+ 533: n03207743
535
+ 534: n03207941
536
+ 535: n03208938
537
+ 536: n03216828
538
+ 537: n03218198
539
+ 538: n13872072
540
+ 539: n03223299
541
+ 540: n03240683
542
+ 541: n03249569
543
+ 542: n07647870
544
+ 543: n03255030
545
+ 544: n03259401
546
+ 545: n03271574
547
+ 546: n03272010
548
+ 547: n03272562
549
+ 548: n03290653
550
+ 549: n13869788
551
+ 550: n03297495
552
+ 551: n03314780
553
+ 552: n03325584
554
+ 553: n03337140
555
+ 554: n03344393
556
+ 555: n03345487
557
+ 556: n03347037
558
+ 557: n03355925
559
+ 558: n03372029
560
+ 559: n03376595
561
+ 560: n03379051
562
+ 561: n03384352
563
+ 562: n03388043
564
+ 563: n03388183
565
+ 564: n03388549
566
+ 565: n03393912
567
+ 566: n03394916
568
+ 567: n03400231
569
+ 568: n03404251
570
+ 569: n03417042
571
+ 570: n03424325
572
+ 571: n03425413
573
+ 572: n03443371
574
+ 573: n03444034
575
+ 574: n03445777
576
+ 575: n03445924
577
+ 576: n03447447
578
+ 577: n03447721
579
+ 578: n08286342
580
+ 579: n03452741
581
+ 580: n03457902
582
+ 581: n03459775
583
+ 582: n03461385
584
+ 583: n03467068
585
+ 584: n03476684
586
+ 585: n03476991
587
+ 586: n03478589
588
+ 587: n03482001
589
+ 588: n03482405
590
+ 589: n03483316
591
+ 590: n03485407
592
+ 591: n03485794
593
+ 592: n03492542
594
+ 593: n03494278
595
+ 594: n03495570
596
+ 595: n10161363
597
+ 596: n03498962
598
+ 597: n03527565
599
+ 598: n03529860
600
+ 599: n09218315
601
+ 600: n03532672
602
+ 601: n03534580
603
+ 602: n03535780
604
+ 603: n03538406
605
+ 604: n03544143
606
+ 605: n03584254
607
+ 606: n03584829
608
+ 607: n03590841
609
+ 608: n03594734
610
+ 609: n03594945
611
+ 610: n03595614
612
+ 611: n03598930
613
+ 612: n03599486
614
+ 613: n03602883
615
+ 614: n03617480
616
+ 615: n03623198
617
+ 616: n15102712
618
+ 617: n03630383
619
+ 618: n03633091
620
+ 619: n03637318
621
+ 620: n03642806
622
+ 621: n03649909
623
+ 622: n03657121
624
+ 623: n03658185
625
+ 624: n07977870
626
+ 625: n03662601
627
+ 626: n03666591
628
+ 627: n03670208
629
+ 628: n03673027
630
+ 629: n03676483
631
+ 630: n03680355
632
+ 631: n03690938
633
+ 632: n03691459
634
+ 633: n03692522
635
+ 634: n03697007
636
+ 635: n03706229
637
+ 636: n03709823
638
+ 637: n03710193
639
+ 638: n03710637
640
+ 639: n03710721
641
+ 640: n03717622
642
+ 641: n03720891
643
+ 642: n03721384
644
+ 643: n03725035
645
+ 644: n03729826
646
+ 645: n03733131
647
+ 646: n03733281
648
+ 647: n03733805
649
+ 648: n03742115
650
+ 649: n03743016
651
+ 650: n03759954
652
+ 651: n03761084
653
+ 652: n03763968
654
+ 653: n03764736
655
+ 654: n03769881
656
+ 655: n03770439
657
+ 656: n03770679
658
+ 657: n03773504
659
+ 658: n03775071
660
+ 659: n03775546
661
+ 660: n03776460
662
+ 661: n03777568
663
+ 662: n03777754
664
+ 663: n03781244
665
+ 664: n03782006
666
+ 665: n03785016
667
+ 666: n14955889
668
+ 667: n03787032
669
+ 668: n03788195
670
+ 669: n03788365
671
+ 670: n03791053
672
+ 671: n03792782
673
+ 672: n03792972
674
+ 673: n03793489
675
+ 674: n03794056
676
+ 675: n03796401
677
+ 676: n03803284
678
+ 677: n13652335
679
+ 678: n03814639
680
+ 679: n03814906
681
+ 680: n03825788
682
+ 681: n03832673
683
+ 682: n03837869
684
+ 683: n03838899
685
+ 684: n03840681
686
+ 685: n03841143
687
+ 686: n03843555
688
+ 687: n03854065
689
+ 688: n03857828
690
+ 689: n03866082
691
+ 690: n03868242
692
+ 691: n03868863
693
+ 692: n07281099
694
+ 693: n03873416
695
+ 694: n03874293
696
+ 695: n03874599
697
+ 696: n03876231
698
+ 697: n03877472
699
+ 698: n08053121
700
+ 699: n03884397
701
+ 700: n03887697
702
+ 701: n03888257
703
+ 702: n03888605
704
+ 703: n03891251
705
+ 704: n03891332
706
+ 705: n03895866
707
+ 706: n03899768
708
+ 707: n03902125
709
+ 708: n03903868
710
+ 709: n03908618
711
+ 710: n03908714
712
+ 711: n03916031
713
+ 712: n03920288
714
+ 713: n03924679
715
+ 714: n03929660
716
+ 715: n03929855
717
+ 716: n03930313
718
+ 717: n03930630
719
+ 718: n03934042
720
+ 719: n03935335
721
+ 720: n03937543
722
+ 721: n03938244
723
+ 722: n03942813
724
+ 723: n03944341
725
+ 724: n03947888
726
+ 725: n03950228
727
+ 726: n03954731
728
+ 727: n03956157
729
+ 728: n03958227
730
+ 729: n03961711
731
+ 730: n03967562
732
+ 731: n03970156
733
+ 732: n03976467
734
+ 733: n08620881
735
+ 734: n03977966
736
+ 735: n03980874
737
+ 736: n03982430
738
+ 737: n03983396
739
+ 738: n03991062
740
+ 739: n03992509
741
+ 740: n03995372
742
+ 741: n03998194
743
+ 742: n04004767
744
+ 743: n13937284
745
+ 744: n04008634
746
+ 745: n04009801
747
+ 746: n04019541
748
+ 747: n04023962
749
+ 748: n13413294
750
+ 749: n04033901
751
+ 750: n04033995
752
+ 751: n04037443
753
+ 752: n04039381
754
+ 753: n09403211
755
+ 754: n04041544
756
+ 755: n04044716
757
+ 756: n04049303
758
+ 757: n04065272
759
+ 758: n07056680
760
+ 759: n04069434
761
+ 760: n04070727
762
+ 761: n04074963
763
+ 762: n04081281
764
+ 763: n04086273
765
+ 764: n04090263
766
+ 765: n04099969
767
+ 766: n04111531
768
+ 767: n04116512
769
+ 768: n04118538
770
+ 769: n04118776
771
+ 770: n04120489
772
+ 771: n04125116
773
+ 772: n04127249
774
+ 773: n04131690
775
+ 774: n04133789
776
+ 775: n04136333
777
+ 776: n04141076
778
+ 777: n04141327
779
+ 778: n04141975
780
+ 779: n04146614
781
+ 780: n04147291
782
+ 781: n04149813
783
+ 782: n04152593
784
+ 783: n04154340
785
+ 784: n07917272
786
+ 785: n04162706
787
+ 786: n04179913
788
+ 787: n04192698
789
+ 788: n04200800
790
+ 789: n04201297
791
+ 790: n04204238
792
+ 791: n04204347
793
+ 792: n04208427
794
+ 793: n04209133
795
+ 794: n04209239
796
+ 795: n04228054
797
+ 796: n04229816
798
+ 797: n04235860
799
+ 798: n04238763
800
+ 799: n04239074
801
+ 800: n04243546
802
+ 801: n04251144
803
+ 802: n04252077
804
+ 803: n04252225
805
+ 804: n04254120
806
+ 805: n04254680
807
+ 806: n04254777
808
+ 807: n04258138
809
+ 808: n04259630
810
+ 809: n04263257
811
+ 810: n04264628
812
+ 811: n04265275
813
+ 812: n04266014
814
+ 813: n04270147
815
+ 814: n04273569
816
+ 815: n04275363
817
+ 816: n05605498
818
+ 817: n04285008
819
+ 818: n04286575
820
+ 819: n08646566
821
+ 820: n04310018
822
+ 821: n04311004
823
+ 822: n04311174
824
+ 823: n04317175
825
+ 824: n04325704
826
+ 825: n04326547
827
+ 826: n04328186
828
+ 827: n04330267
829
+ 828: n04332243
830
+ 829: n04335435
831
+ 830: n04337157
832
+ 831: n04344873
833
+ 832: n04346328
834
+ 833: n04347754
835
+ 834: n04350905
836
+ 835: n04355338
837
+ 836: n04355933
838
+ 837: n04356056
839
+ 838: n04357314
840
+ 839: n04366367
841
+ 840: n04367480
842
+ 841: n04370456
843
+ 842: n04371430
844
+ 843: n14009946
845
+ 844: n04372370
846
+ 845: n04376876
847
+ 846: n04380533
848
+ 847: n04389033
849
+ 848: n04392985
850
+ 849: n04398044
851
+ 850: n04399382
852
+ 851: n04404412
853
+ 852: n04409515
854
+ 853: n04417672
855
+ 854: n04418357
856
+ 855: n04423845
857
+ 856: n04428191
858
+ 857: n04429376
859
+ 858: n04435653
860
+ 859: n04442312
861
+ 860: n04443257
862
+ 861: n04447861
863
+ 862: n04456115
864
+ 863: n04458633
865
+ 864: n04461696
866
+ 865: n04462240
867
+ 866: n04465666
868
+ 867: n04467665
869
+ 868: n04476259
870
+ 869: n04479046
871
+ 870: n04482393
872
+ 871: n04483307
873
+ 872: n04485082
874
+ 873: n04486054
875
+ 874: n04487081
876
+ 875: n04487394
877
+ 876: n04493381
878
+ 877: n04501370
879
+ 878: n04505470
880
+ 879: n04507155
881
+ 880: n04509417
882
+ 881: n04515003
883
+ 882: n04517823
884
+ 883: n04522168
885
+ 884: n04523525
886
+ 885: n04525038
887
+ 886: n04525305
888
+ 887: n04532106
889
+ 888: n04532670
890
+ 889: n04536866
891
+ 890: n04540053
892
+ 891: n04542943
893
+ 892: n04548280
894
+ 893: n04548362
895
+ 894: n04550184
896
+ 895: n04552348
897
+ 896: n04553703
898
+ 897: n04554684
899
+ 898: n04557648
900
+ 899: n04560804
901
+ 900: n04562935
902
+ 901: n04579145
903
+ 902: n04579667
904
+ 903: n04584207
905
+ 904: n04589890
906
+ 905: n04590129
907
+ 906: n04591157
908
+ 907: n04591713
909
+ 908: n10782135
910
+ 909: n04596742
911
+ 910: n04598010
912
+ 911: n04599235
913
+ 912: n04604644
914
+ 913: n14423870
915
+ 914: n04612504
916
+ 915: n04613696
917
+ 916: n06359193
918
+ 917: n06596364
919
+ 918: n06785654
920
+ 919: n06794110
921
+ 920: n06874185
922
+ 921: n07248320
923
+ 922: n07565083
924
+ 923: n07657664
925
+ 924: n07583066
926
+ 925: n07584110
927
+ 926: n07590611
928
+ 927: n07613480
929
+ 928: n07614500
930
+ 929: n07615774
931
+ 930: n07684084
932
+ 931: n07693725
933
+ 932: n07695742
934
+ 933: n07697313
935
+ 934: n07697537
936
+ 935: n07711569
937
+ 936: n07714571
938
+ 937: n07714990
939
+ 938: n07715103
940
+ 939: n12159804
941
+ 940: n12160303
942
+ 941: n12160857
943
+ 942: n07717556
944
+ 943: n07718472
945
+ 944: n07718747
946
+ 945: n07720875
947
+ 946: n07730033
948
+ 947: n13001041
949
+ 948: n07742313
950
+ 949: n12630144
951
+ 950: n14991210
952
+ 951: n07749582
953
+ 952: n07753113
954
+ 953: n07753275
955
+ 954: n07753592
956
+ 955: n07754684
957
+ 956: n07760859
958
+ 957: n07768694
959
+ 958: n07802026
960
+ 959: n07831146
961
+ 960: n07836838
962
+ 961: n07860988
963
+ 962: n07871810
964
+ 963: n07873807
965
+ 964: n07875152
966
+ 965: n07880968
967
+ 966: n07892512
968
+ 967: n07920052
969
+ 968: n13904665
970
+ 969: n07932039
971
+ 970: n09193705
972
+ 971: n09229709
973
+ 972: n09246464
974
+ 973: n09256479
975
+ 974: n09288635
976
+ 975: n09332890
977
+ 976: n09399592
978
+ 977: n09421951
979
+ 978: n09428293
980
+ 979: n09468604
981
+ 980: n09472597
982
+ 981: n09835506
983
+ 982: n10148035
984
+ 983: n10565667
985
+ 984: n11879895
986
+ 985: n11939491
987
+ 986: n12057211
988
+ 987: n12144580
989
+ 988: n12267677
990
+ 989: n12620546
991
+ 990: n12768682
992
+ 991: n12985857
993
+ 992: n12998815
994
+ 993: n13037406
995
+ 994: n13040303
996
+ 995: n13044778
997
+ 996: n13052670
998
+ 997: n13054560
999
+ 998: n13133613
1000
+ 999: n15075141
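
A minimal sketch of reading this mapping (ImageNet class index to WordNet synset id) with PyYAML; the path is illustrative and PyYAML is assumed to be available.

import yaml

with open("gligen/ldm/data/index_synset.yaml") as f:
    idx_to_synset = yaml.safe_load(f)   # {0: 'n01440764', ..., 999: 'n15075141'}

assert len(idx_to_synset) == 1000
print(idx_to_synset[0], idx_to_synset[999])   # n01440764 n15075141
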
gligen/ldm/data/lsun.py ADDED
@@ -0,0 +1,92 @@
1
+ import os
2
+ import numpy as np
3
+ import PIL
4
+ from PIL import Image
5
+ from torch.utils.data import Dataset
6
+ from torchvision import transforms
7
+
8
+
9
+ class LSUNBase(Dataset):
10
+ def __init__(self,
11
+ txt_file,
12
+ data_root,
13
+ size=None,
14
+ interpolation="bicubic",
15
+ flip_p=0.5
16
+ ):
17
+ self.data_paths = txt_file
18
+ self.data_root = data_root
19
+ with open(self.data_paths, "r") as f:
20
+ self.image_paths = f.read().splitlines()
21
+ self._length = len(self.image_paths)
22
+ self.labels = {
23
+ "relative_file_path_": [l for l in self.image_paths],
24
+ "file_path_": [os.path.join(self.data_root, l)
25
+ for l in self.image_paths],
26
+ }
27
+
28
+ self.size = size
29
+ self.interpolation = {"linear": PIL.Image.LINEAR,
30
+ "bilinear": PIL.Image.BILINEAR,
31
+ "bicubic": PIL.Image.BICUBIC,
32
+ "lanczos": PIL.Image.LANCZOS,
33
+ }[interpolation]
34
+ self.flip = transforms.RandomHorizontalFlip(p=flip_p)
35
+
36
+ def __len__(self):
37
+ return self._length
38
+
39
+ def __getitem__(self, i):
40
+ example = dict((k, self.labels[k][i]) for k in self.labels)
41
+ image = Image.open(example["file_path_"])
42
+ if not image.mode == "RGB":
43
+ image = image.convert("RGB")
44
+
45
+ # default to score-sde preprocessing
46
+ img = np.array(image).astype(np.uint8)
47
+ crop = min(img.shape[0], img.shape[1])
48
+ h, w = img.shape[0], img.shape[1]
49
+ img = img[(h - crop) // 2:(h + crop) // 2,
50
+ (w - crop) // 2:(w + crop) // 2]
51
+
52
+ image = Image.fromarray(img)
53
+ if self.size is not None:
54
+ image = image.resize((self.size, self.size), resample=self.interpolation)
55
+
56
+ image = self.flip(image)
57
+ image = np.array(image).astype(np.uint8)
58
+ example["image"] = (image / 127.5 - 1.0).astype(np.float32)
59
+ return example
60
+
61
+
62
+ class LSUNChurchesTrain(LSUNBase):
63
+ def __init__(self, **kwargs):
64
+ super().__init__(txt_file="data/lsun/church_outdoor_train.txt", data_root="data/lsun/churches", **kwargs)
65
+
66
+
67
+ class LSUNChurchesValidation(LSUNBase):
68
+ def __init__(self, flip_p=0., **kwargs):
69
+ super().__init__(txt_file="data/lsun/church_outdoor_val.txt", data_root="data/lsun/churches",
70
+ flip_p=flip_p, **kwargs)
71
+
72
+
73
+ class LSUNBedroomsTrain(LSUNBase):
74
+ def __init__(self, **kwargs):
75
+ super().__init__(txt_file="data/lsun/bedrooms_train.txt", data_root="data/lsun/bedrooms", **kwargs)
76
+
77
+
78
+ class LSUNBedroomsValidation(LSUNBase):
79
+ def __init__(self, flip_p=0.0, **kwargs):
80
+ super().__init__(txt_file="data/lsun/bedrooms_val.txt", data_root="data/lsun/bedrooms",
81
+ flip_p=flip_p, **kwargs)
82
+
83
+
84
+ class LSUNCatsTrain(LSUNBase):
85
+ def __init__(self, **kwargs):
86
+ super().__init__(txt_file="data/lsun/cat_train.txt", data_root="data/lsun/cats", **kwargs)
87
+
88
+
89
+ class LSUNCatsValidation(LSUNBase):
90
+ def __init__(self, flip_p=0., **kwargs):
91
+ super().__init__(txt_file="data/lsun/cat_val.txt", data_root="data/lsun/cats",
92
+ flip_p=flip_p, **kwargs)
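
A minimal usage sketch for these LSUN dataset classes; it assumes the hard-coded data/lsun/* file lists and images exist locally and that the gligen/ directory is on PYTHONPATH.

from torch.utils.data import DataLoader
from ldm.data.lsun import LSUNChurchesTrain   # import path assumes gligen/ is on PYTHONPATH

dataset = LSUNChurchesTrain(size=256)         # center-crop, then resize to 256x256
loader = DataLoader(dataset, batch_size=4, shuffle=True)
batch = next(iter(loader))
print(batch["image"].shape)                   # (4, 256, 256, 3), float32 in [-1, 1], HWC layout
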
gligen/ldm/lr_scheduler.py ADDED
@@ -0,0 +1,98 @@
1
+ import numpy as np
2
+
3
+
4
+ class LambdaWarmUpCosineScheduler:
5
+ """
6
+ note: use with a base_lr of 1.0
7
+ """
8
+ def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0):
9
+ self.lr_warm_up_steps = warm_up_steps
10
+ self.lr_start = lr_start
11
+ self.lr_min = lr_min
12
+ self.lr_max = lr_max
13
+ self.lr_max_decay_steps = max_decay_steps
14
+ self.last_lr = 0.
15
+ self.verbosity_interval = verbosity_interval
16
+
17
+ def schedule(self, n, **kwargs):
18
+ if self.verbosity_interval > 0:
19
+ if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}")
20
+ if n < self.lr_warm_up_steps:
21
+ lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start
22
+ self.last_lr = lr
23
+ return lr
24
+ else:
25
+ t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps)
26
+ t = min(t, 1.0)
27
+ lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * (
28
+ 1 + np.cos(t * np.pi))
29
+ self.last_lr = lr
30
+ return lr
31
+
32
+ def __call__(self, n, **kwargs):
33
+ return self.schedule(n,**kwargs)
34
+
35
+
36
+ class LambdaWarmUpCosineScheduler2:
37
+ """
38
+ supports repeated iterations, configurable via lists
39
+ note: use with a base_lr of 1.0.
40
+ """
41
+ def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0):
42
+ assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths)
43
+ self.lr_warm_up_steps = warm_up_steps
44
+ self.f_start = f_start
45
+ self.f_min = f_min
46
+ self.f_max = f_max
47
+ self.cycle_lengths = cycle_lengths
48
+ self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths))
49
+ self.last_f = 0.
50
+ self.verbosity_interval = verbosity_interval
51
+
52
+ def find_in_interval(self, n):
53
+ interval = 0
54
+ for cl in self.cum_cycles[1:]:
55
+ if n <= cl:
56
+ return interval
57
+ interval += 1
58
+
59
+ def schedule(self, n, **kwargs):
60
+ cycle = self.find_in_interval(n)
61
+ n = n - self.cum_cycles[cycle]
62
+ if self.verbosity_interval > 0:
63
+ if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, "
64
+ f"current cycle {cycle}")
65
+ if n < self.lr_warm_up_steps[cycle]:
66
+ f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
67
+ self.last_f = f
68
+ return f
69
+ else:
70
+ t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle])
71
+ t = min(t, 1.0)
72
+ f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * (
73
+ 1 + np.cos(t * np.pi))
74
+ self.last_f = f
75
+ return f
76
+
77
+ def __call__(self, n, **kwargs):
78
+ return self.schedule(n, **kwargs)
79
+
80
+
81
+ class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2):
82
+
83
+ def schedule(self, n, **kwargs):
84
+ cycle = self.find_in_interval(n)
85
+ n = n - self.cum_cycles[cycle]
86
+ if self.verbosity_interval > 0:
87
+ if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, "
88
+ f"current cycle {cycle}")
89
+
90
+ if n < self.lr_warm_up_steps[cycle]:
91
+ f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
92
+ self.last_f = f
93
+ return f
94
+ else:
95
+ f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle])
96
+ self.last_f = f
97
+ return f
98
+
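
A minimal sketch of plugging LambdaWarmUpCosineScheduler into torch's LambdaLR; as the docstring notes, the schedule returns the absolute learning rate, so the optimizer's base lr is set to 1.0. The hyperparameters below are placeholders.

import torch
from torch.optim.lr_scheduler import LambdaLR
from ldm.lr_scheduler import LambdaWarmUpCosineScheduler   # assumes gligen/ is on PYTHONPATH

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)  # base_lr of 1.0, per the docstring
schedule = LambdaWarmUpCosineScheduler(warm_up_steps=100, lr_min=1e-6, lr_max=1e-4,
                                       lr_start=0.0, max_decay_steps=10_000)
scheduler = LambdaLR(optimizer, lr_lambda=schedule)

for step in range(3):
    optimizer.step()
    scheduler.step()
    print(step, optimizer.param_groups[0]["lr"])   # ramps linearly toward lr_max, then cosine-decays
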
gligen/ldm/models/autoencoder.py ADDED
@@ -0,0 +1,52 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ #import pytorch_lightning as pl
4
+ import torch.nn.functional as F
5
+ from contextlib import contextmanager
6
+
7
+ # from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
8
+
9
+ from ldm.modules.diffusionmodules.model import Encoder, Decoder
10
+ from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
11
+
12
+ from ldm.util import instantiate_from_config
13
+
14
+
15
+
16
+
17
+ class AutoencoderKL(nn.Module):
18
+ def __init__(self,
19
+ ddconfig,
20
+ embed_dim,
21
+ scale_factor=1
22
+ ):
23
+ super().__init__()
24
+ self.encoder = Encoder(**ddconfig)
25
+ self.decoder = Decoder(**ddconfig)
26
+ assert ddconfig["double_z"]
27
+ self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
28
+ self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
29
+ self.embed_dim = embed_dim
30
+ self.scale_factor = scale_factor
31
+
32
+
33
+
34
+ def encode(self, x):
35
+ h = self.encoder(x)
36
+ moments = self.quant_conv(h)
37
+ posterior = DiagonalGaussianDistribution(moments)
38
+ return posterior.sample() * self.scale_factor
39
+
40
+ def decode(self, z):
41
+ z = 1. / self.scale_factor * z
42
+ z = self.post_quant_conv(z)
43
+ dec = self.decoder(z)
44
+ return dec
45
+
46
+
47
+
48
+
49
+
50
+
51
+
52
+
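
A minimal encode/decode roundtrip with this stripped-down AutoencoderKL; the ddconfig below follows the usual Stable Diffusion first-stage settings and is an assumption rather than something defined in this file.

import torch
from ldm.models.autoencoder import AutoencoderKL   # assumes gligen/ is on PYTHONPATH

ddconfig = dict(double_z=True, z_channels=4, resolution=256, in_channels=3, out_ch=3,
                ch=128, ch_mult=[1, 2, 4, 4], num_res_blocks=2, attn_resolutions=[], dropout=0.0)
ae = AutoencoderKL(ddconfig, embed_dim=4, scale_factor=0.18215).eval()

with torch.no_grad():
    x = torch.randn(1, 3, 256, 256)   # random weights here, so this only checks shapes
    z = ae.encode(x)                  # sampled latent, already multiplied by scale_factor
    x_rec = ae.decode(z)
print(z.shape, x_rec.shape)           # (1, 4, 32, 32) (1, 3, 256, 256)
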
gligen/ldm/models/diffusion/__init__.py ADDED
File without changes
gligen/ldm/models/diffusion/classifier.py ADDED
@@ -0,0 +1,267 @@
1
+ import os
2
+ import torch
3
+ import pytorch_lightning as pl
4
+ from omegaconf import OmegaConf
5
+ from torch.nn import functional as F
6
+ from torch.optim import AdamW
7
+ from torch.optim.lr_scheduler import LambdaLR
8
+ from copy import deepcopy
9
+ from einops import rearrange
10
+ from glob import glob
11
+ from natsort import natsorted
12
+
13
+ from ldm.modules.diffusionmodules.openaimodel import EncoderUNetModel, UNetModel
14
+ from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config
15
+
16
+ __models__ = {
17
+ 'class_label': EncoderUNetModel,
18
+ 'segmentation': UNetModel
19
+ }
20
+
21
+
22
+ def disabled_train(self, mode=True):
23
+ """Overwrite model.train with this function to make sure train/eval mode
24
+ does not change anymore."""
25
+ return self
26
+
27
+
28
+ class NoisyLatentImageClassifier(pl.LightningModule):
29
+
30
+ def __init__(self,
31
+ diffusion_path,
32
+ num_classes,
33
+ ckpt_path=None,
34
+ pool='attention',
35
+ label_key=None,
36
+ diffusion_ckpt_path=None,
37
+ scheduler_config=None,
38
+ weight_decay=1.e-2,
39
+ log_steps=10,
40
+ monitor='val/loss',
41
+ *args,
42
+ **kwargs):
43
+ super().__init__(*args, **kwargs)
44
+ self.num_classes = num_classes
45
+ # get latest config of diffusion model
46
+ diffusion_config = natsorted(glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')))[-1]
47
+ self.diffusion_config = OmegaConf.load(diffusion_config).model
48
+ self.diffusion_config.params.ckpt_path = diffusion_ckpt_path
49
+ self.load_diffusion()
50
+
51
+ self.monitor = monitor
52
+ self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1
53
+ self.log_time_interval = self.diffusion_model.num_timesteps // log_steps
54
+ self.log_steps = log_steps
55
+
56
+ self.label_key = label_key if not hasattr(self.diffusion_model, 'cond_stage_key') \
57
+ else self.diffusion_model.cond_stage_key
58
+
59
+ assert self.label_key is not None, 'label_key neither in diffusion model nor in model.params'
60
+
61
+ if self.label_key not in __models__:
62
+ raise NotImplementedError()
63
+
64
+ self.load_classifier(ckpt_path, pool)
65
+
66
+ self.scheduler_config = scheduler_config
67
+ self.use_scheduler = self.scheduler_config is not None
68
+ self.weight_decay = weight_decay
69
+
70
+ def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
71
+ sd = torch.load(path, map_location="cpu")
72
+ if "state_dict" in list(sd.keys()):
73
+ sd = sd["state_dict"]
74
+ keys = list(sd.keys())
75
+ for k in keys:
76
+ for ik in ignore_keys:
77
+ if k.startswith(ik):
78
+ print("Deleting key {} from state_dict.".format(k))
79
+ del sd[k]
80
+ missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
81
+ sd, strict=False)
82
+ print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
83
+ if len(missing) > 0:
84
+ print(f"Missing Keys: {missing}")
85
+ if len(unexpected) > 0:
86
+ print(f"Unexpected Keys: {unexpected}")
87
+
88
+ def load_diffusion(self):
89
+ model = instantiate_from_config(self.diffusion_config)
90
+ self.diffusion_model = model.eval()
91
+ self.diffusion_model.train = disabled_train
92
+ for param in self.diffusion_model.parameters():
93
+ param.requires_grad = False
94
+
95
+ def load_classifier(self, ckpt_path, pool):
96
+ model_config = deepcopy(self.diffusion_config.params.unet_config.params)
97
+ model_config.in_channels = self.diffusion_config.params.unet_config.params.out_channels
98
+ model_config.out_channels = self.num_classes
99
+ if self.label_key == 'class_label':
100
+ model_config.pool = pool
101
+
102
+ self.model = __models__[self.label_key](**model_config)
103
+ if ckpt_path is not None:
104
+ print('#####################################################################')
105
+ print(f'load from ckpt "{ckpt_path}"')
106
+ print('#####################################################################')
107
+ self.init_from_ckpt(ckpt_path)
108
+
109
+ @torch.no_grad()
110
+ def get_x_noisy(self, x, t, noise=None):
111
+ noise = default(noise, lambda: torch.randn_like(x))
112
+ continuous_sqrt_alpha_cumprod = None
113
+ if self.diffusion_model.use_continuous_noise:
114
+ continuous_sqrt_alpha_cumprod = self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1)
115
+ # todo: make sure t+1 is correct here
116
+
117
+ return self.diffusion_model.q_sample(x_start=x, t=t, noise=noise,
118
+ continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod)
119
+
120
+ def forward(self, x_noisy, t, *args, **kwargs):
121
+ return self.model(x_noisy, t)
122
+
123
+ @torch.no_grad()
124
+ def get_input(self, batch, k):
125
+ x = batch[k]
126
+ if len(x.shape) == 3:
127
+ x = x[..., None]
128
+ x = rearrange(x, 'b h w c -> b c h w')
129
+ x = x.to(memory_format=torch.contiguous_format).float()
130
+ return x
131
+
132
+ @torch.no_grad()
133
+ def get_conditioning(self, batch, k=None):
134
+ if k is None:
135
+ k = self.label_key
136
+ assert k is not None, 'Needs to provide label key'
137
+
138
+ targets = batch[k].to(self.device)
139
+
140
+ if self.label_key == 'segmentation':
141
+ targets = rearrange(targets, 'b h w c -> b c h w')
142
+ for down in range(self.numd):
143
+ h, w = targets.shape[-2:]
144
+ targets = F.interpolate(targets, size=(h // 2, w // 2), mode='nearest')
145
+
146
+ # targets = rearrange(targets,'b c h w -> b h w c')
147
+
148
+ return targets
149
+
150
+ def compute_top_k(self, logits, labels, k, reduction="mean"):
151
+ _, top_ks = torch.topk(logits, k, dim=1)
152
+ if reduction == "mean":
153
+ return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item()
154
+ elif reduction == "none":
155
+ return (top_ks == labels[:, None]).float().sum(dim=-1)
156
+
157
+ def on_train_epoch_start(self):
158
+ # save some memory
159
+ self.diffusion_model.model.to('cpu')
160
+
161
+ @torch.no_grad()
162
+ def write_logs(self, loss, logits, targets):
163
+ log_prefix = 'train' if self.training else 'val'
164
+ log = {}
165
+ log[f"{log_prefix}/loss"] = loss.mean()
166
+ log[f"{log_prefix}/acc@1"] = self.compute_top_k(
167
+ logits, targets, k=1, reduction="mean"
168
+ )
169
+ log[f"{log_prefix}/acc@5"] = self.compute_top_k(
170
+ logits, targets, k=5, reduction="mean"
171
+ )
172
+
173
+ self.log_dict(log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True)
174
+ self.log('loss', log[f"{log_prefix}/loss"], prog_bar=True, logger=False)
175
+ self.log('global_step', self.global_step, logger=False, on_epoch=False, prog_bar=True)
176
+ lr = self.optimizers().param_groups[0]['lr']
177
+ self.log('lr_abs', lr, on_step=True, logger=True, on_epoch=False, prog_bar=True)
178
+
179
+ def shared_step(self, batch, t=None):
180
+ x, *_ = self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key)
181
+ targets = self.get_conditioning(batch)
182
+ if targets.dim() == 4:
183
+ targets = targets.argmax(dim=1)
184
+ if t is None:
185
+ t = torch.randint(0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device).long()
186
+ else:
187
+ t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long()
188
+ x_noisy = self.get_x_noisy(x, t)
189
+ logits = self(x_noisy, t)
190
+
191
+ loss = F.cross_entropy(logits, targets, reduction='none')
192
+
193
+ self.write_logs(loss.detach(), logits.detach(), targets.detach())
194
+
195
+ loss = loss.mean()
196
+ return loss, logits, x_noisy, targets
197
+
198
+ def training_step(self, batch, batch_idx):
199
+ loss, *_ = self.shared_step(batch)
200
+ return loss
201
+
202
+ def reset_noise_accs(self):
203
+ self.noisy_acc = {t: {'acc@1': [], 'acc@5': []} for t in
204
+ range(0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t)}
205
+
206
+ def on_validation_start(self):
207
+ self.reset_noise_accs()
208
+
209
+ @torch.no_grad()
210
+ def validation_step(self, batch, batch_idx):
211
+ loss, *_ = self.shared_step(batch)
212
+
213
+ for t in self.noisy_acc:
214
+ _, logits, _, targets = self.shared_step(batch, t)
215
+ self.noisy_acc[t]['acc@1'].append(self.compute_top_k(logits, targets, k=1, reduction='mean'))
216
+ self.noisy_acc[t]['acc@5'].append(self.compute_top_k(logits, targets, k=5, reduction='mean'))
217
+
218
+ return loss
219
+
220
+ def configure_optimizers(self):
221
+ optimizer = AdamW(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
222
+
223
+ if self.use_scheduler:
224
+ scheduler = instantiate_from_config(self.scheduler_config)
225
+
226
+ print("Setting up LambdaLR scheduler...")
227
+ scheduler = [
228
+ {
229
+ 'scheduler': LambdaLR(optimizer, lr_lambda=scheduler.schedule),
230
+ 'interval': 'step',
231
+ 'frequency': 1
232
+ }]
233
+ return [optimizer], scheduler
234
+
235
+ return optimizer
236
+
237
+ @torch.no_grad()
238
+ def log_images(self, batch, N=8, *args, **kwargs):
239
+ log = dict()
240
+ x = self.get_input(batch, self.diffusion_model.first_stage_key)
241
+ log['inputs'] = x
242
+
243
+ y = self.get_conditioning(batch)
244
+
245
+ if self.label_key == 'class_label':
246
+ y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
247
+ log['labels'] = y
248
+
249
+ if ismap(y):
250
+ log['labels'] = self.diffusion_model.to_rgb(y)
251
+
252
+ for step in range(self.log_steps):
253
+ current_time = step * self.log_time_interval
254
+
255
+ _, logits, x_noisy, _ = self.shared_step(batch, t=current_time)
256
+
257
+ log[f'inputs@t{current_time}'] = x_noisy
258
+
259
+ pred = F.one_hot(logits.argmax(dim=1), num_classes=self.num_classes)
260
+ pred = rearrange(pred, 'b h w c -> b c h w')
261
+
262
+ log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb(pred)
263
+
264
+ for key in log:
265
+ log[key] = log[key][:N]
266
+
267
+ return log
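
A small self-contained check of the top-k accuracy computation that write_logs relies on, reimplemented on toy logits.

import torch

def compute_top_k(logits, labels, k, reduction="mean"):
    # mirrors NoisyLatentImageClassifier.compute_top_k above
    _, top_ks = torch.topk(logits, k, dim=1)
    hits = (top_ks == labels[:, None]).float().sum(dim=-1)
    return hits.mean().item() if reduction == "mean" else hits

logits = torch.tensor([[0.1, 0.7, 0.2],     # top-1 is class 1
                       [0.9, 0.06, 0.04]])  # top-1 is class 0
labels = torch.tensor([1, 2])
print(compute_top_k(logits, labels, k=1))   # 0.5: first row correct, second wrong
print(compute_top_k(logits, labels, k=2))   # 0.5: class 2 is still outside row 2's top-2
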
gligen/ldm/models/diffusion/ddim.py ADDED
@@ -0,0 +1,134 @@
1
+ import torch
2
+ import numpy as np
3
+ from tqdm import tqdm
4
+ from functools import partial
5
+
6
+ from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
7
+
8
+
9
+ class DDIMSampler(object):
10
+ def __init__(self, diffusion, model, schedule="linear", alpha_generator_func=None, set_alpha_scale=None):
11
+ super().__init__()
12
+ self.diffusion = diffusion
13
+ self.model = model
14
+ self.device = diffusion.betas.device
15
+ self.ddpm_num_timesteps = diffusion.num_timesteps
16
+ self.schedule = schedule
17
+ self.alpha_generator_func = alpha_generator_func
18
+ self.set_alpha_scale = set_alpha_scale
19
+
20
+
21
+ def register_buffer(self, name, attr):
22
+ if type(attr) == torch.Tensor:
23
+ attr = attr.to(self.device)
24
+ setattr(self, name, attr)
25
+
26
+
27
+ def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0.):
28
+ self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
29
+ num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=False)
30
+ alphas_cumprod = self.diffusion.alphas_cumprod
31
+ assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
32
+ to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.device)
33
+
34
+ self.register_buffer('betas', to_torch(self.diffusion.betas))
35
+ self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
36
+ self.register_buffer('alphas_cumprod_prev', to_torch(self.diffusion.alphas_cumprod_prev))
37
+
38
+ # calculations for diffusion q(x_t | x_{t-1}) and others
39
+ self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
40
+ self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
41
+ self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
42
+ self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
43
+ self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
44
+
45
+ # ddim sampling parameters
46
+ ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
47
+ ddim_timesteps=self.ddim_timesteps,
48
+ eta=ddim_eta,verbose=False)
49
+ self.register_buffer('ddim_sigmas', ddim_sigmas)
50
+ self.register_buffer('ddim_alphas', ddim_alphas)
51
+ self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
52
+ self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
53
+ sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
54
+ (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
55
+ 1 - self.alphas_cumprod / self.alphas_cumprod_prev))
56
+ self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
57
+
58
+
59
+ @torch.no_grad()
60
+ def sample(self, S, shape, input, uc=None, guidance_scale=1, mask=None, x0=None):
61
+ self.make_schedule(ddim_num_steps=S)
62
+ return self.ddim_sampling(shape, input, uc, guidance_scale, mask=mask, x0=x0)
63
+
64
+
65
+ @torch.no_grad()
66
+ def ddim_sampling(self, shape, input, uc, guidance_scale=1, mask=None, x0=None):
67
+ b = shape[0]
68
+
69
+ img = input["x"]
70
+ if img is None:
71
+ img = torch.randn(shape, device=self.device)
72
+ input["x"] = img
73
+
74
+
75
+ time_range = np.flip(self.ddim_timesteps)
76
+ total_steps = self.ddim_timesteps.shape[0]
77
+
78
+ #iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
79
+ iterator = time_range
80
+
81
+ if self.alpha_generator_func is not None:
82
+ alphas = self.alpha_generator_func(len(iterator))
83
+
84
+
85
+ for i, step in enumerate(iterator):
86
+
87
+ # set alpha
88
+ if self.alpha_generator_func is not None:
89
+ self.set_alpha_scale(self.model, alphas[i])
90
+
91
+ # run
92
+ index = total_steps - i - 1
93
+ input["timesteps"] = torch.full((b,), step, device=self.device, dtype=torch.long)
94
+
95
+ if mask is not None:
96
+ assert x0 is not None
97
+ img_orig = self.diffusion.q_sample( x0, input["timesteps"] )
98
+ img = img_orig * mask + (1. - mask) * img
99
+ input["x"] = img
100
+
101
+ img, pred_x0 = self.p_sample_ddim(input, index=index, uc=uc, guidance_scale=guidance_scale)
102
+ input["x"] = img
103
+
104
+ return img
105
+
106
+
107
+ @torch.no_grad()
108
+ def p_sample_ddim(self, input, index, uc=None, guidance_scale=1):
109
+
110
+
111
+ e_t = self.model(input)
112
+ if uc is not None and guidance_scale != 1:
113
+ unconditional_input = dict(x=input["x"], timesteps=input["timesteps"], context=uc)
114
+ if "inpainting_extra_input" in input:
115
+ unconditional_input["inpainting_extra_input"] = input["inpainting_extra_input"]
116
+ e_t_uncond = self.model( unconditional_input )
117
+ e_t = e_t_uncond + guidance_scale * (e_t - e_t_uncond)
118
+
119
+ # select parameters corresponding to the currently considered timestep
120
+ b = input["x"].shape[0]
121
+ a_t = torch.full((b, 1, 1, 1), self.ddim_alphas[index], device=self.device)
122
+ a_prev = torch.full((b, 1, 1, 1), self.ddim_alphas_prev[index], device=self.device)
123
+ sigma_t = torch.full((b, 1, 1, 1), self.ddim_sigmas[index], device=self.device)
124
+ sqrt_one_minus_at = torch.full((b, 1, 1, 1), self.ddim_sqrt_one_minus_alphas[index],device=self.device)
125
+
126
+ # current prediction for x_0
127
+ pred_x0 = (input["x"] - sqrt_one_minus_at * e_t) / a_t.sqrt()
128
+
129
+ # direction pointing to x_t
130
+ dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
131
+ noise = sigma_t * torch.randn_like( input["x"] )
132
+ x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
133
+
134
+ return x_prev, pred_x0
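
The update performed in p_sample_ddim above, written out for a single step on dummy tensors (eta = 0, so sigma_t = 0); the scalars stand in for entries of ddim_alphas / ddim_alphas_prev.

import torch

b, c, h, w = 1, 4, 64, 64
x_t = torch.randn(b, c, h, w)
e_t = torch.randn(b, c, h, w)          # stand-in for the model's noise prediction
a_t, a_prev, sigma_t = 0.5, 0.7, 0.0   # cumulative alphas at t and at the previous DDIM step

pred_x0 = (x_t - (1 - a_t) ** 0.5 * e_t) / a_t ** 0.5        # current prediction for x_0
dir_xt = (1.0 - a_prev - sigma_t ** 2) ** 0.5 * e_t          # direction pointing to x_t
x_prev = a_prev ** 0.5 * pred_x0 + dir_xt + sigma_t * torch.randn_like(x_t)
print(x_prev.shape)                    # torch.Size([1, 4, 64, 64])
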
gligen/ldm/models/diffusion/ddpm.py ADDED
@@ -0,0 +1,72 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import numpy as np
4
+ from functools import partial
5
+ from ldm.modules.diffusionmodules.util import make_beta_schedule
6
+
7
+
8
+
9
+
10
+
11
+ class DDPM(nn.Module):
12
+ def __init__(self, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
13
+ super().__init__()
14
+
15
+ self.v_posterior = 0
16
+ self.register_schedule(beta_schedule, timesteps, linear_start, linear_end, cosine_s)
17
+
18
+
19
+ def register_schedule(self, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
20
+
21
+ betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
22
+ alphas = 1. - betas
23
+ alphas_cumprod = np.cumprod(alphas, axis=0)
24
+ alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
25
+
26
+ timesteps, = betas.shape
27
+ self.num_timesteps = int(timesteps)
28
+ self.linear_start = linear_start
29
+ self.linear_end = linear_end
30
+ assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
31
+
32
+ to_torch = partial(torch.tensor, dtype=torch.float32)
33
+
34
+ self.register_buffer('betas', to_torch(betas))
35
+ self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
36
+ self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
37
+
38
+ # calculations for diffusion q(x_t | x_{t-1}) and others
39
+ self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
40
+ self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
41
+ self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
42
+ self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
43
+ self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
44
+
45
+ # calculations for posterior q(x_{t-1} | x_t, x_0)
46
+ posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas
47
+ # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
48
+
49
+ self.register_buffer('posterior_variance', to_torch(posterior_variance))
50
+
51
+ # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
52
+ self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
53
+ self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
54
+ self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
55
+
56
+
57
+
58
+
59
+
60
+
61
+
62
+
63
+
64
+
65
+
66
+
67
+
68
+
69
+
70
+
71
+
72
+
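
The schedule quantities that register_schedule buffers, recomputed directly with numpy; the "linear" schedule here (square-root-spaced betas) is the standard LDM definition and is assumed to match make_beta_schedule.

import numpy as np

T, linear_start, linear_end = 1000, 1e-4, 2e-2
betas = np.linspace(linear_start ** 0.5, linear_end ** 0.5, T) ** 2
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas)
alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])

# with v_posterior = 0 this is the posterior_variance registered above
posterior_variance = betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)
print(betas[0], betas[-1])                    # 1e-4 ... 2e-2
print(alphas_cumprod[0], alphas_cumprod[-1])  # ~0.9999 at t=1, close to 0 at t=T (almost pure noise)
print(posterior_variance[0])                  # 0.0, hence the clipped log-variance above
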
gligen/ldm/models/diffusion/ldm.py ADDED
1
+ import torch
2
+ import torch.nn as nn
3
+ import numpy as np
4
+ from tqdm import tqdm
5
+ from ldm.util import default
6
+ from ldm.modules.diffusionmodules.util import extract_into_tensor
7
+ from .ddpm import DDPM
8
+
9
+
10
+
11
+ class LatentDiffusion(DDPM):
12
+ def __init__(self, *args, **kwargs):
13
+ super().__init__(*args, **kwargs)
14
+ # hardcoded
15
+ self.clip_denoised = False
16
+
17
+
18
+
19
+ def q_sample(self, x_start, t, noise=None):
20
+ noise = default(noise, lambda: torch.randn_like(x_start))
21
+ return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
22
+ extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
23
+
24
+
25
+ # Does not support DDPM sampling anymore; only the DDIM or PLMS samplers are used.
26
+
27
+ # = = = = = = = = = = = = Below is for sampling = = = = = = = = = = = = #
28
+
29
+ # def predict_start_from_noise(self, x_t, t, noise):
30
+ # return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
31
+ # extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise )
32
+
33
+ # def q_posterior(self, x_start, x_t, t):
34
+ # posterior_mean = (
35
+ # extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
36
+ # extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
37
+ # )
38
+ # posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
39
+ # posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
40
+ # return posterior_mean, posterior_variance, posterior_log_variance_clipped
41
+
42
+
43
+ # def p_mean_variance(self, model, x, c, t):
44
+
45
+ # model_out = model(x, t, c)
46
+ # x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
47
+
48
+ # if self.clip_denoised:
49
+ # x_recon.clamp_(-1., 1.)
50
+
51
+ # model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
52
+ # return model_mean, posterior_variance, posterior_log_variance, x_recon
53
+
54
+
55
+ # @torch.no_grad()
56
+ # def p_sample(self, model, x, c, t):
57
+ # b, *_, device = *x.shape, x.device
58
+ # model_mean, _, model_log_variance, x0 = self.p_mean_variance(model, x=x, c=c, t=t, )
59
+ # noise = torch.randn_like(x)
60
+
61
+ # # no noise when t == 0
62
+ # nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
63
+
64
+ # return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
65
+
66
+
67
+ # @torch.no_grad()
68
+ # def p_sample_loop(self, model, shape, c):
69
+ # device = self.betas.device
70
+ # b = shape[0]
71
+ # img = torch.randn(shape, device=device)
72
+
73
+ # iterator = tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps)
74
+ # for i in iterator:
75
+ # ts = torch.full((b,), i, device=device, dtype=torch.long)
76
+ # img, x0 = self.p_sample(model, img, c, ts)
77
+
78
+ # return img
79
+
80
+
81
+ # @torch.no_grad()
82
+ # def sample(self, model, shape, c, uc=None, guidance_scale=None):
83
+ # return self.p_sample_loop(model, shape, c)
84
+
85
+
86
+
87
+
88
+
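
What q_sample above computes is the closed-form forward diffusion x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps; a quick standalone check on dummy tensors:

import torch

abar_t = torch.tensor(0.25)          # alphas_cumprod at some timestep t
x0 = torch.randn(2, 4, 8, 8)
eps = torch.randn_like(x0)

x_t = abar_t.sqrt() * x0 + (1 - abar_t).sqrt() * eps
print(x_t.shape, float(x_t.var()))   # variance stays roughly 1 for unit-variance x0 and eps
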
gligen/ldm/models/diffusion/plms.py ADDED
@@ -0,0 +1,162 @@
1
+ import torch
2
+ import numpy as np
3
+ from tqdm import tqdm
4
+ from functools import partial
5
+ from copy import deepcopy
6
+ from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
7
+
8
+
9
+ class PLMSSampler(object):
10
+ def __init__(self, diffusion, model, schedule="linear", alpha_generator_func=None, set_alpha_scale=None):
11
+ super().__init__()
12
+ self.diffusion = diffusion
13
+ self.model = model
14
+ self.device = diffusion.betas.device
15
+ self.ddpm_num_timesteps = diffusion.num_timesteps
16
+ self.schedule = schedule
17
+ self.alpha_generator_func = alpha_generator_func
18
+ self.set_alpha_scale = set_alpha_scale
19
+
20
+ def register_buffer(self, name, attr):
21
+ if type(attr) == torch.Tensor:
22
+ attr = attr.to(self.device)
23
+ setattr(self, name, attr)
24
+
25
+ def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=False):
26
+ if ddim_eta != 0:
27
+ raise ValueError('ddim_eta must be 0 for PLMS')
28
+ self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
29
+ num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
30
+ alphas_cumprod = self.diffusion.alphas_cumprod
31
+ assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
32
+ to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.device)
33
+
34
+ self.register_buffer('betas', to_torch(self.diffusion.betas))
35
+ self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
36
+ self.register_buffer('alphas_cumprod_prev', to_torch(self.diffusion.alphas_cumprod_prev))
37
+
38
+ # calculations for diffusion q(x_t | x_{t-1}) and others
39
+ self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
40
+ self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
41
+ self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
42
+ self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
43
+ self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
44
+
45
+ # ddim sampling parameters
46
+ ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
47
+ ddim_timesteps=self.ddim_timesteps,
48
+ eta=ddim_eta,verbose=verbose)
49
+ self.register_buffer('ddim_sigmas', ddim_sigmas)
50
+ self.register_buffer('ddim_alphas', ddim_alphas)
51
+ self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
52
+ self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
53
+ sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
54
+ (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
55
+ 1 - self.alphas_cumprod / self.alphas_cumprod_prev))
56
+ self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
57
+
58
+
59
+ @torch.no_grad()
60
+ def sample(self, S, shape, input, uc=None, guidance_scale=1, mask=None, x0=None):
61
+ self.make_schedule(ddim_num_steps=S)
62
+ return self.plms_sampling(shape, input, uc, guidance_scale, mask=mask, x0=x0)
63
+
64
+
65
+ @torch.no_grad()
66
+ def plms_sampling(self, shape, input, uc=None, guidance_scale=1, mask=None, x0=None):
67
+
68
+ b = shape[0]
69
+
70
+ img = input["x"]
71
+ if img == None:
72
+ img = torch.randn(shape, device=self.device)
73
+ input["x"] = img
74
+
75
+ time_range = np.flip(self.ddim_timesteps)
76
+ total_steps = self.ddim_timesteps.shape[0]
77
+
78
+ old_eps = []
79
+
80
+ if self.alpha_generator_func != None:
81
+ alphas = self.alpha_generator_func(len(time_range))
82
+
83
+ for i, step in enumerate(time_range):
84
+
85
+ # set alpha
86
+ if self.alpha_generator_func != None:
87
+ self.set_alpha_scale(self.model, alphas[i])
88
+
89
+ # run
90
+ index = total_steps - i - 1
91
+ ts = torch.full((b,), step, device=self.device, dtype=torch.long)
92
+ ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=self.device, dtype=torch.long)
93
+
94
+ if mask is not None:
95
+ assert x0 is not None
96
+ img_orig = self.diffusion.q_sample(x0, ts)
97
+ img = img_orig * mask + (1. - mask) * img
98
+ input["x"] = img
99
+
100
+ img, pred_x0, e_t = self.p_sample_plms(input, ts, index=index, uc=uc, guidance_scale=guidance_scale, old_eps=old_eps, t_next=ts_next)
101
+ input["x"] = img
102
+ old_eps.append(e_t)
103
+ if len(old_eps) >= 4:
104
+ old_eps.pop(0)
105
+
106
+ return img
107
+
108
+
109
+ @torch.no_grad()
110
+ def p_sample_plms(self, input, t, index, guidance_scale=1., uc=None, old_eps=None, t_next=None):
111
+ x = deepcopy(input["x"])
112
+ b = x.shape[0]
113
+
114
+ def get_model_output(input):
115
+ e_t = self.model(input)
116
+ if uc is not None and guidance_scale != 1:
117
+ unconditional_input = dict(x=input["x"], timesteps=input["timesteps"], context=uc)
118
+ if "inpainting_extra_input" in input:
119
+ unconditional_input["inpainting_extra_input"] = input["inpainting_extra_input"]
120
+ e_t_uncond = self.model( unconditional_input )
121
+ e_t = e_t_uncond + guidance_scale * (e_t - e_t_uncond)
122
+ return e_t
123
+
124
+
125
+ def get_x_prev_and_pred_x0(e_t, index):
126
+ # select parameters corresponding to the currently considered timestep
127
+ a_t = torch.full((b, 1, 1, 1), self.ddim_alphas[index], device=self.device)
128
+ a_prev = torch.full((b, 1, 1, 1), self.ddim_alphas_prev[index], device=self.device)
129
+ sigma_t = torch.full((b, 1, 1, 1), self.ddim_sigmas[index], device=self.device)
130
+ sqrt_one_minus_at = torch.full((b, 1, 1, 1), self.ddim_sqrt_one_minus_alphas[index],device=self.device)
131
+
132
+ # current prediction for x_0
133
+ pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
134
+
135
+ # direction pointing to x_t
136
+ dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
137
+ noise = sigma_t * torch.randn_like(x)
138
+ x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
139
+ return x_prev, pred_x0
140
+
141
+ input["timesteps"] = t
142
+ e_t = get_model_output(input)
143
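+ # PLMS step: the first step uses a pseudo improved Euler (Heun-like) update with an extra model call; later steps reuse up to three cached noise predictions with Adams-Bashforth weights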
+ if len(old_eps) == 0:
144
+ # Pseudo Improved Euler (2nd order)
145
+ x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
146
+ input["x"] = x_prev
147
+ input["timesteps"] = t_next
148
+ e_t_next = get_model_output(input)
149
+ e_t_prime = (e_t + e_t_next) / 2
150
+ elif len(old_eps) == 1:
151
+ # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
152
+ e_t_prime = (3 * e_t - old_eps[-1]) / 2
153
+ elif len(old_eps) == 2:
154
+ # 3rd order Pseudo Linear Multistep (Adams-Bashforth)
155
+ e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
156
+ elif len(old_eps) >= 3:
157
+ # 4th order Pseudo Linear Multistep (Adams-Bashforth)
158
+ e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
159
+
160
+ x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
161
+
162
+ return x_prev, pred_x0, e_t
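
A minimal usage sketch of the PLMS sampler defined above (the sampler instance, the conditioning shapes and the latent shape are illustrative assumptions, not part of this commit):

    import torch

    # assume `sampler` is an instance of the PLMS sampler class above, already wired
    # to a diffusion schedule (`self.diffusion`) and a denoising model (`self.model`)
    cond_embeddings = torch.randn(1, 77, 768)            # assumed conditioning tokens
    uncond_embeddings = torch.zeros_like(cond_embeddings)

    shape = (1, 4, 64, 64)                               # batch x latent channels x H x W (assumed)
    inp = {
        "x": None,                # None -> plms_sampling draws Gaussian noise itself
        "timesteps": None,        # filled in per step by p_sample_plms
        "context": cond_embeddings,
        # depending on the wrapped model, grounding keys such as boxes/masks/text_embeddings
        # may also be required here
    }
    with torch.no_grad():
        latents = sampler.sample(S=50, shape=shape, input=inp,
                                 uc=uncond_embeddings, guidance_scale=7.5)
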
gligen/ldm/modules/attention.py ADDED
@@ -0,0 +1,387 @@
1
+ from inspect import isfunction
2
+ import math
3
+ import torch
4
+ import torch.nn.functional as F
5
+ from torch import nn, einsum
6
+ from einops import rearrange, repeat
7
+
8
+ # from ldm.modules.diffusionmodules.util import checkpoint, FourierEmbedder
9
+ from torch.utils import checkpoint
10
+
11
+ try:
12
+ import xformers
13
+ import xformers.ops
14
+ XFORMERS_IS_AVAILBLE = True
15
+ except ImportError:
16
+ XFORMERS_IS_AVAILBLE = False
17
+
18
+
19
+ def exists(val):
20
+ return val is not None
21
+
22
+
23
+ def uniq(arr):
24
+ return {el: True for el in arr}.keys()
25
+
26
+
27
+ def default(val, d):
28
+ if exists(val):
29
+ return val
30
+ return d() if isfunction(d) else d
31
+
32
+
33
+ def max_neg_value(t):
34
+ return -torch.finfo(t.dtype).max
35
+
36
+
37
+ def init_(tensor):
38
+ dim = tensor.shape[-1]
39
+ std = 1 / math.sqrt(dim)
40
+ tensor.uniform_(-std, std)
41
+ return tensor
42
+
43
+
44
+ # feedforward
45
+ class GEGLU(nn.Module):
46
+ def __init__(self, dim_in, dim_out):
47
+ super().__init__()
48
+ self.proj = nn.Linear(dim_in, dim_out * 2)
49
+
50
+ def forward(self, x):
51
+ x, gate = self.proj(x).chunk(2, dim=-1)
52
+ return x * F.gelu(gate)
53
+
54
+
55
+ class FeedForward(nn.Module):
56
+ def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
57
+ super().__init__()
58
+ inner_dim = int(dim * mult)
59
+ dim_out = default(dim_out, dim)
60
+ project_in = nn.Sequential(
61
+ nn.Linear(dim, inner_dim),
62
+ nn.GELU()
63
+ ) if not glu else GEGLU(dim, inner_dim)
64
+
65
+ self.net = nn.Sequential(
66
+ project_in,
67
+ nn.Dropout(dropout),
68
+ nn.Linear(inner_dim, dim_out)
69
+ )
70
+
71
+ def forward(self, x):
72
+ return self.net(x)
73
+
74
+
75
+ def zero_module(module):
76
+ """
77
+ Zero out the parameters of a module and return it.
78
+ """
79
+ for p in module.parameters():
80
+ p.detach().zero_()
81
+ return module
82
+
83
+
84
+ def Normalize(in_channels):
85
+ return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
86
+
87
+
88
+ class LinearAttention(nn.Module):
89
+ def __init__(self, dim, heads=4, dim_head=32):
90
+ super().__init__()
91
+ self.heads = heads
92
+ hidden_dim = dim_head * heads
93
+ self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
94
+ self.to_out = nn.Conv2d(hidden_dim, dim, 1)
95
+
96
+ def forward(self, x):
97
+ b, c, h, w = x.shape
98
+ qkv = self.to_qkv(x)
99
+ q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3)
100
+ k = k.softmax(dim=-1)
101
+ context = torch.einsum('bhdn,bhen->bhde', k, v)
102
+ out = torch.einsum('bhde,bhdn->bhen', context, q)
103
+ out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w)
104
+ return self.to_out(out)
105
+
106
+
107
+
108
+
109
+ class CrossAttention(nn.Module):
110
+ def __init__(self, query_dim, key_dim, value_dim, heads=8, dim_head=64, dropout=0):
111
+ super().__init__()
112
+ inner_dim = dim_head * heads
113
+ self.scale = dim_head ** -0.5
114
+ self.heads = heads
115
+ self.dim_head = dim_head
116
+
117
+ self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
118
+ self.to_k = nn.Linear(key_dim, inner_dim, bias=False)
119
+ self.to_v = nn.Linear(value_dim, inner_dim, bias=False)
120
+
121
+
122
+ self.to_out = nn.Sequential( nn.Linear(inner_dim, query_dim), nn.Dropout(dropout) )
123
+
124
+
125
+ def fill_inf_from_mask(self, sim, mask):
126
+ if mask is not None:
127
+ B,M = mask.shape
128
+ mask = mask.unsqueeze(1).repeat(1,self.heads,1).reshape(B*self.heads,1,-1)
129
+ max_neg_value = -torch.finfo(sim.dtype).max
130
+ sim.masked_fill_(~mask, max_neg_value)
131
+ return sim
132
+
133
+ def forward_plain(self, x, key, value, mask=None):
134
+
135
+ q = self.to_q(x) # B*N*(H*C)
136
+ k = self.to_k(key) # B*M*(H*C)
137
+ v = self.to_v(value) # B*M*(H*C)
138
+
139
+ B, N, HC = q.shape
140
+ _, M, _ = key.shape
141
+ H = self.heads
142
+ C = HC // H
143
+
144
+ q = q.view(B,N,H,C).permute(0,2,1,3).reshape(B*H,N,C) # (B*H)*N*C
145
+ k = k.view(B,M,H,C).permute(0,2,1,3).reshape(B*H,M,C) # (B*H)*M*C
146
+ v = v.view(B,M,H,C).permute(0,2,1,3).reshape(B*H,M,C) # (B*H)*M*C
147
+
148
+ sim = torch.einsum('b i d, b j d -> b i j', q, k) * self.scale # (B*H)*N*M
149
+ self.fill_inf_from_mask(sim, mask)
150
+ attn = sim.softmax(dim=-1) # (B*H)*N*M
151
+
152
+ out = torch.einsum('b i j, b j d -> b i d', attn, v) # (B*H)*N*C
153
+ out = out.view(B,H,N,C).permute(0,2,1,3).reshape(B,N,(H*C)) # B*N*(H*C)
154
+
155
+ return self.to_out(out)
156
+
157
+ def forward(self, x, key, value, mask=None):
158
+ if not XFORMERS_IS_AVAILBLE:
159
+ return self.forward_plain(x, key, value, mask)
160
+
161
+ q = self.to_q(x) # B*N*(H*C)
162
+ k = self.to_k(key) # B*M*(H*C)
163
+ v = self.to_v(value) # B*M*(H*C)
164
+
165
+ b, _, _ = q.shape
166
+ q, k, v = map(
167
+ lambda t: t.unsqueeze(3)
168
+ .reshape(b, t.shape[1], self.heads, self.dim_head)
169
+ .permute(0, 2, 1, 3)
170
+ .reshape(b * self.heads, t.shape[1], self.dim_head)
171
+ .contiguous(),
172
+ (q, k, v),
173
+ )
174
+
175
+ # actually compute the attention, what we cannot get enough of
176
+ out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=None)
177
+
178
+ if exists(mask):
179
+ raise NotImplementedError
180
+ out = (
181
+ out.unsqueeze(0)
182
+ .reshape(b, self.heads, out.shape[1], self.dim_head)
183
+ .permute(0, 2, 1, 3)
184
+ .reshape(b, out.shape[1], self.heads * self.dim_head)
185
+ )
186
+ return self.to_out(out)
187
+
188
+
189
+
190
+
191
+
192
+ class SelfAttention(nn.Module):
193
+ def __init__(self, query_dim, heads=8, dim_head=64, dropout=0.):
194
+ super().__init__()
195
+ inner_dim = dim_head * heads
196
+ self.scale = dim_head ** -0.5
197
+ self.heads = heads
198
+ self.dim_head = dim_head
199
+
200
+ self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
201
+ self.to_k = nn.Linear(query_dim, inner_dim, bias=False)
202
+ self.to_v = nn.Linear(query_dim, inner_dim, bias=False)
203
+
204
+ self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout) )
205
+
206
+ def forward_plain(self, x):
207
+ q = self.to_q(x) # B*N*(H*C)
208
+ k = self.to_k(x) # B*N*(H*C)
209
+ v = self.to_v(x) # B*N*(H*C)
210
+
211
+ B, N, HC = q.shape
212
+ H = self.heads
213
+ C = HC // H
214
+
215
+ q = q.view(B,N,H,C).permute(0,2,1,3).reshape(B*H,N,C) # (B*H)*N*C
216
+ k = k.view(B,N,H,C).permute(0,2,1,3).reshape(B*H,N,C) # (B*H)*N*C
217
+ v = v.view(B,N,H,C).permute(0,2,1,3).reshape(B*H,N,C) # (B*H)*N*C
218
+
219
+ sim = torch.einsum('b i c, b j c -> b i j', q, k) * self.scale # (B*H)*N*N
220
+ attn = sim.softmax(dim=-1) # (B*H)*N*N
221
+
222
+ out = torch.einsum('b i j, b j c -> b i c', attn, v) # (B*H)*N*C
223
+ out = out.view(B,H,N,C).permute(0,2,1,3).reshape(B,N,(H*C)) # B*N*(H*C)
224
+
225
+ return self.to_out(out)
226
+
227
+ def forward(self, x, context=None, mask=None):
228
+ if not XFORMERS_IS_AVAILBLE:
229
+ return self.forward_plain(x)
230
+
231
+ q = self.to_q(x)
232
+ context = default(context, x)
233
+ k = self.to_k(context)
234
+ v = self.to_v(context)
235
+
236
+ b, _, _ = q.shape
237
+ q, k, v = map(
238
+ lambda t: t.unsqueeze(3)
239
+ .reshape(b, t.shape[1], self.heads, self.dim_head)
240
+ .permute(0, 2, 1, 3)
241
+ .reshape(b * self.heads, t.shape[1], self.dim_head)
242
+ .contiguous(),
243
+ (q, k, v),
244
+ )
245
+
246
+ # actually compute the attention, what we cannot get enough of
247
+ out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=None)
248
+
249
+ if exists(mask):
250
+ raise NotImplementedError
251
+ out = (
252
+ out.unsqueeze(0)
253
+ .reshape(b, self.heads, out.shape[1], self.dim_head)
254
+ .permute(0, 2, 1, 3)
255
+ .reshape(b, out.shape[1], self.heads * self.dim_head)
256
+ )
257
+ return self.to_out(out)
258
+
259
+
260
+ class GatedCrossAttentionDense(nn.Module):
261
+ def __init__(self, query_dim, key_dim, value_dim, n_heads, d_head):
262
+ super().__init__()
263
+
264
+ self.attn = CrossAttention(query_dim=query_dim, key_dim=key_dim, value_dim=value_dim, heads=n_heads, dim_head=d_head)
265
+ self.ff = FeedForward(query_dim, glu=True)
266
+
267
+ self.norm1 = nn.LayerNorm(query_dim)
268
+ self.norm2 = nn.LayerNorm(query_dim)
269
+
270
+ self.register_parameter('alpha_attn', nn.Parameter(torch.tensor(0.)) )
271
+ self.register_parameter('alpha_dense', nn.Parameter(torch.tensor(0.)) )
272
+
273
+ # this can be useful: we can externally change magnitude of tanh(alpha)
274
+ # for example, when it is set to 0, the entire model is the same as the original one
275
+ self.scale = 1
276
+
277
+ def forward(self, x, objs):
278
+
279
+ x = x + self.scale*torch.tanh(self.alpha_attn) * self.attn( self.norm1(x), objs, objs)
280
+ x = x + self.scale*torch.tanh(self.alpha_dense) * self.ff( self.norm2(x) )
281
+
282
+ return x
283
+
284
+
285
+ class GatedSelfAttentionDense(nn.Module):
286
+ def __init__(self, query_dim, context_dim, n_heads, d_head):
287
+ super().__init__()
288
+
289
+ # we need a linear projection since we concatenate the visual features and the object features
290
+ self.linear = nn.Linear(context_dim, query_dim)
291
+
292
+ self.attn = SelfAttention(query_dim=query_dim, heads=n_heads, dim_head=d_head)
293
+ self.ff = FeedForward(query_dim, glu=True)
294
+
295
+ self.norm1 = nn.LayerNorm(query_dim)
296
+ self.norm2 = nn.LayerNorm(query_dim)
297
+
298
+ self.register_parameter('alpha_attn', nn.Parameter(torch.tensor(0.)) )
299
+ self.register_parameter('alpha_dense', nn.Parameter(torch.tensor(0.)) )
300
+
301
+ # this can be useful: we can externally change magnitude of tanh(alpha)
302
+ # for example, when it is set to 0, the entire model is the same as the original one
303
+ self.scale = 1
304
+
305
+
306
+ def forward(self, x, objs):
307
+
308
+ N_visual = x.shape[1]
309
+ objs = self.linear(objs)
310
+
311
+ x = x + self.scale*torch.tanh(self.alpha_attn) * self.attn( self.norm1(torch.cat([x,objs],dim=1)) )[:,0:N_visual,:]
312
+ x = x + self.scale*torch.tanh(self.alpha_dense) * self.ff( self.norm2(x) )
313
+
314
+ return x
315
+
316
+
317
+ class BasicTransformerBlock(nn.Module):
318
+ def __init__(self, query_dim, key_dim, value_dim, n_heads, d_head, fuser_type, use_checkpoint=True):
319
+ super().__init__()
320
+ self.attn1 = SelfAttention(query_dim=query_dim, heads=n_heads, dim_head=d_head)
321
+ self.ff = FeedForward(query_dim, glu=True)
322
+ self.attn2 = CrossAttention(query_dim=query_dim, key_dim=key_dim, value_dim=value_dim, heads=n_heads, dim_head=d_head)
323
+ self.norm1 = nn.LayerNorm(query_dim)
324
+ self.norm2 = nn.LayerNorm(query_dim)
325
+ self.norm3 = nn.LayerNorm(query_dim)
326
+ self.use_checkpoint = use_checkpoint
327
+
328
+ if fuser_type == "gatedSA":
329
+ # note key_dim here actually is context_dim
330
+ self.fuser = GatedSelfAttentionDense(query_dim, key_dim, n_heads, d_head)
331
+ elif fuser_type == "gatedCA":
332
+ self.fuser = GatedCrossAttentionDense(query_dim, key_dim, value_dim, n_heads, d_head)
333
+ else:
334
+ assert False
335
+
336
+
337
+ def forward(self, x, context, objs):
338
+ # return checkpoint(self._forward, (x, context, objs), self.parameters(), self.use_checkpoint)
339
+ if self.use_checkpoint and x.requires_grad:
340
+ return checkpoint.checkpoint(self._forward, x, context, objs)
341
+ else:
342
+ return self._forward(x, context, objs)
343
+
344
+ def _forward(self, x, context, objs):
345
+ x = self.attn1( self.norm1(x) ) + x
346
+ x = self.fuser(x, objs) # identity mapping in the beginning
347
+ x = self.attn2(self.norm2(x), context, context) + x
348
+ x = self.ff(self.norm3(x)) + x
349
+ return x
350
+
351
+
352
+ class SpatialTransformer(nn.Module):
353
+ def __init__(self, in_channels, key_dim, value_dim, n_heads, d_head, depth=1, fuser_type=None, use_checkpoint=True):
354
+ super().__init__()
355
+ self.in_channels = in_channels
356
+ query_dim = n_heads * d_head
357
+ self.norm = Normalize(in_channels)
358
+
359
+
360
+ self.proj_in = nn.Conv2d(in_channels,
361
+ query_dim,
362
+ kernel_size=1,
363
+ stride=1,
364
+ padding=0)
365
+
366
+ self.transformer_blocks = nn.ModuleList(
367
+ [BasicTransformerBlock(query_dim, key_dim, value_dim, n_heads, d_head, fuser_type, use_checkpoint=use_checkpoint)
368
+ for d in range(depth)]
369
+ )
370
+
371
+ self.proj_out = zero_module(nn.Conv2d(query_dim,
372
+ in_channels,
373
+ kernel_size=1,
374
+ stride=1,
375
+ padding=0))
376
+
377
+ def forward(self, x, context, objs):
378
+ b, c, h, w = x.shape
379
+ x_in = x
380
+ x = self.norm(x)
381
+ x = self.proj_in(x)
382
+ x = rearrange(x, 'b c h w -> b (h w) c')
383
+ for block in self.transformer_blocks:
384
+ x = block(x, context, objs)
385
+ x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w)
386
+ x = self.proj_out(x)
387
+ return x + x_in
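
A minimal sketch exercising the gated self-attention fuser defined above (the dimensions are illustrative assumptions):

    import torch

    fuser = GatedSelfAttentionDense(query_dim=320, context_dim=768, n_heads=8, d_head=40)

    x = torch.randn(2, 64 * 64, 320)   # flattened visual tokens: B x (H*W) x query_dim
    objs = torch.randn(2, 30, 768)     # grounding tokens: B x N_boxes x context_dim

    out = fuser(x, objs)               # same shape as x
    # alpha_attn and alpha_dense are initialized to 0, so the fuser starts as an identity mapping
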
gligen/ldm/modules/diffusionmodules/__init__.py ADDED
File without changes
gligen/ldm/modules/diffusionmodules/model.py ADDED
@@ -0,0 +1,835 @@
1
+ # pytorch_diffusion + derived encoder decoder
2
+ import math
3
+ import torch
4
+ import torch.nn as nn
5
+ import numpy as np
6
+ from einops import rearrange
7
+
8
+ from ldm.util import instantiate_from_config
9
+ from ldm.modules.attention import LinearAttention
+ from ldm.modules.distributions.distributions import DiagonalGaussianDistribution  # used by FirstStagePostProcessor.encode_with_pretrained
10
+
11
+
12
+ def get_timestep_embedding(timesteps, embedding_dim):
13
+ """
14
+ This matches the implementation in Denoising Diffusion Probabilistic Models:
15
+ From Fairseq.
16
+ Build sinusoidal embeddings.
17
+ This matches the implementation in tensor2tensor, but differs slightly
18
+ from the description in Section 3.5 of "Attention Is All You Need".
19
+ """
20
+ assert len(timesteps.shape) == 1
21
+
22
+ half_dim = embedding_dim // 2
23
+ emb = math.log(10000) / (half_dim - 1)
24
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
25
+ emb = emb.to(device=timesteps.device)
26
+ emb = timesteps.float()[:, None] * emb[None, :]
27
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
28
+ if embedding_dim % 2 == 1: # zero pad
29
+ emb = torch.nn.functional.pad(emb, (0,1,0,0))
30
+ return emb
31
+
32
+
33
+ def nonlinearity(x):
34
+ # swish
35
+ return x*torch.sigmoid(x)
36
+
37
+
38
+ def Normalize(in_channels, num_groups=32):
39
+ return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)
40
+
41
+
42
+ class Upsample(nn.Module):
43
+ def __init__(self, in_channels, with_conv):
44
+ super().__init__()
45
+ self.with_conv = with_conv
46
+ if self.with_conv:
47
+ self.conv = torch.nn.Conv2d(in_channels,
48
+ in_channels,
49
+ kernel_size=3,
50
+ stride=1,
51
+ padding=1)
52
+
53
+ def forward(self, x):
54
+ x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
55
+ if self.with_conv:
56
+ x = self.conv(x)
57
+ return x
58
+
59
+
60
+ class Downsample(nn.Module):
61
+ def __init__(self, in_channels, with_conv):
62
+ super().__init__()
63
+ self.with_conv = with_conv
64
+ if self.with_conv:
65
+ # no asymmetric padding in torch conv, must do it ourselves
66
+ self.conv = torch.nn.Conv2d(in_channels,
67
+ in_channels,
68
+ kernel_size=3,
69
+ stride=2,
70
+ padding=0)
71
+
72
+ def forward(self, x):
73
+ if self.with_conv:
74
+ pad = (0,1,0,1)
75
+ x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
76
+ x = self.conv(x)
77
+ else:
78
+ x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
79
+ return x
80
+
81
+
82
+ class ResnetBlock(nn.Module):
83
+ def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
84
+ dropout, temb_channels=512):
85
+ super().__init__()
86
+ self.in_channels = in_channels
87
+ out_channels = in_channels if out_channels is None else out_channels
88
+ self.out_channels = out_channels
89
+ self.use_conv_shortcut = conv_shortcut
90
+
91
+ self.norm1 = Normalize(in_channels)
92
+ self.conv1 = torch.nn.Conv2d(in_channels,
93
+ out_channels,
94
+ kernel_size=3,
95
+ stride=1,
96
+ padding=1)
97
+ if temb_channels > 0:
98
+ self.temb_proj = torch.nn.Linear(temb_channels,
99
+ out_channels)
100
+ self.norm2 = Normalize(out_channels)
101
+ self.dropout = torch.nn.Dropout(dropout)
102
+ self.conv2 = torch.nn.Conv2d(out_channels,
103
+ out_channels,
104
+ kernel_size=3,
105
+ stride=1,
106
+ padding=1)
107
+ if self.in_channels != self.out_channels:
108
+ if self.use_conv_shortcut:
109
+ self.conv_shortcut = torch.nn.Conv2d(in_channels,
110
+ out_channels,
111
+ kernel_size=3,
112
+ stride=1,
113
+ padding=1)
114
+ else:
115
+ self.nin_shortcut = torch.nn.Conv2d(in_channels,
116
+ out_channels,
117
+ kernel_size=1,
118
+ stride=1,
119
+ padding=0)
120
+
121
+ def forward(self, x, temb):
122
+ h = x
123
+ h = self.norm1(h)
124
+ h = nonlinearity(h)
125
+ h = self.conv1(h)
126
+
127
+ if temb is not None:
128
+ h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None]
129
+
130
+ h = self.norm2(h)
131
+ h = nonlinearity(h)
132
+ h = self.dropout(h)
133
+ h = self.conv2(h)
134
+
135
+ if self.in_channels != self.out_channels:
136
+ if self.use_conv_shortcut:
137
+ x = self.conv_shortcut(x)
138
+ else:
139
+ x = self.nin_shortcut(x)
140
+
141
+ return x+h
142
+
143
+
144
+ class LinAttnBlock(LinearAttention):
145
+ """to match AttnBlock usage"""
146
+ def __init__(self, in_channels):
147
+ super().__init__(dim=in_channels, heads=1, dim_head=in_channels)
148
+
149
+
150
+ class AttnBlock(nn.Module):
151
+ def __init__(self, in_channels):
152
+ super().__init__()
153
+ self.in_channels = in_channels
154
+
155
+ self.norm = Normalize(in_channels)
156
+ self.q = torch.nn.Conv2d(in_channels,
157
+ in_channels,
158
+ kernel_size=1,
159
+ stride=1,
160
+ padding=0)
161
+ self.k = torch.nn.Conv2d(in_channels,
162
+ in_channels,
163
+ kernel_size=1,
164
+ stride=1,
165
+ padding=0)
166
+ self.v = torch.nn.Conv2d(in_channels,
167
+ in_channels,
168
+ kernel_size=1,
169
+ stride=1,
170
+ padding=0)
171
+ self.proj_out = torch.nn.Conv2d(in_channels,
172
+ in_channels,
173
+ kernel_size=1,
174
+ stride=1,
175
+ padding=0)
176
+
177
+
178
+ def forward(self, x):
179
+ h_ = x
180
+ h_ = self.norm(h_)
181
+ q = self.q(h_)
182
+ k = self.k(h_)
183
+ v = self.v(h_)
184
+
185
+ # compute attention
186
+ b,c,h,w = q.shape
187
+ q = q.reshape(b,c,h*w)
188
+ q = q.permute(0,2,1) # b,hw,c
189
+ k = k.reshape(b,c,h*w) # b,c,hw
190
+ w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
191
+ w_ = w_ * (int(c)**(-0.5))
192
+ w_ = torch.nn.functional.softmax(w_, dim=2)
193
+
194
+ # attend to values
195
+ v = v.reshape(b,c,h*w)
196
+ w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q)
197
+ h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
198
+ h_ = h_.reshape(b,c,h,w)
199
+
200
+ h_ = self.proj_out(h_)
201
+
202
+ return x+h_
203
+
204
+
205
+ def make_attn(in_channels, attn_type="vanilla"):
206
+ assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown'
207
+ print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
208
+ if attn_type == "vanilla":
209
+ return AttnBlock(in_channels)
210
+ elif attn_type == "none":
211
+ return nn.Identity(in_channels)
212
+ else:
213
+ return LinAttnBlock(in_channels)
214
+
215
+
216
+ class Model(nn.Module):
217
+ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
218
+ attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
219
+ resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"):
220
+ super().__init__()
221
+ if use_linear_attn: attn_type = "linear"
222
+ self.ch = ch
223
+ self.temb_ch = self.ch*4
224
+ self.num_resolutions = len(ch_mult)
225
+ self.num_res_blocks = num_res_blocks
226
+ self.resolution = resolution
227
+ self.in_channels = in_channels
228
+
229
+ self.use_timestep = use_timestep
230
+ if self.use_timestep:
231
+ # timestep embedding
232
+ self.temb = nn.Module()
233
+ self.temb.dense = nn.ModuleList([
234
+ torch.nn.Linear(self.ch,
235
+ self.temb_ch),
236
+ torch.nn.Linear(self.temb_ch,
237
+ self.temb_ch),
238
+ ])
239
+
240
+ # downsampling
241
+ self.conv_in = torch.nn.Conv2d(in_channels,
242
+ self.ch,
243
+ kernel_size=3,
244
+ stride=1,
245
+ padding=1)
246
+
247
+ curr_res = resolution
248
+ in_ch_mult = (1,)+tuple(ch_mult)
249
+ self.down = nn.ModuleList()
250
+ for i_level in range(self.num_resolutions):
251
+ block = nn.ModuleList()
252
+ attn = nn.ModuleList()
253
+ block_in = ch*in_ch_mult[i_level]
254
+ block_out = ch*ch_mult[i_level]
255
+ for i_block in range(self.num_res_blocks):
256
+ block.append(ResnetBlock(in_channels=block_in,
257
+ out_channels=block_out,
258
+ temb_channels=self.temb_ch,
259
+ dropout=dropout))
260
+ block_in = block_out
261
+ if curr_res in attn_resolutions:
262
+ attn.append(make_attn(block_in, attn_type=attn_type))
263
+ down = nn.Module()
264
+ down.block = block
265
+ down.attn = attn
266
+ if i_level != self.num_resolutions-1:
267
+ down.downsample = Downsample(block_in, resamp_with_conv)
268
+ curr_res = curr_res // 2
269
+ self.down.append(down)
270
+
271
+ # middle
272
+ self.mid = nn.Module()
273
+ self.mid.block_1 = ResnetBlock(in_channels=block_in,
274
+ out_channels=block_in,
275
+ temb_channels=self.temb_ch,
276
+ dropout=dropout)
277
+ self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
278
+ self.mid.block_2 = ResnetBlock(in_channels=block_in,
279
+ out_channels=block_in,
280
+ temb_channels=self.temb_ch,
281
+ dropout=dropout)
282
+
283
+ # upsampling
284
+ self.up = nn.ModuleList()
285
+ for i_level in reversed(range(self.num_resolutions)):
286
+ block = nn.ModuleList()
287
+ attn = nn.ModuleList()
288
+ block_out = ch*ch_mult[i_level]
289
+ skip_in = ch*ch_mult[i_level]
290
+ for i_block in range(self.num_res_blocks+1):
291
+ if i_block == self.num_res_blocks:
292
+ skip_in = ch*in_ch_mult[i_level]
293
+ block.append(ResnetBlock(in_channels=block_in+skip_in,
294
+ out_channels=block_out,
295
+ temb_channels=self.temb_ch,
296
+ dropout=dropout))
297
+ block_in = block_out
298
+ if curr_res in attn_resolutions:
299
+ attn.append(make_attn(block_in, attn_type=attn_type))
300
+ up = nn.Module()
301
+ up.block = block
302
+ up.attn = attn
303
+ if i_level != 0:
304
+ up.upsample = Upsample(block_in, resamp_with_conv)
305
+ curr_res = curr_res * 2
306
+ self.up.insert(0, up) # prepend to get consistent order
307
+
308
+ # end
309
+ self.norm_out = Normalize(block_in)
310
+ self.conv_out = torch.nn.Conv2d(block_in,
311
+ out_ch,
312
+ kernel_size=3,
313
+ stride=1,
314
+ padding=1)
315
+
316
+ def forward(self, x, t=None, context=None):
317
+ #assert x.shape[2] == x.shape[3] == self.resolution
318
+ if context is not None:
319
+ # assume aligned context, cat along channel axis
320
+ x = torch.cat((x, context), dim=1)
321
+ if self.use_timestep:
322
+ # timestep embedding
323
+ assert t is not None
324
+ temb = get_timestep_embedding(t, self.ch)
325
+ temb = self.temb.dense[0](temb)
326
+ temb = nonlinearity(temb)
327
+ temb = self.temb.dense[1](temb)
328
+ else:
329
+ temb = None
330
+
331
+ # downsampling
332
+ hs = [self.conv_in(x)]
333
+ for i_level in range(self.num_resolutions):
334
+ for i_block in range(self.num_res_blocks):
335
+ h = self.down[i_level].block[i_block](hs[-1], temb)
336
+ if len(self.down[i_level].attn) > 0:
337
+ h = self.down[i_level].attn[i_block](h)
338
+ hs.append(h)
339
+ if i_level != self.num_resolutions-1:
340
+ hs.append(self.down[i_level].downsample(hs[-1]))
341
+
342
+ # middle
343
+ h = hs[-1]
344
+ h = self.mid.block_1(h, temb)
345
+ h = self.mid.attn_1(h)
346
+ h = self.mid.block_2(h, temb)
347
+
348
+ # upsampling
349
+ for i_level in reversed(range(self.num_resolutions)):
350
+ for i_block in range(self.num_res_blocks+1):
351
+ h = self.up[i_level].block[i_block](
352
+ torch.cat([h, hs.pop()], dim=1), temb)
353
+ if len(self.up[i_level].attn) > 0:
354
+ h = self.up[i_level].attn[i_block](h)
355
+ if i_level != 0:
356
+ h = self.up[i_level].upsample(h)
357
+
358
+ # end
359
+ h = self.norm_out(h)
360
+ h = nonlinearity(h)
361
+ h = self.conv_out(h)
362
+ return h
363
+
364
+ def get_last_layer(self):
365
+ return self.conv_out.weight
366
+
367
+
368
+ class Encoder(nn.Module):
369
+ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
370
+ attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
371
+ resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla",
372
+ **ignore_kwargs):
373
+ super().__init__()
374
+ if use_linear_attn: attn_type = "linear"
375
+ self.ch = ch
376
+ self.temb_ch = 0
377
+ self.num_resolutions = len(ch_mult)
378
+ self.num_res_blocks = num_res_blocks
379
+ self.resolution = resolution
380
+ self.in_channels = in_channels
381
+
382
+ # downsampling
383
+ self.conv_in = torch.nn.Conv2d(in_channels,
384
+ self.ch,
385
+ kernel_size=3,
386
+ stride=1,
387
+ padding=1)
388
+
389
+ curr_res = resolution
390
+ in_ch_mult = (1,)+tuple(ch_mult)
391
+ self.in_ch_mult = in_ch_mult
392
+ self.down = nn.ModuleList()
393
+ for i_level in range(self.num_resolutions):
394
+ block = nn.ModuleList()
395
+ attn = nn.ModuleList()
396
+ block_in = ch*in_ch_mult[i_level]
397
+ block_out = ch*ch_mult[i_level]
398
+ for i_block in range(self.num_res_blocks):
399
+ block.append(ResnetBlock(in_channels=block_in,
400
+ out_channels=block_out,
401
+ temb_channels=self.temb_ch,
402
+ dropout=dropout))
403
+ block_in = block_out
404
+ if curr_res in attn_resolutions:
405
+ attn.append(make_attn(block_in, attn_type=attn_type))
406
+ down = nn.Module()
407
+ down.block = block
408
+ down.attn = attn
409
+ if i_level != self.num_resolutions-1:
410
+ down.downsample = Downsample(block_in, resamp_with_conv)
411
+ curr_res = curr_res // 2
412
+ self.down.append(down)
413
+
414
+ # middle
415
+ self.mid = nn.Module()
416
+ self.mid.block_1 = ResnetBlock(in_channels=block_in,
417
+ out_channels=block_in,
418
+ temb_channels=self.temb_ch,
419
+ dropout=dropout)
420
+ self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
421
+ self.mid.block_2 = ResnetBlock(in_channels=block_in,
422
+ out_channels=block_in,
423
+ temb_channels=self.temb_ch,
424
+ dropout=dropout)
425
+
426
+ # end
427
+ self.norm_out = Normalize(block_in)
428
+ self.conv_out = torch.nn.Conv2d(block_in,
429
+ 2*z_channels if double_z else z_channels,
430
+ kernel_size=3,
431
+ stride=1,
432
+ padding=1)
433
+
434
+ def forward(self, x):
435
+ # timestep embedding
436
+ temb = None
437
+
438
+ # downsampling
439
+ hs = [self.conv_in(x)]
440
+ for i_level in range(self.num_resolutions):
441
+ for i_block in range(self.num_res_blocks):
442
+ h = self.down[i_level].block[i_block](hs[-1], temb)
443
+ if len(self.down[i_level].attn) > 0:
444
+ h = self.down[i_level].attn[i_block](h)
445
+ hs.append(h)
446
+ if i_level != self.num_resolutions-1:
447
+ hs.append(self.down[i_level].downsample(hs[-1]))
448
+
449
+ # middle
450
+ h = hs[-1]
451
+ h = self.mid.block_1(h, temb)
452
+ h = self.mid.attn_1(h)
453
+ h = self.mid.block_2(h, temb)
454
+
455
+ # end
456
+ h = self.norm_out(h)
457
+ h = nonlinearity(h)
458
+ h = self.conv_out(h)
459
+ return h
460
+
461
+
462
+ class Decoder(nn.Module):
463
+ def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
464
+ attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
465
+ resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
466
+ attn_type="vanilla", **ignorekwargs):
467
+ super().__init__()
468
+ if use_linear_attn: attn_type = "linear"
469
+ self.ch = ch
470
+ self.temb_ch = 0
471
+ self.num_resolutions = len(ch_mult)
472
+ self.num_res_blocks = num_res_blocks
473
+ self.resolution = resolution
474
+ self.in_channels = in_channels
475
+ self.give_pre_end = give_pre_end
476
+ self.tanh_out = tanh_out
477
+
478
+ # compute in_ch_mult, block_in and curr_res at lowest res
479
+ in_ch_mult = (1,)+tuple(ch_mult)
480
+ block_in = ch*ch_mult[self.num_resolutions-1]
481
+ curr_res = resolution // 2**(self.num_resolutions-1)
482
+ self.z_shape = (1,z_channels,curr_res,curr_res)
483
+ print("Working with z of shape {} = {} dimensions.".format(
484
+ self.z_shape, np.prod(self.z_shape)))
485
+
486
+ # z to block_in
487
+ self.conv_in = torch.nn.Conv2d(z_channels,
488
+ block_in,
489
+ kernel_size=3,
490
+ stride=1,
491
+ padding=1)
492
+
493
+ # middle
494
+ self.mid = nn.Module()
495
+ self.mid.block_1 = ResnetBlock(in_channels=block_in,
496
+ out_channels=block_in,
497
+ temb_channels=self.temb_ch,
498
+ dropout=dropout)
499
+ self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
500
+ self.mid.block_2 = ResnetBlock(in_channels=block_in,
501
+ out_channels=block_in,
502
+ temb_channels=self.temb_ch,
503
+ dropout=dropout)
504
+
505
+ # upsampling
506
+ self.up = nn.ModuleList()
507
+ for i_level in reversed(range(self.num_resolutions)):
508
+ block = nn.ModuleList()
509
+ attn = nn.ModuleList()
510
+ block_out = ch*ch_mult[i_level]
511
+ for i_block in range(self.num_res_blocks+1):
512
+ block.append(ResnetBlock(in_channels=block_in,
513
+ out_channels=block_out,
514
+ temb_channels=self.temb_ch,
515
+ dropout=dropout))
516
+ block_in = block_out
517
+ if curr_res in attn_resolutions:
518
+ attn.append(make_attn(block_in, attn_type=attn_type))
519
+ up = nn.Module()
520
+ up.block = block
521
+ up.attn = attn
522
+ if i_level != 0:
523
+ up.upsample = Upsample(block_in, resamp_with_conv)
524
+ curr_res = curr_res * 2
525
+ self.up.insert(0, up) # prepend to get consistent order
526
+
527
+ # end
528
+ self.norm_out = Normalize(block_in)
529
+ self.conv_out = torch.nn.Conv2d(block_in,
530
+ out_ch,
531
+ kernel_size=3,
532
+ stride=1,
533
+ padding=1)
534
+
535
+ def forward(self, z):
536
+ #assert z.shape[1:] == self.z_shape[1:]
537
+ self.last_z_shape = z.shape
538
+
539
+ # timestep embedding
540
+ temb = None
541
+
542
+ # z to block_in
543
+ h = self.conv_in(z)
544
+
545
+ # middle
546
+ h = self.mid.block_1(h, temb)
547
+ h = self.mid.attn_1(h)
548
+ h = self.mid.block_2(h, temb)
549
+
550
+ # upsampling
551
+ for i_level in reversed(range(self.num_resolutions)):
552
+ for i_block in range(self.num_res_blocks+1):
553
+ h = self.up[i_level].block[i_block](h, temb)
554
+ if len(self.up[i_level].attn) > 0:
555
+ h = self.up[i_level].attn[i_block](h)
556
+ if i_level != 0:
557
+ h = self.up[i_level].upsample(h)
558
+
559
+ # end
560
+ if self.give_pre_end:
561
+ return h
562
+
563
+ h = self.norm_out(h)
564
+ h = nonlinearity(h)
565
+ h = self.conv_out(h)
566
+ if self.tanh_out:
567
+ h = torch.tanh(h)
568
+ return h
569
+
570
+
571
+ class SimpleDecoder(nn.Module):
572
+ def __init__(self, in_channels, out_channels, *args, **kwargs):
573
+ super().__init__()
574
+ self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1),
575
+ ResnetBlock(in_channels=in_channels,
576
+ out_channels=2 * in_channels,
577
+ temb_channels=0, dropout=0.0),
578
+ ResnetBlock(in_channels=2 * in_channels,
579
+ out_channels=4 * in_channels,
580
+ temb_channels=0, dropout=0.0),
581
+ ResnetBlock(in_channels=4 * in_channels,
582
+ out_channels=2 * in_channels,
583
+ temb_channels=0, dropout=0.0),
584
+ nn.Conv2d(2*in_channels, in_channels, 1),
585
+ Upsample(in_channels, with_conv=True)])
586
+ # end
587
+ self.norm_out = Normalize(in_channels)
588
+ self.conv_out = torch.nn.Conv2d(in_channels,
589
+ out_channels,
590
+ kernel_size=3,
591
+ stride=1,
592
+ padding=1)
593
+
594
+ def forward(self, x):
595
+ for i, layer in enumerate(self.model):
596
+ if i in [1,2,3]:
597
+ x = layer(x, None)
598
+ else:
599
+ x = layer(x)
600
+
601
+ h = self.norm_out(x)
602
+ h = nonlinearity(h)
603
+ x = self.conv_out(h)
604
+ return x
605
+
606
+
607
+ class UpsampleDecoder(nn.Module):
608
+ def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution,
609
+ ch_mult=(2,2), dropout=0.0):
610
+ super().__init__()
611
+ # upsampling
612
+ self.temb_ch = 0
613
+ self.num_resolutions = len(ch_mult)
614
+ self.num_res_blocks = num_res_blocks
615
+ block_in = in_channels
616
+ curr_res = resolution // 2 ** (self.num_resolutions - 1)
617
+ self.res_blocks = nn.ModuleList()
618
+ self.upsample_blocks = nn.ModuleList()
619
+ for i_level in range(self.num_resolutions):
620
+ res_block = []
621
+ block_out = ch * ch_mult[i_level]
622
+ for i_block in range(self.num_res_blocks + 1):
623
+ res_block.append(ResnetBlock(in_channels=block_in,
624
+ out_channels=block_out,
625
+ temb_channels=self.temb_ch,
626
+ dropout=dropout))
627
+ block_in = block_out
628
+ self.res_blocks.append(nn.ModuleList(res_block))
629
+ if i_level != self.num_resolutions - 1:
630
+ self.upsample_blocks.append(Upsample(block_in, True))
631
+ curr_res = curr_res * 2
632
+
633
+ # end
634
+ self.norm_out = Normalize(block_in)
635
+ self.conv_out = torch.nn.Conv2d(block_in,
636
+ out_channels,
637
+ kernel_size=3,
638
+ stride=1,
639
+ padding=1)
640
+
641
+ def forward(self, x):
642
+ # upsampling
643
+ h = x
644
+ for k, i_level in enumerate(range(self.num_resolutions)):
645
+ for i_block in range(self.num_res_blocks + 1):
646
+ h = self.res_blocks[i_level][i_block](h, None)
647
+ if i_level != self.num_resolutions - 1:
648
+ h = self.upsample_blocks[k](h)
649
+ h = self.norm_out(h)
650
+ h = nonlinearity(h)
651
+ h = self.conv_out(h)
652
+ return h
653
+
654
+
655
+ class LatentRescaler(nn.Module):
656
+ def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2):
657
+ super().__init__()
658
+ # residual block, interpolate, residual block
659
+ self.factor = factor
660
+ self.conv_in = nn.Conv2d(in_channels,
661
+ mid_channels,
662
+ kernel_size=3,
663
+ stride=1,
664
+ padding=1)
665
+ self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
666
+ out_channels=mid_channels,
667
+ temb_channels=0,
668
+ dropout=0.0) for _ in range(depth)])
669
+ self.attn = AttnBlock(mid_channels)
670
+ self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
671
+ out_channels=mid_channels,
672
+ temb_channels=0,
673
+ dropout=0.0) for _ in range(depth)])
674
+
675
+ self.conv_out = nn.Conv2d(mid_channels,
676
+ out_channels,
677
+ kernel_size=1,
678
+ )
679
+
680
+ def forward(self, x):
681
+ x = self.conv_in(x)
682
+ for block in self.res_block1:
683
+ x = block(x, None)
684
+ x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor))))
685
+ x = self.attn(x)
686
+ for block in self.res_block2:
687
+ x = block(x, None)
688
+ x = self.conv_out(x)
689
+ return x
690
+
691
+
692
+ class MergedRescaleEncoder(nn.Module):
693
+ def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks,
694
+ attn_resolutions, dropout=0.0, resamp_with_conv=True,
695
+ ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1):
696
+ super().__init__()
697
+ intermediate_chn = ch * ch_mult[-1]
698
+ self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult,
699
+ z_channels=intermediate_chn, double_z=False, resolution=resolution,
700
+ attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv,
701
+ out_ch=None)
702
+ self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn,
703
+ mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth)
704
+
705
+ def forward(self, x):
706
+ x = self.encoder(x)
707
+ x = self.rescaler(x)
708
+ return x
709
+
710
+
711
+ class MergedRescaleDecoder(nn.Module):
712
+ def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8),
713
+ dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1):
714
+ super().__init__()
715
+ tmp_chn = z_channels*ch_mult[-1]
716
+ self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout,
717
+ resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks,
718
+ ch_mult=ch_mult, resolution=resolution, ch=ch)
719
+ self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn,
720
+ out_channels=tmp_chn, depth=rescale_module_depth)
721
+
722
+ def forward(self, x):
723
+ x = self.rescaler(x)
724
+ x = self.decoder(x)
725
+ return x
726
+
727
+
728
+ class Upsampler(nn.Module):
729
+ def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2):
730
+ super().__init__()
731
+ assert out_size >= in_size
732
+ num_blocks = int(np.log2(out_size//in_size))+1
733
+ factor_up = 1.+ (out_size % in_size)
734
+ print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}")
735
+ self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels,
736
+ out_channels=in_channels)
737
+ self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2,
738
+ attn_resolutions=[], in_channels=None, ch=in_channels,
739
+ ch_mult=[ch_mult for _ in range(num_blocks)])
740
+
741
+ def forward(self, x):
742
+ x = self.rescaler(x)
743
+ x = self.decoder(x)
744
+ return x
745
+
746
+
747
+ class Resize(nn.Module):
748
+ def __init__(self, in_channels=None, learned=False, mode="bilinear"):
749
+ super().__init__()
750
+ self.with_conv = learned
751
+ self.mode = mode
752
+ if self.with_conv:
753
+ print(f"Note: {self.__class__.__name} uses learned downsampling and will ignore the fixed {mode} mode")
754
+ raise NotImplementedError()
755
+ assert in_channels is not None
756
+ # no asymmetric padding in torch conv, must do it ourselves
757
+ self.conv = torch.nn.Conv2d(in_channels,
758
+ in_channels,
759
+ kernel_size=4,
760
+ stride=2,
761
+ padding=1)
762
+
763
+ def forward(self, x, scale_factor=1.0):
764
+ if scale_factor==1.0:
765
+ return x
766
+ else:
767
+ x = torch.nn.functional.interpolate(x, mode=self.mode, align_corners=False, scale_factor=scale_factor)
768
+ return x
769
+
770
+ class FirstStagePostProcessor(nn.Module):
771
+
772
+ def __init__(self, ch_mult:list, in_channels,
773
+ pretrained_model:nn.Module=None,
774
+ reshape=False,
775
+ n_channels=None,
776
+ dropout=0.,
777
+ pretrained_config=None):
778
+ super().__init__()
779
+ if pretrained_config is None:
780
+ assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None'
781
+ self.pretrained_model = pretrained_model
782
+ else:
783
+ assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None'
784
+ self.instantiate_pretrained(pretrained_config)
785
+
786
+ self.do_reshape = reshape
787
+
788
+ if n_channels is None:
789
+ n_channels = self.pretrained_model.encoder.ch
790
+
791
+ self.proj_norm = Normalize(in_channels,num_groups=in_channels//2)
792
+ self.proj = nn.Conv2d(in_channels,n_channels,kernel_size=3,
793
+ stride=1,padding=1)
794
+
795
+ blocks = []
796
+ downs = []
797
+ ch_in = n_channels
798
+ for m in ch_mult:
799
+ blocks.append(ResnetBlock(in_channels=ch_in,out_channels=m*n_channels,dropout=dropout))
800
+ ch_in = m * n_channels
801
+ downs.append(Downsample(ch_in, with_conv=False))
802
+
803
+ self.model = nn.ModuleList(blocks)
804
+ self.downsampler = nn.ModuleList(downs)
805
+
806
+
807
+ def instantiate_pretrained(self, config):
808
+ model = instantiate_from_config(config)
809
+ self.pretrained_model = model.eval()
810
+ # self.pretrained_model.train = False
811
+ for param in self.pretrained_model.parameters():
812
+ param.requires_grad = False
813
+
814
+
815
+ @torch.no_grad()
816
+ def encode_with_pretrained(self,x):
817
+ c = self.pretrained_model.encode(x)
818
+ if isinstance(c, DiagonalGaussianDistribution):
819
+ c = c.mode()
820
+ return c
821
+
822
+ def forward(self,x):
823
+ z_fs = self.encode_with_pretrained(x)
824
+ z = self.proj_norm(z_fs)
825
+ z = self.proj(z)
826
+ z = nonlinearity(z)
827
+
828
+ for submodel, downmodel in zip(self.model,self.downsampler):
829
+ z = submodel(z,temb=None)
830
+ z = downmodel(z)
831
+
832
+ if self.do_reshape:
833
+ z = rearrange(z,'b c h w -> b (h w) c')
834
+ return z
835
+
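
A minimal sketch of an Encoder/Decoder round trip with a toy configuration (the hyper-parameters below are illustrative assumptions, not the shipped autoencoder config):

    import torch

    cfg = dict(ch=32, out_ch=3, ch_mult=(1, 2), num_res_blocks=1,
               attn_resolutions=[], dropout=0.0, in_channels=3,
               resolution=64, z_channels=4)

    enc = Encoder(double_z=False, **cfg)
    dec = Decoder(**cfg)

    x = torch.randn(1, 3, 64, 64)
    z = enc(x)        # 1 x z_channels x 32 x 32 (one downsampling step for ch_mult of length 2)
    x_rec = dec(z)    # back to 1 x out_ch x 64 x 64
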
gligen/ldm/modules/diffusionmodules/openaimodel.py ADDED
@@ -0,0 +1,489 @@
1
+ from abc import abstractmethod
2
+ from functools import partial
3
+ import math
4
+
5
+ import numpy as np
6
+ import random
7
+ import torch as th
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+
11
+ from ldm.modules.diffusionmodules.util import (
12
+ conv_nd,
13
+ linear,
14
+ avg_pool_nd,
15
+ zero_module,
16
+ normalization,
17
+ timestep_embedding,
18
+ )
19
+ from ldm.modules.attention import SpatialTransformer
20
+ from torch.utils import checkpoint
21
+
22
+ class TimestepBlock(nn.Module):
23
+ """
24
+ Any module where forward() takes timestep embeddings as a second argument.
25
+ """
26
+
27
+ @abstractmethod
28
+ def forward(self, x, emb):
29
+ """
30
+ Apply the module to `x` given `emb` timestep embeddings.
31
+ """
32
+
33
+
34
+ class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
35
+ """
36
+ A sequential module that passes timestep embeddings to the children that
37
+ support it as an extra input.
38
+ """
39
+
40
+ def forward(self, x, emb, context, objs):
41
+ for layer in self:
42
+ if isinstance(layer, TimestepBlock):
43
+ x = layer(x, emb)
44
+ elif isinstance(layer, SpatialTransformer):
45
+ x = layer(x, context, objs)
46
+ else:
47
+ x = layer(x)
48
+ return x
49
+
50
+
51
+ class Upsample(nn.Module):
52
+ """
53
+ An upsampling layer with an optional convolution.
54
+ :param channels: channels in the inputs and outputs.
55
+ :param use_conv: a bool determining if a convolution is applied.
56
+ :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
57
+ upsampling occurs in the inner-two dimensions.
58
+ """
59
+
60
+ def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
61
+ super().__init__()
62
+ self.channels = channels
63
+ self.out_channels = out_channels or channels
64
+ self.use_conv = use_conv
65
+ self.dims = dims
66
+ if use_conv:
67
+ self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
68
+
69
+ def forward(self, x):
70
+ assert x.shape[1] == self.channels
71
+ if self.dims == 3:
72
+ x = F.interpolate(
73
+ x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
74
+ )
75
+ else:
76
+ x = F.interpolate(x, scale_factor=2, mode="nearest")
77
+ if self.use_conv:
78
+ x = self.conv(x)
79
+ return x
80
+
81
+
82
+
83
+
84
+ class Downsample(nn.Module):
85
+ """
86
+ A downsampling layer with an optional convolution.
87
+ :param channels: channels in the inputs and outputs.
88
+ :param use_conv: a bool determining if a convolution is applied.
89
+ :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
90
+ downsampling occurs in the inner-two dimensions.
91
+ """
92
+
93
+ def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
94
+ super().__init__()
95
+ self.channels = channels
96
+ self.out_channels = out_channels or channels
97
+ self.use_conv = use_conv
98
+ self.dims = dims
99
+ stride = 2 if dims != 3 else (1, 2, 2)
100
+ if use_conv:
101
+ self.op = conv_nd(
102
+ dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
103
+ )
104
+ else:
105
+ assert self.channels == self.out_channels
106
+ self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
107
+
108
+ def forward(self, x):
109
+ assert x.shape[1] == self.channels
110
+ return self.op(x)
111
+
112
+
113
+ class ResBlock(TimestepBlock):
114
+ """
115
+ A residual block that can optionally change the number of channels.
116
+ :param channels: the number of input channels.
117
+ :param emb_channels: the number of timestep embedding channels.
118
+ :param dropout: the rate of dropout.
119
+ :param out_channels: if specified, the number of out channels.
120
+ :param use_conv: if True and out_channels is specified, use a spatial
121
+ convolution instead of a smaller 1x1 convolution to change the
122
+ channels in the skip connection.
123
+ :param dims: determines if the signal is 1D, 2D, or 3D.
124
+ :param use_checkpoint: if True, use gradient checkpointing on this module.
125
+ :param up: if True, use this block for upsampling.
126
+ :param down: if True, use this block for downsampling.
127
+ """
128
+
129
+ def __init__(
130
+ self,
131
+ channels,
132
+ emb_channels,
133
+ dropout,
134
+ out_channels=None,
135
+ use_conv=False,
136
+ use_scale_shift_norm=False,
137
+ dims=2,
138
+ use_checkpoint=False,
139
+ up=False,
140
+ down=False,
141
+ ):
142
+ super().__init__()
143
+ self.channels = channels
144
+ self.emb_channels = emb_channels
145
+ self.dropout = dropout
146
+ self.out_channels = out_channels or channels
147
+ self.use_conv = use_conv
148
+ self.use_checkpoint = use_checkpoint
149
+ self.use_scale_shift_norm = use_scale_shift_norm
150
+
151
+ self.in_layers = nn.Sequential(
152
+ normalization(channels),
153
+ nn.SiLU(),
154
+ conv_nd(dims, channels, self.out_channels, 3, padding=1),
155
+ )
156
+
157
+ self.updown = up or down
158
+
159
+ if up:
160
+ self.h_upd = Upsample(channels, False, dims)
161
+ self.x_upd = Upsample(channels, False, dims)
162
+ elif down:
163
+ self.h_upd = Downsample(channels, False, dims)
164
+ self.x_upd = Downsample(channels, False, dims)
165
+ else:
166
+ self.h_upd = self.x_upd = nn.Identity()
167
+
168
+ self.emb_layers = nn.Sequential(
169
+ nn.SiLU(),
170
+ linear(
171
+ emb_channels,
172
+ 2 * self.out_channels if use_scale_shift_norm else self.out_channels,
173
+ ),
174
+ )
175
+ self.out_layers = nn.Sequential(
176
+ normalization(self.out_channels),
177
+ nn.SiLU(),
178
+ nn.Dropout(p=dropout),
179
+ zero_module(
180
+ conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
181
+ ),
182
+ )
183
+
184
+ if self.out_channels == channels:
185
+ self.skip_connection = nn.Identity()
186
+ elif use_conv:
187
+ self.skip_connection = conv_nd(
188
+ dims, channels, self.out_channels, 3, padding=1
189
+ )
190
+ else:
191
+ self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
192
+
193
+ def forward(self, x, emb):
194
+ """
195
+ Apply the block to a Tensor, conditioned on a timestep embedding.
196
+ :param x: an [N x C x ...] Tensor of features.
197
+ :param emb: an [N x emb_channels] Tensor of timestep embeddings.
198
+ :return: an [N x C x ...] Tensor of outputs.
199
+ """
200
+ # return checkpoint(
201
+ # self._forward, (x, emb), self.parameters(), self.use_checkpoint
202
+ # )
203
+ if self.use_checkpoint and x.requires_grad:
204
+ return checkpoint.checkpoint(self._forward, x, emb )
205
+ else:
206
+ return self._forward(x, emb)
207
+
208
+
209
+ def _forward(self, x, emb):
210
+ if self.updown:
211
+ in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
212
+ h = in_rest(x)
213
+ h = self.h_upd(h)
214
+ x = self.x_upd(x)
215
+ h = in_conv(h)
216
+ else:
217
+ h = self.in_layers(x)
218
+ emb_out = self.emb_layers(emb).type(h.dtype)
219
+ while len(emb_out.shape) < len(h.shape):
220
+ emb_out = emb_out[..., None]
221
+ if self.use_scale_shift_norm:
222
+ out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
223
+ scale, shift = th.chunk(emb_out, 2, dim=1)
224
+ h = out_norm(h) * (1 + scale) + shift
225
+ h = out_rest(h)
226
+ else:
227
+ h = h + emb_out
228
+ h = self.out_layers(h)
229
+ return self.skip_connection(x) + h
230
+
231
+
232
+
233
+
234
+ class UNetModel(nn.Module):
235
+ def __init__(
236
+ self,
237
+ image_size,
238
+ in_channels,
239
+ model_channels,
240
+ out_channels,
241
+ num_res_blocks,
242
+ attention_resolutions,
243
+ dropout=0,
244
+ channel_mult=(1, 2, 4, 8),
245
+ conv_resample=True,
246
+ dims=2,
247
+ use_checkpoint=False,
248
+ num_heads=8,
249
+ use_scale_shift_norm=False,
250
+ transformer_depth=1,
251
+ positive_len = 768, # length of the pre-processed embedding for each 'obj/box'
252
+ context_dim=None,
253
+ fuser_type = None,
254
+ is_inpaint = False,
255
+ is_style = False,
256
+ ):
257
+ super().__init__()
258
+
259
+ self.image_size = image_size
260
+ self.in_channels = in_channels
261
+ self.model_channels = model_channels
262
+ self.out_channels = out_channels
263
+ self.num_res_blocks = num_res_blocks
264
+ self.attention_resolutions = attention_resolutions
265
+ self.dropout = dropout
266
+ self.channel_mult = channel_mult
267
+ self.conv_resample = conv_resample
268
+ self.use_checkpoint = use_checkpoint
269
+ self.num_heads = num_heads
270
+ self.positive_len = positive_len
271
+ self.context_dim = context_dim
272
+ self.fuser_type = fuser_type
273
+ self.is_inpaint = is_inpaint
274
+ self.is_style = is_style
275
+ self.use_o2 = False # This may be set to True externally if o2 is used during training
276
+ assert fuser_type in ["gatedSA", "gatedCA"]
277
+
278
+
279
+ time_embed_dim = model_channels * 4
280
+ self.time_embed = nn.Sequential(
281
+ linear(model_channels, time_embed_dim),
282
+ nn.SiLU(),
283
+ linear(time_embed_dim, time_embed_dim),
284
+ )
285
+
286
+
287
+ total_in_channels = in_channels+in_channels+1 if self.is_inpaint else in_channels
288
+ self.input_blocks = nn.ModuleList([TimestepEmbedSequential(conv_nd(dims, total_in_channels, model_channels, 3, padding=1))])
289
+
290
+ input_block_chans = [model_channels]
291
+ ch = model_channels
292
+ ds = 1
293
+
294
+ # = = = = = = = = = = = = = = = = = = = = Down Branch = = = = = = = = = = = = = = = = = = = = #
295
+ for level, mult in enumerate(channel_mult):
296
+ for _ in range(num_res_blocks):
297
+ layers = [ ResBlock(ch,
298
+ time_embed_dim,
299
+ dropout,
300
+ out_channels=mult * model_channels,
301
+ dims=dims,
302
+ use_checkpoint=use_checkpoint,
303
+ use_scale_shift_norm=use_scale_shift_norm,) ]
304
+
305
+ ch = mult * model_channels
306
+ if ds in attention_resolutions:
307
+ dim_head = ch // num_heads
308
+ layers.append(SpatialTransformer(ch, key_dim=context_dim, value_dim=context_dim, n_heads=num_heads, d_head=dim_head, depth=transformer_depth, fuser_type=fuser_type, use_checkpoint=use_checkpoint))
309
+
310
+ self.input_blocks.append(TimestepEmbedSequential(*layers))
311
+ input_block_chans.append(ch)
312
+
313
+ if level != len(channel_mult) - 1: # no downsampling after the last resolution level
314
+ out_ch = ch
315
+ self.input_blocks.append( TimestepEmbedSequential( Downsample(ch, conv_resample, dims=dims, out_channels=out_ch ) ) )
316
+ ch = out_ch
317
+ input_block_chans.append(ch)
318
+ ds *= 2
319
+ dim_head = ch // num_heads
320
+
321
+ # self.input_blocks = [ C | RT RT D | RT RT D | RT RT D | R R ]
322
+
323
+
324
+ # = = = = = = = = = = = = = = = = = = = = BottleNeck = = = = = = = = = = = = = = = = = = = = #
325
+
326
+ self.middle_block = TimestepEmbedSequential(
327
+ ResBlock(ch,
328
+ time_embed_dim,
329
+ dropout,
330
+ dims=dims,
331
+ use_checkpoint=use_checkpoint,
332
+ use_scale_shift_norm=use_scale_shift_norm),
333
+ SpatialTransformer(ch, key_dim=context_dim, value_dim=context_dim, n_heads=num_heads, d_head=dim_head, depth=transformer_depth, fuser_type=fuser_type, use_checkpoint=use_checkpoint),
334
+ ResBlock(ch,
335
+ time_embed_dim,
336
+ dropout,
337
+ dims=dims,
338
+ use_checkpoint=use_checkpoint,
339
+ use_scale_shift_norm=use_scale_shift_norm))
340
+
341
+
342
+
343
+ # = = = = = = = = = = = = = = = = = = = = Up Branch = = = = = = = = = = = = = = = = = = = = #
344
+
345
+
346
+ self.output_blocks = nn.ModuleList([])
347
+ for level, mult in list(enumerate(channel_mult))[::-1]:
348
+ for i in range(num_res_blocks + 1):
349
+ ich = input_block_chans.pop()
350
+ layers = [ ResBlock(ch + ich,
351
+ time_embed_dim,
352
+ dropout,
353
+ out_channels=model_channels * mult,
354
+ dims=dims,
355
+ use_checkpoint=use_checkpoint,
356
+ use_scale_shift_norm=use_scale_shift_norm) ]
357
+ ch = model_channels * mult
358
+
359
+ if ds in attention_resolutions:
360
+ dim_head = ch // num_heads
361
+ layers.append( SpatialTransformer(ch, key_dim=context_dim, value_dim=context_dim, n_heads=num_heads, d_head=dim_head, depth=transformer_depth, fuser_type=fuser_type, use_checkpoint=use_checkpoint) )
362
+ if level and i == num_res_blocks:
363
+ out_ch = ch
364
+ layers.append( Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) )
365
+ ds //= 2
366
+
367
+ self.output_blocks.append(TimestepEmbedSequential(*layers))
368
+
369
+
370
+ # self.output_blocks = [ R R RU | RT RT RTU | RT RT RTU | RT RT RT ]
371
+
372
+
373
+ self.out = nn.Sequential(
374
+ normalization(ch),
375
+ nn.SiLU(),
376
+ zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
377
+ )
378
+
379
+ if self.is_style:
380
+ from .positionnet_with_image import PositionNet
381
+ else:
382
+ from .positionnet import PositionNet
383
+ self.position_net = PositionNet(positive_len=positive_len, out_dim=context_dim)
384
+
385
+
386
+
387
+
388
+ def forward_position_net(self,input):
389
+ if ("boxes" in input):
390
+ boxes, masks, text_embeddings = input["boxes"], input["masks"], input["text_embeddings"]
391
+ _ , self.max_box, _ = text_embeddings.shape
392
+ else:
393
+ dtype = input["x"].dtype
394
+ batch = input["x"].shape[0]
395
+ device = input["x"].device
396
+ boxes = th.zeros(batch, self.max_box, 4,).type(dtype).to(device)
397
+ masks = th.zeros(batch, self.max_box).type(dtype).to(device)
398
+ text_embeddings = th.zeros(batch, self.max_box, self.positive_len).type(dtype).to(device)
399
+ if self.training and random.random() < 0.1: # random drop for guidance
400
+ boxes, masks, text_embeddings = boxes*0, masks*0, text_embeddings*0
401
+
402
+ objs = self.position_net( boxes, masks, text_embeddings ) # B*N*C
403
+
404
+ return objs
405
+
406
+
407
+
408
+
409
+
410
+ def forward_position_net_with_image(self,input):
411
+
412
+ if ("boxes" in input):
413
+ boxes = input["boxes"]
414
+ masks = input["masks"]
415
+ text_masks = input["text_masks"]
416
+ image_masks = input["image_masks"]
417
+ text_embeddings = input["text_embeddings"]
418
+ image_embeddings = input["image_embeddings"]
419
+ _ , self.max_box, _ = text_embeddings.shape
420
+ else:
421
+ dtype = input["x"].dtype
422
+ batch = input["x"].shape[0]
423
+ device = input["x"].device
424
+ boxes = th.zeros(batch, self.max_box, 4,).type(dtype).to(device)
425
+ masks = th.zeros(batch, self.max_box).type(dtype).to(device)
426
+ text_masks = th.zeros(batch, self.max_box).type(dtype).to(device)
427
+ image_masks = th.zeros(batch, self.max_box).type(dtype).to(device)
428
+ text_embeddings = th.zeros(batch, self.max_box, self.positive_len).type(dtype).to(device)
429
+ image_embeddings = th.zeros(batch, self.max_box, self.positive_len).type(dtype).to(device)
430
+
431
+ if self.training and random.random() < 0.1: # random drop for guidance
432
+ boxes = boxes*0
433
+ masks = masks*0
434
+ text_masks = text_masks*0
435
+ image_masks = image_masks*0
436
+ text_embeddings = text_embeddings*0
437
+ image_embeddings = image_embeddings*0
438
+
439
+ objs = self.position_net( boxes, masks, text_masks, image_masks, text_embeddings, image_embeddings ) # B*N*C
440
+
441
+ return objs
442
+
443
+
444
+
445
+
446
+
447
+ def forward(self, input):
448
+
449
+ if self.is_style:
450
+ objs = self.forward_position_net_with_image(input)
451
+ else:
452
+ objs = self.forward_position_net(input)
453
+
454
+
455
+ hs = []
456
+
457
+ t_emb = timestep_embedding(input["timesteps"], self.model_channels, repeat_only=False)
458
+ if self.use_o2:
459
+ t_emb = t_emb.to(th.float16) # not sure why apex will not cast this
460
+ emb = self.time_embed(t_emb)
461
+
462
+
463
+ h = input["x"]
464
+ if self.is_inpaint:
465
+ h = th.cat( [h, input["inpainting_extra_input"]], dim=1 )
466
+ context = input["context"]
467
+
468
+
469
+ for module in self.input_blocks:
470
+ h = module(h, emb, context, objs)
471
+ hs.append(h)
472
+
473
+ h = self.middle_block(h, emb, context, objs)
474
+
475
+ for module in self.output_blocks:
476
+ h = th.cat([h, hs.pop()], dim=1)
477
+ h = module(h, emb, context, objs)
478
+
479
+ return self.out(h)
480
+
481
+
482
+
483
+
484
+
485
+
486
+
487
+
488
+
489
+
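A quick orientation for the UNetModel above: its forward takes a single input dict rather than positional tensors. The sketch below only builds such a dict with plausible toy shapes (the batch size, latent size, box count and embedding lengths are assumptions, not values from this repo's configs); the model call itself is left as a comment because instantiating UNetModel needs the rest of the repository.

import torch as th

# Hypothetical sizes chosen for illustration only.
batch, max_box, positive_len, context_len, context_dim = 2, 30, 768, 77, 768

unet_input = {
    "x": th.randn(batch, 4, 64, 64),                       # noisy latent
    "timesteps": th.randint(0, 1000, (batch,)),             # one diffusion step per sample
    "context": th.randn(batch, context_len, context_dim),   # caption tokens for cross-attention
    # grounding inputs consumed by forward_position_net:
    "boxes": th.rand(batch, max_box, 4),                     # xyxy coordinates in [0, 1]
    "masks": th.ones(batch, max_box),                        # 1 = real box, 0 = padding
    "text_embeddings": th.randn(batch, max_box, positive_len),
}
# With a model built from the repo's config: eps = unet(unet_input)  # same spatial size as "x"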
gligen/ldm/modules/diffusionmodules/positionnet.py ADDED
@@ -0,0 +1,50 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ from ldm.modules.attention import BasicTransformerBlock
4
+ from ldm.modules.diffusionmodules.util import checkpoint, FourierEmbedder
5
+ import torch.nn.functional as F
6
+
7
+
8
+
9
+ class PositionNet(nn.Module):
10
+ def __init__(self, positive_len, out_dim, fourier_freqs=8):
11
+ super().__init__()
12
+ self.positive_len = positive_len
13
+ self.out_dim = out_dim
14
+
15
+ self.fourier_embedder = FourierEmbedder(num_freqs=fourier_freqs)
16
+ self.position_dim = fourier_freqs*2*4 # 2 is sin&cos, 4 is xyxy
17
+
18
+ self.linears = nn.Sequential(
19
+ nn.Linear( self.positive_len + self.position_dim, 512),
20
+ nn.SiLU(),
21
+ nn.Linear( 512, 512),
22
+ nn.SiLU(),
23
+ nn.Linear(512, out_dim),
24
+ )
25
+
26
+ self.null_positive_feature = torch.nn.Parameter(torch.zeros([self.positive_len]))
27
+ self.null_position_feature = torch.nn.Parameter(torch.zeros([self.position_dim]))
28
+
29
+
30
+ def forward(self, boxes, masks, positive_embeddings):
31
+ B, N, _ = boxes.shape
32
+ masks = masks.unsqueeze(-1)
33
+
34
+ # embed box positions (may include padding entries as placeholders)
35
+ xyxy_embedding = self.fourier_embedder(boxes) # B*N*4 --> B*N*C
36
+
37
+ # learnable null embedding
38
+ positive_null = self.null_positive_feature.view(1,1,-1)
39
+ xyxy_null = self.null_position_feature.view(1,1,-1)
40
+
41
+ # replace padding with learnable null embedding
42
+ positive_embeddings = positive_embeddings*masks + (1-masks)*positive_null
43
+ xyxy_embedding = xyxy_embedding*masks + (1-masks)*xyxy_null
44
+
45
+ objs = self.linears( torch.cat([positive_embeddings, xyxy_embedding], dim=-1) )
46
+ assert objs.shape == torch.Size([B,N,self.out_dim])
47
+ return objs
48
+
49
+
50
+
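The padding trick in PositionNet.forward, replacing padded slots with a learned null vector via emb*mask + (1-mask)*null, can be checked in isolation. A minimal sketch with toy tensors (the zero vector stands in for the learned null parameter; nothing here imports the repository):

import torch

B, N, C = 2, 3, 8
emb = torch.randn(B, N, C)
mask = torch.tensor([[1., 1., 0.], [1., 0., 0.]]).unsqueeze(-1)  # B*N*1, 0 marks padding
null = torch.zeros(1, 1, C)                                      # stand-in for the learned null feature

mixed = emb * mask + (1 - mask) * null
assert torch.equal(mixed[0, 0], emb[0, 0])   # real slots keep their embedding
assert torch.equal(mixed[0, 2], null[0, 0])  # padded slots become the null vector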
gligen/ldm/modules/diffusionmodules/positionnet_with_image.py ADDED
@@ -0,0 +1,68 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ from ldm.modules.attention import BasicTransformerBlock
4
+ from ldm.modules.diffusionmodules.util import checkpoint, FourierEmbedder
5
+ import torch.nn.functional as F
6
+
7
+
8
+
9
+ class PositionNet(nn.Module):
10
+ def __init__(self, positive_len, out_dim, fourier_freqs=8):
11
+ super().__init__()
12
+ self.positive_len = positive_len
13
+ self.out_dim = out_dim
14
+
15
+ self.fourier_embedder = FourierEmbedder(num_freqs=fourier_freqs)
16
+ self.position_dim = fourier_freqs*2*4 # 2 is sin&cos, 4 is xyxy
17
+
18
+ # -------------------------------------------------------------- #
19
+ self.linears_text = nn.Sequential(
20
+ nn.Linear( self.positive_len + self.position_dim, 512),
21
+ nn.SiLU(),
22
+ nn.Linear( 512, 512),
23
+ nn.SiLU(),
24
+ nn.Linear(512, out_dim),
25
+ )
26
+
27
+ self.linears_image = nn.Sequential(
28
+ nn.Linear( self.positive_len + self.position_dim, 512),
29
+ nn.SiLU(),
30
+ nn.Linear( 512, 512),
31
+ nn.SiLU(),
32
+ nn.Linear(512, out_dim),
33
+ )
34
+
35
+ # -------------------------------------------------------------- #
36
+ self.null_text_feature = torch.nn.Parameter(torch.zeros([self.positive_len]))
37
+ self.null_image_feature = torch.nn.Parameter(torch.zeros([self.positive_len]))
38
+ self.null_position_feature = torch.nn.Parameter(torch.zeros([self.position_dim]))
39
+
40
+
41
+ def forward(self, boxes, masks, text_masks, image_masks, text_embeddings, image_embeddings):
42
+ B, N, _ = boxes.shape
43
+ masks = masks.unsqueeze(-1) # B*N*1
44
+ text_masks = text_masks.unsqueeze(-1) # B*N*1
45
+ image_masks = image_masks.unsqueeze(-1) # B*N*1
46
+
47
+ # embedding position (it may includes padding as placeholder)
48
+ xyxy_embedding = self.fourier_embedder(boxes) # B*N*4 --> B*N*C
49
+
50
+ # learnable null embedding
51
+ text_null = self.null_text_feature.view(1,1,-1) # 1*1*C
52
+ image_null = self.null_image_feature.view(1,1,-1) # 1*1*C
53
+ xyxy_null = self.null_position_feature.view(1,1,-1) # 1*1*C
54
+
55
+ # replace padding with learnable null embedding
56
+ text_embeddings = text_embeddings*text_masks + (1-text_masks)*text_null
57
+ image_embeddings = image_embeddings*image_masks + (1-image_masks)*image_null
58
+ xyxy_embedding = xyxy_embedding*masks + (1-masks)*xyxy_null
59
+
60
+ objs_text = self.linears_text( torch.cat([text_embeddings, xyxy_embedding], dim=-1) )
61
+ objs_image = self.linears_image( torch.cat([image_embeddings,xyxy_embedding], dim=-1) )
62
+ objs = torch.cat( [objs_text,objs_image], dim=1 )
63
+
64
+ assert objs.shape == torch.Size([B,N*2,self.out_dim])
65
+ return objs
66
+
67
+
68
+
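Note that this is_style variant emits two grounding tokens per box, one from the text branch and one from the image branch, concatenated along the token axis. A toy shape check mirroring the assert in forward above (the sizes are illustrative assumptions):

import torch

B, N, out_dim = 2, 30, 768
objs_text = torch.randn(B, N, out_dim)
objs_image = torch.randn(B, N, out_dim)
objs = torch.cat([objs_text, objs_image], dim=1)
assert objs.shape == torch.Size([B, 2 * N, out_dim])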
gligen/ldm/modules/diffusionmodules/util.py ADDED
@@ -0,0 +1,277 @@
1
+ import os
2
+ import math
3
+ import torch
4
+ import torch.nn as nn
5
+ import numpy as np
6
+ from einops import repeat
7
+
8
+ from ldm.util import instantiate_from_config
9
+
10
+
11
+
12
+ class FourierEmbedder():
13
+ def __init__(self, num_freqs=64, temperature=100):
14
+
15
+ self.num_freqs = num_freqs
16
+ self.temperature = temperature
17
+ self.freq_bands = temperature ** ( torch.arange(num_freqs) / num_freqs )
18
+
19
+ @ torch.no_grad()
20
+ def __call__(self, x, cat_dim=-1):
21
+ "x: tensor of arbitrary shape. cat_dim: dimension along which the sin/cos features are concatenated"
22
+ out = []
23
+ for freq in self.freq_bands:
24
+ out.append( torch.sin( freq*x ) )
25
+ out.append( torch.cos( freq*x ) )
26
+ return torch.cat(out, cat_dim)
27
+
28
+
29
+
30
+ def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
31
+ if schedule == "linear":
32
+ betas = (
33
+ torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
34
+ )
35
+
36
+ elif schedule == "cosine":
37
+ timesteps = (
38
+ torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
39
+ )
40
+ alphas = timesteps / (1 + cosine_s) * np.pi / 2
41
+ alphas = torch.cos(alphas).pow(2)
42
+ alphas = alphas / alphas[0]
43
+ betas = 1 - alphas[1:] / alphas[:-1]
44
+ betas = np.clip(betas, a_min=0, a_max=0.999)
45
+
46
+ elif schedule == "sqrt_linear":
47
+ betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
48
+ elif schedule == "sqrt":
49
+ betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
50
+ else:
51
+ raise ValueError(f"schedule '{schedule}' unknown.")
52
+ return betas.numpy()
53
+
54
+
55
+ def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
56
+ if ddim_discr_method == 'uniform':
57
+ c = num_ddpm_timesteps // num_ddim_timesteps
58
+ ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
59
+ elif ddim_discr_method == 'quad':
60
+ ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
61
+ else:
62
+ raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
63
+
64
+ # assert ddim_timesteps.shape[0] == num_ddim_timesteps
65
+ # add one to get the final alpha values right (the ones from first scale to data during sampling)
66
+ steps_out = ddim_timesteps + 1
67
+ if verbose:
68
+ print(f'Selected timesteps for ddim sampler: {steps_out}')
69
+ return steps_out
70
+
71
+
72
+ def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
73
+ # select alphas for computing the variance schedule
74
+ alphas = alphacums[ddim_timesteps]
75
+ alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
76
+
77
+ # according to the formula provided in https://arxiv.org/abs/2010.02502
78
+ sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
79
+ if verbose:
80
+ print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
81
+ print(f'For the chosen value of eta, which is {eta}, '
82
+ f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
83
+ return sigmas, alphas, alphas_prev
84
+
85
+
86
+ def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
87
+ """
88
+ Create a beta schedule that discretizes the given alpha_t_bar function,
89
+ which defines the cumulative product of (1-beta) over time from t = [0,1].
90
+ :param num_diffusion_timesteps: the number of betas to produce.
91
+ :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
92
+ produces the cumulative product of (1-beta) up to that
93
+ part of the diffusion process.
94
+ :param max_beta: the maximum beta to use; use values lower than 1 to
95
+ prevent singularities.
96
+ """
97
+ betas = []
98
+ for i in range(num_diffusion_timesteps):
99
+ t1 = i / num_diffusion_timesteps
100
+ t2 = (i + 1) / num_diffusion_timesteps
101
+ betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
102
+ return np.array(betas)
103
+
104
+
105
+ def extract_into_tensor(a, t, x_shape):
106
+ b, *_ = t.shape
107
+ out = a.gather(-1, t)
108
+ return out.reshape(b, *((1,) * (len(x_shape) - 1)))
109
+
110
+
111
+ def checkpoint(func, inputs, params, flag):
112
+ """
113
+ Evaluate a function without caching intermediate activations, allowing for
114
+ reduced memory at the expense of extra compute in the backward pass.
115
+ :param func: the function to evaluate.
116
+ :param inputs: the argument sequence to pass to `func`.
117
+ :param params: a sequence of parameters `func` depends on but does not
118
+ explicitly take as arguments.
119
+ :param flag: if False, disable gradient checkpointing.
120
+ """
121
+ if flag:
122
+ args = tuple(inputs) + tuple(params)
123
+ return CheckpointFunction.apply(func, len(inputs), *args)
124
+ else:
125
+ return func(*inputs)
126
+
127
+
128
+ class CheckpointFunction(torch.autograd.Function):
129
+ @staticmethod
130
+ def forward(ctx, run_function, length, *args):
131
+ ctx.run_function = run_function
132
+ ctx.input_tensors = list(args[:length])
133
+ ctx.input_params = list(args[length:])
134
+
135
+ with torch.no_grad():
136
+ output_tensors = ctx.run_function(*ctx.input_tensors)
137
+ return output_tensors
138
+
139
+ @staticmethod
140
+ def backward(ctx, *output_grads):
141
+ ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
142
+ with torch.enable_grad():
143
+ # Fixes a bug where the first op in run_function modifies the
144
+ # Tensor storage in place, which is not allowed for detach()'d
145
+ # Tensors.
146
+ shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
147
+ output_tensors = ctx.run_function(*shallow_copies)
148
+ input_grads = torch.autograd.grad(
149
+ output_tensors,
150
+ ctx.input_tensors + ctx.input_params,
151
+ output_grads,
152
+ allow_unused=True,
153
+ )
154
+ del ctx.input_tensors
155
+ del ctx.input_params
156
+ del output_tensors
157
+ return (None, None) + input_grads
158
+
159
+
160
+ def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
161
+ """
162
+ Create sinusoidal timestep embeddings.
163
+ :param timesteps: a 1-D Tensor of N indices, one per batch element.
164
+ These may be fractional.
165
+ :param dim: the dimension of the output.
166
+ :param max_period: controls the minimum frequency of the embeddings.
167
+ :return: an [N x dim] Tensor of positional embeddings.
168
+ """
169
+ if not repeat_only:
170
+ half = dim // 2
171
+ freqs = torch.exp(
172
+ -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
173
+ ).to(device=timesteps.device)
174
+ args = timesteps[:, None].float() * freqs[None]
175
+ embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
176
+ if dim % 2:
177
+ embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
178
+ else:
179
+ embedding = repeat(timesteps, 'b -> b d', d=dim)
180
+ return embedding
181
+
182
+
183
+ def zero_module(module):
184
+ """
185
+ Zero out the parameters of a module and return it.
186
+ """
187
+ for p in module.parameters():
188
+ p.detach().zero_()
189
+ return module
190
+
191
+
192
+ def scale_module(module, scale):
193
+ """
194
+ Scale the parameters of a module and return it.
195
+ """
196
+ for p in module.parameters():
197
+ p.detach().mul_(scale)
198
+ return module
199
+
200
+
201
+ def mean_flat(tensor):
202
+ """
203
+ Take the mean over all non-batch dimensions.
204
+ """
205
+ return tensor.mean(dim=list(range(1, len(tensor.shape))))
206
+
207
+
208
+ def normalization(channels):
209
+ """
210
+ Make a standard normalization layer.
211
+ :param channels: number of input channels.
212
+ :return: an nn.Module for normalization.
213
+ """
214
+ return GroupNorm32(32, channels)
215
+
216
+
217
+ # PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
218
+ class SiLU(nn.Module):
219
+ def forward(self, x):
220
+ return x * torch.sigmoid(x)
221
+
222
+
223
+ class GroupNorm32(nn.GroupNorm):
224
+ def forward(self, x):
225
+ return super().forward(x.float()).type(x.dtype)
226
+ #return super().forward(x).type(x.dtype)
227
+
228
+ def conv_nd(dims, *args, **kwargs):
229
+ """
230
+ Create a 1D, 2D, or 3D convolution module.
231
+ """
232
+ if dims == 1:
233
+ return nn.Conv1d(*args, **kwargs)
234
+ elif dims == 2:
235
+ return nn.Conv2d(*args, **kwargs)
236
+ elif dims == 3:
237
+ return nn.Conv3d(*args, **kwargs)
238
+ raise ValueError(f"unsupported dimensions: {dims}")
239
+
240
+
241
+ def linear(*args, **kwargs):
242
+ """
243
+ Create a linear module.
244
+ """
245
+ return nn.Linear(*args, **kwargs)
246
+
247
+
248
+ def avg_pool_nd(dims, *args, **kwargs):
249
+ """
250
+ Create a 1D, 2D, or 3D average pooling module.
251
+ """
252
+ if dims == 1:
253
+ return nn.AvgPool1d(*args, **kwargs)
254
+ elif dims == 2:
255
+ return nn.AvgPool2d(*args, **kwargs)
256
+ elif dims == 3:
257
+ return nn.AvgPool3d(*args, **kwargs)
258
+ raise ValueError(f"unsupported dimensions: {dims}")
259
+
260
+
261
+ class HybridConditioner(nn.Module):
262
+
263
+ def __init__(self, c_concat_config, c_crossattn_config):
264
+ super().__init__()
265
+ self.concat_conditioner = instantiate_from_config(c_concat_config)
266
+ self.crossattn_conditioner = instantiate_from_config(c_crossattn_config)
267
+
268
+ def forward(self, c_concat, c_crossattn):
269
+ c_concat = self.concat_conditioner(c_concat)
270
+ c_crossattn = self.crossattn_conditioner(c_crossattn)
271
+ return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]}
272
+
273
+
274
+ def noise_like(shape, device, repeat=False):
275
+ repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
276
+ noise = lambda: torch.randn(shape, device=device)
277
+ return repeat_noise() if repeat else noise()
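FourierEmbedder above concatenates one sin and one cos feature per frequency along the last dimension, which is why PositionNet sets position_dim = fourier_freqs*2*4 for xyxy boxes. The standalone re-implementation below repeats the same loop purely as a shape check; it is an illustration, not the repository's module:

import torch

def fourier_embed(x, num_freqs=8, temperature=100):
    # same recipe as FourierEmbedder.__call__: sin/cos at geometrically spaced frequencies
    freq_bands = temperature ** (torch.arange(num_freqs) / num_freqs)
    out = []
    for freq in freq_bands:
        out.append(torch.sin(freq * x))
        out.append(torch.cos(freq * x))
    return torch.cat(out, dim=-1)

boxes = torch.rand(2, 30, 4)        # B*N*4 xyxy coordinates
emb = fourier_embed(boxes)          # last dim grows by a factor of num_freqs*2
assert emb.shape == (2, 30, 8 * 2 * 4)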
gligen/ldm/modules/distributions/__init__.py ADDED
File without changes
gligen/ldm/modules/distributions/distributions.py ADDED
@@ -0,0 +1,92 @@
1
+ import torch
2
+ import numpy as np
3
+
4
+
5
+ class AbstractDistribution:
6
+ def sample(self):
7
+ raise NotImplementedError()
8
+
9
+ def mode(self):
10
+ raise NotImplementedError()
11
+
12
+
13
+ class DiracDistribution(AbstractDistribution):
14
+ def __init__(self, value):
15
+ self.value = value
16
+
17
+ def sample(self):
18
+ return self.value
19
+
20
+ def mode(self):
21
+ return self.value
22
+
23
+
24
+ class DiagonalGaussianDistribution(object):
25
+ def __init__(self, parameters, deterministic=False):
26
+ self.parameters = parameters
27
+ self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
28
+ self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
29
+ self.deterministic = deterministic
30
+ self.std = torch.exp(0.5 * self.logvar)
31
+ self.var = torch.exp(self.logvar)
32
+ if self.deterministic:
33
+ self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)
34
+
35
+ def sample(self):
36
+ x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)
37
+ return x
38
+
39
+ def kl(self, other=None):
40
+ if self.deterministic:
41
+ return torch.Tensor([0.])
42
+ else:
43
+ if other is None:
44
+ return 0.5 * torch.sum(torch.pow(self.mean, 2)
45
+ + self.var - 1.0 - self.logvar,
46
+ dim=[1, 2, 3])
47
+ else:
48
+ return 0.5 * torch.sum(
49
+ torch.pow(self.mean - other.mean, 2) / other.var
50
+ + self.var / other.var - 1.0 - self.logvar + other.logvar,
51
+ dim=[1, 2, 3])
52
+
53
+ def nll(self, sample, dims=[1,2,3]):
54
+ if self.deterministic:
55
+ return torch.Tensor([0.])
56
+ logtwopi = np.log(2.0 * np.pi)
57
+ return 0.5 * torch.sum(
58
+ logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
59
+ dim=dims)
60
+
61
+ def mode(self):
62
+ return self.mean
63
+
64
+
65
+ def normal_kl(mean1, logvar1, mean2, logvar2):
66
+ """
67
+ source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
68
+ Compute the KL divergence between two gaussians.
69
+ Shapes are automatically broadcasted, so batches can be compared to
70
+ scalars, among other use cases.
71
+ """
72
+ tensor = None
73
+ for obj in (mean1, logvar1, mean2, logvar2):
74
+ if isinstance(obj, torch.Tensor):
75
+ tensor = obj
76
+ break
77
+ assert tensor is not None, "at least one argument must be a Tensor"
78
+
79
+ # Force variances to be Tensors. Broadcasting helps convert scalars to
80
+ # Tensors, but it does not work for torch.exp().
81
+ logvar1, logvar2 = [
82
+ x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)
83
+ for x in (logvar1, logvar2)
84
+ ]
85
+
86
+ return 0.5 * (
87
+ -1.0
88
+ + logvar2
89
+ - logvar1
90
+ + torch.exp(logvar1 - logvar2)
91
+ + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
92
+ )
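DiagonalGaussianDistribution.kl() with other=None is the closed-form KL divergence from N(mean, var) to a standard normal, summed over the non-batch dimensions. A toy numerical check of that expression (it restates the formula directly rather than importing the class):

import torch

mean = torch.randn(4, 3, 2, 2)
logvar = torch.randn(4, 3, 2, 2).clamp(-30.0, 20.0)
var = logvar.exp()

kl = 0.5 * torch.sum(mean.pow(2) + var - 1.0 - logvar, dim=[1, 2, 3])
assert kl.shape == (4,)            # one value per batch element
assert torch.all(kl >= 0)          # KL to N(0, I) is non-negative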
gligen/ldm/modules/ema.py ADDED
@@ -0,0 +1,76 @@
1
+ import torch
2
+ from torch import nn
3
+
4
+
5
+ class LitEma(nn.Module):
6
+ def __init__(self, model, decay=0.9999, use_num_upates=True):
7
+ super().__init__()
8
+ if decay < 0.0 or decay > 1.0:
9
+ raise ValueError('Decay must be between 0 and 1')
10
+
11
+ self.m_name2s_name = {}
12
+ self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
13
+ self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates
14
+ else torch.tensor(-1,dtype=torch.int))
15
+
16
+ for name, p in model.named_parameters():
17
+ if p.requires_grad:
18
+ #remove as '.'-character is not allowed in buffers
19
+ s_name = name.replace('.','')
20
+ self.m_name2s_name.update({name:s_name})
21
+ self.register_buffer(s_name,p.clone().detach().data)
22
+
23
+ self.collected_params = []
24
+
25
+ def forward(self,model):
26
+ decay = self.decay
27
+
28
+ if self.num_updates >= 0:
29
+ self.num_updates += 1
30
+ decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))
31
+
32
+ one_minus_decay = 1.0 - decay
33
+
34
+ with torch.no_grad():
35
+ m_param = dict(model.named_parameters())
36
+ shadow_params = dict(self.named_buffers())
37
+
38
+ for key in m_param:
39
+ if m_param[key].requires_grad:
40
+ sname = self.m_name2s_name[key]
41
+ shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
42
+ shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
43
+ else:
44
+ assert not key in self.m_name2s_name
45
+
46
+ def copy_to(self, model):
47
+ m_param = dict(model.named_parameters())
48
+ shadow_params = dict(self.named_buffers())
49
+ for key in m_param:
50
+ if m_param[key].requires_grad:
51
+ m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
52
+ else:
53
+ assert not key in self.m_name2s_name
54
+
55
+ def store(self, parameters):
56
+ """
57
+ Save the current parameters for restoring later.
58
+ Args:
59
+ parameters: Iterable of `torch.nn.Parameter`; the parameters to be
60
+ temporarily stored.
61
+ """
62
+ self.collected_params = [param.clone() for param in parameters]
63
+
64
+ def restore(self, parameters):
65
+ """
66
+ Restore the parameters stored with the `store` method.
67
+ Useful to validate the model with EMA parameters without affecting the
68
+ original optimization process. Store the parameters before the
69
+ `copy_to` method. After validation (or model saving), use this to
70
+ restore the former parameters.
71
+ Args:
72
+ parameters: Iterable of `torch.nn.Parameter`; the parameters to be
73
+ updated with the stored parameters.
74
+ """
75
+ for c_param, param in zip(self.collected_params, parameters):
76
+ param.data.copy_(c_param.data)
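LitEma.forward updates each shadow buffer in place with shadow.sub_((1 - decay) * (shadow - param)), which is algebraically the familiar EMA decay*shadow + (1-decay)*param. A small sketch of that equivalence on plain tensors (it does not use the class itself):

import torch

decay = 0.9999
shadow = torch.randn(5)
param = torch.randn(5)

updated_in_place = shadow - (1.0 - decay) * (shadow - param)   # LitEma's formulation
updated_classic = decay * shadow + (1.0 - decay) * param       # textbook EMA
assert torch.allclose(updated_in_place, updated_classic)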
gligen/ldm/modules/encoders/__init__.py ADDED
File without changes
gligen/ldm/modules/encoders/modules.py ADDED
@@ -0,0 +1,245 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ from functools import partial
4
+ import clip
5
+ from einops import rearrange, repeat
6
+ from transformers import CLIPTokenizer, CLIPTextModel
7
+ import kornia
8
+
9
+ from ldm.modules.x_transformer import Encoder, TransformerWrapper # TODO: can we directly rely on lucidrains code and simply add this as a requirement? --> test
10
+
11
+
12
+ class AbstractEncoder(nn.Module):
13
+ def __init__(self):
14
+ super().__init__()
15
+
16
+ def encode(self, *args, **kwargs):
17
+ raise NotImplementedError
18
+
19
+
20
+
21
+ class ClassEmbedder(nn.Module):
22
+ def __init__(self, embed_dim, n_classes=1000, key='class'):
23
+ super().__init__()
24
+ self.key = key
25
+ self.embedding = nn.Embedding(n_classes, embed_dim)
26
+
27
+ def forward(self, batch, key=None):
28
+ if key is None:
29
+ key = self.key
30
+ # this is for use in crossattn
31
+ c = batch[key][:, None]
32
+ c = self.embedding(c)
33
+ return c
34
+
35
+
36
+ class TransformerEmbedder(AbstractEncoder):
37
+ """Some transformer encoder layers"""
38
+ def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"):
39
+ super().__init__()
40
+ self.device = device
41
+ self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
42
+ attn_layers=Encoder(dim=n_embed, depth=n_layer))
43
+
44
+ def forward(self, tokens):
45
+ tokens = tokens.to(self.device) # meh
46
+ z = self.transformer(tokens, return_embeddings=True)
47
+ return z
48
+
49
+ def encode(self, x):
50
+ return self(x)
51
+
52
+
53
+ class BERTTokenizer(AbstractEncoder):
54
+ """ Uses a pretrained BERT tokenizer by huggingface. Vocab size: 30522 (?)"""
55
+ def __init__(self, device="cuda", vq_interface=True, max_length=77):
56
+ super().__init__()
57
+ from transformers import BertTokenizerFast # TODO: add to requirements
58
+ self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
59
+ self.device = device
60
+ self.vq_interface = vq_interface
61
+ self.max_length = max_length
62
+
63
+ def forward(self, text):
64
+ batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
65
+ return_overflowing_tokens=False, padding="max_length", return_tensors="pt",
66
+ return_offsets_mapping=True)
67
+ tokens = batch_encoding["input_ids"].to(self.device)
68
+ offset_mapping = batch_encoding["offset_mapping"]
69
+ return tokens, offset_mapping
70
+
71
+ @torch.no_grad()
72
+ def encode(self, text):
73
+ tokens = self(text)
74
+ if not self.vq_interface:
75
+ return tokens
76
+ return None, None, [None, None, tokens]
77
+
78
+ def decode(self, text):
79
+ return text
80
+
81
+
82
+ class BERTEmbedder(AbstractEncoder):
83
+ """Uses the BERT tokenizer and adds some transformer encoder layers"""
84
+ def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77,
85
+ device="cuda",use_tokenizer=True, embedding_dropout=0.0):
86
+ super().__init__()
87
+ self.use_tknz_fn = use_tokenizer
88
+ if self.use_tknz_fn:
89
+ self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len)
90
+ self.device = device
91
+ self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
92
+ attn_layers=Encoder(dim=n_embed, depth=n_layer),
93
+ emb_dropout=embedding_dropout)
94
+
95
+ def forward(self, text, return_offset_mapping=False):
96
+ if self.use_tknz_fn:
97
+ tokens, offset_mapping = self.tknz_fn(text)#.to(self.device)
98
+ else:
99
+ assert False
100
+ tokens = text
101
+ z = self.transformer(tokens, return_embeddings=True)
102
+
103
+ if return_offset_mapping:
104
+ return z, offset_mapping
105
+ else:
106
+ return z
107
+
108
+ def encode(self, text, return_offset_mapping=False):
109
+ # output of length 77
110
+ return self(text, return_offset_mapping)
111
+
112
+
113
+ class SpatialRescaler(nn.Module):
114
+ def __init__(self,
115
+ n_stages=1,
116
+ method='bilinear',
117
+ multiplier=0.5,
118
+ in_channels=3,
119
+ out_channels=None,
120
+ bias=False):
121
+ super().__init__()
122
+ self.n_stages = n_stages
123
+ assert self.n_stages >= 0
124
+ assert method in ['nearest','linear','bilinear','trilinear','bicubic','area']
125
+ self.multiplier = multiplier
126
+ self.interpolator = partial(torch.nn.functional.interpolate, mode=method)
127
+ self.remap_output = out_channels is not None
128
+ if self.remap_output:
129
+ print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.')
130
+ self.channel_mapper = nn.Conv2d(in_channels,out_channels,1,bias=bias)
131
+
132
+ def forward(self,x):
133
+ for stage in range(self.n_stages):
134
+ x = self.interpolator(x, scale_factor=self.multiplier)
135
+
136
+
137
+ if self.remap_output:
138
+ x = self.channel_mapper(x)
139
+ return x
140
+
141
+ def encode(self, x):
142
+ return self(x)
143
+
144
+ class FrozenCLIPEmbedder(AbstractEncoder):
145
+ """Uses the CLIP transformer encoder for text (from Hugging Face)"""
146
+ def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77):
147
+ super().__init__()
148
+ self.tokenizer = CLIPTokenizer.from_pretrained(version)
149
+ self.transformer = CLIPTextModel.from_pretrained(version)
150
+ self.device = device
151
+ self.max_length = max_length
152
+ self.freeze()
153
+
154
+ def freeze(self):
155
+ self.transformer = self.transformer.eval()
156
+ for param in self.parameters():
157
+ param.requires_grad = False
158
+
159
+ def forward(self, text, return_pooler_output=False):
160
+ batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
161
+ return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
162
+ tokens = batch_encoding["input_ids"].to(self.device)
163
+ outputs = self.transformer(input_ids=tokens)
164
+
165
+ z = outputs.last_hidden_state
166
+
167
+ if not return_pooler_output:
168
+ return z
169
+ else:
170
+ return z, outputs.pooler_output
171
+
172
+ def encode(self, text, return_pooler_output=False):
173
+ return self(text, return_pooler_output)
174
+
175
+
176
+ class FrozenCLIPTextEmbedder(nn.Module):
177
+ """
178
+ Uses the CLIP transformer encoder for text.
179
+ """
180
+ def __init__(self, version='ViT-L/14', device="cuda", max_length=77, n_repeat=1, normalize=True):
181
+ super().__init__()
182
+ self.model, _ = clip.load(version, jit=False, device="cpu")
183
+ self.device = device
184
+ self.max_length = max_length
185
+ self.n_repeat = n_repeat
186
+ self.normalize = normalize
187
+
188
+ def freeze(self):
189
+ self.model = self.model.eval()
190
+ for param in self.parameters():
191
+ param.requires_grad = False
192
+
193
+ def forward(self, text):
194
+ tokens = clip.tokenize(text).to(self.device)
195
+ z = self.model.encode_text(tokens)
196
+ if self.normalize:
197
+ z = z / torch.linalg.norm(z, dim=1, keepdim=True)
198
+ return z
199
+
200
+ def encode(self, text):
201
+ z = self(text)
202
+ if z.ndim==2:
203
+ z = z[:, None, :]
204
+ z = repeat(z, 'b 1 d -> b k d', k=self.n_repeat)
205
+ return z
206
+
207
+
208
+ class FrozenClipImageEmbedder(nn.Module):
209
+ """
210
+ Uses the CLIP image encoder.
211
+ """
212
+ def __init__(
213
+ self,
214
+ model,
215
+ jit=False,
216
+ device='cuda' if torch.cuda.is_available() else 'cpu',
217
+ antialias=False,
218
+ ):
219
+ super().__init__()
220
+ self.model, _ = clip.load(name=model, device=device, jit=jit)
221
+
222
+ self.antialias = antialias
223
+
224
+ self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False)
225
+ self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False)
226
+
227
+ def preprocess(self, x):
228
+ # normalize to [0,1]
229
+ x = kornia.geometry.resize(x, (224, 224),
230
+ interpolation='bicubic',align_corners=True,
231
+ antialias=self.antialias)
232
+ x = (x + 1.) / 2.
233
+ # renormalize according to clip
234
+ x = kornia.enhance.normalize(x, self.mean, self.std)
235
+ return x
236
+
237
+ def forward(self, x):
238
+ # x is assumed to be in range [-1,1]
239
+ return self.model.encode_image(self.preprocess(x))
240
+
241
+
242
+ if __name__ == "__main__":
243
+ from ldm.util import count_params
244
+ model = FrozenCLIPEmbedder()
245
+ count_params(model, verbose=True)
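A hedged usage sketch for FrozenCLIPEmbedder above: it pads or truncates every prompt to 77 tokens and returns the CLIP text transformer's last hidden state, so a batch of prompts becomes a (batch, 77, 768) conditioning tensor for the ViT-L/14 text model. This assumes the repo's dependencies (transformers, clip, kornia) are installed, the ldm package is importable, and the openai/clip-vit-large-patch14 weights can be downloaded:

from ldm.modules.encoders.modules import FrozenCLIPEmbedder

encoder = FrozenCLIPEmbedder(device="cpu")
z = encoder.encode(["a teddy bear on a skateboard", "a red sports car"])
print(z.shape)  # expected: torch.Size([2, 77, 768])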
gligen/ldm/modules/encoders/modules_backup.py ADDED
@@ -0,0 +1,234 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ from functools import partial
4
+ import clip
5
+ from einops import rearrange, repeat
6
+ from transformers import CLIPTokenizer, CLIPTextModel
7
+ import kornia
8
+
9
+ from ldm.modules.x_transformer import Encoder, TransformerWrapper # TODO: can we directly rely on lucidrains code and simply add this as a requirement? --> test
10
+
11
+
12
+ class AbstractEncoder(nn.Module):
13
+ def __init__(self):
14
+ super().__init__()
15
+
16
+ def encode(self, *args, **kwargs):
17
+ raise NotImplementedError
18
+
19
+
20
+
21
+ class ClassEmbedder(nn.Module):
22
+ def __init__(self, embed_dim, n_classes=1000, key='class'):
23
+ super().__init__()
24
+ self.key = key
25
+ self.embedding = nn.Embedding(n_classes, embed_dim)
26
+
27
+ def forward(self, batch, key=None):
28
+ if key is None:
29
+ key = self.key
30
+ # this is for use in crossattn
31
+ c = batch[key][:, None]
32
+ c = self.embedding(c)
33
+ return c
34
+
35
+
36
+ class TransformerEmbedder(AbstractEncoder):
37
+ """Some transformer encoder layers"""
38
+ def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"):
39
+ super().__init__()
40
+ self.device = device
41
+ self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
42
+ attn_layers=Encoder(dim=n_embed, depth=n_layer))
43
+
44
+ def forward(self, tokens):
45
+ tokens = tokens.to(self.device) # meh
46
+ z = self.transformer(tokens, return_embeddings=True)
47
+ return z
48
+
49
+ def encode(self, x):
50
+ return self(x)
51
+
52
+
53
+ class BERTTokenizer(AbstractEncoder):
54
+ """ Uses a pretrained BERT tokenizer by huggingface. Vocab size: 30522 (?)"""
55
+ def __init__(self, device="cuda", vq_interface=True, max_length=77):
56
+ super().__init__()
57
+ from transformers import BertTokenizerFast # TODO: add to requirements
58
+ self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
59
+ self.device = device
60
+ self.vq_interface = vq_interface
61
+ self.max_length = max_length
62
+
63
+ def forward(self, text):
64
+ batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
65
+ return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
66
+ tokens = batch_encoding["input_ids"].to(self.device)
67
+ return tokens
68
+
69
+ @torch.no_grad()
70
+ def encode(self, text):
71
+ tokens = self(text)
72
+ if not self.vq_interface:
73
+ return tokens
74
+ return None, None, [None, None, tokens]
75
+
76
+ def decode(self, text):
77
+ return text
78
+
79
+
80
+ class BERTEmbedder(AbstractEncoder):
81
+ """Uses the BERT tokenizer and adds some transformer encoder layers"""
82
+ def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77,
83
+ device="cuda",use_tokenizer=True, embedding_dropout=0.0):
84
+ super().__init__()
85
+ self.use_tknz_fn = use_tokenizer
86
+ if self.use_tknz_fn:
87
+ self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len)
88
+ self.device = device
89
+ self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
90
+ attn_layers=Encoder(dim=n_embed, depth=n_layer),
91
+ emb_dropout=embedding_dropout)
92
+
93
+ def forward(self, text):
94
+ if self.use_tknz_fn:
95
+ tokens = self.tknz_fn(text)#.to(self.device)
96
+ else:
97
+ tokens = text
98
+ z = self.transformer(tokens, return_embeddings=True)
99
+ return z
100
+
101
+ def encode(self, text):
102
+ # output of length 77
103
+ return self(text)
104
+
105
+
106
+ class SpatialRescaler(nn.Module):
107
+ def __init__(self,
108
+ n_stages=1,
109
+ method='bilinear',
110
+ multiplier=0.5,
111
+ in_channels=3,
112
+ out_channels=None,
113
+ bias=False):
114
+ super().__init__()
115
+ self.n_stages = n_stages
116
+ assert self.n_stages >= 0
117
+ assert method in ['nearest','linear','bilinear','trilinear','bicubic','area']
118
+ self.multiplier = multiplier
119
+ self.interpolator = partial(torch.nn.functional.interpolate, mode=method)
120
+ self.remap_output = out_channels is not None
121
+ if self.remap_output:
122
+ print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.')
123
+ self.channel_mapper = nn.Conv2d(in_channels,out_channels,1,bias=bias)
124
+
125
+ def forward(self,x):
126
+ for stage in range(self.n_stages):
127
+ x = self.interpolator(x, scale_factor=self.multiplier)
128
+
129
+
130
+ if self.remap_output:
131
+ x = self.channel_mapper(x)
132
+ return x
133
+
134
+ def encode(self, x):
135
+ return self(x)
136
+
137
+ class FrozenCLIPEmbedder(AbstractEncoder):
138
+ """Uses the CLIP transformer encoder for text (from Hugging Face)"""
139
+ def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77):
140
+ super().__init__()
141
+ self.tokenizer = CLIPTokenizer.from_pretrained(version)
142
+ self.transformer = CLIPTextModel.from_pretrained(version)
143
+ self.device = device
144
+ self.max_length = max_length
145
+ self.freeze()
146
+
147
+ def freeze(self):
148
+ self.transformer = self.transformer.eval()
149
+ for param in self.parameters():
150
+ param.requires_grad = False
151
+
152
+ def forward(self, text):
153
+ batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
154
+ return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
155
+ tokens = batch_encoding["input_ids"].to(self.device)
156
+ outputs = self.transformer(input_ids=tokens)
157
+
158
+ z = outputs.last_hidden_state
159
+ return z
160
+
161
+ def encode(self, text):
162
+ return self(text)
163
+
164
+
165
+ class FrozenCLIPTextEmbedder(nn.Module):
166
+ """
167
+ Uses the CLIP transformer encoder for text.
168
+ """
169
+ def __init__(self, version='ViT-L/14', device="cuda", max_length=77, n_repeat=1, normalize=True):
170
+ super().__init__()
171
+ self.model, _ = clip.load(version, jit=False, device="cpu")
172
+ self.device = device
173
+ self.max_length = max_length
174
+ self.n_repeat = n_repeat
175
+ self.normalize = normalize
176
+
177
+ def freeze(self):
178
+ self.model = self.model.eval()
179
+ for param in self.parameters():
180
+ param.requires_grad = False
181
+
182
+ def forward(self, text):
183
+ tokens = clip.tokenize(text).to(self.device)
184
+ z = self.model.encode_text(tokens)
185
+ if self.normalize:
186
+ z = z / torch.linalg.norm(z, dim=1, keepdim=True)
187
+ return z
188
+
189
+ def encode(self, text):
190
+ z = self(text)
191
+ if z.ndim==2:
192
+ z = z[:, None, :]
193
+ z = repeat(z, 'b 1 d -> b k d', k=self.n_repeat)
194
+ return z
195
+
196
+
197
+ class FrozenClipImageEmbedder(nn.Module):
198
+ """
199
+ Uses the CLIP image encoder.
200
+ """
201
+ def __init__(
202
+ self,
203
+ model,
204
+ jit=False,
205
+ device='cuda' if torch.cuda.is_available() else 'cpu',
206
+ antialias=False,
207
+ ):
208
+ super().__init__()
209
+ self.model, _ = clip.load(name=model, device=device, jit=jit)
210
+
211
+ self.antialias = antialias
212
+
213
+ self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False)
214
+ self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False)
215
+
216
+ def preprocess(self, x):
217
+ # normalize to [0,1]
218
+ x = kornia.geometry.resize(x, (224, 224),
219
+ interpolation='bicubic',align_corners=True,
220
+ antialias=self.antialias)
221
+ x = (x + 1.) / 2.
222
+ # renormalize according to clip
223
+ x = kornia.enhance.normalize(x, self.mean, self.std)
224
+ return x
225
+
226
+ def forward(self, x):
227
+ # x is assumed to be in range [-1,1]
228
+ return self.model.encode_image(self.preprocess(x))
229
+
230
+
231
+ if __name__ == "__main__":
232
+ from ldm.util import count_params
233
+ model = FrozenCLIPEmbedder()
234
+ count_params(model, verbose=True)
gligen/ldm/modules/image_degradation/__init__.py ADDED
@@ -0,0 +1,2 @@
1
+ from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr
2
+ from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light
gligen/ldm/modules/image_degradation/bsrgan.py ADDED
@@ -0,0 +1,730 @@
1
+ # -*- coding: utf-8 -*-
2
+ """
3
+ # --------------------------------------------
4
+ # Super-Resolution
5
+ # --------------------------------------------
6
+ #
7
+ # Kai Zhang (cskaizhang@gmail.com)
8
+ # https://github.com/cszn
9
+ # From 2019/03--2021/08
10
+ # --------------------------------------------
11
+ """
12
+
13
+ import numpy as np
14
+ import cv2
15
+ import torch
16
+
17
+ from functools import partial
18
+ import random
19
+ from scipy import ndimage
20
+ import scipy
21
+ import scipy.stats as ss
22
+ from scipy.interpolate import interp2d
23
+ from scipy.linalg import orth
24
+ import albumentations
25
+
26
+ import ldm.modules.image_degradation.utils_image as util
27
+
28
+
29
+ def modcrop_np(img, sf):
30
+ '''
31
+ Args:
32
+ img: numpy image, WxH or WxHxC
33
+ sf: scale factor
34
+ Return:
35
+ cropped image
36
+ '''
37
+ w, h = img.shape[:2]
38
+ im = np.copy(img)
39
+ return im[:w - w % sf, :h - h % sf, ...]
40
+
41
+
42
+ """
43
+ # --------------------------------------------
44
+ # anisotropic Gaussian kernels
45
+ # --------------------------------------------
46
+ """
47
+
48
+
49
+ def analytic_kernel(k):
50
+ """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
51
+ k_size = k.shape[0]
52
+ # Calculate the big kernels size
53
+ big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
54
+ # Loop over the small kernel to fill the big one
55
+ for r in range(k_size):
56
+ for c in range(k_size):
57
+ big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
58
+ # Crop the edges of the big kernel to ignore very small values and increase run time of SR
59
+ crop = k_size // 2
60
+ cropped_big_k = big_k[crop:-crop, crop:-crop]
61
+ # Normalize to 1
62
+ return cropped_big_k / cropped_big_k.sum()
63
+
64
+
65
+ def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
66
+ """ generate an anisotropic Gaussian kernel
67
+ Args:
68
+ ksize : e.g., 15, kernel size
69
+ theta : [0, pi], rotation angle range
70
+ l1 : [0.1,50], scaling of eigenvalues
71
+ l2 : [0.1,l1], scaling of eigenvalues
72
+ If l1 = l2, will get an isotropic Gaussian kernel.
73
+ Returns:
74
+ k : kernel
75
+ """
76
+
77
+ v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
78
+ V = np.array([[v[0], v[1]], [v[1], -v[0]]])
79
+ D = np.array([[l1, 0], [0, l2]])
80
+ Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
81
+ k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
82
+
83
+ return k
84
+
85
+
86
+ def gm_blur_kernel(mean, cov, size=15):
87
+ center = size / 2.0 + 0.5
88
+ k = np.zeros([size, size])
89
+ for y in range(size):
90
+ for x in range(size):
91
+ cy = y - center + 1
92
+ cx = x - center + 1
93
+ k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
94
+
95
+ k = k / np.sum(k)
96
+ return k
97
+
98
+
99
+ def shift_pixel(x, sf, upper_left=True):
100
+ """shift pixel for super-resolution with different scale factors
101
+ Args:
102
+ x: WxHxC or WxH
103
+ sf: scale factor
104
+ upper_left: shift direction
105
+ """
106
+ h, w = x.shape[:2]
107
+ shift = (sf - 1) * 0.5
108
+ xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
109
+ if upper_left:
110
+ x1 = xv + shift
111
+ y1 = yv + shift
112
+ else:
113
+ x1 = xv - shift
114
+ y1 = yv - shift
115
+
116
+ x1 = np.clip(x1, 0, w - 1)
117
+ y1 = np.clip(y1, 0, h - 1)
118
+
119
+ if x.ndim == 2:
120
+ x = interp2d(xv, yv, x)(x1, y1)
121
+ if x.ndim == 3:
122
+ for i in range(x.shape[-1]):
123
+ x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
124
+
125
+ return x
126
+
127
+
128
+ def blur(x, k):
129
+ '''
130
+ x: image, NxcxHxW
131
+ k: kernel, Nx1xhxw
132
+ '''
133
+ n, c = x.shape[:2]
134
+ p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
135
+ x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
136
+ k = k.repeat(1, c, 1, 1)
137
+ k = k.view(-1, 1, k.shape[2], k.shape[3])
138
+ x = x.view(1, -1, x.shape[2], x.shape[3])
139
+ x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
140
+ x = x.view(n, c, x.shape[2], x.shape[3])
141
+
142
+ return x
143
+
144
+
145
+ def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
146
+ """
147
+ # modified version of https://github.com/assafshocher/BlindSR_dataset_generator
148
+ # Kai Zhang
149
+ # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var
150
+ # max_var = 2.5 * sf
151
+ """
152
+ # Set random eigen-vals (lambdas) and angle (theta) for COV matrix
153
+ lambda_1 = min_var + np.random.rand() * (max_var - min_var)
154
+ lambda_2 = min_var + np.random.rand() * (max_var - min_var)
155
+ theta = np.random.rand() * np.pi # random theta
156
+ noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
157
+
158
+ # Set COV matrix using Lambdas and Theta
159
+ LAMBDA = np.diag([lambda_1, lambda_2])
160
+ Q = np.array([[np.cos(theta), -np.sin(theta)],
161
+ [np.sin(theta), np.cos(theta)]])
162
+ SIGMA = Q @ LAMBDA @ Q.T
163
+ INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
164
+
165
+ # Set expectation position (shifting kernel for aligned image)
166
+ MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2)
167
+ MU = MU[None, None, :, None]
168
+
169
+ # Create meshgrid for Gaussian
170
+ [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
171
+ Z = np.stack([X, Y], 2)[:, :, :, None]
172
+
173
+ # Calculate Gaussian for every pixel of the kernel
174
+ ZZ = Z - MU
175
+ ZZ_t = ZZ.transpose(0, 1, 3, 2)
176
+ raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
177
+
178
+ # shift the kernel so it will be centered
179
+ # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
180
+
181
+ # Normalize the kernel and return
182
+ # kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
183
+ kernel = raw_kernel / np.sum(raw_kernel)
184
+ return kernel
185
+
186
+
187
+ def fspecial_gaussian(hsize, sigma):
188
+ hsize = [hsize, hsize]
189
+ siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
190
+ std = sigma
191
+ [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
192
+ arg = -(x * x + y * y) / (2 * std * std)
193
+ h = np.exp(arg)
194
+ h[h < scipy.finfo(float).eps * h.max()] = 0
195
+ sumh = h.sum()
196
+ if sumh != 0:
197
+ h = h / sumh
198
+ return h
199
+
200
+
201
+ def fspecial_laplacian(alpha):
202
+ alpha = max([0, min([alpha, 1])])
203
+ h1 = alpha / (alpha + 1)
204
+ h2 = (1 - alpha) / (alpha + 1)
205
+ h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
206
+ h = np.array(h)
207
+ return h
208
+
209
+
210
+ def fspecial(filter_type, *args, **kwargs):
211
+ '''
212
+ python code from:
213
+ https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
214
+ '''
215
+ if filter_type == 'gaussian':
216
+ return fspecial_gaussian(*args, **kwargs)
217
+ if filter_type == 'laplacian':
218
+ return fspecial_laplacian(*args, **kwargs)
219
+
220
+
221
+ """
222
+ # --------------------------------------------
223
+ # degradation models
224
+ # --------------------------------------------
225
+ """
226
+
227
+
228
+ def bicubic_degradation(x, sf=3):
229
+ '''
230
+ Args:
231
+ x: HxWxC image, [0, 1]
232
+ sf: down-scale factor
233
+ Return:
234
+ bicubicly downsampled LR image
235
+ '''
236
+ x = util.imresize_np(x, scale=1 / sf)
237
+ return x
238
+
239
+
240
+ def srmd_degradation(x, k, sf=3):
241
+ ''' blur + bicubic downsampling
242
+ Args:
243
+ x: HxWxC image, [0, 1]
244
+ k: hxw, double
245
+ sf: down-scale factor
246
+ Return:
247
+ downsampled LR image
248
+ Reference:
249
+ @inproceedings{zhang2018learning,
250
+ title={Learning a single convolutional super-resolution network for multiple degradations},
251
+ author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
252
+ booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
253
+ pages={3262--3271},
254
+ year={2018}
255
+ }
256
+ '''
257
+ x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror'
258
+ x = bicubic_degradation(x, sf=sf)
259
+ return x
260
+
261
+
262
+ def dpsr_degradation(x, k, sf=3):
263
+ ''' bicubic downsampling + blur
264
+ Args:
265
+ x: HxWxC image, [0, 1]
266
+ k: hxw, double
267
+ sf: down-scale factor
268
+ Return:
269
+ downsampled LR image
270
+ Reference:
271
+ @inproceedings{zhang2019deep,
272
+ title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
273
+ author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
274
+ booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
275
+ pages={1671--1681},
276
+ year={2019}
277
+ }
278
+ '''
279
+ x = bicubic_degradation(x, sf=sf)
280
+ x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
281
+ return x
282
+
283
+
284
+ def classical_degradation(x, k, sf=3):
285
+ ''' blur + downsampling
286
+ Args:
287
+ x: HxWxC image, [0, 1]/[0, 255]
288
+ k: hxw, double
289
+ sf: down-scale factor
290
+ Return:
291
+ downsampled LR image
292
+ '''
293
+ x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
294
+ # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
295
+ st = 0
296
+ return x[st::sf, st::sf, ...]
297
+
298
+
299
+ def add_sharpening(img, weight=0.5, radius=50, threshold=10):
300
+ """USM sharpening, borrowed from Real-ESRGAN.
301
+ Input image: I; Blurry image: B.
302
+ 1. K = I + weight * (I - B)
303
+ 2. Mask = 1 if abs(I - B) > threshold, else: 0
304
+ 3. Blur the mask to obtain a soft mask.
305
+ 4. Out = Mask * K + (1 - Mask) * I
306
+ Args:
307
+ img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
308
+ weight (float): Sharp weight. Default: 0.5.
309
+ radius (float): Kernel size of Gaussian blur. Default: 50.
310
+ threshold (int): Threshold on abs(I - B), on a 0-255 scale, for building the mask. Default: 10.
311
+ """
312
+ if radius % 2 == 0:
313
+ radius += 1
314
+ blur = cv2.GaussianBlur(img, (radius, radius), 0)
315
+ residual = img - blur
316
+ mask = np.abs(residual) * 255 > threshold
317
+ mask = mask.astype('float32')
318
+ soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
319
+
320
+ K = img + weight * residual
321
+ K = np.clip(K, 0, 1)
322
+ return soft_mask * K + (1 - soft_mask) * img
323
+
324
+
325
+ def add_blur(img, sf=4):
326
+ wd2 = 4.0 + sf
327
+ wd = 2.0 + 0.2 * sf
328
+ if random.random() < 0.5:
329
+ l1 = wd2 * random.random()
330
+ l2 = wd2 * random.random()
331
+ k = anisotropic_Gaussian(ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
332
+ else:
333
+ k = fspecial('gaussian', 2 * random.randint(2, 11) + 3, wd * random.random())
334
+ img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
335
+
336
+ return img
337
+
338
+
339
+ def add_resize(img, sf=4):
340
+ rnum = np.random.rand()
341
+ if rnum > 0.8: # up
342
+ sf1 = random.uniform(1, 2)
343
+ elif rnum < 0.7: # down
344
+ sf1 = random.uniform(0.5 / sf, 1)
345
+ else:
346
+ sf1 = 1.0
347
+ img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
348
+ img = np.clip(img, 0.0, 1.0)
349
+
350
+ return img
351
+
352
+
353
+ # def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
354
+ # noise_level = random.randint(noise_level1, noise_level2)
355
+ # rnum = np.random.rand()
356
+ # if rnum > 0.6: # add color Gaussian noise
357
+ # img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
358
+ # elif rnum < 0.4: # add grayscale Gaussian noise
359
+ # img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
360
+ # else: # add noise
361
+ # L = noise_level2 / 255.
362
+ # D = np.diag(np.random.rand(3))
363
+ # U = orth(np.random.rand(3, 3))
364
+ # conv = np.dot(np.dot(np.transpose(U), D), U)
365
+ # img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
366
+ # img = np.clip(img, 0.0, 1.0)
367
+ # return img
368
+
369
+ def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
370
+ noise_level = random.randint(noise_level1, noise_level2)
371
+ rnum = np.random.rand()
372
+ if rnum > 0.6: # add color Gaussian noise
373
+ img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
374
+ elif rnum < 0.4: # add grayscale Gaussian noise
375
+ img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
376
+ else: # add noise
377
+ L = noise_level2 / 255.
378
+ D = np.diag(np.random.rand(3))
379
+ U = orth(np.random.rand(3, 3))
380
+ conv = np.dot(np.dot(np.transpose(U), D), U)
381
+ img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
382
+ img = np.clip(img, 0.0, 1.0)
383
+ return img
384
+
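A short sketch of the Gaussian-noise step; which branch runs (per-channel, grayscale, or channel-correlated noise) depends on an internal random draw:

import numpy as np
img = np.random.rand(64, 64, 3).astype(np.float32)
noisy = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)  # output stays clipped to [0, 1]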
385
+
386
+ def add_speckle_noise(img, noise_level1=2, noise_level2=25):
387
+ noise_level = random.randint(noise_level1, noise_level2)
388
+ img = np.clip(img, 0.0, 1.0)
389
+ rnum = random.random()
390
+ if rnum > 0.6:
391
+ img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
392
+ elif rnum < 0.4:
393
+ img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
394
+ else:
395
+ L = noise_level2 / 255.
396
+ D = np.diag(np.random.rand(3))
397
+ U = orth(np.random.rand(3, 3))
398
+ conv = np.dot(np.dot(np.transpose(U), D), U)
399
+ img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
400
+ img = np.clip(img, 0.0, 1.0)
401
+ return img
402
+
403
+
404
+ def add_Poisson_noise(img):
405
+ img = np.clip((img * 255.0).round(), 0, 255) / 255.
406
+ vals = 10 ** (2 * random.random() + 2.0) # [2, 4]
407
+ if random.random() < 0.5:
408
+ img = np.random.poisson(img * vals).astype(np.float32) / vals
409
+ else:
410
+ img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
411
+ img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
412
+ noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
413
+ img += noise_gray[:, :, np.newaxis]
414
+ img = np.clip(img, 0.0, 1.0)
415
+ return img
416
+
417
+
418
+ def add_JPEG_noise(img):
419
+ quality_factor = random.randint(30, 95)
420
+ img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
421
+ result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
422
+ img = cv2.imdecode(encimg, 1)
423
+ img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
424
+ return img
425
+
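A sketch of the JPEG round trip on a float RGB image in [0, 1]; the quality factor is drawn from [30, 95]:

import numpy as np
img = np.random.rand(64, 64, 3).astype(np.float32)
jpeg = add_JPEG_noise(img)   # encode to .jpg in memory, decode, convert back to float RGB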
426
+
427
+ def random_crop(lq, hq, sf=4, lq_patchsize=64):
428
+ h, w = lq.shape[:2]
429
+ rnd_h = random.randint(0, h - lq_patchsize)
430
+ rnd_w = random.randint(0, w - lq_patchsize)
431
+ lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
432
+
433
+ rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
434
+ hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
435
+ return lq, hq
436
+
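A sketch showing that the LQ and HQ crops stay spatially aligned:

import numpy as np
lq = np.random.rand(100, 100, 3).astype(np.float32)
hq = np.random.rand(400, 400, 3).astype(np.float32)              # hq is sf times the lq size
lq_patch, hq_patch = random_crop(lq, hq, sf=4, lq_patchsize=64)
print(lq_patch.shape, hq_patch.shape)                             # (64, 64, 3) and (256, 256, 3)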
437
+
438
+ def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
439
+ """
440
+ This is the degradation model of BSRGAN from the paper
441
+ "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
442
+ ----------
443
+ img: HXWXC, [0, 1], its size should be larger than (lq_patchsizexsf)x(lq_patchsizexsf)
444
+ sf: scale factor
445
+ isp_model: camera ISP model
446
+ Returns
447
+ -------
448
+ img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
449
+ hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
450
+ """
451
+ isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
452
+ sf_ori = sf
453
+
454
+ h1, w1 = img.shape[:2]
455
+ img = img.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop
456
+ h, w = img.shape[:2]
457
+
458
+ if h < lq_patchsize * sf or w < lq_patchsize * sf:
459
+ raise ValueError(f'img size ({h1}X{w1}) is too small!')
460
+
461
+ hq = img.copy()
462
+
463
+ if sf == 4 and random.random() < scale2_prob: # downsample1
464
+ if np.random.rand() < 0.5:
465
+ img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
466
+ interpolation=random.choice([1, 2, 3]))
467
+ else:
468
+ img = util.imresize_np(img, 1 / 2, True)
469
+ img = np.clip(img, 0.0, 1.0)
470
+ sf = 2
471
+
472
+ shuffle_order = random.sample(range(7), 7)
473
+ idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
474
+ if idx1 > idx2: # keep downsample3 last
475
+ shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
476
+
477
+ for i in shuffle_order:
478
+
479
+ if i == 0:
480
+ img = add_blur(img, sf=sf)
481
+
482
+ elif i == 1:
483
+ img = add_blur(img, sf=sf)
484
+
485
+ elif i == 2:
486
+ a, b = img.shape[1], img.shape[0]
487
+ # downsample2
488
+ if random.random() < 0.75:
489
+ sf1 = random.uniform(1, 2 * sf)
490
+ img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
491
+ interpolation=random.choice([1, 2, 3]))
492
+ else:
493
+ k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
494
+ k_shifted = shift_pixel(k, sf)
495
+ k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
496
+ img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
497
+ img = img[0::sf, 0::sf, ...] # nearest downsampling
498
+ img = np.clip(img, 0.0, 1.0)
499
+
500
+ elif i == 3:
501
+ # downsample3
502
+ img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
503
+ img = np.clip(img, 0.0, 1.0)
504
+
505
+ elif i == 4:
506
+ # add Gaussian noise
507
+ img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
508
+
509
+ elif i == 5:
510
+ # add JPEG noise
511
+ if random.random() < jpeg_prob:
512
+ img = add_JPEG_noise(img)
513
+
514
+ elif i == 6:
515
+ # add processed camera sensor noise
516
+ if random.random() < isp_prob and isp_model is not None:
517
+ with torch.no_grad():
518
+ img, hq = isp_model.forward(img.copy(), hq)
519
+
520
+ # add final JPEG compression noise
521
+ img = add_JPEG_noise(img)
522
+
523
+ # random crop
524
+ img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
525
+
526
+ return img, hq
527
+
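A sketch of the full pipeline; the input must be at least lq_patchsize * sf pixels per side, otherwise the ValueError above is raised:

import numpy as np
hr = np.random.rand(288, 288, 3).astype(np.float32)   # 288 = lq_patchsize (72) * sf (4)
lq, hq = degradation_bsrgan(hr, sf=4, lq_patchsize=72)
print(lq.shape, hq.shape)                              # (72, 72, 3) and (288, 288, 3)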
528
+
529
+ # todo no isp_model?
530
+ def degradation_bsrgan_variant(image, sf=4, isp_model=None):
531
+ """
532
+ This is the degradation model of BSRGAN from the paper
533
+ "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
534
+ ----------
535
+ sf: scale factor
536
+ isp_model: camera ISP model
537
+ Returns
538
+ -------
539
+ example: dict {"image": degraded image as uint8, about 1/sf the input size}; unlike degradation_bsrgan, no hq patch is returned
541
+ """
542
+ image = util.uint2single(image)
543
+ isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
544
+ sf_ori = sf
545
+
546
+ h1, w1 = image.shape[:2]
547
+ image = image.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop
548
+ h, w = image.shape[:2]
549
+
550
+ hq = image.copy()
551
+
552
+ if sf == 4 and random.random() < scale2_prob: # downsample1
553
+ if np.random.rand() < 0.5:
554
+ image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
555
+ interpolation=random.choice([1, 2, 3]))
556
+ else:
557
+ image = util.imresize_np(image, 1 / 2, True)
558
+ image = np.clip(image, 0.0, 1.0)
559
+ sf = 2
560
+
561
+ shuffle_order = random.sample(range(7), 7)
562
+ idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
563
+ if idx1 > idx2: # keep downsample3 last
564
+ shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
565
+
566
+ for i in shuffle_order:
567
+
568
+ if i == 0:
569
+ image = add_blur(image, sf=sf)
570
+
571
+ elif i == 1:
572
+ image = add_blur(image, sf=sf)
573
+
574
+ elif i == 2:
575
+ a, b = image.shape[1], image.shape[0]
576
+ # downsample2
577
+ if random.random() < 0.75:
578
+ sf1 = random.uniform(1, 2 * sf)
579
+ image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
580
+ interpolation=random.choice([1, 2, 3]))
581
+ else:
582
+ k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
583
+ k_shifted = shift_pixel(k, sf)
584
+ k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
585
+ image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
586
+ image = image[0::sf, 0::sf, ...] # nearest downsampling
587
+ image = np.clip(image, 0.0, 1.0)
588
+
589
+ elif i == 3:
590
+ # downsample3
591
+ image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
592
+ image = np.clip(image, 0.0, 1.0)
593
+
594
+ elif i == 4:
595
+ # add Gaussian noise
596
+ image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25)
597
+
598
+ elif i == 5:
599
+ # add JPEG noise
600
+ if random.random() < jpeg_prob:
601
+ image = add_JPEG_noise(image)
602
+
603
+ # elif i == 6:
604
+ # # add processed camera sensor noise
605
+ # if random.random() < isp_prob and isp_model is not None:
606
+ # with torch.no_grad():
607
+ # img, hq = isp_model.forward(img.copy(), hq)
608
+
609
+ # add final JPEG compression noise
610
+ image = add_JPEG_noise(image)
611
+ image = util.single2uint(image)
612
+ example = {"image":image}
613
+ return example
614
+
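Unlike degradation_bsrgan, the variant takes a uint8 image and returns a dict instead of an (lq, hq) pair; a quick sketch:

import numpy as np
hr_uint8 = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)  # HWC uint8 input
out = degradation_bsrgan_variant(hr_uint8, sf=4)
lq_uint8 = out["image"]                                               # degraded uint8 image, about 1/sf the input size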
615
+
616
+ # TODO in case there is a pickle error one needs to replace a += x with a = a + x in add_speckle_noise etc...
617
+ def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None):
618
+ """
619
+ This is an extended degradation model by combining
620
+ the degradation models of BSRGAN and Real-ESRGAN
621
+ ----------
622
+ img: HXWXC, [0, 1], its size should be larger than (lq_patchsizexsf)x(lq_patchsizexsf)
623
+ sf: scale factor
624
+ use_shuffle: the degradation shuffle
625
+ use_sharp: sharpening the img
626
+ Returns
627
+ -------
628
+ img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
629
+ hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
630
+ """
631
+
632
+ h1, w1 = img.shape[:2]
633
+ img = img.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop
634
+ h, w = img.shape[:2]
635
+
636
+ if h < lq_patchsize * sf or w < lq_patchsize * sf:
637
+ raise ValueError(f'img size ({h1}X{w1}) is too small!')
638
+
639
+ if use_sharp:
640
+ img = add_sharpening(img)
641
+ hq = img.copy()
642
+
643
+ if random.random() < shuffle_prob:
644
+ shuffle_order = random.sample(range(13), 13)
645
+ else:
646
+ shuffle_order = list(range(13))
647
+ # local shuffle for noise, JPEG is always the last one
648
+ shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6)))
649
+ shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13)))
650
+
651
+ poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1
652
+
653
+ for i in shuffle_order:
654
+ if i == 0:
655
+ img = add_blur(img, sf=sf)
656
+ elif i == 1:
657
+ img = add_resize(img, sf=sf)
658
+ elif i == 2:
659
+ img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
660
+ elif i == 3:
661
+ if random.random() < poisson_prob:
662
+ img = add_Poisson_noise(img)
663
+ elif i == 4:
664
+ if random.random() < speckle_prob:
665
+ img = add_speckle_noise(img)
666
+ elif i == 5:
667
+ if random.random() < isp_prob and isp_model is not None:
668
+ with torch.no_grad():
669
+ img, hq = isp_model.forward(img.copy(), hq)
670
+ elif i == 6:
671
+ img = add_JPEG_noise(img)
672
+ elif i == 7:
673
+ img = add_blur(img, sf=sf)
674
+ elif i == 8:
675
+ img = add_resize(img, sf=sf)
676
+ elif i == 9:
677
+ img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
678
+ elif i == 10:
679
+ if random.random() < poisson_prob:
680
+ img = add_Poisson_noise(img)
681
+ elif i == 11:
682
+ if random.random() < speckle_prob:
683
+ img = add_speckle_noise(img)
684
+ elif i == 12:
685
+ if random.random() < isp_prob and isp_model is not None:
686
+ with torch.no_grad():
687
+ img, hq = isp_model.forward(img.copy(), hq)
688
+ else:
689
+ print('check the shuffle!')
690
+
691
+ # resize to desired size
692
+ img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])),
693
+ interpolation=random.choice([1, 2, 3]))
694
+
695
+ # add final JPEG compression noise
696
+ img = add_JPEG_noise(img)
697
+
698
+ # random crop
699
+ img, hq = random_crop(img, hq, sf, lq_patchsize)
700
+
701
+ return img, hq
702
+
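A sketch of the combined BSRGAN / Real-ESRGAN pipeline under the same assumptions as the earlier examples:

import numpy as np
hr = np.random.rand(256, 256, 3).astype(np.float32)
lq, hq = degradation_bsrgan_plus(hr, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64)
print(lq.shape, hq.shape)   # (64, 64, 3) and (256, 256, 3)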
703
+
704
+ if __name__ == '__main__':
+ print("hey")
+ img = util.imread_uint('utils/test.png', 3)
+ img = img[:448, :448]
+ h = img.shape[0] // 4
+ print("resizing to", h)
+ sf = 4
+ deg_fn = partial(degradation_bsrgan_variant, sf=sf)
+ for i in range(20):
+ print(i)
+ img_hq = img
+ img_lq = deg_fn(img)["image"]
+ img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
+ print(img_lq)
+ img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
+ print(img_lq.shape)
+ print("bicubic", img_lq_bicubic.shape)
+ print(img_hq.shape)
+ lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
+ interpolation=0)
+ lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
+ interpolation=0)
+ img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
+ util.imsave(img_concat, str(i) + '.png')
729
+
730
+
gligen/ldm/modules/image_degradation/bsrgan_light.py ADDED
@@ -0,0 +1,650 @@
1
+ # -*- coding: utf-8 -*-
2
+ import numpy as np
3
+ import cv2
4
+ import torch
5
+
6
+ from functools import partial
7
+ import random
8
+ from scipy import ndimage
9
+ import scipy
10
+ import scipy.stats as ss
11
+ from scipy.interpolate import interp2d
12
+ from scipy.linalg import orth
13
+ import albumentations
14
+
15
+ import ldm.modules.image_degradation.utils_image as util
16
+
17
+ """
18
+ # --------------------------------------------
19
+ # Super-Resolution
20
+ # --------------------------------------------
21
+ #
22
+ # Kai Zhang (cskaizhang@gmail.com)
23
+ # https://github.com/cszn
24
+ # From 2019/03--2021/08
25
+ # --------------------------------------------
26
+ """
27
+
28
+
29
+ def modcrop_np(img, sf):
30
+ '''
31
+ Args:
32
+ img: numpy image, WxH or WxHxC
33
+ sf: scale factor
34
+ Return:
35
+ cropped image
36
+ '''
37
+ w, h = img.shape[:2]
38
+ im = np.copy(img)
39
+ return im[:w - w % sf, :h - h % sf, ...]
40
+
41
+
42
+ """
43
+ # --------------------------------------------
44
+ # anisotropic Gaussian kernels
45
+ # --------------------------------------------
46
+ """
47
+
48
+
49
+ def analytic_kernel(k):
50
+ """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
51
+ k_size = k.shape[0]
52
+ # Calculate the big kernels size
53
+ big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
54
+ # Loop over the small kernel to fill the big one
55
+ for r in range(k_size):
56
+ for c in range(k_size):
57
+ big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
58
+ # Crop the edges of the big kernel to ignore very small values and increase run time of SR
59
+ crop = k_size // 2
60
+ cropped_big_k = big_k[crop:-crop, crop:-crop]
61
+ # Normalize to 1
62
+ return cropped_big_k / cropped_big_k.sum()
63
+
64
+
65
+ def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
66
+ """ generate an anisotropic Gaussian kernel
67
+ Args:
68
+ ksize : e.g., 15, kernel size
69
+ theta : [0, pi], rotation angle range
70
+ l1 : [0.1,50], scaling of eigenvalues
71
+ l2 : [0.1,l1], scaling of eigenvalues
72
+ If l1 = l2, will get an isotropic Gaussian kernel.
73
+ Returns:
74
+ k : kernel
75
+ """
76
+
77
+ v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
78
+ V = np.array([[v[0], v[1]], [v[1], -v[0]]])
79
+ D = np.array([[l1, 0], [0, l2]])
80
+ Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
81
+ k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
82
+
83
+ return k
84
+
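A quick sketch, assuming this file is importable as ldm.modules.image_degradation.bsrgan_light:

import numpy as np
k = anisotropic_Gaussian(ksize=15, theta=np.pi / 4, l1=6, l2=1)  # elongated Gaussian rotated by 45 degrees
print(k.shape, float(k.sum()))                                    # (15, 15), ~1.0 (normalized)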
85
+
86
+ def gm_blur_kernel(mean, cov, size=15):
87
+ center = size / 2.0 + 0.5
88
+ k = np.zeros([size, size])
89
+ for y in range(size):
90
+ for x in range(size):
91
+ cy = y - center + 1
92
+ cx = x - center + 1
93
+ k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
94
+
95
+ k = k / np.sum(k)
96
+ return k
97
+
98
+
99
+ def shift_pixel(x, sf, upper_left=True):
100
+ """shift pixel for super-resolution with different scale factors
101
+ Args:
102
+ x: WxHxC or WxH
103
+ sf: scale factor
104
+ upper_left: shift direction
105
+ """
106
+ h, w = x.shape[:2]
107
+ shift = (sf - 1) * 0.5
108
+ xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
109
+ if upper_left:
110
+ x1 = xv + shift
111
+ y1 = yv + shift
112
+ else:
113
+ x1 = xv - shift
114
+ y1 = yv - shift
115
+
116
+ x1 = np.clip(x1, 0, w - 1)
117
+ y1 = np.clip(y1, 0, h - 1)
118
+
119
+ if x.ndim == 2:
120
+ x = interp2d(xv, yv, x)(x1, y1)
121
+ if x.ndim == 3:
122
+ for i in range(x.shape[-1]):
123
+ x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
124
+
125
+ return x
126
+
127
+
128
+ def blur(x, k):
129
+ '''
130
+ x: image, NxcxHxW
131
+ k: kernel, Nx1xhxw
132
+ '''
133
+ n, c = x.shape[:2]
134
+ p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
135
+ x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
136
+ k = k.repeat(1, c, 1, 1)
137
+ k = k.view(-1, 1, k.shape[2], k.shape[3])
138
+ x = x.view(1, -1, x.shape[2], x.shape[3])
139
+ x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
140
+ x = x.view(n, c, x.shape[2], x.shape[3])
141
+
142
+ return x
143
+
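blur operates on batched torch tensors with one kernel per image; a small sketch:

import torch
x = torch.rand(2, 3, 64, 64)        # NxCxHxW batch
k = torch.ones(2, 1, 5, 5) / 25.0   # one 5x5 averaging kernel per image (Nx1xhxw)
y = blur(x, k)                      # per-image depthwise convolution with replicate padding
print(y.shape)                      # torch.Size([2, 3, 64, 64])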
144
+
145
+ def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
146
+ """"
147
+ # modified version of https://github.com/assafshocher/BlindSR_dataset_generator
148
+ # Kai Zhang
149
+ # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var
150
+ # max_var = 2.5 * sf
151
+ """
152
+ # Set random eigen-vals (lambdas) and angle (theta) for COV matrix
153
+ lambda_1 = min_var + np.random.rand() * (max_var - min_var)
154
+ lambda_2 = min_var + np.random.rand() * (max_var - min_var)
155
+ theta = np.random.rand() * np.pi # random theta
156
+ noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
157
+
158
+ # Set COV matrix using Lambdas and Theta
159
+ LAMBDA = np.diag([lambda_1, lambda_2])
160
+ Q = np.array([[np.cos(theta), -np.sin(theta)],
161
+ [np.sin(theta), np.cos(theta)]])
162
+ SIGMA = Q @ LAMBDA @ Q.T
163
+ INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
164
+
165
+ # Set expectation position (shifting kernel for aligned image)
166
+ MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2)
167
+ MU = MU[None, None, :, None]
168
+
169
+ # Create meshgrid for Gaussian
170
+ [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
171
+ Z = np.stack([X, Y], 2)[:, :, :, None]
172
+
173
+ # Calculate Gaussian for every pixel of the kernel
174
+ ZZ = Z - MU
175
+ ZZ_t = ZZ.transpose(0, 1, 3, 2)
176
+ raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
177
+
178
+ # shift the kernel so it will be centered
179
+ # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
180
+
181
+ # Normalize the kernel and return
182
+ # kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
183
+ kernel = raw_kernel / np.sum(raw_kernel)
184
+ return kernel
185
+
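gen_kernel samples a random anisotropic Gaussian; a sketch that only checks shape and normalization:

import numpy as np
k = gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10.)
print(k.shape, float(k.sum()))   # (15, 15), ~1.0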
186
+
187
+ def fspecial_gaussian(hsize, sigma):
188
+ hsize = [hsize, hsize]
189
+ siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
190
+ std = sigma
191
+ [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
192
+ arg = -(x * x + y * y) / (2 * std * std)
193
+ h = np.exp(arg)
194
+ h[h < np.finfo(float).eps * h.max()] = 0
195
+ sumh = h.sum()
196
+ if sumh != 0:
197
+ h = h / sumh
198
+ return h
199
+
200
+
201
+ def fspecial_laplacian(alpha):
202
+ alpha = max([0, min([alpha, 1])])
203
+ h1 = alpha / (alpha + 1)
204
+ h2 = (1 - alpha) / (alpha + 1)
205
+ h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
206
+ h = np.array(h)
207
+ return h
208
+
209
+
210
+ def fspecial(filter_type, *args, **kwargs):
211
+ '''
212
+ python code from:
213
+ https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
214
+ '''
215
+ if filter_type == 'gaussian':
216
+ return fspecial_gaussian(*args, **kwargs)
217
+ if filter_type == 'laplacian':
218
+ return fspecial_laplacian(*args, **kwargs)
219
+
220
+
221
+ """
222
+ # --------------------------------------------
223
+ # degradation models
224
+ # --------------------------------------------
225
+ """
226
+
227
+
228
+ def bicubic_degradation(x, sf=3):
229
+ '''
230
+ Args:
231
+ x: HxWxC image, [0, 1]
232
+ sf: down-scale factor
233
+ Return:
234
+ bicubicly downsampled LR image
235
+ '''
236
+ x = util.imresize_np(x, scale=1 / sf)
237
+ return x
238
+
239
+
240
+ def srmd_degradation(x, k, sf=3):
241
+ ''' blur + bicubic downsampling
242
+ Args:
243
+ x: HxWxC image, [0, 1]
244
+ k: hxw, double
245
+ sf: down-scale factor
246
+ Return:
247
+ downsampled LR image
248
+ Reference:
249
+ @inproceedings{zhang2018learning,
250
+ title={Learning a single convolutional super-resolution network for multiple degradations},
251
+ author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
252
+ booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
253
+ pages={3262--3271},
254
+ year={2018}
255
+ }
256
+ '''
257
+ x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror'
258
+ x = bicubic_degradation(x, sf=sf)
259
+ return x
260
+
261
+
262
+ def dpsr_degradation(x, k, sf=3):
263
+ ''' bicubic downsampling + blur
264
+ Args:
265
+ x: HxWxC image, [0, 1]
266
+ k: hxw, double
267
+ sf: down-scale factor
268
+ Return:
269
+ downsampled LR image
270
+ Reference:
271
+ @inproceedings{zhang2019deep,
272
+ title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
273
+ author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
274
+ booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
275
+ pages={1671--1681},
276
+ year={2019}
277
+ }
278
+ '''
279
+ x = bicubic_degradation(x, sf=sf)
280
+ x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
281
+ return x
282
+
283
+
284
+ def classical_degradation(x, k, sf=3):
285
+ ''' blur + downsampling
286
+ Args:
287
+ x: HxWxC image, [0, 1]/[0, 255]
288
+ k: hxw, double
289
+ sf: down-scale factor
290
+ Return:
291
+ downsampled LR image
292
+ '''
293
+ x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
294
+ # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
295
+ st = 0
296
+ return x[st::sf, st::sf, ...]
297
+
298
+
299
+ def add_sharpening(img, weight=0.5, radius=50, threshold=10):
300
+ """USM sharpening. borrowed from real-ESRGAN
301
+ Input image: I; Blurry image: B.
302
+ 1. K = I + weight * (I - B)
303
+ 2. Mask = 1 if abs(I - B) > threshold, else: 0
304
+ 3. Blur Mask with a Gaussian filter to get a soft mask.
305
+ 4. Out = Mask * K + (1 - Mask) * I
306
+ Args:
307
+ img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
308
+ weight (float): Sharp weight. Default: 0.5.
309
+ radius (float): Kernel size of Gaussian blur. Default: 50.
310
+ threshold (int): Residual threshold on the 0-255 scale; pixels with |I - B| * 255 <= threshold keep the original value. Default: 10.
311
+ """
312
+ if radius % 2 == 0:
313
+ radius += 1
314
+ blur = cv2.GaussianBlur(img, (radius, radius), 0)
315
+ residual = img - blur
316
+ mask = np.abs(residual) * 255 > threshold
317
+ mask = mask.astype('float32')
318
+ soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
319
+
320
+ K = img + weight * residual
321
+ K = np.clip(K, 0, 1)
322
+ return soft_mask * K + (1 - soft_mask) * img
323
+
324
+
325
+ def add_blur(img, sf=4):
326
+ wd2 = 4.0 + sf
327
+ wd = 2.0 + 0.2 * sf
328
+
329
+ wd2 = wd2/4
330
+ wd = wd/4
331
+
332
+ if random.random() < 0.5:
333
+ l1 = wd2 * random.random()
334
+ l2 = wd2 * random.random()
335
+ k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
336
+ else:
337
+ k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random())
338
+ img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
339
+
340
+ return img
341
+
342
+
343
+ def add_resize(img, sf=4):
344
+ rnum = np.random.rand()
345
+ if rnum > 0.8: # up
346
+ sf1 = random.uniform(1, 2)
347
+ elif rnum < 0.7: # down
348
+ sf1 = random.uniform(0.5 / sf, 1)
349
+ else:
350
+ sf1 = 1.0
351
+ img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
352
+ img = np.clip(img, 0.0, 1.0)
353
+
354
+ return img
355
+
356
+
357
+ # def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
358
+ # noise_level = random.randint(noise_level1, noise_level2)
359
+ # rnum = np.random.rand()
360
+ # if rnum > 0.6: # add color Gaussian noise
361
+ # img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
362
+ # elif rnum < 0.4: # add grayscale Gaussian noise
363
+ # img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
364
+ # else: # add noise
365
+ # L = noise_level2 / 255.
366
+ # D = np.diag(np.random.rand(3))
367
+ # U = orth(np.random.rand(3, 3))
368
+ # conv = np.dot(np.dot(np.transpose(U), D), U)
369
+ # img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
370
+ # img = np.clip(img, 0.0, 1.0)
371
+ # return img
372
+
373
+ def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
374
+ noise_level = random.randint(noise_level1, noise_level2)
375
+ rnum = np.random.rand()
376
+ if rnum > 0.6: # add color Gaussian noise
377
+ img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
378
+ elif rnum < 0.4: # add grayscale Gaussian noise
379
+ img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
380
+ else: # add noise
381
+ L = noise_level2 / 255.
382
+ D = np.diag(np.random.rand(3))
383
+ U = orth(np.random.rand(3, 3))
384
+ conv = np.dot(np.dot(np.transpose(U), D), U)
385
+ img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
386
+ img = np.clip(img, 0.0, 1.0)
387
+ return img
388
+
389
+
390
+ def add_speckle_noise(img, noise_level1=2, noise_level2=25):
391
+ noise_level = random.randint(noise_level1, noise_level2)
392
+ img = np.clip(img, 0.0, 1.0)
393
+ rnum = random.random()
394
+ if rnum > 0.6:
395
+ img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
396
+ elif rnum < 0.4:
397
+ img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
398
+ else:
399
+ L = noise_level2 / 255.
400
+ D = np.diag(np.random.rand(3))
401
+ U = orth(np.random.rand(3, 3))
402
+ conv = np.dot(np.dot(np.transpose(U), D), U)
403
+ img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
404
+ img = np.clip(img, 0.0, 1.0)
405
+ return img
406
+
407
+
408
+ def add_Poisson_noise(img):
409
+ img = np.clip((img * 255.0).round(), 0, 255) / 255.
410
+ vals = 10 ** (2 * random.random() + 2.0) # [2, 4]
411
+ if random.random() < 0.5:
412
+ img = np.random.poisson(img * vals).astype(np.float32) / vals
413
+ else:
414
+ img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
415
+ img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
416
+ noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
417
+ img += noise_gray[:, :, np.newaxis]
418
+ img = np.clip(img, 0.0, 1.0)
419
+ return img
420
+
421
+
422
+ def add_JPEG_noise(img):
423
+ quality_factor = random.randint(80, 95)
424
+ img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
425
+ result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
426
+ img = cv2.imdecode(encimg, 1)
427
+ img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
428
+ return img
429
+
430
+
431
+ def random_crop(lq, hq, sf=4, lq_patchsize=64):
432
+ h, w = lq.shape[:2]
433
+ rnd_h = random.randint(0, h - lq_patchsize)
434
+ rnd_w = random.randint(0, w - lq_patchsize)
435
+ lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
436
+
437
+ rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
438
+ hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
439
+ return lq, hq
440
+
441
+
442
+ def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
443
+ """
444
+ This is the degradation model of BSRGAN from the paper
445
+ "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
446
+ ----------
447
+ img: HXWXC, [0, 1], its size should be larger than (lq_patchsizexsf)x(lq_patchsizexsf)
448
+ sf: scale factor
449
+ isp_model: camera ISP model
450
+ Returns
451
+ -------
452
+ img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
453
+ hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
454
+ """
455
+ isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
456
+ sf_ori = sf
457
+
458
+ h1, w1 = img.shape[:2]
459
+ img = img.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop
460
+ h, w = img.shape[:2]
461
+
462
+ if h < lq_patchsize * sf or w < lq_patchsize * sf:
463
+ raise ValueError(f'img size ({h1}X{w1}) is too small!')
464
+
465
+ hq = img.copy()
466
+
467
+ if sf == 4 and random.random() < scale2_prob: # downsample1
468
+ if np.random.rand() < 0.5:
469
+ img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
470
+ interpolation=random.choice([1, 2, 3]))
471
+ else:
472
+ img = util.imresize_np(img, 1 / 2, True)
473
+ img = np.clip(img, 0.0, 1.0)
474
+ sf = 2
475
+
476
+ shuffle_order = random.sample(range(7), 7)
477
+ idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
478
+ if idx1 > idx2: # keep downsample3 last
479
+ shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
480
+
481
+ for i in shuffle_order:
482
+
483
+ if i == 0:
484
+ img = add_blur(img, sf=sf)
485
+
486
+ elif i == 1:
487
+ img = add_blur(img, sf=sf)
488
+
489
+ elif i == 2:
490
+ a, b = img.shape[1], img.shape[0]
491
+ # downsample2
492
+ if random.random() < 0.75:
493
+ sf1 = random.uniform(1, 2 * sf)
494
+ img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
495
+ interpolation=random.choice([1, 2, 3]))
496
+ else:
497
+ k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
498
+ k_shifted = shift_pixel(k, sf)
499
+ k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
500
+ img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
501
+ img = img[0::sf, 0::sf, ...] # nearest downsampling
502
+ img = np.clip(img, 0.0, 1.0)
503
+
504
+ elif i == 3:
505
+ # downsample3
506
+ img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
507
+ img = np.clip(img, 0.0, 1.0)
508
+
509
+ elif i == 4:
510
+ # add Gaussian noise
511
+ img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8)
512
+
513
+ elif i == 5:
514
+ # add JPEG noise
515
+ if random.random() < jpeg_prob:
516
+ img = add_JPEG_noise(img)
517
+
518
+ elif i == 6:
519
+ # add processed camera sensor noise
520
+ if random.random() < isp_prob and isp_model is not None:
521
+ with torch.no_grad():
522
+ img, hq = isp_model.forward(img.copy(), hq)
523
+
524
+ # add final JPEG compression noise
525
+ img = add_JPEG_noise(img)
526
+
527
+ # random crop
528
+ img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
529
+
530
+ return img, hq
531
+
532
+
533
+ # todo no isp_model?
534
+ def degradation_bsrgan_variant(image, sf=4, isp_model=None):
535
+ """
536
+ This is the degradation model of BSRGAN from the paper
537
+ "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
538
+ ----------
539
+ sf: scale factor
540
+ isp_model: camera ISP model
541
+ Returns
542
+ -------
543
+ example: dict {"image": degraded image as uint8, about 1/sf the input size}; unlike degradation_bsrgan, no hq patch is returned
545
+ """
546
+ image = util.uint2single(image)
547
+ isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
548
+ sf_ori = sf
549
+
550
+ h1, w1 = image.shape[:2]
551
+ image = image.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop
552
+ h, w = image.shape[:2]
553
+
554
+ hq = image.copy()
555
+
556
+ if sf == 4 and random.random() < scale2_prob: # downsample1
557
+ if np.random.rand() < 0.5:
558
+ image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
559
+ interpolation=random.choice([1, 2, 3]))
560
+ else:
561
+ image = util.imresize_np(image, 1 / 2, True)
562
+ image = np.clip(image, 0.0, 1.0)
563
+ sf = 2
564
+
565
+ shuffle_order = random.sample(range(7), 7)
566
+ idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
567
+ if idx1 > idx2: # keep downsample3 last
568
+ shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
569
+
570
+ for i in shuffle_order:
571
+
572
+ if i == 0:
573
+ image = add_blur(image, sf=sf)
574
+
575
+ # elif i == 1:
576
+ # image = add_blur(image, sf=sf)
577
+
578
+ if i == 0:
579
+ pass
580
+
581
+ elif i == 2:
582
+ a, b = image.shape[1], image.shape[0]
583
+ # downsample2
584
+ if random.random() < 0.8:
585
+ sf1 = random.uniform(1, 2 * sf)
586
+ image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
587
+ interpolation=random.choice([1, 2, 3]))
588
+ else:
589
+ k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
590
+ k_shifted = shift_pixel(k, sf)
591
+ k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
592
+ image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
593
+ image = image[0::sf, 0::sf, ...] # nearest downsampling
594
+
595
+ image = np.clip(image, 0.0, 1.0)
596
+
597
+ elif i == 3:
598
+ # downsample3
599
+ image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
600
+ image = np.clip(image, 0.0, 1.0)
601
+
602
+ elif i == 4:
603
+ # add Gaussian noise
604
+ image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2)
605
+
606
+ elif i == 5:
607
+ # add JPEG noise
608
+ if random.random() < jpeg_prob:
609
+ image = add_JPEG_noise(image)
610
+ #
611
+ # elif i == 6:
612
+ # # add processed camera sensor noise
613
+ # if random.random() < isp_prob and isp_model is not None:
614
+ # with torch.no_grad():
615
+ # img, hq = isp_model.forward(img.copy(), hq)
616
+
617
+ # add final JPEG compression noise
618
+ image = add_JPEG_noise(image)
619
+ image = util.single2uint(image)
620
+ example = {"image": image}
621
+ return example
622
+
623
+
624
+
625
+
626
+ if __name__ == '__main__':
627
+ print("hey")
628
+ img = util.imread_uint('utils/test.png', 3)
629
+ img = img[:448, :448]
630
+ h = img.shape[0] // 4
631
+ print("resizing to", h)
632
+ sf = 4
633
+ deg_fn = partial(degradation_bsrgan_variant, sf=sf)
634
+ for i in range(20):
635
+ print(i)
636
+ img_hq = img
637
+ img_lq = deg_fn(img)["image"]
638
+ img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
639
+ print(img_lq)
640
+ img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
641
+ print(img_lq.shape)
642
+ print("bicubic", img_lq_bicubic.shape)
643
+ print(img_hq.shape)
644
+ lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
645
+ interpolation=0)
646
+ lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic),
647
+ (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
648
+ interpolation=0)
649
+ img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
650
+ util.imsave(img_concat, str(i) + '.png')