Upload folder using huggingface_hub
Browse files- extensions-builtin/LDSR/ldsr_model_arch.py +250 -0
- extensions-builtin/LDSR/preload.py +6 -0
- extensions-builtin/LDSR/scripts/ldsr_model.py +68 -0
- extensions-builtin/LDSR/sd_hijack_autoencoder.py +293 -0
- extensions-builtin/LDSR/sd_hijack_ddpm_v1.py +1443 -0
- extensions-builtin/LDSR/vqvae_quantize.py +147 -0
- extensions-builtin/Lora/extra_networks_lora.py +67 -0
- extensions-builtin/Lora/lora.py +9 -0
- extensions-builtin/Lora/lora_patches.py +31 -0
- extensions-builtin/Lora/lyco_helpers.py +21 -0
- extensions-builtin/Lora/network.py +158 -0
- extensions-builtin/Lora/network_full.py +27 -0
- extensions-builtin/Lora/network_hada.py +55 -0
- extensions-builtin/Lora/network_ia3.py +30 -0
- extensions-builtin/Lora/network_lokr.py +64 -0
- extensions-builtin/Lora/network_lora.py +86 -0
- extensions-builtin/Lora/network_norm.py +28 -0
- extensions-builtin/Lora/networks.py +571 -0
- extensions-builtin/Lora/preload.py +7 -0
- extensions-builtin/Lora/scripts/lora_script.py +99 -0
- extensions-builtin/Lora/ui_edit_user_metadata.py +217 -0
- extensions-builtin/Lora/ui_extra_networks_lora.py +79 -0
- extensions-builtin/ScuNET/preload.py +6 -0
- extensions-builtin/ScuNET/scripts/scunet_model.py +144 -0
- extensions-builtin/ScuNET/scunet_model_arch.py +268 -0
- extensions-builtin/SwinIR/preload.py +6 -0
- extensions-builtin/SwinIR/scripts/swinir_model.py +192 -0
- extensions-builtin/SwinIR/swinir_model_arch.py +867 -0
- extensions-builtin/SwinIR/swinir_model_arch_v2.py +1017 -0
- extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +962 -0
- extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py +15 -0
- extensions-builtin/canvas-zoom-and-pan/style.css +66 -0
- extensions-builtin/extra-options-section/scripts/extra_options_section.py +74 -0
- extensions-builtin/mobile/javascript/mobile.js +32 -0
- extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js +42 -0
extensions-builtin/LDSR/ldsr_model_arch.py
ADDED
@@ -0,0 +1,250 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import gc
|
3 |
+
import time
|
4 |
+
|
5 |
+
import numpy as np
|
6 |
+
import torch
|
7 |
+
import torchvision
|
8 |
+
from PIL import Image
|
9 |
+
from einops import rearrange, repeat
|
10 |
+
from omegaconf import OmegaConf
|
11 |
+
import safetensors.torch
|
12 |
+
|
13 |
+
from ldm.models.diffusion.ddim import DDIMSampler
|
14 |
+
from ldm.util import instantiate_from_config, ismap
|
15 |
+
from modules import shared, sd_hijack, devices
|
16 |
+
|
17 |
+
cached_ldsr_model: torch.nn.Module = None
|
18 |
+
|
19 |
+
|
20 |
+
# Create LDSR Class
|
21 |
+
class LDSR:
|
22 |
+
def load_model_from_config(self, half_attention):
|
23 |
+
global cached_ldsr_model
|
24 |
+
|
25 |
+
if shared.opts.ldsr_cached and cached_ldsr_model is not None:
|
26 |
+
print("Loading model from cache")
|
27 |
+
model: torch.nn.Module = cached_ldsr_model
|
28 |
+
else:
|
29 |
+
print(f"Loading model from {self.modelPath}")
|
30 |
+
_, extension = os.path.splitext(self.modelPath)
|
31 |
+
if extension.lower() == ".safetensors":
|
32 |
+
pl_sd = safetensors.torch.load_file(self.modelPath, device="cpu")
|
33 |
+
else:
|
34 |
+
pl_sd = torch.load(self.modelPath, map_location="cpu")
|
35 |
+
sd = pl_sd["state_dict"] if "state_dict" in pl_sd else pl_sd
|
36 |
+
config = OmegaConf.load(self.yamlPath)
|
37 |
+
config.model.target = "ldm.models.diffusion.ddpm.LatentDiffusionV1"
|
38 |
+
model: torch.nn.Module = instantiate_from_config(config.model)
|
39 |
+
model.load_state_dict(sd, strict=False)
|
40 |
+
model = model.to(shared.device)
|
41 |
+
if half_attention:
|
42 |
+
model = model.half()
|
43 |
+
if shared.cmd_opts.opt_channelslast:
|
44 |
+
model = model.to(memory_format=torch.channels_last)
|
45 |
+
|
46 |
+
sd_hijack.model_hijack.hijack(model) # apply optimization
|
47 |
+
model.eval()
|
48 |
+
|
49 |
+
if shared.opts.ldsr_cached:
|
50 |
+
cached_ldsr_model = model
|
51 |
+
|
52 |
+
return {"model": model}
|
53 |
+
|
54 |
+
def __init__(self, model_path, yaml_path):
|
55 |
+
self.modelPath = model_path
|
56 |
+
self.yamlPath = yaml_path
|
57 |
+
|
58 |
+
@staticmethod
|
59 |
+
def run(model, selected_path, custom_steps, eta):
|
60 |
+
example = get_cond(selected_path)
|
61 |
+
|
62 |
+
n_runs = 1
|
63 |
+
guider = None
|
64 |
+
ckwargs = None
|
65 |
+
ddim_use_x0_pred = False
|
66 |
+
temperature = 1.
|
67 |
+
eta = eta
|
68 |
+
custom_shape = None
|
69 |
+
|
70 |
+
height, width = example["image"].shape[1:3]
|
71 |
+
split_input = height >= 128 and width >= 128
|
72 |
+
|
73 |
+
if split_input:
|
74 |
+
ks = 128
|
75 |
+
stride = 64
|
76 |
+
vqf = 4 #
|
77 |
+
model.split_input_params = {"ks": (ks, ks), "stride": (stride, stride),
|
78 |
+
"vqf": vqf,
|
79 |
+
"patch_distributed_vq": True,
|
80 |
+
"tie_braker": False,
|
81 |
+
"clip_max_weight": 0.5,
|
82 |
+
"clip_min_weight": 0.01,
|
83 |
+
"clip_max_tie_weight": 0.5,
|
84 |
+
"clip_min_tie_weight": 0.01}
|
85 |
+
else:
|
86 |
+
if hasattr(model, "split_input_params"):
|
87 |
+
delattr(model, "split_input_params")
|
88 |
+
|
89 |
+
x_t = None
|
90 |
+
logs = None
|
91 |
+
for _ in range(n_runs):
|
92 |
+
if custom_shape is not None:
|
93 |
+
x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)
|
94 |
+
x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0])
|
95 |
+
|
96 |
+
logs = make_convolutional_sample(example, model,
|
97 |
+
custom_steps=custom_steps,
|
98 |
+
eta=eta, quantize_x0=False,
|
99 |
+
custom_shape=custom_shape,
|
100 |
+
temperature=temperature, noise_dropout=0.,
|
101 |
+
corrector=guider, corrector_kwargs=ckwargs, x_T=x_t,
|
102 |
+
ddim_use_x0_pred=ddim_use_x0_pred
|
103 |
+
)
|
104 |
+
return logs
|
105 |
+
|
106 |
+
def super_resolution(self, image, steps=100, target_scale=2, half_attention=False):
|
107 |
+
model = self.load_model_from_config(half_attention)
|
108 |
+
|
109 |
+
# Run settings
|
110 |
+
diffusion_steps = int(steps)
|
111 |
+
eta = 1.0
|
112 |
+
|
113 |
+
|
114 |
+
gc.collect()
|
115 |
+
devices.torch_gc()
|
116 |
+
|
117 |
+
im_og = image
|
118 |
+
width_og, height_og = im_og.size
|
119 |
+
# If we can adjust the max upscale size, then the 4 below should be our variable
|
120 |
+
down_sample_rate = target_scale / 4
|
121 |
+
wd = width_og * down_sample_rate
|
122 |
+
hd = height_og * down_sample_rate
|
123 |
+
width_downsampled_pre = int(np.ceil(wd))
|
124 |
+
height_downsampled_pre = int(np.ceil(hd))
|
125 |
+
|
126 |
+
if down_sample_rate != 1:
|
127 |
+
print(
|
128 |
+
f'Downsampling from [{width_og}, {height_og}] to [{width_downsampled_pre}, {height_downsampled_pre}]')
|
129 |
+
im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)
|
130 |
+
else:
|
131 |
+
print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)")
|
132 |
+
|
133 |
+
# pad width and height to multiples of 64, pads with the edge values of image to avoid artifacts
|
134 |
+
pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size
|
135 |
+
im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge'))
|
136 |
+
|
137 |
+
logs = self.run(model["model"], im_padded, diffusion_steps, eta)
|
138 |
+
|
139 |
+
sample = logs["sample"]
|
140 |
+
sample = sample.detach().cpu()
|
141 |
+
sample = torch.clamp(sample, -1., 1.)
|
142 |
+
sample = (sample + 1.) / 2. * 255
|
143 |
+
sample = sample.numpy().astype(np.uint8)
|
144 |
+
sample = np.transpose(sample, (0, 2, 3, 1))
|
145 |
+
a = Image.fromarray(sample[0])
|
146 |
+
|
147 |
+
# remove padding
|
148 |
+
a = a.crop((0, 0) + tuple(np.array(im_og.size) * 4))
|
149 |
+
|
150 |
+
del model
|
151 |
+
gc.collect()
|
152 |
+
devices.torch_gc()
|
153 |
+
|
154 |
+
return a
|
155 |
+
|
156 |
+
|
157 |
+
def get_cond(selected_path):
|
158 |
+
example = {}
|
159 |
+
up_f = 4
|
160 |
+
c = selected_path.convert('RGB')
|
161 |
+
c = torch.unsqueeze(torchvision.transforms.ToTensor()(c), 0)
|
162 |
+
c_up = torchvision.transforms.functional.resize(c, size=[up_f * c.shape[2], up_f * c.shape[3]],
|
163 |
+
antialias=True)
|
164 |
+
c_up = rearrange(c_up, '1 c h w -> 1 h w c')
|
165 |
+
c = rearrange(c, '1 c h w -> 1 h w c')
|
166 |
+
c = 2. * c - 1.
|
167 |
+
|
168 |
+
c = c.to(shared.device)
|
169 |
+
example["LR_image"] = c
|
170 |
+
example["image"] = c_up
|
171 |
+
|
172 |
+
return example
|
173 |
+
|
174 |
+
|
175 |
+
@torch.no_grad()
|
176 |
+
def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_sequence=None,
|
177 |
+
mask=None, x0=None, quantize_x0=False, temperature=1., score_corrector=None,
|
178 |
+
corrector_kwargs=None, x_t=None
|
179 |
+
):
|
180 |
+
ddim = DDIMSampler(model)
|
181 |
+
bs = shape[0]
|
182 |
+
shape = shape[1:]
|
183 |
+
print(f"Sampling with eta = {eta}; steps: {steps}")
|
184 |
+
samples, intermediates = ddim.sample(steps, batch_size=bs, shape=shape, conditioning=cond, callback=callback,
|
185 |
+
normals_sequence=normals_sequence, quantize_x0=quantize_x0, eta=eta,
|
186 |
+
mask=mask, x0=x0, temperature=temperature, verbose=False,
|
187 |
+
score_corrector=score_corrector,
|
188 |
+
corrector_kwargs=corrector_kwargs, x_t=x_t)
|
189 |
+
|
190 |
+
return samples, intermediates
|
191 |
+
|
192 |
+
|
193 |
+
@torch.no_grad()
|
194 |
+
def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize_x0=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None,
|
195 |
+
corrector_kwargs=None, x_T=None, ddim_use_x0_pred=False):
|
196 |
+
log = {}
|
197 |
+
|
198 |
+
z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key,
|
199 |
+
return_first_stage_outputs=True,
|
200 |
+
force_c_encode=not (hasattr(model, 'split_input_params')
|
201 |
+
and model.cond_stage_key == 'coordinates_bbox'),
|
202 |
+
return_original_cond=True)
|
203 |
+
|
204 |
+
if custom_shape is not None:
|
205 |
+
z = torch.randn(custom_shape)
|
206 |
+
print(f"Generating {custom_shape[0]} samples of shape {custom_shape[1:]}")
|
207 |
+
|
208 |
+
z0 = None
|
209 |
+
|
210 |
+
log["input"] = x
|
211 |
+
log["reconstruction"] = xrec
|
212 |
+
|
213 |
+
if ismap(xc):
|
214 |
+
log["original_conditioning"] = model.to_rgb(xc)
|
215 |
+
if hasattr(model, 'cond_stage_key'):
|
216 |
+
log[model.cond_stage_key] = model.to_rgb(xc)
|
217 |
+
|
218 |
+
else:
|
219 |
+
log["original_conditioning"] = xc if xc is not None else torch.zeros_like(x)
|
220 |
+
if model.cond_stage_model:
|
221 |
+
log[model.cond_stage_key] = xc if xc is not None else torch.zeros_like(x)
|
222 |
+
if model.cond_stage_key == 'class_label':
|
223 |
+
log[model.cond_stage_key] = xc[model.cond_stage_key]
|
224 |
+
|
225 |
+
with model.ema_scope("Plotting"):
|
226 |
+
t0 = time.time()
|
227 |
+
|
228 |
+
sample, intermediates = convsample_ddim(model, c, steps=custom_steps, shape=z.shape,
|
229 |
+
eta=eta,
|
230 |
+
quantize_x0=quantize_x0, mask=None, x0=z0,
|
231 |
+
temperature=temperature, score_corrector=corrector, corrector_kwargs=corrector_kwargs,
|
232 |
+
x_t=x_T)
|
233 |
+
t1 = time.time()
|
234 |
+
|
235 |
+
if ddim_use_x0_pred:
|
236 |
+
sample = intermediates['pred_x0'][-1]
|
237 |
+
|
238 |
+
x_sample = model.decode_first_stage(sample)
|
239 |
+
|
240 |
+
try:
|
241 |
+
x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)
|
242 |
+
log["sample_noquant"] = x_sample_noquant
|
243 |
+
log["sample_diff"] = torch.abs(x_sample_noquant - x_sample)
|
244 |
+
except Exception:
|
245 |
+
pass
|
246 |
+
|
247 |
+
log["sample"] = x_sample
|
248 |
+
log["time"] = t1 - t0
|
249 |
+
|
250 |
+
return log
|
extensions-builtin/LDSR/preload.py
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from modules import paths
|
3 |
+
|
4 |
+
|
5 |
+
def preload(parser):
|
6 |
+
parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(paths.models_path, 'LDSR'))
|
extensions-builtin/LDSR/scripts/ldsr_model.py
ADDED
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
|
3 |
+
from modules.modelloader import load_file_from_url
|
4 |
+
from modules.upscaler import Upscaler, UpscalerData
|
5 |
+
from ldsr_model_arch import LDSR
|
6 |
+
from modules import shared, script_callbacks, errors
|
7 |
+
import sd_hijack_autoencoder # noqa: F401
|
8 |
+
import sd_hijack_ddpm_v1 # noqa: F401
|
9 |
+
|
10 |
+
|
11 |
+
class UpscalerLDSR(Upscaler):
|
12 |
+
def __init__(self, user_path):
|
13 |
+
self.name = "LDSR"
|
14 |
+
self.user_path = user_path
|
15 |
+
self.model_url = "https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1"
|
16 |
+
self.yaml_url = "https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1"
|
17 |
+
super().__init__()
|
18 |
+
scaler_data = UpscalerData("LDSR", None, self)
|
19 |
+
self.scalers = [scaler_data]
|
20 |
+
|
21 |
+
def load_model(self, path: str):
|
22 |
+
# Remove incorrect project.yaml file if too big
|
23 |
+
yaml_path = os.path.join(self.model_path, "project.yaml")
|
24 |
+
old_model_path = os.path.join(self.model_path, "model.pth")
|
25 |
+
new_model_path = os.path.join(self.model_path, "model.ckpt")
|
26 |
+
|
27 |
+
local_model_paths = self.find_models(ext_filter=[".ckpt", ".safetensors"])
|
28 |
+
local_ckpt_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.ckpt")]), None)
|
29 |
+
local_safetensors_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.safetensors")]), None)
|
30 |
+
local_yaml_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("project.yaml")]), None)
|
31 |
+
|
32 |
+
if os.path.exists(yaml_path):
|
33 |
+
statinfo = os.stat(yaml_path)
|
34 |
+
if statinfo.st_size >= 10485760:
|
35 |
+
print("Removing invalid LDSR YAML file.")
|
36 |
+
os.remove(yaml_path)
|
37 |
+
|
38 |
+
if os.path.exists(old_model_path):
|
39 |
+
print("Renaming model from model.pth to model.ckpt")
|
40 |
+
os.rename(old_model_path, new_model_path)
|
41 |
+
|
42 |
+
if local_safetensors_path is not None and os.path.exists(local_safetensors_path):
|
43 |
+
model = local_safetensors_path
|
44 |
+
else:
|
45 |
+
model = local_ckpt_path or load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name="model.ckpt")
|
46 |
+
|
47 |
+
yaml = local_yaml_path or load_file_from_url(self.yaml_url, model_dir=self.model_download_path, file_name="project.yaml")
|
48 |
+
|
49 |
+
return LDSR(model, yaml)
|
50 |
+
|
51 |
+
def do_upscale(self, img, path):
|
52 |
+
try:
|
53 |
+
ldsr = self.load_model(path)
|
54 |
+
except Exception:
|
55 |
+
errors.report(f"Failed loading LDSR model {path}", exc_info=True)
|
56 |
+
return img
|
57 |
+
ddim_steps = shared.opts.ldsr_steps
|
58 |
+
return ldsr.super_resolution(img, ddim_steps, self.scale)
|
59 |
+
|
60 |
+
|
61 |
+
def on_ui_settings():
|
62 |
+
import gradio as gr
|
63 |
+
|
64 |
+
shared.opts.add_option("ldsr_steps", shared.OptionInfo(100, "LDSR processing steps. Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}, section=('upscaling', "Upscaling")))
|
65 |
+
shared.opts.add_option("ldsr_cached", shared.OptionInfo(False, "Cache LDSR model in memory", gr.Checkbox, {"interactive": True}, section=('upscaling', "Upscaling")))
|
66 |
+
|
67 |
+
|
68 |
+
script_callbacks.on_ui_settings(on_ui_settings)
|
extensions-builtin/LDSR/sd_hijack_autoencoder.py
ADDED
@@ -0,0 +1,293 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# The content of this file comes from the ldm/models/autoencoder.py file of the compvis/stable-diffusion repo
|
2 |
+
# The VQModel & VQModelInterface were subsequently removed from ldm/models/autoencoder.py when we moved to the stability-ai/stablediffusion repo
|
3 |
+
# As the LDSR upscaler relies on VQModel & VQModelInterface, the hijack aims to put them back into the ldm.models.autoencoder
|
4 |
+
import numpy as np
|
5 |
+
import torch
|
6 |
+
import pytorch_lightning as pl
|
7 |
+
import torch.nn.functional as F
|
8 |
+
from contextlib import contextmanager
|
9 |
+
|
10 |
+
from torch.optim.lr_scheduler import LambdaLR
|
11 |
+
|
12 |
+
from ldm.modules.ema import LitEma
|
13 |
+
from vqvae_quantize import VectorQuantizer2 as VectorQuantizer
|
14 |
+
from ldm.modules.diffusionmodules.model import Encoder, Decoder
|
15 |
+
from ldm.util import instantiate_from_config
|
16 |
+
|
17 |
+
import ldm.models.autoencoder
|
18 |
+
from packaging import version
|
19 |
+
|
20 |
+
class VQModel(pl.LightningModule):
|
21 |
+
def __init__(self,
|
22 |
+
ddconfig,
|
23 |
+
lossconfig,
|
24 |
+
n_embed,
|
25 |
+
embed_dim,
|
26 |
+
ckpt_path=None,
|
27 |
+
ignore_keys=None,
|
28 |
+
image_key="image",
|
29 |
+
colorize_nlabels=None,
|
30 |
+
monitor=None,
|
31 |
+
batch_resize_range=None,
|
32 |
+
scheduler_config=None,
|
33 |
+
lr_g_factor=1.0,
|
34 |
+
remap=None,
|
35 |
+
sane_index_shape=False, # tell vector quantizer to return indices as bhw
|
36 |
+
use_ema=False
|
37 |
+
):
|
38 |
+
super().__init__()
|
39 |
+
self.embed_dim = embed_dim
|
40 |
+
self.n_embed = n_embed
|
41 |
+
self.image_key = image_key
|
42 |
+
self.encoder = Encoder(**ddconfig)
|
43 |
+
self.decoder = Decoder(**ddconfig)
|
44 |
+
self.loss = instantiate_from_config(lossconfig)
|
45 |
+
self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
|
46 |
+
remap=remap,
|
47 |
+
sane_index_shape=sane_index_shape)
|
48 |
+
self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
|
49 |
+
self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
|
50 |
+
if colorize_nlabels is not None:
|
51 |
+
assert type(colorize_nlabels)==int
|
52 |
+
self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
|
53 |
+
if monitor is not None:
|
54 |
+
self.monitor = monitor
|
55 |
+
self.batch_resize_range = batch_resize_range
|
56 |
+
if self.batch_resize_range is not None:
|
57 |
+
print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.")
|
58 |
+
|
59 |
+
self.use_ema = use_ema
|
60 |
+
if self.use_ema:
|
61 |
+
self.model_ema = LitEma(self)
|
62 |
+
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
|
63 |
+
|
64 |
+
if ckpt_path is not None:
|
65 |
+
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [])
|
66 |
+
self.scheduler_config = scheduler_config
|
67 |
+
self.lr_g_factor = lr_g_factor
|
68 |
+
|
69 |
+
@contextmanager
|
70 |
+
def ema_scope(self, context=None):
|
71 |
+
if self.use_ema:
|
72 |
+
self.model_ema.store(self.parameters())
|
73 |
+
self.model_ema.copy_to(self)
|
74 |
+
if context is not None:
|
75 |
+
print(f"{context}: Switched to EMA weights")
|
76 |
+
try:
|
77 |
+
yield None
|
78 |
+
finally:
|
79 |
+
if self.use_ema:
|
80 |
+
self.model_ema.restore(self.parameters())
|
81 |
+
if context is not None:
|
82 |
+
print(f"{context}: Restored training weights")
|
83 |
+
|
84 |
+
def init_from_ckpt(self, path, ignore_keys=None):
|
85 |
+
sd = torch.load(path, map_location="cpu")["state_dict"]
|
86 |
+
keys = list(sd.keys())
|
87 |
+
for k in keys:
|
88 |
+
for ik in ignore_keys or []:
|
89 |
+
if k.startswith(ik):
|
90 |
+
print("Deleting key {} from state_dict.".format(k))
|
91 |
+
del sd[k]
|
92 |
+
missing, unexpected = self.load_state_dict(sd, strict=False)
|
93 |
+
print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
|
94 |
+
if missing:
|
95 |
+
print(f"Missing Keys: {missing}")
|
96 |
+
if unexpected:
|
97 |
+
print(f"Unexpected Keys: {unexpected}")
|
98 |
+
|
99 |
+
def on_train_batch_end(self, *args, **kwargs):
|
100 |
+
if self.use_ema:
|
101 |
+
self.model_ema(self)
|
102 |
+
|
103 |
+
def encode(self, x):
|
104 |
+
h = self.encoder(x)
|
105 |
+
h = self.quant_conv(h)
|
106 |
+
quant, emb_loss, info = self.quantize(h)
|
107 |
+
return quant, emb_loss, info
|
108 |
+
|
109 |
+
def encode_to_prequant(self, x):
|
110 |
+
h = self.encoder(x)
|
111 |
+
h = self.quant_conv(h)
|
112 |
+
return h
|
113 |
+
|
114 |
+
def decode(self, quant):
|
115 |
+
quant = self.post_quant_conv(quant)
|
116 |
+
dec = self.decoder(quant)
|
117 |
+
return dec
|
118 |
+
|
119 |
+
def decode_code(self, code_b):
|
120 |
+
quant_b = self.quantize.embed_code(code_b)
|
121 |
+
dec = self.decode(quant_b)
|
122 |
+
return dec
|
123 |
+
|
124 |
+
def forward(self, input, return_pred_indices=False):
|
125 |
+
quant, diff, (_,_,ind) = self.encode(input)
|
126 |
+
dec = self.decode(quant)
|
127 |
+
if return_pred_indices:
|
128 |
+
return dec, diff, ind
|
129 |
+
return dec, diff
|
130 |
+
|
131 |
+
def get_input(self, batch, k):
|
132 |
+
x = batch[k]
|
133 |
+
if len(x.shape) == 3:
|
134 |
+
x = x[..., None]
|
135 |
+
x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
|
136 |
+
if self.batch_resize_range is not None:
|
137 |
+
lower_size = self.batch_resize_range[0]
|
138 |
+
upper_size = self.batch_resize_range[1]
|
139 |
+
if self.global_step <= 4:
|
140 |
+
# do the first few batches with max size to avoid later oom
|
141 |
+
new_resize = upper_size
|
142 |
+
else:
|
143 |
+
new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16))
|
144 |
+
if new_resize != x.shape[2]:
|
145 |
+
x = F.interpolate(x, size=new_resize, mode="bicubic")
|
146 |
+
x = x.detach()
|
147 |
+
return x
|
148 |
+
|
149 |
+
def training_step(self, batch, batch_idx, optimizer_idx):
|
150 |
+
# https://github.com/pytorch/pytorch/issues/37142
|
151 |
+
# try not to fool the heuristics
|
152 |
+
x = self.get_input(batch, self.image_key)
|
153 |
+
xrec, qloss, ind = self(x, return_pred_indices=True)
|
154 |
+
|
155 |
+
if optimizer_idx == 0:
|
156 |
+
# autoencode
|
157 |
+
aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
|
158 |
+
last_layer=self.get_last_layer(), split="train",
|
159 |
+
predicted_indices=ind)
|
160 |
+
|
161 |
+
self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
|
162 |
+
return aeloss
|
163 |
+
|
164 |
+
if optimizer_idx == 1:
|
165 |
+
# discriminator
|
166 |
+
discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
|
167 |
+
last_layer=self.get_last_layer(), split="train")
|
168 |
+
self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
|
169 |
+
return discloss
|
170 |
+
|
171 |
+
def validation_step(self, batch, batch_idx):
|
172 |
+
log_dict = self._validation_step(batch, batch_idx)
|
173 |
+
with self.ema_scope():
|
174 |
+
self._validation_step(batch, batch_idx, suffix="_ema")
|
175 |
+
return log_dict
|
176 |
+
|
177 |
+
def _validation_step(self, batch, batch_idx, suffix=""):
|
178 |
+
x = self.get_input(batch, self.image_key)
|
179 |
+
xrec, qloss, ind = self(x, return_pred_indices=True)
|
180 |
+
aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0,
|
181 |
+
self.global_step,
|
182 |
+
last_layer=self.get_last_layer(),
|
183 |
+
split="val"+suffix,
|
184 |
+
predicted_indices=ind
|
185 |
+
)
|
186 |
+
|
187 |
+
discloss, log_dict_disc = self.loss(qloss, x, xrec, 1,
|
188 |
+
self.global_step,
|
189 |
+
last_layer=self.get_last_layer(),
|
190 |
+
split="val"+suffix,
|
191 |
+
predicted_indices=ind
|
192 |
+
)
|
193 |
+
rec_loss = log_dict_ae[f"val{suffix}/rec_loss"]
|
194 |
+
self.log(f"val{suffix}/rec_loss", rec_loss,
|
195 |
+
prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
|
196 |
+
self.log(f"val{suffix}/aeloss", aeloss,
|
197 |
+
prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
|
198 |
+
if version.parse(pl.__version__) >= version.parse('1.4.0'):
|
199 |
+
del log_dict_ae[f"val{suffix}/rec_loss"]
|
200 |
+
self.log_dict(log_dict_ae)
|
201 |
+
self.log_dict(log_dict_disc)
|
202 |
+
return self.log_dict
|
203 |
+
|
204 |
+
def configure_optimizers(self):
|
205 |
+
lr_d = self.learning_rate
|
206 |
+
lr_g = self.lr_g_factor*self.learning_rate
|
207 |
+
print("lr_d", lr_d)
|
208 |
+
print("lr_g", lr_g)
|
209 |
+
opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
|
210 |
+
list(self.decoder.parameters())+
|
211 |
+
list(self.quantize.parameters())+
|
212 |
+
list(self.quant_conv.parameters())+
|
213 |
+
list(self.post_quant_conv.parameters()),
|
214 |
+
lr=lr_g, betas=(0.5, 0.9))
|
215 |
+
opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
|
216 |
+
lr=lr_d, betas=(0.5, 0.9))
|
217 |
+
|
218 |
+
if self.scheduler_config is not None:
|
219 |
+
scheduler = instantiate_from_config(self.scheduler_config)
|
220 |
+
|
221 |
+
print("Setting up LambdaLR scheduler...")
|
222 |
+
scheduler = [
|
223 |
+
{
|
224 |
+
'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule),
|
225 |
+
'interval': 'step',
|
226 |
+
'frequency': 1
|
227 |
+
},
|
228 |
+
{
|
229 |
+
'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule),
|
230 |
+
'interval': 'step',
|
231 |
+
'frequency': 1
|
232 |
+
},
|
233 |
+
]
|
234 |
+
return [opt_ae, opt_disc], scheduler
|
235 |
+
return [opt_ae, opt_disc], []
|
236 |
+
|
237 |
+
def get_last_layer(self):
|
238 |
+
return self.decoder.conv_out.weight
|
239 |
+
|
240 |
+
def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
|
241 |
+
log = {}
|
242 |
+
x = self.get_input(batch, self.image_key)
|
243 |
+
x = x.to(self.device)
|
244 |
+
if only_inputs:
|
245 |
+
log["inputs"] = x
|
246 |
+
return log
|
247 |
+
xrec, _ = self(x)
|
248 |
+
if x.shape[1] > 3:
|
249 |
+
# colorize with random projection
|
250 |
+
assert xrec.shape[1] > 3
|
251 |
+
x = self.to_rgb(x)
|
252 |
+
xrec = self.to_rgb(xrec)
|
253 |
+
log["inputs"] = x
|
254 |
+
log["reconstructions"] = xrec
|
255 |
+
if plot_ema:
|
256 |
+
with self.ema_scope():
|
257 |
+
xrec_ema, _ = self(x)
|
258 |
+
if x.shape[1] > 3:
|
259 |
+
xrec_ema = self.to_rgb(xrec_ema)
|
260 |
+
log["reconstructions_ema"] = xrec_ema
|
261 |
+
return log
|
262 |
+
|
263 |
+
def to_rgb(self, x):
|
264 |
+
assert self.image_key == "segmentation"
|
265 |
+
if not hasattr(self, "colorize"):
|
266 |
+
self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
|
267 |
+
x = F.conv2d(x, weight=self.colorize)
|
268 |
+
x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
|
269 |
+
return x
|
270 |
+
|
271 |
+
|
272 |
+
class VQModelInterface(VQModel):
|
273 |
+
def __init__(self, embed_dim, *args, **kwargs):
|
274 |
+
super().__init__(*args, embed_dim=embed_dim, **kwargs)
|
275 |
+
self.embed_dim = embed_dim
|
276 |
+
|
277 |
+
def encode(self, x):
|
278 |
+
h = self.encoder(x)
|
279 |
+
h = self.quant_conv(h)
|
280 |
+
return h
|
281 |
+
|
282 |
+
def decode(self, h, force_not_quantize=False):
|
283 |
+
# also go through quantization layer
|
284 |
+
if not force_not_quantize:
|
285 |
+
quant, emb_loss, info = self.quantize(h)
|
286 |
+
else:
|
287 |
+
quant = h
|
288 |
+
quant = self.post_quant_conv(quant)
|
289 |
+
dec = self.decoder(quant)
|
290 |
+
return dec
|
291 |
+
|
292 |
+
ldm.models.autoencoder.VQModel = VQModel
|
293 |
+
ldm.models.autoencoder.VQModelInterface = VQModelInterface
|
extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
ADDED
@@ -0,0 +1,1443 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# This script is copied from the compvis/stable-diffusion repo (aka the SD V1 repo)
|
2 |
+
# Original filename: ldm/models/diffusion/ddpm.py
|
3 |
+
# The purpose to reinstate the old DDPM logic which works with VQ, whereas the V2 one doesn't
|
4 |
+
# Some models such as LDSR require VQ to work correctly
|
5 |
+
# The classes are suffixed with "V1" and added back to the "ldm.models.diffusion.ddpm" module
|
6 |
+
|
7 |
+
import torch
|
8 |
+
import torch.nn as nn
|
9 |
+
import numpy as np
|
10 |
+
import pytorch_lightning as pl
|
11 |
+
from torch.optim.lr_scheduler import LambdaLR
|
12 |
+
from einops import rearrange, repeat
|
13 |
+
from contextlib import contextmanager
|
14 |
+
from functools import partial
|
15 |
+
from tqdm import tqdm
|
16 |
+
from torchvision.utils import make_grid
|
17 |
+
from pytorch_lightning.utilities.distributed import rank_zero_only
|
18 |
+
|
19 |
+
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
|
20 |
+
from ldm.modules.ema import LitEma
|
21 |
+
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
|
22 |
+
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
|
23 |
+
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
|
24 |
+
from ldm.models.diffusion.ddim import DDIMSampler
|
25 |
+
|
26 |
+
import ldm.models.diffusion.ddpm
|
27 |
+
|
28 |
+
__conditioning_keys__ = {'concat': 'c_concat',
|
29 |
+
'crossattn': 'c_crossattn',
|
30 |
+
'adm': 'y'}
|
31 |
+
|
32 |
+
|
33 |
+
def disabled_train(self, mode=True):
|
34 |
+
"""Overwrite model.train with this function to make sure train/eval mode
|
35 |
+
does not change anymore."""
|
36 |
+
return self
|
37 |
+
|
38 |
+
|
39 |
+
def uniform_on_device(r1, r2, shape, device):
|
40 |
+
return (r1 - r2) * torch.rand(*shape, device=device) + r2
|
41 |
+
|
42 |
+
|
43 |
+
class DDPMV1(pl.LightningModule):
|
44 |
+
# classic DDPM with Gaussian diffusion, in image space
|
45 |
+
def __init__(self,
|
46 |
+
unet_config,
|
47 |
+
timesteps=1000,
|
48 |
+
beta_schedule="linear",
|
49 |
+
loss_type="l2",
|
50 |
+
ckpt_path=None,
|
51 |
+
ignore_keys=None,
|
52 |
+
load_only_unet=False,
|
53 |
+
monitor="val/loss",
|
54 |
+
use_ema=True,
|
55 |
+
first_stage_key="image",
|
56 |
+
image_size=256,
|
57 |
+
channels=3,
|
58 |
+
log_every_t=100,
|
59 |
+
clip_denoised=True,
|
60 |
+
linear_start=1e-4,
|
61 |
+
linear_end=2e-2,
|
62 |
+
cosine_s=8e-3,
|
63 |
+
given_betas=None,
|
64 |
+
original_elbo_weight=0.,
|
65 |
+
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
|
66 |
+
l_simple_weight=1.,
|
67 |
+
conditioning_key=None,
|
68 |
+
parameterization="eps", # all assuming fixed variance schedules
|
69 |
+
scheduler_config=None,
|
70 |
+
use_positional_encodings=False,
|
71 |
+
learn_logvar=False,
|
72 |
+
logvar_init=0.,
|
73 |
+
):
|
74 |
+
super().__init__()
|
75 |
+
assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"'
|
76 |
+
self.parameterization = parameterization
|
77 |
+
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
|
78 |
+
self.cond_stage_model = None
|
79 |
+
self.clip_denoised = clip_denoised
|
80 |
+
self.log_every_t = log_every_t
|
81 |
+
self.first_stage_key = first_stage_key
|
82 |
+
self.image_size = image_size # try conv?
|
83 |
+
self.channels = channels
|
84 |
+
self.use_positional_encodings = use_positional_encodings
|
85 |
+
self.model = DiffusionWrapperV1(unet_config, conditioning_key)
|
86 |
+
count_params(self.model, verbose=True)
|
87 |
+
self.use_ema = use_ema
|
88 |
+
if self.use_ema:
|
89 |
+
self.model_ema = LitEma(self.model)
|
90 |
+
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
|
91 |
+
|
92 |
+
self.use_scheduler = scheduler_config is not None
|
93 |
+
if self.use_scheduler:
|
94 |
+
self.scheduler_config = scheduler_config
|
95 |
+
|
96 |
+
self.v_posterior = v_posterior
|
97 |
+
self.original_elbo_weight = original_elbo_weight
|
98 |
+
self.l_simple_weight = l_simple_weight
|
99 |
+
|
100 |
+
if monitor is not None:
|
101 |
+
self.monitor = monitor
|
102 |
+
if ckpt_path is not None:
|
103 |
+
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [], only_model=load_only_unet)
|
104 |
+
|
105 |
+
self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
|
106 |
+
linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
|
107 |
+
|
108 |
+
self.loss_type = loss_type
|
109 |
+
|
110 |
+
self.learn_logvar = learn_logvar
|
111 |
+
self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
|
112 |
+
if self.learn_logvar:
|
113 |
+
self.logvar = nn.Parameter(self.logvar, requires_grad=True)
|
114 |
+
|
115 |
+
|
116 |
+
def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
|
117 |
+
linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
|
118 |
+
if exists(given_betas):
|
119 |
+
betas = given_betas
|
120 |
+
else:
|
121 |
+
betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
|
122 |
+
cosine_s=cosine_s)
|
123 |
+
alphas = 1. - betas
|
124 |
+
alphas_cumprod = np.cumprod(alphas, axis=0)
|
125 |
+
alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
|
126 |
+
|
127 |
+
timesteps, = betas.shape
|
128 |
+
self.num_timesteps = int(timesteps)
|
129 |
+
self.linear_start = linear_start
|
130 |
+
self.linear_end = linear_end
|
131 |
+
assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
|
132 |
+
|
133 |
+
to_torch = partial(torch.tensor, dtype=torch.float32)
|
134 |
+
|
135 |
+
self.register_buffer('betas', to_torch(betas))
|
136 |
+
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
|
137 |
+
self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
|
138 |
+
|
139 |
+
# calculations for diffusion q(x_t | x_{t-1}) and others
|
140 |
+
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
|
141 |
+
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
|
142 |
+
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
|
143 |
+
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
|
144 |
+
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
|
145 |
+
|
146 |
+
# calculations for posterior q(x_{t-1} | x_t, x_0)
|
147 |
+
posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
|
148 |
+
1. - alphas_cumprod) + self.v_posterior * betas
|
149 |
+
# above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
|
150 |
+
self.register_buffer('posterior_variance', to_torch(posterior_variance))
|
151 |
+
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
|
152 |
+
self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
|
153 |
+
self.register_buffer('posterior_mean_coef1', to_torch(
|
154 |
+
betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
|
155 |
+
self.register_buffer('posterior_mean_coef2', to_torch(
|
156 |
+
(1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
|
157 |
+
|
158 |
+
if self.parameterization == "eps":
|
159 |
+
lvlb_weights = self.betas ** 2 / (
|
160 |
+
2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
|
161 |
+
elif self.parameterization == "x0":
|
162 |
+
lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
|
163 |
+
else:
|
164 |
+
raise NotImplementedError("mu not supported")
|
165 |
+
# TODO how to choose this term
|
166 |
+
lvlb_weights[0] = lvlb_weights[1]
|
167 |
+
self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
|
168 |
+
assert not torch.isnan(self.lvlb_weights).all()
|
169 |
+
|
170 |
+
@contextmanager
|
171 |
+
def ema_scope(self, context=None):
|
172 |
+
if self.use_ema:
|
173 |
+
self.model_ema.store(self.model.parameters())
|
174 |
+
self.model_ema.copy_to(self.model)
|
175 |
+
if context is not None:
|
176 |
+
print(f"{context}: Switched to EMA weights")
|
177 |
+
try:
|
178 |
+
yield None
|
179 |
+
finally:
|
180 |
+
if self.use_ema:
|
181 |
+
self.model_ema.restore(self.model.parameters())
|
182 |
+
if context is not None:
|
183 |
+
print(f"{context}: Restored training weights")
|
184 |
+
|
185 |
+
def init_from_ckpt(self, path, ignore_keys=None, only_model=False):
|
186 |
+
sd = torch.load(path, map_location="cpu")
|
187 |
+
if "state_dict" in list(sd.keys()):
|
188 |
+
sd = sd["state_dict"]
|
189 |
+
keys = list(sd.keys())
|
190 |
+
for k in keys:
|
191 |
+
for ik in ignore_keys or []:
|
192 |
+
if k.startswith(ik):
|
193 |
+
print("Deleting key {} from state_dict.".format(k))
|
194 |
+
del sd[k]
|
195 |
+
missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
|
196 |
+
sd, strict=False)
|
197 |
+
print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
|
198 |
+
if missing:
|
199 |
+
print(f"Missing Keys: {missing}")
|
200 |
+
if unexpected:
|
201 |
+
print(f"Unexpected Keys: {unexpected}")
|
202 |
+
|
203 |
+
def q_mean_variance(self, x_start, t):
|
204 |
+
"""
|
205 |
+
Get the distribution q(x_t | x_0).
|
206 |
+
:param x_start: the [N x C x ...] tensor of noiseless inputs.
|
207 |
+
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
|
208 |
+
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
|
209 |
+
"""
|
210 |
+
mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
|
211 |
+
variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
|
212 |
+
log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
|
213 |
+
return mean, variance, log_variance
|
214 |
+
|
215 |
+
def predict_start_from_noise(self, x_t, t, noise):
|
216 |
+
return (
|
217 |
+
extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
|
218 |
+
extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
|
219 |
+
)
|
220 |
+
|
221 |
+
def q_posterior(self, x_start, x_t, t):
|
222 |
+
posterior_mean = (
|
223 |
+
extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
|
224 |
+
extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
|
225 |
+
)
|
226 |
+
posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
|
227 |
+
posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
|
228 |
+
return posterior_mean, posterior_variance, posterior_log_variance_clipped
|
229 |
+
|
230 |
+
def p_mean_variance(self, x, t, clip_denoised: bool):
|
231 |
+
model_out = self.model(x, t)
|
232 |
+
if self.parameterization == "eps":
|
233 |
+
x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
|
234 |
+
elif self.parameterization == "x0":
|
235 |
+
x_recon = model_out
|
236 |
+
if clip_denoised:
|
237 |
+
x_recon.clamp_(-1., 1.)
|
238 |
+
|
239 |
+
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
|
240 |
+
return model_mean, posterior_variance, posterior_log_variance
|
241 |
+
|
242 |
+
@torch.no_grad()
|
243 |
+
def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
|
244 |
+
b, *_, device = *x.shape, x.device
|
245 |
+
model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
|
246 |
+
noise = noise_like(x.shape, device, repeat_noise)
|
247 |
+
# no noise when t == 0
|
248 |
+
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
|
249 |
+
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
|
250 |
+
|
251 |
+
@torch.no_grad()
|
252 |
+
def p_sample_loop(self, shape, return_intermediates=False):
|
253 |
+
device = self.betas.device
|
254 |
+
b = shape[0]
|
255 |
+
img = torch.randn(shape, device=device)
|
256 |
+
intermediates = [img]
|
257 |
+
for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
|
258 |
+
img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
|
259 |
+
clip_denoised=self.clip_denoised)
|
260 |
+
if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
|
261 |
+
intermediates.append(img)
|
262 |
+
if return_intermediates:
|
263 |
+
return img, intermediates
|
264 |
+
return img
|
265 |
+
|
266 |
+
@torch.no_grad()
|
267 |
+
def sample(self, batch_size=16, return_intermediates=False):
|
268 |
+
image_size = self.image_size
|
269 |
+
channels = self.channels
|
270 |
+
return self.p_sample_loop((batch_size, channels, image_size, image_size),
|
271 |
+
return_intermediates=return_intermediates)
|
272 |
+
|
273 |
+
def q_sample(self, x_start, t, noise=None):
|
274 |
+
noise = default(noise, lambda: torch.randn_like(x_start))
|
275 |
+
return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
|
276 |
+
extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
|
277 |
+
|
278 |
+
def get_loss(self, pred, target, mean=True):
|
279 |
+
if self.loss_type == 'l1':
|
280 |
+
loss = (target - pred).abs()
|
281 |
+
if mean:
|
282 |
+
loss = loss.mean()
|
283 |
+
elif self.loss_type == 'l2':
|
284 |
+
if mean:
|
285 |
+
loss = torch.nn.functional.mse_loss(target, pred)
|
286 |
+
else:
|
287 |
+
loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
|
288 |
+
else:
|
289 |
+
raise NotImplementedError("unknown loss type '{loss_type}'")
|
290 |
+
|
291 |
+
return loss
|
292 |
+
|
293 |
+
def p_losses(self, x_start, t, noise=None):
|
294 |
+
noise = default(noise, lambda: torch.randn_like(x_start))
|
295 |
+
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
|
296 |
+
model_out = self.model(x_noisy, t)
|
297 |
+
|
298 |
+
loss_dict = {}
|
299 |
+
if self.parameterization == "eps":
|
300 |
+
target = noise
|
301 |
+
elif self.parameterization == "x0":
|
302 |
+
target = x_start
|
303 |
+
else:
|
304 |
+
raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported")
|
305 |
+
|
306 |
+
loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
|
307 |
+
|
308 |
+
log_prefix = 'train' if self.training else 'val'
|
309 |
+
|
310 |
+
loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
|
311 |
+
loss_simple = loss.mean() * self.l_simple_weight
|
312 |
+
|
313 |
+
loss_vlb = (self.lvlb_weights[t] * loss).mean()
|
314 |
+
loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})
|
315 |
+
|
316 |
+
loss = loss_simple + self.original_elbo_weight * loss_vlb
|
317 |
+
|
318 |
+
loss_dict.update({f'{log_prefix}/loss': loss})
|
319 |
+
|
320 |
+
return loss, loss_dict
|
321 |
+
|
322 |
+
def forward(self, x, *args, **kwargs):
|
323 |
+
# b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
|
324 |
+
# assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
|
325 |
+
t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
|
326 |
+
return self.p_losses(x, t, *args, **kwargs)
|
327 |
+
|
328 |
+
def get_input(self, batch, k):
|
329 |
+
x = batch[k]
|
330 |
+
if len(x.shape) == 3:
|
331 |
+
x = x[..., None]
|
332 |
+
x = rearrange(x, 'b h w c -> b c h w')
|
333 |
+
x = x.to(memory_format=torch.contiguous_format).float()
|
334 |
+
return x
|
335 |
+
|
336 |
+
def shared_step(self, batch):
|
337 |
+
x = self.get_input(batch, self.first_stage_key)
|
338 |
+
loss, loss_dict = self(x)
|
339 |
+
return loss, loss_dict
|
340 |
+
|
341 |
+
def training_step(self, batch, batch_idx):
|
342 |
+
loss, loss_dict = self.shared_step(batch)
|
343 |
+
|
344 |
+
self.log_dict(loss_dict, prog_bar=True,
|
345 |
+
logger=True, on_step=True, on_epoch=True)
|
346 |
+
|
347 |
+
self.log("global_step", self.global_step,
|
348 |
+
prog_bar=True, logger=True, on_step=True, on_epoch=False)
|
349 |
+
|
350 |
+
if self.use_scheduler:
|
351 |
+
lr = self.optimizers().param_groups[0]['lr']
|
352 |
+
self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
|
353 |
+
|
354 |
+
return loss
|
355 |
+
|
356 |
+
@torch.no_grad()
|
357 |
+
def validation_step(self, batch, batch_idx):
|
358 |
+
_, loss_dict_no_ema = self.shared_step(batch)
|
359 |
+
with self.ema_scope():
|
360 |
+
_, loss_dict_ema = self.shared_step(batch)
|
361 |
+
loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
|
362 |
+
self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
|
363 |
+
self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
|
364 |
+
|
365 |
+
def on_train_batch_end(self, *args, **kwargs):
|
366 |
+
if self.use_ema:
|
367 |
+
self.model_ema(self.model)
|
368 |
+
|
369 |
+
def _get_rows_from_list(self, samples):
|
370 |
+
n_imgs_per_row = len(samples)
|
371 |
+
denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
|
372 |
+
denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
|
373 |
+
denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
|
374 |
+
return denoise_grid
|
375 |
+
|
376 |
+
@torch.no_grad()
|
377 |
+
def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
|
378 |
+
log = {}
|
379 |
+
x = self.get_input(batch, self.first_stage_key)
|
380 |
+
N = min(x.shape[0], N)
|
381 |
+
n_row = min(x.shape[0], n_row)
|
382 |
+
x = x.to(self.device)[:N]
|
383 |
+
log["inputs"] = x
|
384 |
+
|
385 |
+
# get diffusion row
|
386 |
+
diffusion_row = []
|
387 |
+
x_start = x[:n_row]
|
388 |
+
|
389 |
+
for t in range(self.num_timesteps):
|
390 |
+
            if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                t = t.to(self.device).long()
                noise = torch.randn_like(x_start)
                x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
                diffusion_row.append(x_noisy)

        log["diffusion_row"] = self._get_rows_from_list(diffusion_row)

        if sample:
            # get denoise row
            with self.ema_scope("Plotting"):
                samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)

            log["samples"] = samples
            log["denoise_row"] = self._get_rows_from_list(denoise_row)

        if return_keys:
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log

    def configure_optimizers(self):
        lr = self.learning_rate
        params = list(self.model.parameters())
        if self.learn_logvar:
            params = params + [self.logvar]
        opt = torch.optim.AdamW(params, lr=lr)
        return opt


class LatentDiffusionV1(DDPMV1):
    """main class"""
    def __init__(self,
                 first_stage_config,
                 cond_stage_config,
                 num_timesteps_cond=None,
                 cond_stage_key="image",
                 cond_stage_trainable=False,
                 concat_mode=True,
                 cond_stage_forward=None,
                 conditioning_key=None,
                 scale_factor=1.0,
                 scale_by_std=False,
                 *args, **kwargs):
        self.num_timesteps_cond = default(num_timesteps_cond, 1)
        self.scale_by_std = scale_by_std
        assert self.num_timesteps_cond <= kwargs['timesteps']
        # for backwards compatibility after implementation of DiffusionWrapper
        if conditioning_key is None:
            conditioning_key = 'concat' if concat_mode else 'crossattn'
        if cond_stage_config == '__is_unconditional__':
            conditioning_key = None
        ckpt_path = kwargs.pop("ckpt_path", None)
        ignore_keys = kwargs.pop("ignore_keys", [])
        super().__init__(*args, conditioning_key=conditioning_key, **kwargs)
        self.concat_mode = concat_mode
        self.cond_stage_trainable = cond_stage_trainable
        self.cond_stage_key = cond_stage_key
        try:
            self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
        except Exception:
            self.num_downs = 0
        if not scale_by_std:
            self.scale_factor = scale_factor
        else:
            self.register_buffer('scale_factor', torch.tensor(scale_factor))
        self.instantiate_first_stage(first_stage_config)
        self.instantiate_cond_stage(cond_stage_config)
        self.cond_stage_forward = cond_stage_forward
        self.clip_denoised = False
        self.bbox_tokenizer = None

        self.restarted_from_ckpt = False
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys)
            self.restarted_from_ckpt = True

    def make_cond_schedule(self):
        self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
        ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
        self.cond_ids[:self.num_timesteps_cond] = ids

    @rank_zero_only
    @torch.no_grad()
    def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
        # only for very first batch
        if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
            assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
            # set rescale weight to 1./std of encodings
            print("### USING STD-RESCALING ###")
            x = super().get_input(batch, self.first_stage_key)
            x = x.to(self.device)
            encoder_posterior = self.encode_first_stage(x)
            z = self.get_first_stage_encoding(encoder_posterior).detach()
            del self.scale_factor
            self.register_buffer('scale_factor', 1. / z.flatten().std())
            print(f"setting self.scale_factor to {self.scale_factor}")
            print("### USING STD-RESCALING ###")

    def register_schedule(self,
                          given_betas=None, beta_schedule="linear", timesteps=1000,
                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
        super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)

        self.shorten_cond_schedule = self.num_timesteps_cond > 1
        if self.shorten_cond_schedule:
            self.make_cond_schedule()

    def instantiate_first_stage(self, config):
        model = instantiate_from_config(config)
        self.first_stage_model = model.eval()
        self.first_stage_model.train = disabled_train
        for param in self.first_stage_model.parameters():
            param.requires_grad = False

    def instantiate_cond_stage(self, config):
        if not self.cond_stage_trainable:
            if config == "__is_first_stage__":
                print("Using first stage also as cond stage.")
                self.cond_stage_model = self.first_stage_model
            elif config == "__is_unconditional__":
                print(f"Training {self.__class__.__name__} as an unconditional model.")
                self.cond_stage_model = None
                # self.be_unconditional = True
            else:
                model = instantiate_from_config(config)
                self.cond_stage_model = model.eval()
                self.cond_stage_model.train = disabled_train
                for param in self.cond_stage_model.parameters():
                    param.requires_grad = False
        else:
            assert config != '__is_first_stage__'
            assert config != '__is_unconditional__'
            model = instantiate_from_config(config)
            self.cond_stage_model = model

    def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
        denoise_row = []
        for zd in tqdm(samples, desc=desc):
            denoise_row.append(self.decode_first_stage(zd.to(self.device),
                                                       force_not_quantize=force_no_decoder_quantization))
        n_imgs_per_row = len(denoise_row)
        denoise_row = torch.stack(denoise_row)  # n_log_step, n_row, C, H, W
        denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
        denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
        return denoise_grid

    def get_first_stage_encoding(self, encoder_posterior):
        if isinstance(encoder_posterior, DiagonalGaussianDistribution):
            z = encoder_posterior.sample()
        elif isinstance(encoder_posterior, torch.Tensor):
            z = encoder_posterior
        else:
            raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
        return self.scale_factor * z

    def get_learned_conditioning(self, c):
        if self.cond_stage_forward is None:
            if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
                c = self.cond_stage_model.encode(c)
                if isinstance(c, DiagonalGaussianDistribution):
                    c = c.mode()
            else:
                c = self.cond_stage_model(c)
        else:
            assert hasattr(self.cond_stage_model, self.cond_stage_forward)
            c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
        return c

    def meshgrid(self, h, w):
        y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
        x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)

        arr = torch.cat([y, x], dim=-1)
        return arr

    def delta_border(self, h, w):
        """
        :param h: height
        :param w: width
        :return: normalized distance to image border,
         with min distance = 0 at border and max dist = 0.5 at image center
        """
        lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
        arr = self.meshgrid(h, w) / lower_right_corner
        dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
        dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
        edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
        return edge_dist

    def get_weighting(self, h, w, Ly, Lx, device):
        weighting = self.delta_border(h, w)
        weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
                               self.split_input_params["clip_max_weight"])
        weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)

        if self.split_input_params["tie_braker"]:
            L_weighting = self.delta_border(Ly, Lx)
            L_weighting = torch.clip(L_weighting,
                                     self.split_input_params["clip_min_tie_weight"],
                                     self.split_input_params["clip_max_tie_weight"])

            L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
            weighting = weighting * L_weighting
        return weighting

    def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1):  # todo load once not every time, shorten code
        """
        :param x: img of size (bs, c, h, w)
        :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
        """
        bs, nc, h, w = x.shape

        # number of crops in image
        Ly = (h - kernel_size[0]) // stride[0] + 1
        Lx = (w - kernel_size[1]) // stride[1] + 1

        if uf == 1 and df == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)

            weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h, w)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))

        elif uf > 1 and df == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
                                dilation=1, padding=0,
                                stride=(stride[0] * uf, stride[1] * uf))
            fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)

            weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h * uf, w * uf)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))

        elif df > 1 and uf == 1:
            fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
            unfold = torch.nn.Unfold(**fold_params)

            fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
                                dilation=1, padding=0,
                                stride=(stride[0] // df, stride[1] // df))
            fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)

            weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
            normalization = fold(weighting).view(1, 1, h // df, w // df)  # normalizes the overlap
            weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))

        else:
            raise NotImplementedError

        return fold, unfold, normalization, weighting

    @torch.no_grad()
    def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
                  cond_key=None, return_original_cond=False, bs=None):
        x = super().get_input(batch, k)
        if bs is not None:
            x = x[:bs]
        x = x.to(self.device)
        encoder_posterior = self.encode_first_stage(x)
        z = self.get_first_stage_encoding(encoder_posterior).detach()

        if self.model.conditioning_key is not None:
            if cond_key is None:
                cond_key = self.cond_stage_key
            if cond_key != self.first_stage_key:
                if cond_key in ['caption', 'coordinates_bbox']:
                    xc = batch[cond_key]
                elif cond_key == 'class_label':
                    xc = batch
                else:
                    xc = super().get_input(batch, cond_key).to(self.device)
            else:
                xc = x
            if not self.cond_stage_trainable or force_c_encode:
                if isinstance(xc, dict) or isinstance(xc, list):
                    # import pudb; pudb.set_trace()
                    c = self.get_learned_conditioning(xc)
                else:
                    c = self.get_learned_conditioning(xc.to(self.device))
            else:
                c = xc
            if bs is not None:
                c = c[:bs]

            if self.use_positional_encodings:
                pos_x, pos_y = self.compute_latent_shifts(batch)
                ckey = __conditioning_keys__[self.model.conditioning_key]
                c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}

        else:
            c = None
            xc = None
            if self.use_positional_encodings:
                pos_x, pos_y = self.compute_latent_shifts(batch)
                c = {'pos_x': pos_x, 'pos_y': pos_y}
        out = [z, c]
        if return_first_stage_outputs:
            xrec = self.decode_first_stage(z)
            out.extend([x, xrec])
        if return_original_cond:
            out.append(xc)
        return out

    @torch.no_grad()
    def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
        if predict_cids:
            if z.dim() == 4:
                z = torch.argmax(z.exp(), dim=1).long()
            z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
            z = rearrange(z, 'b h w c -> b c h w').contiguous()

        z = 1. / self.scale_factor * z

        if hasattr(self, "split_input_params"):
            if self.split_input_params["patch_distributed_vq"]:
                ks = self.split_input_params["ks"]  # eg. (128, 128)
                stride = self.split_input_params["stride"]  # eg. (64, 64)
                uf = self.split_input_params["vqf"]
                bs, nc, h, w = z.shape
                if ks[0] > h or ks[1] > w:
                    ks = (min(ks[0], h), min(ks[1], w))
                    print("reducing Kernel")

                if stride[0] > h or stride[1] > w:
                    stride = (min(stride[0], h), min(stride[1], w))
                    print("reducing stride")

                fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)

                z = unfold(z)  # (bn, nc * prod(**ks), L)
                # 1. Reshape to img shape
                z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )

                # 2. apply model loop over last dim
                if isinstance(self.first_stage_model, VQModelInterface):
                    output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
                                                                 force_not_quantize=predict_cids or force_not_quantize)
                                   for i in range(z.shape[-1])]
                else:

                    output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
                                   for i in range(z.shape[-1])]

                o = torch.stack(output_list, axis=-1)  # (bn, nc, ks[0], ks[1], L)
                o = o * weighting
                # Reverse 1. reshape to img shape
                o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
                # stitch crops together
                decoded = fold(o)
                decoded = decoded / normalization  # norm is shape (1, 1, h, w)
                return decoded
            else:
                if isinstance(self.first_stage_model, VQModelInterface):
                    return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
                else:
                    return self.first_stage_model.decode(z)

        else:
            if isinstance(self.first_stage_model, VQModelInterface):
                return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
            else:
                return self.first_stage_model.decode(z)

    # same as above but without decorator
    def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
        if predict_cids:
            if z.dim() == 4:
                z = torch.argmax(z.exp(), dim=1).long()
            z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
            z = rearrange(z, 'b h w c -> b c h w').contiguous()

        z = 1. / self.scale_factor * z

        if hasattr(self, "split_input_params"):
            if self.split_input_params["patch_distributed_vq"]:
                ks = self.split_input_params["ks"]  # eg. (128, 128)
                stride = self.split_input_params["stride"]  # eg. (64, 64)
                uf = self.split_input_params["vqf"]
                bs, nc, h, w = z.shape
                if ks[0] > h or ks[1] > w:
                    ks = (min(ks[0], h), min(ks[1], w))
                    print("reducing Kernel")

                if stride[0] > h or stride[1] > w:
                    stride = (min(stride[0], h), min(stride[1], w))
                    print("reducing stride")

                fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)

                z = unfold(z)  # (bn, nc * prod(**ks), L)
                # 1. Reshape to img shape
                z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )

                # 2. apply model loop over last dim
                if isinstance(self.first_stage_model, VQModelInterface):
                    output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
                                                                 force_not_quantize=predict_cids or force_not_quantize)
                                   for i in range(z.shape[-1])]
                else:

                    output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
                                   for i in range(z.shape[-1])]

                o = torch.stack(output_list, axis=-1)  # (bn, nc, ks[0], ks[1], L)
                o = o * weighting
                # Reverse 1. reshape to img shape
                o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
                # stitch crops together
                decoded = fold(o)
                decoded = decoded / normalization  # norm is shape (1, 1, h, w)
                return decoded
            else:
                if isinstance(self.first_stage_model, VQModelInterface):
                    return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
                else:
                    return self.first_stage_model.decode(z)

        else:
            if isinstance(self.first_stage_model, VQModelInterface):
                return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
            else:
                return self.first_stage_model.decode(z)

    @torch.no_grad()
    def encode_first_stage(self, x):
        if hasattr(self, "split_input_params"):
            if self.split_input_params["patch_distributed_vq"]:
                ks = self.split_input_params["ks"]  # eg. (128, 128)
                stride = self.split_input_params["stride"]  # eg. (64, 64)
                df = self.split_input_params["vqf"]
                self.split_input_params['original_image_size'] = x.shape[-2:]
                bs, nc, h, w = x.shape
                if ks[0] > h or ks[1] > w:
                    ks = (min(ks[0], h), min(ks[1], w))
                    print("reducing Kernel")

                if stride[0] > h or stride[1] > w:
                    stride = (min(stride[0], h), min(stride[1], w))
                    print("reducing stride")

                fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df)
                z = unfold(x)  # (bn, nc * prod(**ks), L)
                # Reshape to img shape
                z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )

                output_list = [self.first_stage_model.encode(z[:, :, :, :, i])
                               for i in range(z.shape[-1])]

                o = torch.stack(output_list, axis=-1)
                o = o * weighting

                # Reverse reshape to img shape
                o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
                # stitch crops together
                decoded = fold(o)
                decoded = decoded / normalization
                return decoded

            else:
                return self.first_stage_model.encode(x)
        else:
            return self.first_stage_model.encode(x)

    def shared_step(self, batch, **kwargs):
        x, c = self.get_input(batch, self.first_stage_key)
        loss = self(x, c)
        return loss

    def forward(self, x, c, *args, **kwargs):
        t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
        if self.model.conditioning_key is not None:
            assert c is not None
            if self.cond_stage_trainable:
                c = self.get_learned_conditioning(c)
            if self.shorten_cond_schedule:  # TODO: drop this option
                tc = self.cond_ids[t].to(self.device)
                c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
        return self.p_losses(x, c, t, *args, **kwargs)

    def apply_model(self, x_noisy, t, cond, return_ids=False):

        if isinstance(cond, dict):
            # hybrid case, cond is expected to be a dict
            pass
        else:
            if not isinstance(cond, list):
                cond = [cond]
            key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
            cond = {key: cond}

        if hasattr(self, "split_input_params"):
            assert len(cond) == 1  # todo can only deal with one conditioning atm
            assert not return_ids
            ks = self.split_input_params["ks"]  # eg. (128, 128)
            stride = self.split_input_params["stride"]  # eg. (64, 64)

            h, w = x_noisy.shape[-2:]

            fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)

            z = unfold(x_noisy)  # (bn, nc * prod(**ks), L)
            # Reshape to img shape
            z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1]))  # (bn, nc, ks[0], ks[1], L )
            z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]

            if self.cond_stage_key in ["image", "LR_image", "segmentation",
                                       'bbox_img'] and self.model.conditioning_key:  # todo check for completeness
                c_key = next(iter(cond.keys()))  # get key
                c = next(iter(cond.values()))  # get value
                assert (len(c) == 1)  # todo extend to list with more than one elem
                c = c[0]  # get element

                c = unfold(c)
                c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1]))  # (bn, nc, ks[0], ks[1], L )

                cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]

            elif self.cond_stage_key == 'coordinates_bbox':
                assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'

                # assuming padding of unfold is always 0 and its dilation is always 1
                n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
                full_img_h, full_img_w = self.split_input_params['original_image_size']
                # as we are operating on latents, we need the factor from the original image size to the
                # spatial latent size to properly rescale the crops for regenerating the bbox annotations
                num_downs = self.first_stage_model.encoder.num_resolutions - 1
                rescale_latent = 2 ** (num_downs)

                # get top-left positions of patches conforming to the bbox tokenizer; therefore we
                # need to rescale the tl patch coordinates to be in between (0,1)
                tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
                                         rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
                                        for patch_nr in range(z.shape[-1])]

                # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
                patch_limits = [(x_tl, y_tl,
                                 rescale_latent * ks[0] / full_img_w,
                                 rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
                # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]

                # tokenize crop coordinates for the bounding boxes of the respective patches
                patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
                                      for bbox in patch_limits]  # list of length l with tensors of shape (1, 2)
                print(patch_limits_tknzd[0].shape)
                # cut tknzd crop position from conditioning
                assert isinstance(cond, dict), 'cond must be dict to be fed into model'
                cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
                print(cut_cond.shape)

                adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
                adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
                print(adapted_cond.shape)
                adapted_cond = self.get_learned_conditioning(adapted_cond)
                print(adapted_cond.shape)
                adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
                print(adapted_cond.shape)

                cond_list = [{'c_crossattn': [e]} for e in adapted_cond]

            else:
                cond_list = [cond for i in range(z.shape[-1])]  # Todo make this more efficient

            # apply model by loop over crops
            output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
            assert not isinstance(output_list[0],
                                  tuple)  # todo cant deal with multiple model outputs check this never happens

            o = torch.stack(output_list, axis=-1)
            o = o * weighting
            # Reverse reshape to img shape
            o = o.view((o.shape[0], -1, o.shape[-1]))  # (bn, nc * ks[0] * ks[1], L)
            # stitch crops together
            x_recon = fold(o) / normalization

        else:
            x_recon = self.model(x_noisy, t, **cond)

        if isinstance(x_recon, tuple) and not return_ids:
            return x_recon[0]
        else:
            return x_recon

    def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
        return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
               extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)

    def _prior_bpd(self, x_start):
        """
        Get the prior KL term for the variational lower-bound, measured in
        bits-per-dim.
        This term can't be optimized, as it only depends on the encoder.
        :param x_start: the [N x C x ...] tensor of inputs.
        :return: a batch of [N] KL values (in bits), one per batch element.
        """
        batch_size = x_start.shape[0]
        t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
        qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
        kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
        return mean_flat(kl_prior) / np.log(2.0)

    def p_losses(self, x_start, cond, t, noise=None):
        noise = default(noise, lambda: torch.randn_like(x_start))
        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
        model_output = self.apply_model(x_noisy, t, cond)

        loss_dict = {}
        prefix = 'train' if self.training else 'val'

        if self.parameterization == "x0":
            target = x_start
        elif self.parameterization == "eps":
            target = noise
        else:
            raise NotImplementedError()

        loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
        loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})

        logvar_t = self.logvar[t].to(self.device)
        loss = loss_simple / torch.exp(logvar_t) + logvar_t
        # loss = loss_simple / torch.exp(self.logvar) + self.logvar
        if self.learn_logvar:
            loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
            loss_dict.update({'logvar': self.logvar.data.mean()})

        loss = self.l_simple_weight * loss.mean()

        loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
        loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
        loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
        loss += (self.original_elbo_weight * loss_vlb)
        loss_dict.update({f'{prefix}/loss': loss})

        return loss, loss_dict

    def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
                        return_x0=False, score_corrector=None, corrector_kwargs=None):
        t_in = t
        model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)

        if score_corrector is not None:
            assert self.parameterization == "eps"
            model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)

        if return_codebook_ids:
            model_out, logits = model_out

        if self.parameterization == "eps":
            x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
        elif self.parameterization == "x0":
            x_recon = model_out
        else:
            raise NotImplementedError()

        if clip_denoised:
            x_recon.clamp_(-1., 1.)
        if quantize_denoised:
            x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
        if return_codebook_ids:
            return model_mean, posterior_variance, posterior_log_variance, logits
        elif return_x0:
            return model_mean, posterior_variance, posterior_log_variance, x_recon
        else:
            return model_mean, posterior_variance, posterior_log_variance

    @torch.no_grad()
    def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
                 return_codebook_ids=False, quantize_denoised=False, return_x0=False,
                 temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
        b, *_, device = *x.shape, x.device
        outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
                                       return_codebook_ids=return_codebook_ids,
                                       quantize_denoised=quantize_denoised,
                                       return_x0=return_x0,
                                       score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
        if return_codebook_ids:
            raise DeprecationWarning("Support dropped.")
            model_mean, _, model_log_variance, logits = outputs
        elif return_x0:
            model_mean, _, model_log_variance, x0 = outputs
        else:
            model_mean, _, model_log_variance = outputs

        noise = noise_like(x.shape, device, repeat_noise) * temperature
        if noise_dropout > 0.:
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))

        if return_codebook_ids:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
        if return_x0:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
        else:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

    @torch.no_grad()
    def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
                              img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
                              score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
                              log_every_t=None):
        if not log_every_t:
            log_every_t = self.log_every_t
        timesteps = self.num_timesteps
        if batch_size is not None:
            b = batch_size if batch_size is not None else shape[0]
            shape = [batch_size] + list(shape)
        else:
            b = batch_size = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=self.device)
        else:
            img = x_T
        intermediates = []
        if cond is not None:
            if isinstance(cond, dict):
                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
                        [x[:batch_size] for x in cond[key]] for key in cond}
            else:
                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]

        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
                        total=timesteps) if verbose else reversed(range(0, timesteps))
        if type(temperature) == float:
            temperature = [temperature] * timesteps

        for i in iterator:
            ts = torch.full((b,), i, device=self.device, dtype=torch.long)
            if self.shorten_cond_schedule:
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

            img, x0_partial = self.p_sample(img, cond, ts,
                                            clip_denoised=self.clip_denoised,
                                            quantize_denoised=quantize_denoised, return_x0=True,
                                            temperature=temperature[i], noise_dropout=noise_dropout,
                                            score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
            if mask is not None:
                assert x0 is not None
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(x0_partial)
            if callback:
                callback(i)
            if img_callback:
                img_callback(img, i)
        return img, intermediates

    @torch.no_grad()
    def p_sample_loop(self, cond, shape, return_intermediates=False,
                      x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, start_T=None,
                      log_every_t=None):

        if not log_every_t:
            log_every_t = self.log_every_t
        device = self.betas.device
        b = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T

        intermediates = [img]
        if timesteps is None:
            timesteps = self.num_timesteps

        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
            range(0, timesteps))

        if mask is not None:
            assert x0 is not None
            assert x0.shape[2:3] == mask.shape[2:3]  # spatial size has to match

        for i in iterator:
            ts = torch.full((b,), i, device=device, dtype=torch.long)
            if self.shorten_cond_schedule:
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

            img = self.p_sample(img, cond, ts,
                                clip_denoised=self.clip_denoised,
                                quantize_denoised=quantize_denoised)
            if mask is not None:
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(img)
            if callback:
                callback(i)
            if img_callback:
                img_callback(img, i)

        if return_intermediates:
            return img, intermediates
        return img

    @torch.no_grad()
    def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
               verbose=True, timesteps=None, quantize_denoised=False,
               mask=None, x0=None, shape=None, **kwargs):
        if shape is None:
            shape = (batch_size, self.channels, self.image_size, self.image_size)
        if cond is not None:
            if isinstance(cond, dict):
                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
                        [x[:batch_size] for x in cond[key]] for key in cond}
            else:
                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
        return self.p_sample_loop(cond,
                                  shape,
                                  return_intermediates=return_intermediates, x_T=x_T,
                                  verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
                                  mask=mask, x0=x0)

    @torch.no_grad()
    def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):

        if ddim:
            ddim_sampler = DDIMSampler(self)
            shape = (self.channels, self.image_size, self.image_size)
            samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
                                                         shape, cond, verbose=False, **kwargs)

        else:
            samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
                                                 return_intermediates=True, **kwargs)

        return samples, intermediates


    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
                   quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
                   plot_diffusion_rows=True, **kwargs):

        use_ddim = ddim_steps is not None

        log = {}
        z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
                                           return_first_stage_outputs=True,
                                           force_c_encode=True,
                                           return_original_cond=True,
                                           bs=N)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        log["inputs"] = x
        log["reconstruction"] = xrec
        if self.model.conditioning_key is not None:
            if hasattr(self.cond_stage_model, "decode"):
                xc = self.cond_stage_model.decode(c)
                log["conditioning"] = xc
            elif self.cond_stage_key in ["caption"]:
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
                log["conditioning"] = xc
            elif self.cond_stage_key == 'class_label':
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
                log['conditioning'] = xc
            elif isimage(xc):
                log["conditioning"] = xc
            if ismap(xc):
                log["original_conditioning"] = self.to_rgb(xc)

        if plot_diffusion_rows:
            # get diffusion row
            diffusion_row = []
            z_start = z[:n_row]
            for t in range(self.num_timesteps):
                if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                    t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                    t = t.to(self.device).long()
                    noise = torch.randn_like(z_start)
                    z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
                    diffusion_row.append(self.decode_first_stage(z_noisy))

            diffusion_row = torch.stack(diffusion_row)  # n_log_step, n_row, C, H, W
            diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
            diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
            diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
            log["diffusion_row"] = diffusion_grid

        if sample:
            # get denoise row
            with self.ema_scope("Plotting"):
                samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
                                                         ddim_steps=ddim_steps, eta=ddim_eta)
                # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
            x_samples = self.decode_first_stage(samples)
            log["samples"] = x_samples
            if plot_denoise_rows:
                denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
                log["denoise_row"] = denoise_grid

            if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
                    self.first_stage_model, IdentityFirstStage):
                # also display when quantizing x0 while sampling
                with self.ema_scope("Plotting Quantized Denoised"):
                    samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
                                                             ddim_steps=ddim_steps, eta=ddim_eta,
                                                             quantize_denoised=True)
                    # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
                    #                                      quantize_denoised=True)
                x_samples = self.decode_first_stage(samples.to(self.device))
                log["samples_x0_quantized"] = x_samples

            if inpaint:
                # make a simple center square
                h, w = z.shape[2], z.shape[3]
                mask = torch.ones(N, h, w).to(self.device)
                # zeros will be filled in
                mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
                mask = mask[:, None, ...]
                with self.ema_scope("Plotting Inpaint"):

                    samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
                                                 ddim_steps=ddim_steps, x0=z[:N], mask=mask)
                x_samples = self.decode_first_stage(samples.to(self.device))
                log["samples_inpainting"] = x_samples
                log["mask"] = mask

                # outpaint
                with self.ema_scope("Plotting Outpaint"):
                    samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
                                                 ddim_steps=ddim_steps, x0=z[:N], mask=mask)
                x_samples = self.decode_first_stage(samples.to(self.device))
                log["samples_outpainting"] = x_samples

        if plot_progressive_rows:
            with self.ema_scope("Plotting Progressives"):
                img, progressives = self.progressive_denoising(c,
                                                               shape=(self.channels, self.image_size, self.image_size),
                                                               batch_size=N)
            prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
            log["progressive_row"] = prog_row

        if return_keys:
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log

    def configure_optimizers(self):
        lr = self.learning_rate
        params = list(self.model.parameters())
        if self.cond_stage_trainable:
            print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
            params = params + list(self.cond_stage_model.parameters())
        if self.learn_logvar:
            print('Diffusion model optimizing logvar')
            params.append(self.logvar)
        opt = torch.optim.AdamW(params, lr=lr)
        if self.use_scheduler:
            assert 'target' in self.scheduler_config
            scheduler = instantiate_from_config(self.scheduler_config)

            print("Setting up LambdaLR scheduler...")
            scheduler = [
                {
                    'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
                    'interval': 'step',
                    'frequency': 1
                }]
            return [opt], scheduler
        return opt

    @torch.no_grad()
    def to_rgb(self, x):
        x = x.float()
        if not hasattr(self, "colorize"):
            self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
        x = nn.functional.conv2d(x, weight=self.colorize)
        x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
        return x


class DiffusionWrapperV1(pl.LightningModule):
    def __init__(self, diff_model_config, conditioning_key):
        super().__init__()
        self.diffusion_model = instantiate_from_config(diff_model_config)
        self.conditioning_key = conditioning_key
        assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm']

    def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
        if self.conditioning_key is None:
            out = self.diffusion_model(x, t)
        elif self.conditioning_key == 'concat':
            xc = torch.cat([x] + c_concat, dim=1)
            out = self.diffusion_model(xc, t)
        elif self.conditioning_key == 'crossattn':
            cc = torch.cat(c_crossattn, 1)
            out = self.diffusion_model(x, t, context=cc)
        elif self.conditioning_key == 'hybrid':
            xc = torch.cat([x] + c_concat, dim=1)
            cc = torch.cat(c_crossattn, 1)
            out = self.diffusion_model(xc, t, context=cc)
        elif self.conditioning_key == 'adm':
            cc = c_crossattn[0]
            out = self.diffusion_model(x, t, y=cc)
        else:
            raise NotImplementedError()

        return out


class Layout2ImgDiffusionV1(LatentDiffusionV1):
    # TODO: move all layout-specific hacks to this class
    def __init__(self, cond_stage_key, *args, **kwargs):
        assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
        super().__init__(*args, cond_stage_key=cond_stage_key, **kwargs)

    def log_images(self, batch, N=8, *args, **kwargs):
        logs = super().log_images(*args, batch=batch, N=N, **kwargs)

        key = 'train' if self.training else 'validation'
        dset = self.trainer.datamodule.datasets[key]
        mapper = dset.conditional_builders[self.cond_stage_key]

        bbox_imgs = []
        map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno))
        for tknzd_bbox in batch[self.cond_stage_key][:N]:
            bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256))
            bbox_imgs.append(bboximg)

        cond_img = torch.stack(bbox_imgs, dim=0)
        logs['bbox_image'] = cond_img
        return logs


ldm.models.diffusion.ddpm.DDPMV1 = DDPMV1
ldm.models.diffusion.ddpm.LatentDiffusionV1 = LatentDiffusionV1
ldm.models.diffusion.ddpm.DiffusionWrapperV1 = DiffusionWrapperV1
ldm.models.diffusion.ddpm.Layout2ImgDiffusionV1 = Layout2ImgDiffusionV1
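
A brief aside on the `conditioning_key` dispatch in `DiffusionWrapperV1.forward` above: conditioning is routed either into the UNet's input channels ('concat') or into cross-attention context ('crossattn'). Below is a minimal sketch of the 'concat' path; `DummyUNet` and the tensor shapes are illustrative assumptions, not code from this upload:

import torch


class DummyUNet(torch.nn.Module):
    # Hypothetical stand-in for the wrapped diffusion UNet; a real model predicts noise.
    def forward(self, x, t, context=None, y=None):
        return x


x = torch.randn(2, 4, 32, 32)            # noisy latents
t = torch.randint(0, 1000, (2,))          # per-sample timesteps
c_concat = [torch.randn(2, 3, 32, 32)]    # e.g. a low-resolution image as conditioning

# 'concat' mode: conditioning is stacked onto the input channels,
# mirroring the torch.cat([x] + c_concat, dim=1) branch above.
xc = torch.cat([x] + c_concat, dim=1)     # -> (2, 7, 32, 32)
out = DummyUNet()(xc, t)
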
extensions-builtin/LDSR/vqvae_quantize.py
ADDED
@@ -0,0 +1,147 @@
# Vendored from https://raw.githubusercontent.com/CompVis/taming-transformers/24268930bf1dce879235a7fddd0b2355b84d7ea6/taming/modules/vqvae/quantize.py,
# where the license is as follows:
#
# Copyright (c) 2020 Patrick Esser and Robin Rombach and Björn Ommer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.

import torch
import torch.nn as nn
import numpy as np
from einops import rearrange


class VectorQuantizer2(nn.Module):
    """
    Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly
    avoids costly matrix multiplications and allows for post-hoc remapping of indices.
    """

    # NOTE: due to a bug the beta term was applied to the wrong term. for
    # backwards compatibility we use the buggy version by default, but you can
    # specify legacy=False to fix it.
    def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random",
                 sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.e_dim = e_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.e_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                  f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z, temp=None, rescale_logits=False, return_logits=False):
        assert temp is None or temp == 1.0, "Only for interface compatible with Gumbel"
        assert rescale_logits is False, "Only for interface compatible with Gumbel"
        assert return_logits is False, "Only for interface compatible with Gumbel"
        # reshape z -> (batch, height, width, channel) and flatten
        z = rearrange(z, 'b c h w -> b h w c').contiguous()
        z_flattened = z.view(-1, self.e_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z

        d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
            torch.sum(self.embedding.weight ** 2, dim=1) - 2 * \
            torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))

        min_encoding_indices = torch.argmin(d, dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + \
                   torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * \
                   torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(
                z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
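
A minimal usage sketch for `VectorQuantizer2` above, assuming the class is in scope; the toy sizes are illustrative assumptions. It shows the straight-through estimator (`z + (z_q - z).detach()`), which lets gradients flow to the encoder as if quantization were the identity:

import torch

# Assumes VectorQuantizer2 (defined above) is importable/in scope.
quantizer = VectorQuantizer2(n_e=16, e_dim=8, beta=0.25)

z = torch.randn(2, 8, 4, 4, requires_grad=True)   # (batch, e_dim, h, w)
z_q, loss, (_, _, indices) = quantizer(z)

assert z_q.shape == z.shape          # quantized latents keep the input shape
assert indices.numel() == 2 * 4 * 4  # one codebook index per spatial position

# Straight-through trick: gradients pass through the quantizer to z.
z_q.sum().backward()
assert z.grad is not None
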
extensions-builtin/Lora/extra_networks_lora.py
ADDED
@@ -0,0 +1,67 @@
from modules import extra_networks, shared
import networks


class ExtraNetworkLora(extra_networks.ExtraNetwork):
    def __init__(self):
        super().__init__('lora')

        self.errors = {}
        """mapping of network names to the number of errors the network had during operation"""

    def activate(self, p, params_list):
        additional = shared.opts.sd_lora

        self.errors.clear()

        if additional != "None" and additional in networks.available_networks and not any(x for x in params_list if x.items[0] == additional):
            p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
            params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))

        names = []
        te_multipliers = []
        unet_multipliers = []
        dyn_dims = []
        for params in params_list:
            assert params.items

            names.append(params.positional[0])

            te_multiplier = float(params.positional[1]) if len(params.positional) > 1 else 1.0
            te_multiplier = float(params.named.get("te", te_multiplier))

            unet_multiplier = float(params.positional[2]) if len(params.positional) > 2 else te_multiplier
            unet_multiplier = float(params.named.get("unet", unet_multiplier))

            dyn_dim = int(params.positional[3]) if len(params.positional) > 3 else None
            dyn_dim = int(params.named["dyn"]) if "dyn" in params.named else dyn_dim

            te_multipliers.append(te_multiplier)
            unet_multipliers.append(unet_multiplier)
            dyn_dims.append(dyn_dim)

        networks.load_networks(names, te_multipliers, unet_multipliers, dyn_dims)

        if shared.opts.lora_add_hashes_to_infotext:
            network_hashes = []
            for item in networks.loaded_networks:
                shorthash = item.network_on_disk.shorthash
                if not shorthash:
                    continue

                alias = item.mentioned_name
                if not alias:
                    continue

                alias = alias.replace(":", "").replace(",", "")

                network_hashes.append(f"{alias}: {shorthash}")

            if network_hashes:
                p.extra_generation_params["Lora hashes"] = ", ".join(network_hashes)

    def deactivate(self, p):
        if self.errors:
            p.comment("Networks with errors: " + ", ".join(f"{k} ({v})" for k, v in self.errors.items()))

            self.errors.clear()
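
A sketch of how a prompt tag maps onto the positional/named parameters that `activate()` consumes above. The concrete tag `<lora:myLora:0.8:0.5:dyn=16>` is a hypothetical example, and the bare lists stand in for `extra_networks.ExtraNetworkParams`:

# <lora:myLora:0.8:0.5:dyn=16> would arrive roughly as:
positional = ["myLora", "0.8", "0.5"]
named = {"dyn": "16"}

# Same fallback chain as in activate(): positional value, overridable by named.
te_multiplier = float(positional[1]) if len(positional) > 1 else 1.0
te_multiplier = float(named.get("te", te_multiplier))

unet_multiplier = float(positional[2]) if len(positional) > 2 else te_multiplier
unet_multiplier = float(named.get("unet", unet_multiplier))

dyn_dim = int(positional[3]) if len(positional) > 3 else None
dyn_dim = int(named["dyn"]) if "dyn" in named else dyn_dim

assert (te_multiplier, unet_multiplier, dyn_dim) == (0.8, 0.5, 16)
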
extensions-builtin/Lora/lora.py
ADDED
@@ -0,0 +1,9 @@
import networks

list_available_loras = networks.list_available_networks

available_loras = networks.available_networks
available_lora_aliases = networks.available_network_aliases
available_lora_hash_lookup = networks.available_network_hash_lookup
forbidden_lora_aliases = networks.forbidden_network_aliases
loaded_loras = networks.loaded_networks
extensions-builtin/Lora/lora_patches.py
ADDED
@@ -0,0 +1,31 @@
|
|
|
1 |
+
import torch
|
2 |
+
|
3 |
+
import networks
|
4 |
+
from modules import patches
|
5 |
+
|
6 |
+
|
7 |
+
class LoraPatches:
|
8 |
+
def __init__(self):
|
9 |
+
self.Linear_forward = patches.patch(__name__, torch.nn.Linear, 'forward', networks.network_Linear_forward)
|
10 |
+
self.Linear_load_state_dict = patches.patch(__name__, torch.nn.Linear, '_load_from_state_dict', networks.network_Linear_load_state_dict)
|
11 |
+
self.Conv2d_forward = patches.patch(__name__, torch.nn.Conv2d, 'forward', networks.network_Conv2d_forward)
|
12 |
+
self.Conv2d_load_state_dict = patches.patch(__name__, torch.nn.Conv2d, '_load_from_state_dict', networks.network_Conv2d_load_state_dict)
|
13 |
+
self.GroupNorm_forward = patches.patch(__name__, torch.nn.GroupNorm, 'forward', networks.network_GroupNorm_forward)
|
14 |
+
self.GroupNorm_load_state_dict = patches.patch(__name__, torch.nn.GroupNorm, '_load_from_state_dict', networks.network_GroupNorm_load_state_dict)
|
15 |
+
self.LayerNorm_forward = patches.patch(__name__, torch.nn.LayerNorm, 'forward', networks.network_LayerNorm_forward)
|
16 |
+
self.LayerNorm_load_state_dict = patches.patch(__name__, torch.nn.LayerNorm, '_load_from_state_dict', networks.network_LayerNorm_load_state_dict)
|
17 |
+
self.MultiheadAttention_forward = patches.patch(__name__, torch.nn.MultiheadAttention, 'forward', networks.network_MultiheadAttention_forward)
|
18 |
+
self.MultiheadAttention_load_state_dict = patches.patch(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict', networks.network_MultiheadAttention_load_state_dict)
|
19 |
+
|
20 |
+
def undo(self):
|
21 |
+
self.Linear_forward = patches.undo(__name__, torch.nn.Linear, 'forward')
|
22 |
+
self.Linear_load_state_dict = patches.undo(__name__, torch.nn.Linear, '_load_from_state_dict')
|
23 |
+
self.Conv2d_forward = patches.undo(__name__, torch.nn.Conv2d, 'forward')
|
24 |
+
self.Conv2d_load_state_dict = patches.undo(__name__, torch.nn.Conv2d, '_load_from_state_dict')
|
25 |
+
self.GroupNorm_forward = patches.undo(__name__, torch.nn.GroupNorm, 'forward')
|
26 |
+
self.GroupNorm_load_state_dict = patches.undo(__name__, torch.nn.GroupNorm, '_load_from_state_dict')
|
27 |
+
self.LayerNorm_forward = patches.undo(__name__, torch.nn.LayerNorm, 'forward')
|
28 |
+
self.LayerNorm_load_state_dict = patches.undo(__name__, torch.nn.LayerNorm, '_load_from_state_dict')
|
29 |
+
self.MultiheadAttention_forward = patches.undo(__name__, torch.nn.MultiheadAttention, 'forward')
|
30 |
+
self.MultiheadAttention_load_state_dict = patches.undo(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict')
|
31 |
+
|
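LoraPatches relies on the webui's modules.patches helper to swap the forward and _load_from_state_dict methods of the torch layer classes and to restore them in undo(). A minimal sketch of the save-and-replace idea behind such a helper (an assumption for illustration: the real modules.patches additionally keys patches by an owner name so several extensions can patch the same attribute; that bookkeeping is omitted here):

    _originals = {}

    def patch(obj, field, replacement):
        # remember the original attribute, then overwrite it
        key = (obj, field)
        assert key not in _originals, "already patched"
        _originals[key] = getattr(obj, field)
        setattr(obj, field, replacement)
        return _originals[key]  # caller keeps a handle on the original, as LoraPatches does

    def undo(obj, field):
        # put the saved original back
        setattr(obj, field, _originals.pop((obj, field)))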
extensions-builtin/Lora/lyco_helpers.py
ADDED
@@ -0,0 +1,21 @@
+import torch
+
+
+def make_weight_cp(t, wa, wb):
+    temp = torch.einsum('i j k l, j r -> i r k l', t, wb)
+    return torch.einsum('i j k l, i r -> r j k l', temp, wa)
+
+
+def rebuild_conventional(up, down, shape, dyn_dim=None):
+    up = up.reshape(up.size(0), -1)
+    down = down.reshape(down.size(0), -1)
+    if dyn_dim is not None:
+        up = up[:, :dyn_dim]
+        down = down[:dyn_dim, :]
+    return (up @ down).reshape(shape)
+
+
+def rebuild_cp_decomposition(up, down, mid):
+    up = up.reshape(up.size(0), -1)
+    down = down.reshape(down.size(0), -1)
+    return torch.einsum('n m k l, i n, m j -> i j k l', mid, up, down)
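A shape sanity check for rebuild_conventional, run with the helpers above in scope and hypothetical tensors: both factors are flattened to 2D, optionally truncated to dyn_dim ranks, multiplied, and reshaped back to the target weight shape:

    import torch

    up = torch.randn(8, 4, 1, 1)     # hypothetical rank-4 factor, 8 output channels
    down = torch.randn(4, 16, 1, 1)  # 16 input channels

    delta = rebuild_conventional(up, down, (8, 16, 1, 1))
    print(delta.shape)  # torch.Size([8, 16, 1, 1])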
extensions-builtin/Lora/network.py
ADDED
@@ -0,0 +1,158 @@
+from __future__ import annotations
+import os
+from collections import namedtuple
+import enum
+
+from modules import sd_models, cache, errors, hashes, shared
+
+NetworkWeights = namedtuple('NetworkWeights', ['network_key', 'sd_key', 'w', 'sd_module'])
+
+metadata_tags_order = {"ss_sd_model_name": 1, "ss_resolution": 2, "ss_clip_skip": 3, "ss_num_train_images": 10, "ss_tag_frequency": 20}
+
+
+class SdVersion(enum.Enum):
+    Unknown = 1
+    SD1 = 2
+    SD2 = 3
+    SDXL = 4
+
+
+class NetworkOnDisk:
+    def __init__(self, name, filename):
+        self.name = name
+        self.filename = filename
+        self.metadata = {}
+        self.is_safetensors = os.path.splitext(filename)[1].lower() == ".safetensors"
+
+        def read_metadata():
+            metadata = sd_models.read_metadata_from_safetensors(filename)
+            metadata.pop('ssmd_cover_images', None)  # those are cover images, and they are too big to display in UI as text
+
+            return metadata
+
+        if self.is_safetensors:
+            try:
+                self.metadata = cache.cached_data_for_file('safetensors-metadata', "lora/" + self.name, filename, read_metadata)
+            except Exception as e:
+                errors.display(e, f"reading lora {filename}")
+
+        if self.metadata:
+            m = {}
+            for k, v in sorted(self.metadata.items(), key=lambda x: metadata_tags_order.get(x[0], 999)):
+                m[k] = v
+
+            self.metadata = m
+
+        self.alias = self.metadata.get('ss_output_name', self.name)
+
+        self.hash = None
+        self.shorthash = None
+        self.set_hash(
+            self.metadata.get('sshs_model_hash') or
+            hashes.sha256_from_cache(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or
+            ''
+        )
+
+        self.sd_version = self.detect_version()
+
+    def detect_version(self):
+        if str(self.metadata.get('ss_base_model_version', "")).startswith("sdxl_"):
+            return SdVersion.SDXL
+        elif str(self.metadata.get('ss_v2', "")) == "True":
+            return SdVersion.SD2
+        elif len(self.metadata):
+            return SdVersion.SD1
+
+        return SdVersion.Unknown
+
+    def set_hash(self, v):
+        self.hash = v
+        self.shorthash = self.hash[0:12]
+
+        if self.shorthash:
+            import networks
+            networks.available_network_hash_lookup[self.shorthash] = self
+
+    def read_hash(self):
+        if not self.hash:
+            self.set_hash(hashes.sha256(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or '')
+
+    def get_alias(self):
+        import networks
+        if shared.opts.lora_preferred_name == "Filename" or self.alias.lower() in networks.forbidden_network_aliases:
+            return self.name
+        else:
+            return self.alias
+
+
+class Network:  # LoraModule
+    def __init__(self, name, network_on_disk: NetworkOnDisk):
+        self.name = name
+        self.network_on_disk = network_on_disk
+        self.te_multiplier = 1.0
+        self.unet_multiplier = 1.0
+        self.dyn_dim = None
+        self.modules = {}
+        self.mtime = None
+
+        self.mentioned_name = None
+        """the text that was used to add the network to prompt - can be either name or an alias"""
+
+
+class ModuleType:
+    def create_module(self, net: Network, weights: NetworkWeights) -> Network | None:
+        return None
+
+
+class NetworkModule:
+    def __init__(self, net: Network, weights: NetworkWeights):
+        self.network = net
+        self.network_key = weights.network_key
+        self.sd_key = weights.sd_key
+        self.sd_module = weights.sd_module
+
+        if hasattr(self.sd_module, 'weight'):
+            self.shape = self.sd_module.weight.shape
+
+        self.dim = None
+        self.bias = weights.w.get("bias")
+        self.alpha = weights.w["alpha"].item() if "alpha" in weights.w else None
+        self.scale = weights.w["scale"].item() if "scale" in weights.w else None
+
+    def multiplier(self):
+        if 'transformer' in self.sd_key[:20]:
+            return self.network.te_multiplier
+        else:
+            return self.network.unet_multiplier
+
+    def calc_scale(self):
+        if self.scale is not None:
+            return self.scale
+        if self.dim is not None and self.alpha is not None:
+            return self.alpha / self.dim
+
+        return 1.0
+
+    def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None):
+        if self.bias is not None:
+            updown = updown.reshape(self.bias.shape)
+            updown += self.bias.to(orig_weight.device, dtype=orig_weight.dtype)
+            updown = updown.reshape(output_shape)
+
+        if len(output_shape) == 4:
+            updown = updown.reshape(output_shape)
+
+        if orig_weight.size().numel() == updown.size().numel():
+            updown = updown.reshape(orig_weight.shape)
+
+        if ex_bias is not None:
+            ex_bias = ex_bias * self.multiplier()
+
+        return updown * self.calc_scale() * self.multiplier(), ex_bias
+
+    def calc_updown(self, target):
+        raise NotImplementedError()
+
+    def forward(self, x, y):
+        raise NotImplementedError()
+
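The effective delta a NetworkModule contributes is updown * calc_scale() * multiplier() (see finalize_updown above). With a hypothetical alpha of 4, a rank (dim) of 8, and a unet multiplier of 0.5, the stored updown ends up scaled by 0.25:

    alpha, dim, multiplier = 4.0, 8, 0.5
    scale = alpha / dim        # calc_scale() when no explicit "scale" key exists
    print(scale * multiplier)  # 0.25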
extensions-builtin/Lora/network_full.py
ADDED
@@ -0,0 +1,27 @@
+import network
+
+
+class ModuleTypeFull(network.ModuleType):
+    def create_module(self, net: network.Network, weights: network.NetworkWeights):
+        if all(x in weights.w for x in ["diff"]):
+            return NetworkModuleFull(net, weights)
+
+        return None
+
+
+class NetworkModuleFull(network.NetworkModule):
+    def __init__(self, net: network.Network, weights: network.NetworkWeights):
+        super().__init__(net, weights)
+
+        self.weight = weights.w.get("diff")
+        self.ex_bias = weights.w.get("diff_b")
+
+    def calc_updown(self, orig_weight):
+        output_shape = self.weight.shape
+        updown = self.weight.to(orig_weight.device, dtype=orig_weight.dtype)
+        if self.ex_bias is not None:
+            ex_bias = self.ex_bias.to(orig_weight.device, dtype=orig_weight.dtype)
+        else:
+            ex_bias = None
+
+        return self.finalize_updown(updown, orig_weight, output_shape, ex_bias)
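A "full" module simply stores the complete weight difference, so applying it reduces to an addition (hypothetical shapes for illustration):

    import torch

    orig_weight = torch.zeros(4, 4)
    diff = torch.full((4, 4), 0.1)  # stands in for the "diff" tensor from weights.w

    orig_weight += diff             # what network_apply_weights later does with the updown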
extensions-builtin/Lora/network_hada.py
ADDED
@@ -0,0 +1,55 @@
+import lyco_helpers
+import network
+
+
+class ModuleTypeHada(network.ModuleType):
+    def create_module(self, net: network.Network, weights: network.NetworkWeights):
+        if all(x in weights.w for x in ["hada_w1_a", "hada_w1_b", "hada_w2_a", "hada_w2_b"]):
+            return NetworkModuleHada(net, weights)
+
+        return None
+
+
+class NetworkModuleHada(network.NetworkModule):
+    def __init__(self, net: network.Network, weights: network.NetworkWeights):
+        super().__init__(net, weights)
+
+        if hasattr(self.sd_module, 'weight'):
+            self.shape = self.sd_module.weight.shape
+
+        self.w1a = weights.w["hada_w1_a"]
+        self.w1b = weights.w["hada_w1_b"]
+        self.dim = self.w1b.shape[0]
+        self.w2a = weights.w["hada_w2_a"]
+        self.w2b = weights.w["hada_w2_b"]
+
+        self.t1 = weights.w.get("hada_t1")
+        self.t2 = weights.w.get("hada_t2")
+
+    def calc_updown(self, orig_weight):
+        w1a = self.w1a.to(orig_weight.device, dtype=orig_weight.dtype)
+        w1b = self.w1b.to(orig_weight.device, dtype=orig_weight.dtype)
+        w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype)
+        w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype)
+
+        output_shape = [w1a.size(0), w1b.size(1)]
+
+        if self.t1 is not None:
+            output_shape = [w1a.size(1), w1b.size(1)]
+            t1 = self.t1.to(orig_weight.device, dtype=orig_weight.dtype)
+            updown1 = lyco_helpers.make_weight_cp(t1, w1a, w1b)
+            output_shape += t1.shape[2:]
+        else:
+            if len(w1b.shape) == 4:
+                output_shape += w1b.shape[2:]
+            updown1 = lyco_helpers.rebuild_conventional(w1a, w1b, output_shape)
+
+        if self.t2 is not None:
+            t2 = self.t2.to(orig_weight.device, dtype=orig_weight.dtype)
+            updown2 = lyco_helpers.make_weight_cp(t2, w2a, w2b)
+        else:
+            updown2 = lyco_helpers.rebuild_conventional(w2a, w2b, output_shape)
+
+        updown = updown1 * updown2
+
+        return self.finalize_updown(updown, orig_weight, output_shape)
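In the simple (non-CP) case above, the LoHa delta is the elementwise (Hadamard) product of two low-rank reconstructions, updown = (w1a @ w1b) * (w2a @ w2b). A shape check with hypothetical rank-2 factors:

    import torch

    w1a, w1b = torch.randn(8, 2), torch.randn(2, 16)
    w2a, w2b = torch.randn(8, 2), torch.randn(2, 16)

    updown = (w1a @ w1b) * (w2a @ w2b)
    print(updown.shape)  # torch.Size([8, 16])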
extensions-builtin/Lora/network_ia3.py
ADDED
@@ -0,0 +1,30 @@
+import network
+
+
+class ModuleTypeIa3(network.ModuleType):
+    def create_module(self, net: network.Network, weights: network.NetworkWeights):
+        if all(x in weights.w for x in ["weight"]):
+            return NetworkModuleIa3(net, weights)
+
+        return None
+
+
+class NetworkModuleIa3(network.NetworkModule):
+    def __init__(self, net: network.Network, weights: network.NetworkWeights):
+        super().__init__(net, weights)
+
+        self.w = weights.w["weight"]
+        self.on_input = weights.w["on_input"].item()
+
+    def calc_updown(self, orig_weight):
+        w = self.w.to(orig_weight.device, dtype=orig_weight.dtype)
+
+        output_shape = [w.size(0), orig_weight.size(1)]
+        if self.on_input:
+            output_shape.reverse()
+        else:
+            w = w.reshape(-1, 1)
+
+        updown = orig_weight * w
+
+        return self.finalize_updown(updown, orig_weight, output_shape)
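IA(3) stores only a per-channel scale vector, and the delta is the original weight rescaled elementwise. With on_input false, the vector is reshaped to a column so it broadcasts across input features (hypothetical shapes):

    import torch

    orig_weight = torch.randn(8, 16)
    w = torch.randn(8)  # one scale per output channel

    updown = orig_weight * w.reshape(-1, 1)
    print(updown.shape)  # torch.Size([8, 16])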
extensions-builtin/Lora/network_lokr.py
ADDED
@@ -0,0 +1,64 @@
+import torch
+
+import lyco_helpers
+import network
+
+
+class ModuleTypeLokr(network.ModuleType):
+    def create_module(self, net: network.Network, weights: network.NetworkWeights):
+        has_1 = "lokr_w1" in weights.w or ("lokr_w1_a" in weights.w and "lokr_w1_b" in weights.w)
+        has_2 = "lokr_w2" in weights.w or ("lokr_w2_a" in weights.w and "lokr_w2_b" in weights.w)
+        if has_1 and has_2:
+            return NetworkModuleLokr(net, weights)
+
+        return None
+
+
+def make_kron(orig_shape, w1, w2):
+    if len(w2.shape) == 4:
+        w1 = w1.unsqueeze(2).unsqueeze(2)
+    w2 = w2.contiguous()
+    return torch.kron(w1, w2).reshape(orig_shape)
+
+
+class NetworkModuleLokr(network.NetworkModule):
+    def __init__(self, net: network.Network, weights: network.NetworkWeights):
+        super().__init__(net, weights)
+
+        self.w1 = weights.w.get("lokr_w1")
+        self.w1a = weights.w.get("lokr_w1_a")
+        self.w1b = weights.w.get("lokr_w1_b")
+        self.dim = self.w1b.shape[0] if self.w1b is not None else self.dim
+        self.w2 = weights.w.get("lokr_w2")
+        self.w2a = weights.w.get("lokr_w2_a")
+        self.w2b = weights.w.get("lokr_w2_b")
+        self.dim = self.w2b.shape[0] if self.w2b is not None else self.dim
+        self.t2 = weights.w.get("lokr_t2")
+
+    def calc_updown(self, orig_weight):
+        if self.w1 is not None:
+            w1 = self.w1.to(orig_weight.device, dtype=orig_weight.dtype)
+        else:
+            w1a = self.w1a.to(orig_weight.device, dtype=orig_weight.dtype)
+            w1b = self.w1b.to(orig_weight.device, dtype=orig_weight.dtype)
+            w1 = w1a @ w1b
+
+        if self.w2 is not None:
+            w2 = self.w2.to(orig_weight.device, dtype=orig_weight.dtype)
+        elif self.t2 is None:
+            w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype)
+            w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype)
+            w2 = w2a @ w2b
+        else:
+            t2 = self.t2.to(orig_weight.device, dtype=orig_weight.dtype)
+            w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype)
+            w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype)
+            w2 = lyco_helpers.make_weight_cp(t2, w2a, w2b)
+
+        output_shape = [w1.size(0) * w2.size(0), w1.size(1) * w2.size(1)]
+        if len(orig_weight.shape) == 4:
+            output_shape = orig_weight.shape
+
+        updown = make_kron(output_shape, w1, w2)
+
+        return self.finalize_updown(updown, orig_weight, output_shape)
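LoKr represents the delta as a Kronecker product, so a large matrix is encoded by two much smaller factors: the Kronecker product of an (a, b) and a (c, d) matrix has shape (a*c, b*d). A quick check of what make_kron computes for 2D weights, with hypothetical factors:

    import torch

    w1 = torch.randn(4, 2)
    w2 = torch.randn(8, 16)

    updown = torch.kron(w1, w2)
    print(updown.shape)  # torch.Size([32, 32])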
extensions-builtin/Lora/network_lora.py
ADDED
@@ -0,0 +1,86 @@
+import torch
+
+import lyco_helpers
+import network
+from modules import devices
+
+
+class ModuleTypeLora(network.ModuleType):
+    def create_module(self, net: network.Network, weights: network.NetworkWeights):
+        if all(x in weights.w for x in ["lora_up.weight", "lora_down.weight"]):
+            return NetworkModuleLora(net, weights)
+
+        return None
+
+
+class NetworkModuleLora(network.NetworkModule):
+    def __init__(self, net: network.Network, weights: network.NetworkWeights):
+        super().__init__(net, weights)
+
+        self.up_model = self.create_module(weights.w, "lora_up.weight")
+        self.down_model = self.create_module(weights.w, "lora_down.weight")
+        self.mid_model = self.create_module(weights.w, "lora_mid.weight", none_ok=True)
+
+        self.dim = weights.w["lora_down.weight"].shape[0]
+
+    def create_module(self, weights, key, none_ok=False):
+        weight = weights.get(key)
+
+        if weight is None and none_ok:
+            return None
+
+        is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear, torch.nn.MultiheadAttention]
+        is_conv = type(self.sd_module) in [torch.nn.Conv2d]
+
+        if is_linear:
+            weight = weight.reshape(weight.shape[0], -1)
+            module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
+        elif (is_conv and key == "lora_down.weight") or key == "dyn_up":
+            if len(weight.shape) == 2:
+                weight = weight.reshape(weight.shape[0], -1, 1, 1)
+
+            if weight.shape[2] != 1 or weight.shape[3] != 1:
+                module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], self.sd_module.kernel_size, self.sd_module.stride, self.sd_module.padding, bias=False)
+            else:
+                module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False)
+        elif is_conv and key == "lora_mid.weight":
+            module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], self.sd_module.kernel_size, self.sd_module.stride, self.sd_module.padding, bias=False)
+        elif (is_conv and key == "lora_up.weight") or key == "dyn_down":
+            module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False)
+        else:
+            raise AssertionError(f'Lora layer {self.network_key} matched a layer with unsupported type: {type(self.sd_module).__name__}')
+
+        with torch.no_grad():
+            if weight.shape != module.weight.shape:
+                weight = weight.reshape(module.weight.shape)
+            module.weight.copy_(weight)
+
+        module.to(device=devices.cpu, dtype=devices.dtype)
+        module.weight.requires_grad_(False)
+
+        return module
+
+    def calc_updown(self, orig_weight):
+        up = self.up_model.weight.to(orig_weight.device, dtype=orig_weight.dtype)
+        down = self.down_model.weight.to(orig_weight.device, dtype=orig_weight.dtype)
+
+        output_shape = [up.size(0), down.size(1)]
+        if self.mid_model is not None:
+            # cp-decomposition
+            mid = self.mid_model.weight.to(orig_weight.device, dtype=orig_weight.dtype)
+            updown = lyco_helpers.rebuild_cp_decomposition(up, down, mid)
+            output_shape += mid.shape[2:]
+        else:
+            if len(down.shape) == 4:
+                output_shape += down.shape[2:]
+            updown = lyco_helpers.rebuild_conventional(up, down, output_shape, self.network.dyn_dim)
+
+        return self.finalize_updown(updown, orig_weight, output_shape)
+
+    def forward(self, x, y):
+        self.up_model.to(device=devices.device)
+        self.down_model.to(device=devices.device)
+
+        return y + self.up_model(self.down_model(x)) * self.multiplier() * self.calc_scale()
+
+
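For the conventional (non-CP) branch above, the LoRA delta for a Linear layer is just the product of the two stored matrices; scaling by calc_scale() * multiplier() happens later in finalize_updown(). With hypothetical rank-4 factors for a 768-wide layer:

    import torch

    down = torch.randn(4, 768)  # rank 4, 768 input features
    up = torch.randn(768, 4)    # 768 output features

    updown = up @ down
    print(updown.shape)  # torch.Size([768, 768])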
extensions-builtin/Lora/network_norm.py
ADDED
@@ -0,0 +1,28 @@
+import network
+
+
+class ModuleTypeNorm(network.ModuleType):
+    def create_module(self, net: network.Network, weights: network.NetworkWeights):
+        if all(x in weights.w for x in ["w_norm", "b_norm"]):
+            return NetworkModuleNorm(net, weights)
+
+        return None
+
+
+class NetworkModuleNorm(network.NetworkModule):
+    def __init__(self, net: network.Network, weights: network.NetworkWeights):
+        super().__init__(net, weights)
+
+        self.w_norm = weights.w.get("w_norm")
+        self.b_norm = weights.w.get("b_norm")
+
+    def calc_updown(self, orig_weight):
+        output_shape = self.w_norm.shape
+        updown = self.w_norm.to(orig_weight.device, dtype=orig_weight.dtype)
+
+        if self.b_norm is not None:
+            ex_bias = self.b_norm.to(orig_weight.device, dtype=orig_weight.dtype)
+        else:
+            ex_bias = None
+
+        return self.finalize_updown(updown, orig_weight, output_shape, ex_bias)
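A norm module carries additive deltas for normalization layers: w_norm is added to the layer's weight and b_norm to its bias (hypothetical shapes for a 32-channel norm):

    import torch

    weight, bias = torch.ones(32), torch.zeros(32)
    w_norm, b_norm = torch.full((32,), 0.05), torch.full((32,), 0.01)

    weight += w_norm
    bias += b_norm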
extensions-builtin/Lora/networks.py
ADDED
@@ -0,0 +1,571 @@
+import logging
+import os
+import re
+
+import lora_patches
+import network
+import network_lora
+import network_hada
+import network_ia3
+import network_lokr
+import network_full
+import network_norm
+
+import torch
+from typing import Union
+
+from modules import shared, devices, sd_models, errors, scripts, sd_hijack
+
+module_types = [
+    network_lora.ModuleTypeLora(),
+    network_hada.ModuleTypeHada(),
+    network_ia3.ModuleTypeIa3(),
+    network_lokr.ModuleTypeLokr(),
+    network_full.ModuleTypeFull(),
+    network_norm.ModuleTypeNorm(),
+]
+
+
+re_digits = re.compile(r"\d+")
+re_x_proj = re.compile(r"(.*)_([qkv]_proj)$")
+re_compiled = {}
+
+suffix_conversion = {
+    "attentions": {},
+    "resnets": {
+        "conv1": "in_layers_2",
+        "conv2": "out_layers_3",
+        "norm1": "in_layers_0",
+        "norm2": "out_layers_0",
+        "time_emb_proj": "emb_layers_1",
+        "conv_shortcut": "skip_connection",
+    }
+}
+
+
+def convert_diffusers_name_to_compvis(key, is_sd2):
+    def match(match_list, regex_text):
+        regex = re_compiled.get(regex_text)
+        if regex is None:
+            regex = re.compile(regex_text)
+            re_compiled[regex_text] = regex
+
+        r = re.match(regex, key)
+        if not r:
+            return False
+
+        match_list.clear()
+        match_list.extend([int(x) if re.match(re_digits, x) else x for x in r.groups()])
+        return True
+
+    m = []
+
+    if match(m, r"lora_unet_conv_in(.*)"):
+        return f'diffusion_model_input_blocks_0_0{m[0]}'
+
+    if match(m, r"lora_unet_conv_out(.*)"):
+        return f'diffusion_model_out_2{m[0]}'
+
+    if match(m, r"lora_unet_time_embedding_linear_(\d+)(.*)"):
+        return f"diffusion_model_time_embed_{m[0] * 2 - 2}{m[1]}"
+
+    if match(m, r"lora_unet_down_blocks_(\d+)_(attentions|resnets)_(\d+)_(.+)"):
+        suffix = suffix_conversion.get(m[1], {}).get(m[3], m[3])
+        return f"diffusion_model_input_blocks_{1 + m[0] * 3 + m[2]}_{1 if m[1] == 'attentions' else 0}_{suffix}"
+
+    if match(m, r"lora_unet_mid_block_(attentions|resnets)_(\d+)_(.+)"):
+        suffix = suffix_conversion.get(m[0], {}).get(m[2], m[2])
+        return f"diffusion_model_middle_block_{1 if m[0] == 'attentions' else m[1] * 2}_{suffix}"
+
+    if match(m, r"lora_unet_up_blocks_(\d+)_(attentions|resnets)_(\d+)_(.+)"):
+        suffix = suffix_conversion.get(m[1], {}).get(m[3], m[3])
+        return f"diffusion_model_output_blocks_{m[0] * 3 + m[2]}_{1 if m[1] == 'attentions' else 0}_{suffix}"
+
+    if match(m, r"lora_unet_down_blocks_(\d+)_downsamplers_0_conv"):
+        return f"diffusion_model_input_blocks_{3 + m[0] * 3}_0_op"
+
+    if match(m, r"lora_unet_up_blocks_(\d+)_upsamplers_0_conv"):
+        return f"diffusion_model_output_blocks_{2 + m[0] * 3}_{2 if m[0]>0 else 1}_conv"
+
+    if match(m, r"lora_te_text_model_encoder_layers_(\d+)_(.+)"):
+        if is_sd2:
+            if 'mlp_fc1' in m[1]:
+                return f"model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc1', 'mlp_c_fc')}"
+            elif 'mlp_fc2' in m[1]:
+                return f"model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc2', 'mlp_c_proj')}"
+            else:
+                return f"model_transformer_resblocks_{m[0]}_{m[1].replace('self_attn', 'attn')}"
+
+        return f"transformer_text_model_encoder_layers_{m[0]}_{m[1]}"
+
+    if match(m, r"lora_te2_text_model_encoder_layers_(\d+)_(.+)"):
+        if 'mlp_fc1' in m[1]:
+            return f"1_model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc1', 'mlp_c_fc')}"
+        elif 'mlp_fc2' in m[1]:
+            return f"1_model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc2', 'mlp_c_proj')}"
+        else:
+            return f"1_model_transformer_resblocks_{m[0]}_{m[1].replace('self_attn', 'attn')}"
+
+    return key
+
+
+def assign_network_names_to_compvis_modules(sd_model):
+    network_layer_mapping = {}
+
+    if shared.sd_model.is_sdxl:
+        for i, embedder in enumerate(shared.sd_model.conditioner.embedders):
+            if not hasattr(embedder, 'wrapped'):
+                continue
+
+            for name, module in embedder.wrapped.named_modules():
+                network_name = f'{i}_{name.replace(".", "_")}'
+                network_layer_mapping[network_name] = module
+                module.network_layer_name = network_name
+    else:
+        for name, module in shared.sd_model.cond_stage_model.wrapped.named_modules():
+            network_name = name.replace(".", "_")
+            network_layer_mapping[network_name] = module
+            module.network_layer_name = network_name
+
+    for name, module in shared.sd_model.model.named_modules():
+        network_name = name.replace(".", "_")
+        network_layer_mapping[network_name] = module
+        module.network_layer_name = network_name
+
+    sd_model.network_layer_mapping = network_layer_mapping
+
+
+def load_network(name, network_on_disk):
+    net = network.Network(name, network_on_disk)
+    net.mtime = os.path.getmtime(network_on_disk.filename)
+
+    sd = sd_models.read_state_dict(network_on_disk.filename)
+
+    # this should not be needed but is here as an emergency fix for an unknown error people are experiencing in 1.2.0
+    if not hasattr(shared.sd_model, 'network_layer_mapping'):
+        assign_network_names_to_compvis_modules(shared.sd_model)
+
+    keys_failed_to_match = {}
+    is_sd2 = 'model_transformer_resblocks' in shared.sd_model.network_layer_mapping
+
+    matched_networks = {}
+
+    for key_network, weight in sd.items():
+        key_network_without_network_parts, network_part = key_network.split(".", 1)
+
+        key = convert_diffusers_name_to_compvis(key_network_without_network_parts, is_sd2)
+        sd_module = shared.sd_model.network_layer_mapping.get(key, None)
+
+        if sd_module is None:
+            m = re_x_proj.match(key)
+            if m:
+                sd_module = shared.sd_model.network_layer_mapping.get(m.group(1), None)
+
+        # SDXL loras seem to already have correct compvis keys, so only need to replace "lora_unet" with "diffusion_model"
+        if sd_module is None and "lora_unet" in key_network_without_network_parts:
+            key = key_network_without_network_parts.replace("lora_unet", "diffusion_model")
+            sd_module = shared.sd_model.network_layer_mapping.get(key, None)
+        elif sd_module is None and "lora_te1_text_model" in key_network_without_network_parts:
+            key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model")
+            sd_module = shared.sd_model.network_layer_mapping.get(key, None)
+
+            # some SD1 Loras also have correct compvis keys
+            if sd_module is None:
+                key = key_network_without_network_parts.replace("lora_te1_text_model", "transformer_text_model")
+                sd_module = shared.sd_model.network_layer_mapping.get(key, None)
+
+        if sd_module is None:
+            keys_failed_to_match[key_network] = key
+            continue
+
+        if key not in matched_networks:
+            matched_networks[key] = network.NetworkWeights(network_key=key_network, sd_key=key, w={}, sd_module=sd_module)
+
+        matched_networks[key].w[network_part] = weight
+
+    for key, weights in matched_networks.items():
+        net_module = None
+        for nettype in module_types:
+            net_module = nettype.create_module(net, weights)
+            if net_module is not None:
+                break
+
+        if net_module is None:
+            raise AssertionError(f"Could not find a module type (out of {', '.join([x.__class__.__name__ for x in module_types])}) that would accept those keys: {', '.join(weights.w)}")
+
+        net.modules[key] = net_module
+
+    if keys_failed_to_match:
+        logging.debug(f"Network {network_on_disk.filename} didn't match keys: {keys_failed_to_match}")
+
+    return net
+
+
+def purge_networks_from_memory():
+    while len(networks_in_memory) > shared.opts.lora_in_memory_limit and len(networks_in_memory) > 0:
+        name = next(iter(networks_in_memory))
+        networks_in_memory.pop(name, None)
+
+    devices.torch_gc()
+
+
+def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
+    already_loaded = {}
+
+    for net in loaded_networks:
+        if net.name in names:
+            already_loaded[net.name] = net
+
+    loaded_networks.clear()
+
+    networks_on_disk = [available_network_aliases.get(name, None) for name in names]
+    if any(x is None for x in networks_on_disk):
+        list_available_networks()
+
+        networks_on_disk = [available_network_aliases.get(name, None) for name in names]
+
+    failed_to_load_networks = []
+
+    for i, (network_on_disk, name) in enumerate(zip(networks_on_disk, names)):
+        net = already_loaded.get(name, None)
+
+        if network_on_disk is not None:
+            if net is None:
+                net = networks_in_memory.get(name)
+
+            if net is None or os.path.getmtime(network_on_disk.filename) > net.mtime:
+                try:
+                    net = load_network(name, network_on_disk)
+
+                    networks_in_memory.pop(name, None)
+                    networks_in_memory[name] = net
+                except Exception as e:
+                    errors.display(e, f"loading network {network_on_disk.filename}")
+                    continue
+
+            net.mentioned_name = name
+
+            network_on_disk.read_hash()
+
+        if net is None:
+            failed_to_load_networks.append(name)
+            logging.info(f"Couldn't find network with name {name}")
+            continue
+
+        net.te_multiplier = te_multipliers[i] if te_multipliers else 1.0
+        net.unet_multiplier = unet_multipliers[i] if unet_multipliers else 1.0
+        net.dyn_dim = dyn_dims[i] if dyn_dims else None  # dyn_dim is a rank cap, so the fallback must be None rather than a float
+        loaded_networks.append(net)
+
+    if failed_to_load_networks:
+        sd_hijack.model_hijack.comments.append("Networks not found: " + ", ".join(failed_to_load_networks))
+
+    purge_networks_from_memory()
+
+
+def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
+    weights_backup = getattr(self, "network_weights_backup", None)
+    bias_backup = getattr(self, "network_bias_backup", None)
+
+    if weights_backup is None and bias_backup is None:
+        return
+
+    if weights_backup is not None:
+        if isinstance(self, torch.nn.MultiheadAttention):
+            self.in_proj_weight.copy_(weights_backup[0])
+            self.out_proj.weight.copy_(weights_backup[1])
+        else:
+            self.weight.copy_(weights_backup)
+
+    if bias_backup is not None:
+        if isinstance(self, torch.nn.MultiheadAttention):
+            self.out_proj.bias.copy_(bias_backup)
+        else:
+            self.bias.copy_(bias_backup)
+    else:
+        if isinstance(self, torch.nn.MultiheadAttention):
+            self.out_proj.bias = None
+        else:
+            self.bias = None
+
+
+def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
+    """
+    Applies the currently selected set of networks to the weights of torch layer self.
+    If weights already have this particular set of networks applied, does nothing.
+    If not, restores original weights from backup and alters weights according to networks.
+    """
+
+    network_layer_name = getattr(self, 'network_layer_name', None)
+    if network_layer_name is None:
+        return
+
+    current_names = getattr(self, "network_current_names", ())
+    wanted_names = tuple((x.name, x.te_multiplier, x.unet_multiplier, x.dyn_dim) for x in loaded_networks)
+
+    weights_backup = getattr(self, "network_weights_backup", None)
+    if weights_backup is None and wanted_names != ():
+        if current_names != ():
+            raise RuntimeError("no backup weights found and current weights are not unchanged")
+
+        if isinstance(self, torch.nn.MultiheadAttention):
+            weights_backup = (self.in_proj_weight.to(devices.cpu, copy=True), self.out_proj.weight.to(devices.cpu, copy=True))
+        else:
+            weights_backup = self.weight.to(devices.cpu, copy=True)
+
+        self.network_weights_backup = weights_backup
+
+    bias_backup = getattr(self, "network_bias_backup", None)
+    if bias_backup is None:
+        if isinstance(self, torch.nn.MultiheadAttention) and self.out_proj.bias is not None:
+            bias_backup = self.out_proj.bias.to(devices.cpu, copy=True)
+        elif getattr(self, 'bias', None) is not None:
+            bias_backup = self.bias.to(devices.cpu, copy=True)
+        else:
+            bias_backup = None
+        self.network_bias_backup = bias_backup
+
+    if current_names != wanted_names:
+        network_restore_weights_from_backup(self)
+
+        for net in loaded_networks:
+            module = net.modules.get(network_layer_name, None)
+            if module is not None and hasattr(self, 'weight'):
+                try:
+                    with torch.no_grad():
+                        updown, ex_bias = module.calc_updown(self.weight)
+
+                        if len(self.weight.shape) == 4 and self.weight.shape[1] == 9:
+                            # inpainting model. zero pad updown to make channel[1] 4 to 9
+                            updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))
+
+                        self.weight += updown
+                        if ex_bias is not None and hasattr(self, 'bias'):
+                            if self.bias is None:
+                                self.bias = torch.nn.Parameter(ex_bias)
+                            else:
+                                self.bias += ex_bias
+                except RuntimeError as e:
+                    logging.debug(f"Network {net.name} layer {network_layer_name}: {e}")
+                    extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
+
+                continue
+
+            module_q = net.modules.get(network_layer_name + "_q_proj", None)
+            module_k = net.modules.get(network_layer_name + "_k_proj", None)
+            module_v = net.modules.get(network_layer_name + "_v_proj", None)
+            module_out = net.modules.get(network_layer_name + "_out_proj", None)
+
+            if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out:
+                try:
+                    with torch.no_grad():
+                        updown_q, _ = module_q.calc_updown(self.in_proj_weight)
+                        updown_k, _ = module_k.calc_updown(self.in_proj_weight)
+                        updown_v, _ = module_v.calc_updown(self.in_proj_weight)
+                        updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
+                        updown_out, ex_bias = module_out.calc_updown(self.out_proj.weight)
+
+                        self.in_proj_weight += updown_qkv
+                        self.out_proj.weight += updown_out
+                        if ex_bias is not None:
+                            if self.out_proj.bias is None:
+                                self.out_proj.bias = torch.nn.Parameter(ex_bias)
+                            else:
+                                self.out_proj.bias += ex_bias
+
+                except RuntimeError as e:
+                    logging.debug(f"Network {net.name} layer {network_layer_name}: {e}")
+                    extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
+
+                continue
+
+            if module is None:
+                continue
+
+            logging.debug(f"Network {net.name} layer {network_layer_name}: couldn't find supported operation")
+            extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
+
+        self.network_current_names = wanted_names
+
+
+def network_forward(module, input, original_forward):
+    """
+    Old way of applying Lora by executing operations during layer's forward.
+    Stacking many loras this way results in big performance degradation.
+    """
+
+    if len(loaded_networks) == 0:
+        return original_forward(module, input)
+
+    input = devices.cond_cast_unet(input)
+
+    network_restore_weights_from_backup(module)
+    network_reset_cached_weight(module)
+
+    y = original_forward(module, input)
+
+    network_layer_name = getattr(module, 'network_layer_name', None)
+    for lora in loaded_networks:
+        module = lora.modules.get(network_layer_name, None)
+        if module is None:
+            continue
+
+        y = module.forward(input, y)
+
+    return y
+
+
+def network_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
+    self.network_current_names = ()
+    self.network_weights_backup = None
+
+
+def network_Linear_forward(self, input):
+    if shared.opts.lora_functional:
+        return network_forward(self, input, originals.Linear_forward)
+
+    network_apply_weights(self)
+
+    return originals.Linear_forward(self, input)
+
+
+def network_Linear_load_state_dict(self, *args, **kwargs):
+    network_reset_cached_weight(self)
+
+    return originals.Linear_load_state_dict(self, *args, **kwargs)
+
+
+def network_Conv2d_forward(self, input):
+    if shared.opts.lora_functional:
+        return network_forward(self, input, originals.Conv2d_forward)
+
+    network_apply_weights(self)
+
+    return originals.Conv2d_forward(self, input)
+
+
+def network_Conv2d_load_state_dict(self, *args, **kwargs):
+    network_reset_cached_weight(self)
+
+    return originals.Conv2d_load_state_dict(self, *args, **kwargs)
+
+
+def network_GroupNorm_forward(self, input):
+    if shared.opts.lora_functional:
+        return network_forward(self, input, originals.GroupNorm_forward)
+
+    network_apply_weights(self)
+
+    return originals.GroupNorm_forward(self, input)
+
+
+def network_GroupNorm_load_state_dict(self, *args, **kwargs):
+    network_reset_cached_weight(self)
+
+    return originals.GroupNorm_load_state_dict(self, *args, **kwargs)
+
+
+def network_LayerNorm_forward(self, input):
+    if shared.opts.lora_functional:
+        return network_forward(self, input, originals.LayerNorm_forward)
+
+    network_apply_weights(self)
+
+    return originals.LayerNorm_forward(self, input)
+
+
+def network_LayerNorm_load_state_dict(self, *args, **kwargs):
+    network_reset_cached_weight(self)
+
+    return originals.LayerNorm_load_state_dict(self, *args, **kwargs)
+
+
+def network_MultiheadAttention_forward(self, *args, **kwargs):
+    network_apply_weights(self)
+
+    return originals.MultiheadAttention_forward(self, *args, **kwargs)
+
+
+def network_MultiheadAttention_load_state_dict(self, *args, **kwargs):
+    network_reset_cached_weight(self)
+
+    return originals.MultiheadAttention_load_state_dict(self, *args, **kwargs)
+
+
+def list_available_networks():
+    available_networks.clear()
+    available_network_aliases.clear()
+    forbidden_network_aliases.clear()
+    available_network_hash_lookup.clear()
+    forbidden_network_aliases.update({"none": 1, "Addams": 1})
+
+    os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)
+
+    candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
+    candidates += list(shared.walk_files(shared.cmd_opts.lyco_dir_backcompat, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
+    for filename in candidates:
+        if os.path.isdir(filename):
+            continue
+
+        name = os.path.splitext(os.path.basename(filename))[0]
+        try:
+            entry = network.NetworkOnDisk(name, filename)
+        except OSError:  # should catch FileNotFoundError and PermissionError etc.
+            errors.report(f"Failed to load network {name} from {filename}", exc_info=True)
+            continue
+
+        available_networks[name] = entry
+
+        if entry.alias in available_network_aliases:
+            forbidden_network_aliases[entry.alias.lower()] = 1
+
+        available_network_aliases[name] = entry
+        available_network_aliases[entry.alias] = entry
+
+
+re_network_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)")
+
+
+def infotext_pasted(infotext, params):
+    if "AddNet Module 1" in [x[1] for x in scripts.scripts_txt2img.infotext_fields]:
+        return  # if the other extension is active, it will handle those fields, no need to do anything
+
+    added = []
+
+    for k in params:
+        if not k.startswith("AddNet Model "):
+            continue
+
+        num = k[13:]
+
+        if params.get("AddNet Module " + num) != "LoRA":
+            continue
+
+        name = params.get("AddNet Model " + num)
+        if name is None:
+            continue
+
+        m = re_network_name.match(name)
+        if m:
+            name = m.group(1)
+
+        multiplier = params.get("AddNet Weight A " + num, "1.0")
+
+        added.append(f"<lora:{name}:{multiplier}>")
+
+    if added:
+        params["Prompt"] += "\n" + "".join(added)
+
+
+originals: lora_patches.LoraPatches = None
+
+extra_network_lora = None
+
+available_networks = {}
+available_network_aliases = {}
+loaded_networks = []
+networks_in_memory = {}
+available_network_hash_lookup = {}
+forbidden_network_aliases = {}
+
+list_available_networks()
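An example of the diffusers-to-compvis key mapping performed by convert_diffusers_name_to_compvis above, run inside this module with a kohya-style key; the result follows the down_blocks branch (block index 1 + 0*3 + 1 = 2, sub-block 1 because the layer is in "attentions"):

    key = convert_diffusers_name_to_compvis("lora_unet_down_blocks_0_attentions_1_proj_in", is_sd2=False)
    print(key)  # diffusion_model_input_blocks_2_1_proj_in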
extensions-builtin/Lora/preload.py
ADDED
@@ -0,0 +1,7 @@
+import os
+from modules import paths
+
+
+def preload(parser):
+    parser.add_argument("--lora-dir", type=str, help="Path to directory with Lora networks.", default=os.path.join(paths.models_path, 'Lora'))
+    parser.add_argument("--lyco-dir-backcompat", type=str, help="Path to directory with LyCORIS networks (for backwards compatibility; can also use --lyco-dir).", default=os.path.join(paths.models_path, 'LyCORIS'))
extensions-builtin/Lora/scripts/lora_script.py
ADDED
@@ -0,0 +1,99 @@
+import re
+
+import gradio as gr
+from fastapi import FastAPI
+
+import network
+import networks
+import lora  # noqa:F401
+import lora_patches
+import extra_networks_lora
+import ui_extra_networks_lora
+from modules import script_callbacks, ui_extra_networks, extra_networks, shared
+
+
+def unload():
+    networks.originals.undo()
+
+
+def before_ui():
+    ui_extra_networks.register_page(ui_extra_networks_lora.ExtraNetworksPageLora())
+
+    networks.extra_network_lora = extra_networks_lora.ExtraNetworkLora()
+    extra_networks.register_extra_network(networks.extra_network_lora)
+    extra_networks.register_extra_network_alias(networks.extra_network_lora, "lyco")
+
+
+networks.originals = lora_patches.LoraPatches()
+
+script_callbacks.on_model_loaded(networks.assign_network_names_to_compvis_modules)
+script_callbacks.on_script_unloaded(unload)
+script_callbacks.on_before_ui(before_ui)
+script_callbacks.on_infotext_pasted(networks.infotext_pasted)
+
+
+shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
+    "sd_lora": shared.OptionInfo("None", "Add network to prompt", gr.Dropdown, lambda: {"choices": ["None", *networks.available_networks]}, refresh=networks.list_available_networks),
+    "lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to Lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}),
+    "lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
+    "lora_show_all": shared.OptionInfo(False, "Always show all networks on the Lora page").info("otherwise, those detected as for incompatible version of Stable Diffusion will be hidden"),
+    "lora_hide_unknown_for_versions": shared.OptionInfo([], "Hide networks of unknown versions for model versions", gr.CheckboxGroup, {"choices": ["SD1", "SD2", "SDXL"]}),
+    "lora_in_memory_limit": shared.OptionInfo(0, "Number of Lora networks to keep cached in memory", gr.Number, {"precision": 0}),
+}))
+
+
+shared.options_templates.update(shared.options_section(('compatibility', "Compatibility"), {
+    "lora_functional": shared.OptionInfo(False, "Lora/Networks: use old method that takes longer when you have multiple Loras active and produces same results as kohya-ss/sd-webui-additional-networks extension"),
+}))
+
+
+def create_lora_json(obj: network.NetworkOnDisk):
+    return {
+        "name": obj.name,
+        "alias": obj.alias,
+        "path": obj.filename,
+        "metadata": obj.metadata,
+    }
+
+
+def api_networks(_: gr.Blocks, app: FastAPI):
+    @app.get("/sdapi/v1/loras")
+    async def get_loras():
+        return [create_lora_json(obj) for obj in networks.available_networks.values()]
+
+    @app.post("/sdapi/v1/refresh-loras")
+    async def refresh_loras():
+        return networks.list_available_networks()
+
+
+script_callbacks.on_app_started(api_networks)
+
+re_lora = re.compile("<lora:([^:]+):")
+
+
+def infotext_pasted(infotext, d):
+    hashes = d.get("Lora hashes")
+    if not hashes:
+        return
+
+    hashes = [x.strip().split(':', 1) for x in hashes.split(",")]
+    hashes = {x[0].strip().replace(",", ""): x[1].strip() for x in hashes}
+
+    def network_replacement(m):
+        alias = m.group(1)
+        shorthash = hashes.get(alias)
+        if shorthash is None:
+            return m.group(0)
+
+        network_on_disk = networks.available_network_hash_lookup.get(shorthash)
+        if network_on_disk is None:
+            return m.group(0)
+
+        return f'<lora:{network_on_disk.get_alias()}:'
+
+    d["Prompt"] = re.sub(re_lora, network_replacement, d["Prompt"])
+
+
+script_callbacks.on_infotext_pasted(infotext_pasted)
+
+shared.opts.onchange("lora_in_memory_limit", networks.purge_networks_from_memory)
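The two endpoints registered in api_networks can be exercised from outside the process; a sketch assuming a locally running webui with the API enabled on the default address:

    import requests

    base = "http://127.0.0.1:7860"
    requests.post(f"{base}/sdapi/v1/refresh-loras")  # rescan the Lora directory
    for entry in requests.get(f"{base}/sdapi/v1/loras").json():
        print(entry["name"], entry["alias"])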
extensions-builtin/Lora/ui_edit_user_metadata.py
ADDED
@@ -0,0 +1,217 @@
+import datetime
+import html
+import random
+
+import gradio as gr
+import re
+
+from modules import ui_extra_networks_user_metadata
+
+
+def is_non_comma_tagset(tags):
+    average_tag_length = sum(len(x) for x in tags.keys()) / len(tags)
+
+    return average_tag_length >= 16
+
+
+re_word = re.compile(r"[-_\w']+")
+re_comma = re.compile(r" *, *")
+
+
+def build_tags(metadata):
+    tags = {}
+
+    for _, tags_dict in metadata.get("ss_tag_frequency", {}).items():
+        for tag, tag_count in tags_dict.items():
+            tag = tag.strip()
+            tags[tag] = tags.get(tag, 0) + int(tag_count)
+
+    if tags and is_non_comma_tagset(tags):
+        new_tags = {}
+
+        for text, text_count in tags.items():
+            for word in re.findall(re_word, text):
+                if len(word) < 3:
+                    continue
+
+                new_tags[word] = new_tags.get(word, 0) + text_count
+
+        tags = new_tags
+
+    ordered_tags = sorted(tags.keys(), key=tags.get, reverse=True)
+
+    return [(tag, tags[tag]) for tag in ordered_tags]
+
+
+class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor):
+    def __init__(self, ui, tabname, page):
+        super().__init__(ui, tabname, page)
+
+        self.select_sd_version = None
+
+        self.taginfo = None
+        self.edit_activation_text = None
+        self.slider_preferred_weight = None
+        self.edit_notes = None
+
+    def save_lora_user_metadata(self, name, desc, sd_version, activation_text, preferred_weight, notes):
+        user_metadata = self.get_user_metadata(name)
+        user_metadata["description"] = desc
+        user_metadata["sd version"] = sd_version
+        user_metadata["activation text"] = activation_text
+        user_metadata["preferred weight"] = preferred_weight
+        user_metadata["notes"] = notes
+
+        self.write_user_metadata(name, user_metadata)
+
+    def get_metadata_table(self, name):
+        table = super().get_metadata_table(name)
+        item = self.page.items.get(name, {})
+        metadata = item.get("metadata") or {}
+
+        keys = {
+            'ss_output_name': "Output name:",
+            'ss_sd_model_name': "Model:",
+            'ss_clip_skip': "Clip skip:",
+            'ss_network_module': "Kohya module:",
+        }
+
+        for key, label in keys.items():
+            value = metadata.get(key, None)
+            if value is not None and str(value) != "None":
+                table.append((label, html.escape(value)))
+
+        ss_training_started_at = metadata.get('ss_training_started_at')
+        if ss_training_started_at:
+            table.append(("Date trained:", datetime.datetime.utcfromtimestamp(float(ss_training_started_at)).strftime('%Y-%m-%d %H:%M')))
+
+        ss_bucket_info = metadata.get("ss_bucket_info")
+        if ss_bucket_info and "buckets" in ss_bucket_info:
+            resolutions = {}
+            for _, bucket in ss_bucket_info["buckets"].items():
+                resolution = bucket["resolution"]
+                resolution = f'{resolution[1]}x{resolution[0]}'
+
+                resolutions[resolution] = resolutions.get(resolution, 0) + int(bucket["count"])
+
+            resolutions_list = sorted(resolutions.keys(), key=resolutions.get, reverse=True)
+            resolutions_text = html.escape(", ".join(resolutions_list[0:4]))
+            if len(resolutions) > 4:
+                resolutions_text += ", ..."
+            resolutions_text = f"<span title='{html.escape(', '.join(resolutions_list))}'>{resolutions_text}</span>"
+
+            table.append(('Resolutions:' if len(resolutions_list) > 1 else 'Resolution:', resolutions_text))
+
+        image_count = 0
+        for _, params in metadata.get("ss_dataset_dirs", {}).items():
+            image_count += int(params.get("img_count", 0))
+
+        if image_count:
+            table.append(("Dataset size:", image_count))
+
+        return table
+
+    def put_values_into_components(self, name):
+        user_metadata = self.get_user_metadata(name)
+        values = super().put_values_into_components(name)
+
+        item = self.page.items.get(name, {})
+        metadata = item.get("metadata") or {}
+
+        tags = build_tags(metadata)
+        gradio_tags = [(tag, str(count)) for tag, count in tags[0:24]]
+
+        return [
+            *values[0:5],
+            item.get("sd_version", "Unknown"),
+            gr.HighlightedText.update(value=gradio_tags, visible=True if tags else False),
+            user_metadata.get('activation text', ''),
+            float(user_metadata.get('preferred weight', 0.0)),
+            gr.update(visible=True if tags else False),
+            gr.update(value=self.generate_random_prompt_from_tags(tags), visible=True if tags else False),
+        ]
+
+    def generate_random_prompt(self, name):
+        item = self.page.items.get(name, {})
+        metadata = item.get("metadata") or {}
+        tags = build_tags(metadata)
+
+        return self.generate_random_prompt_from_tags(tags)
+
+    def generate_random_prompt_from_tags(self, tags):
+        max_count = None
+        res = []
+        for tag, count in tags:
+            if not max_count:
+                max_count = count
+
+            v = random.random() * max_count
+            if count > v:
+                res.append(tag)
+
+        return ", ".join(sorted(res))
+
+    def create_extra_default_items_in_left_column(self):
+
+        # this would be a lot better as gr.Radio but I can't make it work
+        self.select_sd_version = gr.Dropdown(['SD1', 'SD2', 'SDXL', 'Unknown'], value='Unknown', label='Stable Diffusion version', interactive=True)
+
+    def create_editor(self):
+        self.create_default_editor_elems()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import datetime
import html
import random

import gradio as gr
import re

from modules import ui_extra_networks_user_metadata


def is_non_comma_tagset(tags):
    average_tag_length = sum(len(x) for x in tags.keys()) / len(tags)

    return average_tag_length >= 16


re_word = re.compile(r"[-_\w']+")
re_comma = re.compile(r" *, *")


def build_tags(metadata):
    tags = {}

    for _, tags_dict in metadata.get("ss_tag_frequency", {}).items():
        for tag, tag_count in tags_dict.items():
            tag = tag.strip()
            tags[tag] = tags.get(tag, 0) + int(tag_count)

    if tags and is_non_comma_tagset(tags):
        new_tags = {}

        for text, text_count in tags.items():
            for word in re.findall(re_word, text):
                if len(word) < 3:
                    continue

                new_tags[word] = new_tags.get(word, 0) + text_count

        tags = new_tags

    ordered_tags = sorted(tags.keys(), key=tags.get, reverse=True)

    return [(tag, tags[tag]) for tag in ordered_tags]


class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor):
    def __init__(self, ui, tabname, page):
        super().__init__(ui, tabname, page)

        self.select_sd_version = None

        self.taginfo = None
        self.edit_activation_text = None
        self.slider_preferred_weight = None
        self.edit_notes = None

    def save_lora_user_metadata(self, name, desc, sd_version, activation_text, preferred_weight, notes):
        user_metadata = self.get_user_metadata(name)
        user_metadata["description"] = desc
        user_metadata["sd version"] = sd_version
        user_metadata["activation text"] = activation_text
        user_metadata["preferred weight"] = preferred_weight
        user_metadata["notes"] = notes

        self.write_user_metadata(name, user_metadata)

    def get_metadata_table(self, name):
        table = super().get_metadata_table(name)
        item = self.page.items.get(name, {})
        metadata = item.get("metadata") or {}

        keys = {
            'ss_output_name': "Output name:",
            'ss_sd_model_name': "Model:",
            'ss_clip_skip': "Clip skip:",
            'ss_network_module': "Kohya module:",
        }

        for key, label in keys.items():
            value = metadata.get(key, None)
            if value is not None and str(value) != "None":
                table.append((label, html.escape(value)))

        ss_training_started_at = metadata.get('ss_training_started_at')
        if ss_training_started_at:
            table.append(("Date trained:", datetime.datetime.utcfromtimestamp(float(ss_training_started_at)).strftime('%Y-%m-%d %H:%M')))

        ss_bucket_info = metadata.get("ss_bucket_info")
        if ss_bucket_info and "buckets" in ss_bucket_info:
            resolutions = {}
            for _, bucket in ss_bucket_info["buckets"].items():
                resolution = bucket["resolution"]
                resolution = f'{resolution[1]}x{resolution[0]}'

                resolutions[resolution] = resolutions.get(resolution, 0) + int(bucket["count"])

            resolutions_list = sorted(resolutions.keys(), key=resolutions.get, reverse=True)
            resolutions_text = html.escape(", ".join(resolutions_list[0:4]))
            if len(resolutions) > 4:
                resolutions_text += ", ..."
                resolutions_text = f"<span title='{html.escape(', '.join(resolutions_list))}'>{resolutions_text}</span>"

            table.append(('Resolutions:' if len(resolutions_list) > 1 else 'Resolution:', resolutions_text))

        image_count = 0
        for _, params in metadata.get("ss_dataset_dirs", {}).items():
            image_count += int(params.get("img_count", 0))

        if image_count:
            table.append(("Dataset size:", image_count))

        return table

    def put_values_into_components(self, name):
        user_metadata = self.get_user_metadata(name)
        values = super().put_values_into_components(name)

        item = self.page.items.get(name, {})
        metadata = item.get("metadata") or {}

        tags = build_tags(metadata)
        gradio_tags = [(tag, str(count)) for tag, count in tags[0:24]]

        return [
            *values[0:5],
            item.get("sd_version", "Unknown"),
            gr.HighlightedText.update(value=gradio_tags, visible=True if tags else False),
            user_metadata.get('activation text', ''),
            float(user_metadata.get('preferred weight', 0.0)),
            gr.update(visible=True if tags else False),
            gr.update(value=self.generate_random_prompt_from_tags(tags), visible=True if tags else False),
        ]

    def generate_random_prompt(self, name):
        item = self.page.items.get(name, {})
        metadata = item.get("metadata") or {}
        tags = build_tags(metadata)

        return self.generate_random_prompt_from_tags(tags)

    def generate_random_prompt_from_tags(self, tags):
        max_count = None
        res = []
        for tag, count in tags:
            if not max_count:
                max_count = count

            v = random.random() * max_count
            if count > v:
                res.append(tag)

        return ", ".join(sorted(res))

    def create_extra_default_items_in_left_column(self):

        # this would be a lot better as gr.Radio but I can't make it work
        self.select_sd_version = gr.Dropdown(['SD1', 'SD2', 'SDXL', 'Unknown'], value='Unknown', label='Stable Diffusion version', interactive=True)

    def create_editor(self):
        self.create_default_editor_elems()

        self.taginfo = gr.HighlightedText(label="Training dataset tags")
        self.edit_activation_text = gr.Text(label='Activation text', info="Will be added to prompt along with Lora")
        self.slider_preferred_weight = gr.Slider(label='Preferred weight', info="Set to 0 to disable", minimum=0.0, maximum=2.0, step=0.01)

        with gr.Row() as row_random_prompt:
            with gr.Column(scale=8):
                random_prompt = gr.Textbox(label='Random prompt', lines=4, max_lines=4, interactive=False)

            with gr.Column(scale=1, min_width=120):
                generate_random_prompt = gr.Button('Generate', size="lg", scale=1)

        self.edit_notes = gr.TextArea(label='Notes', lines=4)

        generate_random_prompt.click(fn=self.generate_random_prompt, inputs=[self.edit_name_input], outputs=[random_prompt], show_progress=False)

        def select_tag(activation_text, evt: gr.SelectData):
            tag = evt.value[0]

            words = re.split(re_comma, activation_text)
            if tag in words:
                words = [x for x in words if x != tag and x.strip()]
                return ", ".join(words)

            return activation_text + ", " + tag if activation_text else tag

        self.taginfo.select(fn=select_tag, inputs=[self.edit_activation_text], outputs=[self.edit_activation_text], show_progress=False)

        self.create_default_buttons()

        viewed_components = [
            self.edit_name,
            self.edit_description,
            self.html_filedata,
            self.html_preview,
            self.edit_notes,
            self.select_sd_version,
            self.taginfo,
            self.edit_activation_text,
            self.slider_preferred_weight,
            row_random_prompt,
            random_prompt,
        ]

        self.button_edit\
            .click(fn=self.put_values_into_components, inputs=[self.edit_name_input], outputs=viewed_components)\
            .then(fn=lambda: gr.update(visible=True), inputs=[], outputs=[self.box])

        edited_components = [
            self.edit_description,
            self.select_sd_version,
            self.edit_activation_text,
            self.slider_preferred_weight,
            self.edit_notes,
        ]

        self.setup_save_handler(self.button_save, self.save_lora_user_metadata, edited_components)
extensions-builtin/Lora/ui_extra_networks_lora.py
ADDED
@@ -0,0 +1,79 @@
import os

import network
import networks

from modules import shared, ui_extra_networks
from modules.ui_extra_networks import quote_js
from ui_edit_user_metadata import LoraUserMetadataEditor


class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
    def __init__(self):
        super().__init__('Lora')

    def refresh(self):
        networks.list_available_networks()

    def create_item(self, name, index=None, enable_filter=True):
        lora_on_disk = networks.available_networks.get(name)

        path, ext = os.path.splitext(lora_on_disk.filename)

        alias = lora_on_disk.get_alias()

        item = {
            "name": name,
            "filename": lora_on_disk.filename,
            "shorthash": lora_on_disk.shorthash,
            "preview": self.find_preview(path),
            "description": self.find_description(path),
            "search_term": self.search_terms_from_path(lora_on_disk.filename) + " " + (lora_on_disk.hash or ""),
            "local_preview": f"{path}.{shared.opts.samples_format}",
            "metadata": lora_on_disk.metadata,
            "sort_keys": {'default': index, **self.get_sort_keys(lora_on_disk.filename)},
            "sd_version": lora_on_disk.sd_version.name,
        }

        self.read_user_metadata(item)
        activation_text = item["user_metadata"].get("activation text")
        preferred_weight = item["user_metadata"].get("preferred weight", 0.0)
        item["prompt"] = quote_js(f"<lora:{alias}:") + " + " + (str(preferred_weight) if preferred_weight else "opts.extra_networks_default_multiplier") + " + " + quote_js(">")

        if activation_text:
            item["prompt"] += " + " + quote_js(" " + activation_text)

        sd_version = item["user_metadata"].get("sd version")
        if sd_version in network.SdVersion.__members__:
            item["sd_version"] = sd_version
            sd_version = network.SdVersion[sd_version]
        else:
            sd_version = lora_on_disk.sd_version

        if shared.opts.lora_show_all or not enable_filter:
            pass
        elif sd_version == network.SdVersion.Unknown:
            model_version = network.SdVersion.SDXL if shared.sd_model.is_sdxl else network.SdVersion.SD2 if shared.sd_model.is_sd2 else network.SdVersion.SD1
            if model_version.name in shared.opts.lora_hide_unknown_for_versions:
                return None
        elif shared.sd_model.is_sdxl and sd_version != network.SdVersion.SDXL:
            return None
        elif shared.sd_model.is_sd2 and sd_version != network.SdVersion.SD2:
            return None
        elif shared.sd_model.is_sd1 and sd_version != network.SdVersion.SD1:
            return None

        return item

    def list_items(self):
        for index, name in enumerate(networks.available_networks):
            item = self.create_item(name, index)

            if item is not None:
                yield item

    def allowed_directories_for_previews(self):
        return [shared.cmd_opts.lora_dir, shared.cmd_opts.lyco_dir_backcompat]

    def create_user_metadata_editor(self, ui, tabname):
        return LoraUserMetadataEditor(ui, tabname, self)
extensions-builtin/ScuNET/preload.py
ADDED
@@ -0,0 +1,6 @@
import os
from modules import paths


def preload(parser):
    parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(paths.models_path, 'ScuNET'))
extensions-builtin/ScuNET/scripts/scunet_model.py
ADDED
@@ -0,0 +1,144 @@
import sys

import PIL.Image
import numpy as np
import torch
from tqdm import tqdm

import modules.upscaler
from modules import devices, modelloader, script_callbacks, errors
from scunet_model_arch import SCUNet

from modules.modelloader import load_file_from_url
from modules.shared import opts


class UpscalerScuNET(modules.upscaler.Upscaler):
    def __init__(self, dirname):
        self.name = "ScuNET"
        self.model_name = "ScuNET GAN"
        self.model_name2 = "ScuNET PSNR"
        self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/scunet_color_real_gan.pth"
        self.model_url2 = "https://github.com/cszn/KAIR/releases/download/v1.0/scunet_color_real_psnr.pth"
        self.user_path = dirname
        super().__init__()
        model_paths = self.find_models(ext_filter=[".pth"])
        scalers = []
        add_model2 = True
        for file in model_paths:
            if file.startswith("http"):
                name = self.model_name
            else:
                name = modelloader.friendly_name(file)
            if name == self.model_name2 or file == self.model_url2:
                add_model2 = False
            try:
                scaler_data = modules.upscaler.UpscalerData(name, file, self, 4)
                scalers.append(scaler_data)
            except Exception:
                errors.report(f"Error loading ScuNET model: {file}", exc_info=True)
        if add_model2:
            scaler_data2 = modules.upscaler.UpscalerData(self.model_name2, self.model_url2, self)
            scalers.append(scaler_data2)
        self.scalers = scalers

    @staticmethod
    @torch.no_grad()
    def tiled_inference(img, model):
        # test the image tile by tile
        h, w = img.shape[2:]
        tile = opts.SCUNET_tile
        tile_overlap = opts.SCUNET_tile_overlap
        if tile == 0:
            return model(img)

        device = devices.get_device_for('scunet')
        assert tile % 8 == 0, "tile size should be a multiple of window_size"
        sf = 1

        stride = tile - tile_overlap
        h_idx_list = list(range(0, h - tile, stride)) + [h - tile]
        w_idx_list = list(range(0, w - tile, stride)) + [w - tile]
        E = torch.zeros(1, 3, h * sf, w * sf, dtype=img.dtype, device=device)
        W = torch.zeros_like(E, dtype=devices.dtype, device=device)

        with tqdm(total=len(h_idx_list) * len(w_idx_list), desc="ScuNET tiles") as pbar:
            for h_idx in h_idx_list:

                for w_idx in w_idx_list:

                    in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile]

                    out_patch = model(in_patch)
                    out_patch_mask = torch.ones_like(out_patch)

                    E[
                        ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
                    ].add_(out_patch)
                    W[
                        ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
                    ].add_(out_patch_mask)
                    pbar.update(1)
        output = E.div_(W)

        return output

    def do_upscale(self, img: PIL.Image.Image, selected_file):

        devices.torch_gc()

        try:
            model = self.load_model(selected_file)
        except Exception as e:
            print(f"ScuNET: Unable to load model from {selected_file}: {e}", file=sys.stderr)
            return img

        device = devices.get_device_for('scunet')
        tile = opts.SCUNET_tile
        h, w = img.height, img.width
        np_img = np.array(img)
        np_img = np_img[:, :, ::-1]  # RGB to BGR
        np_img = np_img.transpose((2, 0, 1)) / 255  # HWC to CHW
        torch_img = torch.from_numpy(np_img).float().unsqueeze(0).to(device)  # type: ignore

        if tile > h or tile > w:
            _img = torch.zeros(1, 3, max(h, tile), max(w, tile), dtype=torch_img.dtype, device=torch_img.device)
            _img[:, :, :h, :w] = torch_img  # pad image
            torch_img = _img

        torch_output = self.tiled_inference(torch_img, model).squeeze(0)
        torch_output = torch_output[:, :h * 1, :w * 1]  # remove padding, if any
        np_output: np.ndarray = torch_output.float().cpu().clamp_(0, 1).numpy()
        del torch_img, torch_output
        devices.torch_gc()

        output = np_output.transpose((1, 2, 0))  # CHW to HWC
        output = output[:, :, ::-1]  # BGR to RGB
        return PIL.Image.fromarray((output * 255).astype(np.uint8))

    def load_model(self, path: str):
        device = devices.get_device_for('scunet')
        if path.startswith("http"):
            # TODO: this doesn't use `path` at all?
            filename = load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name=f"{self.name}.pth")
        else:
            filename = path
        model = SCUNet(in_nc=3, config=[4, 4, 4, 4, 4, 4, 4], dim=64)
        model.load_state_dict(torch.load(filename), strict=True)
        model.eval()
        for _, v in model.named_parameters():
            v.requires_grad = False
        model = model.to(device)

        return model


def on_ui_settings():
    import gradio as gr
    from modules import shared

    shared.opts.add_option("SCUNET_tile", shared.OptionInfo(256, "Tile size for SCUNET upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, section=('upscaling', "Upscaling")).info("0 = no tiling"))
    shared.opts.add_option("SCUNET_tile_overlap", shared.OptionInfo(8, "Tile overlap for SCUNET upscalers.", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, section=('upscaling', "Upscaling")).info("Low values = visible seam"))


script_callbacks.on_ui_settings(on_ui_settings)
extensions-builtin/ScuNET/scunet_model_arch.py
ADDED
@@ -0,0 +1,268 @@
# -*- coding: utf-8 -*-
import numpy as np
import torch
import torch.nn as nn
from einops import rearrange
from einops.layers.torch import Rearrange
from timm.models.layers import trunc_normal_, DropPath


class WMSA(nn.Module):
    """ Self-attention module in Swin Transformer
    """

    def __init__(self, input_dim, output_dim, head_dim, window_size, type):
        super(WMSA, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.head_dim = head_dim
        self.scale = self.head_dim ** -0.5
        self.n_heads = input_dim // head_dim
        self.window_size = window_size
        self.type = type
        self.embedding_layer = nn.Linear(self.input_dim, 3 * self.input_dim, bias=True)

        self.relative_position_params = nn.Parameter(
            torch.zeros((2 * window_size - 1) * (2 * window_size - 1), self.n_heads))

        self.linear = nn.Linear(self.input_dim, self.output_dim)

        trunc_normal_(self.relative_position_params, std=.02)
        self.relative_position_params = torch.nn.Parameter(
            self.relative_position_params.view(2 * window_size - 1, 2 * window_size - 1,
                                               self.n_heads).transpose(1, 2).transpose(0, 1))

    def generate_mask(self, h, w, p, shift):
        """ generating the mask of SW-MSA
        Args:
            shift: shift parameters in CyclicShift.
        Returns:
            attn_mask: should be (1 1 w p p),
        """
        # supporting square.
        attn_mask = torch.zeros(h, w, p, p, p, p, dtype=torch.bool, device=self.relative_position_params.device)
        if self.type == 'W':
            return attn_mask

        s = p - shift
        attn_mask[-1, :, :s, :, s:, :] = True
        attn_mask[-1, :, s:, :, :s, :] = True
        attn_mask[:, -1, :, :s, :, s:] = True
        attn_mask[:, -1, :, s:, :, :s] = True
        attn_mask = rearrange(attn_mask, 'w1 w2 p1 p2 p3 p4 -> 1 1 (w1 w2) (p1 p2) (p3 p4)')
        return attn_mask

    def forward(self, x):
        """ Forward pass of Window Multi-head Self-attention module.
        Args:
            x: input tensor with shape of [b h w c];
            attn_mask: attention mask, fill -inf where the value is True;
        Returns:
            output: tensor shape [b h w c]
        """
        if self.type != 'W':
            x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2))

        x = rearrange(x, 'b (w1 p1) (w2 p2) c -> b w1 w2 p1 p2 c', p1=self.window_size, p2=self.window_size)
        h_windows = x.size(1)
        w_windows = x.size(2)
        # square validation
        # assert h_windows == w_windows

        x = rearrange(x, 'b w1 w2 p1 p2 c -> b (w1 w2) (p1 p2) c', p1=self.window_size, p2=self.window_size)
        qkv = self.embedding_layer(x)
        q, k, v = rearrange(qkv, 'b nw np (threeh c) -> threeh b nw np c', c=self.head_dim).chunk(3, dim=0)
        sim = torch.einsum('hbwpc,hbwqc->hbwpq', q, k) * self.scale
        # Adding learnable relative embedding
        sim = sim + rearrange(self.relative_embedding(), 'h p q -> h 1 1 p q')
        # Using Attn Mask to distinguish different subwindows.
        if self.type != 'W':
            attn_mask = self.generate_mask(h_windows, w_windows, self.window_size, shift=self.window_size // 2)
            sim = sim.masked_fill_(attn_mask, float("-inf"))

        probs = nn.functional.softmax(sim, dim=-1)
        output = torch.einsum('hbwij,hbwjc->hbwic', probs, v)
        output = rearrange(output, 'h b w p c -> b w p (h c)')
        output = self.linear(output)
        output = rearrange(output, 'b (w1 w2) (p1 p2) c -> b (w1 p1) (w2 p2) c', w1=h_windows, p1=self.window_size)

        if self.type != 'W':
            output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2), dims=(1, 2))

        return output

    def relative_embedding(self):
        cord = torch.tensor(np.array([[i, j] for i in range(self.window_size) for j in range(self.window_size)]))
        relation = cord[:, None, :] - cord[None, :, :] + self.window_size - 1
        # negative is allowed
        return self.relative_position_params[:, relation[:, :, 0].long(), relation[:, :, 1].long()]


class Block(nn.Module):
    def __init__(self, input_dim, output_dim, head_dim, window_size, drop_path, type='W', input_resolution=None):
        """ SwinTransformer Block
        """
        super(Block, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        assert type in ['W', 'SW']
        self.type = type
        if input_resolution <= window_size:
            self.type = 'W'

        self.ln1 = nn.LayerNorm(input_dim)
        self.msa = WMSA(input_dim, input_dim, head_dim, window_size, self.type)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.ln2 = nn.LayerNorm(input_dim)
        self.mlp = nn.Sequential(
            nn.Linear(input_dim, 4 * input_dim),
            nn.GELU(),
            nn.Linear(4 * input_dim, output_dim),
        )

    def forward(self, x):
        x = x + self.drop_path(self.msa(self.ln1(x)))
        x = x + self.drop_path(self.mlp(self.ln2(x)))
        return x


class ConvTransBlock(nn.Module):
    def __init__(self, conv_dim, trans_dim, head_dim, window_size, drop_path, type='W', input_resolution=None):
        """ SwinTransformer and Conv Block
        """
        super(ConvTransBlock, self).__init__()
        self.conv_dim = conv_dim
        self.trans_dim = trans_dim
        self.head_dim = head_dim
        self.window_size = window_size
        self.drop_path = drop_path
        self.type = type
        self.input_resolution = input_resolution

        assert self.type in ['W', 'SW']
        if self.input_resolution <= self.window_size:
            self.type = 'W'

        self.trans_block = Block(self.trans_dim, self.trans_dim, self.head_dim, self.window_size, self.drop_path,
                                 self.type, self.input_resolution)
        self.conv1_1 = nn.Conv2d(self.conv_dim + self.trans_dim, self.conv_dim + self.trans_dim, 1, 1, 0, bias=True)
        self.conv1_2 = nn.Conv2d(self.conv_dim + self.trans_dim, self.conv_dim + self.trans_dim, 1, 1, 0, bias=True)

        self.conv_block = nn.Sequential(
            nn.Conv2d(self.conv_dim, self.conv_dim, 3, 1, 1, bias=False),
            nn.ReLU(True),
            nn.Conv2d(self.conv_dim, self.conv_dim, 3, 1, 1, bias=False)
        )

    def forward(self, x):
        conv_x, trans_x = torch.split(self.conv1_1(x), (self.conv_dim, self.trans_dim), dim=1)
        conv_x = self.conv_block(conv_x) + conv_x
        trans_x = Rearrange('b c h w -> b h w c')(trans_x)
        trans_x = self.trans_block(trans_x)
        trans_x = Rearrange('b h w c -> b c h w')(trans_x)
        res = self.conv1_2(torch.cat((conv_x, trans_x), dim=1))
        x = x + res

        return x


class SCUNet(nn.Module):
    # def __init__(self, in_nc=3, config=[2, 2, 2, 2, 2, 2, 2], dim=64, drop_path_rate=0.0, input_resolution=256):
    def __init__(self, in_nc=3, config=None, dim=64, drop_path_rate=0.0, input_resolution=256):
        super(SCUNet, self).__init__()
        if config is None:
            config = [2, 2, 2, 2, 2, 2, 2]
        self.config = config
        self.dim = dim
        self.head_dim = 32
        self.window_size = 8

        # drop path rate for each layer
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(config))]

        self.m_head = [nn.Conv2d(in_nc, dim, 3, 1, 1, bias=False)]

        begin = 0
        self.m_down1 = [ConvTransBlock(dim // 2, dim // 2, self.head_dim, self.window_size, dpr[i + begin],
                                       'W' if not i % 2 else 'SW', input_resolution)
                        for i in range(config[0])] + \
                       [nn.Conv2d(dim, 2 * dim, 2, 2, 0, bias=False)]

        begin += config[0]
        self.m_down2 = [ConvTransBlock(dim, dim, self.head_dim, self.window_size, dpr[i + begin],
                                       'W' if not i % 2 else 'SW', input_resolution // 2)
                        for i in range(config[1])] + \
                       [nn.Conv2d(2 * dim, 4 * dim, 2, 2, 0, bias=False)]

        begin += config[1]
        self.m_down3 = [ConvTransBlock(2 * dim, 2 * dim, self.head_dim, self.window_size, dpr[i + begin],
                                       'W' if not i % 2 else 'SW', input_resolution // 4)
                        for i in range(config[2])] + \
                       [nn.Conv2d(4 * dim, 8 * dim, 2, 2, 0, bias=False)]

        begin += config[2]
        self.m_body = [ConvTransBlock(4 * dim, 4 * dim, self.head_dim, self.window_size, dpr[i + begin],
                                      'W' if not i % 2 else 'SW', input_resolution // 8)
                       for i in range(config[3])]

        begin += config[3]
        self.m_up3 = [nn.ConvTranspose2d(8 * dim, 4 * dim, 2, 2, 0, bias=False), ] + \
                     [ConvTransBlock(2 * dim, 2 * dim, self.head_dim, self.window_size, dpr[i + begin],
                                     'W' if not i % 2 else 'SW', input_resolution // 4)
                      for i in range(config[4])]

        begin += config[4]
        self.m_up2 = [nn.ConvTranspose2d(4 * dim, 2 * dim, 2, 2, 0, bias=False), ] + \
                     [ConvTransBlock(dim, dim, self.head_dim, self.window_size, dpr[i + begin],
                                     'W' if not i % 2 else 'SW', input_resolution // 2)
                      for i in range(config[5])]

        begin += config[5]
        self.m_up1 = [nn.ConvTranspose2d(2 * dim, dim, 2, 2, 0, bias=False), ] + \
                     [ConvTransBlock(dim // 2, dim // 2, self.head_dim, self.window_size, dpr[i + begin],
                                     'W' if not i % 2 else 'SW', input_resolution)
                      for i in range(config[6])]

        self.m_tail = [nn.Conv2d(dim, in_nc, 3, 1, 1, bias=False)]

        self.m_head = nn.Sequential(*self.m_head)
        self.m_down1 = nn.Sequential(*self.m_down1)
        self.m_down2 = nn.Sequential(*self.m_down2)
        self.m_down3 = nn.Sequential(*self.m_down3)
        self.m_body = nn.Sequential(*self.m_body)
        self.m_up3 = nn.Sequential(*self.m_up3)
        self.m_up2 = nn.Sequential(*self.m_up2)
        self.m_up1 = nn.Sequential(*self.m_up1)
        self.m_tail = nn.Sequential(*self.m_tail)
        # self.apply(self._init_weights)

    def forward(self, x0):

        h, w = x0.size()[-2:]
        paddingBottom = int(np.ceil(h / 64) * 64 - h)
        paddingRight = int(np.ceil(w / 64) * 64 - w)
        x0 = nn.ReplicationPad2d((0, paddingRight, 0, paddingBottom))(x0)

        x1 = self.m_head(x0)
        x2 = self.m_down1(x1)
        x3 = self.m_down2(x2)
        x4 = self.m_down3(x3)
        x = self.m_body(x4)
        x = self.m_up3(x + x4)
        x = self.m_up2(x + x3)
        x = self.m_up1(x + x2)
        x = self.m_tail(x + x1)

        x = x[..., :h, :w]

        return x

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
extensions-builtin/SwinIR/preload.py
ADDED
@@ -0,0 +1,6 @@
import os
from modules import paths


def preload(parser):
    parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(paths.models_path, 'SwinIR'))
extensions-builtin/SwinIR/scripts/swinir_model.py
ADDED
@@ -0,0 +1,192 @@
import sys
import platform

import numpy as np
import torch
from PIL import Image
from tqdm import tqdm

from modules import modelloader, devices, script_callbacks, shared
from modules.shared import opts, state
from swinir_model_arch import SwinIR
from swinir_model_arch_v2 import Swin2SR
from modules.upscaler import Upscaler, UpscalerData

SWINIR_MODEL_URL = "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR-L_x4_GAN.pth"

device_swinir = devices.get_device_for('swinir')


class UpscalerSwinIR(Upscaler):
    def __init__(self, dirname):
        self._cached_model = None  # keep the model when SWIN_torch_compile is on to prevent re-compile every runs
        self._cached_model_config = None  # to clear '_cached_model' when changing model (v1/v2) or settings
        self.name = "SwinIR"
        self.model_url = SWINIR_MODEL_URL
        self.model_name = "SwinIR 4x"
        self.user_path = dirname
        super().__init__()
        scalers = []
        model_files = self.find_models(ext_filter=[".pt", ".pth"])
        for model in model_files:
            if model.startswith("http"):
                name = self.model_name
            else:
                name = modelloader.friendly_name(model)
            model_data = UpscalerData(name, model, self)
            scalers.append(model_data)
        self.scalers = scalers

    def do_upscale(self, img, model_file):
        use_compile = hasattr(opts, 'SWIN_torch_compile') and opts.SWIN_torch_compile \
            and int(torch.__version__.split('.')[0]) >= 2 and platform.system() != "Windows"
        current_config = (model_file, opts.SWIN_tile)

        if use_compile and self._cached_model_config == current_config:
            model = self._cached_model
        else:
            self._cached_model = None
            try:
                model = self.load_model(model_file)
            except Exception as e:
                print(f"Failed loading SwinIR model {model_file}: {e}", file=sys.stderr)
                return img
            model = model.to(device_swinir, dtype=devices.dtype)
            if use_compile:
                model = torch.compile(model)
                self._cached_model = model
                self._cached_model_config = current_config
        img = upscale(img, model)
        devices.torch_gc()
        return img

    def load_model(self, path, scale=4):
        if path.startswith("http"):
            filename = modelloader.load_file_from_url(
                url=path,
                model_dir=self.model_download_path,
                file_name=f"{self.model_name.replace(' ', '_')}.pth",
            )
        else:
            filename = path
        if filename.endswith(".v2.pth"):
            model = Swin2SR(
                upscale=scale,
                in_chans=3,
                img_size=64,
                window_size=8,
                img_range=1.0,
                depths=[6, 6, 6, 6, 6, 6],
                embed_dim=180,
                num_heads=[6, 6, 6, 6, 6, 6],
                mlp_ratio=2,
                upsampler="nearest+conv",
                resi_connection="1conv",
            )
            params = None
        else:
            model = SwinIR(
                upscale=scale,
                in_chans=3,
                img_size=64,
                window_size=8,
                img_range=1.0,
                depths=[6, 6, 6, 6, 6, 6, 6, 6, 6],
                embed_dim=240,
                num_heads=[8, 8, 8, 8, 8, 8, 8, 8, 8],
                mlp_ratio=2,
                upsampler="nearest+conv",
                resi_connection="3conv",
            )
            params = "params_ema"

        pretrained_model = torch.load(filename)
        if params is not None:
            model.load_state_dict(pretrained_model[params], strict=True)
        else:
            model.load_state_dict(pretrained_model, strict=True)
        return model


def upscale(
        img,
        model,
        tile=None,
        tile_overlap=None,
        window_size=8,
        scale=4,
):
    tile = tile or opts.SWIN_tile
    tile_overlap = tile_overlap or opts.SWIN_tile_overlap

    img = np.array(img)
    img = img[:, :, ::-1]
    img = np.moveaxis(img, 2, 0) / 255
    img = torch.from_numpy(img).float()
    img = img.unsqueeze(0).to(device_swinir, dtype=devices.dtype)
    with torch.no_grad(), devices.autocast():
        _, _, h_old, w_old = img.size()
        h_pad = (h_old // window_size + 1) * window_size - h_old
        w_pad = (w_old // window_size + 1) * window_size - w_old
        img = torch.cat([img, torch.flip(img, [2])], 2)[:, :, : h_old + h_pad, :]
        img = torch.cat([img, torch.flip(img, [3])], 3)[:, :, :, : w_old + w_pad]
        output = inference(img, model, tile, tile_overlap, window_size, scale)
        output = output[..., : h_old * scale, : w_old * scale]
        output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
        if output.ndim == 3:
            output = np.transpose(
                output[[2, 1, 0], :, :], (1, 2, 0)
            )  # CHW-RGB to HCW-BGR
        output = (output * 255.0).round().astype(np.uint8)  # float32 to uint8
        return Image.fromarray(output, "RGB")


def inference(img, model, tile, tile_overlap, window_size, scale):
    # test the image tile by tile
    b, c, h, w = img.size()
    tile = min(tile, h, w)
    assert tile % window_size == 0, "tile size should be a multiple of window_size"
    sf = scale

    stride = tile - tile_overlap
    h_idx_list = list(range(0, h - tile, stride)) + [h - tile]
    w_idx_list = list(range(0, w - tile, stride)) + [w - tile]
    E = torch.zeros(b, c, h * sf, w * sf, dtype=devices.dtype, device=device_swinir).type_as(img)
    W = torch.zeros_like(E, dtype=devices.dtype, device=device_swinir)

    with tqdm(total=len(h_idx_list) * len(w_idx_list), desc="SwinIR tiles") as pbar:
        for h_idx in h_idx_list:
            if state.interrupted or state.skipped:
                break

            for w_idx in w_idx_list:
                if state.interrupted or state.skipped:
                    break

                in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile]
                out_patch = model(in_patch)
                out_patch_mask = torch.ones_like(out_patch)

                E[
                    ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
                ].add_(out_patch)
                W[
                    ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
                ].add_(out_patch_mask)
                pbar.update(1)
    output = E.div_(W)

    return output


def on_ui_settings():
    import gradio as gr

    shared.opts.add_option("SWIN_tile", shared.OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}, section=('upscaling', "Upscaling")))
    shared.opts.add_option("SWIN_tile_overlap", shared.OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}, section=('upscaling', "Upscaling")))
    if int(torch.__version__.split('.')[0]) >= 2 and platform.system() != "Windows":  # torch.compile() require pytorch 2.0 or above, and not on Windows
        shared.opts.add_option("SWIN_torch_compile", shared.OptionInfo(False, "Use torch.compile to accelerate SwinIR.", gr.Checkbox, {"interactive": True}, section=('upscaling', "Upscaling")).info("Takes longer on first run"))


script_callbacks.on_ui_settings(on_ui_settings)
extensions-builtin/SwinIR/swinir_model_arch.py
ADDED
@@ -0,0 +1,867 @@
# -----------------------------------------------------------------------------------
# SwinIR: Image Restoration Using Swin Transformer, https://arxiv.org/abs/2108.10257
# Originally Written by Ze Liu, Modified by Jingyun Liang.
# -----------------------------------------------------------------------------------

import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_


class Mlp(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


def window_partition(x, window_size):
    """
    Args:
        x: (B, H, W, C)
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    return windows


def window_reverse(windows, window_size, H, W):
    """
    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
    return x


class WindowAttention(nn.Module):
    r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.

    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):

        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        # define a parameter table of relative position bias
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))  # 2*Wh-1 * 2*Ww-1, nH

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        self.register_buffer("relative_position_index", relative_position_index)

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)

        self.proj_drop = nn.Dropout(proj_drop)

        trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def extra_repr(self) -> str:
        return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'

    def flops(self, N):
        # calculate flops for 1 window with token length of N
        flops = 0
        # qkv = self.qkv(x)
        flops += N * self.dim * 3 * self.dim
        # attn = (q @ k.transpose(-2, -1))
        flops += self.num_heads * N * (self.dim // self.num_heads) * N
        # x = (attn @ v)
        flops += self.num_heads * N * N * (self.dim // self.num_heads)
        # x = self.proj(x)
        flops += N * self.dim * self.dim
        return flops


class SwinTransformerBlock(nn.Module):
    r""" Swin Transformer Block.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        if min(self.input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = 0
            self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"

        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
            qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

        if self.shift_size > 0:
            attn_mask = self.calculate_mask(self.input_resolution)
        else:
            attn_mask = None

        self.register_buffer("attn_mask", attn_mask)

    def calculate_mask(self, x_size):
        # calculate attention mask for SW-MSA
        H, W = x_size
        img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
        h_slices = (slice(0, -self.window_size),
                    slice(-self.window_size, -self.shift_size),
                    slice(-self.shift_size, None))
        w_slices = (slice(0, -self.window_size),
                    slice(-self.window_size, -self.shift_size),
                    slice(-self.shift_size, None))
        cnt = 0
        for h in h_slices:
            for w in w_slices:
                img_mask[:, h, w, :] = cnt
                cnt += 1

        mask_windows = window_partition(img_mask, self.window_size)  # nW, window_size, window_size, 1
        mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
        attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))

        return attn_mask

    def forward(self, x, x_size):
        H, W = x_size
        B, L, C = x.shape
        # assert L == H * W, "input feature has wrong size"

        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)

        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x

        # partition windows
        x_windows = window_partition(shifted_x, self.window_size)  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, window_size*window_size, C

        # W-MSA/SW-MSA (to be compatible for testing on images whose shapes are the multiple of window size
        if self.input_resolution == x_size:
            attn_windows = self.attn(x_windows, mask=self.attn_mask)  # nW*B, window_size*window_size, C
        else:
            attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))

        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C

        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        x = x.view(B, H * W, C)

        # FFN
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))

        return x

    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
               f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"

    def flops(self):
        flops = 0
        H, W = self.input_resolution
        # norm1
        flops += self.dim * H * W
        # W-MSA/SW-MSA
        nW = H * W / self.window_size / self.window_size
        flops += nW * self.attn.flops(self.window_size * self.window_size)
        # mlp
        flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
        # norm2
        flops += self.dim * H * W
        return flops


class PatchMerging(nn.Module):
    r""" Patch Merging Layer.

    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x):
        """
        x: B, H*W, C
        """
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."

        x = x.view(B, H, W, C)

        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
        x = torch.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C
        x = x.view(B, -1, 4 * C)  # B H/2*W/2 4*C

        x = self.norm(x)
        x = self.reduction(x)

        return x

    def extra_repr(self) -> str:
        return f"input_resolution={self.input_resolution}, dim={self.dim}"

    def flops(self):
        H, W = self.input_resolution
        flops = H * W * self.dim
        flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
        return flops


class BasicLayer(nn.Module):
    """ A basic Swin Transformer layer for one stage.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):

        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # build blocks
        self.blocks = nn.ModuleList([
            SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
                                 num_heads=num_heads, window_size=window_size,
                                 shift_size=0 if (i % 2 == 0) else window_size // 2,
                                 mlp_ratio=mlp_ratio,
                                 qkv_bias=qkv_bias, qk_scale=qk_scale,
                                 drop=drop, attn_drop=attn_drop,
                                 drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                                 norm_layer=norm_layer)
            for i in range(depth)])

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
        else:
            self.downsample = None

    def forward(self, x, x_size):
        for blk in self.blocks:
            if self.use_checkpoint:
                x = checkpoint.checkpoint(blk, x, x_size)
            else:
                x = blk(x, x_size)
        if self.downsample is not None:
            x = self.downsample(x)
        return x

    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"

    def flops(self):
        flops = 0
        for blk in self.blocks:
            flops += blk.flops()
        if self.downsample is not None:
            flops += self.downsample.flops()
        return flops


class RSTB(nn.Module):
    """Residual Swin Transformer Block (RSTB).

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
|
426 |
+
num_heads (int): Number of attention heads.
|
427 |
+
window_size (int): Local window size.
|
428 |
+
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
|
429 |
+
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
|
430 |
+
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
|
431 |
+
drop (float, optional): Dropout rate. Default: 0.0
|
432 |
+
attn_drop (float, optional): Attention dropout rate. Default: 0.0
|
433 |
+
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
|
434 |
+
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
|
435 |
+
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
|
436 |
+
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
|
437 |
+
img_size: Input image size.
|
438 |
+
patch_size: Patch size.
|
439 |
+
resi_connection: The convolutional block before residual connection.
|
440 |
+
"""
|
441 |
+
|
442 |
+
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
|
443 |
+
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
|
444 |
+
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
|
445 |
+
img_size=224, patch_size=4, resi_connection='1conv'):
|
446 |
+
super(RSTB, self).__init__()
|
447 |
+
|
448 |
+
self.dim = dim
|
449 |
+
self.input_resolution = input_resolution
|
450 |
+
|
451 |
+
self.residual_group = BasicLayer(dim=dim,
|
452 |
+
input_resolution=input_resolution,
|
453 |
+
depth=depth,
|
454 |
+
num_heads=num_heads,
|
455 |
+
window_size=window_size,
|
456 |
+
mlp_ratio=mlp_ratio,
|
457 |
+
qkv_bias=qkv_bias, qk_scale=qk_scale,
|
458 |
+
drop=drop, attn_drop=attn_drop,
|
459 |
+
drop_path=drop_path,
|
460 |
+
norm_layer=norm_layer,
|
461 |
+
downsample=downsample,
|
462 |
+
use_checkpoint=use_checkpoint)
|
463 |
+
|
464 |
+
if resi_connection == '1conv':
|
465 |
+
self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
|
466 |
+
elif resi_connection == '3conv':
|
467 |
+
# to save parameters and memory
|
468 |
+
self.conv = nn.Sequential(nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
|
469 |
+
nn.Conv2d(dim // 4, dim // 4, 1, 1, 0),
|
470 |
+
nn.LeakyReLU(negative_slope=0.2, inplace=True),
|
471 |
+
nn.Conv2d(dim // 4, dim, 3, 1, 1))
|
472 |
+
|
473 |
+
self.patch_embed = PatchEmbed(
|
474 |
+
img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
|
475 |
+
norm_layer=None)
|
476 |
+
|
477 |
+
self.patch_unembed = PatchUnEmbed(
|
478 |
+
img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
|
479 |
+
norm_layer=None)
|
480 |
+
|
481 |
+
def forward(self, x, x_size):
|
482 |
+
return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x
|
483 |
+
|
484 |
+
def flops(self):
|
485 |
+
flops = 0
|
486 |
+
flops += self.residual_group.flops()
|
487 |
+
H, W = self.input_resolution
|
488 |
+
flops += H * W * self.dim * self.dim * 9
|
489 |
+
flops += self.patch_embed.flops()
|
490 |
+
flops += self.patch_unembed.flops()
|
491 |
+
|
492 |
+
return flops
|
493 |
+
|
494 |
+
|
495 |
+
class PatchEmbed(nn.Module):
|
496 |
+
r""" Image to Patch Embedding
|
497 |
+
|
498 |
+
Args:
|
499 |
+
img_size (int): Image size. Default: 224.
|
500 |
+
patch_size (int): Patch token size. Default: 4.
|
501 |
+
in_chans (int): Number of input image channels. Default: 3.
|
502 |
+
embed_dim (int): Number of linear projection output channels. Default: 96.
|
503 |
+
norm_layer (nn.Module, optional): Normalization layer. Default: None
|
504 |
+
"""
|
505 |
+
|
506 |
+
def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
|
507 |
+
super().__init__()
|
508 |
+
img_size = to_2tuple(img_size)
|
509 |
+
patch_size = to_2tuple(patch_size)
|
510 |
+
patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
|
511 |
+
self.img_size = img_size
|
512 |
+
self.patch_size = patch_size
|
513 |
+
self.patches_resolution = patches_resolution
|
514 |
+
self.num_patches = patches_resolution[0] * patches_resolution[1]
|
515 |
+
|
516 |
+
self.in_chans = in_chans
|
517 |
+
self.embed_dim = embed_dim
|
518 |
+
|
519 |
+
if norm_layer is not None:
|
520 |
+
self.norm = norm_layer(embed_dim)
|
521 |
+
else:
|
522 |
+
self.norm = None
|
523 |
+
|
524 |
+
def forward(self, x):
|
525 |
+
x = x.flatten(2).transpose(1, 2) # B Ph*Pw C
|
526 |
+
if self.norm is not None:
|
527 |
+
x = self.norm(x)
|
528 |
+
return x
|
529 |
+
|
530 |
+
def flops(self):
|
531 |
+
flops = 0
|
532 |
+
H, W = self.img_size
|
533 |
+
if self.norm is not None:
|
534 |
+
flops += H * W * self.embed_dim
|
535 |
+
return flops
|
536 |
+
|
537 |
+
|
538 |
+
class PatchUnEmbed(nn.Module):
|
539 |
+
r""" Image to Patch Unembedding
|
540 |
+
|
541 |
+
Args:
|
542 |
+
img_size (int): Image size. Default: 224.
|
543 |
+
patch_size (int): Patch token size. Default: 4.
|
544 |
+
in_chans (int): Number of input image channels. Default: 3.
|
545 |
+
embed_dim (int): Number of linear projection output channels. Default: 96.
|
546 |
+
norm_layer (nn.Module, optional): Normalization layer. Default: None
|
547 |
+
"""
|
548 |
+
|
549 |
+
def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
|
550 |
+
super().__init__()
|
551 |
+
img_size = to_2tuple(img_size)
|
552 |
+
patch_size = to_2tuple(patch_size)
|
553 |
+
patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
|
554 |
+
self.img_size = img_size
|
555 |
+
self.patch_size = patch_size
|
556 |
+
self.patches_resolution = patches_resolution
|
557 |
+
self.num_patches = patches_resolution[0] * patches_resolution[1]
|
558 |
+
|
559 |
+
self.in_chans = in_chans
|
560 |
+
self.embed_dim = embed_dim
|
561 |
+
|
562 |
+
def forward(self, x, x_size):
|
563 |
+
B, HW, C = x.shape
|
564 |
+
x = x.transpose(1, 2).view(B, self.embed_dim, x_size[0], x_size[1])  # B C Ph Pw
|
565 |
+
return x
|
566 |
+
|
567 |
+
def flops(self):
|
568 |
+
flops = 0
|
569 |
+
return flops
|
570 |
+
|
571 |
+
|
572 |
+
class Upsample(nn.Sequential):
|
573 |
+
"""Upsample module.
|
574 |
+
|
575 |
+
Args:
|
576 |
+
scale (int): Scale factor. Supported scales: 2^n and 3.
|
577 |
+
num_feat (int): Channel number of intermediate features.
|
578 |
+
"""
|
579 |
+
|
580 |
+
def __init__(self, scale, num_feat):
|
581 |
+
m = []
|
582 |
+
if (scale & (scale - 1)) == 0: # scale = 2^n
|
583 |
+
for _ in range(int(math.log(scale, 2))):
|
584 |
+
m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
|
585 |
+
m.append(nn.PixelShuffle(2))
|
586 |
+
elif scale == 3:
|
587 |
+
m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
|
588 |
+
m.append(nn.PixelShuffle(3))
|
589 |
+
else:
|
590 |
+
raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
|
591 |
+
super(Upsample, self).__init__(*m)
|
592 |
+
|
593 |
+
|
594 |
+
class UpsampleOneStep(nn.Sequential):
|
595 |
+
"""UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)
|
596 |
+
Used in lightweight SR to save parameters.
|
597 |
+
|
598 |
+
Args:
|
599 |
+
scale (int): Scale factor. Supported scales: 2^n and 3.
|
600 |
+
num_feat (int): Channel number of intermediate features.
|
601 |
+
|
602 |
+
"""
|
603 |
+
|
604 |
+
def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
|
605 |
+
self.num_feat = num_feat
|
606 |
+
self.input_resolution = input_resolution
|
607 |
+
m = []
|
608 |
+
m.append(nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1))
|
609 |
+
m.append(nn.PixelShuffle(scale))
|
610 |
+
super(UpsampleOneStep, self).__init__(*m)
|
611 |
+
|
612 |
+
def flops(self):
|
613 |
+
H, W = self.input_resolution
|
614 |
+
flops = H * W * self.num_feat * 3 * 9
|
615 |
+
return flops
|
616 |
+
|
617 |
+
|
618 |
+
class SwinIR(nn.Module):
|
619 |
+
r""" SwinIR
|
620 |
+
A PyTorch impl of: `SwinIR: Image Restoration Using Swin Transformer`, based on Swin Transformer.
|
621 |
+
|
622 |
+
Args:
|
623 |
+
img_size (int | tuple(int)): Input image size. Default 64
|
624 |
+
patch_size (int | tuple(int)): Patch size. Default: 1
|
625 |
+
in_chans (int): Number of input image channels. Default: 3
|
626 |
+
embed_dim (int): Patch embedding dimension. Default: 96
|
627 |
+
depths (tuple(int)): Depth of each Swin Transformer layer.
|
628 |
+
num_heads (tuple(int)): Number of attention heads in different layers.
|
629 |
+
window_size (int): Window size. Default: 7
|
630 |
+
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
|
631 |
+
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
|
632 |
+
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
|
633 |
+
drop_rate (float): Dropout rate. Default: 0
|
634 |
+
attn_drop_rate (float): Attention dropout rate. Default: 0
|
635 |
+
drop_path_rate (float): Stochastic depth rate. Default: 0.1
|
636 |
+
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
|
637 |
+
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
|
638 |
+
patch_norm (bool): If True, add normalization after patch embedding. Default: True
|
639 |
+
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
|
640 |
+
upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compress artifact reduction
|
641 |
+
img_range: Image range. 1. or 255.
|
642 |
+
upsampler: The reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
|
643 |
+
resi_connection: The convolutional block before residual connection. '1conv'/'3conv'
|
644 |
+
"""
|
645 |
+
|
646 |
+
def __init__(self, img_size=64, patch_size=1, in_chans=3,
|
647 |
+
embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6),
|
648 |
+
window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
|
649 |
+
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
|
650 |
+
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
|
651 |
+
use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv',
|
652 |
+
**kwargs):
|
653 |
+
super(SwinIR, self).__init__()
|
654 |
+
num_in_ch = in_chans
|
655 |
+
num_out_ch = in_chans
|
656 |
+
num_feat = 64
|
657 |
+
self.img_range = img_range
|
658 |
+
if in_chans == 3:
|
659 |
+
rgb_mean = (0.4488, 0.4371, 0.4040)
|
660 |
+
self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
|
661 |
+
else:
|
662 |
+
self.mean = torch.zeros(1, 1, 1, 1)
|
663 |
+
self.upscale = upscale
|
664 |
+
self.upsampler = upsampler
|
665 |
+
self.window_size = window_size
|
666 |
+
|
667 |
+
#####################################################################################################
|
668 |
+
################################### 1, shallow feature extraction ###################################
|
669 |
+
self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)
|
670 |
+
|
671 |
+
#####################################################################################################
|
672 |
+
################################### 2, deep feature extraction ######################################
|
673 |
+
self.num_layers = len(depths)
|
674 |
+
self.embed_dim = embed_dim
|
675 |
+
self.ape = ape
|
676 |
+
self.patch_norm = patch_norm
|
677 |
+
self.num_features = embed_dim
|
678 |
+
self.mlp_ratio = mlp_ratio
|
679 |
+
|
680 |
+
# split image into non-overlapping patches
|
681 |
+
self.patch_embed = PatchEmbed(
|
682 |
+
img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
|
683 |
+
norm_layer=norm_layer if self.patch_norm else None)
|
684 |
+
num_patches = self.patch_embed.num_patches
|
685 |
+
patches_resolution = self.patch_embed.patches_resolution
|
686 |
+
self.patches_resolution = patches_resolution
|
687 |
+
|
688 |
+
# merge non-overlapping patches into image
|
689 |
+
self.patch_unembed = PatchUnEmbed(
|
690 |
+
img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
|
691 |
+
norm_layer=norm_layer if self.patch_norm else None)
|
692 |
+
|
693 |
+
# absolute position embedding
|
694 |
+
if self.ape:
|
695 |
+
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
|
696 |
+
trunc_normal_(self.absolute_pos_embed, std=.02)
|
697 |
+
|
698 |
+
self.pos_drop = nn.Dropout(p=drop_rate)
|
699 |
+
|
700 |
+
# stochastic depth
|
701 |
+
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
|
702 |
+
|
703 |
+
# build Residual Swin Transformer blocks (RSTB)
|
704 |
+
self.layers = nn.ModuleList()
|
705 |
+
for i_layer in range(self.num_layers):
|
706 |
+
layer = RSTB(dim=embed_dim,
|
707 |
+
input_resolution=(patches_resolution[0],
|
708 |
+
patches_resolution[1]),
|
709 |
+
depth=depths[i_layer],
|
710 |
+
num_heads=num_heads[i_layer],
|
711 |
+
window_size=window_size,
|
712 |
+
mlp_ratio=self.mlp_ratio,
|
713 |
+
qkv_bias=qkv_bias, qk_scale=qk_scale,
|
714 |
+
drop=drop_rate, attn_drop=attn_drop_rate,
|
715 |
+
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results
|
716 |
+
norm_layer=norm_layer,
|
717 |
+
downsample=None,
|
718 |
+
use_checkpoint=use_checkpoint,
|
719 |
+
img_size=img_size,
|
720 |
+
patch_size=patch_size,
|
721 |
+
resi_connection=resi_connection
|
722 |
+
|
723 |
+
)
|
724 |
+
self.layers.append(layer)
|
725 |
+
self.norm = norm_layer(self.num_features)
|
726 |
+
|
727 |
+
# build the last conv layer in deep feature extraction
|
728 |
+
if resi_connection == '1conv':
|
729 |
+
self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
|
730 |
+
elif resi_connection == '3conv':
|
731 |
+
# to save parameters and memory
|
732 |
+
self.conv_after_body = nn.Sequential(nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),
|
733 |
+
nn.LeakyReLU(negative_slope=0.2, inplace=True),
|
734 |
+
nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),
|
735 |
+
nn.LeakyReLU(negative_slope=0.2, inplace=True),
|
736 |
+
nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))
|
737 |
+
|
738 |
+
#####################################################################################################
|
739 |
+
################################ 3, high quality image reconstruction ################################
|
740 |
+
if self.upsampler == 'pixelshuffle':
|
741 |
+
# for classical SR
|
742 |
+
self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
|
743 |
+
nn.LeakyReLU(inplace=True))
|
744 |
+
self.upsample = Upsample(upscale, num_feat)
|
745 |
+
self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
|
746 |
+
elif self.upsampler == 'pixelshuffledirect':
|
747 |
+
# for lightweight SR (to save parameters)
|
748 |
+
self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
|
749 |
+
(patches_resolution[0], patches_resolution[1]))
|
750 |
+
elif self.upsampler == 'nearest+conv':
|
751 |
+
# for real-world SR (less artifacts)
|
752 |
+
self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
|
753 |
+
nn.LeakyReLU(inplace=True))
|
754 |
+
self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
|
755 |
+
if self.upscale == 4:
|
756 |
+
self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
|
757 |
+
self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
|
758 |
+
self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
|
759 |
+
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
|
760 |
+
else:
|
761 |
+
# for image denoising and JPEG compression artifact reduction
|
762 |
+
self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)
|
763 |
+
|
764 |
+
self.apply(self._init_weights)
|
765 |
+
|
766 |
+
def _init_weights(self, m):
|
767 |
+
if isinstance(m, nn.Linear):
|
768 |
+
trunc_normal_(m.weight, std=.02)
|
769 |
+
if isinstance(m, nn.Linear) and m.bias is not None:
|
770 |
+
nn.init.constant_(m.bias, 0)
|
771 |
+
elif isinstance(m, nn.LayerNorm):
|
772 |
+
nn.init.constant_(m.bias, 0)
|
773 |
+
nn.init.constant_(m.weight, 1.0)
|
774 |
+
|
775 |
+
@torch.jit.ignore
|
776 |
+
def no_weight_decay(self):
|
777 |
+
return {'absolute_pos_embed'}
|
778 |
+
|
779 |
+
@torch.jit.ignore
|
780 |
+
def no_weight_decay_keywords(self):
|
781 |
+
return {'relative_position_bias_table'}
|
782 |
+
|
783 |
+
def check_image_size(self, x):
|
784 |
+
_, _, h, w = x.size()
|
785 |
+
mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
|
786 |
+
mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
|
787 |
+
x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
|
788 |
+
return x
|
789 |
+
|
790 |
+
def forward_features(self, x):
|
791 |
+
x_size = (x.shape[2], x.shape[3])
|
792 |
+
x = self.patch_embed(x)
|
793 |
+
if self.ape:
|
794 |
+
x = x + self.absolute_pos_embed
|
795 |
+
x = self.pos_drop(x)
|
796 |
+
|
797 |
+
for layer in self.layers:
|
798 |
+
x = layer(x, x_size)
|
799 |
+
|
800 |
+
x = self.norm(x) # B L C
|
801 |
+
x = self.patch_unembed(x, x_size)
|
802 |
+
|
803 |
+
return x
|
804 |
+
|
805 |
+
def forward(self, x):
|
806 |
+
H, W = x.shape[2:]
|
807 |
+
x = self.check_image_size(x)
|
808 |
+
|
809 |
+
self.mean = self.mean.type_as(x)
|
810 |
+
x = (x - self.mean) * self.img_range
|
811 |
+
|
812 |
+
if self.upsampler == 'pixelshuffle':
|
813 |
+
# for classical SR
|
814 |
+
x = self.conv_first(x)
|
815 |
+
x = self.conv_after_body(self.forward_features(x)) + x
|
816 |
+
x = self.conv_before_upsample(x)
|
817 |
+
x = self.conv_last(self.upsample(x))
|
818 |
+
elif self.upsampler == 'pixelshuffledirect':
|
819 |
+
# for lightweight SR
|
820 |
+
x = self.conv_first(x)
|
821 |
+
x = self.conv_after_body(self.forward_features(x)) + x
|
822 |
+
x = self.upsample(x)
|
823 |
+
elif self.upsampler == 'nearest+conv':
|
824 |
+
# for real-world SR
|
825 |
+
x = self.conv_first(x)
|
826 |
+
x = self.conv_after_body(self.forward_features(x)) + x
|
827 |
+
x = self.conv_before_upsample(x)
|
828 |
+
x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
|
829 |
+
if self.upscale == 4:
|
830 |
+
x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
|
831 |
+
x = self.conv_last(self.lrelu(self.conv_hr(x)))
|
832 |
+
else:
|
833 |
+
# for image denoising and JPEG compression artifact reduction
|
834 |
+
x_first = self.conv_first(x)
|
835 |
+
res = self.conv_after_body(self.forward_features(x_first)) + x_first
|
836 |
+
x = x + self.conv_last(res)
|
837 |
+
|
838 |
+
x = x / self.img_range + self.mean
|
839 |
+
|
840 |
+
return x[:, :, :H*self.upscale, :W*self.upscale]
|
841 |
+
|
842 |
+
def flops(self):
|
843 |
+
flops = 0
|
844 |
+
H, W = self.patches_resolution
|
845 |
+
flops += H * W * 3 * self.embed_dim * 9
|
846 |
+
flops += self.patch_embed.flops()
|
847 |
+
for layer in self.layers:
|
848 |
+
flops += layer.flops()
|
849 |
+
flops += H * W * 3 * self.embed_dim * self.embed_dim
|
850 |
+
flops += self.upsample.flops()
|
851 |
+
return flops
|
852 |
+
|
853 |
+
|
854 |
+
if __name__ == '__main__':
|
855 |
+
upscale = 4
|
856 |
+
window_size = 8
|
857 |
+
height = (1024 // upscale // window_size + 1) * window_size
|
858 |
+
width = (720 // upscale // window_size + 1) * window_size
|
859 |
+
model = SwinIR(upscale=2, img_size=(height, width),
|
860 |
+
window_size=window_size, img_range=1., depths=[6, 6, 6, 6],
|
861 |
+
embed_dim=60, num_heads=[6, 6, 6, 6], mlp_ratio=2, upsampler='pixelshuffledirect')
|
862 |
+
print(model)
|
863 |
+
print(height, width, model.flops() / 1e9)
|
864 |
+
|
865 |
+
x = torch.randn((1, 3, height, width))
|
866 |
+
x = model(x)
|
867 |
+
print(x.shape)
|
extensions-builtin/SwinIR/swinir_model_arch_v2.py
ADDED
@@ -0,0 +1,1017 @@
1 |
+
# -----------------------------------------------------------------------------------
|
2 |
+
# Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration, https://arxiv.org/abs/
|
3 |
+
# Written by Conde and Choi et al.
|
4 |
+
# -----------------------------------------------------------------------------------
|
5 |
+
|
6 |
+
import math
|
7 |
+
import numpy as np
|
8 |
+
import torch
|
9 |
+
import torch.nn as nn
|
10 |
+
import torch.nn.functional as F
|
11 |
+
import torch.utils.checkpoint as checkpoint
|
12 |
+
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
|
13 |
+
|
14 |
+
|
15 |
+
class Mlp(nn.Module):
|
16 |
+
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
|
17 |
+
super().__init__()
|
18 |
+
out_features = out_features or in_features
|
19 |
+
hidden_features = hidden_features or in_features
|
20 |
+
self.fc1 = nn.Linear(in_features, hidden_features)
|
21 |
+
self.act = act_layer()
|
22 |
+
self.fc2 = nn.Linear(hidden_features, out_features)
|
23 |
+
self.drop = nn.Dropout(drop)
|
24 |
+
|
25 |
+
def forward(self, x):
|
26 |
+
x = self.fc1(x)
|
27 |
+
x = self.act(x)
|
28 |
+
x = self.drop(x)
|
29 |
+
x = self.fc2(x)
|
30 |
+
x = self.drop(x)
|
31 |
+
return x
|
32 |
+
|
33 |
+
|
34 |
+
def window_partition(x, window_size):
|
35 |
+
"""
|
36 |
+
Args:
|
37 |
+
x: (B, H, W, C)
|
38 |
+
window_size (int): window size
|
39 |
+
Returns:
|
40 |
+
windows: (num_windows*B, window_size, window_size, C)
|
41 |
+
"""
|
42 |
+
B, H, W, C = x.shape
|
43 |
+
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
|
44 |
+
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
|
45 |
+
return windows
|
46 |
+
|
47 |
+
|
48 |
+
def window_reverse(windows, window_size, H, W):
|
49 |
+
"""
|
50 |
+
Args:
|
51 |
+
windows: (num_windows*B, window_size, window_size, C)
|
52 |
+
window_size (int): Window size
|
53 |
+
H (int): Height of image
|
54 |
+
W (int): Width of image
|
55 |
+
Returns:
|
56 |
+
x: (B, H, W, C)
|
57 |
+
"""
|
58 |
+
B = int(windows.shape[0] / (H * W / window_size / window_size))
|
59 |
+
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
|
60 |
+
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
|
61 |
+
return x
|
62 |
+
|
63 |
+
class WindowAttention(nn.Module):
|
64 |
+
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
|
65 |
+
It supports both of shifted and non-shifted window.
|
66 |
+
Args:
|
67 |
+
dim (int): Number of input channels.
|
68 |
+
window_size (tuple[int]): The height and width of the window.
|
69 |
+
num_heads (int): Number of attention heads.
|
70 |
+
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
|
71 |
+
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
|
72 |
+
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
|
73 |
+
pretrained_window_size (tuple[int]): The height and width of the window in pre-training.
|
74 |
+
"""
|
75 |
+
|
76 |
+
def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.,
|
77 |
+
pretrained_window_size=(0, 0)):
|
78 |
+
|
79 |
+
super().__init__()
|
80 |
+
self.dim = dim
|
81 |
+
self.window_size = window_size # Wh, Ww
|
82 |
+
self.pretrained_window_size = pretrained_window_size
|
83 |
+
self.num_heads = num_heads
|
84 |
+
|
85 |
+
self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))), requires_grad=True)
|
86 |
+
|
87 |
+
# mlp to generate continuous relative position bias
|
88 |
+
self.cpb_mlp = nn.Sequential(nn.Linear(2, 512, bias=True),
|
89 |
+
nn.ReLU(inplace=True),
|
90 |
+
nn.Linear(512, num_heads, bias=False))
|
91 |
+
|
92 |
+
# get relative_coords_table
|
93 |
+
relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.float32)
|
94 |
+
relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.float32)
|
95 |
+
relative_coords_table = torch.stack(
|
96 |
+
torch.meshgrid([relative_coords_h,
|
97 |
+
relative_coords_w])).permute(1, 2, 0).contiguous().unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2
|
98 |
+
if pretrained_window_size[0] > 0:
|
99 |
+
relative_coords_table[:, :, :, 0] /= (pretrained_window_size[0] - 1)
|
100 |
+
relative_coords_table[:, :, :, 1] /= (pretrained_window_size[1] - 1)
|
101 |
+
else:
|
102 |
+
relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1)
|
103 |
+
relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1)
|
104 |
+
relative_coords_table *= 8 # normalize to -8, 8
|
105 |
+
relative_coords_table = torch.sign(relative_coords_table) * torch.log2(
|
106 |
+
torch.abs(relative_coords_table) + 1.0) / np.log2(8)
|
107 |
+
|
108 |
+
self.register_buffer("relative_coords_table", relative_coords_table)
|
109 |
+
|
110 |
+
# get pair-wise relative position index for each token inside the window
|
111 |
+
coords_h = torch.arange(self.window_size[0])
|
112 |
+
coords_w = torch.arange(self.window_size[1])
|
113 |
+
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
|
114 |
+
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
|
115 |
+
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
|
116 |
+
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
|
117 |
+
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
|
118 |
+
relative_coords[:, :, 1] += self.window_size[1] - 1
|
119 |
+
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
|
120 |
+
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
|
121 |
+
self.register_buffer("relative_position_index", relative_position_index)
|
122 |
+
|
123 |
+
self.qkv = nn.Linear(dim, dim * 3, bias=False)
|
124 |
+
if qkv_bias:
|
125 |
+
self.q_bias = nn.Parameter(torch.zeros(dim))
|
126 |
+
self.v_bias = nn.Parameter(torch.zeros(dim))
|
127 |
+
else:
|
128 |
+
self.q_bias = None
|
129 |
+
self.v_bias = None
|
130 |
+
self.attn_drop = nn.Dropout(attn_drop)
|
131 |
+
self.proj = nn.Linear(dim, dim)
|
132 |
+
self.proj_drop = nn.Dropout(proj_drop)
|
133 |
+
self.softmax = nn.Softmax(dim=-1)
|
134 |
+
|
135 |
+
def forward(self, x, mask=None):
|
136 |
+
"""
|
137 |
+
Args:
|
138 |
+
x: input features with shape of (num_windows*B, N, C)
|
139 |
+
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
|
140 |
+
"""
|
141 |
+
B_, N, C = x.shape
|
142 |
+
qkv_bias = None
|
143 |
+
if self.q_bias is not None:
|
144 |
+
qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
|
145 |
+
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
|
146 |
+
qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
|
147 |
+
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
|
148 |
+
|
149 |
+
# cosine attention
|
150 |
+
attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1))
|
151 |
+
logit_scale = torch.clamp(self.logit_scale, max=torch.log(torch.tensor(1. / 0.01)).to(self.logit_scale.device)).exp()
|
152 |
+
attn = attn * logit_scale
|
153 |
+
|
154 |
+
relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads)
|
155 |
+
relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view(
|
156 |
+
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
|
157 |
+
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
|
158 |
+
relative_position_bias = 16 * torch.sigmoid(relative_position_bias)
|
159 |
+
attn = attn + relative_position_bias.unsqueeze(0)
|
160 |
+
|
161 |
+
if mask is not None:
|
162 |
+
nW = mask.shape[0]
|
163 |
+
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
|
164 |
+
attn = attn.view(-1, self.num_heads, N, N)
|
165 |
+
attn = self.softmax(attn)
|
166 |
+
else:
|
167 |
+
attn = self.softmax(attn)
|
168 |
+
|
169 |
+
attn = self.attn_drop(attn)
|
170 |
+
|
171 |
+
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
|
172 |
+
x = self.proj(x)
|
173 |
+
x = self.proj_drop(x)
|
174 |
+
return x
|
175 |
+
|
176 |
+
def extra_repr(self) -> str:
|
177 |
+
return f'dim={self.dim}, window_size={self.window_size}, ' \
|
178 |
+
f'pretrained_window_size={self.pretrained_window_size}, num_heads={self.num_heads}'
|
179 |
+
|
180 |
+
def flops(self, N):
|
181 |
+
# calculate flops for 1 window with token length of N
|
182 |
+
flops = 0
|
183 |
+
# qkv = self.qkv(x)
|
184 |
+
flops += N * self.dim * 3 * self.dim
|
185 |
+
# attn = (q @ k.transpose(-2, -1))
|
186 |
+
flops += self.num_heads * N * (self.dim // self.num_heads) * N
|
187 |
+
# x = (attn @ v)
|
188 |
+
flops += self.num_heads * N * N * (self.dim // self.num_heads)
|
189 |
+
# x = self.proj(x)
|
190 |
+
flops += N * self.dim * self.dim
|
191 |
+
return flops
|
192 |
+
|
193 |
+
class SwinTransformerBlock(nn.Module):
|
194 |
+
r""" Swin Transformer Block.
|
195 |
+
Args:
|
196 |
+
dim (int): Number of input channels.
|
197 |
+
input_resolution (tuple[int]): Input resolution.
|
198 |
+
num_heads (int): Number of attention heads.
|
199 |
+
window_size (int): Window size.
|
200 |
+
shift_size (int): Shift size for SW-MSA.
|
201 |
+
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
|
202 |
+
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
|
203 |
+
drop (float, optional): Dropout rate. Default: 0.0
|
204 |
+
attn_drop (float, optional): Attention dropout rate. Default: 0.0
|
205 |
+
drop_path (float, optional): Stochastic depth rate. Default: 0.0
|
206 |
+
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
|
207 |
+
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
|
208 |
+
pretrained_window_size (int): Window size in pre-training.
|
209 |
+
"""
|
210 |
+
|
211 |
+
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
|
212 |
+
mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0.,
|
213 |
+
act_layer=nn.GELU, norm_layer=nn.LayerNorm, pretrained_window_size=0):
|
214 |
+
super().__init__()
|
215 |
+
self.dim = dim
|
216 |
+
self.input_resolution = input_resolution
|
217 |
+
self.num_heads = num_heads
|
218 |
+
self.window_size = window_size
|
219 |
+
self.shift_size = shift_size
|
220 |
+
self.mlp_ratio = mlp_ratio
|
221 |
+
if min(self.input_resolution) <= self.window_size:
|
222 |
+
# if window size is larger than input resolution, we don't partition windows
|
223 |
+
self.shift_size = 0
|
224 |
+
self.window_size = min(self.input_resolution)
|
225 |
+
assert 0 <= self.shift_size < self.window_size, "shift_size must be in 0-window_size"
|
226 |
+
|
227 |
+
self.norm1 = norm_layer(dim)
|
228 |
+
self.attn = WindowAttention(
|
229 |
+
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
|
230 |
+
qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop,
|
231 |
+
pretrained_window_size=to_2tuple(pretrained_window_size))
|
232 |
+
|
233 |
+
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
|
234 |
+
self.norm2 = norm_layer(dim)
|
235 |
+
mlp_hidden_dim = int(dim * mlp_ratio)
|
236 |
+
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
|
237 |
+
|
238 |
+
if self.shift_size > 0:
|
239 |
+
attn_mask = self.calculate_mask(self.input_resolution)
|
240 |
+
else:
|
241 |
+
attn_mask = None
|
242 |
+
|
243 |
+
self.register_buffer("attn_mask", attn_mask)
|
244 |
+
|
245 |
+
def calculate_mask(self, x_size):
|
246 |
+
# calculate attention mask for SW-MSA
|
247 |
+
H, W = x_size
|
248 |
+
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
|
249 |
+
h_slices = (slice(0, -self.window_size),
|
250 |
+
slice(-self.window_size, -self.shift_size),
|
251 |
+
slice(-self.shift_size, None))
|
252 |
+
w_slices = (slice(0, -self.window_size),
|
253 |
+
slice(-self.window_size, -self.shift_size),
|
254 |
+
slice(-self.shift_size, None))
|
255 |
+
cnt = 0
|
256 |
+
for h in h_slices:
|
257 |
+
for w in w_slices:
|
258 |
+
img_mask[:, h, w, :] = cnt
|
259 |
+
cnt += 1
|
260 |
+
|
261 |
+
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
|
262 |
+
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
|
263 |
+
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
|
264 |
+
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
|
265 |
+
|
266 |
+
return attn_mask
|
267 |
+
|
268 |
+
def forward(self, x, x_size):
|
269 |
+
H, W = x_size
|
270 |
+
B, L, C = x.shape
|
271 |
+
#assert L == H * W, "input feature has wrong size"
|
272 |
+
|
273 |
+
shortcut = x
|
274 |
+
x = x.view(B, H, W, C)
|
275 |
+
|
276 |
+
# cyclic shift
|
277 |
+
if self.shift_size > 0:
|
278 |
+
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
|
279 |
+
else:
|
280 |
+
shifted_x = x
|
281 |
+
|
282 |
+
# partition windows
|
283 |
+
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
|
284 |
+
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
|
285 |
+
|
286 |
+
# W-MSA/SW-MSA (to be compatible for testing on images whose shapes are the multiple of window size)
|
287 |
+
if self.input_resolution == x_size:
|
288 |
+
attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
|
289 |
+
else:
|
290 |
+
attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))
|
291 |
+
|
292 |
+
# merge windows
|
293 |
+
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
|
294 |
+
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
|
295 |
+
|
296 |
+
# reverse cyclic shift
|
297 |
+
if self.shift_size > 0:
|
298 |
+
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
|
299 |
+
else:
|
300 |
+
x = shifted_x
|
301 |
+
x = x.view(B, H * W, C)
|
302 |
+
x = shortcut + self.drop_path(self.norm1(x))
|
303 |
+
|
304 |
+
# FFN
|
305 |
+
x = x + self.drop_path(self.norm2(self.mlp(x)))
|
306 |
+
|
307 |
+
return x
|
308 |
+
|
309 |
+
def extra_repr(self) -> str:
|
310 |
+
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
|
311 |
+
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
|
312 |
+
|
313 |
+
def flops(self):
|
314 |
+
flops = 0
|
315 |
+
H, W = self.input_resolution
|
316 |
+
# norm1
|
317 |
+
flops += self.dim * H * W
|
318 |
+
# W-MSA/SW-MSA
|
319 |
+
nW = H * W / self.window_size / self.window_size
|
320 |
+
flops += nW * self.attn.flops(self.window_size * self.window_size)
|
321 |
+
# mlp
|
322 |
+
flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
|
323 |
+
# norm2
|
324 |
+
flops += self.dim * H * W
|
325 |
+
return flops
|
326 |
+
|
327 |
+
class PatchMerging(nn.Module):
|
328 |
+
r""" Patch Merging Layer.
|
329 |
+
Args:
|
330 |
+
input_resolution (tuple[int]): Resolution of input feature.
|
331 |
+
dim (int): Number of input channels.
|
332 |
+
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
|
333 |
+
"""
|
334 |
+
|
335 |
+
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
|
336 |
+
super().__init__()
|
337 |
+
self.input_resolution = input_resolution
|
338 |
+
self.dim = dim
|
339 |
+
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
|
340 |
+
self.norm = norm_layer(2 * dim)
|
341 |
+
|
342 |
+
def forward(self, x):
|
343 |
+
"""
|
344 |
+
x: B, H*W, C
|
345 |
+
"""
|
346 |
+
H, W = self.input_resolution
|
347 |
+
B, L, C = x.shape
|
348 |
+
assert L == H * W, "input feature has wrong size"
|
349 |
+
assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."
|
350 |
+
|
351 |
+
x = x.view(B, H, W, C)
|
352 |
+
|
353 |
+
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
|
354 |
+
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
|
355 |
+
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
|
356 |
+
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
|
357 |
+
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
|
358 |
+
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
|
359 |
+
|
360 |
+
x = self.reduction(x)
|
361 |
+
x = self.norm(x)
|
362 |
+
|
363 |
+
return x
|
364 |
+
|
365 |
+
def extra_repr(self) -> str:
|
366 |
+
return f"input_resolution={self.input_resolution}, dim={self.dim}"
|
367 |
+
|
368 |
+
def flops(self):
|
369 |
+
H, W = self.input_resolution
|
370 |
+
flops = (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
|
371 |
+
flops += H * W * self.dim // 2
|
372 |
+
return flops
|
373 |
+
|
374 |
+
class BasicLayer(nn.Module):
|
375 |
+
""" A basic Swin Transformer layer for one stage.
|
376 |
+
Args:
|
377 |
+
dim (int): Number of input channels.
|
378 |
+
input_resolution (tuple[int]): Input resolution.
|
379 |
+
depth (int): Number of blocks.
|
380 |
+
num_heads (int): Number of attention heads.
|
381 |
+
window_size (int): Local window size.
|
382 |
+
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
|
383 |
+
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
|
384 |
+
drop (float, optional): Dropout rate. Default: 0.0
|
385 |
+
attn_drop (float, optional): Attention dropout rate. Default: 0.0
|
386 |
+
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
|
387 |
+
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
|
388 |
+
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
|
389 |
+
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
|
390 |
+
pretrained_window_size (int): Local window size in pre-training.
|
391 |
+
"""
|
392 |
+
|
393 |
+
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
|
394 |
+
mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0.,
|
395 |
+
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
|
396 |
+
pretrained_window_size=0):
|
397 |
+
|
398 |
+
super().__init__()
|
399 |
+
self.dim = dim
|
400 |
+
self.input_resolution = input_resolution
|
401 |
+
self.depth = depth
|
402 |
+
self.use_checkpoint = use_checkpoint
|
403 |
+
|
404 |
+
# build blocks
|
405 |
+
self.blocks = nn.ModuleList([
|
406 |
+
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
|
407 |
+
num_heads=num_heads, window_size=window_size,
|
408 |
+
shift_size=0 if (i % 2 == 0) else window_size // 2,
|
409 |
+
mlp_ratio=mlp_ratio,
|
410 |
+
qkv_bias=qkv_bias,
|
411 |
+
drop=drop, attn_drop=attn_drop,
|
412 |
+
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
|
413 |
+
norm_layer=norm_layer,
|
414 |
+
pretrained_window_size=pretrained_window_size)
|
415 |
+
for i in range(depth)])
|
416 |
+
|
417 |
+
# patch merging layer
|
418 |
+
if downsample is not None:
|
419 |
+
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
|
420 |
+
else:
|
421 |
+
self.downsample = None
|
422 |
+
|
423 |
+
def forward(self, x, x_size):
|
424 |
+
for blk in self.blocks:
|
425 |
+
if self.use_checkpoint:
|
426 |
+
x = checkpoint.checkpoint(blk, x, x_size)
|
427 |
+
else:
|
428 |
+
x = blk(x, x_size)
|
429 |
+
if self.downsample is not None:
|
430 |
+
x = self.downsample(x)
|
431 |
+
return x
|
432 |
+
|
433 |
+
def extra_repr(self) -> str:
|
434 |
+
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
|
435 |
+
|
436 |
+
def flops(self):
|
437 |
+
flops = 0
|
438 |
+
for blk in self.blocks:
|
439 |
+
flops += blk.flops()
|
440 |
+
if self.downsample is not None:
|
441 |
+
flops += self.downsample.flops()
|
442 |
+
return flops
|
443 |
+
|
444 |
+
def _init_respostnorm(self):
|
445 |
+
for blk in self.blocks:
|
446 |
+
nn.init.constant_(blk.norm1.bias, 0)
|
447 |
+
nn.init.constant_(blk.norm1.weight, 0)
|
448 |
+
nn.init.constant_(blk.norm2.bias, 0)
|
449 |
+
nn.init.constant_(blk.norm2.weight, 0)
|
450 |
+
|
451 |
+
class PatchEmbed(nn.Module):
|
452 |
+
r""" Image to Patch Embedding
|
453 |
+
Args:
|
454 |
+
img_size (int): Image size. Default: 224.
|
455 |
+
patch_size (int): Patch token size. Default: 4.
|
456 |
+
in_chans (int): Number of input image channels. Default: 3.
|
457 |
+
embed_dim (int): Number of linear projection output channels. Default: 96.
|
458 |
+
norm_layer (nn.Module, optional): Normalization layer. Default: None
|
459 |
+
"""
|
460 |
+
|
461 |
+
def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
|
462 |
+
super().__init__()
|
463 |
+
img_size = to_2tuple(img_size)
|
464 |
+
patch_size = to_2tuple(patch_size)
|
465 |
+
patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
|
466 |
+
self.img_size = img_size
|
467 |
+
self.patch_size = patch_size
|
468 |
+
self.patches_resolution = patches_resolution
|
469 |
+
self.num_patches = patches_resolution[0] * patches_resolution[1]
|
470 |
+
|
471 |
+
self.in_chans = in_chans
|
472 |
+
self.embed_dim = embed_dim
|
473 |
+
|
474 |
+
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
|
475 |
+
if norm_layer is not None:
|
476 |
+
self.norm = norm_layer(embed_dim)
|
477 |
+
else:
|
478 |
+
self.norm = None
|
479 |
+
|
480 |
+
def forward(self, x):
|
481 |
+
B, C, H, W = x.shape
|
482 |
+
# FIXME look at relaxing size constraints
|
483 |
+
# assert H == self.img_size[0] and W == self.img_size[1],
|
484 |
+
# f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
|
485 |
+
x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C
|
486 |
+
if self.norm is not None:
|
487 |
+
x = self.norm(x)
|
488 |
+
return x
|
489 |
+
|
490 |
+
def flops(self):
|
491 |
+
Ho, Wo = self.patches_resolution
|
492 |
+
flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
|
493 |
+
if self.norm is not None:
|
494 |
+
flops += Ho * Wo * self.embed_dim
|
495 |
+
return flops
|
496 |
+
|
497 |
+
class RSTB(nn.Module):
|
498 |
+
"""Residual Swin Transformer Block (RSTB).
|
499 |
+
|
500 |
+
Args:
|
501 |
+
dim (int): Number of input channels.
|
502 |
+
input_resolution (tuple[int]): Input resolution.
|
503 |
+
depth (int): Number of blocks.
|
504 |
+
num_heads (int): Number of attention heads.
|
505 |
+
window_size (int): Local window size.
|
506 |
+
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
|
507 |
+
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
|
508 |
+
drop (float, optional): Dropout rate. Default: 0.0
|
509 |
+
attn_drop (float, optional): Attention dropout rate. Default: 0.0
|
510 |
+
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
|
511 |
+
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
|
512 |
+
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
|
513 |
+
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
|
514 |
+
img_size: Input image size.
|
515 |
+
patch_size: Patch size.
|
516 |
+
resi_connection: The convolutional block before residual connection.
|
517 |
+
"""
|
518 |
+
|
519 |
+
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
|
520 |
+
mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0.,
|
521 |
+
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
|
522 |
+
img_size=224, patch_size=4, resi_connection='1conv'):
|
523 |
+
super(RSTB, self).__init__()
|
524 |
+
|
525 |
+
self.dim = dim
|
526 |
+
self.input_resolution = input_resolution
|
527 |
+
|
528 |
+
self.residual_group = BasicLayer(dim=dim,
|
529 |
+
input_resolution=input_resolution,
|
530 |
+
depth=depth,
|
531 |
+
num_heads=num_heads,
|
532 |
+
window_size=window_size,
|
533 |
+
mlp_ratio=mlp_ratio,
|
534 |
+
qkv_bias=qkv_bias,
|
535 |
+
drop=drop, attn_drop=attn_drop,
|
536 |
+
drop_path=drop_path,
|
537 |
+
norm_layer=norm_layer,
|
538 |
+
downsample=downsample,
|
539 |
+
use_checkpoint=use_checkpoint)
|
540 |
+
|
541 |
+
if resi_connection == '1conv':
|
542 |
+
self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
|
543 |
+
elif resi_connection == '3conv':
|
544 |
+
# to save parameters and memory
|
545 |
+
self.conv = nn.Sequential(nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
|
546 |
+
nn.Conv2d(dim // 4, dim // 4, 1, 1, 0),
|
547 |
+
nn.LeakyReLU(negative_slope=0.2, inplace=True),
|
548 |
+
nn.Conv2d(dim // 4, dim, 3, 1, 1))
|
549 |
+
|
550 |
+
self.patch_embed = PatchEmbed(
|
551 |
+
img_size=img_size, patch_size=patch_size, in_chans=dim, embed_dim=dim,
|
552 |
+
norm_layer=None)
|
553 |
+
|
554 |
+
self.patch_unembed = PatchUnEmbed(
|
555 |
+
img_size=img_size, patch_size=patch_size, in_chans=dim, embed_dim=dim,
|
556 |
+
norm_layer=None)
|
557 |
+
|
558 |
+
def forward(self, x, x_size):
|
559 |
+
return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x
|
560 |
+
|
561 |
+
def flops(self):
|
562 |
+
flops = 0
|
563 |
+
flops += self.residual_group.flops()
|
564 |
+
H, W = self.input_resolution
|
565 |
+
flops += H * W * self.dim * self.dim * 9
|
566 |
+
flops += self.patch_embed.flops()
|
567 |
+
flops += self.patch_unembed.flops()
|
568 |
+
|
569 |
+
return flops
|
570 |
+
|
571 |
+
class PatchUnEmbed(nn.Module):
|
572 |
+
r""" Image to Patch Unembedding
|
573 |
+
|
574 |
+
Args:
|
575 |
+
img_size (int): Image size. Default: 224.
|
576 |
+
patch_size (int): Patch token size. Default: 4.
|
577 |
+
in_chans (int): Number of input image channels. Default: 3.
|
578 |
+
embed_dim (int): Number of linear projection output channels. Default: 96.
|
579 |
+
norm_layer (nn.Module, optional): Normalization layer. Default: None
|
580 |
+
"""
|
581 |
+
|
582 |
+
def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
|
583 |
+
super().__init__()
|
584 |
+
img_size = to_2tuple(img_size)
|
585 |
+
patch_size = to_2tuple(patch_size)
|
586 |
+
patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
|
587 |
+
self.img_size = img_size
|
588 |
+
self.patch_size = patch_size
|
589 |
+
self.patches_resolution = patches_resolution
|
590 |
+
self.num_patches = patches_resolution[0] * patches_resolution[1]
|
591 |
+
|
592 |
+
self.in_chans = in_chans
|
593 |
+
self.embed_dim = embed_dim
|
594 |
+
|
595 |
+
def forward(self, x, x_size):
|
596 |
+
B, HW, C = x.shape
|
597 |
+
x = x.transpose(1, 2).view(B, self.embed_dim, x_size[0], x_size[1])  # B C Ph Pw
|
598 |
+
return x
|
599 |
+
|
600 |
+
def flops(self):
|
601 |
+
flops = 0
|
602 |
+
return flops
|
603 |
+
|
604 |
+
|
605 |
+
class Upsample(nn.Sequential):
|
606 |
+
"""Upsample module.
|
607 |
+
|
608 |
+
Args:
|
609 |
+
scale (int): Scale factor. Supported scales: 2^n and 3.
|
610 |
+
num_feat (int): Channel number of intermediate features.
|
611 |
+
"""
|
612 |
+
|
613 |
+
def __init__(self, scale, num_feat):
|
614 |
+
m = []
|
615 |
+
if (scale & (scale - 1)) == 0: # scale = 2^n
|
616 |
+
for _ in range(int(math.log(scale, 2))):
|
617 |
+
m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
|
618 |
+
m.append(nn.PixelShuffle(2))
|
619 |
+
elif scale == 3:
|
620 |
+
m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
|
621 |
+
m.append(nn.PixelShuffle(3))
|
622 |
+
else:
|
623 |
+
raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
|
624 |
+
super(Upsample, self).__init__(*m)
|
625 |
+
|
626 |
+
class Upsample_hf(nn.Sequential):
|
627 |
+
"""Upsample module.
|
628 |
+
|
629 |
+
Args:
|
630 |
+
scale (int): Scale factor. Supported scales: 2^n and 3.
|
631 |
+
num_feat (int): Channel number of intermediate features.
|
632 |
+
"""
|
633 |
+
|
634 |
+
def __init__(self, scale, num_feat):
|
635 |
+
m = []
|
636 |
+
if (scale & (scale - 1)) == 0: # scale = 2^n
|
637 |
+
for _ in range(int(math.log(scale, 2))):
|
638 |
+
m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
|
639 |
+
m.append(nn.PixelShuffle(2))
|
640 |
+
elif scale == 3:
|
641 |
+
m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
|
642 |
+
m.append(nn.PixelShuffle(3))
|
643 |
+
else:
|
644 |
+
raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
|
645 |
+
super(Upsample_hf, self).__init__(*m)
|
646 |
+
|
647 |
+
|
648 |
+
class UpsampleOneStep(nn.Sequential):
|
649 |
+
"""UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)
|
650 |
+
Used in lightweight SR to save parameters.
|
651 |
+
|
652 |
+
Args:
|
653 |
+
scale (int): Scale factor. Supported scales: 2^n and 3.
|
654 |
+
num_feat (int): Channel number of intermediate features.
|
655 |
+
|
656 |
+
"""
|
657 |
+
|
658 |
+
def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
|
659 |
+
self.num_feat = num_feat
|
660 |
+
self.input_resolution = input_resolution
|
661 |
+
m = []
|
662 |
+
m.append(nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1))
|
663 |
+
m.append(nn.PixelShuffle(scale))
|
664 |
+
super(UpsampleOneStep, self).__init__(*m)
|
665 |
+
|
666 |
+
def flops(self):
|
667 |
+
H, W = self.input_resolution
|
668 |
+
flops = H * W * self.num_feat * 3 * 9
|
669 |
+
return flops
|
670 |
+
|
671 |
+
|
672 |
+
|
673 |
+
class Swin2SR(nn.Module):
|
674 |
+
r""" Swin2SR
|
675 |
+
A PyTorch impl of : `Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration`.
|
676 |
+
|
677 |
+
Args:
|
678 |
+
img_size (int | tuple(int)): Input image size. Default 64
|
679 |
+
patch_size (int | tuple(int)): Patch size. Default: 1
|
680 |
+
in_chans (int): Number of input image channels. Default: 3
|
681 |
+
embed_dim (int): Patch embedding dimension. Default: 96
|
682 |
+
depths (tuple(int)): Depth of each Swin Transformer layer.
|
683 |
+
num_heads (tuple(int)): Number of attention heads in different layers.
|
684 |
+
window_size (int): Window size. Default: 7
|
685 |
+
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
|
686 |
+
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
|
687 |
+
drop_rate (float): Dropout rate. Default: 0
|
688 |
+
attn_drop_rate (float): Attention dropout rate. Default: 0
|
689 |
+
drop_path_rate (float): Stochastic depth rate. Default: 0.1
|
690 |
+
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
|
691 |
+
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
|
692 |
+
patch_norm (bool): If True, add normalization after patch embedding. Default: True
|
693 |
+
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
|
694 |
+
upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compress artifact reduction
|
695 |
+
img_range: Image range. 1. or 255.
|
696 |
+
upsampler: The reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
|
697 |
+
resi_connection: The convolutional block before residual connection. '1conv'/'3conv'
|
698 |
+
"""
|
699 |
+
|
700 |
+
def __init__(self, img_size=64, patch_size=1, in_chans=3,
|
701 |
+
embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6),
|
702 |
+
window_size=7, mlp_ratio=4., qkv_bias=True,
|
703 |
+
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
|
704 |
+
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
|
705 |
+
use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv',
|
706 |
+
**kwargs):
|
707 |
+
super(Swin2SR, self).__init__()
|
708 |
+
num_in_ch = in_chans
|
709 |
+
num_out_ch = in_chans
|
710 |
+
num_feat = 64
|
711 |
+
self.img_range = img_range
|
712 |
+
if in_chans == 3:
|
713 |
+
rgb_mean = (0.4488, 0.4371, 0.4040)
|
714 |
+
self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
|
715 |
+
else:
|
716 |
+
self.mean = torch.zeros(1, 1, 1, 1)
|
717 |
+
self.upscale = upscale
|
718 |
+
self.upsampler = upsampler
|
719 |
+
self.window_size = window_size
|
720 |
+
|
721 |
+
#####################################################################################################
|
722 |
+
################################### 1, shallow feature extraction ###################################
|
723 |
+
self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)
|
724 |
+
|
725 |
+
#####################################################################################################
|
726 |
+
################################### 2, deep feature extraction ######################################
|
727 |
+
self.num_layers = len(depths)
|
728 |
+
self.embed_dim = embed_dim
|
729 |
+
self.ape = ape
|
730 |
+
self.patch_norm = patch_norm
|
731 |
+
self.num_features = embed_dim
|
732 |
+
self.mlp_ratio = mlp_ratio
|
733 |
+
|
734 |
+
# split image into non-overlapping patches
|
735 |
+
self.patch_embed = PatchEmbed(
|
736 |
+
img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
|
737 |
+
norm_layer=norm_layer if self.patch_norm else None)
|
738 |
+
num_patches = self.patch_embed.num_patches
|
739 |
+
patches_resolution = self.patch_embed.patches_resolution
|
740 |
+
self.patches_resolution = patches_resolution
|
741 |
+
|
742 |
+
# merge non-overlapping patches into image
|
743 |
+
self.patch_unembed = PatchUnEmbed(
|
744 |
+
img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
|
745 |
+
norm_layer=norm_layer if self.patch_norm else None)
|
746 |
+
|
747 |
+
# absolute position embedding
|
748 |
+
if self.ape:
|
749 |
+
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
|
750 |
+
trunc_normal_(self.absolute_pos_embed, std=.02)
|
751 |
+
|
752 |
+
self.pos_drop = nn.Dropout(p=drop_rate)
|
753 |
+
|
754 |
+
# stochastic depth
|
755 |
+
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
|
756 |
+
|
757 |
+
# build Residual Swin Transformer blocks (RSTB)
|
758 |
+
self.layers = nn.ModuleList()
|
759 |
+
for i_layer in range(self.num_layers):
|
760 |
+
layer = RSTB(dim=embed_dim,
|
761 |
+
input_resolution=(patches_resolution[0],
|
762 |
+
patches_resolution[1]),
|
763 |
+
depth=depths[i_layer],
|
764 |
+
num_heads=num_heads[i_layer],
|
765 |
+
window_size=window_size,
|
766 |
+
mlp_ratio=self.mlp_ratio,
|
767 |
+
qkv_bias=qkv_bias,
|
768 |
+
drop=drop_rate, attn_drop=attn_drop_rate,
|
769 |
+
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results
|
770 |
+
norm_layer=norm_layer,
|
771 |
+
downsample=None,
|
772 |
+
use_checkpoint=use_checkpoint,
|
773 |
+
img_size=img_size,
|
774 |
+
patch_size=patch_size,
|
775 |
+
resi_connection=resi_connection
|
776 |
+
|
777 |
+
)
|
778 |
+
self.layers.append(layer)
|
779 |
+
|
780 |
+
if self.upsampler == 'pixelshuffle_hf':
|
781 |
+
self.layers_hf = nn.ModuleList()
|
782 |
+
for i_layer in range(self.num_layers):
|
783 |
+
layer = RSTB(dim=embed_dim,
|
784 |
+
input_resolution=(patches_resolution[0],
|
785 |
+
patches_resolution[1]),
|
786 |
+
depth=depths[i_layer],
|
787 |
+
num_heads=num_heads[i_layer],
|
788 |
+
window_size=window_size,
|
789 |
+
mlp_ratio=self.mlp_ratio,
|
790 |
+
qkv_bias=qkv_bias,
|
791 |
+
drop=drop_rate, attn_drop=attn_drop_rate,
|
792 |
+
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results
|
793 |
+
norm_layer=norm_layer,
|
794 |
+
downsample=None,
|
795 |
+
use_checkpoint=use_checkpoint,
|
796 |
+
img_size=img_size,
|
797 |
+
patch_size=patch_size,
|
798 |
+
resi_connection=resi_connection
|
799 |
+
|
800 |
+
)
|
801 |
+
self.layers_hf.append(layer)
|
802 |
+
|
803 |
+
self.norm = norm_layer(self.num_features)
|
804 |
+
|
805 |
+
# build the last conv layer in deep feature extraction
|
806 |
+
if resi_connection == '1conv':
|
807 |
+
self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
|
808 |
+
elif resi_connection == '3conv':
|
809 |
+
# to save parameters and memory
|
810 |
+
self.conv_after_body = nn.Sequential(nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),
|
811 |
+
nn.LeakyReLU(negative_slope=0.2, inplace=True),
|
812 |
+
nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),
|
813 |
+
nn.LeakyReLU(negative_slope=0.2, inplace=True),
|
814 |
+
nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))
|
815 |
+
|
816 |
+
#####################################################################################################
|
817 |
+
################################ 3, high quality image reconstruction ################################
|
818 |
+
if self.upsampler == 'pixelshuffle':
|
819 |
+
# for classical SR
|
820 |
+
self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
|
821 |
+
nn.LeakyReLU(inplace=True))
|
822 |
+
self.upsample = Upsample(upscale, num_feat)
|
823 |
+
self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
|
824 |
+
elif self.upsampler == 'pixelshuffle_aux':
|
825 |
+
self.conv_bicubic = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
|
826 |
+
self.conv_before_upsample = nn.Sequential(
|
827 |
+
nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
|
828 |
+
nn.LeakyReLU(inplace=True))
|
829 |
+
self.conv_aux = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
|
830 |
+
self.conv_after_aux = nn.Sequential(
|
831 |
+
nn.Conv2d(3, num_feat, 3, 1, 1),
|
832 |
+
nn.LeakyReLU(inplace=True))
|
833 |
+
self.upsample = Upsample(upscale, num_feat)
|
834 |
+
self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
|
835 |
+
|
836 |
+
elif self.upsampler == 'pixelshuffle_hf':
|
837 |
+
self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
|
838 |
+
nn.LeakyReLU(inplace=True))
|
839 |
+
self.upsample = Upsample(upscale, num_feat)
|
840 |
+
self.upsample_hf = Upsample_hf(upscale, num_feat)
|
841 |
+
self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
|
842 |
+
self.conv_first_hf = nn.Sequential(nn.Conv2d(num_feat, embed_dim, 3, 1, 1),
|
843 |
+
nn.LeakyReLU(inplace=True))
|
844 |
+
self.conv_after_body_hf = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
|
845 |
+
self.conv_before_upsample_hf = nn.Sequential(
|
846 |
+
nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
|
847 |
+
nn.LeakyReLU(inplace=True))
|
848 |
+
self.conv_last_hf = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
|
849 |
+
|
850 |
+
elif self.upsampler == 'pixelshuffledirect':
|
851 |
+
# for lightweight SR (to save parameters)
|
852 |
+
self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
|
853 |
+
(patches_resolution[0], patches_resolution[1]))
|
854 |
+
elif self.upsampler == 'nearest+conv':
|
855 |
+
# for real-world SR (less artifacts)
|
856 |
+
assert self.upscale == 4, 'only support x4 now.'
|
857 |
+
self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
|
858 |
+
nn.LeakyReLU(inplace=True))
|
859 |
+
self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
|
860 |
+
self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
|
861 |
+
self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
|
862 |
+
self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
|
863 |
+
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
|
864 |
+
else:
|
865 |
+
# for image denoising and JPEG compression artifact reduction
|
866 |
+
self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)
|
867 |
+
|
868 |
+
self.apply(self._init_weights)
|
869 |
+
|
870 |
+
def _init_weights(self, m):
|
871 |
+
if isinstance(m, nn.Linear):
|
872 |
+
trunc_normal_(m.weight, std=.02)
|
873 |
+
if isinstance(m, nn.Linear) and m.bias is not None:
|
874 |
+
nn.init.constant_(m.bias, 0)
|
875 |
+
elif isinstance(m, nn.LayerNorm):
|
876 |
+
nn.init.constant_(m.bias, 0)
|
877 |
+
nn.init.constant_(m.weight, 1.0)
|
878 |
+
|
879 |
+
@torch.jit.ignore
|
880 |
+
def no_weight_decay(self):
|
881 |
+
return {'absolute_pos_embed'}
|
882 |
+
|
883 |
+
@torch.jit.ignore
|
884 |
+
def no_weight_decay_keywords(self):
|
885 |
+
return {'relative_position_bias_table'}
|
886 |
+
|
887 |
+
def check_image_size(self, x):
|
888 |
+
_, _, h, w = x.size()
|
889 |
+
mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
|
890 |
+
mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
|
891 |
+
x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
|
892 |
+
return x
|
893 |
+
|
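# Worked example (added for illustration, not in the original source): with
# window_size=8 and an input of h=30, w=52, check_image_size reflect-pads by
# mod_pad_h = (8 - 30 % 8) % 8 = 2 and mod_pad_w = (8 - 52 % 8) % 8 = 4, giving a
# 32x56 tensor whose sides are exact multiples of the window size; forward() later
# crops the output back to H*upscale x W*upscale, so the padding never leaks into
# the returned image.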
894 |
+
def forward_features(self, x):
|
895 |
+
x_size = (x.shape[2], x.shape[3])
|
896 |
+
x = self.patch_embed(x)
|
897 |
+
if self.ape:
|
898 |
+
x = x + self.absolute_pos_embed
|
899 |
+
x = self.pos_drop(x)
|
900 |
+
|
901 |
+
for layer in self.layers:
|
902 |
+
x = layer(x, x_size)
|
903 |
+
|
904 |
+
x = self.norm(x) # B L C
|
905 |
+
x = self.patch_unembed(x, x_size)
|
906 |
+
|
907 |
+
return x
|
908 |
+
|
909 |
+
def forward_features_hf(self, x):
|
910 |
+
x_size = (x.shape[2], x.shape[3])
|
911 |
+
x = self.patch_embed(x)
|
912 |
+
if self.ape:
|
913 |
+
x = x + self.absolute_pos_embed
|
914 |
+
x = self.pos_drop(x)
|
915 |
+
|
916 |
+
for layer in self.layers_hf:
|
917 |
+
x = layer(x, x_size)
|
918 |
+
|
919 |
+
x = self.norm(x) # B L C
|
920 |
+
x = self.patch_unembed(x, x_size)
|
921 |
+
|
922 |
+
return x
|
923 |
+
|
924 |
+
def forward(self, x):
|
925 |
+
H, W = x.shape[2:]
|
926 |
+
x = self.check_image_size(x)
|
927 |
+
|
928 |
+
self.mean = self.mean.type_as(x)
|
929 |
+
x = (x - self.mean) * self.img_range
|
930 |
+
|
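# Explanatory note (not in the original source): inputs are shifted by the dataset
# RGB mean and scaled by img_range here; the inverse step (x / img_range + mean) is
# applied after reconstruction near the end of forward(), so with img_range=1 the
# model consumes and returns images in the usual [0, 1] range.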
931 |
+
if self.upsampler == 'pixelshuffle':
|
932 |
+
# for classical SR
|
933 |
+
x = self.conv_first(x)
|
934 |
+
x = self.conv_after_body(self.forward_features(x)) + x
|
935 |
+
x = self.conv_before_upsample(x)
|
936 |
+
x = self.conv_last(self.upsample(x))
|
937 |
+
elif self.upsampler == 'pixelshuffle_aux':
|
938 |
+
bicubic = F.interpolate(x, size=(H * self.upscale, W * self.upscale), mode='bicubic', align_corners=False)
|
939 |
+
bicubic = self.conv_bicubic(bicubic)
|
940 |
+
x = self.conv_first(x)
|
941 |
+
x = self.conv_after_body(self.forward_features(x)) + x
|
942 |
+
x = self.conv_before_upsample(x)
|
943 |
+
aux = self.conv_aux(x) # b, 3, LR_H, LR_W
|
944 |
+
x = self.conv_after_aux(aux)
|
945 |
+
x = self.upsample(x)[:, :, :H * self.upscale, :W * self.upscale] + bicubic[:, :, :H * self.upscale, :W * self.upscale]
|
946 |
+
x = self.conv_last(x)
|
947 |
+
aux = aux / self.img_range + self.mean
|
948 |
+
elif self.upsampler == 'pixelshuffle_hf':
|
949 |
+
# for classical SR with HF
|
950 |
+
x = self.conv_first(x)
|
951 |
+
x = self.conv_after_body(self.forward_features(x)) + x
|
952 |
+
x_before = self.conv_before_upsample(x)
|
953 |
+
x_out = self.conv_last(self.upsample(x_before))
|
954 |
+
|
955 |
+
x_hf = self.conv_first_hf(x_before)
|
956 |
+
x_hf = self.conv_after_body_hf(self.forward_features_hf(x_hf)) + x_hf
|
957 |
+
x_hf = self.conv_before_upsample_hf(x_hf)
|
958 |
+
x_hf = self.conv_last_hf(self.upsample_hf(x_hf))
|
959 |
+
x = x_out + x_hf
|
960 |
+
x_hf = x_hf / self.img_range + self.mean
|
961 |
+
|
962 |
+
elif self.upsampler == 'pixelshuffledirect':
|
963 |
+
# for lightweight SR
|
964 |
+
x = self.conv_first(x)
|
965 |
+
x = self.conv_after_body(self.forward_features(x)) + x
|
966 |
+
x = self.upsample(x)
|
967 |
+
elif self.upsampler == 'nearest+conv':
|
968 |
+
# for real-world SR
|
969 |
+
x = self.conv_first(x)
|
970 |
+
x = self.conv_after_body(self.forward_features(x)) + x
|
971 |
+
x = self.conv_before_upsample(x)
|
972 |
+
x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
|
973 |
+
x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
|
974 |
+
x = self.conv_last(self.lrelu(self.conv_hr(x)))
|
975 |
+
else:
|
976 |
+
# for image denoising and JPEG compression artifact reduction
|
977 |
+
x_first = self.conv_first(x)
|
978 |
+
res = self.conv_after_body(self.forward_features(x_first)) + x_first
|
979 |
+
x = x + self.conv_last(res)
|
980 |
+
|
981 |
+
x = x / self.img_range + self.mean
|
982 |
+
if self.upsampler == "pixelshuffle_aux":
|
983 |
+
return x[:, :, :H*self.upscale, :W*self.upscale], aux
|
984 |
+
|
985 |
+
elif self.upsampler == "pixelshuffle_hf":
|
986 |
+
x_out = x_out / self.img_range + self.mean
|
987 |
+
return x_out[:, :, :H*self.upscale, :W*self.upscale], x[:, :, :H*self.upscale, :W*self.upscale], x_hf[:, :, :H*self.upscale, :W*self.upscale]
|
988 |
+
|
989 |
+
else:
|
990 |
+
return x[:, :, :H*self.upscale, :W*self.upscale]
|
991 |
+
|
992 |
+
def flops(self):
|
993 |
+
flops = 0
|
994 |
+
H, W = self.patches_resolution
|
995 |
+
flops += H * W * 3 * self.embed_dim * 9
|
996 |
+
flops += self.patch_embed.flops()
|
997 |
+
for layer in self.layers:
|
998 |
+
flops += layer.flops()
|
999 |
+
flops += H * W * 3 * self.embed_dim * self.embed_dim
|
1000 |
+
flops += self.upsample.flops()
|
1001 |
+
return flops
|
1002 |
+
|
1003 |
+
|
1004 |
+
if __name__ == '__main__':
|
1005 |
+
upscale = 4
|
1006 |
+
window_size = 8
|
1007 |
+
height = (1024 // upscale // window_size + 1) * window_size
|
1008 |
+
width = (720 // upscale // window_size + 1) * window_size
|
1009 |
+
model = Swin2SR(upscale=2, img_size=(height, width),
|
1010 |
+
window_size=window_size, img_range=1., depths=[6, 6, 6, 6],
|
1011 |
+
embed_dim=60, num_heads=[6, 6, 6, 6], mlp_ratio=2, upsampler='pixelshuffledirect')
|
1012 |
+
print(model)
|
1013 |
+
print(height, width, model.flops() / 1e9)
|
1014 |
+
|
1015 |
+
x = torch.randn((1, 3, height, width))
|
1016 |
+
x = model(x)
|
1017 |
+
print(x.shape)
|
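# Note (illustrative, not in the original file): with the settings above the test input
# is 1x3x264x184, already a multiple of window_size=8, and because the model is built
# with upscale=2 and upsampler='pixelshuffledirect' the printed shape is
# torch.Size([1, 3, 528, 368]); the local variable upscale = 4 only controls the test
# resolution, not the model's actual scale factor.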
extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
ADDED
@@ -0,0 +1,962 @@
1 |
+
onUiLoaded(async() => {
|
2 |
+
const elementIDs = {
|
3 |
+
img2imgTabs: "#mode_img2img .tab-nav",
|
4 |
+
inpaint: "#img2maskimg",
|
5 |
+
inpaintSketch: "#inpaint_sketch",
|
6 |
+
rangeGroup: "#img2img_column_size",
|
7 |
+
sketch: "#img2img_sketch"
|
8 |
+
};
|
9 |
+
const tabNameToElementId = {
|
10 |
+
"Inpaint sketch": elementIDs.inpaintSketch,
|
11 |
+
"Inpaint": elementIDs.inpaint,
|
12 |
+
"Sketch": elementIDs.sketch
|
13 |
+
};
|
14 |
+
|
15 |
+
|
16 |
+
// Helper functions
|
17 |
+
// Get active tab
|
18 |
+
|
19 |
+
/**
|
20 |
+
* Waits for an element to be present in the DOM.
|
21 |
+
*/
|
22 |
+
const waitForElement = (id) => new Promise(resolve => {
|
23 |
+
const checkForElement = () => {
|
24 |
+
const element = document.querySelector(id);
|
25 |
+
if (element) return resolve(element);
|
26 |
+
setTimeout(checkForElement, 100);
|
27 |
+
};
|
28 |
+
checkForElement();
|
29 |
+
});
|
30 |
+
|
31 |
+
function getActiveTab(elements, all = false) {
|
32 |
+
const tabs = elements.img2imgTabs.querySelectorAll("button");
|
33 |
+
|
34 |
+
if (all) return tabs;
|
35 |
+
|
36 |
+
for (let tab of tabs) {
|
37 |
+
if (tab.classList.contains("selected")) {
|
38 |
+
return tab;
|
39 |
+
}
|
40 |
+
}
|
41 |
+
}
|
42 |
+
|
43 |
+
// Get tab ID
|
44 |
+
function getTabId(elements) {
|
45 |
+
const activeTab = getActiveTab(elements);
|
46 |
+
return tabNameToElementId[activeTab.innerText];
|
47 |
+
}
|
48 |
+
|
49 |
+
// Wait until opts loaded
|
50 |
+
async function waitForOpts() {
|
51 |
+
for (; ;) {
|
52 |
+
if (window.opts && Object.keys(window.opts).length) {
|
53 |
+
return window.opts;
|
54 |
+
}
|
55 |
+
await new Promise(resolve => setTimeout(resolve, 100));
|
56 |
+
}
|
57 |
+
}
|
58 |
+
|
59 |
+
// Detect whether the element has a horizontal scroll bar
|
60 |
+
function hasHorizontalScrollbar(element) {
|
61 |
+
return element.scrollWidth > element.clientWidth;
|
62 |
+
}
|
63 |
+
|
64 |
+
// Function for defining the "Ctrl", "Shift" and "Alt" keys
|
65 |
+
function isModifierKey(event, key) {
|
66 |
+
switch (key) {
|
67 |
+
case "Ctrl":
|
68 |
+
return event.ctrlKey;
|
69 |
+
case "Shift":
|
70 |
+
return event.shiftKey;
|
71 |
+
case "Alt":
|
72 |
+
return event.altKey;
|
73 |
+
default:
|
74 |
+
return false;
|
75 |
+
}
|
76 |
+
}
|
77 |
+
|
78 |
+
// Check if hotkey is valid
|
79 |
+
function isValidHotkey(value) {
|
80 |
+
const specialKeys = ["Ctrl", "Alt", "Shift", "Disable"];
|
81 |
+
return (
|
82 |
+
(typeof value === "string" &&
|
83 |
+
value.length === 1 &&
|
84 |
+
/[a-z]/i.test(value)) ||
|
85 |
+
specialKeys.includes(value)
|
86 |
+
);
|
87 |
+
}
|
88 |
+
|
89 |
+
// Normalize hotkey
|
90 |
+
function normalizeHotkey(hotkey) {
|
91 |
+
return hotkey.length === 1 ? "Key" + hotkey.toUpperCase() : hotkey;
|
92 |
+
}
|
93 |
+
|
94 |
+
// Format hotkey for display
|
95 |
+
function formatHotkeyForDisplay(hotkey) {
|
96 |
+
return hotkey.startsWith("Key") ? hotkey.slice(3) : hotkey;
|
97 |
+
}
|
98 |
+
|
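// Illustrative examples (not part of the original script): normalizeHotkey maps a
// single letter from the settings to the KeyboardEvent.code form used below, and
// formatHotkeyForDisplay reverses it for error messages:
//   normalizeHotkey("z")           // -> "KeyZ"
//   normalizeHotkey("Shift")       // -> "Shift" (modifier names pass through)
//   formatHotkeyForDisplay("KeyZ") // -> "Z"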
99 |
+
// Create hotkey configuration with the provided options
|
100 |
+
function createHotkeyConfig(defaultHotkeysConfig, hotkeysConfigOpts) {
|
101 |
+
const result = {}; // Resulting hotkey configuration
|
102 |
+
const usedKeys = new Set(); // Set of used hotkeys
|
103 |
+
|
104 |
+
// Iterate through defaultHotkeysConfig keys
|
105 |
+
for (const key in defaultHotkeysConfig) {
|
106 |
+
const userValue = hotkeysConfigOpts[key]; // User-provided hotkey value
|
107 |
+
const defaultValue = defaultHotkeysConfig[key]; // Default hotkey value
|
108 |
+
|
109 |
+
// Apply appropriate value for undefined, boolean, or object userValue
|
110 |
+
if (
|
111 |
+
userValue === undefined ||
|
112 |
+
typeof userValue === "boolean" ||
|
113 |
+
typeof userValue === "object" ||
|
114 |
+
userValue === "disable"
|
115 |
+
) {
|
116 |
+
result[key] =
|
117 |
+
userValue === undefined ? defaultValue : userValue;
|
118 |
+
} else if (isValidHotkey(userValue)) {
|
119 |
+
const normalizedUserValue = normalizeHotkey(userValue);
|
120 |
+
|
121 |
+
// Check for conflicting hotkeys
|
122 |
+
if (!usedKeys.has(normalizedUserValue)) {
|
123 |
+
usedKeys.add(normalizedUserValue);
|
124 |
+
result[key] = normalizedUserValue;
|
125 |
+
} else {
|
126 |
+
console.error(
|
127 |
+
`Hotkey: ${formatHotkeyForDisplay(
|
128 |
+
userValue
|
129 |
+
)} for ${key} is repeated and conflicts with another hotkey. The default hotkey is used: ${formatHotkeyForDisplay(
|
130 |
+
defaultValue
|
131 |
+
)}`
|
132 |
+
);
|
133 |
+
result[key] = defaultValue;
|
134 |
+
}
|
135 |
+
} else {
|
136 |
+
console.error(
|
137 |
+
`Hotkey: ${formatHotkeyForDisplay(
|
138 |
+
userValue
|
139 |
+
)} for ${key} is not valid. The default hotkey is used: ${formatHotkeyForDisplay(
|
140 |
+
defaultValue
|
141 |
+
)}`
|
142 |
+
);
|
143 |
+
result[key] = defaultValue;
|
144 |
+
}
|
145 |
+
}
|
146 |
+
|
147 |
+
return result;
|
148 |
+
}
|
149 |
+
|
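// Illustration (not part of the original script): with the defaults defined further
// down, a user config of {canvas_hotkey_reset: "z", canvas_hotkey_fullscreen: "z"}
// resolves to {canvas_hotkey_reset: "KeyZ", canvas_hotkey_fullscreen: "KeyS"}: the
// first "z" is normalized and accepted, the duplicate logs an error and falls back
// to its default, and booleans or unset keys are passed through or defaulted as-is.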
150 |
+
// Disables functions in the config object based on the provided list of function names
|
151 |
+
function disableFunctions(config, disabledFunctions) {
|
152 |
+
// Bind the hasOwnProperty method to the functionMap object to avoid errors
|
153 |
+
const hasOwnProperty =
|
154 |
+
Object.prototype.hasOwnProperty.bind(functionMap);
|
155 |
+
|
156 |
+
// Loop through the disabledFunctions array and disable the corresponding functions in the config object
|
157 |
+
disabledFunctions.forEach(funcName => {
|
158 |
+
if (hasOwnProperty(funcName)) {
|
159 |
+
const key = functionMap[funcName];
|
160 |
+
config[key] = "disable";
|
161 |
+
}
|
162 |
+
});
|
163 |
+
|
164 |
+
// Return the updated config object
|
165 |
+
return config;
|
166 |
+
}
|
167 |
+
|
168 |
+
/**
|
169 |
+
* The restoreImgRedMask function displays a red mask around an image to indicate the aspect ratio.
|
170 |
+
* If the image display property is set to 'none', the mask breaks. To fix this, the function
|
171 |
+
* temporarily sets the display property to 'block' and then hides the mask again after 300 milliseconds
|
172 |
+
* to avoid breaking the canvas. Additionally, the function adjusts the mask to work correctly on
|
173 |
+
* very long images.
|
174 |
+
*/
|
175 |
+
function restoreImgRedMask(elements) {
|
176 |
+
const mainTabId = getTabId(elements);
|
177 |
+
|
178 |
+
if (!mainTabId) return;
|
179 |
+
|
180 |
+
const mainTab = gradioApp().querySelector(mainTabId);
|
181 |
+
const img = mainTab.querySelector("img");
|
182 |
+
const imageARPreview = gradioApp().querySelector("#imageARPreview");
|
183 |
+
|
184 |
+
if (!img || !imageARPreview) return;
|
185 |
+
|
186 |
+
imageARPreview.style.transform = "";
|
187 |
+
if (parseFloat(mainTab.style.width) > 865) {
|
188 |
+
const transformString = mainTab.style.transform;
|
189 |
+
const scaleMatch = transformString.match(
|
190 |
+
/scale\(([-+]?[0-9]*\.?[0-9]+)\)/
|
191 |
+
);
|
192 |
+
let zoom = 1; // default zoom
|
193 |
+
|
194 |
+
if (scaleMatch && scaleMatch[1]) {
|
195 |
+
zoom = Number(scaleMatch[1]);
|
196 |
+
}
|
197 |
+
|
198 |
+
imageARPreview.style.transformOrigin = "0 0";
|
199 |
+
imageARPreview.style.transform = `scale(${zoom})`;
|
200 |
+
}
|
201 |
+
|
202 |
+
if (img.style.display !== "none") return;
|
203 |
+
|
204 |
+
img.style.display = "block";
|
205 |
+
|
206 |
+
setTimeout(() => {
|
207 |
+
img.style.display = "none";
|
208 |
+
}, 400);
|
209 |
+
}
|
210 |
+
|
211 |
+
const hotkeysConfigOpts = await waitForOpts();
|
212 |
+
|
213 |
+
// Default config
|
214 |
+
const defaultHotkeysConfig = {
|
215 |
+
canvas_hotkey_zoom: "Alt",
|
216 |
+
canvas_hotkey_adjust: "Ctrl",
|
217 |
+
canvas_hotkey_reset: "KeyR",
|
218 |
+
canvas_hotkey_fullscreen: "KeyS",
|
219 |
+
canvas_hotkey_move: "KeyF",
|
220 |
+
canvas_hotkey_overlap: "KeyO",
|
221 |
+
canvas_disabled_functions: [],
|
222 |
+
canvas_show_tooltip: true,
|
223 |
+
canvas_auto_expand: true,
|
224 |
+
canvas_blur_prompt: false,
|
225 |
+
};
|
226 |
+
|
227 |
+
const functionMap = {
|
228 |
+
"Zoom": "canvas_hotkey_zoom",
|
229 |
+
"Adjust brush size": "canvas_hotkey_adjust",
|
230 |
+
"Moving canvas": "canvas_hotkey_move",
|
231 |
+
"Fullscreen": "canvas_hotkey_fullscreen",
|
232 |
+
"Reset Zoom": "canvas_hotkey_reset",
|
233 |
+
"Overlap": "canvas_hotkey_overlap"
|
234 |
+
};
|
235 |
+
|
236 |
+
// Loading the configuration from opts
|
237 |
+
const preHotkeysConfig = createHotkeyConfig(
|
238 |
+
defaultHotkeysConfig,
|
239 |
+
hotkeysConfigOpts
|
240 |
+
);
|
241 |
+
|
242 |
+
// Disable functions that are not needed by the user
|
243 |
+
const hotkeysConfig = disableFunctions(
|
244 |
+
preHotkeysConfig,
|
245 |
+
preHotkeysConfig.canvas_disabled_functions
|
246 |
+
);
|
247 |
+
|
248 |
+
let isMoving = false;
|
249 |
+
let mouseX, mouseY;
|
250 |
+
let activeElement;
|
251 |
+
|
252 |
+
const elements = Object.fromEntries(
|
253 |
+
Object.keys(elementIDs).map(id => [
|
254 |
+
id,
|
255 |
+
gradioApp().querySelector(elementIDs[id])
|
256 |
+
])
|
257 |
+
);
|
258 |
+
const elemData = {};
|
259 |
+
|
260 |
+
// Apply functionality to the range inputs. Restore redmask and correct for long images.
|
261 |
+
const rangeInputs = elements.rangeGroup ?
|
262 |
+
Array.from(elements.rangeGroup.querySelectorAll("input")) :
|
263 |
+
[
|
264 |
+
gradioApp().querySelector("#img2img_width input[type='range']"),
|
265 |
+
gradioApp().querySelector("#img2img_height input[type='range']")
|
266 |
+
];
|
267 |
+
|
268 |
+
for (const input of rangeInputs) {
|
269 |
+
input?.addEventListener("input", () => restoreImgRedMask(elements));
|
270 |
+
}
|
271 |
+
|
272 |
+
function applyZoomAndPan(elemId, isExtension = true) {
|
273 |
+
const targetElement = gradioApp().querySelector(elemId);
|
274 |
+
|
275 |
+
if (!targetElement) {
|
276 |
+
console.log("Element not found");
|
277 |
+
return;
|
278 |
+
}
|
279 |
+
|
280 |
+
targetElement.style.transformOrigin = "0 0";
|
281 |
+
|
282 |
+
elemData[elemId] = {
|
283 |
+
zoom: 1,
|
284 |
+
panX: 0,
|
285 |
+
panY: 0
|
286 |
+
};
|
287 |
+
let fullScreenMode = false;
|
288 |
+
|
289 |
+
// Create tooltip
|
290 |
+
function createTooltip() {
|
291 |
+
const toolTipElement =
|
292 |
+
targetElement.querySelector(".image-container");
|
293 |
+
const tooltip = document.createElement("div");
|
294 |
+
tooltip.className = "canvas-tooltip";
|
295 |
+
|
296 |
+
// Creating an item of information
|
297 |
+
const info = document.createElement("i");
|
298 |
+
info.className = "canvas-tooltip-info";
|
299 |
+
info.textContent = "";
|
300 |
+
|
301 |
+
// Create a container for the contents of the tooltip
|
302 |
+
const tooltipContent = document.createElement("div");
|
303 |
+
tooltipContent.className = "canvas-tooltip-content";
|
304 |
+
|
305 |
+
// Define an array with hotkey information and their actions
|
306 |
+
const hotkeysInfo = [
|
307 |
+
{
|
308 |
+
configKey: "canvas_hotkey_zoom",
|
309 |
+
action: "Zoom canvas",
|
310 |
+
keySuffix: " + wheel"
|
311 |
+
},
|
312 |
+
{
|
313 |
+
configKey: "canvas_hotkey_adjust",
|
314 |
+
action: "Adjust brush size",
|
315 |
+
keySuffix: " + wheel"
|
316 |
+
},
|
317 |
+
{configKey: "canvas_hotkey_reset", action: "Reset zoom"},
|
318 |
+
{
|
319 |
+
configKey: "canvas_hotkey_fullscreen",
|
320 |
+
action: "Fullscreen mode"
|
321 |
+
},
|
322 |
+
{configKey: "canvas_hotkey_move", action: "Move canvas"},
|
323 |
+
{configKey: "canvas_hotkey_overlap", action: "Overlap"}
|
324 |
+
];
|
325 |
+
|
326 |
+
// Create hotkeys array with disabled property based on the config values
|
327 |
+
const hotkeys = hotkeysInfo.map(info => {
|
328 |
+
const configValue = hotkeysConfig[info.configKey];
|
329 |
+
const key = info.keySuffix ?
|
330 |
+
`${configValue}${info.keySuffix}` :
|
331 |
+
configValue.charAt(configValue.length - 1);
|
332 |
+
return {
|
333 |
+
key,
|
334 |
+
action: info.action,
|
335 |
+
disabled: configValue === "disable"
|
336 |
+
};
|
337 |
+
});
|
338 |
+
|
339 |
+
for (const hotkey of hotkeys) {
|
340 |
+
if (hotkey.disabled) {
|
341 |
+
continue;
|
342 |
+
}
|
343 |
+
|
344 |
+
const p = document.createElement("p");
|
345 |
+
p.innerHTML = `<b>${hotkey.key}</b> - ${hotkey.action}`;
|
346 |
+
tooltipContent.appendChild(p);
|
347 |
+
}
|
348 |
+
|
349 |
+
// Add information and content elements to the tooltip element
|
350 |
+
tooltip.appendChild(info);
|
351 |
+
tooltip.appendChild(tooltipContent);
|
352 |
+
|
353 |
+
// Add a hint element to the target element
|
354 |
+
toolTipElement.appendChild(tooltip);
|
355 |
+
}
|
356 |
+
|
357 |
+
// Show the tooltip if the setting is enabled
|
358 |
+
if (hotkeysConfig.canvas_show_tooltip) {
|
359 |
+
createTooltip();
|
360 |
+
}
|
361 |
+
|
362 |
+
// The img tag interferes with zooming and can produce blank white canvases. Hiding it works around the problem and has no effect on the webui.
|
363 |
+
function fixCanvas() {
|
364 |
+
const activeTab = getActiveTab(elements).textContent.trim();
|
365 |
+
|
366 |
+
if (activeTab !== "img2img") {
|
367 |
+
const img = targetElement.querySelector(`${elemId} img`);
|
368 |
+
|
369 |
+
if (img && img.style.display !== "none") {
|
370 |
+
img.style.display = "none";
|
371 |
+
img.style.visibility = "hidden";
|
372 |
+
}
|
373 |
+
}
|
374 |
+
}
|
375 |
+
|
376 |
+
// Reset the zoom level and pan position of the target element to their initial values
|
377 |
+
function resetZoom() {
|
378 |
+
elemData[elemId] = {
|
379 |
+
zoomLevel: 1,
|
380 |
+
panX: 0,
|
381 |
+
panY: 0
|
382 |
+
};
|
383 |
+
|
384 |
+
if (isExtension) {
|
385 |
+
targetElement.style.overflow = "hidden";
|
386 |
+
}
|
387 |
+
|
388 |
+
targetElement.isZoomed = false;
|
389 |
+
|
390 |
+
fixCanvas();
|
391 |
+
targetElement.style.transform = `scale(${elemData[elemId].zoomLevel}) translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px)`;
|
392 |
+
|
393 |
+
const canvas = gradioApp().querySelector(
|
394 |
+
`${elemId} canvas[key="interface"]`
|
395 |
+
);
|
396 |
+
|
397 |
+
toggleOverlap("off");
|
398 |
+
fullScreenMode = false;
|
399 |
+
|
400 |
+
const closeBtn = targetElement.querySelector("button[aria-label='Remove Image']");
|
401 |
+
if (closeBtn) {
|
402 |
+
closeBtn.addEventListener("click", resetZoom);
|
403 |
+
}
|
404 |
+
|
405 |
+
if (canvas && isExtension) {
|
406 |
+
const parentElement = targetElement.closest('[id^="component-"]');
|
407 |
+
if (
|
408 |
+
canvas &&
|
409 |
+
parseFloat(canvas.style.width) > parentElement.offsetWidth &&
|
410 |
+
parseFloat(targetElement.style.width) > parentElement.offsetWidth
|
411 |
+
) {
|
412 |
+
fitToElement();
|
413 |
+
return;
|
414 |
+
}
|
415 |
+
|
416 |
+
}
|
417 |
+
|
418 |
+
if (
|
419 |
+
canvas &&
|
420 |
+
!isExtension &&
|
421 |
+
parseFloat(canvas.style.width) > 865 &&
|
422 |
+
parseFloat(targetElement.style.width) > 865
|
423 |
+
) {
|
424 |
+
fitToElement();
|
425 |
+
return;
|
426 |
+
}
|
427 |
+
|
428 |
+
targetElement.style.width = "";
|
429 |
+
}
|
430 |
+
|
431 |
+
// Toggle the zIndex of the target element between two values, allowing it to overlap or be overlapped by other elements
|
432 |
+
function toggleOverlap(forced = "") {
|
433 |
+
const zIndex1 = "0";
|
434 |
+
const zIndex2 = "998";
|
435 |
+
|
436 |
+
targetElement.style.zIndex =
|
437 |
+
targetElement.style.zIndex !== zIndex2 ? zIndex2 : zIndex1;
|
438 |
+
|
439 |
+
if (forced === "off") {
|
440 |
+
targetElement.style.zIndex = zIndex1;
|
441 |
+
} else if (forced === "on") {
|
442 |
+
targetElement.style.zIndex = zIndex2;
|
443 |
+
}
|
444 |
+
}
|
445 |
+
|
446 |
+
// Adjust the brush size based on the deltaY value from a mouse wheel event
|
447 |
+
function adjustBrushSize(
|
448 |
+
elemId,
|
449 |
+
deltaY,
|
450 |
+
withoutValue = false,
|
451 |
+
percentage = 5
|
452 |
+
) {
|
453 |
+
const input =
|
454 |
+
gradioApp().querySelector(
|
455 |
+
`${elemId} input[aria-label='Brush radius']`
|
456 |
+
) ||
|
457 |
+
gradioApp().querySelector(
|
458 |
+
`${elemId} button[aria-label="Use brush"]`
|
459 |
+
);
|
460 |
+
|
461 |
+
if (input) {
|
462 |
+
input.click();
|
463 |
+
if (!withoutValue) {
|
464 |
+
const maxValue =
|
465 |
+
parseFloat(input.getAttribute("max")) || 100;
|
466 |
+
const changeAmount = maxValue * (percentage / 100);
|
467 |
+
const newValue =
|
468 |
+
parseFloat(input.value) +
|
469 |
+
(deltaY > 0 ? -changeAmount : changeAmount);
|
470 |
+
input.value = Math.min(Math.max(newValue, 0), maxValue);
|
471 |
+
input.dispatchEvent(new Event("change"));
|
472 |
+
}
|
473 |
+
}
|
474 |
+
}
|
475 |
+
|
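// Worked example (illustration only): with the default percentage of 5 and a brush
// input whose max attribute is 100, each wheel notch changes the radius by 5;
// scrolling down (deltaY > 0) on a value of 40 yields 35, scrolling up yields 45,
// clamped to [0, max] before the synthetic "change" event is dispatched.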
476 |
+
// Reset zoom when uploading a new image
|
477 |
+
const fileInput = gradioApp().querySelector(
|
478 |
+
`${elemId} input[type="file"][accept="image/*"].svelte-116rqfv`
|
479 |
+
);
|
480 |
+
fileInput.addEventListener("click", resetZoom);
|
481 |
+
|
482 |
+
// Update the zoom level and pan position of the target element based on the values of the zoomLevel, panX and panY variables
|
483 |
+
function updateZoom(newZoomLevel, mouseX, mouseY) {
|
484 |
+
newZoomLevel = Math.max(0.1, Math.min(newZoomLevel, 15));
|
485 |
+
|
486 |
+
elemData[elemId].panX +=
|
487 |
+
mouseX - (mouseX * newZoomLevel) / elemData[elemId].zoomLevel;
|
488 |
+
elemData[elemId].panY +=
|
489 |
+
mouseY - (mouseY * newZoomLevel) / elemData[elemId].zoomLevel;
|
490 |
+
|
491 |
+
targetElement.style.transformOrigin = "0 0";
|
492 |
+
targetElement.style.transform = `translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px) scale(${newZoomLevel})`;
|
493 |
+
|
494 |
+
toggleOverlap("on");
|
495 |
+
if (isExtension) {
|
496 |
+
targetElement.style.overflow = "visible";
|
497 |
+
}
|
498 |
+
|
499 |
+
return newZoomLevel;
|
500 |
+
}
|
501 |
+
|
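// Explanatory note (not in the original file): the pan adjustment above offsets the
// element by the difference between where the cursor-relative point is drawn at the
// old zoom and where it would be drawn at the new zoom, so zooming stays roughly
// anchored under the cursor. For example, going from zoomLevel 1 to 2 with
// mouseX = 100 adds 100 - (100 * 2) / 1 = -100 to panX.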
502 |
+
// Change the zoom level based on user interaction
|
503 |
+
function changeZoomLevel(operation, e) {
|
504 |
+
if (isModifierKey(e, hotkeysConfig.canvas_hotkey_zoom)) {
|
505 |
+
e.preventDefault();
|
506 |
+
|
507 |
+
let zoomPosX, zoomPosY;
|
508 |
+
let delta = 0.2;
|
509 |
+
if (elemData[elemId].zoomLevel > 7) {
|
510 |
+
delta = 0.9;
|
511 |
+
} else if (elemData[elemId].zoomLevel > 2) {
|
512 |
+
delta = 0.6;
|
513 |
+
}
|
514 |
+
|
515 |
+
zoomPosX = e.clientX;
|
516 |
+
zoomPosY = e.clientY;
|
517 |
+
|
518 |
+
fullScreenMode = false;
|
519 |
+
elemData[elemId].zoomLevel = updateZoom(
|
520 |
+
elemData[elemId].zoomLevel +
|
521 |
+
(operation === "+" ? delta : -delta),
|
522 |
+
zoomPosX - targetElement.getBoundingClientRect().left,
|
523 |
+
zoomPosY - targetElement.getBoundingClientRect().top
|
524 |
+
);
|
525 |
+
|
526 |
+
targetElement.isZoomed = true;
|
527 |
+
}
|
528 |
+
}
|
529 |
+
|
530 |
+
/**
|
531 |
+
* This function fits the target element to the screen by calculating
|
532 |
+
* the required scale and offsets. It also updates the global variables
|
533 |
+
* zoomLevel, panX, and panY to reflect the new state.
|
534 |
+
*/
|
535 |
+
|
536 |
+
function fitToElement() {
|
537 |
+
//Reset Zoom
|
538 |
+
targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
|
539 |
+
|
540 |
+
let parentElement;
|
541 |
+
|
542 |
+
if (isExtension) {
|
543 |
+
parentElement = targetElement.closest('[id^="component-"]');
|
544 |
+
} else {
|
545 |
+
parentElement = targetElement.parentElement;
|
546 |
+
}
|
547 |
+
|
548 |
+
|
549 |
+
// Get element and screen dimensions
|
550 |
+
const elementWidth = targetElement.offsetWidth;
|
551 |
+
const elementHeight = targetElement.offsetHeight;
|
552 |
+
|
553 |
+
const screenWidth = parentElement.clientWidth;
|
554 |
+
const screenHeight = parentElement.clientHeight;
|
555 |
+
|
556 |
+
// Get element's coordinates relative to the parent element
|
557 |
+
const elementRect = targetElement.getBoundingClientRect();
|
558 |
+
const parentRect = parentElement.getBoundingClientRect();
|
559 |
+
const elementX = elementRect.x - parentRect.x;
|
560 |
+
|
561 |
+
// Calculate scale and offsets
|
562 |
+
const scaleX = screenWidth / elementWidth;
|
563 |
+
const scaleY = screenHeight / elementHeight;
|
564 |
+
const scale = Math.min(scaleX, scaleY);
|
565 |
+
|
566 |
+
const transformOrigin =
|
567 |
+
window.getComputedStyle(targetElement).transformOrigin;
|
568 |
+
const [originX, originY] = transformOrigin.split(" ");
|
569 |
+
const originXValue = parseFloat(originX);
|
570 |
+
const originYValue = parseFloat(originY);
|
571 |
+
|
572 |
+
const offsetX =
|
573 |
+
(screenWidth - elementWidth * scale) / 2 -
|
574 |
+
originXValue * (1 - scale);
|
575 |
+
const offsetY =
|
576 |
+
(screenHeight - elementHeight * scale) / 2.5 -
|
577 |
+
originYValue * (1 - scale);
|
578 |
+
|
579 |
+
// Apply scale and offsets to the element
|
580 |
+
targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`;
|
581 |
+
|
582 |
+
// Update global variables
|
583 |
+
elemData[elemId].zoomLevel = scale;
|
584 |
+
elemData[elemId].panX = offsetX;
|
585 |
+
elemData[elemId].panY = offsetY;
|
586 |
+
|
587 |
+
fullScreenMode = false;
|
588 |
+
toggleOverlap("off");
|
589 |
+
}
|
590 |
+
|
591 |
+
/**
|
592 |
+
* This function fits the target element to the screen by calculating
|
593 |
+
* the required scale and offsets. It also updates the global variables
|
594 |
+
* zoomLevel, panX, and panY to reflect the new state.
|
595 |
+
*/
|
596 |
+
|
597 |
+
// Fullscreen mode
|
598 |
+
function fitToScreen() {
|
599 |
+
const canvas = gradioApp().querySelector(
|
600 |
+
`${elemId} canvas[key="interface"]`
|
601 |
+
);
|
602 |
+
|
603 |
+
if (!canvas) return;
|
604 |
+
|
605 |
+
if (canvas.offsetWidth > 862 || isExtension) {
|
606 |
+
targetElement.style.width = (canvas.offsetWidth + 2) + "px";
|
607 |
+
}
|
608 |
+
|
609 |
+
if (isExtension) {
|
610 |
+
targetElement.style.overflow = "visible";
|
611 |
+
}
|
612 |
+
|
613 |
+
if (fullScreenMode) {
|
614 |
+
resetZoom();
|
615 |
+
fullScreenMode = false;
|
616 |
+
return;
|
617 |
+
}
|
618 |
+
|
619 |
+
//Reset Zoom
|
620 |
+
targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
|
621 |
+
|
622 |
+
// Get scrollbar width to right-align the image
|
623 |
+
const scrollbarWidth =
|
624 |
+
window.innerWidth - document.documentElement.clientWidth;
|
625 |
+
|
626 |
+
// Get element and screen dimensions
|
627 |
+
const elementWidth = targetElement.offsetWidth;
|
628 |
+
const elementHeight = targetElement.offsetHeight;
|
629 |
+
const screenWidth = window.innerWidth - scrollbarWidth;
|
630 |
+
const screenHeight = window.innerHeight;
|
631 |
+
|
632 |
+
// Get element's coordinates relative to the page
|
633 |
+
const elementRect = targetElement.getBoundingClientRect();
|
634 |
+
const elementY = elementRect.y;
|
635 |
+
const elementX = elementRect.x;
|
636 |
+
|
637 |
+
// Calculate scale and offsets
|
638 |
+
const scaleX = screenWidth / elementWidth;
|
639 |
+
const scaleY = screenHeight / elementHeight;
|
640 |
+
const scale = Math.min(scaleX, scaleY);
|
641 |
+
|
642 |
+
// Get the current transformOrigin
|
643 |
+
const computedStyle = window.getComputedStyle(targetElement);
|
644 |
+
const transformOrigin = computedStyle.transformOrigin;
|
645 |
+
const [originX, originY] = transformOrigin.split(" ");
|
646 |
+
const originXValue = parseFloat(originX);
|
647 |
+
const originYValue = parseFloat(originY);
|
648 |
+
|
649 |
+
// Calculate offsets with respect to the transformOrigin
|
650 |
+
const offsetX =
|
651 |
+
(screenWidth - elementWidth * scale) / 2 -
|
652 |
+
elementX -
|
653 |
+
originXValue * (1 - scale);
|
654 |
+
const offsetY =
|
655 |
+
(screenHeight - elementHeight * scale) / 2 -
|
656 |
+
elementY -
|
657 |
+
originYValue * (1 - scale);
|
658 |
+
|
659 |
+
// Apply scale and offsets to the element
|
660 |
+
targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`;
|
661 |
+
|
662 |
+
// Update global variables
|
663 |
+
elemData[elemId].zoomLevel = scale;
|
664 |
+
elemData[elemId].panX = offsetX;
|
665 |
+
elemData[elemId].panY = offsetY;
|
666 |
+
|
667 |
+
fullScreenMode = true;
|
668 |
+
toggleOverlap("on");
|
669 |
+
}
|
670 |
+
|
671 |
+
// Handle keydown events
|
672 |
+
function handleKeyDown(event) {
|
673 |
+
// Disable key locks to make pasting from the buffer work correctly
|
674 |
+
if ((event.ctrlKey && event.code === 'KeyV') || (event.ctrlKey && event.code === 'KeyC') || event.code === "F5") {
|
675 |
+
return;
|
676 |
+
}
|
677 |
+
|
678 |
+
// before activating shortcut, ensure user is not actively typing in an input field
|
679 |
+
if (!hotkeysConfig.canvas_blur_prompt) {
|
680 |
+
if (event.target.nodeName === 'TEXTAREA' || event.target.nodeName === 'INPUT') {
|
681 |
+
return;
|
682 |
+
}
|
683 |
+
}
|
684 |
+
|
685 |
+
|
686 |
+
const hotkeyActions = {
|
687 |
+
[hotkeysConfig.canvas_hotkey_reset]: resetZoom,
|
688 |
+
[hotkeysConfig.canvas_hotkey_overlap]: toggleOverlap,
|
689 |
+
[hotkeysConfig.canvas_hotkey_fullscreen]: fitToScreen
|
690 |
+
};
|
691 |
+
|
692 |
+
const action = hotkeyActions[event.code];
|
693 |
+
if (action) {
|
694 |
+
event.preventDefault();
|
695 |
+
action(event);
|
696 |
+
}
|
697 |
+
|
698 |
+
if (
|
699 |
+
isModifierKey(event, hotkeysConfig.canvas_hotkey_zoom) ||
|
700 |
+
isModifierKey(event, hotkeysConfig.canvas_hotkey_adjust)
|
701 |
+
) {
|
702 |
+
event.preventDefault();
|
703 |
+
}
|
704 |
+
}
|
705 |
+
|
706 |
+
// Get Mouse position
|
707 |
+
function getMousePosition(e) {
|
708 |
+
mouseX = e.offsetX;
|
709 |
+
mouseY = e.offsetY;
|
710 |
+
}
|
711 |
+
|
712 |
+
// Simulation of the function to put a long image into the screen.
|
713 |
+
// We detect if an image has a scroll bar or not, make a fullscreen to reveal the image, then reduce it to fit into the element.
|
714 |
+
// We hide the image and show it to the user when it is ready.
|
715 |
+
|
716 |
+
targetElement.isExpanded = false;
|
717 |
+
function autoExpand() {
|
718 |
+
const canvas = document.querySelector(`${elemId} canvas[key="interface"]`);
|
719 |
+
if (canvas) {
|
720 |
+
if (hasHorizontalScrollbar(targetElement) && targetElement.isExpanded === false) {
|
721 |
+
targetElement.style.visibility = "hidden";
|
722 |
+
setTimeout(() => {
|
723 |
+
fitToScreen();
|
724 |
+
resetZoom();
|
725 |
+
targetElement.style.visibility = "visible";
|
726 |
+
targetElement.isExpanded = true;
|
727 |
+
}, 10);
|
728 |
+
}
|
729 |
+
}
|
730 |
+
}
|
731 |
+
|
732 |
+
targetElement.addEventListener("mousemove", getMousePosition);
|
733 |
+
|
734 |
+
//observers
|
735 |
+
// Creating an observer with a callback function to handle DOM changes
|
736 |
+
const observer = new MutationObserver((mutationsList, observer) => {
|
737 |
+
for (let mutation of mutationsList) {
|
738 |
+
// If the style attribute of the canvas has changed, by observation it happens only when the picture changes
|
739 |
+
if (mutation.type === 'attributes' && mutation.attributeName === 'style' &&
|
740 |
+
mutation.target.tagName.toLowerCase() === 'canvas') {
|
741 |
+
targetElement.isExpanded = false;
|
742 |
+
setTimeout(resetZoom, 10);
|
743 |
+
}
|
744 |
+
}
|
745 |
+
});
|
746 |
+
|
747 |
+
// Apply auto expand if enabled
|
748 |
+
if (hotkeysConfig.canvas_auto_expand) {
|
749 |
+
targetElement.addEventListener("mousemove", autoExpand);
|
750 |
+
// Set up an observer to track attribute changes
|
751 |
+
observer.observe(targetElement, {attributes: true, childList: true, subtree: true});
|
752 |
+
}
|
753 |
+
|
754 |
+
// Handle events only inside the targetElement
|
755 |
+
let isKeyDownHandlerAttached = false;
|
756 |
+
|
757 |
+
function handleMouseMove() {
|
758 |
+
if (!isKeyDownHandlerAttached) {
|
759 |
+
document.addEventListener("keydown", handleKeyDown);
|
760 |
+
isKeyDownHandlerAttached = true;
|
761 |
+
|
762 |
+
activeElement = elemId;
|
763 |
+
}
|
764 |
+
}
|
765 |
+
|
766 |
+
function handleMouseLeave() {
|
767 |
+
if (isKeyDownHandlerAttached) {
|
768 |
+
document.removeEventListener("keydown", handleKeyDown);
|
769 |
+
isKeyDownHandlerAttached = false;
|
770 |
+
|
771 |
+
activeElement = null;
|
772 |
+
}
|
773 |
+
}
|
774 |
+
|
775 |
+
// Add mouse event handlers
|
776 |
+
targetElement.addEventListener("mousemove", handleMouseMove);
|
777 |
+
targetElement.addEventListener("mouseleave", handleMouseLeave);
|
778 |
+
|
779 |
+
// Reset zoom when click on another tab
|
780 |
+
elements.img2imgTabs.addEventListener("click", resetZoom);
|
781 |
+
elements.img2imgTabs.addEventListener("click", () => {
|
782 |
+
// targetElement.style.width = "";
|
783 |
+
if (parseInt(targetElement.style.width) > 865) {
|
784 |
+
setTimeout(fitToElement, 0);
|
785 |
+
}
|
786 |
+
});
|
787 |
+
|
788 |
+
targetElement.addEventListener("wheel", e => {
|
789 |
+
// change zoom level
|
790 |
+
const operation = e.deltaY > 0 ? "-" : "+";
|
791 |
+
changeZoomLevel(operation, e);
|
792 |
+
|
793 |
+
// Handle brush size adjustment with ctrl key pressed
|
794 |
+
if (isModifierKey(e, hotkeysConfig.canvas_hotkey_adjust)) {
|
795 |
+
e.preventDefault();
|
796 |
+
|
797 |
+
// Increase or decrease brush size based on scroll direction
|
798 |
+
adjustBrushSize(elemId, e.deltaY);
|
799 |
+
}
|
800 |
+
});
|
801 |
+
|
802 |
+
// Handle the move event for pan functionality. Updates the panX and panY variables and applies the new transform to the target element.
|
803 |
+
function handleMoveKeyDown(e) {
|
804 |
+
|
805 |
+
// Disable key locks to make pasting from the buffer work correctly
|
806 |
+
if ((e.ctrlKey && e.code === 'KeyV') || (e.ctrlKey && e.code === 'KeyC') || e.code === "F5") {
|
807 |
+
return;
|
808 |
+
}
|
809 |
+
|
810 |
+
// before activating shortcut, ensure user is not actively typing in an input field
|
811 |
+
if (!hotkeysConfig.canvas_blur_prompt) {
|
812 |
+
if (e.target.nodeName === 'TEXTAREA' || e.target.nodeName === 'INPUT') {
|
813 |
+
return;
|
814 |
+
}
|
815 |
+
}
|
816 |
+
|
817 |
+
|
818 |
+
if (e.code === hotkeysConfig.canvas_hotkey_move) {
|
819 |
+
if (!e.ctrlKey && !e.metaKey && isKeyDownHandlerAttached) {
|
820 |
+
e.preventDefault();
|
821 |
+
document.activeElement.blur();
|
822 |
+
isMoving = true;
|
823 |
+
}
|
824 |
+
}
|
825 |
+
}
|
826 |
+
|
827 |
+
function handleMoveKeyUp(e) {
|
828 |
+
if (e.code === hotkeysConfig.canvas_hotkey_move) {
|
829 |
+
isMoving = false;
|
830 |
+
}
|
831 |
+
}
|
832 |
+
|
833 |
+
document.addEventListener("keydown", handleMoveKeyDown);
|
834 |
+
document.addEventListener("keyup", handleMoveKeyUp);
|
835 |
+
|
836 |
+
// Detect zoom level and update the pan speed.
|
837 |
+
function updatePanPosition(movementX, movementY) {
|
838 |
+
let panSpeed = 2;
|
839 |
+
|
840 |
+
if (elemData[elemId].zoomLevel > 8) {
|
841 |
+
panSpeed = 3.5;
|
842 |
+
}
|
843 |
+
|
844 |
+
elemData[elemId].panX += movementX * panSpeed;
|
845 |
+
elemData[elemId].panY += movementY * panSpeed;
|
846 |
+
|
847 |
+
// Delayed redraw of an element
|
848 |
+
requestAnimationFrame(() => {
|
849 |
+
targetElement.style.transform = `translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px) scale(${elemData[elemId].zoomLevel})`;
|
850 |
+
toggleOverlap("on");
|
851 |
+
});
|
852 |
+
}
|
853 |
+
|
854 |
+
function handleMoveByKey(e) {
|
855 |
+
if (isMoving && elemId === activeElement) {
|
856 |
+
updatePanPosition(e.movementX, e.movementY);
|
857 |
+
targetElement.style.pointerEvents = "none";
|
858 |
+
|
859 |
+
if (isExtension) {
|
860 |
+
targetElement.style.overflow = "visible";
|
861 |
+
}
|
862 |
+
|
863 |
+
} else {
|
864 |
+
targetElement.style.pointerEvents = "auto";
|
865 |
+
}
|
866 |
+
}
|
867 |
+
|
868 |
+
// Prevents sticking to the mouse
|
869 |
+
window.onblur = function() {
|
870 |
+
isMoving = false;
|
871 |
+
};
|
872 |
+
|
873 |
+
// Checks for extension
|
874 |
+
function checkForOutBox() {
|
875 |
+
const parentElement = targetElement.closest('[id^="component-"]');
|
876 |
+
if (parentElement.offsetWidth < targetElement.offsetWidth && !targetElement.isExpanded) {
|
877 |
+
resetZoom();
|
878 |
+
targetElement.isExpanded = true;
|
879 |
+
}
|
880 |
+
|
881 |
+
if (parentElement.offsetWidth < targetElement.offsetWidth && elemData[elemId].zoomLevel == 1) {
|
882 |
+
resetZoom();
|
883 |
+
}
|
884 |
+
|
885 |
+
if (parentElement.offsetWidth < targetElement.offsetWidth && targetElement.offsetWidth * elemData[elemId].zoomLevel > parentElement.offsetWidth && elemData[elemId].zoomLevel < 1 && !targetElement.isZoomed) {
|
886 |
+
resetZoom();
|
887 |
+
}
|
888 |
+
}
|
889 |
+
|
890 |
+
if (isExtension) {
|
891 |
+
targetElement.addEventListener("mousemove", checkForOutBox);
|
892 |
+
}
|
893 |
+
|
894 |
+
|
895 |
+
window.addEventListener('resize', (e) => {
|
896 |
+
resetZoom();
|
897 |
+
|
898 |
+
if (isExtension) {
|
899 |
+
targetElement.isExpanded = false;
|
900 |
+
targetElement.isZoomed = false;
|
901 |
+
}
|
902 |
+
});
|
903 |
+
|
904 |
+
gradioApp().addEventListener("mousemove", handleMoveByKey);
|
905 |
+
|
906 |
+
|
907 |
+
}
|
908 |
+
|
909 |
+
applyZoomAndPan(elementIDs.sketch, false);
|
910 |
+
applyZoomAndPan(elementIDs.inpaint, false);
|
911 |
+
applyZoomAndPan(elementIDs.inpaintSketch, false);
|
912 |
+
|
913 |
+
// Make the function global so that other extensions can take advantage of this solution
|
914 |
+
const applyZoomAndPanIntegration = async(id, elementIDs) => {
|
915 |
+
const mainEl = document.querySelector(id);
|
916 |
+
if (id.toLocaleLowerCase() === "none") {
|
917 |
+
for (const elementID of elementIDs) {
|
918 |
+
const el = await waitForElement(elementID);
|
919 |
+
if (!el) break;
|
920 |
+
applyZoomAndPan(elementID);
|
921 |
+
}
|
922 |
+
return;
|
923 |
+
}
|
924 |
+
|
925 |
+
if (!mainEl) return;
|
926 |
+
mainEl.addEventListener("click", async() => {
|
927 |
+
for (const elementID of elementIDs) {
|
928 |
+
const el = await waitForElement(elementID);
|
929 |
+
if (!el) break;
|
930 |
+
applyZoomAndPan(elementID);
|
931 |
+
}
|
932 |
+
}, {once: true});
|
933 |
+
};
|
934 |
+
|
935 |
+
window.applyZoomAndPan = applyZoomAndPan; // Single element only; pass an elementID, for example applyZoomAndPan("#txt2img_controlnet_ControlNet_input_image")
|
936 |
+
|
937 |
+
window.applyZoomAndPanIntegration = applyZoomAndPanIntegration; // for any extension
|
938 |
+
|
939 |
+
/*
|
940 |
+
The function `applyZoomAndPanIntegration` takes two arguments:
|
941 |
+
|
942 |
+
1. `id`: A string identifier for the element to which zoom and pan functionality will be applied on click.
|
943 |
+
If the `id` value is "none", the functionality will be applied to all elements specified in the second argument without a click event.
|
944 |
+
|
945 |
+
2. `elementIDs`: An array of string identifiers for elements. Zoom and pan functionality will be applied to each of these elements on click of the element specified by the first argument.
|
946 |
+
If "none" is specified in the first argument, the functionality will be applied to each of these elements without a click event.
|
947 |
+
|
948 |
+
Example usage:
|
949 |
+
applyZoomAndPanIntegration("#txt2img_controlnet", ["#txt2img_controlnet_ControlNet_input_image"]);
|
950 |
+
In this example, zoom and pan functionality will be applied to the element with the identifier "txt2img_controlnet_ControlNet_input_image" upon clicking the element with the identifier "txt2img_controlnet".
|
951 |
+
*/
|
952 |
+
|
953 |
+
// More examples
|
954 |
+
// Add integration with ControlNet txt2img One TAB
|
955 |
+
// applyZoomAndPanIntegration("#txt2img_controlnet", ["#txt2img_controlnet_ControlNet_input_image"]);
|
956 |
+
|
957 |
+
// Add integration with ControlNet txt2img Tabs
|
958 |
+
// applyZoomAndPanIntegration("#txt2img_controlnet",Array.from({ length: 10 }, (_, i) => `#txt2img_controlnet_ControlNet-${i}_input_image`));
|
959 |
+
|
960 |
+
// Add integration with Inpaint Anything
|
961 |
+
// applyZoomAndPanIntegration("None", ["#ia_sam_image", "#ia_sel_mask"]);
|
962 |
+
});
|
extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
+import gradio as gr
+from modules import shared
+
+shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas Hotkeys"), {
+    "canvas_hotkey_zoom": shared.OptionInfo("Alt", "Zoom canvas", gr.Radio, {"choices": ["Shift", "Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally; 'Alt' can cause a little trouble in Firefox"),
+    "canvas_hotkey_adjust": shared.OptionInfo("Ctrl", "Adjust brush size", gr.Radio, {"choices": ["Shift", "Ctrl", "Alt"]}).info("If you choose 'Shift' you cannot scroll horizontally; 'Alt' can cause a little trouble in Firefox"),
+    "canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas").info("To work correctly in Firefox, turn off 'Automatically search the page text when typing' in the browser settings"),
+    "canvas_hotkey_fullscreen": shared.OptionInfo("S", "Fullscreen mode: maximizes the picture so that it fits the screen and stretches it to its full width"),
+    "canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas position"),
+    "canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, needed for testing"),
+    "canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"),
+    "canvas_auto_expand": shared.OptionInfo(True, "Automatically expand an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"),
+    "canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"),
+    "canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable functions that you don't use", gr.CheckboxGroup, {"choices": ["Zoom", "Adjust brush size", "Moving canvas", "Fullscreen", "Reset Zoom", "Overlap"]}),
+}))
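These options are registered on the Python side; the canvas script reads them in the browser. A minimal sketch of how the zoom hotkey choice could be consumed on the front end, assuming the webui exposes setting values to JavaScript through a global opts object (that wiring is not part of the files shown here, and the helper name zoomModifierPressed is made up for illustration):

// Hypothetical illustration: picking the modifier key for zooming from the
// registered "canvas_hotkey_zoom" option. The `opts` global is an assumption,
// not defined in the files shown above.
function zoomModifierPressed(event) {
    const key = (window.opts && opts.canvas_hotkey_zoom) || "Alt";  // "Shift", "Ctrl" or "Alt"
    if (key === "Shift") return event.shiftKey;
    if (key === "Ctrl") return event.ctrlKey;
    return event.altKey;
}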
extensions-builtin/canvas-zoom-and-pan/style.css
ADDED
@@ -0,0 +1,66 @@
+.canvas-tooltip-info {
+    position: absolute;
+    top: 10px;
+    left: 10px;
+    cursor: help;
+    background-color: rgba(0, 0, 0, 0.3);
+    width: 20px;
+    height: 20px;
+    border-radius: 50%;
+    display: flex;
+    align-items: center;
+    justify-content: center;
+    flex-direction: column;
+
+    z-index: 100;
+}
+
+.canvas-tooltip-info::after {
+    content: '';
+    display: block;
+    width: 2px;
+    height: 7px;
+    background-color: white;
+    margin-top: 2px;
+}
+
+.canvas-tooltip-info::before {
+    content: '';
+    display: block;
+    width: 2px;
+    height: 2px;
+    background-color: white;
+}
+
+.canvas-tooltip-content {
+    display: none;
+    background-color: #f9f9f9;
+    color: #333;
+    border: 1px solid #ddd;
+    padding: 15px;
+    position: absolute;
+    top: 40px;
+    left: 10px;
+    width: 250px;
+    font-size: 16px;
+    opacity: 0;
+    border-radius: 8px;
+    box-shadow: 0px 8px 16px 0px rgba(0, 0, 0, 0.2);
+
+    z-index: 100;
+}
+
+.canvas-tooltip:hover .canvas-tooltip-content {
+    display: block;
+    animation: fadeIn 0.5s;
+    opacity: 1;
+}
+
+@keyframes fadeIn {
+    from {opacity: 0;}
+    to {opacity: 1;}
+}
+
+.styler {
+    overflow: inherit !important;
+}
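These rules style a small info badge (.canvas-tooltip-info) and a hover-revealed panel (.canvas-tooltip-content) inside a .canvas-tooltip container. A minimal sketch of markup these selectors would match; the actual DOM is built at runtime by zoom.js, which is not fully shown in this commit, so the exact structure below is an assumption:

// Hypothetical illustration of the DOM structure the selectors above expect.
const tooltip = document.createElement("div");
tooltip.className = "canvas-tooltip";

const info = document.createElement("div");
info.className = "canvas-tooltip-info";            // the round help badge in the corner

const content = document.createElement("div");
content.className = "canvas-tooltip-content";      // hidden until .canvas-tooltip is hovered
content.textContent = "Canvas hotkeys: Alt - zoom, Ctrl - brush size, F - move, S - fullscreen, R - reset";

tooltip.append(info, content);
// someCanvasContainer.appendChild(tooltip);        // placeholder: attach next to the canvas element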
extensions-builtin/extra-options-section/scripts/extra_options_section.py
ADDED
@@ -0,0 +1,74 @@
+import math
+
+import gradio as gr
+from modules import scripts, shared, ui_components, ui_settings, generation_parameters_copypaste
+from modules.ui_components import FormColumn
+
+
+class ExtraOptionsSection(scripts.Script):
+    section = "extra_options"
+
+    def __init__(self):
+        self.comps = None
+        self.setting_names = None
+
+    def title(self):
+        return "Extra options"
+
+    def show(self, is_img2img):
+        return scripts.AlwaysVisible
+
+    def ui(self, is_img2img):
+        self.comps = []
+        self.setting_names = []
+        self.infotext_fields = []
+        extra_options = shared.opts.extra_options_img2img if is_img2img else shared.opts.extra_options_txt2img
+
+        mapping = {k: v for v, k in generation_parameters_copypaste.infotext_to_setting_name_mapping}
+
+        with gr.Blocks() as interface:
+            with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and extra_options else gr.Group():
+
+                row_count = math.ceil(len(extra_options) / shared.opts.extra_options_cols)
+
+                for row in range(row_count):
+                    with gr.Row():
+                        for col in range(shared.opts.extra_options_cols):
+                            index = row * shared.opts.extra_options_cols + col
+                            if index >= len(extra_options):
+                                break
+
+                            setting_name = extra_options[index]
+
+                            with FormColumn():
+                                comp = ui_settings.create_setting_component(setting_name)
+
+                            self.comps.append(comp)
+                            self.setting_names.append(setting_name)
+
+                            setting_infotext_name = mapping.get(setting_name)
+                            if setting_infotext_name is not None:
+                                self.infotext_fields.append((comp, setting_infotext_name))
+
+        def get_settings_values():
+            res = [ui_settings.get_value_for_setting(key) for key in self.setting_names]
+            return res[0] if len(res) == 1 else res
+
+        interface.load(fn=get_settings_values, inputs=[], outputs=self.comps, queue=False, show_progress=False)
+
+        return self.comps
+
+    def before_process(self, p, *args):
+        for name, value in zip(self.setting_names, args):
+            if name not in p.override_settings:
+                p.override_settings[name] = value
+
+
+shared.options_templates.update(shared.options_section(('ui', "User interface"), {
+    "extra_options_txt2img": shared.OptionInfo([], "Options in main UI - txt2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img interfaces").needs_reload_ui(),
+    "extra_options_img2img": shared.OptionInfo([], "Options in main UI - img2img", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in img2img interfaces").needs_reload_ui(),
+    "extra_options_cols": shared.OptionInfo(1, "Options in main UI - number of columns", gr.Number, {"precision": 0}).needs_reload_ui(),
+    "extra_options_accordion": shared.OptionInfo(False, "Options in main UI - place into an accordion").needs_reload_ui()
+}))
+
+
extensions-builtin/mobile/javascript/mobile.js
ADDED
@@ -0,0 +1,32 @@
+var isSetupForMobile = false;
+
+function isMobile() {
+    for (var tab of ["txt2img", "img2img"]) {
+        var imageTab = gradioApp().getElementById(tab + '_results');
+        if (imageTab && imageTab.offsetParent && imageTab.offsetLeft == 0) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+function reportWindowSize() {
+    var currentlyMobile = isMobile();
+    if (currentlyMobile == isSetupForMobile) return;
+    isSetupForMobile = currentlyMobile;
+
+    for (var tab of ["txt2img", "img2img"]) {
+        var button = gradioApp().getElementById(tab + '_generate_box');
+        var target = gradioApp().getElementById(currentlyMobile ? tab + '_results' : tab + '_actions_column');
+        target.insertBefore(button, target.firstElementChild);
+
+        gradioApp().getElementById(tab + '_results').classList.toggle('mobile', currentlyMobile);
+    }
+}
+
+window.addEventListener("resize", reportWindowSize);
+
+onUiLoaded(function() {
+    reportWindowSize();
+});
extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js
ADDED
@@ -0,0 +1,42 @@
+// Stable Diffusion WebUI - Bracket checker
+// By Hingashi no Florin/Bwin4L & @akx
+// Counts open and closed brackets (round, square, curly) in the prompt and negative prompt text boxes in the txt2img and img2img tabs.
+// If there's a mismatch, the keyword counter turns red and if you hover on it, a tooltip tells you what's wrong.
+
+function checkBrackets(textArea, counterElt) {
+    var counts = {};
+    (textArea.value.match(/[(){}[\]]/g) || []).forEach(bracket => {
+        counts[bracket] = (counts[bracket] || 0) + 1;
+    });
+    var errors = [];
+
+    function checkPair(open, close, kind) {
+        if (counts[open] !== counts[close]) {
+            errors.push(
+                `${open}...${close} - Detected ${counts[open] || 0} opening and ${counts[close] || 0} closing ${kind}.`
+            );
+        }
+    }
+
+    checkPair('(', ')', 'round brackets');
+    checkPair('[', ']', 'square brackets');
+    checkPair('{', '}', 'curly brackets');
+    counterElt.title = errors.join('\n');
+    counterElt.classList.toggle('error', errors.length !== 0);
+}
+
+function setupBracketChecking(id_prompt, id_counter) {
+    var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea");
+    var counter = gradioApp().getElementById(id_counter);
+
+    if (textarea && counter) {
+        textarea.addEventListener("input", () => checkBrackets(textarea, counter));
+    }
+}
+
+onUiLoaded(function() {
+    setupBracketChecking('txt2img_prompt', 'txt2img_token_counter');
+    setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter');
+    setupBracketChecking('img2img_prompt', 'img2img_token_counter');
+    setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter');
+});
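For illustration, the same counting logic applied to a plain string shows the kind of message that ends up in the counter's tooltip. This is a standalone sketch, independent of the DOM wiring above:

// Standalone sketch of the mismatch detection performed by checkBrackets().
function bracketErrors(prompt) {
    var counts = {};
    (prompt.match(/[(){}[\]]/g) || []).forEach(b => {
        counts[b] = (counts[b] || 0) + 1;
    });
    var errors = [];
    [['(', ')', 'round brackets'], ['[', ']', 'square brackets'], ['{', '}', 'curly brackets']].forEach(([open, close, kind]) => {
        if (counts[open] !== counts[close]) {
            errors.push(`${open}...${close} - Detected ${counts[open] || 0} opening and ${counts[close] || 0} closing ${kind}.`);
        }
    });
    return errors;
}

// "(masterpiece), (best quality" is missing one closing parenthesis:
console.log(bracketErrors("(masterpiece), (best quality"));
// -> ["(...) - Detected 2 opening and 1 closing round brackets."]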