79a9c1df70e530032ecec83bd18723ea452cf5fbcce098c3153421501b705acf
- extensions-builtin/LDSR/__pycache__/ldsr_model_arch.cpython-310.pyc +0 -0
- extensions-builtin/LDSR/__pycache__/preload.cpython-310.pyc +0 -0
- extensions-builtin/LDSR/__pycache__/sd_hijack_autoencoder.cpython-310.pyc +0 -0
- extensions-builtin/LDSR/__pycache__/sd_hijack_ddpm_v1.cpython-310.pyc +0 -0
- extensions-builtin/LDSR/__pycache__/vqvae_quantize.cpython-310.pyc +0 -0
- extensions-builtin/LDSR/ldsr_model_arch.py +250 -0
- extensions-builtin/LDSR/preload.py +6 -0
- extensions-builtin/LDSR/scripts/__pycache__/ldsr_model.cpython-310.pyc +0 -0
- extensions-builtin/LDSR/scripts/ldsr_model.py +68 -0
- extensions-builtin/LDSR/sd_hijack_autoencoder.py +293 -0
- extensions-builtin/LDSR/sd_hijack_ddpm_v1.py +1443 -0
- extensions-builtin/LDSR/vqvae_quantize.py +147 -0
- extensions-builtin/Lora/__pycache__/extra_networks_lora.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/lora.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/lyco_helpers.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/network.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/network_full.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/network_hada.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/network_ia3.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/network_lokr.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/network_lora.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/networks.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/preload.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/ui_edit_user_metadata.cpython-310.pyc +0 -0
- extensions-builtin/Lora/__pycache__/ui_extra_networks_lora.cpython-310.pyc +0 -0
- extensions-builtin/Lora/extra_networks_lora.py +59 -0
- extensions-builtin/Lora/lora.py +9 -0
- extensions-builtin/Lora/lyco_helpers.py +21 -0
- extensions-builtin/Lora/network.py +154 -0
- extensions-builtin/Lora/network_full.py +22 -0
- extensions-builtin/Lora/network_hada.py +55 -0
- extensions-builtin/Lora/network_ia3.py +30 -0
- extensions-builtin/Lora/network_lokr.py +64 -0
- extensions-builtin/Lora/network_lora.py +86 -0
- extensions-builtin/Lora/networks.py +468 -0
- extensions-builtin/Lora/preload.py +7 -0
- extensions-builtin/Lora/scripts/__pycache__/lora_script.cpython-310.pyc +0 -0
- extensions-builtin/Lora/scripts/lora_script.py +123 -0
- extensions-builtin/Lora/ui_edit_user_metadata.py +216 -0
- extensions-builtin/Lora/ui_extra_networks_lora.py +78 -0
- extensions-builtin/ScuNET/__pycache__/preload.cpython-310.pyc +0 -0
- extensions/stable-diffusion-webui-images-browser/scripts/wib/__pycache__/wib_db.cpython-310.pyc +0 -0
- extensions/stable-diffusion-webui-images-browser/scripts/wib/wib_db.py +888 -0
- extensions/stable-diffusion-webui-images-browser/style.css +23 -0
- extensions/stable-diffusion-webui-images-browser/wib.sqlite3 +0 -0
- extensions/ultimate-upscale-for-automatic1111/.gitignore +1 -0
- extensions/ultimate-upscale-for-automatic1111/LICENSE +674 -0
- extensions/ultimate-upscale-for-automatic1111/README.md +43 -0
- extensions/ultimate-upscale-for-automatic1111/scripts/__pycache__/ultimate-upscale.cpython-310.pyc +0 -0
- extensions/ultimate-upscale-for-automatic1111/scripts/ultimate-upscale.py +557 -0
extensions-builtin/LDSR/__pycache__/ldsr_model_arch.cpython-310.pyc
ADDED
Binary file (6.68 kB).

extensions-builtin/LDSR/__pycache__/preload.cpython-310.pyc
ADDED
Binary file (483 Bytes).

extensions-builtin/LDSR/__pycache__/sd_hijack_autoencoder.cpython-310.pyc
ADDED
Binary file (8.92 kB).

extensions-builtin/LDSR/__pycache__/sd_hijack_ddpm_v1.cpython-310.pyc
ADDED
Binary file (42.4 kB).

extensions-builtin/LDSR/__pycache__/vqvae_quantize.cpython-310.pyc
ADDED
Binary file (3.64 kB).
extensions-builtin/LDSR/ldsr_model_arch.py
ADDED
@@ -0,0 +1,250 @@
import os
import gc
import time

import numpy as np
import torch
import torchvision
from PIL import Image
from einops import rearrange, repeat
from omegaconf import OmegaConf
import safetensors.torch

from ldm.models.diffusion.ddim import DDIMSampler
from ldm.util import instantiate_from_config, ismap
from modules import shared, sd_hijack, devices

cached_ldsr_model: torch.nn.Module = None


# Create LDSR Class
class LDSR:
    def load_model_from_config(self, half_attention):
        global cached_ldsr_model

        if shared.opts.ldsr_cached and cached_ldsr_model is not None:
            print("Loading model from cache")
            model: torch.nn.Module = cached_ldsr_model
        else:
            print(f"Loading model from {self.modelPath}")
            _, extension = os.path.splitext(self.modelPath)
            if extension.lower() == ".safetensors":
                pl_sd = safetensors.torch.load_file(self.modelPath, device="cpu")
            else:
                pl_sd = torch.load(self.modelPath, map_location="cpu")
            sd = pl_sd["state_dict"] if "state_dict" in pl_sd else pl_sd
            config = OmegaConf.load(self.yamlPath)
            config.model.target = "ldm.models.diffusion.ddpm.LatentDiffusionV1"
            model: torch.nn.Module = instantiate_from_config(config.model)
            model.load_state_dict(sd, strict=False)
            model = model.to(shared.device)
            if half_attention:
                model = model.half()
            if shared.cmd_opts.opt_channelslast:
                model = model.to(memory_format=torch.channels_last)

            sd_hijack.model_hijack.hijack(model)  # apply optimization
            model.eval()

            if shared.opts.ldsr_cached:
                cached_ldsr_model = model

        return {"model": model}

    def __init__(self, model_path, yaml_path):
        self.modelPath = model_path
        self.yamlPath = yaml_path

    @staticmethod
    def run(model, selected_path, custom_steps, eta):
        example = get_cond(selected_path)

        n_runs = 1
        guider = None
        ckwargs = None
        ddim_use_x0_pred = False
        temperature = 1.
        eta = eta
        custom_shape = None

        height, width = example["image"].shape[1:3]
        split_input = height >= 128 and width >= 128

        if split_input:
            ks = 128
            stride = 64
            vqf = 4  #
            model.split_input_params = {"ks": (ks, ks), "stride": (stride, stride),
                                        "vqf": vqf,
                                        "patch_distributed_vq": True,
                                        "tie_braker": False,
                                        "clip_max_weight": 0.5,
                                        "clip_min_weight": 0.01,
                                        "clip_max_tie_weight": 0.5,
                                        "clip_min_tie_weight": 0.01}
        else:
            if hasattr(model, "split_input_params"):
                delattr(model, "split_input_params")

        x_t = None
        logs = None
        for _ in range(n_runs):
            if custom_shape is not None:
                x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)
                x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0])

            logs = make_convolutional_sample(example, model,
                                             custom_steps=custom_steps,
                                             eta=eta, quantize_x0=False,
                                             custom_shape=custom_shape,
                                             temperature=temperature, noise_dropout=0.,
                                             corrector=guider, corrector_kwargs=ckwargs, x_T=x_t,
                                             ddim_use_x0_pred=ddim_use_x0_pred
                                             )
        return logs

    def super_resolution(self, image, steps=100, target_scale=2, half_attention=False):
        model = self.load_model_from_config(half_attention)

        # Run settings
        diffusion_steps = int(steps)
        eta = 1.0

        gc.collect()
        devices.torch_gc()

        im_og = image
        width_og, height_og = im_og.size
        # If we can adjust the max upscale size, then the 4 below should be our variable
        down_sample_rate = target_scale / 4
        wd = width_og * down_sample_rate
        hd = height_og * down_sample_rate
        width_downsampled_pre = int(np.ceil(wd))
        height_downsampled_pre = int(np.ceil(hd))

        if down_sample_rate != 1:
            print(
                f'Downsampling from [{width_og}, {height_og}] to [{width_downsampled_pre}, {height_downsampled_pre}]')
            im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)
        else:
            print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)")

        # pad width and height to multiples of 64, pads with the edge values of image to avoid artifacts
        pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size
        im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge'))

        logs = self.run(model["model"], im_padded, diffusion_steps, eta)

        sample = logs["sample"]
        sample = sample.detach().cpu()
        sample = torch.clamp(sample, -1., 1.)
        sample = (sample + 1.) / 2. * 255
        sample = sample.numpy().astype(np.uint8)
        sample = np.transpose(sample, (0, 2, 3, 1))
        a = Image.fromarray(sample[0])

        # remove padding
        a = a.crop((0, 0) + tuple(np.array(im_og.size) * 4))

        del model
        gc.collect()
        devices.torch_gc()

        return a


def get_cond(selected_path):
    example = {}
    up_f = 4
    c = selected_path.convert('RGB')
    c = torch.unsqueeze(torchvision.transforms.ToTensor()(c), 0)
    c_up = torchvision.transforms.functional.resize(c, size=[up_f * c.shape[2], up_f * c.shape[3]],
                                                    antialias=True)
    c_up = rearrange(c_up, '1 c h w -> 1 h w c')
    c = rearrange(c, '1 c h w -> 1 h w c')
    c = 2. * c - 1.

    c = c.to(shared.device)
    example["LR_image"] = c
    example["image"] = c_up

    return example


@torch.no_grad()
def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_sequence=None,
                    mask=None, x0=None, quantize_x0=False, temperature=1., score_corrector=None,
                    corrector_kwargs=None, x_t=None
                    ):
    ddim = DDIMSampler(model)
    bs = shape[0]
    shape = shape[1:]
    print(f"Sampling with eta = {eta}; steps: {steps}")
    samples, intermediates = ddim.sample(steps, batch_size=bs, shape=shape, conditioning=cond, callback=callback,
                                         normals_sequence=normals_sequence, quantize_x0=quantize_x0, eta=eta,
                                         mask=mask, x0=x0, temperature=temperature, verbose=False,
                                         score_corrector=score_corrector,
                                         corrector_kwargs=corrector_kwargs, x_t=x_t)

    return samples, intermediates


@torch.no_grad()
def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize_x0=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None,
                              corrector_kwargs=None, x_T=None, ddim_use_x0_pred=False):
    log = {}

    z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key,
                                        return_first_stage_outputs=True,
                                        force_c_encode=not (hasattr(model, 'split_input_params')
                                                            and model.cond_stage_key == 'coordinates_bbox'),
                                        return_original_cond=True)

    if custom_shape is not None:
        z = torch.randn(custom_shape)
        print(f"Generating {custom_shape[0]} samples of shape {custom_shape[1:]}")

    z0 = None

    log["input"] = x
    log["reconstruction"] = xrec

    if ismap(xc):
        log["original_conditioning"] = model.to_rgb(xc)
        if hasattr(model, 'cond_stage_key'):
            log[model.cond_stage_key] = model.to_rgb(xc)

    else:
        log["original_conditioning"] = xc if xc is not None else torch.zeros_like(x)
        if model.cond_stage_model:
            log[model.cond_stage_key] = xc if xc is not None else torch.zeros_like(x)
            if model.cond_stage_key == 'class_label':
                log[model.cond_stage_key] = xc[model.cond_stage_key]

    with model.ema_scope("Plotting"):
        t0 = time.time()

        sample, intermediates = convsample_ddim(model, c, steps=custom_steps, shape=z.shape,
                                                eta=eta,
                                                quantize_x0=quantize_x0, mask=None, x0=z0,
                                                temperature=temperature, score_corrector=corrector, corrector_kwargs=corrector_kwargs,
                                                x_t=x_T)
        t1 = time.time()

    if ddim_use_x0_pred:
        sample = intermediates['pred_x0'][-1]

    x_sample = model.decode_first_stage(sample)

    try:
        x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)
        log["sample_noquant"] = x_sample_noquant
        log["sample_diff"] = torch.abs(x_sample_noquant - x_sample)
    except Exception:
        pass

    log["sample"] = x_sample
    log["time"] = t1 - t0

    return log
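Aside (not part of the commit): the class above is normally driven by the upscaler script further down, but a minimal sketch of calling it directly looks like the following. The model/yaml paths and the input image are hypothetical, and the call only works inside a running webui process (so that modules.shared, devices and the sd_hijack modules below are already set up).

# Minimal sketch, assuming the webui environment is initialized and the LDSR
# extension directory is on sys.path; file paths here are hypothetical.
from PIL import Image

import sd_hijack_autoencoder  # noqa: F401  (restores VQModel classes, see below)
import sd_hijack_ddpm_v1      # noqa: F401  (registers LatentDiffusionV1)
from ldsr_model_arch import LDSR

ldsr = LDSR("models/LDSR/model.ckpt", "models/LDSR/project.yaml")
lowres = Image.open("input.png")  # any RGB image
upscaled = ldsr.super_resolution(lowres, steps=100, target_scale=2)
upscaled.save("input_ldsr_x2.png")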
extensions-builtin/LDSR/preload.py
ADDED
@@ -0,0 +1,6 @@
import os
from modules import paths


def preload(parser):
    parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(paths.models_path, 'LDSR'))
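Aside (not part of the commit): preload.py is picked up by the webui before command-line arguments are parsed, which lets the extension register its own flag. A standalone sketch of the same pattern, using a plain argparse parser as a stand-in for the webui's parser and a local directory instead of paths.models_path:

# Sketch of the preload hook pattern; names here are stand-ins, not webui code.
import argparse
import os


def preload(parser):
    parser.add_argument("--ldsr-models-path", type=str,
                        help="Path to directory with LDSR model file(s).",
                        default=os.path.join("models", "LDSR"))


parser = argparse.ArgumentParser()
preload(parser)  # the webui calls each extension's preload() like this
args = parser.parse_args(["--ldsr-models-path", "/tmp/LDSR"])
print(args.ldsr_models_path)  # -> /tmp/LDSR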
extensions-builtin/LDSR/scripts/__pycache__/ldsr_model.cpython-310.pyc
ADDED
Binary file (3.18 kB).
extensions-builtin/LDSR/scripts/ldsr_model.py
ADDED
@@ -0,0 +1,68 @@
import os

from modules.modelloader import load_file_from_url
from modules.upscaler import Upscaler, UpscalerData
from ldsr_model_arch import LDSR
from modules import shared, script_callbacks, errors
import sd_hijack_autoencoder  # noqa: F401
import sd_hijack_ddpm_v1  # noqa: F401


class UpscalerLDSR(Upscaler):
    def __init__(self, user_path):
        self.name = "LDSR"
        self.user_path = user_path
        self.model_url = "https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1"
        self.yaml_url = "https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1"
        super().__init__()
        scaler_data = UpscalerData("LDSR", None, self)
        self.scalers = [scaler_data]

    def load_model(self, path: str):
        # Remove incorrect project.yaml file if too big
        yaml_path = os.path.join(self.model_path, "project.yaml")
        old_model_path = os.path.join(self.model_path, "model.pth")
        new_model_path = os.path.join(self.model_path, "model.ckpt")

        local_model_paths = self.find_models(ext_filter=[".ckpt", ".safetensors"])
        local_ckpt_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.ckpt")]), None)
        local_safetensors_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.safetensors")]), None)
        local_yaml_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("project.yaml")]), None)

        if os.path.exists(yaml_path):
            statinfo = os.stat(yaml_path)
            if statinfo.st_size >= 10485760:
                print("Removing invalid LDSR YAML file.")
                os.remove(yaml_path)

        if os.path.exists(old_model_path):
            print("Renaming model from model.pth to model.ckpt")
            os.rename(old_model_path, new_model_path)

        if local_safetensors_path is not None and os.path.exists(local_safetensors_path):
            model = local_safetensors_path
        else:
            model = local_ckpt_path or load_file_from_url(self.model_url, model_dir=self.model_download_path, file_name="model.ckpt")

        yaml = local_yaml_path or load_file_from_url(self.yaml_url, model_dir=self.model_download_path, file_name="project.yaml")

        return LDSR(model, yaml)

    def do_upscale(self, img, path):
        try:
            ldsr = self.load_model(path)
        except Exception:
            errors.report(f"Failed loading LDSR model {path}", exc_info=True)
            return img
        ddim_steps = shared.opts.ldsr_steps
        return ldsr.super_resolution(img, ddim_steps, self.scale)


def on_ui_settings():
    import gradio as gr

    shared.opts.add_option("ldsr_steps", shared.OptionInfo(100, "LDSR processing steps. Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}, section=('upscaling', "Upscaling")))
    shared.opts.add_option("ldsr_cached", shared.OptionInfo(False, "Cache LDSR model in memory", gr.Checkbox, {"interactive": True}, section=('upscaling', "Upscaling")))


script_callbacks.on_ui_settings(on_ui_settings)
extensions-builtin/LDSR/sd_hijack_autoencoder.py
ADDED
@@ -0,0 +1,293 @@
# The content of this file comes from the ldm/models/autoencoder.py file of the compvis/stable-diffusion repo
# The VQModel & VQModelInterface were subsequently removed from ldm/models/autoencoder.py when we moved to the stability-ai/stablediffusion repo
# As the LDSR upscaler relies on VQModel & VQModelInterface, the hijack aims to put them back into the ldm.models.autoencoder
import numpy as np
import torch
import pytorch_lightning as pl
import torch.nn.functional as F
from contextlib import contextmanager

from torch.optim.lr_scheduler import LambdaLR

from ldm.modules.ema import LitEma
from vqvae_quantize import VectorQuantizer2 as VectorQuantizer
from ldm.modules.diffusionmodules.model import Encoder, Decoder
from ldm.util import instantiate_from_config

import ldm.models.autoencoder
from packaging import version

class VQModel(pl.LightningModule):
    def __init__(self,
                 ddconfig,
                 lossconfig,
                 n_embed,
                 embed_dim,
                 ckpt_path=None,
                 ignore_keys=None,
                 image_key="image",
                 colorize_nlabels=None,
                 monitor=None,
                 batch_resize_range=None,
                 scheduler_config=None,
                 lr_g_factor=1.0,
                 remap=None,
                 sane_index_shape=False,  # tell vector quantizer to return indices as bhw
                 use_ema=False
                 ):
        super().__init__()
        self.embed_dim = embed_dim
        self.n_embed = n_embed
        self.image_key = image_key
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.loss = instantiate_from_config(lossconfig)
        self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
                                        remap=remap,
                                        sane_index_shape=sane_index_shape)
        self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
        if colorize_nlabels is not None:
            assert type(colorize_nlabels)==int
            self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
        if monitor is not None:
            self.monitor = monitor
        self.batch_resize_range = batch_resize_range
        if self.batch_resize_range is not None:
            print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.")

        self.use_ema = use_ema
        if self.use_ema:
            self.model_ema = LitEma(self)
            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [])
        self.scheduler_config = scheduler_config
        self.lr_g_factor = lr_g_factor

    @contextmanager
    def ema_scope(self, context=None):
        if self.use_ema:
            self.model_ema.store(self.parameters())
            self.model_ema.copy_to(self)
            if context is not None:
                print(f"{context}: Switched to EMA weights")
        try:
            yield None
        finally:
            if self.use_ema:
                self.model_ema.restore(self.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights")

    def init_from_ckpt(self, path, ignore_keys=None):
        sd = torch.load(path, map_location="cpu")["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys or []:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        missing, unexpected = self.load_state_dict(sd, strict=False)
        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
        if missing:
            print(f"Missing Keys: {missing}")
        if unexpected:
            print(f"Unexpected Keys: {unexpected}")

    def on_train_batch_end(self, *args, **kwargs):
        if self.use_ema:
            self.model_ema(self)

    def encode(self, x):
        h = self.encoder(x)
        h = self.quant_conv(h)
        quant, emb_loss, info = self.quantize(h)
        return quant, emb_loss, info

    def encode_to_prequant(self, x):
        h = self.encoder(x)
        h = self.quant_conv(h)
        return h

    def decode(self, quant):
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant)
        return dec

    def decode_code(self, code_b):
        quant_b = self.quantize.embed_code(code_b)
        dec = self.decode(quant_b)
        return dec

    def forward(self, input, return_pred_indices=False):
        quant, diff, (_,_,ind) = self.encode(input)
        dec = self.decode(quant)
        if return_pred_indices:
            return dec, diff, ind
        return dec, diff

    def get_input(self, batch, k):
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
        if self.batch_resize_range is not None:
            lower_size = self.batch_resize_range[0]
            upper_size = self.batch_resize_range[1]
            if self.global_step <= 4:
                # do the first few batches with max size to avoid later oom
                new_resize = upper_size
            else:
                new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16))
            if new_resize != x.shape[2]:
                x = F.interpolate(x, size=new_resize, mode="bicubic")
            x = x.detach()
        return x

    def training_step(self, batch, batch_idx, optimizer_idx):
        # https://github.com/pytorch/pytorch/issues/37142
        # try not to fool the heuristics
        x = self.get_input(batch, self.image_key)
        xrec, qloss, ind = self(x, return_pred_indices=True)

        if optimizer_idx == 0:
            # autoencode
            aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train",
                                            predicted_indices=ind)

            self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return aeloss

        if optimizer_idx == 1:
            # discriminator
            discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
                                            last_layer=self.get_last_layer(), split="train")
            self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
            return discloss

    def validation_step(self, batch, batch_idx):
        log_dict = self._validation_step(batch, batch_idx)
        with self.ema_scope():
            self._validation_step(batch, batch_idx, suffix="_ema")
        return log_dict

    def _validation_step(self, batch, batch_idx, suffix=""):
        x = self.get_input(batch, self.image_key)
        xrec, qloss, ind = self(x, return_pred_indices=True)
        aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0,
                                        self.global_step,
                                        last_layer=self.get_last_layer(),
                                        split="val"+suffix,
                                        predicted_indices=ind
                                        )

        discloss, log_dict_disc = self.loss(qloss, x, xrec, 1,
                                            self.global_step,
                                            last_layer=self.get_last_layer(),
                                            split="val"+suffix,
                                            predicted_indices=ind
                                            )
        rec_loss = log_dict_ae[f"val{suffix}/rec_loss"]
        self.log(f"val{suffix}/rec_loss", rec_loss,
                 prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
        self.log(f"val{suffix}/aeloss", aeloss,
                 prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
        if version.parse(pl.__version__) >= version.parse('1.4.0'):
            del log_dict_ae[f"val{suffix}/rec_loss"]
        self.log_dict(log_dict_ae)
        self.log_dict(log_dict_disc)
        return self.log_dict

    def configure_optimizers(self):
        lr_d = self.learning_rate
        lr_g = self.lr_g_factor*self.learning_rate
        print("lr_d", lr_d)
        print("lr_g", lr_g)
        opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
                                  list(self.decoder.parameters())+
                                  list(self.quantize.parameters())+
                                  list(self.quant_conv.parameters())+
                                  list(self.post_quant_conv.parameters()),
                                  lr=lr_g, betas=(0.5, 0.9))
        opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
                                    lr=lr_d, betas=(0.5, 0.9))

        if self.scheduler_config is not None:
            scheduler = instantiate_from_config(self.scheduler_config)

            print("Setting up LambdaLR scheduler...")
            scheduler = [
                {
                    'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule),
                    'interval': 'step',
                    'frequency': 1
                },
                {
                    'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule),
                    'interval': 'step',
                    'frequency': 1
                },
            ]
            return [opt_ae, opt_disc], scheduler
        return [opt_ae, opt_disc], []

    def get_last_layer(self):
        return self.decoder.conv_out.weight

    def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
        log = {}
        x = self.get_input(batch, self.image_key)
        x = x.to(self.device)
        if only_inputs:
            log["inputs"] = x
            return log
        xrec, _ = self(x)
        if x.shape[1] > 3:
            # colorize with random projection
            assert xrec.shape[1] > 3
            x = self.to_rgb(x)
            xrec = self.to_rgb(xrec)
        log["inputs"] = x
        log["reconstructions"] = xrec
        if plot_ema:
            with self.ema_scope():
                xrec_ema, _ = self(x)
                if x.shape[1] > 3:
                    xrec_ema = self.to_rgb(xrec_ema)
                log["reconstructions_ema"] = xrec_ema
        return log

    def to_rgb(self, x):
        assert self.image_key == "segmentation"
        if not hasattr(self, "colorize"):
            self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
        x = F.conv2d(x, weight=self.colorize)
        x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
        return x


class VQModelInterface(VQModel):
    def __init__(self, embed_dim, *args, **kwargs):
        super().__init__(*args, embed_dim=embed_dim, **kwargs)
        self.embed_dim = embed_dim

    def encode(self, x):
        h = self.encoder(x)
        h = self.quant_conv(h)
        return h

    def decode(self, h, force_not_quantize=False):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant)
        return dec

ldm.models.autoencoder.VQModel = VQModel
ldm.models.autoencoder.VQModelInterface = VQModelInterface
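Aside (not part of the commit): the two assignments at the end of the file are what make the hijack effective. Once this module has been imported, code elsewhere that does `from ldm.models.autoencoder import VQModelInterface` resolves to the class defined here. A small sketch of that effect, assuming the ldm package and the LDSR extension directory are both importable:

# Sketch: importing the hijack module re-attaches the removed classes to
# ldm.models.autoencoder, so the V1 DDPM code below can import them again.
import sd_hijack_autoencoder  # executes the module-level assignments above
import ldm.models.autoencoder as ae

assert ae.VQModel is sd_hijack_autoencoder.VQModel
assert ae.VQModelInterface is sd_hijack_autoencoder.VQModelInterface
print("VQModel / VQModelInterface restored on ldm.models.autoencoder")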
extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
ADDED
@@ -0,0 +1,1443 @@
# This script is copied from the compvis/stable-diffusion repo (aka the SD V1 repo)
# Original filename: ldm/models/diffusion/ddpm.py
# The purpose to reinstate the old DDPM logic which works with VQ, whereas the V2 one doesn't
# Some models such as LDSR require VQ to work correctly
# The classes are suffixed with "V1" and added back to the "ldm.models.diffusion.ddpm" module

import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only

from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler

import ldm.models.diffusion.ddpm

__conditioning_keys__ = {'concat': 'c_concat',
                         'crossattn': 'c_crossattn',
                         'adm': 'y'}


def disabled_train(self, mode=True):
    """Overwrite model.train with this function to make sure train/eval mode
    does not change anymore."""
    return self


def uniform_on_device(r1, r2, shape, device):
    return (r1 - r2) * torch.rand(*shape, device=device) + r2


class DDPMV1(pl.LightningModule):
    # classic DDPM with Gaussian diffusion, in image space
    def __init__(self,
                 unet_config,
                 timesteps=1000,
                 beta_schedule="linear",
                 loss_type="l2",
                 ckpt_path=None,
                 ignore_keys=None,
                 load_only_unet=False,
                 monitor="val/loss",
                 use_ema=True,
                 first_stage_key="image",
                 image_size=256,
                 channels=3,
                 log_every_t=100,
                 clip_denoised=True,
                 linear_start=1e-4,
                 linear_end=2e-2,
                 cosine_s=8e-3,
                 given_betas=None,
                 original_elbo_weight=0.,
                 v_posterior=0.,  # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
                 l_simple_weight=1.,
                 conditioning_key=None,
                 parameterization="eps",  # all assuming fixed variance schedules
                 scheduler_config=None,
                 use_positional_encodings=False,
                 learn_logvar=False,
                 logvar_init=0.,
                 ):
        super().__init__()
        assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"'
        self.parameterization = parameterization
        print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
        self.cond_stage_model = None
        self.clip_denoised = clip_denoised
        self.log_every_t = log_every_t
        self.first_stage_key = first_stage_key
        self.image_size = image_size  # try conv?
        self.channels = channels
        self.use_positional_encodings = use_positional_encodings
        self.model = DiffusionWrapperV1(unet_config, conditioning_key)
        count_params(self.model, verbose=True)
        self.use_ema = use_ema
        if self.use_ema:
            self.model_ema = LitEma(self.model)
            print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

        self.use_scheduler = scheduler_config is not None
        if self.use_scheduler:
            self.scheduler_config = scheduler_config

        self.v_posterior = v_posterior
        self.original_elbo_weight = original_elbo_weight
        self.l_simple_weight = l_simple_weight

        if monitor is not None:
            self.monitor = monitor
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [], only_model=load_only_unet)

        self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
                               linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)

        self.loss_type = loss_type

        self.learn_logvar = learn_logvar
        self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
        if self.learn_logvar:
            self.logvar = nn.Parameter(self.logvar, requires_grad=True)


    def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
        if exists(given_betas):
            betas = given_betas
        else:
            betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
                                       cosine_s=cosine_s)
        alphas = 1. - betas
        alphas_cumprod = np.cumprod(alphas, axis=0)
        alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])

        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)
        self.linear_start = linear_start
        self.linear_end = linear_end
        assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'

        to_torch = partial(torch.tensor, dtype=torch.float32)

        self.register_buffer('betas', to_torch(betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))

        # calculations for posterior q(x_{t-1} | x_t, x_0)
        posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
                    1. - alphas_cumprod) + self.v_posterior * betas
        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
        self.register_buffer('posterior_variance', to_torch(posterior_variance))
        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
        self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
        self.register_buffer('posterior_mean_coef1', to_torch(
            betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
        self.register_buffer('posterior_mean_coef2', to_torch(
            (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))

        if self.parameterization == "eps":
            lvlb_weights = self.betas ** 2 / (
                        2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
        elif self.parameterization == "x0":
            lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
        else:
            raise NotImplementedError("mu not supported")
        # TODO how to choose this term
        lvlb_weights[0] = lvlb_weights[1]
        self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
        assert not torch.isnan(self.lvlb_weights).all()

    @contextmanager
    def ema_scope(self, context=None):
        if self.use_ema:
            self.model_ema.store(self.model.parameters())
            self.model_ema.copy_to(self.model)
            if context is not None:
                print(f"{context}: Switched to EMA weights")
        try:
            yield None
        finally:
            if self.use_ema:
                self.model_ema.restore(self.model.parameters())
                if context is not None:
                    print(f"{context}: Restored training weights")

    def init_from_ckpt(self, path, ignore_keys=None, only_model=False):
        sd = torch.load(path, map_location="cpu")
        if "state_dict" in list(sd.keys()):
            sd = sd["state_dict"]
        keys = list(sd.keys())
        for k in keys:
            for ik in ignore_keys or []:
                if k.startswith(ik):
                    print("Deleting key {} from state_dict.".format(k))
                    del sd[k]
        missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
            sd, strict=False)
        print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
        if missing:
            print(f"Missing Keys: {missing}")
        if unexpected:
            print(f"Unexpected Keys: {unexpected}")

    def q_mean_variance(self, x_start, t):
        """
        Get the distribution q(x_t | x_0).
        :param x_start: the [N x C x ...] tensor of noiseless inputs.
        :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
        :return: A tuple (mean, variance, log_variance), all of x_start's shape.
        """
        mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
        variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
        log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
        return mean, variance, log_variance

    def predict_start_from_noise(self, x_t, t, noise):
        return (
                extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
                extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
        )

    def q_posterior(self, x_start, x_t, t):
        posterior_mean = (
                extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
                extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
        )
        posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
        posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
        return posterior_mean, posterior_variance, posterior_log_variance_clipped

    def p_mean_variance(self, x, t, clip_denoised: bool):
        model_out = self.model(x, t)
        if self.parameterization == "eps":
            x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
        elif self.parameterization == "x0":
            x_recon = model_out
        if clip_denoised:
            x_recon.clamp_(-1., 1.)

        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
        return model_mean, posterior_variance, posterior_log_variance

    @torch.no_grad()
    def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
        b, *_, device = *x.shape, x.device
        model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
        noise = noise_like(x.shape, device, repeat_noise)
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
        return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

    @torch.no_grad()
    def p_sample_loop(self, shape, return_intermediates=False):
        device = self.betas.device
        b = shape[0]
        img = torch.randn(shape, device=device)
        intermediates = [img]
        for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
            img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
                                clip_denoised=self.clip_denoised)
            if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
                intermediates.append(img)
        if return_intermediates:
            return img, intermediates
        return img

    @torch.no_grad()
    def sample(self, batch_size=16, return_intermediates=False):
        image_size = self.image_size
        channels = self.channels
        return self.p_sample_loop((batch_size, channels, image_size, image_size),
                                  return_intermediates=return_intermediates)

    def q_sample(self, x_start, t, noise=None):
        noise = default(noise, lambda: torch.randn_like(x_start))
        return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)

    def get_loss(self, pred, target, mean=True):
        if self.loss_type == 'l1':
            loss = (target - pred).abs()
            if mean:
                loss = loss.mean()
        elif self.loss_type == 'l2':
            if mean:
                loss = torch.nn.functional.mse_loss(target, pred)
            else:
                loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
        else:
            raise NotImplementedError("unknown loss type '{loss_type}'")

        return loss

    def p_losses(self, x_start, t, noise=None):
        noise = default(noise, lambda: torch.randn_like(x_start))
        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
        model_out = self.model(x_noisy, t)

        loss_dict = {}
        if self.parameterization == "eps":
            target = noise
        elif self.parameterization == "x0":
            target = x_start
        else:
            raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported")

        loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])

        log_prefix = 'train' if self.training else 'val'

        loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
        loss_simple = loss.mean() * self.l_simple_weight

        loss_vlb = (self.lvlb_weights[t] * loss).mean()
        loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})

        loss = loss_simple + self.original_elbo_weight * loss_vlb

        loss_dict.update({f'{log_prefix}/loss': loss})

        return loss, loss_dict

    def forward(self, x, *args, **kwargs):
        # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
        # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
        t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
        return self.p_losses(x, t, *args, **kwargs)

    def get_input(self, batch, k):
        x = batch[k]
        if len(x.shape) == 3:
            x = x[..., None]
        x = rearrange(x, 'b h w c -> b c h w')
        x = x.to(memory_format=torch.contiguous_format).float()
        return x

    def shared_step(self, batch):
        x = self.get_input(batch, self.first_stage_key)
        loss, loss_dict = self(x)
        return loss, loss_dict

    def training_step(self, batch, batch_idx):
        loss, loss_dict = self.shared_step(batch)

        self.log_dict(loss_dict, prog_bar=True,
                      logger=True, on_step=True, on_epoch=True)

        self.log("global_step", self.global_step,
                 prog_bar=True, logger=True, on_step=True, on_epoch=False)

        if self.use_scheduler:
            lr = self.optimizers().param_groups[0]['lr']
            self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)

        return loss

    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        _, loss_dict_no_ema = self.shared_step(batch)
        with self.ema_scope():
            _, loss_dict_ema = self.shared_step(batch)
            loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
        self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
        self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)

    def on_train_batch_end(self, *args, **kwargs):
        if self.use_ema:
            self.model_ema(self.model)

    def _get_rows_from_list(self, samples):
        n_imgs_per_row = len(samples)
        denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
        denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
        denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
        return denoise_grid

    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
        log = {}
        x = self.get_input(batch, self.first_stage_key)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        x = x.to(self.device)[:N]
        log["inputs"] = x

        # get diffusion row
        diffusion_row = []
        x_start = x[:n_row]

        for t in range(self.num_timesteps):
            if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
                t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
                t = t.to(self.device).long()
                noise = torch.randn_like(x_start)
                x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
                diffusion_row.append(x_noisy)

        log["diffusion_row"] = self._get_rows_from_list(diffusion_row)

        if sample:
            # get denoise row
            with self.ema_scope("Plotting"):
                samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)

            log["samples"] = samples
            log["denoise_row"] = self._get_rows_from_list(denoise_row)

        if return_keys:
            if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
                return log
            else:
                return {key: log[key] for key in return_keys}
        return log

    def configure_optimizers(self):
        lr = self.learning_rate
        params = list(self.model.parameters())
        if self.learn_logvar:
            params = params + [self.logvar]
        opt = torch.optim.AdamW(params, lr=lr)
        return opt


class LatentDiffusionV1(DDPMV1):
    """main class"""
    def __init__(self,
                 first_stage_config,
                 cond_stage_config,
                 num_timesteps_cond=None,
                 cond_stage_key="image",
                 cond_stage_trainable=False,
                 concat_mode=True,
                 cond_stage_forward=None,
                 conditioning_key=None,
                 scale_factor=1.0,
                 scale_by_std=False,
                 *args, **kwargs):
        self.num_timesteps_cond = default(num_timesteps_cond, 1)
        self.scale_by_std = scale_by_std
        assert self.num_timesteps_cond <= kwargs['timesteps']
        # for backwards compatibility after implementation of DiffusionWrapper
        if conditioning_key is None:
            conditioning_key = 'concat' if concat_mode else 'crossattn'
        if cond_stage_config == '__is_unconditional__':
            conditioning_key = None
        ckpt_path = kwargs.pop("ckpt_path", None)
        ignore_keys = kwargs.pop("ignore_keys", [])
        super().__init__(*args, conditioning_key=conditioning_key, **kwargs)
        self.concat_mode = concat_mode
        self.cond_stage_trainable = cond_stage_trainable
        self.cond_stage_key = cond_stage_key
        try:
            self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
        except Exception:
            self.num_downs = 0
        if not scale_by_std:
            self.scale_factor = scale_factor
        else:
            self.register_buffer('scale_factor', torch.tensor(scale_factor))
        self.instantiate_first_stage(first_stage_config)
        self.instantiate_cond_stage(cond_stage_config)
        self.cond_stage_forward = cond_stage_forward
        self.clip_denoised = False
        self.bbox_tokenizer = None

        self.restarted_from_ckpt = False
        if ckpt_path is not None:
            self.init_from_ckpt(ckpt_path, ignore_keys)
            self.restarted_from_ckpt = True

    def make_cond_schedule(self, ):
        self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
        ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
        self.cond_ids[:self.num_timesteps_cond] = ids

    @rank_zero_only
    @torch.no_grad()
    def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
        # only for very first batch
        if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
            assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
            # set rescale weight to 1./std of encodings
            print("### USING STD-RESCALING ###")
            x = super().get_input(batch, self.first_stage_key)
            x = x.to(self.device)
            encoder_posterior = self.encode_first_stage(x)
            z = self.get_first_stage_encoding(encoder_posterior).detach()
            del self.scale_factor
            self.register_buffer('scale_factor', 1. / z.flatten().std())
            print(f"setting self.scale_factor to {self.scale_factor}")
            print("### USING STD-RESCALING ###")

    def register_schedule(self,
                          given_betas=None, beta_schedule="linear", timesteps=1000,
                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
        super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)

        self.shorten_cond_schedule = self.num_timesteps_cond > 1
        if self.shorten_cond_schedule:
            self.make_cond_schedule()

    def instantiate_first_stage(self, config):
        model = instantiate_from_config(config)
        self.first_stage_model = model.eval()
|
504 |
+
self.first_stage_model.train = disabled_train
|
505 |
+
for param in self.first_stage_model.parameters():
|
506 |
+
param.requires_grad = False
|
507 |
+
|
508 |
+
def instantiate_cond_stage(self, config):
|
509 |
+
if not self.cond_stage_trainable:
|
510 |
+
if config == "__is_first_stage__":
|
511 |
+
print("Using first stage also as cond stage.")
|
512 |
+
self.cond_stage_model = self.first_stage_model
|
513 |
+
elif config == "__is_unconditional__":
|
514 |
+
print(f"Training {self.__class__.__name__} as an unconditional model.")
|
515 |
+
self.cond_stage_model = None
|
516 |
+
# self.be_unconditional = True
|
517 |
+
else:
|
518 |
+
model = instantiate_from_config(config)
|
519 |
+
self.cond_stage_model = model.eval()
|
520 |
+
self.cond_stage_model.train = disabled_train
|
521 |
+
for param in self.cond_stage_model.parameters():
|
522 |
+
param.requires_grad = False
|
523 |
+
else:
|
524 |
+
assert config != '__is_first_stage__'
|
525 |
+
assert config != '__is_unconditional__'
|
526 |
+
model = instantiate_from_config(config)
|
527 |
+
self.cond_stage_model = model
|
528 |
+
|
529 |
+
def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
|
530 |
+
denoise_row = []
|
531 |
+
for zd in tqdm(samples, desc=desc):
|
532 |
+
denoise_row.append(self.decode_first_stage(zd.to(self.device),
|
533 |
+
force_not_quantize=force_no_decoder_quantization))
|
534 |
+
n_imgs_per_row = len(denoise_row)
|
535 |
+
denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
|
536 |
+
denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
|
537 |
+
denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
|
538 |
+
denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
|
539 |
+
return denoise_grid
|
540 |
+
|
541 |
+
def get_first_stage_encoding(self, encoder_posterior):
|
542 |
+
if isinstance(encoder_posterior, DiagonalGaussianDistribution):
|
543 |
+
z = encoder_posterior.sample()
|
544 |
+
elif isinstance(encoder_posterior, torch.Tensor):
|
545 |
+
z = encoder_posterior
|
546 |
+
else:
|
547 |
+
raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
|
548 |
+
return self.scale_factor * z
|
549 |
+
|
550 |
+
def get_learned_conditioning(self, c):
|
551 |
+
if self.cond_stage_forward is None:
|
552 |
+
if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
|
553 |
+
c = self.cond_stage_model.encode(c)
|
554 |
+
if isinstance(c, DiagonalGaussianDistribution):
|
555 |
+
c = c.mode()
|
556 |
+
else:
|
557 |
+
c = self.cond_stage_model(c)
|
558 |
+
else:
|
559 |
+
assert hasattr(self.cond_stage_model, self.cond_stage_forward)
|
560 |
+
c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
|
561 |
+
return c
|
562 |
+
|
563 |
+
def meshgrid(self, h, w):
|
564 |
+
y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
|
565 |
+
x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
|
566 |
+
|
567 |
+
arr = torch.cat([y, x], dim=-1)
|
568 |
+
return arr
|
569 |
+
|
570 |
+
def delta_border(self, h, w):
|
571 |
+
"""
|
572 |
+
:param h: height
|
573 |
+
:param w: width
|
574 |
+
:return: normalized distance to image border,
|
575 |
+
with min distance = 0 at border and max dist = 0.5 at image center
|
576 |
+
"""
|
577 |
+
lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
|
578 |
+
arr = self.meshgrid(h, w) / lower_right_corner
|
579 |
+
dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
|
580 |
+
dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
|
581 |
+
edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
|
582 |
+
return edge_dist
|
583 |
+
|
584 |
+
def get_weighting(self, h, w, Ly, Lx, device):
|
585 |
+
weighting = self.delta_border(h, w)
|
586 |
+
weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
|
587 |
+
self.split_input_params["clip_max_weight"], )
|
588 |
+
weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
|
589 |
+
|
590 |
+
if self.split_input_params["tie_braker"]:
|
591 |
+
L_weighting = self.delta_border(Ly, Lx)
|
592 |
+
L_weighting = torch.clip(L_weighting,
|
593 |
+
self.split_input_params["clip_min_tie_weight"],
|
594 |
+
self.split_input_params["clip_max_tie_weight"])
|
595 |
+
|
596 |
+
L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
|
597 |
+
weighting = weighting * L_weighting
|
598 |
+
return weighting
|
599 |
+
|
600 |
+
def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code
|
601 |
+
"""
|
602 |
+
:param x: img of size (bs, c, h, w)
|
603 |
+
:return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
|
604 |
+
"""
|
605 |
+
bs, nc, h, w = x.shape
|
606 |
+
|
607 |
+
# number of crops in image
|
608 |
+
Ly = (h - kernel_size[0]) // stride[0] + 1
|
609 |
+
Lx = (w - kernel_size[1]) // stride[1] + 1
|
610 |
+
|
611 |
+
if uf == 1 and df == 1:
|
612 |
+
fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
|
613 |
+
unfold = torch.nn.Unfold(**fold_params)
|
614 |
+
|
615 |
+
fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
|
616 |
+
|
617 |
+
weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
|
618 |
+
normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap
|
619 |
+
weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
|
620 |
+
|
621 |
+
elif uf > 1 and df == 1:
|
622 |
+
fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
|
623 |
+
unfold = torch.nn.Unfold(**fold_params)
|
624 |
+
|
625 |
+
fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
|
626 |
+
dilation=1, padding=0,
|
627 |
+
stride=(stride[0] * uf, stride[1] * uf))
|
628 |
+
fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
|
629 |
+
|
630 |
+
weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
|
631 |
+
normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap
|
632 |
+
weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
|
633 |
+
|
634 |
+
elif df > 1 and uf == 1:
|
635 |
+
fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
|
636 |
+
unfold = torch.nn.Unfold(**fold_params)
|
637 |
+
|
638 |
+
fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
|
639 |
+
dilation=1, padding=0,
|
640 |
+
stride=(stride[0] // df, stride[1] // df))
|
641 |
+
fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
|
642 |
+
|
643 |
+
weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
|
644 |
+
normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap
|
645 |
+
weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
|
646 |
+
|
647 |
+
else:
|
648 |
+
raise NotImplementedError
|
649 |
+
|
650 |
+
return fold, unfold, normalization, weighting
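The fold/unfold machinery above is the core of the tiled ("split input") path: the tensor is cut into overlapping crops with torch.nn.Unfold, each crop is processed on its own, and torch.nn.Fold stitches the results back while a border weighting plus a normalization map averages the overlaps so no seams remain. A minimal, self-contained sketch of that round trip in plain PyTorch (identity "model", uniform weighting, and toy kernel/stride values chosen only for illustration, not the values LDSR actually uses):

import torch

bs, c, h, w = 1, 3, 8, 8
ks, stride = (4, 4), (2, 2)                      # toy tile size and overlap
x = torch.randn(bs, c, h, w)

unfold = torch.nn.Unfold(kernel_size=ks, stride=stride)
fold = torch.nn.Fold(output_size=(h, w), kernel_size=ks, stride=stride)

tiles = unfold(x)                                 # (bs, c * ks[0] * ks[1], L) overlapping crops
weighting = torch.ones(1, c * ks[0] * ks[1], tiles.shape[-1])   # stand-in for the smooth border weighting
normalization = fold(weighting)                   # total weight each output pixel receives

processed = tiles                                 # identity "model" applied per crop
stitched = fold(processed * weighting) / normalization

assert torch.allclose(stitched, x, atol=1e-5)     # identity model => exact reconstruction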
|
651 |
+
|
652 |
+
@torch.no_grad()
|
653 |
+
def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
|
654 |
+
cond_key=None, return_original_cond=False, bs=None):
|
655 |
+
x = super().get_input(batch, k)
|
656 |
+
if bs is not None:
|
657 |
+
x = x[:bs]
|
658 |
+
x = x.to(self.device)
|
659 |
+
encoder_posterior = self.encode_first_stage(x)
|
660 |
+
z = self.get_first_stage_encoding(encoder_posterior).detach()
|
661 |
+
|
662 |
+
if self.model.conditioning_key is not None:
|
663 |
+
if cond_key is None:
|
664 |
+
cond_key = self.cond_stage_key
|
665 |
+
if cond_key != self.first_stage_key:
|
666 |
+
if cond_key in ['caption', 'coordinates_bbox']:
|
667 |
+
xc = batch[cond_key]
|
668 |
+
elif cond_key == 'class_label':
|
669 |
+
xc = batch
|
670 |
+
else:
|
671 |
+
xc = super().get_input(batch, cond_key).to(self.device)
|
672 |
+
else:
|
673 |
+
xc = x
|
674 |
+
if not self.cond_stage_trainable or force_c_encode:
|
675 |
+
if isinstance(xc, dict) or isinstance(xc, list):
|
676 |
+
# import pudb; pudb.set_trace()
|
677 |
+
c = self.get_learned_conditioning(xc)
|
678 |
+
else:
|
679 |
+
c = self.get_learned_conditioning(xc.to(self.device))
|
680 |
+
else:
|
681 |
+
c = xc
|
682 |
+
if bs is not None:
|
683 |
+
c = c[:bs]
|
684 |
+
|
685 |
+
if self.use_positional_encodings:
|
686 |
+
pos_x, pos_y = self.compute_latent_shifts(batch)
|
687 |
+
ckey = __conditioning_keys__[self.model.conditioning_key]
|
688 |
+
c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}
|
689 |
+
|
690 |
+
else:
|
691 |
+
c = None
|
692 |
+
xc = None
|
693 |
+
if self.use_positional_encodings:
|
694 |
+
pos_x, pos_y = self.compute_latent_shifts(batch)
|
695 |
+
c = {'pos_x': pos_x, 'pos_y': pos_y}
|
696 |
+
out = [z, c]
|
697 |
+
if return_first_stage_outputs:
|
698 |
+
xrec = self.decode_first_stage(z)
|
699 |
+
out.extend([x, xrec])
|
700 |
+
if return_original_cond:
|
701 |
+
out.append(xc)
|
702 |
+
return out
|
703 |
+
|
704 |
+
@torch.no_grad()
|
705 |
+
def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
|
706 |
+
if predict_cids:
|
707 |
+
if z.dim() == 4:
|
708 |
+
z = torch.argmax(z.exp(), dim=1).long()
|
709 |
+
z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
|
710 |
+
z = rearrange(z, 'b h w c -> b c h w').contiguous()
|
711 |
+
|
712 |
+
z = 1. / self.scale_factor * z
|
713 |
+
|
714 |
+
if hasattr(self, "split_input_params"):
|
715 |
+
if self.split_input_params["patch_distributed_vq"]:
|
716 |
+
ks = self.split_input_params["ks"] # eg. (128, 128)
|
717 |
+
stride = self.split_input_params["stride"] # eg. (64, 64)
|
718 |
+
uf = self.split_input_params["vqf"]
|
719 |
+
bs, nc, h, w = z.shape
|
720 |
+
if ks[0] > h or ks[1] > w:
|
721 |
+
ks = (min(ks[0], h), min(ks[1], w))
|
722 |
+
print("reducing Kernel")
|
723 |
+
|
724 |
+
if stride[0] > h or stride[1] > w:
|
725 |
+
stride = (min(stride[0], h), min(stride[1], w))
|
726 |
+
print("reducing stride")
|
727 |
+
|
728 |
+
fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
|
729 |
+
|
730 |
+
z = unfold(z) # (bn, nc * prod(**ks), L)
|
731 |
+
# 1. Reshape to img shape
|
732 |
+
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
|
733 |
+
|
734 |
+
# 2. apply model loop over last dim
|
735 |
+
if isinstance(self.first_stage_model, VQModelInterface):
|
736 |
+
output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
|
737 |
+
force_not_quantize=predict_cids or force_not_quantize)
|
738 |
+
for i in range(z.shape[-1])]
|
739 |
+
else:
|
740 |
+
|
741 |
+
output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
|
742 |
+
for i in range(z.shape[-1])]
|
743 |
+
|
744 |
+
o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)
|
745 |
+
o = o * weighting
|
746 |
+
# Reverse 1. reshape to img shape
|
747 |
+
o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
|
748 |
+
# stitch crops together
|
749 |
+
decoded = fold(o)
|
750 |
+
decoded = decoded / normalization # norm is shape (1, 1, h, w)
|
751 |
+
return decoded
|
752 |
+
else:
|
753 |
+
if isinstance(self.first_stage_model, VQModelInterface):
|
754 |
+
return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
|
755 |
+
else:
|
756 |
+
return self.first_stage_model.decode(z)
|
757 |
+
|
758 |
+
else:
|
759 |
+
if isinstance(self.first_stage_model, VQModelInterface):
|
760 |
+
return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
|
761 |
+
else:
|
762 |
+
return self.first_stage_model.decode(z)
|
763 |
+
|
764 |
+
# same as above but without decorator
|
765 |
+
def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
|
766 |
+
if predict_cids:
|
767 |
+
if z.dim() == 4:
|
768 |
+
z = torch.argmax(z.exp(), dim=1).long()
|
769 |
+
z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
|
770 |
+
z = rearrange(z, 'b h w c -> b c h w').contiguous()
|
771 |
+
|
772 |
+
z = 1. / self.scale_factor * z
|
773 |
+
|
774 |
+
if hasattr(self, "split_input_params"):
|
775 |
+
if self.split_input_params["patch_distributed_vq"]:
|
776 |
+
ks = self.split_input_params["ks"] # eg. (128, 128)
|
777 |
+
stride = self.split_input_params["stride"] # eg. (64, 64)
|
778 |
+
uf = self.split_input_params["vqf"]
|
779 |
+
bs, nc, h, w = z.shape
|
780 |
+
if ks[0] > h or ks[1] > w:
|
781 |
+
ks = (min(ks[0], h), min(ks[1], w))
|
782 |
+
print("reducing Kernel")
|
783 |
+
|
784 |
+
if stride[0] > h or stride[1] > w:
|
785 |
+
stride = (min(stride[0], h), min(stride[1], w))
|
786 |
+
print("reducing stride")
|
787 |
+
|
788 |
+
fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
|
789 |
+
|
790 |
+
z = unfold(z) # (bn, nc * prod(**ks), L)
|
791 |
+
# 1. Reshape to img shape
|
792 |
+
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
|
793 |
+
|
794 |
+
# 2. apply model loop over last dim
|
795 |
+
if isinstance(self.first_stage_model, VQModelInterface):
|
796 |
+
output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
|
797 |
+
force_not_quantize=predict_cids or force_not_quantize)
|
798 |
+
for i in range(z.shape[-1])]
|
799 |
+
else:
|
800 |
+
|
801 |
+
output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
|
802 |
+
for i in range(z.shape[-1])]
|
803 |
+
|
804 |
+
o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)
|
805 |
+
o = o * weighting
|
806 |
+
# Reverse 1. reshape to img shape
|
807 |
+
o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
|
808 |
+
# stitch crops together
|
809 |
+
decoded = fold(o)
|
810 |
+
decoded = decoded / normalization # norm is shape (1, 1, h, w)
|
811 |
+
return decoded
|
812 |
+
else:
|
813 |
+
if isinstance(self.first_stage_model, VQModelInterface):
|
814 |
+
return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
|
815 |
+
else:
|
816 |
+
return self.first_stage_model.decode(z)
|
817 |
+
|
818 |
+
else:
|
819 |
+
if isinstance(self.first_stage_model, VQModelInterface):
|
820 |
+
return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
|
821 |
+
else:
|
822 |
+
return self.first_stage_model.decode(z)
|
823 |
+
|
824 |
+
@torch.no_grad()
|
825 |
+
def encode_first_stage(self, x):
|
826 |
+
if hasattr(self, "split_input_params"):
|
827 |
+
if self.split_input_params["patch_distributed_vq"]:
|
828 |
+
ks = self.split_input_params["ks"] # eg. (128, 128)
|
829 |
+
stride = self.split_input_params["stride"] # eg. (64, 64)
|
830 |
+
df = self.split_input_params["vqf"]
|
831 |
+
self.split_input_params['original_image_size'] = x.shape[-2:]
|
832 |
+
bs, nc, h, w = x.shape
|
833 |
+
if ks[0] > h or ks[1] > w:
|
834 |
+
ks = (min(ks[0], h), min(ks[1], w))
|
835 |
+
print("reducing Kernel")
|
836 |
+
|
837 |
+
if stride[0] > h or stride[1] > w:
|
838 |
+
stride = (min(stride[0], h), min(stride[1], w))
|
839 |
+
print("reducing stride")
|
840 |
+
|
841 |
+
fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df)
|
842 |
+
z = unfold(x) # (bn, nc * prod(**ks), L)
|
843 |
+
# Reshape to img shape
|
844 |
+
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
|
845 |
+
|
846 |
+
output_list = [self.first_stage_model.encode(z[:, :, :, :, i])
|
847 |
+
for i in range(z.shape[-1])]
|
848 |
+
|
849 |
+
o = torch.stack(output_list, axis=-1)
|
850 |
+
o = o * weighting
|
851 |
+
|
852 |
+
# Reverse reshape to img shape
|
853 |
+
o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
|
854 |
+
# stitch crops together
|
855 |
+
decoded = fold(o)
|
856 |
+
decoded = decoded / normalization
|
857 |
+
return decoded
|
858 |
+
|
859 |
+
else:
|
860 |
+
return self.first_stage_model.encode(x)
|
861 |
+
else:
|
862 |
+
return self.first_stage_model.encode(x)
|
863 |
+
|
864 |
+
def shared_step(self, batch, **kwargs):
|
865 |
+
x, c = self.get_input(batch, self.first_stage_key)
|
866 |
+
loss = self(x, c)
|
867 |
+
return loss
|
868 |
+
|
869 |
+
def forward(self, x, c, *args, **kwargs):
|
870 |
+
t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
|
871 |
+
if self.model.conditioning_key is not None:
|
872 |
+
assert c is not None
|
873 |
+
if self.cond_stage_trainable:
|
874 |
+
c = self.get_learned_conditioning(c)
|
875 |
+
if self.shorten_cond_schedule: # TODO: drop this option
|
876 |
+
tc = self.cond_ids[t].to(self.device)
|
877 |
+
c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
|
878 |
+
return self.p_losses(x, c, t, *args, **kwargs)
|
879 |
+
|
880 |
+
def apply_model(self, x_noisy, t, cond, return_ids=False):
|
881 |
+
|
882 |
+
if isinstance(cond, dict):
|
883 |
+
# hybrid case, cond is expected to be a dict
|
884 |
+
pass
|
885 |
+
else:
|
886 |
+
if not isinstance(cond, list):
|
887 |
+
cond = [cond]
|
888 |
+
key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
|
889 |
+
cond = {key: cond}
|
890 |
+
|
891 |
+
if hasattr(self, "split_input_params"):
|
892 |
+
assert len(cond) == 1 # todo can only deal with one conditioning atm
|
893 |
+
assert not return_ids
|
894 |
+
ks = self.split_input_params["ks"] # eg. (128, 128)
|
895 |
+
stride = self.split_input_params["stride"] # eg. (64, 64)
|
896 |
+
|
897 |
+
h, w = x_noisy.shape[-2:]
|
898 |
+
|
899 |
+
fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)
|
900 |
+
|
901 |
+
z = unfold(x_noisy) # (bn, nc * prod(**ks), L)
|
902 |
+
# Reshape to img shape
|
903 |
+
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
|
904 |
+
z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]
|
905 |
+
|
906 |
+
if self.cond_stage_key in ["image", "LR_image", "segmentation",
|
907 |
+
'bbox_img'] and self.model.conditioning_key: # todo check for completeness
|
908 |
+
c_key = next(iter(cond.keys())) # get key
|
909 |
+
c = next(iter(cond.values())) # get value
|
910 |
+
assert (len(c) == 1) # todo extend to list with more than one elem
|
911 |
+
c = c[0] # get element
|
912 |
+
|
913 |
+
c = unfold(c)
|
914 |
+
c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L )
|
915 |
+
|
916 |
+
cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]
|
917 |
+
|
918 |
+
elif self.cond_stage_key == 'coordinates_bbox':
|
919 |
+
assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'
|
920 |
+
|
921 |
+
# assuming padding of unfold is always 0 and its dilation is always 1
|
922 |
+
n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
|
923 |
+
full_img_h, full_img_w = self.split_input_params['original_image_size']
|
924 |
+
# as we are operating on latents, we need the factor from the original image size to the
|
925 |
+
# spatial latent size to properly rescale the crops for regenerating the bbox annotations
|
926 |
+
num_downs = self.first_stage_model.encoder.num_resolutions - 1
|
927 |
+
rescale_latent = 2 ** (num_downs)
|
928 |
+
|
929 |
+
# get top left positions of patches as conforming for the bbox tokenizer, therefore we
|
930 |
+
# need to rescale the tl patch coordinates to be in between (0,1)
|
931 |
+
tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
|
932 |
+
rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
|
933 |
+
for patch_nr in range(z.shape[-1])]
|
934 |
+
|
935 |
+
# patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
|
936 |
+
patch_limits = [(x_tl, y_tl,
|
937 |
+
rescale_latent * ks[0] / full_img_w,
|
938 |
+
rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
|
939 |
+
# patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]
|
940 |
+
|
941 |
+
# tokenize crop coordinates for the bounding boxes of the respective patches
|
942 |
+
patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
|
943 |
+
for bbox in patch_limits] # list of length l with tensors of shape (1, 2)
|
944 |
+
print(patch_limits_tknzd[0].shape)
|
945 |
+
# cut tknzd crop position from conditioning
|
946 |
+
assert isinstance(cond, dict), 'cond must be dict to be fed into model'
|
947 |
+
cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
|
948 |
+
print(cut_cond.shape)
|
949 |
+
|
950 |
+
adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
|
951 |
+
adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
|
952 |
+
print(adapted_cond.shape)
|
953 |
+
adapted_cond = self.get_learned_conditioning(adapted_cond)
|
954 |
+
print(adapted_cond.shape)
|
955 |
+
adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
|
956 |
+
print(adapted_cond.shape)
|
957 |
+
|
958 |
+
cond_list = [{'c_crossattn': [e]} for e in adapted_cond]
|
959 |
+
|
960 |
+
else:
|
961 |
+
cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient
|
962 |
+
|
963 |
+
# apply model by loop over crops
|
964 |
+
output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
|
965 |
+
assert not isinstance(output_list[0],
|
966 |
+
tuple) # todo cant deal with multiple model outputs check this never happens
|
967 |
+
|
968 |
+
o = torch.stack(output_list, axis=-1)
|
969 |
+
o = o * weighting
|
970 |
+
# Reverse reshape to img shape
|
971 |
+
o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
|
972 |
+
# stitch crops together
|
973 |
+
x_recon = fold(o) / normalization
|
974 |
+
|
975 |
+
else:
|
976 |
+
x_recon = self.model(x_noisy, t, **cond)
|
977 |
+
|
978 |
+
if isinstance(x_recon, tuple) and not return_ids:
|
979 |
+
return x_recon[0]
|
980 |
+
else:
|
981 |
+
return x_recon
|
982 |
+
|
983 |
+
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
|
984 |
+
return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
|
985 |
+
extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
|
986 |
+
|
987 |
+
def _prior_bpd(self, x_start):
|
988 |
+
"""
|
989 |
+
Get the prior KL term for the variational lower-bound, measured in
|
990 |
+
bits-per-dim.
|
991 |
+
This term can't be optimized, as it only depends on the encoder.
|
992 |
+
:param x_start: the [N x C x ...] tensor of inputs.
|
993 |
+
:return: a batch of [N] KL values (in bits), one per batch element.
|
994 |
+
"""
|
995 |
+
batch_size = x_start.shape[0]
|
996 |
+
t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
|
997 |
+
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
|
998 |
+
kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
|
999 |
+
return mean_flat(kl_prior) / np.log(2.0)
|
1000 |
+
|
1001 |
+
def p_losses(self, x_start, cond, t, noise=None):
|
1002 |
+
noise = default(noise, lambda: torch.randn_like(x_start))
|
1003 |
+
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
|
1004 |
+
model_output = self.apply_model(x_noisy, t, cond)
|
1005 |
+
|
1006 |
+
loss_dict = {}
|
1007 |
+
prefix = 'train' if self.training else 'val'
|
1008 |
+
|
1009 |
+
if self.parameterization == "x0":
|
1010 |
+
target = x_start
|
1011 |
+
elif self.parameterization == "eps":
|
1012 |
+
target = noise
|
1013 |
+
else:
|
1014 |
+
raise NotImplementedError()
|
1015 |
+
|
1016 |
+
loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
|
1017 |
+
loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
|
1018 |
+
|
1019 |
+
logvar_t = self.logvar[t].to(self.device)
|
1020 |
+
loss = loss_simple / torch.exp(logvar_t) + logvar_t
|
1021 |
+
# loss = loss_simple / torch.exp(self.logvar) + self.logvar
|
1022 |
+
if self.learn_logvar:
|
1023 |
+
loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
|
1024 |
+
loss_dict.update({'logvar': self.logvar.data.mean()})
|
1025 |
+
|
1026 |
+
loss = self.l_simple_weight * loss.mean()
|
1027 |
+
|
1028 |
+
loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
|
1029 |
+
loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
|
1030 |
+
loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
|
1031 |
+
loss += (self.original_elbo_weight * loss_vlb)
|
1032 |
+
loss_dict.update({f'{prefix}/loss': loss})
|
1033 |
+
|
1034 |
+
return loss, loss_dict
|
1035 |
+
|
1036 |
+
def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
|
1037 |
+
return_x0=False, score_corrector=None, corrector_kwargs=None):
|
1038 |
+
t_in = t
|
1039 |
+
model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
|
1040 |
+
|
1041 |
+
if score_corrector is not None:
|
1042 |
+
assert self.parameterization == "eps"
|
1043 |
+
model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
|
1044 |
+
|
1045 |
+
if return_codebook_ids:
|
1046 |
+
model_out, logits = model_out
|
1047 |
+
|
1048 |
+
if self.parameterization == "eps":
|
1049 |
+
x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
|
1050 |
+
elif self.parameterization == "x0":
|
1051 |
+
x_recon = model_out
|
1052 |
+
else:
|
1053 |
+
raise NotImplementedError()
|
1054 |
+
|
1055 |
+
if clip_denoised:
|
1056 |
+
x_recon.clamp_(-1., 1.)
|
1057 |
+
if quantize_denoised:
|
1058 |
+
x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
|
1059 |
+
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
|
1060 |
+
if return_codebook_ids:
|
1061 |
+
return model_mean, posterior_variance, posterior_log_variance, logits
|
1062 |
+
elif return_x0:
|
1063 |
+
return model_mean, posterior_variance, posterior_log_variance, x_recon
|
1064 |
+
else:
|
1065 |
+
return model_mean, posterior_variance, posterior_log_variance
|
1066 |
+
|
1067 |
+
@torch.no_grad()
|
1068 |
+
def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
|
1069 |
+
return_codebook_ids=False, quantize_denoised=False, return_x0=False,
|
1070 |
+
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
|
1071 |
+
b, *_, device = *x.shape, x.device
|
1072 |
+
outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
|
1073 |
+
return_codebook_ids=return_codebook_ids,
|
1074 |
+
quantize_denoised=quantize_denoised,
|
1075 |
+
return_x0=return_x0,
|
1076 |
+
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
|
1077 |
+
if return_codebook_ids:
|
1078 |
+
raise DeprecationWarning("Support dropped.")
|
1079 |
+
model_mean, _, model_log_variance, logits = outputs
|
1080 |
+
elif return_x0:
|
1081 |
+
model_mean, _, model_log_variance, x0 = outputs
|
1082 |
+
else:
|
1083 |
+
model_mean, _, model_log_variance = outputs
|
1084 |
+
|
1085 |
+
noise = noise_like(x.shape, device, repeat_noise) * temperature
|
1086 |
+
if noise_dropout > 0.:
|
1087 |
+
noise = torch.nn.functional.dropout(noise, p=noise_dropout)
|
1088 |
+
# no noise when t == 0
|
1089 |
+
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
|
1090 |
+
|
1091 |
+
if return_codebook_ids:
|
1092 |
+
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
|
1093 |
+
if return_x0:
|
1094 |
+
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
|
1095 |
+
else:
|
1096 |
+
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
|
1097 |
+
|
1098 |
+
@torch.no_grad()
|
1099 |
+
def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
|
1100 |
+
img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
|
1101 |
+
score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
|
1102 |
+
log_every_t=None):
|
1103 |
+
if not log_every_t:
|
1104 |
+
log_every_t = self.log_every_t
|
1105 |
+
timesteps = self.num_timesteps
|
1106 |
+
if batch_size is not None:
|
1107 |
+
b = batch_size if batch_size is not None else shape[0]
|
1108 |
+
shape = [batch_size] + list(shape)
|
1109 |
+
else:
|
1110 |
+
b = batch_size = shape[0]
|
1111 |
+
if x_T is None:
|
1112 |
+
img = torch.randn(shape, device=self.device)
|
1113 |
+
else:
|
1114 |
+
img = x_T
|
1115 |
+
intermediates = []
|
1116 |
+
if cond is not None:
|
1117 |
+
if isinstance(cond, dict):
|
1118 |
+
cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
|
1119 |
+
[x[:batch_size] for x in cond[key]] for key in cond}
|
1120 |
+
else:
|
1121 |
+
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
|
1122 |
+
|
1123 |
+
if start_T is not None:
|
1124 |
+
timesteps = min(timesteps, start_T)
|
1125 |
+
iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
|
1126 |
+
total=timesteps) if verbose else reversed(
|
1127 |
+
range(0, timesteps))
|
1128 |
+
if type(temperature) == float:
|
1129 |
+
temperature = [temperature] * timesteps
|
1130 |
+
|
1131 |
+
for i in iterator:
|
1132 |
+
ts = torch.full((b,), i, device=self.device, dtype=torch.long)
|
1133 |
+
if self.shorten_cond_schedule:
|
1134 |
+
assert self.model.conditioning_key != 'hybrid'
|
1135 |
+
tc = self.cond_ids[ts].to(cond.device)
|
1136 |
+
cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
|
1137 |
+
|
1138 |
+
img, x0_partial = self.p_sample(img, cond, ts,
|
1139 |
+
clip_denoised=self.clip_denoised,
|
1140 |
+
quantize_denoised=quantize_denoised, return_x0=True,
|
1141 |
+
temperature=temperature[i], noise_dropout=noise_dropout,
|
1142 |
+
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
|
1143 |
+
if mask is not None:
|
1144 |
+
assert x0 is not None
|
1145 |
+
img_orig = self.q_sample(x0, ts)
|
1146 |
+
img = img_orig * mask + (1. - mask) * img
|
1147 |
+
|
1148 |
+
if i % log_every_t == 0 or i == timesteps - 1:
|
1149 |
+
intermediates.append(x0_partial)
|
1150 |
+
if callback:
|
1151 |
+
callback(i)
|
1152 |
+
if img_callback:
|
1153 |
+
img_callback(img, i)
|
1154 |
+
return img, intermediates
|
1155 |
+
|
1156 |
+
@torch.no_grad()
|
1157 |
+
def p_sample_loop(self, cond, shape, return_intermediates=False,
|
1158 |
+
x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
|
1159 |
+
mask=None, x0=None, img_callback=None, start_T=None,
|
1160 |
+
log_every_t=None):
|
1161 |
+
|
1162 |
+
if not log_every_t:
|
1163 |
+
log_every_t = self.log_every_t
|
1164 |
+
device = self.betas.device
|
1165 |
+
b = shape[0]
|
1166 |
+
if x_T is None:
|
1167 |
+
img = torch.randn(shape, device=device)
|
1168 |
+
else:
|
1169 |
+
img = x_T
|
1170 |
+
|
1171 |
+
intermediates = [img]
|
1172 |
+
if timesteps is None:
|
1173 |
+
timesteps = self.num_timesteps
|
1174 |
+
|
1175 |
+
if start_T is not None:
|
1176 |
+
timesteps = min(timesteps, start_T)
|
1177 |
+
iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
|
1178 |
+
range(0, timesteps))
|
1179 |
+
|
1180 |
+
if mask is not None:
|
1181 |
+
assert x0 is not None
|
1182 |
+
assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match
|
1183 |
+
|
1184 |
+
for i in iterator:
|
1185 |
+
ts = torch.full((b,), i, device=device, dtype=torch.long)
|
1186 |
+
if self.shorten_cond_schedule:
|
1187 |
+
assert self.model.conditioning_key != 'hybrid'
|
1188 |
+
tc = self.cond_ids[ts].to(cond.device)
|
1189 |
+
cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
|
1190 |
+
|
1191 |
+
img = self.p_sample(img, cond, ts,
|
1192 |
+
clip_denoised=self.clip_denoised,
|
1193 |
+
quantize_denoised=quantize_denoised)
|
1194 |
+
if mask is not None:
|
1195 |
+
img_orig = self.q_sample(x0, ts)
|
1196 |
+
img = img_orig * mask + (1. - mask) * img
|
1197 |
+
|
1198 |
+
if i % log_every_t == 0 or i == timesteps - 1:
|
1199 |
+
intermediates.append(img)
|
1200 |
+
if callback:
|
1201 |
+
callback(i)
|
1202 |
+
if img_callback:
|
1203 |
+
img_callback(img, i)
|
1204 |
+
|
1205 |
+
if return_intermediates:
|
1206 |
+
return img, intermediates
|
1207 |
+
return img
|
1208 |
+
|
1209 |
+
@torch.no_grad()
|
1210 |
+
def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
|
1211 |
+
verbose=True, timesteps=None, quantize_denoised=False,
|
1212 |
+
mask=None, x0=None, shape=None,**kwargs):
|
1213 |
+
if shape is None:
|
1214 |
+
shape = (batch_size, self.channels, self.image_size, self.image_size)
|
1215 |
+
if cond is not None:
|
1216 |
+
if isinstance(cond, dict):
|
1217 |
+
cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
|
1218 |
+
[x[:batch_size] for x in cond[key]] for key in cond}
|
1219 |
+
else:
|
1220 |
+
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
|
1221 |
+
return self.p_sample_loop(cond,
|
1222 |
+
shape,
|
1223 |
+
return_intermediates=return_intermediates, x_T=x_T,
|
1224 |
+
verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
|
1225 |
+
mask=mask, x0=x0)
|
1226 |
+
|
1227 |
+
@torch.no_grad()
|
1228 |
+
def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs):
|
1229 |
+
|
1230 |
+
if ddim:
|
1231 |
+
ddim_sampler = DDIMSampler(self)
|
1232 |
+
shape = (self.channels, self.image_size, self.image_size)
|
1233 |
+
samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size,
|
1234 |
+
shape,cond,verbose=False,**kwargs)
|
1235 |
+
|
1236 |
+
else:
|
1237 |
+
samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
|
1238 |
+
return_intermediates=True,**kwargs)
|
1239 |
+
|
1240 |
+
return samples, intermediates
|
1241 |
+
|
1242 |
+
|
1243 |
+
@torch.no_grad()
|
1244 |
+
def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
|
1245 |
+
quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
|
1246 |
+
plot_diffusion_rows=True, **kwargs):
|
1247 |
+
|
1248 |
+
use_ddim = ddim_steps is not None
|
1249 |
+
|
1250 |
+
log = {}
|
1251 |
+
z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
|
1252 |
+
return_first_stage_outputs=True,
|
1253 |
+
force_c_encode=True,
|
1254 |
+
return_original_cond=True,
|
1255 |
+
bs=N)
|
1256 |
+
N = min(x.shape[0], N)
|
1257 |
+
n_row = min(x.shape[0], n_row)
|
1258 |
+
log["inputs"] = x
|
1259 |
+
log["reconstruction"] = xrec
|
1260 |
+
if self.model.conditioning_key is not None:
|
1261 |
+
if hasattr(self.cond_stage_model, "decode"):
|
1262 |
+
xc = self.cond_stage_model.decode(c)
|
1263 |
+
log["conditioning"] = xc
|
1264 |
+
elif self.cond_stage_key in ["caption"]:
|
1265 |
+
xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
|
1266 |
+
log["conditioning"] = xc
|
1267 |
+
elif self.cond_stage_key == 'class_label':
|
1268 |
+
xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
|
1269 |
+
log['conditioning'] = xc
|
1270 |
+
elif isimage(xc):
|
1271 |
+
log["conditioning"] = xc
|
1272 |
+
if ismap(xc):
|
1273 |
+
log["original_conditioning"] = self.to_rgb(xc)
|
1274 |
+
|
1275 |
+
if plot_diffusion_rows:
|
1276 |
+
# get diffusion row
|
1277 |
+
diffusion_row = []
|
1278 |
+
z_start = z[:n_row]
|
1279 |
+
for t in range(self.num_timesteps):
|
1280 |
+
if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
|
1281 |
+
t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
|
1282 |
+
t = t.to(self.device).long()
|
1283 |
+
noise = torch.randn_like(z_start)
|
1284 |
+
z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
|
1285 |
+
diffusion_row.append(self.decode_first_stage(z_noisy))
|
1286 |
+
|
1287 |
+
diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
|
1288 |
+
diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
|
1289 |
+
diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
|
1290 |
+
diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
|
1291 |
+
log["diffusion_row"] = diffusion_grid
|
1292 |
+
|
1293 |
+
if sample:
|
1294 |
+
# get denoise row
|
1295 |
+
with self.ema_scope("Plotting"):
|
1296 |
+
samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
|
1297 |
+
ddim_steps=ddim_steps,eta=ddim_eta)
|
1298 |
+
# samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
|
1299 |
+
x_samples = self.decode_first_stage(samples)
|
1300 |
+
log["samples"] = x_samples
|
1301 |
+
if plot_denoise_rows:
|
1302 |
+
denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
|
1303 |
+
log["denoise_row"] = denoise_grid
|
1304 |
+
|
1305 |
+
if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
|
1306 |
+
self.first_stage_model, IdentityFirstStage):
|
1307 |
+
# also display when quantizing x0 while sampling
|
1308 |
+
with self.ema_scope("Plotting Quantized Denoised"):
|
1309 |
+
samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
|
1310 |
+
ddim_steps=ddim_steps,eta=ddim_eta,
|
1311 |
+
quantize_denoised=True)
|
1312 |
+
# samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
|
1313 |
+
# quantize_denoised=True)
|
1314 |
+
x_samples = self.decode_first_stage(samples.to(self.device))
|
1315 |
+
log["samples_x0_quantized"] = x_samples
|
1316 |
+
|
1317 |
+
if inpaint:
|
1318 |
+
# make a simple center square
|
1319 |
+
h, w = z.shape[2], z.shape[3]
|
1320 |
+
mask = torch.ones(N, h, w).to(self.device)
|
1321 |
+
# zeros will be filled in
|
1322 |
+
mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
|
1323 |
+
mask = mask[:, None, ...]
|
1324 |
+
with self.ema_scope("Plotting Inpaint"):
|
1325 |
+
|
1326 |
+
samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta,
|
1327 |
+
ddim_steps=ddim_steps, x0=z[:N], mask=mask)
|
1328 |
+
x_samples = self.decode_first_stage(samples.to(self.device))
|
1329 |
+
log["samples_inpainting"] = x_samples
|
1330 |
+
log["mask"] = mask
|
1331 |
+
|
1332 |
+
# outpaint
|
1333 |
+
with self.ema_scope("Plotting Outpaint"):
|
1334 |
+
samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta,
|
1335 |
+
ddim_steps=ddim_steps, x0=z[:N], mask=mask)
|
1336 |
+
x_samples = self.decode_first_stage(samples.to(self.device))
|
1337 |
+
log["samples_outpainting"] = x_samples
|
1338 |
+
|
1339 |
+
if plot_progressive_rows:
|
1340 |
+
with self.ema_scope("Plotting Progressives"):
|
1341 |
+
img, progressives = self.progressive_denoising(c,
|
1342 |
+
shape=(self.channels, self.image_size, self.image_size),
|
1343 |
+
batch_size=N)
|
1344 |
+
prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
|
1345 |
+
log["progressive_row"] = prog_row
|
1346 |
+
|
1347 |
+
if return_keys:
|
1348 |
+
if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
|
1349 |
+
return log
|
1350 |
+
else:
|
1351 |
+
return {key: log[key] for key in return_keys}
|
1352 |
+
return log
|
1353 |
+
|
1354 |
+
def configure_optimizers(self):
|
1355 |
+
lr = self.learning_rate
|
1356 |
+
params = list(self.model.parameters())
|
1357 |
+
if self.cond_stage_trainable:
|
1358 |
+
print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
|
1359 |
+
params = params + list(self.cond_stage_model.parameters())
|
1360 |
+
if self.learn_logvar:
|
1361 |
+
print('Diffusion model optimizing logvar')
|
1362 |
+
params.append(self.logvar)
|
1363 |
+
opt = torch.optim.AdamW(params, lr=lr)
|
1364 |
+
if self.use_scheduler:
|
1365 |
+
assert 'target' in self.scheduler_config
|
1366 |
+
scheduler = instantiate_from_config(self.scheduler_config)
|
1367 |
+
|
1368 |
+
print("Setting up LambdaLR scheduler...")
|
1369 |
+
scheduler = [
|
1370 |
+
{
|
1371 |
+
'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
|
1372 |
+
'interval': 'step',
|
1373 |
+
'frequency': 1
|
1374 |
+
}]
|
1375 |
+
return [opt], scheduler
|
1376 |
+
return opt
|
1377 |
+
|
1378 |
+
@torch.no_grad()
|
1379 |
+
def to_rgb(self, x):
|
1380 |
+
x = x.float()
|
1381 |
+
if not hasattr(self, "colorize"):
|
1382 |
+
self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
|
1383 |
+
x = nn.functional.conv2d(x, weight=self.colorize)
|
1384 |
+
x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
|
1385 |
+
return x
|
class DiffusionWrapperV1(pl.LightningModule):
    def __init__(self, diff_model_config, conditioning_key):
        super().__init__()
        self.diffusion_model = instantiate_from_config(diff_model_config)
        self.conditioning_key = conditioning_key
        assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm']

    def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
        if self.conditioning_key is None:
            out = self.diffusion_model(x, t)
        elif self.conditioning_key == 'concat':
            xc = torch.cat([x] + c_concat, dim=1)
            out = self.diffusion_model(xc, t)
        elif self.conditioning_key == 'crossattn':
            cc = torch.cat(c_crossattn, 1)
            out = self.diffusion_model(x, t, context=cc)
        elif self.conditioning_key == 'hybrid':
            xc = torch.cat([x] + c_concat, dim=1)
            cc = torch.cat(c_crossattn, 1)
            out = self.diffusion_model(xc, t, context=cc)
        elif self.conditioning_key == 'adm':
            cc = c_crossattn[0]
            out = self.diffusion_model(x, t, y=cc)
        else:
            raise NotImplementedError()

        return out


class Layout2ImgDiffusionV1(LatentDiffusionV1):
    # TODO: move all layout-specific hacks to this class
    def __init__(self, cond_stage_key, *args, **kwargs):
        assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
        super().__init__(*args, cond_stage_key=cond_stage_key, **kwargs)

    def log_images(self, batch, N=8, *args, **kwargs):
        logs = super().log_images(*args, batch=batch, N=N, **kwargs)

        key = 'train' if self.training else 'validation'
        dset = self.trainer.datamodule.datasets[key]
        mapper = dset.conditional_builders[self.cond_stage_key]

        bbox_imgs = []
        map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno))
        for tknzd_bbox in batch[self.cond_stage_key][:N]:
            bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256))
            bbox_imgs.append(bboximg)

        cond_img = torch.stack(bbox_imgs, dim=0)
        logs['bbox_image'] = cond_img
        return logs


ldm.models.diffusion.ddpm.DDPMV1 = DDPMV1
ldm.models.diffusion.ddpm.LatentDiffusionV1 = LatentDiffusionV1
ldm.models.diffusion.ddpm.DiffusionWrapperV1 = DiffusionWrapperV1
ldm.models.diffusion.ddpm.Layout2ImgDiffusionV1 = Layout2ImgDiffusionV1
|
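The conditioning_key dispatch in DiffusionWrapperV1 is the only place where the four conditioning modes differ, so the shapes involved are easy to state. A hedged illustration in plain torch of what the 'concat' and 'crossattn' branches do to their inputs (all shapes below are made up for the example; the real tensors come from the first-stage and cond-stage models):

import torch

x = torch.randn(2, 4, 64, 64)             # noisy latent batch
t = torch.randint(0, 1000, (2,))           # timesteps
c_concat = [torch.randn(2, 3, 64, 64)]     # e.g. a low-res conditioning image
c_crossattn = [torch.randn(2, 77, 768)]    # e.g. text-encoder hidden states

xc = torch.cat([x] + c_concat, dim=1)      # 'concat': conditioning stacked onto the input channels
cc = torch.cat(c_crossattn, 1)             # 'crossattn': conditioning fed to the UNet as `context`
print(xc.shape, cc.shape)                  # torch.Size([2, 7, 64, 64]) torch.Size([2, 77, 768])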
extensions-builtin/LDSR/vqvae_quantize.py
ADDED
@@ -0,0 +1,147 @@
# Vendored from https://raw.githubusercontent.com/CompVis/taming-transformers/24268930bf1dce879235a7fddd0b2355b84d7ea6/taming/modules/vqvae/quantize.py,
# where the license is as follows:
#
# Copyright (c) 2020 Patrick Esser and Robin Rombach and Björn Ommer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE./

import torch
import torch.nn as nn
import numpy as np
from einops import rearrange


class VectorQuantizer2(nn.Module):
    """
    Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly
    avoids costly matrix multiplications and allows for post-hoc remapping of indices.
    """

    # NOTE: due to a bug the beta term was applied to the wrong term. for
    # backwards compatibility we use the buggy version by default, but you can
    # specify legacy=False to fix it.
    def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random",
                 sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.e_dim = e_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.e_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                  f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z, temp=None, rescale_logits=False, return_logits=False):
        assert temp is None or temp == 1.0, "Only for interface compatible with Gumbel"
        assert rescale_logits is False, "Only for interface compatible with Gumbel"
        assert return_logits is False, "Only for interface compatible with Gumbel"
        # reshape z -> (batch, height, width, channel) and flatten
        z = rearrange(z, 'b c h w -> b h w c').contiguous()
        z_flattened = z.view(-1, self.e_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z

        d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
            torch.sum(self.embedding.weight ** 2, dim=1) - 2 * \
            torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))

        min_encoding_indices = torch.argmin(d, dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + \
                   torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * \
                   torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(
                z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
|
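A quick smoke test of the vendored quantizer, assuming this file is importable as vqvae_quantize (codebook size, code dimension and latent shape below are arbitrary):

import torch
from vqvae_quantize import VectorQuantizer2

quantizer = VectorQuantizer2(n_e=512, e_dim=4, beta=0.25)   # 512-entry codebook of 4-dim codes
z = torch.randn(2, 4, 8, 8)                                  # (batch, e_dim, H, W) encoder output

z_q, loss, (perplexity, min_encodings, indices) = quantizer(z)
print(z_q.shape)       # torch.Size([2, 4, 8, 8]) - inputs snapped to their nearest codebook entries
print(loss.item())     # scalar codebook/commitment loss
print(indices.shape)   # torch.Size([128]) - flat index of the chosen entry per spatial position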
extensions-builtin/Lora/__pycache__/extra_networks_lora.cpython-310.pyc
ADDED
Binary file (2.35 kB). View file
|
|
extensions-builtin/Lora/__pycache__/lora.cpython-310.pyc
ADDED
Binary file (547 Bytes). View file
|
|
extensions-builtin/Lora/__pycache__/lyco_helpers.cpython-310.pyc
ADDED
Binary file (946 Bytes). View file
|
|
extensions-builtin/Lora/__pycache__/network.cpython-310.pyc
ADDED
Binary file (5.65 kB). View file
|
|
extensions-builtin/Lora/__pycache__/network_full.cpython-310.pyc
ADDED
Binary file (1.51 kB). View file
|
|
extensions-builtin/Lora/__pycache__/network_hada.cpython-310.pyc
ADDED
Binary file (2.24 kB). View file
|
|
extensions-builtin/Lora/__pycache__/network_ia3.cpython-310.pyc
ADDED
Binary file (1.62 kB). View file
|
|
extensions-builtin/Lora/__pycache__/network_lokr.cpython-310.pyc
ADDED
Binary file (2.43 kB). View file
|
|
extensions-builtin/Lora/__pycache__/network_lora.cpython-310.pyc
ADDED
Binary file (3.5 kB). View file
|
|
extensions-builtin/Lora/__pycache__/networks.cpython-310.pyc
ADDED
Binary file (12.7 kB). View file
|
|
extensions-builtin/Lora/__pycache__/preload.cpython-310.pyc
ADDED
Binary file (632 Bytes). View file
|
|
extensions-builtin/Lora/__pycache__/ui_edit_user_metadata.cpython-310.pyc
ADDED
Binary file (7.47 kB). View file
|
|
extensions-builtin/Lora/__pycache__/ui_extra_networks_lora.cpython-310.pyc
ADDED
Binary file (3.07 kB). View file
|
|
extensions-builtin/Lora/extra_networks_lora.py
ADDED
@@ -0,0 +1,59 @@
from modules import extra_networks, shared
import networks


class ExtraNetworkLora(extra_networks.ExtraNetwork):
    def __init__(self):
        super().__init__('lora')

    def activate(self, p, params_list):
        additional = shared.opts.sd_lora

        if additional != "None" and additional in networks.available_networks and not any(x for x in params_list if x.items[0] == additional):
            p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
            params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))

        names = []
        te_multipliers = []
        unet_multipliers = []
        dyn_dims = []
        for params in params_list:
            assert params.items

            names.append(params.positional[0])

            te_multiplier = float(params.positional[1]) if len(params.positional) > 1 else 1.0
            te_multiplier = float(params.named.get("te", te_multiplier))

            unet_multiplier = float(params.positional[2]) if len(params.positional) > 2 else te_multiplier
            unet_multiplier = float(params.named.get("unet", unet_multiplier))

            dyn_dim = int(params.positional[3]) if len(params.positional) > 3 else None
            dyn_dim = int(params.named["dyn"]) if "dyn" in params.named else dyn_dim

            te_multipliers.append(te_multiplier)
            unet_multipliers.append(unet_multiplier)
            dyn_dims.append(dyn_dim)

        networks.load_networks(names, te_multipliers, unet_multipliers, dyn_dims)

        if shared.opts.lora_add_hashes_to_infotext:
            network_hashes = []
            for item in networks.loaded_networks:
                shorthash = item.network_on_disk.shorthash
                if not shorthash:
                    continue

                alias = item.mentioned_name
                if not alias:
                    continue

                alias = alias.replace(":", "").replace(",", "")

                network_hashes.append(f"{alias}: {shorthash}")

            if network_hashes:
                p.extra_generation_params["Lora hashes"] = ", ".join(network_hashes)

    def deactivate(self, p):
        pass
|
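Note on the block above: activate() resolves one text-encoder multiplier, one UNet multiplier and an optional dynamic rank per requested network, taking positional values first and letting named values override them. A minimal standalone sketch of that resolution logic follows; the SimpleNamespace object only mimics extra_networks.ExtraNetworkParams and is not part of the commit.

from types import SimpleNamespace

# hypothetical params object mimicking the positional/named split used above
params = SimpleNamespace(positional=["myLora", "0.8"], named={"unet": "0.6", "dyn": "16"})

te_multiplier = float(params.positional[1]) if len(params.positional) > 1 else 1.0
te_multiplier = float(params.named.get("te", te_multiplier))            # -> 0.8
unet_multiplier = float(params.named.get("unet", te_multiplier))        # -> 0.6 (falls back to te)
dyn_dim = int(params.named["dyn"]) if "dyn" in params.named else None   # -> 16

print(te_multiplier, unet_multiplier, dyn_dim)
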
extensions-builtin/Lora/lora.py
ADDED
@@ -0,0 +1,9 @@
import networks

list_available_loras = networks.list_available_networks

available_loras = networks.available_networks
available_lora_aliases = networks.available_network_aliases
available_lora_hash_lookup = networks.available_network_hash_lookup
forbidden_lora_aliases = networks.forbidden_network_aliases
loaded_loras = networks.loaded_networks

extensions-builtin/Lora/lyco_helpers.py
ADDED
@@ -0,0 +1,21 @@
import torch


def make_weight_cp(t, wa, wb):
    temp = torch.einsum('i j k l, j r -> i r k l', t, wb)
    return torch.einsum('i j k l, i r -> r j k l', temp, wa)


def rebuild_conventional(up, down, shape, dyn_dim=None):
    up = up.reshape(up.size(0), -1)
    down = down.reshape(down.size(0), -1)
    if dyn_dim is not None:
        up = up[:, :dyn_dim]
        down = down[:dyn_dim, :]
    return (up @ down).reshape(shape)


def rebuild_cp_decomposition(up, down, mid):
    up = up.reshape(up.size(0), -1)
    down = down.reshape(down.size(0), -1)
    return torch.einsum('n m k l, i n, m j -> i j k l', mid, up, down)

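As a quick illustration of what rebuild_conventional computes: the low-rank factors are flattened to 2-D, optionally truncated to dyn_dim, and multiplied back into a full weight delta. A minimal sketch with illustrative shapes (not part of the commit):

import torch

rank, out_ch, in_ch = 4, 8, 16
up = torch.randn(out_ch, rank)    # plays the role of "lora_up.weight"
down = torch.randn(rank, in_ch)   # plays the role of "lora_down.weight"

# full-rank delta that later gets added onto the original layer weight
delta = (up @ down).reshape(out_ch, in_ch)
assert delta.shape == (out_ch, in_ch)
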
extensions-builtin/Lora/network.py
ADDED
@@ -0,0 +1,154 @@
import os
from collections import namedtuple
import enum

from modules import sd_models, cache, errors, hashes, shared

NetworkWeights = namedtuple('NetworkWeights', ['network_key', 'sd_key', 'w', 'sd_module'])

metadata_tags_order = {"ss_sd_model_name": 1, "ss_resolution": 2, "ss_clip_skip": 3, "ss_num_train_images": 10, "ss_tag_frequency": 20}


class SdVersion(enum.Enum):
    Unknown = 1
    SD1 = 2
    SD2 = 3
    SDXL = 4


class NetworkOnDisk:
    def __init__(self, name, filename):
        self.name = name
        self.filename = filename
        self.metadata = {}
        self.is_safetensors = os.path.splitext(filename)[1].lower() == ".safetensors"

        def read_metadata():
            metadata = sd_models.read_metadata_from_safetensors(filename)
            metadata.pop('ssmd_cover_images', None)  # those are cover images, and they are too big to display in UI as text

            return metadata

        if self.is_safetensors:
            try:
                self.metadata = cache.cached_data_for_file('safetensors-metadata', "lora/" + self.name, filename, read_metadata)
            except Exception as e:
                errors.display(e, f"reading lora {filename}")

        if self.metadata:
            m = {}
            for k, v in sorted(self.metadata.items(), key=lambda x: metadata_tags_order.get(x[0], 999)):
                m[k] = v

            self.metadata = m

        self.alias = self.metadata.get('ss_output_name', self.name)

        self.hash = None
        self.shorthash = None
        self.set_hash(
            self.metadata.get('sshs_model_hash') or
            hashes.sha256_from_cache(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or
            ''
        )

        self.sd_version = self.detect_version()

    def detect_version(self):
        if str(self.metadata.get('ss_base_model_version', "")).startswith("sdxl_"):
            return SdVersion.SDXL
        elif str(self.metadata.get('ss_v2', "")) == "True":
            return SdVersion.SD2
        elif len(self.metadata):
            return SdVersion.SD1

        return SdVersion.Unknown

    def set_hash(self, v):
        self.hash = v
        self.shorthash = self.hash[0:12]

        if self.shorthash:
            import networks
            networks.available_network_hash_lookup[self.shorthash] = self

    def read_hash(self):
        if not self.hash:
            self.set_hash(hashes.sha256(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or '')

    def get_alias(self):
        import networks
        if shared.opts.lora_preferred_name == "Filename" or self.alias.lower() in networks.forbidden_network_aliases:
            return self.name
        else:
            return self.alias


class Network:  # LoraModule
    def __init__(self, name, network_on_disk: NetworkOnDisk):
        self.name = name
        self.network_on_disk = network_on_disk
        self.te_multiplier = 1.0
        self.unet_multiplier = 1.0
        self.dyn_dim = None
        self.modules = {}
        self.mtime = None

        self.mentioned_name = None
        """the text that was used to add the network to prompt - can be either name or an alias"""


class ModuleType:
    def create_module(self, net: Network, weights: NetworkWeights) -> Network | None:
        return None


class NetworkModule:
    def __init__(self, net: Network, weights: NetworkWeights):
        self.network = net
        self.network_key = weights.network_key
        self.sd_key = weights.sd_key
        self.sd_module = weights.sd_module

        if hasattr(self.sd_module, 'weight'):
            self.shape = self.sd_module.weight.shape

        self.dim = None
        self.bias = weights.w.get("bias")
        self.alpha = weights.w["alpha"].item() if "alpha" in weights.w else None
        self.scale = weights.w["scale"].item() if "scale" in weights.w else None

    def multiplier(self):
        if 'transformer' in self.sd_key[:20]:
            return self.network.te_multiplier
        else:
            return self.network.unet_multiplier

    def calc_scale(self):
        if self.scale is not None:
            return self.scale
        if self.dim is not None and self.alpha is not None:
            return self.alpha / self.dim

        return 1.0

    def finalize_updown(self, updown, orig_weight, output_shape):
        if self.bias is not None:
            updown = updown.reshape(self.bias.shape)
            updown += self.bias.to(orig_weight.device, dtype=orig_weight.dtype)
            updown = updown.reshape(output_shape)

        if len(output_shape) == 4:
            updown = updown.reshape(output_shape)

        if orig_weight.size().numel() == updown.size().numel():
            updown = updown.reshape(orig_weight.shape)

        return updown * self.calc_scale() * self.multiplier()

    def calc_updown(self, target):
        raise NotImplementedError()

    def forward(self, x, y):
        raise NotImplementedError()

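The effective delta a NetworkModule contributes is therefore updown * calc_scale() * multiplier(), where calc_scale() falls back from an explicit "scale" tensor to alpha / dim and finally to 1.0. A small numeric sketch of that scaling, with made-up values:

alpha, dim = 4.0, 16      # typical kohya-style metadata: scale = alpha / dim
unet_multiplier = 0.8     # multiplier requested for this network

scale = alpha / dim               # 0.25, what calc_scale() returns when no explicit "scale" key exists
effective = scale * unet_multiplier
print(effective)                  # 0.2 -> the final weight change is updown * 0.2
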
extensions-builtin/Lora/network_full.py
ADDED
@@ -0,0 +1,22 @@
import network


class ModuleTypeFull(network.ModuleType):
    def create_module(self, net: network.Network, weights: network.NetworkWeights):
        if all(x in weights.w for x in ["diff"]):
            return NetworkModuleFull(net, weights)

        return None


class NetworkModuleFull(network.NetworkModule):
    def __init__(self, net: network.Network, weights: network.NetworkWeights):
        super().__init__(net, weights)

        self.weight = weights.w.get("diff")

    def calc_updown(self, orig_weight):
        output_shape = self.weight.shape
        updown = self.weight.to(orig_weight.device, dtype=orig_weight.dtype)

        return self.finalize_updown(updown, orig_weight, output_shape)

extensions-builtin/Lora/network_hada.py
ADDED
@@ -0,0 +1,55 @@
import lyco_helpers
import network


class ModuleTypeHada(network.ModuleType):
    def create_module(self, net: network.Network, weights: network.NetworkWeights):
        if all(x in weights.w for x in ["hada_w1_a", "hada_w1_b", "hada_w2_a", "hada_w2_b"]):
            return NetworkModuleHada(net, weights)

        return None


class NetworkModuleHada(network.NetworkModule):
    def __init__(self, net: network.Network, weights: network.NetworkWeights):
        super().__init__(net, weights)

        if hasattr(self.sd_module, 'weight'):
            self.shape = self.sd_module.weight.shape

        self.w1a = weights.w["hada_w1_a"]
        self.w1b = weights.w["hada_w1_b"]
        self.dim = self.w1b.shape[0]
        self.w2a = weights.w["hada_w2_a"]
        self.w2b = weights.w["hada_w2_b"]

        self.t1 = weights.w.get("hada_t1")
        self.t2 = weights.w.get("hada_t2")

    def calc_updown(self, orig_weight):
        w1a = self.w1a.to(orig_weight.device, dtype=orig_weight.dtype)
        w1b = self.w1b.to(orig_weight.device, dtype=orig_weight.dtype)
        w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype)
        w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype)

        output_shape = [w1a.size(0), w1b.size(1)]

        if self.t1 is not None:
            output_shape = [w1a.size(1), w1b.size(1)]
            t1 = self.t1.to(orig_weight.device, dtype=orig_weight.dtype)
            updown1 = lyco_helpers.make_weight_cp(t1, w1a, w1b)
            output_shape += t1.shape[2:]
        else:
            if len(w1b.shape) == 4:
                output_shape += w1b.shape[2:]
            updown1 = lyco_helpers.rebuild_conventional(w1a, w1b, output_shape)

        if self.t2 is not None:
            t2 = self.t2.to(orig_weight.device, dtype=orig_weight.dtype)
            updown2 = lyco_helpers.make_weight_cp(t2, w2a, w2b)
        else:
            updown2 = lyco_helpers.rebuild_conventional(w2a, w2b, output_shape)

        updown = updown1 * updown2

        return self.finalize_updown(updown, orig_weight, output_shape)

extensions-builtin/Lora/network_ia3.py
ADDED
@@ -0,0 +1,30 @@
import network


class ModuleTypeIa3(network.ModuleType):
    def create_module(self, net: network.Network, weights: network.NetworkWeights):
        if all(x in weights.w for x in ["weight"]):
            return NetworkModuleIa3(net, weights)

        return None


class NetworkModuleIa3(network.NetworkModule):
    def __init__(self, net: network.Network, weights: network.NetworkWeights):
        super().__init__(net, weights)

        self.w = weights.w["weight"]
        self.on_input = weights.w["on_input"].item()

    def calc_updown(self, orig_weight):
        w = self.w.to(orig_weight.device, dtype=orig_weight.dtype)

        output_shape = [w.size(0), orig_weight.size(1)]
        if self.on_input:
            output_shape.reverse()
        else:
            w = w.reshape(-1, 1)

        updown = orig_weight * w

        return self.finalize_updown(updown, orig_weight, output_shape)

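IA3 stores a single scaling vector rather than low-rank factors, so calc_updown above returns orig_weight * w; once the caller adds that delta back, the layer effectively becomes W * (1 + m * w). A toy sketch of that relationship (shapes and multiplier are illustrative, not from the commit):

import torch

W = torch.randn(4, 6)    # original layer weight
w = torch.randn(4, 1)    # IA3 vector (on_input == False, broadcast over columns)
m = 0.5                  # network multiplier

updown = W * w                  # what calc_updown returns (before scale/multiplier)
patched = W + m * updown        # equals W * (1 + m * w)
assert torch.allclose(patched, W * (1 + m * w))
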
extensions-builtin/Lora/network_lokr.py
ADDED
@@ -0,0 +1,64 @@
import torch

import lyco_helpers
import network


class ModuleTypeLokr(network.ModuleType):
    def create_module(self, net: network.Network, weights: network.NetworkWeights):
        has_1 = "lokr_w1" in weights.w or ("lokr_w1_a" in weights.w and "lokr_w1_b" in weights.w)
        has_2 = "lokr_w2" in weights.w or ("lokr_w2_a" in weights.w and "lokr_w2_b" in weights.w)
        if has_1 and has_2:
            return NetworkModuleLokr(net, weights)

        return None


def make_kron(orig_shape, w1, w2):
    if len(w2.shape) == 4:
        w1 = w1.unsqueeze(2).unsqueeze(2)
    w2 = w2.contiguous()
    return torch.kron(w1, w2).reshape(orig_shape)


class NetworkModuleLokr(network.NetworkModule):
    def __init__(self, net: network.Network, weights: network.NetworkWeights):
        super().__init__(net, weights)

        self.w1 = weights.w.get("lokr_w1")
        self.w1a = weights.w.get("lokr_w1_a")
        self.w1b = weights.w.get("lokr_w1_b")
        self.dim = self.w1b.shape[0] if self.w1b is not None else self.dim
        self.w2 = weights.w.get("lokr_w2")
        self.w2a = weights.w.get("lokr_w2_a")
        self.w2b = weights.w.get("lokr_w2_b")
        self.dim = self.w2b.shape[0] if self.w2b is not None else self.dim
        self.t2 = weights.w.get("lokr_t2")

    def calc_updown(self, orig_weight):
        if self.w1 is not None:
            w1 = self.w1.to(orig_weight.device, dtype=orig_weight.dtype)
        else:
            w1a = self.w1a.to(orig_weight.device, dtype=orig_weight.dtype)
            w1b = self.w1b.to(orig_weight.device, dtype=orig_weight.dtype)
            w1 = w1a @ w1b

        if self.w2 is not None:
            w2 = self.w2.to(orig_weight.device, dtype=orig_weight.dtype)
        elif self.t2 is None:
            w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype)
            w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype)
            w2 = w2a @ w2b
        else:
            t2 = self.t2.to(orig_weight.device, dtype=orig_weight.dtype)
            w2a = self.w2a.to(orig_weight.device, dtype=orig_weight.dtype)
            w2b = self.w2b.to(orig_weight.device, dtype=orig_weight.dtype)
            w2 = lyco_helpers.make_weight_cp(t2, w2a, w2b)

        output_shape = [w1.size(0) * w2.size(0), w1.size(1) * w2.size(1)]
        if len(orig_weight.shape) == 4:
            output_shape = orig_weight.shape

        updown = make_kron(output_shape, w1, w2)

        return self.finalize_updown(updown, orig_weight, output_shape)

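LoKr's make_kron builds the full delta as a Kronecker product of two much smaller factors, which is why output_shape above is the product of the factor dimensions. A toy example with torch.kron (sizes are illustrative):

import torch

w1 = torch.randn(2, 3)
w2 = torch.randn(4, 5)

delta = torch.kron(w1, w2)    # shape (2*4, 3*5) == (8, 15)
assert delta.shape == (w1.size(0) * w2.size(0), w1.size(1) * w2.size(1))
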
extensions-builtin/Lora/network_lora.py
ADDED
@@ -0,0 +1,86 @@
import torch

import lyco_helpers
import network
from modules import devices


class ModuleTypeLora(network.ModuleType):
    def create_module(self, net: network.Network, weights: network.NetworkWeights):
        if all(x in weights.w for x in ["lora_up.weight", "lora_down.weight"]):
            return NetworkModuleLora(net, weights)

        return None


class NetworkModuleLora(network.NetworkModule):
    def __init__(self, net: network.Network, weights: network.NetworkWeights):
        super().__init__(net, weights)

        self.up_model = self.create_module(weights.w, "lora_up.weight")
        self.down_model = self.create_module(weights.w, "lora_down.weight")
        self.mid_model = self.create_module(weights.w, "lora_mid.weight", none_ok=True)

        self.dim = weights.w["lora_down.weight"].shape[0]

    def create_module(self, weights, key, none_ok=False):
        weight = weights.get(key)

        if weight is None and none_ok:
            return None

        is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear, torch.nn.MultiheadAttention]
        is_conv = type(self.sd_module) in [torch.nn.Conv2d]

        if is_linear:
            weight = weight.reshape(weight.shape[0], -1)
            module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
        elif is_conv and key == "lora_down.weight" or key == "dyn_up":
            if len(weight.shape) == 2:
                weight = weight.reshape(weight.shape[0], -1, 1, 1)

            if weight.shape[2] != 1 or weight.shape[3] != 1:
                module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], self.sd_module.kernel_size, self.sd_module.stride, self.sd_module.padding, bias=False)
            else:
                module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False)
        elif is_conv and key == "lora_mid.weight":
            module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], self.sd_module.kernel_size, self.sd_module.stride, self.sd_module.padding, bias=False)
        elif is_conv and key == "lora_up.weight" or key == "dyn_down":
            module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False)
        else:
            raise AssertionError(f'Lora layer {self.network_key} matched a layer with unsupported type: {type(self.sd_module).__name__}')

        with torch.no_grad():
            if weight.shape != module.weight.shape:
                weight = weight.reshape(module.weight.shape)
            module.weight.copy_(weight)

        module.to(device=devices.cpu, dtype=devices.dtype)
        module.weight.requires_grad_(False)

        return module

    def calc_updown(self, orig_weight):
        up = self.up_model.weight.to(orig_weight.device, dtype=orig_weight.dtype)
        down = self.down_model.weight.to(orig_weight.device, dtype=orig_weight.dtype)

        output_shape = [up.size(0), down.size(1)]
        if self.mid_model is not None:
            # cp-decomposition
            mid = self.mid_model.weight.to(orig_weight.device, dtype=orig_weight.dtype)
            updown = lyco_helpers.rebuild_cp_decomposition(up, down, mid)
            output_shape += mid.shape[2:]
        else:
            if len(down.shape) == 4:
                output_shape += down.shape[2:]
            updown = lyco_helpers.rebuild_conventional(up, down, output_shape, self.network.dyn_dim)

        return self.finalize_updown(updown, orig_weight, output_shape)

    def forward(self, x, y):
        self.up_model.to(device=devices.device)
        self.down_model.to(device=devices.device)

        return y + self.up_model(self.down_model(x)) * self.multiplier() * self.calc_scale()

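When a lora_mid.weight tensor is present (a convolutional LoRA with CP decomposition), the delta is rebuilt with the einsum from lyco_helpers.rebuild_cp_decomposition. A shape-only sketch of that contraction, using made-up dimensions:

import torch

rank_out, rank_in, out_ch, in_ch, k = 4, 4, 8, 16, 3
up = torch.randn(out_ch, rank_out)            # lora_up flattened to 2-D
down = torch.randn(rank_in, in_ch)            # lora_down flattened to 2-D
mid = torch.randn(rank_out, rank_in, k, k)    # lora_mid carries the spatial kernel

delta = torch.einsum('n m k l, i n, m j -> i j k l', mid, up, down)
assert delta.shape == (out_ch, in_ch, k, k)
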
extensions-builtin/Lora/networks.py
ADDED
@@ -0,0 +1,468 @@
import os
import re

import network
import network_lora
import network_hada
import network_ia3
import network_lokr
import network_full

import torch
from typing import Union

from modules import shared, devices, sd_models, errors, scripts, sd_hijack

module_types = [
    network_lora.ModuleTypeLora(),
    network_hada.ModuleTypeHada(),
    network_ia3.ModuleTypeIa3(),
    network_lokr.ModuleTypeLokr(),
    network_full.ModuleTypeFull(),
]


re_digits = re.compile(r"\d+")
re_x_proj = re.compile(r"(.*)_([qkv]_proj)$")
re_compiled = {}

suffix_conversion = {
    "attentions": {},
    "resnets": {
        "conv1": "in_layers_2",
        "conv2": "out_layers_3",
        "time_emb_proj": "emb_layers_1",
        "conv_shortcut": "skip_connection",
    }
}


def convert_diffusers_name_to_compvis(key, is_sd2):
    def match(match_list, regex_text):
        regex = re_compiled.get(regex_text)
        if regex is None:
            regex = re.compile(regex_text)
            re_compiled[regex_text] = regex

        r = re.match(regex, key)
        if not r:
            return False

        match_list.clear()
        match_list.extend([int(x) if re.match(re_digits, x) else x for x in r.groups()])
        return True

    m = []

    if match(m, r"lora_unet_conv_in(.*)"):
        return f'diffusion_model_input_blocks_0_0{m[0]}'

    if match(m, r"lora_unet_conv_out(.*)"):
        return f'diffusion_model_out_2{m[0]}'

    if match(m, r"lora_unet_time_embedding_linear_(\d+)(.*)"):
        return f"diffusion_model_time_embed_{m[0] * 2 - 2}{m[1]}"

    if match(m, r"lora_unet_down_blocks_(\d+)_(attentions|resnets)_(\d+)_(.+)"):
        suffix = suffix_conversion.get(m[1], {}).get(m[3], m[3])
        return f"diffusion_model_input_blocks_{1 + m[0] * 3 + m[2]}_{1 if m[1] == 'attentions' else 0}_{suffix}"

    if match(m, r"lora_unet_mid_block_(attentions|resnets)_(\d+)_(.+)"):
        suffix = suffix_conversion.get(m[0], {}).get(m[2], m[2])
        return f"diffusion_model_middle_block_{1 if m[0] == 'attentions' else m[1] * 2}_{suffix}"

    if match(m, r"lora_unet_up_blocks_(\d+)_(attentions|resnets)_(\d+)_(.+)"):
        suffix = suffix_conversion.get(m[1], {}).get(m[3], m[3])
        return f"diffusion_model_output_blocks_{m[0] * 3 + m[2]}_{1 if m[1] == 'attentions' else 0}_{suffix}"

    if match(m, r"lora_unet_down_blocks_(\d+)_downsamplers_0_conv"):
        return f"diffusion_model_input_blocks_{3 + m[0] * 3}_0_op"

    if match(m, r"lora_unet_up_blocks_(\d+)_upsamplers_0_conv"):
        return f"diffusion_model_output_blocks_{2 + m[0] * 3}_{2 if m[0]>0 else 1}_conv"

    if match(m, r"lora_te_text_model_encoder_layers_(\d+)_(.+)"):
        if is_sd2:
            if 'mlp_fc1' in m[1]:
                return f"model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc1', 'mlp_c_fc')}"
            elif 'mlp_fc2' in m[1]:
                return f"model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc2', 'mlp_c_proj')}"
            else:
                return f"model_transformer_resblocks_{m[0]}_{m[1].replace('self_attn', 'attn')}"

        return f"transformer_text_model_encoder_layers_{m[0]}_{m[1]}"

    if match(m, r"lora_te2_text_model_encoder_layers_(\d+)_(.+)"):
        if 'mlp_fc1' in m[1]:
            return f"1_model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc1', 'mlp_c_fc')}"
        elif 'mlp_fc2' in m[1]:
            return f"1_model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc2', 'mlp_c_proj')}"
        else:
            return f"1_model_transformer_resblocks_{m[0]}_{m[1].replace('self_attn', 'attn')}"

    return key


def assign_network_names_to_compvis_modules(sd_model):
    network_layer_mapping = {}

    if shared.sd_model.is_sdxl:
        for i, embedder in enumerate(shared.sd_model.conditioner.embedders):
            if not hasattr(embedder, 'wrapped'):
                continue

            for name, module in embedder.wrapped.named_modules():
                network_name = f'{i}_{name.replace(".", "_")}'
                network_layer_mapping[network_name] = module
                module.network_layer_name = network_name
    else:
        for name, module in shared.sd_model.cond_stage_model.wrapped.named_modules():
            network_name = name.replace(".", "_")
            network_layer_mapping[network_name] = module
            module.network_layer_name = network_name

    for name, module in shared.sd_model.model.named_modules():
        network_name = name.replace(".", "_")
        network_layer_mapping[network_name] = module
        module.network_layer_name = network_name

    sd_model.network_layer_mapping = network_layer_mapping


def load_network(name, network_on_disk):
    net = network.Network(name, network_on_disk)
    net.mtime = os.path.getmtime(network_on_disk.filename)

    sd = sd_models.read_state_dict(network_on_disk.filename)

    # this should not be needed but is here as an emergency fix for an unknown error people are experiencing in 1.2.0
    if not hasattr(shared.sd_model, 'network_layer_mapping'):
        assign_network_names_to_compvis_modules(shared.sd_model)

    keys_failed_to_match = {}
    is_sd2 = 'model_transformer_resblocks' in shared.sd_model.network_layer_mapping

    matched_networks = {}

    for key_network, weight in sd.items():
        key_network_without_network_parts, network_part = key_network.split(".", 1)

        key = convert_diffusers_name_to_compvis(key_network_without_network_parts, is_sd2)
        sd_module = shared.sd_model.network_layer_mapping.get(key, None)

        if sd_module is None:
            m = re_x_proj.match(key)
            if m:
                sd_module = shared.sd_model.network_layer_mapping.get(m.group(1), None)

        # SDXL loras seem to already have correct compvis keys, so only need to replace "lora_unet" with "diffusion_model"
        if sd_module is None and "lora_unet" in key_network_without_network_parts:
            key = key_network_without_network_parts.replace("lora_unet", "diffusion_model")
            sd_module = shared.sd_model.network_layer_mapping.get(key, None)
        elif sd_module is None and "lora_te1_text_model" in key_network_without_network_parts:
            key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model")
            sd_module = shared.sd_model.network_layer_mapping.get(key, None)

            # some SD1 Loras also have correct compvis keys
            if sd_module is None:
                key = key_network_without_network_parts.replace("lora_te1_text_model", "transformer_text_model")
                sd_module = shared.sd_model.network_layer_mapping.get(key, None)

        if sd_module is None:
            keys_failed_to_match[key_network] = key
            continue

        if key not in matched_networks:
            matched_networks[key] = network.NetworkWeights(network_key=key_network, sd_key=key, w={}, sd_module=sd_module)

        matched_networks[key].w[network_part] = weight

    for key, weights in matched_networks.items():
        net_module = None
        for nettype in module_types:
            net_module = nettype.create_module(net, weights)
            if net_module is not None:
                break

        if net_module is None:
            raise AssertionError(f"Could not find a module type (out of {', '.join([x.__class__.__name__ for x in module_types])}) that would accept those keys: {', '.join(weights.w)}")

        net.modules[key] = net_module

    if keys_failed_to_match:
        print(f"Failed to match keys when loading network {network_on_disk.filename}: {keys_failed_to_match}")

    return net


def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
    already_loaded = {}

    for net in loaded_networks:
        if net.name in names:
            already_loaded[net.name] = net

    loaded_networks.clear()

    networks_on_disk = [available_network_aliases.get(name, None) for name in names]
    if any(x is None for x in networks_on_disk):
        list_available_networks()

        networks_on_disk = [available_network_aliases.get(name, None) for name in names]

    failed_to_load_networks = []

    for i, name in enumerate(names):
        net = already_loaded.get(name, None)

        network_on_disk = networks_on_disk[i]

        if network_on_disk is not None:
            if net is None or os.path.getmtime(network_on_disk.filename) > net.mtime:
                try:
                    net = load_network(name, network_on_disk)
                except Exception as e:
                    errors.display(e, f"loading network {network_on_disk.filename}")
                    continue

            net.mentioned_name = name

            network_on_disk.read_hash()

        if net is None:
            failed_to_load_networks.append(name)
            print(f"Couldn't find network with name {name}")
            continue

        net.te_multiplier = te_multipliers[i] if te_multipliers else 1.0
        net.unet_multiplier = unet_multipliers[i] if unet_multipliers else 1.0
        net.dyn_dim = dyn_dims[i] if dyn_dims else 1.0
        loaded_networks.append(net)

    if failed_to_load_networks:
        sd_hijack.model_hijack.comments.append("Failed to find networks: " + ", ".join(failed_to_load_networks))


def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
    weights_backup = getattr(self, "network_weights_backup", None)

    if weights_backup is None:
        return

    if isinstance(self, torch.nn.MultiheadAttention):
        self.in_proj_weight.copy_(weights_backup[0])
        self.out_proj.weight.copy_(weights_backup[1])
    else:
        self.weight.copy_(weights_backup)


def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
    """
    Applies the currently selected set of networks to the weights of torch layer self.
    If weights already have this particular set of networks applied, does nothing.
    If not, restores original weights from backup and alters weights according to networks.
    """

    network_layer_name = getattr(self, 'network_layer_name', None)
    if network_layer_name is None:
        return

    current_names = getattr(self, "network_current_names", ())
    wanted_names = tuple((x.name, x.te_multiplier, x.unet_multiplier, x.dyn_dim) for x in loaded_networks)

    weights_backup = getattr(self, "network_weights_backup", None)
    if weights_backup is None:
        if isinstance(self, torch.nn.MultiheadAttention):
            weights_backup = (self.in_proj_weight.to(devices.cpu, copy=True), self.out_proj.weight.to(devices.cpu, copy=True))
        else:
            weights_backup = self.weight.to(devices.cpu, copy=True)

        self.network_weights_backup = weights_backup

    if current_names != wanted_names:
        network_restore_weights_from_backup(self)

        for net in loaded_networks:
            module = net.modules.get(network_layer_name, None)
            if module is not None and hasattr(self, 'weight'):
                with torch.no_grad():
                    updown = module.calc_updown(self.weight)

                    if len(self.weight.shape) == 4 and self.weight.shape[1] == 9:
                        # inpainting model. zero pad updown to make channel[1] 4 to 9
                        updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))

                    self.weight += updown
                    continue

            module_q = net.modules.get(network_layer_name + "_q_proj", None)
            module_k = net.modules.get(network_layer_name + "_k_proj", None)
            module_v = net.modules.get(network_layer_name + "_v_proj", None)
            module_out = net.modules.get(network_layer_name + "_out_proj", None)

            if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out:
                with torch.no_grad():
                    updown_q = module_q.calc_updown(self.in_proj_weight)
                    updown_k = module_k.calc_updown(self.in_proj_weight)
                    updown_v = module_v.calc_updown(self.in_proj_weight)
                    updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
                    updown_out = module_out.calc_updown(self.out_proj.weight)

                    self.in_proj_weight += updown_qkv
                    self.out_proj.weight += updown_out
                    continue

            if module is None:
                continue

            print(f'failed to calculate network weights for layer {network_layer_name}')

    self.network_current_names = wanted_names


def network_forward(module, input, original_forward):
    """
    Old way of applying Lora by executing operations during layer's forward.
    Stacking many loras this way results in big performance degradation.
    """

    if len(loaded_networks) == 0:
        return original_forward(module, input)

    input = devices.cond_cast_unet(input)

    network_restore_weights_from_backup(module)
    network_reset_cached_weight(module)

    y = original_forward(module, input)

    network_layer_name = getattr(module, 'network_layer_name', None)
    for lora in loaded_networks:
        module = lora.modules.get(network_layer_name, None)
        if module is None:
            continue

        y = module.forward(y, input)

    return y


def network_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
    self.network_current_names = ()
    self.network_weights_backup = None


def network_Linear_forward(self, input):
    if shared.opts.lora_functional:
        return network_forward(self, input, torch.nn.Linear_forward_before_network)

    network_apply_weights(self)

    return torch.nn.Linear_forward_before_network(self, input)


def network_Linear_load_state_dict(self, *args, **kwargs):
    network_reset_cached_weight(self)

    return torch.nn.Linear_load_state_dict_before_network(self, *args, **kwargs)


def network_Conv2d_forward(self, input):
    if shared.opts.lora_functional:
        return network_forward(self, input, torch.nn.Conv2d_forward_before_network)

    network_apply_weights(self)

    return torch.nn.Conv2d_forward_before_network(self, input)


def network_Conv2d_load_state_dict(self, *args, **kwargs):
    network_reset_cached_weight(self)

    return torch.nn.Conv2d_load_state_dict_before_network(self, *args, **kwargs)


def network_MultiheadAttention_forward(self, *args, **kwargs):
    network_apply_weights(self)

    return torch.nn.MultiheadAttention_forward_before_network(self, *args, **kwargs)


def network_MultiheadAttention_load_state_dict(self, *args, **kwargs):
    network_reset_cached_weight(self)

    return torch.nn.MultiheadAttention_load_state_dict_before_network(self, *args, **kwargs)


def list_available_networks():
    available_networks.clear()
    available_network_aliases.clear()
    forbidden_network_aliases.clear()
    available_network_hash_lookup.clear()
    forbidden_network_aliases.update({"none": 1, "Addams": 1})

    os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)

    candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
    candidates += list(shared.walk_files(shared.cmd_opts.lyco_dir_backcompat, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
    for filename in candidates:
        if os.path.isdir(filename):
            continue

        name = os.path.splitext(os.path.basename(filename))[0]
        try:
            entry = network.NetworkOnDisk(name, filename)
        except OSError:  # should catch FileNotFoundError and PermissionError etc.
            errors.report(f"Failed to load network {name} from {filename}", exc_info=True)
            continue

        available_networks[name] = entry

        if entry.alias in available_network_aliases:
            forbidden_network_aliases[entry.alias.lower()] = 1

        available_network_aliases[name] = entry
        available_network_aliases[entry.alias] = entry


re_network_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)")


def infotext_pasted(infotext, params):
    if "AddNet Module 1" in [x[1] for x in scripts.scripts_txt2img.infotext_fields]:
        return  # if the other extension is active, it will handle those fields, no need to do anything

    added = []

    for k in params:
        if not k.startswith("AddNet Model "):
            continue

        num = k[13:]

        if params.get("AddNet Module " + num) != "LoRA":
            continue

        name = params.get("AddNet Model " + num)
        if name is None:
            continue

        m = re_network_name.match(name)
        if m:
            name = m.group(1)

        multiplier = params.get("AddNet Weight A " + num, "1.0")

        added.append(f"<lora:{name}:{multiplier}>")

    if added:
        params["Prompt"] += "\n" + "".join(added)


available_networks = {}
available_network_aliases = {}
loaded_networks = []
available_network_hash_lookup = {}
forbidden_network_aliases = {}

list_available_networks()

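The core idea of network_apply_weights above is to patch layer weights in place rather than hooking every forward call: back up the original tensor once, restore it whenever the wanted set of networks changes, then add each network's delta. A stripped-down sketch of that pattern on a bare nn.Linear (attribute names here are illustrative, not this module's API):

import torch

layer = torch.nn.Linear(8, 8, bias=False)
delta = torch.randn_like(layer.weight) * 0.01    # stand-in for module.calc_updown(layer.weight)

with torch.no_grad():
    if not hasattr(layer, "weights_backup"):
        layer.weights_backup = layer.weight.detach().clone()   # back up the original weight once
    layer.weight.copy_(layer.weights_backup)                    # restore before re-applying
    layer.weight += delta                                       # apply the currently wanted networks

# forward passes now run at full speed with no per-call LoRA math
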
extensions-builtin/Lora/preload.py
ADDED
@@ -0,0 +1,7 @@
import os
from modules import paths


def preload(parser):
    parser.add_argument("--lora-dir", type=str, help="Path to directory with Lora networks.", default=os.path.join(paths.models_path, 'Lora'))
    parser.add_argument("--lyco-dir-backcompat", type=str, help="Path to directory with LyCORIS networks (for backwards compatibility; can also use --lyco-dir).", default=os.path.join(paths.models_path, 'LyCORIS'))

extensions-builtin/Lora/scripts/__pycache__/lora_script.cpython-310.pyc
ADDED
Binary file (5.11 kB). View file
extensions-builtin/Lora/scripts/lora_script.py
ADDED
@@ -0,0 +1,123 @@
import re

import torch
import gradio as gr
from fastapi import FastAPI

import network
import networks
import lora  # noqa:F401
import extra_networks_lora
import ui_extra_networks_lora
from modules import script_callbacks, ui_extra_networks, extra_networks, shared


def unload():
    torch.nn.Linear.forward = torch.nn.Linear_forward_before_network
    torch.nn.Linear._load_from_state_dict = torch.nn.Linear_load_state_dict_before_network
    torch.nn.Conv2d.forward = torch.nn.Conv2d_forward_before_network
    torch.nn.Conv2d._load_from_state_dict = torch.nn.Conv2d_load_state_dict_before_network
    torch.nn.MultiheadAttention.forward = torch.nn.MultiheadAttention_forward_before_network
    torch.nn.MultiheadAttention._load_from_state_dict = torch.nn.MultiheadAttention_load_state_dict_before_network


def before_ui():
    ui_extra_networks.register_page(ui_extra_networks_lora.ExtraNetworksPageLora())

    extra_network = extra_networks_lora.ExtraNetworkLora()
    extra_networks.register_extra_network(extra_network)
    extra_networks.register_extra_network_alias(extra_network, "lyco")


if not hasattr(torch.nn, 'Linear_forward_before_network'):
    torch.nn.Linear_forward_before_network = torch.nn.Linear.forward

if not hasattr(torch.nn, 'Linear_load_state_dict_before_network'):
    torch.nn.Linear_load_state_dict_before_network = torch.nn.Linear._load_from_state_dict

if not hasattr(torch.nn, 'Conv2d_forward_before_network'):
    torch.nn.Conv2d_forward_before_network = torch.nn.Conv2d.forward

if not hasattr(torch.nn, 'Conv2d_load_state_dict_before_network'):
    torch.nn.Conv2d_load_state_dict_before_network = torch.nn.Conv2d._load_from_state_dict

if not hasattr(torch.nn, 'MultiheadAttention_forward_before_network'):
    torch.nn.MultiheadAttention_forward_before_network = torch.nn.MultiheadAttention.forward

if not hasattr(torch.nn, 'MultiheadAttention_load_state_dict_before_network'):
    torch.nn.MultiheadAttention_load_state_dict_before_network = torch.nn.MultiheadAttention._load_from_state_dict

torch.nn.Linear.forward = networks.network_Linear_forward
torch.nn.Linear._load_from_state_dict = networks.network_Linear_load_state_dict
torch.nn.Conv2d.forward = networks.network_Conv2d_forward
torch.nn.Conv2d._load_from_state_dict = networks.network_Conv2d_load_state_dict
torch.nn.MultiheadAttention.forward = networks.network_MultiheadAttention_forward
torch.nn.MultiheadAttention._load_from_state_dict = networks.network_MultiheadAttention_load_state_dict

script_callbacks.on_model_loaded(networks.assign_network_names_to_compvis_modules)
script_callbacks.on_script_unloaded(unload)
script_callbacks.on_before_ui(before_ui)
script_callbacks.on_infotext_pasted(networks.infotext_pasted)


shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
    "sd_lora": shared.OptionInfo("None", "Add network to prompt", gr.Dropdown, lambda: {"choices": ["None", *networks.available_networks]}, refresh=networks.list_available_networks),
    "lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to Lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}),
    "lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
    "lora_show_all": shared.OptionInfo(False, "Always show all networks on the Lora page").info("otherwise, those detected as for incompatible version of Stable Diffusion will be hidden"),
    "lora_hide_unknown_for_versions": shared.OptionInfo([], "Hide networks of unknown versions for model versions", gr.CheckboxGroup, {"choices": ["SD1", "SD2", "SDXL"]}),
}))


shared.options_templates.update(shared.options_section(('compatibility', "Compatibility"), {
    "lora_functional": shared.OptionInfo(False, "Lora/Networks: use old method that takes longer when you have multiple Loras active and produces same results as kohya-ss/sd-webui-additional-networks extension"),
}))


def create_lora_json(obj: network.NetworkOnDisk):
    return {
        "name": obj.name,
        "alias": obj.alias,
        "path": obj.filename,
        "metadata": obj.metadata,
    }


def api_networks(_: gr.Blocks, app: FastAPI):
    @app.get("/sdapi/v1/loras")
    async def get_loras():
        return [create_lora_json(obj) for obj in networks.available_networks.values()]

    @app.post("/sdapi/v1/refresh-loras")
    async def refresh_loras():
        return networks.list_available_networks()


script_callbacks.on_app_started(api_networks)

re_lora = re.compile("<lora:([^:]+):")


def infotext_pasted(infotext, d):
    hashes = d.get("Lora hashes")
    if not hashes:
        return

    hashes = [x.strip().split(':', 1) for x in hashes.split(",")]
    hashes = {x[0].strip().replace(",", ""): x[1].strip() for x in hashes}

    def network_replacement(m):
        alias = m.group(1)
        shorthash = hashes.get(alias)
        if shorthash is None:
            return m.group(0)

        network_on_disk = networks.available_network_hash_lookup.get(shorthash)
        if network_on_disk is None:
            return m.group(0)

        return f'<lora:{network_on_disk.get_alias()}:'

    d["Prompt"] = re.sub(re_lora, network_replacement, d["Prompt"])


script_callbacks.on_infotext_pasted(infotext_pasted)

extensions-builtin/Lora/ui_edit_user_metadata.py
ADDED
@@ -0,0 +1,216 @@
import datetime
import html
import random

import gradio as gr
import re

from modules import ui_extra_networks_user_metadata


def is_non_comma_tagset(tags):
    average_tag_length = sum(len(x) for x in tags.keys()) / len(tags)

    return average_tag_length >= 16


re_word = re.compile(r"[-_\w']+")
re_comma = re.compile(r" *, *")


def build_tags(metadata):
    tags = {}

    for _, tags_dict in metadata.get("ss_tag_frequency", {}).items():
        for tag, tag_count in tags_dict.items():
            tag = tag.strip()
            tags[tag] = tags.get(tag, 0) + int(tag_count)

    if tags and is_non_comma_tagset(tags):
        new_tags = {}

        for text, text_count in tags.items():
            for word in re.findall(re_word, text):
                if len(word) < 3:
                    continue

                new_tags[word] = new_tags.get(word, 0) + text_count

        tags = new_tags

    ordered_tags = sorted(tags.keys(), key=tags.get, reverse=True)

    return [(tag, tags[tag]) for tag in ordered_tags]


class LoraUserMetadataEditor(ui_extra_networks_user_metadata.UserMetadataEditor):
    def __init__(self, ui, tabname, page):
        super().__init__(ui, tabname, page)

        self.select_sd_version = None

        self.taginfo = None
        self.edit_activation_text = None
        self.slider_preferred_weight = None
        self.edit_notes = None

    def save_lora_user_metadata(self, name, desc, sd_version, activation_text, preferred_weight, notes):
        user_metadata = self.get_user_metadata(name)
        user_metadata["description"] = desc
        user_metadata["sd version"] = sd_version
        user_metadata["activation text"] = activation_text
        user_metadata["preferred weight"] = preferred_weight
        user_metadata["notes"] = notes

        self.write_user_metadata(name, user_metadata)

    def get_metadata_table(self, name):
        table = super().get_metadata_table(name)
        item = self.page.items.get(name, {})
        metadata = item.get("metadata") or {}

        keys = {
            'ss_sd_model_name': "Model:",
            'ss_clip_skip': "Clip skip:",
            'ss_network_module': "Kohya module:",
        }

        for key, label in keys.items():
            value = metadata.get(key, None)
            if value is not None and str(value) != "None":
                table.append((label, html.escape(value)))

        ss_training_started_at = metadata.get('ss_training_started_at')
        if ss_training_started_at:
            table.append(("Date trained:", datetime.datetime.utcfromtimestamp(float(ss_training_started_at)).strftime('%Y-%m-%d %H:%M')))

        ss_bucket_info = metadata.get("ss_bucket_info")
        if ss_bucket_info and "buckets" in ss_bucket_info:
            resolutions = {}
            for _, bucket in ss_bucket_info["buckets"].items():
                resolution = bucket["resolution"]
                resolution = f'{resolution[1]}x{resolution[0]}'

                resolutions[resolution] = resolutions.get(resolution, 0) + int(bucket["count"])

            resolutions_list = sorted(resolutions.keys(), key=resolutions.get, reverse=True)
            resolutions_text = html.escape(", ".join(resolutions_list[0:4]))
            if len(resolutions) > 4:
                resolutions_text += ", ..."
                resolutions_text = f"<span title='{html.escape(', '.join(resolutions_list))}'>{resolutions_text}</span>"

            table.append(('Resolutions:' if len(resolutions_list) > 1 else 'Resolution:', resolutions_text))

        image_count = 0
        for _, params in metadata.get("ss_dataset_dirs", {}).items():
            image_count += int(params.get("img_count", 0))

        if image_count:
            table.append(("Dataset size:", image_count))

        return table

    def put_values_into_components(self, name):
        user_metadata = self.get_user_metadata(name)
        values = super().put_values_into_components(name)

        item = self.page.items.get(name, {})
        metadata = item.get("metadata") or {}

        tags = build_tags(metadata)
        gradio_tags = [(tag, str(count)) for tag, count in tags[0:24]]

        return [
            *values[0:5],
            item.get("sd_version", "Unknown"),
            gr.HighlightedText.update(value=gradio_tags, visible=True if tags else False),
            user_metadata.get('activation text', ''),
            float(user_metadata.get('preferred weight', 0.0)),
            gr.update(visible=True if tags else False),
            gr.update(value=self.generate_random_prompt_from_tags(tags), visible=True if tags else False),
        ]

    def generate_random_prompt(self, name):
        item = self.page.items.get(name, {})
        metadata = item.get("metadata") or {}
        tags = build_tags(metadata)

        return self.generate_random_prompt_from_tags(tags)

    def generate_random_prompt_from_tags(self, tags):
        max_count = None
        res = []
        for tag, count in tags:
            if not max_count:
                max_count = count

            v = random.random() * max_count
            if count > v:
                res.append(tag)

        return ", ".join(sorted(res))

    def create_extra_default_items_in_left_column(self):

        # this would be a lot better as gr.Radio but I can't make it work
        self.select_sd_version = gr.Dropdown(['SD1', 'SD2', 'SDXL', 'Unknown'], value='Unknown', label='Stable Diffusion version', interactive=True)

    def create_editor(self):
        self.create_default_editor_elems()

        self.taginfo = gr.HighlightedText(label="Training dataset tags")
        self.edit_activation_text = gr.Text(label='Activation text', info="Will be added to prompt along with Lora")
        self.slider_preferred_weight = gr.Slider(label='Preferred weight', info="Set to 0 to disable", minimum=0.0, maximum=2.0, step=0.01)

        with gr.Row() as row_random_prompt:
            with gr.Column(scale=8):
                random_prompt = gr.Textbox(label='Random prompt', lines=4, max_lines=4, interactive=False)

            with gr.Column(scale=1, min_width=120):
                generate_random_prompt = gr.Button('Generate').style(full_width=True, size="lg")

        self.edit_notes = gr.TextArea(label='Notes', lines=4)

        generate_random_prompt.click(fn=self.generate_random_prompt, inputs=[self.edit_name_input], outputs=[random_prompt], show_progress=False)

        def select_tag(activation_text, evt: gr.SelectData):
            tag = evt.value[0]

            words = re.split(re_comma, activation_text)
            if tag in words:
                words = [x for x in words if x != tag and x.strip()]
                return ", ".join(words)

            return activation_text + ", " + tag if activation_text else tag

        self.taginfo.select(fn=select_tag, inputs=[self.edit_activation_text], outputs=[self.edit_activation_text], show_progress=False)

        self.create_default_buttons()

        viewed_components = [
            self.edit_name,
            self.edit_description,
            self.html_filedata,
            self.html_preview,
            self.edit_notes,
            self.select_sd_version,
            self.taginfo,
            self.edit_activation_text,
            self.slider_preferred_weight,
            row_random_prompt,
            random_prompt,
        ]

        self.button_edit\
            .click(fn=self.put_values_into_components, inputs=[self.edit_name_input], outputs=viewed_components)\
            .then(fn=lambda: gr.update(visible=True), inputs=[], outputs=[self.box])

        edited_components = [
            self.edit_description,
            self.select_sd_version,
            self.edit_activation_text,
            self.slider_preferred_weight,
            self.edit_notes,
        ]

        self.setup_save_handler(self.button_save, self.save_lora_user_metadata, edited_components)

extensions-builtin/Lora/ui_extra_networks_lora.py
ADDED
@@ -0,0 +1,78 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
|
3 |
+
import network
|
4 |
+
import networks
|
5 |
+
|
6 |
+
from modules import shared, ui_extra_networks
|
7 |
+
from modules.ui_extra_networks import quote_js
|
8 |
+
from ui_edit_user_metadata import LoraUserMetadataEditor
|
9 |
+
|
10 |
+
|
11 |
+
class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
|
12 |
+
def __init__(self):
|
13 |
+
super().__init__('Lora')
|
14 |
+
|
15 |
+
def refresh(self):
|
16 |
+
networks.list_available_networks()
|
17 |
+
|
18 |
+
def create_item(self, name, index=None, enable_filter=True):
|
19 |
+
lora_on_disk = networks.available_networks.get(name)
|
20 |
+
|
21 |
+
path, ext = os.path.splitext(lora_on_disk.filename)
|
22 |
+
|
23 |
+
alias = lora_on_disk.get_alias()
|
24 |
+
|
25 |
+
item = {
|
26 |
+
"name": name,
|
27 |
+
"filename": lora_on_disk.filename,
|
28 |
+
"preview": self.find_preview(path) if self.find_preview(path) else './file=html/card-no-preview.png',
|
29 |
+
"description": self.find_description(path),
|
30 |
+
"search_term": self.search_terms_from_path(lora_on_disk.filename),
|
31 |
+
"local_preview": f"{path}.{shared.opts.samples_format}",
|
32 |
+
"metadata": lora_on_disk.metadata,
|
33 |
+
"sort_keys": {'default': index, **self.get_sort_keys(lora_on_disk.filename)},
|
34 |
+
"sd_version": lora_on_disk.sd_version.name,
|
35 |
+
}
|
36 |
+
|
37 |
+
self.read_user_metadata(item)
|
38 |
+
activation_text = item["user_metadata"].get("activation text")
|
39 |
+
preferred_weight = item["user_metadata"].get("preferred weight", 0.0)
|
40 |
+
item["prompt"] = quote_js(f"<lora:{alias}:") + " + " + (str(preferred_weight) if preferred_weight else "opts.extra_networks_default_multiplier") + " + " + quote_js(">")
|
41 |
+
|
42 |
+
if activation_text:
|
43 |
+
item["prompt"] += " + " + quote_js(" " + activation_text)
|
44 |
+
|
45 |
+
sd_version = item["user_metadata"].get("sd version")
|
46 |
+
if sd_version in network.SdVersion.__members__:
|
47 |
+
item["sd_version"] = sd_version
|
48 |
+
sd_version = network.SdVersion[sd_version]
|
49 |
+
else:
|
50 |
+
sd_version = lora_on_disk.sd_version
|
51 |
+
|
52 |
+
if shared.opts.lora_show_all or not enable_filter:
|
53 |
+
pass
|
54 |
+
elif sd_version == network.SdVersion.Unknown:
|
55 |
+
model_version = network.SdVersion.SDXL if shared.sd_model.is_sdxl else network.SdVersion.SD2 if shared.sd_model.is_sd2 else network.SdVersion.SD1
|
56 |
+
if model_version.name in shared.opts.lora_hide_unknown_for_versions:
|
57 |
+
return None
|
58 |
+
elif shared.sd_model.is_sdxl and sd_version != network.SdVersion.SDXL:
|
59 |
+
return None
|
60 |
+
elif shared.sd_model.is_sd2 and sd_version != network.SdVersion.SD2:
|
61 |
+
return None
|
62 |
+
elif shared.sd_model.is_sd1 and sd_version != network.SdVersion.SD1:
|
63 |
+
return None
|
64 |
+
|
65 |
+
return item
|
66 |
+
|
67 |
+
def list_items(self):
|
68 |
+
for index, name in enumerate(networks.available_networks):
|
69 |
+
item = self.create_item(name, index)
|
70 |
+
|
71 |
+
if item is not None:
|
72 |
+
yield item
|
73 |
+
|
74 |
+
def allowed_directories_for_previews(self):
|
75 |
+
return [shared.cmd_opts.lora_dir, shared.cmd_opts.lyco_dir_backcompat]
|
76 |
+
|
77 |
+
def create_user_metadata_editor(self, ui, tabname):
|
78 |
+
return LoraUserMetadataEditor(ui, tabname, self)
|
extensions-builtin/ScuNET/__pycache__/preload.cpython-310.pyc
ADDED
Binary file (491 Bytes). View file
|
|
extensions/stable-diffusion-webui-images-browser/scripts/wib/__pycache__/wib_db.cpython-310.pyc
ADDED
Binary file (21.8 kB). View file
|
|
extensions/stable-diffusion-webui-images-browser/scripts/wib/wib_db.py
ADDED
@@ -0,0 +1,888 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import hashlib
|
2 |
+
import json
|
3 |
+
import os
|
4 |
+
import sqlite3
|
5 |
+
from modules import scripts
|
6 |
+
from PIL import Image
|
7 |
+
|
8 |
+
version = 6
|
9 |
+
|
10 |
+
path_recorder_file = os.path.join(scripts.basedir(), "path_recorder.txt")
|
11 |
+
aes_cache_file = os.path.join(scripts.basedir(), "aes_scores.json")
|
12 |
+
exif_cache_file = os.path.join(scripts.basedir(), "exif_data.json")
|
13 |
+
ranking_file = os.path.join(scripts.basedir(), "ranking.json")
|
14 |
+
archive = os.path.join(scripts.basedir(), "archive")
|
15 |
+
db_file = os.path.join(scripts.basedir(), "wib.sqlite3")
|
16 |
+
np = "Negative prompt: "
|
17 |
+
st = "Steps: "
|
18 |
+
timeout = 30
|
19 |
+
|
20 |
+
def create_filehash(cursor):
|
21 |
+
cursor.execute('''
|
22 |
+
CREATE TABLE IF NOT EXISTS filehash (
|
23 |
+
file TEXT PRIMARY KEY,
|
24 |
+
hash TEXT,
|
25 |
+
created TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
26 |
+
updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
27 |
+
)
|
28 |
+
''')
|
29 |
+
|
30 |
+
cursor.execute('''
|
31 |
+
CREATE TRIGGER filehash_tr
|
32 |
+
AFTER UPDATE ON filehash
|
33 |
+
BEGIN
|
34 |
+
UPDATE filehash SET updated = CURRENT_TIMESTAMP WHERE file = OLD.file;
|
35 |
+
END;
|
36 |
+
''')
|
37 |
+
|
38 |
+
return
|
39 |
+
|
40 |
+
def create_work_files(cursor):
|
41 |
+
cursor.execute('''
|
42 |
+
CREATE TABLE IF NOT EXISTS work_files (
|
43 |
+
file TEXT PRIMARY KEY
|
44 |
+
)
|
45 |
+
''')
|
46 |
+
|
47 |
+
return
|
48 |
+
|
49 |
+
def create_db(cursor):
|
50 |
+
cursor.execute('''
|
51 |
+
CREATE TABLE IF NOT EXISTS db_data (
|
52 |
+
key TEXT PRIMARY KEY,
|
53 |
+
value TEXT
|
54 |
+
)
|
55 |
+
''')
|
56 |
+
|
57 |
+
cursor.execute('''
|
58 |
+
CREATE TABLE IF NOT EXISTS path_recorder (
|
59 |
+
path TEXT PRIMARY KEY,
|
60 |
+
depth INT,
|
61 |
+
path_display TEXT,
|
62 |
+
created TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
63 |
+
updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
64 |
+
)
|
65 |
+
''')
|
66 |
+
|
67 |
+
cursor.execute('''
|
68 |
+
CREATE TRIGGER path_recorder_tr
|
69 |
+
AFTER UPDATE ON path_recorder
|
70 |
+
BEGIN
|
71 |
+
UPDATE path_recorder SET updated = CURRENT_TIMESTAMP WHERE path = OLD.path;
|
72 |
+
END;
|
73 |
+
''')
|
74 |
+
|
75 |
+
cursor.execute('''
|
76 |
+
CREATE TABLE IF NOT EXISTS exif_data (
|
77 |
+
file TEXT,
|
78 |
+
key TEXT,
|
79 |
+
value TEXT,
|
80 |
+
created TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
81 |
+
updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
82 |
+
PRIMARY KEY (file, key)
|
83 |
+
)
|
84 |
+
''')
|
85 |
+
|
86 |
+
cursor.execute('''
|
87 |
+
CREATE INDEX IF NOT EXISTS exif_data_key ON exif_data (key)
|
88 |
+
''')
|
89 |
+
|
90 |
+
cursor.execute('''
|
91 |
+
CREATE TRIGGER exif_data_tr
|
92 |
+
AFTER UPDATE ON exif_data
|
93 |
+
BEGIN
|
94 |
+
UPDATE exif_data SET updated = CURRENT_TIMESTAMP WHERE file = OLD.file AND key = OLD.key;
|
95 |
+
END;
|
96 |
+
''')
|
97 |
+
|
98 |
+
cursor.execute('''
|
99 |
+
CREATE TABLE IF NOT EXISTS ranking (
|
100 |
+
file TEXT PRIMARY KEY,
|
101 |
+
name TEXT,
|
102 |
+
ranking TEXT,
|
103 |
+
created TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
104 |
+
updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
105 |
+
)
|
106 |
+
''')
|
107 |
+
|
108 |
+
cursor.execute('''
|
109 |
+
CREATE INDEX IF NOT EXISTS ranking_name ON ranking (name)
|
110 |
+
''')
|
111 |
+
|
112 |
+
cursor.execute('''
|
113 |
+
CREATE TRIGGER ranking_tr
|
114 |
+
AFTER UPDATE ON ranking
|
115 |
+
BEGIN
|
116 |
+
UPDATE ranking SET updated = CURRENT_TIMESTAMP WHERE file = OLD.file;
|
117 |
+
END;
|
118 |
+
''')
|
119 |
+
|
120 |
+
create_filehash(cursor)
|
121 |
+
create_work_files(cursor)
|
122 |
+
|
123 |
+
return
|
124 |
+
|
125 |
+
def migrate_path_recorder(cursor):
|
126 |
+
if os.path.exists(path_recorder_file):
|
127 |
+
try:
|
128 |
+
with open(path_recorder_file) as f:
|
129 |
+
# json-version
|
130 |
+
path_recorder = json.load(f)
|
131 |
+
for path, values in path_recorder.items():
|
132 |
+
path = os.path.realpath(path)
|
133 |
+
depth = values["depth"]
|
134 |
+
path_display = f"{path} [{depth}]"
|
135 |
+
cursor.execute('''
|
136 |
+
INSERT INTO path_recorder (path, depth, path_display)
|
137 |
+
VALUES (?, ?, ?)
|
138 |
+
''', (path, depth, path_display))
|
139 |
+
except json.JSONDecodeError:
|
140 |
+
with open(path_recorder_file) as f:
|
141 |
+
# old txt-version
|
142 |
+
path = f.readline().rstrip("\n")
|
143 |
+
while len(path) > 0:
|
144 |
+
path = os.path.realpath(path)
|
145 |
+
cursor.execute('''
|
146 |
+
INSERT INTO path_recorder (path, depth, path_display)
|
147 |
+
VALUES (?, ?, ?)
|
148 |
+
''', (path, 0, f"{path} [0]"))
|
149 |
+
path = f.readline().rstrip("\n")
|
150 |
+
|
151 |
+
return
|
152 |
+
|
153 |
+
def update_exif_data(cursor, file, info):
|
154 |
+
prompt = "0"
|
155 |
+
negative_prompt = "0"
|
156 |
+
key_values = "0: 0"
|
157 |
+
if info != "0":
|
158 |
+
info_list = info.split("\n")
|
159 |
+
prompt = ""
|
160 |
+
negative_prompt = ""
|
161 |
+
key_values = ""
|
162 |
+
for info_item in info_list:
|
163 |
+
if info_item.startswith(st):
|
164 |
+
key_values = info_item
|
165 |
+
elif info_item.startswith(np):
|
166 |
+
negative_prompt = info_item.replace(np, "")
|
167 |
+
else:
|
168 |
+
if prompt == "":
|
169 |
+
prompt = info_item
|
170 |
+
else:
|
171 |
+
# multiline prompts
|
172 |
+
prompt = f"{prompt}\n{info_item}"
|
173 |
+
if key_values != "":
|
174 |
+
key_value_pairs = []
|
175 |
+
key_value = ""
|
176 |
+
quote_open = False
|
177 |
+
for char in key_values + ",":
|
178 |
+
key_value += char
|
179 |
+
if char == '"':
|
180 |
+
quote_open = not quote_open
|
181 |
+
if char == "," and not quote_open:
|
182 |
+
try:
|
183 |
+
k, v = key_value.strip(" ,").split(": ")
|
184 |
+
except ValueError:
|
185 |
+
k = key_value.strip(" ,").split(": ")[0]
|
186 |
+
v = ""
|
187 |
+
key_value_pairs.append((k, v))
|
188 |
+
key_value = ""
|
189 |
+
|
190 |
+
try:
|
191 |
+
cursor.execute('''
|
192 |
+
INSERT INTO exif_data (file, key, value)
|
193 |
+
VALUES (?, ?, ?)
|
194 |
+
''', (file, "prompt", prompt))
|
195 |
+
except sqlite3.IntegrityError:
|
196 |
+
# Duplicate, delete all "file" entries and try again
|
197 |
+
cursor.execute('''
|
198 |
+
DELETE FROM exif_data
|
199 |
+
WHERE file = ?
|
200 |
+
''', (file,))
|
201 |
+
|
202 |
+
cursor.execute('''
|
203 |
+
INSERT INTO exif_data (file, key, value)
|
204 |
+
VALUES (?, ?, ?)
|
205 |
+
''', (file, "prompt", prompt))
|
206 |
+
|
207 |
+
cursor.execute('''
|
208 |
+
INSERT INTO exif_data (file, key, value)
|
209 |
+
VALUES (?, ?, ?)
|
210 |
+
''', (file, "negative_prompt", negative_prompt))
|
211 |
+
|
212 |
+
for (key, value) in key_value_pairs:
|
213 |
+
try:
|
214 |
+
cursor.execute('''
|
215 |
+
INSERT INTO exif_data (file, key, value)
|
216 |
+
VALUES (?, ?, ?)
|
217 |
+
''', (file, key, value))
|
218 |
+
except sqlite3.IntegrityError:
|
219 |
+
pass
|
220 |
+
|
221 |
+
return
|
222 |
+
|
223 |
+
def migrate_exif_data(cursor):
|
224 |
+
if os.path.exists(exif_cache_file):
|
225 |
+
with open(exif_cache_file, 'r') as file:
|
226 |
+
exif_cache = json.load(file)
|
227 |
+
|
228 |
+
for file, info in exif_cache.items():
|
229 |
+
file = os.path.realpath(file)
|
230 |
+
update_exif_data(cursor, file, info)
|
231 |
+
|
232 |
+
return
|
233 |
+
|
234 |
+
def migrate_ranking(cursor):
|
235 |
+
if os.path.exists(ranking_file):
|
236 |
+
with open(ranking_file, 'r') as file:
|
237 |
+
ranking = json.load(file)
|
238 |
+
for file, info in ranking.items():
|
239 |
+
if info != "None":
|
240 |
+
file = os.path.realpath(file)
|
241 |
+
name = os.path.basename(file)
|
242 |
+
cursor.execute('''
|
243 |
+
INSERT INTO ranking (file, name, ranking)
|
244 |
+
VALUES (?, ?, ?)
|
245 |
+
''', (file, name, info))
|
246 |
+
|
247 |
+
return
|
248 |
+
|
249 |
+
def get_hash(file):
|
250 |
+
# Get filehash without exif info
|
251 |
+
try:
|
252 |
+
image = Image.open(file)
|
253 |
+
except Exception as e:
|
254 |
+
print(e)
|
255 |
+
|
256 |
+
hash = hashlib.sha512(image.tobytes()).hexdigest()
|
257 |
+
image.close()
|
258 |
+
|
259 |
+
return hash
|
260 |
+
|
261 |
+
def migrate_filehash(cursor, version):
|
262 |
+
if version <= "4":
|
263 |
+
create_filehash(cursor)
|
264 |
+
|
265 |
+
cursor.execute('''
|
266 |
+
SELECT file
|
267 |
+
FROM ranking
|
268 |
+
''')
|
269 |
+
for (file,) in cursor.fetchall():
|
270 |
+
if os.path.exists(file):
|
271 |
+
hash = get_hash(file)
|
272 |
+
cursor.execute('''
|
273 |
+
INSERT INTO filehash (file, hash)
|
274 |
+
VALUES (?, ?)
|
275 |
+
''', (file, hash))
|
276 |
+
|
277 |
+
return
|
278 |
+
|
279 |
+
def migrate_work_files(cursor):
|
280 |
+
create_work_files(cursor)
|
281 |
+
|
282 |
+
return
|
283 |
+
|
284 |
+
def update_db_data(cursor, key, value):
|
285 |
+
cursor.execute('''
|
286 |
+
INSERT OR REPLACE
|
287 |
+
INTO db_data (key, value)
|
288 |
+
VALUES (?, ?)
|
289 |
+
''', (key, value))
|
290 |
+
|
291 |
+
return
|
292 |
+
|
293 |
+
def get_version():
|
294 |
+
with sqlite3.connect(db_file, timeout=timeout) as conn:
|
295 |
+
cursor = conn.cursor()
|
296 |
+
cursor.execute('''
|
297 |
+
SELECT value
|
298 |
+
FROM db_data
|
299 |
+
WHERE key = 'version'
|
300 |
+
''',)
|
301 |
+
db_version = cursor.fetchone()
|
302 |
+
|
303 |
+
return db_version
|
304 |
+
|
305 |
+
def migrate_path_recorder_dirs(cursor):
|
306 |
+
cursor.execute('''
|
307 |
+
SELECT path, path_display
|
308 |
+
FROM path_recorder
|
309 |
+
''')
|
310 |
+
for (path, path_display) in cursor.fetchall():
|
311 |
+
real_path = os.path.realpath(path)
|
312 |
+
if path != real_path:
|
313 |
+
update_from = path
|
314 |
+
update_to = real_path
|
315 |
+
try:
|
316 |
+
cursor.execute('''
|
317 |
+
UPDATE path_recorder
|
318 |
+
SET path = ?,
|
319 |
+
path_display = ? || SUBSTR(path_display, LENGTH(?) + 1)
|
320 |
+
WHERE path = ?
|
321 |
+
''', (update_to, update_to, update_from, update_from))
|
322 |
+
except sqlite3.IntegrityError as e:
|
323 |
+
# these are double keys, because the same file can be in the db with different path notations
|
324 |
+
(e_msg,) = e.args
|
325 |
+
if e_msg.startswith("UNIQUE constraint"):
|
326 |
+
cursor.execute('''
|
327 |
+
DELETE FROM path_recorder
|
328 |
+
WHERE path = ?
|
329 |
+
''', (update_from,))
|
330 |
+
else:
|
331 |
+
raise
|
332 |
+
|
333 |
+
return
|
334 |
+
|
335 |
+
def migrate_exif_data_dirs(cursor):
|
336 |
+
cursor.execute('''
|
337 |
+
SELECT file
|
338 |
+
FROM exif_data
|
339 |
+
''')
|
340 |
+
for (filepath,) in cursor.fetchall():
|
341 |
+
(path, file) = os.path.split(filepath)
|
342 |
+
real_path = os.path.realpath(path)
|
343 |
+
if path != real_path:
|
344 |
+
update_from = filepath
|
345 |
+
update_to = os.path.join(real_path, file)
|
346 |
+
try:
|
347 |
+
cursor.execute('''
|
348 |
+
UPDATE exif_data
|
349 |
+
SET file = ?
|
350 |
+
WHERE file = ?
|
351 |
+
''', (update_to, update_from))
|
352 |
+
except sqlite3.IntegrityError as e:
|
353 |
+
# these are double keys, because the same file can be in the db with different path notations
|
354 |
+
(e_msg,) = e.args
|
355 |
+
if e_msg.startswith("UNIQUE constraint"):
|
356 |
+
cursor.execute('''
|
357 |
+
DELETE FROM exif_data
|
358 |
+
WHERE file = ?
|
359 |
+
''', (update_from,))
|
360 |
+
else:
|
361 |
+
raise
|
362 |
+
|
363 |
+
return
|
364 |
+
|
365 |
+
def migrate_ranking_dirs(cursor, db_version):
|
366 |
+
if db_version == "1":
|
367 |
+
cursor.execute('''
|
368 |
+
ALTER TABLE ranking
|
369 |
+
ADD COLUMN name TEXT
|
370 |
+
''')
|
371 |
+
|
372 |
+
cursor.execute('''
|
373 |
+
CREATE INDEX IF NOT EXISTS ranking_name ON ranking (name)
|
374 |
+
''')
|
375 |
+
|
376 |
+
cursor.execute('''
|
377 |
+
SELECT file, ranking
|
378 |
+
FROM ranking
|
379 |
+
''')
|
380 |
+
for (filepath, ranking) in cursor.fetchall():
|
381 |
+
if filepath == "" or ranking == "None":
|
382 |
+
cursor.execute('''
|
383 |
+
DELETE FROM ranking
|
384 |
+
WHERE file = ?
|
385 |
+
''', (filepath,))
|
386 |
+
else:
|
387 |
+
(path, file) = os.path.split(filepath)
|
388 |
+
real_path = os.path.realpath(path)
|
389 |
+
name = file
|
390 |
+
update_from = filepath
|
391 |
+
update_to = os.path.join(real_path, file)
|
392 |
+
try:
|
393 |
+
cursor.execute('''
|
394 |
+
UPDATE ranking
|
395 |
+
SET file = ?,
|
396 |
+
name = ?
|
397 |
+
WHERE file = ?
|
398 |
+
''', (update_to, name, update_from))
|
399 |
+
except sqlite3.IntegrityError as e:
|
400 |
+
# these are double keys, because the same file can be in the db with different path notations
|
401 |
+
(e_msg,) = e.args
|
402 |
+
if e_msg.startswith("UNIQUE constraint"):
|
403 |
+
cursor.execute('''
|
404 |
+
DELETE FROM ranking
|
405 |
+
WHERE file = ?
|
406 |
+
''', (update_from,))
|
407 |
+
else:
|
408 |
+
raise
|
409 |
+
|
410 |
+
return
|
411 |
+
|
412 |
+
def check():
|
413 |
+
if not os.path.exists(db_file):
|
414 |
+
conn, cursor = transaction_begin()
|
415 |
+
print("Image Browser: Creating database")
|
416 |
+
create_db(cursor)
|
417 |
+
update_db_data(cursor, "version", version)
|
418 |
+
migrate_path_recorder(cursor)
|
419 |
+
migrate_exif_data(cursor)
|
420 |
+
migrate_ranking(cursor)
|
421 |
+
migrate_filehash(cursor, str(version))
|
422 |
+
transaction_end(conn, cursor)
|
423 |
+
print("Image Browser: Database created")
|
424 |
+
db_version = get_version()
|
425 |
+
conn, cursor = transaction_begin()
|
426 |
+
if db_version[0] <= "2":
|
427 |
+
# version 1 database had mixed path notations, changed them all to abspath
|
428 |
+
# version 2 database still had mixed path notations, because of windows short name, changed them all to realpath
|
429 |
+
print(f"Image Browser: Upgrading database from version {db_version[0]} to version {version}")
|
430 |
+
migrate_path_recorder_dirs(cursor)
|
431 |
+
migrate_exif_data_dirs(cursor)
|
432 |
+
migrate_ranking_dirs(cursor, db_version[0])
|
433 |
+
if db_version[0] <= "4":
|
434 |
+
migrate_filehash(cursor, db_version[0])
|
435 |
+
if db_version[0] <= "5":
|
436 |
+
migrate_work_files(cursor)
|
437 |
+
update_db_data(cursor, "version", version)
|
438 |
+
print(f"Image Browser: Database upgraded from version {db_version[0]} to version {version}")
|
439 |
+
transaction_end(conn, cursor)
|
440 |
+
|
441 |
+
return version
|
442 |
+
|
443 |
+
def load_path_recorder():
|
444 |
+
with sqlite3.connect(db_file, timeout=timeout) as conn:
|
445 |
+
cursor = conn.cursor()
|
446 |
+
cursor.execute('''
|
447 |
+
SELECT path, depth, path_display
|
448 |
+
FROM path_recorder
|
449 |
+
''')
|
450 |
+
path_recorder = {path: {"depth": depth, "path_display": path_display} for path, depth, path_display in cursor.fetchall()}
|
451 |
+
|
452 |
+
return path_recorder
|
453 |
+
|
454 |
+
def select_ranking(file):
|
455 |
+
with sqlite3.connect(db_file, timeout=timeout) as conn:
|
456 |
+
cursor = conn.cursor()
|
457 |
+
cursor.execute('''
|
458 |
+
SELECT ranking
|
459 |
+
FROM ranking
|
460 |
+
WHERE file = ?
|
461 |
+
''', (file,))
|
462 |
+
ranking_value = cursor.fetchone()
|
463 |
+
|
464 |
+
if ranking_value is None:
|
465 |
+
return_ranking = "None"
|
466 |
+
else:
|
467 |
+
(return_ranking,) = ranking_value
|
468 |
+
|
469 |
+
return return_ranking
|
470 |
+
|
471 |
+
def update_ranking(file, ranking):
|
472 |
+
name = os.path.basename(file)
|
473 |
+
with sqlite3.connect(db_file, timeout=timeout) as conn:
|
474 |
+
cursor = conn.cursor()
|
475 |
+
if ranking == "None":
|
476 |
+
cursor.execute('''
|
477 |
+
DELETE FROM ranking
|
478 |
+
WHERE file = ?
|
479 |
+
''', (file,))
|
480 |
+
else:
|
481 |
+
cursor.execute('''
|
482 |
+
INSERT OR REPLACE
|
483 |
+
INTO ranking (file, name, ranking)
|
484 |
+
VALUES (?, ?, ?)
|
485 |
+
''', (file, name, ranking))
|
486 |
+
|
487 |
+
hash = get_hash(file)
|
488 |
+
cursor.execute('''
|
489 |
+
INSERT OR REPLACE
|
490 |
+
INTO filehash (file, hash)
|
491 |
+
VALUES (?, ?)
|
492 |
+
''', (file, hash))
|
493 |
+
|
494 |
+
return
|
495 |
+
|
496 |
+
def select_image_reward_score(cursor, file):
|
497 |
+
cursor.execute('''
|
498 |
+
SELECT value
|
499 |
+
FROM exif_data
|
500 |
+
WHERE file = ?
|
501 |
+
AND key = 'ImageRewardScore'
|
502 |
+
''', (file,))
|
503 |
+
image_reward_score = cursor.fetchone()
|
504 |
+
if image_reward_score is None:
|
505 |
+
return_image_reward_score = None
|
506 |
+
else:
|
507 |
+
(return_image_reward_score,) = image_reward_score
|
508 |
+
cursor.execute('''
|
509 |
+
SELECT value
|
510 |
+
FROM exif_data
|
511 |
+
WHERE file = ?
|
512 |
+
AND key = 'prompt'
|
513 |
+
''', (file,))
|
514 |
+
image_reward_prompt = cursor.fetchone()
|
515 |
+
if image_reward_prompt is None:
|
516 |
+
return_image_reward_prompt = None
|
517 |
+
else:
|
518 |
+
(return_image_reward_prompt,) = image_reward_prompt
|
519 |
+
|
520 |
+
return return_image_reward_score, return_image_reward_prompt
|
521 |
+
|
522 |
+
def update_image_reward_score(cursor, file, image_reward_score):
|
523 |
+
cursor.execute('''
|
524 |
+
INSERT OR REPLACE
|
525 |
+
INTO exif_data (file, key, value)
|
526 |
+
VALUES (?, ?, ?)
|
527 |
+
''', (file, "ImageRewardScore", image_reward_score))
|
528 |
+
|
529 |
+
return
|
530 |
+
|
531 |
+
def update_path_recorder(path, depth, path_display):
|
532 |
+
with sqlite3.connect(db_file, timeout=timeout) as conn:
|
533 |
+
cursor = conn.cursor()
|
534 |
+
cursor.execute('''
|
535 |
+
INSERT OR REPLACE
|
536 |
+
INTO path_recorder (path, depth, path_display)
|
537 |
+
VALUES (?, ?, ?)
|
538 |
+
''', (path, depth, path_display))
|
539 |
+
|
540 |
+
return
|
541 |
+
|
542 |
+
def update_path_recorder(path, depth, path_display):
|
543 |
+
with sqlite3.connect(db_file, timeout=timeout) as conn:
|
544 |
+
cursor = conn.cursor()
|
545 |
+
cursor.execute('''
|
546 |
+
INSERT OR REPLACE
|
547 |
+
INTO path_recorder (path, depth, path_display)
|
548 |
+
VALUES (?, ?, ?)
|
549 |
+
''', (path, depth, path_display))
|
550 |
+
|
551 |
+
return
|
552 |
+
|
553 |
+
def delete_path_recorder(path):
|
554 |
+
with sqlite3.connect(db_file, timeout=timeout) as conn:
|
555 |
+
cursor = conn.cursor()
|
556 |
+
cursor.execute('''
|
557 |
+
DELETE FROM path_recorder
|
558 |
+
WHERE path = ?
|
559 |
+
''', (path,))
|
560 |
+
|
561 |
+
return
|
562 |
+
|
563 |
+
def update_path_recorder_mult(cursor, update_from, update_to):
|
564 |
+
cursor.execute('''
|
565 |
+
UPDATE path_recorder
|
566 |
+
SET path = ?,
|
567 |
+
path_display = ? || SUBSTR(path_display, LENGTH(?) + 1)
|
568 |
+
WHERE path = ?
|
569 |
+
''', (update_to, update_to, update_from, update_from))
|
570 |
+
|
571 |
+
return
|
572 |
+
|
573 |
+
def update_exif_data_mult(cursor, update_from, update_to):
|
574 |
+
update_from = update_from + os.path.sep
|
575 |
+
update_to = update_to + os.path.sep
|
576 |
+
cursor.execute('''
|
577 |
+
UPDATE exif_data
|
578 |
+
SET file = ? || SUBSTR(file, LENGTH(?) + 1)
|
579 |
+
WHERE file like ? || '%'
|
580 |
+
''', (update_to, update_from, update_from))
|
581 |
+
|
582 |
+
return
|
583 |
+
|
584 |
+
def update_ranking_mult(cursor, update_from, update_to):
|
585 |
+
update_from = update_from + os.path.sep
|
586 |
+
update_to = update_to + os.path.sep
|
587 |
+
cursor.execute('''
|
588 |
+
UPDATE ranking
|
589 |
+
SET file = ? || SUBSTR(file, LENGTH(?) + 1)
|
590 |
+
WHERE file like ? || '%'
|
591 |
+
''', (update_to, update_from, update_from))
|
592 |
+
|
593 |
+
return
|
594 |
+
|
595 |
+
def delete_exif_0(cursor):
|
596 |
+
cursor.execute('''
|
597 |
+
DELETE FROM exif_data
|
598 |
+
WHERE file IN (
|
599 |
+
SELECT file FROM exif_data a
|
600 |
+
WHERE value = '0'
|
601 |
+
GROUP BY file
|
602 |
+
HAVING COUNT(*) = (SELECT COUNT(*) FROM exif_data WHERE file = a.file)
|
603 |
+
)
|
604 |
+
''')
|
605 |
+
|
606 |
+
return
|
607 |
+
|
608 |
+
def get_ranking_by_file(cursor, file):
|
609 |
+
cursor.execute('''
|
610 |
+
SELECT ranking
|
611 |
+
FROM ranking
|
612 |
+
WHERE file = ?
|
613 |
+
''', (file,))
|
614 |
+
ranking_value = cursor.fetchone()
|
615 |
+
|
616 |
+
return ranking_value
|
617 |
+
|
618 |
+
def get_ranking_by_name(cursor, name):
|
619 |
+
cursor.execute('''
|
620 |
+
SELECT file, ranking
|
621 |
+
FROM ranking
|
622 |
+
WHERE name = ?
|
623 |
+
''', (name,))
|
624 |
+
ranking_value = cursor.fetchone()
|
625 |
+
|
626 |
+
if ranking_value is not None:
|
627 |
+
(file, _) = ranking_value
|
628 |
+
cursor.execute('''
|
629 |
+
SELECT hash
|
630 |
+
FROM filehash
|
631 |
+
WHERE file = ?
|
632 |
+
''', (file,))
|
633 |
+
hash_value = cursor.fetchone()
|
634 |
+
else:
|
635 |
+
hash_value = None
|
636 |
+
|
637 |
+
return ranking_value, hash_value
|
638 |
+
|
639 |
+
def insert_ranking(cursor, file, ranking, hash):
|
640 |
+
name = os.path.basename(file)
|
641 |
+
cursor.execute('''
|
642 |
+
INSERT INTO ranking (file, name, ranking)
|
643 |
+
VALUES (?, ?, ?)
|
644 |
+
''', (file, name, ranking))
|
645 |
+
|
646 |
+
cursor.execute('''
|
647 |
+
INSERT OR REPLACE
|
648 |
+
INTO filehash (file, hash)
|
649 |
+
VALUES (?, ?)
|
650 |
+
''', (file, hash))
|
651 |
+
|
652 |
+
return
|
653 |
+
|
654 |
+
def replace_ranking(cursor, file, alternate_file, hash):
|
655 |
+
name = os.path.basename(file)
|
656 |
+
cursor.execute('''
|
657 |
+
UPDATE ranking
|
658 |
+
SET file = ?
|
659 |
+
WHERE file = ?
|
660 |
+
''', (file, alternate_file))
|
661 |
+
|
662 |
+
cursor.execute('''
|
663 |
+
INSERT OR REPLACE
|
664 |
+
INTO filehash (file, hash)
|
665 |
+
VALUES (?, ?)
|
666 |
+
''', (file, hash))
|
667 |
+
|
668 |
+
return
|
669 |
+
|
670 |
+
def transaction_begin():
|
671 |
+
conn = sqlite3.connect(db_file, timeout=timeout)
|
672 |
+
conn.isolation_level = None
|
673 |
+
cursor = conn.cursor()
|
674 |
+
cursor.execute("BEGIN")
|
675 |
+
return conn, cursor
|
676 |
+
|
677 |
+
def transaction_end(conn, cursor):
|
678 |
+
cursor.execute("COMMIT")
|
679 |
+
conn.close()
|
680 |
+
return
|
681 |
+
|
682 |
+
def update_exif_data_by_key(cursor, file, key, value):
|
683 |
+
cursor.execute('''
|
684 |
+
INSERT OR REPLACE
|
685 |
+
INTO exif_data (file, key, value)
|
686 |
+
VALUES (?, ?, ?)
|
687 |
+
''', (file, key, value))
|
688 |
+
|
689 |
+
return
|
690 |
+
|
691 |
+
def select_prompts(file):
|
692 |
+
with sqlite3.connect(db_file, timeout=timeout) as conn:
|
693 |
+
cursor = conn.cursor()
|
694 |
+
cursor.execute('''
|
695 |
+
SELECT key, value
|
696 |
+
FROM exif_data
|
697 |
+
WHERE file = ?
|
698 |
+
AND KEY in ('prompt', 'negative_prompt')
|
699 |
+
''', (file,))
|
700 |
+
|
701 |
+
rows = cursor.fetchall()
|
702 |
+
prompt = ""
|
703 |
+
neg_prompt = ""
|
704 |
+
for row in rows:
|
705 |
+
(key, value) = row
|
706 |
+
if key == 'prompt':
|
707 |
+
prompt = value
|
708 |
+
elif key == 'negative_prompt':
|
709 |
+
neg_prompt = value
|
710 |
+
|
711 |
+
return prompt, neg_prompt
|
712 |
+
|
713 |
+
def load_exif_data(exif_cache):
|
714 |
+
with sqlite3.connect(db_file, timeout=timeout) as conn:
|
715 |
+
cursor = conn.cursor()
|
716 |
+
cursor.execute('''
|
717 |
+
SELECT file, group_concat(
|
718 |
+
case when key = 'prompt' or key = 'negative_prompt' then key || ': ' || value || '\n'
|
719 |
+
else key || ': ' || value
|
720 |
+
end, ', ') AS string
|
721 |
+
FROM (
|
722 |
+
SELECT *
|
723 |
+
FROM exif_data
|
724 |
+
ORDER BY
|
725 |
+
CASE WHEN key = 'prompt' THEN 0
|
726 |
+
WHEN key = 'negative_prompt' THEN 1
|
727 |
+
ELSE 2 END,
|
728 |
+
key
|
729 |
+
)
|
730 |
+
GROUP BY file
|
731 |
+
''')
|
732 |
+
|
733 |
+
rows = cursor.fetchall()
|
734 |
+
for row in rows:
|
735 |
+
exif_cache[row[0]] = row[1]
|
736 |
+
|
737 |
+
return exif_cache
|
738 |
+
|
739 |
+
def load_exif_data_by_key(cache, key1, key2):
|
740 |
+
with sqlite3.connect(db_file, timeout=timeout) as conn:
|
741 |
+
cursor = conn.cursor()
|
742 |
+
cursor.execute('''
|
743 |
+
SELECT file, value
|
744 |
+
FROM exif_data
|
745 |
+
WHERE key IN (?, ?)
|
746 |
+
''', (key1, key2))
|
747 |
+
|
748 |
+
rows = cursor.fetchall()
|
749 |
+
for row in rows:
|
750 |
+
cache[row[0]] = row[1]
|
751 |
+
|
752 |
+
return cache
|
753 |
+
|
754 |
+
def get_exif_dirs():
|
755 |
+
with sqlite3.connect(db_file, timeout=timeout) as conn:
|
756 |
+
cursor = conn.cursor()
|
757 |
+
cursor.execute('''
|
758 |
+
SELECT file
|
759 |
+
FROM exif_data
|
760 |
+
''')
|
761 |
+
|
762 |
+
rows = cursor.fetchall()
|
763 |
+
|
764 |
+
dirs = {}
|
765 |
+
for row in rows:
|
766 |
+
dir = os.path.dirname(row[0])
|
767 |
+
dirs[dir] = dir
|
768 |
+
|
769 |
+
return dirs
|
770 |
+
|
771 |
+
def fill_work_files(cursor, fileinfos):
|
772 |
+
filenames = [x[0] for x in fileinfos]
|
773 |
+
|
774 |
+
cursor.execute('''
|
775 |
+
DELETE
|
776 |
+
FROM work_files
|
777 |
+
''')
|
778 |
+
|
779 |
+
sql = '''
|
780 |
+
INSERT INTO work_files (file)
|
781 |
+
VALUES (?)
|
782 |
+
'''
|
783 |
+
|
784 |
+
cursor.executemany(sql, [(x,) for x in filenames])
|
785 |
+
|
786 |
+
return
|
787 |
+
|
788 |
+
def filter_aes(cursor, fileinfos, aes_filter_min_num, aes_filter_max_num, score_type):
|
789 |
+
if score_type == "aesthetic_score":
|
790 |
+
key = "aesthetic_score"
|
791 |
+
else:
|
792 |
+
key = "ImageRewardScore"
|
793 |
+
|
794 |
+
cursor.execute('''
|
795 |
+
DELETE
|
796 |
+
FROM work_files
|
797 |
+
WHERE file not in (
|
798 |
+
SELECT file
|
799 |
+
FROM exif_data b
|
800 |
+
WHERE file = b.file
|
801 |
+
AND b.key = ?
|
802 |
+
AND CAST(b.value AS REAL) between ? and ?
|
803 |
+
)
|
804 |
+
''', (key, aes_filter_min_num, aes_filter_max_num))
|
805 |
+
|
806 |
+
cursor.execute('''
|
807 |
+
SELECT file
|
808 |
+
FROM work_files
|
809 |
+
''')
|
810 |
+
|
811 |
+
rows = cursor.fetchall()
|
812 |
+
|
813 |
+
fileinfos_dict = {pair[0]: pair[1] for pair in fileinfos}
|
814 |
+
fileinfos_new = []
|
815 |
+
for (file,) in rows:
|
816 |
+
if fileinfos_dict.get(file) is not None:
|
817 |
+
fileinfos_new.append((file, fileinfos_dict[file]))
|
818 |
+
|
819 |
+
return fileinfos_new
|
820 |
+
|
821 |
+
def filter_ranking(cursor, fileinfos, ranking_filter, ranking_filter_min_num, ranking_filter_max_num):
|
822 |
+
if ranking_filter == "None":
|
823 |
+
cursor.execute('''
|
824 |
+
DELETE
|
825 |
+
FROM work_files
|
826 |
+
WHERE file IN (
|
827 |
+
SELECT file
|
828 |
+
FROM ranking b
|
829 |
+
WHERE file = b.file
|
830 |
+
)
|
831 |
+
''')
|
832 |
+
elif ranking_filter == "Min-max":
|
833 |
+
cursor.execute('''
|
834 |
+
DELETE
|
835 |
+
FROM work_files
|
836 |
+
WHERE file NOT IN (
|
837 |
+
SELECT file
|
838 |
+
FROM ranking b
|
839 |
+
WHERE file = b.file
|
840 |
+
AND b.ranking BETWEEN ? AND ?
|
841 |
+
)
|
842 |
+
''', (ranking_filter_min_num, ranking_filter_max_num))
|
843 |
+
else:
|
844 |
+
cursor.execute('''
|
845 |
+
DELETE
|
846 |
+
FROM work_files
|
847 |
+
WHERE file NOT IN (
|
848 |
+
SELECT file
|
849 |
+
FROM ranking b
|
850 |
+
WHERE file = b.file
|
851 |
+
AND b.ranking = ?
|
852 |
+
)
|
853 |
+
''', (ranking_filter,))
|
854 |
+
|
855 |
+
cursor.execute('''
|
856 |
+
SELECT file
|
857 |
+
FROM work_files
|
858 |
+
''')
|
859 |
+
|
860 |
+
rows = cursor.fetchall()
|
861 |
+
|
862 |
+
fileinfos_dict = {pair[0]: pair[1] for pair in fileinfos}
|
863 |
+
fileinfos_new = []
|
864 |
+
for (file,) in rows:
|
865 |
+
if fileinfos_dict.get(file) is not None:
|
866 |
+
fileinfos_new.append((file, fileinfos_dict[file]))
|
867 |
+
|
868 |
+
return fileinfos_new
|
869 |
+
|
870 |
+
def select_x_y(cursor, file):
|
871 |
+
cursor.execute('''
|
872 |
+
SELECT value
|
873 |
+
FROM exif_data
|
874 |
+
WHERE file = ?
|
875 |
+
AND key = 'Size'
|
876 |
+
''', (file,))
|
877 |
+
size_value = cursor.fetchone()
|
878 |
+
|
879 |
+
if size_value is None:
|
880 |
+
x = "?"
|
881 |
+
y = "?"
|
882 |
+
else:
|
883 |
+
(size,) = size_value
|
884 |
+
parts = size.split("x")
|
885 |
+
x = parts[0]
|
886 |
+
y = parts[1]
|
887 |
+
|
888 |
+
return x, y
|
extensions/stable-diffusion-webui-images-browser/style.css
ADDED
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.thumbnails.svelte-1tkea93.svelte-1tkea93 {
|
2 |
+
justify-content: initial;
|
3 |
+
}
|
4 |
+
|
5 |
+
.thumbnails.scroll-hide.svelte-g4rw9 {
|
6 |
+
justify-content: initial;
|
7 |
+
}
|
8 |
+
|
9 |
+
div[id^="image_browser_tab"][id$="image_browser_gallery"].hide_loading > .svelte-gjihhp {
|
10 |
+
display: none;
|
11 |
+
}
|
12 |
+
|
13 |
+
.image_browser_gallery img {
|
14 |
+
object-fit: scale-down !important;
|
15 |
+
}
|
16 |
+
|
17 |
+
/* Workaround until gradio version is updated to a version that fixes it
|
18 |
+
see https://github.com/gradio-app/gradio/issues/1590
|
19 |
+
*/
|
20 |
+
#tab_image_browser .thumbnail-item > img {
|
21 |
+
width: auto !important;
|
22 |
+
height: auto !important;
|
23 |
+
}
|
extensions/stable-diffusion-webui-images-browser/wib.sqlite3
ADDED
Binary file (307 kB). View file
|
|
extensions/ultimate-upscale-for-automatic1111/.gitignore
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
.vscode
|
extensions/ultimate-upscale-for-automatic1111/LICENSE
ADDED
@@ -0,0 +1,674 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
GNU GENERAL PUBLIC LICENSE
|
2 |
+
Version 3, 29 June 2007
|
3 |
+
|
4 |
+
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
5 |
+
Everyone is permitted to copy and distribute verbatim copies
|
6 |
+
of this license document, but changing it is not allowed.
|
7 |
+
|
8 |
+
Preamble
|
9 |
+
|
10 |
+
The GNU General Public License is a free, copyleft license for
|
11 |
+
software and other kinds of works.
|
12 |
+
|
13 |
+
The licenses for most software and other practical works are designed
|
14 |
+
to take away your freedom to share and change the works. By contrast,
|
15 |
+
the GNU General Public License is intended to guarantee your freedom to
|
16 |
+
share and change all versions of a program--to make sure it remains free
|
17 |
+
software for all its users. We, the Free Software Foundation, use the
|
18 |
+
GNU General Public License for most of our software; it applies also to
|
19 |
+
any other work released this way by its authors. You can apply it to
|
20 |
+
your programs, too.
|
21 |
+
|
22 |
+
When we speak of free software, we are referring to freedom, not
|
23 |
+
price. Our General Public Licenses are designed to make sure that you
|
24 |
+
have the freedom to distribute copies of free software (and charge for
|
25 |
+
them if you wish), that you receive source code or can get it if you
|
26 |
+
want it, that you can change the software or use pieces of it in new
|
27 |
+
free programs, and that you know you can do these things.
|
28 |
+
|
29 |
+
To protect your rights, we need to prevent others from denying you
|
30 |
+
these rights or asking you to surrender the rights. Therefore, you have
|
31 |
+
certain responsibilities if you distribute copies of the software, or if
|
32 |
+
you modify it: responsibilities to respect the freedom of others.
|
33 |
+
|
34 |
+
For example, if you distribute copies of such a program, whether
|
35 |
+
gratis or for a fee, you must pass on to the recipients the same
|
36 |
+
freedoms that you received. You must make sure that they, too, receive
|
37 |
+
or can get the source code. And you must show them these terms so they
|
38 |
+
know their rights.
|
39 |
+
|
40 |
+
Developers that use the GNU GPL protect your rights with two steps:
|
41 |
+
(1) assert copyright on the software, and (2) offer you this License
|
42 |
+
giving you legal permission to copy, distribute and/or modify it.
|
43 |
+
|
44 |
+
For the developers' and authors' protection, the GPL clearly explains
|
45 |
+
that there is no warranty for this free software. For both users' and
|
46 |
+
authors' sake, the GPL requires that modified versions be marked as
|
47 |
+
changed, so that their problems will not be attributed erroneously to
|
48 |
+
authors of previous versions.
|
49 |
+
|
50 |
+
Some devices are designed to deny users access to install or run
|
51 |
+
modified versions of the software inside them, although the manufacturer
|
52 |
+
can do so. This is fundamentally incompatible with the aim of
|
53 |
+
protecting users' freedom to change the software. The systematic
|
54 |
+
pattern of such abuse occurs in the area of products for individuals to
|
55 |
+
use, which is precisely where it is most unacceptable. Therefore, we
|
56 |
+
have designed this version of the GPL to prohibit the practice for those
|
57 |
+
products. If such problems arise substantially in other domains, we
|
58 |
+
stand ready to extend this provision to those domains in future versions
|
59 |
+
of the GPL, as needed to protect the freedom of users.
|
60 |
+
|
61 |
+
Finally, every program is threatened constantly by software patents.
|
62 |
+
States should not allow patents to restrict development and use of
|
63 |
+
software on general-purpose computers, but in those that do, we wish to
|
64 |
+
avoid the special danger that patents applied to a free program could
|
65 |
+
make it effectively proprietary. To prevent this, the GPL assures that
|
66 |
+
patents cannot be used to render the program non-free.
|
67 |
+
|
68 |
+
The precise terms and conditions for copying, distribution and
|
69 |
+
modification follow.
|
70 |
+
|
71 |
+
TERMS AND CONDITIONS
|
72 |
+
|
73 |
+
0. Definitions.
|
74 |
+
|
75 |
+
"This License" refers to version 3 of the GNU General Public License.
|
76 |
+
|
77 |
+
"Copyright" also means copyright-like laws that apply to other kinds of
|
78 |
+
works, such as semiconductor masks.
|
79 |
+
|
80 |
+
"The Program" refers to any copyrightable work licensed under this
|
81 |
+
License. Each licensee is addressed as "you". "Licensees" and
|
82 |
+
"recipients" may be individuals or organizations.
|
83 |
+
|
84 |
+
To "modify" a work means to copy from or adapt all or part of the work
|
85 |
+
in a fashion requiring copyright permission, other than the making of an
|
86 |
+
exact copy. The resulting work is called a "modified version" of the
|
87 |
+
earlier work or a work "based on" the earlier work.
|
88 |
+
|
89 |
+
A "covered work" means either the unmodified Program or a work based
|
90 |
+
on the Program.
|
91 |
+
|
92 |
+
To "propagate" a work means to do anything with it that, without
|
93 |
+
permission, would make you directly or secondarily liable for
|
94 |
+
infringement under applicable copyright law, except executing it on a
|
95 |
+
computer or modifying a private copy. Propagation includes copying,
|
96 |
+
distribution (with or without modification), making available to the
|
97 |
+
public, and in some countries other activities as well.
|
98 |
+
|
99 |
+
To "convey" a work means any kind of propagation that enables other
|
100 |
+
parties to make or receive copies. Mere interaction with a user through
|
101 |
+
a computer network, with no transfer of a copy, is not conveying.
|
102 |
+
|
103 |
+
An interactive user interface displays "Appropriate Legal Notices"
|
104 |
+
to the extent that it includes a convenient and prominently visible
|
105 |
+
feature that (1) displays an appropriate copyright notice, and (2)
|
106 |
+
tells the user that there is no warranty for the work (except to the
|
107 |
+
extent that warranties are provided), that licensees may convey the
|
108 |
+
work under this License, and how to view a copy of this License. If
|
109 |
+
the interface presents a list of user commands or options, such as a
|
110 |
+
menu, a prominent item in the list meets this criterion.
|
111 |
+
|
112 |
+
1. Source Code.
|
113 |
+
|
114 |
+
The "source code" for a work means the preferred form of the work
|
115 |
+
for making modifications to it. "Object code" means any non-source
|
116 |
+
form of a work.
|
117 |
+
|
118 |
+
A "Standard Interface" means an interface that either is an official
|
119 |
+
standard defined by a recognized standards body, or, in the case of
|
120 |
+
interfaces specified for a particular programming language, one that
|
121 |
+
is widely used among developers working in that language.
|
122 |
+
|
123 |
+
The "System Libraries" of an executable work include anything, other
|
124 |
+
than the work as a whole, that (a) is included in the normal form of
|
125 |
+
packaging a Major Component, but which is not part of that Major
|
126 |
+
Component, and (b) serves only to enable use of the work with that
|
127 |
+
Major Component, or to implement a Standard Interface for which an
|
128 |
+
implementation is available to the public in source code form. A
|
129 |
+
"Major Component", in this context, means a major essential component
|
130 |
+
(kernel, window system, and so on) of the specific operating system
|
131 |
+
(if any) on which the executable work runs, or a compiler used to
|
132 |
+
produce the work, or an object code interpreter used to run it.
|
133 |
+
|
134 |
+
The "Corresponding Source" for a work in object code form means all
|
135 |
+
the source code needed to generate, install, and (for an executable
|
136 |
+
work) run the object code and to modify the work, including scripts to
|
137 |
+
control those activities. However, it does not include the work's
|
138 |
+
System Libraries, or general-purpose tools or generally available free
|
139 |
+
programs which are used unmodified in performing those activities but
|
140 |
+
which are not part of the work. For example, Corresponding Source
|
141 |
+
includes interface definition files associated with source files for
|
142 |
+
the work, and the source code for shared libraries and dynamically
|
143 |
+
linked subprograms that the work is specifically designed to require,
|
144 |
+
such as by intimate data communication or control flow between those
|
145 |
+
subprograms and other parts of the work.
|
146 |
+
|
147 |
+
The Corresponding Source need not include anything that users
|
148 |
+
can regenerate automatically from other parts of the Corresponding
|
149 |
+
Source.
|
150 |
+
|
151 |
+
The Corresponding Source for a work in source code form is that
|
152 |
+
same work.
|
153 |
+
|
154 |
+
2. Basic Permissions.
|
155 |
+
|
156 |
+
All rights granted under this License are granted for the term of
|
157 |
+
copyright on the Program, and are irrevocable provided the stated
|
158 |
+
conditions are met. This License explicitly affirms your unlimited
|
159 |
+
permission to run the unmodified Program. The output from running a
|
160 |
+
covered work is covered by this License only if the output, given its
|
161 |
+
content, constitutes a covered work. This License acknowledges your
|
162 |
+
rights of fair use or other equivalent, as provided by copyright law.
|
163 |
+
|
164 |
+
You may make, run and propagate covered works that you do not
|
165 |
+
convey, without conditions so long as your license otherwise remains
|
166 |
+
in force. You may convey covered works to others for the sole purpose
|
167 |
+
of having them make modifications exclusively for you, or provide you
|
168 |
+
with facilities for running those works, provided that you comply with
|
169 |
+
the terms of this License in conveying all material for which you do
|
170 |
+
not control copyright. Those thus making or running the covered works
|
171 |
+
for you must do so exclusively on your behalf, under your direction
|
172 |
+
and control, on terms that prohibit them from making any copies of
|
173 |
+
your copyrighted material outside their relationship with you.
|
174 |
+
|
175 |
+
Conveying under any other circumstances is permitted solely under
|
176 |
+
the conditions stated below. Sublicensing is not allowed; section 10
|
177 |
+
makes it unnecessary.
|
178 |
+
|
179 |
+
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
180 |
+
|
181 |
+
No covered work shall be deemed part of an effective technological
|
182 |
+
measure under any applicable law fulfilling obligations under article
|
183 |
+
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
184 |
+
similar laws prohibiting or restricting circumvention of such
|
185 |
+
measures.
|
186 |
+
|
187 |
+
When you convey a covered work, you waive any legal power to forbid
|
188 |
+
circumvention of technological measures to the extent such circumvention
|
189 |
+
is effected by exercising rights under this License with respect to
|
190 |
+
the covered work, and you disclaim any intention to limit operation or
|
191 |
+
modification of the work as a means of enforcing, against the work's
|
192 |
+
users, your or third parties' legal rights to forbid circumvention of
|
193 |
+
technological measures.
|
194 |
+
|
195 |
+
4. Conveying Verbatim Copies.
|
196 |
+
|
197 |
+
You may convey verbatim copies of the Program's source code as you
|
198 |
+
receive it, in any medium, provided that you conspicuously and
|
199 |
+
appropriately publish on each copy an appropriate copyright notice;
|
200 |
+
keep intact all notices stating that this License and any
|
201 |
+
non-permissive terms added in accord with section 7 apply to the code;
|
202 |
+
keep intact all notices of the absence of any warranty; and give all
|
203 |
+
recipients a copy of this License along with the Program.
|
204 |
+
|
205 |
+
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.

  5. Conveying Modified Source Versions.

You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

    a) The work must carry prominent notices stating that you modified
    it, and giving a relevant date.

    b) The work must carry prominent notices stating that it is
    released under this License and any conditions added under section
    7. This requirement modifies the requirement in section 4 to
    "keep intact all notices".

    c) You must license the entire work, as a whole, under this
    License to anyone who comes into possession of a copy. This
    License will therefore apply, along with any applicable section 7
    additional terms, to the whole of the work, and all its parts,
    regardless of how they are packaged. This License gives no
    permission to license the work in any other way, but it does not
    invalidate such permission if you have separately received it.

    d) If the work has interactive user interfaces, each must display
    Appropriate Legal Notices; however, if the Program has interactive
    interfaces that do not display Appropriate Legal Notices, your
    work need not make them do so.

A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.

  6. Conveying Non-Source Forms.

You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

    a) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by the
    Corresponding Source fixed on a durable physical medium
    customarily used for software interchange.

    b) Convey the object code in, or embodied in, a physical product
    (including a physical distribution medium), accompanied by a
    written offer, valid for at least three years and valid for as
    long as you offer spare parts or customer support for that product
    model, to give anyone who possesses the object code either (1) a
    copy of the Corresponding Source for all the software in the
    product that is covered by this License, on a durable physical
    medium customarily used for software interchange, for a price no
    more than your reasonable cost of physically performing this
    conveying of source, or (2) access to copy the
    Corresponding Source from a network server at no charge.

    c) Convey individual copies of the object code with a copy of the
    written offer to provide the Corresponding Source. This
    alternative is allowed only occasionally and noncommercially, and
    only if you received the object code with such an offer, in accord
    with subsection 6b.

    d) Convey the object code by offering access from a designated
    place (gratis or for a charge), and offer equivalent access to the
    Corresponding Source in the same way through the same place at no
    further charge. You need not require recipients to copy the
    Corresponding Source along with the object code. If the place to
    copy the object code is a network server, the Corresponding Source
    may be on a different server (operated by you or a third party)
    that supports equivalent copying facilities, provided you maintain
    clear directions next to the object code saying where to find the
    Corresponding Source. Regardless of what server hosts the
    Corresponding Source, you remain obligated to ensure that it is
    available for as long as needed to satisfy these requirements.

    e) Convey the object code using peer-to-peer transmission, provided
    you inform other peers where the object code and Corresponding
    Source of the work are being offered to the general public at no
    charge under subsection 6d.

A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.

A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.

"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.

If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).

The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.

Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.

  7. Additional Terms.

"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.

When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.

Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

    a) Disclaiming warranty or limiting liability differently from the
    terms of sections 15 and 16 of this License; or

    b) Requiring preservation of specified reasonable legal notices or
    author attributions in that material or in the Appropriate Legal
    Notices displayed by works containing it; or

    c) Prohibiting misrepresentation of the origin of that material, or
    requiring that modified versions of such material be marked in
    reasonable ways as different from the original version; or

    d) Limiting the use for publicity purposes of names of licensors or
    authors of the material; or

    e) Declining to grant rights under trademark law for use of some
    trade names, trademarks, or service marks; or

    f) Requiring indemnification of licensors and authors of that
    material by anyone who conveys the material (or modified versions of
    it) with contractual assumptions of liability to the recipient, for
    any liability that these contractual assumptions directly impose on
    those licensors and authors.

All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.

If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.

Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.

  8. Termination.

You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).

However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.

Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.

Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.

  9. Acceptance Not Required for Having Copies.

You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.

  10. Automatic Licensing of Downstream Recipients.

Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.

An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.

You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.

  11. Patents.

A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".

A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.

Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.

In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.

If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.

If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.

A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.

Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.

  12. No Surrender of Others' Freedom.

If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.

  13. Use with the GNU Affero General Public License.

Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.

  14. Revised Versions of this License.

The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.

If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.

Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.

  15. Disclaimer of Warranty.

THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

  16. Limitation of Liability.

IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.

  17. Interpretation of Sections 15 and 16.

If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    ultimate-upscale-for-automatic1111
    Copyright (C) 2023 Mirzam

    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:

    <program> Copyright (C) 2023 Mirzam
    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".

You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.

The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
extensions/ultimate-upscale-for-automatic1111/README.md
ADDED
@@ -0,0 +1,43 @@
# Ultimate SD Upscale extension for [AUTOMATIC1111 Stable Diffusion web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
This extension lets you use a large denoise (0.3-0.5) without spawning many artifacts. It works on any video card, since you can use a 512x512 tile size and the image will still converge.

News channel: https://t.me/usdunews

# Instructions
All instructions can be found on the project's [wiki](https://github.com/Coyote-A/ultimate-upscale-for-automatic1111/wiki).

# Examples
More on the [wiki page](https://github.com/Coyote-A/ultimate-upscale-for-automatic1111/wiki/Examples)

<details>
<summary>E1</summary>
Original image

![Original](https://i.imgur.com/J8mRYOD.png)

2k upscaled. **Tile size**: 512, **Padding**: 32, **Mask blur**: 16, **Denoise**: 0.4
![2k upscale](https://i.imgur.com/0aKua4r.png)
</details>

<details>
<summary>E2</summary>
Original image

![Original](https://i.imgur.com/aALNI2w.png)

2k upscaled. **Tile size**: 768, **Padding**: 55, **Mask blur**: 20, **Denoise**: 0.35
![2k upscale](https://i.imgur.com/B5PHz0J.png)

4k upscaled. **Tile size**: 768, **Padding**: 55, **Mask blur**: 20, **Denoise**: 0.35
![4k upscale](https://i.imgur.com/tIUQ7TJ.jpg)
</details>

<details>
<summary>E3</summary>
Original image

![Original](https://i.imgur.com/AGtszA8.png)

4k upscaled. **Tile size**: 768, **Padding**: 55, **Mask blur**: 20, **Denoise**: 0.4
![4k upscale](https://i.imgur.com/LCYLfCs.jpg)
</details>
extensions/ultimate-upscale-for-automatic1111/scripts/__pycache__/ultimate-upscale.cpython-310.pyc
ADDED
Binary file (16.1 kB). View file
extensions/ultimate-upscale-for-automatic1111/scripts/ultimate-upscale.py
ADDED
@@ -0,0 +1,557 @@
import math
import gradio as gr
from PIL import Image, ImageDraw, ImageOps
from modules import processing, shared, images, devices, scripts
from modules.processing import StableDiffusionProcessing
from modules.processing import Processed
from modules.shared import opts, state
from enum import Enum

class USDUMode(Enum):
    LINEAR = 0
    CHESS = 1
    NONE = 2

class USDUSFMode(Enum):
    NONE = 0
    BAND_PASS = 1
    HALF_TILE = 2
    HALF_TILE_PLUS_INTERSECTIONS = 3

class USDUpscaler():

    def __init__(self, p, image, upscaler_index:int, save_redraw, save_seams_fix, tile_width, tile_height) -> None:
        self.p:StableDiffusionProcessing = p
        self.image:Image = image
        self.scale_factor = math.ceil(max(p.width, p.height) / max(image.width, image.height))
        self.upscaler = shared.sd_upscalers[upscaler_index]
        self.redraw = USDURedraw()
        self.redraw.save = save_redraw
        self.redraw.tile_width = tile_width if tile_width > 0 else tile_height
        self.redraw.tile_height = tile_height if tile_height > 0 else tile_width
        self.seams_fix = USDUSeamsFix()
        self.seams_fix.save = save_seams_fix
        self.seams_fix.tile_width = tile_width if tile_width > 0 else tile_height
        self.seams_fix.tile_height = tile_height if tile_height > 0 else tile_width
        self.initial_info = None
        self.rows = math.ceil(self.p.height / self.redraw.tile_height)
        self.cols = math.ceil(self.p.width / self.redraw.tile_width)

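    # The two helpers below decide how the chosen upscaler is applied. get_factor() picks a
    # single 2x/3x/4x step for a given remaining factor, and get_factors() decomposes the
    # overall scale factor into a list of such steps (for example 6 -> [3, 2] and 8 -> [4, 2]);
    # a factor that cannot be split this way (such as 5) is rounded up until it can be.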
    def get_factor(self, num):
        # Its just return, don't need elif
        if num == 1:
            return 2
        if num % 4 == 0:
            return 4
        if num % 3 == 0:
            return 3
        if num % 2 == 0:
            return 2
        return 0

    def get_factors(self):
        scales = []
        current_scale = 1
        current_scale_factor = self.get_factor(self.scale_factor)
        while current_scale_factor == 0:
            self.scale_factor += 1
            current_scale_factor = self.get_factor(self.scale_factor)
        while current_scale < self.scale_factor:
            current_scale_factor = self.get_factor(self.scale_factor // current_scale)
            scales.append(current_scale_factor)
            current_scale = current_scale * current_scale_factor
            if current_scale_factor == 0:
                break
        self.scales = enumerate(scales)

    def upscale(self):
        # Log info
        print(f"Canva size: {self.p.width}x{self.p.height}")
        print(f"Image size: {self.image.width}x{self.image.height}")
        print(f"Scale factor: {self.scale_factor}")
        # Check upscaler is not empty
        if self.upscaler.name == "None":
            self.image = self.image.resize((self.p.width, self.p.height), resample=Image.LANCZOS)
            return
        # Get list with scale factors
        self.get_factors()
        # Upscaling image over all factors
        for index, value in self.scales:
            print(f"Upscaling iteration {index+1} with scale factor {value}")
            self.image = self.upscaler.scaler.upscale(self.image, value, self.upscaler.data_path)
        # Resize image to set values
        self.image = self.image.resize((self.p.width, self.p.height), resample=Image.LANCZOS)

    def setup_redraw(self, redraw_mode, padding, mask_blur):
        self.redraw.mode = USDUMode(redraw_mode)
        self.redraw.enabled = self.redraw.mode != USDUMode.NONE
        self.redraw.padding = padding
        self.p.mask_blur = mask_blur

    def setup_seams_fix(self, padding, denoise, mask_blur, width, mode):
        self.seams_fix.padding = padding
        self.seams_fix.denoise = denoise
        self.seams_fix.mask_blur = mask_blur
        self.seams_fix.width = width
        self.seams_fix.mode = USDUSFMode(mode)
        self.seams_fix.enabled = self.seams_fix.mode != USDUSFMode.NONE

    def save_image(self):
        if type(self.p.prompt) != list:
            images.save_image(self.image, self.p.outpath_samples, "", self.p.seed, self.p.prompt, opts.samples_format, info=self.initial_info, p=self.p)
        else:
            images.save_image(self.image, self.p.outpath_samples, "", self.p.seed, self.p.prompt[0], opts.samples_format, info=self.initial_info, p=self.p)

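    # calc_jobs_count() pre-computes how many img2img calls the whole run will make so the
    # web UI can track progress: one job per redraw tile (rows * cols) plus the seam passes.
    # For a 4x4 grid, for example, "Half tile offset pass" adds 4*3 + 3*4 = 24 jobs, and the
    # "+ intersections" variant adds a further 3*3 = 9.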
    def calc_jobs_count(self):
        redraw_job_count = (self.rows * self.cols) if self.redraw.enabled else 0
        seams_job_count = 0
        if self.seams_fix.mode == USDUSFMode.BAND_PASS:
            seams_job_count = self.rows + self.cols - 2
        elif self.seams_fix.mode == USDUSFMode.HALF_TILE:
            seams_job_count = self.rows * (self.cols - 1) + (self.rows - 1) * self.cols
        elif self.seams_fix.mode == USDUSFMode.HALF_TILE_PLUS_INTERSECTIONS:
            seams_job_count = self.rows * (self.cols - 1) + (self.rows - 1) * self.cols + (self.rows - 1) * (self.cols - 1)

        state.job_count = redraw_job_count + seams_job_count

    def print_info(self):
        print(f"Tile size: {self.redraw.tile_width}x{self.redraw.tile_height}")
        print(f"Tiles amount: {self.rows * self.cols}")
        print(f"Grid: {self.rows}x{self.cols}")
        print(f"Redraw enabled: {self.redraw.enabled}")
        print(f"Seams fix mode: {self.seams_fix.mode.name}")

    def add_extra_info(self):
        self.p.extra_generation_params["Ultimate SD upscale upscaler"] = self.upscaler.name
        self.p.extra_generation_params["Ultimate SD upscale tile_width"] = self.redraw.tile_width
        self.p.extra_generation_params["Ultimate SD upscale tile_height"] = self.redraw.tile_height
        self.p.extra_generation_params["Ultimate SD upscale mask_blur"] = self.p.mask_blur
        self.p.extra_generation_params["Ultimate SD upscale padding"] = self.redraw.padding

    def process(self):
        state.begin()
        self.calc_jobs_count()
        self.result_images = []
        if self.redraw.enabled:
            self.image = self.redraw.start(self.p, self.image, self.rows, self.cols)
            self.initial_info = self.redraw.initial_info
        self.result_images.append(self.image)
        if self.redraw.save:
            self.save_image()

        if self.seams_fix.enabled:
            self.image = self.seams_fix.start(self.p, self.image, self.rows, self.cols)
            self.initial_info = self.seams_fix.initial_info
            self.result_images.append(self.image)
            if self.seams_fix.save:
                self.save_image()
        state.end()

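# USDURedraw runs the main tile-by-tile img2img pass over the upscaled picture: each tile is
# selected with a white rectangle on an otherwise black inpaint mask, re-rendered at the
# configured denoise, and the mask is then cleared again. "Linear" walks the grid row by row,
# while "Chess" re-renders the two checkerboard colours in two separate sweeps, which is
# intended to reduce visible drift between neighbouring tiles.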
class USDURedraw():

    def init_draw(self, p, width, height):
        p.inpaint_full_res = True
        p.inpaint_full_res_padding = self.padding
        p.width = math.ceil((self.tile_width+self.padding) / 64) * 64
        p.height = math.ceil((self.tile_height+self.padding) / 64) * 64
        mask = Image.new("L", (width, height), "black")
        draw = ImageDraw.Draw(mask)
        return mask, draw

    def calc_rectangle(self, xi, yi):
        x1 = xi * self.tile_width
        y1 = yi * self.tile_height
        x2 = xi * self.tile_width + self.tile_width
        y2 = yi * self.tile_height + self.tile_height

        return x1, y1, x2, y2

    def linear_process(self, p, image, rows, cols):
        mask, draw = self.init_draw(p, image.width, image.height)
        for yi in range(rows):
            for xi in range(cols):
                if state.interrupted:
                    break
                draw.rectangle(self.calc_rectangle(xi, yi), fill="white")
                p.init_images = [image]
                p.image_mask = mask
                processed = processing.process_images(p)
                draw.rectangle(self.calc_rectangle(xi, yi), fill="black")
                if (len(processed.images) > 0):
                    image = processed.images[0]

        p.width = image.width
        p.height = image.height
        self.initial_info = processed.infotext(p, 0)

        return image

    def chess_process(self, p, image, rows, cols):
        mask, draw = self.init_draw(p, image.width, image.height)
        tiles = []
        # calc tiles colors
        for yi in range(rows):
            for xi in range(cols):
                if state.interrupted:
                    break
                if xi == 0:
                    tiles.append([])
                color = xi % 2 == 0
                if yi > 0 and yi % 2 != 0:
                    color = not color
                tiles[yi].append(color)

        for yi in range(len(tiles)):
            for xi in range(len(tiles[yi])):
                if state.interrupted:
                    break
                if not tiles[yi][xi]:
                    tiles[yi][xi] = not tiles[yi][xi]
                    continue
                tiles[yi][xi] = not tiles[yi][xi]
                draw.rectangle(self.calc_rectangle(xi, yi), fill="white")
                p.init_images = [image]
                p.image_mask = mask
                processed = processing.process_images(p)
                draw.rectangle(self.calc_rectangle(xi, yi), fill="black")
                if (len(processed.images) > 0):
                    image = processed.images[0]

        for yi in range(len(tiles)):
            for xi in range(len(tiles[yi])):
                if state.interrupted:
                    break
                if not tiles[yi][xi]:
                    continue
                draw.rectangle(self.calc_rectangle(xi, yi), fill="white")
                p.init_images = [image]
                p.image_mask = mask
                processed = processing.process_images(p)
                draw.rectangle(self.calc_rectangle(xi, yi), fill="black")
                if (len(processed.images) > 0):
                    image = processed.images[0]

        p.width = image.width
        p.height = image.height
        self.initial_info = processed.infotext(p, 0)

        return image

    def start(self, p, image, rows, cols):
        self.initial_info = None
        if self.mode == USDUMode.LINEAR:
            return self.linear_process(p, image, rows, cols)
        if self.mode == USDUMode.CHESS:
            return self.chess_process(p, image, rows, cols)

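# USDUSeamsFix post-processes the seams left between redrawn tiles. "Band pass" re-renders
# narrow strips centred on every tile border; the "Half tile offset" modes re-render
# tile-sized regions shifted by half a tile (row seams first, then column seams) using
# linear-gradient masks so the new content blends into its surroundings; the
# "+ intersections" variant adds an extra pass with radial-gradient masks over the points
# where four tiles meet.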
class USDUSeamsFix():

    def init_draw(self, p):
        self.initial_info = None
        p.width = math.ceil((self.tile_width+self.padding) / 64) * 64
        p.height = math.ceil((self.tile_height+self.padding) / 64) * 64

    def half_tile_process(self, p, image, rows, cols):

        self.init_draw(p)
        processed = None

        gradient = Image.linear_gradient("L")
        row_gradient = Image.new("L", (self.tile_width, self.tile_height), "black")
        row_gradient.paste(gradient.resize(
            (self.tile_width, self.tile_height//2), resample=Image.BICUBIC), (0, 0))
        row_gradient.paste(gradient.rotate(180).resize(
            (self.tile_width, self.tile_height//2), resample=Image.BICUBIC),
            (0, self.tile_height//2))
        col_gradient = Image.new("L", (self.tile_width, self.tile_height), "black")
        col_gradient.paste(gradient.rotate(90).resize(
            (self.tile_width//2, self.tile_height), resample=Image.BICUBIC), (0, 0))
        col_gradient.paste(gradient.rotate(270).resize(
            (self.tile_width//2, self.tile_height), resample=Image.BICUBIC), (self.tile_width//2, 0))

        p.denoising_strength = self.denoise
        p.mask_blur = self.mask_blur

        for yi in range(rows-1):
            for xi in range(cols):
                if state.interrupted:
                    break
                p.width = self.tile_width
                p.height = self.tile_height
                p.inpaint_full_res = True
                p.inpaint_full_res_padding = self.padding
                mask = Image.new("L", (image.width, image.height), "black")
                mask.paste(row_gradient, (xi*self.tile_width, yi*self.tile_height + self.tile_height//2))

                p.init_images = [image]
                p.image_mask = mask
                processed = processing.process_images(p)
                if (len(processed.images) > 0):
                    image = processed.images[0]

        for yi in range(rows):
            for xi in range(cols-1):
                if state.interrupted:
                    break
                p.width = self.tile_width
                p.height = self.tile_height
                p.inpaint_full_res = True
                p.inpaint_full_res_padding = self.padding
                mask = Image.new("L", (image.width, image.height), "black")
                mask.paste(col_gradient, (xi*self.tile_width+self.tile_width//2, yi*self.tile_height))

                p.init_images = [image]
                p.image_mask = mask
                processed = processing.process_images(p)
                if (len(processed.images) > 0):
                    image = processed.images[0]

        p.width = image.width
        p.height = image.height
        if processed is not None:
            self.initial_info = processed.infotext(p, 0)

        return image

    def half_tile_process_corners(self, p, image, rows, cols):
        fixed_image = self.half_tile_process(p, image, rows, cols)
        processed = None
        self.init_draw(p)
        gradient = Image.radial_gradient("L").resize(
            (self.tile_width, self.tile_height), resample=Image.BICUBIC)
        gradient = ImageOps.invert(gradient)
        p.denoising_strength = self.denoise
        #p.mask_blur = 0
        p.mask_blur = self.mask_blur

        for yi in range(rows-1):
            for xi in range(cols-1):
                if state.interrupted:
                    break
                p.width = self.tile_width
                p.height = self.tile_height
                p.inpaint_full_res = True
                p.inpaint_full_res_padding = 0
                mask = Image.new("L", (fixed_image.width, fixed_image.height), "black")
                mask.paste(gradient, (xi*self.tile_width + self.tile_width//2,
                                      yi*self.tile_height + self.tile_height//2))

                p.init_images = [fixed_image]
                p.image_mask = mask
                processed = processing.process_images(p)
                if (len(processed.images) > 0):
                    fixed_image = processed.images[0]

        p.width = fixed_image.width
        p.height = fixed_image.height
        if processed is not None:
            self.initial_info = processed.infotext(p, 0)

        return fixed_image

    def band_pass_process(self, p, image, cols, rows):

        self.init_draw(p)
        processed = None

        p.denoising_strength = self.denoise
        p.mask_blur = 0

        gradient = Image.linear_gradient("L")
        mirror_gradient = Image.new("L", (256, 256), "black")
        mirror_gradient.paste(gradient.resize((256, 128), resample=Image.BICUBIC), (0, 0))
        mirror_gradient.paste(gradient.rotate(180).resize((256, 128), resample=Image.BICUBIC), (0, 128))

        row_gradient = mirror_gradient.resize((image.width, self.width), resample=Image.BICUBIC)
        col_gradient = mirror_gradient.rotate(90).resize((self.width, image.height), resample=Image.BICUBIC)

        for xi in range(1, rows):
            if state.interrupted:
                break
            p.width = self.width + self.padding * 2
            p.height = image.height
            p.inpaint_full_res = True
            p.inpaint_full_res_padding = self.padding
            mask = Image.new("L", (image.width, image.height), "black")
            mask.paste(col_gradient, (xi * self.tile_width - self.width // 2, 0))

            p.init_images = [image]
            p.image_mask = mask
            processed = processing.process_images(p)
            if (len(processed.images) > 0):
                image = processed.images[0]
        for yi in range(1, cols):
            if state.interrupted:
                break
            p.width = image.width
            p.height = self.width + self.padding * 2
            p.inpaint_full_res = True
            p.inpaint_full_res_padding = self.padding
            mask = Image.new("L", (image.width, image.height), "black")
            mask.paste(row_gradient, (0, yi * self.tile_height - self.width // 2))

            p.init_images = [image]
            p.image_mask = mask
            processed = processing.process_images(p)
            if (len(processed.images) > 0):
                image = processed.images[0]

        p.width = image.width
        p.height = image.height
        if processed is not None:
            self.initial_info = processed.infotext(p, 0)

        return image

    def start(self, p, image, rows, cols):
        if USDUSFMode(self.mode) == USDUSFMode.BAND_PASS:
            return self.band_pass_process(p, image, rows, cols)
        elif USDUSFMode(self.mode) == USDUSFMode.HALF_TILE:
            return self.half_tile_process(p, image, rows, cols)
        elif USDUSFMode(self.mode) == USDUSFMode.HALF_TILE_PLUS_INTERSECTIONS:
            return self.half_tile_process_corners(p, image, rows, cols)
        else:
            return image

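# Script is the AUTOMATIC1111 script entry point: ui() builds the gradio controls shown in
# the img2img "Script" dropdown, and run() receives their values in the same order as the
# list returned by ui().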
class Script(scripts.Script):
    def title(self):
        return "Ultimate SD upscale"

    def show(self, is_img2img):
        return is_img2img

    def ui(self, is_img2img):

        target_size_types = [
            "From img2img2 settings",
            "Custom size",
            "Scale from image size"
        ]

        seams_fix_types = [
            "None",
            "Band pass",
            "Half tile offset pass",
            "Half tile offset pass + intersections"
        ]

        redrow_modes = [
            "Linear",
            "Chess",
            "None"
        ]

        info = gr.HTML(
            "<p style=\"margin-bottom:0.75em\">Will upscale the image depending on the selected target size type</p>")

        with gr.Row():
            target_size_type = gr.Dropdown(label="Target size type", choices=[k for k in target_size_types], type="index",
                                           value=next(iter(target_size_types)))

            custom_width = gr.Slider(label='Custom width', minimum=64, maximum=8192, step=64, value=2048, visible=False, interactive=True)
            custom_height = gr.Slider(label='Custom height', minimum=64, maximum=8192, step=64, value=2048, visible=False, interactive=True)
            custom_scale = gr.Slider(label='Scale', minimum=1, maximum=16, step=0.01, value=2, visible=False, interactive=True)

        gr.HTML("<p style=\"margin-bottom:0.75em\">Redraw options:</p>")
        with gr.Row():
            upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers],
                                      value=shared.sd_upscalers[0].name, type="index")
        with gr.Row():
            redraw_mode = gr.Dropdown(label="Type", choices=[k for k in redrow_modes], type="index", value=next(iter(redrow_modes)))
            tile_width = gr.Slider(minimum=0, maximum=2048, step=64, label='Tile width', value=512)
            tile_height = gr.Slider(minimum=0, maximum=2048, step=64, label='Tile height', value=0)
            mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8)
            padding = gr.Slider(label='Padding', minimum=0, maximum=128, step=1, value=32)
        gr.HTML("<p style=\"margin-bottom:0.75em\">Seams fix:</p>")
        with gr.Row():
            seams_fix_type = gr.Dropdown(label="Type", choices=[k for k in seams_fix_types], type="index", value=next(iter(seams_fix_types)))
            seams_fix_denoise = gr.Slider(label='Denoise', minimum=0, maximum=1, step=0.01, value=0.35, visible=False, interactive=True)
            seams_fix_width = gr.Slider(label='Width', minimum=0, maximum=128, step=1, value=64, visible=False, interactive=True)
            seams_fix_mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, visible=False, interactive=True)
            seams_fix_padding = gr.Slider(label='Padding', minimum=0, maximum=128, step=1, value=16, visible=False, interactive=True)
        gr.HTML("<p style=\"margin-bottom:0.75em\">Save options:</p>")
        with gr.Row():
            save_upscaled_image = gr.Checkbox(label="Upscaled", value=True)
            save_seams_fix_image = gr.Checkbox(label="Seams fix", value=False)

        def select_fix_type(fix_index):
            all_visible = fix_index != 0
            mask_blur_visible = fix_index == 2 or fix_index == 3
            width_visible = fix_index == 1

            return [gr.update(visible=all_visible),
                    gr.update(visible=width_visible),
                    gr.update(visible=mask_blur_visible),
                    gr.update(visible=all_visible)]

        seams_fix_type.change(
            fn=select_fix_type,
            inputs=seams_fix_type,
            outputs=[seams_fix_denoise, seams_fix_width, seams_fix_mask_blur, seams_fix_padding]
        )

        def select_scale_type(scale_index):
            is_custom_size = scale_index == 1
            is_custom_scale = scale_index == 2

            return [gr.update(visible=is_custom_size),
                    gr.update(visible=is_custom_size),
                    gr.update(visible=is_custom_scale),
                    ]

        target_size_type.change(
            fn=select_scale_type,
            inputs=target_size_type,
            outputs=[custom_width, custom_height, custom_scale]
        )

        return [info, tile_width, tile_height, mask_blur, padding, seams_fix_width, seams_fix_denoise, seams_fix_padding,
                upscaler_index, save_upscaled_image, redraw_mode, save_seams_fix_image, seams_fix_mask_blur,
                seams_fix_type, target_size_type, custom_width, custom_height, custom_scale]

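    # run() drives the whole pipeline: fix the seed, resolve the target size, upscale with the
    # chosen upscaler, then hand the image to USDUpscaler for the redraw and seams-fix passes.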
    def run(self, p, _, tile_width, tile_height, mask_blur, padding, seams_fix_width, seams_fix_denoise, seams_fix_padding,
            upscaler_index, save_upscaled_image, redraw_mode, save_seams_fix_image, seams_fix_mask_blur,
            seams_fix_type, target_size_type, custom_width, custom_height, custom_scale):

        # Init
        processing.fix_seed(p)
        devices.torch_gc()

        p.do_not_save_grid = True
        p.do_not_save_samples = True
        p.inpaint_full_res = False

        p.inpainting_fill = 1
        p.n_iter = 1
        p.batch_size = 1

        seed = p.seed

        # Init image
        init_img = p.init_images[0]
        if init_img == None:
            return Processed(p, [], seed, "Empty image")
        init_img = images.flatten(init_img, opts.img2img_background_color)

        #override size
        if target_size_type == 1:
            p.width = custom_width
            p.height = custom_height
        if target_size_type == 2:
            p.width = math.ceil((init_img.width * custom_scale) / 64) * 64
            p.height = math.ceil((init_img.height * custom_scale) / 64) * 64

        # Upscaling
        upscaler = USDUpscaler(p, init_img, upscaler_index, save_upscaled_image, save_seams_fix_image, tile_width, tile_height)
        upscaler.upscale()

        # Drawing
        upscaler.setup_redraw(redraw_mode, padding, mask_blur)
        upscaler.setup_seams_fix(seams_fix_padding, seams_fix_denoise, seams_fix_mask_blur, seams_fix_width, seams_fix_type)
        upscaler.print_info()
        upscaler.add_extra_info()
        upscaler.process()
        result_images = upscaler.result_images

        return Processed(p, result_images, seed, upscaler.initial_info if upscaler.initial_info is not None else "")