Kizi-Art commited on
Commit
62d142f
1 Parent(s): e8944c6

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. modules/Roboto-Regular.ttf +0 -0
  2. modules/api/api.py +788 -0
  3. modules/api/models.py +313 -0
  4. modules/cache.py +124 -0
  5. modules/call_queue.py +118 -0
  6. modules/cmd_args.py +119 -0
  7. modules/codeformer/codeformer_arch.py +276 -0
  8. modules/codeformer/vqgan_arch.py +435 -0
  9. modules/codeformer_model.py +132 -0
  10. modules/config_states.py +199 -0
  11. modules/deepbooru.py +98 -0
  12. modules/deepbooru_model.py +678 -0
  13. modules/devices.py +153 -0
  14. modules/errors.py +136 -0
  15. modules/esrgan_model.py +229 -0
  16. modules/esrgan_model_arch.py +465 -0
  17. modules/extensions.py +165 -0
  18. modules/extra_networks.py +224 -0
  19. modules/extra_networks_hypernet.py +28 -0
  20. modules/extras.py +330 -0
  21. modules/face_restoration.py +19 -0
  22. modules/fifo_lock.py +37 -0
  23. modules/generation_parameters_copypaste.py +445 -0
  24. modules/gfpgan_model.py +110 -0
  25. modules/gitpython_hack.py +42 -0
  26. modules/gradio_extensons.py +73 -0
  27. modules/hashes.py +81 -0
  28. modules/hypernetworks/hypernetwork.py +782 -0
  29. modules/hypernetworks/ui.py +38 -0
  30. modules/images.py +778 -0
  31. modules/img2img.py +219 -0
  32. modules/import_hook.py +5 -0
  33. modules/initialize.py +168 -0
  34. modules/initialize_util.py +202 -0
  35. modules/interrogate.py +222 -0
  36. modules/launch_utils.py +449 -0
  37. modules/localization.py +34 -0
  38. modules/logging_config.py +16 -0
  39. modules/lowvram.py +147 -0
  40. modules/mac_specific.py +83 -0
  41. modules/masking.py +99 -0
  42. modules/memmon.py +92 -0
  43. modules/modelloader.py +179 -0
  44. modules/models/diffusion/ddpm_edit.py +1455 -0
  45. modules/models/diffusion/uni_pc/__init__.py +1 -0
  46. modules/models/diffusion/uni_pc/sampler.py +101 -0
  47. modules/models/diffusion/uni_pc/uni_pc.py +863 -0
  48. modules/ngrok.py +30 -0
  49. modules/options.py +245 -0
  50. modules/patches.py +64 -0
modules/Roboto-Regular.ttf ADDED
Binary file (306 kB). View file
 
modules/api/api.py ADDED
@@ -0,0 +1,788 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ import io
3
+ import os
4
+ import time
5
+ import datetime
6
+ import uvicorn
7
+ import ipaddress
8
+ import requests
9
+ import gradio as gr
10
+ from threading import Lock
11
+ from io import BytesIO
12
+ from fastapi import APIRouter, Depends, FastAPI, Request, Response
13
+ from fastapi.security import HTTPBasic, HTTPBasicCredentials
14
+ from fastapi.exceptions import HTTPException
15
+ from fastapi.responses import JSONResponse
16
+ from fastapi.encoders import jsonable_encoder
17
+ from secrets import compare_digest
18
+
19
+ import modules.shared as shared
20
+ from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors, restart, shared_items
21
+ from modules.api import models
22
+ from modules.shared import opts
23
+ from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
24
+ from modules.textual_inversion.textual_inversion import create_embedding, train_embedding
25
+ from modules.textual_inversion.preprocess import preprocess
26
+ from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
27
+ from PIL import PngImagePlugin,Image
28
+ from modules.sd_models import unload_model_weights, reload_model_weights, checkpoint_aliases
29
+ from modules.sd_models_config import find_checkpoint_config_near_filename
30
+ from modules.realesrgan_model import get_realesrgan_models
31
+ from modules import devices
32
+ from typing import Dict, List, Any
33
+ import piexif
34
+ import piexif.helper
35
+ from contextlib import closing
36
+
37
+
38
+ def script_name_to_index(name, scripts):
39
+ try:
40
+ return [script.title().lower() for script in scripts].index(name.lower())
41
+ except Exception as e:
42
+ raise HTTPException(status_code=422, detail=f"Script '{name}' not found") from e
43
+
44
+
45
+ def validate_sampler_name(name):
46
+ config = sd_samplers.all_samplers_map.get(name, None)
47
+ if config is None:
48
+ raise HTTPException(status_code=404, detail="Sampler not found")
49
+
50
+ return name
51
+
52
+
53
+ def setUpscalers(req: dict):
54
+ reqDict = vars(req)
55
+ reqDict['extras_upscaler_1'] = reqDict.pop('upscaler_1', None)
56
+ reqDict['extras_upscaler_2'] = reqDict.pop('upscaler_2', None)
57
+ return reqDict
58
+
59
+
60
+ def verify_url(url):
61
+ """Returns True if the url refers to a global resource."""
62
+
63
+ import socket
64
+ from urllib.parse import urlparse
65
+ try:
66
+ parsed_url = urlparse(url)
67
+ domain_name = parsed_url.netloc
68
+ host = socket.gethostbyname_ex(domain_name)
69
+ for ip in host[2]:
70
+ ip_addr = ipaddress.ip_address(ip)
71
+ if not ip_addr.is_global:
72
+ return False
73
+ except Exception:
74
+ return False
75
+
76
+ return True
77
+
78
+
79
+ def decode_base64_to_image(encoding):
80
+ if encoding.startswith("http://") or encoding.startswith("https://"):
81
+ if not opts.api_enable_requests:
82
+ raise HTTPException(status_code=500, detail="Requests not allowed")
83
+
84
+ if opts.api_forbid_local_requests and not verify_url(encoding):
85
+ raise HTTPException(status_code=500, detail="Request to local resource not allowed")
86
+
87
+ headers = {'user-agent': opts.api_useragent} if opts.api_useragent else {}
88
+ response = requests.get(encoding, timeout=30, headers=headers)
89
+ try:
90
+ image = Image.open(BytesIO(response.content))
91
+ return image
92
+ except Exception as e:
93
+ raise HTTPException(status_code=500, detail="Invalid image url") from e
94
+
95
+ if encoding.startswith("data:image/"):
96
+ encoding = encoding.split(";")[1].split(",")[1]
97
+ try:
98
+ image = Image.open(BytesIO(base64.b64decode(encoding)))
99
+ return image
100
+ except Exception as e:
101
+ raise HTTPException(status_code=500, detail="Invalid encoded image") from e
102
+
103
+
104
+ def encode_pil_to_base64(image):
105
+ with io.BytesIO() as output_bytes:
106
+
107
+ if opts.samples_format.lower() == 'png':
108
+ use_metadata = False
109
+ metadata = PngImagePlugin.PngInfo()
110
+ for key, value in image.info.items():
111
+ if isinstance(key, str) and isinstance(value, str):
112
+ metadata.add_text(key, value)
113
+ use_metadata = True
114
+ image.save(output_bytes, format="PNG", pnginfo=(metadata if use_metadata else None), quality=opts.jpeg_quality)
115
+
116
+ elif opts.samples_format.lower() in ("jpg", "jpeg", "webp"):
117
+ if image.mode == "RGBA":
118
+ image = image.convert("RGB")
119
+ parameters = image.info.get('parameters', None)
120
+ exif_bytes = piexif.dump({
121
+ "Exif": { piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(parameters or "", encoding="unicode") }
122
+ })
123
+ if opts.samples_format.lower() in ("jpg", "jpeg"):
124
+ image.save(output_bytes, format="JPEG", exif = exif_bytes, quality=opts.jpeg_quality)
125
+ else:
126
+ image.save(output_bytes, format="WEBP", exif = exif_bytes, quality=opts.jpeg_quality)
127
+
128
+ else:
129
+ raise HTTPException(status_code=500, detail="Invalid image format")
130
+
131
+ bytes_data = output_bytes.getvalue()
132
+
133
+ return base64.b64encode(bytes_data)
134
+
135
+
136
+ def api_middleware(app: FastAPI):
137
+ rich_available = False
138
+ try:
139
+ if os.environ.get('WEBUI_RICH_EXCEPTIONS', None) is not None:
140
+ import anyio # importing just so it can be placed on silent list
141
+ import starlette # importing just so it can be placed on silent list
142
+ from rich.console import Console
143
+ console = Console()
144
+ rich_available = True
145
+ except Exception:
146
+ pass
147
+
148
+ @app.middleware("http")
149
+ async def log_and_time(req: Request, call_next):
150
+ ts = time.time()
151
+ res: Response = await call_next(req)
152
+ duration = str(round(time.time() - ts, 4))
153
+ res.headers["X-Process-Time"] = duration
154
+ endpoint = req.scope.get('path', 'err')
155
+ if shared.cmd_opts.api_log and endpoint.startswith('/sdapi'):
156
+ print('API {t} {code} {prot}/{ver} {method} {endpoint} {cli} {duration}'.format(
157
+ t=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"),
158
+ code=res.status_code,
159
+ ver=req.scope.get('http_version', '0.0'),
160
+ cli=req.scope.get('client', ('0:0.0.0', 0))[0],
161
+ prot=req.scope.get('scheme', 'err'),
162
+ method=req.scope.get('method', 'err'),
163
+ endpoint=endpoint,
164
+ duration=duration,
165
+ ))
166
+ return res
167
+
168
+ def handle_exception(request: Request, e: Exception):
169
+ err = {
170
+ "error": type(e).__name__,
171
+ "detail": vars(e).get('detail', ''),
172
+ "body": vars(e).get('body', ''),
173
+ "errors": str(e),
174
+ }
175
+ if not isinstance(e, HTTPException): # do not print backtrace on known httpexceptions
176
+ message = f"API error: {request.method}: {request.url} {err}"
177
+ if rich_available:
178
+ print(message)
179
+ console.print_exception(show_locals=True, max_frames=2, extra_lines=1, suppress=[anyio, starlette], word_wrap=False, width=min([console.width, 200]))
180
+ else:
181
+ errors.report(message, exc_info=True)
182
+ return JSONResponse(status_code=vars(e).get('status_code', 500), content=jsonable_encoder(err))
183
+
184
+ @app.middleware("http")
185
+ async def exception_handling(request: Request, call_next):
186
+ try:
187
+ return await call_next(request)
188
+ except Exception as e:
189
+ return handle_exception(request, e)
190
+
191
+ @app.exception_handler(Exception)
192
+ async def fastapi_exception_handler(request: Request, e: Exception):
193
+ return handle_exception(request, e)
194
+
195
+ @app.exception_handler(HTTPException)
196
+ async def http_exception_handler(request: Request, e: HTTPException):
197
+ return handle_exception(request, e)
198
+
199
+
200
+ class Api:
201
+ def __init__(self, app: FastAPI, queue_lock: Lock):
202
+ if shared.cmd_opts.api_auth:
203
+ self.credentials = {}
204
+ for auth in shared.cmd_opts.api_auth.split(","):
205
+ user, password = auth.split(":")
206
+ self.credentials[user] = password
207
+
208
+ self.router = APIRouter()
209
+ self.app = app
210
+ self.queue_lock = queue_lock
211
+ api_middleware(self.app)
212
+ self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=models.TextToImageResponse)
213
+ self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=models.ImageToImageResponse)
214
+ self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=models.ExtrasSingleImageResponse)
215
+ self.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=models.ExtrasBatchImagesResponse)
216
+ self.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=models.PNGInfoResponse)
217
+ self.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=models.ProgressResponse)
218
+ self.add_api_route("/sdapi/v1/interrogate", self.interrogateapi, methods=["POST"])
219
+ self.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"])
220
+ self.add_api_route("/sdapi/v1/skip", self.skip, methods=["POST"])
221
+ self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=models.OptionsModel)
222
+ self.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"])
223
+ self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=models.FlagsModel)
224
+ self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[models.SamplerItem])
225
+ self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[models.UpscalerItem])
226
+ self.add_api_route("/sdapi/v1/latent-upscale-modes", self.get_latent_upscale_modes, methods=["GET"], response_model=List[models.LatentUpscalerModeItem])
227
+ self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[models.SDModelItem])
228
+ self.add_api_route("/sdapi/v1/sd-vae", self.get_sd_vaes, methods=["GET"], response_model=List[models.SDVaeItem])
229
+ self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[models.HypernetworkItem])
230
+ self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[models.FaceRestorerItem])
231
+ self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[models.RealesrganItem])
232
+ self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[models.PromptStyleItem])
233
+ self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=models.EmbeddingsResponse)
234
+ self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"])
235
+ self.add_api_route("/sdapi/v1/refresh-vae", self.refresh_vae, methods=["POST"])
236
+ self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=models.CreateResponse)
237
+ self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=models.CreateResponse)
238
+ self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=models.PreprocessResponse)
239
+ self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=models.TrainResponse)
240
+ self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=models.TrainResponse)
241
+ self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=models.MemoryResponse)
242
+ self.add_api_route("/sdapi/v1/unload-checkpoint", self.unloadapi, methods=["POST"])
243
+ self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"])
244
+ self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=models.ScriptsList)
245
+ self.add_api_route("/sdapi/v1/script-info", self.get_script_info, methods=["GET"], response_model=List[models.ScriptInfo])
246
+
247
+ if shared.cmd_opts.api_server_stop:
248
+ self.add_api_route("/sdapi/v1/server-kill", self.kill_webui, methods=["POST"])
249
+ self.add_api_route("/sdapi/v1/server-restart", self.restart_webui, methods=["POST"])
250
+ self.add_api_route("/sdapi/v1/server-stop", self.stop_webui, methods=["POST"])
251
+
252
+ self.default_script_arg_txt2img = []
253
+ self.default_script_arg_img2img = []
254
+
255
+ def add_api_route(self, path: str, endpoint, **kwargs):
256
+ if shared.cmd_opts.api_auth:
257
+ return self.app.add_api_route(path, endpoint, dependencies=[Depends(self.auth)], **kwargs)
258
+ return self.app.add_api_route(path, endpoint, **kwargs)
259
+
260
+ def auth(self, credentials: HTTPBasicCredentials = Depends(HTTPBasic())):
261
+ if credentials.username in self.credentials:
262
+ if compare_digest(credentials.password, self.credentials[credentials.username]):
263
+ return True
264
+
265
+ raise HTTPException(status_code=401, detail="Incorrect username or password", headers={"WWW-Authenticate": "Basic"})
266
+
267
+ def get_selectable_script(self, script_name, script_runner):
268
+ if script_name is None or script_name == "":
269
+ return None, None
270
+
271
+ script_idx = script_name_to_index(script_name, script_runner.selectable_scripts)
272
+ script = script_runner.selectable_scripts[script_idx]
273
+ return script, script_idx
274
+
275
+ def get_scripts_list(self):
276
+ t2ilist = [script.name for script in scripts.scripts_txt2img.scripts if script.name is not None]
277
+ i2ilist = [script.name for script in scripts.scripts_img2img.scripts if script.name is not None]
278
+
279
+ return models.ScriptsList(txt2img=t2ilist, img2img=i2ilist)
280
+
281
+ def get_script_info(self):
282
+ res = []
283
+
284
+ for script_list in [scripts.scripts_txt2img.scripts, scripts.scripts_img2img.scripts]:
285
+ res += [script.api_info for script in script_list if script.api_info is not None]
286
+
287
+ return res
288
+
289
+ def get_script(self, script_name, script_runner):
290
+ if script_name is None or script_name == "":
291
+ return None, None
292
+
293
+ script_idx = script_name_to_index(script_name, script_runner.scripts)
294
+ return script_runner.scripts[script_idx]
295
+
296
+ def init_default_script_args(self, script_runner):
297
+ #find max idx from the scripts in runner and generate a none array to init script_args
298
+ last_arg_index = 1
299
+ for script in script_runner.scripts:
300
+ if last_arg_index < script.args_to:
301
+ last_arg_index = script.args_to
302
+ # None everywhere except position 0 to initialize script args
303
+ script_args = [None]*last_arg_index
304
+ script_args[0] = 0
305
+
306
+ # get default values
307
+ with gr.Blocks(): # will throw errors calling ui function without this
308
+ for script in script_runner.scripts:
309
+ if script.ui(script.is_img2img):
310
+ ui_default_values = []
311
+ for elem in script.ui(script.is_img2img):
312
+ ui_default_values.append(elem.value)
313
+ script_args[script.args_from:script.args_to] = ui_default_values
314
+ return script_args
315
+
316
+ def init_script_args(self, request, default_script_args, selectable_scripts, selectable_idx, script_runner):
317
+ script_args = default_script_args.copy()
318
+ # position 0 in script_arg is the idx+1 of the selectable script that is going to be run when using scripts.scripts_*2img.run()
319
+ if selectable_scripts:
320
+ script_args[selectable_scripts.args_from:selectable_scripts.args_to] = request.script_args
321
+ script_args[0] = selectable_idx + 1
322
+
323
+ # Now check for always on scripts
324
+ if request.alwayson_scripts:
325
+ for alwayson_script_name in request.alwayson_scripts.keys():
326
+ alwayson_script = self.get_script(alwayson_script_name, script_runner)
327
+ if alwayson_script is None:
328
+ raise HTTPException(status_code=422, detail=f"always on script {alwayson_script_name} not found")
329
+ # Selectable script in always on script param check
330
+ if alwayson_script.alwayson is False:
331
+ raise HTTPException(status_code=422, detail="Cannot have a selectable script in the always on scripts params")
332
+ # always on script with no arg should always run so you don't really need to add them to the requests
333
+ if "args" in request.alwayson_scripts[alwayson_script_name]:
334
+ # min between arg length in scriptrunner and arg length in the request
335
+ for idx in range(0, min((alwayson_script.args_to - alwayson_script.args_from), len(request.alwayson_scripts[alwayson_script_name]["args"]))):
336
+ script_args[alwayson_script.args_from + idx] = request.alwayson_scripts[alwayson_script_name]["args"][idx]
337
+ return script_args
338
+
339
+ def text2imgapi(self, txt2imgreq: models.StableDiffusionTxt2ImgProcessingAPI):
340
+ script_runner = scripts.scripts_txt2img
341
+ if not script_runner.scripts:
342
+ script_runner.initialize_scripts(False)
343
+ ui.create_ui()
344
+ if not self.default_script_arg_txt2img:
345
+ self.default_script_arg_txt2img = self.init_default_script_args(script_runner)
346
+ selectable_scripts, selectable_script_idx = self.get_selectable_script(txt2imgreq.script_name, script_runner)
347
+
348
+ populate = txt2imgreq.copy(update={ # Override __init__ params
349
+ "sampler_name": validate_sampler_name(txt2imgreq.sampler_name or txt2imgreq.sampler_index),
350
+ "do_not_save_samples": not txt2imgreq.save_images,
351
+ "do_not_save_grid": not txt2imgreq.save_images,
352
+ })
353
+ if populate.sampler_name:
354
+ populate.sampler_index = None # prevent a warning later on
355
+
356
+ args = vars(populate)
357
+ args.pop('script_name', None)
358
+ args.pop('script_args', None) # will refeed them to the pipeline directly after initializing them
359
+ args.pop('alwayson_scripts', None)
360
+
361
+ script_args = self.init_script_args(txt2imgreq, self.default_script_arg_txt2img, selectable_scripts, selectable_script_idx, script_runner)
362
+
363
+ send_images = args.pop('send_images', True)
364
+ args.pop('save_images', None)
365
+
366
+ with self.queue_lock:
367
+ with closing(StableDiffusionProcessingTxt2Img(sd_model=shared.sd_model, **args)) as p:
368
+ p.is_api = True
369
+ p.scripts = script_runner
370
+ p.outpath_grids = opts.outdir_txt2img_grids
371
+ p.outpath_samples = opts.outdir_txt2img_samples
372
+
373
+ try:
374
+ shared.state.begin(job="scripts_txt2img")
375
+ if selectable_scripts is not None:
376
+ p.script_args = script_args
377
+ processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here
378
+ else:
379
+ p.script_args = tuple(script_args) # Need to pass args as tuple here
380
+ processed = process_images(p)
381
+ finally:
382
+ shared.state.end()
383
+ shared.total_tqdm.clear()
384
+
385
+ b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []
386
+
387
+ return models.TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js())
388
+
389
+ def img2imgapi(self, img2imgreq: models.StableDiffusionImg2ImgProcessingAPI):
390
+ init_images = img2imgreq.init_images
391
+ if init_images is None:
392
+ raise HTTPException(status_code=404, detail="Init image not found")
393
+
394
+ mask = img2imgreq.mask
395
+ if mask:
396
+ mask = decode_base64_to_image(mask)
397
+
398
+ script_runner = scripts.scripts_img2img
399
+ if not script_runner.scripts:
400
+ script_runner.initialize_scripts(True)
401
+ ui.create_ui()
402
+ if not self.default_script_arg_img2img:
403
+ self.default_script_arg_img2img = self.init_default_script_args(script_runner)
404
+ selectable_scripts, selectable_script_idx = self.get_selectable_script(img2imgreq.script_name, script_runner)
405
+
406
+ populate = img2imgreq.copy(update={ # Override __init__ params
407
+ "sampler_name": validate_sampler_name(img2imgreq.sampler_name or img2imgreq.sampler_index),
408
+ "do_not_save_samples": not img2imgreq.save_images,
409
+ "do_not_save_grid": not img2imgreq.save_images,
410
+ "mask": mask,
411
+ })
412
+ if populate.sampler_name:
413
+ populate.sampler_index = None # prevent a warning later on
414
+
415
+ args = vars(populate)
416
+ args.pop('include_init_images', None) # this is meant to be done by "exclude": True in model, but it's for a reason that I cannot determine.
417
+ args.pop('script_name', None)
418
+ args.pop('script_args', None) # will refeed them to the pipeline directly after initializing them
419
+ args.pop('alwayson_scripts', None)
420
+
421
+ script_args = self.init_script_args(img2imgreq, self.default_script_arg_img2img, selectable_scripts, selectable_script_idx, script_runner)
422
+
423
+ send_images = args.pop('send_images', True)
424
+ args.pop('save_images', None)
425
+
426
+ with self.queue_lock:
427
+ with closing(StableDiffusionProcessingImg2Img(sd_model=shared.sd_model, **args)) as p:
428
+ p.init_images = [decode_base64_to_image(x) for x in init_images]
429
+ p.is_api = True
430
+ p.scripts = script_runner
431
+ p.outpath_grids = opts.outdir_img2img_grids
432
+ p.outpath_samples = opts.outdir_img2img_samples
433
+
434
+ try:
435
+ shared.state.begin(job="scripts_img2img")
436
+ if selectable_scripts is not None:
437
+ p.script_args = script_args
438
+ processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here
439
+ else:
440
+ p.script_args = tuple(script_args) # Need to pass args as tuple here
441
+ processed = process_images(p)
442
+ finally:
443
+ shared.state.end()
444
+ shared.total_tqdm.clear()
445
+
446
+ b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []
447
+
448
+ if not img2imgreq.include_init_images:
449
+ img2imgreq.init_images = None
450
+ img2imgreq.mask = None
451
+
452
+ return models.ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.js())
453
+
454
+ def extras_single_image_api(self, req: models.ExtrasSingleImageRequest):
455
+ reqDict = setUpscalers(req)
456
+
457
+ reqDict['image'] = decode_base64_to_image(reqDict['image'])
458
+
459
+ with self.queue_lock:
460
+ result = postprocessing.run_extras(extras_mode=0, image_folder="", input_dir="", output_dir="", save_output=False, **reqDict)
461
+
462
+ return models.ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info=result[1])
463
+
464
+ def extras_batch_images_api(self, req: models.ExtrasBatchImagesRequest):
465
+ reqDict = setUpscalers(req)
466
+
467
+ image_list = reqDict.pop('imageList', [])
468
+ image_folder = [decode_base64_to_image(x.data) for x in image_list]
469
+
470
+ with self.queue_lock:
471
+ result = postprocessing.run_extras(extras_mode=1, image_folder=image_folder, image="", input_dir="", output_dir="", save_output=False, **reqDict)
472
+
473
+ return models.ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1])
474
+
475
+ def pnginfoapi(self, req: models.PNGInfoRequest):
476
+ if(not req.image.strip()):
477
+ return models.PNGInfoResponse(info="")
478
+
479
+ image = decode_base64_to_image(req.image.strip())
480
+ if image is None:
481
+ return models.PNGInfoResponse(info="")
482
+
483
+ geninfo, items = images.read_info_from_image(image)
484
+ if geninfo is None:
485
+ geninfo = ""
486
+
487
+ items = {**{'parameters': geninfo}, **items}
488
+
489
+ return models.PNGInfoResponse(info=geninfo, items=items)
490
+
491
+ def progressapi(self, req: models.ProgressRequest = Depends()):
492
+ # copy from check_progress_call of ui.py
493
+
494
+ if shared.state.job_count == 0:
495
+ return models.ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict(), textinfo=shared.state.textinfo)
496
+
497
+ # avoid dividing zero
498
+ progress = 0.01
499
+
500
+ if shared.state.job_count > 0:
501
+ progress += shared.state.job_no / shared.state.job_count
502
+ if shared.state.sampling_steps > 0:
503
+ progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps
504
+
505
+ time_since_start = time.time() - shared.state.time_start
506
+ eta = (time_since_start/progress)
507
+ eta_relative = eta-time_since_start
508
+
509
+ progress = min(progress, 1)
510
+
511
+ shared.state.set_current_image()
512
+
513
+ current_image = None
514
+ if shared.state.current_image and not req.skip_current_image:
515
+ current_image = encode_pil_to_base64(shared.state.current_image)
516
+
517
+ return models.ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo)
518
+
519
+ def interrogateapi(self, interrogatereq: models.InterrogateRequest):
520
+ image_b64 = interrogatereq.image
521
+ if image_b64 is None:
522
+ raise HTTPException(status_code=404, detail="Image not found")
523
+
524
+ img = decode_base64_to_image(image_b64)
525
+ img = img.convert('RGB')
526
+
527
+ # Override object param
528
+ with self.queue_lock:
529
+ if interrogatereq.model == "clip":
530
+ processed = shared.interrogator.interrogate(img)
531
+ elif interrogatereq.model == "deepdanbooru":
532
+ processed = deepbooru.model.tag(img)
533
+ else:
534
+ raise HTTPException(status_code=404, detail="Model not found")
535
+
536
+ return models.InterrogateResponse(caption=processed)
537
+
538
+ def interruptapi(self):
539
+ shared.state.interrupt()
540
+
541
+ return {}
542
+
543
+ def unloadapi(self):
544
+ unload_model_weights()
545
+
546
+ return {}
547
+
548
+ def reloadapi(self):
549
+ reload_model_weights()
550
+
551
+ return {}
552
+
553
+ def skip(self):
554
+ shared.state.skip()
555
+
556
+ def get_config(self):
557
+ options = {}
558
+ for key in shared.opts.data.keys():
559
+ metadata = shared.opts.data_labels.get(key)
560
+ if(metadata is not None):
561
+ options.update({key: shared.opts.data.get(key, shared.opts.data_labels.get(key).default)})
562
+ else:
563
+ options.update({key: shared.opts.data.get(key, None)})
564
+
565
+ return options
566
+
567
+ def set_config(self, req: Dict[str, Any]):
568
+ checkpoint_name = req.get("sd_model_checkpoint", None)
569
+ if checkpoint_name is not None and checkpoint_name not in checkpoint_aliases:
570
+ raise RuntimeError(f"model {checkpoint_name!r} not found")
571
+
572
+ for k, v in req.items():
573
+ shared.opts.set(k, v, is_api=True)
574
+
575
+ shared.opts.save(shared.config_filename)
576
+ return
577
+
578
+ def get_cmd_flags(self):
579
+ return vars(shared.cmd_opts)
580
+
581
+ def get_samplers(self):
582
+ return [{"name": sampler[0], "aliases":sampler[2], "options":sampler[3]} for sampler in sd_samplers.all_samplers]
583
+
584
+ def get_upscalers(self):
585
+ return [
586
+ {
587
+ "name": upscaler.name,
588
+ "model_name": upscaler.scaler.model_name,
589
+ "model_path": upscaler.data_path,
590
+ "model_url": None,
591
+ "scale": upscaler.scale,
592
+ }
593
+ for upscaler in shared.sd_upscalers
594
+ ]
595
+
596
+ def get_latent_upscale_modes(self):
597
+ return [
598
+ {
599
+ "name": upscale_mode,
600
+ }
601
+ for upscale_mode in [*(shared.latent_upscale_modes or {})]
602
+ ]
603
+
604
+ def get_sd_models(self):
605
+ import modules.sd_models as sd_models
606
+ return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in sd_models.checkpoints_list.values()]
607
+
608
+ def get_sd_vaes(self):
609
+ import modules.sd_vae as sd_vae
610
+ return [{"model_name": x, "filename": sd_vae.vae_dict[x]} for x in sd_vae.vae_dict.keys()]
611
+
612
+ def get_hypernetworks(self):
613
+ return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks]
614
+
615
+ def get_face_restorers(self):
616
+ return [{"name":x.name(), "cmd_dir": getattr(x, "cmd_dir", None)} for x in shared.face_restorers]
617
+
618
+ def get_realesrgan_models(self):
619
+ return [{"name":x.name,"path":x.data_path, "scale":x.scale} for x in get_realesrgan_models(None)]
620
+
621
+ def get_prompt_styles(self):
622
+ styleList = []
623
+ for k in shared.prompt_styles.styles:
624
+ style = shared.prompt_styles.styles[k]
625
+ styleList.append({"name":style[0], "prompt": style[1], "negative_prompt": style[2]})
626
+
627
+ return styleList
628
+
629
+ def get_embeddings(self):
630
+ db = sd_hijack.model_hijack.embedding_db
631
+
632
+ def convert_embedding(embedding):
633
+ return {
634
+ "step": embedding.step,
635
+ "sd_checkpoint": embedding.sd_checkpoint,
636
+ "sd_checkpoint_name": embedding.sd_checkpoint_name,
637
+ "shape": embedding.shape,
638
+ "vectors": embedding.vectors,
639
+ }
640
+
641
+ def convert_embeddings(embeddings):
642
+ return {embedding.name: convert_embedding(embedding) for embedding in embeddings.values()}
643
+
644
+ return {
645
+ "loaded": convert_embeddings(db.word_embeddings),
646
+ "skipped": convert_embeddings(db.skipped_embeddings),
647
+ }
648
+
649
+ def refresh_checkpoints(self):
650
+ with self.queue_lock:
651
+ shared.refresh_checkpoints()
652
+
653
+ def refresh_vae(self):
654
+ with self.queue_lock:
655
+ shared_items.refresh_vae_list()
656
+
657
+ def create_embedding(self, args: dict):
658
+ try:
659
+ shared.state.begin(job="create_embedding")
660
+ filename = create_embedding(**args) # create empty embedding
661
+ sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() # reload embeddings so new one can be immediately used
662
+ return models.CreateResponse(info=f"create embedding filename: {filename}")
663
+ except AssertionError as e:
664
+ return models.TrainResponse(info=f"create embedding error: {e}")
665
+ finally:
666
+ shared.state.end()
667
+
668
+
669
+ def create_hypernetwork(self, args: dict):
670
+ try:
671
+ shared.state.begin(job="create_hypernetwork")
672
+ filename = create_hypernetwork(**args) # create empty embedding
673
+ return models.CreateResponse(info=f"create hypernetwork filename: {filename}")
674
+ except AssertionError as e:
675
+ return models.TrainResponse(info=f"create hypernetwork error: {e}")
676
+ finally:
677
+ shared.state.end()
678
+
679
+ def preprocess(self, args: dict):
680
+ try:
681
+ shared.state.begin(job="preprocess")
682
+ preprocess(**args) # quick operation unless blip/booru interrogation is enabled
683
+ shared.state.end()
684
+ return models.PreprocessResponse(info='preprocess complete')
685
+ except KeyError as e:
686
+ return models.PreprocessResponse(info=f"preprocess error: invalid token: {e}")
687
+ except Exception as e:
688
+ return models.PreprocessResponse(info=f"preprocess error: {e}")
689
+ finally:
690
+ shared.state.end()
691
+
692
+ def train_embedding(self, args: dict):
693
+ try:
694
+ shared.state.begin(job="train_embedding")
695
+ apply_optimizations = shared.opts.training_xattention_optimizations
696
+ error = None
697
+ filename = ''
698
+ if not apply_optimizations:
699
+ sd_hijack.undo_optimizations()
700
+ try:
701
+ embedding, filename = train_embedding(**args) # can take a long time to complete
702
+ except Exception as e:
703
+ error = e
704
+ finally:
705
+ if not apply_optimizations:
706
+ sd_hijack.apply_optimizations()
707
+ return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
708
+ except Exception as msg:
709
+ return models.TrainResponse(info=f"train embedding error: {msg}")
710
+ finally:
711
+ shared.state.end()
712
+
713
+ def train_hypernetwork(self, args: dict):
714
+ try:
715
+ shared.state.begin(job="train_hypernetwork")
716
+ shared.loaded_hypernetworks = []
717
+ apply_optimizations = shared.opts.training_xattention_optimizations
718
+ error = None
719
+ filename = ''
720
+ if not apply_optimizations:
721
+ sd_hijack.undo_optimizations()
722
+ try:
723
+ hypernetwork, filename = train_hypernetwork(**args)
724
+ except Exception as e:
725
+ error = e
726
+ finally:
727
+ shared.sd_model.cond_stage_model.to(devices.device)
728
+ shared.sd_model.first_stage_model.to(devices.device)
729
+ if not apply_optimizations:
730
+ sd_hijack.apply_optimizations()
731
+ shared.state.end()
732
+ return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
733
+ except Exception as exc:
734
+ return models.TrainResponse(info=f"train embedding error: {exc}")
735
+ finally:
736
+ shared.state.end()
737
+
738
+ def get_memory(self):
739
+ try:
740
+ import os
741
+ import psutil
742
+ process = psutil.Process(os.getpid())
743
+ res = process.memory_info() # only rss is cross-platform guaranteed so we dont rely on other values
744
+ ram_total = 100 * res.rss / process.memory_percent() # and total memory is calculated as actual value is not cross-platform safe
745
+ ram = { 'free': ram_total - res.rss, 'used': res.rss, 'total': ram_total }
746
+ except Exception as err:
747
+ ram = { 'error': f'{err}' }
748
+ try:
749
+ import torch
750
+ if torch.cuda.is_available():
751
+ s = torch.cuda.mem_get_info()
752
+ system = { 'free': s[0], 'used': s[1] - s[0], 'total': s[1] }
753
+ s = dict(torch.cuda.memory_stats(shared.device))
754
+ allocated = { 'current': s['allocated_bytes.all.current'], 'peak': s['allocated_bytes.all.peak'] }
755
+ reserved = { 'current': s['reserved_bytes.all.current'], 'peak': s['reserved_bytes.all.peak'] }
756
+ active = { 'current': s['active_bytes.all.current'], 'peak': s['active_bytes.all.peak'] }
757
+ inactive = { 'current': s['inactive_split_bytes.all.current'], 'peak': s['inactive_split_bytes.all.peak'] }
758
+ warnings = { 'retries': s['num_alloc_retries'], 'oom': s['num_ooms'] }
759
+ cuda = {
760
+ 'system': system,
761
+ 'active': active,
762
+ 'allocated': allocated,
763
+ 'reserved': reserved,
764
+ 'inactive': inactive,
765
+ 'events': warnings,
766
+ }
767
+ else:
768
+ cuda = {'error': 'unavailable'}
769
+ except Exception as err:
770
+ cuda = {'error': f'{err}'}
771
+ return models.MemoryResponse(ram=ram, cuda=cuda)
772
+
773
+ def launch(self, server_name, port, root_path):
774
+ self.app.include_router(self.router)
775
+ uvicorn.run(self.app, host=server_name, port=port, timeout_keep_alive=shared.cmd_opts.timeout_keep_alive, root_path=root_path)
776
+
777
+ def kill_webui(self):
778
+ restart.stop_program()
779
+
780
+ def restart_webui(self):
781
+ if restart.is_restartable():
782
+ restart.restart_program()
783
+ return Response(status_code=501)
784
+
785
+ def stop_webui(request):
786
+ shared.state.server_command = "stop"
787
+ return Response("Stopping.")
788
+
modules/api/models.py ADDED
@@ -0,0 +1,313 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+
3
+ from pydantic import BaseModel, Field, create_model
4
+ from typing import Any, Optional
5
+ from typing_extensions import Literal
6
+ from inflection import underscore
7
+ from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img
8
+ from modules.shared import sd_upscalers, opts, parser
9
+ from typing import Dict, List
10
+
11
+ API_NOT_ALLOWED = [
12
+ "self",
13
+ "kwargs",
14
+ "sd_model",
15
+ "outpath_samples",
16
+ "outpath_grids",
17
+ "sampler_index",
18
+ # "do_not_save_samples",
19
+ # "do_not_save_grid",
20
+ "extra_generation_params",
21
+ "overlay_images",
22
+ "do_not_reload_embeddings",
23
+ "seed_enable_extras",
24
+ "prompt_for_display",
25
+ "sampler_noise_scheduler_override",
26
+ "ddim_discretize"
27
+ ]
28
+
29
+ class ModelDef(BaseModel):
30
+ """Assistance Class for Pydantic Dynamic Model Generation"""
31
+
32
+ field: str
33
+ field_alias: str
34
+ field_type: Any
35
+ field_value: Any
36
+ field_exclude: bool = False
37
+
38
+
39
+ class PydanticModelGenerator:
40
+ """
41
+ Takes in created classes and stubs them out in a way FastAPI/Pydantic is happy about:
42
+ source_data is a snapshot of the default values produced by the class
43
+ params are the names of the actual keys required by __init__
44
+ """
45
+
46
+ def __init__(
47
+ self,
48
+ model_name: str = None,
49
+ class_instance = None,
50
+ additional_fields = None,
51
+ ):
52
+ def field_type_generator(k, v):
53
+ field_type = v.annotation
54
+
55
+ if field_type == 'Image':
56
+ # images are sent as base64 strings via API
57
+ field_type = 'str'
58
+
59
+ return Optional[field_type]
60
+
61
+ def merge_class_params(class_):
62
+ all_classes = list(filter(lambda x: x is not object, inspect.getmro(class_)))
63
+ parameters = {}
64
+ for classes in all_classes:
65
+ parameters = {**parameters, **inspect.signature(classes.__init__).parameters}
66
+ return parameters
67
+
68
+ self._model_name = model_name
69
+ self._class_data = merge_class_params(class_instance)
70
+
71
+ self._model_def = [
72
+ ModelDef(
73
+ field=underscore(k),
74
+ field_alias=k,
75
+ field_type=field_type_generator(k, v),
76
+ field_value=None if isinstance(v.default, property) else v.default
77
+ )
78
+ for (k,v) in self._class_data.items() if k not in API_NOT_ALLOWED
79
+ ]
80
+
81
+ for fields in additional_fields:
82
+ self._model_def.append(ModelDef(
83
+ field=underscore(fields["key"]),
84
+ field_alias=fields["key"],
85
+ field_type=fields["type"],
86
+ field_value=fields["default"],
87
+ field_exclude=fields["exclude"] if "exclude" in fields else False))
88
+
89
+ def generate_model(self):
90
+ """
91
+ Creates a pydantic BaseModel
92
+ from the json and overrides provided at initialization
93
+ """
94
+ fields = {
95
+ d.field: (d.field_type, Field(default=d.field_value, alias=d.field_alias, exclude=d.field_exclude)) for d in self._model_def
96
+ }
97
+ DynamicModel = create_model(self._model_name, **fields)
98
+ DynamicModel.__config__.allow_population_by_field_name = True
99
+ DynamicModel.__config__.allow_mutation = True
100
+ return DynamicModel
101
+
102
+ StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator(
103
+ "StableDiffusionProcessingTxt2Img",
104
+ StableDiffusionProcessingTxt2Img,
105
+ [
106
+ {"key": "sampler_index", "type": str, "default": "Euler"},
107
+ {"key": "script_name", "type": str, "default": None},
108
+ {"key": "script_args", "type": list, "default": []},
109
+ {"key": "send_images", "type": bool, "default": True},
110
+ {"key": "save_images", "type": bool, "default": False},
111
+ {"key": "alwayson_scripts", "type": dict, "default": {}},
112
+ ]
113
+ ).generate_model()
114
+
115
+ StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator(
116
+ "StableDiffusionProcessingImg2Img",
117
+ StableDiffusionProcessingImg2Img,
118
+ [
119
+ {"key": "sampler_index", "type": str, "default": "Euler"},
120
+ {"key": "init_images", "type": list, "default": None},
121
+ {"key": "denoising_strength", "type": float, "default": 0.75},
122
+ {"key": "mask", "type": str, "default": None},
123
+ {"key": "include_init_images", "type": bool, "default": False, "exclude" : True},
124
+ {"key": "script_name", "type": str, "default": None},
125
+ {"key": "script_args", "type": list, "default": []},
126
+ {"key": "send_images", "type": bool, "default": True},
127
+ {"key": "save_images", "type": bool, "default": False},
128
+ {"key": "alwayson_scripts", "type": dict, "default": {}},
129
+ ]
130
+ ).generate_model()
131
+
132
+ class TextToImageResponse(BaseModel):
133
+ images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
134
+ parameters: dict
135
+ info: str
136
+
137
+ class ImageToImageResponse(BaseModel):
138
+ images: List[str] = Field(default=None, title="Image", description="The generated image in base64 format.")
139
+ parameters: dict
140
+ info: str
141
+
142
+ class ExtrasBaseRequest(BaseModel):
143
+ resize_mode: Literal[0, 1] = Field(default=0, title="Resize Mode", description="Sets the resize mode: 0 to upscale by upscaling_resize amount, 1 to upscale up to upscaling_resize_h x upscaling_resize_w.")
144
+ show_extras_results: bool = Field(default=True, title="Show results", description="Should the backend return the generated image?")
145
+ gfpgan_visibility: float = Field(default=0, title="GFPGAN Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of GFPGAN, values should be between 0 and 1.")
146
+ codeformer_visibility: float = Field(default=0, title="CodeFormer Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of CodeFormer, values should be between 0 and 1.")
147
+ codeformer_weight: float = Field(default=0, title="CodeFormer Weight", ge=0, le=1, allow_inf_nan=False, description="Sets the weight of CodeFormer, values should be between 0 and 1.")
148
+ upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=8, description="By how much to upscale the image, only used when resize_mode=0.")
149
+ upscaling_resize_w: int = Field(default=512, title="Target Width", ge=1, description="Target width for the upscaler to hit. Only used when resize_mode=1.")
150
+ upscaling_resize_h: int = Field(default=512, title="Target Height", ge=1, description="Target height for the upscaler to hit. Only used when resize_mode=1.")
151
+ upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the chosen size?")
152
+ upscaler_1: str = Field(default="None", title="Main upscaler", description=f"The name of the main upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
153
+ upscaler_2: str = Field(default="None", title="Secondary upscaler", description=f"The name of the secondary upscaler to use, it has to be one of this list: {' , '.join([x.name for x in sd_upscalers])}")
154
+ extras_upscaler_2_visibility: float = Field(default=0, title="Secondary upscaler visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of secondary upscaler, values should be between 0 and 1.")
155
+ upscale_first: bool = Field(default=False, title="Upscale first", description="Should the upscaler run before restoring faces?")
156
+
157
+ class ExtraBaseResponse(BaseModel):
158
+ html_info: str = Field(title="HTML info", description="A series of HTML tags containing the process info.")
159
+
160
+ class ExtrasSingleImageRequest(ExtrasBaseRequest):
161
+ image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.")
162
+
163
+ class ExtrasSingleImageResponse(ExtraBaseResponse):
164
+ image: str = Field(default=None, title="Image", description="The generated image in base64 format.")
165
+
166
+ class FileData(BaseModel):
167
+ data: str = Field(title="File data", description="Base64 representation of the file")
168
+ name: str = Field(title="File name")
169
+
170
+ class ExtrasBatchImagesRequest(ExtrasBaseRequest):
171
+ imageList: List[FileData] = Field(title="Images", description="List of images to work on. Must be Base64 strings")
172
+
173
+ class ExtrasBatchImagesResponse(ExtraBaseResponse):
174
+ images: List[str] = Field(title="Images", description="The generated images in base64 format.")
175
+
176
+ class PNGInfoRequest(BaseModel):
177
+ image: str = Field(title="Image", description="The base64 encoded PNG image")
178
+
179
+ class PNGInfoResponse(BaseModel):
180
+ info: str = Field(title="Image info", description="A string with the parameters used to generate the image")
181
+ items: dict = Field(title="Items", description="An object containing all the info the image had")
182
+
183
+ class ProgressRequest(BaseModel):
184
+ skip_current_image: bool = Field(default=False, title="Skip current image", description="Skip current image serialization")
185
+
186
+ class ProgressResponse(BaseModel):
187
+ progress: float = Field(title="Progress", description="The progress with a range of 0 to 1")
188
+ eta_relative: float = Field(title="ETA in secs")
189
+ state: dict = Field(title="State", description="The current state snapshot")
190
+ current_image: str = Field(default=None, title="Current image", description="The current image in base64 format. opts.show_progress_every_n_steps is required for this to work.")
191
+ textinfo: str = Field(default=None, title="Info text", description="Info text used by WebUI.")
192
+
193
+ class InterrogateRequest(BaseModel):
194
+ image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.")
195
+ model: str = Field(default="clip", title="Model", description="The interrogate model used.")
196
+
197
+ class InterrogateResponse(BaseModel):
198
+ caption: str = Field(default=None, title="Caption", description="The generated caption for the image.")
199
+
200
+ class TrainResponse(BaseModel):
201
+ info: str = Field(title="Train info", description="Response string from train embedding or hypernetwork task.")
202
+
203
+ class CreateResponse(BaseModel):
204
+ info: str = Field(title="Create info", description="Response string from create embedding or hypernetwork task.")
205
+
206
+ class PreprocessResponse(BaseModel):
207
+ info: str = Field(title="Preprocess info", description="Response string from preprocessing task.")
208
+
209
+ fields = {}
210
+ for key, metadata in opts.data_labels.items():
211
+ value = opts.data.get(key)
212
+ optType = opts.typemap.get(type(metadata.default), type(metadata.default)) if metadata.default else Any
213
+
214
+ if metadata is not None:
215
+ fields.update({key: (Optional[optType], Field(default=metadata.default, description=metadata.label))})
216
+ else:
217
+ fields.update({key: (Optional[optType], Field())})
218
+
219
+ OptionsModel = create_model("Options", **fields)
220
+
221
+ flags = {}
222
+ _options = vars(parser)['_option_string_actions']
223
+ for key in _options:
224
+ if(_options[key].dest != 'help'):
225
+ flag = _options[key]
226
+ _type = str
227
+ if _options[key].default is not None:
228
+ _type = type(_options[key].default)
229
+ flags.update({flag.dest: (_type, Field(default=flag.default, description=flag.help))})
230
+
231
+ FlagsModel = create_model("Flags", **flags)
232
+
233
+ class SamplerItem(BaseModel):
234
+ name: str = Field(title="Name")
235
+ aliases: List[str] = Field(title="Aliases")
236
+ options: Dict[str, str] = Field(title="Options")
237
+
238
+ class UpscalerItem(BaseModel):
239
+ name: str = Field(title="Name")
240
+ model_name: Optional[str] = Field(title="Model Name")
241
+ model_path: Optional[str] = Field(title="Path")
242
+ model_url: Optional[str] = Field(title="URL")
243
+ scale: Optional[float] = Field(title="Scale")
244
+
245
+ class LatentUpscalerModeItem(BaseModel):
246
+ name: str = Field(title="Name")
247
+
248
+ class SDModelItem(BaseModel):
249
+ title: str = Field(title="Title")
250
+ model_name: str = Field(title="Model Name")
251
+ hash: Optional[str] = Field(title="Short hash")
252
+ sha256: Optional[str] = Field(title="sha256 hash")
253
+ filename: str = Field(title="Filename")
254
+ config: Optional[str] = Field(title="Config file")
255
+
256
+ class SDVaeItem(BaseModel):
257
+ model_name: str = Field(title="Model Name")
258
+ filename: str = Field(title="Filename")
259
+
260
+ class HypernetworkItem(BaseModel):
261
+ name: str = Field(title="Name")
262
+ path: Optional[str] = Field(title="Path")
263
+
264
+ class FaceRestorerItem(BaseModel):
265
+ name: str = Field(title="Name")
266
+ cmd_dir: Optional[str] = Field(title="Path")
267
+
268
+ class RealesrganItem(BaseModel):
269
+ name: str = Field(title="Name")
270
+ path: Optional[str] = Field(title="Path")
271
+ scale: Optional[int] = Field(title="Scale")
272
+
273
+ class PromptStyleItem(BaseModel):
274
+ name: str = Field(title="Name")
275
+ prompt: Optional[str] = Field(title="Prompt")
276
+ negative_prompt: Optional[str] = Field(title="Negative Prompt")
277
+
278
+
279
+ class EmbeddingItem(BaseModel):
280
+ step: Optional[int] = Field(title="Step", description="The number of steps that were used to train this embedding, if available")
281
+ sd_checkpoint: Optional[str] = Field(title="SD Checkpoint", description="The hash of the checkpoint this embedding was trained on, if available")
282
+ sd_checkpoint_name: Optional[str] = Field(title="SD Checkpoint Name", description="The name of the checkpoint this embedding was trained on, if available. Note that this is the name that was used by the trainer; for a stable identifier, use `sd_checkpoint` instead")
283
+ shape: int = Field(title="Shape", description="The length of each individual vector in the embedding")
284
+ vectors: int = Field(title="Vectors", description="The number of vectors in the embedding")
285
+
286
+ class EmbeddingsResponse(BaseModel):
287
+ loaded: Dict[str, EmbeddingItem] = Field(title="Loaded", description="Embeddings loaded for the current model")
288
+ skipped: Dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)")
289
+
290
+ class MemoryResponse(BaseModel):
291
+ ram: dict = Field(title="RAM", description="System memory stats")
292
+ cuda: dict = Field(title="CUDA", description="nVidia CUDA memory stats")
293
+
294
+
295
+ class ScriptsList(BaseModel):
296
+ txt2img: list = Field(default=None, title="Txt2img", description="Titles of scripts (txt2img)")
297
+ img2img: list = Field(default=None, title="Img2img", description="Titles of scripts (img2img)")
298
+
299
+
300
+ class ScriptArg(BaseModel):
301
+ label: str = Field(default=None, title="Label", description="Name of the argument in UI")
302
+ value: Optional[Any] = Field(default=None, title="Value", description="Default value of the argument")
303
+ minimum: Optional[Any] = Field(default=None, title="Minimum", description="Minimum allowed value for the argumentin UI")
304
+ maximum: Optional[Any] = Field(default=None, title="Minimum", description="Maximum allowed value for the argumentin UI")
305
+ step: Optional[Any] = Field(default=None, title="Minimum", description="Step for changing value of the argumentin UI")
306
+ choices: Optional[List[str]] = Field(default=None, title="Choices", description="Possible values for the argument")
307
+
308
+
309
+ class ScriptInfo(BaseModel):
310
+ name: str = Field(default=None, title="Name", description="Script name")
311
+ is_alwayson: bool = Field(default=None, title="IsAlwayson", description="Flag specifying whether this script is an alwayson script")
312
+ is_img2img: bool = Field(default=None, title="IsImg2img", description="Flag specifying whether this script is an img2img script")
313
+ args: List[ScriptArg] = Field(title="Arguments", description="List of script's arguments")
modules/cache.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ import os.path
4
+ import threading
5
+ import time
6
+
7
+ from modules.paths import data_path, script_path
8
+
9
+ cache_filename = os.environ.get('SD_WEBUI_CACHE_FILE', os.path.join(data_path, "cache.json"))
10
+ cache_data = None
11
+ cache_lock = threading.Lock()
12
+
13
+ dump_cache_after = None
14
+ dump_cache_thread = None
15
+
16
+
17
+ def dump_cache():
18
+ """
19
+ Marks cache for writing to disk. 5 seconds after no one else flags the cache for writing, it is written.
20
+ """
21
+
22
+ global dump_cache_after
23
+ global dump_cache_thread
24
+
25
+ def thread_func():
26
+ global dump_cache_after
27
+ global dump_cache_thread
28
+
29
+ while dump_cache_after is not None and time.time() < dump_cache_after:
30
+ time.sleep(1)
31
+
32
+ with cache_lock:
33
+ cache_filename_tmp = cache_filename + "-"
34
+ with open(cache_filename_tmp, "w", encoding="utf8") as file:
35
+ json.dump(cache_data, file, indent=4)
36
+
37
+ os.replace(cache_filename_tmp, cache_filename)
38
+
39
+ dump_cache_after = None
40
+ dump_cache_thread = None
41
+
42
+ with cache_lock:
43
+ dump_cache_after = time.time() + 5
44
+ if dump_cache_thread is None:
45
+ dump_cache_thread = threading.Thread(name='cache-writer', target=thread_func)
46
+ dump_cache_thread.start()
47
+
48
+
49
+ def cache(subsection):
50
+ """
51
+ Retrieves or initializes a cache for a specific subsection.
52
+
53
+ Parameters:
54
+ subsection (str): The subsection identifier for the cache.
55
+
56
+ Returns:
57
+ dict: The cache data for the specified subsection.
58
+ """
59
+
60
+ global cache_data
61
+
62
+ if cache_data is None:
63
+ with cache_lock:
64
+ if cache_data is None:
65
+ if not os.path.isfile(cache_filename):
66
+ cache_data = {}
67
+ else:
68
+ try:
69
+ with open(cache_filename, "r", encoding="utf8") as file:
70
+ cache_data = json.load(file)
71
+ except Exception:
72
+ os.replace(cache_filename, os.path.join(script_path, "tmp", "cache.json"))
73
+ print('[ERROR] issue occurred while trying to read cache.json; moving current cache to tmp/cache.json and creating a new cache')
74
+ cache_data = {}
75
+
76
+ s = cache_data.get(subsection, {})
77
+ cache_data[subsection] = s
78
+
79
+ return s
80
+
81
+
82
+ def cached_data_for_file(subsection, title, filename, func):
83
+ """
84
+ Retrieves or generates data for a specific file, using a caching mechanism.
85
+
86
+ Parameters:
87
+ subsection (str): The subsection of the cache to use.
88
+ title (str): The title of the data entry in the subsection of the cache.
89
+ filename (str): The path to the file to be checked for modifications.
90
+ func (callable): A function that generates the data if it is not available in the cache.
91
+
92
+ Returns:
93
+ dict or None: The cached or generated data, or None if data generation fails.
94
+
95
+ The `cached_data_for_file` function implements a caching mechanism for data stored in files.
96
+ It checks if the data associated with the given `title` is present in the cache and compares the
97
+ modification time of the file with the cached modification time. If the file has been modified,
98
+ the cache is considered invalid and the data is regenerated using the provided `func`.
99
+ Otherwise, the cached data is returned.
100
+
101
+ If the data generation fails, None is returned to indicate the failure. Otherwise, the generated
102
+ or cached data is returned as a dictionary.
103
+ """
104
+
105
+ existing_cache = cache(subsection)
106
+ ondisk_mtime = os.path.getmtime(filename)
107
+
108
+ entry = existing_cache.get(title)
109
+ if entry:
110
+ cached_mtime = entry.get("mtime", 0)
111
+ if ondisk_mtime > cached_mtime:
112
+ entry = None
113
+
114
+ if not entry or 'value' not in entry:
115
+ value = func()
116
+ if value is None:
117
+ return None
118
+
119
+ entry = {'mtime': ondisk_mtime, 'value': value}
120
+ existing_cache[title] = entry
121
+
122
+ dump_cache()
123
+
124
+ return entry['value']
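A minimal usage sketch of the cache above (the subsection, title and file path are hypothetical): cached_data_for_file() recomputes the value only when the file's mtime changes, and dump_cache() debounces disk writes so several updates within five seconds produce a single write of cache.json.

import os
from modules.cache import cached_data_for_file

def expensive_metadata(path):
    # stand-in for a costly computation such as hashing a checkpoint
    return {"size": os.path.getsize(path)}

value = cached_data_for_file(
    "example-metadata",            # subsection inside cache.json
    "my-model",                    # entry title within the subsection
    "models/my-model.ckpt",        # file whose mtime invalidates the entry
    lambda: expensive_metadata("models/my-model.ckpt"),
)
print(value)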
modules/call_queue.py ADDED
@@ -0,0 +1,118 @@
1
+ from functools import wraps
2
+ import html
3
+ import time
4
+
5
+ from modules import shared, progress, errors, devices, fifo_lock
6
+
7
+ queue_lock = fifo_lock.FIFOLock()
8
+
9
+
10
+ def wrap_queued_call(func):
11
+ def f(*args, **kwargs):
12
+ with queue_lock:
13
+ res = func(*args, **kwargs)
14
+
15
+ return res
16
+
17
+ return f
18
+
19
+
20
+ def wrap_gradio_gpu_call(func, extra_outputs=None):
21
+ @wraps(func)
22
+ def f(*args, **kwargs):
23
+
24
+ # if the first argument is a string that says "task(...)", it is treated as a job id
25
+ if args and type(args[0]) == str and args[0].startswith("task(") and args[0].endswith(")"):
26
+ id_task = args[0]
27
+ progress.add_task_to_queue(id_task)
28
+ else:
29
+ id_task = None
30
+
31
+ with queue_lock:
32
+ shared.state.begin(job=id_task)
33
+ progress.start_task(id_task)
34
+
35
+ try:
36
+ res = func(*args, **kwargs)
37
+ progress.record_results(id_task, res)
38
+ finally:
39
+ progress.finish_task(id_task)
40
+
41
+ shared.state.end()
42
+
43
+ return res
44
+
45
+ return wrap_gradio_call(f, extra_outputs=extra_outputs, add_stats=True)
46
+
47
+
48
+ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
49
+ @wraps(func)
50
+ def f(*args, extra_outputs_array=extra_outputs, **kwargs):
51
+ run_memmon = shared.opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled and add_stats
52
+ if run_memmon:
53
+ shared.mem_mon.monitor()
54
+ t = time.perf_counter()
55
+
56
+ try:
57
+ res = list(func(*args, **kwargs))
58
+ except Exception as e:
59
+ # When printing out our debug argument list,
60
+ # do not print out more than 128 KB of text
61
+ max_debug_str_len = 131072
62
+ message = "Error completing request"
63
+ arg_str = f"Arguments: {args} {kwargs}"[:max_debug_str_len]
64
+ if len(arg_str) > max_debug_str_len:
65
+ arg_str += f" (Argument list truncated at {max_debug_str_len}/{len(arg_str)} characters)"
66
+ errors.report(f"{message}\n{arg_str}", exc_info=True)
67
+
68
+ shared.state.job = ""
69
+ shared.state.job_count = 0
70
+
71
+ if extra_outputs_array is None:
72
+ extra_outputs_array = [None, '']
73
+
74
+ error_message = f'{type(e).__name__}: {e}'
75
+ res = extra_outputs_array + [f"<div class='error'>{html.escape(error_message)}</div>"]
76
+
77
+ devices.torch_gc()
78
+
79
+ shared.state.skipped = False
80
+ shared.state.interrupted = False
81
+ shared.state.job_count = 0
82
+
83
+ if not add_stats:
84
+ return tuple(res)
85
+
86
+ elapsed = time.perf_counter() - t
87
+ elapsed_m = int(elapsed // 60)
88
+ elapsed_s = elapsed % 60
89
+ elapsed_text = f"{elapsed_s:.1f} sec."
90
+ if elapsed_m > 0:
91
+ elapsed_text = f"{elapsed_m} min. "+elapsed_text
92
+
93
+ if run_memmon:
94
+ mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
95
+ active_peak = mem_stats['active_peak']
96
+ reserved_peak = mem_stats['reserved_peak']
97
+ sys_peak = mem_stats['system_peak']
98
+ sys_total = mem_stats['total']
99
+ sys_pct = sys_peak/max(sys_total, 1) * 100
100
+
101
+ tooltip_a = "Active: peak amount of video memory used during generation (excluding cached data)"
102
+ tooltip_r = "Reserved: total amount of video memory allocated by the Torch library"
103
+ tooltip_sys = "System: peak amount of video memory allocated by all running programs, out of total capacity"
104
+
105
+ text_a = f"<abbr title='{tooltip_a}'>A</abbr>: <span class='measurement'>{active_peak/1024:.2f} GB</span>"
106
+ text_r = f"<abbr title='{tooltip_r}'>R</abbr>: <span class='measurement'>{reserved_peak/1024:.2f} GB</span>"
107
+ text_sys = f"<abbr title='{tooltip_sys}'>Sys</abbr>: <span class='measurement'>{sys_peak/1024:.1f}/{sys_total/1024:g} GB</span> ({sys_pct:.1f}%)"
108
+
109
+ vram_html = f"<p class='vram'>{text_a}, <wbr>{text_r}, <wbr>{text_sys}</p>"
110
+ else:
111
+ vram_html = ''
112
+
113
+ # last item is always HTML
114
+ res[-1] += f"<div class='performance'><p class='time'>Time taken: <wbr><span class='measurement'>{elapsed_text}</span></p>{vram_html}</div>"
115
+
116
+ return tuple(res)
117
+
118
+ return f
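A short sketch of the intended use of wrap_queued_call (the wrapped function is hypothetical; the point is that every call acquires the FIFO queue_lock, so overlapping requests are serialized instead of running at the same time):

from modules.call_queue import wrap_queued_call

def generate(prompt):
    # stand-in for an expensive GPU-bound call
    return f"image for {prompt!r}"

# every call to queued_generate takes queue_lock first,
# so concurrent requests run one after another
queued_generate = wrap_queued_call(generate)
print(queued_generate("a cat"))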
modules/cmd_args.py ADDED
@@ -0,0 +1,119 @@
1
+ import argparse
2
+ import json
3
+ import os
4
+ from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file # noqa: F401
5
+
6
+ parser = argparse.ArgumentParser()
7
+
8
+ parser.add_argument("-f", action='store_true', help=argparse.SUPPRESS) # allows running as root; implemented outside of webui
9
+ parser.add_argument("--update-all-extensions", action='store_true', help="launch.py argument: download updates for all extensions when starting the program")
10
+ parser.add_argument("--skip-python-version-check", action='store_true', help="launch.py argument: do not check python version")
11
+ parser.add_argument("--skip-torch-cuda-test", action='store_true', help="launch.py argument: do not check if CUDA is able to work properly")
12
+ parser.add_argument("--reinstall-xformers", action='store_true', help="launch.py argument: install the appropriate version of xformers even if you have some version already installed")
13
+ parser.add_argument("--reinstall-torch", action='store_true', help="launch.py argument: install the appropriate version of torch even if you have some version already installed")
14
+ parser.add_argument("--update-check", action='store_true', help="launch.py argument: check for updates at startup")
15
+ parser.add_argument("--test-server", action='store_true', help="launch.py argument: configure server for testing")
16
+ parser.add_argument("--log-startup", action='store_true', help="launch.py argument: print a detailed log of what's happening at startup")
17
+ parser.add_argument("--skip-prepare-environment", action='store_true', help="launch.py argument: skip all environment preparation")
18
+ parser.add_argument("--skip-install", action='store_true', help="launch.py argument: skip installation of packages")
19
+ parser.add_argument("--dump-sysinfo", action='store_true', help="launch.py argument: dump limited sysinfo file (without information about extensions, options) to disk and quit")
20
+ parser.add_argument("--loglevel", type=str, help="log level; one of: CRITICAL, ERROR, WARNING, INFO, DEBUG", default=None)
21
+ parser.add_argument("--do-not-download-clip", action='store_true', help="do not download CLIP model even if it's not included in the checkpoint")
22
+ parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored")
23
+ parser.add_argument("--config", type=str, default=sd_default_config, help="path to config which constructs model",)
24
+ parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
25
+ parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints")
26
+ parser.add_argument("--vae-dir", type=str, default=None, help="Path to directory with VAE files")
27
+ parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN'))
28
+ parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default=None)
29
+ parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats")
30
+ parser.add_argument("--no-half-vae", action='store_true', help="do not switch the VAE model to 16-bit floats")
31
+ parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
32
+ parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
33
+ parser.add_argument("--embeddings-dir", type=str, default=os.path.join(data_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
34
+ parser.add_argument("--textual-inversion-templates-dir", type=str, default=os.path.join(script_path, 'textual_inversion_templates'), help="directory with textual inversion templates")
35
+ parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory")
36
+ parser.add_argument("--localizations-dir", type=str, default=os.path.join(script_path, 'localizations'), help="localizations directory")
37
+ parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
38
+ parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
39
+ parser.add_argument("--medvram-sdxl", action='store_true', help="enable --medvram optimization just for SDXL models")
40
+ parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
41
+ parser.add_argument("--lowram", action='store_true', help="load stable diffusion checkpoint weights to VRAM instead of RAM")
42
+ parser.add_argument("--always-batch-cond-uncond", action='store_true', help="does not do anything")
43
+ parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
44
+ parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
45
+ parser.add_argument("--upcast-sampling", action='store_true', help="upcast sampling. No effect with --no-half. Usually produces similar results to --no-half with better performance while using less memory.")
46
+ parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site")
47
+ parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)
48
+ parser.add_argument("--ngrok-region", type=str, help="does not do anything.", default="")
49
+ parser.add_argument("--ngrok-options", type=json.loads, help='The options to pass to ngrok in JSON format, e.g.: \'{"authtoken_from_env":true, "basic_auth":"user:password", "oauth_provider":"google", "oauth_allow_emails":"user@asdf.com"}\'', default=dict())
50
+ parser.add_argument("--enable-insecure-extension-access", action='store_true', help="enable extensions tab regardless of other options")
51
+ parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
52
+ parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))
53
+ parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN'))
54
+ parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN'))
55
+ parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN'))
56
+ parser.add_argument("--clip-models-path", type=str, help="Path to directory with CLIP model file(s).", default=None)
57
+ parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
58
+ parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
59
+ parser.add_argument("--xformers-flash-attention", action='store_true', help="enable xformers with Flash Attention to improve reproducibility (supported for SD2.x or variant only)")
60
+ parser.add_argument("--deepdanbooru", action='store_true', help="does not do anything")
61
+ parser.add_argument("--opt-split-attention", action='store_true', help="prefer Doggettx's cross-attention layer optimization for automatic choice of optimization")
62
+ parser.add_argument("--opt-sub-quad-attention", action='store_true', help="prefer memory efficient sub-quadratic cross-attention layer optimization for automatic choice of optimization")
63
+ parser.add_argument("--sub-quad-q-chunk-size", type=int, help="query chunk size for the sub-quadratic cross-attention layer optimization to use", default=1024)
64
+ parser.add_argument("--sub-quad-kv-chunk-size", type=int, help="kv chunk size for the sub-quadratic cross-attention layer optimization to use", default=None)
65
+ parser.add_argument("--sub-quad-chunk-threshold", type=int, help="the percentage of VRAM threshold for the sub-quadratic cross-attention layer optimization to use chunking", default=None)
66
+ parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="prefer InvokeAI's cross-attention layer optimization for automatic choice of optimization")
67
+ parser.add_argument("--opt-split-attention-v1", action='store_true', help="prefer older version of split attention optimization for automatic choice of optimization")
68
+ parser.add_argument("--opt-sdp-attention", action='store_true', help="prefer scaled dot product cross-attention layer optimization for automatic choice of optimization; requires PyTorch 2.*")
69
+ parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="prefer scaled dot product cross-attention layer optimization without memory efficient attention for automatic choice of optimization, makes image generation deterministic; requires PyTorch 2.*")
70
+ parser.add_argument("--disable-opt-split-attention", action='store_true', help="prefer no cross-attention layer optimization for automatic choice of optimization")
71
+ parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
72
+ parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower)
73
+ parser.add_argument("--disable-model-loading-ram-optimization", action='store_true', help="disable an optimization that reduces RAM use when loading a model")
74
+ parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
75
+ parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
76
+ parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
77
+ parser.add_argument("--ui-config-file", type=str, help="filename to use for ui configuration", default=os.path.join(data_path, 'ui-config.json'))
78
+ parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide directory configuration from webui", default=False)
79
+ parser.add_argument("--freeze-settings", action='store_true', help="disable editing settings", default=False)
80
+ parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(data_path, 'config.json'))
81
+ parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option")
82
+ parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
83
+ parser.add_argument("--gradio-auth-path", type=str, help='set gradio authentication file path ex. "/path/to/auth/file" same auth format as --gradio-auth', default=None)
84
+ parser.add_argument("--gradio-img2img-tool", type=str, help='does not do anything')
85
+ parser.add_argument("--gradio-inpaint-tool", type=str, help="does not do anything")
86
+ parser.add_argument("--gradio-allowed-path", action='append', help="add path to gradio's allowed_paths, make it possible to serve files from it", default=[data_path])
87
+ parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
88
+ parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(data_path, 'styles.csv'))
89
+ parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
90
+ parser.add_argument("--theme", type=str, help="launches the UI with light or dark theme", default=None)
91
+ parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
92
+ parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False)
93
+ parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
94
+ parser.add_argument('--vae-path', type=str, help='Checkpoint to use as VAE; setting this argument disables all settings related to VAE', default=None)
95
+ parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)
96
+ parser.add_argument("--api", action='store_true', help="use api=True to launch the API together with the webui (use --nowebui instead for only the API)")
97
+ parser.add_argument("--api-auth", type=str, help='Set authentication for API like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
98
+ parser.add_argument("--api-log", action='store_true', help="use api-log=True to enable logging of all API requests")
99
+ parser.add_argument("--nowebui", action='store_true', help="use api=True to launch the API instead of the webui")
100
+ parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI")
101
+ parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
102
+ parser.add_argument("--administrator", action='store_true', help="Administrator rights", default=False)
103
+ parser.add_argument("--cors-allow-origins", type=str, help="Allowed CORS origin(s) in the form of a comma-separated list (no spaces)", default=None)
104
+ parser.add_argument("--cors-allow-origins-regex", type=str, help="Allowed CORS origin(s) in the form of a single regular expression", default=None)
105
+ parser.add_argument("--tls-keyfile", type=str, help="Partially enables TLS, requires --tls-certfile to fully function", default=None)
106
+ parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, requires --tls-keyfile to fully function", default=None)
107
+ parser.add_argument("--disable-tls-verify", action="store_false", help="When passed, enables the use of self-signed certificates.", default=None)
108
+ parser.add_argument("--server-name", type=str, help="Sets hostname of server", default=None)
109
+ parser.add_argument("--gradio-queue", action='store_true', help="does not do anything", default=True)
110
+ parser.add_argument("--no-gradio-queue", action='store_true', help="Disables gradio queue; causes the webpage to use http requests instead of websockets; was the defaul in earlier versions")
111
+ parser.add_argument("--skip-version-check", action='store_true', help="Do not check versions of torch and xformers")
112
+ parser.add_argument("--no-hashing", action='store_true', help="disable sha256 hashing of checkpoints to help loading performance", default=False)
113
+ parser.add_argument("--no-download-sd-model", action='store_true', help="don't download SD1.5 model even if no model is found in --ckpt-dir", default=False)
114
+ parser.add_argument('--subpath', type=str, help='customize the subpath for gradio, use with reverse proxy')
115
+ parser.add_argument('--add-stop-route', action='store_true', help='add /_stop route to stop server')
116
+ parser.add_argument('--api-server-stop', action='store_true', help='enable server stop/restart/kill via api')
117
+ parser.add_argument('--timeout-keep-alive', type=int, default=30, help='set timeout_keep_alive for uvicorn')
118
+ parser.add_argument("--disable-all-extensions", action='store_true', help="prevent all extensions from running regardless of any other settings", default=False)
119
+ parser.add_argument("--disable-extra-extensions", action='store_true', help=" prevent all extensions except built-in from running regardless of any other settings", default=False)
modules/codeformer/codeformer_arch.py ADDED
@@ -0,0 +1,276 @@
1
+ # this file is copied from CodeFormer repository. Please see comment in modules/codeformer_model.py
2
+
3
+ import math
4
+ import torch
5
+ from torch import nn, Tensor
6
+ import torch.nn.functional as F
7
+ from typing import Optional
8
+
9
+ from modules.codeformer.vqgan_arch import VQAutoEncoder, ResBlock
10
+ from basicsr.utils.registry import ARCH_REGISTRY
11
+
12
+ def calc_mean_std(feat, eps=1e-5):
13
+ """Calculate mean and std for adaptive_instance_normalization.
14
+
15
+ Args:
16
+ feat (Tensor): 4D tensor.
17
+ eps (float): A small value added to the variance to avoid
18
+ divide-by-zero. Default: 1e-5.
19
+ """
20
+ size = feat.size()
21
+ assert len(size) == 4, 'The input feature should be 4D tensor.'
22
+ b, c = size[:2]
23
+ feat_var = feat.view(b, c, -1).var(dim=2) + eps
24
+ feat_std = feat_var.sqrt().view(b, c, 1, 1)
25
+ feat_mean = feat.view(b, c, -1).mean(dim=2).view(b, c, 1, 1)
26
+ return feat_mean, feat_std
27
+
28
+
29
+ def adaptive_instance_normalization(content_feat, style_feat):
30
+ """Adaptive instance normalization.
31
+
32
+ Adjust the reference features to have color and illumination similar
33
+ to those of the degraded features.
34
+
35
+ Args:
36
+ content_feat (Tensor): The reference feature.
37
+ style_feat (Tensor): The degraded features.
38
+ """
39
+ size = content_feat.size()
40
+ style_mean, style_std = calc_mean_std(style_feat)
41
+ content_mean, content_std = calc_mean_std(content_feat)
42
+ normalized_feat = (content_feat - content_mean.expand(size)) / content_std.expand(size)
43
+ return normalized_feat * style_std.expand(size) + style_mean.expand(size)
44
+
45
+
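In formula terms, calc_mean_std and adaptive_instance_normalization together implement per-channel AdaIN: the content feature is whitened with its own statistics and re-scaled with the style statistics, out = (content - mu_c) / sigma_c * sigma_s + mu_s. A small sanity check with random tensors (assuming the basicsr dependency needed to import this module is installed):

import torch
from modules.codeformer.codeformer_arch import adaptive_instance_normalization, calc_mean_std

content = torch.randn(1, 4, 8, 8)
style = torch.randn(1, 4, 8, 8) * 3.0 + 1.0   # deliberately different mean/std

out = adaptive_instance_normalization(content, style)

# per-channel mean of the output matches the style mean exactly,
# and the std matches up to the eps used inside calc_mean_std
print(calc_mean_std(out)[0].flatten())
print(calc_mean_std(style)[0].flatten())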
46
+ class PositionEmbeddingSine(nn.Module):
47
+ """
48
+ This is a more standard version of the position embedding, very similar to the one
49
+ used by the Attention is all you need paper, generalized to work on images.
50
+ """
51
+
52
+ def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
53
+ super().__init__()
54
+ self.num_pos_feats = num_pos_feats
55
+ self.temperature = temperature
56
+ self.normalize = normalize
57
+ if scale is not None and normalize is False:
58
+ raise ValueError("normalize should be True if scale is passed")
59
+ if scale is None:
60
+ scale = 2 * math.pi
61
+ self.scale = scale
62
+
63
+ def forward(self, x, mask=None):
64
+ if mask is None:
65
+ mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool)
66
+ not_mask = ~mask
67
+ y_embed = not_mask.cumsum(1, dtype=torch.float32)
68
+ x_embed = not_mask.cumsum(2, dtype=torch.float32)
69
+ if self.normalize:
70
+ eps = 1e-6
71
+ y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
72
+ x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
73
+
74
+ dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
75
+ dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
76
+
77
+ pos_x = x_embed[:, :, :, None] / dim_t
78
+ pos_y = y_embed[:, :, :, None] / dim_t
79
+ pos_x = torch.stack(
80
+ (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
81
+ ).flatten(3)
82
+ pos_y = torch.stack(
83
+ (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
84
+ ).flatten(3)
85
+ pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
86
+ return pos
87
+
88
+ def _get_activation_fn(activation):
89
+ """Return an activation function given a string"""
90
+ if activation == "relu":
91
+ return F.relu
92
+ if activation == "gelu":
93
+ return F.gelu
94
+ if activation == "glu":
95
+ return F.glu
96
+ raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
97
+
98
+
99
+ class TransformerSALayer(nn.Module):
100
+ def __init__(self, embed_dim, nhead=8, dim_mlp=2048, dropout=0.0, activation="gelu"):
101
+ super().__init__()
102
+ self.self_attn = nn.MultiheadAttention(embed_dim, nhead, dropout=dropout)
103
+ # Implementation of Feedforward model - MLP
104
+ self.linear1 = nn.Linear(embed_dim, dim_mlp)
105
+ self.dropout = nn.Dropout(dropout)
106
+ self.linear2 = nn.Linear(dim_mlp, embed_dim)
107
+
108
+ self.norm1 = nn.LayerNorm(embed_dim)
109
+ self.norm2 = nn.LayerNorm(embed_dim)
110
+ self.dropout1 = nn.Dropout(dropout)
111
+ self.dropout2 = nn.Dropout(dropout)
112
+
113
+ self.activation = _get_activation_fn(activation)
114
+
115
+ def with_pos_embed(self, tensor, pos: Optional[Tensor]):
116
+ return tensor if pos is None else tensor + pos
117
+
118
+ def forward(self, tgt,
119
+ tgt_mask: Optional[Tensor] = None,
120
+ tgt_key_padding_mask: Optional[Tensor] = None,
121
+ query_pos: Optional[Tensor] = None):
122
+
123
+ # self attention
124
+ tgt2 = self.norm1(tgt)
125
+ q = k = self.with_pos_embed(tgt2, query_pos)
126
+ tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
127
+ key_padding_mask=tgt_key_padding_mask)[0]
128
+ tgt = tgt + self.dropout1(tgt2)
129
+
130
+ # ffn
131
+ tgt2 = self.norm2(tgt)
132
+ tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
133
+ tgt = tgt + self.dropout2(tgt2)
134
+ return tgt
135
+
136
+ class Fuse_sft_block(nn.Module):
137
+ def __init__(self, in_ch, out_ch):
138
+ super().__init__()
139
+ self.encode_enc = ResBlock(2*in_ch, out_ch)
140
+
141
+ self.scale = nn.Sequential(
142
+ nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
143
+ nn.LeakyReLU(0.2, True),
144
+ nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1))
145
+
146
+ self.shift = nn.Sequential(
147
+ nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
148
+ nn.LeakyReLU(0.2, True),
149
+ nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1))
150
+
151
+ def forward(self, enc_feat, dec_feat, w=1):
152
+ enc_feat = self.encode_enc(torch.cat([enc_feat, dec_feat], dim=1))
153
+ scale = self.scale(enc_feat)
154
+ shift = self.shift(enc_feat)
155
+ residual = w * (dec_feat * scale + shift)
156
+ out = dec_feat + residual
157
+ return out
158
+
159
+
160
+ @ARCH_REGISTRY.register()
161
+ class CodeFormer(VQAutoEncoder):
162
+ def __init__(self, dim_embd=512, n_head=8, n_layers=9,
163
+ codebook_size=1024, latent_size=256,
164
+ connect_list=('32', '64', '128', '256'),
165
+ fix_modules=('quantize', 'generator')):
166
+ super(CodeFormer, self).__init__(512, 64, [1, 2, 2, 4, 4, 8], 'nearest',2, [16], codebook_size)
167
+
168
+ if fix_modules is not None:
169
+ for module in fix_modules:
170
+ for param in getattr(self, module).parameters():
171
+ param.requires_grad = False
172
+
173
+ self.connect_list = connect_list
174
+ self.n_layers = n_layers
175
+ self.dim_embd = dim_embd
176
+ self.dim_mlp = dim_embd*2
177
+
178
+ self.position_emb = nn.Parameter(torch.zeros(latent_size, self.dim_embd))
179
+ self.feat_emb = nn.Linear(256, self.dim_embd)
180
+
181
+ # transformer
182
+ self.ft_layers = nn.Sequential(*[TransformerSALayer(embed_dim=dim_embd, nhead=n_head, dim_mlp=self.dim_mlp, dropout=0.0)
183
+ for _ in range(self.n_layers)])
184
+
185
+ # logits_predict head
186
+ self.idx_pred_layer = nn.Sequential(
187
+ nn.LayerNorm(dim_embd),
188
+ nn.Linear(dim_embd, codebook_size, bias=False))
189
+
190
+ self.channels = {
191
+ '16': 512,
192
+ '32': 256,
193
+ '64': 256,
194
+ '128': 128,
195
+ '256': 128,
196
+ '512': 64,
197
+ }
198
+
199
+ # after second residual block for > 16, before attn layer for ==16
200
+ self.fuse_encoder_block = {'512':2, '256':5, '128':8, '64':11, '32':14, '16':18}
201
+ # after first residual block for > 16, before attn layer for ==16
202
+ self.fuse_generator_block = {'16':6, '32': 9, '64':12, '128':15, '256':18, '512':21}
203
+
204
+ # fuse_convs_dict
205
+ self.fuse_convs_dict = nn.ModuleDict()
206
+ for f_size in self.connect_list:
207
+ in_ch = self.channels[f_size]
208
+ self.fuse_convs_dict[f_size] = Fuse_sft_block(in_ch, in_ch)
209
+
210
+ def _init_weights(self, module):
211
+ if isinstance(module, (nn.Linear, nn.Embedding)):
212
+ module.weight.data.normal_(mean=0.0, std=0.02)
213
+ if isinstance(module, nn.Linear) and module.bias is not None:
214
+ module.bias.data.zero_()
215
+ elif isinstance(module, nn.LayerNorm):
216
+ module.bias.data.zero_()
217
+ module.weight.data.fill_(1.0)
218
+
219
+ def forward(self, x, w=0, detach_16=True, code_only=False, adain=False):
220
+ # ################### Encoder #####################
221
+ enc_feat_dict = {}
222
+ out_list = [self.fuse_encoder_block[f_size] for f_size in self.connect_list]
223
+ for i, block in enumerate(self.encoder.blocks):
224
+ x = block(x)
225
+ if i in out_list:
226
+ enc_feat_dict[str(x.shape[-1])] = x.clone()
227
+
228
+ lq_feat = x
229
+ # ################# Transformer ###################
230
+ # quant_feat, codebook_loss, quant_stats = self.quantize(lq_feat)
231
+ pos_emb = self.position_emb.unsqueeze(1).repeat(1,x.shape[0],1)
232
+ # BCHW -> BC(HW) -> (HW)BC
233
+ feat_emb = self.feat_emb(lq_feat.flatten(2).permute(2,0,1))
234
+ query_emb = feat_emb
235
+ # Transformer encoder
236
+ for layer in self.ft_layers:
237
+ query_emb = layer(query_emb, query_pos=pos_emb)
238
+
239
+ # output logits
240
+ logits = self.idx_pred_layer(query_emb) # (hw)bn
241
+ logits = logits.permute(1,0,2) # (hw)bn -> b(hw)n
242
+
243
+ if code_only: # for training stage II
244
+ # logits doesn't need softmax before cross_entropy loss
245
+ return logits, lq_feat
246
+
247
+ # ################# Quantization ###################
248
+ # if self.training:
249
+ # quant_feat = torch.einsum('btn,nc->btc', [soft_one_hot, self.quantize.embedding.weight])
250
+ # # b(hw)c -> bc(hw) -> bchw
251
+ # quant_feat = quant_feat.permute(0,2,1).view(lq_feat.shape)
252
+ # ------------
253
+ soft_one_hot = F.softmax(logits, dim=2)
254
+ _, top_idx = torch.topk(soft_one_hot, 1, dim=2)
255
+ quant_feat = self.quantize.get_codebook_feat(top_idx, shape=[x.shape[0],16,16,256])
256
+ # preserve gradients
257
+ # quant_feat = lq_feat + (quant_feat - lq_feat).detach()
258
+
259
+ if detach_16:
260
+ quant_feat = quant_feat.detach() # for training stage III
261
+ if adain:
262
+ quant_feat = adaptive_instance_normalization(quant_feat, lq_feat)
263
+
264
+ # ################## Generator ####################
265
+ x = quant_feat
266
+ fuse_list = [self.fuse_generator_block[f_size] for f_size in self.connect_list]
267
+
268
+ for i, block in enumerate(self.generator.blocks):
269
+ x = block(x)
270
+ if i in fuse_list: # fuse after i-th block
271
+ f_size = str(x.shape[-1])
272
+ if w>0:
273
+ x = self.fuse_convs_dict[f_size](enc_feat_dict[f_size].detach(), x, w)
274
+ out = x
275
+ # logits doesn't need softmax before cross_entropy loss
276
+ return out, logits, lq_feat
modules/codeformer/vqgan_arch.py ADDED
@@ -0,0 +1,435 @@
1
+ # this file is copied from CodeFormer repository. Please see comment in modules/codeformer_model.py
2
+
3
+ '''
4
+ VQGAN code, adapted from the original created by the Unleashing Transformers authors:
5
+ https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py
6
+
7
+ '''
8
+ import torch
9
+ import torch.nn as nn
10
+ import torch.nn.functional as F
11
+ from basicsr.utils import get_root_logger
12
+ from basicsr.utils.registry import ARCH_REGISTRY
13
+
14
+ def normalize(in_channels):
15
+ return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
16
+
17
+
18
+ @torch.jit.script
19
+ def swish(x):
20
+ return x*torch.sigmoid(x)
21
+
22
+
23
+ # Define VQVAE classes
24
+ class VectorQuantizer(nn.Module):
25
+ def __init__(self, codebook_size, emb_dim, beta):
26
+ super(VectorQuantizer, self).__init__()
27
+ self.codebook_size = codebook_size # number of embeddings
28
+ self.emb_dim = emb_dim # dimension of embedding
29
+ self.beta = beta # commitment cost used in loss term, beta * ||z_e(x)-sg[e]||^2
30
+ self.embedding = nn.Embedding(self.codebook_size, self.emb_dim)
31
+ self.embedding.weight.data.uniform_(-1.0 / self.codebook_size, 1.0 / self.codebook_size)
32
+
33
+ def forward(self, z):
34
+ # reshape z -> (batch, height, width, channel) and flatten
35
+ z = z.permute(0, 2, 3, 1).contiguous()
36
+ z_flattened = z.view(-1, self.emb_dim)
37
+
38
+ # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
39
+ d = (z_flattened ** 2).sum(dim=1, keepdim=True) + (self.embedding.weight**2).sum(1) - \
40
+ 2 * torch.matmul(z_flattened, self.embedding.weight.t())
41
+
42
+ mean_distance = torch.mean(d)
43
+ # find closest encodings
44
+ # min_encoding_indices = torch.argmin(d, dim=1).unsqueeze(1)
45
+ min_encoding_scores, min_encoding_indices = torch.topk(d, 1, dim=1, largest=False)
46
+ # [0-1], higher score, higher confidence
47
+ min_encoding_scores = torch.exp(-min_encoding_scores/10)
48
+
49
+ min_encodings = torch.zeros(min_encoding_indices.shape[0], self.codebook_size).to(z)
50
+ min_encodings.scatter_(1, min_encoding_indices, 1)
51
+
52
+ # get quantized latent vectors
53
+ z_q = torch.matmul(min_encodings, self.embedding.weight).view(z.shape)
54
+ # compute loss for embedding
55
+ loss = torch.mean((z_q.detach()-z)**2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
56
+ # preserve gradients
57
+ z_q = z + (z_q - z).detach()
58
+
59
+ # perplexity
60
+ e_mean = torch.mean(min_encodings, dim=0)
61
+ perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + 1e-10)))
62
+ # reshape back to match original input shape
63
+ z_q = z_q.permute(0, 3, 1, 2).contiguous()
64
+
65
+ return z_q, loss, {
66
+ "perplexity": perplexity,
67
+ "min_encodings": min_encodings,
68
+ "min_encoding_indices": min_encoding_indices,
69
+ "min_encoding_scores": min_encoding_scores,
70
+ "mean_distance": mean_distance
71
+ }
72
+
73
+ def get_codebook_feat(self, indices, shape):
74
+ # input indices: batch*token_num -> (batch*token_num)*1
75
+ # shape: batch, height, width, channel
76
+ indices = indices.view(-1,1)
77
+ min_encodings = torch.zeros(indices.shape[0], self.codebook_size).to(indices)
78
+ min_encodings.scatter_(1, indices, 1)
79
+ # get quantized latent vectors
80
+ z_q = torch.matmul(min_encodings.float(), self.embedding.weight)
81
+
82
+ if shape is not None: # reshape back to match original input shape
83
+ z_q = z_q.view(shape).permute(0, 3, 1, 2).contiguous()
84
+
85
+ return z_q
86
+
87
+
88
+ class GumbelQuantizer(nn.Module):
89
+ def __init__(self, codebook_size, emb_dim, num_hiddens, straight_through=False, kl_weight=5e-4, temp_init=1.0):
90
+ super().__init__()
91
+ self.codebook_size = codebook_size # number of embeddings
92
+ self.emb_dim = emb_dim # dimension of embedding
93
+ self.straight_through = straight_through
94
+ self.temperature = temp_init
95
+ self.kl_weight = kl_weight
96
+ self.proj = nn.Conv2d(num_hiddens, codebook_size, 1) # projects last encoder layer to quantized logits
97
+ self.embed = nn.Embedding(codebook_size, emb_dim)
98
+
99
+ def forward(self, z):
100
+ hard = self.straight_through if self.training else True
101
+
102
+ logits = self.proj(z)
103
+
104
+ soft_one_hot = F.gumbel_softmax(logits, tau=self.temperature, dim=1, hard=hard)
105
+
106
+ z_q = torch.einsum("b n h w, n d -> b d h w", soft_one_hot, self.embed.weight)
107
+
108
+ # + kl divergence to the prior loss
109
+ qy = F.softmax(logits, dim=1)
110
+ diff = self.kl_weight * torch.sum(qy * torch.log(qy * self.codebook_size + 1e-10), dim=1).mean()
111
+ min_encoding_indices = soft_one_hot.argmax(dim=1)
112
+
113
+ return z_q, diff, {
114
+ "min_encoding_indices": min_encoding_indices
115
+ }
116
+
117
+
118
+ class Downsample(nn.Module):
119
+ def __init__(self, in_channels):
120
+ super().__init__()
121
+ self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)
122
+
123
+ def forward(self, x):
124
+ pad = (0, 1, 0, 1)
125
+ x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
126
+ x = self.conv(x)
127
+ return x
128
+
129
+
130
+ class Upsample(nn.Module):
131
+ def __init__(self, in_channels):
132
+ super().__init__()
133
+ self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
134
+
135
+ def forward(self, x):
136
+ x = F.interpolate(x, scale_factor=2.0, mode="nearest")
137
+ x = self.conv(x)
138
+
139
+ return x
140
+
141
+
142
+ class ResBlock(nn.Module):
143
+ def __init__(self, in_channels, out_channels=None):
144
+ super(ResBlock, self).__init__()
145
+ self.in_channels = in_channels
146
+ self.out_channels = in_channels if out_channels is None else out_channels
147
+ self.norm1 = normalize(in_channels)
148
+ self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
149
+ self.norm2 = normalize(out_channels)
150
+ self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
151
+ if self.in_channels != self.out_channels:
152
+ self.conv_out = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
153
+
154
+ def forward(self, x_in):
155
+ x = x_in
156
+ x = self.norm1(x)
157
+ x = swish(x)
158
+ x = self.conv1(x)
159
+ x = self.norm2(x)
160
+ x = swish(x)
161
+ x = self.conv2(x)
162
+ if self.in_channels != self.out_channels:
163
+ x_in = self.conv_out(x_in)
164
+
165
+ return x + x_in
166
+
167
+
168
+ class AttnBlock(nn.Module):
169
+ def __init__(self, in_channels):
170
+ super().__init__()
171
+ self.in_channels = in_channels
172
+
173
+ self.norm = normalize(in_channels)
174
+ self.q = torch.nn.Conv2d(
175
+ in_channels,
176
+ in_channels,
177
+ kernel_size=1,
178
+ stride=1,
179
+ padding=0
180
+ )
181
+ self.k = torch.nn.Conv2d(
182
+ in_channels,
183
+ in_channels,
184
+ kernel_size=1,
185
+ stride=1,
186
+ padding=0
187
+ )
188
+ self.v = torch.nn.Conv2d(
189
+ in_channels,
190
+ in_channels,
191
+ kernel_size=1,
192
+ stride=1,
193
+ padding=0
194
+ )
195
+ self.proj_out = torch.nn.Conv2d(
196
+ in_channels,
197
+ in_channels,
198
+ kernel_size=1,
199
+ stride=1,
200
+ padding=0
201
+ )
202
+
203
+ def forward(self, x):
204
+ h_ = x
205
+ h_ = self.norm(h_)
206
+ q = self.q(h_)
207
+ k = self.k(h_)
208
+ v = self.v(h_)
209
+
210
+ # compute attention
211
+ b, c, h, w = q.shape
212
+ q = q.reshape(b, c, h*w)
213
+ q = q.permute(0, 2, 1)
214
+ k = k.reshape(b, c, h*w)
215
+ w_ = torch.bmm(q, k)
216
+ w_ = w_ * (int(c)**(-0.5))
217
+ w_ = F.softmax(w_, dim=2)
218
+
219
+ # attend to values
220
+ v = v.reshape(b, c, h*w)
221
+ w_ = w_.permute(0, 2, 1)
222
+ h_ = torch.bmm(v, w_)
223
+ h_ = h_.reshape(b, c, h, w)
224
+
225
+ h_ = self.proj_out(h_)
226
+
227
+ return x+h_
228
+
229
+
230
+ class Encoder(nn.Module):
231
+ def __init__(self, in_channels, nf, emb_dim, ch_mult, num_res_blocks, resolution, attn_resolutions):
232
+ super().__init__()
233
+ self.nf = nf
234
+ self.num_resolutions = len(ch_mult)
235
+ self.num_res_blocks = num_res_blocks
236
+ self.resolution = resolution
237
+ self.attn_resolutions = attn_resolutions
238
+
239
+ curr_res = self.resolution
240
+ in_ch_mult = (1,)+tuple(ch_mult)
241
+
242
+ blocks = []
243
+ # initial convolution
244
+ blocks.append(nn.Conv2d(in_channels, nf, kernel_size=3, stride=1, padding=1))
245
+
246
+ # residual and downsampling blocks, with attention on smaller res (16x16)
247
+ for i in range(self.num_resolutions):
248
+ block_in_ch = nf * in_ch_mult[i]
249
+ block_out_ch = nf * ch_mult[i]
250
+ for _ in range(self.num_res_blocks):
251
+ blocks.append(ResBlock(block_in_ch, block_out_ch))
252
+ block_in_ch = block_out_ch
253
+ if curr_res in attn_resolutions:
254
+ blocks.append(AttnBlock(block_in_ch))
255
+
256
+ if i != self.num_resolutions - 1:
257
+ blocks.append(Downsample(block_in_ch))
258
+ curr_res = curr_res // 2
259
+
260
+ # non-local attention block
261
+ blocks.append(ResBlock(block_in_ch, block_in_ch))
262
+ blocks.append(AttnBlock(block_in_ch))
263
+ blocks.append(ResBlock(block_in_ch, block_in_ch))
264
+
265
+ # normalise and convert to latent size
266
+ blocks.append(normalize(block_in_ch))
267
+ blocks.append(nn.Conv2d(block_in_ch, emb_dim, kernel_size=3, stride=1, padding=1))
268
+ self.blocks = nn.ModuleList(blocks)
269
+
270
+ def forward(self, x):
271
+ for block in self.blocks:
272
+ x = block(x)
273
+
274
+ return x
275
+
276
+
277
+ class Generator(nn.Module):
278
+ def __init__(self, nf, emb_dim, ch_mult, res_blocks, img_size, attn_resolutions):
279
+ super().__init__()
280
+ self.nf = nf
281
+ self.ch_mult = ch_mult
282
+ self.num_resolutions = len(self.ch_mult)
283
+ self.num_res_blocks = res_blocks
284
+ self.resolution = img_size
285
+ self.attn_resolutions = attn_resolutions
286
+ self.in_channels = emb_dim
287
+ self.out_channels = 3
288
+ block_in_ch = self.nf * self.ch_mult[-1]
289
+ curr_res = self.resolution // 2 ** (self.num_resolutions-1)
290
+
291
+ blocks = []
292
+ # initial conv
293
+ blocks.append(nn.Conv2d(self.in_channels, block_in_ch, kernel_size=3, stride=1, padding=1))
294
+
295
+ # non-local attention block
296
+ blocks.append(ResBlock(block_in_ch, block_in_ch))
297
+ blocks.append(AttnBlock(block_in_ch))
298
+ blocks.append(ResBlock(block_in_ch, block_in_ch))
299
+
300
+ for i in reversed(range(self.num_resolutions)):
301
+ block_out_ch = self.nf * self.ch_mult[i]
302
+
303
+ for _ in range(self.num_res_blocks):
304
+ blocks.append(ResBlock(block_in_ch, block_out_ch))
305
+ block_in_ch = block_out_ch
306
+
307
+ if curr_res in self.attn_resolutions:
308
+ blocks.append(AttnBlock(block_in_ch))
309
+
310
+ if i != 0:
311
+ blocks.append(Upsample(block_in_ch))
312
+ curr_res = curr_res * 2
313
+
314
+ blocks.append(normalize(block_in_ch))
315
+ blocks.append(nn.Conv2d(block_in_ch, self.out_channels, kernel_size=3, stride=1, padding=1))
316
+
317
+ self.blocks = nn.ModuleList(blocks)
318
+
319
+
320
+ def forward(self, x):
321
+ for block in self.blocks:
322
+ x = block(x)
323
+
324
+ return x
325
+
326
+
327
+ @ARCH_REGISTRY.register()
328
+ class VQAutoEncoder(nn.Module):
329
+ def __init__(self, img_size, nf, ch_mult, quantizer="nearest", res_blocks=2, attn_resolutions=None, codebook_size=1024, emb_dim=256,
330
+ beta=0.25, gumbel_straight_through=False, gumbel_kl_weight=1e-8, model_path=None):
331
+ super().__init__()
332
+ logger = get_root_logger()
333
+ self.in_channels = 3
334
+ self.nf = nf
335
+ self.n_blocks = res_blocks
336
+ self.codebook_size = codebook_size
337
+ self.embed_dim = emb_dim
338
+ self.ch_mult = ch_mult
339
+ self.resolution = img_size
340
+ self.attn_resolutions = attn_resolutions or [16]
341
+ self.quantizer_type = quantizer
342
+ self.encoder = Encoder(
343
+ self.in_channels,
344
+ self.nf,
345
+ self.embed_dim,
346
+ self.ch_mult,
347
+ self.n_blocks,
348
+ self.resolution,
349
+ self.attn_resolutions
350
+ )
351
+ if self.quantizer_type == "nearest":
352
+ self.beta = beta #0.25
353
+ self.quantize = VectorQuantizer(self.codebook_size, self.embed_dim, self.beta)
354
+ elif self.quantizer_type == "gumbel":
355
+ self.gumbel_num_hiddens = emb_dim
356
+ self.straight_through = gumbel_straight_through
357
+ self.kl_weight = gumbel_kl_weight
358
+ self.quantize = GumbelQuantizer(
359
+ self.codebook_size,
360
+ self.embed_dim,
361
+ self.gumbel_num_hiddens,
362
+ self.straight_through,
363
+ self.kl_weight
364
+ )
365
+ self.generator = Generator(
366
+ self.nf,
367
+ self.embed_dim,
368
+ self.ch_mult,
369
+ self.n_blocks,
370
+ self.resolution,
371
+ self.attn_resolutions
372
+ )
373
+
374
+ if model_path is not None:
375
+ chkpt = torch.load(model_path, map_location='cpu')
376
+ if 'params_ema' in chkpt:
377
+ self.load_state_dict(torch.load(model_path, map_location='cpu')['params_ema'])
378
+ logger.info(f'vqgan is loaded from: {model_path} [params_ema]')
379
+ elif 'params' in chkpt:
380
+ self.load_state_dict(torch.load(model_path, map_location='cpu')['params'])
381
+ logger.info(f'vqgan is loaded from: {model_path} [params]')
382
+ else:
383
+ raise ValueError('Wrong params!')
384
+
385
+
386
+ def forward(self, x):
387
+ x = self.encoder(x)
388
+ quant, codebook_loss, quant_stats = self.quantize(x)
389
+ x = self.generator(quant)
390
+ return x, codebook_loss, quant_stats
391
+
392
+
393
+
394
+ # patch based discriminator
395
+ @ARCH_REGISTRY.register()
396
+ class VQGANDiscriminator(nn.Module):
397
+ def __init__(self, nc=3, ndf=64, n_layers=4, model_path=None):
398
+ super().__init__()
399
+
400
+ layers = [nn.Conv2d(nc, ndf, kernel_size=4, stride=2, padding=1), nn.LeakyReLU(0.2, True)]
401
+ ndf_mult = 1
402
+ ndf_mult_prev = 1
403
+ for n in range(1, n_layers): # gradually increase the number of filters
404
+ ndf_mult_prev = ndf_mult
405
+ ndf_mult = min(2 ** n, 8)
406
+ layers += [
407
+ nn.Conv2d(ndf * ndf_mult_prev, ndf * ndf_mult, kernel_size=4, stride=2, padding=1, bias=False),
408
+ nn.BatchNorm2d(ndf * ndf_mult),
409
+ nn.LeakyReLU(0.2, True)
410
+ ]
411
+
412
+ ndf_mult_prev = ndf_mult
413
+ ndf_mult = min(2 ** n_layers, 8)
414
+
415
+ layers += [
416
+ nn.Conv2d(ndf * ndf_mult_prev, ndf * ndf_mult, kernel_size=4, stride=1, padding=1, bias=False),
417
+ nn.BatchNorm2d(ndf * ndf_mult),
418
+ nn.LeakyReLU(0.2, True)
419
+ ]
420
+
421
+ layers += [
422
+ nn.Conv2d(ndf * ndf_mult, 1, kernel_size=4, stride=1, padding=1)] # output 1 channel prediction map
423
+ self.main = nn.Sequential(*layers)
424
+
425
+ if model_path is not None:
426
+ chkpt = torch.load(model_path, map_location='cpu')
427
+ if 'params_d' in chkpt:
428
+ self.load_state_dict(torch.load(model_path, map_location='cpu')['params_d'])
429
+ elif 'params' in chkpt:
430
+ self.load_state_dict(torch.load(model_path, map_location='cpu')['params'])
431
+ else:
432
+ raise ValueError('Wrong params!')
433
+
434
+ def forward(self, x):
435
+ return self.main(x)
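For orientation, the autoencoder above is instantiated by CodeFormer in codeformer_arch.py with the positional arguments shown below; the forward pass is an illustrative sketch with random weights (a 512x512 RGB input matches the img_size argument, and running it requires basicsr to be installed):

import torch
from modules.codeformer.vqgan_arch import VQAutoEncoder

# img_size=512, nf=64, ch_mult=[1,2,2,4,4,8], nearest-neighbour quantizer,
# 2 residual blocks, attention at 16x16 resolution, 1024 codebook entries
vqgan = VQAutoEncoder(512, 64, [1, 2, 2, 4, 4, 8], 'nearest', 2, [16], 1024)

x = torch.randn(1, 3, 512, 512)
reconstruction, codebook_loss, quant_stats = vqgan(x)
print(reconstruction.shape)                  # torch.Size([1, 3, 512, 512])
print(codebook_loss.item(), quant_stats["perplexity"].item())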
modules/codeformer_model.py ADDED
@@ -0,0 +1,132 @@
1
+ import os
2
+
3
+ import cv2
4
+ import torch
5
+
6
+ import modules.face_restoration
7
+ import modules.shared
8
+ from modules import shared, devices, modelloader, errors
9
+ from modules.paths import models_path
10
+
11
+ # codeformer people made a choice to include a modified basicsr library in their project, which makes
12
+ # it utterly impossible to use it alongside other libraries that also use basicsr, like GFPGAN.
13
+ # I am making a choice to include some files from codeformer to work around this issue.
14
+ model_dir = "Codeformer"
15
+ model_path = os.path.join(models_path, model_dir)
16
+ model_url = 'https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth'
17
+
18
+ codeformer = None
19
+
20
+
21
+ def setup_model(dirname):
22
+ os.makedirs(model_path, exist_ok=True)
23
+
24
+ path = modules.paths.paths.get("CodeFormer", None)
25
+ if path is None:
26
+ return
27
+
28
+ try:
29
+ from torchvision.transforms.functional import normalize
30
+ from modules.codeformer.codeformer_arch import CodeFormer
31
+ from basicsr.utils import img2tensor, tensor2img
32
+ from facelib.utils.face_restoration_helper import FaceRestoreHelper
33
+ from facelib.detection.retinaface import retinaface
34
+
35
+ net_class = CodeFormer
36
+
37
+ class FaceRestorerCodeFormer(modules.face_restoration.FaceRestoration):
38
+ def name(self):
39
+ return "CodeFormer"
40
+
41
+ def __init__(self, dirname):
42
+ self.net = None
43
+ self.face_helper = None
44
+ self.cmd_dir = dirname
45
+
46
+ def create_models(self):
47
+
48
+ if self.net is not None and self.face_helper is not None:
49
+ self.net.to(devices.device_codeformer)
50
+ return self.net, self.face_helper
51
+ model_paths = modelloader.load_models(model_path, model_url, self.cmd_dir, download_name='codeformer-v0.1.0.pth', ext_filter=['.pth'])
52
+ if len(model_paths) != 0:
53
+ ckpt_path = model_paths[0]
54
+ else:
55
+ print("Unable to load codeformer model.")
56
+ return None, None
57
+ net = net_class(dim_embd=512, codebook_size=1024, n_head=8, n_layers=9, connect_list=['32', '64', '128', '256']).to(devices.device_codeformer)
58
+ checkpoint = torch.load(ckpt_path)['params_ema']
59
+ net.load_state_dict(checkpoint)
60
+ net.eval()
61
+
62
+ if hasattr(retinaface, 'device'):
63
+ retinaface.device = devices.device_codeformer
64
+ face_helper = FaceRestoreHelper(1, face_size=512, crop_ratio=(1, 1), det_model='retinaface_resnet50', save_ext='png', use_parse=True, device=devices.device_codeformer)
65
+
66
+ self.net = net
67
+ self.face_helper = face_helper
68
+
69
+ return net, face_helper
70
+
71
+ def send_model_to(self, device):
72
+ self.net.to(device)
73
+ self.face_helper.face_det.to(device)
74
+ self.face_helper.face_parse.to(device)
75
+
76
+ def restore(self, np_image, w=None):
77
+ np_image = np_image[:, :, ::-1]
78
+
79
+ original_resolution = np_image.shape[0:2]
80
+
81
+ self.create_models()
82
+ if self.net is None or self.face_helper is None:
83
+ return np_image
84
+
85
+ self.send_model_to(devices.device_codeformer)
86
+
87
+ self.face_helper.clean_all()
88
+ self.face_helper.read_image(np_image)
89
+ self.face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5)
90
+ self.face_helper.align_warp_face()
91
+
92
+ for cropped_face in self.face_helper.cropped_faces:
93
+ cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)
94
+ normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
95
+ cropped_face_t = cropped_face_t.unsqueeze(0).to(devices.device_codeformer)
96
+
97
+ try:
98
+ with torch.no_grad():
99
+ output = self.net(cropped_face_t, w=w if w is not None else shared.opts.code_former_weight, adain=True)[0]
100
+ restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))
101
+ del output
102
+ devices.torch_gc()
103
+ except Exception:
104
+ errors.report('Failed inference for CodeFormer', exc_info=True)
105
+ restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))
106
+
107
+ restored_face = restored_face.astype('uint8')
108
+ self.face_helper.add_restored_face(restored_face)
109
+
110
+ self.face_helper.get_inverse_affine(None)
111
+
112
+ restored_img = self.face_helper.paste_faces_to_input_image()
113
+ restored_img = restored_img[:, :, ::-1]
114
+
115
+ if original_resolution != restored_img.shape[0:2]:
116
+ restored_img = cv2.resize(restored_img, (0, 0), fx=original_resolution[1]/restored_img.shape[1], fy=original_resolution[0]/restored_img.shape[0], interpolation=cv2.INTER_LINEAR)
117
+
118
+ self.face_helper.clean_all()
119
+
120
+ if shared.opts.face_restoration_unload:
121
+ self.send_model_to(devices.cpu)
122
+
123
+ return restored_img
124
+
125
+ global codeformer
126
+ codeformer = FaceRestorerCodeFormer(dirname)
127
+ shared.face_restorers.append(codeformer)
128
+
129
+ except Exception:
130
+ errors.report("Error setting up CodeFormer", exc_info=True)
131
+
132
+ # sys.path = stored_sys_path
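A hedged sketch of driving the restorer registered above (the image path is a placeholder; this assumes the launcher has already fetched the CodeFormer and facelib repositories that setup_model imports from, and that the input is an HxWx3 uint8 numpy image as produced from a PIL image):

import numpy as np
from PIL import Image

import modules.codeformer_model as codeformer_model

codeformer_model.setup_model(None)   # None: fall back to the default model search path
img = np.array(Image.open("face.png").convert("RGB"))          # "face.png" is a placeholder
restored = codeformer_model.codeformer.restore(img, w=0.7)     # w is the CodeFormer fidelity weight in [0, 1]
Image.fromarray(restored).save("face_restored.png")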
modules/config_states.py ADDED
@@ -0,0 +1,199 @@
1
+ """
2
+ Supports saving and restoring webui and extensions from a known working set of commits
3
+ """
4
+
5
+ import os
6
+ import json
7
+ import time
8
+ import tqdm
9
+
10
+ from datetime import datetime
11
+ import git
12
+
13
+ from modules import shared, extensions, errors
14
+ from modules.paths_internal import script_path, config_states_dir
15
+
16
+ all_config_states = {}
17
+
18
+
19
+ def list_config_states():
20
+ global all_config_states
21
+
22
+ all_config_states.clear()
23
+ os.makedirs(config_states_dir, exist_ok=True)
24
+
25
+ config_states = []
26
+ for filename in os.listdir(config_states_dir):
27
+ if filename.endswith(".json"):
28
+ path = os.path.join(config_states_dir, filename)
29
+ try:
30
+ with open(path, "r", encoding="utf-8") as f:
31
+ j = json.load(f)
32
+ assert "created_at" in j, '"created_at" does not exist'
33
+ j["filepath"] = path
34
+ config_states.append(j)
35
+ except Exception as e:
36
+ print(f'[ERROR]: Config states {path}, {e}')
37
+
38
+ config_states = sorted(config_states, key=lambda cs: cs["created_at"], reverse=True)
39
+
40
+ for cs in config_states:
41
+ timestamp = time.asctime(time.gmtime(cs["created_at"]))
42
+ name = cs.get("name", "Config")
43
+ full_name = f"{name}: {timestamp}"
44
+ all_config_states[full_name] = cs
45
+
46
+ return all_config_states
47
+
48
+
49
+ def get_webui_config():
50
+ webui_repo = None
51
+
52
+ try:
53
+ if os.path.exists(os.path.join(script_path, ".git")):
54
+ webui_repo = git.Repo(script_path)
55
+ except Exception:
56
+ errors.report(f"Error reading webui git info from {script_path}", exc_info=True)
57
+
58
+ webui_remote = None
59
+ webui_commit_hash = None
60
+ webui_commit_date = None
61
+ webui_branch = None
62
+ if webui_repo and not webui_repo.bare:
63
+ try:
64
+ webui_remote = next(webui_repo.remote().urls, None)
65
+ head = webui_repo.head.commit
66
+ webui_commit_date = webui_repo.head.commit.committed_date
67
+ webui_commit_hash = head.hexsha
68
+ webui_branch = webui_repo.active_branch.name
69
+
70
+ except Exception:
71
+ webui_remote = None
72
+
73
+ return {
74
+ "remote": webui_remote,
75
+ "commit_hash": webui_commit_hash,
76
+ "commit_date": webui_commit_date,
77
+ "branch": webui_branch,
78
+ }
79
+
80
+
81
+ def get_extension_config():
82
+ ext_config = {}
83
+
84
+ for ext in extensions.extensions:
85
+ ext.read_info_from_repo()
86
+
87
+ entry = {
88
+ "name": ext.name,
89
+ "path": ext.path,
90
+ "enabled": ext.enabled,
91
+ "is_builtin": ext.is_builtin,
92
+ "remote": ext.remote,
93
+ "commit_hash": ext.commit_hash,
94
+ "commit_date": ext.commit_date,
95
+ "branch": ext.branch,
96
+ "have_info_from_repo": ext.have_info_from_repo
97
+ }
98
+
99
+ ext_config[ext.name] = entry
100
+
101
+ return ext_config
102
+
103
+
104
+ def get_config():
105
+ creation_time = datetime.now().timestamp()
106
+ webui_config = get_webui_config()
107
+ ext_config = get_extension_config()
108
+
109
+ return {
110
+ "created_at": creation_time,
111
+ "webui": webui_config,
112
+ "extensions": ext_config
113
+ }
114
+
115
+
116
+ def restore_webui_config(config):
117
+ print("* Restoring webui state...")
118
+
119
+ if "webui" not in config:
120
+ print("Error: No webui data saved to config")
121
+ return
122
+
123
+ webui_config = config["webui"]
124
+
125
+ if "commit_hash" not in webui_config:
126
+ print("Error: No commit saved to webui config")
127
+ return
128
+
129
+ webui_commit_hash = webui_config.get("commit_hash", None)
130
+ webui_repo = None
131
+
132
+ try:
133
+ if os.path.exists(os.path.join(script_path, ".git")):
134
+ webui_repo = git.Repo(script_path)
135
+ except Exception:
136
+ errors.report(f"Error reading webui git info from {script_path}", exc_info=True)
137
+ return
138
+
139
+ try:
140
+ webui_repo.git.fetch(all=True)
141
+ webui_repo.git.reset(webui_commit_hash, hard=True)
142
+ print(f"* Restored webui to commit {webui_commit_hash}.")
143
+ except Exception:
144
+ errors.report(f"Error restoring webui to commit{webui_commit_hash}")
145
+
146
+
147
+ def restore_extension_config(config):
148
+ print("* Restoring extension state...")
149
+
150
+ if "extensions" not in config:
151
+ print("Error: No extension data saved to config")
152
+ return
153
+
154
+ ext_config = config["extensions"]
155
+
156
+ results = []
157
+ disabled = []
158
+
159
+ for ext in tqdm.tqdm(extensions.extensions):
160
+ if ext.is_builtin:
161
+ continue
162
+
163
+ ext.read_info_from_repo()
164
+ current_commit = ext.commit_hash
165
+
166
+ if ext.name not in ext_config:
167
+ ext.disabled = True
168
+ disabled.append(ext.name)
169
+ results.append((ext, current_commit[:8], False, "Saved extension state not found in config, marking as disabled"))
170
+ continue
171
+
172
+ entry = ext_config[ext.name]
173
+
174
+ if "commit_hash" in entry and entry["commit_hash"]:
175
+ try:
176
+ ext.fetch_and_reset_hard(entry["commit_hash"])
177
+ ext.read_info_from_repo()
178
+ if current_commit != entry["commit_hash"]:
179
+ results.append((ext, current_commit[:8], True, entry["commit_hash"][:8]))
180
+ except Exception as ex:
181
+ results.append((ext, current_commit[:8], False, ex))
182
+ else:
183
+ results.append((ext, current_commit[:8], False, "No commit hash found in config"))
184
+
185
+ if not entry.get("enabled", False):
186
+ ext.disabled = True
187
+ disabled.append(ext.name)
188
+ else:
189
+ ext.disabled = False
190
+
191
+ shared.opts.disabled_extensions = disabled
192
+ shared.opts.save(shared.config_filename)
193
+
194
+ print("* Finished restoring extensions. Results:")
195
+ for ext, prev_commit, success, result in results:
196
+ if success:
197
+ print(f" + {ext.name}: {prev_commit} -> {result}")
198
+ else:
199
+ print(f" ! {ext.name}: FAILURE ({result})")
modules/deepbooru.py ADDED
@@ -0,0 +1,98 @@
1
+ import os
2
+ import re
3
+
4
+ import torch
5
+ import numpy as np
6
+
7
+ from modules import modelloader, paths, deepbooru_model, devices, images, shared
8
+
9
+ re_special = re.compile(r'([\\()])')
10
+
11
+
12
+ class DeepDanbooru:
13
+ def __init__(self):
14
+ self.model = None
15
+
16
+ def load(self):
17
+ if self.model is not None:
18
+ return
19
+
20
+ files = modelloader.load_models(
21
+ model_path=os.path.join(paths.models_path, "torch_deepdanbooru"),
22
+ model_url='https://github.com/AUTOMATIC1111/TorchDeepDanbooru/releases/download/v1/model-resnet_custom_v3.pt',
23
+ ext_filter=[".pt"],
24
+ download_name='model-resnet_custom_v3.pt',
25
+ )
26
+
27
+ self.model = deepbooru_model.DeepDanbooruModel()
28
+ self.model.load_state_dict(torch.load(files[0], map_location="cpu"))
29
+
30
+ self.model.eval()
31
+ self.model.to(devices.cpu, devices.dtype)
32
+
33
+ def start(self):
34
+ self.load()
35
+ self.model.to(devices.device)
36
+
37
+ def stop(self):
38
+ if not shared.opts.interrogate_keep_models_in_memory:
39
+ self.model.to(devices.cpu)
40
+ devices.torch_gc()
41
+
42
+ def tag(self, pil_image):
43
+ self.start()
44
+ res = self.tag_multi(pil_image)
45
+ self.stop()
46
+
47
+ return res
48
+
49
+ def tag_multi(self, pil_image, force_disable_ranks=False):
50
+ threshold = shared.opts.interrogate_deepbooru_score_threshold
51
+ use_spaces = shared.opts.deepbooru_use_spaces
52
+ use_escape = shared.opts.deepbooru_escape
53
+ alpha_sort = shared.opts.deepbooru_sort_alpha
54
+ include_ranks = shared.opts.interrogate_return_ranks and not force_disable_ranks
55
+
56
+ pic = images.resize_image(2, pil_image.convert("RGB"), 512, 512)
57
+ a = np.expand_dims(np.array(pic, dtype=np.float32), 0) / 255
58
+
59
+ with torch.no_grad(), devices.autocast():
60
+ x = torch.from_numpy(a).to(devices.device)
61
+ y = self.model(x)[0].detach().cpu().numpy()
62
+
63
+ probability_dict = {}
64
+
65
+ for tag, probability in zip(self.model.tags, y):
66
+ if probability < threshold:
67
+ continue
68
+
69
+ if tag.startswith("rating:"):
70
+ continue
71
+
72
+ probability_dict[tag] = probability
73
+
74
+ if alpha_sort:
75
+ tags = sorted(probability_dict)
76
+ else:
77
+ tags = [tag for tag, _ in sorted(probability_dict.items(), key=lambda x: -x[1])]
78
+
79
+ res = []
80
+
81
+ filtertags = {x.strip().replace(' ', '_') for x in shared.opts.deepbooru_filter_tags.split(",")}
82
+
83
+ for tag in [x for x in tags if x not in filtertags]:
84
+ probability = probability_dict[tag]
85
+ tag_outformat = tag
86
+ if use_spaces:
87
+ tag_outformat = tag_outformat.replace('_', ' ')
88
+ if use_escape:
89
+ tag_outformat = re.sub(re_special, r'\\\1', tag_outformat)
90
+ if include_ranks:
91
+ tag_outformat = f"({tag_outformat}:{probability:.3f})"
92
+
93
+ res.append(tag_outformat)
94
+
95
+ return ", ".join(res)
96
+
97
+
98
+ model = DeepDanbooru()
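
Note: a hedged usage sketch for the module-level instance above. Score threshold, tag formatting and unload behaviour come from shared.opts, so a configured webui environment is assumed; the file name is illustrative.

    from PIL import Image

    from modules import deepbooru

    pil_image = Image.open("illustration.png")

    # tag() wraps start()/tag_multi()/stop(): downloads and loads the model if needed,
    # moves it to the active device, runs inference, then optionally unloads it again.
    tags = deepbooru.model.tag(pil_image)
    print(tags)  # comma-separated tags, e.g. "1girl, solo, smile" (options-dependent)
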
modules/deepbooru_model.py ADDED
@@ -0,0 +1,678 @@
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+
5
+ from modules import devices
6
+
7
+ # see https://github.com/AUTOMATIC1111/TorchDeepDanbooru for more
8
+
9
+
10
+ class DeepDanbooruModel(nn.Module):
11
+ def __init__(self):
12
+ super(DeepDanbooruModel, self).__init__()
13
+
14
+ self.tags = []
15
+
16
+ self.n_Conv_0 = nn.Conv2d(kernel_size=(7, 7), in_channels=3, out_channels=64, stride=(2, 2))
17
+ self.n_MaxPool_0 = nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2))
18
+ self.n_Conv_1 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256)
19
+ self.n_Conv_2 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=64)
20
+ self.n_Conv_3 = nn.Conv2d(kernel_size=(3, 3), in_channels=64, out_channels=64)
21
+ self.n_Conv_4 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256)
22
+ self.n_Conv_5 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=64)
23
+ self.n_Conv_6 = nn.Conv2d(kernel_size=(3, 3), in_channels=64, out_channels=64)
24
+ self.n_Conv_7 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256)
25
+ self.n_Conv_8 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=64)
26
+ self.n_Conv_9 = nn.Conv2d(kernel_size=(3, 3), in_channels=64, out_channels=64)
27
+ self.n_Conv_10 = nn.Conv2d(kernel_size=(1, 1), in_channels=64, out_channels=256)
28
+ self.n_Conv_11 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=512, stride=(2, 2))
29
+ self.n_Conv_12 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=128)
30
+ self.n_Conv_13 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128, stride=(2, 2))
31
+ self.n_Conv_14 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512)
32
+ self.n_Conv_15 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128)
33
+ self.n_Conv_16 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128)
34
+ self.n_Conv_17 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512)
35
+ self.n_Conv_18 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128)
36
+ self.n_Conv_19 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128)
37
+ self.n_Conv_20 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512)
38
+ self.n_Conv_21 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128)
39
+ self.n_Conv_22 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128)
40
+ self.n_Conv_23 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512)
41
+ self.n_Conv_24 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128)
42
+ self.n_Conv_25 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128)
43
+ self.n_Conv_26 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512)
44
+ self.n_Conv_27 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128)
45
+ self.n_Conv_28 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128)
46
+ self.n_Conv_29 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512)
47
+ self.n_Conv_30 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128)
48
+ self.n_Conv_31 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128)
49
+ self.n_Conv_32 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512)
50
+ self.n_Conv_33 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=128)
51
+ self.n_Conv_34 = nn.Conv2d(kernel_size=(3, 3), in_channels=128, out_channels=128)
52
+ self.n_Conv_35 = nn.Conv2d(kernel_size=(1, 1), in_channels=128, out_channels=512)
53
+ self.n_Conv_36 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=1024, stride=(2, 2))
54
+ self.n_Conv_37 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=256)
55
+ self.n_Conv_38 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256, stride=(2, 2))
56
+ self.n_Conv_39 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
57
+ self.n_Conv_40 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
58
+ self.n_Conv_41 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
59
+ self.n_Conv_42 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
60
+ self.n_Conv_43 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
61
+ self.n_Conv_44 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
62
+ self.n_Conv_45 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
63
+ self.n_Conv_46 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
64
+ self.n_Conv_47 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
65
+ self.n_Conv_48 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
66
+ self.n_Conv_49 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
67
+ self.n_Conv_50 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
68
+ self.n_Conv_51 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
69
+ self.n_Conv_52 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
70
+ self.n_Conv_53 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
71
+ self.n_Conv_54 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
72
+ self.n_Conv_55 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
73
+ self.n_Conv_56 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
74
+ self.n_Conv_57 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
75
+ self.n_Conv_58 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
76
+ self.n_Conv_59 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
77
+ self.n_Conv_60 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
78
+ self.n_Conv_61 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
79
+ self.n_Conv_62 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
80
+ self.n_Conv_63 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
81
+ self.n_Conv_64 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
82
+ self.n_Conv_65 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
83
+ self.n_Conv_66 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
84
+ self.n_Conv_67 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
85
+ self.n_Conv_68 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
86
+ self.n_Conv_69 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
87
+ self.n_Conv_70 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
88
+ self.n_Conv_71 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
89
+ self.n_Conv_72 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
90
+ self.n_Conv_73 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
91
+ self.n_Conv_74 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
92
+ self.n_Conv_75 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
93
+ self.n_Conv_76 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
94
+ self.n_Conv_77 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
95
+ self.n_Conv_78 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
96
+ self.n_Conv_79 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
97
+ self.n_Conv_80 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
98
+ self.n_Conv_81 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
99
+ self.n_Conv_82 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
100
+ self.n_Conv_83 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
101
+ self.n_Conv_84 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
102
+ self.n_Conv_85 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
103
+ self.n_Conv_86 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
104
+ self.n_Conv_87 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
105
+ self.n_Conv_88 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
106
+ self.n_Conv_89 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
107
+ self.n_Conv_90 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
108
+ self.n_Conv_91 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
109
+ self.n_Conv_92 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
110
+ self.n_Conv_93 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
111
+ self.n_Conv_94 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
112
+ self.n_Conv_95 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
113
+ self.n_Conv_96 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
114
+ self.n_Conv_97 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
115
+ self.n_Conv_98 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256, stride=(2, 2))
116
+ self.n_Conv_99 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
117
+ self.n_Conv_100 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=1024, stride=(2, 2))
118
+ self.n_Conv_101 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
119
+ self.n_Conv_102 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
120
+ self.n_Conv_103 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
121
+ self.n_Conv_104 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
122
+ self.n_Conv_105 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
123
+ self.n_Conv_106 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
124
+ self.n_Conv_107 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
125
+ self.n_Conv_108 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
126
+ self.n_Conv_109 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
127
+ self.n_Conv_110 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
128
+ self.n_Conv_111 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
129
+ self.n_Conv_112 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
130
+ self.n_Conv_113 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
131
+ self.n_Conv_114 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
132
+ self.n_Conv_115 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
133
+ self.n_Conv_116 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
134
+ self.n_Conv_117 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
135
+ self.n_Conv_118 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
136
+ self.n_Conv_119 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
137
+ self.n_Conv_120 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
138
+ self.n_Conv_121 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
139
+ self.n_Conv_122 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
140
+ self.n_Conv_123 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
141
+ self.n_Conv_124 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
142
+ self.n_Conv_125 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
143
+ self.n_Conv_126 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
144
+ self.n_Conv_127 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
145
+ self.n_Conv_128 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
146
+ self.n_Conv_129 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
147
+ self.n_Conv_130 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
148
+ self.n_Conv_131 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
149
+ self.n_Conv_132 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
150
+ self.n_Conv_133 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
151
+ self.n_Conv_134 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
152
+ self.n_Conv_135 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
153
+ self.n_Conv_136 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
154
+ self.n_Conv_137 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
155
+ self.n_Conv_138 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
156
+ self.n_Conv_139 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
157
+ self.n_Conv_140 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
158
+ self.n_Conv_141 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
159
+ self.n_Conv_142 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
160
+ self.n_Conv_143 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
161
+ self.n_Conv_144 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
162
+ self.n_Conv_145 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
163
+ self.n_Conv_146 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
164
+ self.n_Conv_147 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
165
+ self.n_Conv_148 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
166
+ self.n_Conv_149 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
167
+ self.n_Conv_150 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
168
+ self.n_Conv_151 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
169
+ self.n_Conv_152 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
170
+ self.n_Conv_153 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
171
+ self.n_Conv_154 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
172
+ self.n_Conv_155 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=256)
173
+ self.n_Conv_156 = nn.Conv2d(kernel_size=(3, 3), in_channels=256, out_channels=256)
174
+ self.n_Conv_157 = nn.Conv2d(kernel_size=(1, 1), in_channels=256, out_channels=1024)
175
+ self.n_Conv_158 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=2048, stride=(2, 2))
176
+ self.n_Conv_159 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=512)
177
+ self.n_Conv_160 = nn.Conv2d(kernel_size=(3, 3), in_channels=512, out_channels=512, stride=(2, 2))
178
+ self.n_Conv_161 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=2048)
179
+ self.n_Conv_162 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=512)
180
+ self.n_Conv_163 = nn.Conv2d(kernel_size=(3, 3), in_channels=512, out_channels=512)
181
+ self.n_Conv_164 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=2048)
182
+ self.n_Conv_165 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=512)
183
+ self.n_Conv_166 = nn.Conv2d(kernel_size=(3, 3), in_channels=512, out_channels=512)
184
+ self.n_Conv_167 = nn.Conv2d(kernel_size=(1, 1), in_channels=512, out_channels=2048)
185
+ self.n_Conv_168 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=4096, stride=(2, 2))
186
+ self.n_Conv_169 = nn.Conv2d(kernel_size=(1, 1), in_channels=2048, out_channels=1024)
187
+ self.n_Conv_170 = nn.Conv2d(kernel_size=(3, 3), in_channels=1024, out_channels=1024, stride=(2, 2))
188
+ self.n_Conv_171 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=4096)
189
+ self.n_Conv_172 = nn.Conv2d(kernel_size=(1, 1), in_channels=4096, out_channels=1024)
190
+ self.n_Conv_173 = nn.Conv2d(kernel_size=(3, 3), in_channels=1024, out_channels=1024)
191
+ self.n_Conv_174 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=4096)
192
+ self.n_Conv_175 = nn.Conv2d(kernel_size=(1, 1), in_channels=4096, out_channels=1024)
193
+ self.n_Conv_176 = nn.Conv2d(kernel_size=(3, 3), in_channels=1024, out_channels=1024)
194
+ self.n_Conv_177 = nn.Conv2d(kernel_size=(1, 1), in_channels=1024, out_channels=4096)
195
+ self.n_Conv_178 = nn.Conv2d(kernel_size=(1, 1), in_channels=4096, out_channels=9176, bias=False)
196
+
197
+ def forward(self, *inputs):
198
+ t_358, = inputs
199
+ t_359 = t_358.permute(*[0, 3, 1, 2])
200
+ t_359_padded = F.pad(t_359, [2, 3, 2, 3], value=0)
201
+ t_360 = self.n_Conv_0(t_359_padded.to(self.n_Conv_0.bias.dtype) if devices.unet_needs_upcast else t_359_padded)
202
+ t_361 = F.relu(t_360)
203
+ t_361 = F.pad(t_361, [0, 1, 0, 1], value=float('-inf'))
204
+ t_362 = self.n_MaxPool_0(t_361)
205
+ t_363 = self.n_Conv_1(t_362)
206
+ t_364 = self.n_Conv_2(t_362)
207
+ t_365 = F.relu(t_364)
208
+ t_365_padded = F.pad(t_365, [1, 1, 1, 1], value=0)
209
+ t_366 = self.n_Conv_3(t_365_padded)
210
+ t_367 = F.relu(t_366)
211
+ t_368 = self.n_Conv_4(t_367)
212
+ t_369 = torch.add(t_368, t_363)
213
+ t_370 = F.relu(t_369)
214
+ t_371 = self.n_Conv_5(t_370)
215
+ t_372 = F.relu(t_371)
216
+ t_372_padded = F.pad(t_372, [1, 1, 1, 1], value=0)
217
+ t_373 = self.n_Conv_6(t_372_padded)
218
+ t_374 = F.relu(t_373)
219
+ t_375 = self.n_Conv_7(t_374)
220
+ t_376 = torch.add(t_375, t_370)
221
+ t_377 = F.relu(t_376)
222
+ t_378 = self.n_Conv_8(t_377)
223
+ t_379 = F.relu(t_378)
224
+ t_379_padded = F.pad(t_379, [1, 1, 1, 1], value=0)
225
+ t_380 = self.n_Conv_9(t_379_padded)
226
+ t_381 = F.relu(t_380)
227
+ t_382 = self.n_Conv_10(t_381)
228
+ t_383 = torch.add(t_382, t_377)
229
+ t_384 = F.relu(t_383)
230
+ t_385 = self.n_Conv_11(t_384)
231
+ t_386 = self.n_Conv_12(t_384)
232
+ t_387 = F.relu(t_386)
233
+ t_387_padded = F.pad(t_387, [0, 1, 0, 1], value=0)
234
+ t_388 = self.n_Conv_13(t_387_padded)
235
+ t_389 = F.relu(t_388)
236
+ t_390 = self.n_Conv_14(t_389)
237
+ t_391 = torch.add(t_390, t_385)
238
+ t_392 = F.relu(t_391)
239
+ t_393 = self.n_Conv_15(t_392)
240
+ t_394 = F.relu(t_393)
241
+ t_394_padded = F.pad(t_394, [1, 1, 1, 1], value=0)
242
+ t_395 = self.n_Conv_16(t_394_padded)
243
+ t_396 = F.relu(t_395)
244
+ t_397 = self.n_Conv_17(t_396)
245
+ t_398 = torch.add(t_397, t_392)
246
+ t_399 = F.relu(t_398)
247
+ t_400 = self.n_Conv_18(t_399)
248
+ t_401 = F.relu(t_400)
249
+ t_401_padded = F.pad(t_401, [1, 1, 1, 1], value=0)
250
+ t_402 = self.n_Conv_19(t_401_padded)
251
+ t_403 = F.relu(t_402)
252
+ t_404 = self.n_Conv_20(t_403)
253
+ t_405 = torch.add(t_404, t_399)
254
+ t_406 = F.relu(t_405)
255
+ t_407 = self.n_Conv_21(t_406)
256
+ t_408 = F.relu(t_407)
257
+ t_408_padded = F.pad(t_408, [1, 1, 1, 1], value=0)
258
+ t_409 = self.n_Conv_22(t_408_padded)
259
+ t_410 = F.relu(t_409)
260
+ t_411 = self.n_Conv_23(t_410)
261
+ t_412 = torch.add(t_411, t_406)
262
+ t_413 = F.relu(t_412)
263
+ t_414 = self.n_Conv_24(t_413)
264
+ t_415 = F.relu(t_414)
265
+ t_415_padded = F.pad(t_415, [1, 1, 1, 1], value=0)
266
+ t_416 = self.n_Conv_25(t_415_padded)
267
+ t_417 = F.relu(t_416)
268
+ t_418 = self.n_Conv_26(t_417)
269
+ t_419 = torch.add(t_418, t_413)
270
+ t_420 = F.relu(t_419)
271
+ t_421 = self.n_Conv_27(t_420)
272
+ t_422 = F.relu(t_421)
273
+ t_422_padded = F.pad(t_422, [1, 1, 1, 1], value=0)
274
+ t_423 = self.n_Conv_28(t_422_padded)
275
+ t_424 = F.relu(t_423)
276
+ t_425 = self.n_Conv_29(t_424)
277
+ t_426 = torch.add(t_425, t_420)
278
+ t_427 = F.relu(t_426)
279
+ t_428 = self.n_Conv_30(t_427)
280
+ t_429 = F.relu(t_428)
281
+ t_429_padded = F.pad(t_429, [1, 1, 1, 1], value=0)
282
+ t_430 = self.n_Conv_31(t_429_padded)
283
+ t_431 = F.relu(t_430)
284
+ t_432 = self.n_Conv_32(t_431)
285
+ t_433 = torch.add(t_432, t_427)
286
+ t_434 = F.relu(t_433)
287
+ t_435 = self.n_Conv_33(t_434)
288
+ t_436 = F.relu(t_435)
289
+ t_436_padded = F.pad(t_436, [1, 1, 1, 1], value=0)
290
+ t_437 = self.n_Conv_34(t_436_padded)
291
+ t_438 = F.relu(t_437)
292
+ t_439 = self.n_Conv_35(t_438)
293
+ t_440 = torch.add(t_439, t_434)
294
+ t_441 = F.relu(t_440)
295
+ t_442 = self.n_Conv_36(t_441)
296
+ t_443 = self.n_Conv_37(t_441)
297
+ t_444 = F.relu(t_443)
298
+ t_444_padded = F.pad(t_444, [0, 1, 0, 1], value=0)
299
+ t_445 = self.n_Conv_38(t_444_padded)
300
+ t_446 = F.relu(t_445)
301
+ t_447 = self.n_Conv_39(t_446)
302
+ t_448 = torch.add(t_447, t_442)
303
+ t_449 = F.relu(t_448)
304
+ t_450 = self.n_Conv_40(t_449)
305
+ t_451 = F.relu(t_450)
306
+ t_451_padded = F.pad(t_451, [1, 1, 1, 1], value=0)
307
+ t_452 = self.n_Conv_41(t_451_padded)
308
+ t_453 = F.relu(t_452)
309
+ t_454 = self.n_Conv_42(t_453)
310
+ t_455 = torch.add(t_454, t_449)
311
+ t_456 = F.relu(t_455)
312
+ t_457 = self.n_Conv_43(t_456)
313
+ t_458 = F.relu(t_457)
314
+ t_458_padded = F.pad(t_458, [1, 1, 1, 1], value=0)
315
+ t_459 = self.n_Conv_44(t_458_padded)
316
+ t_460 = F.relu(t_459)
317
+ t_461 = self.n_Conv_45(t_460)
318
+ t_462 = torch.add(t_461, t_456)
319
+ t_463 = F.relu(t_462)
320
+ t_464 = self.n_Conv_46(t_463)
321
+ t_465 = F.relu(t_464)
322
+ t_465_padded = F.pad(t_465, [1, 1, 1, 1], value=0)
323
+ t_466 = self.n_Conv_47(t_465_padded)
324
+ t_467 = F.relu(t_466)
325
+ t_468 = self.n_Conv_48(t_467)
326
+ t_469 = torch.add(t_468, t_463)
327
+ t_470 = F.relu(t_469)
328
+ t_471 = self.n_Conv_49(t_470)
329
+ t_472 = F.relu(t_471)
330
+ t_472_padded = F.pad(t_472, [1, 1, 1, 1], value=0)
331
+ t_473 = self.n_Conv_50(t_472_padded)
332
+ t_474 = F.relu(t_473)
333
+ t_475 = self.n_Conv_51(t_474)
334
+ t_476 = torch.add(t_475, t_470)
335
+ t_477 = F.relu(t_476)
336
+ t_478 = self.n_Conv_52(t_477)
337
+ t_479 = F.relu(t_478)
338
+ t_479_padded = F.pad(t_479, [1, 1, 1, 1], value=0)
339
+ t_480 = self.n_Conv_53(t_479_padded)
340
+ t_481 = F.relu(t_480)
341
+ t_482 = self.n_Conv_54(t_481)
342
+ t_483 = torch.add(t_482, t_477)
343
+ t_484 = F.relu(t_483)
344
+ t_485 = self.n_Conv_55(t_484)
345
+ t_486 = F.relu(t_485)
346
+ t_486_padded = F.pad(t_486, [1, 1, 1, 1], value=0)
347
+ t_487 = self.n_Conv_56(t_486_padded)
348
+ t_488 = F.relu(t_487)
349
+ t_489 = self.n_Conv_57(t_488)
350
+ t_490 = torch.add(t_489, t_484)
351
+ t_491 = F.relu(t_490)
352
+ t_492 = self.n_Conv_58(t_491)
353
+ t_493 = F.relu(t_492)
354
+ t_493_padded = F.pad(t_493, [1, 1, 1, 1], value=0)
355
+ t_494 = self.n_Conv_59(t_493_padded)
356
+ t_495 = F.relu(t_494)
357
+ t_496 = self.n_Conv_60(t_495)
358
+ t_497 = torch.add(t_496, t_491)
359
+ t_498 = F.relu(t_497)
360
+ t_499 = self.n_Conv_61(t_498)
361
+ t_500 = F.relu(t_499)
362
+ t_500_padded = F.pad(t_500, [1, 1, 1, 1], value=0)
363
+ t_501 = self.n_Conv_62(t_500_padded)
364
+ t_502 = F.relu(t_501)
365
+ t_503 = self.n_Conv_63(t_502)
366
+ t_504 = torch.add(t_503, t_498)
367
+ t_505 = F.relu(t_504)
368
+ t_506 = self.n_Conv_64(t_505)
369
+ t_507 = F.relu(t_506)
370
+ t_507_padded = F.pad(t_507, [1, 1, 1, 1], value=0)
371
+ t_508 = self.n_Conv_65(t_507_padded)
372
+ t_509 = F.relu(t_508)
373
+ t_510 = self.n_Conv_66(t_509)
374
+ t_511 = torch.add(t_510, t_505)
375
+ t_512 = F.relu(t_511)
376
+ t_513 = self.n_Conv_67(t_512)
377
+ t_514 = F.relu(t_513)
378
+ t_514_padded = F.pad(t_514, [1, 1, 1, 1], value=0)
379
+ t_515 = self.n_Conv_68(t_514_padded)
380
+ t_516 = F.relu(t_515)
381
+ t_517 = self.n_Conv_69(t_516)
382
+ t_518 = torch.add(t_517, t_512)
383
+ t_519 = F.relu(t_518)
384
+ t_520 = self.n_Conv_70(t_519)
385
+ t_521 = F.relu(t_520)
386
+ t_521_padded = F.pad(t_521, [1, 1, 1, 1], value=0)
387
+ t_522 = self.n_Conv_71(t_521_padded)
388
+ t_523 = F.relu(t_522)
389
+ t_524 = self.n_Conv_72(t_523)
390
+ t_525 = torch.add(t_524, t_519)
391
+ t_526 = F.relu(t_525)
392
+ t_527 = self.n_Conv_73(t_526)
393
+ t_528 = F.relu(t_527)
394
+ t_528_padded = F.pad(t_528, [1, 1, 1, 1], value=0)
395
+ t_529 = self.n_Conv_74(t_528_padded)
396
+ t_530 = F.relu(t_529)
397
+ t_531 = self.n_Conv_75(t_530)
398
+ t_532 = torch.add(t_531, t_526)
399
+ t_533 = F.relu(t_532)
400
+ t_534 = self.n_Conv_76(t_533)
401
+ t_535 = F.relu(t_534)
402
+ t_535_padded = F.pad(t_535, [1, 1, 1, 1], value=0)
403
+ t_536 = self.n_Conv_77(t_535_padded)
404
+ t_537 = F.relu(t_536)
405
+ t_538 = self.n_Conv_78(t_537)
406
+ t_539 = torch.add(t_538, t_533)
407
+ t_540 = F.relu(t_539)
408
+ t_541 = self.n_Conv_79(t_540)
409
+ t_542 = F.relu(t_541)
410
+ t_542_padded = F.pad(t_542, [1, 1, 1, 1], value=0)
411
+ t_543 = self.n_Conv_80(t_542_padded)
412
+ t_544 = F.relu(t_543)
413
+ t_545 = self.n_Conv_81(t_544)
414
+ t_546 = torch.add(t_545, t_540)
415
+ t_547 = F.relu(t_546)
416
+ t_548 = self.n_Conv_82(t_547)
417
+ t_549 = F.relu(t_548)
418
+ t_549_padded = F.pad(t_549, [1, 1, 1, 1], value=0)
419
+ t_550 = self.n_Conv_83(t_549_padded)
420
+ t_551 = F.relu(t_550)
421
+ t_552 = self.n_Conv_84(t_551)
422
+ t_553 = torch.add(t_552, t_547)
423
+ t_554 = F.relu(t_553)
424
+ t_555 = self.n_Conv_85(t_554)
425
+ t_556 = F.relu(t_555)
426
+ t_556_padded = F.pad(t_556, [1, 1, 1, 1], value=0)
427
+ t_557 = self.n_Conv_86(t_556_padded)
428
+ t_558 = F.relu(t_557)
429
+ t_559 = self.n_Conv_87(t_558)
430
+ t_560 = torch.add(t_559, t_554)
431
+ t_561 = F.relu(t_560)
432
+ t_562 = self.n_Conv_88(t_561)
433
+ t_563 = F.relu(t_562)
434
+ t_563_padded = F.pad(t_563, [1, 1, 1, 1], value=0)
435
+ t_564 = self.n_Conv_89(t_563_padded)
436
+ t_565 = F.relu(t_564)
437
+ t_566 = self.n_Conv_90(t_565)
438
+ t_567 = torch.add(t_566, t_561)
439
+ t_568 = F.relu(t_567)
440
+ t_569 = self.n_Conv_91(t_568)
441
+ t_570 = F.relu(t_569)
442
+ t_570_padded = F.pad(t_570, [1, 1, 1, 1], value=0)
443
+ t_571 = self.n_Conv_92(t_570_padded)
444
+ t_572 = F.relu(t_571)
445
+ t_573 = self.n_Conv_93(t_572)
446
+ t_574 = torch.add(t_573, t_568)
447
+ t_575 = F.relu(t_574)
448
+ t_576 = self.n_Conv_94(t_575)
449
+ t_577 = F.relu(t_576)
450
+ t_577_padded = F.pad(t_577, [1, 1, 1, 1], value=0)
451
+ t_578 = self.n_Conv_95(t_577_padded)
452
+ t_579 = F.relu(t_578)
453
+ t_580 = self.n_Conv_96(t_579)
454
+ t_581 = torch.add(t_580, t_575)
455
+ t_582 = F.relu(t_581)
456
+ t_583 = self.n_Conv_97(t_582)
457
+ t_584 = F.relu(t_583)
458
+ t_584_padded = F.pad(t_584, [0, 1, 0, 1], value=0)
459
+ t_585 = self.n_Conv_98(t_584_padded)
460
+ t_586 = F.relu(t_585)
461
+ t_587 = self.n_Conv_99(t_586)
462
+ t_588 = self.n_Conv_100(t_582)
463
+ t_589 = torch.add(t_587, t_588)
464
+ t_590 = F.relu(t_589)
465
+ t_591 = self.n_Conv_101(t_590)
466
+ t_592 = F.relu(t_591)
467
+ t_592_padded = F.pad(t_592, [1, 1, 1, 1], value=0)
468
+ t_593 = self.n_Conv_102(t_592_padded)
469
+ t_594 = F.relu(t_593)
470
+ t_595 = self.n_Conv_103(t_594)
471
+ t_596 = torch.add(t_595, t_590)
472
+ t_597 = F.relu(t_596)
473
+ t_598 = self.n_Conv_104(t_597)
474
+ t_599 = F.relu(t_598)
475
+ t_599_padded = F.pad(t_599, [1, 1, 1, 1], value=0)
476
+ t_600 = self.n_Conv_105(t_599_padded)
477
+ t_601 = F.relu(t_600)
478
+ t_602 = self.n_Conv_106(t_601)
479
+ t_603 = torch.add(t_602, t_597)
480
+ t_604 = F.relu(t_603)
481
+ t_605 = self.n_Conv_107(t_604)
482
+ t_606 = F.relu(t_605)
483
+ t_606_padded = F.pad(t_606, [1, 1, 1, 1], value=0)
484
+ t_607 = self.n_Conv_108(t_606_padded)
485
+ t_608 = F.relu(t_607)
486
+ t_609 = self.n_Conv_109(t_608)
487
+ t_610 = torch.add(t_609, t_604)
488
+ t_611 = F.relu(t_610)
489
+ t_612 = self.n_Conv_110(t_611)
490
+ t_613 = F.relu(t_612)
491
+ t_613_padded = F.pad(t_613, [1, 1, 1, 1], value=0)
492
+ t_614 = self.n_Conv_111(t_613_padded)
493
+ t_615 = F.relu(t_614)
494
+ t_616 = self.n_Conv_112(t_615)
495
+ t_617 = torch.add(t_616, t_611)
496
+ t_618 = F.relu(t_617)
497
+ t_619 = self.n_Conv_113(t_618)
498
+ t_620 = F.relu(t_619)
499
+ t_620_padded = F.pad(t_620, [1, 1, 1, 1], value=0)
500
+ t_621 = self.n_Conv_114(t_620_padded)
501
+ t_622 = F.relu(t_621)
502
+ t_623 = self.n_Conv_115(t_622)
503
+ t_624 = torch.add(t_623, t_618)
504
+ t_625 = F.relu(t_624)
505
+ t_626 = self.n_Conv_116(t_625)
506
+ t_627 = F.relu(t_626)
507
+ t_627_padded = F.pad(t_627, [1, 1, 1, 1], value=0)
508
+ t_628 = self.n_Conv_117(t_627_padded)
509
+ t_629 = F.relu(t_628)
510
+ t_630 = self.n_Conv_118(t_629)
511
+ t_631 = torch.add(t_630, t_625)
512
+ t_632 = F.relu(t_631)
513
+ t_633 = self.n_Conv_119(t_632)
514
+ t_634 = F.relu(t_633)
515
+ t_634_padded = F.pad(t_634, [1, 1, 1, 1], value=0)
516
+ t_635 = self.n_Conv_120(t_634_padded)
517
+ t_636 = F.relu(t_635)
518
+ t_637 = self.n_Conv_121(t_636)
519
+ t_638 = torch.add(t_637, t_632)
520
+ t_639 = F.relu(t_638)
521
+ t_640 = self.n_Conv_122(t_639)
522
+ t_641 = F.relu(t_640)
523
+ t_641_padded = F.pad(t_641, [1, 1, 1, 1], value=0)
524
+ t_642 = self.n_Conv_123(t_641_padded)
525
+ t_643 = F.relu(t_642)
526
+ t_644 = self.n_Conv_124(t_643)
527
+ t_645 = torch.add(t_644, t_639)
528
+ t_646 = F.relu(t_645)
529
+ t_647 = self.n_Conv_125(t_646)
530
+ t_648 = F.relu(t_647)
531
+ t_648_padded = F.pad(t_648, [1, 1, 1, 1], value=0)
532
+ t_649 = self.n_Conv_126(t_648_padded)
533
+ t_650 = F.relu(t_649)
534
+ t_651 = self.n_Conv_127(t_650)
535
+ t_652 = torch.add(t_651, t_646)
536
+ t_653 = F.relu(t_652)
537
+ t_654 = self.n_Conv_128(t_653)
538
+ t_655 = F.relu(t_654)
539
+ t_655_padded = F.pad(t_655, [1, 1, 1, 1], value=0)
540
+ t_656 = self.n_Conv_129(t_655_padded)
541
+ t_657 = F.relu(t_656)
542
+ t_658 = self.n_Conv_130(t_657)
543
+ t_659 = torch.add(t_658, t_653)
544
+ t_660 = F.relu(t_659)
545
+ t_661 = self.n_Conv_131(t_660)
546
+ t_662 = F.relu(t_661)
547
+ t_662_padded = F.pad(t_662, [1, 1, 1, 1], value=0)
548
+ t_663 = self.n_Conv_132(t_662_padded)
549
+ t_664 = F.relu(t_663)
550
+ t_665 = self.n_Conv_133(t_664)
551
+ t_666 = torch.add(t_665, t_660)
552
+ t_667 = F.relu(t_666)
553
+ t_668 = self.n_Conv_134(t_667)
554
+ t_669 = F.relu(t_668)
555
+ t_669_padded = F.pad(t_669, [1, 1, 1, 1], value=0)
556
+ t_670 = self.n_Conv_135(t_669_padded)
557
+ t_671 = F.relu(t_670)
558
+ t_672 = self.n_Conv_136(t_671)
559
+ t_673 = torch.add(t_672, t_667)
560
+ t_674 = F.relu(t_673)
561
+ t_675 = self.n_Conv_137(t_674)
562
+ t_676 = F.relu(t_675)
563
+ t_676_padded = F.pad(t_676, [1, 1, 1, 1], value=0)
564
+ t_677 = self.n_Conv_138(t_676_padded)
565
+ t_678 = F.relu(t_677)
566
+ t_679 = self.n_Conv_139(t_678)
567
+ t_680 = torch.add(t_679, t_674)
568
+ t_681 = F.relu(t_680)
569
+ t_682 = self.n_Conv_140(t_681)
570
+ t_683 = F.relu(t_682)
571
+ t_683_padded = F.pad(t_683, [1, 1, 1, 1], value=0)
572
+ t_684 = self.n_Conv_141(t_683_padded)
573
+ t_685 = F.relu(t_684)
574
+ t_686 = self.n_Conv_142(t_685)
575
+ t_687 = torch.add(t_686, t_681)
576
+ t_688 = F.relu(t_687)
577
+ t_689 = self.n_Conv_143(t_688)
578
+ t_690 = F.relu(t_689)
579
+ t_690_padded = F.pad(t_690, [1, 1, 1, 1], value=0)
580
+ t_691 = self.n_Conv_144(t_690_padded)
581
+ t_692 = F.relu(t_691)
582
+ t_693 = self.n_Conv_145(t_692)
583
+ t_694 = torch.add(t_693, t_688)
584
+ t_695 = F.relu(t_694)
585
+ t_696 = self.n_Conv_146(t_695)
586
+ t_697 = F.relu(t_696)
587
+ t_697_padded = F.pad(t_697, [1, 1, 1, 1], value=0)
588
+ t_698 = self.n_Conv_147(t_697_padded)
589
+ t_699 = F.relu(t_698)
590
+ t_700 = self.n_Conv_148(t_699)
591
+ t_701 = torch.add(t_700, t_695)
592
+ t_702 = F.relu(t_701)
593
+ t_703 = self.n_Conv_149(t_702)
594
+ t_704 = F.relu(t_703)
595
+ t_704_padded = F.pad(t_704, [1, 1, 1, 1], value=0)
596
+ t_705 = self.n_Conv_150(t_704_padded)
597
+ t_706 = F.relu(t_705)
598
+ t_707 = self.n_Conv_151(t_706)
599
+ t_708 = torch.add(t_707, t_702)
600
+ t_709 = F.relu(t_708)
601
+ t_710 = self.n_Conv_152(t_709)
602
+ t_711 = F.relu(t_710)
603
+ t_711_padded = F.pad(t_711, [1, 1, 1, 1], value=0)
604
+ t_712 = self.n_Conv_153(t_711_padded)
605
+ t_713 = F.relu(t_712)
606
+ t_714 = self.n_Conv_154(t_713)
607
+ t_715 = torch.add(t_714, t_709)
608
+ t_716 = F.relu(t_715)
609
+ t_717 = self.n_Conv_155(t_716)
610
+ t_718 = F.relu(t_717)
611
+ t_718_padded = F.pad(t_718, [1, 1, 1, 1], value=0)
612
+ t_719 = self.n_Conv_156(t_718_padded)
613
+ t_720 = F.relu(t_719)
614
+ t_721 = self.n_Conv_157(t_720)
615
+ t_722 = torch.add(t_721, t_716)
616
+ t_723 = F.relu(t_722)
617
+ t_724 = self.n_Conv_158(t_723)
618
+ t_725 = self.n_Conv_159(t_723)
619
+ t_726 = F.relu(t_725)
620
+ t_726_padded = F.pad(t_726, [0, 1, 0, 1], value=0)
621
+ t_727 = self.n_Conv_160(t_726_padded)
622
+ t_728 = F.relu(t_727)
623
+ t_729 = self.n_Conv_161(t_728)
624
+ t_730 = torch.add(t_729, t_724)
625
+ t_731 = F.relu(t_730)
626
+ t_732 = self.n_Conv_162(t_731)
627
+ t_733 = F.relu(t_732)
628
+ t_733_padded = F.pad(t_733, [1, 1, 1, 1], value=0)
629
+ t_734 = self.n_Conv_163(t_733_padded)
630
+ t_735 = F.relu(t_734)
631
+ t_736 = self.n_Conv_164(t_735)
632
+ t_737 = torch.add(t_736, t_731)
633
+ t_738 = F.relu(t_737)
634
+ t_739 = self.n_Conv_165(t_738)
635
+ t_740 = F.relu(t_739)
636
+ t_740_padded = F.pad(t_740, [1, 1, 1, 1], value=0)
637
+ t_741 = self.n_Conv_166(t_740_padded)
638
+ t_742 = F.relu(t_741)
639
+ t_743 = self.n_Conv_167(t_742)
640
+ t_744 = torch.add(t_743, t_738)
641
+ t_745 = F.relu(t_744)
642
+ t_746 = self.n_Conv_168(t_745)
643
+ t_747 = self.n_Conv_169(t_745)
644
+ t_748 = F.relu(t_747)
645
+ t_748_padded = F.pad(t_748, [0, 1, 0, 1], value=0)
646
+ t_749 = self.n_Conv_170(t_748_padded)
647
+ t_750 = F.relu(t_749)
648
+ t_751 = self.n_Conv_171(t_750)
649
+ t_752 = torch.add(t_751, t_746)
650
+ t_753 = F.relu(t_752)
651
+ t_754 = self.n_Conv_172(t_753)
652
+ t_755 = F.relu(t_754)
653
+ t_755_padded = F.pad(t_755, [1, 1, 1, 1], value=0)
654
+ t_756 = self.n_Conv_173(t_755_padded)
655
+ t_757 = F.relu(t_756)
656
+ t_758 = self.n_Conv_174(t_757)
657
+ t_759 = torch.add(t_758, t_753)
658
+ t_760 = F.relu(t_759)
659
+ t_761 = self.n_Conv_175(t_760)
660
+ t_762 = F.relu(t_761)
661
+ t_762_padded = F.pad(t_762, [1, 1, 1, 1], value=0)
662
+ t_763 = self.n_Conv_176(t_762_padded)
663
+ t_764 = F.relu(t_763)
664
+ t_765 = self.n_Conv_177(t_764)
665
+ t_766 = torch.add(t_765, t_760)
666
+ t_767 = F.relu(t_766)
667
+ t_768 = self.n_Conv_178(t_767)
668
+ t_769 = F.avg_pool2d(t_768, kernel_size=t_768.shape[-2:])
669
+ t_770 = torch.squeeze(t_769, 3)
670
+ t_770 = torch.squeeze(t_770, 2)
671
+ t_771 = torch.sigmoid(t_770)
672
+ return t_771
673
+
674
+ def load_state_dict(self, state_dict, **kwargs):
675
+ self.tags = state_dict.get('tags', [])
676
+
677
+ super(DeepDanbooruModel, self).load_state_dict({k: v for k, v in state_dict.items() if k != 'tags'})
678
+
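
Note: the forward pass takes NHWC float input in [0, 1] (it permutes to NCHW itself) and returns one sigmoid probability per entry in model.tags, which the load_state_dict() override restores from the checkpoint's 'tags' key. A hedged sketch of running the network directly, assuming the model-resnet_custom_v3.pt checkpoint and an importable modules package:

    import numpy as np
    import torch
    from PIL import Image

    from modules.deepbooru_model import DeepDanbooruModel

    model = DeepDanbooruModel()
    model.load_state_dict(torch.load("model-resnet_custom_v3.pt", map_location="cpu"))
    model.eval()

    pic = Image.open("illustration.png").convert("RGB").resize((512, 512))
    x = torch.from_numpy(np.expand_dims(np.array(pic, dtype=np.float32), 0) / 255)  # NHWC

    with torch.no_grad():
        probabilities = model(x)[0]

    for tag, probability in zip(model.tags, probabilities):
        if probability >= 0.5:  # illustrative threshold
            print(f"{tag} {probability:.3f}")
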
modules/devices.py ADDED
@@ -0,0 +1,153 @@
1
+ import sys
2
+ import contextlib
3
+ from functools import lru_cache
4
+
5
+ import torch
6
+ from modules import errors, shared
7
+
8
+ if sys.platform == "darwin":
9
+ from modules import mac_specific
10
+
11
+
12
+ def has_mps() -> bool:
13
+ if sys.platform != "darwin":
14
+ return False
15
+ else:
16
+ return mac_specific.has_mps
17
+
18
+
19
+ def get_cuda_device_string():
20
+ if shared.cmd_opts.device_id is not None:
21
+ return f"cuda:{shared.cmd_opts.device_id}"
22
+
23
+ return "cuda"
24
+
25
+
26
+ def get_optimal_device_name():
27
+ if torch.cuda.is_available():
28
+ return get_cuda_device_string()
29
+
30
+ if has_mps():
31
+ return "mps"
32
+
33
+ return "cpu"
34
+
35
+
36
+ def get_optimal_device():
37
+ return torch.device(get_optimal_device_name())
38
+
39
+
40
+ def get_device_for(task):
41
+ if task in shared.cmd_opts.use_cpu:
42
+ return cpu
43
+
44
+ return get_optimal_device()
45
+
46
+
47
+ def torch_gc():
48
+
49
+ if torch.cuda.is_available():
50
+ with torch.cuda.device(get_cuda_device_string()):
51
+ torch.cuda.empty_cache()
52
+ torch.cuda.ipc_collect()
53
+
54
+ if has_mps():
55
+ mac_specific.torch_mps_gc()
56
+
57
+
58
+ def enable_tf32():
59
+ if torch.cuda.is_available():
60
+
61
+ # enabling benchmark option seems to enable a range of cards to do fp16 when they otherwise can't
62
+ # see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407
63
+ if any(torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())):
64
+ torch.backends.cudnn.benchmark = True
65
+
66
+ torch.backends.cuda.matmul.allow_tf32 = True
67
+ torch.backends.cudnn.allow_tf32 = True
68
+
69
+
70
+ errors.run(enable_tf32, "Enabling TF32")
71
+
72
+ cpu: torch.device = torch.device("cpu")
73
+ device: torch.device = None
74
+ device_interrogate: torch.device = None
75
+ device_gfpgan: torch.device = None
76
+ device_esrgan: torch.device = None
77
+ device_codeformer: torch.device = None
78
+ dtype: torch.dtype = torch.float16
79
+ dtype_vae: torch.dtype = torch.float16
80
+ dtype_unet: torch.dtype = torch.float16
81
+ unet_needs_upcast = False
82
+
83
+
84
+ def cond_cast_unet(input):
85
+ return input.to(dtype_unet) if unet_needs_upcast else input
86
+
87
+
88
+ def cond_cast_float(input):
89
+ return input.float() if unet_needs_upcast else input
90
+
91
+
92
+ nv_rng = None
93
+
94
+
95
+ def autocast(disable=False):
96
+ if disable:
97
+ return contextlib.nullcontext()
98
+
99
+ if dtype == torch.float32 or shared.cmd_opts.precision == "full":
100
+ return contextlib.nullcontext()
101
+
102
+ return torch.autocast("cuda")
103
+
104
+
105
+ def without_autocast(disable=False):
106
+ return torch.autocast("cuda", enabled=False) if torch.is_autocast_enabled() and not disable else contextlib.nullcontext()
107
+
108
+
109
+ class NansException(Exception):
110
+ pass
111
+
112
+
113
+ def test_for_nans(x, where):
114
+ if shared.cmd_opts.disable_nan_check:
115
+ return
116
+
117
+ if not torch.all(torch.isnan(x)).item():
118
+ return
119
+
120
+ if where == "unet":
121
+ message = "A tensor with all NaNs was produced in Unet."
122
+
123
+ if not shared.cmd_opts.no_half:
124
+ message += " This could be either because there's not enough precision to represent the picture, or because your video card does not support half type. Try setting the \"Upcast cross attention layer to float32\" option in Settings > Stable Diffusion or using the --no-half commandline argument to fix this."
125
+
126
+ elif where == "vae":
127
+ message = "A tensor with all NaNs was produced in VAE."
128
+
129
+ if not shared.cmd_opts.no_half and not shared.cmd_opts.no_half_vae:
130
+ message += " This could be because there's not enough precision to represent the picture. Try adding --no-half-vae commandline argument to fix this."
131
+ else:
132
+ message = "A tensor with all NaNs was produced."
133
+
134
+ message += " Use --disable-nan-check commandline argument to disable this check."
135
+
136
+ raise NansException(message)
137
+
138
+
139
+ @lru_cache
140
+ def first_time_calculation():
141
+ """
142
+ just do any calculation with pytorch layers - the first time this is done it allocates about 700MB of memory and
143
+ spends about 2.7 seconds doing that, at least with NVIDIA.
144
+ """
145
+
146
+ x = torch.zeros((1, 1)).to(device, dtype)
147
+ linear = torch.nn.Linear(1, 1).to(device, dtype)
148
+ linear(x)
149
+
150
+ x = torch.zeros((1, 1, 3, 3)).to(device, dtype)
151
+ conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype)
152
+ conv2d(x)
153
+
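
Note: autocast(), test_for_nans() and torch_gc() are how the rest of the codebase runs model code under mixed precision and cleans up afterwards. A hedged sketch of that pattern with a toy layer; devices.device, the dtypes and shared.cmd_opts are populated during webui startup, so a standalone script would need that initialization first.

    import torch

    from modules import devices

    # stand-in for a real module such as the SD unet or vae
    layer = torch.nn.Linear(4, 4).to(devices.device, devices.dtype)
    x = torch.randn(1, 4).to(devices.device, devices.dtype)

    with torch.no_grad(), devices.autocast():
        out = layer(x)

    devices.test_for_nans(out, "unet")  # raises NansException only if every value is NaN
    devices.torch_gc()                  # release cached CUDA/MPS memory between jobs
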
modules/errors.py ADDED
@@ -0,0 +1,136 @@
1
+ import sys
2
+ import textwrap
3
+ import traceback
4
+
5
+
6
+ exception_records = []
7
+
8
+
9
+ def record_exception():
10
+ _, e, tb = sys.exc_info()
11
+ if e is None:
12
+ return
13
+
14
+ if exception_records and exception_records[-1] == e:
15
+ return
16
+
17
+ from modules import sysinfo
18
+ exception_records.append(sysinfo.format_exception(e, tb))
19
+
20
+ if len(exception_records) > 5:
21
+ exception_records.pop(0)
22
+
23
+
24
+ def report(message: str, *, exc_info: bool = False) -> None:
25
+ """
26
+ Print an error message to stderr, with optional traceback.
27
+ """
28
+
29
+ record_exception()
30
+
31
+ for line in message.splitlines():
32
+ print("***", line, file=sys.stderr)
33
+ if exc_info:
34
+ print(textwrap.indent(traceback.format_exc(), " "), file=sys.stderr)
35
+ print("---", file=sys.stderr)
36
+
37
+
38
+ def print_error_explanation(message):
39
+ record_exception()
40
+
41
+ lines = message.strip().split("\n")
42
+ max_len = max([len(x) for x in lines])
43
+
44
+ print('=' * max_len, file=sys.stderr)
45
+ for line in lines:
46
+ print(line, file=sys.stderr)
47
+ print('=' * max_len, file=sys.stderr)
48
+
49
+
50
+ def display(e: Exception, task, *, full_traceback=False):
51
+ record_exception()
52
+
53
+ print(f"{task or 'error'}: {type(e).__name__}", file=sys.stderr)
54
+ te = traceback.TracebackException.from_exception(e)
55
+ if full_traceback:
56
+ # include frames leading up to the try-catch block
57
+ te.stack = traceback.StackSummary(traceback.extract_stack()[:-2] + te.stack)
58
+ print(*te.format(), sep="", file=sys.stderr)
59
+
60
+ message = str(e)
61
+ if "copying a param with shape torch.Size([640, 1024]) from checkpoint, the shape in current model is torch.Size([640, 768])" in message:
62
+ print_error_explanation("""
63
+ The most likely cause of this is you are trying to load Stable Diffusion 2.0 model without specifying its config file.
64
+ See https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20 for how to solve this.
65
+ """)
66
+
67
+
68
+ already_displayed = {}
69
+
70
+
71
+ def display_once(e: Exception, task):
72
+ record_exception()
73
+
74
+ if task in already_displayed:
75
+ return
76
+
77
+ display(e, task)
78
+
79
+ already_displayed[task] = 1
80
+
81
+
82
+ def run(code, task):
83
+ try:
84
+ code()
85
+ except Exception as e:
86
+ display(e, task)
87
+
88
+
89
+ def check_versions():
90
+ from packaging import version
91
+ from modules import shared
92
+
93
+ import torch
94
+ import gradio
95
+
96
+ expected_torch_version = "2.0.0"
97
+ expected_xformers_version = "0.0.20"
98
+ expected_gradio_version = "3.41.2"
99
+
100
+ if version.parse(torch.__version__) < version.parse(expected_torch_version):
101
+ print_error_explanation(f"""
102
+ You are running torch {torch.__version__}.
103
+ The program is tested to work with torch {expected_torch_version}.
104
+ To reinstall the desired version, run with commandline flag --reinstall-torch.
105
+ Beware that this will cause a lot of large files to be downloaded, and that
106
+ there are reports of issues with the training tab on the latest version.
107
+
108
+ Use --skip-version-check commandline argument to disable this check.
109
+ """.strip())
110
+
111
+ if shared.xformers_available:
112
+ import xformers
113
+
114
+ if version.parse(xformers.__version__) < version.parse(expected_xformers_version):
115
+ print_error_explanation(f"""
116
+ You are running xformers {xformers.__version__}.
117
+ The program is tested to work with xformers {expected_xformers_version}.
118
+ To reinstall the desired version, run with commandline flag --reinstall-xformers.
119
+
120
+ Use --skip-version-check commandline argument to disable this check.
121
+ """.strip())
122
+
123
+ if gradio.__version__ != expected_gradio_version:
124
+ print_error_explanation(f"""
125
+ You are running gradio {gradio.__version__}.
126
+ The program is designed to work with gradio {expected_gradio_version}.
127
+ Using a different version of gradio is extremely likely to break the program.
128
+
129
+ Reasons why you have the mismatched gradio version can be:
130
+ - you use --skip-install flag.
131
+ - you use webui.py to start the program instead of launch.py.
132
+ - an extension installs the incompatible gradio version.
133
+
134
+ Use --skip-version-check commandline argument to disable this check.
135
+ """.strip())
136
+
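
Note: report(), run() and display_once() are the project's convention for non-fatal error handling. A brief hedged sketch of typical call sites; the failing function is purely illustrative and a webui environment is assumed (record_exception() imports modules.sysinfo).

    from modules import errors

    def load_optional_component():
        raise RuntimeError("weights file is corrupt")  # illustrative failure

    # run(): execute the callable and, on failure, print the error with a task label
    errors.run(load_optional_component, "loading optional component")

    # report(): log from inside an except block, optionally with the current traceback
    try:
        load_optional_component()
    except Exception:
        errors.report("Failed to load optional component", exc_info=True)
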
modules/esrgan_model.py ADDED
@@ -0,0 +1,229 @@
1
+ import sys
2
+
3
+ import numpy as np
4
+ import torch
5
+ from PIL import Image
6
+
7
+ import modules.esrgan_model_arch as arch
8
+ from modules import modelloader, images, devices
9
+ from modules.shared import opts
10
+ from modules.upscaler import Upscaler, UpscalerData
11
+
12
+
13
+ def mod2normal(state_dict):
14
+ # this code is copied from https://github.com/victorca25/iNNfer
15
+ if 'conv_first.weight' in state_dict:
16
+ crt_net = {}
17
+ items = list(state_dict)
18
+
19
+ crt_net['model.0.weight'] = state_dict['conv_first.weight']
20
+ crt_net['model.0.bias'] = state_dict['conv_first.bias']
21
+
22
+ for k in items.copy():
23
+ if 'RDB' in k:
24
+ ori_k = k.replace('RRDB_trunk.', 'model.1.sub.')
25
+ if '.weight' in k:
26
+ ori_k = ori_k.replace('.weight', '.0.weight')
27
+ elif '.bias' in k:
28
+ ori_k = ori_k.replace('.bias', '.0.bias')
29
+ crt_net[ori_k] = state_dict[k]
30
+ items.remove(k)
31
+
32
+ crt_net['model.1.sub.23.weight'] = state_dict['trunk_conv.weight']
33
+ crt_net['model.1.sub.23.bias'] = state_dict['trunk_conv.bias']
34
+ crt_net['model.3.weight'] = state_dict['upconv1.weight']
35
+ crt_net['model.3.bias'] = state_dict['upconv1.bias']
36
+ crt_net['model.6.weight'] = state_dict['upconv2.weight']
37
+ crt_net['model.6.bias'] = state_dict['upconv2.bias']
38
+ crt_net['model.8.weight'] = state_dict['HRconv.weight']
39
+ crt_net['model.8.bias'] = state_dict['HRconv.bias']
40
+ crt_net['model.10.weight'] = state_dict['conv_last.weight']
41
+ crt_net['model.10.bias'] = state_dict['conv_last.bias']
42
+ state_dict = crt_net
43
+ return state_dict
44
+
45
+
46
+ def resrgan2normal(state_dict, nb=23):
47
+ # this code is copied from https://github.com/victorca25/iNNfer
48
+ if "conv_first.weight" in state_dict and "body.0.rdb1.conv1.weight" in state_dict:
49
+ re8x = 0
50
+ crt_net = {}
51
+ items = list(state_dict)
52
+
53
+ crt_net['model.0.weight'] = state_dict['conv_first.weight']
54
+ crt_net['model.0.bias'] = state_dict['conv_first.bias']
55
+
56
+ for k in items.copy():
57
+ if "rdb" in k:
58
+ ori_k = k.replace('body.', 'model.1.sub.')
59
+ ori_k = ori_k.replace('.rdb', '.RDB')
60
+ if '.weight' in k:
61
+ ori_k = ori_k.replace('.weight', '.0.weight')
62
+ elif '.bias' in k:
63
+ ori_k = ori_k.replace('.bias', '.0.bias')
64
+ crt_net[ori_k] = state_dict[k]
65
+ items.remove(k)
66
+
67
+ crt_net[f'model.1.sub.{nb}.weight'] = state_dict['conv_body.weight']
68
+ crt_net[f'model.1.sub.{nb}.bias'] = state_dict['conv_body.bias']
69
+ crt_net['model.3.weight'] = state_dict['conv_up1.weight']
70
+ crt_net['model.3.bias'] = state_dict['conv_up1.bias']
71
+ crt_net['model.6.weight'] = state_dict['conv_up2.weight']
72
+ crt_net['model.6.bias'] = state_dict['conv_up2.bias']
73
+
74
+ if 'conv_up3.weight' in state_dict:
75
+ # modification supporting: https://github.com/ai-forever/Real-ESRGAN/blob/main/RealESRGAN/rrdbnet_arch.py
76
+ re8x = 3
77
+ crt_net['model.9.weight'] = state_dict['conv_up3.weight']
78
+ crt_net['model.9.bias'] = state_dict['conv_up3.bias']
79
+
80
+ crt_net[f'model.{8+re8x}.weight'] = state_dict['conv_hr.weight']
81
+ crt_net[f'model.{8+re8x}.bias'] = state_dict['conv_hr.bias']
82
+ crt_net[f'model.{10+re8x}.weight'] = state_dict['conv_last.weight']
83
+ crt_net[f'model.{10+re8x}.bias'] = state_dict['conv_last.bias']
84
+
85
+ state_dict = crt_net
86
+ return state_dict
87
+
88
+
89
+ def infer_params(state_dict):
90
+ # this code is copied from https://github.com/victorca25/iNNfer
91
+ scale2x = 0
92
+ scalemin = 6
93
+ n_uplayer = 0
94
+ plus = False
95
+
96
+ for block in list(state_dict):
97
+ parts = block.split(".")
98
+ n_parts = len(parts)
99
+ if n_parts == 5 and parts[2] == "sub":
100
+ nb = int(parts[3])
101
+ elif n_parts == 3:
102
+ part_num = int(parts[1])
103
+ if (part_num > scalemin
104
+ and parts[0] == "model"
105
+ and parts[2] == "weight"):
106
+ scale2x += 1
107
+ if part_num > n_uplayer:
108
+ n_uplayer = part_num
109
+ out_nc = state_dict[block].shape[0]
110
+ if not plus and "conv1x1" in block:
111
+ plus = True
112
+
113
+ nf = state_dict["model.0.weight"].shape[0]
114
+ in_nc = state_dict["model.0.weight"].shape[1]
115
+ out_nc = out_nc
116
+ scale = 2 ** scale2x
117
+
118
+ return in_nc, out_nc, nf, nb, plus, scale
119
+
120
+
121
+ class UpscalerESRGAN(Upscaler):
122
+ def __init__(self, dirname):
123
+ self.name = "ESRGAN"
124
+ self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/ESRGAN.pth"
125
+ self.model_name = "ESRGAN_4x"
126
+ self.scalers = []
127
+ self.user_path = dirname
128
+ super().__init__()
129
+ model_paths = self.find_models(ext_filter=[".pt", ".pth"])
130
+ scalers = []
131
+ if len(model_paths) == 0:
132
+ scaler_data = UpscalerData(self.model_name, self.model_url, self, 4)
133
+ scalers.append(scaler_data)
134
+ for file in model_paths:
135
+ if file.startswith("http"):
136
+ name = self.model_name
137
+ else:
138
+ name = modelloader.friendly_name(file)
139
+
140
+ scaler_data = UpscalerData(name, file, self, 4)
141
+ self.scalers.append(scaler_data)
142
+
143
+ def do_upscale(self, img, selected_model):
144
+ try:
145
+ model = self.load_model(selected_model)
146
+ except Exception as e:
147
+ print(f"Unable to load ESRGAN model {selected_model}: {e}", file=sys.stderr)
148
+ return img
149
+ model.to(devices.device_esrgan)
150
+ img = esrgan_upscale(model, img)
151
+ return img
152
+
153
+ def load_model(self, path: str):
154
+ if path.startswith("http"):
155
+ # TODO: this doesn't use `path` at all?
156
+ filename = modelloader.load_file_from_url(
157
+ url=self.model_url,
158
+ model_dir=self.model_download_path,
159
+ file_name=f"{self.model_name}.pth",
160
+ )
161
+ else:
162
+ filename = path
163
+
164
+ state_dict = torch.load(filename, map_location='cpu' if devices.device_esrgan.type == 'mps' else None)
165
+
166
+ if "params_ema" in state_dict:
167
+ state_dict = state_dict["params_ema"]
168
+ elif "params" in state_dict:
169
+ state_dict = state_dict["params"]
170
+ num_conv = 16 if "realesr-animevideov3" in filename else 32
171
+ model = arch.SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=num_conv, upscale=4, act_type='prelu')
172
+ model.load_state_dict(state_dict)
173
+ model.eval()
174
+ return model
175
+
176
+ if "body.0.rdb1.conv1.weight" in state_dict and "conv_first.weight" in state_dict:
177
+ nb = 6 if "RealESRGAN_x4plus_anime_6B" in filename else 23
178
+ state_dict = resrgan2normal(state_dict, nb)
179
+ elif "conv_first.weight" in state_dict:
180
+ state_dict = mod2normal(state_dict)
181
+ elif "model.0.weight" not in state_dict:
182
+ raise Exception("The file is not a recognized ESRGAN model.")
183
+
184
+ in_nc, out_nc, nf, nb, plus, mscale = infer_params(state_dict)
185
+
186
+ model = arch.RRDBNet(in_nc=in_nc, out_nc=out_nc, nf=nf, nb=nb, upscale=mscale, plus=plus)
187
+ model.load_state_dict(state_dict)
188
+ model.eval()
189
+
190
+ return model
191
+
192
+
193
+ def upscale_without_tiling(model, img):
194
+ img = np.array(img)
195
+ img = img[:, :, ::-1]
196
+ img = np.ascontiguousarray(np.transpose(img, (2, 0, 1))) / 255
197
+ img = torch.from_numpy(img).float()
198
+ img = img.unsqueeze(0).to(devices.device_esrgan)
199
+ with torch.no_grad():
200
+ output = model(img)
201
+ output = output.squeeze().float().cpu().clamp_(0, 1).numpy()
202
+ output = 255. * np.moveaxis(output, 0, 2)
203
+ output = output.astype(np.uint8)
204
+ output = output[:, :, ::-1]
205
+ return Image.fromarray(output, 'RGB')
206
+
207
+
208
+ def esrgan_upscale(model, img):
209
+ if opts.ESRGAN_tile == 0:
210
+ return upscale_without_tiling(model, img)
211
+
212
+ grid = images.split_grid(img, opts.ESRGAN_tile, opts.ESRGAN_tile, opts.ESRGAN_tile_overlap)
213
+ newtiles = []
214
+ scale_factor = 1
215
+
216
+ for y, h, row in grid.tiles:
217
+ newrow = []
218
+ for tiledata in row:
219
+ x, w, tile = tiledata
220
+
221
+ output = upscale_without_tiling(model, tile)
222
+ scale_factor = output.width // tile.width
223
+
224
+ newrow.append([x * scale_factor, w * scale_factor, output])
225
+ newtiles.append([y * scale_factor, h * scale_factor, newrow])
226
+
227
+ newgrid = images.Grid(newtiles, grid.tile_w * scale_factor, grid.tile_h * scale_factor, grid.image_w * scale_factor, grid.image_h * scale_factor, grid.overlap * scale_factor)
228
+ output = images.combine_grid(newgrid)
229
+ return output
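
For reference, a minimal standalone sketch (not part of the module above) of the tensor pre/post-processing that upscale_without_tiling performs: a PIL RGB image to a BGR CHW float tensor in [0, 1] and back. The identity "model" below is a stand-in assumption for the loaded RRDBNet.

import numpy as np
import torch
from PIL import Image

def roundtrip(img: Image.Image) -> Image.Image:
    arr = np.array(img)[:, :, ::-1]                                   # RGB -> BGR
    tensor = torch.from_numpy(
        np.ascontiguousarray(np.transpose(arr, (2, 0, 1))) / 255.0    # CHW, [0, 1]
    ).float().unsqueeze(0)                                            # 1 x C x H x W
    with torch.no_grad():
        out = tensor                                                  # stand-in for model(tensor)
    out = out.squeeze().clamp_(0, 1).numpy()
    out = (255.0 * np.moveaxis(out, 0, 2)).astype(np.uint8)[:, :, ::-1]
    return Image.fromarray(out, 'RGB')

print(roundtrip(Image.new('RGB', (8, 8), (200, 30, 60))).size)        # (8, 8)
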
modules/esrgan_model_arch.py ADDED
@@ -0,0 +1,465 @@
1
+ # this file is adapted from https://github.com/victorca25/iNNfer
2
+
3
+ from collections import OrderedDict
4
+ import math
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+
9
+
10
+ ####################
11
+ # RRDBNet Generator
12
+ ####################
13
+
14
+ class RRDBNet(nn.Module):
15
+ def __init__(self, in_nc, out_nc, nf, nb, nr=3, gc=32, upscale=4, norm_type=None,
16
+ act_type='leakyrelu', mode='CNA', upsample_mode='upconv', convtype='Conv2D',
17
+ finalact=None, gaussian_noise=False, plus=False):
18
+ super(RRDBNet, self).__init__()
19
+ n_upscale = int(math.log(upscale, 2))
20
+ if upscale == 3:
21
+ n_upscale = 1
22
+
23
+ self.resrgan_scale = 0
24
+ if in_nc % 16 == 0:
25
+ self.resrgan_scale = 1
26
+ elif in_nc != 4 and in_nc % 4 == 0:
27
+ self.resrgan_scale = 2
28
+
29
+ fea_conv = conv_block(in_nc, nf, kernel_size=3, norm_type=None, act_type=None, convtype=convtype)
30
+ rb_blocks = [RRDB(nf, nr, kernel_size=3, gc=32, stride=1, bias=1, pad_type='zero',
31
+ norm_type=norm_type, act_type=act_type, mode='CNA', convtype=convtype,
32
+ gaussian_noise=gaussian_noise, plus=plus) for _ in range(nb)]
33
+ LR_conv = conv_block(nf, nf, kernel_size=3, norm_type=norm_type, act_type=None, mode=mode, convtype=convtype)
34
+
35
+ if upsample_mode == 'upconv':
36
+ upsample_block = upconv_block
37
+ elif upsample_mode == 'pixelshuffle':
38
+ upsample_block = pixelshuffle_block
39
+ else:
40
+ raise NotImplementedError(f'upsample mode [{upsample_mode}] is not found')
41
+ if upscale == 3:
42
+ upsampler = upsample_block(nf, nf, 3, act_type=act_type, convtype=convtype)
43
+ else:
44
+ upsampler = [upsample_block(nf, nf, act_type=act_type, convtype=convtype) for _ in range(n_upscale)]
45
+ HR_conv0 = conv_block(nf, nf, kernel_size=3, norm_type=None, act_type=act_type, convtype=convtype)
46
+ HR_conv1 = conv_block(nf, out_nc, kernel_size=3, norm_type=None, act_type=None, convtype=convtype)
47
+
48
+ outact = act(finalact) if finalact else None
49
+
50
+ self.model = sequential(fea_conv, ShortcutBlock(sequential(*rb_blocks, LR_conv)),
51
+ *upsampler, HR_conv0, HR_conv1, outact)
52
+
53
+ def forward(self, x, outm=None):
54
+ if self.resrgan_scale == 1:
55
+ feat = pixel_unshuffle(x, scale=4)
56
+ elif self.resrgan_scale == 2:
57
+ feat = pixel_unshuffle(x, scale=2)
58
+ else:
59
+ feat = x
60
+
61
+ return self.model(feat)
62
+
63
+
64
+ class RRDB(nn.Module):
65
+ """
66
+ Residual in Residual Dense Block
67
+ (ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks)
68
+ """
69
+
70
+ def __init__(self, nf, nr=3, kernel_size=3, gc=32, stride=1, bias=1, pad_type='zero',
71
+ norm_type=None, act_type='leakyrelu', mode='CNA', convtype='Conv2D',
72
+ spectral_norm=False, gaussian_noise=False, plus=False):
73
+ super(RRDB, self).__init__()
74
+ # This is for backwards compatibility with existing models
75
+ if nr == 3:
76
+ self.RDB1 = ResidualDenseBlock_5C(nf, kernel_size, gc, stride, bias, pad_type,
77
+ norm_type, act_type, mode, convtype, spectral_norm=spectral_norm,
78
+ gaussian_noise=gaussian_noise, plus=plus)
79
+ self.RDB2 = ResidualDenseBlock_5C(nf, kernel_size, gc, stride, bias, pad_type,
80
+ norm_type, act_type, mode, convtype, spectral_norm=spectral_norm,
81
+ gaussian_noise=gaussian_noise, plus=plus)
82
+ self.RDB3 = ResidualDenseBlock_5C(nf, kernel_size, gc, stride, bias, pad_type,
83
+ norm_type, act_type, mode, convtype, spectral_norm=spectral_norm,
84
+ gaussian_noise=gaussian_noise, plus=plus)
85
+ else:
86
+ RDB_list = [ResidualDenseBlock_5C(nf, kernel_size, gc, stride, bias, pad_type,
87
+ norm_type, act_type, mode, convtype, spectral_norm=spectral_norm,
88
+ gaussian_noise=gaussian_noise, plus=plus) for _ in range(nr)]
89
+ self.RDBs = nn.Sequential(*RDB_list)
90
+
91
+ def forward(self, x):
92
+ if hasattr(self, 'RDB1'):
93
+ out = self.RDB1(x)
94
+ out = self.RDB2(out)
95
+ out = self.RDB3(out)
96
+ else:
97
+ out = self.RDBs(x)
98
+ return out * 0.2 + x
99
+
100
+
101
+ class ResidualDenseBlock_5C(nn.Module):
102
+ """
103
+ Residual Dense Block
104
+ The core module of paper: (Residual Dense Network for Image Super-Resolution, CVPR 18)
105
+ Modified options that can be used:
106
+ - "Partial Convolution based Padding" arXiv:1811.11718
107
+ - "Spectral normalization" arXiv:1802.05957
108
+ - "ICASSP 2020 - ESRGAN+ : Further Improving ESRGAN" N. C.
109
+ {Rakotonirina} and A. {Rasoanaivo}
110
+ """
111
+
112
+ def __init__(self, nf=64, kernel_size=3, gc=32, stride=1, bias=1, pad_type='zero',
113
+ norm_type=None, act_type='leakyrelu', mode='CNA', convtype='Conv2D',
114
+ spectral_norm=False, gaussian_noise=False, plus=False):
115
+ super(ResidualDenseBlock_5C, self).__init__()
116
+
117
+ self.noise = GaussianNoise() if gaussian_noise else None
118
+ self.conv1x1 = conv1x1(nf, gc) if plus else None
119
+
120
+ self.conv1 = conv_block(nf, gc, kernel_size, stride, bias=bias, pad_type=pad_type,
121
+ norm_type=norm_type, act_type=act_type, mode=mode, convtype=convtype,
122
+ spectral_norm=spectral_norm)
123
+ self.conv2 = conv_block(nf+gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type,
124
+ norm_type=norm_type, act_type=act_type, mode=mode, convtype=convtype,
125
+ spectral_norm=spectral_norm)
126
+ self.conv3 = conv_block(nf+2*gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type,
127
+ norm_type=norm_type, act_type=act_type, mode=mode, convtype=convtype,
128
+ spectral_norm=spectral_norm)
129
+ self.conv4 = conv_block(nf+3*gc, gc, kernel_size, stride, bias=bias, pad_type=pad_type,
130
+ norm_type=norm_type, act_type=act_type, mode=mode, convtype=convtype,
131
+ spectral_norm=spectral_norm)
132
+ if mode == 'CNA':
133
+ last_act = None
134
+ else:
135
+ last_act = act_type
136
+ self.conv5 = conv_block(nf+4*gc, nf, 3, stride, bias=bias, pad_type=pad_type,
137
+ norm_type=norm_type, act_type=last_act, mode=mode, convtype=convtype,
138
+ spectral_norm=spectral_norm)
139
+
140
+ def forward(self, x):
141
+ x1 = self.conv1(x)
142
+ x2 = self.conv2(torch.cat((x, x1), 1))
143
+ if self.conv1x1:
144
+ x2 = x2 + self.conv1x1(x)
145
+ x3 = self.conv3(torch.cat((x, x1, x2), 1))
146
+ x4 = self.conv4(torch.cat((x, x1, x2, x3), 1))
147
+ if self.conv1x1:
148
+ x4 = x4 + x2
149
+ x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
150
+ if self.noise:
151
+ return self.noise(x5.mul(0.2) + x)
152
+ else:
153
+ return x5 * 0.2 + x
154
+
155
+
156
+ ####################
157
+ # ESRGANplus
158
+ ####################
159
+
160
+ class GaussianNoise(nn.Module):
161
+ def __init__(self, sigma=0.1, is_relative_detach=False):
162
+ super().__init__()
163
+ self.sigma = sigma
164
+ self.is_relative_detach = is_relative_detach
165
+ self.noise = torch.tensor(0, dtype=torch.float)
166
+
167
+ def forward(self, x):
168
+ if self.training and self.sigma != 0:
169
+ self.noise = self.noise.to(x.device)
170
+ scale = self.sigma * x.detach() if self.is_relative_detach else self.sigma * x
171
+ sampled_noise = self.noise.repeat(*x.size()).normal_() * scale
172
+ x = x + sampled_noise
173
+ return x
174
+
175
+ def conv1x1(in_planes, out_planes, stride=1):
176
+ return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
177
+
178
+
179
+ ####################
180
+ # SRVGGNetCompact
181
+ ####################
182
+
183
+ class SRVGGNetCompact(nn.Module):
184
+ """A compact VGG-style network structure for super-resolution.
185
+ This class is copied from https://github.com/xinntao/Real-ESRGAN
186
+ """
187
+
188
+ def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu'):
189
+ super(SRVGGNetCompact, self).__init__()
190
+ self.num_in_ch = num_in_ch
191
+ self.num_out_ch = num_out_ch
192
+ self.num_feat = num_feat
193
+ self.num_conv = num_conv
194
+ self.upscale = upscale
195
+ self.act_type = act_type
196
+
197
+ self.body = nn.ModuleList()
198
+ # the first conv
199
+ self.body.append(nn.Conv2d(num_in_ch, num_feat, 3, 1, 1))
200
+ # the first activation
201
+ if act_type == 'relu':
202
+ activation = nn.ReLU(inplace=True)
203
+ elif act_type == 'prelu':
204
+ activation = nn.PReLU(num_parameters=num_feat)
205
+ elif act_type == 'leakyrelu':
206
+ activation = nn.LeakyReLU(negative_slope=0.1, inplace=True)
207
+ self.body.append(activation)
208
+
209
+ # the body structure
210
+ for _ in range(num_conv):
211
+ self.body.append(nn.Conv2d(num_feat, num_feat, 3, 1, 1))
212
+ # activation
213
+ if act_type == 'relu':
214
+ activation = nn.ReLU(inplace=True)
215
+ elif act_type == 'prelu':
216
+ activation = nn.PReLU(num_parameters=num_feat)
217
+ elif act_type == 'leakyrelu':
218
+ activation = nn.LeakyReLU(negative_slope=0.1, inplace=True)
219
+ self.body.append(activation)
220
+
221
+ # the last conv
222
+ self.body.append(nn.Conv2d(num_feat, num_out_ch * upscale * upscale, 3, 1, 1))
223
+ # upsample
224
+ self.upsampler = nn.PixelShuffle(upscale)
225
+
226
+ def forward(self, x):
227
+ out = x
228
+ for i in range(0, len(self.body)):
229
+ out = self.body[i](out)
230
+
231
+ out = self.upsampler(out)
232
+ # add the nearest upsampled image, so that the network learns the residual
233
+ base = F.interpolate(x, scale_factor=self.upscale, mode='nearest')
234
+ out += base
235
+ return out
236
+
237
+
238
+ ####################
239
+ # Upsampler
240
+ ####################
241
+
242
+ class Upsample(nn.Module):
243
+ r"""Upsamples a given multi-channel 1D (temporal), 2D (spatial) or 3D (volumetric) data.
244
+ The input data is assumed to be of the form
245
+ `minibatch x channels x [optional depth] x [optional height] x width`.
246
+ """
247
+
248
+ def __init__(self, size=None, scale_factor=None, mode="nearest", align_corners=None):
249
+ super(Upsample, self).__init__()
250
+ if isinstance(scale_factor, tuple):
251
+ self.scale_factor = tuple(float(factor) for factor in scale_factor)
252
+ else:
253
+ self.scale_factor = float(scale_factor) if scale_factor else None
254
+ self.mode = mode
255
+ self.size = size
256
+ self.align_corners = align_corners
257
+
258
+ def forward(self, x):
259
+ return nn.functional.interpolate(x, size=self.size, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners)
260
+
261
+ def extra_repr(self):
262
+ if self.scale_factor is not None:
263
+ info = f'scale_factor={self.scale_factor}'
264
+ else:
265
+ info = f'size={self.size}'
266
+ info += f', mode={self.mode}'
267
+ return info
268
+
269
+
270
+ def pixel_unshuffle(x, scale):
271
+ """ Pixel unshuffle.
272
+ Args:
273
+ x (Tensor): Input feature with shape (b, c, hh, hw).
274
+ scale (int): Downsample ratio.
275
+ Returns:
276
+ Tensor: the pixel unshuffled feature.
277
+ """
278
+ b, c, hh, hw = x.size()
279
+ out_channel = c * (scale**2)
280
+ assert hh % scale == 0 and hw % scale == 0
281
+ h = hh // scale
282
+ w = hw // scale
283
+ x_view = x.view(b, c, h, scale, w, scale)
284
+ return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w)
285
+
286
+
287
+ def pixelshuffle_block(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True,
288
+ pad_type='zero', norm_type=None, act_type='relu', convtype='Conv2D'):
289
+ """
290
+ Pixel shuffle layer
291
+ (Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional
292
+ Neural Network, CVPR17)
293
+ """
294
+ conv = conv_block(in_nc, out_nc * (upscale_factor ** 2), kernel_size, stride, bias=bias,
295
+ pad_type=pad_type, norm_type=None, act_type=None, convtype=convtype)
296
+ pixel_shuffle = nn.PixelShuffle(upscale_factor)
297
+
298
+ n = norm(norm_type, out_nc) if norm_type else None
299
+ a = act(act_type) if act_type else None
300
+ return sequential(conv, pixel_shuffle, n, a)
301
+
302
+
303
+ def upconv_block(in_nc, out_nc, upscale_factor=2, kernel_size=3, stride=1, bias=True,
304
+ pad_type='zero', norm_type=None, act_type='relu', mode='nearest', convtype='Conv2D'):
305
+ """ Upconv layer """
306
+ upscale_factor = (1, upscale_factor, upscale_factor) if convtype == 'Conv3D' else upscale_factor
307
+ upsample = Upsample(scale_factor=upscale_factor, mode=mode)
308
+ conv = conv_block(in_nc, out_nc, kernel_size, stride, bias=bias,
309
+ pad_type=pad_type, norm_type=norm_type, act_type=act_type, convtype=convtype)
310
+ return sequential(upsample, conv)
311
+
312
+
313
+
314
+
315
+
316
+
317
+
318
+
319
+ ####################
320
+ # Basic blocks
321
+ ####################
322
+
323
+
324
+ def make_layer(basic_block, num_basic_block, **kwarg):
325
+ """Make layers by stacking the same blocks.
326
+ Args:
327
+ basic_block (nn.module): nn.module class for basic block. (block)
328
+ num_basic_block (int): number of blocks. (n_layers)
329
+ Returns:
330
+ nn.Sequential: Stacked blocks in nn.Sequential.
331
+ """
332
+ layers = []
333
+ for _ in range(num_basic_block):
334
+ layers.append(basic_block(**kwarg))
335
+ return nn.Sequential(*layers)
336
+
337
+
338
+ def act(act_type, inplace=True, neg_slope=0.2, n_prelu=1, beta=1.0):
339
+ """ activation helper """
340
+ act_type = act_type.lower()
341
+ if act_type == 'relu':
342
+ layer = nn.ReLU(inplace)
343
+ elif act_type in ('leakyrelu', 'lrelu'):
344
+ layer = nn.LeakyReLU(neg_slope, inplace)
345
+ elif act_type == 'prelu':
346
+ layer = nn.PReLU(num_parameters=n_prelu, init=neg_slope)
347
+ elif act_type == 'tanh': # [-1, 1] range output
348
+ layer = nn.Tanh()
349
+ elif act_type == 'sigmoid': # [0, 1] range output
350
+ layer = nn.Sigmoid()
351
+ else:
352
+ raise NotImplementedError(f'activation layer [{act_type}] is not found')
353
+ return layer
354
+
355
+
356
+ class Identity(nn.Module):
357
+ def __init__(self, *kwargs):
358
+ super(Identity, self).__init__()
359
+
360
+ def forward(self, x, *kwargs):
361
+ return x
362
+
363
+
364
+ def norm(norm_type, nc):
365
+ """ Return a normalization layer """
366
+ norm_type = norm_type.lower()
367
+ if norm_type == 'batch':
368
+ layer = nn.BatchNorm2d(nc, affine=True)
369
+ elif norm_type == 'instance':
370
+ layer = nn.InstanceNorm2d(nc, affine=False)
371
+ elif norm_type == 'none':
372
+ def norm_layer(x): return Identity()
373
+ else:
374
+ raise NotImplementedError(f'normalization layer [{norm_type}] is not found')
375
+ return layer
376
+
377
+
378
+ def pad(pad_type, padding):
379
+ """ padding layer helper """
380
+ pad_type = pad_type.lower()
381
+ if padding == 0:
382
+ return None
383
+ if pad_type == 'reflect':
384
+ layer = nn.ReflectionPad2d(padding)
385
+ elif pad_type == 'replicate':
386
+ layer = nn.ReplicationPad2d(padding)
387
+ elif pad_type == 'zero':
388
+ layer = nn.ZeroPad2d(padding)
389
+ else:
390
+ raise NotImplementedError(f'padding layer [{pad_type}] is not implemented')
391
+ return layer
392
+
393
+
394
+ def get_valid_padding(kernel_size, dilation):
395
+ kernel_size = kernel_size + (kernel_size - 1) * (dilation - 1)
396
+ padding = (kernel_size - 1) // 2
397
+ return padding
398
+
399
+
400
+ class ShortcutBlock(nn.Module):
401
+ """ Elementwise sum the output of a submodule to its input """
402
+ def __init__(self, submodule):
403
+ super(ShortcutBlock, self).__init__()
404
+ self.sub = submodule
405
+
406
+ def forward(self, x):
407
+ output = x + self.sub(x)
408
+ return output
409
+
410
+ def __repr__(self):
411
+ return 'Identity + \n|' + self.sub.__repr__().replace('\n', '\n|')
412
+
413
+
414
+ def sequential(*args):
415
+ """ Flatten Sequential. It unwraps nn.Sequential. """
416
+ if len(args) == 1:
417
+ if isinstance(args[0], OrderedDict):
418
+ raise NotImplementedError('sequential does not support OrderedDict input.')
419
+ return args[0] # No sequential is needed.
420
+ modules = []
421
+ for module in args:
422
+ if isinstance(module, nn.Sequential):
423
+ for submodule in module.children():
424
+ modules.append(submodule)
425
+ elif isinstance(module, nn.Module):
426
+ modules.append(module)
427
+ return nn.Sequential(*modules)
428
+
429
+
430
+ def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias=True,
431
+ pad_type='zero', norm_type=None, act_type='relu', mode='CNA', convtype='Conv2D',
432
+ spectral_norm=False):
433
+ """ Conv layer with padding, normalization, activation """
434
+ assert mode in ['CNA', 'NAC', 'CNAC'], f'Wrong conv mode [{mode}]'
435
+ padding = get_valid_padding(kernel_size, dilation)
436
+ p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None
437
+ padding = padding if pad_type == 'zero' else 0
438
+
439
+ if convtype=='PartialConv2D':
440
+ from torchvision.ops import PartialConv2d # this is definitely not going to work, but PartialConv2d doesn't work anyway and this shuts up static analyzer
441
+ c = PartialConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding,
442
+ dilation=dilation, bias=bias, groups=groups)
443
+ elif convtype=='DeformConv2D':
444
+ from torchvision.ops import DeformConv2d # not tested
445
+ c = DeformConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding,
446
+ dilation=dilation, bias=bias, groups=groups)
447
+ elif convtype=='Conv3D':
448
+ c = nn.Conv3d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding,
449
+ dilation=dilation, bias=bias, groups=groups)
450
+ else:
451
+ c = nn.Conv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding,
452
+ dilation=dilation, bias=bias, groups=groups)
453
+
454
+ if spectral_norm:
455
+ c = nn.utils.spectral_norm(c)
456
+
457
+ a = act(act_type) if act_type else None
458
+ if 'CNA' in mode:
459
+ n = norm(norm_type, out_nc) if norm_type else None
460
+ return sequential(p, c, n, a)
461
+ elif mode == 'NAC':
462
+ if norm_type is None and act_type is not None:
463
+ a = act(act_type, inplace=False)
464
+ n = norm(norm_type, in_nc) if norm_type else None
465
+ return sequential(n, a, p, c)
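
As a small sanity check (standalone, not part of the module), pixel_unshuffle above follows the shape contract (b, c, h*s, w*s) -> (b, c*s*s, h, w); recent PyTorch versions also ship torch.nn.functional.pixel_unshuffle, which should agree with it since both invert the standard PixelShuffle layout.

import torch

def pixel_unshuffle(x, scale):
    b, c, hh, hw = x.size()
    assert hh % scale == 0 and hw % scale == 0
    h, w = hh // scale, hw // scale
    x_view = x.view(b, c, h, scale, w, scale)
    return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, c * scale ** 2, h, w)

x = torch.randn(1, 3, 8, 8)
out = pixel_unshuffle(x, 2)
print(out.shape)                                                       # torch.Size([1, 12, 4, 4])
print(torch.allclose(out, torch.nn.functional.pixel_unshuffle(x, 2)))  # expected True
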
modules/extensions.py ADDED
@@ -0,0 +1,165 @@
1
+ import os
2
+ import threading
3
+
4
+ from modules import shared, errors, cache, scripts
5
+ from modules.gitpython_hack import Repo
6
+ from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401
7
+
8
+ extensions = []
9
+
10
+ os.makedirs(extensions_dir, exist_ok=True)
11
+
12
+
13
+ def active():
14
+ if shared.cmd_opts.disable_all_extensions or shared.opts.disable_all_extensions == "all":
15
+ return []
16
+ elif shared.cmd_opts.disable_extra_extensions or shared.opts.disable_all_extensions == "extra":
17
+ return [x for x in extensions if x.enabled and x.is_builtin]
18
+ else:
19
+ return [x for x in extensions if x.enabled]
20
+
21
+
22
+ class Extension:
23
+ lock = threading.Lock()
24
+ cached_fields = ['remote', 'commit_date', 'branch', 'commit_hash', 'version']
25
+
26
+ def __init__(self, name, path, enabled=True, is_builtin=False):
27
+ self.name = name
28
+ self.path = path
29
+ self.enabled = enabled
30
+ self.status = ''
31
+ self.can_update = False
32
+ self.is_builtin = is_builtin
33
+ self.commit_hash = ''
34
+ self.commit_date = None
35
+ self.version = ''
36
+ self.branch = None
37
+ self.remote = None
38
+ self.have_info_from_repo = False
39
+
40
+ def to_dict(self):
41
+ return {x: getattr(self, x) for x in self.cached_fields}
42
+
43
+ def from_dict(self, d):
44
+ for field in self.cached_fields:
45
+ setattr(self, field, d[field])
46
+
47
+ def read_info_from_repo(self):
48
+ if self.is_builtin or self.have_info_from_repo:
49
+ return
50
+
51
+ def read_from_repo():
52
+ with self.lock:
53
+ if self.have_info_from_repo:
54
+ return
55
+
56
+ self.do_read_info_from_repo()
57
+
58
+ return self.to_dict()
59
+ try:
60
+ d = cache.cached_data_for_file('extensions-git', self.name, os.path.join(self.path, ".git"), read_from_repo)
61
+ self.from_dict(d)
62
+ except FileNotFoundError:
63
+ pass
64
+ self.status = 'unknown' if self.status == '' else self.status
65
+
66
+ def do_read_info_from_repo(self):
67
+ repo = None
68
+ try:
69
+ if os.path.exists(os.path.join(self.path, ".git")):
70
+ repo = Repo(self.path)
71
+ except Exception:
72
+ errors.report(f"Error reading github repository info from {self.path}", exc_info=True)
73
+
74
+ if repo is None or repo.bare:
75
+ self.remote = None
76
+ else:
77
+ try:
78
+ self.remote = next(repo.remote().urls, None)
79
+ commit = repo.head.commit
80
+ self.commit_date = commit.committed_date
81
+ if repo.active_branch:
82
+ self.branch = repo.active_branch.name
83
+ self.commit_hash = commit.hexsha
84
+ self.version = self.commit_hash[:8]
85
+
86
+ except Exception:
87
+ errors.report(f"Failed reading extension data from Git repository ({self.name})", exc_info=True)
88
+ self.remote = None
89
+
90
+ self.have_info_from_repo = True
91
+
92
+ def list_files(self, subdir, extension):
93
+ dirpath = os.path.join(self.path, subdir)
94
+ if not os.path.isdir(dirpath):
95
+ return []
96
+
97
+ res = []
98
+ for filename in sorted(os.listdir(dirpath)):
99
+ res.append(scripts.ScriptFile(self.path, filename, os.path.join(dirpath, filename)))
100
+
101
+ res = [x for x in res if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)]
102
+
103
+ return res
104
+
105
+ def check_updates(self):
106
+ repo = Repo(self.path)
107
+ for fetch in repo.remote().fetch(dry_run=True):
108
+ if fetch.flags != fetch.HEAD_UPTODATE:
109
+ self.can_update = True
110
+ self.status = "new commits"
111
+ return
112
+
113
+ try:
114
+ origin = repo.rev_parse('origin')
115
+ if repo.head.commit != origin:
116
+ self.can_update = True
117
+ self.status = "behind HEAD"
118
+ return
119
+ except Exception:
120
+ self.can_update = False
121
+ self.status = "unknown (remote error)"
122
+ return
123
+
124
+ self.can_update = False
125
+ self.status = "latest"
126
+
127
+ def fetch_and_reset_hard(self, commit='origin'):
128
+ repo = Repo(self.path)
129
+ # Fix: `error: Your local changes to the following files would be overwritten by merge`,
130
+ # because WSL2 Docker set 755 file permissions instead of 644, this results to the error.
131
+ repo.git.fetch(all=True)
132
+ repo.git.reset(commit, hard=True)
133
+ self.have_info_from_repo = False
134
+
135
+
136
+ def list_extensions():
137
+ extensions.clear()
138
+
139
+ if not os.path.isdir(extensions_dir):
140
+ return
141
+
142
+ if shared.cmd_opts.disable_all_extensions:
143
+ print("*** \"--disable-all-extensions\" arg was used, will not load any extensions ***")
144
+ elif shared.opts.disable_all_extensions == "all":
145
+ print("*** \"Disable all extensions\" option was set, will not load any extensions ***")
146
+ elif shared.cmd_opts.disable_extra_extensions:
147
+ print("*** \"--disable-extra-extensions\" arg was used, will only load built-in extensions ***")
148
+ elif shared.opts.disable_all_extensions == "extra":
149
+ print("*** \"Disable all extensions\" option was set, will only load built-in extensions ***")
150
+
151
+ extension_paths = []
152
+ for dirname in [extensions_dir, extensions_builtin_dir]:
153
+ if not os.path.isdir(dirname):
154
+ return
155
+
156
+ for extension_dirname in sorted(os.listdir(dirname)):
157
+ path = os.path.join(dirname, extension_dirname)
158
+ if not os.path.isdir(path):
159
+ continue
160
+
161
+ extension_paths.append((extension_dirname, path, dirname == extensions_builtin_dir))
162
+
163
+ for dirname, path, is_builtin in extension_paths:
164
+ extension = Extension(name=dirname, path=path, enabled=dirname not in shared.opts.disabled_extensions, is_builtin=is_builtin)
165
+ extensions.append(extension)
modules/extra_networks.py ADDED
@@ -0,0 +1,224 @@
1
+ import json
2
+ import os
3
+ import re
4
+ import logging
5
+ from collections import defaultdict
6
+
7
+ from modules import errors
8
+
9
+ extra_network_registry = {}
10
+ extra_network_aliases = {}
11
+
12
+
13
+ def initialize():
14
+ extra_network_registry.clear()
15
+ extra_network_aliases.clear()
16
+
17
+
18
+ def register_extra_network(extra_network):
19
+ extra_network_registry[extra_network.name] = extra_network
20
+
21
+
22
+ def register_extra_network_alias(extra_network, alias):
23
+ extra_network_aliases[alias] = extra_network
24
+
25
+
26
+ def register_default_extra_networks():
27
+ from modules.extra_networks_hypernet import ExtraNetworkHypernet
28
+ register_extra_network(ExtraNetworkHypernet())
29
+
30
+
31
+ class ExtraNetworkParams:
32
+ def __init__(self, items=None):
33
+ self.items = items or []
34
+ self.positional = []
35
+ self.named = {}
36
+
37
+ for item in self.items:
38
+ parts = item.split('=', 2) if isinstance(item, str) else [item]
39
+ if len(parts) == 2:
40
+ self.named[parts[0]] = parts[1]
41
+ else:
42
+ self.positional.append(item)
43
+
44
+ def __eq__(self, other):
45
+ return self.items == other.items
46
+
47
+
48
+ class ExtraNetwork:
49
+ def __init__(self, name):
50
+ self.name = name
51
+
52
+ def activate(self, p, params_list):
53
+ """
54
+ Called by processing on every run. Whatever the extra network is meant to do should be activated here.
55
+ Passes arguments related to this extra network in params_list.
56
+ User passes arguments by specifying this in his prompt:
57
+
58
+ <name:arg1:arg2:arg3>
59
+
60
+ Where name matches the name of this ExtraNetwork object, and arg1:arg2:arg3 are any natural number of text arguments
61
+ separated by colon.
62
+
63
+ Even if the user does not mention this ExtraNetwork in his prompt, the call will still be made, with empty params_list -
64
+ in this case, all effects of this extra network should be disabled.
65
+
66
+ Can be called multiple times before deactivate() - each new call should override the previous call completely.
67
+
68
+ For example, if this ExtraNetwork's name is 'hypernet' and user's prompt is:
69
+
70
+ > "1girl, <hypernet:agm:1.1> <extrasupernet:master:12:13:14> <hypernet:ray>"
71
+
72
+ params_list will be:
73
+
74
+ [
75
+ ExtraNetworkParams(items=["agm", "1.1"]),
76
+ ExtraNetworkParams(items=["ray"])
77
+ ]
78
+
79
+ """
80
+ raise NotImplementedError
81
+
82
+ def deactivate(self, p):
83
+ """
84
+ Called at the end of processing for housekeeping. No need to do anything here.
85
+ """
86
+
87
+ raise NotImplementedError
88
+
89
+
90
+ def lookup_extra_networks(extra_network_data):
91
+ """returns a dict mapping ExtraNetwork objects to lists of arguments for those extra networks.
92
+
93
+ Example input:
94
+ {
95
+ 'lora': [<modules.extra_networks.ExtraNetworkParams object at 0x0000020690D58310>],
96
+ 'lyco': [<modules.extra_networks.ExtraNetworkParams object at 0x0000020690D58F70>],
97
+ 'hypernet': [<modules.extra_networks.ExtraNetworkParams object at 0x0000020690D5A800>]
98
+ }
99
+
100
+ Example output:
101
+
102
+ {
103
+ <extra_networks_lora.ExtraNetworkLora object at 0x0000020581BEECE0>: [<modules.extra_networks.ExtraNetworkParams object at 0x0000020690D58310>, <modules.extra_networks.ExtraNetworkParams object at 0x0000020690D58F70>],
104
+ <modules.extra_networks_hypernet.ExtraNetworkHypernet object at 0x0000020581BEEE60>: [<modules.extra_networks.ExtraNetworkParams object at 0x0000020690D5A800>]
105
+ }
106
+ """
107
+
108
+ res = {}
109
+
110
+ for extra_network_name, extra_network_args in list(extra_network_data.items()):
111
+ extra_network = extra_network_registry.get(extra_network_name, None)
112
+ alias = extra_network_aliases.get(extra_network_name, None)
113
+
114
+ if alias is not None and extra_network is None:
115
+ extra_network = alias
116
+
117
+ if extra_network is None:
118
+ logging.info(f"Skipping unknown extra network: {extra_network_name}")
119
+ continue
120
+
121
+ res.setdefault(extra_network, []).extend(extra_network_args)
122
+
123
+ return res
124
+
125
+
126
+ def activate(p, extra_network_data):
127
+ """call activate for extra networks in extra_network_data in specified order, then call
128
+ activate for all remaining registered networks with an empty argument list"""
129
+
130
+ activated = []
131
+
132
+ for extra_network, extra_network_args in lookup_extra_networks(extra_network_data).items():
133
+
134
+ try:
135
+ extra_network.activate(p, extra_network_args)
136
+ activated.append(extra_network)
137
+ except Exception as e:
138
+ errors.display(e, f"activating extra network {extra_network.name} with arguments {extra_network_args}")
139
+
140
+ for extra_network_name, extra_network in extra_network_registry.items():
141
+ if extra_network in activated:
142
+ continue
143
+
144
+ try:
145
+ extra_network.activate(p, [])
146
+ except Exception as e:
147
+ errors.display(e, f"activating extra network {extra_network_name}")
148
+
149
+ if p.scripts is not None:
150
+ p.scripts.after_extra_networks_activate(p, batch_number=p.iteration, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds, extra_network_data=extra_network_data)
151
+
152
+
153
+ def deactivate(p, extra_network_data):
154
+ """call deactivate for extra networks in extra_network_data in specified order, then call
155
+ deactivate for all remaining registered networks"""
156
+
157
+ data = lookup_extra_networks(extra_network_data)
158
+
159
+ for extra_network in data:
160
+ try:
161
+ extra_network.deactivate(p)
162
+ except Exception as e:
163
+ errors.display(e, f"deactivating extra network {extra_network.name}")
164
+
165
+ for extra_network_name, extra_network in extra_network_registry.items():
166
+ if extra_network in data:
167
+ continue
168
+
169
+ try:
170
+ extra_network.deactivate(p)
171
+ except Exception as e:
172
+ errors.display(e, f"deactivating unmentioned extra network {extra_network_name}")
173
+
174
+
175
+ re_extra_net = re.compile(r"<(\w+):([^>]+)>")
176
+
177
+
178
+ def parse_prompt(prompt):
179
+ res = defaultdict(list)
180
+
181
+ def found(m):
182
+ name = m.group(1)
183
+ args = m.group(2)
184
+
185
+ res[name].append(ExtraNetworkParams(items=args.split(":")))
186
+
187
+ return ""
188
+
189
+ prompt = re.sub(re_extra_net, found, prompt)
190
+
191
+ return prompt, res
192
+
193
+
194
+ def parse_prompts(prompts):
195
+ res = []
196
+ extra_data = None
197
+
198
+ for prompt in prompts:
199
+ updated_prompt, parsed_extra_data = parse_prompt(prompt)
200
+
201
+ if extra_data is None:
202
+ extra_data = parsed_extra_data
203
+
204
+ res.append(updated_prompt)
205
+
206
+ return res, extra_data
207
+
208
+
209
+ def get_user_metadata(filename):
210
+ if filename is None:
211
+ return {}
212
+
213
+ basename, ext = os.path.splitext(filename)
214
+ metadata_filename = basename + '.json'
215
+
216
+ metadata = {}
217
+ try:
218
+ if os.path.isfile(metadata_filename):
219
+ with open(metadata_filename, "r", encoding="utf8") as file:
220
+ metadata = json.load(file)
221
+ except Exception as e:
222
+ errors.display(e, f"reading extra network user metadata from {metadata_filename}")
223
+
224
+ return metadata
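
As an illustration (standalone, not part of the module) of the <name:arg1:arg2:...> syntax described in ExtraNetwork.activate, the sketch below mirrors the regex and argument splitting used by parse_prompt above, with plain lists standing in for ExtraNetworkParams:

import re
from collections import defaultdict

re_extra_net = re.compile(r"<(\w+):([^>]+)>")

def parse_prompt(prompt):
    res = defaultdict(list)

    def found(m):
        res[m.group(1)].append(m.group(2).split(":"))   # name -> list of argument lists
        return ""                                       # strip the tag from the prompt

    return re.sub(re_extra_net, found, prompt), dict(res)

print(parse_prompt("1girl, <hypernet:agm:1.1> <hypernet:ray>"))
# ('1girl,  ', {'hypernet': [['agm', '1.1'], ['ray']]})
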
modules/extra_networks_hypernet.py ADDED
@@ -0,0 +1,28 @@
1
+ from modules import extra_networks, shared
2
+ from modules.hypernetworks import hypernetwork
3
+
4
+
5
+ class ExtraNetworkHypernet(extra_networks.ExtraNetwork):
6
+ def __init__(self):
7
+ super().__init__('hypernet')
8
+
9
+ def activate(self, p, params_list):
10
+ additional = shared.opts.sd_hypernetwork
11
+
12
+ if additional != "None" and additional in shared.hypernetworks and not any(x for x in params_list if x.items[0] == additional):
13
+ hypernet_prompt_text = f"<hypernet:{additional}:{shared.opts.extra_networks_default_multiplier}>"
14
+ p.all_prompts = [f"{prompt}{hypernet_prompt_text}" for prompt in p.all_prompts]
15
+ params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
16
+
17
+ names = []
18
+ multipliers = []
19
+ for params in params_list:
20
+ assert params.items
21
+
22
+ names.append(params.items[0])
23
+ multipliers.append(float(params.items[1]) if len(params.items) > 1 else 1.0)
24
+
25
+ hypernetwork.load_hypernetworks(names, multipliers)
26
+
27
+ def deactivate(self, p):
28
+ pass
modules/extras.py ADDED
@@ -0,0 +1,330 @@
1
+ import os
2
+ import re
3
+ import shutil
4
+ import json
5
+
6
+
7
+ import torch
8
+ import tqdm
9
+
10
+ from modules import shared, images, sd_models, sd_vae, sd_models_config, errors
11
+ from modules.ui_common import plaintext_to_html
12
+ import gradio as gr
13
+ import safetensors.torch
14
+
15
+
16
+ def run_pnginfo(image):
17
+ if image is None:
18
+ return '', '', ''
19
+
20
+ geninfo, items = images.read_info_from_image(image)
21
+ items = {**{'parameters': geninfo}, **items}
22
+
23
+ info = ''
24
+ for key, text in items.items():
25
+ info += f"""
26
+ <div>
27
+ <p><b>{plaintext_to_html(str(key))}</b></p>
28
+ <p>{plaintext_to_html(str(text))}</p>
29
+ </div>
30
+ """.strip()+"\n"
31
+
32
+ if len(info) == 0:
33
+ message = "Nothing found in the image."
34
+ info = f"<div><p>{message}<p></div>"
35
+
36
+ return '', geninfo, info
37
+
38
+
39
+ def create_config(ckpt_result, config_source, a, b, c):
40
+ def config(x):
41
+ res = sd_models_config.find_checkpoint_config_near_filename(x) if x else None
42
+ return res if res != shared.sd_default_config else None
43
+
44
+ if config_source == 0:
45
+ cfg = config(a) or config(b) or config(c)
46
+ elif config_source == 1:
47
+ cfg = config(b)
48
+ elif config_source == 2:
49
+ cfg = config(c)
50
+ else:
51
+ cfg = None
52
+
53
+ if cfg is None:
54
+ return
55
+
56
+ filename, _ = os.path.splitext(ckpt_result)
57
+ checkpoint_filename = filename + ".yaml"
58
+
59
+ print("Copying config:")
60
+ print(" from:", cfg)
61
+ print(" to:", checkpoint_filename)
62
+ shutil.copyfile(cfg, checkpoint_filename)
63
+
64
+
65
+ checkpoint_dict_skip_on_merge = ["cond_stage_model.transformer.text_model.embeddings.position_ids"]
66
+
67
+
68
+ def to_half(tensor, enable):
69
+ if enable and tensor.dtype == torch.float:
70
+ return tensor.half()
71
+
72
+ return tensor
73
+
74
+
75
+ def read_metadata(primary_model_name, secondary_model_name, tertiary_model_name):
76
+ metadata = {}
77
+
78
+ for checkpoint_name in [primary_model_name, secondary_model_name, tertiary_model_name]:
79
+ checkpoint_info = sd_models.checkpoints_list.get(checkpoint_name, None)
80
+ if checkpoint_info is None:
81
+ continue
82
+
83
+ metadata.update(checkpoint_info.metadata)
84
+
85
+ return json.dumps(metadata, indent=4, ensure_ascii=False)
86
+
87
+
88
+ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format, config_source, bake_in_vae, discard_weights, save_metadata, add_merge_recipe, copy_metadata_fields, metadata_json):
89
+ shared.state.begin(job="model-merge")
90
+
91
+ def fail(message):
92
+ shared.state.textinfo = message
93
+ shared.state.end()
94
+ return [*[gr.update() for _ in range(4)], message]
95
+
96
+ def weighted_sum(theta0, theta1, alpha):
97
+ return ((1 - alpha) * theta0) + (alpha * theta1)
98
+
99
+ def get_difference(theta1, theta2):
100
+ return theta1 - theta2
101
+
102
+ def add_difference(theta0, theta1_2_diff, alpha):
103
+ return theta0 + (alpha * theta1_2_diff)
104
+
105
+ def filename_weighted_sum():
106
+ a = primary_model_info.model_name
107
+ b = secondary_model_info.model_name
108
+ Ma = round(1 - multiplier, 2)
109
+ Mb = round(multiplier, 2)
110
+
111
+ return f"{Ma}({a}) + {Mb}({b})"
112
+
113
+ def filename_add_difference():
114
+ a = primary_model_info.model_name
115
+ b = secondary_model_info.model_name
116
+ c = tertiary_model_info.model_name
117
+ M = round(multiplier, 2)
118
+
119
+ return f"{a} + {M}({b} - {c})"
120
+
121
+ def filename_nothing():
122
+ return primary_model_info.model_name
123
+
124
+ theta_funcs = {
125
+ "Weighted sum": (filename_weighted_sum, None, weighted_sum),
126
+ "Add difference": (filename_add_difference, get_difference, add_difference),
127
+ "No interpolation": (filename_nothing, None, None),
128
+ }
129
+ filename_generator, theta_func1, theta_func2 = theta_funcs[interp_method]
130
+ shared.state.job_count = (1 if theta_func1 else 0) + (1 if theta_func2 else 0)
131
+
132
+ if not primary_model_name:
133
+ return fail("Failed: Merging requires a primary model.")
134
+
135
+ primary_model_info = sd_models.checkpoints_list[primary_model_name]
136
+
137
+ if theta_func2 and not secondary_model_name:
138
+ return fail("Failed: Merging requires a secondary model.")
139
+
140
+ secondary_model_info = sd_models.checkpoints_list[secondary_model_name] if theta_func2 else None
141
+
142
+ if theta_func1 and not tertiary_model_name:
143
+ return fail(f"Failed: Interpolation method ({interp_method}) requires a tertiary model.")
144
+
145
+ tertiary_model_info = sd_models.checkpoints_list[tertiary_model_name] if theta_func1 else None
146
+
147
+ result_is_inpainting_model = False
148
+ result_is_instruct_pix2pix_model = False
149
+
150
+ if theta_func2:
151
+ shared.state.textinfo = "Loading B"
152
+ print(f"Loading {secondary_model_info.filename}...")
153
+ theta_1 = sd_models.read_state_dict(secondary_model_info.filename, map_location='cpu')
154
+ else:
155
+ theta_1 = None
156
+
157
+ if theta_func1:
158
+ shared.state.textinfo = "Loading C"
159
+ print(f"Loading {tertiary_model_info.filename}...")
160
+ theta_2 = sd_models.read_state_dict(tertiary_model_info.filename, map_location='cpu')
161
+
162
+ shared.state.textinfo = 'Merging B and C'
163
+ shared.state.sampling_steps = len(theta_1.keys())
164
+ for key in tqdm.tqdm(theta_1.keys()):
165
+ if key in checkpoint_dict_skip_on_merge:
166
+ continue
167
+
168
+ if 'model' in key:
169
+ if key in theta_2:
170
+ t2 = theta_2.get(key, torch.zeros_like(theta_1[key]))
171
+ theta_1[key] = theta_func1(theta_1[key], t2)
172
+ else:
173
+ theta_1[key] = torch.zeros_like(theta_1[key])
174
+
175
+ shared.state.sampling_step += 1
176
+ del theta_2
177
+
178
+ shared.state.nextjob()
179
+
180
+ shared.state.textinfo = f"Loading {primary_model_info.filename}..."
181
+ print(f"Loading {primary_model_info.filename}...")
182
+ theta_0 = sd_models.read_state_dict(primary_model_info.filename, map_location='cpu')
183
+
184
+ print("Merging...")
185
+ shared.state.textinfo = 'Merging A and B'
186
+ shared.state.sampling_steps = len(theta_0.keys())
187
+ for key in tqdm.tqdm(theta_0.keys()):
188
+ if theta_1 and 'model' in key and key in theta_1:
189
+
190
+ if key in checkpoint_dict_skip_on_merge:
191
+ continue
192
+
193
+ a = theta_0[key]
194
+ b = theta_1[key]
195
+
196
+ # this enables merging an inpainting model (A) with another one (B);
197
+ # where normal model would have 4 channels, for latent space, inpainting model would
198
+ # have another 4 channels for unmasked picture's latent space, plus one channel for mask, for a total of 9
199
+ if a.shape != b.shape and a.shape[0:1] + a.shape[2:] == b.shape[0:1] + b.shape[2:]:
200
+ if a.shape[1] == 4 and b.shape[1] == 9:
201
+ raise RuntimeError("When merging inpainting model with a normal one, A must be the inpainting model.")
202
+ if a.shape[1] == 4 and b.shape[1] == 8:
203
+ raise RuntimeError("When merging instruct-pix2pix model with a normal one, A must be the instruct-pix2pix model.")
204
+
205
+ if a.shape[1] == 8 and b.shape[1] == 4:#If we have an Instruct-Pix2Pix model...
206
+ theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)#Merge only the vectors the models have in common. Otherwise we get an error due to dimension mismatch.
207
+ result_is_instruct_pix2pix_model = True
208
+ else:
209
+ assert a.shape[1] == 9 and b.shape[1] == 4, f"Bad dimensions for merged layer {key}: A={a.shape}, B={b.shape}"
210
+ theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier)
211
+ result_is_inpainting_model = True
212
+ else:
213
+ theta_0[key] = theta_func2(a, b, multiplier)
214
+
215
+ theta_0[key] = to_half(theta_0[key], save_as_half)
216
+
217
+ shared.state.sampling_step += 1
218
+
219
+ del theta_1
220
+
221
+ bake_in_vae_filename = sd_vae.vae_dict.get(bake_in_vae, None)
222
+ if bake_in_vae_filename is not None:
223
+ print(f"Baking in VAE from {bake_in_vae_filename}")
224
+ shared.state.textinfo = 'Baking in VAE'
225
+ vae_dict = sd_vae.load_vae_dict(bake_in_vae_filename, map_location='cpu')
226
+
227
+ for key in vae_dict.keys():
228
+ theta_0_key = 'first_stage_model.' + key
229
+ if theta_0_key in theta_0:
230
+ theta_0[theta_0_key] = to_half(vae_dict[key], save_as_half)
231
+
232
+ del vae_dict
233
+
234
+ if save_as_half and not theta_func2:
235
+ for key in theta_0.keys():
236
+ theta_0[key] = to_half(theta_0[key], save_as_half)
237
+
238
+ if discard_weights:
239
+ regex = re.compile(discard_weights)
240
+ for key in list(theta_0):
241
+ if re.search(regex, key):
242
+ theta_0.pop(key, None)
243
+
244
+ ckpt_dir = shared.cmd_opts.ckpt_dir or sd_models.model_path
245
+
246
+ filename = filename_generator() if custom_name == '' else custom_name
247
+ filename += ".inpainting" if result_is_inpainting_model else ""
248
+ filename += ".instruct-pix2pix" if result_is_instruct_pix2pix_model else ""
249
+ filename += "." + checkpoint_format
250
+
251
+ output_modelname = os.path.join(ckpt_dir, filename)
252
+
253
+ shared.state.nextjob()
254
+ shared.state.textinfo = "Saving"
255
+ print(f"Saving to {output_modelname}...")
256
+
257
+ metadata = {}
258
+
259
+ if save_metadata and copy_metadata_fields:
260
+ if primary_model_info:
261
+ metadata.update(primary_model_info.metadata)
262
+ if secondary_model_info:
263
+ metadata.update(secondary_model_info.metadata)
264
+ if tertiary_model_info:
265
+ metadata.update(tertiary_model_info.metadata)
266
+
267
+ if save_metadata:
268
+ try:
269
+ metadata.update(json.loads(metadata_json))
270
+ except Exception as e:
271
+ errors.display(e, "reading metadata from json")
272
+
273
+ metadata["format"] = "pt"
274
+
275
+ if save_metadata and add_merge_recipe:
276
+ merge_recipe = {
277
+ "type": "webui", # indicate this model was merged with webui's built-in merger
278
+ "primary_model_hash": primary_model_info.sha256,
279
+ "secondary_model_hash": secondary_model_info.sha256 if secondary_model_info else None,
280
+ "tertiary_model_hash": tertiary_model_info.sha256 if tertiary_model_info else None,
281
+ "interp_method": interp_method,
282
+ "multiplier": multiplier,
283
+ "save_as_half": save_as_half,
284
+ "custom_name": custom_name,
285
+ "config_source": config_source,
286
+ "bake_in_vae": bake_in_vae,
287
+ "discard_weights": discard_weights,
288
+ "is_inpainting": result_is_inpainting_model,
289
+ "is_instruct_pix2pix": result_is_instruct_pix2pix_model
290
+ }
291
+
292
+ sd_merge_models = {}
293
+
294
+ def add_model_metadata(checkpoint_info):
295
+ checkpoint_info.calculate_shorthash()
296
+ sd_merge_models[checkpoint_info.sha256] = {
297
+ "name": checkpoint_info.name,
298
+ "legacy_hash": checkpoint_info.hash,
299
+ "sd_merge_recipe": checkpoint_info.metadata.get("sd_merge_recipe", None)
300
+ }
301
+
302
+ sd_merge_models.update(checkpoint_info.metadata.get("sd_merge_models", {}))
303
+
304
+ add_model_metadata(primary_model_info)
305
+ if secondary_model_info:
306
+ add_model_metadata(secondary_model_info)
307
+ if tertiary_model_info:
308
+ add_model_metadata(tertiary_model_info)
309
+
310
+ metadata["sd_merge_recipe"] = json.dumps(merge_recipe)
311
+ metadata["sd_merge_models"] = json.dumps(sd_merge_models)
312
+
313
+ _, extension = os.path.splitext(output_modelname)
314
+ if extension.lower() == ".safetensors":
315
+ safetensors.torch.save_file(theta_0, output_modelname, metadata=metadata if len(metadata)>0 else None)
316
+ else:
317
+ torch.save(theta_0, output_modelname)
318
+
319
+ sd_models.list_models()
320
+ created_model = next((ckpt for ckpt in sd_models.checkpoints_list.values() if ckpt.name == filename), None)
321
+ if created_model:
322
+ created_model.calculate_shorthash()
323
+
324
+ create_config(output_modelname, config_source, primary_model_info, secondary_model_info, tertiary_model_info)
325
+
326
+ print(f"Checkpoint saved to {output_modelname}.")
327
+ shared.state.textinfo = "Checkpoint saved"
328
+ shared.state.end()
329
+
330
+ return [*[gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)], "Checkpoint saved to " + output_modelname]
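
The three interpolation modes in run_modelmerger reduce to simple per-tensor arithmetic; a tiny standalone numeric sketch (not using the webui checkpoint loading):

import torch

def weighted_sum(theta0, theta1, alpha):
    return (1 - alpha) * theta0 + alpha * theta1

def add_difference(theta0, theta1_2_diff, alpha):
    return theta0 + alpha * theta1_2_diff

a = torch.tensor([1.0, 2.0])   # primary model weights
b = torch.tensor([3.0, 6.0])   # secondary model weights
c = torch.tensor([2.0, 2.0])   # tertiary model weights

print(weighted_sum(a, b, 0.5))         # tensor([2., 4.])
print(add_difference(a, b - c, 0.5))   # tensor([1.5000, 4.0000])
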
modules/face_restoration.py ADDED
@@ -0,0 +1,19 @@
1
+ from modules import shared
2
+
3
+
4
+ class FaceRestoration:
5
+ def name(self):
6
+ return "None"
7
+
8
+ def restore(self, np_image):
9
+ return np_image
10
+
11
+
12
+ def restore_faces(np_image):
13
+ face_restorers = [x for x in shared.face_restorers if x.name() == shared.opts.face_restoration_model or shared.opts.face_restoration_model is None]
14
+ if len(face_restorers) == 0:
15
+ return np_image
16
+
17
+ face_restorer = face_restorers[0]
18
+
19
+ return face_restorer.restore(np_image)
modules/fifo_lock.py ADDED
@@ -0,0 +1,37 @@
1
+ import threading
2
+ import collections
3
+
4
+
5
+ # reference: https://gist.github.com/vitaliyp/6d54dd76ca2c3cdfc1149d33007dc34a
6
+ class FIFOLock(object):
7
+ def __init__(self):
8
+ self._lock = threading.Lock()
9
+ self._inner_lock = threading.Lock()
10
+ self._pending_threads = collections.deque()
11
+
12
+ def acquire(self, blocking=True):
13
+ with self._inner_lock:
14
+ lock_acquired = self._lock.acquire(False)
15
+ if lock_acquired:
16
+ return True
17
+ elif not blocking:
18
+ return False
19
+
20
+ release_event = threading.Event()
21
+ self._pending_threads.append(release_event)
22
+
23
+ release_event.wait()
24
+ return self._lock.acquire()
25
+
26
+ def release(self):
27
+ with self._inner_lock:
28
+ if self._pending_threads:
29
+ release_event = self._pending_threads.popleft()
30
+ release_event.set()
31
+
32
+ self._lock.release()
33
+
34
+ __enter__ = acquire
35
+
36
+ def __exit__(self, t, v, tb):
37
+ self.release()
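
A short usage sketch (hypothetical, not part of the module): FIFOLock works as a drop-in context manager, and waiters are generally released in arrival order rather than at the scheduler's discretion.

import threading
import time

from modules.fifo_lock import FIFOLock

lock = FIFOLock()
order = []

def worker(i):
    with lock:
        order.append(i)
        time.sleep(0.01)

threads = [threading.Thread(target=worker, args=(i,)) for i in range(5)]
for t in threads:
    t.start()
    time.sleep(0.005)   # stagger arrival so the waiters queue up in index order
for t in threads:
    t.join()

print(order)   # typically [0, 1, 2, 3, 4] (timing-dependent)
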
modules/generation_parameters_copypaste.py ADDED
1
+ import base64
2
+ import io
3
+ import json
4
+ import os
5
+ import re
6
+
7
+ import gradio as gr
8
+ from modules.paths import data_path
9
+ from modules import shared, ui_tempdir, script_callbacks, processing
10
+ from PIL import Image
11
+
12
+ re_param_code = r'\s*([\w ]+):\s*("(?:\\.|[^\\"])+"|[^,]*)(?:,|$)'
13
+ re_param = re.compile(re_param_code)
14
+ re_imagesize = re.compile(r"^(\d+)x(\d+)$")
15
+ re_hypernet_hash = re.compile("\(([0-9a-f]+)\)$")
16
+ type_of_gr_update = type(gr.update())
17
+
18
+ paste_fields = {}
19
+ registered_param_bindings = []
20
+
21
+
22
+ class ParamBinding:
23
+ def __init__(self, paste_button, tabname, source_text_component=None, source_image_component=None, source_tabname=None, override_settings_component=None, paste_field_names=None):
24
+ self.paste_button = paste_button
25
+ self.tabname = tabname
26
+ self.source_text_component = source_text_component
27
+ self.source_image_component = source_image_component
28
+ self.source_tabname = source_tabname
29
+ self.override_settings_component = override_settings_component
30
+ self.paste_field_names = paste_field_names or []
31
+
32
+
33
+ def reset():
34
+ paste_fields.clear()
35
+ registered_param_bindings.clear()
36
+
37
+
38
+ def quote(text):
39
+ if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text):
40
+ return text
41
+
42
+ return json.dumps(text, ensure_ascii=False)
43
+
44
+
45
+ def unquote(text):
46
+ if len(text) == 0 or text[0] != '"' or text[-1] != '"':
47
+ return text
48
+
49
+ try:
50
+ return json.loads(text)
51
+ except Exception:
52
+ return text
53
+
54
+
55
+ def image_from_url_text(filedata):
56
+ if filedata is None:
57
+ return None
58
+
59
+ if type(filedata) == list and filedata and type(filedata[0]) == dict and filedata[0].get("is_file", False):
60
+ filedata = filedata[0]
61
+
62
+ if type(filedata) == dict and filedata.get("is_file", False):
63
+ filename = filedata["name"]
64
+ is_in_right_dir = ui_tempdir.check_tmp_file(shared.demo, filename)
65
+ assert is_in_right_dir, 'trying to open image file outside of allowed directories'
66
+
67
+ filename = filename.rsplit('?', 1)[0]
68
+ return Image.open(filename)
69
+
70
+ if type(filedata) == list:
71
+ if len(filedata) == 0:
72
+ return None
73
+
74
+ filedata = filedata[0]
75
+
76
+ if filedata.startswith("data:image/png;base64,"):
77
+ filedata = filedata[len("data:image/png;base64,"):]
78
+
79
+ filedata = base64.decodebytes(filedata.encode('utf-8'))
80
+ image = Image.open(io.BytesIO(filedata))
81
+ return image
82
+
83
+
84
+ def add_paste_fields(tabname, init_img, fields, override_settings_component=None):
85
+ paste_fields[tabname] = {"init_img": init_img, "fields": fields, "override_settings_component": override_settings_component}
86
+
87
+ # backwards compatibility for existing extensions
88
+ import modules.ui
89
+ if tabname == 'txt2img':
90
+ modules.ui.txt2img_paste_fields = fields
91
+ elif tabname == 'img2img':
92
+ modules.ui.img2img_paste_fields = fields
93
+
94
+
95
+ def create_buttons(tabs_list):
96
+ buttons = {}
97
+ for tab in tabs_list:
98
+ buttons[tab] = gr.Button(f"Send to {tab}", elem_id=f"{tab}_tab")
99
+ return buttons
100
+
101
+
102
+ def bind_buttons(buttons, send_image, send_generate_info):
103
+ """old function for backwards compatibility; do not use this, use register_paste_params_button"""
104
+ for tabname, button in buttons.items():
105
+ source_text_component = send_generate_info if isinstance(send_generate_info, gr.components.Component) else None
106
+ source_tabname = send_generate_info if isinstance(send_generate_info, str) else None
107
+
108
+ register_paste_params_button(ParamBinding(paste_button=button, tabname=tabname, source_text_component=source_text_component, source_image_component=send_image, source_tabname=source_tabname))
109
+
110
+
111
+ def register_paste_params_button(binding: ParamBinding):
112
+ registered_param_bindings.append(binding)
113
+
114
+
115
+ def connect_paste_params_buttons():
116
+ binding: ParamBinding
117
+ for binding in registered_param_bindings:
118
+ destination_image_component = paste_fields[binding.tabname]["init_img"]
119
+ fields = paste_fields[binding.tabname]["fields"]
120
+ override_settings_component = binding.override_settings_component or paste_fields[binding.tabname]["override_settings_component"]
121
+
122
+ destination_width_component = next(iter([field for field, name in fields if name == "Size-1"] if fields else []), None)
123
+ destination_height_component = next(iter([field for field, name in fields if name == "Size-2"] if fields else []), None)
124
+
125
+ if binding.source_image_component and destination_image_component:
126
+ if isinstance(binding.source_image_component, gr.Gallery):
127
+ func = send_image_and_dimensions if destination_width_component else image_from_url_text
128
+ jsfunc = "extract_image_from_gallery"
129
+ else:
130
+ func = send_image_and_dimensions if destination_width_component else lambda x: x
131
+ jsfunc = None
132
+
133
+ binding.paste_button.click(
134
+ fn=func,
135
+ _js=jsfunc,
136
+ inputs=[binding.source_image_component],
137
+ outputs=[destination_image_component, destination_width_component, destination_height_component] if destination_width_component else [destination_image_component],
138
+ show_progress=False,
139
+ )
140
+
141
+ if binding.source_text_component is not None and fields is not None:
142
+ connect_paste(binding.paste_button, fields, binding.source_text_component, override_settings_component, binding.tabname)
143
+
144
+ if binding.source_tabname is not None and fields is not None:
145
+ paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration'] + (["Seed"] if shared.opts.send_seed else []) + binding.paste_field_names
146
+ binding.paste_button.click(
147
+ fn=lambda *x: x,
148
+ inputs=[field for field, name in paste_fields[binding.source_tabname]["fields"] if name in paste_field_names],
149
+ outputs=[field for field, name in fields if name in paste_field_names],
150
+ show_progress=False,
151
+ )
152
+
153
+ binding.paste_button.click(
154
+ fn=None,
155
+ _js=f"switch_to_{binding.tabname}",
156
+ inputs=None,
157
+ outputs=None,
158
+ show_progress=False,
159
+ )
160
+
161
+
162
+ def send_image_and_dimensions(x):
163
+ if isinstance(x, Image.Image):
164
+ img = x
165
+ else:
166
+ img = image_from_url_text(x)
167
+
168
+ if shared.opts.send_size and isinstance(img, Image.Image):
169
+ w = img.width
170
+ h = img.height
171
+ else:
172
+ w = gr.update()
173
+ h = gr.update()
174
+
175
+ return img, w, h
176
+
177
+
178
+ def restore_old_hires_fix_params(res):
179
+ """for infotexts that specify old First pass size parameter, convert it into
180
+ width, height, and hr scale"""
181
+
182
+ firstpass_width = res.get('First pass size-1', None)
183
+ firstpass_height = res.get('First pass size-2', None)
184
+
185
+ if shared.opts.use_old_hires_fix_width_height:
186
+ hires_width = int(res.get("Hires resize-1", 0))
187
+ hires_height = int(res.get("Hires resize-2", 0))
188
+
189
+ if hires_width and hires_height:
190
+ res['Size-1'] = hires_width
191
+ res['Size-2'] = hires_height
192
+ return
193
+
194
+ if firstpass_width is None or firstpass_height is None:
195
+ return
196
+
197
+ firstpass_width, firstpass_height = int(firstpass_width), int(firstpass_height)
198
+ width = int(res.get("Size-1", 512))
199
+ height = int(res.get("Size-2", 512))
200
+
201
+ if firstpass_width == 0 or firstpass_height == 0:
202
+ firstpass_width, firstpass_height = processing.old_hires_fix_first_pass_dimensions(width, height)
203
+
204
+ res['Size-1'] = firstpass_width
205
+ res['Size-2'] = firstpass_height
206
+ res['Hires resize-1'] = width
207
+ res['Hires resize-2'] = height
208
+
209
+
210
+ def parse_generation_parameters(x: str):
211
+ """parses generation parameters string, the one you see in text field under the picture in UI:
212
+ ```
213
+ girl with an artist's beret, determined, blue eyes, desert scene, computer monitors, heavy makeup, by Alphonse Mucha and Charlie Bowater, ((eyeshadow)), (coquettish), detailed, intricate
214
+ Negative prompt: ugly, fat, obese, chubby, (((deformed))), [blurry], bad anatomy, disfigured, poorly drawn face, mutation, mutated, (extra_limb), (ugly), (poorly drawn hands), messy drawing
215
+ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model hash: 45dee52b
216
+ ```
217
+
218
+ returns a dict with field values
219
+ """
220
+
221
+ res = {}
222
+
223
+ prompt = ""
224
+ negative_prompt = ""
225
+
226
+ done_with_prompt = False
227
+
228
+ *lines, lastline = x.strip().split("\n")
229
+ if len(re_param.findall(lastline)) < 3:
230
+ lines.append(lastline)
231
+ lastline = ''
232
+
233
+ for line in lines:
234
+ line = line.strip()
235
+ if line.startswith("Negative prompt:"):
236
+ done_with_prompt = True
237
+ line = line[16:].strip()
238
+ if done_with_prompt:
239
+ negative_prompt += ("" if negative_prompt == "" else "\n") + line
240
+ else:
241
+ prompt += ("" if prompt == "" else "\n") + line
242
+
243
+ if shared.opts.infotext_styles != "Ignore":
244
+ found_styles, prompt, negative_prompt = shared.prompt_styles.extract_styles_from_prompt(prompt, negative_prompt)
245
+
246
+ if shared.opts.infotext_styles == "Apply":
247
+ res["Styles array"] = found_styles
248
+ elif shared.opts.infotext_styles == "Apply if any" and found_styles:
249
+ res["Styles array"] = found_styles
250
+
251
+ res["Prompt"] = prompt
252
+ res["Negative prompt"] = negative_prompt
253
+
254
+ for k, v in re_param.findall(lastline):
255
+ try:
256
+ if v[0] == '"' and v[-1] == '"':
257
+ v = unquote(v)
258
+
259
+ m = re_imagesize.match(v)
260
+ if m is not None:
261
+ res[f"{k}-1"] = m.group(1)
262
+ res[f"{k}-2"] = m.group(2)
263
+ else:
264
+ res[k] = v
265
+ except Exception:
266
+ print(f"Error parsing \"{k}: {v}\"")
267
+
268
+ # Missing CLIP skip means it was set to 1 (the default)
269
+ if "Clip skip" not in res:
270
+ res["Clip skip"] = "1"
271
+
272
+ hypernet = res.get("Hypernet", None)
273
+ if hypernet is not None:
274
+ res["Prompt"] += f"""<hypernet:{hypernet}:{res.get("Hypernet strength", "1.0")}>"""
275
+
276
+ if "Hires resize-1" not in res:
277
+ res["Hires resize-1"] = 0
278
+ res["Hires resize-2"] = 0
279
+
280
+ if "Hires sampler" not in res:
281
+ res["Hires sampler"] = "Use same sampler"
282
+
283
+ if "Hires checkpoint" not in res:
284
+ res["Hires checkpoint"] = "Use same checkpoint"
285
+
286
+ if "Hires prompt" not in res:
287
+ res["Hires prompt"] = ""
288
+
289
+ if "Hires negative prompt" not in res:
290
+ res["Hires negative prompt"] = ""
291
+
292
+ restore_old_hires_fix_params(res)
293
+
294
+ # Missing RNG means the default was set, which is GPU RNG
295
+ if "RNG" not in res:
296
+ res["RNG"] = "GPU"
297
+
298
+ if "Schedule type" not in res:
299
+ res["Schedule type"] = "Automatic"
300
+
301
+ if "Schedule max sigma" not in res:
302
+ res["Schedule max sigma"] = 0
303
+
304
+ if "Schedule min sigma" not in res:
305
+ res["Schedule min sigma"] = 0
306
+
307
+ if "Schedule rho" not in res:
308
+ res["Schedule rho"] = 0
309
+
310
+ if "VAE Encoder" not in res:
311
+ res["VAE Encoder"] = "Full"
312
+
313
+ if "VAE Decoder" not in res:
314
+ res["VAE Decoder"] = "Full"
315
+
316
+ return res
317
+
318
+
319
+ infotext_to_setting_name_mapping = [
320
+
321
+ ]
322
+ """Mapping of infotext labels to setting names. Only left for backwards compatibility - use OptionInfo(..., infotext='...') instead.
323
+ Example content:
324
+
325
+ infotext_to_setting_name_mapping = [
326
+ ('Conditional mask weight', 'inpainting_mask_weight'),
327
+ ('Model hash', 'sd_model_checkpoint'),
328
+ ('ENSD', 'eta_noise_seed_delta'),
329
+ ('Schedule type', 'k_sched_type'),
330
+ ]
331
+ """
332
+
333
+
334
+ def create_override_settings_dict(text_pairs):
335
+ """creates processing's override_settings parameters from gradio's multiselect
336
+
337
+ Example input:
338
+ ['Clip skip: 2', 'Model hash: e6e99610c4', 'ENSD: 31337']
339
+
340
+ Example output:
341
+ {'CLIP_stop_at_last_layers': 2, 'sd_model_checkpoint': 'e6e99610c4', 'eta_noise_seed_delta': 31337}
342
+ """
343
+
344
+ res = {}
345
+
346
+ params = {}
347
+ for pair in text_pairs:
348
+ k, v = pair.split(":", maxsplit=1)
349
+
350
+ params[k] = v.strip()
351
+
352
+ mapping = [(info.infotext, k) for k, info in shared.opts.data_labels.items() if info.infotext]
353
+ for param_name, setting_name in mapping + infotext_to_setting_name_mapping:
354
+ value = params.get(param_name, None)
355
+
356
+ if value is None:
357
+ continue
358
+
359
+ res[setting_name] = shared.opts.cast_value(setting_name, value)
360
+
361
+ return res
362
+
363
+
364
+ def connect_paste(button, paste_fields, input_comp, override_settings_component, tabname):
365
+ def paste_func(prompt):
366
+ if not prompt and not shared.cmd_opts.hide_ui_dir_config:
367
+ filename = os.path.join(data_path, "params.txt")
368
+ if os.path.exists(filename):
369
+ with open(filename, "r", encoding="utf8") as file:
370
+ prompt = file.read()
371
+
372
+ params = parse_generation_parameters(prompt)
373
+ script_callbacks.infotext_pasted_callback(prompt, params)
374
+ res = []
375
+
376
+ for output, key in paste_fields:
377
+ if callable(key):
378
+ v = key(params)
379
+ else:
380
+ v = params.get(key, None)
381
+
382
+ if v is None:
383
+ res.append(gr.update())
384
+ elif isinstance(v, type_of_gr_update):
385
+ res.append(v)
386
+ else:
387
+ try:
388
+ valtype = type(output.value)
389
+
390
+ if valtype == bool and v == "False":
391
+ val = False
392
+ else:
393
+ val = valtype(v)
394
+
395
+ res.append(gr.update(value=val))
396
+ except Exception:
397
+ res.append(gr.update())
398
+
399
+ return res
400
+
401
+ if override_settings_component is not None:
402
+ already_handled_fields = {key: 1 for _, key in paste_fields}
403
+
404
+ def paste_settings(params):
405
+ vals = {}
406
+
407
+ mapping = [(info.infotext, k) for k, info in shared.opts.data_labels.items() if info.infotext]
408
+ for param_name, setting_name in mapping + infotext_to_setting_name_mapping:
409
+ if param_name in already_handled_fields:
410
+ continue
411
+
412
+ v = params.get(param_name, None)
413
+ if v is None:
414
+ continue
415
+
416
+ if setting_name == "sd_model_checkpoint" and shared.opts.disable_weights_auto_swap:
417
+ continue
418
+
419
+ v = shared.opts.cast_value(setting_name, v)
420
+ current_value = getattr(shared.opts, setting_name, None)
421
+
422
+ if v == current_value:
423
+ continue
424
+
425
+ vals[param_name] = v
426
+
427
+ vals_pairs = [f"{k}: {v}" for k, v in vals.items()]
428
+
429
+ return gr.Dropdown.update(value=vals_pairs, choices=vals_pairs, visible=bool(vals_pairs))
430
+
431
+ paste_fields = paste_fields + [(override_settings_component, paste_settings)]
432
+
433
+ button.click(
434
+ fn=paste_func,
435
+ inputs=[input_comp],
436
+ outputs=[x[0] for x in paste_fields],
437
+ show_progress=False,
438
+ )
439
+ button.click(
440
+ fn=None,
441
+ _js=f"recalculate_prompts_{tabname}",
442
+ inputs=[],
443
+ outputs=[],
444
+ show_progress=False,
445
+ )
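A short sketch of what `parse_generation_parameters` produces for a typical infotext, assuming `shared.opts` and `shared.prompt_styles` are initialized (the function consults both); the expected values are annotated, not captured output.

```python
# Illustrative sketch; requires the webui's modules package and initialized shared options.
from modules.generation_parameters_copypaste import parse_generation_parameters

infotext = (
    "a photo of a cat\n"
    "Negative prompt: blurry\n"
    "Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 42, Size: 512x768, Clip skip: 2"
)

res = parse_generation_parameters(infotext)
# Expected shape of the result:
#   res["Prompt"]          == "a photo of a cat"
#   res["Negative prompt"] == "blurry"
#   res["Size-1"], res["Size-2"] == "512", "768"   # re_imagesize splits WxH pairs
#   res["Clip skip"]       == "2"                  # would default to "1" if absent
```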
modules/gfpgan_model.py ADDED
@@ -0,0 +1,110 @@
1
+ import os
2
+
3
+ import facexlib
4
+ import gfpgan
5
+
6
+ import modules.face_restoration
7
+ from modules import paths, shared, devices, modelloader, errors
8
+
9
+ model_dir = "GFPGAN"
10
+ user_path = None
11
+ model_path = os.path.join(paths.models_path, model_dir)
12
+ model_url = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth"
13
+ have_gfpgan = False
14
+ loaded_gfpgan_model = None
15
+
16
+
17
+ def gfpgann():
18
+ global loaded_gfpgan_model
19
+ global model_path
20
+ if loaded_gfpgan_model is not None:
21
+ loaded_gfpgan_model.gfpgan.to(devices.device_gfpgan)
22
+ return loaded_gfpgan_model
23
+
24
+ if gfpgan_constructor is None:
25
+ return None
26
+
27
+ models = modelloader.load_models(model_path, model_url, user_path, ext_filter="GFPGAN")
28
+ if len(models) == 1 and models[0].startswith("http"):
29
+ model_file = models[0]
30
+ elif len(models) != 0:
31
+ latest_file = max(models, key=os.path.getctime)
32
+ model_file = latest_file
33
+ else:
34
+ print("Unable to load gfpgan model!")
35
+ return None
36
+ if hasattr(facexlib.detection.retinaface, 'device'):
37
+ facexlib.detection.retinaface.device = devices.device_gfpgan
38
+ model = gfpgan_constructor(model_path=model_file, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=devices.device_gfpgan)
39
+ loaded_gfpgan_model = model
40
+
41
+ return model
42
+
43
+
44
+ def send_model_to(model, device):
45
+ model.gfpgan.to(device)
46
+ model.face_helper.face_det.to(device)
47
+ model.face_helper.face_parse.to(device)
48
+
49
+
50
+ def gfpgan_fix_faces(np_image):
51
+ model = gfpgann()
52
+ if model is None:
53
+ return np_image
54
+
55
+ send_model_to(model, devices.device_gfpgan)
56
+
57
+ np_image_bgr = np_image[:, :, ::-1]
58
+ cropped_faces, restored_faces, gfpgan_output_bgr = model.enhance(np_image_bgr, has_aligned=False, only_center_face=False, paste_back=True)
59
+ np_image = gfpgan_output_bgr[:, :, ::-1]
60
+
61
+ model.face_helper.clean_all()
62
+
63
+ if shared.opts.face_restoration_unload:
64
+ send_model_to(model, devices.cpu)
65
+
66
+ return np_image
67
+
68
+
69
+ gfpgan_constructor = None
70
+
71
+
72
+ def setup_model(dirname):
73
+ try:
74
+ os.makedirs(model_path, exist_ok=True)
75
+ from gfpgan import GFPGANer
76
+ from facexlib import detection, parsing # noqa: F401
77
+ global user_path
78
+ global have_gfpgan
79
+ global gfpgan_constructor
80
+
81
+ load_file_from_url_orig = gfpgan.utils.load_file_from_url
82
+ facex_load_file_from_url_orig = facexlib.detection.load_file_from_url
83
+ facex_load_file_from_url_orig2 = facexlib.parsing.load_file_from_url
84
+
85
+ def my_load_file_from_url(**kwargs):
86
+ return load_file_from_url_orig(**dict(kwargs, model_dir=model_path))
87
+
88
+ def facex_load_file_from_url(**kwargs):
89
+ return facex_load_file_from_url_orig(**dict(kwargs, save_dir=model_path, model_dir=None))
90
+
91
+ def facex_load_file_from_url2(**kwargs):
92
+ return facex_load_file_from_url_orig2(**dict(kwargs, save_dir=model_path, model_dir=None))
93
+
94
+ gfpgan.utils.load_file_from_url = my_load_file_from_url
95
+ facexlib.detection.load_file_from_url = facex_load_file_from_url
96
+ facexlib.parsing.load_file_from_url = facex_load_file_from_url2
97
+ user_path = dirname
98
+ have_gfpgan = True
99
+ gfpgan_constructor = GFPGANer
100
+
101
+ class FaceRestorerGFPGAN(modules.face_restoration.FaceRestoration):
102
+ def name(self):
103
+ return "GFPGAN"
104
+
105
+ def restore(self, np_image):
106
+ return gfpgan_fix_faces(np_image)
107
+
108
+ shared.face_restorers.append(FaceRestorerGFPGAN())
109
+ except Exception:
110
+ errors.report("Error setting up GFPGAN", exc_info=True)
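The intended call order, sketched below: `setup_model` installs the download-path overrides and registers the face restorer, after which `gfpgan_fix_faces` can be applied to an RGB numpy array. The input filename is a placeholder and the first call may download the GFPGAN weights.

```python
# Sketch only; assumes gfpgan/facexlib are installed and modules.shared is initialized.
import numpy as np
from PIL import Image

from modules import gfpgan_model

gfpgan_model.setup_model("models/GFPGAN")   # registers FaceRestorerGFPGAN in shared.face_restorers

img = np.array(Image.open("portrait.png").convert("RGB"))   # hypothetical input image
restored = gfpgan_model.gfpgan_fix_faces(img)                # converts RGB -> BGR and back internally
Image.fromarray(restored).save("portrait_restored.png")
```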
modules/gitpython_hack.py ADDED
@@ -0,0 +1,42 @@
1
+ from __future__ import annotations
2
+
3
+ import io
4
+ import subprocess
5
+
6
+ import git
7
+
8
+
9
+ class Git(git.Git):
10
+ """
11
+ Git subclassed to never use persistent processes.
12
+ """
13
+
14
+ def _get_persistent_cmd(self, attr_name, cmd_name, *args, **kwargs):
15
+ raise NotImplementedError(f"Refusing to use persistent process: {attr_name} ({cmd_name} {args} {kwargs})")
16
+
17
+ def get_object_header(self, ref: str | bytes) -> tuple[str, str, int]:
18
+ ret = subprocess.check_output(
19
+ [self.GIT_PYTHON_GIT_EXECUTABLE, "cat-file", "--batch-check"],
20
+ input=self._prepare_ref(ref),
21
+ cwd=self._working_dir,
22
+ timeout=2,
23
+ )
24
+ return self._parse_object_header(ret)
25
+
26
+ def stream_object_data(self, ref: str) -> tuple[str, str, int, "Git.CatFileContentStream"]:
27
+ # Not really streaming, per se; this buffers the entire object in memory.
28
+ # Shouldn't be a problem for our use case, since we're only using this for
29
+ # object headers (commit objects).
30
+ ret = subprocess.check_output(
31
+ [self.GIT_PYTHON_GIT_EXECUTABLE, "cat-file", "--batch"],
32
+ input=self._prepare_ref(ref),
33
+ cwd=self._working_dir,
34
+ timeout=30,
35
+ )
36
+ bio = io.BytesIO(ret)
37
+ hexsha, typename, size = self._parse_object_header(bio.readline())
38
+ return (hexsha, typename, size, self.CatFileContentStream(size, bio))
39
+
40
+
41
+ class Repo(git.Repo):
42
+ GitCommandWrapperType = Git
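A brief sketch of the intended substitution: construct repositories through this `Repo` instead of `git.Repo`, so object reads go through one-shot `git cat-file` subprocess calls rather than a persistent `cat-file --batch` process. The repository path is a placeholder.

```python
# Sketch; the path is a placeholder for any local checkout.
from modules.gitpython_hack import Repo

repo = Repo("extensions/some-extension")
head = repo.head.commit   # reading the commit goes through get_object_header / stream_object_data
print(head.hexsha, head.message.splitlines()[0])
```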
modules/gradio_extensons.py ADDED
@@ -0,0 +1,73 @@
1
+ import gradio as gr
2
+
3
+ from modules import scripts, ui_tempdir, patches
4
+
5
+
6
+ def add_classes_to_gradio_component(comp):
7
+ """
8
+ this adds a gradio-* class to the component for CSS styling (e.g. gradio-button for gr.Button), as well as some other classes
9
+ """
10
+
11
+ comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])]
12
+
13
+ if getattr(comp, 'multiselect', False):
14
+ comp.elem_classes.append('multiselect')
15
+
16
+
17
+ def IOComponent_init(self, *args, **kwargs):
18
+ self.webui_tooltip = kwargs.pop('tooltip', None)
19
+
20
+ if scripts.scripts_current is not None:
21
+ scripts.scripts_current.before_component(self, **kwargs)
22
+
23
+ scripts.script_callbacks.before_component_callback(self, **kwargs)
24
+
25
+ res = original_IOComponent_init(self, *args, **kwargs)
26
+
27
+ add_classes_to_gradio_component(self)
28
+
29
+ scripts.script_callbacks.after_component_callback(self, **kwargs)
30
+
31
+ if scripts.scripts_current is not None:
32
+ scripts.scripts_current.after_component(self, **kwargs)
33
+
34
+ return res
35
+
36
+
37
+ def Block_get_config(self):
38
+ config = original_Block_get_config(self)
39
+
40
+ webui_tooltip = getattr(self, 'webui_tooltip', None)
41
+ if webui_tooltip:
42
+ config["webui_tooltip"] = webui_tooltip
43
+
44
+ config.pop('example_inputs', None)
45
+
46
+ return config
47
+
48
+
49
+ def BlockContext_init(self, *args, **kwargs):
50
+ res = original_BlockContext_init(self, *args, **kwargs)
51
+
52
+ add_classes_to_gradio_component(self)
53
+
54
+ return res
55
+
56
+
57
+ def Blocks_get_config_file(self, *args, **kwargs):
58
+ config = original_Blocks_get_config_file(self, *args, **kwargs)
59
+
60
+ for comp_config in config["components"]:
61
+ if "example_inputs" in comp_config:
62
+ comp_config["example_inputs"] = {"serialized": []}
63
+
64
+ return config
65
+
66
+
67
+ original_IOComponent_init = patches.patch(__name__, obj=gr.components.IOComponent, field="__init__", replacement=IOComponent_init)
68
+ original_Block_get_config = patches.patch(__name__, obj=gr.blocks.Block, field="get_config", replacement=Block_get_config)
69
+ original_BlockContext_init = patches.patch(__name__, obj=gr.blocks.BlockContext, field="__init__", replacement=BlockContext_init)
70
+ original_Blocks_get_config_file = patches.patch(__name__, obj=gr.blocks.Blocks, field="get_config_file", replacement=Blocks_get_config_file)
71
+
72
+
73
+ ui_tempdir.install_ui_tempdir_override()
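Once this module has been imported, the patched `IOComponent.__init__` accepts an extra `tooltip` keyword and every component gains a `gradio-<block name>` CSS class. A minimal sketch, assuming a Gradio 3.x version where these patch targets exist:

```python
# Sketch; importing modules.gradio_extensons applies the patches as a side effect.
import gradio as gr
import modules.gradio_extensons  # noqa: F401

with gr.Blocks() as demo:   # demo is not launched in this sketch
    btn = gr.Button("Generate", tooltip="Runs txt2img")  # 'tooltip' is popped by the patched __init__

print(btn.elem_classes)   # ['gradio-button', ...]
print(btn.webui_tooltip)  # 'Runs txt2img', later emitted through get_config()
```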
modules/hashes.py ADDED
@@ -0,0 +1,81 @@
1
+ import hashlib
2
+ import os.path
3
+
4
+ from modules import shared
5
+ import modules.cache
6
+
7
+ dump_cache = modules.cache.dump_cache
8
+ cache = modules.cache.cache
9
+
10
+
11
+ def calculate_sha256(filename):
12
+ hash_sha256 = hashlib.sha256()
13
+ blksize = 1024 * 1024
14
+
15
+ with open(filename, "rb") as f:
16
+ for chunk in iter(lambda: f.read(blksize), b""):
17
+ hash_sha256.update(chunk)
18
+
19
+ return hash_sha256.hexdigest()
20
+
21
+
22
+ def sha256_from_cache(filename, title, use_addnet_hash=False):
23
+ hashes = cache("hashes-addnet") if use_addnet_hash else cache("hashes")
24
+ ondisk_mtime = os.path.getmtime(filename)
25
+
26
+ if title not in hashes:
27
+ return None
28
+
29
+ cached_sha256 = hashes[title].get("sha256", None)
30
+ cached_mtime = hashes[title].get("mtime", 0)
31
+
32
+ if ondisk_mtime > cached_mtime or cached_sha256 is None:
33
+ return None
34
+
35
+ return cached_sha256
36
+
37
+
38
+ def sha256(filename, title, use_addnet_hash=False):
39
+ hashes = cache("hashes-addnet") if use_addnet_hash else cache("hashes")
40
+
41
+ sha256_value = sha256_from_cache(filename, title, use_addnet_hash)
42
+ if sha256_value is not None:
43
+ return sha256_value
44
+
45
+ if shared.cmd_opts.no_hashing:
46
+ return None
47
+
48
+ print(f"Calculating sha256 for {filename}: ", end='')
49
+ if use_addnet_hash:
50
+ with open(filename, "rb") as file:
51
+ sha256_value = addnet_hash_safetensors(file)
52
+ else:
53
+ sha256_value = calculate_sha256(filename)
54
+ print(f"{sha256_value}")
55
+
56
+ hashes[title] = {
57
+ "mtime": os.path.getmtime(filename),
58
+ "sha256": sha256_value,
59
+ }
60
+
61
+ dump_cache()
62
+
63
+ return sha256_value
64
+
65
+
66
+ def addnet_hash_safetensors(b):
67
+ """kohya-ss hash for safetensors from https://github.com/kohya-ss/sd-scripts/blob/main/library/train_util.py"""
68
+ hash_sha256 = hashlib.sha256()
69
+ blksize = 1024 * 1024
70
+
71
+ b.seek(0)
72
+ header = b.read(8)
73
+ n = int.from_bytes(header, "little")
74
+
75
+ offset = n + 8
76
+ b.seek(offset)
77
+ for chunk in iter(lambda: b.read(blksize), b""):
78
+ hash_sha256.update(chunk)
79
+
80
+ return hash_sha256.hexdigest()
81
+
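A small sketch contrasting the two hashing paths above: `sha256()` caches a digest of the whole file under the given title, while `addnet_hash_safetensors` skips the JSON header and hashes only the tensor payload (the kohya-ss convention). The file path is a placeholder and `shared` must be initialized, since `sha256()` checks `shared.cmd_opts.no_hashing`.

```python
# Sketch; the path is hypothetical.
from modules import hashes

path = "models/Lora/example.safetensors"

full_digest = hashes.sha256(path, "lora/example")        # whole-file hash, cached on disk
with open(path, "rb") as f:
    addnet_digest = hashes.addnet_hash_safetensors(f)    # header-skipping hash

print(full_digest, addnet_digest[:12])
```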
modules/hypernetworks/hypernetwork.py ADDED
@@ -0,0 +1,782 @@
1
+ import datetime
2
+ import glob
3
+ import html
4
+ import os
5
+ import inspect
6
+ from contextlib import closing
7
+
8
+ import modules.textual_inversion.dataset
9
+ import torch
10
+ import tqdm
11
+ from einops import rearrange, repeat
12
+ from ldm.util import default
13
+ from modules import devices, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint, errors
14
+ from modules.textual_inversion import textual_inversion, logging
15
+ from modules.textual_inversion.learn_schedule import LearnRateScheduler
16
+ from torch import einsum
17
+ from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_
18
+
19
+ from collections import deque
20
+ from statistics import stdev, mean
21
+
22
+
23
+ optimizer_dict = {optim_name : cls_obj for optim_name, cls_obj in inspect.getmembers(torch.optim, inspect.isclass) if optim_name != "Optimizer"}
24
+
25
+ class HypernetworkModule(torch.nn.Module):
26
+ activation_dict = {
27
+ "linear": torch.nn.Identity,
28
+ "relu": torch.nn.ReLU,
29
+ "leakyrelu": torch.nn.LeakyReLU,
30
+ "elu": torch.nn.ELU,
31
+ "swish": torch.nn.Hardswish,
32
+ "tanh": torch.nn.Tanh,
33
+ "sigmoid": torch.nn.Sigmoid,
34
+ }
35
+ activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})
36
+
37
+ def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal',
38
+ add_layer_norm=False, activate_output=False, dropout_structure=None):
39
+ super().__init__()
40
+
41
+ self.multiplier = 1.0
42
+
43
+ assert layer_structure is not None, "layer_structure must not be None"
44
+ assert layer_structure[0] == 1, "Multiplier Sequence should start with size 1!"
45
+ assert layer_structure[-1] == 1, "Multiplier Sequence should end with size 1!"
46
+
47
+ linears = []
48
+ for i in range(len(layer_structure) - 1):
49
+
50
+ # Add a fully-connected layer
51
+ linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))
52
+
53
+ # Add an activation func except last layer
54
+ if activation_func == "linear" or activation_func is None or (i >= len(layer_structure) - 2 and not activate_output):
55
+ pass
56
+ elif activation_func in self.activation_dict:
57
+ linears.append(self.activation_dict[activation_func]())
58
+ else:
59
+ raise RuntimeError(f'hypernetwork uses an unsupported activation function: {activation_func}')
60
+
61
+ # Add layer normalization
62
+ if add_layer_norm:
63
+ linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))
64
+
65
+ # Everything should be now parsed into dropout structure, and applied here.
66
+ # Since we only have dropouts after layers, dropout structure should start with 0 and end with 0.
67
+ if dropout_structure is not None and dropout_structure[i+1] > 0:
68
+ assert 0 < dropout_structure[i+1] < 1, "Dropout probability should be 0 or float between 0 and 1!"
69
+ linears.append(torch.nn.Dropout(p=dropout_structure[i+1]))
70
+ # Code explanation : [1, 2, 1] -> dropout is missing when last_layer_dropout is false. [1, 2, 2, 1] -> [0, 0.3, 0, 0], when its True, [0, 0.3, 0.3, 0].
71
+
72
+ self.linear = torch.nn.Sequential(*linears)
73
+
74
+ if state_dict is not None:
75
+ self.fix_old_state_dict(state_dict)
76
+ self.load_state_dict(state_dict)
77
+ else:
78
+ for layer in self.linear:
79
+ if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
80
+ w, b = layer.weight.data, layer.bias.data
81
+ if weight_init == "Normal" or type(layer) == torch.nn.LayerNorm:
82
+ normal_(w, mean=0.0, std=0.01)
83
+ normal_(b, mean=0.0, std=0)
84
+ elif weight_init == 'XavierUniform':
85
+ xavier_uniform_(w)
86
+ zeros_(b)
87
+ elif weight_init == 'XavierNormal':
88
+ xavier_normal_(w)
89
+ zeros_(b)
90
+ elif weight_init == 'KaimingUniform':
91
+ kaiming_uniform_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu')
92
+ zeros_(b)
93
+ elif weight_init == 'KaimingNormal':
94
+ kaiming_normal_(w, nonlinearity='leaky_relu' if 'leakyrelu' == activation_func else 'relu')
95
+ zeros_(b)
96
+ else:
97
+ raise KeyError(f"Key {weight_init} is not defined as initialization!")
98
+ self.to(devices.device)
99
+
100
+ def fix_old_state_dict(self, state_dict):
101
+ changes = {
102
+ 'linear1.bias': 'linear.0.bias',
103
+ 'linear1.weight': 'linear.0.weight',
104
+ 'linear2.bias': 'linear.1.bias',
105
+ 'linear2.weight': 'linear.1.weight',
106
+ }
107
+
108
+ for fr, to in changes.items():
109
+ x = state_dict.get(fr, None)
110
+ if x is None:
111
+ continue
112
+
113
+ del state_dict[fr]
114
+ state_dict[to] = x
115
+
116
+ def forward(self, x):
117
+ return x + self.linear(x) * (self.multiplier if not self.training else 1)
118
+
119
+ def trainables(self):
120
+ layer_structure = []
121
+ for layer in self.linear:
122
+ if type(layer) == torch.nn.Linear or type(layer) == torch.nn.LayerNorm:
123
+ layer_structure += [layer.weight, layer.bias]
124
+ return layer_structure
125
+
126
+
127
+ #param layer_structure : sequence used for length, use_dropout : controlling boolean, last_layer_dropout : for compatibility check.
128
+ def parse_dropout_structure(layer_structure, use_dropout, last_layer_dropout):
129
+ if layer_structure is None:
130
+ layer_structure = [1, 2, 1]
131
+ if not use_dropout:
132
+ return [0] * len(layer_structure)
133
+ dropout_values = [0]
134
+ dropout_values.extend([0.3] * (len(layer_structure) - 3))
135
+ if last_layer_dropout:
136
+ dropout_values.append(0.3)
137
+ else:
138
+ dropout_values.append(0)
139
+ dropout_values.append(0)
140
+ return dropout_values
141
+
142
+
143
+ class Hypernetwork:
144
+ filename = None
145
+ name = None
146
+
147
+ def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, activate_output=False, **kwargs):
148
+ self.filename = None
149
+ self.name = name
150
+ self.layers = {}
151
+ self.step = 0
152
+ self.sd_checkpoint = None
153
+ self.sd_checkpoint_name = None
154
+ self.layer_structure = layer_structure
155
+ self.activation_func = activation_func
156
+ self.weight_init = weight_init
157
+ self.add_layer_norm = add_layer_norm
158
+ self.use_dropout = use_dropout
159
+ self.activate_output = activate_output
160
+ self.last_layer_dropout = kwargs.get('last_layer_dropout', True)
161
+ self.dropout_structure = kwargs.get('dropout_structure', None)
162
+ if self.dropout_structure is None:
163
+ self.dropout_structure = parse_dropout_structure(self.layer_structure, self.use_dropout, self.last_layer_dropout)
164
+ self.optimizer_name = None
165
+ self.optimizer_state_dict = None
166
+ self.optional_info = None
167
+
168
+ for size in enable_sizes or []:
169
+ self.layers[size] = (
170
+ HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
171
+ self.add_layer_norm, self.activate_output, dropout_structure=self.dropout_structure),
172
+ HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
173
+ self.add_layer_norm, self.activate_output, dropout_structure=self.dropout_structure),
174
+ )
175
+ self.eval()
176
+
177
+ def weights(self):
178
+ res = []
179
+ for layers in self.layers.values():
180
+ for layer in layers:
181
+ res += layer.parameters()
182
+ return res
183
+
184
+ def train(self, mode=True):
185
+ for layers in self.layers.values():
186
+ for layer in layers:
187
+ layer.train(mode=mode)
188
+ for param in layer.parameters():
189
+ param.requires_grad = mode
190
+
191
+ def to(self, device):
192
+ for layers in self.layers.values():
193
+ for layer in layers:
194
+ layer.to(device)
195
+
196
+ return self
197
+
198
+ def set_multiplier(self, multiplier):
199
+ for layers in self.layers.values():
200
+ for layer in layers:
201
+ layer.multiplier = multiplier
202
+
203
+ return self
204
+
205
+ def eval(self):
206
+ for layers in self.layers.values():
207
+ for layer in layers:
208
+ layer.eval()
209
+ for param in layer.parameters():
210
+ param.requires_grad = False
211
+
212
+ def save(self, filename):
213
+ state_dict = {}
214
+ optimizer_saved_dict = {}
215
+
216
+ for k, v in self.layers.items():
217
+ state_dict[k] = (v[0].state_dict(), v[1].state_dict())
218
+
219
+ state_dict['step'] = self.step
220
+ state_dict['name'] = self.name
221
+ state_dict['layer_structure'] = self.layer_structure
222
+ state_dict['activation_func'] = self.activation_func
223
+ state_dict['is_layer_norm'] = self.add_layer_norm
224
+ state_dict['weight_initialization'] = self.weight_init
225
+ state_dict['sd_checkpoint'] = self.sd_checkpoint
226
+ state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
227
+ state_dict['activate_output'] = self.activate_output
228
+ state_dict['use_dropout'] = self.use_dropout
229
+ state_dict['dropout_structure'] = self.dropout_structure
230
+ state_dict['last_layer_dropout'] = (self.dropout_structure[-2] != 0) if self.dropout_structure is not None else self.last_layer_dropout
231
+ state_dict['optional_info'] = self.optional_info if self.optional_info else None
232
+
233
+ if self.optimizer_name is not None:
234
+ optimizer_saved_dict['optimizer_name'] = self.optimizer_name
235
+
236
+ torch.save(state_dict, filename)
237
+ if shared.opts.save_optimizer_state and self.optimizer_state_dict:
238
+ optimizer_saved_dict['hash'] = self.shorthash()
239
+ optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict
240
+ torch.save(optimizer_saved_dict, filename + '.optim')
241
+
242
+ def load(self, filename):
243
+ self.filename = filename
244
+ if self.name is None:
245
+ self.name = os.path.splitext(os.path.basename(filename))[0]
246
+
247
+ state_dict = torch.load(filename, map_location='cpu')
248
+
249
+ self.layer_structure = state_dict.get('layer_structure', [1, 2, 1])
250
+ self.optional_info = state_dict.get('optional_info', None)
251
+ self.activation_func = state_dict.get('activation_func', None)
252
+ self.weight_init = state_dict.get('weight_initialization', 'Normal')
253
+ self.add_layer_norm = state_dict.get('is_layer_norm', False)
254
+ self.dropout_structure = state_dict.get('dropout_structure', None)
255
+ self.use_dropout = True if self.dropout_structure is not None and any(self.dropout_structure) else state_dict.get('use_dropout', False)
256
+ self.activate_output = state_dict.get('activate_output', True)
257
+ self.last_layer_dropout = state_dict.get('last_layer_dropout', False)
258
+ # Dropout structure should have same length as layer structure, Every digits should be in [0,1), and last digit must be 0.
259
+ if self.dropout_structure is None:
260
+ self.dropout_structure = parse_dropout_structure(self.layer_structure, self.use_dropout, self.last_layer_dropout)
261
+
262
+ if shared.opts.print_hypernet_extra:
263
+ if self.optional_info is not None:
264
+ print(f" INFO:\n {self.optional_info}\n")
265
+
266
+ print(f" Layer structure: {self.layer_structure}")
267
+ print(f" Activation function: {self.activation_func}")
268
+ print(f" Weight initialization: {self.weight_init}")
269
+ print(f" Layer norm: {self.add_layer_norm}")
270
+ print(f" Dropout usage: {self.use_dropout}" )
271
+ print(f" Activate last layer: {self.activate_output}")
272
+ print(f" Dropout structure: {self.dropout_structure}")
273
+
274
+ optimizer_saved_dict = torch.load(self.filename + '.optim', map_location='cpu') if os.path.exists(self.filename + '.optim') else {}
275
+
276
+ if self.shorthash() == optimizer_saved_dict.get('hash', None):
277
+ self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
278
+ else:
279
+ self.optimizer_state_dict = None
280
+ if self.optimizer_state_dict:
281
+ self.optimizer_name = optimizer_saved_dict.get('optimizer_name', 'AdamW')
282
+ if shared.opts.print_hypernet_extra:
283
+ print("Loaded existing optimizer from checkpoint")
284
+ print(f"Optimizer name is {self.optimizer_name}")
285
+ else:
286
+ self.optimizer_name = "AdamW"
287
+ if shared.opts.print_hypernet_extra:
288
+ print("No saved optimizer exists in checkpoint")
289
+
290
+ for size, sd in state_dict.items():
291
+ if type(size) == int:
292
+ self.layers[size] = (
293
+ HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init,
294
+ self.add_layer_norm, self.activate_output, self.dropout_structure),
295
+ HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init,
296
+ self.add_layer_norm, self.activate_output, self.dropout_structure),
297
+ )
298
+
299
+ self.name = state_dict.get('name', self.name)
300
+ self.step = state_dict.get('step', 0)
301
+ self.sd_checkpoint = state_dict.get('sd_checkpoint', None)
302
+ self.sd_checkpoint_name = state_dict.get('sd_checkpoint_name', None)
303
+ self.eval()
304
+
305
+ def shorthash(self):
306
+ sha256 = hashes.sha256(self.filename, f'hypernet/{self.name}')
307
+
308
+ return sha256[0:10] if sha256 else None
309
+
310
+
311
+ def list_hypernetworks(path):
312
+ res = {}
313
+ for filename in sorted(glob.iglob(os.path.join(path, '**/*.pt'), recursive=True), key=str.lower):
314
+ name = os.path.splitext(os.path.basename(filename))[0]
315
+ # Prevent a hypothetical "None.pt" from being listed.
316
+ if name != "None":
317
+ res[name] = filename
318
+ return res
319
+
320
+
321
+ def load_hypernetwork(name):
322
+ path = shared.hypernetworks.get(name, None)
323
+
324
+ if path is None:
325
+ return None
326
+
327
+ try:
328
+ hypernetwork = Hypernetwork()
329
+ hypernetwork.load(path)
330
+ return hypernetwork
331
+ except Exception:
332
+ errors.report(f"Error loading hypernetwork {path}", exc_info=True)
333
+ return None
334
+
335
+
336
+ def load_hypernetworks(names, multipliers=None):
337
+ already_loaded = {}
338
+
339
+ for hypernetwork in shared.loaded_hypernetworks:
340
+ if hypernetwork.name in names:
341
+ already_loaded[hypernetwork.name] = hypernetwork
342
+
343
+ shared.loaded_hypernetworks.clear()
344
+
345
+ for i, name in enumerate(names):
346
+ hypernetwork = already_loaded.get(name, None)
347
+ if hypernetwork is None:
348
+ hypernetwork = load_hypernetwork(name)
349
+
350
+ if hypernetwork is None:
351
+ continue
352
+
353
+ hypernetwork.set_multiplier(multipliers[i] if multipliers else 1.0)
354
+ shared.loaded_hypernetworks.append(hypernetwork)
355
+
356
+
357
+ def apply_single_hypernetwork(hypernetwork, context_k, context_v, layer=None):
358
+ hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context_k.shape[2], None)
359
+
360
+ if hypernetwork_layers is None:
361
+ return context_k, context_v
362
+
363
+ if layer is not None:
364
+ layer.hyper_k = hypernetwork_layers[0]
365
+ layer.hyper_v = hypernetwork_layers[1]
366
+
367
+ context_k = devices.cond_cast_unet(hypernetwork_layers[0](devices.cond_cast_float(context_k)))
368
+ context_v = devices.cond_cast_unet(hypernetwork_layers[1](devices.cond_cast_float(context_v)))
369
+ return context_k, context_v
370
+
371
+
372
+ def apply_hypernetworks(hypernetworks, context, layer=None):
373
+ context_k = context
374
+ context_v = context
375
+ for hypernetwork in hypernetworks:
376
+ context_k, context_v = apply_single_hypernetwork(hypernetwork, context_k, context_v, layer)
377
+
378
+ return context_k, context_v
379
+
380
+
381
+ def attention_CrossAttention_forward(self, x, context=None, mask=None, **kwargs):
382
+ h = self.heads
383
+
384
+ q = self.to_q(x)
385
+ context = default(context, x)
386
+
387
+ context_k, context_v = apply_hypernetworks(shared.loaded_hypernetworks, context, self)
388
+ k = self.to_k(context_k)
389
+ v = self.to_v(context_v)
390
+
391
+ q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v))
392
+
393
+ sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
394
+
395
+ if mask is not None:
396
+ mask = rearrange(mask, 'b ... -> b (...)')
397
+ max_neg_value = -torch.finfo(sim.dtype).max
398
+ mask = repeat(mask, 'b j -> (b h) () j', h=h)
399
+ sim.masked_fill_(~mask, max_neg_value)
400
+
401
+ # attention, what we cannot get enough of
402
+ attn = sim.softmax(dim=-1)
403
+
404
+ out = einsum('b i j, b j d -> b i d', attn, v)
405
+ out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
406
+ return self.to_out(out)
407
+
408
+
409
+ def stack_conds(conds):
410
+ if len(conds) == 1:
411
+ return torch.stack(conds)
412
+
413
+ # same as in reconstruct_multicond_batch
414
+ token_count = max([x.shape[0] for x in conds])
415
+ for i in range(len(conds)):
416
+ if conds[i].shape[0] != token_count:
417
+ last_vector = conds[i][-1:]
418
+ last_vector_repeated = last_vector.repeat([token_count - conds[i].shape[0], 1])
419
+ conds[i] = torch.vstack([conds[i], last_vector_repeated])
420
+
421
+ return torch.stack(conds)
422
+
423
+
424
+ def statistics(data):
425
+ if len(data) < 2:
426
+ std = 0
427
+ else:
428
+ std = stdev(data)
429
+ total_information = f"loss:{mean(data):.3f}" + u"\u00B1" + f"({std/ (len(data) ** 0.5):.3f})"
430
+ recent_data = data[-32:]
431
+ if len(recent_data) < 2:
432
+ std = 0
433
+ else:
434
+ std = stdev(recent_data)
435
+ recent_information = f"recent 32 loss:{mean(recent_data):.3f}" + u"\u00B1" + f"({std / (len(recent_data) ** 0.5):.3f})"
436
+ return total_information, recent_information
437
+
438
+
439
+ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None):
440
+ # Remove illegal characters from name.
441
+ name = "".join( x for x in name if (x.isalnum() or x in "._- "))
442
+ assert name, "Name cannot be empty!"
443
+
444
+ fn = os.path.join(shared.cmd_opts.hypernetwork_dir, f"{name}.pt")
445
+ if not overwrite_old:
446
+ assert not os.path.exists(fn), f"file {fn} already exists"
447
+
448
+ if type(layer_structure) == str:
449
+ layer_structure = [float(x.strip()) for x in layer_structure.split(",")]
450
+
451
+ if use_dropout and dropout_structure and type(dropout_structure) == str:
452
+ dropout_structure = [float(x.strip()) for x in dropout_structure.split(",")]
453
+ else:
454
+ dropout_structure = [0] * len(layer_structure)
455
+
456
+ hypernet = modules.hypernetworks.hypernetwork.Hypernetwork(
457
+ name=name,
458
+ enable_sizes=[int(x) for x in enable_sizes],
459
+ layer_structure=layer_structure,
460
+ activation_func=activation_func,
461
+ weight_init=weight_init,
462
+ add_layer_norm=add_layer_norm,
463
+ use_dropout=use_dropout,
464
+ dropout_structure=dropout_structure
465
+ )
466
+ hypernet.save(fn)
467
+
468
+ shared.reload_hypernetworks()
469
+
470
+
471
+ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, varsize, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, use_weight, create_image_every, save_hypernetwork_every, template_filename, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
472
+ from modules import images, processing
473
+
474
+ save_hypernetwork_every = save_hypernetwork_every or 0
475
+ create_image_every = create_image_every or 0
476
+ template_file = textual_inversion.textual_inversion_templates.get(template_filename, None)
477
+ textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, template_file, template_filename, steps, save_hypernetwork_every, create_image_every, log_directory, name="hypernetwork")
478
+ template_file = template_file.path
479
+
480
+ path = shared.hypernetworks.get(hypernetwork_name, None)
481
+ hypernetwork = Hypernetwork()
482
+ hypernetwork.load(path)
483
+ shared.loaded_hypernetworks = [hypernetwork]
484
+
485
+ shared.state.job = "train-hypernetwork"
486
+ shared.state.textinfo = "Initializing hypernetwork training..."
487
+ shared.state.job_count = steps
488
+
489
+ hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0]
490
+ filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
491
+
492
+ log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), hypernetwork_name)
493
+ unload = shared.opts.unload_models_when_training
494
+
495
+ if save_hypernetwork_every > 0:
496
+ hypernetwork_dir = os.path.join(log_directory, "hypernetworks")
497
+ os.makedirs(hypernetwork_dir, exist_ok=True)
498
+ else:
499
+ hypernetwork_dir = None
500
+
501
+ if create_image_every > 0:
502
+ images_dir = os.path.join(log_directory, "images")
503
+ os.makedirs(images_dir, exist_ok=True)
504
+ else:
505
+ images_dir = None
506
+
507
+ checkpoint = sd_models.select_checkpoint()
508
+
509
+ initial_step = hypernetwork.step or 0
510
+ if initial_step >= steps:
511
+ shared.state.textinfo = "Model has already been trained beyond specified max steps"
512
+ return hypernetwork, filename
513
+
514
+ scheduler = LearnRateScheduler(learn_rate, steps, initial_step)
515
+
516
+ clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else None
517
+ if clip_grad:
518
+ clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, initial_step, verbose=False)
519
+
520
+ if shared.opts.training_enable_tensorboard:
521
+ tensorboard_writer = textual_inversion.tensorboard_setup(log_directory)
522
+
523
+ # dataset loading may take a while, so input validations and early returns should be done before this
524
+ shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
525
+
526
+ pin_memory = shared.opts.pin_memory
527
+
528
+ ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize, use_weight=use_weight)
529
+
530
+ if shared.opts.save_training_settings_to_txt:
531
+ saved_params = dict(
532
+ model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds),
533
+ **{field: getattr(hypernetwork, field) for field in ['layer_structure', 'activation_func', 'weight_init', 'add_layer_norm', 'use_dropout', ]}
534
+ )
535
+ logging.save_settings_to_file(log_directory, {**saved_params, **locals()})
536
+
537
+ latent_sampling_method = ds.latent_sampling_method
538
+
539
+ dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)
540
+
541
+ old_parallel_processing_allowed = shared.parallel_processing_allowed
542
+
543
+ if unload:
544
+ shared.parallel_processing_allowed = False
545
+ shared.sd_model.cond_stage_model.to(devices.cpu)
546
+ shared.sd_model.first_stage_model.to(devices.cpu)
547
+
548
+ weights = hypernetwork.weights()
549
+ hypernetwork.train()
550
+
551
+ # Here we use optimizer from saved HN, or we can specify as UI option.
552
+ if hypernetwork.optimizer_name in optimizer_dict:
553
+ optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=scheduler.learn_rate)
554
+ optimizer_name = hypernetwork.optimizer_name
555
+ else:
556
+ print(f"Optimizer type {hypernetwork.optimizer_name} is not defined!")
557
+ optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate)
558
+ optimizer_name = 'AdamW'
559
+
560
+ if hypernetwork.optimizer_state_dict: # This line must be changed if Optimizer type can be different from saved optimizer.
561
+ try:
562
+ optimizer.load_state_dict(hypernetwork.optimizer_state_dict)
563
+ except RuntimeError as e:
564
+ print("Cannot resume from saved optimizer!")
565
+ print(e)
566
+
567
+ scaler = torch.cuda.amp.GradScaler()
568
+
569
+ batch_size = ds.batch_size
570
+ gradient_step = ds.gradient_step
571
+ # n steps = batch_size * gradient_step * n image processed
572
+ steps_per_epoch = len(ds) // batch_size // gradient_step
573
+ max_steps_per_epoch = len(ds) // batch_size - (len(ds) // batch_size) % gradient_step
574
+ loss_step = 0
575
+ _loss_step = 0 #internal
576
+ # size = len(ds.indexes)
577
+ # loss_dict = defaultdict(lambda : deque(maxlen = 1024))
578
+ loss_logging = deque(maxlen=len(ds) * 3) # this should be configurable parameter, this is 3 * epoch(dataset size)
579
+ # losses = torch.zeros((size,))
580
+ # previous_mean_losses = [0]
581
+ # previous_mean_loss = 0
582
+ # print("Mean loss of {} elements".format(size))
583
+
584
+ steps_without_grad = 0
585
+
586
+ last_saved_file = "<none>"
587
+ last_saved_image = "<none>"
588
+ forced_filename = "<none>"
589
+
590
+ pbar = tqdm.tqdm(total=steps - initial_step)
591
+ try:
592
+ sd_hijack_checkpoint.add()
593
+
594
+ for _ in range((steps-initial_step) * gradient_step):
595
+ if scheduler.finished:
596
+ break
597
+ if shared.state.interrupted:
598
+ break
599
+ for j, batch in enumerate(dl):
600
+ # works as a drop_last=True for gradient accumulation
601
+ if j == max_steps_per_epoch:
602
+ break
603
+ scheduler.apply(optimizer, hypernetwork.step)
604
+ if scheduler.finished:
605
+ break
606
+ if shared.state.interrupted:
607
+ break
608
+
609
+ if clip_grad:
610
+ clip_grad_sched.step(hypernetwork.step)
611
+
612
+ with devices.autocast():
613
+ x = batch.latent_sample.to(devices.device, non_blocking=pin_memory)
614
+ if use_weight:
615
+ w = batch.weight.to(devices.device, non_blocking=pin_memory)
616
+ if tag_drop_out != 0 or shuffle_tags:
617
+ shared.sd_model.cond_stage_model.to(devices.device)
618
+ c = shared.sd_model.cond_stage_model(batch.cond_text).to(devices.device, non_blocking=pin_memory)
619
+ shared.sd_model.cond_stage_model.to(devices.cpu)
620
+ else:
621
+ c = stack_conds(batch.cond).to(devices.device, non_blocking=pin_memory)
622
+ if use_weight:
623
+ loss = shared.sd_model.weighted_forward(x, c, w)[0] / gradient_step
624
+ del w
625
+ else:
626
+ loss = shared.sd_model.forward(x, c)[0] / gradient_step
627
+ del x
628
+ del c
629
+
630
+ _loss_step += loss.item()
631
+ scaler.scale(loss).backward()
632
+
633
+ # go back until we reach gradient accumulation steps
634
+ if (j + 1) % gradient_step != 0:
635
+ continue
636
+ loss_logging.append(_loss_step)
637
+ if clip_grad:
638
+ clip_grad(weights, clip_grad_sched.learn_rate)
639
+
640
+ scaler.step(optimizer)
641
+ scaler.update()
642
+ hypernetwork.step += 1
643
+ pbar.update()
644
+ optimizer.zero_grad(set_to_none=True)
645
+ loss_step = _loss_step
646
+ _loss_step = 0
647
+
648
+ steps_done = hypernetwork.step + 1
649
+
650
+ epoch_num = hypernetwork.step // steps_per_epoch
651
+ epoch_step = hypernetwork.step % steps_per_epoch
652
+
653
+ description = f"Training hypernetwork [Epoch {epoch_num}: {epoch_step+1}/{steps_per_epoch}]loss: {loss_step:.7f}"
654
+ pbar.set_description(description)
655
+ if hypernetwork_dir is not None and steps_done % save_hypernetwork_every == 0:
656
+ # Before saving, change name to match current checkpoint.
657
+ hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}'
658
+ last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt')
659
+ hypernetwork.optimizer_name = optimizer_name
660
+ if shared.opts.save_optimizer_state:
661
+ hypernetwork.optimizer_state_dict = optimizer.state_dict()
662
+ save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file)
663
+ hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory.
664
+
665
+
666
+
667
+ if shared.opts.training_enable_tensorboard:
668
+ epoch_num = hypernetwork.step // len(ds)
669
+ epoch_step = hypernetwork.step - (epoch_num * len(ds)) + 1
670
+ mean_loss = sum(loss_logging) / len(loss_logging)
671
+ textual_inversion.tensorboard_add(tensorboard_writer, loss=mean_loss, global_step=hypernetwork.step, step=epoch_step, learn_rate=scheduler.learn_rate, epoch_num=epoch_num)
672
+
673
+ textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, steps_per_epoch, {
674
+ "loss": f"{loss_step:.7f}",
675
+ "learn_rate": scheduler.learn_rate
676
+ })
677
+
678
+ if images_dir is not None and steps_done % create_image_every == 0:
679
+ forced_filename = f'{hypernetwork_name}-{steps_done}'
680
+ last_saved_image = os.path.join(images_dir, forced_filename)
681
+ hypernetwork.eval()
682
+ rng_state = torch.get_rng_state()
683
+ cuda_rng_state = None
684
+ if torch.cuda.is_available():
685
+ cuda_rng_state = torch.cuda.get_rng_state_all()
686
+ shared.sd_model.cond_stage_model.to(devices.device)
687
+ shared.sd_model.first_stage_model.to(devices.device)
688
+
689
+ p = processing.StableDiffusionProcessingTxt2Img(
690
+ sd_model=shared.sd_model,
691
+ do_not_save_grid=True,
692
+ do_not_save_samples=True,
693
+ )
694
+
695
+ p.disable_extra_networks = True
696
+
697
+ if preview_from_txt2img:
698
+ p.prompt = preview_prompt
699
+ p.negative_prompt = preview_negative_prompt
700
+ p.steps = preview_steps
701
+ p.sampler_name = sd_samplers.samplers[preview_sampler_index].name
702
+ p.cfg_scale = preview_cfg_scale
703
+ p.seed = preview_seed
704
+ p.width = preview_width
705
+ p.height = preview_height
706
+ else:
707
+ p.prompt = batch.cond_text[0]
708
+ p.steps = 20
709
+ p.width = training_width
710
+ p.height = training_height
711
+
712
+ preview_text = p.prompt
713
+
714
+ with closing(p):
715
+ processed = processing.process_images(p)
716
+ image = processed.images[0] if len(processed.images) > 0 else None
717
+
718
+ if unload:
719
+ shared.sd_model.cond_stage_model.to(devices.cpu)
720
+ shared.sd_model.first_stage_model.to(devices.cpu)
721
+ torch.set_rng_state(rng_state)
722
+ if torch.cuda.is_available():
723
+ torch.cuda.set_rng_state_all(cuda_rng_state)
724
+ hypernetwork.train()
725
+ if image is not None:
726
+ shared.state.assign_current_image(image)
727
+ if shared.opts.training_enable_tensorboard and shared.opts.training_tensorboard_save_images:
728
+ textual_inversion.tensorboard_add_image(tensorboard_writer,
729
+ f"Validation at epoch {epoch_num}", image,
730
+ hypernetwork.step)
731
+ last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
732
+ last_saved_image += f", prompt: {preview_text}"
733
+
734
+ shared.state.job_no = hypernetwork.step
735
+
736
+ shared.state.textinfo = f"""
737
+ <p>
738
+ Loss: {loss_step:.7f}<br/>
739
+ Step: {steps_done}<br/>
740
+ Last prompt: {html.escape(batch.cond_text[0])}<br/>
741
+ Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
742
+ Last saved image: {html.escape(last_saved_image)}<br/>
743
+ </p>
744
+ """
745
+ except Exception:
746
+ errors.report("Exception in training hypernetwork", exc_info=True)
747
+ finally:
748
+ pbar.leave = False
749
+ pbar.close()
750
+ hypernetwork.eval()
751
+ sd_hijack_checkpoint.remove()
752
+
753
+
754
+
755
+ filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
756
+ hypernetwork.optimizer_name = optimizer_name
757
+ if shared.opts.save_optimizer_state:
758
+ hypernetwork.optimizer_state_dict = optimizer.state_dict()
759
+ save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename)
760
+
761
+ del optimizer
762
+ hypernetwork.optimizer_state_dict = None # dereference it after saving, to save memory.
763
+ shared.sd_model.cond_stage_model.to(devices.device)
764
+ shared.sd_model.first_stage_model.to(devices.device)
765
+ shared.parallel_processing_allowed = old_parallel_processing_allowed
766
+
767
+ return hypernetwork, filename
768
+
769
+ def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename):
770
+ old_hypernetwork_name = hypernetwork.name
771
+ old_sd_checkpoint = hypernetwork.sd_checkpoint if hasattr(hypernetwork, "sd_checkpoint") else None
772
+ old_sd_checkpoint_name = hypernetwork.sd_checkpoint_name if hasattr(hypernetwork, "sd_checkpoint_name") else None
773
+ try:
774
+ hypernetwork.sd_checkpoint = checkpoint.shorthash
775
+ hypernetwork.sd_checkpoint_name = checkpoint.model_name
776
+ hypernetwork.name = hypernetwork_name
777
+ hypernetwork.save(filename)
778
+ except:
779
+ hypernetwork.sd_checkpoint = old_sd_checkpoint
780
+ hypernetwork.sd_checkpoint_name = old_sd_checkpoint_name
781
+ hypernetwork.name = old_hypernetwork_name
782
+ raise
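The preview generation in the training loop above wraps image sampling in a save/restore of the CPU and CUDA RNG state so rendering a preview does not change the random stream the training itself sees. Below is a minimal standalone sketch of the same pattern; `with_preserved_rng` is a hypothetical helper, not part of the module:

import torch

def with_preserved_rng(fn):
    """Run fn() without disturbing the caller's random number stream."""
    rng_state = torch.get_rng_state()
    cuda_rng_state = torch.cuda.get_rng_state_all() if torch.cuda.is_available() else None
    try:
        return fn()
    finally:
        torch.set_rng_state(rng_state)
        if cuda_rng_state is not None:
            torch.cuda.set_rng_state_all(cuda_rng_state)

torch.manual_seed(0)
with_preserved_rng(lambda: torch.rand(100))  # e.g. rendering a preview image
print(torch.rand(1))  # same value as it would have been without the extra draws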
modules/hypernetworks/ui.py ADDED
@@ -0,0 +1,38 @@
1
+ import html
2
+
3
+ import gradio as gr
4
+ import modules.hypernetworks.hypernetwork
5
+ from modules import devices, sd_hijack, shared
6
+
7
+ not_available = ["hardswish", "multiheadattention"]
8
+ keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict if x not in not_available]
9
+
10
+
11
+ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None):
12
+ filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure)
13
+
14
+ return gr.Dropdown.update(choices=sorted(shared.hypernetworks)), f"Created: {filename}", ""
15
+
16
+
17
+ def train_hypernetwork(*args):
18
+ shared.loaded_hypernetworks = []
19
+
20
+ assert not shared.cmd_opts.lowvram, 'Training models with lowvram is not possible'
21
+
22
+ try:
23
+ sd_hijack.undo_optimizations()
24
+
25
+ hypernetwork, filename = modules.hypernetworks.hypernetwork.train_hypernetwork(*args)
26
+
27
+ res = f"""
28
+ Training {'interrupted' if shared.state.interrupted else 'finished'} at {hypernetwork.step} steps.
29
+ Hypernetwork saved to {html.escape(filename)}
30
+ """
31
+ return res, ""
32
+ except Exception:
33
+ raise
34
+ finally:
35
+ shared.sd_model.cond_stage_model.to(devices.device)
36
+ shared.sd_model.first_stage_model.to(devices.device)
37
+ sd_hijack.apply_optimizations()
38
+
modules/images.py ADDED
@@ -0,0 +1,778 @@
1
+ from __future__ import annotations
2
+
3
+ import datetime
4
+
5
+ import pytz
6
+ import io
7
+ import math
8
+ import os
9
+ from collections import namedtuple
10
+ import re
11
+
12
+ import numpy as np
13
+ import piexif
14
+ import piexif.helper
15
+ from PIL import Image, ImageFont, ImageDraw, ImageColor, PngImagePlugin
16
+ import string
17
+ import json
18
+ import hashlib
19
+
20
+ from modules import sd_samplers, shared, script_callbacks, errors
21
+ from modules.paths_internal import roboto_ttf_file
22
+ from modules.shared import opts
23
+
24
+ LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
25
+
26
+
27
+ def get_font(fontsize: int):
28
+ try:
29
+ return ImageFont.truetype(opts.font or roboto_ttf_file, fontsize)
30
+ except Exception:
31
+ return ImageFont.truetype(roboto_ttf_file, fontsize)
32
+
33
+
34
+ def image_grid(imgs, batch_size=1, rows=None):
35
+ if rows is None:
36
+ if opts.n_rows > 0:
37
+ rows = opts.n_rows
38
+ elif opts.n_rows == 0:
39
+ rows = batch_size
40
+ elif opts.grid_prevent_empty_spots:
41
+ rows = math.floor(math.sqrt(len(imgs)))
42
+ while len(imgs) % rows != 0:
43
+ rows -= 1
44
+ else:
45
+ rows = math.sqrt(len(imgs))
46
+ rows = round(rows)
47
+ if rows > len(imgs):
48
+ rows = len(imgs)
49
+
50
+ cols = math.ceil(len(imgs) / rows)
51
+
52
+ params = script_callbacks.ImageGridLoopParams(imgs, cols, rows)
53
+ script_callbacks.image_grid_callback(params)
54
+
55
+ w, h = imgs[0].size
56
+ grid = Image.new('RGB', size=(params.cols * w, params.rows * h), color='black')
57
+
58
+ for i, img in enumerate(params.imgs):
59
+ grid.paste(img, box=(i % params.cols * w, i // params.cols * h))
60
+
61
+ return grid
62
+
63
+
64
+ Grid = namedtuple("Grid", ["tiles", "tile_w", "tile_h", "image_w", "image_h", "overlap"])
65
+
66
+
67
+ def split_grid(image, tile_w=512, tile_h=512, overlap=64):
68
+ w = image.width
69
+ h = image.height
70
+
71
+ non_overlap_width = tile_w - overlap
72
+ non_overlap_height = tile_h - overlap
73
+
74
+ cols = math.ceil((w - overlap) / non_overlap_width)
75
+ rows = math.ceil((h - overlap) / non_overlap_height)
76
+
77
+ dx = (w - tile_w) / (cols - 1) if cols > 1 else 0
78
+ dy = (h - tile_h) / (rows - 1) if rows > 1 else 0
79
+
80
+ grid = Grid([], tile_w, tile_h, w, h, overlap)
81
+ for row in range(rows):
82
+ row_images = []
83
+
84
+ y = int(row * dy)
85
+
86
+ if y + tile_h >= h:
87
+ y = h - tile_h
88
+
89
+ for col in range(cols):
90
+ x = int(col * dx)
91
+
92
+ if x + tile_w >= w:
93
+ x = w - tile_w
94
+
95
+ tile = image.crop((x, y, x + tile_w, y + tile_h))
96
+
97
+ row_images.append([x, tile_w, tile])
98
+
99
+ grid.tiles.append([y, tile_h, row_images])
100
+
101
+ return grid
102
+
103
+
104
+ def combine_grid(grid):
105
+ def make_mask_image(r):
106
+ r = r * 255 / grid.overlap
107
+ r = r.astype(np.uint8)
108
+ return Image.fromarray(r, 'L')
109
+
110
+ mask_w = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((1, grid.overlap)).repeat(grid.tile_h, axis=0))
111
+ mask_h = make_mask_image(np.arange(grid.overlap, dtype=np.float32).reshape((grid.overlap, 1)).repeat(grid.image_w, axis=1))
112
+
113
+ combined_image = Image.new("RGB", (grid.image_w, grid.image_h))
114
+ for y, h, row in grid.tiles:
115
+ combined_row = Image.new("RGB", (grid.image_w, h))
116
+ for x, w, tile in row:
117
+ if x == 0:
118
+ combined_row.paste(tile, (0, 0))
119
+ continue
120
+
121
+ combined_row.paste(tile.crop((0, 0, grid.overlap, h)), (x, 0), mask=mask_w)
122
+ combined_row.paste(tile.crop((grid.overlap, 0, w, h)), (x + grid.overlap, 0))
123
+
124
+ if y == 0:
125
+ combined_image.paste(combined_row, (0, 0))
126
+ continue
127
+
128
+ combined_image.paste(combined_row.crop((0, 0, combined_row.width, grid.overlap)), (0, y), mask=mask_h)
129
+ combined_image.paste(combined_row.crop((0, grid.overlap, combined_row.width, h)), (0, y + grid.overlap))
130
+
131
+ return combined_image
132
+
133
+
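split_grid and combine_grid above implement the overlap-blended tiling used for processing large images in pieces: the image is cut into tiles that overlap by `overlap` pixels, each tile is handled on its own, and the seams are blended back together with the precomputed masks. A hedged usage sketch; the file names are placeholders and importing modules.images assumes the webui environment is on sys.path:

from PIL import Image
from modules.images import split_grid, combine_grid

im = Image.open("input.png").convert("RGB")
grid = split_grid(im, tile_w=512, tile_h=512, overlap=64)

for y, h, row in grid.tiles:
    for i, (x, w, tile) in enumerate(row):
        row[i] = [x, w, tile]  # replace `tile` with a processed tile of the same size (identity here)

combine_grid(grid).save("output.png")  # overlapping edges are alpha-blended back together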
134
+ class GridAnnotation:
135
+ def __init__(self, text='', is_active=True):
136
+ self.text = text
137
+ self.is_active = is_active
138
+ self.size = None
139
+
140
+
141
+ def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0):
142
+
143
+ color_active = ImageColor.getcolor(opts.grid_text_active_color, 'RGB')
144
+ color_inactive = ImageColor.getcolor(opts.grid_text_inactive_color, 'RGB')
145
+ color_background = ImageColor.getcolor(opts.grid_background_color, 'RGB')
146
+
147
+ def wrap(drawing, text, font, line_length):
148
+ lines = ['']
149
+ for word in text.split():
150
+ line = f'{lines[-1]} {word}'.strip()
151
+ if drawing.textlength(line, font=font) <= line_length:
152
+ lines[-1] = line
153
+ else:
154
+ lines.append(word)
155
+ return lines
156
+
157
+ def draw_texts(drawing, draw_x, draw_y, lines, initial_fnt, initial_fontsize):
158
+ for line in lines:
159
+ fnt = initial_fnt
160
+ fontsize = initial_fontsize
161
+ while drawing.multiline_textsize(line.text, font=fnt)[0] > line.allowed_width and fontsize > 0:
162
+ fontsize -= 1
163
+ fnt = get_font(fontsize)
164
+ drawing.multiline_text((draw_x, draw_y + line.size[1] / 2), line.text, font=fnt, fill=color_active if line.is_active else color_inactive, anchor="mm", align="center")
165
+
166
+ if not line.is_active:
167
+ drawing.line((draw_x - line.size[0] // 2, draw_y + line.size[1] // 2, draw_x + line.size[0] // 2, draw_y + line.size[1] // 2), fill=color_inactive, width=4)
168
+
169
+ draw_y += line.size[1] + line_spacing
170
+
171
+ fontsize = (width + height) // 25
172
+ line_spacing = fontsize // 2
173
+
174
+ fnt = get_font(fontsize)
175
+
176
+ pad_left = 0 if sum([sum([len(line.text) for line in lines]) for lines in ver_texts]) == 0 else width * 3 // 4
177
+
178
+ cols = im.width // width
179
+ rows = im.height // height
180
+
181
+ assert cols == len(hor_texts), f'bad number of horizontal texts: {len(hor_texts)}; must be {cols}'
182
+ assert rows == len(ver_texts), f'bad number of vertical texts: {len(ver_texts)}; must be {rows}'
183
+
184
+ calc_img = Image.new("RGB", (1, 1), color_background)
185
+ calc_d = ImageDraw.Draw(calc_img)
186
+
187
+ for texts, allowed_width in zip(hor_texts + ver_texts, [width] * len(hor_texts) + [pad_left] * len(ver_texts)):
188
+ items = [] + texts
189
+ texts.clear()
190
+
191
+ for line in items:
192
+ wrapped = wrap(calc_d, line.text, fnt, allowed_width)
193
+ texts += [GridAnnotation(x, line.is_active) for x in wrapped]
194
+
195
+ for line in texts:
196
+ bbox = calc_d.multiline_textbbox((0, 0), line.text, font=fnt)
197
+ line.size = (bbox[2] - bbox[0], bbox[3] - bbox[1])
198
+ line.allowed_width = allowed_width
199
+
200
+ hor_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing for lines in hor_texts]
201
+ ver_text_heights = [sum([line.size[1] + line_spacing for line in lines]) - line_spacing * len(lines) for lines in ver_texts]
202
+
203
+ pad_top = 0 if sum(hor_text_heights) == 0 else max(hor_text_heights) + line_spacing * 2
204
+
205
+ result = Image.new("RGB", (im.width + pad_left + margin * (cols-1), im.height + pad_top + margin * (rows-1)), color_background)
206
+
207
+ for row in range(rows):
208
+ for col in range(cols):
209
+ cell = im.crop((width * col, height * row, width * (col+1), height * (row+1)))
210
+ result.paste(cell, (pad_left + (width + margin) * col, pad_top + (height + margin) * row))
211
+
212
+ d = ImageDraw.Draw(result)
213
+
214
+ for col in range(cols):
215
+ x = pad_left + (width + margin) * col + width / 2
216
+ y = pad_top / 2 - hor_text_heights[col] / 2
217
+
218
+ draw_texts(d, x, y, hor_texts[col], fnt, fontsize)
219
+
220
+ for row in range(rows):
221
+ x = pad_left / 2
222
+ y = pad_top + (height + margin) * row + height / 2 - ver_text_heights[row] / 2
223
+
224
+ draw_texts(d, x, y, ver_texts[row], fnt, fontsize)
225
+
226
+ return result
227
+
228
+
229
+ def draw_prompt_matrix(im, width, height, all_prompts, margin=0):
230
+ prompts = all_prompts[1:]
231
+ boundary = math.ceil(len(prompts) / 2)
232
+
233
+ prompts_horiz = prompts[:boundary]
234
+ prompts_vert = prompts[boundary:]
235
+
236
+ hor_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_horiz)] for pos in range(1 << len(prompts_horiz))]
237
+ ver_texts = [[GridAnnotation(x, is_active=pos & (1 << i) != 0) for i, x in enumerate(prompts_vert)] for pos in range(1 << len(prompts_vert))]
238
+
239
+ return draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin)
240
+
241
+
242
+ def resize_image(resize_mode, im, width, height, upscaler_name=None):
243
+ """
244
+ Resizes an image with the specified resize_mode, width, and height.
245
+
246
+ Args:
247
+ resize_mode: The mode to use when resizing the image.
248
+ 0: Resize the image to the specified width and height.
249
+ 1: Resize the image to fill the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, cropping the excess.
250
+ 2: Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, filling empty with data from image.
251
+ im: The image to resize.
252
+ width: The width to resize the image to.
253
+ height: The height to resize the image to.
254
+ upscaler_name: The name of the upscaler to use. If not provided, defaults to opts.upscaler_for_img2img.
255
+ """
256
+
257
+ upscaler_name = upscaler_name or opts.upscaler_for_img2img
258
+
259
+ def resize(im, w, h):
260
+ if upscaler_name is None or upscaler_name == "None" or im.mode == 'L':
261
+ return im.resize((w, h), resample=LANCZOS)
262
+
263
+ scale = max(w / im.width, h / im.height)
264
+
265
+ if scale > 1.0:
266
+ upscalers = [x for x in shared.sd_upscalers if x.name == upscaler_name]
267
+ if len(upscalers) == 0:
268
+ upscaler = shared.sd_upscalers[0]
269
+ print(f"could not find upscaler named {upscaler_name or '<empty string>'}, using {upscaler.name} as a fallback")
270
+ else:
271
+ upscaler = upscalers[0]
272
+
273
+ im = upscaler.scaler.upscale(im, scale, upscaler.data_path)
274
+
275
+ if im.width != w or im.height != h:
276
+ im = im.resize((w, h), resample=LANCZOS)
277
+
278
+ return im
279
+
280
+ if resize_mode == 0:
281
+ res = resize(im, width, height)
282
+
283
+ elif resize_mode == 1:
284
+ ratio = width / height
285
+ src_ratio = im.width / im.height
286
+
287
+ src_w = width if ratio > src_ratio else im.width * height // im.height
288
+ src_h = height if ratio <= src_ratio else im.height * width // im.width
289
+
290
+ resized = resize(im, src_w, src_h)
291
+ res = Image.new("RGB", (width, height))
292
+ res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
293
+
294
+ else:
295
+ ratio = width / height
296
+ src_ratio = im.width / im.height
297
+
298
+ src_w = width if ratio < src_ratio else im.width * height // im.height
299
+ src_h = height if ratio >= src_ratio else im.height * width // im.width
300
+
301
+ resized = resize(im, src_w, src_h)
302
+ res = Image.new("RGB", (width, height))
303
+ res.paste(resized, box=(width // 2 - src_w // 2, height // 2 - src_h // 2))
304
+
305
+ if ratio < src_ratio:
306
+ fill_height = height // 2 - src_h // 2
307
+ if fill_height > 0:
308
+ res.paste(resized.resize((width, fill_height), box=(0, 0, width, 0)), box=(0, 0))
309
+ res.paste(resized.resize((width, fill_height), box=(0, resized.height, width, resized.height)), box=(0, fill_height + src_h))
310
+ elif ratio > src_ratio:
311
+ fill_width = width // 2 - src_w // 2
312
+ if fill_width > 0:
313
+ res.paste(resized.resize((fill_width, height), box=(0, 0, 0, height)), box=(0, 0))
314
+ res.paste(resized.resize((fill_width, height), box=(resized.width, 0, resized.width, height)), box=(fill_width + src_w, 0))
315
+
316
+ return res
317
+
318
+
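For reference, resize_mode 0 above stretches to the exact size, mode 1 scales so the image covers the canvas and crops the excess, and mode 2 scales so the image fits inside the canvas and fills the margins by repeating the edge rows/columns. A standalone sketch of the aspect-ratio arithmetic shared by modes 1 and 2, mirroring the formulas above (no webui imports; the example numbers are illustrative):

def scaled_size(width, height, src_w, src_h, fill=True):
    # fill=True  -> mode 1: cover the canvas, the excess gets cropped
    # fill=False -> mode 2: fit inside the canvas, the rest gets padded
    ratio, src_ratio = width / height, src_w / src_h
    if fill:
        w = width if ratio > src_ratio else src_w * height // src_h
        h = height if ratio <= src_ratio else src_h * width // src_w
    else:
        w = width if ratio < src_ratio else src_w * height // src_h
        h = height if ratio >= src_ratio else src_h * width // src_w
    return w, h

print(scaled_size(512, 512, 1920, 1080, fill=True))   # (910, 512): wider than the canvas, sides are cropped
print(scaled_size(512, 512, 1920, 1080, fill=False))  # (512, 288): shorter than the canvas, top/bottom are padded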
319
+ invalid_filename_chars = '<>:"/\\|?*\n\r\t'
320
+ invalid_filename_prefix = ' '
321
+ invalid_filename_postfix = ' .'
322
+ re_nonletters = re.compile(r'[\s' + string.punctuation + ']+')
323
+ re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)")
324
+ re_pattern_arg = re.compile(r"(.*)<([^>]*)>$")
325
+ max_filename_part_length = 128
326
+ NOTHING_AND_SKIP_PREVIOUS_TEXT = object()
327
+
328
+
329
+ def sanitize_filename_part(text, replace_spaces=True):
330
+ if text is None:
331
+ return None
332
+
333
+ if replace_spaces:
334
+ text = text.replace(' ', '_')
335
+
336
+ text = text.translate({ord(x): '_' for x in invalid_filename_chars})
337
+ text = text.lstrip(invalid_filename_prefix)[:max_filename_part_length]
338
+ text = text.rstrip(invalid_filename_postfix)
339
+ return text
340
+
341
+
342
+ class FilenameGenerator:
343
+ replacements = {
344
+ 'seed': lambda self: self.seed if self.seed is not None else '',
345
+ 'seed_first': lambda self: self.seed if self.p.batch_size == 1 else self.p.all_seeds[0],
346
+ 'seed_last': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.batch_size == 1 else self.p.all_seeds[-1],
347
+ 'steps': lambda self: self.p and self.p.steps,
348
+ 'cfg': lambda self: self.p and self.p.cfg_scale,
349
+ 'width': lambda self: self.image.width,
350
+ 'height': lambda self: self.image.height,
351
+ 'styles': lambda self: self.p and sanitize_filename_part(", ".join([style for style in self.p.styles if not style == "None"]) or "None", replace_spaces=False),
352
+ 'sampler': lambda self: self.p and sanitize_filename_part(self.p.sampler_name, replace_spaces=False),
353
+ 'model_hash': lambda self: getattr(self.p, "sd_model_hash", shared.sd_model.sd_model_hash),
354
+ 'model_name': lambda self: sanitize_filename_part(shared.sd_model.sd_checkpoint_info.name_for_extra, replace_spaces=False),
355
+ 'date': lambda self: datetime.datetime.now().strftime('%Y-%m-%d'),
356
+ 'datetime': lambda self, *args: self.datetime(*args), # accepts formats: [datetime], [datetime<Format>], [datetime<Format><Time Zone>]
357
+ 'job_timestamp': lambda self: getattr(self.p, "job_timestamp", shared.state.job_timestamp),
358
+ 'prompt_hash': lambda self, *args: self.string_hash(self.prompt, *args),
359
+ 'negative_prompt_hash': lambda self, *args: self.string_hash(self.p.negative_prompt, *args),
360
+ 'full_prompt_hash': lambda self, *args: self.string_hash(f"{self.p.prompt} {self.p.negative_prompt}", *args), # a space in between to create a unique string
361
+ 'prompt': lambda self: sanitize_filename_part(self.prompt),
362
+ 'prompt_no_styles': lambda self: self.prompt_no_style(),
363
+ 'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False),
364
+ 'prompt_words': lambda self: self.prompt_words(),
365
+ 'batch_number': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.batch_size == 1 or self.zip else self.p.batch_index + 1,
366
+ 'batch_size': lambda self: self.p.batch_size,
367
+ 'generation_number': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if (self.p.n_iter == 1 and self.p.batch_size == 1) or self.zip else self.p.iteration * self.p.batch_size + self.p.batch_index + 1,
368
+ 'hasprompt': lambda self, *args: self.hasprompt(*args), # accepts formats:[hasprompt<prompt1|default><prompt2>..]
369
+ 'clip_skip': lambda self: opts.data["CLIP_stop_at_last_layers"],
370
+ 'denoising': lambda self: self.p.denoising_strength if self.p and self.p.denoising_strength else NOTHING_AND_SKIP_PREVIOUS_TEXT,
371
+ 'user': lambda self: self.p.user,
372
+ 'vae_filename': lambda self: self.get_vae_filename(),
373
+ 'none': lambda self: '', # Overrides the default, so you can get just the sequence number
374
+ 'image_hash': lambda self, *args: self.image_hash(*args) # accepts formats: [image_hash<length>] default full hash
375
+ }
376
+ default_time_format = '%Y%m%d%H%M%S'
377
+
378
+ def __init__(self, p, seed, prompt, image, zip=False):
379
+ self.p = p
380
+ self.seed = seed
381
+ self.prompt = prompt
382
+ self.image = image
383
+ self.zip = zip
384
+
385
+ def get_vae_filename(self):
386
+ """Get the name of the VAE file."""
387
+
388
+ import modules.sd_vae as sd_vae
389
+
390
+ if sd_vae.loaded_vae_file is None:
391
+ return "NoneType"
392
+
393
+ file_name = os.path.basename(sd_vae.loaded_vae_file)
394
+ split_file_name = file_name.split('.')
395
+ if len(split_file_name) > 1 and split_file_name[0] == '':
396
+ return split_file_name[1] # if the first character of the filename is "." then [1] is obtained.
397
+ else:
398
+ return split_file_name[0]
399
+
400
+
401
+ def hasprompt(self, *args):
402
+ lower = self.prompt.lower()
403
+ if self.p is None or self.prompt is None:
404
+ return None
405
+ outres = ""
406
+ for arg in args:
407
+ if arg != "":
408
+ division = arg.split("|")
409
+ expected = division[0].lower()
410
+ default = division[1] if len(division) > 1 else ""
411
+ if lower.find(expected) >= 0:
412
+ outres = f'{outres}{expected}'
413
+ else:
414
+ outres = outres if default == "" else f'{outres}{default}'
415
+ return sanitize_filename_part(outres)
416
+
417
+ def prompt_no_style(self):
418
+ if self.p is None or self.prompt is None:
419
+ return None
420
+
421
+ prompt_no_style = self.prompt
422
+ for style in shared.prompt_styles.get_style_prompts(self.p.styles):
423
+ if style:
424
+ for part in style.split("{prompt}"):
425
+ prompt_no_style = prompt_no_style.replace(part, "").replace(", ,", ",").strip().strip(',')
426
+
427
+ prompt_no_style = prompt_no_style.replace(style, "").strip().strip(',').strip()
428
+
429
+ return sanitize_filename_part(prompt_no_style, replace_spaces=False)
430
+
431
+ def prompt_words(self):
432
+ words = [x for x in re_nonletters.split(self.prompt or "") if x]
433
+ if len(words) == 0:
434
+ words = ["empty"]
435
+ return sanitize_filename_part(" ".join(words[0:opts.directories_max_prompt_words]), replace_spaces=False)
436
+
437
+ def datetime(self, *args):
438
+ time_datetime = datetime.datetime.now()
439
+
440
+ time_format = args[0] if (args and args[0] != "") else self.default_time_format
441
+ try:
442
+ time_zone = pytz.timezone(args[1]) if len(args) > 1 else None
443
+ except pytz.exceptions.UnknownTimeZoneError:
444
+ time_zone = None
445
+
446
+ time_zone_time = time_datetime.astimezone(time_zone)
447
+ try:
448
+ formatted_time = time_zone_time.strftime(time_format)
449
+ except (ValueError, TypeError):
450
+ formatted_time = time_zone_time.strftime(self.default_time_format)
451
+
452
+ return sanitize_filename_part(formatted_time, replace_spaces=False)
453
+
454
+ def image_hash(self, *args):
455
+ length = int(args[0]) if (args and args[0] != "") else None
456
+ return hashlib.sha256(self.image.tobytes()).hexdigest()[0:length]
457
+
458
+ def string_hash(self, text, *args):
459
+ length = int(args[0]) if (args and args[0] != "") else 8
460
+ return hashlib.sha256(text.encode()).hexdigest()[0:length]
461
+
462
+ def apply(self, x):
463
+ res = ''
464
+
465
+ for m in re_pattern.finditer(x):
466
+ text, pattern = m.groups()
467
+
468
+ if pattern is None:
469
+ res += text
470
+ continue
471
+
472
+ pattern_args = []
473
+ while True:
474
+ m = re_pattern_arg.match(pattern)
475
+ if m is None:
476
+ break
477
+
478
+ pattern, arg = m.groups()
479
+ pattern_args.insert(0, arg)
480
+
481
+ fun = self.replacements.get(pattern.lower())
482
+ if fun is not None:
483
+ try:
484
+ replacement = fun(self, *pattern_args)
485
+ except Exception:
486
+ replacement = None
487
+ errors.report(f"Error adding [{pattern}] to filename", exc_info=True)
488
+
489
+ if replacement == NOTHING_AND_SKIP_PREVIOUS_TEXT:
490
+ continue
491
+ elif replacement is not None:
492
+ res += text + str(replacement)
493
+ continue
494
+
495
+ res += f'{text}[{pattern}]'
496
+
497
+ return res
498
+
499
+
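The replacements table above defines a small bracket-tag template language used by the filename and directory pattern options; apply() expands known tags, passes <...> arguments to their handlers, and keeps unrecognized tags as literal text. A hedged usage sketch; the seed, prompt and pattern strings are illustrative, and tags that need `p` or `image` are avoided because None is passed for both:

from modules.images import FilenameGenerator

namegen = FilenameGenerator(p=None, seed=1234, prompt="a red fox, watercolor", image=None)

print(namegen.apply("[seed]-[prompt_spaces]"))   # 1234-a red fox, watercolor
print(namegen.apply("[datetime<%Y-%m-%d>]"))     # current local date, e.g. 2023-09-30
print(namegen.apply("[unknown_tag]"))            # [unknown_tag]  (left as-is)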
500
+ def get_next_sequence_number(path, basename):
501
+ """
502
+ Determines and returns the next sequence number to use when saving an image in the specified directory.
503
+
504
+ The sequence starts at 0.
505
+ """
506
+ result = -1
507
+ if basename != '':
508
+ basename = f"{basename}-"
509
+
510
+ prefix_length = len(basename)
511
+ for p in os.listdir(path):
512
+ if p.startswith(basename):
513
+ parts = os.path.splitext(p[prefix_length:])[0].split('-') # splits the filename (removing the basename first if one is defined, so the sequence number is always the first element)
514
+ try:
515
+ result = max(int(parts[0]), result)
516
+ except ValueError:
517
+ pass
518
+
519
+ return result + 1
520
+
521
+
522
+ def save_image_with_geninfo(image, geninfo, filename, extension=None, existing_pnginfo=None, pnginfo_section_name='parameters'):
523
+ """
524
+ Saves image to filename, including geninfo as text information for generation info.
525
+ For PNG images, geninfo is added to existing pnginfo dictionary using the pnginfo_section_name argument as key.
526
+ For JPG images, there's no dictionary and geninfo just replaces the EXIF description.
527
+ """
528
+
529
+ if extension is None:
530
+ extension = os.path.splitext(filename)[1]
531
+
532
+ image_format = Image.registered_extensions()[extension]
533
+
534
+ if extension.lower() == '.png':
535
+ existing_pnginfo = existing_pnginfo or {}
536
+ if opts.enable_pnginfo:
537
+ existing_pnginfo[pnginfo_section_name] = geninfo
538
+
539
+ if opts.enable_pnginfo:
540
+ pnginfo_data = PngImagePlugin.PngInfo()
541
+ for k, v in (existing_pnginfo or {}).items():
542
+ pnginfo_data.add_text(k, str(v))
543
+ else:
544
+ pnginfo_data = None
545
+
546
+ image.save(filename, format=image_format, quality=opts.jpeg_quality, pnginfo=pnginfo_data)
547
+
548
+ elif extension.lower() in (".jpg", ".jpeg", ".webp"):
549
+ if image.mode == 'RGBA':
550
+ image = image.convert("RGB")
551
+ elif image.mode == 'I;16':
552
+ image = image.point(lambda p: p * 0.0038910505836576).convert("RGB" if extension.lower() == ".webp" else "L")
553
+
554
+ image.save(filename, format=image_format, quality=opts.jpeg_quality, lossless=opts.webp_lossless)
555
+
556
+ if opts.enable_pnginfo and geninfo is not None:
557
+ exif_bytes = piexif.dump({
558
+ "Exif": {
559
+ piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(geninfo or "", encoding="unicode")
560
+ },
561
+ })
562
+
563
+ piexif.insert(exif_bytes, filename)
564
+ else:
565
+ image.save(filename, format=image_format, quality=opts.jpeg_quality)
566
+
567
+
568
+ def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix="", save_to_dirs=None):
569
+ """Save an image.
570
+
571
+ Args:
572
+ image (`PIL.Image`):
573
+ The image to be saved.
574
+ path (`str`):
575
+ The directory to save the image. Note, the option `save_to_dirs` will make the image to be saved into a sub directory.
576
+ basename (`str`):
577
+ The base filename which will be applied to `filename pattern`.
578
+ seed, prompt, short_filename,
579
+ extension (`str`):
580
+ Image file extension, default is `png`.
581
+ pnginfo_section_name (`str`):
582
+ Specify the name of the section which `info` will be saved in.
583
+ info (`str` or `PngImagePlugin.iTXt`):
584
+ PNG info chunks.
585
+ existing_info (`dict`):
586
+ Additional PNG info. `existing_info == {pnginfo_section_name: info, ...}`
587
+ no_prompt:
588
+ TODO I don't know its meaning.
589
+ p (`StableDiffusionProcessing`)
590
+ forced_filename (`str`):
591
+ If specified, `basename` and filename pattern will be ignored.
592
+ save_to_dirs (bool):
593
+ If true, the image will be saved into a subdirectory of `path`.
594
+
595
+ Returns: (fullfn, txt_fullfn)
596
+ fullfn (`str`):
597
+ The full path of the saved image.
598
+ txt_fullfn (`str` or None):
599
+ If a text file is saved for this image, this will be its full path. Otherwise None.
600
+ """
601
+ namegen = FilenameGenerator(p, seed, prompt, image)
602
+
603
+ # WebP and JPG formats have maximum dimension limits of 16383 and 65535 respectively. switch to PNG which has a much higher limit
604
+ if (image.height > 65535 or image.width > 65535) and extension.lower() in ("jpg", "jpeg") or (image.height > 16383 or image.width > 16383) and extension.lower() == "webp":
605
+ print('Image dimensions too large; saving as PNG')
606
+ extension = ".png"
607
+
608
+ if save_to_dirs is None:
609
+ save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
610
+
611
+ if save_to_dirs:
612
+ dirname = namegen.apply(opts.directories_filename_pattern or "[prompt_words]").lstrip(' ').rstrip('\\ /')
613
+ path = os.path.join(path, dirname)
614
+
615
+ os.makedirs(path, exist_ok=True)
616
+
617
+ if forced_filename is None:
618
+ if short_filename or seed is None:
619
+ file_decoration = ""
620
+ elif opts.save_to_dirs:
621
+ file_decoration = opts.samples_filename_pattern or "[seed]"
622
+ else:
623
+ file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]"
624
+
625
+ file_decoration = namegen.apply(file_decoration) + suffix
626
+
627
+ add_number = opts.save_images_add_number or file_decoration == ''
628
+
629
+ if file_decoration != "" and add_number:
630
+ file_decoration = f"-{file_decoration}"
631
+
632
+ if add_number:
633
+ basecount = get_next_sequence_number(path, basename)
634
+ fullfn = None
635
+ for i in range(500):
636
+ fn = f"{basecount + i:05}" if basename == '' else f"{basename}-{basecount + i:04}"
637
+ fullfn = os.path.join(path, f"{fn}{file_decoration}.{extension}")
638
+ if not os.path.exists(fullfn):
639
+ break
640
+ else:
641
+ fullfn = os.path.join(path, f"{file_decoration}.{extension}")
642
+ else:
643
+ fullfn = os.path.join(path, f"{forced_filename}.{extension}")
644
+
645
+ pnginfo = existing_info or {}
646
+ if info is not None:
647
+ pnginfo[pnginfo_section_name] = info
648
+
649
+ params = script_callbacks.ImageSaveParams(image, p, fullfn, pnginfo)
650
+ script_callbacks.before_image_saved_callback(params)
651
+
652
+ image = params.image
653
+ fullfn = params.filename
654
+ info = params.pnginfo.get(pnginfo_section_name, None)
655
+
656
+ def _atomically_save_image(image_to_save, filename_without_extension, extension):
657
+ """
658
+ save image with .tmp extension to avoid race condition when another process detects new image in the directory
659
+ """
660
+ temp_file_path = f"{filename_without_extension}.tmp"
661
+
662
+ save_image_with_geninfo(image_to_save, info, temp_file_path, extension, existing_pnginfo=params.pnginfo, pnginfo_section_name=pnginfo_section_name)
663
+
664
+ os.replace(temp_file_path, filename_without_extension + extension)
665
+
666
+ fullfn_without_extension, extension = os.path.splitext(params.filename)
667
+ if hasattr(os, 'statvfs'):
668
+ max_name_len = os.statvfs(path).f_namemax
669
+ fullfn_without_extension = fullfn_without_extension[:max_name_len - max(4, len(extension))]
670
+ params.filename = fullfn_without_extension + extension
671
+ fullfn = params.filename
672
+ _atomically_save_image(image, fullfn_without_extension, extension)
673
+
674
+ image.already_saved_as = fullfn
675
+
676
+ oversize = image.width > opts.target_side_length or image.height > opts.target_side_length
677
+ if opts.export_for_4chan and (oversize or os.stat(fullfn).st_size > opts.img_downscale_threshold * 1024 * 1024):
678
+ ratio = image.width / image.height
679
+ resize_to = None
680
+ if oversize and ratio > 1:
681
+ resize_to = round(opts.target_side_length), round(image.height * opts.target_side_length / image.width)
682
+ elif oversize:
683
+ resize_to = round(image.width * opts.target_side_length / image.height), round(opts.target_side_length)
684
+
685
+ if resize_to is not None:
686
+ try:
687
+ # Resizing image with LANCZOS could throw an exception if e.g. image mode is I;16
688
+ image = image.resize(resize_to, LANCZOS)
689
+ except Exception:
690
+ image = image.resize(resize_to)
691
+ try:
692
+ _atomically_save_image(image, fullfn_without_extension, ".jpg")
693
+ except Exception as e:
694
+ errors.display(e, "saving image as downscaled JPG")
695
+
696
+ if opts.save_txt and info is not None:
697
+ txt_fullfn = f"{fullfn_without_extension}.txt"
698
+ with open(txt_fullfn, "w", encoding="utf8") as file:
699
+ file.write(f"{info}\n")
700
+ else:
701
+ txt_fullfn = None
702
+
703
+ script_callbacks.image_saved_callback(params)
704
+
705
+ return fullfn, txt_fullfn
706
+
707
+
708
+ IGNORED_INFO_KEYS = {
709
+ 'jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif',
710
+ 'loop', 'background', 'timestamp', 'duration', 'progressive', 'progression',
711
+ 'icc_profile', 'chromaticity', 'photoshop',
712
+ }
713
+
714
+
715
+ def read_info_from_image(image: Image.Image) -> tuple[str | None, dict]:
716
+ items = (image.info or {}).copy()
717
+
718
+ geninfo = items.pop('parameters', None)
719
+
720
+ if "exif" in items:
721
+ exif = piexif.load(items["exif"])
722
+ exif_comment = (exif or {}).get("Exif", {}).get(piexif.ExifIFD.UserComment, b'')
723
+ try:
724
+ exif_comment = piexif.helper.UserComment.load(exif_comment)
725
+ except ValueError:
726
+ exif_comment = exif_comment.decode('utf8', errors="ignore")
727
+
728
+ if exif_comment:
729
+ items['exif comment'] = exif_comment
730
+ geninfo = exif_comment
731
+
732
+ for field in IGNORED_INFO_KEYS:
733
+ items.pop(field, None)
734
+
735
+ if items.get("Software", None) == "NovelAI":
736
+ try:
737
+ json_info = json.loads(items["Comment"])
738
+ sampler = sd_samplers.samplers_map.get(json_info["sampler"], "Euler a")
739
+
740
+ geninfo = f"""{items["Description"]}
741
+ Negative prompt: {json_info["uc"]}
742
+ Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}, Seed: {json_info["seed"]}, Size: {image.width}x{image.height}, Clip skip: 2, ENSD: 31337"""
743
+ except Exception:
744
+ errors.report("Error parsing NovelAI image generation parameters", exc_info=True)
745
+
746
+ return geninfo, items
747
+
748
+
749
+ def image_data(data):
750
+ import gradio as gr
751
+
752
+ try:
753
+ image = Image.open(io.BytesIO(data))
754
+ textinfo, _ = read_info_from_image(image)
755
+ return textinfo, None
756
+ except Exception:
757
+ pass
758
+
759
+ try:
760
+ text = data.decode('utf8')
761
+ assert len(text) < 10000
762
+ return text, None
763
+
764
+ except Exception:
765
+ pass
766
+
767
+ return gr.update(), None
768
+
769
+
770
+ def flatten(img, bgcolor):
771
+ """replaces transparency with bgcolor (example: "#ffffff"), returning an RGB mode image with no transparency"""
772
+
773
+ if img.mode == "RGBA":
774
+ background = Image.new('RGBA', img.size, bgcolor)
775
+ background.paste(img, mask=img)
776
+ img = background
777
+
778
+ return img.convert('RGB')
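save_image_with_geninfo and read_info_from_image above store the generation parameters as a PNG text chunk named 'parameters' (or as an EXIF UserComment for JPEG/WebP). A standalone Pillow sketch of the PNG path, not calling the module; the file name and parameters string are illustrative:

from PIL import Image, PngImagePlugin

params = "a red fox\nSteps: 20, Sampler: Euler a, CFG scale: 7, Seed: 1234"

info = PngImagePlugin.PngInfo()
info.add_text("parameters", params)  # same section name the module uses by default
Image.new("RGB", (64, 64)).save("demo.png", pnginfo=info)

reread = Image.open("demo.png")
print(reread.info.get("parameters") == params)  # True; read_info_from_image pops this same key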
modules/img2img.py ADDED
@@ -0,0 +1,219 @@
1
+ import os
2
+ from contextlib import closing
3
+ from pathlib import Path
4
+
5
+ import numpy as np
6
+ from PIL import Image, ImageOps, ImageFilter, ImageEnhance, UnidentifiedImageError
7
+ import gradio as gr
8
+
9
+ from modules import images as imgutil
10
+ from modules.generation_parameters_copypaste import create_override_settings_dict, parse_generation_parameters
11
+ from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
12
+ from modules.shared import opts, state
13
+ import modules.shared as shared
14
+ import modules.processing as processing
15
+ from modules.ui import plaintext_to_html
16
+ import modules.scripts
17
+
18
+
19
+ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=False, scale_by=1.0, use_png_info=False, png_info_props=None, png_info_dir=None):
20
+ output_dir = output_dir.strip()
21
+ processing.fix_seed(p)
22
+
23
+ images = list(shared.walk_files(input_dir, allowed_extensions=(".png", ".jpg", ".jpeg", ".webp", ".tif", ".tiff")))
24
+
25
+ is_inpaint_batch = False
26
+ if inpaint_mask_dir:
27
+ inpaint_masks = shared.listfiles(inpaint_mask_dir)
28
+ is_inpaint_batch = bool(inpaint_masks)
29
+
30
+ if is_inpaint_batch:
31
+ print(f"\nInpaint batch is enabled. {len(inpaint_masks)} masks found.")
32
+
33
+ print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.")
34
+
35
+ state.job_count = len(images) * p.n_iter
36
+
37
+ # extract "default" params to use in case getting png info fails
38
+ prompt = p.prompt
39
+ negative_prompt = p.negative_prompt
40
+ seed = p.seed
41
+ cfg_scale = p.cfg_scale
42
+ sampler_name = p.sampler_name
43
+ steps = p.steps
44
+
45
+ for i, image in enumerate(images):
46
+ state.job = f"{i+1} out of {len(images)}"
47
+ if state.skipped:
48
+ state.skipped = False
49
+
50
+ if state.interrupted:
51
+ break
52
+
53
+ try:
54
+ img = Image.open(image)
55
+ except UnidentifiedImageError as e:
56
+ print(e)
57
+ continue
58
+ # Use the EXIF orientation of photos taken by smartphones.
59
+ img = ImageOps.exif_transpose(img)
60
+
61
+ if to_scale:
62
+ p.width = int(img.width * scale_by)
63
+ p.height = int(img.height * scale_by)
64
+
65
+ p.init_images = [img] * p.batch_size
66
+
67
+ image_path = Path(image)
68
+ if is_inpaint_batch:
69
+ # try to find corresponding mask for an image using simple filename matching
70
+ if len(inpaint_masks) == 1:
71
+ mask_image_path = inpaint_masks[0]
72
+ else:
73
+ # try to find corresponding mask for an image using simple filename matching
74
+ mask_image_dir = Path(inpaint_mask_dir)
75
+ masks_found = list(mask_image_dir.glob(f"{image_path.stem}.*"))
76
+
77
+ if len(masks_found) == 0:
78
+ print(f"Warning: mask is not found for {image_path} in {mask_image_dir}. Skipping it.")
79
+ continue
80
+
81
+ # it should contain only 1 matching mask
82
+ # otherwise user has many masks with the same name but different extensions
83
+ mask_image_path = masks_found[0]
84
+
85
+ mask_image = Image.open(mask_image_path)
86
+ p.image_mask = mask_image
87
+
88
+ if use_png_info:
89
+ try:
90
+ info_img = img
91
+ if png_info_dir:
92
+ info_img_path = os.path.join(png_info_dir, os.path.basename(image))
93
+ info_img = Image.open(info_img_path)
94
+ geninfo, _ = imgutil.read_info_from_image(info_img)
95
+ parsed_parameters = parse_generation_parameters(geninfo)
96
+ parsed_parameters = {k: v for k, v in parsed_parameters.items() if k in (png_info_props or {})}
97
+ except Exception:
98
+ parsed_parameters = {}
99
+
100
+ p.prompt = prompt + (" " + parsed_parameters["Prompt"] if "Prompt" in parsed_parameters else "")
101
+ p.negative_prompt = negative_prompt + (" " + parsed_parameters["Negative prompt"] if "Negative prompt" in parsed_parameters else "")
102
+ p.seed = int(parsed_parameters.get("Seed", seed))
103
+ p.cfg_scale = float(parsed_parameters.get("CFG scale", cfg_scale))
104
+ p.sampler_name = parsed_parameters.get("Sampler", sampler_name)
105
+ p.steps = int(parsed_parameters.get("Steps", steps))
106
+
107
+ proc = modules.scripts.scripts_img2img.run(p, *args)
108
+ if proc is None:
109
+ if output_dir:
110
+ p.outpath_samples = output_dir
111
+ p.override_settings['save_to_dirs'] = False
112
+ if p.n_iter > 1 or p.batch_size > 1:
113
+ p.override_settings['samples_filename_pattern'] = f'{image_path.stem}-[generation_number]'
114
+ else:
115
+ p.override_settings['samples_filename_pattern'] = f'{image_path.stem}'
116
+ process_images(p)
117
+
118
+
119
+ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args):
120
+ override_settings = create_override_settings_dict(override_settings_texts)
121
+
122
+ is_batch = mode == 5
123
+
124
+ if mode == 0: # img2img
125
+ image = init_img
126
+ mask = None
127
+ elif mode == 1: # img2img sketch
128
+ image = sketch
129
+ mask = None
130
+ elif mode == 2: # inpaint
131
+ image, mask = init_img_with_mask["image"], init_img_with_mask["mask"]
132
+ mask = processing.create_binary_mask(mask)
133
+ elif mode == 3: # inpaint sketch
134
+ image = inpaint_color_sketch
135
+ orig = inpaint_color_sketch_orig or inpaint_color_sketch
136
+ pred = np.any(np.array(image) != np.array(orig), axis=-1)
137
+ mask = Image.fromarray(pred.astype(np.uint8) * 255, "L")
138
+ mask = ImageEnhance.Brightness(mask).enhance(1 - mask_alpha / 100)
139
+ blur = ImageFilter.GaussianBlur(mask_blur)
140
+ image = Image.composite(image.filter(blur), orig, mask.filter(blur))
141
+ elif mode == 4: # inpaint upload mask
142
+ image = init_img_inpaint
143
+ mask = init_mask_inpaint
144
+ else:
145
+ image = None
146
+ mask = None
147
+
148
+ # Use the EXIF orientation of photos taken by smartphones.
149
+ if image is not None:
150
+ image = ImageOps.exif_transpose(image)
151
+
152
+ if selected_scale_tab == 1 and not is_batch:
153
+ assert image, "Can't scale by because no image is selected"
154
+
155
+ width = int(image.width * scale_by)
156
+ height = int(image.height * scale_by)
157
+
158
+ assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'
159
+
160
+ p = StableDiffusionProcessingImg2Img(
161
+ sd_model=shared.sd_model,
162
+ outpath_samples=opts.outdir_samples or opts.outdir_img2img_samples,
163
+ outpath_grids=opts.outdir_grids or opts.outdir_img2img_grids,
164
+ prompt=prompt,
165
+ negative_prompt=negative_prompt,
166
+ styles=prompt_styles,
167
+ sampler_name=sampler_name,
168
+ batch_size=batch_size,
169
+ n_iter=n_iter,
170
+ steps=steps,
171
+ cfg_scale=cfg_scale,
172
+ width=width,
173
+ height=height,
174
+ init_images=[image],
175
+ mask=mask,
176
+ mask_blur=mask_blur,
177
+ inpainting_fill=inpainting_fill,
178
+ resize_mode=resize_mode,
179
+ denoising_strength=denoising_strength,
180
+ image_cfg_scale=image_cfg_scale,
181
+ inpaint_full_res=inpaint_full_res,
182
+ inpaint_full_res_padding=inpaint_full_res_padding,
183
+ inpainting_mask_invert=inpainting_mask_invert,
184
+ override_settings=override_settings,
185
+ )
186
+
187
+ p.scripts = modules.scripts.scripts_img2img
188
+ p.script_args = args
189
+
190
+ p.user = request.username
191
+
192
+ if shared.cmd_opts.enable_console_prompts:
193
+ print(f"\nimg2img: {prompt}", file=shared.progress_print_out)
194
+
195
+ if mask:
196
+ p.extra_generation_params["Mask blur"] = mask_blur
197
+
198
+ with closing(p):
199
+ if is_batch:
200
+ assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
201
+
202
+ process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir)
203
+
204
+ processed = Processed(p, [], p.seed, "")
205
+ else:
206
+ processed = modules.scripts.scripts_img2img.run(p, *args)
207
+ if processed is None:
208
+ processed = process_images(p)
209
+
210
+ shared.total_tqdm.clear()
211
+
212
+ generation_info_js = processed.js()
213
+ if opts.samples_log_stdout:
214
+ print(generation_info_js)
215
+
216
+ if opts.do_not_show_images:
217
+ processed.images = []
218
+
219
+ return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments, classname="comments")
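In mode 3 (inpaint sketch) above, the mask is derived by comparing the user's sketch against the original image pixel by pixel, then faded with mask_alpha and blurred with mask_blur. A standalone numpy/Pillow sketch of just that derivation; the two input images and the slider values are placeholders:

import numpy as np
from PIL import Image, ImageEnhance, ImageFilter

orig = Image.new("RGB", (64, 64), "white")
sketch = orig.copy()
sketch.paste((255, 0, 0), (16, 16, 48, 48))  # the user painted over part of the image

changed = np.any(np.array(sketch) != np.array(orig), axis=-1)  # True wherever any channel differs
mask = Image.fromarray(changed.astype(np.uint8) * 255, "L")

mask_alpha, mask_blur = 25, 4
mask = ImageEnhance.Brightness(mask).enhance(1 - mask_alpha / 100)  # fade the mask by alpha
mask = mask.filter(ImageFilter.GaussianBlur(mask_blur))             # soften its edges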
modules/import_hook.py ADDED
@@ -0,0 +1,5 @@
1
+ import sys
2
+
3
+ # this will break any attempt to import xformers, which will prevent the Stable Diffusion repo from trying to use it
4
+ if "--xformers" not in "".join(sys.argv):
5
+ sys.modules["xformers"] = None
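Registering None under a module name in sys.modules makes any later import of that name fail immediately with ModuleNotFoundError (a subclass of ImportError), which is how the five lines above keep the Stable Diffusion code from picking xformers up when --xformers was not passed. A tiny standalone demonstration with a made-up module name:

import sys

sys.modules["definitely_not_installed"] = None

try:
    import definitely_not_installed  # noqa: F401
except ImportError as e:
    print("import blocked:", e)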
modules/initialize.py ADDED
@@ -0,0 +1,168 @@
1
+ import importlib
2
+ import logging
3
+ import sys
4
+ import warnings
5
+ from threading import Thread
6
+
7
+ from modules.timer import startup_timer
8
+
9
+
10
+ def imports():
11
+ logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) # sshh...
12
+ logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
13
+
14
+ import torch # noqa: F401
15
+ startup_timer.record("import torch")
16
+ import pytorch_lightning # noqa: F401
17
+ startup_timer.record("import pytorch_lightning")
18
+ warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning")
19
+ warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision")
20
+
21
+ import gradio # noqa: F401
22
+ startup_timer.record("import gradio")
23
+
24
+ from modules import paths, timer, import_hook, errors # noqa: F401
25
+ startup_timer.record("setup paths")
26
+
27
+ import ldm.modules.encoders.modules # noqa: F401
28
+ startup_timer.record("import ldm")
29
+
30
+ import sgm.modules.encoders.modules # noqa: F401
31
+ startup_timer.record("import sgm")
32
+
33
+ from modules import shared_init
34
+ shared_init.initialize()
35
+ startup_timer.record("initialize shared")
36
+
37
+ from modules import processing, gradio_extensons, ui # noqa: F401
38
+ startup_timer.record("other imports")
39
+
40
+
41
+ def check_versions():
42
+ from modules.shared_cmd_options import cmd_opts
43
+
44
+ if not cmd_opts.skip_version_check:
45
+ from modules import errors
46
+ errors.check_versions()
47
+
48
+
49
+ def initialize():
50
+ from modules import initialize_util
51
+ initialize_util.fix_torch_version()
52
+ initialize_util.fix_asyncio_event_loop_policy()
53
+ initialize_util.validate_tls_options()
54
+ initialize_util.configure_sigint_handler()
55
+ initialize_util.configure_opts_onchange()
56
+
57
+ from modules import modelloader
58
+ modelloader.cleanup_models()
59
+
60
+ from modules import sd_models
61
+ sd_models.setup_model()
62
+ startup_timer.record("setup SD model")
63
+
64
+ from modules.shared_cmd_options import cmd_opts
65
+
66
+ from modules import codeformer_model
67
+ warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision.transforms.functional_tensor")
68
+ codeformer_model.setup_model(cmd_opts.codeformer_models_path)
69
+ startup_timer.record("setup codeformer")
70
+
71
+ from modules import gfpgan_model
72
+ gfpgan_model.setup_model(cmd_opts.gfpgan_models_path)
73
+ startup_timer.record("setup gfpgan")
74
+
75
+ initialize_rest(reload_script_modules=False)
76
+
77
+
78
+ def initialize_rest(*, reload_script_modules=False):
79
+ """
80
+ Called both from initialize() and when reloading the webui.
81
+ """
82
+ from modules.shared_cmd_options import cmd_opts
83
+
84
+ from modules import sd_samplers
85
+ sd_samplers.set_samplers()
86
+ startup_timer.record("set samplers")
87
+
88
+ from modules import extensions
89
+ extensions.list_extensions()
90
+ startup_timer.record("list extensions")
91
+
92
+ from modules import initialize_util
93
+ initialize_util.restore_config_state_file()
94
+ startup_timer.record("restore config state file")
95
+
96
+ from modules import shared, upscaler, scripts
97
+ if cmd_opts.ui_debug_mode:
98
+ shared.sd_upscalers = upscaler.UpscalerLanczos().scalers
99
+ scripts.load_scripts()
100
+ return
101
+
102
+ from modules import sd_models
103
+ sd_models.list_models()
104
+ startup_timer.record("list SD models")
105
+
106
+ from modules import localization
107
+ localization.list_localizations(cmd_opts.localizations_dir)
108
+ startup_timer.record("list localizations")
109
+
110
+ with startup_timer.subcategory("load scripts"):
111
+ scripts.load_scripts()
112
+
113
+ if reload_script_modules:
114
+ for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]:
115
+ importlib.reload(module)
116
+ startup_timer.record("reload script modules")
117
+
118
+ from modules import modelloader
119
+ modelloader.load_upscalers()
120
+ startup_timer.record("load upscalers")
121
+
122
+ from modules import sd_vae
123
+ sd_vae.refresh_vae_list()
124
+ startup_timer.record("refresh VAE")
125
+
126
+ from modules import textual_inversion
127
+ textual_inversion.textual_inversion.list_textual_inversion_templates()
128
+ startup_timer.record("refresh textual inversion templates")
129
+
130
+ from modules import script_callbacks, sd_hijack_optimizations, sd_hijack
131
+ script_callbacks.on_list_optimizers(sd_hijack_optimizations.list_optimizers)
132
+ sd_hijack.list_optimizers()
133
+ startup_timer.record("scripts list_optimizers")
134
+
135
+ from modules import sd_unet
136
+ sd_unet.list_unets()
137
+ startup_timer.record("scripts list_unets")
138
+
139
+ def load_model():
140
+ """
141
+ Accesses shared.sd_model property to load model.
142
+ After it's available, if it has been loaded before this access by some extension,
143
+ its optimization may be None because the list of optimizers has not yet been filled
144
+ by that time, so we apply optimization again.
145
+ """
146
+
147
+ shared.sd_model # noqa: B018
148
+
149
+ if sd_hijack.current_optimizer is None:
150
+ sd_hijack.apply_optimizations()
151
+
152
+ from modules import devices
153
+ devices.first_time_calculation()
154
+
155
+ Thread(target=load_model).start()
156
+
157
+ from modules import shared_items
158
+ shared_items.reload_hypernetworks()
159
+ startup_timer.record("reload hypernetworks")
160
+
161
+ from modules import ui_extra_networks
162
+ ui_extra_networks.initialize()
163
+ ui_extra_networks.register_default_pages()
164
+
165
+ from modules import extra_networks
166
+ extra_networks.initialize()
167
+ extra_networks.register_default_extra_networks()
168
+ startup_timer.record("initialize extra networks")
modules/initialize_util.py ADDED
@@ -0,0 +1,202 @@
1
+ import json
2
+ import os
3
+ import signal
4
+ import sys
5
+ import re
6
+
7
+ from modules.timer import startup_timer
8
+
9
+
10
+ def gradio_server_name():
11
+ from modules.shared_cmd_options import cmd_opts
12
+
13
+ if cmd_opts.server_name:
14
+ return cmd_opts.server_name
15
+ else:
16
+ return "0.0.0.0" if cmd_opts.listen else None
17
+
18
+
19
+ def fix_torch_version():
20
+ import torch
21
+
22
+ # Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors
23
+ if ".dev" in torch.__version__ or "+git" in torch.__version__:
24
+ torch.__long_version__ = torch.__version__
25
+ torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0)
26
+
27
+
28
+ def fix_asyncio_event_loop_policy():
29
+ """
30
+ The default `asyncio` event loop policy only automatically creates
31
+ event loops in the main threads. Other threads must create event
32
+ loops explicitly or `asyncio.get_event_loop` (and therefore
33
+ `.IOLoop.current`) will fail. Installing this policy allows event
34
+ loops to be created automatically on any thread, matching the
35
+ behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2).
36
+ """
37
+
38
+ import asyncio
39
+
40
+ if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
41
+ # "Any thread" and "selector" should be orthogonal, but there's not a clean
42
+ # interface for composing policies so pick the right base.
43
+ _BasePolicy = asyncio.WindowsSelectorEventLoopPolicy # type: ignore
44
+ else:
45
+ _BasePolicy = asyncio.DefaultEventLoopPolicy
46
+
47
+ class AnyThreadEventLoopPolicy(_BasePolicy): # type: ignore
48
+ """Event loop policy that allows loop creation on any thread.
49
+ Usage::
50
+
51
+ asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
52
+ """
53
+
54
+ def get_event_loop(self) -> asyncio.AbstractEventLoop:
55
+ try:
56
+ return super().get_event_loop()
57
+ except (RuntimeError, AssertionError):
58
+ # This was an AssertionError in python 3.4.2 (which ships with debian jessie)
59
+ # and changed to a RuntimeError in 3.4.3.
60
+ # "There is no current event loop in thread %r"
61
+ loop = self.new_event_loop()
62
+ self.set_event_loop(loop)
63
+ return loop
64
+
65
+ asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
66
+
67
+
68
+ def restore_config_state_file():
69
+ from modules import shared, config_states
70
+
71
+ config_state_file = shared.opts.restore_config_state_file
72
+ if config_state_file == "":
73
+ return
74
+
75
+ shared.opts.restore_config_state_file = ""
76
+ shared.opts.save(shared.config_filename)
77
+
78
+ if os.path.isfile(config_state_file):
79
+ print(f"*** About to restore extension state from file: {config_state_file}")
80
+ with open(config_state_file, "r", encoding="utf-8") as f:
81
+ config_state = json.load(f)
82
+ config_states.restore_extension_config(config_state)
83
+ startup_timer.record("restore extension config")
84
+ elif config_state_file:
85
+ print(f"!!! Config state backup not found: {config_state_file}")
86
+
87
+
88
+ def validate_tls_options():
89
+ from modules.shared_cmd_options import cmd_opts
90
+
91
+ if not (cmd_opts.tls_keyfile and cmd_opts.tls_certfile):
92
+ return
93
+
94
+ try:
95
+ if not os.path.exists(cmd_opts.tls_keyfile):
96
+ print("Invalid path to TLS keyfile given")
97
+ if not os.path.exists(cmd_opts.tls_certfile):
98
+ print(f"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'")
99
+ except TypeError:
100
+ cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None
101
+ print("TLS setup invalid, running webui without TLS")
102
+ else:
103
+ print("Running with TLS")
104
+ startup_timer.record("TLS")
105
+
106
+
107
+ def get_gradio_auth_creds():
108
+ """
109
+ Convert the gradio_auth and gradio_auth_path commandline arguments into
110
+ an iterable of (username, password) tuples.
111
+ """
112
+ from modules.shared_cmd_options import cmd_opts
113
+
114
+ def process_credential_line(s):
115
+ s = s.strip()
116
+ if not s:
117
+ return None
118
+ return tuple(s.split(':', 1))
119
+
120
+ if cmd_opts.gradio_auth:
121
+ for cred in cmd_opts.gradio_auth.split(','):
122
+ cred = process_credential_line(cred)
123
+ if cred:
124
+ yield cred
125
+
126
+ if cmd_opts.gradio_auth_path:
127
+ with open(cmd_opts.gradio_auth_path, 'r', encoding="utf8") as file:
128
+ for line in file.readlines():
129
+ for cred in line.strip().split(','):
130
+ cred = process_credential_line(cred)
131
+ if cred:
132
+ yield cred
133
+
134
+
135
+ def dumpstacks():
136
+ import threading
137
+ import traceback
138
+
139
+ id2name = {th.ident: th.name for th in threading.enumerate()}
140
+ code = []
141
+ for threadId, stack in sys._current_frames().items():
142
+ code.append(f"\n# Thread: {id2name.get(threadId, '')}({threadId})")
143
+ for filename, lineno, name, line in traceback.extract_stack(stack):
144
+ code.append(f"""File: "{filename}", line {lineno}, in {name}""")
145
+ if line:
146
+ code.append(" " + line.strip())
147
+
148
+ print("\n".join(code))
149
+
150
+
151
+ def configure_sigint_handler():
152
+ # make the program just exit at ctrl+c without waiting for anything
153
+ def sigint_handler(sig, frame):
154
+ print(f'Interrupted with signal {sig} in {frame}')
155
+
156
+ dumpstacks()
157
+
158
+ os._exit(0)
159
+
160
+ if not os.environ.get("COVERAGE_RUN"):
161
+ # Don't install the immediate-quit handler when running under coverage,
162
+ # as then the coverage report won't be generated.
163
+ signal.signal(signal.SIGINT, sigint_handler)
164
+
165
+
166
+ def configure_opts_onchange():
167
+ from modules import shared, sd_models, sd_vae, ui_tempdir, sd_hijack
168
+ from modules.call_queue import wrap_queued_call
169
+
170
+ shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: sd_models.reload_model_weights()), call=False)
171
+ shared.opts.onchange("sd_vae", wrap_queued_call(lambda: sd_vae.reload_vae_weights()), call=False)
172
+ shared.opts.onchange("sd_vae_overrides_per_model_preferences", wrap_queued_call(lambda: sd_vae.reload_vae_weights()), call=False)
173
+ shared.opts.onchange("temp_dir", ui_tempdir.on_tmpdir_changed)
174
+ shared.opts.onchange("gradio_theme", shared.reload_gradio_theme)
175
+ shared.opts.onchange("cross_attention_optimization", wrap_queued_call(lambda: sd_hijack.model_hijack.redo_hijack(shared.sd_model)), call=False)
176
+ startup_timer.record("opts onchange")
177
+
178
+
179
+ def setup_middleware(app):
180
+ from starlette.middleware.gzip import GZipMiddleware
181
+
182
+ app.middleware_stack = None # reset current middleware to allow modifying user provided list
183
+ app.add_middleware(GZipMiddleware, minimum_size=1000)
184
+ configure_cors_middleware(app)
185
+ app.build_middleware_stack() # rebuild middleware stack on-the-fly
186
+
187
+
188
+ def configure_cors_middleware(app):
189
+ from starlette.middleware.cors import CORSMiddleware
190
+ from modules.shared_cmd_options import cmd_opts
191
+
192
+ cors_options = {
193
+ "allow_methods": ["*"],
194
+ "allow_headers": ["*"],
195
+ "allow_credentials": True,
196
+ }
197
+ if cmd_opts.cors_allow_origins:
198
+ cors_options["allow_origins"] = cmd_opts.cors_allow_origins.split(',')
199
+ if cmd_opts.cors_allow_origins_regex:
200
+ cors_options["allow_origin_regex"] = cmd_opts.cors_allow_origins_regex
201
+ app.add_middleware(CORSMiddleware, **cors_options)
202
+
modules/interrogate.py ADDED
@@ -0,0 +1,222 @@
1
+ import os
2
+ import sys
3
+ from collections import namedtuple
4
+ from pathlib import Path
5
+ import re
6
+
7
+ import torch
8
+ import torch.hub
9
+
10
+ from torchvision import transforms
11
+ from torchvision.transforms.functional import InterpolationMode
12
+
13
+ from modules import devices, paths, shared, lowvram, modelloader, errors
14
+
15
+ blip_image_eval_size = 384
16
+ clip_model_name = 'ViT-L/14'
17
+
18
+ Category = namedtuple("Category", ["name", "topn", "items"])
19
+
20
+ re_topn = re.compile(r"\.top(\d+)\.")
21
+
22
+ def category_types():
23
+ return [f.stem for f in Path(shared.interrogator.content_dir).glob('*.txt')]
24
+
25
+
26
+ def download_default_clip_interrogate_categories(content_dir):
27
+ print("Downloading CLIP categories...")
28
+
29
+ tmpdir = f"{content_dir}_tmp"
30
+ category_types = ["artists", "flavors", "mediums", "movements"]
31
+
32
+ try:
33
+ os.makedirs(tmpdir, exist_ok=True)
34
+ for category_type in category_types:
35
+ torch.hub.download_url_to_file(f"https://raw.githubusercontent.com/pharmapsychotic/clip-interrogator/main/clip_interrogator/data/{category_type}.txt", os.path.join(tmpdir, f"{category_type}.txt"))
36
+ os.rename(tmpdir, content_dir)
37
+
38
+ except Exception as e:
39
+ errors.display(e, "downloading default CLIP interrogate categories")
40
+ finally:
41
+ if os.path.exists(tmpdir):
42
+ os.removedirs(tmpdir)
43
+
44
+
45
+ class InterrogateModels:
46
+ blip_model = None
47
+ clip_model = None
48
+ clip_preprocess = None
49
+ dtype = None
50
+ running_on_cpu = None
51
+
52
+ def __init__(self, content_dir):
53
+ self.loaded_categories = None
54
+ self.skip_categories = []
55
+ self.content_dir = content_dir
56
+ self.running_on_cpu = devices.device_interrogate == torch.device("cpu")
57
+
58
+ def categories(self):
59
+ if not os.path.exists(self.content_dir):
60
+ download_default_clip_interrogate_categories(self.content_dir)
61
+
62
+ if self.loaded_categories is not None and self.skip_categories == shared.opts.interrogate_clip_skip_categories:
63
+ return self.loaded_categories
64
+
65
+ self.loaded_categories = []
66
+
67
+ if os.path.exists(self.content_dir):
68
+ self.skip_categories = shared.opts.interrogate_clip_skip_categories
69
+ category_types = []
70
+ for filename in Path(self.content_dir).glob('*.txt'):
71
+ category_types.append(filename.stem)
72
+ if filename.stem in self.skip_categories:
73
+ continue
74
+ m = re_topn.search(filename.stem)
75
+ topn = 1 if m is None else int(m.group(1))
76
+ with open(filename, "r", encoding="utf8") as file:
77
+ lines = [x.strip() for x in file.readlines()]
78
+
79
+ self.loaded_categories.append(Category(name=filename.stem, topn=topn, items=lines))
80
+
81
+ return self.loaded_categories
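+ # Note on the filename convention: re_topn looks for ".topN." inside the file stem, so a
+ # (hypothetical) category file whose stem contains ".top3." gets topn == 3, while a plain
+ # name such as "artists.txt" falls back to topn == 1.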
82
+
83
+ def create_fake_fairscale(self):
84
+ class FakeFairscale:
85
+ def checkpoint_wrapper(self):
86
+ pass
87
+
88
+ sys.modules["fairscale.nn.checkpoint.checkpoint_activations"] = FakeFairscale
89
+
90
+ def load_blip_model(self):
91
+ self.create_fake_fairscale()
92
+ import models.blip
93
+
94
+ files = modelloader.load_models(
95
+ model_path=os.path.join(paths.models_path, "BLIP"),
96
+ model_url='https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_caption_capfilt_large.pth',
97
+ ext_filter=[".pth"],
98
+ download_name='model_base_caption_capfilt_large.pth',
99
+ )
100
+
101
+ blip_model = models.blip.blip_decoder(pretrained=files[0], image_size=blip_image_eval_size, vit='base', med_config=os.path.join(paths.paths["BLIP"], "configs", "med_config.json"))
102
+ blip_model.eval()
103
+
104
+ return blip_model
105
+
106
+ def load_clip_model(self):
107
+ import clip
108
+
109
+ if self.running_on_cpu:
110
+ model, preprocess = clip.load(clip_model_name, device="cpu", download_root=shared.cmd_opts.clip_models_path)
111
+ else:
112
+ model, preprocess = clip.load(clip_model_name, download_root=shared.cmd_opts.clip_models_path)
113
+
114
+ model.eval()
115
+ model = model.to(devices.device_interrogate)
116
+
117
+ return model, preprocess
118
+
119
+ def load(self):
120
+ if self.blip_model is None:
121
+ self.blip_model = self.load_blip_model()
122
+ if not shared.cmd_opts.no_half and not self.running_on_cpu:
123
+ self.blip_model = self.blip_model.half()
124
+
125
+ self.blip_model = self.blip_model.to(devices.device_interrogate)
126
+
127
+ if self.clip_model is None:
128
+ self.clip_model, self.clip_preprocess = self.load_clip_model()
129
+ if not shared.cmd_opts.no_half and not self.running_on_cpu:
130
+ self.clip_model = self.clip_model.half()
131
+
132
+ self.clip_model = self.clip_model.to(devices.device_interrogate)
133
+
134
+ self.dtype = next(self.clip_model.parameters()).dtype
135
+
136
+ def send_clip_to_ram(self):
137
+ if not shared.opts.interrogate_keep_models_in_memory:
138
+ if self.clip_model is not None:
139
+ self.clip_model = self.clip_model.to(devices.cpu)
140
+
141
+ def send_blip_to_ram(self):
142
+ if not shared.opts.interrogate_keep_models_in_memory:
143
+ if self.blip_model is not None:
144
+ self.blip_model = self.blip_model.to(devices.cpu)
145
+
146
+ def unload(self):
147
+ self.send_clip_to_ram()
148
+ self.send_blip_to_ram()
149
+
150
+ devices.torch_gc()
151
+
152
+ def rank(self, image_features, text_array, top_count=1):
153
+ import clip
154
+
155
+ devices.torch_gc()
156
+
157
+ if shared.opts.interrogate_clip_dict_limit != 0:
158
+ text_array = text_array[0:int(shared.opts.interrogate_clip_dict_limit)]
159
+
160
+ top_count = min(top_count, len(text_array))
161
+ text_tokens = clip.tokenize(list(text_array), truncate=True).to(devices.device_interrogate)
162
+ text_features = self.clip_model.encode_text(text_tokens).type(self.dtype)
163
+ text_features /= text_features.norm(dim=-1, keepdim=True)
164
+
165
+ similarity = torch.zeros((1, len(text_array))).to(devices.device_interrogate)
166
+ for i in range(image_features.shape[0]):
167
+ similarity += (100.0 * image_features[i].unsqueeze(0) @ text_features.T).softmax(dim=-1)
168
+ similarity /= image_features.shape[0]
169
+
170
+ top_probs, top_labels = similarity.cpu().topk(top_count, dim=-1)
171
+ return [(text_array[top_labels[0][i].numpy()], (top_probs[0][i].numpy()*100)) for i in range(top_count)]
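+ # In rank() above, image and text features are L2-normalised, so image_features @ text_features.T
+ # is cosine similarity; the factor of 100 mirrors CLIP's logit scale before the softmax, and
+ # topk() then keeps the top_count most probable entries of text_array.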
172
+
173
+ def generate_caption(self, pil_image):
174
+ gpu_image = transforms.Compose([
175
+ transforms.Resize((blip_image_eval_size, blip_image_eval_size), interpolation=InterpolationMode.BICUBIC),
176
+ transforms.ToTensor(),
177
+ transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
178
+ ])(pil_image).unsqueeze(0).type(self.dtype).to(devices.device_interrogate)
179
+
180
+ with torch.no_grad():
181
+ caption = self.blip_model.generate(gpu_image, sample=False, num_beams=shared.opts.interrogate_clip_num_beams, min_length=shared.opts.interrogate_clip_min_length, max_length=shared.opts.interrogate_clip_max_length)
182
+
183
+ return caption[0]
184
+
185
+ def interrogate(self, pil_image):
186
+ res = ""
187
+ shared.state.begin(job="interrogate")
188
+ try:
189
+ lowvram.send_everything_to_cpu()
190
+ devices.torch_gc()
191
+
192
+ self.load()
193
+
194
+ caption = self.generate_caption(pil_image)
195
+ self.send_blip_to_ram()
196
+ devices.torch_gc()
197
+
198
+ res = caption
199
+
200
+ clip_image = self.clip_preprocess(pil_image).unsqueeze(0).type(self.dtype).to(devices.device_interrogate)
201
+
202
+ with torch.no_grad(), devices.autocast():
203
+ image_features = self.clip_model.encode_image(clip_image).type(self.dtype)
204
+
205
+ image_features /= image_features.norm(dim=-1, keepdim=True)
206
+
207
+ for cat in self.categories():
208
+ matches = self.rank(image_features, cat.items, top_count=cat.topn)
209
+ for match, score in matches:
210
+ if shared.opts.interrogate_return_ranks:
211
+ res += f", ({match}:{score/100:.3f})"
212
+ else:
213
+ res += f", {match}"
214
+
215
+ except Exception:
216
+ errors.report("Error interrogating", exc_info=True)
217
+ res += "<error>"
218
+
219
+ self.unload()
220
+ shared.state.end()
221
+
222
+ return res
modules/launch_utils.py ADDED
@@ -0,0 +1,449 @@
1
+ # this scripts installs necessary requirements and launches main program in webui.py
2
+ import logging
3
+ import re
4
+ import subprocess
5
+ import os
6
+ import shutil
7
+ import sys
8
+ import importlib.util
9
+ import platform
10
+ import json
11
+ from functools import lru_cache
12
+
13
+ from modules import cmd_args, errors
14
+ from modules.paths_internal import script_path, extensions_dir
15
+ from modules.timer import startup_timer
16
+ from modules import logging_config
17
+
18
+ args, _ = cmd_args.parser.parse_known_args()
19
+ logging_config.setup_logging(args.loglevel)
20
+
21
+ python = sys.executable
22
+ git = os.environ.get('GIT', "git")
23
+ index_url = os.environ.get('INDEX_URL', "")
24
+ dir_repos = "repositories"
25
+
26
+ # Whether to default to printing command output
27
+ default_command_live = (os.environ.get('WEBUI_LAUNCH_LIVE_OUTPUT') == "1")
28
+
29
+ if 'GRADIO_ANALYTICS_ENABLED' not in os.environ:
30
+ os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False'
31
+
32
+
33
+ def check_python_version():
34
+ is_windows = platform.system() == "Windows"
35
+ major = sys.version_info.major
36
+ minor = sys.version_info.minor
37
+ micro = sys.version_info.micro
38
+
39
+ if is_windows:
40
+ supported_minors = [10]
41
+ else:
42
+ supported_minors = [7, 8, 9, 10, 11]
43
+
44
+ if not (major == 3 and minor in supported_minors):
45
+ import modules.errors
46
+
47
+ modules.errors.print_error_explanation(f"""
48
+ INCOMPATIBLE PYTHON VERSION
49
+
50
+ This program is tested with Python 3.10.6, but you have {major}.{minor}.{micro}.
51
+ If you encounter an error with "RuntimeError: Couldn't install torch." message,
52
+ or any other error regarding unsuccessful package (library) installation,
53
+ please downgrade (or upgrade) to the latest version of Python 3.10
54
+ and delete the current Python and "venv" folder in WebUI's directory.
55
+
56
+ You can download Python 3.10 from here: https://www.python.org/downloads/release/python-3106/
57
+
58
+ {"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases" if is_windows else ""}
59
+
60
+ Use --skip-python-version-check to suppress this warning.
61
+ """)
62
+
63
+
64
+ @lru_cache()
65
+ def commit_hash():
66
+ try:
67
+ return subprocess.check_output([git, "rev-parse", "HEAD"], shell=False, encoding='utf8').strip()
68
+ except Exception:
69
+ return "<none>"
70
+
71
+
72
+ @lru_cache()
73
+ def git_tag():
74
+ try:
75
+ return subprocess.check_output([git, "describe", "--tags"], shell=False, encoding='utf8').strip()
76
+ except Exception:
77
+ try:
78
+
79
+ changelog_md = os.path.join(os.path.dirname(os.path.dirname(__file__)), "CHANGELOG.md")
80
+ with open(changelog_md, "r", encoding="utf-8") as file:
81
+ line = next((line.strip() for line in file if line.strip()), "<none>")
82
+ line = line.replace("## ", "")
83
+ return line
84
+ except Exception:
85
+ return "<none>"
86
+
87
+
88
+ def run(command, desc=None, errdesc=None, custom_env=None, live: bool = default_command_live) -> str:
89
+ if desc is not None:
90
+ print(desc)
91
+
92
+ run_kwargs = {
93
+ "args": command,
94
+ "shell": True,
95
+ "env": os.environ if custom_env is None else custom_env,
96
+ "encoding": 'utf8',
97
+ "errors": 'ignore',
98
+ }
99
+
100
+ if not live:
101
+ run_kwargs["stdout"] = run_kwargs["stderr"] = subprocess.PIPE
102
+
103
+ result = subprocess.run(**run_kwargs)
104
+
105
+ if result.returncode != 0:
106
+ error_bits = [
107
+ f"{errdesc or 'Error running command'}.",
108
+ f"Command: {command}",
109
+ f"Error code: {result.returncode}",
110
+ ]
111
+ if result.stdout:
112
+ error_bits.append(f"stdout: {result.stdout}")
113
+ if result.stderr:
114
+ error_bits.append(f"stderr: {result.stderr}")
115
+ raise RuntimeError("\n".join(error_bits))
116
+
117
+ return (result.stdout or "")
118
+
119
+
120
+ def is_installed(package):
121
+ try:
122
+ spec = importlib.util.find_spec(package)
123
+ except ModuleNotFoundError:
124
+ return False
125
+
126
+ return spec is not None
127
+
128
+
129
+ def repo_dir(name):
130
+ return os.path.join(script_path, dir_repos, name)
131
+
132
+
133
+ def run_pip(command, desc=None, live=default_command_live):
134
+ if args.skip_install:
135
+ return
136
+
137
+ index_url_line = f' --index-url {index_url}' if index_url != '' else ''
138
+ return run(f'"{python}" -m pip {command} --prefer-binary{index_url_line}', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}", live=live)
139
+
140
+
141
+ def check_run_python(code: str) -> bool:
142
+ result = subprocess.run([python, "-c", code], capture_output=True, shell=False)
143
+ return result.returncode == 0
144
+
145
+
146
+ def git_fix_workspace(dir, name):
147
+ run(f'"{git}" -C "{dir}" fetch --refetch --no-auto-gc', f"Fetching all contents for {name}", f"Couldn't fetch {name}", live=True)
148
+ run(f'"{git}" -C "{dir}" gc --aggressive --prune=now', f"Pruning {name}", f"Couldn't prune {name}", live=True)
149
+ return
150
+
151
+
152
+ def run_git(dir, name, command, desc=None, errdesc=None, custom_env=None, live: bool = default_command_live, autofix=True):
153
+ try:
154
+ return run(f'"{git}" -C "{dir}" {command}', desc=desc, errdesc=errdesc, custom_env=custom_env, live=live)
155
+ except RuntimeError:
156
+ if not autofix:
157
+ raise
158
+
159
+ print(f"{errdesc}, attempting autofix...")
160
+ git_fix_workspace(dir, name)
161
+
162
+ return run(f'"{git}" -C "{dir}" {command}', desc=desc, errdesc=errdesc, custom_env=custom_env, live=live)
163
+
164
+
165
+ def git_clone(url, dir, name, commithash=None):
166
+ # TODO clone into temporary dir and move if successful
167
+
168
+ if os.path.exists(dir):
169
+ if commithash is None:
170
+ return
171
+
172
+ current_hash = run_git(dir, name, 'rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}", live=False).strip()
173
+ if current_hash == commithash:
174
+ return
175
+
176
+ if run_git(dir, name, 'config --get remote.origin.url', None, f"Couldn't determine {name}'s origin URL", live=False).strip() != url:
177
+ run_git(dir, name, f'remote set-url origin "{url}"', None, f"Failed to set {name}'s origin URL", live=False)
178
+
179
+ run_git(dir, name, 'fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}", autofix=False)
180
+
181
+ run_git(dir, name, f'checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}", live=True)
182
+
183
+ return
184
+
185
+ try:
186
+ run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}", live=True)
187
+ except RuntimeError:
188
+ shutil.rmtree(dir, ignore_errors=True)
189
+ raise
190
+
191
+ if commithash is not None:
192
+ run(f'"{git}" -C "{dir}" checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}")
193
+
194
+
195
+ def git_pull_recursive(dir):
196
+ for subdir, _, _ in os.walk(dir):
197
+ if os.path.exists(os.path.join(subdir, '.git')):
198
+ try:
199
+ output = subprocess.check_output([git, '-C', subdir, 'pull', '--autostash'])
200
+ print(f"Pulled changes for repository in '{subdir}':\n{output.decode('utf-8').strip()}\n")
201
+ except subprocess.CalledProcessError as e:
202
+ print(f"Couldn't perform 'git pull' on repository in '{subdir}':\n{e.output.decode('utf-8').strip()}\n")
203
+
204
+
205
+ def version_check(commit):
206
+ try:
207
+ import requests
208
+ commits = requests.get('https://api.github.com/repos/AUTOMATIC1111/stable-diffusion-webui/branches/master').json()
209
+ if commit != "<none>" and commits['commit']['sha'] != commit:
210
+ print("--------------------------------------------------------")
211
+ print("| You are not up to date with the most recent release. |")
212
+ print("| Consider running `git pull` to update. |")
213
+ print("--------------------------------------------------------")
214
+ elif commits['commit']['sha'] == commit:
215
+ print("You are up to date with the most recent release.")
216
+ else:
217
+ print("Not a git clone, can't perform version check.")
218
+ except Exception as e:
219
+ print("version check failed", e)
220
+
221
+
222
+ def run_extension_installer(extension_dir):
223
+ path_installer = os.path.join(extension_dir, "install.py")
224
+ if not os.path.isfile(path_installer):
225
+ return
226
+
227
+ try:
228
+ env = os.environ.copy()
229
+ env['PYTHONPATH'] = f"{os.path.abspath('.')}{os.pathsep}{env.get('PYTHONPATH', '')}"
230
+
231
+ stdout = run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env).strip()
232
+ if stdout:
233
+ print(stdout)
234
+ except Exception as e:
235
+ errors.report(str(e))
236
+
237
+
238
+ def list_extensions(settings_file):
239
+ settings = {}
240
+
241
+ try:
242
+ if os.path.isfile(settings_file):
243
+ with open(settings_file, "r", encoding="utf8") as file:
244
+ settings = json.load(file)
245
+ except Exception:
246
+ errors.report("Could not load settings", exc_info=True)
247
+
248
+ disabled_extensions = set(settings.get('disabled_extensions', []))
249
+ disable_all_extensions = settings.get('disable_all_extensions', 'none')
250
+
251
+ if disable_all_extensions != 'none' or args.disable_extra_extensions or args.disable_all_extensions or not os.path.isdir(extensions_dir):
252
+ return []
253
+
254
+ return [x for x in os.listdir(extensions_dir) if x not in disabled_extensions]
255
+
256
+
257
+ def run_extensions_installers(settings_file):
258
+ if not os.path.isdir(extensions_dir):
259
+ return
260
+
261
+ with startup_timer.subcategory("run extensions installers"):
262
+ for dirname_extension in list_extensions(settings_file):
263
+ logging.debug(f"Installing {dirname_extension}")
264
+
265
+ path = os.path.join(extensions_dir, dirname_extension)
266
+
267
+ if os.path.isdir(path):
268
+ run_extension_installer(path)
269
+ startup_timer.record(dirname_extension)
270
+
271
+
272
+ re_requirement = re.compile(r"\s*([-_a-zA-Z0-9]+)\s*(?:==\s*([-+_.a-zA-Z0-9]+))?\s*")
273
+
274
+
275
+ def requirements_met(requirements_file):
276
+ """
277
+ Does a simple parse of a requirements.txt file to determine if all requirements in it
278
+ are already installed. Returns True if so, False if not installed or parsing fails.
279
+ """
280
+
281
+ import importlib.metadata
282
+ import packaging.version
283
+
284
+ with open(requirements_file, "r", encoding="utf8") as file:
285
+ for line in file:
286
+ if line.strip() == "":
287
+ continue
288
+
289
+ m = re.match(re_requirement, line)
290
+ if m is None:
291
+ return False
292
+
293
+ package = m.group(1).strip()
294
+ version_required = (m.group(2) or "").strip()
295
+
296
+ if version_required == "":
297
+ continue
298
+
299
+ try:
300
+ version_installed = importlib.metadata.version(package)
301
+ except Exception:
302
+ return False
303
+
304
+ if packaging.version.parse(version_required) != packaging.version.parse(version_installed):
305
+ return False
306
+
307
+ return True
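+ # Only simple "name" or "name==version" lines are recognised by re_requirement; e.g. "torch==2.0.1"
+ # is compared against importlib.metadata.version("torch"), a bare "numpy" line is skipped, and any
+ # line the regex cannot parse makes the whole check return False.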
308
+
309
+
310
+ def prepare_environment():
311
+ torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu118")
312
+ torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url {torch_index_url}")
313
+ requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
314
+
315
+ xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20')
316
+ clip_package = os.environ.get('CLIP_PACKAGE', "https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip")
317
+ openclip_package = os.environ.get('OPENCLIP_PACKAGE', "https://github.com/mlfoundations/open_clip/archive/bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b.zip")
318
+
319
+ stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/Stability-AI/stablediffusion.git")
320
+ stable_diffusion_xl_repo = os.environ.get('STABLE_DIFFUSION_XL_REPO', "https://github.com/Stability-AI/generative-models.git")
321
+ k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
322
+ codeformer_repo = os.environ.get('CODEFORMER_REPO', 'https://github.com/sczhou/CodeFormer.git')
323
+ blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git')
324
+
325
+ stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf")
326
+ stable_diffusion_xl_commit_hash = os.environ.get('STABLE_DIFFUSION_XL_COMMIT_HASH', "45c443b316737a4ab6e40413d7794a7f5657c19f")
327
+ k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "ab527a9a6d347f364e3d185ba6d714e22d80cb3c")
328
+ codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
329
+ blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
330
+
331
+ try:
332
+ # the existence of this file is a signal to webui.sh/bat that webui needs to be restarted when it stops execution
333
+ os.remove(os.path.join(script_path, "tmp", "restart"))
334
+ os.environ.setdefault('SD_WEBUI_RESTARTING', '1')
335
+ except OSError:
336
+ pass
337
+
338
+ if not args.skip_python_version_check:
339
+ check_python_version()
340
+
341
+ startup_timer.record("checks")
342
+
343
+ commit = commit_hash()
344
+ tag = git_tag()
345
+ startup_timer.record("git version info")
346
+
347
+ print(f"Python {sys.version}")
348
+ print(f"Version: {tag}")
349
+ print(f"Commit hash: {commit}")
350
+
351
+ if args.reinstall_torch or not is_installed("torch") or not is_installed("torchvision"):
352
+ run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch", live=True)
353
+ startup_timer.record("install torch")
354
+
355
+ if not args.skip_torch_cuda_test and not check_run_python("import torch; assert torch.cuda.is_available()"):
356
+ raise RuntimeError(
357
+ 'Torch is not able to use GPU; '
358
+ 'add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'
359
+ )
360
+ startup_timer.record("torch GPU test")
361
+
362
+ if not is_installed("clip"):
363
+ run_pip(f"install {clip_package}", "clip")
364
+ startup_timer.record("install clip")
365
+
366
+ if not is_installed("open_clip"):
367
+ run_pip(f"install {openclip_package}", "open_clip")
368
+ startup_timer.record("install open_clip")
369
+
370
+ if (not is_installed("xformers") or args.reinstall_xformers) and args.xformers:
371
+ run_pip(f"install -U -I --no-deps {xformers_package}", "xformers")
372
+ startup_timer.record("install xformers")
373
+
374
+ if not is_installed("ngrok") and args.ngrok:
375
+ run_pip("install ngrok", "ngrok")
376
+ startup_timer.record("install ngrok")
377
+
378
+ os.makedirs(os.path.join(script_path, dir_repos), exist_ok=True)
379
+
380
+ git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash)
381
+ git_clone(stable_diffusion_xl_repo, repo_dir('generative-models'), "Stable Diffusion XL", stable_diffusion_xl_commit_hash)
382
+ git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
383
+ git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
384
+ git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash)
385
+
386
+ startup_timer.record("clone repositores")
387
+
388
+ if not is_installed("lpips"):
389
+ run_pip(f"install -r \"{os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}\"", "requirements for CodeFormer")
390
+ startup_timer.record("install CodeFormer requirements")
391
+
392
+ if not os.path.isfile(requirements_file):
393
+ requirements_file = os.path.join(script_path, requirements_file)
394
+
395
+ if not requirements_met(requirements_file):
396
+ run_pip(f"install -r \"{requirements_file}\"", "requirements")
397
+ startup_timer.record("install requirements")
398
+
399
+ if not args.skip_install:
400
+ run_extensions_installers(settings_file=args.ui_settings_file)
401
+
402
+ if args.update_check:
403
+ version_check(commit)
404
+ startup_timer.record("check version")
405
+
406
+ if args.update_all_extensions:
407
+ git_pull_recursive(extensions_dir)
408
+ startup_timer.record("update extensions")
409
+
410
+ if "--exit" in sys.argv:
411
+ print("Exiting because of --exit argument")
412
+ exit(0)
413
+
414
+
415
+
416
+ def configure_for_tests():
417
+ if "--api" not in sys.argv:
418
+ sys.argv.append("--api")
419
+ if "--ckpt" not in sys.argv:
420
+ sys.argv.append("--ckpt")
421
+ sys.argv.append(os.path.join(script_path, "test/test_files/empty.pt"))
422
+ if "--skip-torch-cuda-test" not in sys.argv:
423
+ sys.argv.append("--skip-torch-cuda-test")
424
+ if "--disable-nan-check" not in sys.argv:
425
+ sys.argv.append("--disable-nan-check")
426
+
427
+ os.environ['COMMANDLINE_ARGS'] = ""
428
+
429
+
430
+ def start():
431
+ print(f"Launching {'API server' if '--nowebui' in sys.argv else 'Web UI'} with arguments: {' '.join(sys.argv[1:])}")
432
+ import webui
433
+ if '--nowebui' in sys.argv:
434
+ webui.api_only()
435
+ else:
436
+ webui.webui()
437
+
438
+
439
+ def dump_sysinfo():
440
+ from modules import sysinfo
441
+ import datetime
442
+
443
+ text = sysinfo.get()
444
+ filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt"
445
+
446
+ with open(filename, "w", encoding="utf8") as file:
447
+ file.write(text)
448
+
449
+ return filename
modules/localization.py ADDED
@@ -0,0 +1,34 @@
1
+ import json
2
+ import os
3
+
4
+ from modules import errors, scripts
5
+
6
+ localizations = {}
7
+
8
+
9
+ def list_localizations(dirname):
10
+ localizations.clear()
11
+
12
+ for file in os.listdir(dirname):
13
+ fn, ext = os.path.splitext(file)
14
+ if ext.lower() != ".json":
15
+ continue
16
+
17
+ localizations[fn] = os.path.join(dirname, file)
18
+
19
+ for file in scripts.list_scripts("localizations", ".json"):
20
+ fn, ext = os.path.splitext(file.filename)
21
+ localizations[fn] = file.path
22
+
23
+
24
+ def localization_js(current_localization_name: str) -> str:
25
+ fn = localizations.get(current_localization_name, None)
26
+ data = {}
27
+ if fn is not None:
28
+ try:
29
+ with open(fn, "r", encoding="utf8") as file:
30
+ data = json.load(file)
31
+ except Exception:
32
+ errors.report(f"Error loading localization from {fn}", exc_info=True)
33
+
34
+ return f"window.localization = {json.dumps(data)}"
modules/logging_config.py ADDED
@@ -0,0 +1,16 @@
1
+ import os
2
+ import logging
3
+
4
+
5
+ def setup_logging(loglevel):
6
+ if loglevel is None:
7
+ loglevel = os.environ.get("SD_WEBUI_LOG_LEVEL")
8
+
9
+ if loglevel:
10
+ log_level = getattr(logging, loglevel.upper(), None) or logging.INFO
11
+ logging.basicConfig(
12
+ level=log_level,
13
+ format='%(asctime)s %(levelname)s [%(name)s] %(message)s',
14
+ datefmt='%Y-%m-%d %H:%M:%S',
15
+ )
16
+
modules/lowvram.py ADDED
@@ -0,0 +1,147 @@
1
+ import torch
2
+ from modules import devices, shared
3
+
4
+ module_in_gpu = None
5
+ cpu = torch.device("cpu")
6
+
7
+
8
+ def send_everything_to_cpu():
9
+ global module_in_gpu
10
+
11
+ if module_in_gpu is not None:
12
+ module_in_gpu.to(cpu)
13
+
14
+ module_in_gpu = None
15
+
16
+
17
+ def is_needed(sd_model):
18
+ return shared.cmd_opts.lowvram or shared.cmd_opts.medvram or shared.cmd_opts.medvram_sdxl and hasattr(sd_model, 'conditioner')
19
+
20
+
21
+ def apply(sd_model):
22
+ enable = is_needed(sd_model)
23
+ shared.parallel_processing_allowed = not enable
24
+
25
+ if enable:
26
+ setup_for_low_vram(sd_model, not shared.cmd_opts.lowvram)
27
+ else:
28
+ sd_model.lowvram = False
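+ # In this scheme --medvram keeps the whole UNet (sd_model.model) as a single swappable unit,
+ # while --lowvram additionally splits the UNet's input/middle/output blocks in
+ # setup_for_low_vram() below so only one block sits on the GPU at a time.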
29
+
30
+
31
+ def setup_for_low_vram(sd_model, use_medvram):
32
+ if getattr(sd_model, 'lowvram', False):
33
+ return
34
+
35
+ sd_model.lowvram = True
36
+
37
+ parents = {}
38
+
39
+ def send_me_to_gpu(module, _):
40
+ """send this module to GPU; send whatever tracked module was previous in GPU to CPU;
41
+ we add this as forward_pre_hook to a lot of modules and this way all but one of them will
42
+ be in CPU
43
+ """
44
+ global module_in_gpu
45
+
46
+ module = parents.get(module, module)
47
+
48
+ if module_in_gpu == module:
49
+ return
50
+
51
+ if module_in_gpu is not None:
52
+ module_in_gpu.to(cpu)
53
+
54
+ module.to(devices.device)
55
+ module_in_gpu = module
56
+
57
+ # see below for register_forward_pre_hook;
58
+ # first_stage_model does not use forward(), it uses encode/decode, so register_forward_pre_hook is
59
+ # useless here, and we just replace those methods
60
+
61
+ first_stage_model = sd_model.first_stage_model
62
+ first_stage_model_encode = sd_model.first_stage_model.encode
63
+ first_stage_model_decode = sd_model.first_stage_model.decode
64
+
65
+ def first_stage_model_encode_wrap(x):
66
+ send_me_to_gpu(first_stage_model, None)
67
+ return first_stage_model_encode(x)
68
+
69
+ def first_stage_model_decode_wrap(z):
70
+ send_me_to_gpu(first_stage_model, None)
71
+ return first_stage_model_decode(z)
72
+
73
+ to_remain_in_cpu = [
74
+ (sd_model, 'first_stage_model'),
75
+ (sd_model, 'depth_model'),
76
+ (sd_model, 'embedder'),
77
+ (sd_model, 'model'),
78
+ (sd_model, 'embedder'),
79
+ ]
80
+
81
+ is_sdxl = hasattr(sd_model, 'conditioner')
82
+ is_sd2 = not is_sdxl and hasattr(sd_model.cond_stage_model, 'model')
83
+
84
+ if is_sdxl:
85
+ to_remain_in_cpu.append((sd_model, 'conditioner'))
86
+ elif is_sd2:
87
+ to_remain_in_cpu.append((sd_model.cond_stage_model, 'model'))
88
+ else:
89
+ to_remain_in_cpu.append((sd_model.cond_stage_model, 'transformer'))
90
+
91
+ # remove several big modules: cond, first_stage, depth/embedder (if applicable), and unet from the model
92
+ stored = []
93
+ for obj, field in to_remain_in_cpu:
94
+ module = getattr(obj, field, None)
95
+ stored.append(module)
96
+ setattr(obj, field, None)
97
+
98
+ # send the model to GPU.
99
+ sd_model.to(devices.device)
100
+
101
+ # put modules back. the modules will be in CPU.
102
+ for (obj, field), module in zip(to_remain_in_cpu, stored):
103
+ setattr(obj, field, module)
104
+
105
+ # register hooks for the first three models
106
+ if is_sdxl:
107
+ sd_model.conditioner.register_forward_pre_hook(send_me_to_gpu)
108
+ elif is_sd2:
109
+ sd_model.cond_stage_model.model.register_forward_pre_hook(send_me_to_gpu)
110
+ sd_model.cond_stage_model.model.token_embedding.register_forward_pre_hook(send_me_to_gpu)
111
+ parents[sd_model.cond_stage_model.model] = sd_model.cond_stage_model
112
+ parents[sd_model.cond_stage_model.model.token_embedding] = sd_model.cond_stage_model
113
+ else:
114
+ sd_model.cond_stage_model.transformer.register_forward_pre_hook(send_me_to_gpu)
115
+ parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model
116
+
117
+ sd_model.first_stage_model.register_forward_pre_hook(send_me_to_gpu)
118
+ sd_model.first_stage_model.encode = first_stage_model_encode_wrap
119
+ sd_model.first_stage_model.decode = first_stage_model_decode_wrap
120
+ if sd_model.depth_model:
121
+ sd_model.depth_model.register_forward_pre_hook(send_me_to_gpu)
122
+ if sd_model.embedder:
123
+ sd_model.embedder.register_forward_pre_hook(send_me_to_gpu)
124
+
125
+ if use_medvram:
126
+ sd_model.model.register_forward_pre_hook(send_me_to_gpu)
127
+ else:
128
+ diff_model = sd_model.model.diffusion_model
129
+
130
+ # the third remaining model is still too big for 4 GB, so we also do the same for its submodules
131
+ # so that only one of them is in GPU at a time
132
+ stored = diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed
133
+ diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed = None, None, None, None
134
+ sd_model.model.to(devices.device)
135
+ diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed = stored
136
+
137
+ # install hooks for bits of third model
138
+ diff_model.time_embed.register_forward_pre_hook(send_me_to_gpu)
139
+ for block in diff_model.input_blocks:
140
+ block.register_forward_pre_hook(send_me_to_gpu)
141
+ diff_model.middle_block.register_forward_pre_hook(send_me_to_gpu)
142
+ for block in diff_model.output_blocks:
143
+ block.register_forward_pre_hook(send_me_to_gpu)
144
+
145
+
146
+ def is_enabled(sd_model):
147
+ return sd_model.lowvram
modules/mac_specific.py ADDED
@@ -0,0 +1,83 @@
1
+ import logging
2
+
3
+ import torch
4
+ import platform
5
+ from modules.sd_hijack_utils import CondFunc
6
+ from packaging import version
7
+ from modules import shared
8
+
9
+ log = logging.getLogger(__name__)
10
+
11
+
12
+ # before torch version 1.13, has_mps is only available in nightly pytorch and macOS 12.3+,
13
+ # so check with `getattr` and try creating an MPS tensor, for compatibility.
14
+ # in torch version 1.13, backends.mps.is_available() and backends.mps.is_built() were introduced to check mps availability,
15
+ # since torch 2.0.1+ nightly build, getattr(torch, 'has_mps', False) was deprecated, see https://github.com/pytorch/pytorch/pull/103279
16
+ def check_for_mps() -> bool:
17
+ if version.parse(torch.__version__) <= version.parse("2.0.1"):
18
+ if not getattr(torch, 'has_mps', False):
19
+ return False
20
+ try:
21
+ torch.zeros(1).to(torch.device("mps"))
22
+ return True
23
+ except Exception:
24
+ return False
25
+ else:
26
+ return torch.backends.mps.is_available() and torch.backends.mps.is_built()
27
+
28
+
29
+ has_mps = check_for_mps()
30
+
31
+
32
+ def torch_mps_gc() -> None:
33
+ try:
34
+ if shared.state.current_latent is not None:
35
+ log.debug("`current_latent` is set, skipping MPS garbage collection")
36
+ return
37
+ from torch.mps import empty_cache
38
+ empty_cache()
39
+ except Exception:
40
+ log.warning("MPS garbage collection failed", exc_info=True)
41
+
42
+
43
+ # MPS workaround for https://github.com/pytorch/pytorch/issues/89784
44
+ def cumsum_fix(input, cumsum_func, *args, **kwargs):
45
+ if input.device.type == 'mps':
46
+ output_dtype = kwargs.get('dtype', input.dtype)
47
+ if output_dtype == torch.int64:
48
+ return cumsum_func(input.cpu(), *args, **kwargs).to(input.device)
49
+ elif output_dtype == torch.bool or cumsum_needs_int_fix and (output_dtype == torch.int8 or output_dtype == torch.int16):
50
+ return cumsum_func(input.to(torch.int32), *args, **kwargs).to(torch.int64)
51
+ return cumsum_func(input, *args, **kwargs)
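+ # CondFunc('torch.something', sub, cond) (from sd_hijack_utils; target name here purely illustrative)
+ # patches the named callable so that sub(orig, *args, **kwargs) runs when cond(orig, *args, **kwargs)
+ # is truthy (always, when cond is None); the workarounds below use it to reroute problematic MPS cases
+ # through the CPU or wider dtypes.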
52
+
53
+
54
+ if has_mps:
55
+ if platform.mac_ver()[0].startswith("13.2."):
56
+ # MPS workaround for https://github.com/pytorch/pytorch/issues/95188, thanks to danieldk (https://github.com/explosion/curated-transformers/pull/124)
57
+ CondFunc('torch.nn.functional.linear', lambda _, input, weight, bias: (torch.matmul(input, weight.t()) + bias) if bias is not None else torch.matmul(input, weight.t()), lambda _, input, weight, bias: input.numel() > 10485760)
58
+
59
+ if version.parse(torch.__version__) < version.parse("1.13"):
60
+ # PyTorch 1.13 doesn't need these fixes but unfortunately is slower and has regressions that prevent training from working
61
+
62
+ # MPS workaround for https://github.com/pytorch/pytorch/issues/79383
63
+ CondFunc('torch.Tensor.to', lambda orig_func, self, *args, **kwargs: orig_func(self.contiguous(), *args, **kwargs),
64
+ lambda _, self, *args, **kwargs: self.device.type != 'mps' and (args and isinstance(args[0], torch.device) and args[0].type == 'mps' or isinstance(kwargs.get('device'), torch.device) and kwargs['device'].type == 'mps'))
65
+ # MPS workaround for https://github.com/pytorch/pytorch/issues/80800
66
+ CondFunc('torch.nn.functional.layer_norm', lambda orig_func, *args, **kwargs: orig_func(*([args[0].contiguous()] + list(args[1:])), **kwargs),
67
+ lambda _, *args, **kwargs: args and isinstance(args[0], torch.Tensor) and args[0].device.type == 'mps')
68
+ # MPS workaround for https://github.com/pytorch/pytorch/issues/90532
69
+ CondFunc('torch.Tensor.numpy', lambda orig_func, self, *args, **kwargs: orig_func(self.detach(), *args, **kwargs), lambda _, self, *args, **kwargs: self.requires_grad)
70
+ elif version.parse(torch.__version__) > version.parse("1.13.1"):
71
+ cumsum_needs_int_fix = not torch.Tensor([1,2]).to(torch.device("mps")).equal(torch.ShortTensor([1,1]).to(torch.device("mps")).cumsum(0))
72
+ cumsum_fix_func = lambda orig_func, input, *args, **kwargs: cumsum_fix(input, orig_func, *args, **kwargs)
73
+ CondFunc('torch.cumsum', cumsum_fix_func, None)
74
+ CondFunc('torch.Tensor.cumsum', cumsum_fix_func, None)
75
+ CondFunc('torch.narrow', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).clone(), None)
76
+
77
+ # MPS workaround for https://github.com/pytorch/pytorch/issues/96113
78
+ CondFunc('torch.nn.functional.layer_norm', lambda orig_func, x, normalized_shape, weight, bias, eps, **kwargs: orig_func(x.float(), normalized_shape, weight.float() if weight is not None else None, bias.float() if bias is not None else bias, eps).to(x.dtype), lambda _, input, *args, **kwargs: len(args) == 4 and input.device.type == 'mps')
79
+
80
+ # MPS workaround for https://github.com/pytorch/pytorch/issues/92311
81
+ if platform.processor() == 'i386':
82
+ for funcName in ['torch.argmax', 'torch.Tensor.argmax']:
83
+ CondFunc(funcName, lambda _, input, *args, **kwargs: torch.max(input.float() if input.dtype == torch.int64 else input, *args, **kwargs)[1], lambda _, input, *args, **kwargs: input.device.type == 'mps')
modules/masking.py ADDED
@@ -0,0 +1,99 @@
1
+ from PIL import Image, ImageFilter, ImageOps
2
+
3
+
4
+ def get_crop_region(mask, pad=0):
5
+ """finds a rectangular region that contains all masked ares in an image. Returns (x1, y1, x2, y2) coordinates of the rectangle.
6
+ For example, if a user has painted the top-right part of a 512x512 image, the result may be (256, 0, 512, 256)"""
7
+
8
+ h, w = mask.shape
9
+
10
+ crop_left = 0
11
+ for i in range(w):
12
+ if not (mask[:, i] == 0).all():
13
+ break
14
+ crop_left += 1
15
+
16
+ crop_right = 0
17
+ for i in reversed(range(w)):
18
+ if not (mask[:, i] == 0).all():
19
+ break
20
+ crop_right += 1
21
+
22
+ crop_top = 0
23
+ for i in range(h):
24
+ if not (mask[i] == 0).all():
25
+ break
26
+ crop_top += 1
27
+
28
+ crop_bottom = 0
29
+ for i in reversed(range(h)):
30
+ if not (mask[i] == 0).all():
31
+ break
32
+ crop_bottom += 1
33
+
34
+ return (
35
+ int(max(crop_left-pad, 0)),
36
+ int(max(crop_top-pad, 0)),
37
+ int(min(w - crop_right + pad, w)),
38
+ int(min(h - crop_bottom + pad, h))
39
+ )
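+ # Worked example: for a 512x512 mask painted only in the top-right quadrant the scans give
+ # crop_left=256, crop_right=0, crop_top=0, crop_bottom=256, so this returns (256, 0, 512, 256);
+ # a non-zero pad grows that box by pad pixels per side, clamped to the image bounds.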
40
+
41
+
42
+ def expand_crop_region(crop_region, processing_width, processing_height, image_width, image_height):
43
+ """expands crop region get_crop_region() to match the ratio of the image the region will processed in; returns expanded region
44
+ for example, if the user drew a mask in a 128x32 region, and the dimensions for processing are 512x512, the region will be expanded to 128x128."""
45
+
46
+ x1, y1, x2, y2 = crop_region
47
+
48
+ ratio_crop_region = (x2 - x1) / (y2 - y1)
49
+ ratio_processing = processing_width / processing_height
50
+
51
+ if ratio_crop_region > ratio_processing:
52
+ desired_height = (x2 - x1) / ratio_processing
53
+ desired_height_diff = int(desired_height - (y2-y1))
54
+ y1 -= desired_height_diff//2
55
+ y2 += desired_height_diff - desired_height_diff//2
56
+ if y2 >= image_height:
57
+ diff = y2 - image_height
58
+ y2 -= diff
59
+ y1 -= diff
60
+ if y1 < 0:
61
+ y2 -= y1
62
+ y1 -= y1
63
+ if y2 >= image_height:
64
+ y2 = image_height
65
+ else:
66
+ desired_width = (y2 - y1) * ratio_processing
67
+ desired_width_diff = int(desired_width - (x2-x1))
68
+ x1 -= desired_width_diff//2
69
+ x2 += desired_width_diff - desired_width_diff//2
70
+ if x2 >= image_width:
71
+ diff = x2 - image_width
72
+ x2 -= diff
73
+ x1 -= diff
74
+ if x1 < 0:
75
+ x2 -= x1
76
+ x1 -= x1
77
+ if x2 >= image_width:
78
+ x2 = image_width
79
+
80
+ return x1, y1, x2, y2
81
+
82
+
83
+ def fill(image, mask):
84
+ """fills masked regions with colors from image using blur. Not extremely effective."""
85
+
86
+ image_mod = Image.new('RGBA', (image.width, image.height))
87
+
88
+ image_masked = Image.new('RGBa', (image.width, image.height))
89
+ image_masked.paste(image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(mask.convert('L')))
90
+
91
+ image_masked = image_masked.convert('RGBa')
92
+
93
+ for radius, repeats in [(256, 1), (64, 1), (16, 2), (4, 4), (2, 2), (0, 1)]:
94
+ blurred = image_masked.filter(ImageFilter.GaussianBlur(radius)).convert('RGBA')
95
+ for _ in range(repeats):
96
+ image_mod.alpha_composite(blurred)
97
+
98
+ return image_mod.convert("RGB")
99
+
modules/memmon.py ADDED
@@ -0,0 +1,92 @@
1
+ import threading
2
+ import time
3
+ from collections import defaultdict
4
+
5
+ import torch
6
+
7
+
8
+ class MemUsageMonitor(threading.Thread):
9
+ run_flag = None
10
+ device = None
11
+ disabled = False
12
+ opts = None
13
+ data = None
14
+
15
+ def __init__(self, name, device, opts):
16
+ threading.Thread.__init__(self)
17
+ self.name = name
18
+ self.device = device
19
+ self.opts = opts
20
+
21
+ self.daemon = True
22
+ self.run_flag = threading.Event()
23
+ self.data = defaultdict(int)
24
+
25
+ try:
26
+ self.cuda_mem_get_info()
27
+ torch.cuda.memory_stats(self.device)
28
+ except Exception as e: # AMD or whatever
29
+ print(f"Warning: caught exception '{e}', memory monitor disabled")
30
+ self.disabled = True
31
+
32
+ def cuda_mem_get_info(self):
33
+ index = self.device.index if self.device.index is not None else torch.cuda.current_device()
34
+ return torch.cuda.mem_get_info(index)
35
+
36
+ def run(self):
37
+ if self.disabled:
38
+ return
39
+
40
+ while True:
41
+ self.run_flag.wait()
42
+
43
+ torch.cuda.reset_peak_memory_stats()
44
+ self.data.clear()
45
+
46
+ if self.opts.memmon_poll_rate <= 0:
47
+ self.run_flag.clear()
48
+ continue
49
+
50
+ self.data["min_free"] = self.cuda_mem_get_info()[0]
51
+
52
+ while self.run_flag.is_set():
53
+ free, total = self.cuda_mem_get_info()
54
+ self.data["min_free"] = min(self.data["min_free"], free)
55
+
56
+ time.sleep(1 / self.opts.memmon_poll_rate)
57
+
58
+ def dump_debug(self):
59
+ print(self, 'recorded data:')
60
+ for k, v in self.read().items():
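+ # -(v // -(1024 ** 2)) is ceiling division: the byte count converted to MiB, rounded up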
61
+ print(k, -(v // -(1024 ** 2)))
62
+
63
+ print(self, 'raw torch memory stats:')
64
+ tm = torch.cuda.memory_stats(self.device)
65
+ for k, v in tm.items():
66
+ if 'bytes' not in k:
67
+ continue
68
+ print('\t' if 'peak' in k else '', k, -(v // -(1024 ** 2)))
69
+
70
+ print(torch.cuda.memory_summary())
71
+
72
+ def monitor(self):
73
+ self.run_flag.set()
74
+
75
+ def read(self):
76
+ if not self.disabled:
77
+ free, total = self.cuda_mem_get_info()
78
+ self.data["free"] = free
79
+ self.data["total"] = total
80
+
81
+ torch_stats = torch.cuda.memory_stats(self.device)
82
+ self.data["active"] = torch_stats["active.all.current"]
83
+ self.data["active_peak"] = torch_stats["active_bytes.all.peak"]
84
+ self.data["reserved"] = torch_stats["reserved_bytes.all.current"]
85
+ self.data["reserved_peak"] = torch_stats["reserved_bytes.all.peak"]
86
+ self.data["system_peak"] = total - self.data["min_free"]
87
+
88
+ return self.data
89
+
90
+ def stop(self):
91
+ self.run_flag.clear()
92
+ return self.read()
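+ # Rough usage sketch (variable names hypothetical): the monitor is a daemon thread, so callers do
+ # mem_mon = MemUsageMonitor('MemMon', devices.device, shared.opts); mem_mon.start()
+ # mem_mon.monitor() # arm polling before a job (needs opts.memmon_poll_rate > 0)
+ # stats = mem_mon.stop() # afterwards: clears the flag and returns the collected readings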
modules/modelloader.py ADDED
@@ -0,0 +1,179 @@
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import shutil
5
+ import importlib
6
+ from urllib.parse import urlparse
7
+
8
+ from modules import shared
9
+ from modules.upscaler import Upscaler, UpscalerLanczos, UpscalerNearest, UpscalerNone
10
+ from modules.paths import script_path, models_path
11
+
12
+
13
+ def load_file_from_url(
14
+ url: str,
15
+ *,
16
+ model_dir: str,
17
+ progress: bool = True,
18
+ file_name: str | None = None,
19
+ ) -> str:
20
+ """Download a file from `url` into `model_dir`, using the file present if possible.
21
+
22
+ Returns the path to the downloaded file.
23
+ """
24
+ os.makedirs(model_dir, exist_ok=True)
25
+ if not file_name:
26
+ parts = urlparse(url)
27
+ file_name = os.path.basename(parts.path)
28
+ cached_file = os.path.abspath(os.path.join(model_dir, file_name))
29
+ if not os.path.exists(cached_file):
30
+ print(f'Downloading: "{url}" to {cached_file}\n')
31
+ from torch.hub import download_url_to_file
32
+ download_url_to_file(url, cached_file, progress=progress)
33
+ return cached_file
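+ # Minimal usage sketch (URL and directory purely illustrative):
+ # path = load_file_from_url("https://example.com/model.pth", model_dir=os.path.join(models_path, "ESRGAN"))
+ # downloads only if <models_path>/ESRGAN/model.pth does not already exist, and returns that path.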
34
+
35
+
36
+ def load_models(model_path: str, model_url: str = None, command_path: str = None, ext_filter=None, download_name=None, ext_blacklist=None) -> list:
37
+ """
38
+ A one-and-done loader that tries to find the desired models in the specified directories.
39
+
40
+ @param download_name: Specify to download from model_url immediately.
41
+ @param model_url: If no other models are found, this will be downloaded on upscale.
42
+ @param model_path: The location to store/find models in.
43
+ @param command_path: A command-line argument to search for models in first.
44
+ @param ext_filter: An optional list of filename extensions to filter by
45
+ @return: A list of paths containing the desired model(s)
46
+ """
47
+ output = []
48
+
49
+ try:
50
+ places = []
51
+
52
+ if command_path is not None and command_path != model_path:
53
+ pretrained_path = os.path.join(command_path, 'experiments/pretrained_models')
54
+ if os.path.exists(pretrained_path):
55
+ print(f"Appending path: {pretrained_path}")
56
+ places.append(pretrained_path)
57
+ elif os.path.exists(command_path):
58
+ places.append(command_path)
59
+
60
+ places.append(model_path)
61
+
62
+ for place in places:
63
+ for full_path in shared.walk_files(place, allowed_extensions=ext_filter):
64
+ if os.path.islink(full_path) and not os.path.exists(full_path):
65
+ print(f"Skipping broken symlink: {full_path}")
66
+ continue
67
+ if ext_blacklist is not None and any(full_path.endswith(x) for x in ext_blacklist):
68
+ continue
69
+ if full_path not in output:
70
+ output.append(full_path)
71
+
72
+ if model_url is not None and len(output) == 0:
73
+ if download_name is not None:
74
+ output.append(load_file_from_url(model_url, model_dir=places[0], file_name=download_name))
75
+ else:
76
+ output.append(model_url)
77
+
78
+ except Exception:
79
+ pass
80
+
81
+ return output
82
+
83
+
84
+ def friendly_name(file: str):
85
+ if file.startswith("http"):
86
+ file = urlparse(file).path
87
+
88
+ file = os.path.basename(file)
89
+ model_name, extension = os.path.splitext(file)
90
+ return model_name
91
+
92
+
93
+ def cleanup_models():
94
+ # This code could probably be more efficient if we used a tuple list or something to store the src/destinations
95
+ # and then enumerate that, but this works for now. In the future, it'd be nice to just have every "model" scaler
96
+ # somehow auto-register and just do these things...
97
+ root_path = script_path
98
+ src_path = models_path
99
+ dest_path = os.path.join(models_path, "Stable-diffusion")
100
+ move_files(src_path, dest_path, ".ckpt")
101
+ move_files(src_path, dest_path, ".safetensors")
102
+ src_path = os.path.join(root_path, "ESRGAN")
103
+ dest_path = os.path.join(models_path, "ESRGAN")
104
+ move_files(src_path, dest_path)
105
+ src_path = os.path.join(models_path, "BSRGAN")
106
+ dest_path = os.path.join(models_path, "ESRGAN")
107
+ move_files(src_path, dest_path, ".pth")
108
+ src_path = os.path.join(root_path, "gfpgan")
109
+ dest_path = os.path.join(models_path, "GFPGAN")
110
+ move_files(src_path, dest_path)
111
+ src_path = os.path.join(root_path, "SwinIR")
112
+ dest_path = os.path.join(models_path, "SwinIR")
113
+ move_files(src_path, dest_path)
114
+ src_path = os.path.join(root_path, "repositories/latent-diffusion/experiments/pretrained_models/")
115
+ dest_path = os.path.join(models_path, "LDSR")
116
+ move_files(src_path, dest_path)
117
+
118
+
119
+ def move_files(src_path: str, dest_path: str, ext_filter: str = None):
120
+ try:
121
+ os.makedirs(dest_path, exist_ok=True)
122
+ if os.path.exists(src_path):
123
+ for file in os.listdir(src_path):
124
+ fullpath = os.path.join(src_path, file)
125
+ if os.path.isfile(fullpath):
126
+ if ext_filter is not None:
127
+ if ext_filter not in file:
128
+ continue
129
+ print(f"Moving {file} from {src_path} to {dest_path}.")
130
+ try:
131
+ shutil.move(fullpath, dest_path)
132
+ except Exception:
133
+ pass
134
+ if len(os.listdir(src_path)) == 0:
135
+ print(f"Removing empty folder: {src_path}")
136
+ shutil.rmtree(src_path, True)
137
+ except Exception:
138
+ pass
139
+
140
+
141
+ def load_upscalers():
142
+ # We can only do this 'magic' method to dynamically load upscalers if they are referenced,
143
+ # so we'll try to import any _model.py files before looking in __subclasses__
144
+ modules_dir = os.path.join(shared.script_path, "modules")
145
+ for file in os.listdir(modules_dir):
146
+ if "_model.py" in file:
147
+ model_name = file.replace("_model.py", "")
148
+ full_model = f"modules.{model_name}_model"
149
+ try:
150
+ importlib.import_module(full_model)
151
+ except Exception:
152
+ pass
153
+
154
+ datas = []
155
+ commandline_options = vars(shared.cmd_opts)
156
+
157
+ # some of the upscaler classes will not go away after reloading their modules, and we'll end
158
+ # up with two copies of those classes. The newest copy will always be the last in the list,
159
+ # so we go from end to beginning and ignore duplicates
160
+ used_classes = {}
161
+ for cls in reversed(Upscaler.__subclasses__()):
162
+ classname = str(cls)
163
+ if classname not in used_classes:
164
+ used_classes[classname] = cls
165
+
166
+ for cls in reversed(used_classes.values()):
167
+ name = cls.__name__
168
+ cmd_name = f"{name.lower().replace('upscaler', '')}_models_path"
169
+ commandline_model_path = commandline_options.get(cmd_name, None)
170
+ scaler = cls(commandline_model_path)
171
+ scaler.user_path = commandline_model_path
172
+ scaler.model_download_path = commandline_model_path or scaler.model_path
173
+ datas += scaler.scalers
174
+
175
+ shared.sd_upscalers = sorted(
176
+ datas,
177
+ # Special case for UpscalerNone keeps it at the beginning of the list.
178
+ key=lambda x: x.name.lower() if not isinstance(x.scaler, (UpscalerNone, UpscalerLanczos, UpscalerNearest)) else ""
179
+ )
modules/models/diffusion/ddpm_edit.py ADDED
@@ -0,0 +1,1455 @@
1
+ """
2
+ wild mixture of
3
+ https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
4
+ https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
5
+ https://github.com/CompVis/taming-transformers
6
+ -- merci
7
+ """
8
+
9
+ # File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).
10
+ # See more details in LICENSE.
11
+
12
+ import torch
13
+ import torch.nn as nn
14
+ import numpy as np
15
+ import pytorch_lightning as pl
16
+ from torch.optim.lr_scheduler import LambdaLR
17
+ from einops import rearrange, repeat
18
+ from contextlib import contextmanager
19
+ from functools import partial
20
+ from tqdm import tqdm
21
+ from torchvision.utils import make_grid
22
+ from pytorch_lightning.utilities.distributed import rank_zero_only
23
+
24
+ from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
25
+ from ldm.modules.ema import LitEma
26
+ from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
27
+ from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
28
+ from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
29
+ from ldm.models.diffusion.ddim import DDIMSampler
30
+
31
+
32
+ __conditioning_keys__ = {'concat': 'c_concat',
33
+ 'crossattn': 'c_crossattn',
34
+ 'adm': 'y'}
35
+
36
+
37
+ def disabled_train(self, mode=True):
38
+ """Overwrite model.train with this function to make sure train/eval mode
39
+ does not change anymore."""
40
+ return self
41
+
42
+
43
+ def uniform_on_device(r1, r2, shape, device):
44
+ return (r1 - r2) * torch.rand(*shape, device=device) + r2
45
+
46
+
47
+ class DDPM(pl.LightningModule):
48
+ # classic DDPM with Gaussian diffusion, in image space
49
+ def __init__(self,
50
+ unet_config,
51
+ timesteps=1000,
52
+ beta_schedule="linear",
53
+ loss_type="l2",
54
+ ckpt_path=None,
55
+ ignore_keys=None,
56
+ load_only_unet=False,
57
+ monitor="val/loss",
58
+ use_ema=True,
59
+ first_stage_key="image",
60
+ image_size=256,
61
+ channels=3,
62
+ log_every_t=100,
63
+ clip_denoised=True,
64
+ linear_start=1e-4,
65
+ linear_end=2e-2,
66
+ cosine_s=8e-3,
67
+ given_betas=None,
68
+ original_elbo_weight=0.,
69
+ v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
70
+ l_simple_weight=1.,
71
+ conditioning_key=None,
72
+ parameterization="eps", # all assuming fixed variance schedules
73
+ scheduler_config=None,
74
+ use_positional_encodings=False,
75
+ learn_logvar=False,
76
+ logvar_init=0.,
77
+ load_ema=True,
78
+ ):
79
+ super().__init__()
80
+ assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"'
81
+ self.parameterization = parameterization
82
+ print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
83
+ self.cond_stage_model = None
84
+ self.clip_denoised = clip_denoised
85
+ self.log_every_t = log_every_t
86
+ self.first_stage_key = first_stage_key
87
+ self.image_size = image_size # try conv?
88
+ self.channels = channels
89
+ self.use_positional_encodings = use_positional_encodings
90
+ self.model = DiffusionWrapper(unet_config, conditioning_key)
91
+ count_params(self.model, verbose=True)
92
+ self.use_ema = use_ema
93
+
94
+ self.use_scheduler = scheduler_config is not None
95
+ if self.use_scheduler:
96
+ self.scheduler_config = scheduler_config
97
+
98
+ self.v_posterior = v_posterior
99
+ self.original_elbo_weight = original_elbo_weight
100
+ self.l_simple_weight = l_simple_weight
101
+
102
+ if monitor is not None:
103
+ self.monitor = monitor
104
+
105
+ if self.use_ema and load_ema:
106
+ self.model_ema = LitEma(self.model)
107
+ print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
108
+
109
+ if ckpt_path is not None:
110
+ self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [], only_model=load_only_unet)
111
+
112
+ # If initializing from an EMA-only checkpoint, create the EMA model after loading.
113
+ if self.use_ema and not load_ema:
114
+ self.model_ema = LitEma(self.model)
115
+ print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
116
+
117
+ self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
118
+ linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
119
+
120
+ self.loss_type = loss_type
121
+
122
+ self.learn_logvar = learn_logvar
123
+ self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
124
+ if self.learn_logvar:
125
+ self.logvar = nn.Parameter(self.logvar, requires_grad=True)
126
+
127
+
128
+ def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
129
+ linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
130
+ if exists(given_betas):
131
+ betas = given_betas
132
+ else:
133
+ betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
134
+ cosine_s=cosine_s)
135
+ alphas = 1. - betas
136
+ alphas_cumprod = np.cumprod(alphas, axis=0)
137
+ alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
138
+
139
+ timesteps, = betas.shape
140
+ self.num_timesteps = int(timesteps)
141
+ self.linear_start = linear_start
142
+ self.linear_end = linear_end
143
+ assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
144
+
145
+ to_torch = partial(torch.tensor, dtype=torch.float32)
146
+
147
+ self.register_buffer('betas', to_torch(betas))
148
+ self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
149
+ self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
150
+
151
+ # calculations for diffusion q(x_t | x_{t-1}) and others
152
+ self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
153
+ self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
154
+ self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
155
+ self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
156
+ self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
157
+
158
+ # calculations for posterior q(x_{t-1} | x_t, x_0)
159
+ posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
160
+ 1. - alphas_cumprod) + self.v_posterior * betas
161
+ # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
162
+ self.register_buffer('posterior_variance', to_torch(posterior_variance))
163
+ # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
164
+ self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
165
+ self.register_buffer('posterior_mean_coef1', to_torch(
166
+ betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
167
+ self.register_buffer('posterior_mean_coef2', to_torch(
168
+ (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
169
+
170
+ if self.parameterization == "eps":
171
+ lvlb_weights = self.betas ** 2 / (
172
+ 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
173
+ elif self.parameterization == "x0":
174
+ lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
175
+ else:
176
+ raise NotImplementedError("mu not supported")
177
+ # TODO how to choose this term
178
+ lvlb_weights[0] = lvlb_weights[1]
179
+ self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
180
+ assert not torch.isnan(self.lvlb_weights).all()
181
+
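# Illustrative sketch of the schedule quantities register_schedule() buffers above.
# Assumptions: plain numpy, and the default "linear" schedule which (in the ldm
# codebase this file derives from) is linear in sqrt(beta); this is a stand-in for
# inspection, not the actual make_beta_schedule implementation.
import numpy as np

timesteps, linear_start, linear_end = 1000, 1e-4, 2e-2
betas = np.linspace(linear_start ** 0.5, linear_end ** 0.5, timesteps) ** 2
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas)
alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])

# Closed-form forward marginal q(x_t | x_0) = N(sqrt(ab_t) * x_0, (1 - ab_t) * I)
sqrt_alphas_cumprod = np.sqrt(alphas_cumprod)
sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - alphas_cumprod)

# Posterior variance of q(x_{t-1} | x_t, x_0) with v_posterior = 0, as above.
posterior_variance = betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)

t = 750
print(f"alphabar_{t} = {alphas_cumprod[t]:.4f}, "
      f"posterior var = {posterior_variance[t]:.6f}")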
182
+ @contextmanager
183
+ def ema_scope(self, context=None):
184
+ if self.use_ema:
185
+ self.model_ema.store(self.model.parameters())
186
+ self.model_ema.copy_to(self.model)
187
+ if context is not None:
188
+ print(f"{context}: Switched to EMA weights")
189
+ try:
190
+ yield None
191
+ finally:
192
+ if self.use_ema:
193
+ self.model_ema.restore(self.model.parameters())
194
+ if context is not None:
195
+ print(f"{context}: Restored training weights")
196
+
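# Illustrative sketch (plain Python, not LitEma) of the swap pattern ema_scope()
# implements above: stash the live weights, copy the EMA weights in for evaluation,
# and restore the originals afterwards even if the body raises.
from contextlib import contextmanager


class TinyEmaScope:
    def __init__(self, live, ema):
        self.live, self.ema = live, ema  # dicts: parameter name -> value

    @contextmanager
    def ema_scope(self):
        backup = dict(self.live)         # model_ema.store(...)
        self.live.update(self.ema)       # model_ema.copy_to(...)
        try:
            yield
        finally:
            self.live.update(backup)     # model_ema.restore(...)


m = TinyEmaScope(live={"w": 1.0}, ema={"w": 0.9})
with m.ema_scope():
    assert m.live["w"] == 0.9            # evaluation sees EMA weights
assert m.live["w"] == 1.0                # training weights are restored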
197
+ def init_from_ckpt(self, path, ignore_keys=None, only_model=False):
198
+ ignore_keys = ignore_keys or []
199
+
200
+ sd = torch.load(path, map_location="cpu")
201
+ if "state_dict" in list(sd.keys()):
202
+ sd = sd["state_dict"]
203
+ keys = list(sd.keys())
204
+
205
+ # Our model adds additional channels to the first layer to condition on an input image.
206
+ # For the first layer, copy existing channel weights and initialize new channel weights to zero.
207
+ input_keys = [
208
+ "model.diffusion_model.input_blocks.0.0.weight",
209
+ "model_ema.diffusion_modelinput_blocks00weight",
210
+ ]
211
+
212
+ self_sd = self.state_dict()
213
+ for input_key in input_keys:
214
+ if input_key not in sd or input_key not in self_sd:
215
+ continue
216
+
217
+ input_weight = self_sd[input_key]
218
+
219
+ if input_weight.size() != sd[input_key].size():
220
+ print(f"Manual init: {input_key}")
221
+ input_weight.zero_()
222
+ input_weight[:, :4, :, :].copy_(sd[input_key])
223
+ ignore_keys.append(input_key)
224
+
225
+ for k in keys:
226
+ for ik in ignore_keys:
227
+ if k.startswith(ik):
228
+ print(f"Deleting key {k} from state_dict.")
229
+ del sd[k]
230
+ missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
231
+ sd, strict=False)
232
+ print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
233
+ if missing:
234
+ print(f"Missing Keys: {missing}")
235
+ if unexpected:
236
+ print(f"Unexpected Keys: {unexpected}")
237
+
238
+ def q_mean_variance(self, x_start, t):
239
+ """
240
+ Get the distribution q(x_t | x_0).
241
+ :param x_start: the [N x C x ...] tensor of noiseless inputs.
242
+ :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
243
+ :return: A tuple (mean, variance, log_variance), all of x_start's shape.
244
+ """
245
+ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
246
+ variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
247
+ log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
248
+ return mean, variance, log_variance
249
+
250
+ def predict_start_from_noise(self, x_t, t, noise):
251
+ return (
252
+ extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
253
+ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
254
+ )
255
+
256
+ def q_posterior(self, x_start, x_t, t):
257
+ posterior_mean = (
258
+ extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
259
+ extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
260
+ )
261
+ posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
262
+ posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
263
+ return posterior_mean, posterior_variance, posterior_log_variance_clipped
264
+
265
+ def p_mean_variance(self, x, t, clip_denoised: bool):
266
+ model_out = self.model(x, t)
267
+ if self.parameterization == "eps":
268
+ x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
269
+ elif self.parameterization == "x0":
270
+ x_recon = model_out
271
+ if clip_denoised:
272
+ x_recon.clamp_(-1., 1.)
273
+
274
+ model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
275
+ return model_mean, posterior_variance, posterior_log_variance
276
+
277
+ @torch.no_grad()
278
+ def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
279
+ b, *_, device = *x.shape, x.device
280
+ model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
281
+ noise = noise_like(x.shape, device, repeat_noise)
282
+ # no noise when t == 0
283
+ nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
284
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
285
+
286
+ @torch.no_grad()
287
+ def p_sample_loop(self, shape, return_intermediates=False):
288
+ device = self.betas.device
289
+ b = shape[0]
290
+ img = torch.randn(shape, device=device)
291
+ intermediates = [img]
292
+ for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
293
+ img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
294
+ clip_denoised=self.clip_denoised)
295
+ if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
296
+ intermediates.append(img)
297
+ if return_intermediates:
298
+ return img, intermediates
299
+ return img
300
+
301
+ @torch.no_grad()
302
+ def sample(self, batch_size=16, return_intermediates=False):
303
+ image_size = self.image_size
304
+ channels = self.channels
305
+ return self.p_sample_loop((batch_size, channels, image_size, image_size),
306
+ return_intermediates=return_intermediates)
307
+
308
+ def q_sample(self, x_start, t, noise=None):
309
+ noise = default(noise, lambda: torch.randn_like(x_start))
310
+ return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
311
+ extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
312
+
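# Illustrative round-trip check (numpy, independent of the class above): noising
# with q_sample's closed form and then applying predict_start_from_noise's formula
# with the true noise recovers x_0 exactly, since
#   x_t = sqrt(ab_t) * x_0 + sqrt(1 - ab_t) * eps
#   x_0 = sqrt(1/ab_t) * x_t - sqrt(1/ab_t - 1) * eps
import numpy as np

rng = np.random.default_rng(0)
ab_t = 0.37                               # stands in for alphas_cumprod[t]
x0 = rng.standard_normal((4, 4))
eps = rng.standard_normal((4, 4))

x_t = np.sqrt(ab_t) * x0 + np.sqrt(1.0 - ab_t) * eps
x0_rec = np.sqrt(1.0 / ab_t) * x_t - np.sqrt(1.0 / ab_t - 1.0) * eps
assert np.allclose(x0, x0_rec)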
313
+ def get_loss(self, pred, target, mean=True):
314
+ if self.loss_type == 'l1':
315
+ loss = (target - pred).abs()
316
+ if mean:
317
+ loss = loss.mean()
318
+ elif self.loss_type == 'l2':
319
+ if mean:
320
+ loss = torch.nn.functional.mse_loss(target, pred)
321
+ else:
322
+ loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
323
+ else:
324
+ raise NotImplementedError(f"unknown loss type '{self.loss_type}'")
325
+
326
+ return loss
327
+
328
+ def p_losses(self, x_start, t, noise=None):
329
+ noise = default(noise, lambda: torch.randn_like(x_start))
330
+ x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
331
+ model_out = self.model(x_noisy, t)
332
+
333
+ loss_dict = {}
334
+ if self.parameterization == "eps":
335
+ target = noise
336
+ elif self.parameterization == "x0":
337
+ target = x_start
338
+ else:
339
+ raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")
340
+
341
+ loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
342
+
343
+ log_prefix = 'train' if self.training else 'val'
344
+
345
+ loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
346
+ loss_simple = loss.mean() * self.l_simple_weight
347
+
348
+ loss_vlb = (self.lvlb_weights[t] * loss).mean()
349
+ loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})
350
+
351
+ loss = loss_simple + self.original_elbo_weight * loss_vlb
352
+
353
+ loss_dict.update({f'{log_prefix}/loss': loss})
354
+
355
+ return loss, loss_dict
356
+
357
+ def forward(self, x, *args, **kwargs):
358
+ # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
359
+ # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
360
+ t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
361
+ return self.p_losses(x, t, *args, **kwargs)
362
+
363
+ def get_input(self, batch, k):
364
+ return batch[k]
365
+
366
+ def shared_step(self, batch):
367
+ x = self.get_input(batch, self.first_stage_key)
368
+ loss, loss_dict = self(x)
369
+ return loss, loss_dict
370
+
371
+ def training_step(self, batch, batch_idx):
372
+ loss, loss_dict = self.shared_step(batch)
373
+
374
+ self.log_dict(loss_dict, prog_bar=True,
375
+ logger=True, on_step=True, on_epoch=True)
376
+
377
+ self.log("global_step", self.global_step,
378
+ prog_bar=True, logger=True, on_step=True, on_epoch=False)
379
+
380
+ if self.use_scheduler:
381
+ lr = self.optimizers().param_groups[0]['lr']
382
+ self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
383
+
384
+ return loss
385
+
386
+ @torch.no_grad()
387
+ def validation_step(self, batch, batch_idx):
388
+ _, loss_dict_no_ema = self.shared_step(batch)
389
+ with self.ema_scope():
390
+ _, loss_dict_ema = self.shared_step(batch)
391
+ loss_dict_ema = {f"{key}_ema": loss_dict_ema[key] for key in loss_dict_ema}
392
+ self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
393
+ self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
394
+
395
+ def on_train_batch_end(self, *args, **kwargs):
396
+ if self.use_ema:
397
+ self.model_ema(self.model)
398
+
399
+ def _get_rows_from_list(self, samples):
400
+ n_imgs_per_row = len(samples)
401
+ denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
402
+ denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
403
+ denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
404
+ return denoise_grid
405
+
406
+ @torch.no_grad()
407
+ def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
408
+ log = {}
409
+ x = self.get_input(batch, self.first_stage_key)
410
+ N = min(x.shape[0], N)
411
+ n_row = min(x.shape[0], n_row)
412
+ x = x.to(self.device)[:N]
413
+ log["inputs"] = x
414
+
415
+ # get diffusion row
416
+ diffusion_row = []
417
+ x_start = x[:n_row]
418
+
419
+ for t in range(self.num_timesteps):
420
+ if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
421
+ t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
422
+ t = t.to(self.device).long()
423
+ noise = torch.randn_like(x_start)
424
+ x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
425
+ diffusion_row.append(x_noisy)
426
+
427
+ log["diffusion_row"] = self._get_rows_from_list(diffusion_row)
428
+
429
+ if sample:
430
+ # get denoise row
431
+ with self.ema_scope("Plotting"):
432
+ samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)
433
+
434
+ log["samples"] = samples
435
+ log["denoise_row"] = self._get_rows_from_list(denoise_row)
436
+
437
+ if return_keys:
438
+ if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
439
+ return log
440
+ else:
441
+ return {key: log[key] for key in return_keys}
442
+ return log
443
+
444
+ def configure_optimizers(self):
445
+ lr = self.learning_rate
446
+ params = list(self.model.parameters())
447
+ if self.learn_logvar:
448
+ params = params + [self.logvar]
449
+ opt = torch.optim.AdamW(params, lr=lr)
450
+ return opt
451
+
452
+
453
+ class LatentDiffusion(DDPM):
454
+ """main class"""
455
+ def __init__(self,
456
+ first_stage_config,
457
+ cond_stage_config,
458
+ num_timesteps_cond=None,
459
+ cond_stage_key="image",
460
+ cond_stage_trainable=False,
461
+ concat_mode=True,
462
+ cond_stage_forward=None,
463
+ conditioning_key=None,
464
+ scale_factor=1.0,
465
+ scale_by_std=False,
466
+ load_ema=True,
467
+ *args, **kwargs):
468
+ self.num_timesteps_cond = default(num_timesteps_cond, 1)
469
+ self.scale_by_std = scale_by_std
470
+ assert self.num_timesteps_cond <= kwargs['timesteps']
471
+ # for backwards compatibility after implementation of DiffusionWrapper
472
+ if conditioning_key is None:
473
+ conditioning_key = 'concat' if concat_mode else 'crossattn'
474
+ if cond_stage_config == '__is_unconditional__':
475
+ conditioning_key = None
476
+ ckpt_path = kwargs.pop("ckpt_path", None)
477
+ ignore_keys = kwargs.pop("ignore_keys", [])
478
+ super().__init__(*args, conditioning_key=conditioning_key, load_ema=load_ema, **kwargs)
479
+ self.concat_mode = concat_mode
480
+ self.cond_stage_trainable = cond_stage_trainable
481
+ self.cond_stage_key = cond_stage_key
482
+ try:
483
+ self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
484
+ except Exception:
485
+ self.num_downs = 0
486
+ if not scale_by_std:
487
+ self.scale_factor = scale_factor
488
+ else:
489
+ self.register_buffer('scale_factor', torch.tensor(scale_factor))
490
+ self.instantiate_first_stage(first_stage_config)
491
+ self.instantiate_cond_stage(cond_stage_config)
492
+ self.cond_stage_forward = cond_stage_forward
493
+ self.clip_denoised = False
494
+ self.bbox_tokenizer = None
495
+
496
+ self.restarted_from_ckpt = False
497
+ if ckpt_path is not None:
498
+ self.init_from_ckpt(ckpt_path, ignore_keys)
499
+ self.restarted_from_ckpt = True
500
+
501
+ if self.use_ema and not load_ema:
502
+ self.model_ema = LitEma(self.model)
503
+ print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
504
+
505
+ def make_cond_schedule(self, ):
506
+ self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
507
+ ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
508
+ self.cond_ids[:self.num_timesteps_cond] = ids
509
+
510
+ @rank_zero_only
511
+ @torch.no_grad()
512
+ def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
513
+ # only for very first batch
514
+ if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
515
+ assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
516
+ # set rescale weight to 1./std of encodings
517
+ print("### USING STD-RESCALING ###")
518
+ x = super().get_input(batch, self.first_stage_key)
519
+ x = x.to(self.device)
520
+ encoder_posterior = self.encode_first_stage(x)
521
+ z = self.get_first_stage_encoding(encoder_posterior).detach()
522
+ del self.scale_factor
523
+ self.register_buffer('scale_factor', 1. / z.flatten().std())
524
+ print(f"setting self.scale_factor to {self.scale_factor}")
525
+ print("### USING STD-RESCALING ###")
526
+
527
+ def register_schedule(self,
528
+ given_betas=None, beta_schedule="linear", timesteps=1000,
529
+ linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
530
+ super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
531
+
532
+ self.shorten_cond_schedule = self.num_timesteps_cond > 1
533
+ if self.shorten_cond_schedule:
534
+ self.make_cond_schedule()
535
+
536
+ def instantiate_first_stage(self, config):
537
+ model = instantiate_from_config(config)
538
+ self.first_stage_model = model.eval()
539
+ self.first_stage_model.train = disabled_train
540
+ for param in self.first_stage_model.parameters():
541
+ param.requires_grad = False
542
+
543
+ def instantiate_cond_stage(self, config):
544
+ if not self.cond_stage_trainable:
545
+ if config == "__is_first_stage__":
546
+ print("Using first stage also as cond stage.")
547
+ self.cond_stage_model = self.first_stage_model
548
+ elif config == "__is_unconditional__":
549
+ print(f"Training {self.__class__.__name__} as an unconditional model.")
550
+ self.cond_stage_model = None
551
+ # self.be_unconditional = True
552
+ else:
553
+ model = instantiate_from_config(config)
554
+ self.cond_stage_model = model.eval()
555
+ self.cond_stage_model.train = disabled_train
556
+ for param in self.cond_stage_model.parameters():
557
+ param.requires_grad = False
558
+ else:
559
+ assert config != '__is_first_stage__'
560
+ assert config != '__is_unconditional__'
561
+ model = instantiate_from_config(config)
562
+ self.cond_stage_model = model
563
+
564
+ def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
565
+ denoise_row = []
566
+ for zd in tqdm(samples, desc=desc):
567
+ denoise_row.append(self.decode_first_stage(zd.to(self.device),
568
+ force_not_quantize=force_no_decoder_quantization))
569
+ n_imgs_per_row = len(denoise_row)
570
+ denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
571
+ denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
572
+ denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
573
+ denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
574
+ return denoise_grid
575
+
576
+ def get_first_stage_encoding(self, encoder_posterior):
577
+ if isinstance(encoder_posterior, DiagonalGaussianDistribution):
578
+ z = encoder_posterior.sample()
579
+ elif isinstance(encoder_posterior, torch.Tensor):
580
+ z = encoder_posterior
581
+ else:
582
+ raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
583
+ return self.scale_factor * z
584
+
585
+ def get_learned_conditioning(self, c):
586
+ if self.cond_stage_forward is None:
587
+ if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
588
+ c = self.cond_stage_model.encode(c)
589
+ if isinstance(c, DiagonalGaussianDistribution):
590
+ c = c.mode()
591
+ else:
592
+ c = self.cond_stage_model(c)
593
+ else:
594
+ assert hasattr(self.cond_stage_model, self.cond_stage_forward)
595
+ c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
596
+ return c
597
+
598
+ def meshgrid(self, h, w):
599
+ y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
600
+ x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
601
+
602
+ arr = torch.cat([y, x], dim=-1)
603
+ return arr
604
+
605
+ def delta_border(self, h, w):
606
+ """
607
+ :param h: height
608
+ :param w: width
609
+ :return: normalized distance to image border,
610
+ with min distance = 0 at border and max dist = 0.5 at image center
611
+ """
612
+ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
613
+ arr = self.meshgrid(h, w) / lower_right_corner
614
+ dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
615
+ dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
616
+ edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
617
+ return edge_dist
618
+
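# Illustrative numpy re-derivation of delta_border() above: normalized distance to
# the nearest image border, 0 on the edges and 0.5 at the exact center.  This is
# the map get_weighting() clips and tiles to down-weight patch borders.
import numpy as np

h = w = 5
yy, xx = np.meshgrid(np.arange(h), np.arange(w), indexing="ij")
arr = np.stack([yy, xx], axis=-1) / np.array([h - 1, w - 1])
dist_left_up = arr.min(axis=-1)
dist_right_down = (1 - arr).min(axis=-1)
edge_dist = np.minimum(dist_left_up, dist_right_down)

print(edge_dist)  # 0.0 along the border, 0.5 at the center pixel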
619
+ def get_weighting(self, h, w, Ly, Lx, device):
620
+ weighting = self.delta_border(h, w)
621
+ weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
622
+ self.split_input_params["clip_max_weight"], )
623
+ weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
624
+
625
+ if self.split_input_params["tie_braker"]:
626
+ L_weighting = self.delta_border(Ly, Lx)
627
+ L_weighting = torch.clip(L_weighting,
628
+ self.split_input_params["clip_min_tie_weight"],
629
+ self.split_input_params["clip_max_tie_weight"])
630
+
631
+ L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
632
+ weighting = weighting * L_weighting
633
+ return weighting
634
+
635
+ def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code
636
+ """
637
+ :param x: img of size (bs, c, h, w)
638
+ :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
639
+ """
640
+ bs, nc, h, w = x.shape
641
+
642
+ # number of crops in image
643
+ Ly = (h - kernel_size[0]) // stride[0] + 1
644
+ Lx = (w - kernel_size[1]) // stride[1] + 1
645
+
646
+ if uf == 1 and df == 1:
647
+ fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
648
+ unfold = torch.nn.Unfold(**fold_params)
649
+
650
+ fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
651
+
652
+ weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
653
+ normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap
654
+ weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
655
+
656
+ elif uf > 1 and df == 1:
657
+ fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
658
+ unfold = torch.nn.Unfold(**fold_params)
659
+
660
+ fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
661
+ dilation=1, padding=0,
662
+ stride=(stride[0] * uf, stride[1] * uf))
663
+ fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
664
+
665
+ weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
666
+ normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap
667
+ weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
668
+
669
+ elif df > 1 and uf == 1:
670
+ fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
671
+ unfold = torch.nn.Unfold(**fold_params)
672
+
673
+ fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
674
+ dilation=1, padding=0,
675
+ stride=(stride[0] // df, stride[1] // df))
676
+ fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
677
+
678
+ weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
679
+ normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap
680
+ weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
681
+
682
+ else:
683
+ raise NotImplementedError
684
+
685
+ return fold, unfold, normalization, weighting
686
+
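# Illustrative sketch of the crop-stitching that get_fold_unfold() above enables:
# Unfold cuts overlapping patches, Fold sums them back, and dividing by the folded
# weighting (the "normalization") turns the sums over overlaps into averages.
# Plain torch with a uniform weighting; the real code uses the border weighting.
import torch

x = torch.arange(64, dtype=torch.float32).view(1, 1, 8, 8)
ks, stride = (4, 4), (2, 2)

unfold = torch.nn.Unfold(kernel_size=ks, stride=stride)
fold = torch.nn.Fold(output_size=(8, 8), kernel_size=ks, stride=stride)

patches = unfold(x)                  # (1, 1*4*4, L) overlapping crops
weighting = torch.ones_like(patches)
normalization = fold(weighting)      # how many crops cover each pixel

x_rec = fold(patches * weighting) / normalization
assert torch.allclose(x_rec, x)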
687
+ @torch.no_grad()
688
+ def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
689
+ cond_key=None, return_original_cond=False, bs=None, uncond=0.05):
690
+ x = super().get_input(batch, k)
691
+ if bs is not None:
692
+ x = x[:bs]
693
+ x = x.to(self.device)
694
+ encoder_posterior = self.encode_first_stage(x)
695
+ z = self.get_first_stage_encoding(encoder_posterior).detach()
696
+ cond_key = cond_key or self.cond_stage_key
697
+ xc = super().get_input(batch, cond_key)
698
+ if bs is not None:
699
+ xc["c_crossattn"] = xc["c_crossattn"][:bs]
700
+ xc["c_concat"] = xc["c_concat"][:bs]
701
+ cond = {}
702
+
703
+ # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%.
704
+ random = torch.rand(x.size(0), device=x.device)
705
+ prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1")
706
+ input_mask = 1 - rearrange((random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1")
707
+
708
+ null_prompt = self.get_learned_conditioning([""])
709
+ cond["c_crossattn"] = [torch.where(prompt_mask, null_prompt, self.get_learned_conditioning(xc["c_crossattn"]).detach())]
710
+ cond["c_concat"] = [input_mask * self.encode_first_stage((xc["c_concat"].to(self.device))).mode().detach()]
711
+
712
+ out = [z, cond]
713
+ if return_first_stage_outputs:
714
+ xrec = self.decode_first_stage(z)
715
+ out.extend([x, xrec])
716
+ if return_original_cond:
717
+ out.append(xc)
718
+ return out
719
+
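# Illustrative sketch of the conditioning dropout performed in get_input() above,
# numpy only, with uncond=0.05 as in the signature.  For r ~ U(0, 1):
#   r < uncond               -> drop the text prompt only
#   uncond <= r < 2*uncond   -> drop both text and image
#   2*uncond <= r < 3*uncond -> drop the image only
#   r >= 3*uncond            -> keep both
import numpy as np

uncond = 0.05
r = np.random.default_rng(0).random(100_000)
drop_text = r < 2 * uncond                     # prompt_mask in the code above
drop_image = (r >= uncond) & (r < 3 * uncond)  # complement of input_mask above

print(f"text only  ~{(drop_text & ~drop_image).mean():.3f}")
print(f"image only ~{(~drop_text & drop_image).mean():.3f}")
print(f"both       ~{(drop_text & drop_image).mean():.3f}")
# each is approximately 0.05, matching the 5% / 5% / 5% comment in get_input()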
720
+ @torch.no_grad()
721
+ def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
722
+ if predict_cids:
723
+ if z.dim() == 4:
724
+ z = torch.argmax(z.exp(), dim=1).long()
725
+ z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
726
+ z = rearrange(z, 'b h w c -> b c h w').contiguous()
727
+
728
+ z = 1. / self.scale_factor * z
729
+
730
+ if hasattr(self, "split_input_params"):
731
+ if self.split_input_params["patch_distributed_vq"]:
732
+ ks = self.split_input_params["ks"] # eg. (128, 128)
733
+ stride = self.split_input_params["stride"] # eg. (64, 64)
734
+ uf = self.split_input_params["vqf"]
735
+ bs, nc, h, w = z.shape
736
+ if ks[0] > h or ks[1] > w:
737
+ ks = (min(ks[0], h), min(ks[1], w))
738
+ print("reducing Kernel")
739
+
740
+ if stride[0] > h or stride[1] > w:
741
+ stride = (min(stride[0], h), min(stride[1], w))
742
+ print("reducing stride")
743
+
744
+ fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
745
+
746
+ z = unfold(z) # (bn, nc * prod(**ks), L)
747
+ # 1. Reshape to img shape
748
+ z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
749
+
750
+ # 2. apply model loop over last dim
751
+ if isinstance(self.first_stage_model, VQModelInterface):
752
+ output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
753
+ force_not_quantize=predict_cids or force_not_quantize)
754
+ for i in range(z.shape[-1])]
755
+ else:
756
+
757
+ output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
758
+ for i in range(z.shape[-1])]
759
+
760
+ o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)
761
+ o = o * weighting
762
+ # Reverse 1. reshape to img shape
763
+ o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
764
+ # stitch crops together
765
+ decoded = fold(o)
766
+ decoded = decoded / normalization # norm is shape (1, 1, h, w)
767
+ return decoded
768
+ else:
769
+ if isinstance(self.first_stage_model, VQModelInterface):
770
+ return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
771
+ else:
772
+ return self.first_stage_model.decode(z)
773
+
774
+ else:
775
+ if isinstance(self.first_stage_model, VQModelInterface):
776
+ return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
777
+ else:
778
+ return self.first_stage_model.decode(z)
779
+
780
+ # same as above but without decorator
781
+ def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
782
+ if predict_cids:
783
+ if z.dim() == 4:
784
+ z = torch.argmax(z.exp(), dim=1).long()
785
+ z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
786
+ z = rearrange(z, 'b h w c -> b c h w').contiguous()
787
+
788
+ z = 1. / self.scale_factor * z
789
+
790
+ if hasattr(self, "split_input_params"):
791
+ if self.split_input_params["patch_distributed_vq"]:
792
+ ks = self.split_input_params["ks"] # eg. (128, 128)
793
+ stride = self.split_input_params["stride"] # eg. (64, 64)
794
+ uf = self.split_input_params["vqf"]
795
+ bs, nc, h, w = z.shape
796
+ if ks[0] > h or ks[1] > w:
797
+ ks = (min(ks[0], h), min(ks[1], w))
798
+ print("reducing Kernel")
799
+
800
+ if stride[0] > h or stride[1] > w:
801
+ stride = (min(stride[0], h), min(stride[1], w))
802
+ print("reducing stride")
803
+
804
+ fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
805
+
806
+ z = unfold(z) # (bn, nc * prod(**ks), L)
807
+ # 1. Reshape to img shape
808
+ z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
809
+
810
+ # 2. apply model loop over last dim
811
+ if isinstance(self.first_stage_model, VQModelInterface):
812
+ output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
813
+ force_not_quantize=predict_cids or force_not_quantize)
814
+ for i in range(z.shape[-1])]
815
+ else:
816
+
817
+ output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
818
+ for i in range(z.shape[-1])]
819
+
820
+ o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)
821
+ o = o * weighting
822
+ # Reverse 1. reshape to img shape
823
+ o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
824
+ # stitch crops together
825
+ decoded = fold(o)
826
+ decoded = decoded / normalization # norm is shape (1, 1, h, w)
827
+ return decoded
828
+ else:
829
+ if isinstance(self.first_stage_model, VQModelInterface):
830
+ return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
831
+ else:
832
+ return self.first_stage_model.decode(z)
833
+
834
+ else:
835
+ if isinstance(self.first_stage_model, VQModelInterface):
836
+ return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
837
+ else:
838
+ return self.first_stage_model.decode(z)
839
+
840
+ @torch.no_grad()
841
+ def encode_first_stage(self, x):
842
+ if hasattr(self, "split_input_params"):
843
+ if self.split_input_params["patch_distributed_vq"]:
844
+ ks = self.split_input_params["ks"] # eg. (128, 128)
845
+ stride = self.split_input_params["stride"] # eg. (64, 64)
846
+ df = self.split_input_params["vqf"]
847
+ self.split_input_params['original_image_size'] = x.shape[-2:]
848
+ bs, nc, h, w = x.shape
849
+ if ks[0] > h or ks[1] > w:
850
+ ks = (min(ks[0], h), min(ks[1], w))
851
+ print("reducing Kernel")
852
+
853
+ if stride[0] > h or stride[1] > w:
854
+ stride = (min(stride[0], h), min(stride[1], w))
855
+ print("reducing stride")
856
+
857
+ fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df)
858
+ z = unfold(x) # (bn, nc * prod(**ks), L)
859
+ # Reshape to img shape
860
+ z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
861
+
862
+ output_list = [self.first_stage_model.encode(z[:, :, :, :, i])
863
+ for i in range(z.shape[-1])]
864
+
865
+ o = torch.stack(output_list, axis=-1)
866
+ o = o * weighting
867
+
868
+ # Reverse reshape to img shape
869
+ o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
870
+ # stitch crops together
871
+ decoded = fold(o)
872
+ decoded = decoded / normalization
873
+ return decoded
874
+
875
+ else:
876
+ return self.first_stage_model.encode(x)
877
+ else:
878
+ return self.first_stage_model.encode(x)
879
+
880
+ def shared_step(self, batch, **kwargs):
881
+ x, c = self.get_input(batch, self.first_stage_key)
882
+ loss = self(x, c)
883
+ return loss
884
+
885
+ def forward(self, x, c, *args, **kwargs):
886
+ t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
887
+ if self.model.conditioning_key is not None:
888
+ assert c is not None
889
+ if self.cond_stage_trainable:
890
+ c = self.get_learned_conditioning(c)
891
+ if self.shorten_cond_schedule: # TODO: drop this option
892
+ tc = self.cond_ids[t].to(self.device)
893
+ c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
894
+ return self.p_losses(x, c, t, *args, **kwargs)
895
+
896
+ def apply_model(self, x_noisy, t, cond, return_ids=False):
897
+
898
+ if isinstance(cond, dict):
899
+ # hybrid case, cond is expected to be a dict
900
+ pass
901
+ else:
902
+ if not isinstance(cond, list):
903
+ cond = [cond]
904
+ key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
905
+ cond = {key: cond}
906
+
907
+ if hasattr(self, "split_input_params"):
908
+ assert len(cond) == 1 # todo can only deal with one conditioning atm
909
+ assert not return_ids
910
+ ks = self.split_input_params["ks"] # eg. (128, 128)
911
+ stride = self.split_input_params["stride"] # eg. (64, 64)
912
+
913
+ h, w = x_noisy.shape[-2:]
914
+
915
+ fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)
916
+
917
+ z = unfold(x_noisy) # (bn, nc * prod(**ks), L)
918
+ # Reshape to img shape
919
+ z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
920
+ z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]
921
+
922
+ if self.cond_stage_key in ["image", "LR_image", "segmentation",
923
+ 'bbox_img'] and self.model.conditioning_key: # todo check for completeness
924
+ c_key = next(iter(cond.keys())) # get key
925
+ c = next(iter(cond.values())) # get value
926
+ assert (len(c) == 1) # todo extend to list with more than one elem
927
+ c = c[0] # get element
928
+
929
+ c = unfold(c)
930
+ c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L )
931
+
932
+ cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]
933
+
934
+ elif self.cond_stage_key == 'coordinates_bbox':
935
+ assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'
936
+
937
+ # assuming padding of unfold is always 0 and its dilation is always 1
938
+ n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
939
+ full_img_h, full_img_w = self.split_input_params['original_image_size']
940
+ # as we are operating on latents, we need the factor from the original image size to the
941
+ # spatial latent size to properly rescale the crops for regenerating the bbox annotations
942
+ num_downs = self.first_stage_model.encoder.num_resolutions - 1
943
+ rescale_latent = 2 ** (num_downs)
944
+
945
+ # get top-left positions of the patches in the form the bbox tokenizer expects, therefore we
946
+ # need to rescale the tl patch coordinates to be in between (0,1)
947
+ tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
948
+ rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
949
+ for patch_nr in range(z.shape[-1])]
950
+
951
+ # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
952
+ patch_limits = [(x_tl, y_tl,
953
+ rescale_latent * ks[0] / full_img_w,
954
+ rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
955
+ # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]
956
+
957
+ # tokenize crop coordinates for the bounding boxes of the respective patches
958
+ patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
959
+ for bbox in patch_limits] # list of length l with tensors of shape (1, 2)
960
+ print(patch_limits_tknzd[0].shape)
961
+ # cut tknzd crop position from conditioning
962
+ assert isinstance(cond, dict), 'cond must be dict to be fed into model'
963
+ cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
964
+ print(cut_cond.shape)
965
+
966
+ adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
967
+ adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
968
+ print(adapted_cond.shape)
969
+ adapted_cond = self.get_learned_conditioning(adapted_cond)
970
+ print(adapted_cond.shape)
971
+ adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
972
+ print(adapted_cond.shape)
973
+
974
+ cond_list = [{'c_crossattn': [e]} for e in adapted_cond]
975
+
976
+ else:
977
+ cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient
978
+
979
+ # apply model by loop over crops
980
+ output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
981
+ assert not isinstance(output_list[0],
982
+ tuple) # todo: can't deal with multiple model outputs; check this never happens
983
+
984
+ o = torch.stack(output_list, axis=-1)
985
+ o = o * weighting
986
+ # Reverse reshape to img shape
987
+ o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
988
+ # stitch crops together
989
+ x_recon = fold(o) / normalization
990
+
991
+ else:
992
+ x_recon = self.model(x_noisy, t, **cond)
993
+
994
+ if isinstance(x_recon, tuple) and not return_ids:
995
+ return x_recon[0]
996
+ else:
997
+ return x_recon
998
+
999
+ def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
1000
+ return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
1001
+ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
1002
+
1003
+ def _prior_bpd(self, x_start):
1004
+ """
1005
+ Get the prior KL term for the variational lower-bound, measured in
1006
+ bits-per-dim.
1007
+ This term can't be optimized, as it only depends on the encoder.
1008
+ :param x_start: the [N x C x ...] tensor of inputs.
1009
+ :return: a batch of [N] KL values (in bits), one per batch element.
1010
+ """
1011
+ batch_size = x_start.shape[0]
1012
+ t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
1013
+ qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
1014
+ kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
1015
+ return mean_flat(kl_prior) / np.log(2.0)
1016
+
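# Illustrative sketch of the bits-per-dim prior term computed by _prior_bpd() above:
# KL( N(mean, exp(logvar)) || N(0, 1) ) per element, converted from nats to bits.
# numpy only; this mirrors the closed form normal_kl evaluates with mean2=0, logvar2=0.
import numpy as np


def kl_to_standard_normal(mean, logvar):
    # 0.5 * (-1 - logvar + mean^2 + exp(logvar)), in nats per element
    return 0.5 * (-1.0 - logvar + mean ** 2 + np.exp(logvar))


mean = np.array([0.1, -0.2, 0.0])
logvar = np.array([-0.05, -0.05, -0.05])
bits_per_dim = kl_to_standard_normal(mean, logvar).mean() / np.log(2.0)
print(f"prior term ~ {bits_per_dim:.5f} bits/dim")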
1017
+ def p_losses(self, x_start, cond, t, noise=None):
1018
+ noise = default(noise, lambda: torch.randn_like(x_start))
1019
+ x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
1020
+ model_output = self.apply_model(x_noisy, t, cond)
1021
+
1022
+ loss_dict = {}
1023
+ prefix = 'train' if self.training else 'val'
1024
+
1025
+ if self.parameterization == "x0":
1026
+ target = x_start
1027
+ elif self.parameterization == "eps":
1028
+ target = noise
1029
+ else:
1030
+ raise NotImplementedError()
1031
+
1032
+ loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
1033
+ loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
1034
+
1035
+ logvar_t = self.logvar[t].to(self.device)
1036
+ loss = loss_simple / torch.exp(logvar_t) + logvar_t
1037
+ # loss = loss_simple / torch.exp(self.logvar) + self.logvar
1038
+ if self.learn_logvar:
1039
+ loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
1040
+ loss_dict.update({'logvar': self.logvar.data.mean()})
1041
+
1042
+ loss = self.l_simple_weight * loss.mean()
1043
+
1044
+ loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
1045
+ loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
1046
+ loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
1047
+ loss += (self.original_elbo_weight * loss_vlb)
1048
+ loss_dict.update({f'{prefix}/loss': loss})
1049
+
1050
+ return loss, loss_dict
1051
+
1052
+ def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
1053
+ return_x0=False, score_corrector=None, corrector_kwargs=None):
1054
+ t_in = t
1055
+ model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
1056
+
1057
+ if score_corrector is not None:
1058
+ assert self.parameterization == "eps"
1059
+ model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
1060
+
1061
+ if return_codebook_ids:
1062
+ model_out, logits = model_out
1063
+
1064
+ if self.parameterization == "eps":
1065
+ x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
1066
+ elif self.parameterization == "x0":
1067
+ x_recon = model_out
1068
+ else:
1069
+ raise NotImplementedError()
1070
+
1071
+ if clip_denoised:
1072
+ x_recon.clamp_(-1., 1.)
1073
+ if quantize_denoised:
1074
+ x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
1075
+ model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
1076
+ if return_codebook_ids:
1077
+ return model_mean, posterior_variance, posterior_log_variance, logits
1078
+ elif return_x0:
1079
+ return model_mean, posterior_variance, posterior_log_variance, x_recon
1080
+ else:
1081
+ return model_mean, posterior_variance, posterior_log_variance
1082
+
1083
+ @torch.no_grad()
1084
+ def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
1085
+ return_codebook_ids=False, quantize_denoised=False, return_x0=False,
1086
+ temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
1087
+ b, *_, device = *x.shape, x.device
1088
+ outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
1089
+ return_codebook_ids=return_codebook_ids,
1090
+ quantize_denoised=quantize_denoised,
1091
+ return_x0=return_x0,
1092
+ score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
1093
+ if return_codebook_ids:
1094
+ raise DeprecationWarning("Support dropped.")
1095
+ model_mean, _, model_log_variance, logits = outputs
1096
+ elif return_x0:
1097
+ model_mean, _, model_log_variance, x0 = outputs
1098
+ else:
1099
+ model_mean, _, model_log_variance = outputs
1100
+
1101
+ noise = noise_like(x.shape, device, repeat_noise) * temperature
1102
+ if noise_dropout > 0.:
1103
+ noise = torch.nn.functional.dropout(noise, p=noise_dropout)
1104
+ # no noise when t == 0
1105
+ nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
1106
+
1107
+ if return_codebook_ids:
1108
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
1109
+ if return_x0:
1110
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
1111
+ else:
1112
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
1113
+
1114
+ @torch.no_grad()
1115
+ def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
1116
+ img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
1117
+ score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
1118
+ log_every_t=None):
1119
+ if not log_every_t:
1120
+ log_every_t = self.log_every_t
1121
+ timesteps = self.num_timesteps
1122
+ if batch_size is not None:
1123
+ b = batch_size if batch_size is not None else shape[0]
1124
+ shape = [batch_size] + list(shape)
1125
+ else:
1126
+ b = batch_size = shape[0]
1127
+ if x_T is None:
1128
+ img = torch.randn(shape, device=self.device)
1129
+ else:
1130
+ img = x_T
1131
+ intermediates = []
1132
+ if cond is not None:
1133
+ if isinstance(cond, dict):
1134
+ cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
1135
+ [x[:batch_size] for x in cond[key]] for key in cond}
1136
+ else:
1137
+ cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
1138
+
1139
+ if start_T is not None:
1140
+ timesteps = min(timesteps, start_T)
1141
+ iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
1142
+ total=timesteps) if verbose else reversed(
1143
+ range(0, timesteps))
1144
+ if type(temperature) == float:
1145
+ temperature = [temperature] * timesteps
1146
+
1147
+ for i in iterator:
1148
+ ts = torch.full((b,), i, device=self.device, dtype=torch.long)
1149
+ if self.shorten_cond_schedule:
1150
+ assert self.model.conditioning_key != 'hybrid'
1151
+ tc = self.cond_ids[ts].to(cond.device)
1152
+ cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
1153
+
1154
+ img, x0_partial = self.p_sample(img, cond, ts,
1155
+ clip_denoised=self.clip_denoised,
1156
+ quantize_denoised=quantize_denoised, return_x0=True,
1157
+ temperature=temperature[i], noise_dropout=noise_dropout,
1158
+ score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
1159
+ if mask is not None:
1160
+ assert x0 is not None
1161
+ img_orig = self.q_sample(x0, ts)
1162
+ img = img_orig * mask + (1. - mask) * img
1163
+
1164
+ if i % log_every_t == 0 or i == timesteps - 1:
1165
+ intermediates.append(x0_partial)
1166
+ if callback:
1167
+ callback(i)
1168
+ if img_callback:
1169
+ img_callback(img, i)
1170
+ return img, intermediates
1171
+
1172
+ @torch.no_grad()
1173
+ def p_sample_loop(self, cond, shape, return_intermediates=False,
1174
+ x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
1175
+ mask=None, x0=None, img_callback=None, start_T=None,
1176
+ log_every_t=None):
1177
+
1178
+ if not log_every_t:
1179
+ log_every_t = self.log_every_t
1180
+ device = self.betas.device
1181
+ b = shape[0]
1182
+ if x_T is None:
1183
+ img = torch.randn(shape, device=device)
1184
+ else:
1185
+ img = x_T
1186
+
1187
+ intermediates = [img]
1188
+ if timesteps is None:
1189
+ timesteps = self.num_timesteps
1190
+
1191
+ if start_T is not None:
1192
+ timesteps = min(timesteps, start_T)
1193
+ iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
1194
+ range(0, timesteps))
1195
+
1196
+ if mask is not None:
1197
+ assert x0 is not None
1198
+ assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match
1199
+
1200
+ for i in iterator:
1201
+ ts = torch.full((b,), i, device=device, dtype=torch.long)
1202
+ if self.shorten_cond_schedule:
1203
+ assert self.model.conditioning_key != 'hybrid'
1204
+ tc = self.cond_ids[ts].to(cond.device)
1205
+ cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
1206
+
1207
+ img = self.p_sample(img, cond, ts,
1208
+ clip_denoised=self.clip_denoised,
1209
+ quantize_denoised=quantize_denoised)
1210
+ if mask is not None:
1211
+ img_orig = self.q_sample(x0, ts)
1212
+ img = img_orig * mask + (1. - mask) * img
1213
+
1214
+ if i % log_every_t == 0 or i == timesteps - 1:
1215
+ intermediates.append(img)
1216
+ if callback:
1217
+ callback(i)
1218
+ if img_callback:
1219
+ img_callback(img, i)
1220
+
1221
+ if return_intermediates:
1222
+ return img, intermediates
1223
+ return img
1224
+
1225
+ @torch.no_grad()
1226
+ def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
1227
+ verbose=True, timesteps=None, quantize_denoised=False,
1228
+ mask=None, x0=None, shape=None,**kwargs):
1229
+ if shape is None:
1230
+ shape = (batch_size, self.channels, self.image_size, self.image_size)
1231
+ if cond is not None:
1232
+ if isinstance(cond, dict):
1233
+ cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
1234
+ [x[:batch_size] for x in cond[key]] for key in cond}
1235
+ else:
1236
+ cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
1237
+ return self.p_sample_loop(cond,
1238
+ shape,
1239
+ return_intermediates=return_intermediates, x_T=x_T,
1240
+ verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
1241
+ mask=mask, x0=x0)
1242
+
1243
+ @torch.no_grad()
1244
+ def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs):
1245
+
1246
+ if ddim:
1247
+ ddim_sampler = DDIMSampler(self)
1248
+ shape = (self.channels, self.image_size, self.image_size)
1249
+ samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size,
1250
+ shape,cond,verbose=False,**kwargs)
1251
+
1252
+ else:
1253
+ samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
1254
+ return_intermediates=True,**kwargs)
1255
+
1256
+ return samples, intermediates
1257
+
1258
+
1259
+ @torch.no_grad()
1260
+ def log_images(self, batch, N=4, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
1261
+ quantize_denoised=True, inpaint=False, plot_denoise_rows=False, plot_progressive_rows=False,
1262
+ plot_diffusion_rows=False, **kwargs):
1263
+
1264
+ use_ddim = False
1265
+
1266
+ log = {}
1267
+ z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
1268
+ return_first_stage_outputs=True,
1269
+ force_c_encode=True,
1270
+ return_original_cond=True,
1271
+ bs=N, uncond=0)
1272
+ N = min(x.shape[0], N)
1273
+ n_row = min(x.shape[0], n_row)
1274
+ log["inputs"] = x
1275
+ log["reals"] = xc["c_concat"]
1276
+ log["reconstruction"] = xrec
1277
+ if self.model.conditioning_key is not None:
1278
+ if hasattr(self.cond_stage_model, "decode"):
1279
+ xc = self.cond_stage_model.decode(c)
1280
+ log["conditioning"] = xc
1281
+ elif self.cond_stage_key in ["caption"]:
1282
+ xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
1283
+ log["conditioning"] = xc
1284
+ elif self.cond_stage_key == 'class_label':
1285
+ xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
1286
+ log['conditioning'] = xc
1287
+ elif isimage(xc):
1288
+ log["conditioning"] = xc
1289
+ if ismap(xc):
1290
+ log["original_conditioning"] = self.to_rgb(xc)
1291
+
1292
+ if plot_diffusion_rows:
1293
+ # get diffusion row
1294
+ diffusion_row = []
1295
+ z_start = z[:n_row]
1296
+ for t in range(self.num_timesteps):
1297
+ if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
1298
+ t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
1299
+ t = t.to(self.device).long()
1300
+ noise = torch.randn_like(z_start)
1301
+ z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
1302
+ diffusion_row.append(self.decode_first_stage(z_noisy))
1303
+
1304
+ diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
1305
+ diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
1306
+ diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
1307
+ diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
1308
+ log["diffusion_row"] = diffusion_grid
1309
+
1310
+ if sample:
1311
+ # get denoise row
1312
+ with self.ema_scope("Plotting"):
1313
+ samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
1314
+ ddim_steps=ddim_steps,eta=ddim_eta)
1315
+ # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
1316
+ x_samples = self.decode_first_stage(samples)
1317
+ log["samples"] = x_samples
1318
+ if plot_denoise_rows:
1319
+ denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
1320
+ log["denoise_row"] = denoise_grid
1321
+
1322
+ if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
1323
+ self.first_stage_model, IdentityFirstStage):
1324
+ # also display when quantizing x0 while sampling
1325
+ with self.ema_scope("Plotting Quantized Denoised"):
1326
+ samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
1327
+ ddim_steps=ddim_steps,eta=ddim_eta,
1328
+ quantize_denoised=True)
1329
+ # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
1330
+ # quantize_denoised=True)
1331
+ x_samples = self.decode_first_stage(samples.to(self.device))
1332
+ log["samples_x0_quantized"] = x_samples
1333
+
1334
+ if inpaint:
1335
+ # make a simple center square
1336
+ h, w = z.shape[2], z.shape[3]
1337
+ mask = torch.ones(N, h, w).to(self.device)
1338
+ # zeros will be filled in
1339
+ mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
1340
+ mask = mask[:, None, ...]
1341
+ with self.ema_scope("Plotting Inpaint"):
1342
+
1343
+ samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta,
1344
+ ddim_steps=ddim_steps, x0=z[:N], mask=mask)
1345
+ x_samples = self.decode_first_stage(samples.to(self.device))
1346
+ log["samples_inpainting"] = x_samples
1347
+ log["mask"] = mask
1348
+
1349
+ # outpaint
1350
+ with self.ema_scope("Plotting Outpaint"):
1351
+ samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta,
1352
+ ddim_steps=ddim_steps, x0=z[:N], mask=mask)
1353
+ x_samples = self.decode_first_stage(samples.to(self.device))
1354
+ log["samples_outpainting"] = x_samples
1355
+
1356
+ if plot_progressive_rows:
1357
+ with self.ema_scope("Plotting Progressives"):
1358
+ img, progressives = self.progressive_denoising(c,
1359
+ shape=(self.channels, self.image_size, self.image_size),
1360
+ batch_size=N)
1361
+ prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
1362
+ log["progressive_row"] = prog_row
1363
+
1364
+ if return_keys:
1365
+ if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
1366
+ return log
1367
+ else:
1368
+ return {key: log[key] for key in return_keys}
1369
+ return log
1370
+
1371
+ def configure_optimizers(self):
1372
+ lr = self.learning_rate
1373
+ params = list(self.model.parameters())
1374
+ if self.cond_stage_trainable:
1375
+ print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
1376
+ params = params + list(self.cond_stage_model.parameters())
1377
+ if self.learn_logvar:
1378
+ print('Diffusion model optimizing logvar')
1379
+ params.append(self.logvar)
1380
+ opt = torch.optim.AdamW(params, lr=lr)
1381
+ if self.use_scheduler:
1382
+ assert 'target' in self.scheduler_config
1383
+ scheduler = instantiate_from_config(self.scheduler_config)
1384
+
1385
+ print("Setting up LambdaLR scheduler...")
1386
+ scheduler = [
1387
+ {
1388
+ 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
1389
+ 'interval': 'step',
1390
+ 'frequency': 1
1391
+ }]
1392
+ return [opt], scheduler
1393
+ return opt
1394
+
1395
+ @torch.no_grad()
1396
+ def to_rgb(self, x):
1397
+ x = x.float()
1398
+ if not hasattr(self, "colorize"):
1399
+ self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
1400
+ x = nn.functional.conv2d(x, weight=self.colorize)
1401
+ x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
1402
+ return x
1403
+
1404
+
1405
+ class DiffusionWrapper(pl.LightningModule):
1406
+ def __init__(self, diff_model_config, conditioning_key):
1407
+ super().__init__()
1408
+ self.diffusion_model = instantiate_from_config(diff_model_config)
1409
+ self.conditioning_key = conditioning_key
1410
+ assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm']
1411
+
1412
+ def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
1413
+ if self.conditioning_key is None:
1414
+ out = self.diffusion_model(x, t)
1415
+ elif self.conditioning_key == 'concat':
1416
+ xc = torch.cat([x] + c_concat, dim=1)
1417
+ out = self.diffusion_model(xc, t)
1418
+ elif self.conditioning_key == 'crossattn':
1419
+ cc = torch.cat(c_crossattn, 1)
1420
+ out = self.diffusion_model(x, t, context=cc)
1421
+ elif self.conditioning_key == 'hybrid':
1422
+ xc = torch.cat([x] + c_concat, dim=1)
1423
+ cc = torch.cat(c_crossattn, 1)
1424
+ out = self.diffusion_model(xc, t, context=cc)
1425
+ elif self.conditioning_key == 'adm':
1426
+ cc = c_crossattn[0]
1427
+ out = self.diffusion_model(x, t, y=cc)
1428
+ else:
1429
+ raise NotImplementedError()
1430
+
1431
+ return out
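
A quick illustration of what each conditioning mode above hands to the inner diffusion model: extra channels, a cross-attention context, both, or a class embedding. The helper and tensor shapes below are made up for the example and are not part of the module (a (B, 77, 768) context stands in for text embeddings):

import torch

def combine_conditioning(x, c_concat=None, c_crossattn=None, key="hybrid"):
    # mirrors DiffusionWrapper.forward: decide what the U-Net receives per conditioning_key
    if key == "concat":
        return torch.cat([x] + c_concat, dim=1), None
    if key == "crossattn":
        return x, torch.cat(c_crossattn, dim=1)
    if key == "hybrid":
        return torch.cat([x] + c_concat, dim=1), torch.cat(c_crossattn, dim=1)
    raise NotImplementedError(key)

x = torch.randn(2, 4, 64, 64)              # noised latent
c_concat = [torch.randn(2, 4, 64, 64)]     # e.g. encoded input image
c_crossattn = [torch.randn(2, 77, 768)]    # e.g. text embeddings
xc, context = combine_conditioning(x, c_concat, c_crossattn, key="hybrid")
print(xc.shape, context.shape)             # (2, 8, 64, 64) and (2, 77, 768)
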
1432
+
1433
+
1434
+ class Layout2ImgDiffusion(LatentDiffusion):
1435
+ # TODO: move all layout-specific hacks to this class
1436
+ def __init__(self, cond_stage_key, *args, **kwargs):
1437
+ assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
1438
+ super().__init__(*args, cond_stage_key=cond_stage_key, **kwargs)
1439
+
1440
+ def log_images(self, batch, N=8, *args, **kwargs):
1441
+ logs = super().log_images(*args, batch=batch, N=N, **kwargs)
1442
+
1443
+ key = 'train' if self.training else 'validation'
1444
+ dset = self.trainer.datamodule.datasets[key]
1445
+ mapper = dset.conditional_builders[self.cond_stage_key]
1446
+
1447
+ bbox_imgs = []
1448
+ map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno))
1449
+ for tknzd_bbox in batch[self.cond_stage_key][:N]:
1450
+ bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256))
1451
+ bbox_imgs.append(bboximg)
1452
+
1453
+ cond_img = torch.stack(bbox_imgs, dim=0)
1454
+ logs['bbox_image'] = cond_img
1455
+ return logs
modules/models/diffusion/uni_pc/__init__.py ADDED
@@ -0,0 +1 @@
1
+ from .sampler import UniPCSampler # noqa: F401
modules/models/diffusion/uni_pc/sampler.py ADDED
@@ -0,0 +1,101 @@
1
+ """SAMPLING ONLY."""
2
+
3
+ import torch
4
+
5
+ from .uni_pc import NoiseScheduleVP, model_wrapper, UniPC
6
+ from modules import shared, devices
7
+
8
+
9
+ class UniPCSampler(object):
10
+ def __init__(self, model, **kwargs):
11
+ super().__init__()
12
+ self.model = model
13
+ to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device)
14
+ self.before_sample = None
15
+ self.after_sample = None
+ self.after_update = None
16
+ self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))
17
+
18
+ def register_buffer(self, name, attr):
19
+ if type(attr) == torch.Tensor:
20
+ if attr.device != devices.device:
21
+ attr = attr.to(devices.device)
22
+ setattr(self, name, attr)
23
+
24
+ def set_hooks(self, before_sample, after_sample, after_update):
25
+ self.before_sample = before_sample
26
+ self.after_sample = after_sample
27
+ self.after_update = after_update
28
+
29
+ @torch.no_grad()
30
+ def sample(self,
31
+ S,
32
+ batch_size,
33
+ shape,
34
+ conditioning=None,
35
+ callback=None,
36
+ normals_sequence=None,
37
+ img_callback=None,
38
+ quantize_x0=False,
39
+ eta=0.,
40
+ mask=None,
41
+ x0=None,
42
+ temperature=1.,
43
+ noise_dropout=0.,
44
+ score_corrector=None,
45
+ corrector_kwargs=None,
46
+ verbose=True,
47
+ x_T=None,
48
+ log_every_t=100,
49
+ unconditional_guidance_scale=1.,
50
+ unconditional_conditioning=None,
51
+ # this has to come in the same format as the conditioning, e.g. as encoded tokens, ...
52
+ **kwargs
53
+ ):
54
+ if conditioning is not None:
55
+ if isinstance(conditioning, dict):
56
+ ctmp = conditioning[list(conditioning.keys())[0]]
57
+ while isinstance(ctmp, list):
58
+ ctmp = ctmp[0]
59
+ cbs = ctmp.shape[0]
60
+ if cbs != batch_size:
61
+ print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
62
+
63
+ elif isinstance(conditioning, list):
64
+ for ctmp in conditioning:
65
+ if ctmp.shape[0] != batch_size:
66
+ print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
67
+
68
+ else:
69
+ if conditioning.shape[0] != batch_size:
70
+ print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
71
+
72
+ # sampling
73
+ C, H, W = shape
74
+ size = (batch_size, C, H, W)
75
+ # print(f'Data shape for UniPC sampling is {size}')
76
+
77
+ device = self.model.betas.device
78
+ if x_T is None:
79
+ img = torch.randn(size, device=device)
80
+ else:
81
+ img = x_T
82
+
83
+ ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)
84
+
85
+ # use "v" for v-prediction checkpoints (e.g. SD 2.x 768-v); SD 1.x and other eps-prediction models use "noise"
86
+ model_type = "v" if self.model.parameterization == "v" else "noise"
87
+
88
+ model_fn = model_wrapper(
89
+ lambda x, t, c: self.model.apply_model(x, t, c),
90
+ ns,
91
+ model_type=model_type,
92
+ guidance_type="classifier-free",
93
+ #condition=conditioning,
94
+ #unconditional_condition=unconditional_conditioning,
95
+ guidance_scale=unconditional_guidance_scale,
96
+ )
97
+
98
+ uni_pc = UniPC(model_fn, ns, predict_x0=True, thresholding=False, variant=shared.opts.uni_pc_variant, condition=conditioning, unconditional_condition=unconditional_conditioning, before_sample=self.before_sample, after_sample=self.after_sample, after_update=self.after_update)
99
+ x = uni_pc.sample(img, steps=S, skip_type=shared.opts.uni_pc_skip_type, method="multistep", order=shared.opts.uni_pc_order, lower_order_final=shared.opts.uni_pc_lower_order_final)
100
+
101
+ return x.to(device), None
modules/models/diffusion/uni_pc/uni_pc.py ADDED
@@ -0,0 +1,863 @@
1
+ import torch
2
+ import math
3
+ import tqdm
4
+
5
+
6
+ class NoiseScheduleVP:
7
+ def __init__(
8
+ self,
9
+ schedule='discrete',
10
+ betas=None,
11
+ alphas_cumprod=None,
12
+ continuous_beta_0=0.1,
13
+ continuous_beta_1=20.,
14
+ ):
15
+ """Create a wrapper class for the forward SDE (VP type).
16
+
17
+ ***
18
+ Update: We support discrete-time diffusion models by implementing a piecewise linear interpolation for log_alpha_t.
19
+ We recommend using schedule='discrete' for discrete-time diffusion models, especially for high-resolution images.
20
+ ***
21
+
22
+ The forward SDE ensures that the conditional distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
23
+ We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
24
+ Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
25
+
26
+ log_alpha_t = self.marginal_log_mean_coeff(t)
27
+ sigma_t = self.marginal_std(t)
28
+ lambda_t = self.marginal_lambda(t)
29
+
30
+ Moreover, as lambda(t) is an invertible function, we also support its inverse function:
31
+
32
+ t = self.inverse_lambda(lambda_t)
33
+
34
+ ===============================================================
35
+
36
+ We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
37
+
38
+ 1. For discrete-time DPMs:
39
+
40
+ For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
41
+ t_i = (i + 1) / N
42
+ e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
43
+ We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
44
+
45
+ Args:
46
+ betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
47
+ alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
48
+
49
+ Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
50
+
51
+ **Important**: Please pay special attention to the `alphas_cumprod` argument:
52
+ The `alphas_cumprod` is the \hat{alpha_n} array in the notation of DDPM. Specifically, DDPMs assume that
53
+ q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
54
+ Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
55
+ alpha_{t_n} = \sqrt{\hat{alpha_n}},
56
+ and
57
+ log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
58
+
59
+
60
+ 2. For continuous-time DPMs:
61
+
62
+ We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise
63
+ schedule are the default settings in DDPM and improved-DDPM:
64
+
65
+ Args:
66
+ beta_min: A `float` number. The smallest beta for the linear schedule.
67
+ beta_max: A `float` number. The largest beta for the linear schedule.
68
+ cosine_s: A `float` number. The hyperparameter in the cosine schedule.
69
+ cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.
70
+ T: A `float` number. The ending time of the forward process.
71
+
72
+ ===============================================================
73
+
74
+ Args:
75
+ schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
76
+ 'linear' or 'cosine' for continuous-time DPMs.
77
+ Returns:
78
+ A wrapper object of the forward SDE (VP type).
79
+
80
+ ===============================================================
81
+
82
+ Example:
83
+
84
+ # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
85
+ >>> ns = NoiseScheduleVP('discrete', betas=betas)
86
+
87
+ # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
88
+ >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
89
+
90
+ # For continuous-time DPMs (VPSDE), linear schedule:
91
+ >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
92
+
93
+ """
94
+
95
+ if schedule not in ['discrete', 'linear', 'cosine']:
96
+ raise ValueError(f"Unsupported noise schedule {schedule}. The schedule needs to be 'discrete' or 'linear' or 'cosine'")
97
+
98
+ self.schedule = schedule
99
+ if schedule == 'discrete':
100
+ if betas is not None:
101
+ log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
102
+ else:
103
+ assert alphas_cumprod is not None
104
+ log_alphas = 0.5 * torch.log(alphas_cumprod)
105
+ self.total_N = len(log_alphas)
106
+ self.T = 1.
107
+ self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1))
108
+ self.log_alpha_array = log_alphas.reshape((1, -1,))
109
+ else:
110
+ self.total_N = 1000
111
+ self.beta_0 = continuous_beta_0
112
+ self.beta_1 = continuous_beta_1
113
+ self.cosine_s = 0.008
114
+ self.cosine_beta_max = 999.
115
+ self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s
116
+ self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))
117
+ self.schedule = schedule
118
+ if schedule == 'cosine':
119
+ # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T.
120
+ # Note that T = 0.9946 may be not the optimal setting. However, we find it works well.
121
+ self.T = 0.9946
122
+ else:
123
+ self.T = 1.
124
+
125
+ def marginal_log_mean_coeff(self, t):
126
+ """
127
+ Compute log(alpha_t) of a given continuous-time label t in [0, T].
128
+ """
129
+ if self.schedule == 'discrete':
130
+ return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((-1))
131
+ elif self.schedule == 'linear':
132
+ return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
133
+ elif self.schedule == 'cosine':
134
+ log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))
135
+ log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0
136
+ return log_alpha_t
137
+
138
+ def marginal_alpha(self, t):
139
+ """
140
+ Compute alpha_t of a given continuous-time label t in [0, T].
141
+ """
142
+ return torch.exp(self.marginal_log_mean_coeff(t))
143
+
144
+ def marginal_std(self, t):
145
+ """
146
+ Compute sigma_t of a given continuous-time label t in [0, T].
147
+ """
148
+ return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))
149
+
150
+ def marginal_lambda(self, t):
151
+ """
152
+ Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
153
+ """
154
+ log_mean_coeff = self.marginal_log_mean_coeff(t)
155
+ log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))
156
+ return log_mean_coeff - log_std
157
+
158
+ def inverse_lambda(self, lamb):
159
+ """
160
+ Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
161
+ """
162
+ if self.schedule == 'linear':
163
+ tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
164
+ Delta = self.beta_0**2 + tmp
165
+ return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
166
+ elif self.schedule == 'discrete':
167
+ log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)
168
+ t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1]))
169
+ return t.reshape((-1,))
170
+ else:
171
+ log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))
172
+ t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s
173
+ t = t_fn(log_alpha)
174
+ return t
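
A minimal numeric check of the relations this class implements, assuming a standard linear beta schedule (the schedule values below are illustrative, not taken from any particular checkpoint):

import torch

betas = torch.linspace(1e-4, 2e-2, 1000)
alphas_cumprod = torch.cumprod(1. - betas, dim=0)   # \hat{alpha}_n in DDPM notation

log_alpha = 0.5 * torch.log(alphas_cumprod)         # log(alpha_t)
sigma = torch.sqrt(1. - alphas_cumprod)             # sigma_t
lam = log_alpha - torch.log(sigma)                  # half-logSNR lambda_t

# lambda_t decreases monotonically with t, which is what makes inverse_lambda well defined
assert (lam[1:] < lam[:-1]).all()
print(float(lam[0]), float(lam[-1]))
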
175
+
176
+
177
+ def model_wrapper(
178
+ model,
179
+ noise_schedule,
180
+ model_type="noise",
181
+ model_kwargs=None,
182
+ guidance_type="uncond",
183
+ #condition=None,
184
+ #unconditional_condition=None,
185
+ guidance_scale=1.,
186
+ classifier_fn=None,
187
+ classifier_kwargs=None,
188
+ ):
189
+ """Create a wrapper function for the noise prediction model.
190
+
191
+ DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
192
+ firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.
193
+
194
+ We support four types of the diffusion model by setting `model_type`:
195
+
196
+ 1. "noise": noise prediction model. (Trained by predicting noise).
197
+
198
+ 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
199
+
200
+ 3. "v": velocity prediction model. (Trained by predicting the velocity).
201
+ The "v" prediction derivation is detailed in Appendix D of [1], and it is used in Imagen Video [2].
202
+
203
+ [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
204
+ arXiv preprint arXiv:2202.00512 (2022).
205
+ [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
206
+ arXiv preprint arXiv:2210.02303 (2022).
207
+
208
+ 4. "score": marginal score function. (Trained by denoising score matching).
209
+ Note that the score function and the noise prediction model follows a simple relationship:
210
+ ```
211
+ noise(x_t, t) = -sigma_t * score(x_t, t)
212
+ ```
213
+
214
+ We support three types of guided sampling by DPMs by setting `guidance_type`:
215
+ 1. "uncond": unconditional sampling by DPMs.
216
+ The input `model` has the following format:
217
+ ``
218
+ model(x, t_input, **model_kwargs) -> noise | x_start | v | score
219
+ ``
220
+
221
+ 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
222
+ The input `model` has the following format:
223
+ ``
224
+ model(x, t_input, **model_kwargs) -> noise | x_start | v | score
225
+ ``
226
+
227
+ The input `classifier_fn` has the following format:
228
+ ``
229
+ classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
230
+ ``
231
+
232
+ [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
233
+ in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
234
+
235
+ 3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
236
+ The input `model` has the following format:
237
+ ``
238
+ model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
239
+ ``
240
+ And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
241
+
242
+ [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
243
+ arXiv preprint arXiv:2207.12598 (2022).
244
+
245
+
246
+ The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
247
+ or continuous-time labels (i.e. epsilon to T).
248
+
249
+ We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:
250
+ ``
251
+ def model_fn(x, t_continuous) -> noise:
252
+ t_input = get_model_input_time(t_continuous)
253
+ return noise_pred(model, x, t_input, **model_kwargs)
254
+ ``
255
+ where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
256
+
257
+ ===============================================================
258
+
259
+ Args:
260
+ model: A diffusion model with the corresponding format described above.
261
+ noise_schedule: A noise schedule object, such as NoiseScheduleVP.
262
+ model_type: A `str`. The parameterization type of the diffusion model.
263
+ "noise" or "x_start" or "v" or "score".
264
+ model_kwargs: A `dict`. A dict for the other inputs of the model function.
265
+ guidance_type: A `str`. The type of the guidance for sampling.
266
+ "uncond" or "classifier" or "classifier-free".
267
+ condition: A pytorch tensor. The condition for the guided sampling.
268
+ Only used for "classifier" or "classifier-free" guidance type.
269
+ unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
270
+ Only used for "classifier-free" guidance type.
271
+ guidance_scale: A `float`. The scale for the guided sampling.
272
+ classifier_fn: A classifier function. Only used for the classifier guidance.
273
+ classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
274
+ Returns:
275
+ A noise prediction model that accepts the noised data and the continuous time as the inputs.
276
+ """
277
+
278
+ model_kwargs = model_kwargs or {}
279
+ classifier_kwargs = classifier_kwargs or {}
280
+
281
+ def get_model_input_time(t_continuous):
282
+ """
283
+ Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
284
+ For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
285
+ For continuous-time DPMs, we just use `t_continuous`.
286
+ """
287
+ if noise_schedule.schedule == 'discrete':
288
+ return (t_continuous - 1. / noise_schedule.total_N) * 1000.
289
+ else:
290
+ return t_continuous
291
+
292
+ def noise_pred_fn(x, t_continuous, cond=None):
293
+ if t_continuous.reshape((-1,)).shape[0] == 1:
294
+ t_continuous = t_continuous.expand((x.shape[0]))
295
+ t_input = get_model_input_time(t_continuous)
296
+ if cond is None:
297
+ output = model(x, t_input, None, **model_kwargs)
298
+ else:
299
+ output = model(x, t_input, cond, **model_kwargs)
300
+ if model_type == "noise":
301
+ return output
302
+ elif model_type == "x_start":
303
+ alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
304
+ dims = x.dim()
305
+ return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)
306
+ elif model_type == "v":
307
+ alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
308
+ dims = x.dim()
309
+ return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x
310
+ elif model_type == "score":
311
+ sigma_t = noise_schedule.marginal_std(t_continuous)
312
+ dims = x.dim()
313
+ return -expand_dims(sigma_t, dims) * output
314
+
315
+ def cond_grad_fn(x, t_input, condition):
316
+ """
317
+ Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
318
+ """
319
+ with torch.enable_grad():
320
+ x_in = x.detach().requires_grad_(True)
321
+ log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
322
+ return torch.autograd.grad(log_prob.sum(), x_in)[0]
323
+
324
+ def model_fn(x, t_continuous, condition, unconditional_condition):
325
+ """
326
+ The noise prediction model function that is used for DPM-Solver.
327
+ """
328
+ if t_continuous.reshape((-1,)).shape[0] == 1:
329
+ t_continuous = t_continuous.expand((x.shape[0]))
330
+ if guidance_type == "uncond":
331
+ return noise_pred_fn(x, t_continuous)
332
+ elif guidance_type == "classifier":
333
+ assert classifier_fn is not None
334
+ t_input = get_model_input_time(t_continuous)
335
+ cond_grad = cond_grad_fn(x, t_input, condition)
336
+ sigma_t = noise_schedule.marginal_std(t_continuous)
337
+ noise = noise_pred_fn(x, t_continuous)
338
+ return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad
339
+ elif guidance_type == "classifier-free":
340
+ if guidance_scale == 1. or unconditional_condition is None:
341
+ return noise_pred_fn(x, t_continuous, cond=condition)
342
+ else:
343
+ x_in = torch.cat([x] * 2)
344
+ t_in = torch.cat([t_continuous] * 2)
345
+ if isinstance(condition, dict):
346
+ assert isinstance(unconditional_condition, dict)
347
+ c_in = {}
348
+ for k in condition:
349
+ if isinstance(condition[k], list):
350
+ c_in[k] = [torch.cat([
351
+ unconditional_condition[k][i],
352
+ condition[k][i]]) for i in range(len(condition[k]))]
353
+ else:
354
+ c_in[k] = torch.cat([
355
+ unconditional_condition[k],
356
+ condition[k]])
357
+ elif isinstance(condition, list):
358
+ c_in = []
359
+ assert isinstance(unconditional_condition, list)
360
+ for i in range(len(condition)):
361
+ c_in.append(torch.cat([unconditional_condition[i], condition[i]]))
362
+ else:
363
+ c_in = torch.cat([unconditional_condition, condition])
364
+ noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)
365
+ return noise_uncond + guidance_scale * (noise - noise_uncond)
366
+
367
+ assert model_type in ["noise", "x_start", "v"]
368
+ assert guidance_type in ["uncond", "classifier", "classifier-free"]
369
+ return model_fn
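
The classifier-free branch above boils down to one linear extrapolation between the unconditional and conditional predictions. A toy sketch of just that combination, with placeholder tensors:

import torch

def cfg(noise_uncond, noise_cond, guidance_scale):
    # classifier-free guidance: push the prediction away from the unconditional one
    return noise_uncond + guidance_scale * (noise_cond - noise_uncond)

eps_u, eps_c = torch.randn(1, 4, 64, 64), torch.randn(1, 4, 64, 64)
print(cfg(eps_u, eps_c, 7.5).shape)   # torch.Size([1, 4, 64, 64])
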
370
+
371
+
372
+ class UniPC:
373
+ def __init__(
374
+ self,
375
+ model_fn,
376
+ noise_schedule,
377
+ predict_x0=True,
378
+ thresholding=False,
379
+ max_val=1.,
380
+ variant='bh1',
381
+ condition=None,
382
+ unconditional_condition=None,
383
+ before_sample=None,
384
+ after_sample=None,
385
+ after_update=None
386
+ ):
387
+ """Construct a UniPC.
388
+
389
+ We support both data_prediction and noise_prediction.
390
+ """
391
+ self.model_fn_ = model_fn
392
+ self.noise_schedule = noise_schedule
393
+ self.variant = variant
394
+ self.predict_x0 = predict_x0
395
+ self.thresholding = thresholding
396
+ self.max_val = max_val
397
+ self.condition = condition
398
+ self.unconditional_condition = unconditional_condition
399
+ self.before_sample = before_sample
400
+ self.after_sample = after_sample
401
+ self.after_update = after_update
402
+
403
+ def dynamic_thresholding_fn(self, x0, t=None):
404
+ """
405
+ The dynamic thresholding method.
406
+ """
407
+ dims = x0.dim()
408
+ p = self.dynamic_thresholding_ratio
409
+ s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
410
+ s = expand_dims(torch.maximum(s, self.thresholding_max_val * torch.ones_like(s).to(s.device)), dims)
411
+ x0 = torch.clamp(x0, -s, s) / s
412
+ return x0
413
+
414
+ def model(self, x, t):
415
+ cond = self.condition
416
+ uncond = self.unconditional_condition
417
+ if self.before_sample is not None:
418
+ x, t, cond, uncond = self.before_sample(x, t, cond, uncond)
419
+ res = self.model_fn_(x, t, cond, uncond)
420
+ if self.after_sample is not None:
421
+ x, t, cond, uncond, res = self.after_sample(x, t, cond, uncond, res)
422
+
423
+ if isinstance(res, tuple):
424
+ # (None, pred_x0)
425
+ res = res[1]
426
+
427
+ return res
428
+
429
+ def noise_prediction_fn(self, x, t):
430
+ """
431
+ Return the noise prediction model.
432
+ """
433
+ return self.model(x, t)
434
+
435
+ def data_prediction_fn(self, x, t):
436
+ """
437
+ Return the data prediction model (with thresholding).
438
+ """
439
+ noise = self.noise_prediction_fn(x, t)
440
+ dims = x.dim()
441
+ alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
442
+ x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)
443
+ if self.thresholding:
444
+ p = 0.995 # A hyperparameter in the paper of "Imagen" [1].
445
+ s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)
446
+ s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)
447
+ x0 = torch.clamp(x0, -s, s) / s
448
+ return x0
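
The Imagen-style thresholding used here clamps the predicted x0 to its 99.5th-percentile magnitude per sample and rescales. A standalone sketch of the same operation on a random tensor:

import torch

x0 = torch.randn(2, 4, 8, 8) * 3                                    # exaggerated prediction
s = torch.quantile(x0.abs().reshape(x0.shape[0], -1), 0.995, dim=1)
s = torch.maximum(s, torch.ones_like(s))                            # floor at max_val = 1
s = s.reshape(-1, 1, 1, 1)                                          # broadcast over C, H, W
x0 = torch.clamp(x0, -s, s) / s
print(bool(x0.abs().max() <= 1.0))                                  # True
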
449
+
450
+ def model_fn(self, x, t):
451
+ """
452
+ Convert the model to the noise prediction model or the data prediction model.
453
+ """
454
+ if self.predict_x0:
455
+ return self.data_prediction_fn(x, t)
456
+ else:
457
+ return self.noise_prediction_fn(x, t)
458
+
459
+ def get_time_steps(self, skip_type, t_T, t_0, N, device):
460
+ """Compute the intermediate time steps for sampling.
461
+ """
462
+ if skip_type == 'logSNR':
463
+ lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))
464
+ lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))
465
+ logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)
466
+ return self.noise_schedule.inverse_lambda(logSNR_steps)
467
+ elif skip_type == 'time_uniform':
468
+ return torch.linspace(t_T, t_0, N + 1).to(device)
469
+ elif skip_type == 'time_quadratic':
470
+ t_order = 2
471
+ t = torch.linspace(t_T**(1. / t_order), t_0**(1. / t_order), N + 1).pow(t_order).to(device)
472
+ return t
473
+ else:
474
+ raise ValueError(f"Unsupported skip_type {skip_type}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'")
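
The three skip types only differ in how the N + 1 step boundaries are spaced between t_T and t_0; the two simpler ones can be compared directly (logSNR spacing additionally needs the noise schedule):

import torch

t_T, t_0, N = 1.0, 0.001, 5
uniform = torch.linspace(t_T, t_0, N + 1)
quadratic = torch.linspace(t_T ** 0.5, t_0 ** 0.5, N + 1).pow(2)   # denser near t_0
print(uniform)
print(quadratic)
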
475
+
476
+ def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):
477
+ """
478
+ Get the order of each step for sampling by the singlestep DPM-Solver.
479
+ """
480
+ if order == 3:
481
+ K = steps // 3 + 1
482
+ if steps % 3 == 0:
483
+ orders = [3,] * (K - 2) + [2, 1]
484
+ elif steps % 3 == 1:
485
+ orders = [3,] * (K - 1) + [1]
486
+ else:
487
+ orders = [3,] * (K - 1) + [2]
488
+ elif order == 2:
489
+ if steps % 2 == 0:
490
+ K = steps // 2
491
+ orders = [2,] * K
492
+ else:
493
+ K = steps // 2 + 1
494
+ orders = [2,] * (K - 1) + [1]
495
+ elif order == 1:
496
+ K = steps
497
+ orders = [1,] * steps
498
+ else:
499
+ raise ValueError("'order' must be '1' or '2' or '3'.")
500
+ if skip_type == 'logSNR':
501
+ # To reproduce the results in DPM-Solver paper
502
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
503
+ else:
504
+ timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor([0,] + orders), 0).to(device)]
505
+ return timesteps_outer, orders
506
+
507
+ def denoise_to_zero_fn(self, x, s):
508
+ """
509
+ Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization.
510
+ """
511
+ return self.data_prediction_fn(x, s)
512
+
513
+ def multistep_uni_pc_update(self, x, model_prev_list, t_prev_list, t, order, **kwargs):
514
+ if len(t.shape) == 0:
515
+ t = t.view(-1)
516
+ if 'bh' in self.variant:
517
+ return self.multistep_uni_pc_bh_update(x, model_prev_list, t_prev_list, t, order, **kwargs)
518
+ else:
519
+ assert self.variant == 'vary_coeff'
520
+ return self.multistep_uni_pc_vary_update(x, model_prev_list, t_prev_list, t, order, **kwargs)
521
+
522
+ def multistep_uni_pc_vary_update(self, x, model_prev_list, t_prev_list, t, order, use_corrector=True):
523
+ #print(f'using unified predictor-corrector with order {order} (solver type: vary coeff)')
524
+ ns = self.noise_schedule
525
+ assert order <= len(model_prev_list)
526
+
527
+ # first compute rks
528
+ t_prev_0 = t_prev_list[-1]
529
+ lambda_prev_0 = ns.marginal_lambda(t_prev_0)
530
+ lambda_t = ns.marginal_lambda(t)
531
+ model_prev_0 = model_prev_list[-1]
532
+ sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
533
+ log_alpha_t = ns.marginal_log_mean_coeff(t)
534
+ alpha_t = torch.exp(log_alpha_t)
535
+
536
+ h = lambda_t - lambda_prev_0
537
+
538
+ rks = []
539
+ D1s = []
540
+ for i in range(1, order):
541
+ t_prev_i = t_prev_list[-(i + 1)]
542
+ model_prev_i = model_prev_list[-(i + 1)]
543
+ lambda_prev_i = ns.marginal_lambda(t_prev_i)
544
+ rk = (lambda_prev_i - lambda_prev_0) / h
545
+ rks.append(rk)
546
+ D1s.append((model_prev_i - model_prev_0) / rk)
547
+
548
+ rks.append(1.)
549
+ rks = torch.tensor(rks, device=x.device)
550
+
551
+ K = len(rks)
552
+ # build C matrix
553
+ C = []
554
+
555
+ col = torch.ones_like(rks)
556
+ for k in range(1, K + 1):
557
+ C.append(col)
558
+ col = col * rks / (k + 1)
559
+ C = torch.stack(C, dim=1)
560
+
561
+ if len(D1s) > 0:
562
+ D1s = torch.stack(D1s, dim=1) # (B, K, C, H, W)
563
+ C_inv_p = torch.linalg.inv(C[:-1, :-1])
564
+ A_p = C_inv_p
565
+
566
+ if use_corrector:
567
+ #print('using corrector')
568
+ C_inv = torch.linalg.inv(C)
569
+ A_c = C_inv
570
+
571
+ hh = -h if self.predict_x0 else h
572
+ h_phi_1 = torch.expm1(hh)
573
+ h_phi_ks = []
574
+ factorial_k = 1
575
+ h_phi_k = h_phi_1
576
+ for k in range(1, K + 2):
577
+ h_phi_ks.append(h_phi_k)
578
+ h_phi_k = h_phi_k / hh - 1 / factorial_k
579
+ factorial_k *= (k + 1)
580
+
581
+ model_t = None
582
+ if self.predict_x0:
583
+ x_t_ = (
584
+ sigma_t / sigma_prev_0 * x
585
+ - alpha_t * h_phi_1 * model_prev_0
586
+ )
587
+ # now predictor
588
+ x_t = x_t_
589
+ if len(D1s) > 0:
590
+ # compute the residuals for predictor
591
+ for k in range(K - 1):
592
+ x_t = x_t - alpha_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_p[k])
593
+ # now corrector
594
+ if use_corrector:
595
+ model_t = self.model_fn(x_t, t)
596
+ D1_t = (model_t - model_prev_0)
597
+ x_t = x_t_
598
+ k = 0
599
+ for k in range(K - 1):
600
+ x_t = x_t - alpha_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_c[k][:-1])
601
+ x_t = x_t - alpha_t * h_phi_ks[K] * (D1_t * A_c[k][-1])
602
+ else:
603
+ log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
604
+ x_t_ = (
605
+ (torch.exp(log_alpha_t - log_alpha_prev_0)) * x
606
+ - (sigma_t * h_phi_1) * model_prev_0
607
+ )
608
+ # now predictor
609
+ x_t = x_t_
610
+ if len(D1s) > 0:
611
+ # compute the residuals for predictor
612
+ for k in range(K - 1):
613
+ x_t = x_t - sigma_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_p[k])
614
+ # now corrector
615
+ if use_corrector:
616
+ model_t = self.model_fn(x_t, t)
617
+ D1_t = (model_t - model_prev_0)
618
+ x_t = x_t_
619
+ k = 0
620
+ for k in range(K - 1):
621
+ x_t = x_t - sigma_t * h_phi_ks[k + 1] * torch.einsum('bkchw,k->bchw', D1s, A_c[k][:-1])
622
+ x_t = x_t - sigma_t * h_phi_ks[K] * (D1_t * A_c[k][-1])
623
+ return x_t, model_t
624
+
625
+ def multistep_uni_pc_bh_update(self, x, model_prev_list, t_prev_list, t, order, x_t=None, use_corrector=True):
626
+ #print(f'using unified predictor-corrector with order {order} (solver type: B(h))')
627
+ ns = self.noise_schedule
628
+ assert order <= len(model_prev_list)
629
+ dims = x.dim()
630
+
631
+ # first compute rks
632
+ t_prev_0 = t_prev_list[-1]
633
+ lambda_prev_0 = ns.marginal_lambda(t_prev_0)
634
+ lambda_t = ns.marginal_lambda(t)
635
+ model_prev_0 = model_prev_list[-1]
636
+ sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)
637
+ log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)
638
+ alpha_t = torch.exp(log_alpha_t)
639
+
640
+ h = lambda_t - lambda_prev_0
641
+
642
+ rks = []
643
+ D1s = []
644
+ for i in range(1, order):
645
+ t_prev_i = t_prev_list[-(i + 1)]
646
+ model_prev_i = model_prev_list[-(i + 1)]
647
+ lambda_prev_i = ns.marginal_lambda(t_prev_i)
648
+ rk = ((lambda_prev_i - lambda_prev_0) / h)[0]
649
+ rks.append(rk)
650
+ D1s.append((model_prev_i - model_prev_0) / rk)
651
+
652
+ rks.append(1.)
653
+ rks = torch.tensor(rks, device=x.device)
654
+
655
+ R = []
656
+ b = []
657
+
658
+ hh = -h[0] if self.predict_x0 else h[0]
659
+ h_phi_1 = torch.expm1(hh) # h\phi_1(h) = e^h - 1
660
+ h_phi_k = h_phi_1 / hh - 1
661
+
662
+ factorial_i = 1
663
+
664
+ if self.variant == 'bh1':
665
+ B_h = hh
666
+ elif self.variant == 'bh2':
667
+ B_h = torch.expm1(hh)
668
+ else:
669
+ raise NotImplementedError()
670
+
671
+ for i in range(1, order + 1):
672
+ R.append(torch.pow(rks, i - 1))
673
+ b.append(h_phi_k * factorial_i / B_h)
674
+ factorial_i *= (i + 1)
675
+ h_phi_k = h_phi_k / hh - 1 / factorial_i
676
+
677
+ R = torch.stack(R)
678
+ b = torch.tensor(b, device=x.device)
679
+
680
+ # now predictor
681
+ use_predictor = len(D1s) > 0 and x_t is None
682
+ if len(D1s) > 0:
683
+ D1s = torch.stack(D1s, dim=1) # (B, K, C, H, W)
684
+ if x_t is None:
685
+ # for order 2, we use a simplified version
686
+ if order == 2:
687
+ rhos_p = torch.tensor([0.5], device=b.device)
688
+ else:
689
+ rhos_p = torch.linalg.solve(R[:-1, :-1], b[:-1])
690
+ else:
691
+ D1s = None
692
+
693
+ if use_corrector:
694
+ #print('using corrector')
695
+ # for order 1, we use a simplified version
696
+ if order == 1:
697
+ rhos_c = torch.tensor([0.5], device=b.device)
698
+ else:
699
+ rhos_c = torch.linalg.solve(R, b)
700
+
701
+ model_t = None
702
+ if self.predict_x0:
703
+ x_t_ = (
704
+ expand_dims(sigma_t / sigma_prev_0, dims) * x
705
+ - expand_dims(alpha_t * h_phi_1, dims) * model_prev_0
706
+ )
707
+
708
+ if x_t is None:
709
+ if use_predictor:
710
+ pred_res = torch.einsum('k,bkchw->bchw', rhos_p, D1s)
711
+ else:
712
+ pred_res = 0
713
+ x_t = x_t_ - expand_dims(alpha_t * B_h, dims) * pred_res
714
+
715
+ if use_corrector:
716
+ model_t = self.model_fn(x_t, t)
717
+ if D1s is not None:
718
+ corr_res = torch.einsum('k,bkchw->bchw', rhos_c[:-1], D1s)
719
+ else:
720
+ corr_res = 0
721
+ D1_t = (model_t - model_prev_0)
722
+ x_t = x_t_ - expand_dims(alpha_t * B_h, dims) * (corr_res + rhos_c[-1] * D1_t)
723
+ else:
724
+ x_t_ = (
725
+ expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x
726
+ - expand_dims(sigma_t * h_phi_1, dims) * model_prev_0
727
+ )
728
+ if x_t is None:
729
+ if use_predictor:
730
+ pred_res = torch.einsum('k,bkchw->bchw', rhos_p, D1s)
731
+ else:
732
+ pred_res = 0
733
+ x_t = x_t_ - expand_dims(sigma_t * B_h, dims) * pred_res
734
+
735
+ if use_corrector:
736
+ model_t = self.model_fn(x_t, t)
737
+ if D1s is not None:
738
+ corr_res = torch.einsum('k,bkchw->bchw', rhos_c[:-1], D1s)
739
+ else:
740
+ corr_res = 0
741
+ D1_t = (model_t - model_prev_0)
742
+ x_t = x_t_ - expand_dims(sigma_t * B_h, dims) * (corr_res + rhos_c[-1] * D1_t)
743
+ return x_t, model_t
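
The 'bh1' and 'bh2' variants above differ only in the normaliser B_h applied to the predictor/corrector residuals:

import torch

hh = torch.tensor(0.3)            # signed step size in lambda
print(float(hh))                  # B_h for variant 'bh1'
print(float(torch.expm1(hh)))     # B_h for variant 'bh2', i.e. e^h - 1
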
744
+
745
+
746
+ def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',
747
+ method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',
748
+ atol=0.0078, rtol=0.05, corrector=False,
749
+ ):
750
+ t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end
751
+ t_T = self.noise_schedule.T if t_start is None else t_start
752
+ device = x.device
753
+ if method == 'multistep':
754
+ assert steps >= order, "UniPC order must not exceed the number of sampling steps"
755
+ timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)
756
+ #print(f"Running UniPC Sampling with {timesteps.shape[0]} timesteps, order {order}")
757
+ assert timesteps.shape[0] - 1 == steps
758
+ with torch.no_grad():
759
+ vec_t = timesteps[0].expand((x.shape[0]))
760
+ model_prev_list = [self.model_fn(x, vec_t)]
761
+ t_prev_list = [vec_t]
762
+ with tqdm.tqdm(total=steps) as pbar:
763
+ # Init the first `order` values by lower order multistep DPM-Solver.
764
+ for init_order in range(1, order):
765
+ vec_t = timesteps[init_order].expand(x.shape[0])
766
+ x, model_x = self.multistep_uni_pc_update(x, model_prev_list, t_prev_list, vec_t, init_order, use_corrector=True)
767
+ if model_x is None:
768
+ model_x = self.model_fn(x, vec_t)
769
+ if self.after_update is not None:
770
+ self.after_update(x, model_x)
771
+ model_prev_list.append(model_x)
772
+ t_prev_list.append(vec_t)
773
+ pbar.update()
774
+
775
+ for step in range(order, steps + 1):
776
+ vec_t = timesteps[step].expand(x.shape[0])
777
+ if lower_order_final:
778
+ step_order = min(order, steps + 1 - step)
779
+ else:
780
+ step_order = order
781
+ #print('this step order:', step_order)
782
+ if step == steps:
783
+ #print('do not run corrector at the last step')
784
+ use_corrector = False
785
+ else:
786
+ use_corrector = True
787
+ x, model_x = self.multistep_uni_pc_update(x, model_prev_list, t_prev_list, vec_t, step_order, use_corrector=use_corrector)
788
+ if self.after_update is not None:
789
+ self.after_update(x, model_x)
790
+ for i in range(order - 1):
791
+ t_prev_list[i] = t_prev_list[i + 1]
792
+ model_prev_list[i] = model_prev_list[i + 1]
793
+ t_prev_list[-1] = vec_t
794
+ # We do not need to evaluate the final model value.
795
+ if step < steps:
796
+ if model_x is None:
797
+ model_x = self.model_fn(x, vec_t)
798
+ model_prev_list[-1] = model_x
799
+ pbar.update()
800
+ else:
801
+ raise NotImplementedError()
802
+ if denoise_to_zero:
803
+ x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)
804
+ return x
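
With lower_order_final=True the solver drops to lower orders for the last few steps, which matters for very small step counts. The per-step orders follow directly from the min() used above; a tiny worked example:

steps, order = 8, 3
step_orders = [min(order, steps + 1 - step) for step in range(order, steps + 1)]
print(step_orders)   # [3, 3, 3, 3, 2, 1]
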
805
+
806
+
807
+ #############################################################
808
+ # other utility functions
809
+ #############################################################
810
+
811
+ def interpolate_fn(x, xp, yp):
812
+ """
813
+ A piecewise linear function y = f(x), using xp and yp as keypoints.
814
+ We implement f(x) in a differentiable way (i.e. applicable for autograd).
815
+ The function f(x) is well-defined for the whole x-axis. (For x beyond the bounds of xp, we use the outermost points of xp to define the linear function.)
816
+
817
+ Args:
818
+ x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver).
819
+ xp: PyTorch tensor with shape [C, K], where K is the number of keypoints.
820
+ yp: PyTorch tensor with shape [C, K].
821
+ Returns:
822
+ The function values f(x), with shape [N, C].
823
+ """
824
+ N, K = x.shape[0], xp.shape[1]
825
+ all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
826
+ sorted_all_x, x_indices = torch.sort(all_x, dim=2)
827
+ x_idx = torch.argmin(x_indices, dim=2)
828
+ cand_start_idx = x_idx - 1
829
+ start_idx = torch.where(
830
+ torch.eq(x_idx, 0),
831
+ torch.tensor(1, device=x.device),
832
+ torch.where(
833
+ torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
834
+ ),
835
+ )
836
+ end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
837
+ start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
838
+ end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
839
+ start_idx2 = torch.where(
840
+ torch.eq(x_idx, 0),
841
+ torch.tensor(0, device=x.device),
842
+ torch.where(
843
+ torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx,
844
+ ),
845
+ )
846
+ y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
847
+ start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
848
+ end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
849
+ cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
850
+ return cand
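
For intuition, here is a plain, non-batched reference of what interpolate_fn computes, using torch.searchsorted instead of the sort-based, autograd-friendly construction above (the keypoints are made-up values):

import torch

xp = torch.tensor([0.0, 0.5, 1.0])   # keypoint x values
yp = torch.tensor([0.0, 1.0, 2.0])   # keypoint y values
x = torch.tensor([0.25, 0.75])

idx = torch.clamp(torch.searchsorted(xp, x, right=True) - 1, 0, xp.numel() - 2)
w = (x - xp[idx]) / (xp[idx + 1] - xp[idx])
print(yp[idx] + w * (yp[idx + 1] - yp[idx]))   # tensor([0.5000, 1.5000])
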
851
+
852
+
853
+ def expand_dims(v, dims):
854
+ """
855
+ Expand the tensor `v` to the dim `dims`.
856
+
857
+ Args:
858
+ `v`: a PyTorch tensor with shape [N].
859
+ `dims`: an `int`.
860
+ Returns:
861
+ a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
862
+ """
863
+ return v[(...,) + (None,)*(dims - 1)]
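
expand_dims simply right-pads singleton dimensions so that a per-sample scalar broadcasts against an image-shaped tensor:

import torch

def expand_dims(v, dims):
    # [N] -> [N, 1, 1, ..., 1] so it broadcasts against a `dims`-dimensional tensor
    return v[(...,) + (None,) * (dims - 1)]

sigma = torch.tensor([0.1, 0.2])                 # one value per batch element
x = torch.randn(2, 4, 64, 64)
print((expand_dims(sigma, x.dim()) * x).shape)   # torch.Size([2, 4, 64, 64])
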
modules/ngrok.py ADDED
@@ -0,0 +1,30 @@
1
+ import ngrok
2
+
3
+ # Connect to ngrok for ingress
4
+ def connect(token, port, options):
5
+ account = None
6
+ if token is None:
7
+ token = 'None'
8
+ else:
9
+ if ':' in token:
10
+ # token = authtoken:username:password
11
+ token, username, password = token.split(':', 2)
12
+ account = f"{username}:{password}"
13
+
14
+ # For all options see: https://github.com/ngrok/ngrok-py/blob/main/examples/ngrok-connect-full.py
15
+ if not options.get('authtoken_from_env'):
16
+ options['authtoken'] = token
17
+ if account:
18
+ options['basic_auth'] = account
19
+ if not options.get('session_metadata'):
20
+ options['session_metadata'] = 'stable-diffusion-webui'
21
+
22
+
23
+ try:
24
+ public_url = ngrok.connect(f"127.0.0.1:{port}", **options).url()
25
+ except Exception as e:
26
+ print(f'Invalid ngrok authtoken? ngrok connection aborted due to: {e}\n'
27
+ f'Your token: {token}, get the right one on https://dashboard.ngrok.com/get-started/your-authtoken')
28
+ else:
29
+ print(f'ngrok connected to localhost:{port}! URL: {public_url}\n'
30
+ 'You can use this link after the launch is complete.')
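
The token argument accepts either a bare authtoken or an authtoken:username:password triple; a small illustration of that split (the values are made up):

token = "2abcDEFghij:alice:s3cret"   # hypothetical value
account = None
if ':' in token:
    token, username, password = token.split(':', 2)
    account = f"{username}:{password}"
print(token, account)                # 2abcDEFghij alice:s3cret
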
modules/options.py ADDED
@@ -0,0 +1,245 @@
1
+ import json
2
+ import sys
3
+
4
+ import gradio as gr
5
+
6
+ from modules import errors
7
+ from modules.shared_cmd_options import cmd_opts
8
+
9
+
10
+ class OptionInfo:
11
+ def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after='', infotext=None, restrict_api=False):
12
+ self.default = default
13
+ self.label = label
14
+ self.component = component
15
+ self.component_args = component_args
16
+ self.onchange = onchange
17
+ self.section = section
18
+ self.refresh = refresh
19
+ self.do_not_save = False
20
+
21
+ self.comment_before = comment_before
22
+ """HTML text that will be added after label in UI"""
23
+
24
+ self.comment_after = comment_after
25
+ """HTML text that will be added before label in UI"""
26
+
27
+ self.infotext = infotext
28
+
29
+ self.restrict_api = restrict_api
30
+ """If True, the setting will not be accessible via API"""
31
+
32
+ def link(self, label, url):
33
+ self.comment_before += f"[<a href='{url}' target='_blank'>{label}</a>]"
34
+ return self
35
+
36
+ def js(self, label, js_func):
37
+ self.comment_before += f"[<a onclick='{js_func}(); return false'>{label}</a>]"
38
+ return self
39
+
40
+ def info(self, info):
41
+ self.comment_after += f"<span class='info'>({info})</span>"
42
+ return self
43
+
44
+ def html(self, html):
45
+ self.comment_after += html
46
+ return self
47
+
48
+ def needs_restart(self):
49
+ self.comment_after += " <span class='info'>(requires restart)</span>"
50
+ return self
51
+
52
+ def needs_reload_ui(self):
53
+ self.comment_after += " <span class='info'>(requires Reload UI)</span>"
54
+ return self
55
+
56
+
57
+ class OptionHTML(OptionInfo):
58
+ def __init__(self, text):
59
+ super().__init__(str(text).strip(), label='', component=lambda **kwargs: gr.HTML(elem_classes="settings-info", **kwargs))
60
+
61
+ self.do_not_save = True
62
+
63
+
64
+ def options_section(section_identifier, options_dict):
65
+ for v in options_dict.values():
66
+ v.section = section_identifier
67
+
68
+ return options_dict
69
+
70
+
71
+ options_builtin_fields = {"data_labels", "data", "restricted_opts", "typemap"}
72
+
73
+
74
+ class Options:
75
+ typemap = {int: float}
76
+
77
+ def __init__(self, data_labels: dict[str, OptionInfo], restricted_opts):
78
+ self.data_labels = data_labels
79
+ self.data = {k: v.default for k, v in self.data_labels.items()}
80
+ self.restricted_opts = restricted_opts
81
+
82
+ def __setattr__(self, key, value):
83
+ if key in options_builtin_fields:
84
+ return super(Options, self).__setattr__(key, value)
85
+
86
+ if self.data is not None:
87
+ if key in self.data or key in self.data_labels:
88
+ assert not cmd_opts.freeze_settings, "changing settings is disabled"
89
+
90
+ info = self.data_labels.get(key, None)
91
+ if info and info.do_not_save:
92
+ return
93
+
94
+ comp_args = info.component_args if info else None
95
+ if isinstance(comp_args, dict) and comp_args.get('visible', True) is False:
96
+ raise RuntimeError(f"not possible to set {key} because it is restricted")
97
+
98
+ if cmd_opts.hide_ui_dir_config and key in self.restricted_opts:
99
+ raise RuntimeError(f"not possible to set {key} because it is restricted")
100
+
101
+ self.data[key] = value
102
+ return
103
+
104
+ return super(Options, self).__setattr__(key, value)
105
+
106
+ def __getattr__(self, item):
107
+ if item in options_builtin_fields:
108
+ return super(Options, self).__getattribute__(item)
109
+
110
+ if self.data is not None:
111
+ if item in self.data:
112
+ return self.data[item]
113
+
114
+ if item in self.data_labels:
115
+ return self.data_labels[item].default
116
+
117
+ return super(Options, self).__getattribute__(item)
118
+
119
+ def set(self, key, value, is_api=False, run_callbacks=True):
120
+ """sets an option and calls its onchange callback, returning True if the option changed and False otherwise"""
121
+
122
+ oldval = self.data.get(key, None)
123
+ if oldval == value:
124
+ return False
125
+
126
+ option = self.data_labels[key]
127
+ if option.do_not_save:
128
+ return False
129
+
130
+ if is_api and option.restrict_api:
131
+ return False
132
+
133
+ try:
134
+ setattr(self, key, value)
135
+ except RuntimeError:
136
+ return False
137
+
138
+ if run_callbacks and option.onchange is not None:
139
+ try:
140
+ option.onchange()
141
+ except Exception as e:
142
+ errors.display(e, f"changing setting {key} to {value}")
143
+ setattr(self, key, oldval)
144
+ return False
145
+
146
+ return True
147
+
148
+ def get_default(self, key):
149
+ """returns the default value for the key"""
150
+
151
+ data_label = self.data_labels.get(key)
152
+ if data_label is None:
153
+ return None
154
+
155
+ return data_label.default
156
+
157
+ def save(self, filename):
158
+ assert not cmd_opts.freeze_settings, "saving settings is disabled"
159
+
160
+ with open(filename, "w", encoding="utf8") as file:
161
+ json.dump(self.data, file, indent=4)
162
+
163
+ def same_type(self, x, y):
164
+ if x is None or y is None:
165
+ return True
166
+
167
+ type_x = self.typemap.get(type(x), type(x))
168
+ type_y = self.typemap.get(type(y), type(y))
169
+
170
+ return type_x == type_y
171
+
172
+ def load(self, filename):
173
+ with open(filename, "r", encoding="utf8") as file:
174
+ self.data = json.load(file)
175
+
176
+ # 1.6.0 VAE defaults
177
+ if self.data.get('sd_vae_as_default') is not None and self.data.get('sd_vae_overrides_per_model_preferences') is None:
178
+ self.data['sd_vae_overrides_per_model_preferences'] = not self.data.get('sd_vae_as_default')
179
+
180
+ # 1.1.1 quicksettings list migration
181
+ if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None:
182
+ self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')]
183
+
184
+ # 1.4.0 ui_reorder
185
+ if isinstance(self.data.get('ui_reorder'), str) and self.data.get('ui_reorder') and "ui_reorder_list" not in self.data:
186
+ self.data['ui_reorder_list'] = [i.strip() for i in self.data.get('ui_reorder').split(',')]
187
+
188
+ bad_settings = 0
189
+ for k, v in self.data.items():
190
+ info = self.data_labels.get(k, None)
191
+ if info is not None and not self.same_type(info.default, v):
192
+ print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr)
193
+ bad_settings += 1
194
+
195
+ if bad_settings > 0:
196
+ print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)
197
+
198
+ def onchange(self, key, func, call=True):
199
+ item = self.data_labels.get(key)
200
+ item.onchange = func
201
+
202
+ if call:
203
+ func()
204
+
205
+ def dumpjson(self):
206
+ d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()}
207
+ d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None}
208
+ d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None}
209
+ return json.dumps(d)
210
+
211
+ def add_option(self, key, info):
212
+ self.data_labels[key] = info
213
+
214
+ def reorder(self):
215
+ """reorder settings so that all items related to section always go together"""
216
+
217
+ section_ids = {}
218
+ settings_items = self.data_labels.items()
219
+ for _, item in settings_items:
220
+ if item.section not in section_ids:
221
+ section_ids[item.section] = len(section_ids)
222
+
223
+ self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section]))
224
+
225
+ def cast_value(self, key, value):
226
+ """casts an arbitrary value to the same type as the value of the setting identified by key
227
+ Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str)
228
+ """
229
+
230
+ if value is None:
231
+ return None
232
+
233
+ default_value = self.data_labels[key].default
234
+ if default_value is None:
235
+ default_value = getattr(self, key, None)
236
+ if default_value is None:
237
+ return None
238
+
239
+ expected_type = type(default_value)
240
+ if expected_type == bool and value == "False":
241
+ value = False
242
+ else:
243
+ value = expected_type(value)
244
+
245
+ return value
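
The important edge case in cast_value is booleans: bool("False") is truthy in Python, so the string "False" has to be special-cased. A stripped-down sketch of the same casting rule (the helper name is ours, not part of the class):

def cast_like(default_value, value):
    expected_type = type(default_value)
    if expected_type == bool and value == "False":
        return False
    return expected_type(value)

print(cast_like(0, "12"), cast_like(True, "False"), cast_like(1.5, "2"))   # 12 False 2.0
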
modules/patches.py ADDED
@@ -0,0 +1,64 @@
1
+ from collections import defaultdict
2
+
3
+
4
+ def patch(key, obj, field, replacement):
5
+ """Replaces a function in a module or a class.
6
+
7
+ Also stores the original function in this module, so that it can be retrieved later via original(key, obj, field).
8
+ If the function is already replaced by this caller (key), an exception is raised -- use undo() before that.
9
+
10
+ Arguments:
11
+ key: identifying information for who is doing the replacement. You can use __name__.
12
+ obj: the module or the class
13
+ field: name of the function as a string
14
+ replacement: the new function
15
+
16
+ Returns:
17
+ the original function
18
+ """
19
+
20
+ patch_key = (obj, field)
21
+ if patch_key in originals[key]:
22
+ raise RuntimeError(f"patch for {field} is already applied")
23
+
24
+ original_func = getattr(obj, field)
25
+ originals[key][patch_key] = original_func
26
+
27
+ setattr(obj, field, replacement)
28
+
29
+ return original_func
30
+
31
+
32
+ def undo(key, obj, field):
33
+ """Undoes the replacement made by patch().
34
+
35
+ If the function is not replaced, raises an exception.
36
+
37
+ Arguments:
38
+ key: identifying information for who is doing the replacement. You can use __name__.
39
+ obj: the module or the class
40
+ field: name of the function as a string
41
+
42
+ Returns:
43
+ Always None
44
+ """
45
+
46
+ patch_key = (obj, field)
47
+
48
+ if patch_key not in originals[key]:
49
+ raise RuntimeError(f"there is no patch for {field} to undo")
50
+
51
+ original_func = originals[key].pop(patch_key)
52
+ setattr(obj, field, original_func)
53
+
54
+ return None
55
+
56
+
57
+ def original(key, obj, field):
58
+ """Returns the original function for the patch created by the patch() function"""
59
+ patch_key = (obj, field)
60
+
61
+ return originals[key].get(patch_key, None)
62
+
63
+
64
+ originals = defaultdict(dict)
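
Typical usage of the helpers above, assuming they are importable as modules.patches (math.sqrt stands in here for whatever function is being patched):

import math
from modules import patches   # assumes the module path used by the webui

original = patches.patch(__name__, math, "sqrt", lambda x: 42)
assert math.sqrt(4) == 42                                   # replacement is active
assert patches.original(__name__, math, "sqrt") is original
patches.undo(__name__, math, "sqrt")
assert math.sqrt(4) == 2.0                                  # original restored
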