ygohel18 committed
Commit f555b43
Parent: 6e284d1
This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .eslintignore +4 -0
  2. .eslintrc.js +91 -0
  3. .git-blame-ignore-revs +2 -0
  4. .gitignore +40 -0
  5. .pylintrc +3 -0
  6. CHANGELOG.md +257 -0
  7. CODEOWNERS +12 -0
  8. LICENSE.txt +663 -0
  9. README.md +227 -0
  10. configs/alt-diffusion-inference.yaml +72 -0
  11. configs/instruct-pix2pix.yaml +98 -0
  12. configs/v1-inference.yaml +70 -0
  13. configs/v1-inpainting-inference.yaml +70 -0
  14. environment-wsl2.yaml +11 -0
  15. extensions-builtin/LDSR/ldsr_model_arch.py +252 -0
  16. extensions-builtin/LDSR/preload.py +6 -0
  17. extensions-builtin/LDSR/scripts/ldsr_model.py +72 -0
  18. extensions-builtin/LDSR/sd_hijack_autoencoder.py +293 -0
  19. extensions-builtin/LDSR/sd_hijack_ddpm_v1.py +1443 -0
  20. extensions-builtin/LDSR/vqvae_quantize.py +147 -0
  21. extensions-builtin/Lora/extra_networks_lora.py +45 -0
  22. extensions-builtin/Lora/lora.py +506 -0
  23. extensions-builtin/Lora/preload.py +6 -0
  24. extensions-builtin/Lora/scripts/lora_script.py +116 -0
  25. extensions-builtin/Lora/ui_extra_networks_lora.py +36 -0
  26. extensions-builtin/ScuNET/preload.py +6 -0
  27. extensions-builtin/ScuNET/scripts/scunet_model.py +148 -0
  28. extensions-builtin/ScuNET/scunet_model_arch.py +268 -0
  29. extensions-builtin/SwinIR/preload.py +6 -0
  30. extensions-builtin/SwinIR/scripts/swinir_model.py +177 -0
  31. extensions-builtin/SwinIR/swinir_model_arch.py +867 -0
  32. extensions-builtin/SwinIR/swinir_model_arch_v2.py +1017 -0
  33. extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +640 -0
  34. extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py +10 -0
  35. extensions-builtin/canvas-zoom-and-pan/style.css +63 -0
  36. extensions-builtin/extra-options-section/scripts/extra_options_section.py +48 -0
  37. extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js +42 -0
  38. extensions-builtin/sd_theme_editor/install.py +1 -0
  39. extensions-builtin/sd_theme_editor/javascript/ui_theme.js +435 -0
  40. extensions-builtin/sd_theme_editor/scripts/ui_theme.py +177 -0
  41. extensions-builtin/sd_theme_editor/style.css +113 -0
  42. extensions-builtin/sd_theme_editor/themes/Golde.css +1 -0
  43. extensions-builtin/sd_theme_editor/themes/backup.css +1 -0
  44. extensions-builtin/sd_theme_editor/themes/d-230-52-94.css +1 -0
  45. extensions-builtin/sd_theme_editor/themes/default.css +1 -0
  46. extensions-builtin/sd_theme_editor/themes/default_cyan.css +1 -0
  47. extensions-builtin/sd_theme_editor/themes/default_orange.css +1 -0
  48. extensions-builtin/sd_theme_editor/themes/fun.css +1 -0
  49. extensions-builtin/sd_theme_editor/themes/minimal.css +1 -0
  50. extensions-builtin/sd_theme_editor/themes/minimal_orange.css +1 -0
.eslintignore ADDED
@@ -0,0 +1,4 @@
+ extensions
+ extensions-disabled
+ repositories
+ venv
.eslintrc.js ADDED
@@ -0,0 +1,91 @@
+ /* global module */
+ module.exports = {
+     env: {
+         browser: true,
+         es2021: true,
+     },
+     extends: "eslint:recommended",
+     parserOptions: {
+         ecmaVersion: "latest",
+     },
+     rules: {
+         "arrow-spacing": "error",
+         "block-spacing": "error",
+         "brace-style": "error",
+         "comma-dangle": ["error", "only-multiline"],
+         "comma-spacing": "error",
+         "comma-style": ["error", "last"],
+         "curly": ["error", "multi-line", "consistent"],
+         "eol-last": "error",
+         "func-call-spacing": "error",
+         "function-call-argument-newline": ["error", "consistent"],
+         "function-paren-newline": ["error", "consistent"],
+         "indent": ["error", 4],
+         "key-spacing": "error",
+         "keyword-spacing": "error",
+         "linebreak-style": ["error", "unix"],
+         "no-extra-semi": "error",
+         "no-mixed-spaces-and-tabs": "error",
+         "no-multi-spaces": "error",
+         "no-redeclare": ["error", {builtinGlobals: false}],
+         "no-trailing-spaces": "error",
+         "no-unused-vars": "off",
+         "no-whitespace-before-property": "error",
+         "object-curly-newline": ["error", {consistent: true, multiline: true}],
+         "object-curly-spacing": ["error", "never"],
+         "operator-linebreak": ["error", "after"],
+         "quote-props": ["error", "consistent-as-needed"],
+         "semi": ["error", "always"],
+         "semi-spacing": "error",
+         "semi-style": ["error", "last"],
+         "space-before-blocks": "error",
+         "space-before-function-paren": ["error", "never"],
+         "space-in-parens": ["error", "never"],
+         "space-infix-ops": "error",
+         "space-unary-ops": "error",
+         "switch-colon-spacing": "error",
+         "template-curly-spacing": ["error", "never"],
+         "unicode-bom": "error",
+     },
+     globals: {
+         //script.js
+         gradioApp: "readonly",
+         executeCallbacks: "readonly",
+         onAfterUiUpdate: "readonly",
+         onOptionsChanged: "readonly",
+         onUiLoaded: "readonly",
+         onUiUpdate: "readonly",
+         uiCurrentTab: "writable",
+         uiElementInSight: "readonly",
+         uiElementIsVisible: "readonly",
+         //ui.js
+         opts: "writable",
+         all_gallery_buttons: "readonly",
+         selected_gallery_button: "readonly",
+         selected_gallery_index: "readonly",
+         switch_to_txt2img: "readonly",
+         switch_to_img2img_tab: "readonly",
+         switch_to_img2img: "readonly",
+         switch_to_sketch: "readonly",
+         switch_to_inpaint: "readonly",
+         switch_to_inpaint_sketch: "readonly",
+         switch_to_extras: "readonly",
+         get_tab_index: "readonly",
+         create_submit_args: "readonly",
+         restart_reload: "readonly",
+         updateInput: "readonly",
+         //extraNetworks.js
+         requestGet: "readonly",
+         popup: "readonly",
+         // from python
+         localization: "readonly",
+         // progressbar.js
+         randomId: "readonly",
+         requestProgress: "readonly",
+         // imageviewer.js
+         modalPrevImage: "readonly",
+         modalNextImage: "readonly",
+         // token-counters.js
+         setupTokenCounters: "readonly",
+     }
+ };
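For orientation (not part of the commit): below is a minimal sketch of JavaScript that passes this configuration — 4-space indentation, mandatory semicolons, no space before a function's parentheses — and that uses two of the globals declared above (`gradioApp`, `onUiLoaded`). The callback name and the `#txt2img_generate` selector are hypothetical illustrations, not code from the repository.

```js
/* global gradioApp, onUiLoaded */

// Hypothetical example: run once the webui frontend has finished loading,
// then query an element through gradioApp(), the declared accessor for the
// app's root node.
function highlightGenerateButton() {
    const button = gradioApp().querySelector("#txt2img_generate");
    if (!button) return;
    button.style.outline = "2px solid orange";
}

onUiLoaded(highlightGenerateButton);
```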
.git-blame-ignore-revs ADDED
@@ -0,0 +1,2 @@
+ # Apply ESLint
+ 9c54b78d9dde5601e916f308d9a9d6953ec39430
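A note on usage (not part of the commit): Git only honors this file when asked, either per invocation with `git blame --ignore-revs-file .git-blame-ignore-revs` or persistently via `git config blame.ignoreRevsFile .git-blame-ignore-revs`, which keeps the mechanical ESLint reformatting commit listed here out of blame output.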
.gitignore ADDED
@@ -0,0 +1,40 @@
+ __pycache__
+ *.ckpt
+ *.safetensors
+ *.pth
+ /ESRGAN/*
+ /SwinIR/*
+ /repositories
+ /venv
+ /tmp
+ /model.ckpt
+ /models/**/*
+ /GFPGANv1.3.pth
+ /gfpgan/weights/*.pth
+ /ui-config.json
+ /outputs
+ /config.json
+ /log
+ /webui.settings.bat
+ /embeddings
+ /styles.csv
+ /params.txt
+ /styles.csv.bak
+ /webui-user.bat
+ /webui-user.sh
+ /interrogate
+ /user.css
+ /.idea
+ notification.mp3
+ /SwinIR
+ /textual_inversion
+ .vscode
+ /extensions
+ /test/stdout.txt
+ /test/stderr.txt
+ /cache.json*
+ /config_states/
+ /node_modules
+ /package-lock.json
+ /.coverage*
+ /venv
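A note on the patterns (not part of the commit): `/models/**/*` ignores everything under models/ at any depth rather than the directory itself, and since .gitignore never affects already-tracked files, placeholder files previously committed under models/ remain tracked. The duplicate `/venv` entries (lines 8 and 40) are redundant but harmless.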
.pylintrc ADDED
@@ -0,0 +1,3 @@
+ # See https://pylint.pycqa.org/en/latest/user_guide/messages/message_control.html
+ [MESSAGES CONTROL]
+ disable=C,R,W,E,I
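A note on the config (not part of the commit): `disable=C,R,W,E,I` turns off pylint's convention (C), refactor (R), warning (W), error (E), and informational (I) message categories wholesale, leaving essentially only fatal (F) messages; per the changelog below, day-to-day Python linting is handled by Ruff instead.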
CHANGELOG.md ADDED
@@ -0,0 +1,257 @@
+ ## 1.4.0
+
+ ### Features:
+ * zoom controls for inpainting
+ * run basic torch calculation at startup in parallel to reduce the performance impact of first generation
+ * option to pad prompt/neg prompt to be same length
+ * remove taming_transformers dependency
+ * custom k-diffusion scheduler settings
+ * add an option to show selected settings in main txt2img/img2img UI
+ * sysinfo tab in settings
+ * infer styles from prompts when pasting params into the UI
+ * an option to control the behavior of the above
+
+ ### Minor:
+ * bump Gradio to 3.32.0
+ * bump xformers to 0.0.20
+ * Add option to disable token counters
+ * tooltip fixes & optimizations
+ * make it possible to configure filename for the zip download
+ * `[vae_filename]` pattern for filenames
+ * Revert discarding penultimate sigma for DPM-Solver++(2M) SDE
+ * change UI reorder setting to multiselect
+ * read version info from CHANGELOG.md if git version info is not available
+ * link footer API to Wiki when API is not active
+ * persistent conds cache (opt-in optimization)
+
+ ### Extensions:
+ * After installing extensions, webui properly restarts the process rather than reloads the UI
+ * Added VAE listing to web API. Via: /sdapi/v1/sd-vae
+ * custom unet support
+ * Add onAfterUiUpdate callback
+ * refactor EmbeddingDatabase.register_embedding() to allow unregistering
+ * add before_process callback for scripts
+ * add ability for alwayson scripts to specify section and let user reorder those sections
+
+ ### Bug Fixes:
+ * Fix dragging text to prompt
+ * fix incorrect quoting for infotext values with colon in them
+ * fix "hires. fix" prompt sharing same labels with txt2img_prompt
+ * Fix s_min_uncond default type int
+ * Fix for #10643 (Inpainting mask sometimes not working)
+ * fix bad styling for thumbs view in extra networks #10639
+ * fix for empty list of optimizations #10605
+ * small fixes to prepare_tcmalloc for Debian/Ubuntu compatibility
+ * fix --ui-debug-mode exit
+ * patch GitPython to not use leaky persistent processes
+ * fix duplicate Cross attention optimization after UI reload
+ * torch.cuda.is_available() check for SdOptimizationXformers
+ * fix hires fix using wrong conds in second pass if using Loras.
+ * handle exception when parsing generation parameters from png info
+ * fix upcast attention dtype error
+ * forcing Torch Version to 1.13.1 for RX 5000 series GPUs
+ * split mask blur into X and Y components, patch Outpainting MK2 accordingly
+ * don't die when a LoRA is a broken symlink
+ * allow activation of Generate Forever during generation
+
+
+ ## 1.3.2
+
+ ### Bug Fixes:
+ * fix files served out of tmp directory even if they are saved to disk
+ * fix postprocessing overwriting parameters
+
+ ## 1.3.1
+
+ ### Features:
+ * revert default cross attention optimization to Doggettx
+
+ ### Bug Fixes:
+ * fix bug: LoRA doesn't apply on dropdown list sd_lora
+ * fix png info always added even if setting is not enabled
+ * fix some fields not applying in xyz plot
+ * fix "hires. fix" prompt sharing same labels with txt2img_prompt
+ * fix lora hashes not being added properly to infotext if there is only one lora
+ * fix --use-cpu failing to work properly at startup
+ * make --disable-opt-split-attention command line option work again
+
+ ## 1.3.0
+
+ ### Features:
+ * add UI to edit defaults
+ * token merging (via dbolya/tomesd)
+ * settings tab rework: add a lot of additional explanations and links
+ * load extensions' Git metadata in parallel to loading the main program to save a ton of time during startup
+ * update extensions table: show branch, show date in separate column, and show version from tags if available
+ * TAESD - another option for cheap live previews
+ * allow choosing sampler and prompts for second pass of hires fix - hidden by default, enabled in settings
+ * calculate hashes for Lora
+ * add lora hashes to infotext
+ * when pasting infotext, use infotext's lora hashes to find local loras for `<lora:xxx:1>` entries whose hashes match loras the user has
+ * select cross attention optimization from UI
+
+ ### Minor:
+ * bump Gradio to 3.31.0
+ * bump PyTorch to 2.0.1 for macOS and Linux AMD
+ * allow setting defaults for elements in extensions' tabs
+ * allow selecting file type for live previews
+ * show "Loading..." for extra networks when displaying for the first time
+ * suppress ENSD infotext for samplers that don't use it
+ * clientside optimizations
+ * add options to show/hide hidden files and dirs in extra networks, and to not list models/files in hidden directories
+ * allow whitespace in styles.csv
+ * add option to reorder tabs
+ * move some functionality (swap resolution and set seed to -1) to client
+ * option to specify editor height for img2img
+ * button to copy image resolution into img2img width/height sliders
+ * switch from pyngrok to ngrok-py
+ * lazy-load images in extra networks UI
+ * set "Navigate image viewer with gamepad" option to false by default, by request
+ * change upscalers to download models into user-specified directory (from commandline args) rather than the default models/<...>
+ * allow hiding buttons in ui-config.json
+
+ ### Extensions:
+ * add /sdapi/v1/script-info api
+ * use Ruff to lint Python code
+ * use ESLint to lint JavaScript code
+ * add/modify CFG callbacks for Self-Attention Guidance extension
+ * add command and endpoint for graceful server stopping
+ * add some locals (prompts/seeds/etc) from processing function into the Processing class as fields
+ * rework quoting for infotext items that have commas in them to use JSON (should be backwards compatible except for cases where it didn't work previously)
+ * add /sdapi/v1/refresh-loras api checkpoint post request
+ * tests overhaul
+
+ ### Bug Fixes:
+ * fix an issue preventing the program from starting if the user specifies a bad Gradio theme
+ * fix broken prompts from file script
+ * fix symlink scanning for extra networks
+ * fix --data-dir ignored when launching via webui-user.bat COMMANDLINE_ARGS
+ * allow web UI to be run fully offline
+ * fix inability to run with --freeze-settings
+ * fix inability to merge checkpoint without adding metadata
+ * fix extra networks' save preview image not adding infotext for jpeg/webm
+ * remove blinking effect from text in hires fix and scale resolution preview
+ * make links to `http://<...>.git` extensions work in the extension tab
+ * fix bug with webui hanging at startup due to hanging git process
+
+
+ ## 1.2.1
+
+ ### Features:
+ * add an option to always refer to LoRA by filenames
+
+ ### Bug Fixes:
+ * never refer to LoRA by an alias if multiple LoRAs have same alias or the alias is called none
+ * fix upscalers disappearing after the user reloads UI
+ * allow bf16 in safe unpickler (resolves problems with loading some LoRAs)
+ * allow web UI to be run fully offline
+ * fix localizations not working
+ * fix error for LoRAs: `'LatentDiffusion' object has no attribute 'lora_layer_mapping'`
+
+ ## 1.2.0
+
+ ### Features:
+ * do not wait for Stable Diffusion model to load at startup
+ * add filename patterns: `[denoising]`
+ * directory hiding for extra networks: dirs starting with `.` will hide their cards on extra network tabs unless specifically searched for
+ * LoRA: for the `<...>` text in prompt, use name of LoRA that is in the metadata of the file, if present, instead of filename (both can be used to activate LoRA)
+ * LoRA: read infotext params from kohya-ss's extension parameters if they are present and if his extension is not active
+ * LoRA: fix some LoRAs not working (ones that have 3x3 convolution layer)
+ * LoRA: add an option to use old method of applying LoRAs (producing same results as with kohya-ss)
+ * add version to infotext, footer and console output when starting
+ * add links to wiki for filename pattern settings
+ * add extended info for quicksettings setting and use multiselect input instead of a text field
+
+ ### Minor:
+ * bump Gradio to 3.29.0
+ * bump PyTorch to 2.0.1
+ * `--subpath` option for gradio for use with reverse proxy
+ * Linux/macOS: use existing virtualenv if already active (the VIRTUAL_ENV environment variable)
+ * do not apply localizations if there are none (possible frontend optimization)
+ * add extra `None` option for VAE in XYZ plot
+ * print error to console when batch processing in img2img fails
+ * create HTML for extra network pages only on demand
+ * allow directories starting with `.` to still list their models for LoRA, checkpoints, etc
+ * put infotext options into their own category in settings tab
+ * do not show licenses page when user selects Show all pages in settings
+
+ ### Extensions:
+ * tooltip localization support
+ * add API method to get LoRA models with prompt
+
+ ### Bug Fixes:
+ * re-add `/docs` endpoint
+ * fix gamepad navigation
+ * make the lightbox fullscreen image function properly
+ * fix squished thumbnails in extras tab
+ * keep "search" filter for extra networks when user refreshes the tab (previously it showed everything after you refreshed)
+ * fix webui showing the same image if you configure the generation to always save results into same file
+ * fix bug with upscalers not working properly
+ * fix MPS on PyTorch 2.0.1, Intel Macs
+ * make it so that custom context menu from contextMenu.js only disappears after user's click, ignoring non-user click events
+ * prevent Reload UI button/link from reloading the page when it's not yet ready
+ * fix prompts from file script failing to read contents from a drag/drop file
+
+
+ ## 1.1.1
+ ### Bug Fixes:
+ * fix an error that prevents running webui on PyTorch<2.0 without --disable-safe-unpickle
+
+ ## 1.1.0
+ ### Features:
+ * switch to PyTorch 2.0.0 (except for AMD GPUs)
+ * visual improvements to custom code scripts
+ * add filename patterns: `[clip_skip]`, `[hasprompt<>]`, `[batch_number]`, `[generation_number]`
+ * add support for saving init images in img2img, and record their hashes in infotext for reproducibility
+ * automatically select current word when adjusting weight with ctrl+up/down
+ * add dropdowns for X/Y/Z plot
+ * add setting: Stable Diffusion/Random number generator source: makes it possible to make images generated from a given manual seed consistent across different GPUs
+ * support Gradio's theme API
+ * use TCMalloc on Linux by default; possible fix for memory leaks
+ * add optimization option to remove negative conditioning at low sigma values #9177
+ * embed model merge metadata in .safetensors file
+ * extension settings backup/restore feature #9169
+ * add "resize by" and "resize to" tabs to img2img
+ * add option "keep original size" to textual inversion images preprocess
+ * image viewer scrolling via analog stick
+ * button to restore the progress from session lost / tab reload
+
+ ### Minor:
+ * bump Gradio to 3.28.1
+ * change "scale to" to sliders in Extras tab
+ * add labels to tool buttons to make it possible to hide them
+ * add tiled inference support for ScuNET
+ * add branch support for extension installation
+ * change Linux installation script to install into current directory rather than `/home/username`
+ * sort textual inversion embeddings by name (case-insensitive)
+ * allow styles.csv to be symlinked or mounted in docker
+ * remove the "do not add watermark to images" option
+ * make selected tab configurable with UI config
+ * make the extra networks UI fixed height and scrollable
+ * add `disable_tls_verify` arg for use with self-signed certs
+
+ ### Extensions:
+ * add reload callback
+ * add `is_hr_pass` field for processing
+
+ ### Bug Fixes:
+ * fix broken batch image processing on 'Extras/Batch Process' tab
+ * add "None" option to extra networks dropdowns
+ * fix FileExistsError for CLIP Interrogator
+ * fix /sdapi/v1/txt2img endpoint not working on Linux #9319
+ * fix disappearing live previews and progressbar during slow tasks
+ * fix fullscreen image view not working properly in some cases
+ * prevent alwayson_scripts args param resizing script_arg list when they are inserted in it
+ * fix prompt schedule for second order samplers
+ * fix image mask/composite for weird resolutions #9628
+ * use correct images for previews when using AND (see #9491)
+ * one broken image in img2img batch won't stop all processing
+ * fix image orientation bug in train/preprocess
+ * fix Ngrok recreating tunnels every reload
+ * fix `--realesrgan-models-path` and `--ldsr-models-path` not working
+ * fix `--skip-install` not working
+ * use SAMPLE file format in Outpainting Mk2 & Poorman
+ * do not fail all LoRAs if some have failed to load when making a picture
+
+ ## 1.0.0
+ * everything
CODEOWNERS ADDED
@@ -0,0 +1,12 @@
+ * @anapnoe
+
+ # if you were managing a localization and were removed from this file, this is because
+ # the intended way to do localizations now is via extensions. See:
+ # https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Developing-extensions
+ # Make a repo with your localization and, since you are still listed as a collaborator,
+ # you can add it to the wiki page yourself. This change is because some people complained
+ # that the git commit log is cluttered with things unrelated to almost everyone and
+ # because I believe this is the best overall way for the project to handle localizations almost
+ # entirely without my oversight.
+
+
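A note on the rule (not part of the commit): in GitHub's CODEOWNERS syntax the single pattern `* @anapnoe` makes @anapnoe the default owner of every path, so pull requests touching any file will request a review from that account.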
LICENSE.txt ADDED
@@ -0,0 +1,663 @@
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+
+ Copyright (c) 2023 AUTOMATIC1111
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU Affero General Public License is a free, copyleft license for
+ software and other kinds of works, specifically designed to ensure
+ cooperation with the community in the case of network server software.
+
+ The licenses for most software and other practical works are designed
+ to take away your freedom to share and change the works. By contrast,
+ our General Public Licenses are intended to guarantee your freedom to
+ share and change all versions of a program--to make sure it remains free
+ software for all its users.
+
+ When we speak of free software, we are referring to freedom, not
+ price. Our General Public Licenses are designed to make sure that you
+ have the freedom to distribute copies of free software (and charge for
+ them if you wish), that you receive source code or can get it if you
+ want it, that you can change the software or use pieces of it in new
+ free programs, and that you know you can do these things.
+
+ Developers that use our General Public Licenses protect your rights
+ with two steps: (1) assert copyright on the software, and (2) offer
+ you this License which gives you legal permission to copy, distribute
+ and/or modify the software.
+
+ A secondary benefit of defending all users' freedom is that
+ improvements made in alternate versions of the program, if they
+ receive widespread use, become available for other developers to
+ incorporate. Many developers of free software are heartened and
+ encouraged by the resulting cooperation. However, in the case of
+ software used on network servers, this result may fail to come about.
+ The GNU General Public License permits making a modified version and
+ letting the public access it on a server without ever releasing its
+ source code to the public.
+
+ The GNU Affero General Public License is designed specifically to
+ ensure that, in such cases, the modified source code becomes available
+ to the community. It requires the operator of a network server to
+ provide the source code of the modified version running there to the
+ users of that server. Therefore, public use of a modified version, on
+ a publicly accessible server, gives the public access to the source
+ code of the modified version.
+
+ An older license, called the Affero General Public License and
+ published by Affero, was designed to accomplish similar goals. This is
+ a different license, not a version of the Affero GPL, but Affero has
+ released a new version of the Affero GPL which permits relicensing under
+ this license.
+
+ The precise terms and conditions for copying, distribution and
+ modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU Affero General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+ works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+ License. Each licensee is addressed as "you". "Licensees" and
+ "recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+ in a fashion requiring copyright permission, other than the making of an
+ exact copy. The resulting work is called a "modified version" of the
+ earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+ on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+ permission, would make you directly or secondarily liable for
+ infringement under applicable copyright law, except executing it on a
+ computer or modifying a private copy. Propagation includes copying,
+ distribution (with or without modification), making available to the
+ public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+ parties to make or receive copies. Mere interaction with a user through
+ a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+ to the extent that it includes a convenient and prominently visible
+ feature that (1) displays an appropriate copyright notice, and (2)
+ tells the user that there is no warranty for the work (except to the
+ extent that warranties are provided), that licensees may convey the
+ work under this License, and how to view a copy of this License. If
+ the interface presents a list of user commands or options, such as a
+ menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+ for making modifications to it. "Object code" means any non-source
+ form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+ standard defined by a recognized standards body, or, in the case of
+ interfaces specified for a particular programming language, one that
+ is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+ than the work as a whole, that (a) is included in the normal form of
+ packaging a Major Component, but which is not part of that Major
+ Component, and (b) serves only to enable use of the work with that
+ Major Component, or to implement a Standard Interface for which an
+ implementation is available to the public in source code form. A
+ "Major Component", in this context, means a major essential component
+ (kernel, window system, and so on) of the specific operating system
+ (if any) on which the executable work runs, or a compiler used to
+ produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+ the source code needed to generate, install, and (for an executable
+ work) run the object code and to modify the work, including scripts to
+ control those activities. However, it does not include the work's
+ System Libraries, or general-purpose tools or generally available free
+ programs which are used unmodified in performing those activities but
+ which are not part of the work. For example, Corresponding Source
+ includes interface definition files associated with source files for
+ the work, and the source code for shared libraries and dynamically
+ linked subprograms that the work is specifically designed to require,
+ such as by intimate data communication or control flow between those
+ subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+ can regenerate automatically from other parts of the Corresponding
+ Source.
+
+ The Corresponding Source for a work in source code form is that
+ same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+ copyright on the Program, and are irrevocable provided the stated
+ conditions are met. This License explicitly affirms your unlimited
+ permission to run the unmodified Program. The output from running a
+ covered work is covered by this License only if the output, given its
+ content, constitutes a covered work. This License acknowledges your
+ rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+ convey, without conditions so long as your license otherwise remains
+ in force. You may convey covered works to others for the sole purpose
+ of having them make modifications exclusively for you, or provide you
+ with facilities for running those works, provided that you comply with
+ the terms of this License in conveying all material for which you do
+ not control copyright. Those thus making or running the covered works
+ for you must do so exclusively on your behalf, under your direction
+ and control, on terms that prohibit them from making any copies of
+ your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+ the conditions stated below. Sublicensing is not allowed; section 10
+ makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+ measure under any applicable law fulfilling obligations under article
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
+ similar laws prohibiting or restricting circumvention of such
+ measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+ circumvention of technological measures to the extent such circumvention
+ is effected by exercising rights under this License with respect to
+ the covered work, and you disclaim any intention to limit operation or
+ modification of the work as a means of enforcing, against the work's
+ users, your or third parties' legal rights to forbid circumvention of
+ technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+ receive it, in any medium, provided that you conspicuously and
+ appropriately publish on each copy an appropriate copyright notice;
+ keep intact all notices stating that this License and any
+ non-permissive terms added in accord with section 7 apply to the code;
+ keep intact all notices of the absence of any warranty; and give all
+ recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+ and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+ produce it from the Program, in the form of source code under the
+ terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+ works, which are not by their nature extensions of the covered work,
+ and which are not combined with it such as to form a larger program,
+ in or on a volume of a storage or distribution medium, is called an
+ "aggregate" if the compilation and its resulting copyright are not
+ used to limit the access or legal rights of the compilation's users
+ beyond what the individual works permit. Inclusion of a covered work
+ in an aggregate does not cause this License to apply to the other
+ parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+ of sections 4 and 5, provided that you also convey the
+ machine-readable Corresponding Source under the terms of this License,
+ in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+ from the Corresponding Source as a System Library, need not be
+ included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+ tangible personal property which is normally used for personal, family,
+ or household purposes, or (2) anything designed or sold for incorporation
+ into a dwelling. In determining whether a product is a consumer product,
+ doubtful cases shall be resolved in favor of coverage. For a particular
+ product received by a particular user, "normally used" refers to a
+ typical or common use of that class of product, regardless of the status
+ of the particular user or of the way in which the particular user
+ actually uses, or expects or is expected to use, the product. A product
+ is a consumer product regardless of whether the product has substantial
+ commercial, industrial or non-consumer uses, unless such uses represent
+ the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+ procedures, authorization keys, or other information required to install
+ and execute modified versions of a covered work in that User Product from
+ a modified version of its Corresponding Source. The information must
+ suffice to ensure that the continued functioning of the modified object
+ code is in no case prevented or interfered with solely because
+ modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+ specifically for use in, a User Product, and the conveying occurs as
+ part of a transaction in which the right of possession and use of the
+ User Product is transferred to the recipient in perpetuity or for a
+ fixed term (regardless of how the transaction is characterized), the
+ Corresponding Source conveyed under this section must be accompanied
+ by the Installation Information. But this requirement does not apply
+ if neither you nor any third party retains the ability to install
+ modified object code on the User Product (for example, the work has
+ been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+ requirement to continue to provide support service, warranty, or updates
+ for a work that has been modified or installed by the recipient, or for
+ the User Product in which it has been modified or installed. Access to a
+ network may be denied when the modification itself materially and
+ adversely affects the operation of the network or violates the rules and
+ protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+ in accord with this section must be in a format that is publicly
+ documented (and with an implementation available to the public in
+ source code form), and must require no special password or key for
+ unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+ License by making exceptions from one or more of its conditions.
+ Additional permissions that are applicable to the entire Program shall
+ be treated as though they were included in this License, to the extent
+ that they are valid under applicable law. If additional permissions
+ apply only to part of the Program, that part may be used separately
+ under those permissions, but the entire Program remains governed by
+ this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+ remove any additional permissions from that copy, or from any part of
+ it. (Additional permissions may be written to require their own
+ removal in certain cases when you modify the work.) You may place
+ additional permissions on material, added by you to a covered work,
+ for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+ add to a covered work, you may (if authorized by the copyright holders of
+ that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+ restrictions" within the meaning of section 10. If the Program as you
+ received it, or any part of it, contains a notice stating that it is
+ governed by this License along with a term that is a further
+ restriction, you may remove that term. If a license document contains
+ a further restriction but permits relicensing or conveying under this
+ License, you may add to a covered work material governed by the terms
+ of that license document, provided that the further restriction does
+ not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+ must place, in the relevant source files, a statement of the
+ additional terms that apply to those files, or a notice indicating
+ where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+ form of a separately written license, or stated as exceptions;
+ the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+ provided under this License. Any attempt otherwise to propagate or
+ modify it is void, and will automatically terminate your rights under
+ this License (including any patent licenses granted under the third
+ paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+ license from a particular copyright holder is reinstated (a)
+ provisionally, unless and until the copyright holder explicitly and
+ finally terminates your license, and (b) permanently, if the copyright
+ holder fails to notify you of the violation by some reasonable means
+ prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+ reinstated permanently if the copyright holder notifies you of the
+ violation by some reasonable means, this is the first time you have
+ received notice of violation of this License (for any work) from that
+ copyright holder, and you cure the violation prior to 30 days after
+ your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+ licenses of parties who have received copies or rights from you under
+ this License. If your rights have been terminated and not permanently
+ reinstated, you do not qualify to receive new licenses for the same
+ material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+ run a copy of the Program. Ancillary propagation of a covered work
+ occurring solely as a consequence of using peer-to-peer transmission
+ to receive a copy likewise does not require acceptance. However,
+ nothing other than this License grants you permission to propagate or
+ modify any covered work. These actions infringe copyright if you do
+ not accept this License. Therefore, by modifying or propagating a
+ covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+ receives a license from the original licensors, to run, modify and
+ propagate that work, subject to this License. You are not responsible
+ for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+ organization, or substantially all assets of one, or subdividing an
+ organization, or merging organizations. If propagation of a covered
+ work results from an entity transaction, each party to that
+ transaction who receives a copy of the work also receives whatever
+ licenses to the work the party's predecessor in interest had or could
+ give under the previous paragraph, plus a right to possession of the
+ Corresponding Source of the work from the predecessor in interest, if
+ the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+ rights granted or affirmed under this License. For example, you may
+ not impose a license fee, royalty, or other charge for exercise of
+ rights granted under this License, and you may not initiate litigation
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
+ any patent claim is infringed by making, using, selling, offering for
+ sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+ License of the Program or a work on which the Program is based. The
+ work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+ owned or controlled by the contributor, whether already acquired or
+ hereafter acquired, that would be infringed by some manner, permitted
+ by this License, of making, using, or selling its contributor version,
+ but do not include claims that would be infringed only as a
+ consequence of further modification of the contributor version. For
+ purposes of this definition, "control" includes the right to grant
+ patent sublicenses in a manner consistent with the requirements of
+ this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+ patent license under the contributor's essential patent claims, to
+ make, use, sell, offer for sale, import and otherwise run, modify and
+ propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+ agreement or commitment, however denominated, not to enforce a patent
+ (such as an express permission to practice a patent or covenant not to
+ sue for patent infringement). To "grant" such a patent license to a
+ party means to make such an agreement or commitment not to enforce a
+ patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+ and the Corresponding Source of the work is not available for anyone
+ to copy, free of charge and under the terms of this License, through a
+ publicly available network server or other readily accessible means,
+ then you must either (1) cause the Corresponding Source to be so
+ available, or (2) arrange to deprive yourself of the benefit of the
+ patent license for this particular work, or (3) arrange, in a manner
+ consistent with the requirements of this License, to extend the patent
+ license to downstream recipients. "Knowingly relying" means you have
+ actual knowledge that, but for the patent license, your conveying the
+ covered work in a country, or your recipient's use of the covered work
+ in a country, would infringe one or more identifiable patents in that
+ country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+ arrangement, you convey, or propagate by procuring conveyance of, a
+ covered work, and grant a patent license to some of the parties
+ receiving the covered work authorizing them to use, propagate, modify
+ or convey a specific copy of the covered work, then the patent license
+ you grant is automatically extended to all recipients of the covered
+ work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+ the scope of its coverage, prohibits the exercise of, or is
+ conditioned on the non-exercise of one or more of the rights that are
+ specifically granted under this License. You may not convey a covered
+ work if you are a party to an arrangement with a third party that is
+ in the business of distributing software, under which you make payment
+ to the third party based on the extent of your activity of conveying
+ the work, and under which the third party grants, to any of the
+ parties who would receive the covered work from you, a discriminatory
+ patent license (a) in connection with copies of the covered work
+ conveyed by you (or copies made from those copies), or (b) primarily
+ for and in connection with specific products or compilations that
+ contain the covered work, unless you entered into that arrangement,
+ or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+ any implied license or other defenses to infringement that may
+ otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+ otherwise) that contradict the conditions of this License, they do not
+ excuse you from the conditions of this License. If you cannot convey a
+ covered work so as to satisfy simultaneously your obligations under this
+ License and any other pertinent obligations, then as a consequence you may
+ not convey it at all. For example, if you agree to terms that obligate you
+ to collect a royalty for further conveying from those to whom you convey
+ the Program, the only way you could satisfy both those terms and this
+ License would be to refrain entirely from conveying the Program.
+
+ 13. Remote Network Interaction; Use with the GNU General Public License.
+
+ Notwithstanding any other provision of this License, if you modify the
+ Program, your modified version must prominently offer all users
+ interacting with it remotely through a computer network (if your version
+ supports such interaction) an opportunity to receive the Corresponding
+ Source of your version by providing access to the Corresponding Source
+ from a network server at no charge, through some standard or customary
+ means of facilitating copying of software. This Corresponding Source
+ shall include the Corresponding Source for any work covered by version 3
+ of the GNU General Public License that is incorporated pursuant to the
+ following paragraph.
+
+ Notwithstanding any other provision of this License, you have
+ permission to link or combine any covered work with a work licensed
+ under version 3 of the GNU General Public License into a single
+ combined work, and to convey the resulting work. The terms of this
+ License will continue to apply to the part which is the covered work,
+ but the work with which it is combined will remain governed by version
+ 3 of the GNU General Public License.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+ the GNU Affero General Public License from time to time. Such new versions
+ will be similar in spirit to the present version, but may differ in detail to
+ address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+ Program specifies that a certain numbered version of the GNU Affero General
+ Public License "or any later version" applies to it, you have the
+ option of following the terms and conditions either of that numbered
+ version or of any later version published by the Free Software
+ Foundation. If the Program does not specify a version number of the
+ GNU Affero General Public License, you may choose any version ever published
+ by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+ versions of the GNU Affero General Public License can be used, that proxy's
+ public statement of acceptance of a version permanently authorizes you
+ to choose that version for the Program.
+
+ Later license versions may give you additional or different
+ permissions. However, no additional obligations are imposed on any
+ author or copyright holder as a result of your choosing to follow a
+ later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+ SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+ above cannot be given local legal effect according to their terms,
+ reviewing courts shall apply local law that most closely approximates
+ an absolute waiver of all civil liability in connection with the
+ Program, unless a warranty or assumption of liability accompanies a
+ copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+ possible use to the public, the best way to achieve this is to make it
+ free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+ to attach them to the start of each source file to most effectively
+ state the exclusion of warranty; and each file should have at least
+ the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU Affero General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU Affero General Public License for more details.
+
+ You should have received a copy of the GNU Affero General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+ Also add information on how to contact you by electronic and paper mail.
+
+ If your software can interact with users remotely through a computer
+ network, you should also make sure that it provides a way for users to
+ get its source. For example, if your program is a web application, its
+ interface could display a "Source" link that leads users to an archive
+ of the code. There are many ways you could offer source, and different
+ solutions will be better for different programs; see section 13 for the
+ specific requirements.
+
+ You should also get your employer (if you work as a programmer) or school,
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
+ For more information on this, and how to apply and follow the GNU AGPL, see
+ <https://www.gnu.org/licenses/>.
654
+ get its source. For example, if your program is a web application, its
655
+ interface could display a "Source" link that leads users to an archive
656
+ of the code. There are many ways you could offer source, and different
657
+ solutions will be better for different programs; see section 13 for the
658
+ specific requirements.
659
+
660
+ You should also get your employer (if you work as a programmer) or school,
661
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
662
+ For more information on this, and how to apply and follow the GNU AGPL, see
663
+ <https://www.gnu.org/licenses/>.
README.md ADDED
@@ -0,0 +1,227 @@
+ # Stable Diffusion web UI-UX
+ Not just a browser interface based on the Gradio library for Stable Diffusion.
+ A pixel-perfect, mobile-friendly, customizable interface that adds accessibility, ease of use and extended functionality to the Stable Diffusion web UI.
+ Enjoy!
+
+ Default theme
+
+ ![anapnoe_uiux](https://user-images.githubusercontent.com/124302297/227973574-6003142d-0c7c-41c6-9966-0792a94549e9.png)
+
+ ## Features of ui-ux
+ - resizable viewport
+ - switchable viewports (double-click the split handler to swap views); option in settings for the default position
+ - mobile navigation
+ - top header tabs (option in settings)
+ - hidden tabs (option in settings); no restart needed, as this is a different implementation
+ - drag-and-drop reorderable quick settings in an off-canvas aside view
+ - drag and drop images into txt2img and img2img to import generation info parameters along with a preview image
+ - ignore/remove overrides on import [multiselect] (option in settings)
+ - resizable cards for extra networks and number of rows (option in settings)
+ - lazy-loading alternative off-canvas aside view for extra networks (option in settings)
+ - live preview image fit method (option in settings)
+ - generated image fit method (option in settings)
+ - max resolution output for txt2img and img2img (option in settings)
+ - performant dispatch for Gradio's range slider and number input field, see https://github.com/gradio-app/gradio/issues/3204 (option in settings); the latest update uses only one cloned instance to mediate the release event
+ - tick marks on input range sliders (option in settings)
+ - pacman preloader with unified colors on UI reload
+ - frame border animation while generating images
+ - progress bar at the top of the page, always visible (on scroll for mobile)
+ - Remix icons
+ - style theme configurator extension to customize every aspect of the theme in real time, with handy global functions to change the hue/saturation/brightness or invert the theme colors
+ - pan and zoom in/out functionality for sketch, inpaint, inpaint sketch
+ - fullscreen support for sketch, inpaint, inpaint sketch
+ - better lightbox with zoom in/out, mobile gesture support, etc.
+
+ ## TODO
+ - small arrows next to the icons for send to inpaint, extras, img2img, etc.
+ - component gallery to navigate to previous generations inside the txt2img and img2img interfaces,
+   and auto-load the current generation settings
+ - credits/about page displaying all 300+ contributors so far inside the UI
+
+ Quick Settings aside off-canvas view - drag and drop to custom sort your settings
+
+ ![anapnoe_uiux_quicksettings](https://user-images.githubusercontent.com/124302297/227967695-f8bb01b5-5cc9-4238-80dd-06e261378d6e.png)
+
+ Extra Networks aside off-canvas view
+
+ ![anapnoe_uiux_extra_networks](https://user-images.githubusercontent.com/124302297/227968001-20eab8f5-da91-4a11-9fe0-230fec4ba720.png)
+
+ Detail img2img sketch view
+
+ ![anapnoe_uiux_sketch](https://user-images.githubusercontent.com/124302297/227973727-084da8e0-931a-4c62-ab73-39e988fd4523.png)
+
+ Theme Configurator - aside off-canvas view
+
+ ![anapnoe_uiux_theme_config](https://user-images.githubusercontent.com/124302297/227967844-45063edb-eb40-4224-9666-f506d21d7780.png)
+
+ Mobile 395px width
+
+ ![anapnoe_uiux_mobile](https://user-images.githubusercontent.com/124302297/227987709-36231d30-e6da-424a-8930-cc0c55a0b979.png)
+
+ ## Features
+ [Detailed feature showcase with images](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features):
+ - Original txt2img and img2img modes
+ - One-click install and run script (but you still must install Python and git)
+ - Outpainting
+ - Inpainting
+ - Color Sketch
+ - Prompt Matrix
+ - Stable Diffusion Upscale
+ - Attention: specify parts of the text that the model should pay more attention to
+   - a man in a `((tuxedo))` - will pay more attention to tuxedo
+   - a man in a `(tuxedo:1.21)` - alternative syntax
+   - select text and press `Ctrl+Up` or `Ctrl+Down` (or `Command+Up` or `Command+Down` on macOS) to automatically adjust attention to the selected text (code contributed by anonymous user)
+ - Loopback, run img2img processing multiple times
+ - X/Y/Z plot, a way to draw a 3-dimensional plot of images with different parameters
+ - Textual Inversion
+   - have as many embeddings as you want and use any names you like for them
+   - use multiple embeddings with different numbers of vectors per token
+   - works with half-precision floating point numbers
+   - train embeddings on 8GB (also reports of 6GB working)
+ - Extras tab with:
+   - GFPGAN, neural network that fixes faces
+   - CodeFormer, face restoration tool as an alternative to GFPGAN
+   - RealESRGAN, neural network upscaler
+   - ESRGAN, neural network upscaler with a lot of third-party models
+   - SwinIR and Swin2SR ([see here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/2092)), neural network upscalers
+   - LDSR, Latent diffusion super resolution upscaling
+ - Resizing aspect ratio options
+ - Sampling method selection
+ - Adjust sampler eta values (noise multiplier)
+ - More advanced noise setting options
+ - Interrupt processing at any time
+ - 4GB video card support (also reports of 2GB working)
+ - Correct seeds for batches
+ - Live prompt token length validation
+ - Generation parameters
+   - parameters you used to generate images are saved with that image
+   - in PNG chunks for PNG, in EXIF for JPEG
+   - can drag the image to the PNG info tab to restore generation parameters and automatically copy them into the UI
+   - can be disabled in settings
+   - drag and drop an image/text-parameters to the prompt box
+ - Read Generation Parameters button, loads parameters in the prompt box to the UI
+ - Settings page
+ - Running arbitrary Python code from the UI (must run with `--allow-code` to enable)
+ - Mouseover hints for most UI elements
+ - Possible to change defaults/min/max/step values for UI elements via text config
+ - Tiling support, a checkbox to create images that can be tiled like textures
+ - Progress bar and live image generation preview
+   - can use a separate neural network to produce previews with almost no VRAM or compute requirements
+ - Negative prompt, an extra text field that allows you to list what you don't want to see in the generated image
+ - Styles, a way to save part of a prompt and easily apply it via dropdown later
+ - Variations, a way to generate the same image but with tiny differences
+ - Seed resizing, a way to generate the same image but at a slightly different resolution
+ - CLIP interrogator, a button that tries to guess the prompt from an image
+ - Prompt Editing, a way to change the prompt mid-generation, say to start making a watermelon and switch to anime girl midway
+ - Batch Processing, process a group of files using img2img
+ - Img2img Alternative, reverse Euler method of cross attention control
+ - Highres Fix, a convenience option to produce high-resolution pictures in one click without the usual distortions
+ - Reloading checkpoints on the fly
+ - Checkpoint Merger, a tab that allows you to merge up to 3 checkpoints into one
+ - [Custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Scripts) with many extensions from the community
+ - [Composable-Diffusion](https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/), a way to use multiple prompts at once
+   - separate prompts using uppercase `AND`
+   - also supports weights for prompts: `a cat :1.2 AND a dog AND a penguin :2.2`
+ - No token limit for prompts (original Stable Diffusion lets you use up to 75 tokens)
+ - DeepDanbooru integration, creates danbooru-style tags for anime prompts
+ - [xformers](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers), major speed increase for select cards (add `--xformers` to commandline args)
+ - via extension: [History tab](https://github.com/yfszzx/stable-diffusion-webui-images-browser): view, direct and delete images conveniently within the UI
+ - Generate forever option
+ - Training tab
+   - hypernetworks and embeddings options
+   - preprocessing images: cropping, mirroring, autotagging using BLIP or deepdanbooru (for anime)
+ - Clip skip
+ - Hypernetworks
+ - Loras (same as Hypernetworks but prettier)
+ - A separate UI where you can choose, with preview, which embeddings, hypernetworks or Loras to add to your prompt
+ - Can select to load a different VAE from the settings screen
+ - Estimated completion time in progress bar
+ - API
+ - Support for dedicated [inpainting model](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion) by RunwayML
+ - via extension: [Aesthetic Gradients](https://github.com/AUTOMATIC1111/stable-diffusion-webui-aesthetic-gradients), a way to generate images with a specific aesthetic by using clip image embeds (implementation of [https://github.com/vicgalle/stable-diffusion-aesthetic-gradients](https://github.com/vicgalle/stable-diffusion-aesthetic-gradients))
+ - [Stable Diffusion 2.0](https://github.com/Stability-AI/stablediffusion) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20) for instructions
+ - [Alt-Diffusion](https://arxiv.org/abs/2211.06679) support - see [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#alt-diffusion) for instructions
+ - Now without any bad letters!
+ - Load checkpoints in safetensors format
+ - Eased resolution restriction: generated images' dimensions must be a multiple of 8 rather than 64
+ - Now with a license!
+ - Reorder elements in the UI from the settings screen
+
+ ## Installation and Running
+ Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
+
+ Alternatively, use online services (like Google Colab):
+
+ - [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services)
+
+ ### Installation on Windows
+ 1. Install [Python 3.10.6](https://www.python.org/downloads/release/python-3106/) (newer versions of Python do not support torch), checking "Add Python to PATH".
+ 2. Install [git](https://git-scm.com/download/win).
+ 3. Download the stable-diffusion-webui-ux repository, for example by running `git clone https://github.com/anapnoe/stable-diffusion-webui-ux.git`.
+ 4. Run `webui-user.bat` from Windows Explorer as a normal, non-administrator user.
+
+ ### Installation on Linux
+ 1. Install the dependencies:
+ ```bash
+ # Debian-based:
+ sudo apt install wget git python3 python3-venv
+ # Red Hat-based:
+ sudo dnf install wget git python3
+ # Arch-based:
+ sudo pacman -S wget git python3
+ ```
+ 2. Navigate to the directory you would like the webui to be installed in and execute the following command:
+ ```bash
+ bash <(wget -qO- https://raw.githubusercontent.com/anapnoe/stable-diffusion-webui-ux/master/webui.sh)
+ ```
+ 3. Run `webui.sh`.
+ 4. Check `webui-user.sh` for options.
+
+ ### Installation on Apple Silicon
+
+ Find the instructions [here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Installation-on-Apple-Silicon)
+ and replace the path in step 3 with `git clone https://github.com/anapnoe/stable-diffusion-webui-ux`.
+
+ ## Contributing
+ Here's how to add code to the original repo: [Contributing](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Contributing)
+
+ ## Documentation
+ The documentation was moved from this README over to the project's [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki).
+
+ ## Credits
+ Licenses for borrowed code can be found in the `Settings -> Licenses` screen, and also in the `html/licenses.html` file.
+
+ - Stable Diffusion - https://github.com/CompVis/stable-diffusion, https://github.com/CompVis/taming-transformers
+ - k-diffusion - https://github.com/crowsonkb/k-diffusion.git
+ - GFPGAN - https://github.com/TencentARC/GFPGAN.git
+ - CodeFormer - https://github.com/sczhou/CodeFormer
+ - ESRGAN - https://github.com/xinntao/ESRGAN
+ - SwinIR - https://github.com/JingyunLiang/SwinIR
+ - Swin2SR - https://github.com/mv-lab/swin2sr
+ - LDSR - https://github.com/Hafiidz/latent-diffusion
+ - MiDaS - https://github.com/isl-org/MiDaS
+ - Ideas for optimizations - https://github.com/basujindal/stable-diffusion
+ - Cross Attention layer optimization - Doggettx - https://github.com/Doggettx/stable-diffusion, original idea for prompt editing.
+ - Cross Attention layer optimization - InvokeAI, lstein - https://github.com/invoke-ai/InvokeAI (originally http://github.com/lstein/stable-diffusion)
+ - Sub-quadratic Cross Attention layer optimization - Alex Birch (https://github.com/Birch-san/diffusers/pull/1), Amin Rezaei (https://github.com/AminRezaei0x443/memory-efficient-attention)
+ - Textual Inversion - Rinon Gal - https://github.com/rinongal/textual_inversion (we're not using his code, but we are using his ideas).
+ - Idea for SD upscale - https://github.com/jquesnelle/txt2imghd
+ - Noise generation for outpainting mk2 - https://github.com/parlance-zz/g-diffuser-bot
+ - CLIP interrogator idea and borrowing some code - https://github.com/pharmapsychotic/clip-interrogator
+ - Idea for Composable Diffusion - https://github.com/energy-based-model/Compositional-Visual-Generation-with-Composable-Diffusion-Models-PyTorch
+ - xformers - https://github.com/facebookresearch/xformers
+ - DeepDanbooru - interrogator for anime diffusers https://github.com/KichangKim/DeepDanbooru
+ - Sampling in float32 precision from a float16 UNet - marunine for the idea, Birch-san for the example Diffusers implementation (https://github.com/Birch-san/diffusers-play/tree/92feee6)
+ - Instruct pix2pix - Tim Brooks (star), Aleksander Holynski (star), Alexei A. Efros (no star) - https://github.com/timothybrooks/instruct-pix2pix
+ - Security advice - RyotaK
+ - UniPC sampler - Wenliang Zhao - https://github.com/wl-zhao/UniPC
+ - TAESD - Ollin Boer Bohan - https://github.com/madebyollin/taesd
+ - Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user.
+ - (You)
configs/alt-diffusion-inference.yaml ADDED
@@ -0,0 +1,72 @@
+ model:
+   base_learning_rate: 1.0e-04
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.0120
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: "jpg"
+     cond_stage_key: "txt"
+     image_size: 64
+     channels: 4
+     cond_stage_trainable: false   # Note: different from the one we trained before
+     conditioning_key: crossattn
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     use_ema: False
+
+     scheduler_config: # 10000 warmup steps
+       target: ldm.lr_scheduler.LambdaLinearScheduler
+       params:
+         warm_up_steps: [ 10000 ]
+         cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+         f_start: [ 1.e-6 ]
+         f_max: [ 1. ]
+         f_min: [ 1. ]
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 32 # unused
+         in_channels: 4
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions: [ 4, 2, 1 ]
+         num_res_blocks: 2
+         channel_mult: [ 1, 2, 4, 4 ]
+         num_heads: 8
+         use_spatial_transformer: True
+         transformer_depth: 1
+         context_dim: 768
+         use_checkpoint: True
+         legacy: False
+
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config:
+       target: modules.xlmr.BertSeriesModelWithTransformation
+       params:
+         name: "XLMR-Large"
configs/instruct-pix2pix.yaml ADDED
@@ -0,0 +1,98 @@
+ # File modified by authors of InstructPix2Pix from original (https://github.com/CompVis/stable-diffusion).
+ # See more details in LICENSE.
+
+ model:
+   base_learning_rate: 1.0e-04
+   target: modules.models.diffusion.ddpm_edit.LatentDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.0120
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: edited
+     cond_stage_key: edit
+     # image_size: 64
+     # image_size: 32
+     image_size: 16
+     channels: 4
+     cond_stage_trainable: false   # Note: different from the one we trained before
+     conditioning_key: hybrid
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     use_ema: false
+
+     scheduler_config: # 10000 warmup steps
+       target: ldm.lr_scheduler.LambdaLinearScheduler
+       params:
+         warm_up_steps: [ 0 ]
+         cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+         f_start: [ 1.e-6 ]
+         f_max: [ 1. ]
+         f_min: [ 1. ]
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 32 # unused
+         in_channels: 8
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions: [ 4, 2, 1 ]
+         num_res_blocks: 2
+         channel_mult: [ 1, 2, 4, 4 ]
+         num_heads: 8
+         use_spatial_transformer: True
+         transformer_depth: 1
+         context_dim: 768
+         use_checkpoint: True
+         legacy: False
+
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
+
+ data:
+   target: main.DataModuleFromConfig
+   params:
+     batch_size: 128
+     num_workers: 1
+     wrap: false
+     validation:
+       target: edit_dataset.EditDataset
+       params:
+         path: data/clip-filtered-dataset
+         cache_dir: data/
+         cache_name: data_10k
+         split: val
+         min_text_sim: 0.2
+         min_image_sim: 0.75
+         min_direction_sim: 0.2
+         max_samples_per_prompt: 1
+         min_resize_res: 512
+         max_resize_res: 512
+         crop_res: 512
+         output_as_edit: False
+         real_input: True
configs/v1-inference.yaml ADDED
@@ -0,0 +1,70 @@
+ model:
+   base_learning_rate: 1.0e-04
+   target: ldm.models.diffusion.ddpm.LatentDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.0120
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: "jpg"
+     cond_stage_key: "txt"
+     image_size: 64
+     channels: 4
+     cond_stage_trainable: false   # Note: different from the one we trained before
+     conditioning_key: crossattn
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     use_ema: False
+
+     scheduler_config: # 10000 warmup steps
+       target: ldm.lr_scheduler.LambdaLinearScheduler
+       params:
+         warm_up_steps: [ 10000 ]
+         cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+         f_start: [ 1.e-6 ]
+         f_max: [ 1. ]
+         f_min: [ 1. ]
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 32 # unused
+         in_channels: 4
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions: [ 4, 2, 1 ]
+         num_res_blocks: 2
+         channel_mult: [ 1, 2, 4, 4 ]
+         num_heads: 8
+         use_spatial_transformer: True
+         transformer_depth: 1
+         context_dim: 768
+         use_checkpoint: True
+         legacy: False
+
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
configs/v1-inpainting-inference.yaml ADDED
@@ -0,0 +1,70 @@
+ model:
+   base_learning_rate: 7.5e-05
+   target: ldm.models.diffusion.ddpm.LatentInpaintDiffusion
+   params:
+     linear_start: 0.00085
+     linear_end: 0.0120
+     num_timesteps_cond: 1
+     log_every_t: 200
+     timesteps: 1000
+     first_stage_key: "jpg"
+     cond_stage_key: "txt"
+     image_size: 64
+     channels: 4
+     cond_stage_trainable: false   # Note: different from the one we trained before
+     conditioning_key: hybrid   # important
+     monitor: val/loss_simple_ema
+     scale_factor: 0.18215
+     finetune_keys: null
+
+     scheduler_config: # 10000 warmup steps
+       target: ldm.lr_scheduler.LambdaLinearScheduler
+       params:
+         warm_up_steps: [ 2500 ] # NOTE for resuming. use 10000 if starting from scratch
+         cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
+         f_start: [ 1.e-6 ]
+         f_max: [ 1. ]
+         f_min: [ 1. ]
+
+     unet_config:
+       target: ldm.modules.diffusionmodules.openaimodel.UNetModel
+       params:
+         image_size: 32 # unused
+         in_channels: 9  # 4 data + 4 downscaled image + 1 mask
+         out_channels: 4
+         model_channels: 320
+         attention_resolutions: [ 4, 2, 1 ]
+         num_res_blocks: 2
+         channel_mult: [ 1, 2, 4, 4 ]
+         num_heads: 8
+         use_spatial_transformer: True
+         transformer_depth: 1
+         context_dim: 768
+         use_checkpoint: True
+         legacy: False
+
+     first_stage_config:
+       target: ldm.models.autoencoder.AutoencoderKL
+       params:
+         embed_dim: 4
+         monitor: val/rec_loss
+         ddconfig:
+           double_z: true
+           z_channels: 4
+           resolution: 256
+           in_channels: 3
+           out_ch: 3
+           ch: 128
+           ch_mult:
+           - 1
+           - 2
+           - 4
+           - 4
+           num_res_blocks: 2
+           attn_resolutions: []
+           dropout: 0.0
+         lossconfig:
+           target: torch.nn.Identity
+
+     cond_stage_config:
+       target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
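The `in_channels: 9` value above reflects the inpainting model's hybrid conditioning: per the config's own comment, the 4-channel noisy latent is concatenated channel-wise with the 4-channel latent of the downscaled/masked image and a 1-channel mask before the first conv. A shapes-only sketch (tensor values and the exact concatenation order are illustrative; the real wiring is done inside `LatentInpaintDiffusion`):

```python
# Shapes-only sketch of why the inpainting UNet takes 9 input channels.
import torch

noisy_latent = torch.randn(1, 4, 64, 64)         # "4 data"
masked_image_latent = torch.randn(1, 4, 64, 64)  # "4 downscaled image"
mask = torch.ones(1, 1, 64, 64)                  # "1 mask"

unet_input = torch.cat([noisy_latent, masked_image_latent, mask], dim=1)
assert unet_input.shape[1] == 9  # matches in_channels: 9 above
```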
environment-wsl2.yaml ADDED
@@ -0,0 +1,11 @@
+ name: automatic
+ channels:
+   - pytorch
+   - defaults
+ dependencies:
+   - python=3.10
+   - pip=23.0
+   - cudatoolkit=11.8
+   - pytorch=2.0
+   - torchvision=0.15
+   - numpy=1.23
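A conda environment spec like this is normally consumed with `conda env create -f environment-wsl2.yaml` followed by `conda activate automatic`; the environment name comes from the `name:` field above.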
extensions-builtin/LDSR/ldsr_model_arch.py ADDED
@@ -0,0 +1,252 @@
+ import os
+ import gc
+ import time
+
+ import numpy as np
+ import torch
+ import torchvision
+ from PIL import Image
+ from einops import rearrange, repeat
+ from omegaconf import OmegaConf
+ import safetensors.torch
+
+ from ldm.models.diffusion.ddim import DDIMSampler
+ from ldm.util import instantiate_from_config, ismap
+ from modules import shared, sd_hijack
+
+ cached_ldsr_model: torch.nn.Module = None
+
+
+ # Create LDSR Class
+ class LDSR:
+     def load_model_from_config(self, half_attention):
+         global cached_ldsr_model
+
+         if shared.opts.ldsr_cached and cached_ldsr_model is not None:
+             print("Loading model from cache")
+             model: torch.nn.Module = cached_ldsr_model
+         else:
+             print(f"Loading model from {self.modelPath}")
+             _, extension = os.path.splitext(self.modelPath)
+             if extension.lower() == ".safetensors":
+                 pl_sd = safetensors.torch.load_file(self.modelPath, device="cpu")
+             else:
+                 pl_sd = torch.load(self.modelPath, map_location="cpu")
+             sd = pl_sd["state_dict"] if "state_dict" in pl_sd else pl_sd
+             config = OmegaConf.load(self.yamlPath)
+             config.model.target = "ldm.models.diffusion.ddpm.LatentDiffusionV1"
+             model: torch.nn.Module = instantiate_from_config(config.model)
+             model.load_state_dict(sd, strict=False)
+             model = model.to(shared.device)
+             if half_attention:
+                 model = model.half()
+             if shared.cmd_opts.opt_channelslast:
+                 model = model.to(memory_format=torch.channels_last)
+
+             sd_hijack.model_hijack.hijack(model)  # apply optimization
+             model.eval()
+
+             if shared.opts.ldsr_cached:
+                 cached_ldsr_model = model
+
+         return {"model": model}
+
+     def __init__(self, model_path, yaml_path):
+         self.modelPath = model_path
+         self.yamlPath = yaml_path
+
+     @staticmethod
+     def run(model, selected_path, custom_steps, eta):
+         example = get_cond(selected_path)
+
+         n_runs = 1
+         guider = None
+         ckwargs = None
+         ddim_use_x0_pred = False
+         temperature = 1.
+         custom_shape = None
+
+         height, width = example["image"].shape[1:3]
+         split_input = height >= 128 and width >= 128
+
+         if split_input:
+             ks = 128
+             stride = 64
+             vqf = 4
+             model.split_input_params = {"ks": (ks, ks), "stride": (stride, stride),
+                                         "vqf": vqf,
+                                         "patch_distributed_vq": True,
+                                         "tie_braker": False,
+                                         "clip_max_weight": 0.5,
+                                         "clip_min_weight": 0.01,
+                                         "clip_max_tie_weight": 0.5,
+                                         "clip_min_tie_weight": 0.01}
+         else:
+             if hasattr(model, "split_input_params"):
+                 delattr(model, "split_input_params")
+
+         x_t = None
+         logs = None
+         for _ in range(n_runs):
+             if custom_shape is not None:
+                 x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)
+                 x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0])
+
+             logs = make_convolutional_sample(example, model,
+                                              custom_steps=custom_steps,
+                                              eta=eta, quantize_x0=False,
+                                              custom_shape=custom_shape,
+                                              temperature=temperature, noise_dropout=0.,
+                                              corrector=guider, corrector_kwargs=ckwargs, x_T=x_t,
+                                              ddim_use_x0_pred=ddim_use_x0_pred
+                                              )
+         return logs
+
+     def super_resolution(self, image, steps=100, target_scale=2, half_attention=False):
+         model = self.load_model_from_config(half_attention)
+
+         # Run settings
+         diffusion_steps = int(steps)
+         eta = 1.0
+
+         gc.collect()
+         if torch.cuda.is_available():
+             torch.cuda.empty_cache()
+
+         im_og = image
+         width_og, height_og = im_og.size
+         # If we can adjust the max upscale size, then the 4 below should be our variable
+         down_sample_rate = target_scale / 4
+         wd = width_og * down_sample_rate
+         hd = height_og * down_sample_rate
+         width_downsampled_pre = int(np.ceil(wd))
+         height_downsampled_pre = int(np.ceil(hd))
+
+         if down_sample_rate != 1:
+             print(
+                 f'Downsampling from [{width_og}, {height_og}] to [{width_downsampled_pre}, {height_downsampled_pre}]')
+             im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS)
+         else:
+             print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)")
+
+         # pad width and height to multiples of 64, pads with the edge values of image to avoid artifacts
+         pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size
+         im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge'))
+
+         logs = self.run(model["model"], im_padded, diffusion_steps, eta)
+
+         sample = logs["sample"]
+         sample = sample.detach().cpu()
+         sample = torch.clamp(sample, -1., 1.)
+         sample = (sample + 1.) / 2. * 255
+         sample = sample.numpy().astype(np.uint8)
+         sample = np.transpose(sample, (0, 2, 3, 1))
+         a = Image.fromarray(sample[0])
+
+         # remove padding
+         a = a.crop((0, 0) + tuple(np.array(im_og.size) * 4))
+
+         del model
+         gc.collect()
+         if torch.cuda.is_available():
+             torch.cuda.empty_cache()
+
+         return a
+
+
+ def get_cond(selected_path):
+     example = {}
+     up_f = 4
+     c = selected_path.convert('RGB')
+     c = torch.unsqueeze(torchvision.transforms.ToTensor()(c), 0)
+     c_up = torchvision.transforms.functional.resize(c, size=[up_f * c.shape[2], up_f * c.shape[3]],
+                                                     antialias=True)
+     c_up = rearrange(c_up, '1 c h w -> 1 h w c')
+     c = rearrange(c, '1 c h w -> 1 h w c')
+     c = 2. * c - 1.
+
+     c = c.to(shared.device)
+     example["LR_image"] = c
+     example["image"] = c_up
+
+     return example
+
+
+ @torch.no_grad()
+ def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_sequence=None,
+                     mask=None, x0=None, quantize_x0=False, temperature=1., score_corrector=None,
+                     corrector_kwargs=None, x_t=None
+                     ):
+     ddim = DDIMSampler(model)
+     bs = shape[0]
+     shape = shape[1:]
+     print(f"Sampling with eta = {eta}; steps: {steps}")
+     samples, intermediates = ddim.sample(steps, batch_size=bs, shape=shape, conditioning=cond, callback=callback,
+                                          normals_sequence=normals_sequence, quantize_x0=quantize_x0, eta=eta,
+                                          mask=mask, x0=x0, temperature=temperature, verbose=False,
+                                          score_corrector=score_corrector,
+                                          corrector_kwargs=corrector_kwargs, x_t=x_t)
+
+     return samples, intermediates
+
+
+ @torch.no_grad()
+ def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize_x0=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None,
+                               corrector_kwargs=None, x_T=None, ddim_use_x0_pred=False):
+     log = {}
+
+     z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key,
+                                         return_first_stage_outputs=True,
+                                         force_c_encode=not (hasattr(model, 'split_input_params')
+                                                             and model.cond_stage_key == 'coordinates_bbox'),
+                                         return_original_cond=True)
+
+     if custom_shape is not None:
+         z = torch.randn(custom_shape)
+         print(f"Generating {custom_shape[0]} samples of shape {custom_shape[1:]}")
+
+     z0 = None
+
+     log["input"] = x
+     log["reconstruction"] = xrec
+
+     if ismap(xc):
+         log["original_conditioning"] = model.to_rgb(xc)
+         if hasattr(model, 'cond_stage_key'):
+             log[model.cond_stage_key] = model.to_rgb(xc)
+     else:
+         log["original_conditioning"] = xc if xc is not None else torch.zeros_like(x)
+         if model.cond_stage_model:
+             log[model.cond_stage_key] = xc if xc is not None else torch.zeros_like(x)
+             if model.cond_stage_key == 'class_label':
+                 log[model.cond_stage_key] = xc[model.cond_stage_key]
+
+     with model.ema_scope("Plotting"):
+         t0 = time.time()
+
+         sample, intermediates = convsample_ddim(model, c, steps=custom_steps, shape=z.shape,
+                                                 eta=eta,
+                                                 quantize_x0=quantize_x0, mask=None, x0=z0,
+                                                 temperature=temperature, score_corrector=corrector, corrector_kwargs=corrector_kwargs,
+                                                 x_t=x_T)
+         t1 = time.time()
+
+         if ddim_use_x0_pred:
+             sample = intermediates['pred_x0'][-1]
+
+     x_sample = model.decode_first_stage(sample)
+
+     try:
+         x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)
+         log["sample_noquant"] = x_sample_noquant
+         log["sample_diff"] = torch.abs(x_sample_noquant - x_sample)
+     except Exception:
+         pass
+
+     log["sample"] = x_sample
+     log["time"] = t1 - t0
+
+     return log
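For orientation, this is roughly how the class above is driven; the model and image paths are illustrative placeholders, and the sketch assumes the webui's `modules.shared` environment is already initialized (inside the web UI, this wiring is done by `scripts/ldsr_model.py` below):

```python
# Hedged usage sketch for the LDSR class above (paths are placeholders).
from PIL import Image

upscaler = LDSR("models/LDSR/model.ckpt", "models/LDSR/project.yaml")
lowres = Image.open("input.png")
# 100 DDIM steps; target_scale is divided by the model's native 4x factor
result = upscaler.super_resolution(lowres, steps=100, target_scale=2)
result.save("upscaled.png")
```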
extensions-builtin/LDSR/preload.py ADDED
@@ -0,0 +1,6 @@
+ import os
+ from modules import paths
+
+
+ def preload(parser):
+     parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(paths.models_path, 'LDSR'))
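The `preload(parser)` hook is how built-in extensions contribute command-line flags before the main parser runs. A self-contained sketch of the same pattern using plain `argparse`, with a hard-coded stand-in for the `paths.models_path` default:

```python
# Self-contained sketch of the preload hook pattern.
# "models/LDSR" stands in for os.path.join(paths.models_path, 'LDSR').
import argparse

def preload(parser):
    parser.add_argument("--ldsr-models-path", type=str,
                        help="Path to directory with LDSR model file(s).",
                        default="models/LDSR")

parser = argparse.ArgumentParser()
preload(parser)  # each extension's preload() is handed the shared parser
args = parser.parse_args([])
print(args.ldsr_models_path)  # -> models/LDSR
```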
extensions-builtin/LDSR/scripts/ldsr_model.py ADDED
@@ -0,0 +1,72 @@
+ import os
+
+ from basicsr.utils.download_util import load_file_from_url
+
+ from modules.upscaler import Upscaler, UpscalerData
+ from ldsr_model_arch import LDSR
+ from modules import shared, script_callbacks, errors
+ import sd_hijack_autoencoder  # noqa: F401
+ import sd_hijack_ddpm_v1  # noqa: F401
+
+
+ class UpscalerLDSR(Upscaler):
+     def __init__(self, user_path):
+         self.name = "LDSR"
+         self.user_path = user_path
+         self.model_url = "https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1"
+         self.yaml_url = "https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1"
+         super().__init__()
+         scaler_data = UpscalerData("LDSR", None, self)
+         self.scalers = [scaler_data]
+
+     def load_model(self, path: str):
+         # Remove incorrect project.yaml file if too big
+         yaml_path = os.path.join(self.model_path, "project.yaml")
+         old_model_path = os.path.join(self.model_path, "model.pth")
+         new_model_path = os.path.join(self.model_path, "model.ckpt")
+
+         local_model_paths = self.find_models(ext_filter=[".ckpt", ".safetensors"])
+         local_ckpt_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.ckpt")]), None)
+         local_safetensors_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("model.safetensors")]), None)
+         local_yaml_path = next(iter([local_model for local_model in local_model_paths if local_model.endswith("project.yaml")]), None)
+
+         if os.path.exists(yaml_path):
+             statinfo = os.stat(yaml_path)
+             if statinfo.st_size >= 10485760:
+                 print("Removing invalid LDSR YAML file.")
+                 os.remove(yaml_path)
+
+         if os.path.exists(old_model_path):
+             print("Renaming model from model.pth to model.ckpt")
+             os.rename(old_model_path, new_model_path)
+
+         if local_safetensors_path is not None and os.path.exists(local_safetensors_path):
+             model = local_safetensors_path
+         else:
+             model = local_ckpt_path if local_ckpt_path is not None else load_file_from_url(url=self.model_url, model_dir=self.model_download_path, file_name="model.ckpt", progress=True)
+
+         yaml = local_yaml_path if local_yaml_path is not None else load_file_from_url(url=self.yaml_url, model_dir=self.model_download_path, file_name="project.yaml", progress=True)
+
+         try:
+             return LDSR(model, yaml)
+         except Exception:
+             errors.report("Error importing LDSR", exc_info=True)
+             return None
+
+     def do_upscale(self, img, path):
+         ldsr = self.load_model(path)
+         if ldsr is None:
+             print("NO LDSR!")
+             return img
+         ddim_steps = shared.opts.ldsr_steps
+         return ldsr.super_resolution(img, ddim_steps, self.scale)
+
+
+ def on_ui_settings():
+     import gradio as gr
+
+     shared.opts.add_option("ldsr_steps", shared.OptionInfo(100, "LDSR processing steps. Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}, section=('upscaling', "Upscaling")))
+     shared.opts.add_option("ldsr_cached", shared.OptionInfo(False, "Cache LDSR model in memory", gr.Checkbox, {"interactive": True}, section=('upscaling', "Upscaling")))
+
+
+ script_callbacks.on_ui_settings(on_ui_settings)
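`load_model` above resolves local files with the `next(iter([...]), None)` idiom, which yields the first matching element or `None` when nothing matches. A tiny standalone illustration with made-up paths:

```python
# First list element matching a predicate, or None if there is no match;
# the same idiom load_model uses to pick up local checkpoint/yaml files.
paths = ["models/LDSR/project.yaml", "models/LDSR/model.ckpt"]
first_ckpt = next(iter([p for p in paths if p.endswith("model.ckpt")]), None)
assert first_ckpt == "models/LDSR/model.ckpt"
missing = next(iter([p for p in paths if p.endswith(".safetensors")]), None)
assert missing is None
```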
extensions-builtin/LDSR/sd_hijack_autoencoder.py ADDED
@@ -0,0 +1,293 @@
+ # The content of this file comes from the ldm/models/autoencoder.py file of the compvis/stable-diffusion repo
+ # The VQModel & VQModelInterface were subsequently removed from ldm/models/autoencoder.py when we moved to the stability-ai/stablediffusion repo
+ # As the LDSR upscaler relies on VQModel & VQModelInterface, the hijack aims to put them back into ldm.models.autoencoder
+ import numpy as np
+ import torch
+ import pytorch_lightning as pl
+ import torch.nn.functional as F
+ from contextlib import contextmanager
+
+ from torch.optim.lr_scheduler import LambdaLR
+
+ from ldm.modules.ema import LitEma
+ from vqvae_quantize import VectorQuantizer2 as VectorQuantizer
+ from ldm.modules.diffusionmodules.model import Encoder, Decoder
+ from ldm.util import instantiate_from_config
+
+ import ldm.models.autoencoder
+ from packaging import version
+
+
+ class VQModel(pl.LightningModule):
+     def __init__(self,
+                  ddconfig,
+                  lossconfig,
+                  n_embed,
+                  embed_dim,
+                  ckpt_path=None,
+                  ignore_keys=None,
+                  image_key="image",
+                  colorize_nlabels=None,
+                  monitor=None,
+                  batch_resize_range=None,
+                  scheduler_config=None,
+                  lr_g_factor=1.0,
+                  remap=None,
+                  sane_index_shape=False,  # tell vector quantizer to return indices as bhw
+                  use_ema=False
+                  ):
+         super().__init__()
+         self.embed_dim = embed_dim
+         self.n_embed = n_embed
+         self.image_key = image_key
+         self.encoder = Encoder(**ddconfig)
+         self.decoder = Decoder(**ddconfig)
+         self.loss = instantiate_from_config(lossconfig)
+         self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
+                                         remap=remap,
+                                         sane_index_shape=sane_index_shape)
+         self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
+         self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
+         if colorize_nlabels is not None:
+             assert type(colorize_nlabels) == int
+             self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
+         if monitor is not None:
+             self.monitor = monitor
+         self.batch_resize_range = batch_resize_range
+         if self.batch_resize_range is not None:
+             print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.")
+
+         self.use_ema = use_ema
+         if self.use_ema:
+             self.model_ema = LitEma(self)
+             print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
+
+         if ckpt_path is not None:
+             self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [])
+         self.scheduler_config = scheduler_config
+         self.lr_g_factor = lr_g_factor
+
+     @contextmanager
+     def ema_scope(self, context=None):
+         if self.use_ema:
+             self.model_ema.store(self.parameters())
+             self.model_ema.copy_to(self)
+             if context is not None:
+                 print(f"{context}: Switched to EMA weights")
+         try:
+             yield None
+         finally:
+             if self.use_ema:
+                 self.model_ema.restore(self.parameters())
+                 if context is not None:
+                     print(f"{context}: Restored training weights")
+
+     def init_from_ckpt(self, path, ignore_keys=None):
+         sd = torch.load(path, map_location="cpu")["state_dict"]
+         keys = list(sd.keys())
+         for k in keys:
+             for ik in ignore_keys or []:
+                 if k.startswith(ik):
+                     print("Deleting key {} from state_dict.".format(k))
+                     del sd[k]
+         missing, unexpected = self.load_state_dict(sd, strict=False)
+         print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
+         if missing:
+             print(f"Missing Keys: {missing}")
+         if unexpected:
+             print(f"Unexpected Keys: {unexpected}")
+
+     def on_train_batch_end(self, *args, **kwargs):
+         if self.use_ema:
+             self.model_ema(self)
+
+     def encode(self, x):
+         h = self.encoder(x)
+         h = self.quant_conv(h)
+         quant, emb_loss, info = self.quantize(h)
+         return quant, emb_loss, info
+
+     def encode_to_prequant(self, x):
+         h = self.encoder(x)
+         h = self.quant_conv(h)
+         return h
+
+     def decode(self, quant):
+         quant = self.post_quant_conv(quant)
+         dec = self.decoder(quant)
+         return dec
+
+     def decode_code(self, code_b):
+         quant_b = self.quantize.embed_code(code_b)
+         dec = self.decode(quant_b)
+         return dec
+
+     def forward(self, input, return_pred_indices=False):
+         quant, diff, (_, _, ind) = self.encode(input)
+         dec = self.decode(quant)
+         if return_pred_indices:
+             return dec, diff, ind
+         return dec, diff
+
+     def get_input(self, batch, k):
+         x = batch[k]
+         if len(x.shape) == 3:
+             x = x[..., None]
+         x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
+         if self.batch_resize_range is not None:
+             lower_size = self.batch_resize_range[0]
+             upper_size = self.batch_resize_range[1]
+             if self.global_step <= 4:
+                 # do the first few batches with max size to avoid later oom
+                 new_resize = upper_size
+             else:
+                 new_resize = np.random.choice(np.arange(lower_size, upper_size + 16, 16))
+             if new_resize != x.shape[2]:
+                 x = F.interpolate(x, size=new_resize, mode="bicubic")
+             x = x.detach()
+         return x
+
+     def training_step(self, batch, batch_idx, optimizer_idx):
+         # https://github.com/pytorch/pytorch/issues/37142
+         # try not to fool the heuristics
+         x = self.get_input(batch, self.image_key)
+         xrec, qloss, ind = self(x, return_pred_indices=True)
+
+         if optimizer_idx == 0:
+             # autoencode
+             aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
+                                             last_layer=self.get_last_layer(), split="train",
+                                             predicted_indices=ind)
+
+             self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
+             return aeloss
+
+         if optimizer_idx == 1:
+             # discriminator
+             discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
+                                                 last_layer=self.get_last_layer(), split="train")
+             self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
+             return discloss
+
+     def validation_step(self, batch, batch_idx):
+         log_dict = self._validation_step(batch, batch_idx)
+         with self.ema_scope():
+             self._validation_step(batch, batch_idx, suffix="_ema")
+         return log_dict
+
+     def _validation_step(self, batch, batch_idx, suffix=""):
+         x = self.get_input(batch, self.image_key)
+         xrec, qloss, ind = self(x, return_pred_indices=True)
+         aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0,
+                                         self.global_step,
+                                         last_layer=self.get_last_layer(),
+                                         split="val" + suffix,
+                                         predicted_indices=ind
+                                         )
+
+         discloss, log_dict_disc = self.loss(qloss, x, xrec, 1,
+                                             self.global_step,
+                                             last_layer=self.get_last_layer(),
+                                             split="val" + suffix,
+                                             predicted_indices=ind
+                                             )
+         rec_loss = log_dict_ae[f"val{suffix}/rec_loss"]
+         self.log(f"val{suffix}/rec_loss", rec_loss,
+                  prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
+         self.log(f"val{suffix}/aeloss", aeloss,
+                  prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
+         if version.parse(pl.__version__) >= version.parse('1.4.0'):
+             del log_dict_ae[f"val{suffix}/rec_loss"]
+         self.log_dict(log_dict_ae)
+         self.log_dict(log_dict_disc)
+         return self.log_dict
+
+     def configure_optimizers(self):
+         lr_d = self.learning_rate
+         lr_g = self.lr_g_factor * self.learning_rate
+         print("lr_d", lr_d)
+         print("lr_g", lr_g)
+         opt_ae = torch.optim.Adam(list(self.encoder.parameters()) +
+                                   list(self.decoder.parameters()) +
+                                   list(self.quantize.parameters()) +
+                                   list(self.quant_conv.parameters()) +
+                                   list(self.post_quant_conv.parameters()),
+                                   lr=lr_g, betas=(0.5, 0.9))
+         opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
+                                     lr=lr_d, betas=(0.5, 0.9))
+
+         if self.scheduler_config is not None:
+             scheduler = instantiate_from_config(self.scheduler_config)
+
+             print("Setting up LambdaLR scheduler...")
+             scheduler = [
+                 {
+                     'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule),
+                     'interval': 'step',
+                     'frequency': 1
+                 },
+                 {
+                     'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule),
+                     'interval': 'step',
+                     'frequency': 1
+                 },
+             ]
+             return [opt_ae, opt_disc], scheduler
+         return [opt_ae, opt_disc], []
+
+     def get_last_layer(self):
+         return self.decoder.conv_out.weight
+
+     def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
+         log = {}
+         x = self.get_input(batch, self.image_key)
+         x = x.to(self.device)
+         if only_inputs:
+             log["inputs"] = x
+             return log
+         xrec, _ = self(x)
+         if x.shape[1] > 3:
+             # colorize with random projection
+             assert xrec.shape[1] > 3
+             x = self.to_rgb(x)
+             xrec = self.to_rgb(xrec)
+         log["inputs"] = x
+         log["reconstructions"] = xrec
+         if plot_ema:
+             with self.ema_scope():
+                 xrec_ema, _ = self(x)
+                 if x.shape[1] > 3:
+                     xrec_ema = self.to_rgb(xrec_ema)
+                 log["reconstructions_ema"] = xrec_ema
+         return log
+
+     def to_rgb(self, x):
+         assert self.image_key == "segmentation"
+         if not hasattr(self, "colorize"):
+             self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
+         x = F.conv2d(x, weight=self.colorize)
+         x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
+         return x
+
+
+ class VQModelInterface(VQModel):
+     def __init__(self, embed_dim, *args, **kwargs):
+         super().__init__(*args, embed_dim=embed_dim, **kwargs)
+         self.embed_dim = embed_dim
+
+     def encode(self, x):
+         h = self.encoder(x)
+         h = self.quant_conv(h)
+         return h
+
+     def decode(self, h, force_not_quantize=False):
+         # also go through quantization layer
+         if not force_not_quantize:
+             quant, emb_loss, info = self.quantize(h)
+         else:
+             quant = h
+         quant = self.post_quant_conv(quant)
+         dec = self.decoder(quant)
+         return dec
+
+
+ # The actual hijack: reattach the classes to the already-imported module
+ ldm.models.autoencoder.VQModel = VQModel
+ ldm.models.autoencoder.VQModelInterface = VQModelInterface
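The last two lines are the whole point of the file: the classes defined here are grafted onto the already-imported `ldm.models.autoencoder` module so that later lookups of `VQModel`/`VQModelInterface` resolve to these reinstated versions. A self-contained sketch of that monkey-patching pattern, with illustrative module and class names:

```python
# Illustrative sketch of the module-attribute hijack used above.
import types

host = types.ModuleType("host_module")  # stands in for ldm.models.autoencoder

class Old:
    pass

host.VQModel = Old  # the attribute as it existed at import time

class New:
    pass

# The hijack: rebinding the module attribute means every later lookup of
# host.VQModel resolves to New; code that bound the old object earlier
# (e.g. via an earlier `from ... import VQModel`) keeps its reference to Old.
host.VQModel = New
assert host.VQModel is New
```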
extensions-builtin/LDSR/sd_hijack_ddpm_v1.py ADDED
@@ -0,0 +1,1443 @@
+ # This script is copied from the compvis/stable-diffusion repo (aka the SD V1 repo)
+ # Original filename: ldm/models/diffusion/ddpm.py
+ # The purpose is to reinstate the old DDPM logic, which works with VQ, whereas the V2 one doesn't
+ # Some models such as LDSR require VQ to work correctly
+ # The classes are suffixed with "V1" and added back to the "ldm.models.diffusion.ddpm" module
+
+ import torch
+ import torch.nn as nn
+ import numpy as np
+ import pytorch_lightning as pl
+ from torch.optim.lr_scheduler import LambdaLR
+ from einops import rearrange, repeat
+ from contextlib import contextmanager
+ from functools import partial
+ from tqdm import tqdm
+ from torchvision.utils import make_grid
+ from pytorch_lightning.utilities.distributed import rank_zero_only
+
+ from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
+ from ldm.modules.ema import LitEma
+ from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
+ from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
+ from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
+ from ldm.models.diffusion.ddim import DDIMSampler
+
+ import ldm.models.diffusion.ddpm
+
+ __conditioning_keys__ = {'concat': 'c_concat',
+                          'crossattn': 'c_crossattn',
+                          'adm': 'y'}
+
+
+ def disabled_train(self, mode=True):
+     """Overwrite model.train with this function to make sure train/eval mode
+     does not change anymore."""
+     return self
+
+
+ def uniform_on_device(r1, r2, shape, device):
+     return (r1 - r2) * torch.rand(*shape, device=device) + r2
+
+
+ class DDPMV1(pl.LightningModule):
+     # classic DDPM with Gaussian diffusion, in image space
+     def __init__(self,
+                  unet_config,
+                  timesteps=1000,
+                  beta_schedule="linear",
+                  loss_type="l2",
+                  ckpt_path=None,
+                  ignore_keys=None,
+                  load_only_unet=False,
+                  monitor="val/loss",
+                  use_ema=True,
+                  first_stage_key="image",
+                  image_size=256,
+                  channels=3,
+                  log_every_t=100,
+                  clip_denoised=True,
+                  linear_start=1e-4,
+                  linear_end=2e-2,
+                  cosine_s=8e-3,
+                  given_betas=None,
+                  original_elbo_weight=0.,
+                  v_posterior=0.,  # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
+                  l_simple_weight=1.,
+                  conditioning_key=None,
+                  parameterization="eps",  # all assuming fixed variance schedules
+                  scheduler_config=None,
+                  use_positional_encodings=False,
+                  learn_logvar=False,
+                  logvar_init=0.,
+                  ):
+         super().__init__()
+         assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"'
+         self.parameterization = parameterization
+         print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
+         self.cond_stage_model = None
+         self.clip_denoised = clip_denoised
+         self.log_every_t = log_every_t
+         self.first_stage_key = first_stage_key
+         self.image_size = image_size  # try conv?
+         self.channels = channels
+         self.use_positional_encodings = use_positional_encodings
+         self.model = DiffusionWrapperV1(unet_config, conditioning_key)
+         count_params(self.model, verbose=True)
+         self.use_ema = use_ema
+         if self.use_ema:
+             self.model_ema = LitEma(self.model)
+             print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
+
+         self.use_scheduler = scheduler_config is not None
+         if self.use_scheduler:
+             self.scheduler_config = scheduler_config
+
+         self.v_posterior = v_posterior
+         self.original_elbo_weight = original_elbo_weight
+         self.l_simple_weight = l_simple_weight
+
+         if monitor is not None:
+             self.monitor = monitor
+         if ckpt_path is not None:
+             self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [], only_model=load_only_unet)
+
+         self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
+                                linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
+
+         self.loss_type = loss_type
+
+         self.learn_logvar = learn_logvar
+         self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
+         if self.learn_logvar:
+             self.logvar = nn.Parameter(self.logvar, requires_grad=True)
+
+     def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
+                           linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
+         if exists(given_betas):
+             betas = given_betas
+         else:
+             betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
121
+ betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
122
+ cosine_s=cosine_s)
123
+ alphas = 1. - betas
124
+ alphas_cumprod = np.cumprod(alphas, axis=0)
125
+ alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
126
+
127
+ timesteps, = betas.shape
128
+ self.num_timesteps = int(timesteps)
129
+ self.linear_start = linear_start
130
+ self.linear_end = linear_end
131
+ assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
132
+
133
+ to_torch = partial(torch.tensor, dtype=torch.float32)
134
+
135
+ self.register_buffer('betas', to_torch(betas))
136
+ self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
137
+ self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
138
+
139
+ # calculations for diffusion q(x_t | x_{t-1}) and others
140
+ self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
141
+ self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
142
+ self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
143
+ self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
144
+ self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
145
+
146
+ # calculations for posterior q(x_{t-1} | x_t, x_0)
147
+ posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
148
+ 1. - alphas_cumprod) + self.v_posterior * betas
149
+ # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
150
+ self.register_buffer('posterior_variance', to_torch(posterior_variance))
151
+ # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
152
+ self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
153
+ self.register_buffer('posterior_mean_coef1', to_torch(
154
+ betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
155
+ self.register_buffer('posterior_mean_coef2', to_torch(
156
+ (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
157
+
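+ # ELBO weights per timestep; for "eps" this is beta_t^2 / (2 * sigma_t^2 * alpha_t * (1 - alphabar_t)),
+ # with sigma_t^2 = posterior_variance, matching the computation below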
158
+ if self.parameterization == "eps":
159
+ lvlb_weights = self.betas ** 2 / (
160
+ 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
161
+ elif self.parameterization == "x0":
162
+ lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
163
+ else:
164
+ raise NotImplementedError("mu not supported")
165
+ # TODO how to choose this term
166
+ lvlb_weights[0] = lvlb_weights[1]
167
+ self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
168
+ assert not torch.isnan(self.lvlb_weights).any()  # no weight may be NaN
169
+
170
+ @contextmanager
171
+ def ema_scope(self, context=None):
172
+ if self.use_ema:
173
+ self.model_ema.store(self.model.parameters())
174
+ self.model_ema.copy_to(self.model)
175
+ if context is not None:
176
+ print(f"{context}: Switched to EMA weights")
177
+ try:
178
+ yield None
179
+ finally:
180
+ if self.use_ema:
181
+ self.model_ema.restore(self.model.parameters())
182
+ if context is not None:
183
+ print(f"{context}: Restored training weights")
184
+
185
+ def init_from_ckpt(self, path, ignore_keys=None, only_model=False):
186
+ sd = torch.load(path, map_location="cpu")
187
+ if "state_dict" in list(sd.keys()):
188
+ sd = sd["state_dict"]
189
+ keys = list(sd.keys())
190
+ for k in keys:
191
+ for ik in ignore_keys or []:
192
+ if k.startswith(ik):
193
+ print("Deleting key {} from state_dict.".format(k))
194
+ del sd[k]
195
+ missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
196
+ sd, strict=False)
197
+ print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
198
+ if missing:
199
+ print(f"Missing Keys: {missing}")
200
+ if unexpected:
201
+ print(f"Unexpected Keys: {unexpected}")
202
+
203
+ def q_mean_variance(self, x_start, t):
204
+ """
205
+ Get the distribution q(x_t | x_0).
206
+ :param x_start: the [N x C x ...] tensor of noiseless inputs.
207
+ :param t: the number of diffusion steps (minus 1). Here, 0 means one step.
208
+ :return: A tuple (mean, variance, log_variance), all of x_start's shape.
209
+ """
210
+ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
211
+ variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
212
+ log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
213
+ return mean, variance, log_variance
214
+
215
+ def predict_start_from_noise(self, x_t, t, noise):
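+ # x0_hat = sqrt(1/alphabar_t) * x_t - sqrt(1/alphabar_t - 1) * noise,
+ # i.e. the closed-form forward process q(x_t | x_0) solved for x_0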
216
+ return (
217
+ extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
218
+ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
219
+ )
220
+
221
+ def q_posterior(self, x_start, x_t, t):
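+ # posterior q(x_{t-1} | x_t, x_0) = N(coef1 * x_0 + coef2 * x_t, posterior_variance), with
+ # coef1 = beta_t * sqrt(alphabar_{t-1}) / (1 - alphabar_t) and
+ # coef2 = (1 - alphabar_{t-1}) * sqrt(alpha_t) / (1 - alphabar_t), as registered above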
222
+ posterior_mean = (
223
+ extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
224
+ extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
225
+ )
226
+ posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
227
+ posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
228
+ return posterior_mean, posterior_variance, posterior_log_variance_clipped
229
+
230
+ def p_mean_variance(self, x, t, clip_denoised: bool):
231
+ model_out = self.model(x, t)
232
+ if self.parameterization == "eps":
233
+ x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
234
+ elif self.parameterization == "x0":
235
+ x_recon = model_out
236
+ if clip_denoised:
237
+ x_recon.clamp_(-1., 1.)
238
+
239
+ model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
240
+ return model_mean, posterior_variance, posterior_log_variance
241
+
242
+ @torch.no_grad()
243
+ def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
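+ # single ancestral sampling step: x_{t-1} = model_mean + 1[t > 0] * exp(0.5 * log_var) * noise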
244
+ b, *_, device = *x.shape, x.device
245
+ model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
246
+ noise = noise_like(x.shape, device, repeat_noise)
247
+ # no noise when t == 0
248
+ nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
249
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
250
+
251
+ @torch.no_grad()
252
+ def p_sample_loop(self, shape, return_intermediates=False):
253
+ device = self.betas.device
254
+ b = shape[0]
255
+ img = torch.randn(shape, device=device)
256
+ intermediates = [img]
257
+ for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
258
+ img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
259
+ clip_denoised=self.clip_denoised)
260
+ if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
261
+ intermediates.append(img)
262
+ if return_intermediates:
263
+ return img, intermediates
264
+ return img
265
+
266
+ @torch.no_grad()
267
+ def sample(self, batch_size=16, return_intermediates=False):
268
+ image_size = self.image_size
269
+ channels = self.channels
270
+ return self.p_sample_loop((batch_size, channels, image_size, image_size),
271
+ return_intermediates=return_intermediates)
272
+
273
+ def q_sample(self, x_start, t, noise=None):
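+ # closed-form forward process: x_t = sqrt(alphabar_t) * x_0 + sqrt(1 - alphabar_t) * noise, noise ~ N(0, I)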
274
+ noise = default(noise, lambda: torch.randn_like(x_start))
275
+ return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
276
+ extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
277
+
278
+ def get_loss(self, pred, target, mean=True):
279
+ if self.loss_type == 'l1':
280
+ loss = (target - pred).abs()
281
+ if mean:
282
+ loss = loss.mean()
283
+ elif self.loss_type == 'l2':
284
+ if mean:
285
+ loss = torch.nn.functional.mse_loss(target, pred)
286
+ else:
287
+ loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
288
+ else:
289
+ raise NotImplementedError("unknown loss type '{loss_type}'")
290
+
291
+ return loss
292
+
293
+ def p_losses(self, x_start, t, noise=None):
294
+ noise = default(noise, lambda: torch.randn_like(x_start))
295
+ x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
296
+ model_out = self.model(x_noisy, t)
297
+
298
+ loss_dict = {}
299
+ if self.parameterization == "eps":
300
+ target = noise
301
+ elif self.parameterization == "x0":
302
+ target = x_start
303
+ else:
304
+ raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported")
305
+
306
+ loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
307
+
308
+ log_prefix = 'train' if self.training else 'val'
309
+
310
+ loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
311
+ loss_simple = loss.mean() * self.l_simple_weight
312
+
313
+ loss_vlb = (self.lvlb_weights[t] * loss).mean()
314
+ loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})
315
+
316
+ loss = loss_simple + self.original_elbo_weight * loss_vlb
317
+
318
+ loss_dict.update({f'{log_prefix}/loss': loss})
319
+
320
+ return loss, loss_dict
321
+
322
+ def forward(self, x, *args, **kwargs):
323
+ # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
324
+ # assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
325
+ t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
326
+ return self.p_losses(x, t, *args, **kwargs)
327
+
328
+ def get_input(self, batch, k):
329
+ x = batch[k]
330
+ if len(x.shape) == 3:
331
+ x = x[..., None]
332
+ x = rearrange(x, 'b h w c -> b c h w')
333
+ x = x.to(memory_format=torch.contiguous_format).float()
334
+ return x
335
+
336
+ def shared_step(self, batch):
337
+ x = self.get_input(batch, self.first_stage_key)
338
+ loss, loss_dict = self(x)
339
+ return loss, loss_dict
340
+
341
+ def training_step(self, batch, batch_idx):
342
+ loss, loss_dict = self.shared_step(batch)
343
+
344
+ self.log_dict(loss_dict, prog_bar=True,
345
+ logger=True, on_step=True, on_epoch=True)
346
+
347
+ self.log("global_step", self.global_step,
348
+ prog_bar=True, logger=True, on_step=True, on_epoch=False)
349
+
350
+ if self.use_scheduler:
351
+ lr = self.optimizers().param_groups[0]['lr']
352
+ self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
353
+
354
+ return loss
355
+
356
+ @torch.no_grad()
357
+ def validation_step(self, batch, batch_idx):
358
+ _, loss_dict_no_ema = self.shared_step(batch)
359
+ with self.ema_scope():
360
+ _, loss_dict_ema = self.shared_step(batch)
361
+ loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
362
+ self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
363
+ self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
364
+
365
+ def on_train_batch_end(self, *args, **kwargs):
366
+ if self.use_ema:
367
+ self.model_ema(self.model)
368
+
369
+ def _get_rows_from_list(self, samples):
370
+ n_imgs_per_row = len(samples)
371
+ denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
372
+ denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
373
+ denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
374
+ return denoise_grid
375
+
376
+ @torch.no_grad()
377
+ def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
378
+ log = {}
379
+ x = self.get_input(batch, self.first_stage_key)
380
+ N = min(x.shape[0], N)
381
+ n_row = min(x.shape[0], n_row)
382
+ x = x.to(self.device)[:N]
383
+ log["inputs"] = x
384
+
385
+ # get diffusion row
386
+ diffusion_row = []
387
+ x_start = x[:n_row]
388
+
389
+ for t in range(self.num_timesteps):
390
+ if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
391
+ t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
392
+ t = t.to(self.device).long()
393
+ noise = torch.randn_like(x_start)
394
+ x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
395
+ diffusion_row.append(x_noisy)
396
+
397
+ log["diffusion_row"] = self._get_rows_from_list(diffusion_row)
398
+
399
+ if sample:
400
+ # get denoise row
401
+ with self.ema_scope("Plotting"):
402
+ samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)
403
+
404
+ log["samples"] = samples
405
+ log["denoise_row"] = self._get_rows_from_list(denoise_row)
406
+
407
+ if return_keys:
408
+ if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
409
+ return log
410
+ else:
411
+ return {key: log[key] for key in return_keys}
412
+ return log
413
+
414
+ def configure_optimizers(self):
415
+ lr = self.learning_rate
416
+ params = list(self.model.parameters())
417
+ if self.learn_logvar:
418
+ params = params + [self.logvar]
419
+ opt = torch.optim.AdamW(params, lr=lr)
420
+ return opt
421
+
422
+
423
+ class LatentDiffusionV1(DDPMV1):
424
+ """main class"""
425
+ def __init__(self,
426
+ first_stage_config,
427
+ cond_stage_config,
428
+ num_timesteps_cond=None,
429
+ cond_stage_key="image",
430
+ cond_stage_trainable=False,
431
+ concat_mode=True,
432
+ cond_stage_forward=None,
433
+ conditioning_key=None,
434
+ scale_factor=1.0,
435
+ scale_by_std=False,
436
+ *args, **kwargs):
437
+ self.num_timesteps_cond = default(num_timesteps_cond, 1)
438
+ self.scale_by_std = scale_by_std
439
+ assert self.num_timesteps_cond <= kwargs['timesteps']
440
+ # for backwards compatibility after implementation of DiffusionWrapper
441
+ if conditioning_key is None:
442
+ conditioning_key = 'concat' if concat_mode else 'crossattn'
443
+ if cond_stage_config == '__is_unconditional__':
444
+ conditioning_key = None
445
+ ckpt_path = kwargs.pop("ckpt_path", None)
446
+ ignore_keys = kwargs.pop("ignore_keys", [])
447
+ super().__init__(*args, conditioning_key=conditioning_key, **kwargs)
448
+ self.concat_mode = concat_mode
449
+ self.cond_stage_trainable = cond_stage_trainable
450
+ self.cond_stage_key = cond_stage_key
451
+ try:
452
+ self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
453
+ except Exception:
454
+ self.num_downs = 0
455
+ if not scale_by_std:
456
+ self.scale_factor = scale_factor
457
+ else:
458
+ self.register_buffer('scale_factor', torch.tensor(scale_factor))
459
+ self.instantiate_first_stage(first_stage_config)
460
+ self.instantiate_cond_stage(cond_stage_config)
461
+ self.cond_stage_forward = cond_stage_forward
462
+ self.clip_denoised = False
463
+ self.bbox_tokenizer = None
464
+
465
+ self.restarted_from_ckpt = False
466
+ if ckpt_path is not None:
467
+ self.init_from_ckpt(ckpt_path, ignore_keys)
468
+ self.restarted_from_ckpt = True
469
+
470
+ def make_cond_schedule(self, ):
471
+ self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
472
+ ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
473
+ self.cond_ids[:self.num_timesteps_cond] = ids
474
+
475
+ @rank_zero_only
476
+ @torch.no_grad()
477
+ def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
478
+ # only for very first batch
479
+ if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
480
+ assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
481
+ # set rescale weight to 1./std of encodings
482
+ print("### USING STD-RESCALING ###")
483
+ x = super().get_input(batch, self.first_stage_key)
484
+ x = x.to(self.device)
485
+ encoder_posterior = self.encode_first_stage(x)
486
+ z = self.get_first_stage_encoding(encoder_posterior).detach()
487
+ del self.scale_factor
488
+ self.register_buffer('scale_factor', 1. / z.flatten().std())
489
+ print(f"setting self.scale_factor to {self.scale_factor}")
490
+ print("### USING STD-RESCALING ###")
491
+
492
+ def register_schedule(self,
493
+ given_betas=None, beta_schedule="linear", timesteps=1000,
494
+ linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
495
+ super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
496
+
497
+ self.shorten_cond_schedule = self.num_timesteps_cond > 1
498
+ if self.shorten_cond_schedule:
499
+ self.make_cond_schedule()
500
+
501
+ def instantiate_first_stage(self, config):
502
+ model = instantiate_from_config(config)
503
+ self.first_stage_model = model.eval()
504
+ self.first_stage_model.train = disabled_train
505
+ for param in self.first_stage_model.parameters():
506
+ param.requires_grad = False
507
+
508
+ def instantiate_cond_stage(self, config):
509
+ if not self.cond_stage_trainable:
510
+ if config == "__is_first_stage__":
511
+ print("Using first stage also as cond stage.")
512
+ self.cond_stage_model = self.first_stage_model
513
+ elif config == "__is_unconditional__":
514
+ print(f"Training {self.__class__.__name__} as an unconditional model.")
515
+ self.cond_stage_model = None
516
+ # self.be_unconditional = True
517
+ else:
518
+ model = instantiate_from_config(config)
519
+ self.cond_stage_model = model.eval()
520
+ self.cond_stage_model.train = disabled_train
521
+ for param in self.cond_stage_model.parameters():
522
+ param.requires_grad = False
523
+ else:
524
+ assert config != '__is_first_stage__'
525
+ assert config != '__is_unconditional__'
526
+ model = instantiate_from_config(config)
527
+ self.cond_stage_model = model
528
+
529
+ def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
530
+ denoise_row = []
531
+ for zd in tqdm(samples, desc=desc):
532
+ denoise_row.append(self.decode_first_stage(zd.to(self.device),
533
+ force_not_quantize=force_no_decoder_quantization))
534
+ n_imgs_per_row = len(denoise_row)
535
+ denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
536
+ denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
537
+ denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
538
+ denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
539
+ return denoise_grid
540
+
541
+ def get_first_stage_encoding(self, encoder_posterior):
542
+ if isinstance(encoder_posterior, DiagonalGaussianDistribution):
543
+ z = encoder_posterior.sample()
544
+ elif isinstance(encoder_posterior, torch.Tensor):
545
+ z = encoder_posterior
546
+ else:
547
+ raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
548
+ return self.scale_factor * z
549
+
550
+ def get_learned_conditioning(self, c):
551
+ if self.cond_stage_forward is None:
552
+ if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
553
+ c = self.cond_stage_model.encode(c)
554
+ if isinstance(c, DiagonalGaussianDistribution):
555
+ c = c.mode()
556
+ else:
557
+ c = self.cond_stage_model(c)
558
+ else:
559
+ assert hasattr(self.cond_stage_model, self.cond_stage_forward)
560
+ c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
561
+ return c
562
+
563
+ def meshgrid(self, h, w):
564
+ y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
565
+ x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
566
+
567
+ arr = torch.cat([y, x], dim=-1)
568
+ return arr
569
+
570
+ def delta_border(self, h, w):
571
+ """
572
+ :param h: height
573
+ :param w: width
574
+ :return: normalized distance to image border,
575
+ with min distance = 0 at border and max dist = 0.5 at image center
576
+ """
577
+ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
578
+ arr = self.meshgrid(h, w) / lower_right_corner
579
+ dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
580
+ dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
581
+ edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
582
+ return edge_dist
583
+
584
+ def get_weighting(self, h, w, Ly, Lx, device):
585
+ weighting = self.delta_border(h, w)
586
+ weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
587
+ self.split_input_params["clip_max_weight"], )
588
+ weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
589
+
590
+ if self.split_input_params["tie_braker"]:
591
+ L_weighting = self.delta_border(Ly, Lx)
592
+ L_weighting = torch.clip(L_weighting,
593
+ self.split_input_params["clip_min_tie_weight"],
594
+ self.split_input_params["clip_max_tie_weight"])
595
+
596
+ L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
597
+ weighting = weighting * L_weighting
598
+ return weighting
599
+
600
+ def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code
601
+ """
602
+ :param x: img of size (bs, c, h, w)
603
+ :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
604
+ """
605
+ bs, nc, h, w = x.shape
606
+
607
+ # number of crops in image
608
+ Ly = (h - kernel_size[0]) // stride[0] + 1
609
+ Lx = (w - kernel_size[1]) // stride[1] + 1
610
+
611
+ if uf == 1 and df == 1:
612
+ fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
613
+ unfold = torch.nn.Unfold(**fold_params)
614
+
615
+ fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
616
+
617
+ weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
618
+ normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap
619
+ weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
620
+
621
+ elif uf > 1 and df == 1:
622
+ fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
623
+ unfold = torch.nn.Unfold(**fold_params)
624
+
625
+ fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
626
+ dilation=1, padding=0,
627
+ stride=(stride[0] * uf, stride[1] * uf))
628
+ fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
629
+
630
+ weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
631
+ normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap
632
+ weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
633
+
634
+ elif df > 1 and uf == 1:
635
+ fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
636
+ unfold = torch.nn.Unfold(**fold_params)
637
+
638
+ fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
639
+ dilation=1, padding=0,
640
+ stride=(stride[0] // df, stride[1] // df))
641
+ fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
642
+
643
+ weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
644
+ normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap
645
+ weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
646
+
647
+ else:
648
+ raise NotImplementedError
649
+
650
+ return fold, unfold, normalization, weighting
651
+
652
+ @torch.no_grad()
653
+ def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
654
+ cond_key=None, return_original_cond=False, bs=None):
655
+ x = super().get_input(batch, k)
656
+ if bs is not None:
657
+ x = x[:bs]
658
+ x = x.to(self.device)
659
+ encoder_posterior = self.encode_first_stage(x)
660
+ z = self.get_first_stage_encoding(encoder_posterior).detach()
661
+
662
+ if self.model.conditioning_key is not None:
663
+ if cond_key is None:
664
+ cond_key = self.cond_stage_key
665
+ if cond_key != self.first_stage_key:
666
+ if cond_key in ['caption', 'coordinates_bbox']:
667
+ xc = batch[cond_key]
668
+ elif cond_key == 'class_label':
669
+ xc = batch
670
+ else:
671
+ xc = super().get_input(batch, cond_key).to(self.device)
672
+ else:
673
+ xc = x
674
+ if not self.cond_stage_trainable or force_c_encode:
675
+ if isinstance(xc, dict) or isinstance(xc, list):
676
+ # import pudb; pudb.set_trace()
677
+ c = self.get_learned_conditioning(xc)
678
+ else:
679
+ c = self.get_learned_conditioning(xc.to(self.device))
680
+ else:
681
+ c = xc
682
+ if bs is not None:
683
+ c = c[:bs]
684
+
685
+ if self.use_positional_encodings:
686
+ pos_x, pos_y = self.compute_latent_shifts(batch)
687
+ ckey = __conditioning_keys__[self.model.conditioning_key]
688
+ c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}
689
+
690
+ else:
691
+ c = None
692
+ xc = None
693
+ if self.use_positional_encodings:
694
+ pos_x, pos_y = self.compute_latent_shifts(batch)
695
+ c = {'pos_x': pos_x, 'pos_y': pos_y}
696
+ out = [z, c]
697
+ if return_first_stage_outputs:
698
+ xrec = self.decode_first_stage(z)
699
+ out.extend([x, xrec])
700
+ if return_original_cond:
701
+ out.append(xc)
702
+ return out
703
+
704
+ @torch.no_grad()
705
+ def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
706
+ if predict_cids:
707
+ if z.dim() == 4:
708
+ z = torch.argmax(z.exp(), dim=1).long()
709
+ z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
710
+ z = rearrange(z, 'b h w c -> b c h w').contiguous()
711
+
712
+ z = 1. / self.scale_factor * z
713
+
714
+ if hasattr(self, "split_input_params"):
715
+ if self.split_input_params["patch_distributed_vq"]:
716
+ ks = self.split_input_params["ks"] # eg. (128, 128)
717
+ stride = self.split_input_params["stride"] # eg. (64, 64)
718
+ uf = self.split_input_params["vqf"]
719
+ bs, nc, h, w = z.shape
720
+ if ks[0] > h or ks[1] > w:
721
+ ks = (min(ks[0], h), min(ks[1], w))
722
+ print("reducing Kernel")
723
+
724
+ if stride[0] > h or stride[1] > w:
725
+ stride = (min(stride[0], h), min(stride[1], w))
726
+ print("reducing stride")
727
+
728
+ fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
729
+
730
+ z = unfold(z) # (bn, nc * prod(**ks), L)
731
+ # 1. Reshape to img shape
732
+ z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
733
+
734
+ # 2. apply model loop over last dim
735
+ if isinstance(self.first_stage_model, VQModelInterface):
736
+ output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
737
+ force_not_quantize=predict_cids or force_not_quantize)
738
+ for i in range(z.shape[-1])]
739
+ else:
740
+
741
+ output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
742
+ for i in range(z.shape[-1])]
743
+
744
+ o = torch.stack(output_list, axis=-1)  # (bn, nc, ks[0], ks[1], L)
745
+ o = o * weighting
746
+ # Reverse 1. reshape to img shape
747
+ o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
748
+ # stitch crops together
749
+ decoded = fold(o)
750
+ decoded = decoded / normalization # norm is shape (1, 1, h, w)
751
+ return decoded
752
+ else:
753
+ if isinstance(self.first_stage_model, VQModelInterface):
754
+ return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
755
+ else:
756
+ return self.first_stage_model.decode(z)
757
+
758
+ else:
759
+ if isinstance(self.first_stage_model, VQModelInterface):
760
+ return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
761
+ else:
762
+ return self.first_stage_model.decode(z)
763
+
764
+ # same as above but without decorator
765
+ def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
766
+ if predict_cids:
767
+ if z.dim() == 4:
768
+ z = torch.argmax(z.exp(), dim=1).long()
769
+ z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
770
+ z = rearrange(z, 'b h w c -> b c h w').contiguous()
771
+
772
+ z = 1. / self.scale_factor * z
773
+
774
+ if hasattr(self, "split_input_params"):
775
+ if self.split_input_params["patch_distributed_vq"]:
776
+ ks = self.split_input_params["ks"] # eg. (128, 128)
777
+ stride = self.split_input_params["stride"] # eg. (64, 64)
778
+ uf = self.split_input_params["vqf"]
779
+ bs, nc, h, w = z.shape
780
+ if ks[0] > h or ks[1] > w:
781
+ ks = (min(ks[0], h), min(ks[1], w))
782
+ print("reducing Kernel")
783
+
784
+ if stride[0] > h or stride[1] > w:
785
+ stride = (min(stride[0], h), min(stride[1], w))
786
+ print("reducing stride")
787
+
788
+ fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
789
+
790
+ z = unfold(z) # (bn, nc * prod(**ks), L)
791
+ # 1. Reshape to img shape
792
+ z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
793
+
794
+ # 2. apply model loop over last dim
795
+ if isinstance(self.first_stage_model, VQModelInterface):
796
+ output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
797
+ force_not_quantize=predict_cids or force_not_quantize)
798
+ for i in range(z.shape[-1])]
799
+ else:
800
+
801
+ output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
802
+ for i in range(z.shape[-1])]
803
+
804
+ o = torch.stack(output_list, axis=-1)  # (bn, nc, ks[0], ks[1], L)
805
+ o = o * weighting
806
+ # Reverse 1. reshape to img shape
807
+ o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
808
+ # stitch crops together
809
+ decoded = fold(o)
810
+ decoded = decoded / normalization # norm is shape (1, 1, h, w)
811
+ return decoded
812
+ else:
813
+ if isinstance(self.first_stage_model, VQModelInterface):
814
+ return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
815
+ else:
816
+ return self.first_stage_model.decode(z)
817
+
818
+ else:
819
+ if isinstance(self.first_stage_model, VQModelInterface):
820
+ return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
821
+ else:
822
+ return self.first_stage_model.decode(z)
823
+
824
+ @torch.no_grad()
825
+ def encode_first_stage(self, x):
826
+ if hasattr(self, "split_input_params"):
827
+ if self.split_input_params["patch_distributed_vq"]:
828
+ ks = self.split_input_params["ks"] # eg. (128, 128)
829
+ stride = self.split_input_params["stride"] # eg. (64, 64)
830
+ df = self.split_input_params["vqf"]
831
+ self.split_input_params['original_image_size'] = x.shape[-2:]
832
+ bs, nc, h, w = x.shape
833
+ if ks[0] > h or ks[1] > w:
834
+ ks = (min(ks[0], h), min(ks[1], w))
835
+ print("reducing Kernel")
836
+
837
+ if stride[0] > h or stride[1] > w:
838
+ stride = (min(stride[0], h), min(stride[1], w))
839
+ print("reducing stride")
840
+
841
+ fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df)
842
+ z = unfold(x) # (bn, nc * prod(**ks), L)
843
+ # Reshape to img shape
844
+ z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
845
+
846
+ output_list = [self.first_stage_model.encode(z[:, :, :, :, i])
847
+ for i in range(z.shape[-1])]
848
+
849
+ o = torch.stack(output_list, axis=-1)
850
+ o = o * weighting
851
+
852
+ # Reverse reshape to img shape
853
+ o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
854
+ # stitch crops together
855
+ decoded = fold(o)
856
+ decoded = decoded / normalization
857
+ return decoded
858
+
859
+ else:
860
+ return self.first_stage_model.encode(x)
861
+ else:
862
+ return self.first_stage_model.encode(x)
863
+
864
+ def shared_step(self, batch, **kwargs):
865
+ x, c = self.get_input(batch, self.first_stage_key)
866
+ loss = self(x, c)
867
+ return loss
868
+
869
+ def forward(self, x, c, *args, **kwargs):
870
+ t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
871
+ if self.model.conditioning_key is not None:
872
+ assert c is not None
873
+ if self.cond_stage_trainable:
874
+ c = self.get_learned_conditioning(c)
875
+ if self.shorten_cond_schedule: # TODO: drop this option
876
+ tc = self.cond_ids[t].to(self.device)
877
+ c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
878
+ return self.p_losses(x, c, t, *args, **kwargs)
879
+
880
+ def apply_model(self, x_noisy, t, cond, return_ids=False):
881
+
882
+ if isinstance(cond, dict):
883
+ # hybrid case, cond is expected to be a dict
884
+ pass
885
+ else:
886
+ if not isinstance(cond, list):
887
+ cond = [cond]
888
+ key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
889
+ cond = {key: cond}
890
+
891
+ if hasattr(self, "split_input_params"):
892
+ assert len(cond) == 1 # todo can only deal with one conditioning atm
893
+ assert not return_ids
894
+ ks = self.split_input_params["ks"] # eg. (128, 128)
895
+ stride = self.split_input_params["stride"] # eg. (64, 64)
896
+
897
+ h, w = x_noisy.shape[-2:]
898
+
899
+ fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)
900
+
901
+ z = unfold(x_noisy) # (bn, nc * prod(**ks), L)
902
+ # Reshape to img shape
903
+ z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
904
+ z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]
905
+
906
+ if self.cond_stage_key in ["image", "LR_image", "segmentation",
907
+ 'bbox_img'] and self.model.conditioning_key: # todo check for completeness
908
+ c_key = next(iter(cond.keys())) # get key
909
+ c = next(iter(cond.values())) # get value
910
+ assert (len(c) == 1) # todo extend to list with more than one elem
911
+ c = c[0] # get element
912
+
913
+ c = unfold(c)
914
+ c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L )
915
+
916
+ cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]
917
+
918
+ elif self.cond_stage_key == 'coordinates_bbox':
919
+ assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'
920
+
921
+ # assuming padding of unfold is always 0 and its dilation is always 1
922
+ n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
923
+ full_img_h, full_img_w = self.split_input_params['original_image_size']
924
+ # as we are operating on latents, we need the factor from the original image size to the
925
+ # spatial latent size to properly rescale the crops for regenerating the bbox annotations
926
+ num_downs = self.first_stage_model.encoder.num_resolutions - 1
927
+ rescale_latent = 2 ** (num_downs)
928
+
929
+ # get top-left positions of patches conforming to the bbox tokenizer; therefore we
930
+ # need to rescale the tl patch coordinates to be in between (0,1)
931
+ tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
932
+ rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
933
+ for patch_nr in range(z.shape[-1])]
934
+
935
+ # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
936
+ patch_limits = [(x_tl, y_tl,
937
+ rescale_latent * ks[0] / full_img_w,
938
+ rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
939
+ # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]
940
+
941
+ # tokenize crop coordinates for the bounding boxes of the respective patches
942
+ patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
943
+ for bbox in patch_limits] # list of length l with tensors of shape (1, 2)
944
+ print(patch_limits_tknzd[0].shape)
945
+ # cut tknzd crop position from conditioning
946
+ assert isinstance(cond, dict), 'cond must be dict to be fed into model'
947
+ cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
948
+ print(cut_cond.shape)
949
+
950
+ adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
951
+ adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
952
+ print(adapted_cond.shape)
953
+ adapted_cond = self.get_learned_conditioning(adapted_cond)
954
+ print(adapted_cond.shape)
955
+ adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
956
+ print(adapted_cond.shape)
957
+
958
+ cond_list = [{'c_crossattn': [e]} for e in adapted_cond]
959
+
960
+ else:
961
+ cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient
962
+
963
+ # apply model by loop over crops
964
+ output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
965
+ assert not isinstance(output_list[0],
966
+ tuple)  # todo: can't deal with multiple model outputs; check this never happens
967
+
968
+ o = torch.stack(output_list, axis=-1)
969
+ o = o * weighting
970
+ # Reverse reshape to img shape
971
+ o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
972
+ # stitch crops together
973
+ x_recon = fold(o) / normalization
974
+
975
+ else:
976
+ x_recon = self.model(x_noisy, t, **cond)
977
+
978
+ if isinstance(x_recon, tuple) and not return_ids:
979
+ return x_recon[0]
980
+ else:
981
+ return x_recon
982
+
983
+ def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
984
+ return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
985
+ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
986
+
987
+ def _prior_bpd(self, x_start):
988
+ """
989
+ Get the prior KL term for the variational lower-bound, measured in
990
+ bits-per-dim.
991
+ This term can't be optimized, as it only depends on the encoder.
992
+ :param x_start: the [N x C x ...] tensor of inputs.
993
+ :return: a batch of [N] KL values (in bits), one per batch element.
994
+ """
995
+ batch_size = x_start.shape[0]
996
+ t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
997
+ qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
998
+ kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
999
+ return mean_flat(kl_prior) / np.log(2.0)
1000
+
1001
+ def p_losses(self, x_start, cond, t, noise=None):
1002
+ noise = default(noise, lambda: torch.randn_like(x_start))
1003
+ x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
1004
+ model_output = self.apply_model(x_noisy, t, cond)
1005
+
1006
+ loss_dict = {}
1007
+ prefix = 'train' if self.training else 'val'
1008
+
1009
+ if self.parameterization == "x0":
1010
+ target = x_start
1011
+ elif self.parameterization == "eps":
1012
+ target = noise
1013
+ else:
1014
+ raise NotImplementedError()
1015
+
1016
+ loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
1017
+ loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
1018
+
1019
+ logvar_t = self.logvar[t].to(self.device)
1020
+ loss = loss_simple / torch.exp(logvar_t) + logvar_t
1021
+ # loss = loss_simple / torch.exp(self.logvar) + self.logvar
1022
+ if self.learn_logvar:
1023
+ loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
1024
+ loss_dict.update({'logvar': self.logvar.data.mean()})
1025
+
1026
+ loss = self.l_simple_weight * loss.mean()
1027
+
1028
+ loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
1029
+ loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
1030
+ loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
1031
+ loss += (self.original_elbo_weight * loss_vlb)
1032
+ loss_dict.update({f'{prefix}/loss': loss})
1033
+
1034
+ return loss, loss_dict
1035
+
1036
+ def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
1037
+ return_x0=False, score_corrector=None, corrector_kwargs=None):
1038
+ t_in = t
1039
+ model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
1040
+
1041
+ if score_corrector is not None:
1042
+ assert self.parameterization == "eps"
1043
+ model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
1044
+
1045
+ if return_codebook_ids:
1046
+ model_out, logits = model_out
1047
+
1048
+ if self.parameterization == "eps":
1049
+ x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
1050
+ elif self.parameterization == "x0":
1051
+ x_recon = model_out
1052
+ else:
1053
+ raise NotImplementedError()
1054
+
1055
+ if clip_denoised:
1056
+ x_recon.clamp_(-1., 1.)
1057
+ if quantize_denoised:
1058
+ x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
1059
+ model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
1060
+ if return_codebook_ids:
1061
+ return model_mean, posterior_variance, posterior_log_variance, logits
1062
+ elif return_x0:
1063
+ return model_mean, posterior_variance, posterior_log_variance, x_recon
1064
+ else:
1065
+ return model_mean, posterior_variance, posterior_log_variance
1066
+
1067
+ @torch.no_grad()
1068
+ def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
1069
+ return_codebook_ids=False, quantize_denoised=False, return_x0=False,
1070
+ temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
1071
+ b, *_, device = *x.shape, x.device
1072
+ outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
1073
+ return_codebook_ids=return_codebook_ids,
1074
+ quantize_denoised=quantize_denoised,
1075
+ return_x0=return_x0,
1076
+ score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
1077
+ if return_codebook_ids:
1078
+ raise DeprecationWarning("Support dropped.")
1079
+ model_mean, _, model_log_variance, logits = outputs
1080
+ elif return_x0:
1081
+ model_mean, _, model_log_variance, x0 = outputs
1082
+ else:
1083
+ model_mean, _, model_log_variance = outputs
1084
+
1085
+ noise = noise_like(x.shape, device, repeat_noise) * temperature
1086
+ if noise_dropout > 0.:
1087
+ noise = torch.nn.functional.dropout(noise, p=noise_dropout)
1088
+ # no noise when t == 0
1089
+ nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
1090
+
1091
+ if return_codebook_ids:
1092
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
1093
+ if return_x0:
1094
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
1095
+ else:
1096
+ return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
1097
+
1098
+ @torch.no_grad()
1099
+ def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
1100
+ img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
1101
+ score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
1102
+ log_every_t=None):
1103
+ if not log_every_t:
1104
+ log_every_t = self.log_every_t
1105
+ timesteps = self.num_timesteps
1106
+ if batch_size is not None:
1107
+ b = batch_size  # batch_size is known to be not None in this branch
1108
+ shape = [batch_size] + list(shape)
1109
+ else:
1110
+ b = batch_size = shape[0]
1111
+ if x_T is None:
1112
+ img = torch.randn(shape, device=self.device)
1113
+ else:
1114
+ img = x_T
1115
+ intermediates = []
1116
+ if cond is not None:
1117
+ if isinstance(cond, dict):
1118
+ cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
1119
+ [x[:batch_size] for x in cond[key]] for key in cond}
1120
+ else:
1121
+ cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
1122
+
1123
+ if start_T is not None:
1124
+ timesteps = min(timesteps, start_T)
1125
+ iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
1126
+ total=timesteps) if verbose else reversed(
1127
+ range(0, timesteps))
1128
+ if isinstance(temperature, float):
1129
+ temperature = [temperature] * timesteps
1130
+
1131
+ for i in iterator:
1132
+ ts = torch.full((b,), i, device=self.device, dtype=torch.long)
1133
+ if self.shorten_cond_schedule:
1134
+ assert self.model.conditioning_key != 'hybrid'
1135
+ tc = self.cond_ids[ts].to(cond.device)
1136
+ cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
1137
+
1138
+ img, x0_partial = self.p_sample(img, cond, ts,
1139
+ clip_denoised=self.clip_denoised,
1140
+ quantize_denoised=quantize_denoised, return_x0=True,
1141
+ temperature=temperature[i], noise_dropout=noise_dropout,
1142
+ score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
1143
+ if mask is not None:
1144
+ assert x0 is not None
1145
+ img_orig = self.q_sample(x0, ts)
1146
+ img = img_orig * mask + (1. - mask) * img
1147
+
1148
+ if i % log_every_t == 0 or i == timesteps - 1:
1149
+ intermediates.append(x0_partial)
1150
+ if callback:
1151
+ callback(i)
1152
+ if img_callback:
1153
+ img_callback(img, i)
1154
+ return img, intermediates
1155
+
1156
+ @torch.no_grad()
1157
+ def p_sample_loop(self, cond, shape, return_intermediates=False,
1158
+ x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
1159
+ mask=None, x0=None, img_callback=None, start_T=None,
1160
+ log_every_t=None):
1161
+
1162
+ if not log_every_t:
1163
+ log_every_t = self.log_every_t
1164
+ device = self.betas.device
1165
+ b = shape[0]
1166
+ if x_T is None:
1167
+ img = torch.randn(shape, device=device)
1168
+ else:
1169
+ img = x_T
1170
+
1171
+ intermediates = [img]
1172
+ if timesteps is None:
1173
+ timesteps = self.num_timesteps
1174
+
1175
+ if start_T is not None:
1176
+ timesteps = min(timesteps, start_T)
1177
+ iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
1178
+ range(0, timesteps))
1179
+
1180
+ if mask is not None:
1181
+ assert x0 is not None
1182
+ assert x0.shape[2:] == mask.shape[2:]  # spatial size has to match
1183
+
1184
+ for i in iterator:
1185
+ ts = torch.full((b,), i, device=device, dtype=torch.long)
1186
+ if self.shorten_cond_schedule:
1187
+ assert self.model.conditioning_key != 'hybrid'
1188
+ tc = self.cond_ids[ts].to(cond.device)
1189
+ cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
1190
+
1191
+ img = self.p_sample(img, cond, ts,
1192
+ clip_denoised=self.clip_denoised,
1193
+ quantize_denoised=quantize_denoised)
1194
+ if mask is not None:
1195
+ img_orig = self.q_sample(x0, ts)
1196
+ img = img_orig * mask + (1. - mask) * img
1197
+
1198
+ if i % log_every_t == 0 or i == timesteps - 1:
1199
+ intermediates.append(img)
1200
+ if callback:
1201
+ callback(i)
1202
+ if img_callback:
1203
+ img_callback(img, i)
1204
+
1205
+ if return_intermediates:
1206
+ return img, intermediates
1207
+ return img
1208
+
1209
+ @torch.no_grad()
1210
+ def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
1211
+ verbose=True, timesteps=None, quantize_denoised=False,
1212
+ mask=None, x0=None, shape=None, **kwargs):
1213
+ if shape is None:
1214
+ shape = (batch_size, self.channels, self.image_size, self.image_size)
1215
+ if cond is not None:
1216
+ if isinstance(cond, dict):
1217
+ cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
1218
+ [x[:batch_size] for x in cond[key]] for key in cond}
1219
+ else:
1220
+ cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
1221
+ return self.p_sample_loop(cond,
1222
+ shape,
1223
+ return_intermediates=return_intermediates, x_T=x_T,
1224
+ verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
1225
+ mask=mask, x0=x0)
1226
+
1227
+ @torch.no_grad()
1228
+ def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
1229
+
1230
+ if ddim:
1231
+ ddim_sampler = DDIMSampler(self)
1232
+ shape = (self.channels, self.image_size, self.image_size)
1233
+ samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
1234
+ shape, cond, verbose=False, **kwargs)
1235
+
1236
+ else:
1237
+ samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
1238
+ return_intermediates=True, **kwargs)
1239
+
1240
+ return samples, intermediates
1241
+
1242
+
1243
+ @torch.no_grad()
1244
+ def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
1245
+ quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
1246
+ plot_diffusion_rows=True, **kwargs):
1247
+
1248
+ use_ddim = ddim_steps is not None
1249
+
1250
+ log = {}
1251
+ z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
1252
+ return_first_stage_outputs=True,
1253
+ force_c_encode=True,
1254
+ return_original_cond=True,
1255
+ bs=N)
1256
+ N = min(x.shape[0], N)
1257
+ n_row = min(x.shape[0], n_row)
1258
+ log["inputs"] = x
1259
+ log["reconstruction"] = xrec
1260
+ if self.model.conditioning_key is not None:
1261
+ if hasattr(self.cond_stage_model, "decode"):
1262
+ xc = self.cond_stage_model.decode(c)
1263
+ log["conditioning"] = xc
1264
+ elif self.cond_stage_key in ["caption"]:
1265
+ xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
1266
+ log["conditioning"] = xc
1267
+ elif self.cond_stage_key == 'class_label':
1268
+ xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
1269
+ log['conditioning'] = xc
1270
+ elif isimage(xc):
1271
+ log["conditioning"] = xc
1272
+ if ismap(xc):
1273
+ log["original_conditioning"] = self.to_rgb(xc)
1274
+
1275
+ if plot_diffusion_rows:
1276
+ # get diffusion row
1277
+ diffusion_row = []
1278
+ z_start = z[:n_row]
1279
+ for t in range(self.num_timesteps):
1280
+ if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
1281
+ t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
1282
+ t = t.to(self.device).long()
1283
+ noise = torch.randn_like(z_start)
1284
+ z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
1285
+ diffusion_row.append(self.decode_first_stage(z_noisy))
1286
+
1287
+ diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
1288
+ diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
1289
+ diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
1290
+ diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
1291
+ log["diffusion_row"] = diffusion_grid
1292
+
1293
+ if sample:
1294
+ # get denoise row
1295
+ with self.ema_scope("Plotting"):
1296
+ samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1297
+ ddim_steps=ddim_steps, eta=ddim_eta)
1298
+ # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
1299
+ x_samples = self.decode_first_stage(samples)
1300
+ log["samples"] = x_samples
1301
+ if plot_denoise_rows:
1302
+ denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
1303
+ log["denoise_row"] = denoise_grid
1304
+
1305
+ if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
1306
+ self.first_stage_model, IdentityFirstStage):
1307
+ # also display when quantizing x0 while sampling
1308
+ with self.ema_scope("Plotting Quantized Denoised"):
1309
+ samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,
1310
+ ddim_steps=ddim_steps, eta=ddim_eta,
1311
+ quantize_denoised=True)
1312
+ # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
1313
+ # quantize_denoised=True)
1314
+ x_samples = self.decode_first_stage(samples.to(self.device))
1315
+ log["samples_x0_quantized"] = x_samples
1316
+
1317
+ if inpaint:
1318
+ # make a simple center square
1319
+ h, w = z.shape[2], z.shape[3]
1320
+ mask = torch.ones(N, h, w).to(self.device)
1321
+ # zeros will be filled in
1322
+ mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
1323
+ mask = mask[:, None, ...]
1324
+ with self.ema_scope("Plotting Inpaint"):
1325
+
1326
+ samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
1327
+ ddim_steps=ddim_steps, x0=z[:N], mask=mask)
1328
+ x_samples = self.decode_first_stage(samples.to(self.device))
1329
+ log["samples_inpainting"] = x_samples
1330
+ log["mask"] = mask
1331
+
1332
+ # outpaint
1333
+ with self.ema_scope("Plotting Outpaint"):
1334
+ samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,
1335
+ ddim_steps=ddim_steps, x0=z[:N], mask=mask)
1336
+ x_samples = self.decode_first_stage(samples.to(self.device))
1337
+ log["samples_outpainting"] = x_samples
1338
+
1339
+ if plot_progressive_rows:
1340
+ with self.ema_scope("Plotting Progressives"):
1341
+ img, progressives = self.progressive_denoising(c,
1342
+ shape=(self.channels, self.image_size, self.image_size),
1343
+ batch_size=N)
1344
+ prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
1345
+ log["progressive_row"] = prog_row
1346
+
1347
+ if return_keys:
1348
+ if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
1349
+ return log
1350
+ else:
1351
+ return {key: log[key] for key in return_keys}
1352
+ return log
1353
+
1354
+ def configure_optimizers(self):
1355
+ lr = self.learning_rate
1356
+ params = list(self.model.parameters())
1357
+ if self.cond_stage_trainable:
1358
+ print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
1359
+ params = params + list(self.cond_stage_model.parameters())
1360
+ if self.learn_logvar:
1361
+ print('Diffusion model optimizing logvar')
1362
+ params.append(self.logvar)
1363
+ opt = torch.optim.AdamW(params, lr=lr)
1364
+ if self.use_scheduler:
1365
+ assert 'target' in self.scheduler_config
1366
+ scheduler = instantiate_from_config(self.scheduler_config)
1367
+
1368
+ print("Setting up LambdaLR scheduler...")
1369
+ scheduler = [
1370
+ {
1371
+ 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
1372
+ 'interval': 'step',
1373
+ 'frequency': 1
1374
+ }]
1375
+ return [opt], scheduler
1376
+ return opt
1377
+
1378
+ @torch.no_grad()
1379
+ def to_rgb(self, x):
1380
+ x = x.float()
1381
+ if not hasattr(self, "colorize"):
1382
+ self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
1383
+ x = nn.functional.conv2d(x, weight=self.colorize)
1384
+ x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
1385
+ return x
1386
+
1387
+
1388
+ class DiffusionWrapperV1(pl.LightningModule):
1389
+ def __init__(self, diff_model_config, conditioning_key):
1390
+ super().__init__()
1391
+ self.diffusion_model = instantiate_from_config(diff_model_config)
1392
+ self.conditioning_key = conditioning_key
1393
+ assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm']
1394
+
1395
+ def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
1396
+ if self.conditioning_key is None:
1397
+ out = self.diffusion_model(x, t)
1398
+ elif self.conditioning_key == 'concat':
1399
+ xc = torch.cat([x] + c_concat, dim=1)
1400
+ out = self.diffusion_model(xc, t)
1401
+ elif self.conditioning_key == 'crossattn':
1402
+ cc = torch.cat(c_crossattn, 1)
1403
+ out = self.diffusion_model(x, t, context=cc)
1404
+ elif self.conditioning_key == 'hybrid':
1405
+ xc = torch.cat([x] + c_concat, dim=1)
1406
+ cc = torch.cat(c_crossattn, 1)
1407
+ out = self.diffusion_model(xc, t, context=cc)
1408
+ elif self.conditioning_key == 'adm':
1409
+ cc = c_crossattn[0]
1410
+ out = self.diffusion_model(x, t, y=cc)
1411
+ else:
1412
+ raise NotImplementedError()
1413
+
1414
+ return out
1415
+
1416
+
1417
+ class Layout2ImgDiffusionV1(LatentDiffusionV1):
1418
+ # TODO: move all layout-specific hacks to this class
1419
+ def __init__(self, cond_stage_key, *args, **kwargs):
1420
+ assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
1421
+ super().__init__(*args, cond_stage_key=cond_stage_key, **kwargs)
1422
+
1423
+ def log_images(self, batch, N=8, *args, **kwargs):
1424
+ logs = super().log_images(*args, batch=batch, N=N, **kwargs)
1425
+
1426
+ key = 'train' if self.training else 'validation'
1427
+ dset = self.trainer.datamodule.datasets[key]
1428
+ mapper = dset.conditional_builders[self.cond_stage_key]
1429
+
1430
+ bbox_imgs = []
1431
+ map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno))
1432
+ for tknzd_bbox in batch[self.cond_stage_key][:N]:
1433
+ bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256))
1434
+ bbox_imgs.append(bboximg)
1435
+
1436
+ cond_img = torch.stack(bbox_imgs, dim=0)
1437
+ logs['bbox_image'] = cond_img
1438
+ return logs
1439
+
1440
+ ldm.models.diffusion.ddpm.DDPMV1 = DDPMV1
1441
+ ldm.models.diffusion.ddpm.LatentDiffusionV1 = LatentDiffusionV1
1442
+ ldm.models.diffusion.ddpm.DiffusionWrapperV1 = DiffusionWrapperV1
1443
+ ldm.models.diffusion.ddpm.Layout2ImgDiffusionV1 = Layout2ImgDiffusionV1
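The four assignments above expose the V1 classes on the `ldm.models.diffusion.ddpm` module, so configs whose `target` string names e.g. `ldm.models.diffusion.ddpm.LatentDiffusionV1` resolve to these implementations. A minimal sketch of that resolve-by-target pattern (an approximation of what `instantiate_from_config` does, not the library's exact code):

import importlib

def instantiate_from_target(config: dict):
    # "pkg.module.ClassName" -> class object, then construct it with params
    module_name, cls_name = config["target"].rsplit(".", 1)
    cls = getattr(importlib.import_module(module_name), cls_name)
    return cls(**config.get("params", {}))

# e.g. instantiate_from_target({"target": "collections.OrderedDict"}) -> OrderedDict()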
extensions-builtin/LDSR/vqvae_quantize.py ADDED
@@ -0,0 +1,147 @@
1
+ # Vendored from https://raw.githubusercontent.com/CompVis/taming-transformers/24268930bf1dce879235a7fddd0b2355b84d7ea6/taming/modules/vqvae/quantize.py,
2
+ # where the license is as follows:
3
+ #
4
+ # Copyright (c) 2020 Patrick Esser and Robin Rombach and Björn Ommer
5
+ #
6
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
7
+ # of this software and associated documentation files (the "Software"), to deal
8
+ # in the Software without restriction, including without limitation the rights
9
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10
+ # copies of the Software, and to permit persons to whom the Software is
11
+ # furnished to do so, subject to the following conditions:
12
+ #
13
+ # The above copyright notice and this permission notice shall be included in all
14
+ # copies or substantial portions of the Software.
15
+ #
16
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17
+ # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18
+ # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19
+ # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
20
+ # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21
+ # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
22
+ # OR OTHER DEALINGS IN THE SOFTWARE.
23
+
24
+ import torch
25
+ import torch.nn as nn
26
+ import numpy as np
27
+ from einops import rearrange
28
+
29
+
30
+ class VectorQuantizer2(nn.Module):
31
+ """
32
+ Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly
33
+ avoids costly matrix multiplications and allows for post-hoc remapping of indices.
34
+ """
35
+
36
+ # NOTE: due to a bug the beta term was applied to the wrong term. for
37
+ # backwards compatibility we use the buggy version by default, but you can
38
+ # specify legacy=False to fix it.
39
+ def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random",
40
+ sane_index_shape=False, legacy=True):
41
+ super().__init__()
42
+ self.n_e = n_e
43
+ self.e_dim = e_dim
44
+ self.beta = beta
45
+ self.legacy = legacy
46
+
47
+ self.embedding = nn.Embedding(self.n_e, self.e_dim)
48
+ self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
49
+
50
+ self.remap = remap
51
+ if self.remap is not None:
52
+ self.register_buffer("used", torch.tensor(np.load(self.remap)))
53
+ self.re_embed = self.used.shape[0]
54
+ self.unknown_index = unknown_index # "random" or "extra" or integer
55
+ if self.unknown_index == "extra":
56
+ self.unknown_index = self.re_embed
57
+ self.re_embed = self.re_embed + 1
58
+ print(f"Remapping {self.n_e} indices to {self.re_embed} indices. "
59
+ f"Using {self.unknown_index} for unknown indices.")
60
+ else:
61
+ self.re_embed = n_e
62
+
63
+ self.sane_index_shape = sane_index_shape
64
+
65
+ def remap_to_used(self, inds):
66
+ ishape = inds.shape
67
+ assert len(ishape) > 1
68
+ inds = inds.reshape(ishape[0], -1)
69
+ used = self.used.to(inds)
70
+ match = (inds[:, :, None] == used[None, None, ...]).long()
71
+ new = match.argmax(-1)
72
+ unknown = match.sum(2) < 1
73
+ if self.unknown_index == "random":
74
+ new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
75
+ else:
76
+ new[unknown] = self.unknown_index
77
+ return new.reshape(ishape)
78
+
79
+ def unmap_to_all(self, inds):
80
+ ishape = inds.shape
81
+ assert len(ishape) > 1
82
+ inds = inds.reshape(ishape[0], -1)
83
+ used = self.used.to(inds)
84
+ if self.re_embed > self.used.shape[0]: # extra token
85
+ inds[inds >= self.used.shape[0]] = 0 # simply set to zero
86
+ back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
87
+ return back.reshape(ishape)
88
+
89
+ def forward(self, z, temp=None, rescale_logits=False, return_logits=False):
90
+ assert temp is None or temp == 1.0, "Only for interface compatible with Gumbel"
91
+ assert rescale_logits is False, "Only for interface compatible with Gumbel"
92
+ assert return_logits is False, "Only for interface compatible with Gumbel"
93
+ # reshape z -> (batch, height, width, channel) and flatten
94
+ z = rearrange(z, 'b c h w -> b h w c').contiguous()
95
+ z_flattened = z.view(-1, self.e_dim)
96
+ # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
97
+
98
+ d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
99
+ torch.sum(self.embedding.weight ** 2, dim=1) - 2 * \
100
+ torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))
101
+
102
+ min_encoding_indices = torch.argmin(d, dim=1)
103
+ z_q = self.embedding(min_encoding_indices).view(z.shape)
104
+ perplexity = None
105
+ min_encodings = None
106
+
107
+ # compute loss for embedding
108
+ if not self.legacy:
109
+ loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + \
110
+ torch.mean((z_q - z.detach()) ** 2)
111
+ else:
112
+ loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * \
113
+ torch.mean((z_q - z.detach()) ** 2)
114
+
115
+ # preserve gradients
116
+ z_q = z + (z_q - z).detach()
117
+
118
+ # reshape back to match original input shape
119
+ z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous()
120
+
121
+ if self.remap is not None:
122
+ min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1) # add batch axis
123
+ min_encoding_indices = self.remap_to_used(min_encoding_indices)
124
+ min_encoding_indices = min_encoding_indices.reshape(-1, 1) # flatten
125
+
126
+ if self.sane_index_shape:
127
+ min_encoding_indices = min_encoding_indices.reshape(
128
+ z_q.shape[0], z_q.shape[2], z_q.shape[3])
129
+
130
+ return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
131
+
132
+ def get_codebook_entry(self, indices, shape):
133
+ # shape specifying (batch, height, width, channel)
134
+ if self.remap is not None:
135
+ indices = indices.reshape(shape[0], -1) # add batch axis
136
+ indices = self.unmap_to_all(indices)
137
+ indices = indices.reshape(-1) # flatten again
138
+
139
+ # get quantized latent vectors
140
+ z_q = self.embedding(indices)
141
+
142
+ if shape is not None:
143
+ z_q = z_q.view(shape)
144
+ # reshape back to match original input shape
145
+ z_q = z_q.permute(0, 3, 1, 2).contiguous()
146
+
147
+ return z_q
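`VectorQuantizer2.forward` above snaps each latent vector to its nearest codebook entry and keeps gradients flowing with the straight-through trick `z_q = z + (z_q - z).detach()`. A toy sketch of the same nearest-neighbour lookup and gradient behaviour (shapes and values are illustrative):

import torch

codebook = torch.tensor([[0.0, 0.0], [1.0, 1.0]])      # n_e=2 entries, e_dim=2
z = torch.tensor([[0.9, 0.8], [0.1, -0.2]], requires_grad=True)

idx = torch.cdist(z, codebook).argmin(dim=1)           # nearest codebook entry per vector
z_q = codebook[idx]
z_q = z + (z_q - z).detach()                           # straight-through: forward uses z_q,
                                                       # backward treats it as identity in z
z_q.sum().backward()
assert torch.equal(z.grad, torch.ones_like(z))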
extensions-builtin/Lora/extra_networks_lora.py ADDED
@@ -0,0 +1,45 @@
1
+ from modules import extra_networks, shared
2
+ import lora
3
+
4
+
5
+ class ExtraNetworkLora(extra_networks.ExtraNetwork):
6
+ def __init__(self):
7
+ super().__init__('lora')
8
+
9
+ def activate(self, p, params_list):
10
+ additional = shared.opts.sd_lora
11
+
12
+ if additional != "None" and additional in lora.available_loras and not any(x for x in params_list if x.items[0] == additional):
13
+ p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
14
+ params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
15
+
16
+ names = []
17
+ multipliers = []
18
+ for params in params_list:
19
+ assert params.items
20
+
21
+ names.append(params.items[0])
22
+ multipliers.append(float(params.items[1]) if len(params.items) > 1 else 1.0)
23
+
24
+ lora.load_loras(names, multipliers)
25
+
26
+ if shared.opts.lora_add_hashes_to_infotext:
27
+ lora_hashes = []
28
+ for item in lora.loaded_loras:
29
+ shorthash = item.lora_on_disk.shorthash
30
+ if not shorthash:
31
+ continue
32
+
33
+ alias = item.mentioned_name
34
+ if not alias:
35
+ continue
36
+
37
+ alias = alias.replace(":", "").replace(",", "")
38
+
39
+ lora_hashes.append(f"{alias}: {shorthash}")
40
+
41
+ if lora_hashes:
42
+ p.extra_generation_params["Lora hashes"] = ", ".join(lora_hashes)
43
+
44
+ def deactivate(self, p):
45
+ pass
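`activate` receives `params_list` with the `<lora:name:multiplier>` tags already parsed out of the prompt and forwards the names and multipliers to `lora.load_loras`. A rough sketch of how such tags could be extracted from a prompt string (the regex below is illustrative, not the webui's actual extra-network parser):

import re

def parse_lora_tags(prompt: str):
    # matches <lora:NAME> or <lora:NAME:MULTIPLIER>; multiplier defaults to 1.0
    tags = re.findall(r"<lora:([^:>]+)(?::([\d.]+))?>", prompt)
    return [(name, float(mult) if mult else 1.0) for name, mult in tags]

print(parse_lora_tags("a photo <lora:styleA:0.8> of a cat <lora:detail>"))
# [('styleA', 0.8), ('detail', 1.0)]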
extensions-builtin/Lora/lora.py ADDED
@@ -0,0 +1,506 @@
1
+ import os
2
+ import re
3
+ import torch
4
+ from typing import Union
5
+
6
+ from modules import shared, devices, sd_models, errors, scripts, sd_hijack, hashes
7
+
8
+ metadata_tags_order = {"ss_sd_model_name": 1, "ss_resolution": 2, "ss_clip_skip": 3, "ss_num_train_images": 10, "ss_tag_frequency": 20}
9
+
10
+ re_digits = re.compile(r"\d+")
11
+ re_x_proj = re.compile(r"(.*)_([qkv]_proj)$")
12
+ re_compiled = {}
13
+
14
+ suffix_conversion = {
15
+ "attentions": {},
16
+ "resnets": {
17
+ "conv1": "in_layers_2",
18
+ "conv2": "out_layers_3",
19
+ "time_emb_proj": "emb_layers_1",
20
+ "conv_shortcut": "skip_connection",
21
+ }
22
+ }
23
+
24
+
25
+ def convert_diffusers_name_to_compvis(key, is_sd2):
26
+ def match(match_list, regex_text):
27
+ regex = re_compiled.get(regex_text)
28
+ if regex is None:
29
+ regex = re.compile(regex_text)
30
+ re_compiled[regex_text] = regex
31
+
32
+ r = re.match(regex, key)
33
+ if not r:
34
+ return False
35
+
36
+ match_list.clear()
37
+ match_list.extend([int(x) if re.match(re_digits, x) else x for x in r.groups()])
38
+ return True
39
+
40
+ m = []
41
+
42
+ if match(m, r"lora_unet_down_blocks_(\d+)_(attentions|resnets)_(\d+)_(.+)"):
43
+ suffix = suffix_conversion.get(m[1], {}).get(m[3], m[3])
44
+ return f"diffusion_model_input_blocks_{1 + m[0] * 3 + m[2]}_{1 if m[1] == 'attentions' else 0}_{suffix}"
45
+
46
+ if match(m, r"lora_unet_mid_block_(attentions|resnets)_(\d+)_(.+)"):
47
+ suffix = suffix_conversion.get(m[0], {}).get(m[2], m[2])
48
+ return f"diffusion_model_middle_block_{1 if m[0] == 'attentions' else m[1] * 2}_{suffix}"
49
+
50
+ if match(m, r"lora_unet_up_blocks_(\d+)_(attentions|resnets)_(\d+)_(.+)"):
51
+ suffix = suffix_conversion.get(m[1], {}).get(m[3], m[3])
52
+ return f"diffusion_model_output_blocks_{m[0] * 3 + m[2]}_{1 if m[1] == 'attentions' else 0}_{suffix}"
53
+
54
+ if match(m, r"lora_unet_down_blocks_(\d+)_downsamplers_0_conv"):
55
+ return f"diffusion_model_input_blocks_{3 + m[0] * 3}_0_op"
56
+
57
+ if match(m, r"lora_unet_up_blocks_(\d+)_upsamplers_0_conv"):
58
+ return f"diffusion_model_output_blocks_{2 + m[0] * 3}_{2 if m[0]>0 else 1}_conv"
59
+
60
+ if match(m, r"lora_te_text_model_encoder_layers_(\d+)_(.+)"):
61
+ if is_sd2:
62
+ if 'mlp_fc1' in m[1]:
63
+ return f"model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc1', 'mlp_c_fc')}"
64
+ elif 'mlp_fc2' in m[1]:
65
+ return f"model_transformer_resblocks_{m[0]}_{m[1].replace('mlp_fc2', 'mlp_c_proj')}"
66
+ else:
67
+ return f"model_transformer_resblocks_{m[0]}_{m[1].replace('self_attn', 'attn')}"
68
+
69
+ return f"transformer_text_model_encoder_layers_{m[0]}_{m[1]}"
70
+
71
+ return key
72
+
73
+
74
+ class LoraOnDisk:
75
+ def __init__(self, name, filename):
76
+ self.name = name
77
+ self.filename = filename
78
+ self.metadata = {}
79
+ self.is_safetensors = os.path.splitext(filename)[1].lower() == ".safetensors"
80
+
81
+ if self.is_safetensors:
82
+ try:
83
+ self.metadata = sd_models.read_metadata_from_safetensors(filename)
84
+ except Exception as e:
85
+ errors.display(e, f"reading lora {filename}")
86
+
87
+ if self.metadata:
88
+ m = {}
89
+ for k, v in sorted(self.metadata.items(), key=lambda x: metadata_tags_order.get(x[0], 999)):
90
+ m[k] = v
91
+
92
+ self.metadata = m
93
+
94
+ self.ssmd_cover_images = self.metadata.pop('ssmd_cover_images', None) # those are cover images and they are too big to display in UI as text
95
+ self.alias = self.metadata.get('ss_output_name', self.name)
96
+
97
+ self.hash = None
98
+ self.shorthash = None
99
+ self.set_hash(
100
+ self.metadata.get('sshs_model_hash') or
101
+ hashes.sha256_from_cache(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or
102
+ ''
103
+ )
104
+
105
+ def set_hash(self, v):
106
+ self.hash = v
107
+ self.shorthash = self.hash[0:12]
108
+
109
+ if self.shorthash:
110
+ available_lora_hash_lookup[self.shorthash] = self
111
+
112
+ def read_hash(self):
113
+ if not self.hash:
114
+ self.set_hash(hashes.sha256(self.filename, "lora/" + self.name, use_addnet_hash=self.is_safetensors) or '')
115
+
116
+ def get_alias(self):
117
+ if shared.opts.lora_preferred_name == "Filename" or self.alias.lower() in forbidden_lora_aliases:
118
+ return self.name
119
+ else:
120
+ return self.alias
121
+
122
+
123
+ class LoraModule:
124
+ def __init__(self, name, lora_on_disk: LoraOnDisk):
125
+ self.name = name
126
+ self.lora_on_disk = lora_on_disk
127
+ self.multiplier = 1.0
128
+ self.modules = {}
129
+ self.mtime = None
130
+
131
+ self.mentioned_name = None
132
+ """the text that was used to add lora to prompt - can be either name or an alias"""
133
+
134
+
135
+ class LoraUpDownModule:
136
+ def __init__(self):
137
+ self.up = None
138
+ self.down = None
139
+ self.alpha = None
140
+
141
+
142
+ def assign_lora_names_to_compvis_modules(sd_model):
143
+ lora_layer_mapping = {}
144
+
145
+ for name, module in shared.sd_model.cond_stage_model.wrapped.named_modules():
146
+ lora_name = name.replace(".", "_")
147
+ lora_layer_mapping[lora_name] = module
148
+ module.lora_layer_name = lora_name
149
+
150
+ for name, module in shared.sd_model.model.named_modules():
151
+ lora_name = name.replace(".", "_")
152
+ lora_layer_mapping[lora_name] = module
153
+ module.lora_layer_name = lora_name
154
+
155
+ sd_model.lora_layer_mapping = lora_layer_mapping
156
+
157
+
158
+ def load_lora(name, lora_on_disk):
159
+ lora = LoraModule(name, lora_on_disk)
160
+ lora.mtime = os.path.getmtime(lora_on_disk.filename)
161
+
162
+ sd = sd_models.read_state_dict(lora_on_disk.filename)
163
+
164
+ # this should not be needed but is here as an emergency fix for an unknown error people are experiencing in 1.2.0
165
+ if not hasattr(shared.sd_model, 'lora_layer_mapping'):
166
+ assign_lora_names_to_compvis_modules(shared.sd_model)
167
+
168
+ keys_failed_to_match = {}
169
+ is_sd2 = 'model_transformer_resblocks' in shared.sd_model.lora_layer_mapping
170
+
171
+ for key_diffusers, weight in sd.items():
172
+ key_diffusers_without_lora_parts, lora_key = key_diffusers.split(".", 1)
173
+ key = convert_diffusers_name_to_compvis(key_diffusers_without_lora_parts, is_sd2)
174
+
175
+ sd_module = shared.sd_model.lora_layer_mapping.get(key, None)
176
+
177
+ if sd_module is None:
178
+ m = re_x_proj.match(key)
179
+ if m:
180
+ sd_module = shared.sd_model.lora_layer_mapping.get(m.group(1), None)
181
+
182
+ if sd_module is None:
183
+ keys_failed_to_match[key_diffusers] = key
184
+ continue
185
+
186
+ lora_module = lora.modules.get(key, None)
187
+ if lora_module is None:
188
+ lora_module = LoraUpDownModule()
189
+ lora.modules[key] = lora_module
190
+
191
+ if lora_key == "alpha":
192
+ lora_module.alpha = weight.item()
193
+ continue
194
+
195
+ if type(sd_module) == torch.nn.Linear:
196
+ module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
197
+ elif type(sd_module) == torch.nn.modules.linear.NonDynamicallyQuantizableLinear:
198
+ module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
199
+ elif type(sd_module) == torch.nn.MultiheadAttention:
200
+ module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
201
+ elif type(sd_module) == torch.nn.Conv2d and weight.shape[2:] == (1, 1):
202
+ module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False)
203
+ elif type(sd_module) == torch.nn.Conv2d and weight.shape[2:] == (3, 3):
204
+ module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (3, 3), bias=False)
205
+ else:
206
+ print(f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}')
207
+ continue
208
+ raise AssertionError(f"Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}")
209
+
210
+ with torch.no_grad():
211
+ module.weight.copy_(weight)
212
+
213
+ module.to(device=devices.cpu, dtype=devices.dtype)
214
+
215
+ if lora_key == "lora_up.weight":
216
+ lora_module.up = module
217
+ elif lora_key == "lora_down.weight":
218
+ lora_module.down = module
219
+ else:
220
+ raise AssertionError(f"Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha")
221
+
222
+ if keys_failed_to_match:
223
+ print(f"Failed to match keys when loading Lora {lora_on_disk.filename}: {keys_failed_to_match}")
224
+
225
+ return lora
226
+
227
+
228
+ def load_loras(names, multipliers=None):
229
+ already_loaded = {}
230
+
231
+ for lora in loaded_loras:
232
+ if lora.name in names:
233
+ already_loaded[lora.name] = lora
234
+
235
+ loaded_loras.clear()
236
+
237
+ loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
238
+ if any(x is None for x in loras_on_disk):
239
+ list_available_loras()
240
+
241
+ loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
242
+
243
+ failed_to_load_loras = []
244
+
245
+ for i, name in enumerate(names):
246
+ lora = already_loaded.get(name, None)
247
+
248
+ lora_on_disk = loras_on_disk[i]
249
+
250
+ if lora_on_disk is not None:
251
+ if lora is None or os.path.getmtime(lora_on_disk.filename) > lora.mtime:
252
+ try:
253
+ lora = load_lora(name, lora_on_disk)
254
+ except Exception as e:
255
+ errors.display(e, f"loading Lora {lora_on_disk.filename}")
256
+ continue
257
+
258
+ lora.mentioned_name = name
259
+
260
+ lora_on_disk.read_hash()
261
+
262
+ if lora is None:
263
+ failed_to_load_loras.append(name)
264
+ print(f"Couldn't find Lora with name {name}")
265
+ continue
266
+
267
+ lora.multiplier = multipliers[i] if multipliers else 1.0
268
+ loaded_loras.append(lora)
269
+
270
+ if failed_to_load_loras:
271
+ sd_hijack.model_hijack.comments.append("Failed to find Loras: " + ", ".join(failed_to_load_loras))
272
+
273
+
274
+ def lora_calc_updown(lora, module, target):
275
+ with torch.no_grad():
276
+ up = module.up.weight.to(target.device, dtype=target.dtype)
277
+ down = module.down.weight.to(target.device, dtype=target.dtype)
278
+
279
+ if up.shape[2:] == (1, 1) and down.shape[2:] == (1, 1):
280
+ updown = (up.squeeze(2).squeeze(2) @ down.squeeze(2).squeeze(2)).unsqueeze(2).unsqueeze(3)
281
+ elif up.shape[2:] == (3, 3) or down.shape[2:] == (3, 3):
282
+ updown = torch.nn.functional.conv2d(down.permute(1, 0, 2, 3), up).permute(1, 0, 2, 3)
283
+ else:
284
+ updown = up @ down
285
+
286
+ updown = updown * lora.multiplier * (module.alpha / module.up.weight.shape[1] if module.alpha else 1.0)
287
+
288
+ return updown
289
+
290
+
291
+ def lora_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
292
+ weights_backup = getattr(self, "lora_weights_backup", None)
293
+
294
+ if weights_backup is None:
295
+ return
296
+
297
+ if isinstance(self, torch.nn.MultiheadAttention):
298
+ self.in_proj_weight.copy_(weights_backup[0])
299
+ self.out_proj.weight.copy_(weights_backup[1])
300
+ else:
301
+ self.weight.copy_(weights_backup)
302
+
303
+
304
+ def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
305
+ """
306
+ Applies the currently selected set of Loras to the weights of torch layer self.
307
+ If weights already have this particular set of loras applied, does nothing.
308
+ If not, restores original weights from backup and alters weights according to loras.
309
+ """
310
+
311
+ lora_layer_name = getattr(self, 'lora_layer_name', None)
312
+ if lora_layer_name is None:
313
+ return
314
+
315
+ current_names = getattr(self, "lora_current_names", ())
316
+ wanted_names = tuple((x.name, x.multiplier) for x in loaded_loras)
317
+
318
+ weights_backup = getattr(self, "lora_weights_backup", None)
319
+ if weights_backup is None:
320
+ if isinstance(self, torch.nn.MultiheadAttention):
321
+ weights_backup = (self.in_proj_weight.to(devices.cpu, copy=True), self.out_proj.weight.to(devices.cpu, copy=True))
322
+ else:
323
+ weights_backup = self.weight.to(devices.cpu, copy=True)
324
+
325
+ self.lora_weights_backup = weights_backup
326
+
327
+ if current_names != wanted_names:
328
+ lora_restore_weights_from_backup(self)
329
+
330
+ for lora in loaded_loras:
331
+ module = lora.modules.get(lora_layer_name, None)
332
+ if module is not None and hasattr(self, 'weight'):
333
+ self.weight += lora_calc_updown(lora, module, self.weight)
334
+ continue
335
+
336
+ module_q = lora.modules.get(lora_layer_name + "_q_proj", None)
337
+ module_k = lora.modules.get(lora_layer_name + "_k_proj", None)
338
+ module_v = lora.modules.get(lora_layer_name + "_v_proj", None)
339
+ module_out = lora.modules.get(lora_layer_name + "_out_proj", None)
340
+
341
+ if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out:
342
+ updown_q = lora_calc_updown(lora, module_q, self.in_proj_weight)
343
+ updown_k = lora_calc_updown(lora, module_k, self.in_proj_weight)
344
+ updown_v = lora_calc_updown(lora, module_v, self.in_proj_weight)
345
+ updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
346
+
347
+ self.in_proj_weight += updown_qkv
348
+ self.out_proj.weight += lora_calc_updown(lora, module_out, self.out_proj.weight)
349
+ continue
350
+
351
+ if module is None:
352
+ continue
353
+
354
+ print(f'failed to calculate lora weights for layer {lora_layer_name}')
355
+
356
+ self.lora_current_names = wanted_names
357
+
358
+
359
+ def lora_forward(module, input, original_forward):
360
+ """
361
+ Old way of applying Lora by executing operations during layer's forward.
362
+ Stacking many loras this way results in big performance degradation.
363
+ """
364
+
365
+ if len(loaded_loras) == 0:
366
+ return original_forward(module, input)
367
+
368
+ input = devices.cond_cast_unet(input)
369
+
370
+ lora_restore_weights_from_backup(module)
371
+ lora_reset_cached_weight(module)
372
+
373
+ res = original_forward(module, input)
374
+
375
+ lora_layer_name = getattr(module, 'lora_layer_name', None)
376
+ for lora in loaded_loras:
377
+ module = lora.modules.get(lora_layer_name, None)
378
+ if module is None:
379
+ continue
380
+
381
+ module.up.to(device=devices.device)
382
+ module.down.to(device=devices.device)
383
+
384
+ res = res + module.up(module.down(input)) * lora.multiplier * (module.alpha / module.up.weight.shape[1] if module.alpha else 1.0)
385
+
386
+ return res
387
+
388
+
389
+ def lora_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
390
+ self.lora_current_names = ()
391
+ self.lora_weights_backup = None
392
+
393
+
394
+ def lora_Linear_forward(self, input):
395
+ if shared.opts.lora_functional:
396
+ return lora_forward(self, input, torch.nn.Linear_forward_before_lora)
397
+
398
+ lora_apply_weights(self)
399
+
400
+ return torch.nn.Linear_forward_before_lora(self, input)
401
+
402
+
403
+ def lora_Linear_load_state_dict(self, *args, **kwargs):
404
+ lora_reset_cached_weight(self)
405
+
406
+ return torch.nn.Linear_load_state_dict_before_lora(self, *args, **kwargs)
407
+
408
+
409
+ def lora_Conv2d_forward(self, input):
410
+ if shared.opts.lora_functional:
411
+ return lora_forward(self, input, torch.nn.Conv2d_forward_before_lora)
412
+
413
+ lora_apply_weights(self)
414
+
415
+ return torch.nn.Conv2d_forward_before_lora(self, input)
416
+
417
+
418
+ def lora_Conv2d_load_state_dict(self, *args, **kwargs):
419
+ lora_reset_cached_weight(self)
420
+
421
+ return torch.nn.Conv2d_load_state_dict_before_lora(self, *args, **kwargs)
422
+
423
+
424
+ def lora_MultiheadAttention_forward(self, *args, **kwargs):
425
+ lora_apply_weights(self)
426
+
427
+ return torch.nn.MultiheadAttention_forward_before_lora(self, *args, **kwargs)
428
+
429
+
430
+ def lora_MultiheadAttention_load_state_dict(self, *args, **kwargs):
431
+ lora_reset_cached_weight(self)
432
+
433
+ return torch.nn.MultiheadAttention_load_state_dict_before_lora(self, *args, **kwargs)
434
+
435
+
436
+ def list_available_loras():
437
+ available_loras.clear()
438
+ available_lora_aliases.clear()
439
+ forbidden_lora_aliases.clear()
440
+ available_lora_hash_lookup.clear()
441
+ forbidden_lora_aliases.update({"none": 1, "Addams": 1})
442
+
443
+ os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)
444
+
445
+ candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
446
+ for filename in sorted(candidates, key=str.lower):
447
+ if os.path.isdir(filename):
448
+ continue
449
+
450
+ name = os.path.splitext(os.path.basename(filename))[0]
451
+ try:
452
+ entry = LoraOnDisk(name, filename)
453
+ except OSError: # should catch FileNotFoundError and PermissionError etc.
454
+ errors.report(f"Failed to load LoRA {name} from {filename}", exc_info=True)
455
+ continue
456
+
457
+ available_loras[name] = entry
458
+
459
+ if entry.alias in available_lora_aliases:
460
+ forbidden_lora_aliases[entry.alias.lower()] = 1
461
+
462
+ available_lora_aliases[name] = entry
463
+ available_lora_aliases[entry.alias] = entry
464
+
465
+
466
+ re_lora_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)")
467
+
468
+
469
+ def infotext_pasted(infotext, params):
470
+ if "AddNet Module 1" in [x[1] for x in scripts.scripts_txt2img.infotext_fields]:
471
+ return # if the other extension is active, it will handle those fields, no need to do anything
472
+
473
+ added = []
474
+
475
+ for k in params:
476
+ if not k.startswith("AddNet Model "):
477
+ continue
478
+
479
+ num = k[13:]
480
+
481
+ if params.get("AddNet Module " + num) != "LoRA":
482
+ continue
483
+
484
+ name = params.get("AddNet Model " + num)
485
+ if name is None:
486
+ continue
487
+
488
+ m = re_lora_name.match(name)
489
+ if m:
490
+ name = m.group(1)
491
+
492
+ multiplier = params.get("AddNet Weight A " + num, "1.0")
493
+
494
+ added.append(f"<lora:{name}:{multiplier}>")
495
+
496
+ if added:
497
+ params["Prompt"] += "\n" + "".join(added)
498
+
499
+
500
+ available_loras = {}
501
+ available_lora_aliases = {}
502
+ available_lora_hash_lookup = {}
503
+ forbidden_lora_aliases = {}
504
+ loaded_loras = []
505
+
506
+ list_available_loras()
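`lora_calc_updown` above is the heart of the weight-merge path: the delta added to a layer's weight is `up @ down`, scaled by the user's multiplier and by `alpha / rank`. A minimal numeric sketch for a linear layer (dimensions are illustrative):

import torch

out_features, in_features, rank = 8, 16, 4
weight = torch.randn(out_features, in_features)   # original layer weight
up = torch.randn(out_features, rank)              # lora_up.weight
down = torch.randn(rank, in_features)             # lora_down.weight
alpha, multiplier = 2.0, 0.8

# same scaling as lora_calc_updown: alpha is divided by the rank (up.weight.shape[1])
updown = up @ down * multiplier * (alpha / rank)
merged = weight + updown                          # what lora_apply_weights adds in place
assert merged.shape == weight.shape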
extensions-builtin/Lora/preload.py ADDED
@@ -0,0 +1,6 @@
1
+ import os
2
+ from modules import paths
3
+
4
+
5
+ def preload(parser):
6
+ parser.add_argument("--lora-dir", type=str, help="Path to directory with Lora networks.", default=os.path.join(paths.models_path, 'Lora'))
extensions-builtin/Lora/scripts/lora_script.py ADDED
@@ -0,0 +1,116 @@
1
+ import re
2
+
3
+ import torch
4
+ import gradio as gr
5
+ from fastapi import FastAPI
6
+
7
+ import lora
8
+ import extra_networks_lora
9
+ import ui_extra_networks_lora
10
+ from modules import script_callbacks, ui_extra_networks, extra_networks, shared
11
+
12
+ def unload():
13
+ torch.nn.Linear.forward = torch.nn.Linear_forward_before_lora
14
+ torch.nn.Linear._load_from_state_dict = torch.nn.Linear_load_state_dict_before_lora
15
+ torch.nn.Conv2d.forward = torch.nn.Conv2d_forward_before_lora
16
+ torch.nn.Conv2d._load_from_state_dict = torch.nn.Conv2d_load_state_dict_before_lora
17
+ torch.nn.MultiheadAttention.forward = torch.nn.MultiheadAttention_forward_before_lora
18
+ torch.nn.MultiheadAttention._load_from_state_dict = torch.nn.MultiheadAttention_load_state_dict_before_lora
19
+
20
+
21
+ def before_ui():
22
+ ui_extra_networks.register_page(ui_extra_networks_lora.ExtraNetworksPageLora())
23
+ extra_networks.register_extra_network(extra_networks_lora.ExtraNetworkLora())
24
+
25
+
26
+ if not hasattr(torch.nn, 'Linear_forward_before_lora'):
27
+ torch.nn.Linear_forward_before_lora = torch.nn.Linear.forward
28
+
29
+ if not hasattr(torch.nn, 'Linear_load_state_dict_before_lora'):
30
+ torch.nn.Linear_load_state_dict_before_lora = torch.nn.Linear._load_from_state_dict
31
+
32
+ if not hasattr(torch.nn, 'Conv2d_forward_before_lora'):
33
+ torch.nn.Conv2d_forward_before_lora = torch.nn.Conv2d.forward
34
+
35
+ if not hasattr(torch.nn, 'Conv2d_load_state_dict_before_lora'):
36
+ torch.nn.Conv2d_load_state_dict_before_lora = torch.nn.Conv2d._load_from_state_dict
37
+
38
+ if not hasattr(torch.nn, 'MultiheadAttention_forward_before_lora'):
39
+ torch.nn.MultiheadAttention_forward_before_lora = torch.nn.MultiheadAttention.forward
40
+
41
+ if not hasattr(torch.nn, 'MultiheadAttention_load_state_dict_before_lora'):
42
+ torch.nn.MultiheadAttention_load_state_dict_before_lora = torch.nn.MultiheadAttention._load_from_state_dict
43
+
44
+ torch.nn.Linear.forward = lora.lora_Linear_forward
45
+ torch.nn.Linear._load_from_state_dict = lora.lora_Linear_load_state_dict
46
+ torch.nn.Conv2d.forward = lora.lora_Conv2d_forward
47
+ torch.nn.Conv2d._load_from_state_dict = lora.lora_Conv2d_load_state_dict
48
+ torch.nn.MultiheadAttention.forward = lora.lora_MultiheadAttention_forward
49
+ torch.nn.MultiheadAttention._load_from_state_dict = lora.lora_MultiheadAttention_load_state_dict
50
+
51
+ script_callbacks.on_model_loaded(lora.assign_lora_names_to_compvis_modules)
52
+ script_callbacks.on_script_unloaded(unload)
53
+ script_callbacks.on_before_ui(before_ui)
54
+ script_callbacks.on_infotext_pasted(lora.infotext_pasted)
55
+
56
+
57
+ shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
58
+ "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None", *lora.available_loras]}, refresh=lora.list_available_loras),
59
+ "lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to Lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}),
60
+ "lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
61
+ }))
62
+
63
+
64
+ shared.options_templates.update(shared.options_section(('compatibility', "Compatibility"), {
65
+ "lora_functional": shared.OptionInfo(False, "Lora: use old method that takes longer when you have multiple Loras active and produces same results as kohya-ss/sd-webui-additional-networks extension"),
66
+ }))
67
+
68
+
69
+ def create_lora_json(obj: lora.LoraOnDisk):
70
+ return {
71
+ "name": obj.name,
72
+ "alias": obj.alias,
73
+ "path": obj.filename,
74
+ "metadata": obj.metadata,
75
+ }
76
+
77
+
78
+ def api_loras(_: gr.Blocks, app: FastAPI):
79
+ @app.get("/sdapi/v1/loras")
80
+ async def get_loras():
81
+ return [create_lora_json(obj) for obj in lora.available_loras.values()]
82
+
83
+ @app.post("/sdapi/v1/refresh-loras")
84
+ async def refresh_loras():
85
+ return lora.list_available_loras()
86
+
87
+
88
+ script_callbacks.on_app_started(api_loras)
89
+
90
+ re_lora = re.compile("<lora:([^:]+):")
91
+
92
+
93
+ def infotext_pasted(infotext, d):
94
+ hashes = d.get("Lora hashes")
95
+ if not hashes:
96
+ return
97
+
98
+ hashes = [x.strip().split(':', 1) for x in hashes.split(",")]
99
+ hashes = {x[0].strip().replace(",", ""): x[1].strip() for x in hashes}
100
+
101
+ def lora_replacement(m):
102
+ alias = m.group(1)
103
+ shorthash = hashes.get(alias)
104
+ if shorthash is None:
105
+ return m.group(0)
106
+
107
+ lora_on_disk = lora.available_lora_hash_lookup.get(shorthash)
108
+ if lora_on_disk is None:
109
+ return m.group(0)
110
+
111
+ return f'<lora:{lora_on_disk.get_alias()}:'
112
+
113
+ d["Prompt"] = re.sub(re_lora, lora_replacement, d["Prompt"])
114
+
115
+
116
+ script_callbacks.on_infotext_pasted(infotext_pasted)
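The block above swaps LoRA-aware forwards into `torch.nn` while stashing the originals under `*_before_lora` attributes, which is what lets `unload()` restore them. A minimal sketch of the same save-wrap-restore pattern (the `Layer` class is a stand-in):

class Layer:
    def forward(self, x):
        return x * 2

if not hasattr(Layer, "forward_before_patch"):
    Layer.forward_before_patch = Layer.forward    # stash the original exactly once

def patched_forward(self, x):
    # extra behaviour, then defer to the saved original
    return Layer.forward_before_patch(self, x) + 1

Layer.forward = patched_forward
assert Layer().forward(3) == 7                    # (3 * 2) + 1

Layer.forward = Layer.forward_before_patch        # what unload() does
assert Layer().forward(3) == 6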
extensions-builtin/Lora/ui_extra_networks_lora.py ADDED
@@ -0,0 +1,36 @@
1
+ import json
2
+ import os
3
+ import lora
4
+
5
+ from modules import shared, ui_extra_networks
6
+
7
+
8
+ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
9
+ def __init__(self):
10
+ super().__init__('Lora')
11
+
12
+ def refresh(self):
13
+ lora.list_available_loras()
14
+
15
+ def list_items(self):
16
+ for index, (name, lora_on_disk) in enumerate(lora.available_loras.items()):
17
+ path, ext = os.path.splitext(lora_on_disk.filename)
18
+
19
+ alias = lora_on_disk.get_alias()
20
+
21
+ yield {
22
+ "name": name,
23
+ "filename": path,
24
+ "preview": self.find_preview(path) if self.find_preview(path) else './file=html/card-no-preview.png',
25
+ "description": self.find_description(path),
26
+ "search_term": self.search_terms_from_path(lora_on_disk.filename),
27
+ "prompt": json.dumps(f"<lora:{alias}:") + " + opts.extra_networks_default_multiplier + " + json.dumps(">"),
28
+ "local_preview": f"{path}.{shared.opts.samples_format}",
29
+ "metadata": json.dumps(lora_on_disk.metadata, indent=4) if lora_on_disk.metadata else None,
30
+ "sort_keys": {'default': index, **self.get_sort_keys(lora_on_disk.filename)},
31
+
32
+ }
33
+
34
+ def allowed_directories_for_previews(self):
35
+ return [shared.cmd_opts.lora_dir]
36
+
extensions-builtin/ScuNET/preload.py ADDED
@@ -0,0 +1,6 @@
1
+ import os
2
+ from modules import paths
3
+
4
+
5
+ def preload(parser):
6
+ parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(paths.models_path, 'ScuNET'))
extensions-builtin/ScuNET/scripts/scunet_model.py ADDED
@@ -0,0 +1,148 @@
1
+ import os.path
2
+ import sys
3
+
4
+ import PIL.Image
5
+ import numpy as np
6
+ import torch
7
+ from tqdm import tqdm
8
+
9
+ from basicsr.utils.download_util import load_file_from_url
10
+
11
+ import modules.upscaler
12
+ from modules import devices, modelloader, script_callbacks, errors
13
+ from scunet_model_arch import SCUNet as net
14
+
15
+ from modules.shared import opts
16
+
17
+
18
+ class UpscalerScuNET(modules.upscaler.Upscaler):
19
+ def __init__(self, dirname):
20
+ self.name = "ScuNET"
21
+ self.model_name = "ScuNET GAN"
22
+ self.model_name2 = "ScuNET PSNR"
23
+ self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/scunet_color_real_gan.pth"
24
+ self.model_url2 = "https://github.com/cszn/KAIR/releases/download/v1.0/scunet_color_real_psnr.pth"
25
+ self.user_path = dirname
26
+ super().__init__()
27
+ model_paths = self.find_models(ext_filter=[".pth"])
28
+ scalers = []
29
+ add_model2 = True
30
+ for file in model_paths:
31
+ if "http" in file:
32
+ name = self.model_name
33
+ else:
34
+ name = modelloader.friendly_name(file)
35
+ if name == self.model_name2 or file == self.model_url2:
36
+ add_model2 = False
37
+ try:
38
+ scaler_data = modules.upscaler.UpscalerData(name, file, self, 4)
39
+ scalers.append(scaler_data)
40
+ except Exception:
41
+ errors.report(f"Error loading ScuNET model: {file}", exc_info=True)
42
+ if add_model2:
43
+ scaler_data2 = modules.upscaler.UpscalerData(self.model_name2, self.model_url2, self)
44
+ scalers.append(scaler_data2)
45
+ self.scalers = scalers
46
+
47
+ @staticmethod
48
+ @torch.no_grad()
49
+ def tiled_inference(img, model):
50
+ # test the image tile by tile
51
+ h, w = img.shape[2:]
52
+ tile = opts.SCUNET_tile
53
+ tile_overlap = opts.SCUNET_tile_overlap
54
+ if tile == 0:
55
+ return model(img)
56
+
57
+ device = devices.get_device_for('scunet')
58
+ assert tile % 8 == 0, "tile size should be a multiple of window_size"
59
+ sf = 1
60
+
61
+ stride = tile - tile_overlap
62
+ h_idx_list = list(range(0, h - tile, stride)) + [h - tile]
63
+ w_idx_list = list(range(0, w - tile, stride)) + [w - tile]
64
+ E = torch.zeros(1, 3, h * sf, w * sf, dtype=img.dtype, device=device)
65
+ W = torch.zeros_like(E, dtype=devices.dtype, device=device)
66
+
67
+ with tqdm(total=len(h_idx_list) * len(w_idx_list), desc="ScuNET tiles") as pbar:
68
+ for h_idx in h_idx_list:
69
+
70
+ for w_idx in w_idx_list:
71
+
72
+ in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile]
73
+
74
+ out_patch = model(in_patch)
75
+ out_patch_mask = torch.ones_like(out_patch)
76
+
77
+ E[
78
+ ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
79
+ ].add_(out_patch)
80
+ W[
81
+ ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
82
+ ].add_(out_patch_mask)
83
+ pbar.update(1)
84
+ output = E.div_(W)
85
+
86
+ return output
87
+
88
+ def do_upscale(self, img: PIL.Image.Image, selected_file):
89
+
90
+ torch.cuda.empty_cache()
91
+
92
+ model = self.load_model(selected_file)
93
+ if model is None:
94
+ print(f"ScuNET: Unable to load model from {selected_file}", file=sys.stderr)
95
+ return img
96
+
97
+ device = devices.get_device_for('scunet')
98
+ tile = opts.SCUNET_tile
99
+ h, w = img.height, img.width
100
+ np_img = np.array(img)
101
+ np_img = np_img[:, :, ::-1] # RGB to BGR
102
+ np_img = np_img.transpose((2, 0, 1)) / 255 # HWC to CHW
103
+ torch_img = torch.from_numpy(np_img).float().unsqueeze(0).to(device) # type: ignore
104
+
105
+ if tile > h or tile > w:
106
+ _img = torch.zeros(1, 3, max(h, tile), max(w, tile), dtype=torch_img.dtype, device=torch_img.device)
107
+ _img[:, :, :h, :w] = torch_img # pad image
108
+ torch_img = _img
109
+
110
+ torch_output = self.tiled_inference(torch_img, model).squeeze(0)
111
+ torch_output = torch_output[:, :h * 1, :w * 1] # remove padding, if any
112
+ np_output: np.ndarray = torch_output.float().cpu().clamp_(0, 1).numpy()
113
+ del torch_img, torch_output
114
+ torch.cuda.empty_cache()
115
+
116
+ output = np_output.transpose((1, 2, 0)) # CHW to HWC
117
+ output = output[:, :, ::-1] # BGR to RGB
118
+ return PIL.Image.fromarray((output * 255).astype(np.uint8))
119
+
120
+ def load_model(self, path: str):
121
+ device = devices.get_device_for('scunet')
122
+ if "http" in path:
123
+ filename = load_file_from_url(url=path, model_dir=self.model_download_path, file_name="%s.pth" % self.name, progress=True)
124
+ else:
125
+ filename = path
126
+ if filename is None or not os.path.exists(filename):
127
+ print(f"ScuNET: Unable to load model from {filename}", file=sys.stderr)
128
+ return None
129
+
130
+ model = net(in_nc=3, config=[4, 4, 4, 4, 4, 4, 4], dim=64)
131
+ model.load_state_dict(torch.load(filename), strict=True)
132
+ model.eval()
133
+ for _, v in model.named_parameters():
134
+ v.requires_grad = False
135
+ model = model.to(device)
136
+
137
+ return model
138
+
139
+
140
+ def on_ui_settings():
141
+ import gradio as gr
142
+ from modules import shared
143
+
144
+ shared.opts.add_option("SCUNET_tile", shared.OptionInfo(256, "Tile size for SCUNET upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, section=('upscaling', "Upscaling")).info("0 = no tiling"))
145
+ shared.opts.add_option("SCUNET_tile_overlap", shared.OptionInfo(8, "Tile overlap for SCUNET upscalers.", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, section=('upscaling', "Upscaling")).info("Low values = visible seam"))
146
+
147
+
148
+ script_callbacks.on_ui_settings(on_ui_settings)
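`tiled_inference` above accumulates each tile's output into `E` and an all-ones mask into `W`, then divides, so overlapping regions are averaged rather than producing seams. A 1-D sketch of the same scheme with an arbitrary `model` callable (identity here):

import torch

def tiled_1d(x, model, tile=8, overlap=2):
    n = x.shape[-1]
    stride = tile - overlap
    idxs = list(range(0, n - tile, stride)) + [n - tile]  # last tile flush with the end
    E = torch.zeros_like(x)  # accumulated outputs
    W = torch.zeros_like(x)  # how many tiles covered each position
    for i in idxs:
        E[..., i:i + tile] += model(x[..., i:i + tile])
        W[..., i:i + tile] += 1
    return E / W             # average where tiles overlap

x = torch.arange(20.0)
assert torch.allclose(tiled_1d(x, lambda t: t), x)  # identity model round-trips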
extensions-builtin/ScuNET/scunet_model_arch.py ADDED
@@ -0,0 +1,268 @@
1
+ # -*- coding: utf-8 -*-
2
+ import numpy as np
3
+ import torch
4
+ import torch.nn as nn
5
+ from einops import rearrange
6
+ from einops.layers.torch import Rearrange
7
+ from timm.models.layers import trunc_normal_, DropPath
8
+
9
+
10
+ class WMSA(nn.Module):
11
+ """ Self-attention module in Swin Transformer
12
+ """
13
+
14
+ def __init__(self, input_dim, output_dim, head_dim, window_size, type):
15
+ super(WMSA, self).__init__()
16
+ self.input_dim = input_dim
17
+ self.output_dim = output_dim
18
+ self.head_dim = head_dim
19
+ self.scale = self.head_dim ** -0.5
20
+ self.n_heads = input_dim // head_dim
21
+ self.window_size = window_size
22
+ self.type = type
23
+ self.embedding_layer = nn.Linear(self.input_dim, 3 * self.input_dim, bias=True)
24
+
25
+ self.relative_position_params = nn.Parameter(
26
+ torch.zeros((2 * window_size - 1) * (2 * window_size - 1), self.n_heads))
27
+
28
+ self.linear = nn.Linear(self.input_dim, self.output_dim)
29
+
30
+ trunc_normal_(self.relative_position_params, std=.02)
31
+ self.relative_position_params = torch.nn.Parameter(
32
+ self.relative_position_params.view(2 * window_size - 1, 2 * window_size - 1, self.n_heads).transpose(1,
33
+ 2).transpose(
34
+ 0, 1))
35
+
36
+ def generate_mask(self, h, w, p, shift):
37
+ """ generating the mask of SW-MSA
38
+ Args:
39
+ shift: shift parameters in CyclicShift.
40
+ Returns:
41
+ attn_mask: should be (1 1 w p p),
42
+ """
43
+ # supporting square.
44
+ attn_mask = torch.zeros(h, w, p, p, p, p, dtype=torch.bool, device=self.relative_position_params.device)
45
+ if self.type == 'W':
46
+ return attn_mask
47
+
48
+ s = p - shift
49
+ attn_mask[-1, :, :s, :, s:, :] = True
50
+ attn_mask[-1, :, s:, :, :s, :] = True
51
+ attn_mask[:, -1, :, :s, :, s:] = True
52
+ attn_mask[:, -1, :, s:, :, :s] = True
53
+ attn_mask = rearrange(attn_mask, 'w1 w2 p1 p2 p3 p4 -> 1 1 (w1 w2) (p1 p2) (p3 p4)')
54
+ return attn_mask
55
+
56
+ def forward(self, x):
57
+ """ Forward pass of Window Multi-head Self-attention module.
58
+ Args:
59
+ x: input tensor with shape of [b h w c];
60
+ attn_mask: attention mask, fill -inf where the value is True;
61
+ Returns:
62
+ output: tensor shape [b h w c]
63
+ """
64
+ if self.type != 'W':
65
+ x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2))
66
+
67
+ x = rearrange(x, 'b (w1 p1) (w2 p2) c -> b w1 w2 p1 p2 c', p1=self.window_size, p2=self.window_size)
68
+ h_windows = x.size(1)
69
+ w_windows = x.size(2)
70
+ # square validation
71
+ # assert h_windows == w_windows
72
+
73
+ x = rearrange(x, 'b w1 w2 p1 p2 c -> b (w1 w2) (p1 p2) c', p1=self.window_size, p2=self.window_size)
74
+ qkv = self.embedding_layer(x)
75
+ q, k, v = rearrange(qkv, 'b nw np (threeh c) -> threeh b nw np c', c=self.head_dim).chunk(3, dim=0)
76
+ sim = torch.einsum('hbwpc,hbwqc->hbwpq', q, k) * self.scale
77
+ # Adding learnable relative embedding
78
+ sim = sim + rearrange(self.relative_embedding(), 'h p q -> h 1 1 p q')
79
+ # Using Attn Mask to distinguish different subwindows.
80
+ if self.type != 'W':
81
+ attn_mask = self.generate_mask(h_windows, w_windows, self.window_size, shift=self.window_size // 2)
82
+ sim = sim.masked_fill_(attn_mask, float("-inf"))
83
+
84
+ probs = nn.functional.softmax(sim, dim=-1)
85
+ output = torch.einsum('hbwij,hbwjc->hbwic', probs, v)
86
+ output = rearrange(output, 'h b w p c -> b w p (h c)')
87
+ output = self.linear(output)
88
+ output = rearrange(output, 'b (w1 w2) (p1 p2) c -> b (w1 p1) (w2 p2) c', w1=h_windows, p1=self.window_size)
89
+
90
+ if self.type != 'W':
91
+ output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2), dims=(1, 2))
92
+
93
+ return output
94
+
95
+ def relative_embedding(self):
96
+ cord = torch.tensor(np.array([[i, j] for i in range(self.window_size) for j in range(self.window_size)]))
97
+ relation = cord[:, None, :] - cord[None, :, :] + self.window_size - 1
98
+ # negative is allowed
99
+ return self.relative_position_params[:, relation[:, :, 0].long(), relation[:, :, 1].long()]
100
+
101
+
102
+ class Block(nn.Module):
103
+ def __init__(self, input_dim, output_dim, head_dim, window_size, drop_path, type='W', input_resolution=None):
104
+ """ SwinTransformer Block
105
+ """
106
+ super(Block, self).__init__()
107
+ self.input_dim = input_dim
108
+ self.output_dim = output_dim
109
+ assert type in ['W', 'SW']
110
+ self.type = type
111
+ if input_resolution <= window_size:
112
+ self.type = 'W'
113
+
114
+ self.ln1 = nn.LayerNorm(input_dim)
115
+ self.msa = WMSA(input_dim, input_dim, head_dim, window_size, self.type)
116
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
117
+ self.ln2 = nn.LayerNorm(input_dim)
118
+ self.mlp = nn.Sequential(
119
+ nn.Linear(input_dim, 4 * input_dim),
120
+ nn.GELU(),
121
+ nn.Linear(4 * input_dim, output_dim),
122
+ )
123
+
124
+ def forward(self, x):
125
+ x = x + self.drop_path(self.msa(self.ln1(x)))
126
+ x = x + self.drop_path(self.mlp(self.ln2(x)))
127
+ return x
128
+
129
+
130
+ class ConvTransBlock(nn.Module):
131
+ def __init__(self, conv_dim, trans_dim, head_dim, window_size, drop_path, type='W', input_resolution=None):
132
+ """ SwinTransformer and Conv Block
133
+ """
134
+ super(ConvTransBlock, self).__init__()
135
+ self.conv_dim = conv_dim
136
+ self.trans_dim = trans_dim
137
+ self.head_dim = head_dim
138
+ self.window_size = window_size
139
+ self.drop_path = drop_path
140
+ self.type = type
141
+ self.input_resolution = input_resolution
142
+
143
+ assert self.type in ['W', 'SW']
144
+ if self.input_resolution <= self.window_size:
145
+ self.type = 'W'
146
+
147
+ self.trans_block = Block(self.trans_dim, self.trans_dim, self.head_dim, self.window_size, self.drop_path,
148
+ self.type, self.input_resolution)
149
+ self.conv1_1 = nn.Conv2d(self.conv_dim + self.trans_dim, self.conv_dim + self.trans_dim, 1, 1, 0, bias=True)
150
+ self.conv1_2 = nn.Conv2d(self.conv_dim + self.trans_dim, self.conv_dim + self.trans_dim, 1, 1, 0, bias=True)
151
+
152
+ self.conv_block = nn.Sequential(
153
+ nn.Conv2d(self.conv_dim, self.conv_dim, 3, 1, 1, bias=False),
154
+ nn.ReLU(True),
155
+ nn.Conv2d(self.conv_dim, self.conv_dim, 3, 1, 1, bias=False)
156
+ )
157
+
158
+ def forward(self, x):
159
+ conv_x, trans_x = torch.split(self.conv1_1(x), (self.conv_dim, self.trans_dim), dim=1)
160
+ conv_x = self.conv_block(conv_x) + conv_x
161
+ trans_x = Rearrange('b c h w -> b h w c')(trans_x)
162
+ trans_x = self.trans_block(trans_x)
163
+ trans_x = Rearrange('b h w c -> b c h w')(trans_x)
164
+ res = self.conv1_2(torch.cat((conv_x, trans_x), dim=1))
165
+ x = x + res
166
+
167
+ return x
168
+
169
+
170
+ class SCUNet(nn.Module):
171
+ # def __init__(self, in_nc=3, config=[2, 2, 2, 2, 2, 2, 2], dim=64, drop_path_rate=0.0, input_resolution=256):
172
+ def __init__(self, in_nc=3, config=None, dim=64, drop_path_rate=0.0, input_resolution=256):
173
+ super(SCUNet, self).__init__()
174
+ if config is None:
175
+ config = [2, 2, 2, 2, 2, 2, 2]
176
+ self.config = config
177
+ self.dim = dim
178
+ self.head_dim = 32
179
+ self.window_size = 8
180
+
181
+ # drop path rate for each layer
182
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(config))]
183
+
184
+ self.m_head = [nn.Conv2d(in_nc, dim, 3, 1, 1, bias=False)]
185
+
186
+ begin = 0
187
+ self.m_down1 = [ConvTransBlock(dim // 2, dim // 2, self.head_dim, self.window_size, dpr[i + begin],
188
+ 'W' if not i % 2 else 'SW', input_resolution)
189
+ for i in range(config[0])] + \
190
+ [nn.Conv2d(dim, 2 * dim, 2, 2, 0, bias=False)]
191
+
192
+ begin += config[0]
193
+ self.m_down2 = [ConvTransBlock(dim, dim, self.head_dim, self.window_size, dpr[i + begin],
194
+ 'W' if not i % 2 else 'SW', input_resolution // 2)
195
+ for i in range(config[1])] + \
196
+ [nn.Conv2d(2 * dim, 4 * dim, 2, 2, 0, bias=False)]
197
+
198
+ begin += config[1]
199
+ self.m_down3 = [ConvTransBlock(2 * dim, 2 * dim, self.head_dim, self.window_size, dpr[i + begin],
200
+ 'W' if not i % 2 else 'SW', input_resolution // 4)
201
+ for i in range(config[2])] + \
202
+ [nn.Conv2d(4 * dim, 8 * dim, 2, 2, 0, bias=False)]
203
+
204
+ begin += config[2]
205
+ self.m_body = [ConvTransBlock(4 * dim, 4 * dim, self.head_dim, self.window_size, dpr[i + begin],
206
+ 'W' if not i % 2 else 'SW', input_resolution // 8)
207
+ for i in range(config[3])]
208
+
209
+ begin += config[3]
210
+ self.m_up3 = [nn.ConvTranspose2d(8 * dim, 4 * dim, 2, 2, 0, bias=False), ] + \
211
+ [ConvTransBlock(2 * dim, 2 * dim, self.head_dim, self.window_size, dpr[i + begin],
212
+ 'W' if not i % 2 else 'SW', input_resolution // 4)
213
+ for i in range(config[4])]
214
+
215
+ begin += config[4]
216
+ self.m_up2 = [nn.ConvTranspose2d(4 * dim, 2 * dim, 2, 2, 0, bias=False), ] + \
217
+ [ConvTransBlock(dim, dim, self.head_dim, self.window_size, dpr[i + begin],
218
+ 'W' if not i % 2 else 'SW', input_resolution // 2)
219
+ for i in range(config[5])]
220
+
221
+ begin += config[5]
222
+ self.m_up1 = [nn.ConvTranspose2d(2 * dim, dim, 2, 2, 0, bias=False), ] + \
223
+ [ConvTransBlock(dim // 2, dim // 2, self.head_dim, self.window_size, dpr[i + begin],
224
+ 'W' if not i % 2 else 'SW', input_resolution)
225
+ for i in range(config[6])]
226
+
227
+ self.m_tail = [nn.Conv2d(dim, in_nc, 3, 1, 1, bias=False)]
228
+
229
+ self.m_head = nn.Sequential(*self.m_head)
230
+ self.m_down1 = nn.Sequential(*self.m_down1)
231
+ self.m_down2 = nn.Sequential(*self.m_down2)
232
+ self.m_down3 = nn.Sequential(*self.m_down3)
233
+ self.m_body = nn.Sequential(*self.m_body)
234
+ self.m_up3 = nn.Sequential(*self.m_up3)
235
+ self.m_up2 = nn.Sequential(*self.m_up2)
236
+ self.m_up1 = nn.Sequential(*self.m_up1)
237
+ self.m_tail = nn.Sequential(*self.m_tail)
238
+ # self.apply(self._init_weights)
239
+
240
+ def forward(self, x0):
241
+
242
+ h, w = x0.size()[-2:]
243
+ paddingBottom = int(np.ceil(h / 64) * 64 - h)
244
+ paddingRight = int(np.ceil(w / 64) * 64 - w)
245
+ x0 = nn.ReplicationPad2d((0, paddingRight, 0, paddingBottom))(x0)
246
+
247
+ x1 = self.m_head(x0)
248
+ x2 = self.m_down1(x1)
249
+ x3 = self.m_down2(x2)
250
+ x4 = self.m_down3(x3)
251
+ x = self.m_body(x4)
252
+ x = self.m_up3(x + x4)
253
+ x = self.m_up2(x + x3)
254
+ x = self.m_up1(x + x2)
255
+ x = self.m_tail(x + x1)
256
+
257
+ x = x[..., :h, :w]
258
+
259
+ return x
260
+
261
+ def _init_weights(self, m):
262
+ if isinstance(m, nn.Linear):
263
+ trunc_normal_(m.weight, std=.02)
264
+ if m.bias is not None:
265
+ nn.init.constant_(m.bias, 0)
266
+ elif isinstance(m, nn.LayerNorm):
267
+ nn.init.constant_(m.bias, 0)
268
+ nn.init.constant_(m.weight, 1.0)
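`WMSA` partitions the feature map into `window_size × window_size` windows before attention, and the shifted ('SW') variant first rolls the map by half a window so neighbouring windows can exchange information. A minimal round-trip sketch of that roll-partition-merge-unroll sequence (sizes are illustrative):

import torch
from einops import rearrange

b, h, w, c, p = 1, 8, 8, 3, 4    # feature map and window size
x = torch.randn(b, h, w, c)

shifted = torch.roll(x, shifts=(-p // 2, -p // 2), dims=(1, 2))       # 'SW' pre-shift
windows = rearrange(shifted, 'b (w1 p1) (w2 p2) c -> b (w1 w2) (p1 p2) c', p1=p, p2=p)
# ... per-window attention would run here ...
merged = rearrange(windows, 'b (w1 w2) (p1 p2) c -> b (w1 p1) (w2 p2) c', w1=h // p, p1=p)
restored = torch.roll(merged, shifts=(p // 2, p // 2), dims=(1, 2))   # undo the shift
assert torch.allclose(restored, x)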
extensions-builtin/SwinIR/preload.py ADDED
@@ -0,0 +1,6 @@
1
+ import os
2
+ from modules import paths
3
+
4
+
5
+ def preload(parser):
6
+ parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(paths.models_path, 'SwinIR'))
extensions-builtin/SwinIR/scripts/swinir_model.py ADDED
@@ -0,0 +1,177 @@
1
+ import os
2
+
3
+ import numpy as np
4
+ import torch
5
+ from PIL import Image
6
+ from basicsr.utils.download_util import load_file_from_url
7
+ from tqdm import tqdm
8
+
9
+ from modules import modelloader, devices, script_callbacks, shared
10
+ from modules.shared import opts, state
11
+ from swinir_model_arch import SwinIR as net
12
+ from swinir_model_arch_v2 import Swin2SR as net2
13
+ from modules.upscaler import Upscaler, UpscalerData
14
+
15
+
16
+ device_swinir = devices.get_device_for('swinir')
17
+
18
+
19
+ class UpscalerSwinIR(Upscaler):
20
+ def __init__(self, dirname):
21
+ self.name = "SwinIR"
22
+ self.model_url = "https://github.com/JingyunLiang/SwinIR/releases/download/v0.0" \
23
+ "/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR" \
24
+ "-L_x4_GAN.pth "
25
+ self.model_name = "SwinIR 4x"
26
+ self.user_path = dirname
27
+ super().__init__()
28
+ scalers = []
29
+ model_files = self.find_models(ext_filter=[".pt", ".pth"])
30
+ for model in model_files:
31
+ if "http" in model:
32
+ name = self.model_name
33
+ else:
34
+ name = modelloader.friendly_name(model)
35
+ model_data = UpscalerData(name, model, self)
36
+ scalers.append(model_data)
37
+ self.scalers = scalers
38
+
+     def do_upscale(self, img, model_file):
+         model = self.load_model(model_file)
+         if model is None:
+             return img
+         model = model.to(device_swinir, dtype=devices.dtype)
+         img = upscale(img, model)
+         try:
+             torch.cuda.empty_cache()
+         except Exception:
+             pass
+         return img
+
+     def load_model(self, path, scale=4):
+         if "http" in path:
+             dl_name = "%s%s" % (self.model_name.replace(" ", "_"), ".pth")
+             filename = load_file_from_url(url=path, model_dir=self.model_download_path, file_name=dl_name, progress=True)
+         else:
+             filename = path
+         if filename is None or not os.path.exists(filename):
+             return None
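+         # checkpoints named "*.v2.pth" are loaded with the Swin2SR (v2)
+         # architecture; everything else uses the original SwinIR-L layout,
+         # whose weights live under the "params_ema" key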
+         if filename.endswith(".v2.pth"):
+             model = net2(
+                 upscale=scale,
+                 in_chans=3,
+                 img_size=64,
+                 window_size=8,
+                 img_range=1.0,
+                 depths=[6, 6, 6, 6, 6, 6],
+                 embed_dim=180,
+                 num_heads=[6, 6, 6, 6, 6, 6],
+                 mlp_ratio=2,
+                 upsampler="nearest+conv",
+                 resi_connection="1conv",
+             )
+             params = None
+         else:
+             model = net(
+                 upscale=scale,
+                 in_chans=3,
+                 img_size=64,
+                 window_size=8,
+                 img_range=1.0,
+                 depths=[6, 6, 6, 6, 6, 6, 6, 6, 6],
+                 embed_dim=240,
+                 num_heads=[8, 8, 8, 8, 8, 8, 8, 8, 8],
+                 mlp_ratio=2,
+                 upsampler="nearest+conv",
+                 resi_connection="3conv",
+             )
+             params = "params_ema"
+
+         pretrained_model = torch.load(filename)
+         if params is not None:
+             model.load_state_dict(pretrained_model[params], strict=True)
+         else:
+             model.load_state_dict(pretrained_model, strict=True)
+         return model
+
+
+ def upscale(
+     img,
+     model,
+     tile=None,
+     tile_overlap=None,
+     window_size=8,
+     scale=4,
+ ):
+     tile = tile or opts.SWIN_tile
+     tile_overlap = tile_overlap or opts.SWIN_tile_overlap
+
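+     # PIL gives HWC RGB in [0, 255]; build a BCHW BGR float tensor in [0, 1]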
+     img = np.array(img)
+     img = img[:, :, ::-1]
+     img = np.moveaxis(img, 2, 0) / 255
+     img = torch.from_numpy(img).float()
+     img = img.unsqueeze(0).to(device_swinir, dtype=devices.dtype)
+     with torch.no_grad(), devices.autocast():
+         _, _, h_old, w_old = img.size()
+         h_pad = (h_old // window_size + 1) * window_size - h_old
+         w_pad = (w_old // window_size + 1) * window_size - w_old
+         img = torch.cat([img, torch.flip(img, [2])], 2)[:, :, : h_old + h_pad, :]
+         img = torch.cat([img, torch.flip(img, [3])], 3)[:, :, :, : w_old + w_pad]
+         output = inference(img, model, tile, tile_overlap, window_size, scale)
+         output = output[..., : h_old * scale, : w_old * scale]
+         output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
+         if output.ndim == 3:
+             output = np.transpose(
+                 output[[2, 1, 0], :, :], (1, 2, 0)
+             )  # CHW-BGR to HWC-RGB
+         output = (output * 255.0).round().astype(np.uint8)  # float32 to uint8
+         return Image.fromarray(output, "RGB")
+
+
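+ # Tiles are run through the model with `tile_overlap` pixels of overlap; E
+ # accumulates model outputs and W per-pixel weights, so E.div_(W) averages
+ # the overlapping regions and hides seams.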
+ def inference(img, model, tile, tile_overlap, window_size, scale):
+     # test the image tile by tile
+     b, c, h, w = img.size()
+     tile = min(tile, h, w)
+     assert tile % window_size == 0, "tile size should be a multiple of window_size"
+     sf = scale
+
+     stride = tile - tile_overlap
+     h_idx_list = list(range(0, h - tile, stride)) + [h - tile]
+     w_idx_list = list(range(0, w - tile, stride)) + [w - tile]
+     E = torch.zeros(b, c, h * sf, w * sf, dtype=devices.dtype, device=device_swinir).type_as(img)
+     W = torch.zeros_like(E, dtype=devices.dtype, device=device_swinir)
+
+     with tqdm(total=len(h_idx_list) * len(w_idx_list), desc="SwinIR tiles") as pbar:
+         for h_idx in h_idx_list:
+             if state.interrupted or state.skipped:
+                 break
+
+             for w_idx in w_idx_list:
+                 if state.interrupted or state.skipped:
+                     break
+
+                 in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile]
+                 out_patch = model(in_patch)
+                 out_patch_mask = torch.ones_like(out_patch)
+
+                 E[
+                     ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
+                 ].add_(out_patch)
+                 W[
+                     ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
+                 ].add_(out_patch_mask)
+                 pbar.update(1)
+     output = E.div_(W)
+
+     return output
+
+
+ def on_ui_settings():
+     import gradio as gr
+
+     shared.opts.add_option("SWIN_tile", shared.OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}, section=('upscaling', "Upscaling")))
+     shared.opts.add_option("SWIN_tile_overlap", shared.OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}, section=('upscaling', "Upscaling")))
+
+
+ script_callbacks.on_ui_settings(on_ui_settings)
extensions-builtin/SwinIR/swinir_model_arch.py ADDED
@@ -0,0 +1,867 @@
+ # -----------------------------------------------------------------------------------
+ # SwinIR: Image Restoration Using Swin Transformer, https://arxiv.org/abs/2108.10257
+ # Originally Written by Ze Liu, Modified by Jingyun Liang.
+ # -----------------------------------------------------------------------------------
+
+ import math
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import torch.utils.checkpoint as checkpoint
+ from timm.models.layers import DropPath, to_2tuple, trunc_normal_
+
+
+ class Mlp(nn.Module):
+     def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
+         super().__init__()
+         out_features = out_features or in_features
+         hidden_features = hidden_features or in_features
+         self.fc1 = nn.Linear(in_features, hidden_features)
+         self.act = act_layer()
+         self.fc2 = nn.Linear(hidden_features, out_features)
+         self.drop = nn.Dropout(drop)
+
+     def forward(self, x):
+         x = self.fc1(x)
+         x = self.act(x)
+         x = self.drop(x)
+         x = self.fc2(x)
+         x = self.drop(x)
+         return x
+
+
+ def window_partition(x, window_size):
+     """
+     Args:
+         x: (B, H, W, C)
+         window_size (int): window size
+
+     Returns:
+         windows: (num_windows*B, window_size, window_size, C)
+     """
+     B, H, W, C = x.shape
+     x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
+     windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
+     return windows
+
+
+ def window_reverse(windows, window_size, H, W):
+     """
+     Args:
+         windows: (num_windows*B, window_size, window_size, C)
+         window_size (int): Window size
+         H (int): Height of image
+         W (int): Width of image
+
+     Returns:
+         x: (B, H, W, C)
+     """
+     B = int(windows.shape[0] / (H * W / window_size / window_size))
+     x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
+     x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
+     return x
+
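+ # Shape round trip, e.g. for x of shape (2, 16, 16, 3) with window_size=8:
+ #   windows = window_partition(x, 8)             # (8, 8, 8, 3): 4 windows per image
+ #   x_back = window_reverse(windows, 8, 16, 16)  # (2, 16, 16, 3)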
+
+ class WindowAttention(nn.Module):
+     r""" Window based multi-head self attention (W-MSA) module with relative position bias.
+     It supports both shifted and non-shifted windows.
+
+     Args:
+         dim (int): Number of input channels.
+         window_size (tuple[int]): The height and width of the window.
+         num_heads (int): Number of attention heads.
+         qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+         qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
+         attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
+         proj_drop (float, optional): Dropout ratio of output. Default: 0.0
+     """
+
+     def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
+
+         super().__init__()
+         self.dim = dim
+         self.window_size = window_size  # Wh, Ww
+         self.num_heads = num_heads
+         head_dim = dim // num_heads
+         self.scale = qk_scale or head_dim ** -0.5
+
+         # define a parameter table of relative position bias
+         self.relative_position_bias_table = nn.Parameter(
+             torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))  # 2*Wh-1 * 2*Ww-1, nH
+
+         # get pair-wise relative position index for each token inside the window
+         coords_h = torch.arange(self.window_size[0])
+         coords_w = torch.arange(self.window_size[1])
+         coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
+         coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
+         relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
+         relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
+         relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
+         relative_coords[:, :, 1] += self.window_size[1] - 1
+         relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
+         relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
+         self.register_buffer("relative_position_index", relative_position_index)
+
+         self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+         self.attn_drop = nn.Dropout(attn_drop)
+         self.proj = nn.Linear(dim, dim)
+
+         self.proj_drop = nn.Dropout(proj_drop)
+
+         trunc_normal_(self.relative_position_bias_table, std=.02)
+         self.softmax = nn.Softmax(dim=-1)
+
+     def forward(self, x, mask=None):
+         """
+         Args:
+             x: input features with shape of (num_windows*B, N, C)
+             mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
+         """
+         B_, N, C = x.shape
+         qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+         q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)
+
+         q = q * self.scale
+         attn = (q @ k.transpose(-2, -1))
+
+         relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
+             self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
+         relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
+         attn = attn + relative_position_bias.unsqueeze(0)
+
+         if mask is not None:
+             nW = mask.shape[0]
+             attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
+             attn = attn.view(-1, self.num_heads, N, N)
+             attn = self.softmax(attn)
+         else:
+             attn = self.softmax(attn)
+
+         attn = self.attn_drop(attn)
+
+         x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
+         x = self.proj(x)
+         x = self.proj_drop(x)
+         return x
+
+     def extra_repr(self) -> str:
+         return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
+
+     def flops(self, N):
+         # calculate flops for 1 window with token length of N
+         flops = 0
+         # qkv = self.qkv(x)
+         flops += N * self.dim * 3 * self.dim
+         # attn = (q @ k.transpose(-2, -1))
+         flops += self.num_heads * N * (self.dim // self.num_heads) * N
+         # x = (attn @ v)
+         flops += self.num_heads * N * N * (self.dim // self.num_heads)
+         # x = self.proj(x)
+         flops += N * self.dim * self.dim
+         return flops
+
+
+ class SwinTransformerBlock(nn.Module):
+     r""" Swin Transformer Block.
+
+     Args:
+         dim (int): Number of input channels.
+         input_resolution (tuple[int]): Input resolution.
+         num_heads (int): Number of attention heads.
+         window_size (int): Window size.
+         shift_size (int): Shift size for SW-MSA.
+         mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+         qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+         qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
+         drop (float, optional): Dropout rate. Default: 0.0
+         attn_drop (float, optional): Attention dropout rate. Default: 0.0
+         drop_path (float, optional): Stochastic depth rate. Default: 0.0
+         act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
+         norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+     """
+
+     def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
+                  mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
+                  act_layer=nn.GELU, norm_layer=nn.LayerNorm):
+         super().__init__()
+         self.dim = dim
+         self.input_resolution = input_resolution
+         self.num_heads = num_heads
+         self.window_size = window_size
+         self.shift_size = shift_size
+         self.mlp_ratio = mlp_ratio
+         if min(self.input_resolution) <= self.window_size:
+             # if window size is larger than input resolution, we don't partition windows
+             self.shift_size = 0
+             self.window_size = min(self.input_resolution)
+         assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
+
+         self.norm1 = norm_layer(dim)
+         self.attn = WindowAttention(
+             dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
+             qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
+
+         self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+         self.norm2 = norm_layer(dim)
+         mlp_hidden_dim = int(dim * mlp_ratio)
+         self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+
+         if self.shift_size > 0:
+             attn_mask = self.calculate_mask(self.input_resolution)
+         else:
+             attn_mask = None
+
+         self.register_buffer("attn_mask", attn_mask)
+
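+     # calculate_mask() labels each pixel by the shifted-window region it came
+     # from; token pairs with different labels get -100.0 so the softmax in
+     # WindowAttention effectively zeroes their attention weights.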
+     def calculate_mask(self, x_size):
+         # calculate attention mask for SW-MSA
+         H, W = x_size
+         img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
+         h_slices = (slice(0, -self.window_size),
+                     slice(-self.window_size, -self.shift_size),
+                     slice(-self.shift_size, None))
+         w_slices = (slice(0, -self.window_size),
+                     slice(-self.window_size, -self.shift_size),
+                     slice(-self.shift_size, None))
+         cnt = 0
+         for h in h_slices:
+             for w in w_slices:
+                 img_mask[:, h, w, :] = cnt
+                 cnt += 1
+
+         mask_windows = window_partition(img_mask, self.window_size)  # nW, window_size, window_size, 1
+         mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
+         attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
+         attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
+
+         return attn_mask
+
+     def forward(self, x, x_size):
+         H, W = x_size
+         B, L, C = x.shape
+         # assert L == H * W, "input feature has wrong size"
+
+         shortcut = x
+         x = self.norm1(x)
+         x = x.view(B, H, W, C)
+
+         # cyclic shift
+         if self.shift_size > 0:
+             shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
+         else:
+             shifted_x = x
+
+         # partition windows
+         x_windows = window_partition(shifted_x, self.window_size)  # nW*B, window_size, window_size, C
+         x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, window_size*window_size, C
+
+         # W-MSA/SW-MSA (to be compatible with testing on images whose shapes are a multiple of window size)
+         if self.input_resolution == x_size:
+             attn_windows = self.attn(x_windows, mask=self.attn_mask)  # nW*B, window_size*window_size, C
+         else:
+             attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))
+
+         # merge windows
+         attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
+         shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # B H' W' C
+
+         # reverse cyclic shift
+         if self.shift_size > 0:
+             x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
+         else:
+             x = shifted_x
+         x = x.view(B, H * W, C)
+
+         # FFN
+         x = shortcut + self.drop_path(x)
+         x = x + self.drop_path(self.mlp(self.norm2(x)))
+
+         return x
+
+     def extra_repr(self) -> str:
+         return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
+                f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
+
+     def flops(self):
+         flops = 0
+         H, W = self.input_resolution
+         # norm1
+         flops += self.dim * H * W
+         # W-MSA/SW-MSA
+         nW = H * W / self.window_size / self.window_size
+         flops += nW * self.attn.flops(self.window_size * self.window_size)
+         # mlp
+         flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
+         # norm2
+         flops += self.dim * H * W
+         return flops
+
+
+ class PatchMerging(nn.Module):
+     r""" Patch Merging Layer.
+
+     Args:
+         input_resolution (tuple[int]): Resolution of input feature.
+         dim (int): Number of input channels.
+         norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+     """
+
+     def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
+         super().__init__()
+         self.input_resolution = input_resolution
+         self.dim = dim
+         self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
+         self.norm = norm_layer(4 * dim)
+
+     def forward(self, x):
+         """
+         x: B, H*W, C
+         """
+         H, W = self.input_resolution
+         B, L, C = x.shape
+         assert L == H * W, "input feature has wrong size"
+         assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."
+
+         x = x.view(B, H, W, C)
+
+         x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
+         x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
+         x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
+         x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
+         x = torch.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C
+         x = x.view(B, -1, 4 * C)  # B H/2*W/2 4*C
+
+         x = self.norm(x)
+         x = self.reduction(x)
+
+         return x
+
+     def extra_repr(self) -> str:
+         return f"input_resolution={self.input_resolution}, dim={self.dim}"
+
+     def flops(self):
+         H, W = self.input_resolution
+         flops = H * W * self.dim
+         flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
+         return flops
+
+
+ class BasicLayer(nn.Module):
+     """ A basic Swin Transformer layer for one stage.
+
+     Args:
+         dim (int): Number of input channels.
+         input_resolution (tuple[int]): Input resolution.
+         depth (int): Number of blocks.
+         num_heads (int): Number of attention heads.
+         window_size (int): Local window size.
+         mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+         qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+         qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
+         drop (float, optional): Dropout rate. Default: 0.0
+         attn_drop (float, optional): Attention dropout rate. Default: 0.0
+         drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
+         norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+         downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
+         use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
+     """
+
+     def __init__(self, dim, input_resolution, depth, num_heads, window_size,
+                  mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
+                  drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False):
+
+         super().__init__()
+         self.dim = dim
+         self.input_resolution = input_resolution
+         self.depth = depth
+         self.use_checkpoint = use_checkpoint
+
+         # build blocks
+         self.blocks = nn.ModuleList([
+             SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
+                                  num_heads=num_heads, window_size=window_size,
+                                  shift_size=0 if (i % 2 == 0) else window_size // 2,
+                                  mlp_ratio=mlp_ratio,
+                                  qkv_bias=qkv_bias, qk_scale=qk_scale,
+                                  drop=drop, attn_drop=attn_drop,
+                                  drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
+                                  norm_layer=norm_layer)
+             for i in range(depth)])
+
+         # patch merging layer
+         if downsample is not None:
+             self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
+         else:
+             self.downsample = None
+
+     def forward(self, x, x_size):
+         for blk in self.blocks:
+             if self.use_checkpoint:
+                 x = checkpoint.checkpoint(blk, x, x_size)
+             else:
+                 x = blk(x, x_size)
+         if self.downsample is not None:
+             x = self.downsample(x)
+         return x
+
+     def extra_repr(self) -> str:
+         return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
+
+     def flops(self):
+         flops = 0
+         for blk in self.blocks:
+             flops += blk.flops()
+         if self.downsample is not None:
+             flops += self.downsample.flops()
+         return flops
+
+
+ class RSTB(nn.Module):
+     """Residual Swin Transformer Block (RSTB).
+
+     Args:
+         dim (int): Number of input channels.
+         input_resolution (tuple[int]): Input resolution.
+         depth (int): Number of blocks.
+         num_heads (int): Number of attention heads.
+         window_size (int): Local window size.
+         mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+         qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+         qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
+         drop (float, optional): Dropout rate. Default: 0.0
+         attn_drop (float, optional): Attention dropout rate. Default: 0.0
+         drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
+         norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+         downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
+         use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
+         img_size: Input image size.
+         patch_size: Patch size.
+         resi_connection: The convolutional block before residual connection.
+     """
+
+     def __init__(self, dim, input_resolution, depth, num_heads, window_size,
+                  mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
+                  drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
+                  img_size=224, patch_size=4, resi_connection='1conv'):
+         super(RSTB, self).__init__()
+
+         self.dim = dim
+         self.input_resolution = input_resolution
+
+         self.residual_group = BasicLayer(dim=dim,
+                                          input_resolution=input_resolution,
+                                          depth=depth,
+                                          num_heads=num_heads,
+                                          window_size=window_size,
+                                          mlp_ratio=mlp_ratio,
+                                          qkv_bias=qkv_bias, qk_scale=qk_scale,
+                                          drop=drop, attn_drop=attn_drop,
+                                          drop_path=drop_path,
+                                          norm_layer=norm_layer,
+                                          downsample=downsample,
+                                          use_checkpoint=use_checkpoint)
+
+         if resi_connection == '1conv':
+             self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
+         elif resi_connection == '3conv':
+             # to save parameters and memory
+             self.conv = nn.Sequential(nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
+                                       nn.Conv2d(dim // 4, dim // 4, 1, 1, 0),
+                                       nn.LeakyReLU(negative_slope=0.2, inplace=True),
+                                       nn.Conv2d(dim // 4, dim, 3, 1, 1))
+
+         self.patch_embed = PatchEmbed(
+             img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
+             norm_layer=None)
+
+         self.patch_unembed = PatchUnEmbed(
+             img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
+             norm_layer=None)
+
+     def forward(self, x, x_size):
+         return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x
+
+     def flops(self):
+         flops = 0
+         flops += self.residual_group.flops()
+         H, W = self.input_resolution
+         flops += H * W * self.dim * self.dim * 9
+         flops += self.patch_embed.flops()
+         flops += self.patch_unembed.flops()
+
+         return flops
+
+
+ class PatchEmbed(nn.Module):
+     r""" Image to Patch Embedding
+
+     Args:
+         img_size (int): Image size. Default: 224.
+         patch_size (int): Patch token size. Default: 4.
+         in_chans (int): Number of input image channels. Default: 3.
+         embed_dim (int): Number of linear projection output channels. Default: 96.
+         norm_layer (nn.Module, optional): Normalization layer. Default: None
+     """
+
+     def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
+         super().__init__()
+         img_size = to_2tuple(img_size)
+         patch_size = to_2tuple(patch_size)
+         patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
+         self.img_size = img_size
+         self.patch_size = patch_size
+         self.patches_resolution = patches_resolution
+         self.num_patches = patches_resolution[0] * patches_resolution[1]
+
+         self.in_chans = in_chans
+         self.embed_dim = embed_dim
+
+         if norm_layer is not None:
+             self.norm = norm_layer(embed_dim)
+         else:
+             self.norm = None
+
+     def forward(self, x):
+         x = x.flatten(2).transpose(1, 2)  # B Ph*Pw C
+         if self.norm is not None:
+             x = self.norm(x)
+         return x
+
+     def flops(self):
+         flops = 0
+         H, W = self.img_size
+         if self.norm is not None:
+             flops += H * W * self.embed_dim
+         return flops
+
+
+ class PatchUnEmbed(nn.Module):
+     r""" Image to Patch Unembedding
+
+     Args:
+         img_size (int): Image size. Default: 224.
+         patch_size (int): Patch token size. Default: 4.
+         in_chans (int): Number of input image channels. Default: 3.
+         embed_dim (int): Number of linear projection output channels. Default: 96.
+         norm_layer (nn.Module, optional): Normalization layer. Default: None
+     """
+
+     def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
+         super().__init__()
+         img_size = to_2tuple(img_size)
+         patch_size = to_2tuple(patch_size)
+         patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
+         self.img_size = img_size
+         self.patch_size = patch_size
+         self.patches_resolution = patches_resolution
+         self.num_patches = patches_resolution[0] * patches_resolution[1]
+
+         self.in_chans = in_chans
+         self.embed_dim = embed_dim
+
+     def forward(self, x, x_size):
+         B, HW, C = x.shape
+         x = x.transpose(1, 2).view(B, self.embed_dim, x_size[0], x_size[1])  # B C Ph Pw
+         return x
+
+     def flops(self):
+         flops = 0
+         return flops
+
+
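+ # Each 2x stage below is a 3x3 conv to 4*num_feat channels followed by
+ # nn.PixelShuffle(2), which rearranges (B, 4*C, H, W) into (B, C, 2*H, 2*W);
+ # the x3 case uses a single conv to 9*num_feat with PixelShuffle(3).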
+ class Upsample(nn.Sequential):
+     """Upsample module.
+
+     Args:
+         scale (int): Scale factor. Supported scales: 2^n and 3.
+         num_feat (int): Channel number of intermediate features.
+     """
+
+     def __init__(self, scale, num_feat):
+         m = []
+         if (scale & (scale - 1)) == 0:  # scale = 2^n
+             for _ in range(int(math.log(scale, 2))):
+                 m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
+                 m.append(nn.PixelShuffle(2))
+         elif scale == 3:
+             m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
+             m.append(nn.PixelShuffle(3))
+         else:
+             raise ValueError(f'scale {scale} is not supported. Supported scales: 2^n and 3.')
+         super(Upsample, self).__init__(*m)
+
+
+ class UpsampleOneStep(nn.Sequential):
+     """UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle).
+     Used in lightweight SR to save parameters.
+
+     Args:
+         scale (int): Scale factor. Supported scales: 2^n and 3.
+         num_feat (int): Channel number of intermediate features.
+
+     """
+
+     def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
+         self.num_feat = num_feat
+         self.input_resolution = input_resolution
+         m = []
+         m.append(nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1))
+         m.append(nn.PixelShuffle(scale))
+         super(UpsampleOneStep, self).__init__(*m)
+
+     def flops(self):
+         H, W = self.input_resolution
+         flops = H * W * self.num_feat * 3 * 9
+         return flops
+
+
+ class SwinIR(nn.Module):
+     r""" SwinIR
+     A PyTorch impl of: `SwinIR: Image Restoration Using Swin Transformer`, based on Swin Transformer.
+
+     Args:
+         img_size (int | tuple(int)): Input image size. Default 64
+         patch_size (int | tuple(int)): Patch size. Default: 1
+         in_chans (int): Number of input image channels. Default: 3
+         embed_dim (int): Patch embedding dimension. Default: 96
+         depths (tuple(int)): Depth of each Swin Transformer layer.
+         num_heads (tuple(int)): Number of attention heads in different layers.
+         window_size (int): Window size. Default: 7
+         mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
+         qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
+         qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
+         drop_rate (float): Dropout rate. Default: 0
+         attn_drop_rate (float): Attention dropout rate. Default: 0
+         drop_path_rate (float): Stochastic depth rate. Default: 0.1
+         norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
+         ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
+         patch_norm (bool): If True, add normalization after patch embedding. Default: True
+         use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
+         upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compress artifact reduction
+         img_range: Image range. 1. or 255.
+         upsampler: The reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
+         resi_connection: The convolutional block before residual connection. '1conv'/'3conv'
+     """
+
+     def __init__(self, img_size=64, patch_size=1, in_chans=3,
+                  embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6),
+                  window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
+                  drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
+                  norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
+                  use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv',
+                  **kwargs):
+         super(SwinIR, self).__init__()
+         num_in_ch = in_chans
+         num_out_ch = in_chans
+         num_feat = 64
+         self.img_range = img_range
+         if in_chans == 3:
+             rgb_mean = (0.4488, 0.4371, 0.4040)
+             self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
+         else:
+             self.mean = torch.zeros(1, 1, 1, 1)
+         self.upscale = upscale
+         self.upsampler = upsampler
+         self.window_size = window_size
+
+         #####################################################################################################
+         ################################### 1, shallow feature extraction ###################################
+         self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)
+
+         #####################################################################################################
+         ################################### 2, deep feature extraction ######################################
+         self.num_layers = len(depths)
+         self.embed_dim = embed_dim
+         self.ape = ape
+         self.patch_norm = patch_norm
+         self.num_features = embed_dim
+         self.mlp_ratio = mlp_ratio
+
+         # split image into non-overlapping patches
+         self.patch_embed = PatchEmbed(
+             img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
+             norm_layer=norm_layer if self.patch_norm else None)
+         num_patches = self.patch_embed.num_patches
+         patches_resolution = self.patch_embed.patches_resolution
+         self.patches_resolution = patches_resolution
+
+         # merge non-overlapping patches into image
+         self.patch_unembed = PatchUnEmbed(
+             img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
+             norm_layer=norm_layer if self.patch_norm else None)
+
+         # absolute position embedding
+         if self.ape:
+             self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
+             trunc_normal_(self.absolute_pos_embed, std=.02)
+
+         self.pos_drop = nn.Dropout(p=drop_rate)
+
+         # stochastic depth
+         dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule
+
+         # build Residual Swin Transformer blocks (RSTB)
+         self.layers = nn.ModuleList()
+         for i_layer in range(self.num_layers):
+             layer = RSTB(dim=embed_dim,
+                          input_resolution=(patches_resolution[0],
+                                            patches_resolution[1]),
+                          depth=depths[i_layer],
+                          num_heads=num_heads[i_layer],
+                          window_size=window_size,
+                          mlp_ratio=self.mlp_ratio,
+                          qkv_bias=qkv_bias, qk_scale=qk_scale,
+                          drop=drop_rate, attn_drop=attn_drop_rate,
+                          drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],  # no impact on SR results
+                          norm_layer=norm_layer,
+                          downsample=None,
+                          use_checkpoint=use_checkpoint,
+                          img_size=img_size,
+                          patch_size=patch_size,
+                          resi_connection=resi_connection)
+             self.layers.append(layer)
+         self.norm = norm_layer(self.num_features)
+
+         # build the last conv layer in deep feature extraction
+         if resi_connection == '1conv':
+             self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
+         elif resi_connection == '3conv':
+             # to save parameters and memory
+             self.conv_after_body = nn.Sequential(nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),
+                                                  nn.LeakyReLU(negative_slope=0.2, inplace=True),
+                                                  nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),
+                                                  nn.LeakyReLU(negative_slope=0.2, inplace=True),
+                                                  nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))
+
+         #####################################################################################################
+         ################################ 3, high quality image reconstruction ################################
+         if self.upsampler == 'pixelshuffle':
+             # for classical SR
+             self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
+                                                       nn.LeakyReLU(inplace=True))
+             self.upsample = Upsample(upscale, num_feat)
+             self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
+         elif self.upsampler == 'pixelshuffledirect':
+             # for lightweight SR (to save parameters)
+             self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
+                                             (patches_resolution[0], patches_resolution[1]))
+         elif self.upsampler == 'nearest+conv':
+             # for real-world SR (less artifacts)
+             self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
+                                                       nn.LeakyReLU(inplace=True))
+             self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
+             if self.upscale == 4:
+                 self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
+             self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
+             self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
+             self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
+         else:
+             # for image denoising and JPEG compression artifact reduction
+             self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)
+
+         self.apply(self._init_weights)
+
+     def _init_weights(self, m):
+         if isinstance(m, nn.Linear):
+             trunc_normal_(m.weight, std=.02)
+             if isinstance(m, nn.Linear) and m.bias is not None:
+                 nn.init.constant_(m.bias, 0)
+         elif isinstance(m, nn.LayerNorm):
+             nn.init.constant_(m.bias, 0)
+             nn.init.constant_(m.weight, 1.0)
+
+     @torch.jit.ignore
+     def no_weight_decay(self):
+         return {'absolute_pos_embed'}
+
+     @torch.jit.ignore
+     def no_weight_decay_keywords(self):
+         return {'relative_position_bias_table'}
+
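+     # check_image_size() reflect-pads H and W up to the next multiple of
+     # window_size; forward() crops the output back to H*upscale x W*upscale.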
+     def check_image_size(self, x):
+         _, _, h, w = x.size()
+         mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
+         mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
+         x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
+         return x
+
+     def forward_features(self, x):
+         x_size = (x.shape[2], x.shape[3])
+         x = self.patch_embed(x)
+         if self.ape:
+             x = x + self.absolute_pos_embed
+         x = self.pos_drop(x)
+
+         for layer in self.layers:
+             x = layer(x, x_size)
+
+         x = self.norm(x)  # B L C
+         x = self.patch_unembed(x, x_size)
+
+         return x
+
+     def forward(self, x):
+         H, W = x.shape[2:]
+         x = self.check_image_size(x)
+
+         self.mean = self.mean.type_as(x)
+         x = (x - self.mean) * self.img_range
+
+         if self.upsampler == 'pixelshuffle':
+             # for classical SR
+             x = self.conv_first(x)
+             x = self.conv_after_body(self.forward_features(x)) + x
+             x = self.conv_before_upsample(x)
+             x = self.conv_last(self.upsample(x))
+         elif self.upsampler == 'pixelshuffledirect':
+             # for lightweight SR
+             x = self.conv_first(x)
+             x = self.conv_after_body(self.forward_features(x)) + x
+             x = self.upsample(x)
+         elif self.upsampler == 'nearest+conv':
+             # for real-world SR
+             x = self.conv_first(x)
+             x = self.conv_after_body(self.forward_features(x)) + x
+             x = self.conv_before_upsample(x)
+             x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
+             if self.upscale == 4:
+                 x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
+             x = self.conv_last(self.lrelu(self.conv_hr(x)))
+         else:
+             # for image denoising and JPEG compression artifact reduction
+             x_first = self.conv_first(x)
+             res = self.conv_after_body(self.forward_features(x_first)) + x_first
+             x = x + self.conv_last(res)
+
+         x = x / self.img_range + self.mean
+
+         return x[:, :, :H*self.upscale, :W*self.upscale]
+
+     def flops(self):
+         flops = 0
+         H, W = self.patches_resolution
+         flops += H * W * 3 * self.embed_dim * 9
+         flops += self.patch_embed.flops()
+         for layer in self.layers:
+             flops += layer.flops()
+         flops += H * W * 3 * self.embed_dim * self.embed_dim
+         flops += self.upsample.flops()
+         return flops
+
+
+ if __name__ == '__main__':
+     upscale = 4
+     window_size = 8
+     height = (1024 // upscale // window_size + 1) * window_size
+     width = (720 // upscale // window_size + 1) * window_size
+     model = SwinIR(upscale=2, img_size=(height, width),
+                    window_size=window_size, img_range=1., depths=[6, 6, 6, 6],
+                    embed_dim=60, num_heads=[6, 6, 6, 6], mlp_ratio=2, upsampler='pixelshuffledirect')
+     print(model)
+     print(height, width, model.flops() / 1e9)
+
+     x = torch.randn((1, 3, height, width))
+     x = model(x)
+     print(x.shape)
extensions-builtin/SwinIR/swinir_model_arch_v2.py ADDED
@@ -0,0 +1,1017 @@
+ # -----------------------------------------------------------------------------------
+ # Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration, https://arxiv.org/abs/
+ # Written by Conde and Choi et al.
+ # -----------------------------------------------------------------------------------
+
+ import math
+ import numpy as np
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import torch.utils.checkpoint as checkpoint
+ from timm.models.layers import DropPath, to_2tuple, trunc_normal_
+
+
+ class Mlp(nn.Module):
+     def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
+         super().__init__()
+         out_features = out_features or in_features
+         hidden_features = hidden_features or in_features
+         self.fc1 = nn.Linear(in_features, hidden_features)
+         self.act = act_layer()
+         self.fc2 = nn.Linear(hidden_features, out_features)
+         self.drop = nn.Dropout(drop)
+
+     def forward(self, x):
+         x = self.fc1(x)
+         x = self.act(x)
+         x = self.drop(x)
+         x = self.fc2(x)
+         x = self.drop(x)
+         return x
+
+
+ def window_partition(x, window_size):
+     """
+     Args:
+         x: (B, H, W, C)
+         window_size (int): window size
+     Returns:
+         windows: (num_windows*B, window_size, window_size, C)
+     """
+     B, H, W, C = x.shape
+     x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
+     windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
+     return windows
+
+
+ def window_reverse(windows, window_size, H, W):
+     """
+     Args:
+         windows: (num_windows*B, window_size, window_size, C)
+         window_size (int): Window size
+         H (int): Height of image
+         W (int): Width of image
+     Returns:
+         x: (B, H, W, C)
+     """
+     B = int(windows.shape[0] / (H * W / window_size / window_size))
+     x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
+     x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
+     return x
+
+ class WindowAttention(nn.Module):
+     r""" Window based multi-head self attention (W-MSA) module with relative position bias.
+     It supports both shifted and non-shifted windows.
+     Args:
+         dim (int): Number of input channels.
+         window_size (tuple[int]): The height and width of the window.
+         num_heads (int): Number of attention heads.
+         qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+         attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
+         proj_drop (float, optional): Dropout ratio of output. Default: 0.0
+         pretrained_window_size (tuple[int]): The height and width of the window in pre-training.
+     """
+
+     def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.,
+                  pretrained_window_size=(0, 0)):
+
+         super().__init__()
+         self.dim = dim
+         self.window_size = window_size  # Wh, Ww
+         self.pretrained_window_size = pretrained_window_size
+         self.num_heads = num_heads
+
+         self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))), requires_grad=True)
+
+         # mlp to generate continuous relative position bias
+         self.cpb_mlp = nn.Sequential(nn.Linear(2, 512, bias=True),
+                                      nn.ReLU(inplace=True),
+                                      nn.Linear(512, num_heads, bias=False))
+
+         # get relative_coords_table
+         relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.float32)
+         relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.float32)
+         relative_coords_table = torch.stack(
+             torch.meshgrid([relative_coords_h,
+                             relative_coords_w])).permute(1, 2, 0).contiguous().unsqueeze(0)  # 1, 2*Wh-1, 2*Ww-1, 2
+         if pretrained_window_size[0] > 0:
+             relative_coords_table[:, :, :, 0] /= (pretrained_window_size[0] - 1)
+             relative_coords_table[:, :, :, 1] /= (pretrained_window_size[1] - 1)
+         else:
+             relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1)
+             relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1)
+         relative_coords_table *= 8  # normalize to -8, 8
+         relative_coords_table = torch.sign(relative_coords_table) * torch.log2(
+             torch.abs(relative_coords_table) + 1.0) / np.log2(8)
+
+         self.register_buffer("relative_coords_table", relative_coords_table)
+
+         # get pair-wise relative position index for each token inside the window
+         coords_h = torch.arange(self.window_size[0])
+         coords_w = torch.arange(self.window_size[1])
+         coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
+         coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
+         relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
+         relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
+         relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
+         relative_coords[:, :, 1] += self.window_size[1] - 1
+         relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
+         relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
+         self.register_buffer("relative_position_index", relative_position_index)
+
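+         # SwinV2 detail: qkv is bias-free; learnable biases exist only for q
+         # and v, while the k bias stays fixed at zero (assembled in forward()).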
+         self.qkv = nn.Linear(dim, dim * 3, bias=False)
+         if qkv_bias:
+             self.q_bias = nn.Parameter(torch.zeros(dim))
+             self.v_bias = nn.Parameter(torch.zeros(dim))
+         else:
+             self.q_bias = None
+             self.v_bias = None
+         self.attn_drop = nn.Dropout(attn_drop)
+         self.proj = nn.Linear(dim, dim)
+         self.proj_drop = nn.Dropout(proj_drop)
+         self.softmax = nn.Softmax(dim=-1)
+
+     def forward(self, x, mask=None):
+         """
+         Args:
+             x: input features with shape of (num_windows*B, N, C)
+             mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
+         """
+         B_, N, C = x.shape
+         qkv_bias = None
+         if self.q_bias is not None:
+             qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
+         qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
+         qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
+         q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)
+
+         # cosine attention
+         attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1))
+         logit_scale = torch.clamp(self.logit_scale, max=torch.log(torch.tensor(1. / 0.01)).to(self.logit_scale.device)).exp()
+         attn = attn * logit_scale
+
+         relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads)
+         relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view(
+             self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
+         relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
+         relative_position_bias = 16 * torch.sigmoid(relative_position_bias)
+         attn = attn + relative_position_bias.unsqueeze(0)
+
+         if mask is not None:
+             nW = mask.shape[0]
+             attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
+             attn = attn.view(-1, self.num_heads, N, N)
+             attn = self.softmax(attn)
+         else:
+             attn = self.softmax(attn)
+
+         attn = self.attn_drop(attn)
+
+         x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
+         x = self.proj(x)
+         x = self.proj_drop(x)
+         return x
+
+     def extra_repr(self) -> str:
+         return f'dim={self.dim}, window_size={self.window_size}, ' \
+                f'pretrained_window_size={self.pretrained_window_size}, num_heads={self.num_heads}'
+
+     def flops(self, N):
+         # calculate flops for 1 window with token length of N
+         flops = 0
+         # qkv = self.qkv(x)
+         flops += N * self.dim * 3 * self.dim
+         # attn = (q @ k.transpose(-2, -1))
+         flops += self.num_heads * N * (self.dim // self.num_heads) * N
+         # x = (attn @ v)
+         flops += self.num_heads * N * N * (self.dim // self.num_heads)
+         # x = self.proj(x)
+         flops += N * self.dim * self.dim
+         return flops
+
+ class SwinTransformerBlock(nn.Module):
+     r""" Swin Transformer Block.
+     Args:
+         dim (int): Number of input channels.
+         input_resolution (tuple[int]): Input resolution.
+         num_heads (int): Number of attention heads.
+         window_size (int): Window size.
+         shift_size (int): Shift size for SW-MSA.
+         mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+         qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+         drop (float, optional): Dropout rate. Default: 0.0
+         attn_drop (float, optional): Attention dropout rate. Default: 0.0
+         drop_path (float, optional): Stochastic depth rate. Default: 0.0
+         act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
+         norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+         pretrained_window_size (int): Window size in pre-training.
+     """
+
+     def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
+                  mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0.,
+                  act_layer=nn.GELU, norm_layer=nn.LayerNorm, pretrained_window_size=0):
+         super().__init__()
+         self.dim = dim
+         self.input_resolution = input_resolution
+         self.num_heads = num_heads
+         self.window_size = window_size
+         self.shift_size = shift_size
+         self.mlp_ratio = mlp_ratio
+         if min(self.input_resolution) <= self.window_size:
+             # if window size is larger than input resolution, we don't partition windows
+             self.shift_size = 0
+             self.window_size = min(self.input_resolution)
+         assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
+
+         self.norm1 = norm_layer(dim)
+         self.attn = WindowAttention(
+             dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
+             qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop,
+             pretrained_window_size=to_2tuple(pretrained_window_size))
+
+         self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+         self.norm2 = norm_layer(dim)
+         mlp_hidden_dim = int(dim * mlp_ratio)
+         self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+
+         if self.shift_size > 0:
+             attn_mask = self.calculate_mask(self.input_resolution)
+         else:
+             attn_mask = None
+
+         self.register_buffer("attn_mask", attn_mask)
+
+     def calculate_mask(self, x_size):
+         # calculate attention mask for SW-MSA
+         H, W = x_size
+         img_mask = torch.zeros((1, H, W, 1))  # 1 H W 1
+         h_slices = (slice(0, -self.window_size),
+                     slice(-self.window_size, -self.shift_size),
+                     slice(-self.shift_size, None))
+         w_slices = (slice(0, -self.window_size),
+                     slice(-self.window_size, -self.shift_size),
+                     slice(-self.shift_size, None))
+         cnt = 0
+         for h in h_slices:
+             for w in w_slices:
+                 img_mask[:, h, w, :] = cnt
+                 cnt += 1
+
+         mask_windows = window_partition(img_mask, self.window_size)  # nW, window_size, window_size, 1
+         mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
+         attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
+         attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
+
+         return attn_mask
+
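+     # Unlike the v1 block, SwinV2 is post-normalized: norm1/norm2 are applied
+     # to the attention and MLP outputs just before the residual additions.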
+ def forward(self, x, x_size):
269
+ H, W = x_size
270
+ B, L, C = x.shape
271
+ #assert L == H * W, "input feature has wrong size"
272
+
273
+ shortcut = x
274
+ x = x.view(B, H, W, C)
275
+
276
+ # cyclic shift
277
+ if self.shift_size > 0:
278
+ shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
279
+ else:
280
+ shifted_x = x
281
+
282
+ # partition windows
283
+ x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
284
+ x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
285
+
286
+ # W-MSA/SW-MSA (to be compatible for testing on images whose shapes are the multiple of window size
287
+ if self.input_resolution == x_size:
288
+ attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
289
+ else:
290
+ attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))
291
+
292
+ # merge windows
293
+ attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
294
+ shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
295
+
296
+ # reverse cyclic shift
297
+ if self.shift_size > 0:
298
+ x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
299
+ else:
300
+ x = shifted_x
301
+ x = x.view(B, H * W, C)
302
+ x = shortcut + self.drop_path(self.norm1(x))
303
+
304
+ # FFN
305
+ x = x + self.drop_path(self.norm2(self.mlp(x)))
306
+
307
+ return x
308
+
309
+ def extra_repr(self) -> str:
310
+ return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
311
+ f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
312
+
313
+ def flops(self):
314
+ flops = 0
315
+ H, W = self.input_resolution
316
+ # norm1
317
+ flops += self.dim * H * W
318
+ # W-MSA/SW-MSA
319
+ nW = H * W / self.window_size / self.window_size
320
+ flops += nW * self.attn.flops(self.window_size * self.window_size)
321
+ # mlp
322
+ flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
323
+ # norm2
324
+ flops += self.dim * H * W
325
+ return flops
326
+
327
+ class PatchMerging(nn.Module):
328
+ r""" Patch Merging Layer.
329
+ Args:
330
+ input_resolution (tuple[int]): Resolution of input feature.
331
+ dim (int): Number of input channels.
332
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
333
+ """
334
+
335
+ def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
336
+ super().__init__()
337
+ self.input_resolution = input_resolution
338
+ self.dim = dim
339
+ self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
340
+ self.norm = norm_layer(2 * dim)
341
+
342
+ def forward(self, x):
343
+ """
344
+ x: B, H*W, C
345
+ """
346
+ H, W = self.input_resolution
347
+ B, L, C = x.shape
348
+ assert L == H * W, "input feature has wrong size"
349
+ assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."
350
+
351
+ x = x.view(B, H, W, C)
352
+
353
+ x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
354
+ x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
355
+ x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
356
+ x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
357
+ x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
358
+ x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
359
+
360
+ x = self.reduction(x)
361
+ x = self.norm(x)
362
+
363
+ return x
364
+
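+ # For illustration: a (B, 64*64, C) input becomes (B, 32*32, 2*C) -- the four strided
+ # 2x2 neighbors are concatenated to 4*C channels, then reduced to 2*C by the linear layer.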
365
+ def extra_repr(self) -> str:
366
+ return f"input_resolution={self.input_resolution}, dim={self.dim}"
367
+
368
+ def flops(self):
369
+ H, W = self.input_resolution
370
+ flops = (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
371
+ flops += H * W * self.dim // 2
372
+ return flops
373
+
374
+ class BasicLayer(nn.Module):
375
+ """ A basic Swin Transformer layer for one stage.
376
+ Args:
377
+ dim (int): Number of input channels.
378
+ input_resolution (tuple[int]): Input resolution.
379
+ depth (int): Number of blocks.
380
+ num_heads (int): Number of attention heads.
381
+ window_size (int): Local window size.
382
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
383
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
384
+ drop (float, optional): Dropout rate. Default: 0.0
385
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
386
+ drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
387
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
388
+ downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
389
+ use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
390
+ pretrained_window_size (int): Local window size in pre-training.
391
+ """
392
+
393
+ def __init__(self, dim, input_resolution, depth, num_heads, window_size,
394
+ mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0.,
395
+ drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
396
+ pretrained_window_size=0):
397
+
398
+ super().__init__()
399
+ self.dim = dim
400
+ self.input_resolution = input_resolution
401
+ self.depth = depth
402
+ self.use_checkpoint = use_checkpoint
403
+
404
+ # build blocks
405
+ self.blocks = nn.ModuleList([
406
+ SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
407
+ num_heads=num_heads, window_size=window_size,
408
+ shift_size=0 if (i % 2 == 0) else window_size // 2,
409
+ mlp_ratio=mlp_ratio,
410
+ qkv_bias=qkv_bias,
411
+ drop=drop, attn_drop=attn_drop,
412
+ drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
413
+ norm_layer=norm_layer,
414
+ pretrained_window_size=pretrained_window_size)
415
+ for i in range(depth)])
416
+
417
+ # patch merging layer
418
+ if downsample is not None:
419
+ self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
420
+ else:
421
+ self.downsample = None
422
+
423
+ def forward(self, x, x_size):
424
+ for blk in self.blocks:
425
+ if self.use_checkpoint:
426
+ x = checkpoint.checkpoint(blk, x, x_size)
427
+ else:
428
+ x = blk(x, x_size)
429
+ if self.downsample is not None:
430
+ x = self.downsample(x)
431
+ return x
432
+
433
+ def extra_repr(self) -> str:
434
+ return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
435
+
436
+ def flops(self):
437
+ flops = 0
438
+ for blk in self.blocks:
439
+ flops += blk.flops()
440
+ if self.downsample is not None:
441
+ flops += self.downsample.flops()
442
+ return flops
443
+
444
+ def _init_respostnorm(self):
445
+ for blk in self.blocks:
446
+ nn.init.constant_(blk.norm1.bias, 0)
447
+ nn.init.constant_(blk.norm1.weight, 0)
448
+ nn.init.constant_(blk.norm2.bias, 0)
449
+ nn.init.constant_(blk.norm2.weight, 0)
450
+
451
+ class PatchEmbed(nn.Module):
452
+ r""" Image to Patch Embedding
453
+ Args:
454
+ img_size (int): Image size. Default: 224.
455
+ patch_size (int): Patch token size. Default: 4.
456
+ in_chans (int): Number of input image channels. Default: 3.
457
+ embed_dim (int): Number of linear projection output channels. Default: 96.
458
+ norm_layer (nn.Module, optional): Normalization layer. Default: None
459
+ """
460
+
461
+ def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
462
+ super().__init__()
463
+ img_size = to_2tuple(img_size)
464
+ patch_size = to_2tuple(patch_size)
465
+ patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
466
+ self.img_size = img_size
467
+ self.patch_size = patch_size
468
+ self.patches_resolution = patches_resolution
469
+ self.num_patches = patches_resolution[0] * patches_resolution[1]
470
+
471
+ self.in_chans = in_chans
472
+ self.embed_dim = embed_dim
473
+
474
+ self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
475
+ if norm_layer is not None:
476
+ self.norm = norm_layer(embed_dim)
477
+ else:
478
+ self.norm = None
479
+
480
+ def forward(self, x):
481
+ B, C, H, W = x.shape
482
+ # FIXME look at relaxing size constraints
483
+ # assert H == self.img_size[0] and W == self.img_size[1],
484
+ # f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
485
+ x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C
486
+ if self.norm is not None:
487
+ x = self.norm(x)
488
+ return x
489
+
490
+ def flops(self):
491
+ Ho, Wo = self.patches_resolution
492
+ flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
493
+ if self.norm is not None:
494
+ flops += Ho * Wo * self.embed_dim
495
+ return flops
496
+
497
+ class RSTB(nn.Module):
498
+ """Residual Swin Transformer Block (RSTB).
499
+
500
+ Args:
501
+ dim (int): Number of input channels.
502
+ input_resolution (tuple[int]): Input resolution.
503
+ depth (int): Number of blocks.
504
+ num_heads (int): Number of attention heads.
505
+ window_size (int): Local window size.
506
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
507
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
508
+ drop (float, optional): Dropout rate. Default: 0.0
509
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
510
+ drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
511
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
512
+ downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
513
+ use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
514
+ img_size: Input image size.
515
+ patch_size: Patch size.
516
+ resi_connection: The convolutional block before residual connection.
517
+ """
518
+
519
+ def __init__(self, dim, input_resolution, depth, num_heads, window_size,
520
+ mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0.,
521
+ drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
522
+ img_size=224, patch_size=4, resi_connection='1conv'):
523
+ super(RSTB, self).__init__()
524
+
525
+ self.dim = dim
526
+ self.input_resolution = input_resolution
527
+
528
+ self.residual_group = BasicLayer(dim=dim,
529
+ input_resolution=input_resolution,
530
+ depth=depth,
531
+ num_heads=num_heads,
532
+ window_size=window_size,
533
+ mlp_ratio=mlp_ratio,
534
+ qkv_bias=qkv_bias,
535
+ drop=drop, attn_drop=attn_drop,
536
+ drop_path=drop_path,
537
+ norm_layer=norm_layer,
538
+ downsample=downsample,
539
+ use_checkpoint=use_checkpoint)
540
+
541
+ if resi_connection == '1conv':
542
+ self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
543
+ elif resi_connection == '3conv':
544
+ # to save parameters and memory
545
+ self.conv = nn.Sequential(nn.Conv2d(dim, dim // 4, 3, 1, 1), nn.LeakyReLU(negative_slope=0.2, inplace=True),
546
+ nn.Conv2d(dim // 4, dim // 4, 1, 1, 0),
547
+ nn.LeakyReLU(negative_slope=0.2, inplace=True),
548
+ nn.Conv2d(dim // 4, dim, 3, 1, 1))
549
+
550
+ self.patch_embed = PatchEmbed(
551
+ img_size=img_size, patch_size=patch_size, in_chans=dim, embed_dim=dim,
552
+ norm_layer=None)
553
+
554
+ self.patch_unembed = PatchUnEmbed(
555
+ img_size=img_size, patch_size=patch_size, in_chans=dim, embed_dim=dim,
556
+ norm_layer=None)
557
+
558
+ def forward(self, x, x_size):
559
+ return self.patch_embed(self.conv(self.patch_unembed(self.residual_group(x, x_size), x_size))) + x
560
+
561
+ def flops(self):
562
+ flops = 0
563
+ flops += self.residual_group.flops()
564
+ H, W = self.input_resolution
565
+ flops += H * W * self.dim * self.dim * 9
566
+ flops += self.patch_embed.flops()
567
+ flops += self.patch_unembed.flops()
568
+
569
+ return flops
570
+
571
+ class PatchUnEmbed(nn.Module):
572
+ r""" Image to Patch Unembedding
573
+
574
+ Args:
575
+ img_size (int): Image size. Default: 224.
576
+ patch_size (int): Patch token size. Default: 4.
577
+ in_chans (int): Number of input image channels. Default: 3.
578
+ embed_dim (int): Number of linear projection output channels. Default: 96.
579
+ norm_layer (nn.Module, optional): Normalization layer. Default: None
580
+ """
581
+
582
+ def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
583
+ super().__init__()
584
+ img_size = to_2tuple(img_size)
585
+ patch_size = to_2tuple(patch_size)
586
+ patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
587
+ self.img_size = img_size
588
+ self.patch_size = patch_size
589
+ self.patches_resolution = patches_resolution
590
+ self.num_patches = patches_resolution[0] * patches_resolution[1]
591
+
592
+ self.in_chans = in_chans
593
+ self.embed_dim = embed_dim
594
+
595
+ def forward(self, x, x_size):
596
+ B, HW, C = x.shape
597
+ x = x.transpose(1, 2).view(B, self.embed_dim, x_size[0], x_size[1]) # B Ph*Pw C
598
+ return x
599
+
600
+ def flops(self):
601
+ flops = 0
602
+ return flops
603
+
604
+
605
+ class Upsample(nn.Sequential):
606
+ """Upsample module.
607
+
608
+ Args:
609
+ scale (int): Scale factor. Supported scales: 2^n and 3.
610
+ num_feat (int): Channel number of intermediate features.
611
+ """
612
+
613
+ def __init__(self, scale, num_feat):
614
+ m = []
615
+ if (scale & (scale - 1)) == 0: # scale = 2^n
616
+ for _ in range(int(math.log(scale, 2))):
617
+ m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
618
+ m.append(nn.PixelShuffle(2))
619
+ elif scale == 3:
620
+ m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
621
+ m.append(nn.PixelShuffle(3))
622
+ else:
623
+ raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
624
+ super(Upsample, self).__init__(*m)
625
+
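+ # For illustration: Upsample(4, 64) stacks two [Conv2d(64, 256) -> PixelShuffle(2)]
+ # stages, each doubling the spatial resolution, for a 4x total upscale.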
626
+ class Upsample_hf(nn.Sequential):
627
+ """Upsample module.
628
+
629
+ Args:
630
+ scale (int): Scale factor. Supported scales: 2^n and 3.
631
+ num_feat (int): Channel number of intermediate features.
632
+ """
633
+
634
+ def __init__(self, scale, num_feat):
635
+ m = []
636
+ if (scale & (scale - 1)) == 0: # scale = 2^n
637
+ for _ in range(int(math.log(scale, 2))):
638
+ m.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
639
+ m.append(nn.PixelShuffle(2))
640
+ elif scale == 3:
641
+ m.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
642
+ m.append(nn.PixelShuffle(3))
643
+ else:
644
+ raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
645
+ super(Upsample_hf, self).__init__(*m)
646
+
647
+
648
+ class UpsampleOneStep(nn.Sequential):
649
+ """UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)
650
+ Used in lightweight SR to save parameters.
651
+
652
+ Args:
653
+ scale (int): Scale factor. Supported scales: 2^n and 3.
654
+ num_feat (int): Channel number of intermediate features.
655
+
656
+ """
657
+
658
+ def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
659
+ self.num_feat = num_feat
660
+ self.input_resolution = input_resolution
661
+ m = []
662
+ m.append(nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1))
663
+ m.append(nn.PixelShuffle(scale))
664
+ super(UpsampleOneStep, self).__init__(*m)
665
+
666
+ def flops(self):
667
+ H, W = self.input_resolution
668
+ flops = H * W * self.num_feat * 3 * 9
669
+ return flops
670
+
671
+
672
+
673
+ class Swin2SR(nn.Module):
674
+ r""" Swin2SR
675
+ A PyTorch impl of: `Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration`.
676
+
677
+ Args:
678
+ img_size (int | tuple(int)): Input image size. Default 64
679
+ patch_size (int | tuple(int)): Patch size. Default: 1
680
+ in_chans (int): Number of input image channels. Default: 3
681
+ embed_dim (int): Patch embedding dimension. Default: 96
682
+ depths (tuple(int)): Depth of each Swin Transformer layer.
683
+ num_heads (tuple(int)): Number of attention heads in different layers.
684
+ window_size (int): Window size. Default: 7
685
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
686
+ qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
687
+ drop_rate (float): Dropout rate. Default: 0
688
+ attn_drop_rate (float): Attention dropout rate. Default: 0
689
+ drop_path_rate (float): Stochastic depth rate. Default: 0.1
690
+ norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
691
+ ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
692
+ patch_norm (bool): If True, add normalization after patch embedding. Default: True
693
+ use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
694
+ upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compress artifact reduction
695
+ img_range: Image range. 1. or 255.
696
+ upsampler: The reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
697
+ resi_connection: The convolutional block before residual connection. '1conv'/'3conv'
698
+ """
699
+
700
+ def __init__(self, img_size=64, patch_size=1, in_chans=3,
701
+ embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6),
702
+ window_size=7, mlp_ratio=4., qkv_bias=True,
703
+ drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
704
+ norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
705
+ use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv',
706
+ **kwargs):
707
+ super(Swin2SR, self).__init__()
708
+ num_in_ch = in_chans
709
+ num_out_ch = in_chans
710
+ num_feat = 64
711
+ self.img_range = img_range
712
+ if in_chans == 3:
713
+ rgb_mean = (0.4488, 0.4371, 0.4040)
714
+ self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
715
+ else:
716
+ self.mean = torch.zeros(1, 1, 1, 1)
717
+ self.upscale = upscale
718
+ self.upsampler = upsampler
719
+ self.window_size = window_size
720
+
721
+ #####################################################################################################
722
+ ################################### 1, shallow feature extraction ###################################
723
+ self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)
724
+
725
+ #####################################################################################################
726
+ ################################### 2, deep feature extraction ######################################
727
+ self.num_layers = len(depths)
728
+ self.embed_dim = embed_dim
729
+ self.ape = ape
730
+ self.patch_norm = patch_norm
731
+ self.num_features = embed_dim
732
+ self.mlp_ratio = mlp_ratio
733
+
734
+ # split image into non-overlapping patches
735
+ self.patch_embed = PatchEmbed(
736
+ img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
737
+ norm_layer=norm_layer if self.patch_norm else None)
738
+ num_patches = self.patch_embed.num_patches
739
+ patches_resolution = self.patch_embed.patches_resolution
740
+ self.patches_resolution = patches_resolution
741
+
742
+ # merge non-overlapping patches into image
743
+ self.patch_unembed = PatchUnEmbed(
744
+ img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
745
+ norm_layer=norm_layer if self.patch_norm else None)
746
+
747
+ # absolute position embedding
748
+ if self.ape:
749
+ self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
750
+ trunc_normal_(self.absolute_pos_embed, std=.02)
751
+
752
+ self.pos_drop = nn.Dropout(p=drop_rate)
753
+
754
+ # stochastic depth
755
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
756
+
757
+ # build Residual Swin Transformer blocks (RSTB)
758
+ self.layers = nn.ModuleList()
759
+ for i_layer in range(self.num_layers):
760
+ layer = RSTB(dim=embed_dim,
761
+ input_resolution=(patches_resolution[0],
762
+ patches_resolution[1]),
763
+ depth=depths[i_layer],
764
+ num_heads=num_heads[i_layer],
765
+ window_size=window_size,
766
+ mlp_ratio=self.mlp_ratio,
767
+ qkv_bias=qkv_bias,
768
+ drop=drop_rate, attn_drop=attn_drop_rate,
769
+ drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results
770
+ norm_layer=norm_layer,
771
+ downsample=None,
772
+ use_checkpoint=use_checkpoint,
773
+ img_size=img_size,
774
+ patch_size=patch_size,
775
+ resi_connection=resi_connection
776
+
777
+ )
778
+ self.layers.append(layer)
779
+
780
+ if self.upsampler == 'pixelshuffle_hf':
781
+ self.layers_hf = nn.ModuleList()
782
+ for i_layer in range(self.num_layers):
783
+ layer = RSTB(dim=embed_dim,
784
+ input_resolution=(patches_resolution[0],
785
+ patches_resolution[1]),
786
+ depth=depths[i_layer],
787
+ num_heads=num_heads[i_layer],
788
+ window_size=window_size,
789
+ mlp_ratio=self.mlp_ratio,
790
+ qkv_bias=qkv_bias,
791
+ drop=drop_rate, attn_drop=attn_drop_rate,
792
+ drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results
793
+ norm_layer=norm_layer,
794
+ downsample=None,
795
+ use_checkpoint=use_checkpoint,
796
+ img_size=img_size,
797
+ patch_size=patch_size,
798
+ resi_connection=resi_connection
799
+
800
+ )
801
+ self.layers_hf.append(layer)
802
+
803
+ self.norm = norm_layer(self.num_features)
804
+
805
+ # build the last conv layer in deep feature extraction
806
+ if resi_connection == '1conv':
807
+ self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
808
+ elif resi_connection == '3conv':
809
+ # to save parameters and memory
810
+ self.conv_after_body = nn.Sequential(nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),
811
+ nn.LeakyReLU(negative_slope=0.2, inplace=True),
812
+ nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),
813
+ nn.LeakyReLU(negative_slope=0.2, inplace=True),
814
+ nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))
815
+
816
+ #####################################################################################################
817
+ ################################ 3, high quality image reconstruction ################################
818
+ if self.upsampler == 'pixelshuffle':
819
+ # for classical SR
820
+ self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
821
+ nn.LeakyReLU(inplace=True))
822
+ self.upsample = Upsample(upscale, num_feat)
823
+ self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
824
+ elif self.upsampler == 'pixelshuffle_aux':
825
+ self.conv_bicubic = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
826
+ self.conv_before_upsample = nn.Sequential(
827
+ nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
828
+ nn.LeakyReLU(inplace=True))
829
+ self.conv_aux = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
830
+ self.conv_after_aux = nn.Sequential(
831
+ nn.Conv2d(3, num_feat, 3, 1, 1),
832
+ nn.LeakyReLU(inplace=True))
833
+ self.upsample = Upsample(upscale, num_feat)
834
+ self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
835
+
836
+ elif self.upsampler == 'pixelshuffle_hf':
837
+ self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
838
+ nn.LeakyReLU(inplace=True))
839
+ self.upsample = Upsample(upscale, num_feat)
840
+ self.upsample_hf = Upsample_hf(upscale, num_feat)
841
+ self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
842
+ self.conv_first_hf = nn.Sequential(nn.Conv2d(num_feat, embed_dim, 3, 1, 1),
843
+ nn.LeakyReLU(inplace=True))
844
+ self.conv_after_body_hf = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
845
+ self.conv_before_upsample_hf = nn.Sequential(
846
+ nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
847
+ nn.LeakyReLU(inplace=True))
848
+ self.conv_last_hf = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
849
+
850
+ elif self.upsampler == 'pixelshuffledirect':
851
+ # for lightweight SR (to save parameters)
852
+ self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
853
+ (patches_resolution[0], patches_resolution[1]))
854
+ elif self.upsampler == 'nearest+conv':
855
+ # for real-world SR (less artifacts)
856
+ assert self.upscale == 4, 'only x4 is supported for now.'
857
+ self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
858
+ nn.LeakyReLU(inplace=True))
859
+ self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
860
+ self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
861
+ self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
862
+ self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
863
+ self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
864
+ else:
865
+ # for image denoising and JPEG compression artifact reduction
866
+ self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)
867
+
868
+ self.apply(self._init_weights)
869
+
870
+ def _init_weights(self, m):
871
+ if isinstance(m, nn.Linear):
872
+ trunc_normal_(m.weight, std=.02)
873
+ if isinstance(m, nn.Linear) and m.bias is not None:
874
+ nn.init.constant_(m.bias, 0)
875
+ elif isinstance(m, nn.LayerNorm):
876
+ nn.init.constant_(m.bias, 0)
877
+ nn.init.constant_(m.weight, 1.0)
878
+
879
+ @torch.jit.ignore
880
+ def no_weight_decay(self):
881
+ return {'absolute_pos_embed'}
882
+
883
+ @torch.jit.ignore
884
+ def no_weight_decay_keywords(self):
885
+ return {'relative_position_bias_table'}
886
+
887
+ def check_image_size(self, x):
888
+ _, _, h, w = x.size()
889
+ mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
890
+ mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
891
+ x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
892
+ return x
893
+
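+ # For illustration: with window_size=8, a 30x45 input gets mod_pad_h = (8 - 30 % 8) % 8 = 2
+ # and mod_pad_w = (8 - 45 % 8) % 8 = 3, i.e. it is reflect-padded to 32x48.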
894
+ def forward_features(self, x):
895
+ x_size = (x.shape[2], x.shape[3])
896
+ x = self.patch_embed(x)
897
+ if self.ape:
898
+ x = x + self.absolute_pos_embed
899
+ x = self.pos_drop(x)
900
+
901
+ for layer in self.layers:
902
+ x = layer(x, x_size)
903
+
904
+ x = self.norm(x) # B L C
905
+ x = self.patch_unembed(x, x_size)
906
+
907
+ return x
908
+
909
+ def forward_features_hf(self, x):
910
+ x_size = (x.shape[2], x.shape[3])
911
+ x = self.patch_embed(x)
912
+ if self.ape:
913
+ x = x + self.absolute_pos_embed
914
+ x = self.pos_drop(x)
915
+
916
+ for layer in self.layers_hf:
917
+ x = layer(x, x_size)
918
+
919
+ x = self.norm(x) # B L C
920
+ x = self.patch_unembed(x, x_size)
921
+
922
+ return x
923
+
924
+ def forward(self, x):
925
+ H, W = x.shape[2:]
926
+ x = self.check_image_size(x)
927
+
928
+ self.mean = self.mean.type_as(x)
929
+ x = (x - self.mean) * self.img_range
930
+
931
+ if self.upsampler == 'pixelshuffle':
932
+ # for classical SR
933
+ x = self.conv_first(x)
934
+ x = self.conv_after_body(self.forward_features(x)) + x
935
+ x = self.conv_before_upsample(x)
936
+ x = self.conv_last(self.upsample(x))
937
+ elif self.upsampler == 'pixelshuffle_aux':
938
+ bicubic = F.interpolate(x, size=(H * self.upscale, W * self.upscale), mode='bicubic', align_corners=False)
939
+ bicubic = self.conv_bicubic(bicubic)
940
+ x = self.conv_first(x)
941
+ x = self.conv_after_body(self.forward_features(x)) + x
942
+ x = self.conv_before_upsample(x)
943
+ aux = self.conv_aux(x) # b, 3, LR_H, LR_W
944
+ x = self.conv_after_aux(aux)
945
+ x = self.upsample(x)[:, :, :H * self.upscale, :W * self.upscale] + bicubic[:, :, :H * self.upscale, :W * self.upscale]
946
+ x = self.conv_last(x)
947
+ aux = aux / self.img_range + self.mean
948
+ elif self.upsampler == 'pixelshuffle_hf':
949
+ # for classical SR with HF
950
+ x = self.conv_first(x)
951
+ x = self.conv_after_body(self.forward_features(x)) + x
952
+ x_before = self.conv_before_upsample(x)
953
+ x_out = self.conv_last(self.upsample(x_before))
954
+
955
+ x_hf = self.conv_first_hf(x_before)
956
+ x_hf = self.conv_after_body_hf(self.forward_features_hf(x_hf)) + x_hf
957
+ x_hf = self.conv_before_upsample_hf(x_hf)
958
+ x_hf = self.conv_last_hf(self.upsample_hf(x_hf))
959
+ x = x_out + x_hf
960
+ x_hf = x_hf / self.img_range + self.mean
961
+
962
+ elif self.upsampler == 'pixelshuffledirect':
963
+ # for lightweight SR
964
+ x = self.conv_first(x)
965
+ x = self.conv_after_body(self.forward_features(x)) + x
966
+ x = self.upsample(x)
967
+ elif self.upsampler == 'nearest+conv':
968
+ # for real-world SR
969
+ x = self.conv_first(x)
970
+ x = self.conv_after_body(self.forward_features(x)) + x
971
+ x = self.conv_before_upsample(x)
972
+ x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
973
+ x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
974
+ x = self.conv_last(self.lrelu(self.conv_hr(x)))
975
+ else:
976
+ # for image denoising and JPEG compression artifact reduction
977
+ x_first = self.conv_first(x)
978
+ res = self.conv_after_body(self.forward_features(x_first)) + x_first
979
+ x = x + self.conv_last(res)
980
+
981
+ x = x / self.img_range + self.mean
982
+ if self.upsampler == "pixelshuffle_aux":
983
+ return x[:, :, :H*self.upscale, :W*self.upscale], aux
984
+
985
+ elif self.upsampler == "pixelshuffle_hf":
986
+ x_out = x_out / self.img_range + self.mean
987
+ return x_out[:, :, :H*self.upscale, :W*self.upscale], x[:, :, :H*self.upscale, :W*self.upscale], x_hf[:, :, :H*self.upscale, :W*self.upscale]
988
+
989
+ else:
990
+ return x[:, :, :H*self.upscale, :W*self.upscale]
991
+
992
+ def flops(self):
993
+ flops = 0
994
+ H, W = self.patches_resolution
995
+ flops += H * W * 3 * self.embed_dim * 9
996
+ flops += self.patch_embed.flops()
997
+ for layer in self.layers:
998
+ flops += layer.flops()
999
+ flops += H * W * 3 * self.embed_dim * self.embed_dim
1000
+ flops += self.upsample.flops()
1001
+ return flops
1002
+
1003
+
1004
+ if __name__ == '__main__':
1005
+ upscale = 4
1006
+ window_size = 8
1007
+ height = (1024 // upscale // window_size + 1) * window_size
1008
+ width = (720 // upscale // window_size + 1) * window_size
1009
+ model = Swin2SR(upscale=2, img_size=(height, width),
1010
+ window_size=window_size, img_range=1., depths=[6, 6, 6, 6],
1011
+ embed_dim=60, num_heads=[6, 6, 6, 6], mlp_ratio=2, upsampler='pixelshuffledirect')
1012
+ print(model)
1013
+ print(height, width, model.flops() / 1e9)
1014
+
1015
+ x = torch.randn((1, 3, height, width))
1016
+ x = model(x)
1017
+ print(x.shape)
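+ # With the settings above, height=264 and width=184, so the printed output shape
+ # should be torch.Size([1, 3, 528, 368]) -- the input upscaled 2x.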
extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js ADDED
@@ -0,0 +1,640 @@
1
+ onUiLoaded(async() => {
2
+ const elementIDs = {
3
+ img2imgTabs: "#mode_img2img .tab-nav",
4
+ inpaint: "#img2maskimg",
5
+ inpaintSketch: "#inpaint_sketch",
6
+ rangeGroup: "#img2img_column_size",
7
+ sketch: "#img2img_sketch",
8
+ };
9
+ const tabNameToElementId = {
10
+ "Inpaint sketch": elementIDs.inpaintSketch,
11
+ "Inpaint": elementIDs.inpaint,
12
+ "Sketch": elementIDs.sketch,
13
+ };
14
+
15
+ // Helper functions
16
+ // Get active tab
17
+ function getActiveTab(elements, all = false) {
18
+ const tabs = elements.img2imgTabs.querySelectorAll("button");
19
+
20
+ if (all) return tabs;
21
+
22
+ for (let tab of tabs) {
23
+ if (tab.classList.contains("selected")) {
24
+ return tab;
25
+ }
26
+ }
27
+ }
28
+
29
+ // Get tab ID
30
+ function getTabId(elements) {
31
+ const activeTab = getActiveTab(elements);
32
+ return tabNameToElementId[activeTab.innerText];
33
+ }
34
+
35
+ // Wait until opts loaded
36
+ async function waitForOpts() {
37
+ for (;;) {
38
+ if (window.opts && Object.keys(window.opts).length) {
39
+ return window.opts;
40
+ }
41
+ await new Promise(resolve => setTimeout(resolve, 100));
42
+ }
43
+ }
44
+
45
+ // Check is hotkey valid
46
+ function isSingleLetter(value) {
47
+ return (
48
+ typeof value === "string" && value.length === 1 && /[a-z]/i.test(value)
49
+ );
50
+ }
51
+
52
+ // Create hotkeyConfig from opts
53
+ function createHotkeyConfig(defaultHotkeysConfig, hotkeysConfigOpts) {
54
+ const result = {};
55
+ const usedKeys = new Set();
56
+
57
+ for (const key in defaultHotkeysConfig) {
58
+ if (typeof hotkeysConfigOpts[key] === "boolean") {
59
+ result[key] = hotkeysConfigOpts[key];
60
+ continue;
61
+ }
62
+ if (
63
+ hotkeysConfigOpts[key] &&
64
+ isSingleLetter(hotkeysConfigOpts[key]) &&
65
+ !usedKeys.has(hotkeysConfigOpts[key].toUpperCase())
66
+ ) {
67
+ // If the property passed the test and has not yet been used, add 'Key' before it and save it
68
+ result[key] = "Key" + hotkeysConfigOpts[key].toUpperCase();
69
+ usedKeys.add(hotkeysConfigOpts[key].toUpperCase());
70
+ } else {
71
+ // If the property does not pass the test or has already been used, we keep the default value
72
+ console.error(
73
+ `Hotkey: ${hotkeysConfigOpts[key]} for ${key} is repeated and conflicts with another hotkey or is not 1 letter. The default hotkey is used: ${defaultHotkeysConfig[key][3]}`
74
+ );
75
+ result[key] = defaultHotkeysConfig[key];
76
+ }
77
+ }
78
+
79
+ return result;
80
+ }
81
+
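+ // For illustration: with the default {canvas_hotkey_reset: "KeyR"} and user opts
+ // {canvas_hotkey_reset: "t"}, the result is {canvas_hotkey_reset: "KeyT"}; an invalid
+ // or already-used letter falls back to the default value.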
82
+ /**
83
+ * The restoreImgRedMask function displays a red mask around an image to indicate the aspect ratio.
84
+ * If the image display property is set to 'none', the mask breaks. To fix this, the function
85
+ * temporarily sets the display property to 'block' and then hides the mask again after 400 milliseconds
86
+ * to avoid breaking the canvas. Additionally, the function adjusts the mask to work correctly on
87
+ * very long images.
88
+ */
89
+ function restoreImgRedMask(elements) {
90
+ const mainTabId = getTabId(elements);
91
+
92
+ if (!mainTabId) return;
93
+
94
+ const mainTab = gradioApp().querySelector(mainTabId);
95
+ const img = mainTab.querySelector("img");
96
+ const imageARPreview = gradioApp().querySelector("#imageARPreview");
97
+
98
+ if (!img || !imageARPreview) return;
99
+
100
+ imageARPreview.style.transform = "";
101
+ if (parseFloat(mainTab.style.width) > 865) {
102
+ const transformString = mainTab.style.transform;
103
+ const scaleMatch = transformString.match(/scale\(([-+]?[0-9]*\.?[0-9]+)\)/);
104
+ let zoom = 1; // default zoom
105
+
106
+ if (scaleMatch && scaleMatch[1]) {
107
+ zoom = Number(scaleMatch[1]);
108
+ }
109
+
110
+ imageARPreview.style.transformOrigin = "0 0";
111
+ imageARPreview.style.transform = `scale(${zoom})`;
112
+ }
113
+
114
+ if (img.style.display !== "none") return;
115
+
116
+ img.style.display = "block";
117
+
118
+ setTimeout(() => {
119
+ img.style.display = "none";
120
+ }, 400);
121
+ }
122
+
123
+ const hotkeysConfigOpts = await waitForOpts();
124
+
125
+ // Default config
126
+ const defaultHotkeysConfig = {
127
+ canvas_hotkey_reset: "KeyR",
128
+ canvas_hotkey_fullscreen: "KeyS",
129
+ canvas_hotkey_move: "KeyF",
130
+ canvas_hotkey_overlap: "KeyO",
131
+ canvas_show_tooltip: true,
132
+ canvas_swap_controls: false
133
+ };
134
+ // canvas_swap_controls swaps the actions for ctrl + wheel and shift + wheel
135
+ const hotkeysConfig = createHotkeyConfig(
136
+ defaultHotkeysConfig,
137
+ hotkeysConfigOpts
138
+ );
139
+
140
+ let isMoving = false;
141
+ let mouseX, mouseY;
142
+ let activeElement;
143
+
144
+ const elements = Object.fromEntries(Object.keys(elementIDs).map((id) => [
145
+ id,
146
+ gradioApp().querySelector(elementIDs[id]),
147
+ ]));
148
+ const elemData = {};
149
+
150
+ // Apply functionality to the range inputs. Restore redmask and correct for long images.
151
+ const rangeInputs = elements.rangeGroup ? Array.from(elements.rangeGroup.querySelectorAll("input")) :
152
+ [
153
+ gradioApp().querySelector("#img2img_width input[type='range']"),
154
+ gradioApp().querySelector("#img2img_height input[type='range']")
155
+ ];
156
+
157
+ for (const input of rangeInputs) {
158
+ input?.addEventListener("input", () => restoreImgRedMask(elements));
159
+ }
160
+
161
+ function applyZoomAndPan(elemId) {
162
+ const targetElement = gradioApp().querySelector(elemId);
163
+
164
+ if (!targetElement) {
165
+ console.log("Element not found");
166
+ return;
167
+ }
168
+
169
+ targetElement.style.transformOrigin = "0 0";
170
+
171
+ elemData[elemId] = {
172
+ zoomLevel: 1, // named zoomLevel so the reads of elemData[elemId].zoomLevel below see the initial value
173
+ panX: 0,
174
+ panY: 0
175
+ };
176
+ let fullScreenMode = false;
177
+
178
+ // Create tooltip
179
+ function createTooltip() {
180
+ const toolTipElement =
181
+ targetElement.querySelector(".image-container");
182
+ const tooltip = document.createElement("div");
183
+ tooltip.className = "tooltip";
184
+
185
+ // Creating an item of information
186
+ const info = document.createElement("i");
187
+ info.className = "tooltip-info";
188
+ info.textContent = "";
189
+
190
+ // Create a container for the contents of the tooltip
191
+ const tooltipContent = document.createElement("div");
192
+ tooltipContent.className = "tooltip-content";
193
+
194
+ // Add info about hotkeys
195
+ const zoomKey = hotkeysConfig.canvas_swap_controls ? "Ctrl" : "Shift";
196
+ const adjustKey = hotkeysConfig.canvas_swap_controls ? "Shift" : "Ctrl";
197
+
198
+ const hotkeys = [
199
+ {key: `${zoomKey} + wheel`, action: "Zoom canvas"},
200
+ {key: `${adjustKey} + wheel`, action: "Adjust brush size"},
201
+ {
202
+ key: hotkeysConfig.canvas_hotkey_reset.charAt(hotkeysConfig.canvas_hotkey_reset.length - 1),
203
+ action: "Reset zoom"
204
+ },
205
+ {
206
+ key: hotkeysConfig.canvas_hotkey_fullscreen.charAt(hotkeysConfig.canvas_hotkey_fullscreen.length - 1),
207
+ action: "Fullscreen mode"
208
+ },
209
+ {
210
+ key: hotkeysConfig.canvas_hotkey_move.charAt(hotkeysConfig.canvas_hotkey_move.length - 1),
211
+ action: "Move canvas"
212
+ }
213
+ ];
214
+ for (const hotkey of hotkeys) {
215
+ const p = document.createElement("p");
216
+ p.innerHTML = `<b>${hotkey.key}</b> - ${hotkey.action}`;
217
+ tooltipContent.appendChild(p);
218
+ }
219
+
220
+ // Add information and content elements to the tooltip element
221
+ tooltip.appendChild(info);
222
+ tooltip.appendChild(tooltipContent);
223
+
224
+ // Add a hint element to the target element
225
+ toolTipElement.appendChild(tooltip);
226
+ }
227
+
228
+ // Show the tooltip if the setting is enabled
229
+ if (hotkeysConfig.canvas_show_tooltip) {
230
+ createTooltip();
231
+ }
232
+
233
+ // The img tag interferes with zooming and can produce white canvases. Hiding it works around the problem and has no effect on the webui.
234
+ function fixCanvas() {
235
+ const activeTab = getActiveTab(elements).textContent.trim();
236
+
237
+ if (activeTab !== "img2img") {
238
+ const img = targetElement.querySelector(`${elemId} img`);
239
+
240
+ if (img && img.style.display !== "none") {
241
+ img.style.display = "none";
242
+ img.style.visibility = "hidden";
243
+ }
244
+ }
245
+ }
246
+
247
+ // Reset the zoom level and pan position of the target element to their initial values
248
+ function resetZoom() {
249
+ elemData[elemId] = {
250
+ zoomLevel: 1,
251
+ panX: 0,
252
+ panY: 0
253
+ };
254
+
255
+ fixCanvas();
256
+ targetElement.style.transform = `scale(${elemData[elemId].zoomLevel}) translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px)`;
257
+
258
+ const canvas = gradioApp().querySelector(
259
+ `${elemId} canvas[key="interface"]`
260
+ );
261
+
262
+ toggleOverlap("off");
263
+ fullScreenMode = false;
264
+
265
+ if (
266
+ canvas &&
267
+ parseFloat(canvas.style.width) > 865 &&
268
+ parseFloat(targetElement.style.width) > 865
269
+ ) {
270
+ fitToElement();
271
+ return;
272
+ }
273
+
274
+ targetElement.style.width = "";
275
+ if (canvas) {
276
+ targetElement.style.height = canvas.style.height;
277
+ }
278
+ }
279
+
280
+ // Toggle the zIndex of the target element between two values, allowing it to overlap or be overlapped by other elements
281
+ function toggleOverlap(forced = "") {
282
+ const zIndex1 = "0";
283
+ const zIndex2 = "998";
284
+
285
+ targetElement.style.zIndex =
286
+ targetElement.style.zIndex !== zIndex2 ? zIndex2 : zIndex1;
287
+
288
+ if (forced === "off") {
289
+ targetElement.style.zIndex = zIndex1;
290
+ } else if (forced === "on") {
291
+ targetElement.style.zIndex = zIndex2;
292
+ }
293
+ }
294
+
295
+ // Adjust the brush size based on the deltaY value from a mouse wheel event
296
+ function adjustBrushSize(
297
+ elemId,
298
+ deltaY,
299
+ withoutValue = false,
300
+ percentage = 5
301
+ ) {
302
+ const input =
303
+ gradioApp().querySelector(
304
+ `${elemId} input[aria-label='Brush radius']`
305
+ ) ||
306
+ gradioApp().querySelector(
307
+ `${elemId} button[aria-label="Use brush"]`
308
+ );
309
+
310
+ if (input) {
311
+ input.click();
312
+ if (!withoutValue) {
313
+ const maxValue =
314
+ parseFloat(input.getAttribute("max")) || 100;
315
+ const changeAmount = maxValue * (percentage / 100);
316
+ const newValue =
317
+ parseFloat(input.value) +
318
+ (deltaY > 0 ? -changeAmount : changeAmount);
319
+ input.value = Math.min(Math.max(newValue, 0), maxValue);
320
+ input.dispatchEvent(new Event("change"));
321
+ }
322
+ }
323
+ }
324
+
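+ // For illustration: with a max brush radius of 100 and the default percentage of 5,
+ // each wheel step changes the radius by 5 (scrolling up grows it, scrolling down shrinks it).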
325
+ // Reset zoom when uploading a new image
326
+ const fileInput = gradioApp().querySelector(
327
+ `${elemId} input[type="file"][accept="image/*"].svelte-116rqfv`
328
+ );
329
+ fileInput?.addEventListener("click", resetZoom);
330
+
331
+ // Update the zoom level and pan position of the target element based on the values of the zoomLevel, panX and panY variables
332
+ function updateZoom(newZoomLevel, mouseX, mouseY) {
333
+ newZoomLevel = Math.max(0.5, Math.min(newZoomLevel, 15));
334
+
335
+ elemData[elemId].panX +=
336
+ mouseX - (mouseX * newZoomLevel) / elemData[elemId].zoomLevel;
337
+ elemData[elemId].panY +=
338
+ mouseY - (mouseY * newZoomLevel) / elemData[elemId].zoomLevel;
339
+
340
+ targetElement.style.transformOrigin = "0 0";
341
+ targetElement.style.transform = `translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px) scale(${newZoomLevel})`;
342
+
343
+ toggleOverlap("on");
344
+ return newZoomLevel;
345
+ }
346
+
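+ // For illustration: zooming from 1x to 2x with the cursor 100px from the element's
+ // origin shifts panX by 100 - (100 * 2) / 1 = -100, keeping that point under the cursor.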
347
+ // Change the zoom level based on user interaction
348
+ function changeZoomLevel(operation, e) {
349
+ if (
350
+ (!hotkeysConfig.canvas_swap_controls && e.shiftKey) ||
351
+ (hotkeysConfig.canvas_swap_controls && e.ctrlKey)
352
+ ) {
353
+ e.preventDefault();
354
+
355
+ let zoomPosX, zoomPosY;
356
+ let delta = 0.2;
357
+ if (elemData[elemId].zoomLevel > 7) {
358
+ delta = 0.9;
359
+ } else if (elemData[elemId].zoomLevel > 2) {
360
+ delta = 0.6;
361
+ }
362
+
363
+ zoomPosX = e.clientX;
364
+ zoomPosY = e.clientY;
365
+
366
+ fullScreenMode = false;
367
+ elemData[elemId].zoomLevel = updateZoom(
368
+ elemData[elemId].zoomLevel +
369
+ (operation === "+" ? delta : -delta),
370
+ zoomPosX - targetElement.getBoundingClientRect().left,
371
+ zoomPosY - targetElement.getBoundingClientRect().top
372
+ );
373
+ }
374
+ }
375
+
376
+ /**
377
+ * This function fits the target element to its parent element by calculating
378
+ * the required scale and offsets. It also updates the global variables
379
+ * zoomLevel, panX, and panY to reflect the new state.
380
+ */
381
+
382
+ function fitToElement() {
383
+ //Reset Zoom
384
+ targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
385
+
386
+ // Get element and screen dimensions
387
+ const elementWidth = targetElement.offsetWidth;
388
+ const elementHeight = targetElement.offsetHeight;
389
+ const parentElement = targetElement.parentElement;
390
+ const screenWidth = parentElement.clientWidth;
391
+ const screenHeight = parentElement.clientHeight;
392
+
393
+ // Get element's coordinates relative to the parent element
394
+ const elementRect = targetElement.getBoundingClientRect();
395
+ const parentRect = parentElement.getBoundingClientRect();
396
+ const elementX = elementRect.x - parentRect.x;
397
+
398
+ // Calculate scale and offsets
399
+ const scaleX = screenWidth / elementWidth;
400
+ const scaleY = screenHeight / elementHeight;
401
+ const scale = Math.min(scaleX, scaleY);
402
+
403
+ const transformOrigin =
404
+ window.getComputedStyle(targetElement).transformOrigin;
405
+ const [originX, originY] = transformOrigin.split(" ");
406
+ const originXValue = parseFloat(originX);
407
+ const originYValue = parseFloat(originY);
408
+
409
+ const offsetX =
410
+ (screenWidth - elementWidth * scale) / 2 -
411
+ originXValue * (1 - scale);
412
+ const offsetY =
413
+ (screenHeight - elementHeight * scale) / 2.5 -
414
+ originYValue * (1 - scale);
415
+
416
+ // Apply scale and offsets to the element
417
+ targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`;
418
+
419
+ // Update global variables
420
+ elemData[elemId].zoomLevel = scale;
421
+ elemData[elemId].panX = offsetX;
422
+ elemData[elemId].panY = offsetY;
423
+
424
+ fullScreenMode = false;
425
+ toggleOverlap("off");
426
+ }
427
+
428
+ /**
429
+ * This function fits the target element to the browser window by calculating
430
+ * the required scale and offsets. It also updates the global variables
431
+ * zoomLevel, panX, and panY to reflect the new state.
432
+ */
433
+
434
+ // Fullscreen mode
435
+ function fitToScreen() {
436
+ const canvas = gradioApp().querySelector(
437
+ `${elemId} canvas[key="interface"]`
438
+ );
439
+
440
+ if (!canvas) return;
441
+
442
+ if (canvas.offsetWidth > 862) {
443
+ targetElement.style.width = canvas.offsetWidth + "px";
444
+ }
445
+
446
+ if (fullScreenMode) {
447
+ resetZoom();
448
+ fullScreenMode = false;
449
+ return;
450
+ }
451
+
452
+ //Reset Zoom
453
+ targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
454
+
455
+ // Get scrollbar width to right-align the image
456
+ const scrollbarWidth =
457
+ window.innerWidth - document.documentElement.clientWidth;
458
+
459
+ // Get element and screen dimensions
460
+ const elementWidth = targetElement.offsetWidth;
461
+ const elementHeight = targetElement.offsetHeight;
462
+ const screenWidth = window.innerWidth - scrollbarWidth;
463
+ const screenHeight = window.innerHeight;
464
+
465
+ // Get element's coordinates relative to the page
466
+ const elementRect = targetElement.getBoundingClientRect();
467
+ const elementY = elementRect.y;
468
+ const elementX = elementRect.x;
469
+
470
+ // Calculate scale and offsets
471
+ const scaleX = screenWidth / elementWidth;
472
+ const scaleY = screenHeight / elementHeight;
473
+ const scale = Math.min(scaleX, scaleY);
474
+
475
+ // Get the current transformOrigin
476
+ const computedStyle = window.getComputedStyle(targetElement);
477
+ const transformOrigin = computedStyle.transformOrigin;
478
+ const [originX, originY] = transformOrigin.split(" ");
479
+ const originXValue = parseFloat(originX);
480
+ const originYValue = parseFloat(originY);
481
+
482
+ // Calculate offsets with respect to the transformOrigin
483
+ const offsetX =
484
+ (screenWidth - elementWidth * scale) / 2 -
485
+ elementX -
486
+ originXValue * (1 - scale);
487
+ const offsetY =
488
+ (screenHeight - elementHeight * scale) / 2 -
489
+ elementY -
490
+ originYValue * (1 - scale);
491
+
492
+ // Apply scale and offsets to the element
493
+ targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`;
494
+
495
+ // Update global variables
496
+ elemData[elemId].zoomLevel = scale;
497
+ elemData[elemId].panX = offsetX;
498
+ elemData[elemId].panY = offsetY;
499
+
500
+ fullScreenMode = true;
501
+ toggleOverlap("on");
502
+ }
503
+
504
+ // Handle keydown events
505
+ function handleKeyDown(event) {
506
+ const hotkeyActions = {
507
+ [hotkeysConfig.canvas_hotkey_reset]: resetZoom,
508
+ [hotkeysConfig.canvas_hotkey_overlap]: toggleOverlap,
509
+ [hotkeysConfig.canvas_hotkey_fullscreen]: fitToScreen
510
+ };
511
+
512
+ const action = hotkeyActions[event.code];
513
+ if (action) {
514
+ event.preventDefault();
515
+ action(event);
516
+ }
517
+ }
518
+
519
+ // Get Mouse position
520
+ function getMousePosition(e) {
521
+ mouseX = e.offsetX;
522
+ mouseY = e.offsetY;
523
+ }
524
+
525
+ targetElement.addEventListener("mousemove", getMousePosition);
526
+
527
+ // Handle events only inside the targetElement
528
+ let isKeyDownHandlerAttached = false;
529
+
530
+ function handleMouseMove() {
531
+ if (!isKeyDownHandlerAttached) {
532
+ document.addEventListener("keydown", handleKeyDown);
533
+ isKeyDownHandlerAttached = true;
534
+
535
+ activeElement = elemId;
536
+ }
537
+ }
538
+
539
+ function handleMouseLeave() {
540
+ if (isKeyDownHandlerAttached) {
541
+ document.removeEventListener("keydown", handleKeyDown);
542
+ isKeyDownHandlerAttached = false;
543
+
544
+ activeElement = null;
545
+ }
546
+ }
547
+
548
+ // Add mouse event handlers
549
+ targetElement.addEventListener("mousemove", handleMouseMove);
550
+ targetElement.addEventListener("mouseleave", handleMouseLeave);
551
+
552
+ // Reset zoom when click on another tab
553
+ elements.img2imgTabs.addEventListener("click", resetZoom);
554
+ elements.img2imgTabs.addEventListener("click", () => {
555
+ // targetElement.style.width = "";
556
+ if (parseInt(targetElement.style.width) > 865) {
557
+ setTimeout(fitToElement, 0);
558
+ }
559
+ });
560
+
561
+ targetElement.addEventListener("wheel", e => {
562
+ // change zoom level
563
+ const operation = e.deltaY > 0 ? "-" : "+";
564
+ changeZoomLevel(operation, e);
565
+
566
+ // Handle brush size adjustment with ctrl key pressed
567
+ if (
568
+ (hotkeysConfig.canvas_swap_controls && e.shiftKey) ||
569
+ (!hotkeysConfig.canvas_swap_controls &&
570
+ (e.ctrlKey || e.metaKey))
571
+ ) {
572
+ e.preventDefault();
573
+
574
+ // Increase or decrease brush size based on scroll direction
575
+ adjustBrushSize(elemId, e.deltaY);
576
+ }
577
+ });
578
+
579
+ // Handle the move event for pan functionality. Updates the panX and panY variables and applies the new transform to the target element.
580
+ function handleMoveKeyDown(e) {
581
+ if (e.code === hotkeysConfig.canvas_hotkey_move) {
582
+ if (!e.ctrlKey && !e.metaKey && isKeyDownHandlerAttached) {
583
+ e.preventDefault();
584
+ document.activeElement.blur();
585
+ isMoving = true;
586
+ }
587
+ }
588
+ }
589
+
590
+ function handleMoveKeyUp(e) {
591
+ if (e.code === hotkeysConfig.canvas_hotkey_move) {
592
+ isMoving = false;
593
+ }
594
+ }
595
+
596
+ document.addEventListener("keydown", handleMoveKeyDown);
597
+ document.addEventListener("keyup", handleMoveKeyUp);
598
+
599
+ // Detect zoom level and update the pan speed.
600
+ function updatePanPosition(movementX, movementY) {
601
+ let panSpeed = 2;
602
+
603
+ if (elemData[elemId].zoomLevel > 8) {
604
+ panSpeed = 3.5;
605
+ }
606
+
607
+ elemData[elemId].panX += movementX * panSpeed;
608
+ elemData[elemId].panY += movementY * panSpeed;
609
+
610
+ // Delayed redraw of an element
611
+ requestAnimationFrame(() => {
612
+ targetElement.style.transform = `translate(${elemData[elemId].panX}px, ${elemData[elemId].panY}px) scale(${elemData[elemId].zoomLevel})`;
613
+ toggleOverlap("on");
614
+ });
615
+ }
616
+
617
+ function handleMoveByKey(e) {
618
+ if (isMoving && elemId === activeElement) {
619
+ updatePanPosition(e.movementX, e.movementY);
620
+ targetElement.style.pointerEvents = "none";
621
+ } else {
622
+ targetElement.style.pointerEvents = "auto";
623
+ }
624
+ }
625
+
626
+ // Prevents sticking to the mouse
627
+ window.onblur = function() {
628
+ isMoving = false;
629
+ };
630
+
631
+ gradioApp().addEventListener("mousemove", handleMoveByKey);
632
+ }
633
+
634
+ applyZoomAndPan(elementIDs.sketch);
635
+ applyZoomAndPan(elementIDs.inpaint);
636
+ applyZoomAndPan(elementIDs.inpaintSketch);
637
+
638
+ // Make the function global so that other extensions can take advantage of this solution
639
+ window.applyZoomAndPan = applyZoomAndPan;
640
+ });
extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py ADDED
@@ -0,0 +1,10 @@
1
+ from modules import shared
2
+
3
+ shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas Hotkeys"), {
4
+ "canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas"),
5
+ "canvas_hotkey_fullscreen": shared.OptionInfo("S", "Fullscreen Mode, maximizes the picture so that it fits into the screen and stretches it to its full width "),
6
+ "canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas positon"),
7
+ "canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap ( Technical button, neededs for testing )"),
8
+ "canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"),
9
+ "canvas_swap_controls": shared.OptionInfo(False, "Swap hotkey combinations for Zoom and Adjust brush resize"),
10
+ }))
extensions-builtin/canvas-zoom-and-pan/style.css ADDED
@@ -0,0 +1,63 @@
1
+ .tooltip-info {
2
+ position: absolute;
3
+ top: 10px;
4
+ left: 10px;
5
+ cursor: help;
6
+ background-color: rgba(0, 0, 0, 0.3);
7
+ width: 20px;
8
+ height: 20px;
9
+ border-radius: 50%;
10
+ display: flex;
11
+ align-items: center;
12
+ justify-content: center;
13
+ flex-direction: column;
14
+
15
+ z-index: 100;
16
+ }
17
+
18
+ .tooltip-info::after {
19
+ content: '';
20
+ display: block;
21
+ width: 2px;
22
+ height: 7px;
23
+ background-color: white;
24
+ margin-top: 2px;
25
+ }
26
+
27
+ .tooltip-info::before {
28
+ content: '';
29
+ display: block;
30
+ width: 2px;
31
+ height: 2px;
32
+ background-color: white;
33
+ }
34
+
35
+ .tooltip-content {
36
+ display: none;
37
+ background-color: #f9f9f9;
38
+ color: #333;
39
+ border: 1px solid #ddd;
40
+ padding: 15px;
41
+ position: absolute;
42
+ top: 40px;
43
+ left: 10px;
44
+ width: 250px;
45
+ font-size: 16px;
46
+ opacity: 0;
47
+ border-radius: 8px;
48
+ box-shadow: 0px 8px 16px 0px rgba(0,0,0,0.2);
49
+
50
+ z-index: 100;
51
+ }
52
+
53
+ .tooltip:hover .tooltip-content {
54
+ display: block;
55
+ animation: fadeIn 0.5s;
56
+ opacity: 1;
57
+ }
58
+
59
+ @keyframes fadeIn {
60
+ from {opacity: 0;}
61
+ to {opacity: 1;}
62
+ }
63
+
extensions-builtin/extra-options-section/scripts/extra_options_section.py ADDED
@@ -0,0 +1,48 @@
1
+ import gradio as gr
2
+ from modules import scripts, shared, ui_components, ui_settings
3
+ from modules.ui_components import FormColumn
4
+
5
+
6
+ class ExtraOptionsSection(scripts.Script):
7
+ section = "extra_options"
8
+
9
+ def __init__(self):
10
+ self.comps = None
11
+ self.setting_names = None
12
+
13
+ def title(self):
14
+ return "Extra options"
15
+
16
+ def show(self, is_img2img):
17
+ return scripts.AlwaysVisible
18
+
19
+ def ui(self, is_img2img):
20
+ self.comps = []
21
+ self.setting_names = []
22
+
23
+ with gr.Blocks() as interface:
24
+ with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and shared.opts.extra_options else gr.Group(), gr.Row():
25
+ for setting_name in shared.opts.extra_options:
26
+ with FormColumn():
27
+ comp = ui_settings.create_setting_component(setting_name)
28
+
29
+ self.comps.append(comp)
30
+ self.setting_names.append(setting_name)
31
+
32
+ def get_settings_values():
33
+ return [ui_settings.get_value_for_setting(key) for key in self.setting_names]
34
+
35
+ interface.load(fn=get_settings_values, inputs=[], outputs=self.comps, queue=False, show_progress=False)
36
+
37
+ return self.comps
38
+
39
+ def before_process(self, p, *args):
40
+ for name, value in zip(self.setting_names, args):
41
+ if name not in p.override_settings:
42
+ p.override_settings[name] = value
43
+
44
+
45
+ shared.options_templates.update(shared.options_section(('ui', "User interface"), {
46
+ "extra_options": shared.OptionInfo([], "Options in main UI", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img/img2img interfaces").needs_restart(),
47
+ "extra_options_accordion": shared.OptionInfo(False, "Place options in main UI into an accordion")
48
+ }))
extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js ADDED
@@ -0,0 +1,42 @@
1
+ // Stable Diffusion WebUI - Bracket checker
2
+ // By Hingashi no Florin/Bwin4L & @akx
3
+ // Counts open and closed brackets (round, square, curly) in the prompt and negative prompt text boxes in the txt2img and img2img tabs.
4
+ // If there's a mismatch, the keyword counter turns red and if you hover on it, a tooltip tells you what's wrong.
5
+
6
+ function checkBrackets(textArea, counterElt) {
7
+ var counts = {};
8
+ (textArea.value.match(/[(){}[\]]/g) || []).forEach(bracket => {
9
+ counts[bracket] = (counts[bracket] || 0) + 1;
10
+ });
11
+ var errors = [];
12
+
13
+ function checkPair(open, close, kind) {
14
+ if (counts[open] !== counts[close]) {
15
+ errors.push(
16
+ `${open}...${close} - Detected ${counts[open] || 0} opening and ${counts[close] || 0} closing ${kind}.`
17
+ );
18
+ }
19
+ }
20
+
21
+ checkPair('(', ')', 'round brackets');
22
+ checkPair('[', ']', 'square brackets');
23
+ checkPair('{', '}', 'curly brackets');
24
+ counterElt.title = errors.join('\n');
25
+ counterElt.classList.toggle('error', errors.length !== 0);
26
+ }
27
+
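+ // For illustration: for the prompt "((masterpiece) [detailed" the tooltip reports
+ // "(...) - Detected 2 opening and 1 closing round brackets." plus a matching
+ // square-bracket error, and the counter element gets the 'error' class.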
28
+ function setupBracketChecking(id_prompt, id_counter) {
29
+ var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea");
30
+ var counter = gradioApp().getElementById(id_counter);
31
+
32
+ if (textarea && counter) {
33
+ textarea.addEventListener("input", () => checkBrackets(textarea, counter));
34
+ }
35
+ }
36
+
37
+ onUiLoaded(function() {
38
+ setupBracketChecking('txt2img_prompt', 'txt2img_token_counter');
39
+ setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter');
40
+ setupBracketChecking('img2img_prompt', 'img2img_token_counter');
41
+ setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter');
42
+ });
extensions-builtin/sd_theme_editor/install.py ADDED
@@ -0,0 +1 @@
1
+ import launch
extensions-builtin/sd_theme_editor/javascript/ui_theme.js ADDED
@@ -0,0 +1,435 @@
+function hexToRgb(color) {
+  let hex = color[0] === "#" ? color.slice(1) : color;
+  let c;
+
+  // expand the short hex by doubling each character, fc0 -> ffcc00
+  if (hex.length !== 6) {
+    hex = (() => {
+      const result = [];
+      for (c of Array.from(hex)) {
+        result.push(`${c}${c}`);
+      }
+      return result;
+    })().join("");
+  }
+  const colorStr = hex.match(/#?(.{2})(.{2})(.{2})/).slice(1);
+  const rgb = colorStr.map((col) => parseInt(col, 16));
+  rgb.push(1);
+  return rgb;
+}
+
+function rgbToHsl(rgb) {
+  const r = rgb[0] / 255;
+  const g = rgb[1] / 255;
+  const b = rgb[2] / 255;
+
+  const max = Math.max(r, g, b);
+  const min = Math.min(r, g, b);
+  const diff = max - min;
+  const add = max + min;
+
+  const hue =
+    min === max
+      ? 0
+      : r === max
+      ? ((60 * (g - b)) / diff + 360) % 360
+      : g === max
+      ? (60 * (b - r)) / diff + 120
+      : (60 * (r - g)) / diff + 240;
+
+  const lum = 0.5 * add;
+
+  const sat =
+    lum === 0 ? 0 : lum === 1 ? 1 : lum <= 0.5 ? diff / add : diff / (2 - add);
+
+  const h = Math.round(hue);
+  const s = Math.round(sat * 100);
+  const l = Math.round(lum * 100);
+  const a = rgb[3] || 1;
+
+  return [h, s, l, a];
+}
+
+function hexToHsl(color) {
+  const rgb = hexToRgb(color);
+  const hsl = rgbToHsl(rgb);
+  return "hsl(" + hsl[0] + "deg " + hsl[1] + "% " + hsl[2] + "%)";
+}
+
+function hslToHex(h, s, l) {
+  l /= 100;
+  const a = (s * Math.min(l, 1 - l)) / 100;
+  const f = (n) => {
+    const k = (n + h / 30) % 12;
+    const color = l - a * Math.max(Math.min(k - 3, 9 - k, 1), -1);
+    return Math.round(255 * Math.max(0, Math.min(color, 1)))
+      .toString(16)
+      .padStart(2, "0"); // convert to Hex and prefix "0" if needed
+  };
+  return `#${f(0)}${f(8)}${f(4)}`;
+}
+
+function hsl2rgb(h, s, l) {
+  let a = s * Math.min(l, 1 - l);
+  let f = (n, k = (n + h / 30) % 12) =>
+    l - a * Math.max(Math.min(k - 3, 9 - k, 1), -1);
+  return [f(0), f(8), f(4)];
+}
+
+function invertColor(hex) {
+  if (hex.indexOf("#") === 0) {
+    hex = hex.slice(1);
+  }
+  // convert 3-digit hex to 6-digits.
+  if (hex.length === 3) {
+    hex = hex[0] + hex[0] + hex[1] + hex[1] + hex[2] + hex[2];
+  }
+  if (hex.length !== 6) {
+    throw new Error("Invalid HEX color.");
+  }
+  // invert color components
+  var r = (255 - parseInt(hex.slice(0, 2), 16)).toString(16),
+    g = (255 - parseInt(hex.slice(2, 4), 16)).toString(16),
+    b = (255 - parseInt(hex.slice(4, 6), 16)).toString(16);
+  // pad each with zeros and return
+  return "#" + padZero(r) + padZero(g) + padZero(b);
+}
+
+function padZero(str, len) {
+  len = len || 2;
+  var zeros = new Array(len).join("0");
+  return (zeros + str).slice(-len);
+}
+
+function getValsWrappedIn(str, c1, c2) {
+  var rg = new RegExp("(?<=\\" + c1 + ")(.*?)(?=\\" + c2 + ")", "g");
+  return str.match(rg);
+}
+
+let styleobj = {};
+let hslobj = {};
+let isColorsInv;
+
+const toHSLArray = (hslStr) => hslStr.match(/\d+/g).map(Number);
+
+function offsetColorsHSV(ohsl) {
+  let inner_styles = "";
+
+  for (const key in styleobj) {
+    let keyVal = styleobj[key];
+
+    if (keyVal.indexOf("#") != -1 || keyVal.indexOf("hsl") != -1) {
+      let colcomp = gradioApp().querySelector("#" + key + " input");
+      if (colcomp) {
+        let hsl;
+
+        if (keyVal.indexOf("#") != -1) {
+          keyVal = keyVal.replace(/\s+/g, "");
+          //inv ? keyVal = invertColor(keyVal) : 0;
+          if (isColorsInv) {
+            keyVal = invertColor(keyVal);
+            styleobj[key] = keyVal;
+          }
+          hsl = rgbToHsl(hexToRgb(keyVal));
+        } else {
+          if (isColorsInv) {
+            let c = toHSLArray(keyVal);
+            let hex = hslToHex(c[0], c[1], c[2]);
+            keyVal = invertColor(hex);
+            styleobj[key] = keyVal;
+            hsl = rgbToHsl(hexToRgb(keyVal));
+          } else {
+            hsl = toHSLArray(keyVal);
+          }
+        }
+
+        let h = (parseInt(hsl[0]) + parseInt(ohsl[0])) % 360;
+        let s = parseInt(hsl[1]) + parseInt(ohsl[1]);
+        let l = parseInt(hsl[2]) + parseInt(ohsl[2]);
+
+        let hex = hslToHex(
+          h,
+          Math.min(Math.max(s, 0), 100),
+          Math.min(Math.max(l, 0), 100)
+        );
+
+        colcomp.value = hex;
+
+        hslobj[key] = "hsl(" + h + "deg " + s + "% " + l + "%)";
+        inner_styles += key + ":" + hslobj[key] + ";";
+      }
+    } else {
+      inner_styles += key + ":" + styleobj[key] + ";";
+    }
+  }
+
+  isColorsInv = false;
+
+  const preview_styles = gradioApp().querySelector("#preview-styles");
+  preview_styles.innerHTML = ":root {" + inner_styles + "}";
+  preview_styles.innerHTML +=
+    "@media only screen and (max-width: 860px) {:root{--ae-outside-gap-size: var(--ae-mobile-outside-gap-size);--ae-inside-padding-size: var(--ae-mobile-inside-padding-size);}}";
+
+  const vars_textarea = gradioApp().querySelector("#theme_vars textarea");
+  vars_textarea.value = inner_styles;
+
+  const inputEvent = new Event("input");
+  Object.defineProperty(inputEvent, "target", { value: vars_textarea });
+  vars_textarea.dispatchEvent(inputEvent);
+}
+
+function updateTheme(vars) {
+  let inner_styles = "";
+
+  for (let i = 0; i < vars.length - 1; i++) {
+    let key = vars[i].split(":");
+    let id = key[0].replace(/\s+/g, "");
+    let val = key[1].trim();
+
+    styleobj[id] = val;
+    inner_styles += id + ":" + val + ";";
+
+    gradioApp()
+      .querySelectorAll("#" + id + " input")
+      .forEach((elem) => {
+        if (val.indexOf("hsl") != -1) {
+          let hsl = toHSLArray(val);
+          let hex = hslToHex(hsl[0], hsl[1], hsl[2]);
+          elem.value = hex;
+        } else {
+          elem.value = val.split("px")[0];
+        }
+      });
+  }
+
+  const preview_styles = gradioApp().querySelector("#preview-styles");
+
+  if (preview_styles) {
+    preview_styles.innerHTML = ":root {" + inner_styles + "}";
+    preview_styles.innerHTML +=
+      "@media only screen and (max-width: 860px) {:root{--ae-outside-gap-size: var(--ae-mobile-outside-gap-size);--ae-inside-padding-size: var(--ae-mobile-inside-padding-size);}}";
+  } else {
+    const r = gradioApp();
+    const style = document.createElement("style");
+    style.id = "preview-styles";
+    style.innerHTML = ":root {" + inner_styles + "}";
+    style.innerHTML +=
+      "@media only screen and (max-width: 860px) {:root{--ae-outside-gap-size: var(--ae-mobile-outside-gap-size);--ae-inside-padding-size: var(--ae-mobile-inside-padding-size);}}";
+    r.appendChild(style);
+  }
+
+  const vars_textarea = gradioApp().querySelector("#theme_vars textarea");
+  const css_textarea = gradioApp().querySelector("#theme_css textarea");
+
+  vars_textarea.value = inner_styles;
+  css_textarea.value = css_textarea.value;
+
+  //console.log(Object);
+
+  const vEvent = new Event("input");
+  const cEvent = new Event("input");
+  Object.defineProperty(vEvent, "target", { value: vars_textarea });
+  Object.defineProperty(cEvent, "target", { value: css_textarea });
+  vars_textarea.dispatchEvent(vEvent);
+  css_textarea.dispatchEvent(cEvent);
+}
+
+function applyTheme() {
+  console.log("apply");
+}
+
+function initTheme() {
+  const current_style = gradioApp().querySelector(".gradio-container > style");
+  //console.log(current_style);
+  //const head = document.head;
+  //head.appendChild(current_style);
+
+  const css_styles = current_style.innerHTML.split(
+    "/*BREAKPOINT_CSS_CONTENT*/"
+  );
+  let init_css_vars = css_styles[0].split("}")[0].split("{")[1];
+  init_css_vars = init_css_vars.replace(/\n|\r/g, "");
+
+  let init_vars = init_css_vars.split(";");
+  let vars = init_vars;
+
+  //console.log(vars);
+
+  const vars_textarea = gradioApp().querySelector("#theme_vars textarea");
+  const css_textarea = gradioApp().querySelector("#theme_css textarea");
+  //const result_textarea = gradioApp().querySelector('#theme_result textarea');
+  vars_textarea.value = init_css_vars;
+  css_textarea.value =
+    "/*BREAKPOINT_CSS_CONTENT*/" + css_styles[1] + "/*BREAKPOINT_CSS_CONTENT*/";
+
+  updateTheme(vars);
+
+  //vars_textarea.addEventListener("change", function(e) {
+  //e.preventDefault();
+  //e.stopPropagation();
+  //vars = vars_textarea.value.split(";");
+  //console.log(e);
+  //updateTheme(vars);
+
+  //})
+
+  const preview_styles = gradioApp().querySelector("#preview-styles");
+  let intervalChange;
+
+  gradioApp()
+    .querySelectorAll("#ui_theme_settings input")
+    .forEach((elem) => {
+      elem.addEventListener("input", function (e) {
+        let celem = e.currentTarget;
+        let val = e.currentTarget.value;
+        let curr_val;
+
+        switch (e.currentTarget.type) {
+          case "range":
+            celem = celem.parentElement;
+            val = e.currentTarget.value + "px";
+            break;
+          case "color":
+            celem = celem.parentElement.parentElement;
+            val = e.currentTarget.value;
+            break;
+          case "number":
+            celem = celem.parentElement.parentElement.parentElement;
+            val = e.currentTarget.value + "px";
+            break;
+        }
+
+        styleobj[celem.id] = val;
+
+        //console.log(styleobj);
+
+        if (intervalChange != null) clearInterval(intervalChange);
+        intervalChange = setTimeout(() => {
+          let inner_styles = "";
+
+          for (const key in styleobj) {
+            inner_styles += key + ":" + styleobj[key] + ";";
+          }
+
+          vars = inner_styles.split(";");
+          preview_styles.innerHTML = ":root {" + inner_styles + "}";
+          preview_styles.innerHTML +=
+            "@media only screen and (max-width: 860px) {:root{--ae-outside-gap-size: var(--ae-mobile-outside-gap-size);--ae-inside-padding-size: var(--ae-mobile-inside-padding-size);}}";
+
+          vars_textarea.value = inner_styles;
+          const vEvent = new Event("input");
+          Object.defineProperty(vEvent, "target", { value: vars_textarea });
+          vars_textarea.dispatchEvent(vEvent);
+
+          offsetColorsHSV(hsloffset);
+        }, 1000);
+      });
+    });
+
+  const reset_btn = gradioApp().getElementById("theme_reset_btn");
+  reset_btn.addEventListener("click", function (e) {
+    e.preventDefault();
+    e.stopPropagation();
+    gradioApp()
+      .querySelectorAll("#ui_theme_hsv input")
+      .forEach((elem) => {
+        elem.value = 0;
+      });
+    hsloffset = [0, 0, 0];
+    updateTheme(init_vars);
+  });
+
+  /*
+  const apply_btn = gradioApp().getElementById('theme_apply_btn');
+  apply_btn.addEventListener("click", function(e) {
+    e.preventDefault();
+    e.stopPropagation();
+    init_css_vars = vars_textarea.value.replace(/\n|\r/g, "");
+    vars_textarea.value = init_css_vars;
+
+    init_vars = init_css_vars.split(";");
+    vars = init_vars;
+    updateTheme(vars);
+  })
+  */
+
+  let intervalCheck;
+  function dropDownOnChange() {
+    if (init_css_vars != vars_textarea.value) {
+      clearInterval(intervalCheck);
+      init_css_vars = vars_textarea.value.replace(/\n|\r/g, "");
+      vars_textarea.value = init_css_vars;
+      init_vars = init_css_vars.split(";");
+      vars = init_vars;
+      updateTheme(vars);
+    }
+  }
+
+  const drop_down = gradioApp().querySelector("#themes_drop_down");
+  drop_down.addEventListener("click", function (e) {
+    if (intervalCheck != null) clearInterval(intervalCheck);
+    intervalCheck = setInterval(dropDownOnChange, 100);
+    //console.log("ok");
+  });
+
+  let hsloffset = [0, 0, 0];
+
+  const hue = gradioApp()
+    .querySelectorAll("#theme_hue input")
+    .forEach((elem) => {
+      elem.addEventListener("change", function (e) {
+        e.preventDefault();
+        e.stopPropagation();
+        hsloffset[0] = e.currentTarget.value;
+        offsetColorsHSV(hsloffset);
+      });
+    });
+
+  const sat = gradioApp()
+    .querySelectorAll("#theme_sat input")
+    .forEach((elem) => {
+      elem.addEventListener("change", function (e) {
+        e.preventDefault();
+        e.stopPropagation();
+        hsloffset[1] = e.currentTarget.value;
+        offsetColorsHSV(hsloffset);
+      });
+    });
+
+  const brt = gradioApp()
+    .querySelectorAll("#theme_brt input")
+    .forEach((elem) => {
+      elem.addEventListener("change", function (e) {
+        e.preventDefault();
+        e.stopPropagation();
+        hsloffset[2] = e.currentTarget.value;
+        offsetColorsHSV(hsloffset);
+      });
+    });
+
+  const inv_btn = gradioApp().getElementById("theme_invert_btn");
+  inv_btn.addEventListener("click", function (e) {
+    e.preventDefault();
+    e.stopPropagation();
+    isColorsInv = !isColorsInv;
+    offsetColorsHSV(hsloffset);
+  });
+}
+
+function observeGradioApp() {
+  const observer = new MutationObserver(() => {
+    const block = gradioApp().getElementById("tab_ui_theme");
+    if (block) {
+      observer.disconnect();
+
+      setTimeout(() => {
+        initTheme();
+      }, "500");
+    }
+  });
+  observer.observe(gradioApp(), { childList: true, subtree: true });
+}
+
+document.addEventListener("DOMContentLoaded", () => {
+  observeGradioApp();
+});
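For orientation, a small sketch (illustration only, not part of the commit) of what the color helpers above return; the sample color is arbitrary:

hexToRgb("#23c9a7");          // [35, 201, 167, 1]
rgbToHsl([35, 201, 167, 1]);  // [168, 70, 46, 1]  (hue in deg, sat %, lum %, alpha)
hexToHsl("#23c9a7");          // "hsl(168deg 70% 46%)"
hslToHex(168, 70, 46);        // "#23c7a7" - the round trip drifts slightly because
                              // rgbToHsl rounds to integer HSL components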
extensions-builtin/sd_theme_editor/scripts/ui_theme.py ADDED
@@ -0,0 +1,177 @@
+import os
+import shutil
+from pathlib import Path
+import gradio as gr
+import modules.scripts as scripts
+from modules import script_callbacks, shared
+
+basedir = scripts.basedir()
+webui_dir = Path(basedir).parents[1]
+
+themes_folder = os.path.join(basedir, "themes")
+javascript_folder = os.path.join(basedir, "javascript")
+webui_style_path = os.path.join(webui_dir, "style.css")
+
+def get_files(folder, file_filter=[], file_list=[], split=False):
+    file_list = [file_name if not split else os.path.splitext(file_name)[0] for file_name in os.listdir(folder) if os.path.isfile(os.path.join(folder, file_name)) and file_name not in file_filter]
+    return file_list
+
+
+def on_ui_tabs():
+
+    with gr.Blocks(analytics_enabled=False) as ui_theme:
+        with gr.Row():
+            with gr.Column():
+                with gr.Row():
+                    themes_dropdown = gr.Dropdown(label="Themes", elem_id="themes_drop_down", interactive=True, choices=get_files(themes_folder, [".css, .txt"]), type="value")
+                    save_as_filename = gr.Text(label="Save / Save as")
+                with gr.Row():
+                    reset_button = gr.Button(elem_id="theme_reset_btn", value="Reset", variant="primary")
+                    #apply_button = gr.Button(elem_id="theme_apply_btn", value="Apply", variant="primary")
+                    save_button = gr.Button(value="Save", variant="primary")
+                    #delete_button = gr.Button(value="Delete", variant="primary")
+
+            #with gr.Accordion(label="Debug View", open=True):
+            with gr.Row(elem_id="theme_hidden"):
+                vars_text = gr.Textbox(label="Vars", elem_id="theme_vars", show_label=True, lines=7, interactive=False, visible=True)
+                css_text = gr.Textbox(label="Css", elem_id="theme_css", show_label=True, lines=7, interactive=False, visible=True)
+                #result_text = gr.Text(elem_id="theme_result", interactive=False, visible=False)
+        with gr.Column(elem_id="theme_overflow_container"):
+            with gr.Accordion(label="Theme Color adjustments", open=True):
+                with gr.Row():
+                    with gr.Column(scale=6, elem_id="ui_theme_hsv"):
+                        gr.Slider(elem_id="theme_hue", label='Hue', minimum=0, maximum=360, step=1)
+                        gr.Slider(elem_id="theme_sat", label='Saturation', minimum=-100, maximum=100, step=1, value=0, interactive=True)
+                        gr.Slider(elem_id="theme_brt", label='Lightness', minimum=-50, maximum=50, step=1, value=0, interactive=True)
+
+                    gr.Button(elem_id="theme_invert_btn", value="Invert", variant="primary")
+
+
+            with gr.Row(elem_id="ui_theme_settings"):
+                with gr.Column():
+                    with gr.Column():
+                        with gr.Accordion(label="Main", open=True):
+                            gr.ColorPicker(elem_id="--ae-main-bg-color", interactive=True, label="Background color")
+                            gr.ColorPicker(elem_id="--ae-primary-color", label="Primary color")
+
+                        with gr.Accordion(label="Focus", open=True):
+                            gr.ColorPicker(elem_id="--ae-textarea-focus-color", label="Textarea color")
+                            gr.ColorPicker(elem_id="--ae-input-focus-color", label="Input color")
+
+                        with gr.Accordion(label="Spacing", open=True):
+                            gr.Slider(elem_id="--ae-outside-gap-size", label='Gap size', minimum=1, maximum=16, step=1, interactive=True)
+                            gr.Slider(elem_id="--ae-inside-padding-size", label='Padding size', minimum=1, maximum=16, step=1, interactive=True)
+
+                        with gr.Accordion(label="Spacing (Mobile)", open=True):
+                            gr.Slider(elem_id="--ae-mobile-outside-gap-size", label='Mobile Gap size', minimum=1, maximum=16, step=1, interactive=True)
+                            gr.Slider(elem_id="--ae-mobile-inside-padding-size", label='Mobile Padding size', minimum=1, maximum=16, step=1, interactive=True)
+
+                        with gr.Accordion(label="Panel", open=True):
+                            gr.ColorPicker(elem_id="--ae-label-color", label="Label color")
+                            gr.ColorPicker(elem_id="--ae-frame-bg-color", label="Frame Background color")
+                            gr.ColorPicker(elem_id="--ae-panel-bg-color", label="Background color")
+                            gr.ColorPicker(elem_id="--ae-panel-border-color", label="Border color")
+                            gr.Slider(elem_id="--ae-panel-border-radius", label='Border radius', minimum=0, maximum=16, step=1)
+
+                            gr.ColorPicker(elem_id="--ae-input-color", label="Input text color")
+                            gr.ColorPicker(elem_id="--ae-input-bg-color", label="Input background color")
+                            gr.ColorPicker(elem_id="--ae-input-border-color", label="Input border color")
+                with gr.Column():
+                    with gr.Row(elem_id="theme_sub-panel"):
+
+                        with gr.Accordion(label="SubPanel", open=True):
+                            gr.ColorPicker(elem_id="--ae-subgroup-bg-color", label="Subgroup background color")
+                            #gr.ColorPicker(elem_id="--ae-subgroup-label-color", label="Label color", value="#000000")
+                            gr.ColorPicker(elem_id="--ae-subpanel-bg-color", label="Background color")
+                            gr.ColorPicker(elem_id="--ae-subpanel-border-color", label="Border color")
+                            gr.Slider(elem_id="--ae-subpanel-border-radius", label='Border radius', minimum=0, maximum=16, step=1)
+
+                            gr.ColorPicker(elem_id="--ae-subgroup-input-color", label="Input text color")
+                            gr.ColorPicker(elem_id="--ae-subgroup-input-bg-color", label="Input background color")
+                            gr.ColorPicker(elem_id="--ae-subgroup-input-border-color", label="Input border color")
+
+            with gr.Row():
+                with gr.Column():
+                    with gr.Accordion(label="Navigation menu", open=True):
+                        gr.ColorPicker(elem_id="--ae-nav-bg-color", label="Background color")
+                        gr.ColorPicker(elem_id="--ae-nav-color", label="Text color")
+                        gr.ColorPicker(elem_id="--ae-nav-hover-color", label="Hover color")
+
+                    with gr.Accordion(label="Icon", open=True):
+                        gr.ColorPicker(elem_id="--ae-icon-color", label="Color")
+                        gr.ColorPicker(elem_id="--ae-icon-hover-color", label="Hover color")
+
+                    with gr.Accordion(label="Other", open=True):
+                        gr.ColorPicker(elem_id="--ae-text-color", label="Text color")
+                        gr.ColorPicker(elem_id="--ae-placeholder-color", label="Placeholder color")
+                        gr.ColorPicker(elem_id="--ae-cancel-color", label="Cancel/Interrupt color")
+
+                    with gr.Accordion(label="Modal", open=True):
+                        gr.ColorPicker(elem_id="--ae-modal-bg-color", label="Background color")
+                        gr.ColorPicker(elem_id="--ae-modal-icon-color", label="Icon color")
+
+
+
+        def save_theme(vars_text, css_text, filename):
+            style_data = ":root{" + vars_text + "}" + css_text
+            with open(os.path.join(themes_folder, f"{filename}.css"), 'w', encoding="utf-8") as file:
+                file.write(vars_text)
+                file.close()
+            with open(webui_style_path, 'w', encoding="utf-8") as file:
+                file.write(style_data)
+                file.close()
+            themes_dropdown.choices = get_files(themes_folder, [".css, .txt"])
+            return gr.update(choices=themes_dropdown.choices, value=f"{filename}.css")
+
+        def open_theme(filename, css_text):
+            with open(os.path.join(themes_folder, f"{filename}"), 'r') as file:
+                vars_text = file.read()
+            no_ext = filename.rsplit('.', 1)[0]
+            #save_theme(vars_text, css_text, no_ext)
+            # shared.state.interrupt()
+            # shared.state.need_restart = True
+            return [vars_text, no_ext]
+
+        # def delete_theme(filename):
+        #     try:
+        #         os.remove(os.path.join(themes_folder, filename))
+        #     except FileNotFoundError:
+        #         pass
+
+        # delete_button.click(
+        #     fn = lambda: delete_theme()
+        # )
+
+        save_button.click(
+            fn=save_theme,
+            inputs=[vars_text, css_text, save_as_filename],
+            outputs=themes_dropdown
+        )
+
+        themes_dropdown.change(
+            fn=open_theme,
+            #_js = "applyTheme",
+            inputs=[themes_dropdown, css_text],
+            outputs=[vars_text, save_as_filename]
+        )
+
+        # apply_button.click(
+        #     fn=None,
+        #     _js = "applyTheme"
+        # )
+
+        # vars_text.change(
+        #     fn=None,
+        #     _js = "applyTheme",
+        #     inputs=[],
+        #     outputs=[vars_text, css_text]
+        # )
+
+
+
+    return (ui_theme, 'Theme', 'ui_theme'),
+
+
+
+script_callbacks.on_ui_tabs(on_ui_tabs)
extensions-builtin/sd_theme_editor/style.css ADDED
@@ -0,0 +1,113 @@
+#theme_menu {
+  z-index: 9999;
+  background-color: var(--ae-input-bg-color);
+  position: relative;
+  width: 38px;
+  height: 38px;
+  border-radius: 100%;
+  cursor: pointer;
+  min-width: unset;
+  max-width: 38px;
+  align-self: center;
+}
+
+#theme_menu::before {
+  content: " ";
+  display: inline-block;
+  -webkit-mask-size: cover;
+  mask-size: cover;
+  background-color: var(--ae-icon-color);
+  width: var(--ae-icon-size);
+  height: var(--ae-icon-size);
+  -webkit-mask: url(./file=html/svg/contrast-drop-2-line.svg) no-repeat 50% 50%;
+  mask: url(./file=html/svg/contrast-drop-2-line.svg) no-repeat 50% 50%;
+  cursor: pointer;
+  position: relative;
+  left: 50%;
+  top: 50%;
+  transform: translate(-50%, -50%) scale(1);
+}
+
+#theme_menu.fixed,
+#theme_menu:hover {
+  background-color: var(--ae-icon-color);
+}
+
+#theme_menu.fixed::before,
+#theme_menu:hover::before {
+  background-color: var(--ae-icon-hover-color);
+}
+
+#theme_overflow_container {
+  overflow-y: auto;
+  height: calc(
+    100vh - var(--ae-top-header-height) - (var(--ae-outside-gap-size) * 2) -
+      (var(--ae-inside-padding-size) * 4) - 96px
+  );
+  overflow-x: hidden;
+}
+
+#tab_ui_theme.open {
+  transform: translateX(0);
+  box-shadow: rgba(0, 0, 0, 0.4) -30px 0 30px -30px;
+}
+
+#tab_ui_theme.aside {
+  display: block !important;
+}
+
+#tab_ui_theme.aside {
+  position: fixed;
+  top: var(--ae-top-header-height);
+  width: 90%;
+  right: 0;
+  height: calc(100% - var(--ae-top-header-height));
+  max-width: 480px;
+  z-index: 9999;
+  transform: translateX(100%);
+  transition: all 0.25s ease 0s;
+  box-shadow: rgba(0, 0, 0, 0) -30px 0 30px -30px;
+  padding: calc(1rem - var(--ae-outside-gap-size));
+  background-color: var(--ae-main-bg-color) !important;
+}
+#tab_ui_theme.aside.open {
+  transform: translateX(0);
+  box-shadow: rgba(0, 0, 0, 0.4) -30px 0 30px -30px;
+}
+
+#theme_hidden,
+#setting_ui_header_tabs .theme,
+#setting_ui_hidden_tabs .theme {
+  display: none !important;
+}
+
+#tab_ui_theme [id*="color"] label {
+  display: flex;
+  align-items: center;
+  pointer-events: none;
+}
+#tab_ui_theme [id*="color"] label span {
+  min-width: 50% !important;
+}
+#tab_ui_theme [id*="color"] label input {
+  flex-grow: 1;
+  pointer-events: all;
+  cursor: pointer;
+}
+
+#settings_ui_theme > div > div {
+  flex-direction: row;
+  flex-wrap: wrap;
+}
+#settings_ui_theme > div > div > div {
+  max-width: 30%;
+}
+
+#tab_ui_theme > div {
+  padding: 16px !important;
+  padding-top: 0 !important;
+}
+
+#ui_theme_hsv + button {
+  min-width: unset;
+}
extensions-builtin/sd_theme_editor/themes/Golde.css ADDED
@@ -0,0 +1 @@
+ --ae-main-bg-color:hsl(99deg 11% 8%);--ae-primary-color:hsl(44deg 63% 55%);--ae-input-bg-color:hsl(106deg 8% 12%);--ae-input-border-color:hsl(104deg 9% 32%);--ae-panel-bg-color:hsl(104deg 9% 20%);--ae-panel-border-color:hsl(104deg 9% 32%);--ae-panel-border-radius:4px;--ae-subgroup-bg-color:hsl(99deg 11% 8%);--ae-subgroup-input-bg-color:hsl(99deg 11% 8%);--ae-subgroup-input-border-color:hsl(104deg 9% 32%);--ae-subpanel-bg-color:hsl(106deg 8% 12%);--ae-subpanel-border-color:hsl(104deg 9% 32%);--ae-subpanel-border-radius:8px;--ae-textarea-focus-color:hsl(56deg 30% 36%);--ae-input-focus-color:hsl(44deg 63% 55%);--ae-outside-gap-size:8px;--ae-inside-padding-size:8px;--ae-tool-button-size:34px;--ae-tool-button-radius:16px;--ae-generate-button-height:70px;--ae-cancel-color:hsl(104deg 9% 32%);--ae-max-padding:max(var(--ae-outside-gap-size),var(--ae-inside-padding-size));--ae-icon-color:hsl(105deg 9% 77%);--ae-icon-hover-color:hsl(99deg 11% 8%);--ae-icon-size:22px;--ae-nav-bg-color:hsl(98deg 9% 4%);--ae-nav-color:hsl(105deg 9% 77%);--ae-nav-hover-color:hsl(98deg 9% 4%);--ae-input-color:hsl(44deg 63% 55%);--ae-label-color:hsl(105deg 9% 77%);--ae-subgroup-input-color:hsl(44deg 63% 55%);--ae-placeholder-color:hsl(104deg 9% 32%);--ae-text-color:hsl(105deg 9% 77%);--ae-mobile-outside-gap-size:2px;--ae-mobile-inside-padding-size:2px;--ae-frame-bg-color:hsl(108deg 8% 12%);--ae-modal-bg-color:hsl(96deg 12% 8%);--ae-modal-icon-color:hsl(44deg 63% 55%);
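Each theme file is a single line of CSS custom properties; when a theme is opened, updateTheme in ui_theme.js splits that line on ";" and converts each hsl() value back for its color picker. A small sketch (illustration only, not part of the commit), reusing the first Golde value above:

toHSLArray("hsl(99deg 11% 8%)");  // [99, 11, 8]
hslToHex(99, 11, 8);              // "#141712", the hex written into the picker input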
extensions-builtin/sd_theme_editor/themes/backup.css ADDED
@@ -0,0 +1 @@
+ --ae-main-bg-color:hsl(0deg 0% 10%);--ae-primary-color:hsl(168deg 96% 42%);--ae-input-bg-color:hsl(225deg 6% 13%);--ae-input-border-color:hsl(214deg 5% 30%);--ae-panel-bg-color:hsl(225deg 5% 17%);--ae-panel-border-color:hsl(214deg 5% 30%);--ae-panel-border-radius:0px;--ae-subgroup-bg-color:hsl(0deg 0% 10%);--ae-subgroup-input-bg-color:hsl(225deg 6% 13%);--ae-subgroup-input-border-color:hsl(214deg 5% 30%);--ae-subpanel-bg-color:hsl(220deg 4% 14%);--ae-subpanel-border-color:hsl(214deg 5% 30%);--ae-subpanel-border-radius:8px;--ae-textarea-focus-color:hsl(210deg 3% 36%);--ae-input-focus-color:hsl(168deg 97% 41%);--ae-outside-gap-size:8px;--ae-inside-padding-size:8px;--ae-tool-button-size:34px;--ae-tool-button-radius:16px;--ae-generate-button-height:70px;--ae-cancel-color:hsl(0deg 84% 60%);--ae-max-padding:max(var(--ae-outside-gap-size),var(--ae-inside-padding-size));--ae-icon-color:hsl(168deg 96% 42%);--ae-icon-hover-color:hsl(0deg 0% 10%);--ae-icon-size:22px;--ae-nav-bg-color:hsl(0deg 0% 4%);--ae-nav-color:hsl(210deg 4% 80%);--ae-nav-hover-color:hsl(0deg 0% 4%);--ae-input-color:hsl(210deg 4% 80%);--ae-label-color:hsl(210deg 4% 80%);--ae-subgroup-input-color:hsl(0deg 100% 100%);--ae-placeholder-color:hsl(214deg 5% 30%);--ae-text-color:hsl(210deg 4% 80%);--ae-mobile-outside-gap-size:3px;--ae-mobile-inside-padding-size:3px;--ae-frame-bg-color:hsl(225deg 6% 13%);--ae-modal-bg-color:hsl(0deg 0% 10%);--ae-modal-icon-color:hsl(168deg 97% 41%);
extensions-builtin/sd_theme_editor/themes/d-230-52-94.css ADDED
@@ -0,0 +1 @@
+ --ae-main-bg-color:hsl(230deg 52% 4%);--ae-primary-color:hsl(38deg 148% 36%);--ae-input-bg-color:hsl(95deg 58% 7%);--ae-input-border-color:hsl(84deg 57% 24%);--ae-panel-bg-color:hsl(95deg 57% 11%);--ae-panel-border-color:hsl(84deg 57% 24%);--ae-panel-border-radius:0px;--ae-subgroup-bg-color:hsl(230deg 52% 4%);--ae-subgroup-input-bg-color:hsl(95deg 58% 7%);--ae-subgroup-input-border-color:hsl(84deg 57% 24%);--ae-subpanel-bg-color:hsl(90deg 56% 8%);--ae-subpanel-border-color:hsl(84deg 57% 24%);--ae-subpanel-border-radius:8px;--ae-textarea-focus-color:hsl(80deg 55% 30%);--ae-input-focus-color:hsl(38deg 149% 35%);--ae-outside-gap-size:8px;--ae-inside-padding-size:8px;--ae-tool-button-size:34px;--ae-tool-button-radius:16px;--ae-generate-button-height:70px;--ae-cancel-color:hsl(230deg 136% 54%);--ae-max-padding:max(var(--ae-outside-gap-size),var(--ae-inside-padding-size));--ae-icon-color:hsl(80deg 56% 74%);--ae-icon-hover-color:hsl(230deg 52% 4%);--ae-icon-size:22px;--ae-nav-bg-color:hsl(230deg 52% 98%);--ae-nav-color:hsl(80deg 56% 74%);--ae-nav-hover-color:hsl(230deg 52% 98%);--ae-input-color:hsl(80deg 56% 74%);--ae-label-color:hsl(80deg 56% 74%);--ae-subgroup-input-color:hsl(230deg 152% 94%);--ae-placeholder-color:hsl(84deg 57% 24%);--ae-text-color:hsl(80deg 56% 74%);--ae-mobile-outside-gap-size:3px;--ae-mobile-inside-padding-size:3px;--ae-frame-bg-color:hsl(94deg 60% 7%);--ae-modal-bg-color:hsl(229deg 52% 4%);--ae-modal-icon-color:hsl(38deg 100% 36%);
extensions-builtin/sd_theme_editor/themes/default.css ADDED
@@ -0,0 +1 @@
+ --ae-main-bg-color:hsl(0deg 0% 10%);--ae-primary-color:hsl(168deg 97% 41%);--ae-input-bg-color:hsl(225deg 6% 13%);--ae-input-border-color:hsl(214deg 5% 30%);--ae-panel-bg-color:hsl(225deg 5% 17%);--ae-panel-border-color:hsl(214deg 5% 30%);--ae-panel-border-radius:0px;--ae-subgroup-bg-color:hsl(0deg 0% 10%);--ae-subgroup-input-bg-color:hsl(225deg 6% 13%);--ae-subgroup-input-border-color:hsl(214deg 5% 30%);--ae-subpanel-bg-color:hsl(220deg 4% 14%);--ae-subpanel-border-color:hsl(214deg 5% 30%);--ae-subpanel-border-radius:8px;--ae-textarea-focus-color:hsl(210deg 3% 36%);--ae-input-focus-color:hsl(168deg 97% 41%);--ae-outside-gap-size:8px;--ae-inside-padding-size:8px;--ae-tool-button-size:34px;--ae-tool-button-radius:16px;--ae-generate-button-height:70px;--ae-cancel-color:hsl(0deg 84% 60%);--ae-max-padding:max(var(--ae-outside-gap-size),var(--ae-inside-padding-size));--ae-icon-color:hsl(168deg 97% 41%);--ae-icon-hover-color:hsl(0deg 0% 10%);--ae-icon-size:22px;--ae-nav-bg-color:hsl(0deg 0% 4%);--ae-nav-color:hsl(210deg 4% 80%);--ae-nav-hover-color:hsl(0deg 0% 4%);--ae-input-color:hsl(210deg 4% 80%);--ae-label-color:hsl(210deg 4% 80%);--ae-subgroup-input-color:hsl(210deg 4% 80%);--ae-placeholder-color:hsl(214deg 5% 30%);--ae-text-color:hsl(210deg 4% 80%);--ae-mobile-outside-gap-size:2px;--ae-mobile-inside-padding-size:2px;--ae-frame-bg-color:hsl(225deg 6% 13%);--ae-modal-bg-color:hsl(0deg 0% 10%);--ae-modal-icon-color:hsl(168deg 97% 41%);
extensions-builtin/sd_theme_editor/themes/default_cyan.css ADDED
@@ -0,0 +1 @@
+ --ae-main-bg-color:hsl(0deg 0% 10%);--ae-primary-color:hsl(199deg 60% 60%);--ae-input-bg-color:hsl(225deg 6% 13%);--ae-input-border-color:hsl(214deg 5% 30%);--ae-panel-bg-color:hsl(225deg 5% 17%);--ae-panel-border-color:hsl(214deg 5% 30%);--ae-panel-border-radius:0px;--ae-subgroup-bg-color:hsl(0deg 0% 10%);--ae-subgroup-input-bg-color:hsl(225deg 6% 13%);--ae-subgroup-input-border-color:hsl(214deg 5% 30%);--ae-subpanel-bg-color:hsl(220deg 4% 14%);--ae-subpanel-border-color:hsl(214deg 5% 30%);--ae-subpanel-border-radius:8px;--ae-textarea-focus-color:hsl(210deg 3% 36%);--ae-input-focus-color:hsl(199deg 60% 60%);--ae-outside-gap-size:8px;--ae-inside-padding-size:8px;--ae-tool-button-size:34px;--ae-tool-button-radius:16px;--ae-generate-button-height:70px;--ae-cancel-color:hsl(357deg 50% 57%);--ae-max-padding:max(var(--ae-outside-gap-size),var(--ae-inside-padding-size));--ae-icon-color:hsl(210deg 4% 80%);--ae-icon-hover-color:hsl(0deg 0% 10%);--ae-icon-size:22px;--ae-nav-bg-color:hsl(0deg 0% 4%);--ae-nav-color:hsl(210deg 4% 80%);--ae-nav-hover-color:hsl(0deg 0% 4%);--ae-input-color:hsl(210deg 4% 80%);--ae-label-color:hsl(210deg 4% 80%);--ae-subgroup-input-color:hsl(210deg 4% 80%);--ae-placeholder-color:hsl(214deg 5% 30%);--ae-text-color:hsl(210deg 4% 80%);--ae-mobile-outside-gap-size:2px;--ae-mobile-inside-padding-size:2px;--ae-frame-bg-color:hsl(225deg 6% 13%);--ae-modal-bg-color:hsl(0deg 0% 10%);--ae-modal-icon-color:hsl(199deg 60% 60%);
extensions-builtin/sd_theme_editor/themes/default_orange.css ADDED
@@ -0,0 +1 @@
+ --ae-main-bg-color:hsl(0deg 0% 10%);--ae-primary-color:hsl(16deg 77% 60%);--ae-input-bg-color:hsl(225deg 6% 13%);--ae-input-border-color:hsl(214deg 5% 30%);--ae-panel-bg-color:hsl(225deg 5% 17%);--ae-panel-border-color:hsl(214deg 5% 30%);--ae-panel-border-radius:8px;--ae-subgroup-bg-color:hsl(0deg 0% 10%);--ae-subgroup-input-bg-color:hsl(225deg 6% 13%);--ae-subgroup-input-border-color:hsl(214deg 5% 30%);--ae-subpanel-bg-color:hsl(220deg 4% 14%);--ae-subpanel-border-color:hsl(214deg 5% 30%);--ae-subpanel-border-radius:8px;--ae-textarea-focus-color:hsl(210deg 3% 36%);--ae-input-focus-color:hsl(16deg 77% 60%);--ae-outside-gap-size:8px;--ae-inside-padding-size:8px;--ae-tool-button-size:34px;--ae-tool-button-radius:16px;--ae-generate-button-height:70px;--ae-cancel-color:hsl(193deg 54% 55%);--ae-max-padding:max(var(--ae-outside-gap-size),var(--ae-inside-padding-size));--ae-icon-color:hsl(210deg 4% 80%);--ae-icon-hover-color:hsl(0deg 0% 10%);--ae-icon-size:22px;--ae-nav-bg-color:hsl(0deg 0% 4%);--ae-nav-color:hsl(210deg 4% 80%);--ae-nav-hover-color:hsl(0deg 0% 4%);--ae-input-color:hsl(210deg 4% 80%);--ae-label-color:hsl(210deg 4% 80%);--ae-subgroup-input-color:hsl(210deg 4% 80%);--ae-placeholder-color:hsl(214deg 5% 30%);--ae-text-color:hsl(210deg 4% 80%);--ae-mobile-outside-gap-size:2px;--ae-mobile-inside-padding-size:2px;--ae-frame-bg-color:hsl(225deg 6% 13%);--ae-modal-bg-color:hsl(0deg 0% 10%);--ae-modal-icon-color:hsl(16deg 77% 60%);
extensions-builtin/sd_theme_editor/themes/fun.css ADDED
@@ -0,0 +1 @@
+ --ae-main-bg-color:hsl(253deg 22% 8%);--ae-primary-color:hsl(76deg 96% 55%);--ae-input-bg-color:hsl(260deg 25% 12%);--ae-input-border-color:hsl(258deg 24% 32%);--ae-panel-bg-color:hsl(258deg 24% 20%);--ae-panel-border-color:hsl(258deg 24% 32%);--ae-panel-border-radius:4px;--ae-subgroup-bg-color:hsl(253deg 22% 8%);--ae-subgroup-input-bg-color:hsl(258deg 24% 8%);--ae-subgroup-input-border-color:hsl(258deg 24% 32%);--ae-subpanel-bg-color:hsl(260deg 25% 12%);--ae-subpanel-border-color:hsl(258deg 24% 32%);--ae-subpanel-border-radius:8px;--ae-textarea-focus-color:hsl(210deg 3% 36%);--ae-input-focus-color:hsl(296deg 96% 55%);--ae-outside-gap-size:8px;--ae-inside-padding-size:8px;--ae-tool-button-size:34px;--ae-tool-button-radius:16px;--ae-generate-button-height:70px;--ae-cancel-color:hsl(258deg 24% 32%);--ae-max-padding:max(var(--ae-outside-gap-size),var(--ae-inside-padding-size));--ae-icon-color:hsl(259deg 24% 77%);--ae-icon-hover-color:hsl(253deg 22% 8%);--ae-icon-size:22px;--ae-nav-bg-color:hsl(252deg 24% 4%);--ae-nav-color:hsl(259deg 24% 77%);--ae-nav-hover-color:hsl(252deg 24% 4%);--ae-input-color:hsl(305deg 96% 55%);--ae-label-color:hsl(259deg 24% 77%);--ae-subgroup-input-color:hsl(76deg 96% 55%);--ae-placeholder-color:hsl(258deg 24% 32%);--ae-text-color:hsl(259deg 24% 77%);--ae-mobile-outside-gap-size:2px;--ae-mobile-inside-padding-size:2px;--ae-frame-bg-color:hsl(260deg 25% 12%);--ae-modal-bg-color:hsl(253deg 22% 8%);--ae-modal-icon-color:hsl(76deg 96% 55%);
extensions-builtin/sd_theme_editor/themes/minimal.css ADDED
@@ -0,0 +1 @@
+ --ae-main-bg-color:hsl(0deg 0% 8%);--ae-primary-color:hsl(168deg 96% 42%);--ae-input-bg-color:hsl(0deg 0% 10%);--ae-input-border-color:hsl(0deg 0% 10%);--ae-panel-bg-color:hsl(0deg 0% 17%);--ae-panel-border-color:hsl(0deg 0% 17%);--ae-panel-border-radius:4px;--ae-subgroup-bg-color:hsl(0deg 0% 10%);--ae-subgroup-input-bg-color:hsl(0deg 0% 10%);--ae-subgroup-input-border-color:hsl(0deg 0% 10%);--ae-subpanel-bg-color:hsl(0deg 0% 14%);--ae-subpanel-border-color:hsl(0deg 0% 15%);--ae-subpanel-border-radius:4px;--ae-textarea-focus-color:hsl(0deg 0% 36%);--ae-input-focus-color:hsl(168deg 97% 41%);--ae-outside-gap-size:1px;--ae-inside-padding-size:5px;--ae-tool-button-size:34px;--ae-tool-button-radius:16px;--ae-generate-button-height:70px;--ae-cancel-color:hsl(0deg 84% 60%);--ae-max-padding:max(var(--ae-outside-gap-size),var(--ae-inside-padding-size));--ae-icon-color:hsl(168deg 96% 42%);--ae-icon-hover-color:hsl(0deg 0% 10%);--ae-icon-size:22px;--ae-nav-bg-color:hsl(0deg 0% 4%);--ae-nav-color:hsl(0deg 0% 80%);--ae-nav-hover-color:hsl(0deg 0% 4%);--ae-input-color:hsl(210deg 4% 80%);--ae-label-color:hsl(0deg 0% 65%);--ae-subgroup-input-color:hsl(0deg 100% 100%);--ae-placeholder-color:hsl(0deg 0% 30%);--ae-text-color:hsl(0deg 0% 80%);--ae-mobile-outside-gap-size:3px;--ae-mobile-inside-padding-size:3px;--ae-frame-bg-color:hsl(0deg 0% 14%);--ae-modal-bg-color:hsl(0deg 0% 10%);--ae-modal-icon-color:hsl(168deg 97% 41%);
extensions-builtin/sd_theme_editor/themes/minimal_orange.css ADDED
@@ -0,0 +1 @@
+ --ae-main-bg-color:hsl(210deg 28% 8%);--ae-primary-color:hsl(18deg 124% 42%);--ae-input-bg-color:hsl(210deg 28% 10%);--ae-input-border-color:hsl(210deg 28% 10%);--ae-panel-bg-color:hsl(210deg 28% 17%);--ae-panel-border-color:hsl(210deg 28% 17%);--ae-panel-border-radius:4px;--ae-subgroup-bg-color:hsl(210deg 28% 10%);--ae-subgroup-input-bg-color:hsl(210deg 28% 10%);--ae-subgroup-input-border-color:hsl(210deg 28% 10%);--ae-subpanel-bg-color:hsl(210deg 28% 14%);--ae-subpanel-border-color:hsl(210deg 28% 15%);--ae-subpanel-border-radius:4px;--ae-textarea-focus-color:hsl(210deg 28% 36%);--ae-input-focus-color:hsl(18deg 125% 41%);--ae-outside-gap-size:8px;--ae-inside-padding-size:8px;--ae-tool-button-size:34px;--ae-tool-button-radius:16px;--ae-generate-button-height:70px;--ae-cancel-color:hsl(210deg 112% 60%);--ae-max-padding:max(var(--ae-outside-gap-size),var(--ae-inside-padding-size));--ae-icon-color:hsl(18deg 124% 42%);--ae-icon-hover-color:hsl(210deg 28% 10%);--ae-icon-size:22px;--ae-nav-bg-color:hsl(210deg 28% 4%);--ae-nav-color:hsl(210deg 28% 80%);--ae-nav-hover-color:hsl(210deg 28% 4%);--ae-input-color:hsl(60deg 32% 80%);--ae-label-color:hsl(210deg 28% 65%);--ae-subgroup-input-color:hsl(210deg 128% 100%);--ae-placeholder-color:hsl(210deg 28% 30%);--ae-text-color:hsl(210deg 28% 80%);--ae-mobile-outside-gap-size:3px;--ae-mobile-inside-padding-size:3px;--ae-frame-bg-color:hsl(210deg 28% 14%);--ae-modal-bg-color:hsl(210deg 28% 10%);--ae-modal-icon-color:hsl(18deg 125% 41%);