f31c4ab57b53cde8341c10ec5dc18ad37ce7ce0c5359a832639c64914cfc2c52
Browse files- extensions/addtional/models/lora/sukuna.safetensors +3 -0
- extensions/addtional/models/lora/vacuum fellatio1.1ãvacuum fellatioã.civitai.info +85 -0
- extensions/addtional/models/lora/vacuum fellatio1.1/303/243/302/200/302/220vacuum fellatio/303/243/302/200/302/221.preview.png +0 -0
- extensions/addtional/models/lora/vacuum fellatio1.1ãvacuum fellatioã.safetensors +3 -0
- extensions/addtional/preload.py +6 -0
- extensions/addtional/scripts/__pycache__/additional_networks.cpython-310.pyc +0 -0
- extensions/addtional/scripts/__pycache__/addnet_xyz_grid_support.cpython-310.pyc +0 -0
- extensions/addtional/scripts/__pycache__/lora_compvis.cpython-310.pyc +0 -0
- extensions/addtional/scripts/__pycache__/metadata_editor.cpython-310.pyc +0 -0
- extensions/addtional/scripts/__pycache__/model_util.cpython-310.pyc +0 -0
- extensions/addtional/scripts/__pycache__/safetensors_hack.cpython-310.pyc +0 -0
- extensions/addtional/scripts/__pycache__/util.cpython-310.pyc +0 -0
- extensions/addtional/scripts/additional_networks.py +399 -0
- extensions/addtional/scripts/addnet_xyz_grid_support.py +189 -0
- extensions/addtional/scripts/lora_compvis.py +634 -0
- extensions/addtional/scripts/metadata_editor.py +622 -0
- extensions/addtional/scripts/model_util.py +338 -0
- extensions/addtional/scripts/safetensors_hack.py +116 -0
- extensions/addtional/scripts/util.py +12 -0
- extensions/addtional/style.css +9 -0
- extensions/adetailer/.github/ISSUE_TEMPLATE/bug_report.yaml +23 -0
- extensions/adetailer/.github/ISSUE_TEMPLATE/feature_request.md +20 -0
- extensions/adetailer/.github/workflows/stale.yml +13 -0
- extensions/adetailer/.gitignore +196 -0
- extensions/adetailer/.pre-commit-config.yaml +24 -0
- extensions/adetailer/CHANGELOG.md +171 -0
- extensions/adetailer/LICENSE.md +662 -0
- extensions/adetailer/README.md +105 -0
- extensions/adetailer/__pycache__/preload.cpython-310.pyc +0 -0
- extensions/adetailer/adetailer/__init__.py +20 -0
- extensions/adetailer/adetailer/__pycache__/__init__.cpython-310.pyc +0 -0
- extensions/adetailer/adetailer/__pycache__/__version__.cpython-310.pyc +0 -0
- extensions/adetailer/adetailer/__pycache__/args.cpython-310.pyc +0 -0
- extensions/adetailer/adetailer/__pycache__/common.cpython-310.pyc +0 -0
- extensions/adetailer/adetailer/__pycache__/mask.cpython-310.pyc +0 -0
- extensions/adetailer/adetailer/__pycache__/mediapipe.cpython-310.pyc +0 -0
- extensions/adetailer/adetailer/__pycache__/ui.cpython-310.pyc +0 -0
- extensions/adetailer/adetailer/__pycache__/ultralytics.cpython-310.pyc +0 -0
- extensions/adetailer/adetailer/__version__.py +1 -0
- extensions/adetailer/adetailer/args.py +187 -0
- extensions/adetailer/adetailer/common.py +123 -0
- extensions/adetailer/adetailer/mask.py +247 -0
- extensions/adetailer/adetailer/mediapipe.py +111 -0
- extensions/adetailer/adetailer/ui.py +451 -0
- extensions/adetailer/adetailer/ultralytics.py +54 -0
- extensions/adetailer/controlnet_ext/__init__.py +7 -0
- extensions/adetailer/controlnet_ext/__pycache__/__init__.cpython-310.pyc +0 -0
- extensions/adetailer/controlnet_ext/__pycache__/controlnet_ext.cpython-310.pyc +0 -0
- extensions/adetailer/controlnet_ext/__pycache__/restore.cpython-310.pyc +0 -0
- extensions/adetailer/controlnet_ext/controlnet_ext.py +134 -0
extensions/addtional/models/lora/sukuna.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:f15673b5ea25ed5f1e5cee150ddf4e0d094a65e30f08b0813566f9580abdb627
|
3 |
+
size 78018750
|
extensions/addtional/models/lora/vacuum fellatio1.1ãvacuum fellatioã.civitai.info
ADDED
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"id": 22489,
|
3 |
+
"modelId": 18735,
|
4 |
+
"name": "v1.1",
|
5 |
+
"createdAt": "2023-03-13T09:00:46.954Z",
|
6 |
+
"updatedAt": "2023-03-18T10:17:13.538Z",
|
7 |
+
"trainedWords": [
|
8 |
+
"vacuum fellatio"
|
9 |
+
],
|
10 |
+
"baseModel": "SD 1.5",
|
11 |
+
"earlyAccessTimeFrame": 0,
|
12 |
+
"description": "<p>There is no significant change, but considering that the previous TriggerWords cannot be saved as a file name, I modified TriggerWords. </p><p>By the way, I added several pictures of POV deepthroat, which may have better performance</p>",
|
13 |
+
"stats": {
|
14 |
+
"downloadCount": 17393,
|
15 |
+
"ratingCount": 11,
|
16 |
+
"rating": 5
|
17 |
+
},
|
18 |
+
"model": {
|
19 |
+
"name": "concept Vacuum fellatio(:>=)",
|
20 |
+
"type": "LORA",
|
21 |
+
"nsfw": true,
|
22 |
+
"poi": false
|
23 |
+
},
|
24 |
+
"files": [
|
25 |
+
{
|
26 |
+
"id": 18716,
|
27 |
+
"url": "https://5ac0637cfd0766c97916cefa3764fbdf.r2.cloudflarestorage.com/civitai-prod-settled/105182/model/vacuum20fellatio11E380.pABG.safetensors",
|
28 |
+
"sizeKB": 36985.2275390625,
|
29 |
+
"name": "vacuum fellatio1.1\u3010vacuum fellatio\u3011.safetensors",
|
30 |
+
"type": "Model",
|
31 |
+
"metadata": {
|
32 |
+
"fp": "fp16",
|
33 |
+
"size": "full",
|
34 |
+
"format": "SafeTensor"
|
35 |
+
},
|
36 |
+
"pickleScanResult": "Success",
|
37 |
+
"pickleScanMessage": "No Pickle imports",
|
38 |
+
"virusScanResult": "Success",
|
39 |
+
"virusScanMessage": null,
|
40 |
+
"scannedAt": "2023-03-13T09:01:34.994Z",
|
41 |
+
"hashes": {
|
42 |
+
"AutoV1": "378523B1",
|
43 |
+
"AutoV2": "04D50C1BA2",
|
44 |
+
"SHA256": "04D50C1BA2BD0A1B5F87118C29A8C4C596CEA0BC0991A3EAE7952AA86ED540EB",
|
45 |
+
"CRC32": "EAD3612B",
|
46 |
+
"BLAKE3": "F9652C1CC61EA0B42904CF95C9544EAF792EC7AF3B7B7A4F7CD01FAE67052962"
|
47 |
+
},
|
48 |
+
"primary": true,
|
49 |
+
"downloadUrl": "https://civitai.com/api/download/models/22489"
|
50 |
+
}
|
51 |
+
],
|
52 |
+
"images": [
|
53 |
+
{
|
54 |
+
"url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/3bbcf1a0-0058-4d4b-9d2f-857163335400/width=450/241954.jpeg",
|
55 |
+
"nsfw": "X",
|
56 |
+
"width": 512,
|
57 |
+
"height": 640,
|
58 |
+
"hash": "UEHL-;Q800N@00E,wukX00Tykr^iDO~B?c4:",
|
59 |
+
"meta": {
|
60 |
+
"seed": 2965610135,
|
61 |
+
"steps": 20,
|
62 |
+
"prompt": "masterpiece,1girl,vacuum fellatio,1boy,penis,fellatio,oral,from side, surprise deepthroat, <lora:vacuum fellatio1.1\u3010vacuum fellatio\u3011:1>",
|
63 |
+
"sampler": "Euler a",
|
64 |
+
"cfgScale": 7,
|
65 |
+
"negativePrompt": "(worst quality:1.4), (low quality:1.4), (normal quality:1.4), lowres"
|
66 |
+
}
|
67 |
+
},
|
68 |
+
{
|
69 |
+
"url": "https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/1f78213f-2c53-4e52-7b11-a946bb98d400/width=450/241953.jpeg",
|
70 |
+
"nsfw": "None",
|
71 |
+
"width": 512,
|
72 |
+
"height": 640,
|
73 |
+
"hash": "UAEoGm0N00%E^%E3nOxU00?E~CM*DR-.%f9b",
|
74 |
+
"meta": {
|
75 |
+
"seed": 3859538840,
|
76 |
+
"steps": 20,
|
77 |
+
"prompt": "masterpiece,1girl,vacuum fellatio,1boy,penis,fellatio,oral,pov, surprise deepthroat, <lora:vacuum fellatio1.1\u3010vacuum fellatio\u3011:1>",
|
78 |
+
"sampler": "Euler a",
|
79 |
+
"cfgScale": 7,
|
80 |
+
"negativePrompt": "(worst quality:1.4), (low quality:1.4), (normal quality:1.4), lowres"
|
81 |
+
}
|
82 |
+
}
|
83 |
+
],
|
84 |
+
"downloadUrl": "https://civitai.com/api/download/models/22489"
|
85 |
+
}
|
extensions/addtional/models/lora/vacuum fellatio1.1/303/243/302/200/302/220vacuum fellatio/303/243/302/200/302/221.preview.png
ADDED
extensions/addtional/models/lora/vacuum fellatio1.1ãvacuum fellatioã.safetensors
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:04d50c1ba2bd0a1b5f87118c29a8c4c596cea0bc0991a3eae7952aa86ed540eb
|
3 |
+
size 37872873
|
extensions/addtional/preload.py
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from modules import paths
|
3 |
+
|
4 |
+
|
5 |
+
def preload(parser):
|
6 |
+
parser.add_argument("--addnet-max-model-count", type=int, help="The maximum number of additional network model can be used.", default=5)
|
extensions/addtional/scripts/__pycache__/additional_networks.cpython-310.pyc
ADDED
Binary file (11.3 kB). View file
|
|
extensions/addtional/scripts/__pycache__/addnet_xyz_grid_support.cpython-310.pyc
ADDED
Binary file (6.13 kB). View file
|
|
extensions/addtional/scripts/__pycache__/lora_compvis.cpython-310.pyc
ADDED
Binary file (14 kB). View file
|
|
extensions/addtional/scripts/__pycache__/metadata_editor.cpython-310.pyc
ADDED
Binary file (16.5 kB). View file
|
|
extensions/addtional/scripts/__pycache__/model_util.cpython-310.pyc
ADDED
Binary file (9.9 kB). View file
|
|
extensions/addtional/scripts/__pycache__/safetensors_hack.cpython-310.pyc
ADDED
Binary file (4 kB). View file
|
|
extensions/addtional/scripts/__pycache__/util.cpython-310.pyc
ADDED
Binary file (539 Bytes). View file
|
|
extensions/addtional/scripts/additional_networks.py
ADDED
@@ -0,0 +1,399 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
|
3 |
+
import torch
|
4 |
+
import numpy as np
|
5 |
+
|
6 |
+
import modules.scripts as scripts
|
7 |
+
from modules import shared, script_callbacks
|
8 |
+
import gradio as gr
|
9 |
+
|
10 |
+
import modules.ui
|
11 |
+
from modules.ui_components import ToolButton, FormRow
|
12 |
+
|
13 |
+
from scripts import addnet_xyz_grid_support, lora_compvis, model_util, metadata_editor
|
14 |
+
from scripts.model_util import lora_models, MAX_MODEL_COUNT
|
15 |
+
|
16 |
+
|
17 |
+
memo_symbol = "\U0001F4DD" # 📝
|
18 |
+
addnet_paste_params = {"txt2img": [], "img2img": []}
|
19 |
+
|
20 |
+
|
21 |
+
class Script(scripts.Script):
|
22 |
+
def __init__(self) -> None:
|
23 |
+
super().__init__()
|
24 |
+
self.latest_params = [(None, None, None, None)] * MAX_MODEL_COUNT
|
25 |
+
self.latest_networks = []
|
26 |
+
self.latest_model_hash = ""
|
27 |
+
|
28 |
+
def title(self):
|
29 |
+
return "Additional networks for generating"
|
30 |
+
|
31 |
+
def show(self, is_img2img):
|
32 |
+
return scripts.AlwaysVisible
|
33 |
+
|
34 |
+
def ui(self, is_img2img):
|
35 |
+
global addnet_paste_params
|
36 |
+
# NOTE: Changing the contents of `ctrls` means the XY Grid support may need
|
37 |
+
# to be updated, see xyz_grid_support.py
|
38 |
+
ctrls = []
|
39 |
+
weight_sliders = []
|
40 |
+
model_dropdowns = []
|
41 |
+
|
42 |
+
tabname = "txt2img"
|
43 |
+
if is_img2img:
|
44 |
+
tabname = "img2img"
|
45 |
+
|
46 |
+
paste_params = addnet_paste_params[tabname]
|
47 |
+
paste_params.clear()
|
48 |
+
|
49 |
+
self.infotext_fields = []
|
50 |
+
self.paste_field_names = []
|
51 |
+
|
52 |
+
with gr.Group():
|
53 |
+
with gr.Accordion("Additional Networks", open=False):
|
54 |
+
with gr.Row():
|
55 |
+
enabled = gr.Checkbox(label="Enable", value=False)
|
56 |
+
ctrls.append(enabled)
|
57 |
+
self.infotext_fields.append((enabled, "AddNet Enabled"))
|
58 |
+
separate_weights = gr.Checkbox(label="Separate UNet/Text Encoder weights", value=False)
|
59 |
+
ctrls.append(separate_weights)
|
60 |
+
self.infotext_fields.append((separate_weights, "AddNet Separate Weights"))
|
61 |
+
|
62 |
+
for i in range(MAX_MODEL_COUNT):
|
63 |
+
with FormRow(variant="compact"):
|
64 |
+
module = gr.Dropdown(["LoRA"], label=f"Network module {i+1}", value="LoRA")
|
65 |
+
model = gr.Dropdown(list(lora_models.keys()), label=f"Model {i+1}", value="None")
|
66 |
+
with gr.Row(visible=False):
|
67 |
+
model_path = gr.Textbox(value="None", interactive=False, visible=False)
|
68 |
+
model.change(
|
69 |
+
lambda module, model, i=i: model_util.lora_models.get(model, "None"),
|
70 |
+
inputs=[module, model],
|
71 |
+
outputs=[model_path],
|
72 |
+
)
|
73 |
+
|
74 |
+
# Sending from the script UI to the metadata editor has to bypass
|
75 |
+
# gradio since this button will exit the gr.Blocks context by the
|
76 |
+
# time the metadata editor tab is created, so event handlers can't
|
77 |
+
# be registered on it by then.
|
78 |
+
model_info = ToolButton(value=memo_symbol, elem_id=f"additional_networks_send_to_metadata_editor_{i}")
|
79 |
+
model_info.click(fn=None, _js="addnet_send_to_metadata_editor", inputs=[module, model_path], outputs=[])
|
80 |
+
|
81 |
+
module.change(
|
82 |
+
lambda module, model, i=i: addnet_xyz_grid_support.update_axis_params(i, module, model),
|
83 |
+
inputs=[module, model],
|
84 |
+
outputs=[],
|
85 |
+
)
|
86 |
+
model.change(
|
87 |
+
lambda module, model, i=i: addnet_xyz_grid_support.update_axis_params(i, module, model),
|
88 |
+
inputs=[module, model],
|
89 |
+
outputs=[],
|
90 |
+
)
|
91 |
+
|
92 |
+
# perhaps there is no user to train Text Encoder only, Weight A is U-Net
|
93 |
+
# The name of label will be changed in future (Weight A and B), but UNet and TEnc for now for easy understanding
|
94 |
+
with gr.Column() as col:
|
95 |
+
weight = gr.Slider(label=f"Weight {i+1}", value=1.0, minimum=-1.0, maximum=2.0, step=0.05, visible=True)
|
96 |
+
weight_unet = gr.Slider(
|
97 |
+
label=f"UNet Weight {i+1}", value=1.0, minimum=-1.0, maximum=2.0, step=0.05, visible=False
|
98 |
+
)
|
99 |
+
weight_tenc = gr.Slider(
|
100 |
+
label=f"TEnc Weight {i+1}", value=1.0, minimum=-1.0, maximum=2.0, step=0.05, visible=False
|
101 |
+
)
|
102 |
+
|
103 |
+
weight.change(lambda w: (w, w), inputs=[weight], outputs=[weight_unet, weight_tenc])
|
104 |
+
weight.release(lambda w: (w, w), inputs=[weight], outputs=[weight_unet, weight_tenc])
|
105 |
+
paste_params.append({"module": module, "model": model})
|
106 |
+
|
107 |
+
ctrls.extend((module, model, weight_unet, weight_tenc))
|
108 |
+
weight_sliders.extend((weight, weight_unet, weight_tenc))
|
109 |
+
model_dropdowns.append(model)
|
110 |
+
|
111 |
+
self.infotext_fields.extend(
|
112 |
+
[
|
113 |
+
(module, f"AddNet Module {i+1}"),
|
114 |
+
(model, f"AddNet Model {i+1}"),
|
115 |
+
(weight, f"AddNet Weight {i+1}"),
|
116 |
+
(weight_unet, f"AddNet Weight A {i+1}"),
|
117 |
+
(weight_tenc, f"AddNet Weight B {i+1}"),
|
118 |
+
]
|
119 |
+
)
|
120 |
+
|
121 |
+
for _, field_name in self.infotext_fields:
|
122 |
+
self.paste_field_names.append(field_name)
|
123 |
+
|
124 |
+
def update_weight_sliders(separate, *sliders):
|
125 |
+
updates = []
|
126 |
+
for w, w_unet, w_tenc in zip(*(iter(sliders),) * 3):
|
127 |
+
if not separate:
|
128 |
+
w_unet = w
|
129 |
+
w_tenc = w
|
130 |
+
updates.append(gr.Slider.update(visible=not separate)) # Combined
|
131 |
+
updates.append(gr.Slider.update(visible=separate, value=w_unet)) # UNet
|
132 |
+
updates.append(gr.Slider.update(visible=separate, value=w_tenc)) # TEnc
|
133 |
+
return updates
|
134 |
+
|
135 |
+
separate_weights.change(update_weight_sliders, inputs=[separate_weights] + weight_sliders, outputs=weight_sliders)
|
136 |
+
|
137 |
+
def refresh_all_models(*dropdowns):
|
138 |
+
model_util.update_models()
|
139 |
+
updates = []
|
140 |
+
for dd in dropdowns:
|
141 |
+
if dd in lora_models:
|
142 |
+
selected = dd
|
143 |
+
else:
|
144 |
+
selected = "None"
|
145 |
+
update = gr.Dropdown.update(value=selected, choices=list(lora_models.keys()))
|
146 |
+
updates.append(update)
|
147 |
+
return updates
|
148 |
+
|
149 |
+
# mask for regions
|
150 |
+
with gr.Accordion("Extra args", open=False):
|
151 |
+
with gr.Row():
|
152 |
+
mask_image = gr.Image(label="mask image:")
|
153 |
+
ctrls.append(mask_image)
|
154 |
+
|
155 |
+
refresh_models = gr.Button(value="Refresh models")
|
156 |
+
refresh_models.click(refresh_all_models, inputs=model_dropdowns, outputs=model_dropdowns)
|
157 |
+
ctrls.append(refresh_models)
|
158 |
+
|
159 |
+
return ctrls
|
160 |
+
|
161 |
+
def set_infotext_fields(self, p, params):
|
162 |
+
for i, t in enumerate(params):
|
163 |
+
module, model, weight_unet, weight_tenc = t
|
164 |
+
if model is None or model == "None" or len(model) == 0 or (weight_unet == 0 and weight_tenc == 0):
|
165 |
+
continue
|
166 |
+
p.extra_generation_params.update(
|
167 |
+
{
|
168 |
+
"AddNet Enabled": True,
|
169 |
+
f"AddNet Module {i+1}": module,
|
170 |
+
f"AddNet Model {i+1}": model,
|
171 |
+
f"AddNet Weight A {i+1}": weight_unet,
|
172 |
+
f"AddNet Weight B {i+1}": weight_tenc,
|
173 |
+
}
|
174 |
+
)
|
175 |
+
|
176 |
+
def restore_networks(self, sd_model):
|
177 |
+
unet = sd_model.model.diffusion_model
|
178 |
+
text_encoder = sd_model.cond_stage_model
|
179 |
+
|
180 |
+
if len(self.latest_networks) > 0:
|
181 |
+
print("restoring last networks")
|
182 |
+
for network, _ in self.latest_networks[::-1]:
|
183 |
+
network.restore(text_encoder, unet)
|
184 |
+
self.latest_networks.clear()
|
185 |
+
|
186 |
+
def process_batch(self, p, *args, **kwargs):
|
187 |
+
unet = p.sd_model.model.diffusion_model
|
188 |
+
text_encoder = p.sd_model.cond_stage_model
|
189 |
+
|
190 |
+
if not args[0]:
|
191 |
+
self.restore_networks(p.sd_model)
|
192 |
+
return
|
193 |
+
|
194 |
+
params = []
|
195 |
+
for i, ctrl in enumerate(args[2:]):
|
196 |
+
if i % 4 == 0:
|
197 |
+
param = [ctrl]
|
198 |
+
else:
|
199 |
+
param.append(ctrl)
|
200 |
+
if i % 4 == 3:
|
201 |
+
params.append(param)
|
202 |
+
|
203 |
+
models_changed = len(self.latest_networks) == 0 # no latest network (cleared by check-off)
|
204 |
+
models_changed = models_changed or self.latest_model_hash != p.sd_model.sd_model_hash
|
205 |
+
if not models_changed:
|
206 |
+
for (l_module, l_model, l_weight_unet, l_weight_tenc), (module, model, weight_unet, weight_tenc) in zip(
|
207 |
+
self.latest_params, params
|
208 |
+
):
|
209 |
+
if l_module != module or l_model != model or l_weight_unet != weight_unet or l_weight_tenc != weight_tenc:
|
210 |
+
models_changed = True
|
211 |
+
break
|
212 |
+
|
213 |
+
if models_changed:
|
214 |
+
self.restore_networks(p.sd_model)
|
215 |
+
self.latest_params = params
|
216 |
+
self.latest_model_hash = p.sd_model.sd_model_hash
|
217 |
+
|
218 |
+
for module, model, weight_unet, weight_tenc in self.latest_params:
|
219 |
+
if model is None or model == "None" or len(model) == 0:
|
220 |
+
continue
|
221 |
+
if weight_unet == 0 and weight_tenc == 0:
|
222 |
+
print(f"ignore because weight is 0: {model}")
|
223 |
+
continue
|
224 |
+
|
225 |
+
model_path = lora_models.get(model, None)
|
226 |
+
if model_path is None:
|
227 |
+
raise RuntimeError(f"model not found: {model}")
|
228 |
+
|
229 |
+
if model_path.startswith('"') and model_path.endswith('"'): # trim '"' at start/end
|
230 |
+
model_path = model_path[1:-1]
|
231 |
+
if not os.path.exists(model_path):
|
232 |
+
print(f"file not found: {model_path}")
|
233 |
+
continue
|
234 |
+
|
235 |
+
print(f"{module} weight_unet: {weight_unet}, weight_tenc: {weight_tenc}, model: {model}")
|
236 |
+
if module == "LoRA":
|
237 |
+
if os.path.splitext(model_path)[1] == ".safetensors":
|
238 |
+
from safetensors.torch import load_file
|
239 |
+
|
240 |
+
du_state_dict = load_file(model_path)
|
241 |
+
else:
|
242 |
+
du_state_dict = torch.load(model_path, map_location="cpu")
|
243 |
+
|
244 |
+
network, info = lora_compvis.create_network_and_apply_compvis(
|
245 |
+
du_state_dict, weight_tenc, weight_unet, text_encoder, unet
|
246 |
+
)
|
247 |
+
# in medvram, device is different for u-net and sd_model, so use sd_model's
|
248 |
+
network.to(p.sd_model.device, dtype=p.sd_model.dtype)
|
249 |
+
|
250 |
+
print(f"LoRA model {model} loaded: {info}")
|
251 |
+
self.latest_networks.append((network, model))
|
252 |
+
if len(self.latest_networks) > 0:
|
253 |
+
print("setting (or sd model) changed. new networks created.")
|
254 |
+
|
255 |
+
# apply mask: currently only top 3 networks are supported
|
256 |
+
if len(self.latest_networks) > 0:
|
257 |
+
mask_image = args[-2]
|
258 |
+
if mask_image is not None:
|
259 |
+
mask_image = mask_image.astype(np.float32) / 255.0
|
260 |
+
print(f"use mask image to control LoRA regions.")
|
261 |
+
for i, (network, model) in enumerate(self.latest_networks[:3]):
|
262 |
+
if not hasattr(network, "set_mask"):
|
263 |
+
continue
|
264 |
+
mask = mask_image[:, :, i] # R,G,B
|
265 |
+
if mask.max() <= 0:
|
266 |
+
continue
|
267 |
+
mask = torch.tensor(mask, dtype=p.sd_model.dtype, device=p.sd_model.device)
|
268 |
+
|
269 |
+
network.set_mask(mask, height=p.height, width=p.width, hr_height=p.hr_upscale_to_y, hr_width=p.hr_upscale_to_x)
|
270 |
+
print(f"apply mask. channel: {i}, model: {model}")
|
271 |
+
else:
|
272 |
+
for network, _ in self.latest_networks:
|
273 |
+
if hasattr(network, "set_mask"):
|
274 |
+
network.set_mask(None)
|
275 |
+
|
276 |
+
self.set_infotext_fields(p, self.latest_params)
|
277 |
+
|
278 |
+
|
279 |
+
def on_script_unloaded():
|
280 |
+
if shared.sd_model:
|
281 |
+
for s in scripts.scripts_txt2img.alwayson_scripts:
|
282 |
+
if isinstance(s, Script):
|
283 |
+
s.restore_networks(shared.sd_model)
|
284 |
+
break
|
285 |
+
|
286 |
+
|
287 |
+
def on_ui_tabs():
|
288 |
+
global addnet_paste_params
|
289 |
+
with gr.Blocks(analytics_enabled=False) as additional_networks_interface:
|
290 |
+
metadata_editor.setup_ui(addnet_paste_params)
|
291 |
+
|
292 |
+
return [(additional_networks_interface, "Additional Networks", "additional_networks")]
|
293 |
+
|
294 |
+
|
295 |
+
def on_ui_settings():
|
296 |
+
section = ("additional_networks", "Additional Networks")
|
297 |
+
shared.opts.add_option(
|
298 |
+
"additional_networks_extra_lora_path",
|
299 |
+
shared.OptionInfo(
|
300 |
+
"",
|
301 |
+
"""Extra paths to scan for LoRA models, comma-separated. Paths containing commas must be enclosed in double quotes. In the path, " (one quote) must be replaced by "" (two quotes).""",
|
302 |
+
section=section,
|
303 |
+
),
|
304 |
+
)
|
305 |
+
shared.opts.add_option(
|
306 |
+
"additional_networks_sort_models_by",
|
307 |
+
shared.OptionInfo(
|
308 |
+
"name",
|
309 |
+
"Sort LoRA models by",
|
310 |
+
gr.Radio,
|
311 |
+
{"choices": ["name", "date", "path name", "rating", "has user metadata"]},
|
312 |
+
section=section,
|
313 |
+
),
|
314 |
+
)
|
315 |
+
shared.opts.add_option(
|
316 |
+
"additional_networks_reverse_sort_order", shared.OptionInfo(False, "Reverse model sort order", section=section)
|
317 |
+
)
|
318 |
+
shared.opts.add_option(
|
319 |
+
"additional_networks_model_name_filter", shared.OptionInfo("", "LoRA model name filter", section=section)
|
320 |
+
)
|
321 |
+
shared.opts.add_option(
|
322 |
+
"additional_networks_xy_grid_model_metadata",
|
323 |
+
shared.OptionInfo(
|
324 |
+
"",
|
325 |
+
'Metadata to show in XY-Grid label for Model axes, comma-separated (example: "ss_learning_rate, ss_num_epochs")',
|
326 |
+
section=section,
|
327 |
+
),
|
328 |
+
)
|
329 |
+
shared.opts.add_option(
|
330 |
+
"additional_networks_hash_thread_count",
|
331 |
+
shared.OptionInfo(1, "# of threads to use for hash calculation (increase if using an SSD)", section=section),
|
332 |
+
)
|
333 |
+
shared.opts.add_option(
|
334 |
+
"additional_networks_back_up_model_when_saving",
|
335 |
+
shared.OptionInfo(True, "Make a backup copy of the model being edited when saving its metadata.", section=section),
|
336 |
+
)
|
337 |
+
shared.opts.add_option(
|
338 |
+
"additional_networks_show_only_safetensors",
|
339 |
+
shared.OptionInfo(False, "Only show .safetensors format models", section=section),
|
340 |
+
)
|
341 |
+
shared.opts.add_option(
|
342 |
+
"additional_networks_show_only_models_with_metadata",
|
343 |
+
shared.OptionInfo(
|
344 |
+
"disabled",
|
345 |
+
"Only show models that have/don't have user-added metadata",
|
346 |
+
gr.Radio,
|
347 |
+
{"choices": ["disabled", "has metadata", "missing metadata"]},
|
348 |
+
section=section,
|
349 |
+
),
|
350 |
+
)
|
351 |
+
shared.opts.add_option(
|
352 |
+
"additional_networks_max_top_tags", shared.OptionInfo(20, "Max number of top tags to show", section=section)
|
353 |
+
)
|
354 |
+
shared.opts.add_option(
|
355 |
+
"additional_networks_max_dataset_folders", shared.OptionInfo(20, "Max number of dataset folders to show", section=section)
|
356 |
+
)
|
357 |
+
|
358 |
+
|
359 |
+
def on_infotext_pasted(infotext, params):
|
360 |
+
if "AddNet Enabled" not in params:
|
361 |
+
params["AddNet Enabled"] = "False"
|
362 |
+
|
363 |
+
# TODO changing "AddNet Separate Weights" does not seem to work
|
364 |
+
if "AddNet Separate Weights" not in params:
|
365 |
+
params["AddNet Separate Weights"] = "False"
|
366 |
+
|
367 |
+
for i in range(MAX_MODEL_COUNT):
|
368 |
+
# Convert combined weight into new format
|
369 |
+
if f"AddNet Weight {i+1}" in params:
|
370 |
+
params[f"AddNet Weight A {i+1}"] = params[f"AddNet Weight {i+1}"]
|
371 |
+
params[f"AddNet Weight B {i+1}"] = params[f"AddNet Weight {i+1}"]
|
372 |
+
|
373 |
+
if f"AddNet Module {i+1}" not in params:
|
374 |
+
params[f"AddNet Module {i+1}"] = "LoRA"
|
375 |
+
if f"AddNet Model {i+1}" not in params:
|
376 |
+
params[f"AddNet Model {i+1}"] = "None"
|
377 |
+
if f"AddNet Weight A {i+1}" not in params:
|
378 |
+
params[f"AddNet Weight A {i+1}"] = "0"
|
379 |
+
if f"AddNet Weight B {i+1}" not in params:
|
380 |
+
params[f"AddNet Weight B {i+1}"] = "0"
|
381 |
+
|
382 |
+
params[f"AddNet Weight {i+1}"] = params[f"AddNet Weight A {i+1}"]
|
383 |
+
|
384 |
+
if params[f"AddNet Weight A {i+1}"] != params[f"AddNet Weight B {i+1}"]:
|
385 |
+
params["AddNet Separate Weights"] = "True"
|
386 |
+
|
387 |
+
# Convert potential legacy name/hash to new format
|
388 |
+
params[f"AddNet Model {i+1}"] = str(model_util.find_closest_lora_model_name(params[f"AddNet Model {i+1}"]))
|
389 |
+
|
390 |
+
addnet_xyz_grid_support.update_axis_params(i, params[f"AddNet Module {i+1}"], params[f"AddNet Model {i+1}"])
|
391 |
+
|
392 |
+
|
393 |
+
addnet_xyz_grid_support.initialize(Script)
|
394 |
+
|
395 |
+
|
396 |
+
script_callbacks.on_script_unloaded(on_script_unloaded)
|
397 |
+
script_callbacks.on_ui_tabs(on_ui_tabs)
|
398 |
+
script_callbacks.on_ui_settings(on_ui_settings)
|
399 |
+
script_callbacks.on_infotext_pasted(on_infotext_pasted)
|
extensions/addtional/scripts/addnet_xyz_grid_support.py
ADDED
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import os.path
|
3 |
+
from modules import shared
|
4 |
+
import modules.scripts as scripts
|
5 |
+
from scripts import model_util, util
|
6 |
+
from scripts.model_util import MAX_MODEL_COUNT
|
7 |
+
|
8 |
+
|
9 |
+
LORA_TRAIN_METADATA_NAMES = {
|
10 |
+
"ss_session_id": "Session ID",
|
11 |
+
"ss_training_started_at": "Training started at",
|
12 |
+
"ss_output_name": "Output name",
|
13 |
+
"ss_learning_rate": "Learning rate",
|
14 |
+
"ss_text_encoder_lr": "Text encoder LR",
|
15 |
+
"ss_unet_lr": "UNet LR",
|
16 |
+
"ss_num_train_images": "# of training images",
|
17 |
+
"ss_num_reg_images": "# of reg images",
|
18 |
+
"ss_num_batches_per_epoch": "Batches per epoch",
|
19 |
+
"ss_num_epochs": "Total epochs",
|
20 |
+
"ss_epoch": "Epoch",
|
21 |
+
"ss_batch_size_per_device": "Batch size/device",
|
22 |
+
"ss_total_batch_size": "Total batch size",
|
23 |
+
"ss_gradient_checkpointing": "Gradient checkpointing",
|
24 |
+
"ss_gradient_accumulation_steps": "Gradient accum. steps",
|
25 |
+
"ss_max_train_steps": "Max train steps",
|
26 |
+
"ss_lr_warmup_steps": "LR warmup steps",
|
27 |
+
"ss_lr_scheduler": "LR scheduler",
|
28 |
+
"ss_network_module": "Network module",
|
29 |
+
"ss_network_dim": "Network dim",
|
30 |
+
"ss_network_alpha": "Network alpha",
|
31 |
+
"ss_mixed_precision": "Mixed precision",
|
32 |
+
"ss_full_fp16": "Full FP16",
|
33 |
+
"ss_v2": "V2",
|
34 |
+
"ss_resolution": "Resolution",
|
35 |
+
"ss_clip_skip": "Clip skip",
|
36 |
+
"ss_max_token_length": "Max token length",
|
37 |
+
"ss_color_aug": "Color aug",
|
38 |
+
"ss_flip_aug": "Flip aug",
|
39 |
+
"ss_random_crop": "Random crop",
|
40 |
+
"ss_shuffle_caption": "Shuffle caption",
|
41 |
+
"ss_cache_latents": "Cache latents",
|
42 |
+
"ss_enable_bucket": "Enable bucket",
|
43 |
+
"ss_min_bucket_reso": "Min bucket reso.",
|
44 |
+
"ss_max_bucket_reso": "Max bucket reso.",
|
45 |
+
"ss_seed": "Seed",
|
46 |
+
"ss_keep_tokens": "Keep tokens",
|
47 |
+
"ss_dataset_dirs": "Dataset dirs.",
|
48 |
+
"ss_reg_dataset_dirs": "Reg dataset dirs.",
|
49 |
+
"ss_sd_model_name": "SD model name",
|
50 |
+
"ss_vae_name": "VAE name",
|
51 |
+
"ss_training_comment": "Comment",
|
52 |
+
}
|
53 |
+
|
54 |
+
|
55 |
+
xy_grid = None # XY Grid module
|
56 |
+
script_class = None # additional_networks scripts.Script class
|
57 |
+
axis_params = [{}] * MAX_MODEL_COUNT
|
58 |
+
|
59 |
+
|
60 |
+
def update_axis_params(i, module, model):
|
61 |
+
axis_params[i] = {"module": module, "model": model}
|
62 |
+
|
63 |
+
|
64 |
+
def get_axis_model_choices(i):
|
65 |
+
module = axis_params[i].get("module", "None")
|
66 |
+
model = axis_params[i].get("model", "None")
|
67 |
+
|
68 |
+
if module == "LoRA":
|
69 |
+
if model != "None":
|
70 |
+
sort_by = shared.opts.data.get("additional_networks_sort_models_by", "name")
|
71 |
+
return ["None"] + model_util.get_model_list(module, model, "", sort_by)
|
72 |
+
|
73 |
+
return [f"select `Model {i+1}` in `Additional Networks`. models in same folder for selected one will be shown here."]
|
74 |
+
|
75 |
+
|
76 |
+
def update_script_args(p, value, arg_idx):
|
77 |
+
global script_class
|
78 |
+
for s in scripts.scripts_txt2img.alwayson_scripts:
|
79 |
+
if isinstance(s, script_class):
|
80 |
+
args = list(p.script_args)
|
81 |
+
# print(f"Changed arg {arg_idx} from {args[s.args_from + arg_idx - 1]} to {value}")
|
82 |
+
args[s.args_from + arg_idx] = value
|
83 |
+
p.script_args = tuple(args)
|
84 |
+
break
|
85 |
+
|
86 |
+
|
87 |
+
def confirm_models(p, xs):
|
88 |
+
for x in xs:
|
89 |
+
if x in ["", "None"]:
|
90 |
+
continue
|
91 |
+
if not model_util.find_closest_lora_model_name(x):
|
92 |
+
raise RuntimeError(f"Unknown LoRA model: {x}")
|
93 |
+
|
94 |
+
|
95 |
+
def apply_module(p, x, xs, i):
    # Axis apply callback: set the module type for slot i.
    # Script arg layout: enabled, separate_weights, then 4 args per slot.
    update_script_args(p, True, 0)  # set Enabled to True
    update_script_args(p, x, 2 + 4 * i)  # enabled, separate_weights, ({module}, model, weight_unet, weight_tenc), ...
|
98 |
+
|
99 |
+
|
100 |
+
def apply_model(p, x, xs, i):
    # Axis apply callback: resolve the user-typed value to a known model
    # name, then set it for slot i.
    name = model_util.find_closest_lora_model_name(x)
    update_script_args(p, True, 0)
    update_script_args(p, name, 3 + 4 * i)  # enabled, separate_weights, (module, {model}, weight_unet, weight_tenc), ...
|
104 |
+
|
105 |
+
|
106 |
+
def apply_weight(p, x, xs, i):
    # Axis apply callback: set BOTH the UNet and TEnc weights for slot i.
    update_script_args(p, True, 0)
    update_script_args(p, x, 4 + 4 * i)  # enabled, separate_weights, (module, model, {weight_unet, weight_tenc}), ...
    update_script_args(p, x, 5 + 4 * i)
|
110 |
+
|
111 |
+
|
112 |
+
def apply_weight_unet(p, x, xs, i):
    # Axis apply callback: set only the UNet weight for slot i.
    update_script_args(p, True, 0)
    update_script_args(p, x, 4 + 4 * i)  # enabled, separate_weights, (module, model, {weight_unet}, weight_tenc), ...
|
115 |
+
|
116 |
+
|
117 |
+
def apply_weight_tenc(p, x, xs, i):
    # Axis apply callback: set only the text-encoder weight for slot i.
    update_script_args(p, True, 0)
    update_script_args(p, x, 5 + 4 * i)  # enabled, separate_weights, (module, model, weight_unet, {weight_tenc}), ...
|
120 |
+
|
121 |
+
|
122 |
+
def format_lora_model(p, opt, x):
    """Format a grid axis label for a LoRA model, appending any training
    metadata fields the user selected in settings."""
    global xy_grid
    resolved = model_util.find_closest_lora_model_name(x)
    if resolved is None or resolved.lower() in ("", "none"):
        return "None"

    label = xy_grid.format_value(p, opt, resolved)

    metadata = model_util.read_model_metadata(model_util.lora_models.get(resolved), "LoRA")
    if not metadata:
        return label

    requested = util.split_path_list(shared.opts.data.get("additional_networks_xy_grid_model_metadata", ""))
    if not requested:
        return label

    for raw_name in requested:
        key = raw_name.strip()
        if key in metadata:
            display = LORA_TRAIN_METADATA_NAMES.get(key, key)
            label += f"\n{display}: {metadata[key]}, "

    # drop the trailing ", " left by the loop above
    return label.strip(" ").strip(",")
|
146 |
+
|
147 |
+
|
148 |
+
def initialize(script):
    """Register AddNet axes with the xy/xyz grid script, if installed.

    Called once with the additional_networks scripts.Script class; scans the
    loaded scripts for xy_grid.py / xyz_grid.py and, when found, adds four
    AxisOption entries (model, combined weight, UNet weight, TEnc weight)
    per model slot.
    """
    global xy_grid, script_class
    xy_grid = None
    script_class = script
    for scriptDataTuple in scripts.scripts_data:
        if os.path.basename(scriptDataTuple.path) == "xy_grid.py" or os.path.basename(scriptDataTuple.path) == "xyz_grid.py":
            xy_grid = scriptDataTuple.module
            for i in range(MAX_MODEL_COUNT):
                # AxisOption positional args: label, value type, apply fn,
                # format fn, confirm fn. The `i=i` defaults bind the slot
                # index at definition time (avoids the late-binding closure
                # pitfall inside this loop).
                model = xy_grid.AxisOption(
                    f"AddNet Model {i+1}",
                    str,
                    lambda p, x, xs, i=i: apply_model(p, x, xs, i),
                    format_lora_model,
                    confirm_models,
                    cost=0.5,
                    choices=lambda i=i: get_axis_model_choices(i),
                )
                weight = xy_grid.AxisOption(
                    f"AddNet Weight {i+1}",
                    float,
                    lambda p, x, xs, i=i: apply_weight(p, x, xs, i),
                    xy_grid.format_value_add_label,
                    None,
                    cost=0.5,
                )
                weight_unet = xy_grid.AxisOption(
                    f"AddNet UNet Weight {i+1}",
                    float,
                    lambda p, x, xs, i=i: apply_weight_unet(p, x, xs, i),
                    xy_grid.format_value_add_label,
                    None,
                    cost=0.5,
                )
                weight_tenc = xy_grid.AxisOption(
                    f"AddNet TEnc Weight {i+1}",
                    float,
                    lambda p, x, xs, i=i: apply_weight_tenc(p, x, xs, i),
                    xy_grid.format_value_add_label,
                    None,
                    cost=0.5,
                )
                xy_grid.axis_options.extend([model, weight, weight_unet, weight_tenc])
|
extensions/addtional/scripts/lora_compvis.py
ADDED
@@ -0,0 +1,634 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# LoRA network module
|
2 |
+
# reference:
|
3 |
+
# https://github.com/microsoft/LoRA/blob/main/loralib/layers.py
|
4 |
+
# https://github.com/cloneofsimo/lora/blob/master/lora_diffusion/lora.py
|
5 |
+
|
6 |
+
import copy
|
7 |
+
import math
|
8 |
+
import re
|
9 |
+
from typing import NamedTuple
|
10 |
+
import torch
|
11 |
+
|
12 |
+
|
13 |
+
class LoRAInfo(NamedTuple):
    """Record for a LoRA whose weights are merged later instead of hooking
    forward (used for SD2.x MultiheadAttention projections)."""

    lora_name: str  # full LoRA key, e.g. ..._attn_q_proj
    module_name: str  # owning module key without the *_proj suffix
    module: torch.nn.Module  # the MultiheadAttention module to merge into
    multiplier: float
    dim: int  # LoRA rank
    alpha: float
|
20 |
+
|
21 |
+
|
22 |
+
class LoRAModule(torch.nn.Module):
    """
    replaces forward method of the original Linear, instead of replacing the original Linear module.
    """

    def __init__(self, lora_name, org_module: torch.nn.Module, multiplier=1.0, lora_dim=4, alpha=1):
        """if alpha == 0 or None, alpha is rank (no scaling)."""
        super().__init__()
        self.lora_name = lora_name
        self.lora_dim = lora_dim

        # Mirror the original layer's geometry: Conv2d gets conv down/up
        # projections, anything else is treated as Linear.
        if org_module.__class__.__name__ == "Conv2d":
            in_dim = org_module.in_channels
            out_dim = org_module.out_channels

            # self.lora_dim = min(self.lora_dim, in_dim, out_dim)
            # if self.lora_dim != lora_dim:
            #     print(f"{lora_name} dim (rank) is changed to: {self.lora_dim}")

            kernel_size = org_module.kernel_size
            stride = org_module.stride
            padding = org_module.padding
            self.lora_down = torch.nn.Conv2d(in_dim, self.lora_dim, kernel_size, stride, padding, bias=False)
            self.lora_up = torch.nn.Conv2d(self.lora_dim, out_dim, (1, 1), (1, 1), bias=False)
        else:
            in_dim = org_module.in_features
            out_dim = org_module.out_features
            self.lora_down = torch.nn.Linear(in_dim, self.lora_dim, bias=False)
            self.lora_up = torch.nn.Linear(self.lora_dim, out_dim, bias=False)

        if type(alpha) == torch.Tensor:
            alpha = alpha.detach().float().numpy()  # without casting, bf16 causes error
        alpha = self.lora_dim if alpha is None or alpha == 0 else alpha
        self.scale = alpha / self.lora_dim
        # buffer (not parameter): treated as a constant, saved with state_dict
        self.register_buffer("alpha", torch.tensor(alpha))

        # same as microsoft's
        torch.nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5))
        torch.nn.init.zeros_(self.lora_up.weight)

        self.multiplier = multiplier
        self.org_forward = org_module.forward
        self.org_module = org_module  # remove in applying
        # regional-mask state, populated per generation via set_mask_dic
        self.mask_dic = None
        self.mask = None
        self.mask_area = -1

    def apply_to(self):
        # Hook: replace the original module's forward with ours, then drop
        # the reference so the original layer is not in our state_dict.
        self.org_forward = self.org_module.forward
        self.org_module.forward = self.forward
        del self.org_module

    def set_mask_dic(self, mask_dic):
        # called before every generation

        # check this module is related to h,w (not context and time emb)
        if "attn2_to_k" in self.lora_name or "attn2_to_v" in self.lora_name or "emb_layers" in self.lora_name:
            # print(f"LoRA for context or time emb: {self.lora_name}")
            self.mask_dic = None
        else:
            self.mask_dic = mask_dic

        self.mask = None

    def forward(self, x):
        """
        may be cascaded.
        """
        # fast path: no regional mask -> plain additive LoRA
        if self.mask_dic is None:
            return self.org_forward(x) + self.lora_up(self.lora_down(x)) * self.multiplier * self.scale

        # regional LoRA

        # calculate lora and get size
        lx = self.lora_up(self.lora_down(x))

        # mask_dic is keyed by spatial area; 4-D activations are b,c,h,w,
        # 3-D are b,seq,dim (seq is the flattened area)
        if len(lx.size()) == 4:  # b,c,h,w
            area = lx.size()[2] * lx.size()[3]
        else:
            area = lx.size()[1]  # b,seq,dim

        if self.mask is None or self.mask_area != area:
            # get mask (cached until the activation area changes)
            # print(self.lora_name, x.size(), lx.size(), area)
            mask = self.mask_dic[area]
            if len(lx.size()) == 3:
                mask = torch.reshape(mask, (1, -1, 1))
            self.mask = mask
            self.mask_area = area

        return self.org_forward(x) + lx * self.multiplier * self.scale * self.mask
|
113 |
+
|
114 |
+
|
115 |
+
def create_network_and_apply_compvis(du_state_dict, multiplier_tenc, multiplier_unet, text_encoder, unet, **kwargs):
    """Build a LoRANetworkCompvis from a diffusers-format state dict, apply it
    to the given text encoder / unet, and load its weights.

    Returns (network, load_info) where load_info is the (possibly pruned)
    result of load_state_dict(strict=False).
    """
    # get dtype from the first Linear in the unet; fall back to float32 so
    # `dtype` can never be unbound (the original raised NameError when the
    # unet contained no Linear module)
    dtype = torch.float32
    for module in unet.modules():
        if module.__class__.__name__ == "Linear":
            param: torch.nn.Parameter = module.weight
            # device = param.device
            dtype = param.dtype
            break

    # get dims (rank) and alpha from state dict
    modules_dim = {}
    modules_alpha = {}
    for key, value in du_state_dict.items():
        if "." not in key:
            continue

        lora_name = key.split(".")[0]
        if "alpha" in key:
            modules_alpha[lora_name] = float(value.detach().to(torch.float).cpu().numpy())
        elif "lora_down" in key:
            # rank is the row count of the down-projection weight
            dim = value.size()[0]
            modules_dim[lora_name] = dim

    # support old LoRA without alpha
    for key in modules_dim.keys():
        if key not in modules_alpha:
            modules_alpha[key] = modules_dim[key]

    print(
        f"dimension: {set(modules_dim.values())}, alpha: {set(modules_alpha.values())}, multiplier_unet: {multiplier_unet}, multiplier_tenc: {multiplier_tenc}"
    )

    # create, apply and load weights
    network = LoRANetworkCompvis(text_encoder, unet, multiplier_tenc, multiplier_unet, modules_dim, modules_alpha)
    state_dict = network.apply_lora_modules(du_state_dict)  # some weights are applied to text encoder
    network.to(dtype)  # with this, if error comes from next line, the model will be used
    info = network.load_state_dict(state_dict, strict=False)

    # remove redundant warnings: collapse per-module "missing alpha" keys
    # into a single summary entry
    if len(info.missing_keys) > 4:
        missing_keys = []
        alpha_count = 0
        for key in info.missing_keys:
            if "alpha" not in key:
                missing_keys.append(key)
            else:
                if alpha_count == 0:
                    missing_keys.append(key)
                alpha_count += 1
        if alpha_count > 1:
            missing_keys.append(
                f"... and {alpha_count-1} alphas. The model doesn't have alpha, use dim (rannk) as alpha. You can ignore this message."
            )

        info = torch.nn.modules.module._IncompatibleKeys(missing_keys, info.unexpected_keys)

    return network, info
|
177 |
+
|
178 |
+
|
179 |
+
class LoRANetworkCompvis(torch.nn.Module):
    """Collection of LoRA modules applied to a CompVis (webui) SD model.

    Targets Linear/Conv2d children of the module classes listed below; SD2.x
    MultiheadAttention projections are handled by weight merging instead.
    """

    # UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel", "Attention"]
    # TEXT_ENCODER_TARGET_REPLACE_MODULE = ["CLIPAttention", "CLIPMLP"]
    UNET_TARGET_REPLACE_MODULE = ["SpatialTransformer", "ResBlock", "Downsample", "Upsample"]  # , "Attention"]
    TEXT_ENCODER_TARGET_REPLACE_MODULE = ["ResidualAttentionBlock", "CLIPAttention", "CLIPMLP"]

    # key prefixes used in LoRA state dicts
    LORA_PREFIX_UNET = "lora_unet"
    LORA_PREFIX_TEXT_ENCODER = "lora_te"
|
187 |
+
|
188 |
+
@classmethod
|
189 |
+
def convert_diffusers_name_to_compvis(cls, v2, du_name):
|
190 |
+
"""
|
191 |
+
convert diffusers's LoRA name to CompVis
|
192 |
+
"""
|
193 |
+
cv_name = None
|
194 |
+
if "lora_unet_" in du_name:
|
195 |
+
m = re.search(r"_down_blocks_(\d+)_attentions_(\d+)_(.+)", du_name)
|
196 |
+
if m:
|
197 |
+
du_block_index = int(m.group(1))
|
198 |
+
du_attn_index = int(m.group(2))
|
199 |
+
du_suffix = m.group(3)
|
200 |
+
|
201 |
+
cv_index = 1 + du_block_index * 3 + du_attn_index # 1,2, 4,5, 7,8
|
202 |
+
cv_name = f"lora_unet_input_blocks_{cv_index}_1_{du_suffix}"
|
203 |
+
return cv_name
|
204 |
+
|
205 |
+
m = re.search(r"_mid_block_attentions_(\d+)_(.+)", du_name)
|
206 |
+
if m:
|
207 |
+
du_suffix = m.group(2)
|
208 |
+
cv_name = f"lora_unet_middle_block_1_{du_suffix}"
|
209 |
+
return cv_name
|
210 |
+
|
211 |
+
m = re.search(r"_up_blocks_(\d+)_attentions_(\d+)_(.+)", du_name)
|
212 |
+
if m:
|
213 |
+
du_block_index = int(m.group(1))
|
214 |
+
du_attn_index = int(m.group(2))
|
215 |
+
du_suffix = m.group(3)
|
216 |
+
|
217 |
+
cv_index = du_block_index * 3 + du_attn_index # 3,4,5, 6,7,8, 9,10,11
|
218 |
+
cv_name = f"lora_unet_output_blocks_{cv_index}_1_{du_suffix}"
|
219 |
+
return cv_name
|
220 |
+
|
221 |
+
m = re.search(r"_down_blocks_(\d+)_resnets_(\d+)_(.+)", du_name)
|
222 |
+
if m:
|
223 |
+
du_block_index = int(m.group(1))
|
224 |
+
du_res_index = int(m.group(2))
|
225 |
+
du_suffix = m.group(3)
|
226 |
+
cv_suffix = {
|
227 |
+
"conv1": "in_layers_2",
|
228 |
+
"conv2": "out_layers_3",
|
229 |
+
"time_emb_proj": "emb_layers_1",
|
230 |
+
"conv_shortcut": "skip_connection",
|
231 |
+
}[du_suffix]
|
232 |
+
|
233 |
+
cv_index = 1 + du_block_index * 3 + du_res_index # 1,2, 4,5, 7,8
|
234 |
+
cv_name = f"lora_unet_input_blocks_{cv_index}_0_{cv_suffix}"
|
235 |
+
return cv_name
|
236 |
+
|
237 |
+
m = re.search(r"_down_blocks_(\d+)_downsamplers_0_conv", du_name)
|
238 |
+
if m:
|
239 |
+
block_index = int(m.group(1))
|
240 |
+
cv_index = 3 + block_index * 3
|
241 |
+
cv_name = f"lora_unet_input_blocks_{cv_index}_0_op"
|
242 |
+
return cv_name
|
243 |
+
|
244 |
+
m = re.search(r"_mid_block_resnets_(\d+)_(.+)", du_name)
|
245 |
+
if m:
|
246 |
+
index = int(m.group(1))
|
247 |
+
du_suffix = m.group(2)
|
248 |
+
cv_suffix = {
|
249 |
+
"conv1": "in_layers_2",
|
250 |
+
"conv2": "out_layers_3",
|
251 |
+
"time_emb_proj": "emb_layers_1",
|
252 |
+
"conv_shortcut": "skip_connection",
|
253 |
+
}[du_suffix]
|
254 |
+
cv_name = f"lora_unet_middle_block_{index*2}_{cv_suffix}"
|
255 |
+
return cv_name
|
256 |
+
|
257 |
+
m = re.search(r"_up_blocks_(\d+)_resnets_(\d+)_(.+)", du_name)
|
258 |
+
if m:
|
259 |
+
du_block_index = int(m.group(1))
|
260 |
+
du_res_index = int(m.group(2))
|
261 |
+
du_suffix = m.group(3)
|
262 |
+
cv_suffix = {
|
263 |
+
"conv1": "in_layers_2",
|
264 |
+
"conv2": "out_layers_3",
|
265 |
+
"time_emb_proj": "emb_layers_1",
|
266 |
+
"conv_shortcut": "skip_connection",
|
267 |
+
}[du_suffix]
|
268 |
+
|
269 |
+
cv_index = du_block_index * 3 + du_res_index # 1,2, 4,5, 7,8
|
270 |
+
cv_name = f"lora_unet_output_blocks_{cv_index}_0_{cv_suffix}"
|
271 |
+
return cv_name
|
272 |
+
|
273 |
+
m = re.search(r"_up_blocks_(\d+)_upsamplers_0_conv", du_name)
|
274 |
+
if m:
|
275 |
+
block_index = int(m.group(1))
|
276 |
+
cv_index = block_index * 3 + 2
|
277 |
+
cv_name = f"lora_unet_output_blocks_{cv_index}_{bool(block_index)+1}_conv"
|
278 |
+
return cv_name
|
279 |
+
|
280 |
+
elif "lora_te_" in du_name:
|
281 |
+
m = re.search(r"_model_encoder_layers_(\d+)_(.+)", du_name)
|
282 |
+
if m:
|
283 |
+
du_block_index = int(m.group(1))
|
284 |
+
du_suffix = m.group(2)
|
285 |
+
|
286 |
+
cv_index = du_block_index
|
287 |
+
if v2:
|
288 |
+
if "mlp_fc1" in du_suffix:
|
289 |
+
cv_name = (
|
290 |
+
f"lora_te_wrapped_model_transformer_resblocks_{cv_index}_{du_suffix.replace('mlp_fc1', 'mlp_c_fc')}"
|
291 |
+
)
|
292 |
+
elif "mlp_fc2" in du_suffix:
|
293 |
+
cv_name = (
|
294 |
+
f"lora_te_wrapped_model_transformer_resblocks_{cv_index}_{du_suffix.replace('mlp_fc2', 'mlp_c_proj')}"
|
295 |
+
)
|
296 |
+
elif "self_attn":
|
297 |
+
# handled later
|
298 |
+
cv_name = f"lora_te_wrapped_model_transformer_resblocks_{cv_index}_{du_suffix.replace('self_attn', 'attn')}"
|
299 |
+
else:
|
300 |
+
cv_name = f"lora_te_wrapped_transformer_text_model_encoder_layers_{cv_index}_{du_suffix}"
|
301 |
+
|
302 |
+
assert cv_name is not None, f"conversion failed: {du_name}. the model may not be trained by `sd-scripts`."
|
303 |
+
return cv_name
|
304 |
+
|
305 |
+
@classmethod
|
306 |
+
def convert_state_dict_name_to_compvis(cls, v2, state_dict):
|
307 |
+
"""
|
308 |
+
convert keys in state dict to load it by load_state_dict
|
309 |
+
"""
|
310 |
+
new_sd = {}
|
311 |
+
for key, value in state_dict.items():
|
312 |
+
tokens = key.split(".")
|
313 |
+
compvis_name = LoRANetworkCompvis.convert_diffusers_name_to_compvis(v2, tokens[0])
|
314 |
+
new_key = compvis_name + "." + ".".join(tokens[1:])
|
315 |
+
|
316 |
+
new_sd[new_key] = value
|
317 |
+
|
318 |
+
return new_sd
|
319 |
+
|
320 |
+
    def __init__(self, text_encoder, unet, multiplier_tenc=1.0, multiplier_unet=1.0, modules_dim=None, modules_alpha=None) -> None:
        """Create LoRA modules for every targeted Linear/Conv2d (and record
        MHA projections for later weight merging).

        modules_dim / modules_alpha map diffusers-style LoRA names to rank
        and alpha as extracted from the state dict.
        """
        super().__init__()
        self.multiplier_unet = multiplier_unet
        self.multiplier_tenc = multiplier_tenc
        self.latest_mask_info = None

        # check v1 or v2: SD2.x text encoders contain MultiheadAttention
        self.v2 = False
        for _, module in text_encoder.named_modules():
            for _, child_module in module.named_modules():
                if child_module.__class__.__name__ == "MultiheadAttention":
                    self.v2 = True
                    break
            if self.v2:
                break

        # convert lora name to CompVis and get dim and alpha
        comp_vis_loras_dim_alpha = {}
        for du_lora_name in modules_dim.keys():
            dim = modules_dim[du_lora_name]
            alpha = modules_alpha[du_lora_name]
            comp_vis_lora_name = LoRANetworkCompvis.convert_diffusers_name_to_compvis(self.v2, du_lora_name)
            comp_vis_loras_dim_alpha[comp_vis_lora_name] = (dim, alpha)

        # create module instances
        def create_modules(prefix, root_module: torch.nn.Module, target_replace_modules, multiplier):
            # Returns (loras, replaced_modules): LoRAModule for Linear/Conv2d
            # children, LoRAInfo for MHA projections (merged later).
            loras = []
            replaced_modules = []
            for name, module in root_module.named_modules():
                if module.__class__.__name__ in target_replace_modules:
                    for child_name, child_module in module.named_modules():
                        # enumerate all Linear and Conv2d
                        if child_module.__class__.__name__ == "Linear" or child_module.__class__.__name__ == "Conv2d":
                            lora_name = prefix + "." + name + "." + child_name
                            lora_name = lora_name.replace(".", "_")
                            if "_resblocks_23_" in lora_name:  # ignore last block in StabilityAi Text Encoder
                                break
                            if lora_name not in comp_vis_loras_dim_alpha:
                                continue

                            dim, alpha = comp_vis_loras_dim_alpha[lora_name]
                            lora = LoRAModule(lora_name, child_module, multiplier, dim, alpha)
                            loras.append(lora)

                            replaced_modules.append(child_module)
                        elif child_module.__class__.__name__ == "MultiheadAttention":
                            # make four modules: not replacing forward method but merge weights later
                            for suffix in ["q_proj", "k_proj", "v_proj", "out_proj"]:
                                module_name = prefix + "." + name + "." + child_name  # ~.attn
                                module_name = module_name.replace(".", "_")
                                if "_resblocks_23_" in module_name:  # ignore last block in StabilityAi Text Encoder
                                    break

                                lora_name = module_name + "_" + suffix
                                if lora_name not in comp_vis_loras_dim_alpha:
                                    continue
                                dim, alpha = comp_vis_loras_dim_alpha[lora_name]
                                lora_info = LoRAInfo(lora_name, module_name, child_module, multiplier, dim, alpha)
                                loras.append(lora_info)

                                replaced_modules.append(child_module)
            return loras, replaced_modules

        self.text_encoder_loras, te_rep_modules = create_modules(
            LoRANetworkCompvis.LORA_PREFIX_TEXT_ENCODER,
            text_encoder,
            LoRANetworkCompvis.TEXT_ENCODER_TARGET_REPLACE_MODULE,
            self.multiplier_tenc,
        )
        print(f"create LoRA for Text Encoder: {len(self.text_encoder_loras)} modules.")

        self.unet_loras, unet_rep_modules = create_modules(
            LoRANetworkCompvis.LORA_PREFIX_UNET, unet, LoRANetworkCompvis.UNET_TARGET_REPLACE_MODULE, self.multiplier_unet
        )
        print(f"create LoRA for U-Net: {len(self.unet_loras)} modules.")

        # make backup of original forward/weights, if multiple modules are applied, do in 1st module only
        backed_up = False  # messaging purpose only
        for rep_module in te_rep_modules + unet_rep_modules:
            if (
                rep_module.__class__.__name__ == "MultiheadAttention"
            ):  # multiple MHA modules are in list, prevent to backed up forward
                if not hasattr(rep_module, "_lora_org_weights"):
                    # avoid updating of original weights. state_dict is reference to original weights
                    rep_module._lora_org_weights = copy.deepcopy(rep_module.state_dict())
                    backed_up = True
            elif not hasattr(rep_module, "_lora_org_forward"):
                rep_module._lora_org_forward = rep_module.forward
                backed_up = True
        if backed_up:
            print("original forward/weights is backed up.")

        # assertion: every LoRA name must be unique across TE and U-Net
        names = set()
        for lora in self.text_encoder_loras + self.unet_loras:
            assert lora.lora_name not in names, f"duplicated lora name: {lora.lora_name}"
            names.add(lora.lora_name)
|
417 |
+
|
418 |
+
def restore(self, text_encoder, unet):
|
419 |
+
# restore forward/weights from property for all modules
|
420 |
+
restored = False # messaging purpose only
|
421 |
+
modules = []
|
422 |
+
modules.extend(text_encoder.modules())
|
423 |
+
modules.extend(unet.modules())
|
424 |
+
for module in modules:
|
425 |
+
if hasattr(module, "_lora_org_forward"):
|
426 |
+
module.forward = module._lora_org_forward
|
427 |
+
del module._lora_org_forward
|
428 |
+
restored = True
|
429 |
+
if hasattr(
|
430 |
+
module, "_lora_org_weights"
|
431 |
+
): # module doesn't have forward and weights at same time currently, but supports it for future changing
|
432 |
+
module.load_state_dict(module._lora_org_weights)
|
433 |
+
del module._lora_org_weights
|
434 |
+
restored = True
|
435 |
+
|
436 |
+
if restored:
|
437 |
+
print("original forward/weights is restored.")
|
438 |
+
|
439 |
+
    def apply_lora_modules(self, du_state_dict):
        """Hook LoRA modules into the model and return a state dict ready for
        load_state_dict.

        Linear/Conv2d LoRAs get their forward hooked (apply_to); SD2.x MHA
        LoRAs are merged directly into the attention weights and their keys
        removed from the returned dict.
        """
        # conversion 1st step: convert names in state_dict
        state_dict = LoRANetworkCompvis.convert_state_dict_name_to_compvis(self.v2, du_state_dict)

        # check state_dict has text_encoder or unet
        weights_has_text_encoder = weights_has_unet = False
        for key in state_dict.keys():
            if key.startswith(LoRANetworkCompvis.LORA_PREFIX_TEXT_ENCODER):
                weights_has_text_encoder = True
            elif key.startswith(LoRANetworkCompvis.LORA_PREFIX_UNET):
                weights_has_unet = True
            if weights_has_text_encoder and weights_has_unet:
                break

        apply_text_encoder = weights_has_text_encoder
        apply_unet = weights_has_unet

        if apply_text_encoder:
            print("enable LoRA for text encoder")
        else:
            self.text_encoder_loras = []

        if apply_unet:
            print("enable LoRA for U-Net")
        else:
            self.unet_loras = []

        # add modules to network: this makes state_dict can be got from LoRANetwork
        mha_loras = {}
        for lora in self.text_encoder_loras + self.unet_loras:
            if type(lora) == LoRAModule:
                lora.apply_to()  # ensure remove reference to original Linear: reference makes key of state_dict
                self.add_module(lora.lora_name, lora)
            else:
                # SD2.x MultiheadAttention merge weights to MHA weights
                lora_info: LoRAInfo = lora
                if lora_info.module_name not in mha_loras:
                    mha_loras[lora_info.module_name] = {}

                lora_dic = mha_loras[lora_info.module_name]
                lora_dic[lora_info.lora_name] = lora_info
                if len(lora_dic) == 4:
                    # all four projections (q,k,v,out) collected: calculate and apply
                    module = lora_info.module
                    module_name = lora_info.module_name
                    w_q_dw = state_dict.get(module_name + "_q_proj.lora_down.weight")
                    if w_q_dw is not None:  # corresponding LoRA module exists
                        w_q_up = state_dict[module_name + "_q_proj.lora_up.weight"]
                        w_k_dw = state_dict[module_name + "_k_proj.lora_down.weight"]
                        w_k_up = state_dict[module_name + "_k_proj.lora_up.weight"]
                        w_v_dw = state_dict[module_name + "_v_proj.lora_down.weight"]
                        w_v_up = state_dict[module_name + "_v_proj.lora_up.weight"]
                        w_out_dw = state_dict[module_name + "_out_proj.lora_down.weight"]
                        w_out_up = state_dict[module_name + "_out_proj.lora_up.weight"]
                        q_lora_info = lora_dic[module_name + "_q_proj"]
                        k_lora_info = lora_dic[module_name + "_k_proj"]
                        v_lora_info = lora_dic[module_name + "_v_proj"]
                        out_lora_info = lora_dic[module_name + "_out_proj"]

                        sd = module.state_dict()
                        qkv_weight = sd["in_proj_weight"]
                        out_weight = sd["out_proj.weight"]
                        dev = qkv_weight.device

                        def merge_weights(l_info, weight, up_weight, down_weight):
                            # calculate in float, cast back to the original dtype
                            scale = l_info.alpha / l_info.dim
                            dtype = weight.dtype
                            weight = (
                                weight.float()
                                + l_info.multiplier
                                * (up_weight.to(dev, dtype=torch.float) @ down_weight.to(dev, dtype=torch.float))
                                * scale
                            )
                            weight = weight.to(dtype)
                            return weight

                        # MHA stores q,k,v stacked in in_proj_weight
                        q_weight, k_weight, v_weight = torch.chunk(qkv_weight, 3)
                        if q_weight.size()[1] == w_q_up.size()[0]:
                            q_weight = merge_weights(q_lora_info, q_weight, w_q_up, w_q_dw)
                            k_weight = merge_weights(k_lora_info, k_weight, w_k_up, w_k_dw)
                            v_weight = merge_weights(v_lora_info, v_weight, w_v_up, w_v_dw)
                            qkv_weight = torch.cat([q_weight, k_weight, v_weight])

                            out_weight = merge_weights(out_lora_info, out_weight, w_out_up, w_out_dw)

                            sd["in_proj_weight"] = qkv_weight.to(dev)
                            sd["out_proj.weight"] = out_weight.to(dev)

                            lora_info.module.load_state_dict(sd)
                        else:
                            # different dim, version mismatch
                            print(f"shape of weight is different: {module_name}. SD version may be different")

                        # merged weights must not also be loaded via load_state_dict
                        for t in ["q", "k", "v", "out"]:
                            del state_dict[f"{module_name}_{t}_proj.lora_down.weight"]
                            del state_dict[f"{module_name}_{t}_proj.lora_up.weight"]
                            alpha_key = f"{module_name}_{t}_proj.alpha"
                            if alpha_key in state_dict:
                                del state_dict[alpha_key]
                    else:
                        # corresponding weight not exists: version mismatch
                        pass

        # conversion 2nd step: convert weight's shape (and handle wrapped)
        state_dict = self.convert_state_dict_shape_to_compvis(state_dict)

        return state_dict
|
547 |
+
|
548 |
+
    def convert_state_dict_shape_to_compvis(self, state_dict):
        """Reshape 1x1-conv vs linear LoRA weights to this network's shapes
        and strip the '_wrapped_' key segment when this model doesn't use it."""
        # shape conversion
        current_sd = self.state_dict()  # to get target shape
        wrapped = False
        count = 0
        for key in list(state_dict.keys()):
            if key not in current_sd:
                continue  # might be error or another version
            if "wrapped" in key:
                wrapped = True

            value: torch.Tensor = state_dict[key]
            if value.size() != current_sd[key].size():
                # print(f"convert weights shape: {key}, from: {value.size()}, {len(value.size())}")
                count += 1
                # 4-D (conv 1x1) <-> 2-D (linear) conversion
                if len(value.size()) == 4:
                    value = value.squeeze(3).squeeze(2)
                else:
                    value = value.unsqueeze(2).unsqueeze(3)
                state_dict[key] = value
            # still mismatched after conversion: drop the weight entirely
            if tuple(value.size()) != tuple(current_sd[key].size()):
                print(
                    f"weight's shape is different: {key} expected {current_sd[key].size()} found {value.size()}. SD version may be different"
                )
                del state_dict[key]
        print(f"shapes for {count} weights are converted.")

        # convert wrapped
        if not wrapped:
            print("remove 'wrapped' from keys")
            for key in list(state_dict.keys()):
                if "_wrapped_" in key:
                    new_key = key.replace("_wrapped_", "_")
                    state_dict[new_key] = state_dict[key]
                    del state_dict[key]

        return state_dict
|
585 |
+
|
586 |
+
    def set_mask(self, mask, height=None, width=None, hr_height=None, hr_width=None):
        """Set (or clear, with mask=None) the regional-LoRA mask.

        Builds a dict of the mask resized to every latent resolution the
        U-Net will see (4 downsampling levels for the base and hires sizes)
        keyed by spatial area, and hands it to each U-Net LoRA module.
        """
        if mask is None:
            # clear latest mask
            # print("clear mask")
            self.latest_mask_info = None
            for lora in self.unet_loras:
                lora.set_mask_dic(None)
            return

        # check mask image and h/w are same; skip rebuilding if unchanged
        if (
            self.latest_mask_info is not None
            and torch.equal(mask, self.latest_mask_info[0])
            and (height, width, hr_height, hr_width) == self.latest_mask_info[1:]
        ):
            # print("mask not changed")
            return

        self.latest_mask_info = (mask, height, width, hr_height, hr_width)

        org_dtype = mask.dtype
        if mask.dtype == torch.bfloat16:
            mask = mask.to(torch.float)

        mask_dic = {}
        mask = mask.unsqueeze(0).unsqueeze(1)  # b(1),c(1),h,w

        def resize_add(mh, mw):
            # print(mh, mw, mh * mw)
            m = torch.nn.functional.interpolate(mask, (mh, mw), mode="bilinear")  # doesn't work in bf16
            m = m.to(org_dtype)
            mask_dic[mh * mw] = m

        for h, w in [(height, width), (hr_height, hr_width)]:
            if not h or not w:
                continue

            # latent space is 1/8 of pixel space
            h = h // 8
            w = w // 8
            for i in range(4):
                resize_add(h, w)
                if h % 2 == 1 or w % 2 == 1:  # add extra shape if h/w is not divisible by 2
                    resize_add(h + h % 2, w + w % 2)
                h = (h + 1) // 2
                w = (w + 1) // 2

        for lora in self.unet_loras:
            lora.set_mask_dic(mask_dic)
        return
|
extensions/addtional/scripts/metadata_editor.py
ADDED
@@ -0,0 +1,622 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import json
|
3 |
+
import sys
|
4 |
+
import io
|
5 |
+
import base64
|
6 |
+
import platform
|
7 |
+
import subprocess as sp
|
8 |
+
from PIL import PngImagePlugin, Image
|
9 |
+
|
10 |
+
from modules import shared
|
11 |
+
import gradio as gr
|
12 |
+
|
13 |
+
import modules.ui
|
14 |
+
from modules.ui_components import ToolButton
|
15 |
+
import modules.extras
|
16 |
+
import modules.generation_parameters_copypaste as parameters_copypaste
|
17 |
+
|
18 |
+
from scripts import safetensors_hack, model_util
|
19 |
+
from scripts.model_util import MAX_MODEL_COUNT
|
20 |
+
|
21 |
+
|
22 |
+
folder_symbol = "\U0001f4c2"  # 📂
# Keycap emoji "1️⃣".."9️⃣" are digit + U+FE0F (variation selector-16) + U+20E3
# (combining enclosing keycap). "🔟" is the single code point U+1F51F, which
# requires the 8-digit \U escape: the original "\u1f51f" parsed as U+1F51
# followed by a literal "f", rendering a wrong symbol on the tenth button.
keycap_symbols = [f"{d}\ufe0f\u20e3" for d in "123456789"] + ["\U0001f51f"]
|
35 |
+
|
36 |
+
|
37 |
+
def write_webui_model_preview_image(model_path, image):
    """Save *image* as <model>.png beside the model, preserving its text metadata."""
    preview_path = os.path.splitext(model_path)[0] + ".png"

    # Carry over any text-only metadata chunks (e.g. generation parameters)
    info = PngImagePlugin.PngInfo()
    has_text = False
    for key, value in image.info.items():
        if isinstance(key, str) and isinstance(value, str):
            info.add_text(key, value)
            has_text = True

    image.save(preview_path, "PNG", pnginfo=(info if has_text else None))
|
50 |
+
|
51 |
+
|
52 |
+
def delete_webui_model_preview_image(model_path):
    """Remove any webui preview image (<base>.preview.png / <base>.png) for a model."""
    base = os.path.splitext(model_path)[0]
    for candidate in (f"{base}.preview.png", f"{base}.png"):
        if os.path.isfile(candidate):
            os.unlink(candidate)
|
59 |
+
|
60 |
+
|
61 |
+
def decode_base64_to_pil(encoding):
    """Decode a base64 string (optionally a data-URI) into a PIL image."""
    if encoding.startswith("data:image/"):
        # strip the "data:image/...;base64," prefix
        encoding = encoding.split(";")[1].split(",")[1]
    raw = base64.b64decode(encoding)
    return Image.open(io.BytesIO(raw))
|
65 |
+
|
66 |
+
|
67 |
+
def encode_pil_to_base64(image):
    """Encode a PIL image as base64 PNG bytes, preserving its text metadata."""
    # Copy any text-only metadata chunks
    info = PngImagePlugin.PngInfo()
    has_text = False
    for key, value in image.info.items():
        if isinstance(key, str) and isinstance(value, str):
            info.add_text(key, value)
            has_text = True

    with io.BytesIO() as buffer:
        image.save(buffer, "PNG", pnginfo=(info if has_text else None))
        return base64.b64encode(buffer.getvalue())
|
80 |
+
|
81 |
+
|
82 |
+
def open_folder(f):
    # Open directory `f` in the platform's file browser; refuses non-directories
    # and is a no-op when the UI hides directory config (--hide-ui-dir-config).
    if not os.path.exists(f):
        print(f'Folder "{f}" does not exist. After you create an image, the folder will be created.')
        return
    elif not os.path.isdir(f):
        print(
            f"""
WARNING
An open_folder request was made with an argument that is not a folder.
This could be an error or a malicious attempt to run code on your computer.
Requested path was: {f}
""",
            file=sys.stderr,
        )
        return

    if not shared.cmd_opts.hide_ui_dir_config:
        path = os.path.normpath(f)
        if platform.system() == "Windows":
            os.startfile(path)
        elif platform.system() == "Darwin":
            sp.Popen(["open", path])
        elif "microsoft-standard-WSL2" in platform.uname().release:
            # WSL2: hand off to the Windows side via wsl-open
            sp.Popen(["wsl-open", path])
        else:
            sp.Popen(["xdg-open", path])
|
108 |
+
|
109 |
+
|
110 |
+
def copy_metadata_to_all(module, model_path, copy_dir, same_session_only, missing_meta_only, cover_image):
    """
    Given a model with metadata, copies that metadata to all models in copy_dir.

    :str module: Module name ("LoRA")
    :str model: Model key in lora_models ("MyModel(123456abcdef)")
    :str copy_dir: Directory to copy to
    :bool same_session_only: Only copy to modules with the same ss_session_id
    :bool missing_meta_only: Only copy to modules that are missing user metadata
    :Optional[Image] cover_image: Cover image to embed in the file as base64
    :returns: gr.HTML.update()
    """
    # NOTE(review): `cover_image` is accepted but never used below — confirm
    # whether embedding it into the target models was intended.
    if model_path == "None":
        return "No model selected."

    if not os.path.isfile(model_path):
        return f"Model path not found: {model_path}"

    model_path = os.path.realpath(model_path)

    if os.path.splitext(model_path)[1] != ".safetensors":
        return "Model is not in .safetensors format."

    if not os.path.isdir(copy_dir):
        return "Please provide a directory containing models in .safetensors format."

    print(f"[MetadataEditor] Copying metadata to models in {copy_dir}.")
    metadata = model_util.read_model_metadata(model_path, module)
    count = 0
    for entry in os.scandir(copy_dir):
        if entry.is_file():
            path = os.path.realpath(os.path.join(copy_dir, entry.name))
            if path != model_path and model_util.is_safetensors(path):
                if same_session_only:
                    other_metadata = safetensors_hack.read_metadata(path)
                    # NOTE(review): missing_meta_only is only honored when
                    # same_session_only is also set — confirm this is intended.
                    if missing_meta_only and other_metadata.get("ssmd_display_name", "").strip():
                        print(f"[MetadataEditor] Skipping {path} as it already has metadata")
                        continue

                    session_id = metadata.get("ss_session_id", None)
                    other_session_id = other_metadata.get("ss_session_id", None)
                    if session_id is None or other_session_id is None or session_id != other_session_id:
                        continue

                # Start from blank user metadata, then overlay the source
                # model's ssmd_* fields; cover images are deliberately reset.
                updates = {
                    "ssmd_cover_images": "[]",
                    "ssmd_display_name": "",
                    "ssmd_version": "",
                    "ssmd_keywords": "",
                    "ssmd_author": "",
                    "ssmd_source": "",
                    "ssmd_description": "",
                    "ssmd_rating": "0",
                    "ssmd_tags": "",
                }

                for k, v in metadata.items():
                    if k.startswith("ssmd_") and k != "ssmd_cover_images":
                        updates[k] = v

                model_util.write_model_metadata(path, module, updates)
                count += 1

    print(f"[MetadataEditor] Updated {count} models in directory {copy_dir}.")
    return f"Updated {count} models in directory {copy_dir}."
|
175 |
+
|
176 |
+
|
177 |
+
def load_cover_image(model_path, metadata):
    """
    Load a cover image for a model, preferring one embedded in its metadata
    over a <base>.preview.png / <base>.png file beside the model file.
    """
    embedded = json.loads(metadata.get("ssmd_cover_images", "[]"))
    if embedded:
        print("[MetadataEditor] Loading embedded cover image.")
        return decode_base64_to_pil(embedded[0])

    base = os.path.splitext(model_path)[0]
    cover_image = None
    # No break on purpose: when both files exist, the plain .png (checked
    # last) is the one returned.
    for preview_path in (f"{base}.preview.png", f"{base}.png"):
        if os.path.isfile(preview_path):
            print(f"[MetadataEditor] Loading webui preview image: {preview_path}")
            cover_image = Image.open(preview_path)

    return cover_image
|
198 |
+
|
199 |
+
|
200 |
+
# Dummy value since gr.Dataframe cannot handle an empty list
# https://github.com/gradio-app/gradio/issues/3182
# Row shape matches the dataset-folders dataframe: [name, img_count, repeats, total]
unknown_folders = ["(Unknown)", 0, 0, 0]
|
203 |
+
|
204 |
+
|
205 |
+
def refresh_metadata(module, model_path):
    """
    Reads metadata from the model on disk and updates all Gradio components.

    Returns a 15-tuple matching the `outputs=` list of `model.change()` in
    setup_ui: (training params, cover image, display name, author, source,
    keywords, description, rating, tags, model hash, legacy hash, model path,
    containing directory, top tags, dataset folders).
    """

    def _blank_result(info=None):
        # Error/empty response clearing every component; `info`, when given,
        # is shown in the training-parameters JSON view. Replaces three
        # previously duplicated literal 15-tuples.
        params = {} if info is None else {"info": info}
        return (params, None, "", "", "", "", "", 0, "", "", "", "", "", {}, [unknown_folders])

    if model_path == "None":
        return _blank_result()

    if not os.path.isfile(model_path):
        return _blank_result(f"Model path not found: {model_path}")

    if os.path.splitext(model_path)[1] != ".safetensors":
        return _blank_result("Model is not in .safetensors format.")

    metadata = model_util.read_model_metadata(model_path, module)

    if metadata is None:
        training_params = {}
        metadata = {}
    else:
        training_params = {k: v for k, v in metadata.items() if k.startswith("ss_")}

    cover_image = load_cover_image(model_path, metadata)

    display_name = metadata.get("ssmd_display_name", "")
    author = metadata.get("ssmd_author", "")
    # version = metadata.get("ssmd_version", "")
    source = metadata.get("ssmd_source", "")
    keywords = metadata.get("ssmd_keywords", "")
    description = metadata.get("ssmd_description", "")
    rating = int(metadata.get("ssmd_rating", "0"))
    tags = metadata.get("ssmd_tags", "")
    # Prefer hashes embedded in the model; fall back to the on-disk hash cache.
    model_hash = metadata.get("sshs_model_hash", model_util.cache("hashes").get(model_path, {}).get("model", ""))
    legacy_hash = metadata.get("sshs_legacy_hash", model_util.cache("hashes").get(model_path, {}).get("legacy", ""))

    # Aggregate caption tag counts across all dataset dirs, normalized to
    # [0, 1] for display in the gr.Label component.
    top_tags = {}
    if "ss_tag_frequency" in training_params:
        tag_frequency = json.loads(training_params.pop("ss_tag_frequency"))
        for dir, frequencies in tag_frequency.items():
            for tag, count in frequencies.items():
                tag = tag.strip()
                top_tags[tag] = count + top_tags.get(tag, 0)
        if len(top_tags) > 0:
            top_tags = dict(sorted(top_tags.items(), key=lambda x: x[1], reverse=True))
            count_max = max(top_tags.values())
            top_tags = {k: float(v / count_max) for k, v in top_tags.items()}

    # Dataset folder rows: [name, image count, repeats, total images],
    # plus a "(Total)" summary row; falls back to the dummy row when absent.
    dataset_folders = []
    if "ss_dataset_dirs" in training_params:
        dataset_dirs = json.loads(training_params.pop("ss_dataset_dirs"))
        for dir, counts in dataset_dirs.items():
            img_count = int(counts["img_count"])
            n_repeats = int(counts["n_repeats"])
            dataset_folders.append([dir, img_count, n_repeats, img_count * n_repeats])
    if dataset_folders:
        dataset_folders.append(
            ["(Total)", sum(r[1] for r in dataset_folders), sum(r[2] for r in dataset_folders), sum(r[3] for r in dataset_folders)]
        )
    else:
        dataset_folders.append(unknown_folders)

    return (
        training_params,
        cover_image,
        display_name,
        author,
        source,
        keywords,
        description,
        rating,
        tags,
        model_hash,
        legacy_hash,
        model_path,
        os.path.dirname(model_path),
        top_tags,
        dataset_folders,
    )
|
317 |
+
|
318 |
+
|
319 |
+
def save_metadata(module, model_path, cover_image, display_name, author, source, keywords, description, rating, tags):
    """
    Writes metadata from the Gradio components to the model file

    Returns (status message, model hash, legacy hash) for the UI outputs.
    """
    if model_path == "None":
        return "No model selected.", "", ""

    if not os.path.isfile(model_path):
        return f"file not found: {model_path}", "", ""

    if os.path.splitext(model_path)[1] != ".safetensors":
        return "Model is not in .safetensors format", "", ""

    metadata = safetensors_hack.read_metadata(model_path)
    model_hash = safetensors_hack.hash_file(model_path)
    legacy_hash = model_util.get_legacy_hash(metadata, model_path)

    # TODO: Support multiple images
    # Blocked on gradio not having a gallery upload option
    # https://github.com/gradio-app/gradio/issues/1379
    cover_images = []
    if cover_image is not None:
        cover_images.append(encode_pil_to_base64(cover_image).decode("ascii"))

    # NOTE: User-specified metadata should NOT be prefixed with "ss_". This is
    # to maintain backwards compatibility with the old hashing method. "ss_"
    # should be used for training parameters that will never be manually
    # updated on the model.
    updates = {
        "ssmd_cover_images": json.dumps(cover_images),
        "ssmd_display_name": display_name,
        "ssmd_author": author,
        # "ssmd_version": version,
        "ssmd_source": source,
        "ssmd_keywords": keywords,
        "ssmd_description": description,
        "ssmd_rating": rating,
        "ssmd_tags": tags,
        "sshs_model_hash": model_hash,
        "sshs_legacy_hash": legacy_hash,
    }

    model_util.write_model_metadata(model_path, module, updates)
    # Keep the webui preview image on disk in sync with the embedded cover.
    if cover_image is None:
        delete_webui_model_preview_image(model_path)
    else:
        write_webui_model_preview_image(model_path, cover_image)

    model_name = os.path.basename(model_path)
    return f"Model saved: {model_name}", model_hash, legacy_hash
|
369 |
+
|
370 |
+
|
371 |
+
model_name_filter = ""
|
372 |
+
|
373 |
+
|
374 |
+
def get_filtered_model_paths(s):
    """Return ["None"] + model paths whose lowercased path contains *s* (all when *s* is empty)."""
    # Newer Gradio can surface None entries in the model list, so filter them out.
    if s:
        matches = [v for v in model_util.lora_models.values() if v and s in v.lower()]
    else:
        matches = [v for v in model_util.lora_models.values() if v]
    return ["None"] + matches
|
386 |
+
|
387 |
+
def get_filtered_model_paths_global():
    """Filtered model list using the module-level filter text."""
    # model_name_filter is only read here, so no `global` declaration is needed.
    return get_filtered_model_paths(model_name_filter)
|
390 |
+
|
391 |
+
|
392 |
+
def setup_ui(addnet_paste_params):
    """
    Build the metadata editor tab (three columns: model selection, metadata
    editing, cover image / training info) and wire up its event handlers.

    :dict addnet_paste_params: Dictionary of txt2img/img2img controls for each model weight slider,
                               for sending module and model to them from the metadata editor
    """
    # Components start read-only; toggled by the "Editing Enabled" checkbox.
    can_edit = False

    with gr.Row().style(equal_height=False):
        # Lefthand column
        with gr.Column(variant="panel"):
            # Module and model selector
            with gr.Row():
                model_filter = gr.Textbox("", label="Model path filter", placeholder="Filter models by path name")

                def update_model_filter(s):
                    # Track the filter text as the user types; it is applied
                    # when the model list is next rebuilt/refreshed.
                    global model_name_filter
                    model_name_filter = s.strip().lower()

                model_filter.change(update_model_filter, inputs=[model_filter], outputs=[])
            with gr.Row():
                module = gr.Dropdown(
                    ["LoRA"],
                    label="Network module",
                    value="LoRA",
                    interactive=True,
                    elem_id="additional_networks_metadata_editor_module",
                )
                model = gr.Dropdown(
                    get_filtered_model_paths_global(),
                    label="Model",
                    value="None",
                    interactive=True,
                    elem_id="additional_networks_metadata_editor_model",
                )
                modules.ui.create_refresh_button(
                    model, model_util.update_models, lambda: {"choices": get_filtered_model_paths_global()}, "refresh_lora_models"
                )

                def submit_model_filter(s):
                    # On Enter: apply the filter immediately and reset selection.
                    global model_name_filter
                    model_name_filter = s
                    paths = get_filtered_model_paths(s)
                    return gr.Dropdown.update(choices=paths, value="None")

                model_filter.submit(submit_model_filter, inputs=[model_filter], outputs=[model])

            # Model hashes and path
            with gr.Row():
                model_hash = gr.Textbox("", label="Model hash", interactive=False)
                legacy_hash = gr.Textbox("", label="Legacy hash", interactive=False)
            with gr.Row():
                model_path = gr.Textbox("", label="Model path", interactive=False)
                open_folder_button = ToolButton(
                    value=folder_symbol,
                    elem_id="hidden_element" if shared.cmd_opts.hide_ui_dir_config else "open_folder_metadata_editor",
                )

            # Send to txt2img/img2img buttons (one keycap button per model slot)
            for tabname in ["txt2img", "img2img"]:
                with gr.Row():
                    with gr.Box():
                        with gr.Row():
                            gr.HTML(f"Send to {tabname}:")
                            for i in range(MAX_MODEL_COUNT):
                                send_to_button = ToolButton(
                                    value=keycap_symbols[i], elem_id=f"additional_networks_send_to_{tabname}_{i}"
                                )
                                send_to_button.click(
                                    fn=lambda modu, mod: (modu, model_util.find_closest_lora_model_name(mod) or "None"),
                                    inputs=[module, model],
                                    outputs=[addnet_paste_params[tabname][i]["module"], addnet_paste_params[tabname][i]["model"]],
                                )
                                # Second handler switches the UI to the target tab via JS.
                                send_to_button.click(fn=None, _js=f"addnet_switch_to_{tabname}", inputs=None, outputs=None)

            # "Copy metadata to other models" panel
            with gr.Row():
                with gr.Column():
                    gr.HTML(value="Copy metadata to other models in directory")
                    copy_metadata_dir = gr.Textbox(
                        "",
                        label="Containing directory",
                        placeholder="All models in this directory will receive the selected model's metadata",
                    )
                    with gr.Row():
                        copy_same_session = gr.Checkbox(True, label="Only copy to models with same session ID")
                        copy_no_metadata = gr.Checkbox(True, label="Only copy to models with no metadata")
                    copy_metadata_button = gr.Button("Copy Metadata", variant="primary")

        # Center column, metadata viewer/editor
        with gr.Column():
            with gr.Row():
                display_name = gr.Textbox(value="", label="Name", placeholder="Display name for this model", interactive=can_edit)
                author = gr.Textbox(value="", label="Author", placeholder="Author of this model", interactive=can_edit)
            with gr.Row():
                keywords = gr.Textbox(
                    value="", label="Keywords", placeholder="Activation keywords, comma-separated", interactive=can_edit
                )
            with gr.Row():
                description = gr.Textbox(
                    value="",
                    label="Description",
                    placeholder="Model description/readme/notes/instructions",
                    lines=15,
                    interactive=can_edit,
                )
            with gr.Row():
                source = gr.Textbox(
                    value="", label="Source", placeholder="Source URL where this model could be found", interactive=can_edit
                )
            with gr.Row():
                rating = gr.Slider(minimum=0, maximum=10, step=1, label="Rating", value=0, interactive=can_edit)
                tags = gr.Textbox(
                    value="",
                    label="Tags",
                    placeholder='Comma-separated list of tags ("artist, style, character, 2d, 3d...")',
                    lines=2,
                    interactive=can_edit,
                )
            with gr.Row():
                editing_enabled = gr.Checkbox(label="Editing Enabled", value=can_edit)
            with gr.Row():
                save_metadata_button = gr.Button("Save Metadata", variant="primary", interactive=can_edit)
            with gr.Row():
                save_output = gr.HTML("")

        # Righthand column, cover image and training parameters view
        with gr.Column():
            # Cover image
            with gr.Row():
                cover_image = gr.Image(
                    label="Cover image",
                    elem_id="additional_networks_cover_image",
                    source="upload",
                    interactive=can_edit,
                    type="pil",
                    image_mode="RGBA",
                ).style(height=480)

            # Image parameters
            with gr.Accordion("Image Parameters", open=False):
                with gr.Row():
                    info2 = gr.HTML()
                with gr.Row():
                    try:
                        send_to_buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"])
                    except:
                        # NOTE(review): bare except silently skips the copy-paste
                        # buttons — presumably for webui versions lacking this
                        # API; confirm and narrow if possible.
                        pass

            # Training info, below cover image
            with gr.Accordion("Training info", open=False):
                # Top tags used
                with gr.Row():
                    max_top_tags = int(shared.opts.data.get("additional_networks_max_top_tags", 20))
                    most_frequent_tags = gr.Label(value={}, label="Most frequent tags in captions", num_top_classes=max_top_tags)

                # Dataset folders
                with gr.Row():
                    max_dataset_folders = int(shared.opts.data.get("additional_networks_max_dataset_folders", 20))
                    dataset_folders = gr.Dataframe(
                        headers=["Name", "Image Count", "Repeats", "Total Images"],
                        datatype=["str", "number", "number", "number"],
                        label="Dataset folder structure",
                        max_rows=max_dataset_folders,
                        col_count=(4, "fixed"),
                    )

                # Training Parameters
                with gr.Row():
                    metadata_view = gr.JSON(value={}, label="Training parameters")

            # Hidden/internal components used by the pnginfo round-trip below
            with gr.Row(visible=False):
                info1 = gr.HTML()
                img_file_info = gr.Textbox(label="Generate Info", interactive=False, lines=6)

    open_folder_button.click(fn=lambda p: open_folder(os.path.dirname(p)), inputs=[model_path], outputs=[])
    copy_metadata_button.click(
        fn=copy_metadata_to_all,
        inputs=[module, model, copy_metadata_dir, copy_same_session, copy_no_metadata, cover_image],
        outputs=[save_output],
    )

    def update_editing(enabled):
        """
        Enable/disable components based on "Editing Enabled" status
        """
        # Order matches the `outputs=` list of editing_enabled.change() below:
        # 6 textboxes, then image, slider, save button.
        updates = [gr.Textbox.update(interactive=enabled)] * 6
        updates.append(gr.Image.update(interactive=enabled))
        updates.append(gr.Slider.update(interactive=enabled))
        updates.append(gr.Button.update(interactive=enabled))
        return updates

    editing_enabled.change(
        fn=update_editing,
        inputs=[editing_enabled],
        outputs=[display_name, author, source, keywords, description, tags, cover_image, rating, save_metadata_button],
    )

    # Extract generation parameters from an uploaded cover image.
    cover_image.change(fn=modules.extras.run_pnginfo, inputs=[cover_image], outputs=[info1, img_file_info, info2])

    try:
        parameters_copypaste.bind_buttons(send_to_buttons, cover_image, img_file_info)
    except:
        # NOTE(review): mirrors the create_buttons guard above — skipped when
        # the webui's copy-paste API is unavailable.
        pass

    model.change(
        refresh_metadata,
        inputs=[module, model],
        outputs=[
            metadata_view,
            cover_image,
            display_name,
            author,
            source,
            keywords,
            description,
            rating,
            tags,
            model_hash,
            legacy_hash,
            model_path,
            copy_metadata_dir,
            most_frequent_tags,
            dataset_folders,
        ],
    )
    save_metadata_button.click(
        save_metadata,
        inputs=[module, model, cover_image, display_name, author, source, keywords, description, rating, tags],
        outputs=[save_output, model_hash, legacy_hash],
    )
|
extensions/addtional/scripts/model_util.py
ADDED
@@ -0,0 +1,338 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import os.path
|
3 |
+
import re
|
4 |
+
import shutil
|
5 |
+
import json
|
6 |
+
import stat
|
7 |
+
import tqdm
|
8 |
+
from collections import OrderedDict
|
9 |
+
from multiprocessing.pool import ThreadPool as Pool
|
10 |
+
|
11 |
+
from modules import shared, sd_models, hashes
|
12 |
+
from scripts import safetensors_hack, model_util, util
|
13 |
+
import modules.scripts as scripts
|
14 |
+
|
15 |
+
|
16 |
+
# Maximum number of extra-network slots shown in the UI; overridable via the
# webui's addnet_max_model_count commandline option when present.
MAX_MODEL_COUNT = getattr(shared.cmd_opts, "addnet_max_model_count", 5)
LORA_MODEL_EXTS = [".pt", ".ckpt", ".safetensors"]
# Matches legacy 8-character hashes like "(0123abcd)"; new hashes have 12
# characters. Raw string fixes the invalid "\(" escape in the original literal.
re_legacy_hash = re.compile(r"\(([0-9a-f]{8})\)$")
lora_models = {}  # "My_Lora(abcdef123456)" -> "C:/path/to/model.safetensors"
lora_model_names = {}  # "my_lora" -> "My_Lora(abcdef123456)"
legacy_model_names = {}
lora_models_dir = os.path.join(scripts.basedir(), "models/lora")
os.makedirs(lora_models_dir, exist_ok=True)
|
25 |
+
|
26 |
+
|
27 |
+
def is_safetensors(filename):
    """True when *filename* has a .safetensors extension."""
    _, ext = os.path.splitext(filename)
    return ext == ".safetensors"
|
29 |
+
|
30 |
+
|
31 |
+
def read_model_metadata(model_path, module):
    """Return safetensors metadata for a LoRA model, or None when missing/unsupported."""
    if model_path.startswith('"') and model_path.endswith('"'):  # trim '"' at start/end
        model_path = model_path[1:-1]
    if not os.path.exists(model_path):
        return None

    if module == "LoRA" and os.path.splitext(model_path)[1] == ".safetensors":
        return safetensors_hack.read_metadata(model_path)
    return None
|
43 |
+
|
44 |
+
|
45 |
+
def write_model_metadata(model_path, module, updates):
    """
    Merge *updates* (stringified) into the model's safetensors metadata and
    rewrite the file, backing it up first when the option is enabled.
    Returns None without writing when the path/module is unsupported.
    """
    if model_path.startswith('"') and model_path.endswith('"'):  # trim '"' at start/end
        model_path = model_path[1:-1]
    if not os.path.exists(model_path):
        return None

    from safetensors.torch import save_file

    back_up = shared.opts.data.get("additional_networks_back_up_model_when_saving", True)
    if back_up:
        backup_path = model_path + ".backup"
        if not os.path.exists(backup_path):
            print(f"[MetadataEditor] Backing up current model to {backup_path}")
            shutil.copyfile(model_path, backup_path)

    metadata = None
    tensors = {}
    if module == "LoRA":
        if os.path.splitext(model_path)[1] == ".safetensors":
            tensors, metadata = safetensors_hack.load_file(model_path, "cpu")

    if metadata is None:
        # Fix: previously an unsupported module/extension left metadata as
        # None, so `metadata[k] = ...` raised TypeError and, had it not, the
        # model would have been overwritten with empty tensors.
        print(f"[MetadataEditor] Cannot write metadata, unsupported model: {model_path}")
        return None

    for k, v in updates.items():
        metadata[k] = str(v)

    save_file(tensors, model_path, metadata)
    print(f"[MetadataEditor] Model saved: {model_path}")
|
71 |
+
|
72 |
+
|
73 |
+
def get_model_list(module, model, model_dir, sort_by):
    # Return model names found in `model_dir` — or, when blank, in the
    # directory of the currently-selected `model` — sorted per `sort_by`.
    # NOTE(review): `module` is unused here; presumably kept for interface
    # parity with other helpers — confirm.
    if model_dir == "":
        # Get list of models with same folder as this one
        model_path = lora_models.get(model, None)
        if model_path is None:
            return []
        model_dir = os.path.dirname(model_path)

    if not os.path.isdir(model_dir):
        return []

    found, _ = get_all_models([model_dir], sort_by, "")
    return list(found.keys())  # convert dict_keys to list
|
86 |
+
|
87 |
+
|
88 |
+
def traverse_all_files(curr_path, model_list):
    """Recursively collect (path, stat_result) pairs of model files.

    Files whose extension is in LORA_MODEL_EXTS are appended to
    `model_list`; directories are descended into. Returns `model_list`.
    """
    entries = [(os.path.join(curr_path, entry.name), entry.stat()) for entry in os.scandir(curr_path)]
    for entry_info in entries:
        entry_path, entry_stat = entry_info
        if os.path.splitext(entry_path)[1] in LORA_MODEL_EXTS:
            model_list.append(entry_info)
        elif stat.S_ISDIR(entry_stat.st_mode):
            model_list = traverse_all_files(entry_path, model_list)
    return model_list
|
98 |
+
|
99 |
+
def get_model_hash(metadata, filename):
    """Return the new-style (sha256 over tensor data) hash for a model.

    Prefers a hash precomputed in the file's metadata; falls back to the
    webui's full-file sha256 when no metadata exists at all.
    """
    if metadata is None:
        return hashes.calculate_sha256(filename)

    try:
        # sd-scripts embeds the hash at training time; trust it when present.
        return metadata["sshs_model_hash"]
    except KeyError:
        return safetensors_hack.hash_file(filename)
108 |
+
|
109 |
+
def get_legacy_hash(metadata, filename):
    """Return the legacy (8-character) hash for a model.

    Prefers a hash precomputed in the file's metadata; falls back to the
    webui's legacy model_hash when no metadata exists at all.
    """
    if metadata is None:
        return sd_models.model_hash(filename)

    try:
        # sd-scripts embeds the legacy hash at training time; trust it.
        return metadata["sshs_legacy_hash"]
    except KeyError:
        return safetensors_hack.legacy_hash_file(filename)
118 |
+
|
119 |
+
import filelock
|
120 |
+
|
121 |
+
# On-disk location of the hash cache and its lazily-loaded in-memory copy.
cache_filename = os.path.join(scripts.basedir(), "hashes.json")
cache_data = None


def cache(subsection):
    """Return the mutable cache dict for `subsection`.

    Loads hashes.json lazily (under a file lock) the first time any
    subsection is requested; missing subsections are created empty. The
    returned dict is shared state — mutations are persisted by dump_cache().
    """
    global cache_data

    if cache_data is None:
        with filelock.FileLock(cache_filename + ".lock"):
            if not os.path.isfile(cache_filename):
                cache_data = {}
            else:
                with open(cache_filename, "r", encoding="utf8") as file:
                    cache_data = json.load(file)

    section = cache_data.get(subsection, {})
    cache_data[subsection] = section
    return section
+
|
141 |
+
|
142 |
+
def dump_cache():
    """Persist the in-memory hash cache to hashes.json under a file lock."""
    with filelock.FileLock(cache_filename + ".lock"), open(cache_filename, "w", encoding="utf8") as file:
        json.dump(cache_data, file, indent=4)
|
147 |
+
|
148 |
+
def get_model_rating(filename):
    """Return the user rating stored in a .safetensors file's metadata.

    Non-safetensors files (which cannot carry metadata) rate as 0.
    """
    if not model_util.is_safetensors(filename):
        return 0

    rating_str = safetensors_hack.read_metadata(filename).get("ssmd_rating", "0")
    return int(rating_str)
|
155 |
+
|
156 |
+
def has_user_metadata(filename):
    """Return True if the .safetensors file carries any user-editable
    (ssmd_-prefixed) metadata keys; False for non-safetensors files."""
    if not model_util.is_safetensors(filename):
        return False

    metadata = safetensors_hack.read_metadata(filename)
    return any(key.startswith("ssmd_") for key in metadata)
|
163 |
+
|
164 |
+
def hash_model_file(finfo):
    """Compute (or fetch from cache) both hashes for one model file.

    Parameters:
        finfo: (filename, os.stat_result) pair as produced by
            traverse_all_files().

    Returns {"model": ..., "legacy": ..., "fileinfo": finfo} on success,
    or {"error": exc, "filename": ...} when metadata cannot be read.
    Designed to run in a worker pool (see get_all_models).
    """
    filename, file_stat = finfo  # renamed from `stat` to avoid shadowing the stat module
    name = os.path.splitext(os.path.basename(filename))[0]

    # Bug fix: the original only initialized `metadata` inside
    # `if name != "None":` yet used it unconditionally below, so a file
    # actually named "None.*" raised NameError. "None" entries are filtered
    # out later in get_all_models, so we simply hash unconditionally here.
    metadata = None

    cached = cache("hashes").get(filename, None)
    if cached is None or file_stat.st_mtime != cached["mtime"]:
        # Cache miss or the file changed on disk: recompute.
        if model_util.is_safetensors(filename):
            try:
                metadata = safetensors_hack.read_metadata(filename)
            except Exception as ex:
                return {"error": ex, "filename": filename}
        model_hash = get_model_hash(metadata, filename)
        legacy_hash = get_legacy_hash(metadata, filename)
    else:
        model_hash = cached["model"]
        legacy_hash = cached["legacy"]

    return {"model": model_hash, "legacy": legacy_hash, "fileinfo": finfo}
|
188 |
+
|
189 |
+
def get_all_models(paths, sort_by, filter_by):
    """Scan `paths` for model files, hash them, and build lookup tables.

    Parameters:
        paths: directories to scan recursively.
        sort_by: one of "name", "date", "path name", "rating",
            "has user metadata".
        filter_by: substring filter on the file's base name (case-insensitive).

    Returns (res, res_legacy):
        res: OrderedDict mapping "name(hash12)" display keys -> file paths.
        res_legacy: OrderedDict mapping legacy hashes -> display keys.
    Also updates the on-disk hash cache entries as a side effect.
    """
    fileinfos = []
    for path in paths:
        if os.path.isdir(path):
            fileinfos += traverse_all_files(path, [])

    show_only_safetensors = shared.opts.data.get("additional_networks_show_only_safetensors", False)
    show_only_missing_meta = shared.opts.data.get("additional_networks_show_only_models_with_metadata", "disabled")

    if show_only_safetensors:
        fileinfos = [x for x in fileinfos if is_safetensors(x[0])]

    if show_only_missing_meta == "has metadata":
        fileinfos = [x for x in fileinfos if has_user_metadata(x[0])]
    elif show_only_missing_meta == "missing metadata":
        fileinfos = [x for x in fileinfos if not has_user_metadata(x[0])]

    print("[AddNet] Updating model hashes...")
    data = []
    thread_count = max(1, int(shared.opts.data.get("additional_networks_hash_thread_count", 1)))
    p = Pool(processes=thread_count)
    try:
        with tqdm.tqdm(total=len(fileinfos)) as pbar:
            for res in p.imap_unordered(hash_model_file, fileinfos):
                pbar.update()
                if "error" in res:
                    print(f"Failed to read model file {res['filename']}: {res['error']}")
                else:
                    data.append(res)
    finally:
        # Bug fix: previously the pool leaked if hashing raised mid-loop.
        p.close()

    cache_hashes = cache("hashes")

    res = OrderedDict()
    res_legacy = OrderedDict()
    filter_by = filter_by.strip(" ")
    if len(filter_by) != 0:
        data = [x for x in data if filter_by.lower() in os.path.basename(x["fileinfo"][0]).lower()]
    if sort_by == "name":
        data = sorted(data, key=lambda x: os.path.basename(x["fileinfo"][0]))
    elif sort_by == "date":
        data = sorted(data, key=lambda x: -x["fileinfo"][1].st_mtime)
    elif sort_by == "path name":
        data = sorted(data, key=lambda x: x["fileinfo"][0])
    elif sort_by == "rating":
        data = sorted(data, key=lambda x: get_model_rating(x["fileinfo"][0]), reverse=True)
    elif sort_by == "has user metadata":
        data = sorted(
            data, key=lambda x: os.path.basename(x["fileinfo"][0]) if has_user_metadata(x["fileinfo"][0]) else "", reverse=True
        )

    reverse = shared.opts.data.get("additional_networks_reverse_sort_order", False)
    if reverse:
        data = reversed(data)

    for result in data:
        finfo = result["fileinfo"]
        filename = finfo[0]
        file_stat = finfo[1]
        model_hash = result["model"]
        legacy_hash = result["legacy"]

        name = os.path.splitext(os.path.basename(filename))[0]

        # Commas in the model name will mess up infotext restoration since the
        # infotext is delimited by commas
        name = name.replace(",", "_")

        # Prevent a hypothetical "None.pt" from being listed.
        if name != "None":
            full_name = name + f"({model_hash[0:12]})"
            res[full_name] = filename
            res_legacy[legacy_hash] = full_name
            cache_hashes[filename] = {"model": model_hash, "legacy": legacy_hash, "mtime": file_stat.st_mtime}

    return res, res_legacy
|
265 |
+
|
266 |
+
def find_closest_lora_model_name(search: str):
    """Resolve a user-supplied model reference to a known model key.

    Tries, in order: exact key match, exact file-path match, case-insensitive
    base-name match, legacy 8-character hash match, and finally the shortest
    known name containing the search term. Returns None when nothing matches
    (or for empty / "None" input).
    """
    if not search or search == "None":
        return None

    # Match name and hash, case-sensitive
    # "MyModel-epoch00002(abcdef123456)"
    if search in lora_models:
        return search

    # Match model path, case-sensitive (from metadata editor)
    # "C:/path/to/mymodel-epoch00002.safetensors"
    # (removed a dead `import json` that was left here by mistake)
    if os.path.isfile(search):
        find = os.path.normpath(search)
        value = next((k for k in lora_models.keys() if lora_models[k] == find), None)
        if value:
            return value

    search = search.lower()

    # Match full name, case-insensitive
    # "mymodel-epoch00002"
    if search in lora_model_names:
        return lora_model_names.get(search)

    # Match legacy hash (8 characters)
    # "MyModel(abcd1234)"
    result = re_legacy_hash.search(search)
    if result is not None:
        model_hash = result.group(1)
        if model_hash in legacy_model_names:
            return legacy_model_names[model_hash]

    # Use any model with the search term as the prefix, case-insensitive,
    # sorted by name length so the shortest (closest) name wins
    # "mymodel"
    applicable = [name for name in lora_model_names.keys() if search in name.lower()]
    if not applicable:
        return None
    applicable = sorted(applicable, key=lambda name: len(name))
    return lora_model_names[applicable[0]]
|
310 |
+
|
311 |
+
def update_models():
    """Rescan all LoRA model directories and rebuild the global lookup maps.

    Refreshes `lora_models` (display key -> path), `lora_model_names`
    (lowercase base name -> display key) and `legacy_model_names`
    (legacy hash -> display key), then persists the hash cache.
    Extra directories come from the "additional_networks_extra_lora_path"
    option (a comma-separated list).
    """
    global lora_models, lora_model_names, legacy_model_names
    paths = [lora_models_dir]
    extra_lora_paths = util.split_path_list(shared.opts.data.get("additional_networks_extra_lora_path", ""))
    for path in extra_lora_paths:
        path = path.lstrip()
        if os.path.isdir(path):
            paths.append(path)

    sort_by = shared.opts.data.get("additional_networks_sort_models_by", "name")
    filter_by = shared.opts.data.get("additional_networks_model_name_filter", "")
    res, res_legacy = get_all_models(paths, sort_by, filter_by)

    lora_models.clear()
    lora_models["None"] = None
    lora_models.update(res)

    # Bug fix: rebuild the lowercase-name lookup from scratch; it was never
    # cleared before, so entries for deleted/renamed models lingered forever.
    lora_model_names.clear()
    for name_and_hash, filename in lora_models.items():
        if filename is None:  # the synthetic "None" entry
            continue
        name = os.path.splitext(os.path.basename(filename))[0].lower()
        lora_model_names[name] = name_and_hash

    legacy_model_names = res_legacy
    dump_cache()


# Populate the model maps once at import time.
update_models()
|
extensions/addtional/scripts/safetensors_hack.py
ADDED
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import io
|
2 |
+
import os
|
3 |
+
import mmap
|
4 |
+
import torch
|
5 |
+
import json
|
6 |
+
import hashlib
|
7 |
+
import safetensors
|
8 |
+
import safetensors.torch
|
9 |
+
|
10 |
+
from modules import sd_models
|
11 |
+
|
12 |
+
# PyTorch 1.13 and later have _UntypedStorage renamed to UntypedStorage
|
13 |
+
UntypedStorage = torch.storage.UntypedStorage if hasattr(torch.storage, 'UntypedStorage') else torch.storage._UntypedStorage
|
14 |
+
|
15 |
+
def read_metadata(filename):
    """Read the JSON metadata header of a .safetensors file.

    The file starts with an 8-byte little-endian header length followed by a
    JSON header; the user metadata lives under its "__metadata__" key.
    Returns that dict, or an empty dict when absent.
    """
    with open(filename, mode="r", encoding="utf8") as file_obj:
        # mmap only needs the file descriptor; nothing is read through the
        # text-mode file object itself.
        with mmap.mmap(file_obj.fileno(), length=0, access=mmap.ACCESS_READ) as m:
            header_len = int.from_bytes(m.read(8), "little")
            header = json.loads(m.read(header_len))

    return header.get("__metadata__", {})
|
26 |
+
|
27 |
+
def load_file(filename, device):
    """Load a .safetensors file without a lingering memory map locking it.

    Works around safetensors issue:
    https://github.com/huggingface/safetensors/issues/164

    Returns (tensors_dict, user_metadata_dict). The `device` parameter is
    kept for interface compatibility; tensors are materialized via
    create_tensor().
    """
    # Parse the header (8-byte length prefix + JSON) via a short-lived mmap.
    with open(filename, mode="r", encoding="utf8") as file_obj:
        with mmap.mmap(file_obj.fileno(), length=0, access=mmap.ACCESS_READ) as m:
            header_len = int.from_bytes(m.read(8), "little")
            header = json.loads(m.read(header_len))

    file_size = os.stat(filename).st_size
    storage = UntypedStorage.from_file(filename, False, file_size)
    data_start = header_len + 8
    user_metadata = header.get("__metadata__", {})
    tensors = {
        name: create_tensor(storage, info, data_start)
        for name, info in header.items()
        if name != "__metadata__"
    }
    return tensors, user_metadata
|
43 |
+
|
44 |
+
def hash_file(filename):
    """Hash a .safetensors file with the new scheme.

    Only the tensor-data region is hashed (header and metadata excluded),
    so editing user metadata does not change the hash. Returns the full
    sha256 hex digest.
    """
    digest = hashlib.sha256()
    chunk_size = 1024 * 1024

    # Determine where the tensor data starts: 8-byte length prefix + header.
    with open(filename, mode="r", encoding="utf8") as file_obj:
        with mmap.mmap(file_obj.fileno(), length=0, access=mmap.ACCESS_READ) as m:
            header_len = int.from_bytes(m.read(8), "little")

    with open(filename, mode="rb") as file_obj:
        file_obj.seek(header_len + 8)
        while True:
            chunk = file_obj.read(chunk_size)
            if not chunk:
                break
            digest.update(chunk)

    return digest.hexdigest()
|
63 |
+
|
64 |
+
def legacy_hash_file(filename):
    """Hash a model file using the legacy `sd_models.model_hash()` scheme."""
    metadata = read_metadata(filename)

    # For compatibility with legacy models: replicate sd_models.model_hash as
    # if there were no user-specified metadata in the .safetensors file,
    # leaving only the immutable training parameters. The hash must not cover
    # embedded user metadata, or it would change whenever the user edits the
    # name/description/etc. (The new hashing method avoids this entirely by
    # hashing only the tensor region of the file.)
    if any(not k.startswith("ss_") for k in metadata):
        # Strip the user metadata, re-serialize the file as if it were freshly
        # created from sd-scripts, and hash that with model_hash's behavior.
        tensors, full_meta = load_file(filename, "cpu")
        training_meta = {k: v for k, v in full_meta.items() if k.startswith("ss_")}
        model_bytes = safetensors.torch.save(tensors, training_meta)

        digest = hashlib.sha256()
        digest.update(model_bytes[0x100000:0x110000])
        return digest.hexdigest()[0:8]

    # No user metadata present: the on-disk bytes already match what the
    # legacy hashing method expects (it predates the user-metadata system).
    return sd_models.model_hash(filename)
92 |
+
|
93 |
+
# Mapping from safetensors dtype strings to torch dtypes. Unsigned 16/32/64
# entries stay commented out: torch has no such dtypes (beyond uint8).
DTYPES = {
    "F64": torch.float64,
    "F32": torch.float32,
    "F16": torch.float16,
    "BF16": torch.bfloat16,
    "I64": torch.int64,
    # "U64": torch.uint64,
    "I32": torch.int32,
    # "U32": torch.uint32,
    "I16": torch.int16,
    # "U16": torch.uint16,
    "I8": torch.int8,
    "U8": torch.uint8,
    "BOOL": torch.bool
}


def create_tensor(storage, info, offset):
    """Create a tensor without holding an open handle to the parent model file.

    `info` is a safetensors header entry: {"dtype", "shape", "data_offsets"};
    `offset` is where the tensor-data region starts within `storage`.
    The clone() detaches the result from the file-backed storage.
    """
    dtype = DTYPES[info["dtype"]]
    shape = info["shape"]
    start, stop = info["data_offsets"]
    raw = torch.asarray(storage[start + offset : stop + offset], dtype=torch.uint8)
    return raw.view(dtype=dtype).reshape(shape).clone().detach()
|
extensions/addtional/scripts/util.py
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import csv
|
2 |
+
from io import StringIO
|
3 |
+
from typing import List
|
4 |
+
|
5 |
+
def split_path_list(path_list: str) -> List[str]:
    """Split a comma-separated path string into a list of paths.

    Parsing goes through the csv module so paths containing commas can be
    protected with double quotes; each line of a multi-line input
    contributes its fields in order.
    """
    paths: List[str] = []
    with StringIO(path_list) as buffer:
        for row in csv.reader(buffer):
            paths.extend(row)
    return paths
|
extensions/addtional/style.css
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#additional_networks_cover_image,
|
2 |
+
#additional_networks_cover_image > .h-60,
|
3 |
+
#additional_networks_cover_image > .h-60 > div,
|
4 |
+
#additional_networks_cover_image > .h-60 > div > img
|
5 |
+
{
|
6 |
+
height: 480px !important;
|
7 |
+
max-height: 480px !important;
|
8 |
+
min-height: 480px !important;
|
9 |
+
}
|
extensions/adetailer/.github/ISSUE_TEMPLATE/bug_report.yaml
ADDED
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: Bug report
|
2 |
+
description: Create a report
|
3 |
+
title: "[Bug]: "
|
4 |
+
|
5 |
+
body:
|
6 |
+
- type: textarea
|
7 |
+
attributes:
|
8 |
+
label: Describe the bug
|
9 |
+
description: A clear and concise description of what the bug is.
|
10 |
+
|
11 |
+
- type: textarea
|
12 |
+
attributes:
|
13 |
+
label: Full console logs
|
14 |
+
description: |
|
15 |
+
The full console log of your terminal.
|
16 |
+
From `Python 3.10.*, Version: v1.*, Commit hash: *` to the end.
|
17 |
+
render: Shell
|
18 |
+
validations:
|
19 |
+
required: true
|
20 |
+
|
21 |
+
- type: textarea
|
22 |
+
attributes:
|
23 |
+
label: List of installed extensions
|
extensions/adetailer/.github/ISSUE_TEMPLATE/feature_request.md
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
name: Feature request
|
3 |
+
about: Suggest an idea for this project
|
4 |
+
title: ''
|
5 |
+
labels: ''
|
6 |
+
assignees: ''
|
7 |
+
|
8 |
+
---
|
9 |
+
|
10 |
+
**Is your feature request related to a problem? Please describe.**
|
11 |
+
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
12 |
+
|
13 |
+
**Describe the solution you'd like**
|
14 |
+
A clear and concise description of what you want to happen.
|
15 |
+
|
16 |
+
**Describe alternatives you've considered**
|
17 |
+
A clear and concise description of any alternative solutions or features you've considered.
|
18 |
+
|
19 |
+
**Additional context**
|
20 |
+
Add any other context or screenshots about the feature request here.
|
extensions/adetailer/.github/workflows/stale.yml
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: 'Close stale issues and PRs'
|
2 |
+
on:
|
3 |
+
schedule:
|
4 |
+
- cron: '30 1 * * *'
|
5 |
+
|
6 |
+
jobs:
|
7 |
+
stale:
|
8 |
+
runs-on: ubuntu-latest
|
9 |
+
steps:
|
10 |
+
- uses: actions/stale@v8
|
11 |
+
with:
|
12 |
+
days-before-stale: 30
|
13 |
+
days-before-close: 5
|
extensions/adetailer/.gitignore
ADDED
@@ -0,0 +1,196 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Created by https://www.toptal.com/developers/gitignore/api/python,visualstudiocode
|
2 |
+
# Edit at https://www.toptal.com/developers/gitignore?templates=python,visualstudiocode
|
3 |
+
|
4 |
+
### Python ###
|
5 |
+
# Byte-compiled / optimized / DLL files
|
6 |
+
__pycache__/
|
7 |
+
*.py[cod]
|
8 |
+
*$py.class
|
9 |
+
|
10 |
+
# C extensions
|
11 |
+
*.so
|
12 |
+
|
13 |
+
# Distribution / packaging
|
14 |
+
.Python
|
15 |
+
build/
|
16 |
+
develop-eggs/
|
17 |
+
dist/
|
18 |
+
downloads/
|
19 |
+
eggs/
|
20 |
+
.eggs/
|
21 |
+
lib/
|
22 |
+
lib64/
|
23 |
+
parts/
|
24 |
+
sdist/
|
25 |
+
var/
|
26 |
+
wheels/
|
27 |
+
share/python-wheels/
|
28 |
+
*.egg-info/
|
29 |
+
.installed.cfg
|
30 |
+
*.egg
|
31 |
+
MANIFEST
|
32 |
+
|
33 |
+
# PyInstaller
|
34 |
+
# Usually these files are written by a python script from a template
|
35 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
36 |
+
*.manifest
|
37 |
+
*.spec
|
38 |
+
|
39 |
+
# Installer logs
|
40 |
+
pip-log.txt
|
41 |
+
pip-delete-this-directory.txt
|
42 |
+
|
43 |
+
# Unit test / coverage reports
|
44 |
+
htmlcov/
|
45 |
+
.tox/
|
46 |
+
.nox/
|
47 |
+
.coverage
|
48 |
+
.coverage.*
|
49 |
+
.cache
|
50 |
+
nosetests.xml
|
51 |
+
coverage.xml
|
52 |
+
*.cover
|
53 |
+
*.py,cover
|
54 |
+
.hypothesis/
|
55 |
+
.pytest_cache/
|
56 |
+
cover/
|
57 |
+
|
58 |
+
# Translations
|
59 |
+
*.mo
|
60 |
+
*.pot
|
61 |
+
|
62 |
+
# Django stuff:
|
63 |
+
*.log
|
64 |
+
local_settings.py
|
65 |
+
db.sqlite3
|
66 |
+
db.sqlite3-journal
|
67 |
+
|
68 |
+
# Flask stuff:
|
69 |
+
instance/
|
70 |
+
.webassets-cache
|
71 |
+
|
72 |
+
# Scrapy stuff:
|
73 |
+
.scrapy
|
74 |
+
|
75 |
+
# Sphinx documentation
|
76 |
+
docs/_build/
|
77 |
+
|
78 |
+
# PyBuilder
|
79 |
+
.pybuilder/
|
80 |
+
target/
|
81 |
+
|
82 |
+
# Jupyter Notebook
|
83 |
+
.ipynb_checkpoints
|
84 |
+
|
85 |
+
# IPython
|
86 |
+
profile_default/
|
87 |
+
ipython_config.py
|
88 |
+
|
89 |
+
# pyenv
|
90 |
+
# For a library or package, you might want to ignore these files since the code is
|
91 |
+
# intended to run in multiple environments; otherwise, check them in:
|
92 |
+
# .python-version
|
93 |
+
|
94 |
+
# pipenv
|
95 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
96 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
97 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
98 |
+
# install all needed dependencies.
|
99 |
+
#Pipfile.lock
|
100 |
+
|
101 |
+
# poetry
|
102 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
103 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
104 |
+
# commonly ignored for libraries.
|
105 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
106 |
+
#poetry.lock
|
107 |
+
|
108 |
+
# pdm
|
109 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
110 |
+
#pdm.lock
|
111 |
+
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
112 |
+
# in version control.
|
113 |
+
# https://pdm.fming.dev/#use-with-ide
|
114 |
+
.pdm.toml
|
115 |
+
|
116 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
117 |
+
__pypackages__/
|
118 |
+
|
119 |
+
# Celery stuff
|
120 |
+
celerybeat-schedule
|
121 |
+
celerybeat.pid
|
122 |
+
|
123 |
+
# SageMath parsed files
|
124 |
+
*.sage.py
|
125 |
+
|
126 |
+
# Environments
|
127 |
+
.env
|
128 |
+
.venv
|
129 |
+
env/
|
130 |
+
venv/
|
131 |
+
ENV/
|
132 |
+
env.bak/
|
133 |
+
venv.bak/
|
134 |
+
|
135 |
+
# Spyder project settings
|
136 |
+
.spyderproject
|
137 |
+
.spyproject
|
138 |
+
|
139 |
+
# Rope project settings
|
140 |
+
.ropeproject
|
141 |
+
|
142 |
+
# mkdocs documentation
|
143 |
+
/site
|
144 |
+
|
145 |
+
# mypy
|
146 |
+
.mypy_cache/
|
147 |
+
.dmypy.json
|
148 |
+
dmypy.json
|
149 |
+
|
150 |
+
# Pyre type checker
|
151 |
+
.pyre/
|
152 |
+
|
153 |
+
# pytype static type analyzer
|
154 |
+
.pytype/
|
155 |
+
|
156 |
+
# Cython debug symbols
|
157 |
+
cython_debug/
|
158 |
+
|
159 |
+
# PyCharm
|
160 |
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
161 |
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
162 |
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
163 |
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
164 |
+
#.idea/
|
165 |
+
|
166 |
+
### Python Patch ###
|
167 |
+
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
|
168 |
+
poetry.toml
|
169 |
+
|
170 |
+
# ruff
|
171 |
+
.ruff_cache/
|
172 |
+
|
173 |
+
# LSP config files
|
174 |
+
pyrightconfig.json
|
175 |
+
|
176 |
+
### VisualStudioCode ###
|
177 |
+
.vscode/*
|
178 |
+
!.vscode/settings.json
|
179 |
+
!.vscode/tasks.json
|
180 |
+
!.vscode/launch.json
|
181 |
+
!.vscode/extensions.json
|
182 |
+
!.vscode/*.code-snippets
|
183 |
+
|
184 |
+
# Local History for Visual Studio Code
|
185 |
+
.history/
|
186 |
+
|
187 |
+
# Built Visual Studio Code Extensions
|
188 |
+
*.vsix
|
189 |
+
|
190 |
+
### VisualStudioCode Patch ###
|
191 |
+
# Ignore all local history of files
|
192 |
+
.history
|
193 |
+
.ionide
|
194 |
+
|
195 |
+
# End of https://www.toptal.com/developers/gitignore/api/python,visualstudiocode
|
196 |
+
*.ipynb
|
extensions/adetailer/.pre-commit-config.yaml
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
repos:
|
2 |
+
- repo: https://github.com/pre-commit/pre-commit-hooks
|
3 |
+
rev: v4.4.0
|
4 |
+
hooks:
|
5 |
+
- id: trailing-whitespace
|
6 |
+
args: [--markdown-linebreak-ext=md]
|
7 |
+
- id: end-of-file-fixer
|
8 |
+
- id: mixed-line-ending
|
9 |
+
|
10 |
+
- repo: https://github.com/pycqa/isort
|
11 |
+
rev: 5.12.0
|
12 |
+
hooks:
|
13 |
+
- id: isort
|
14 |
+
|
15 |
+
- repo: https://github.com/charliermarsh/ruff-pre-commit
|
16 |
+
rev: "v0.0.270"
|
17 |
+
hooks:
|
18 |
+
- id: ruff
|
19 |
+
args: [--fix, --exit-non-zero-on-fix]
|
20 |
+
|
21 |
+
- repo: https://github.com/psf/black
|
22 |
+
rev: 23.3.0
|
23 |
+
hooks:
|
24 |
+
- id: black
|
extensions/adetailer/CHANGELOG.md
ADDED
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Changelog
|
2 |
+
|
3 |
+
## 2023-06-01
|
4 |
+
|
5 |
+
- v23.6.1
|
6 |
+
- `inpaint, scribble, lineart, openpose, tile` 5가지 컨트롤넷 모델 지원 (PR #107)
|
7 |
+
- controlnet guidance start, end 인자 추가 (PR #107)
|
8 |
+
- `modules.extensions`를 사용하여 컨트롤넷 확장을 불러오고 경로를 알아내로록 변경
|
9 |
+
- ui에서 컨트롤넷을 별도 함수로 분리
|
10 |
+
|
11 |
+
## 2023-05-30
|
12 |
+
|
13 |
+
- v23.6.0
|
14 |
+
- 스크립트의 이름을 `After Detailer`에서 `ADetailer`로 변경
|
15 |
+
- API 사용자는 변경 필요함
|
16 |
+
- 몇몇 설정 변경
|
17 |
+
- `ad_conf` → `ad_confidence`. 0~100 사이의 int → 0.0~1.0 사이의 float
|
18 |
+
- `ad_inpaint_full_res` → `ad_inpaint_only_masked`
|
19 |
+
- `ad_inpaint_full_res_padding` → `ad_inpaint_only_masked_padding`
|
20 |
+
- mediapipe face mesh 모델 추가
|
21 |
+
- mediapipe 최소 버전 `0.10.0`
|
22 |
+
|
23 |
+
- rich traceback 제거함
|
24 |
+
- huggingface 다운로드 실패할 때 에러가 나지 않게 하고 해당 모델을 제거함
|
25 |
+
|
26 |
+
## 2023-05-26
|
27 |
+
|
28 |
+
- v23.5.19
|
29 |
+
- 1번째 탭에도 `None` 옵션을 추가함
|
30 |
+
- api로 ad controlnet model에 inpaint가 아닌 다른 컨트롤넷 모델을 사용하지 못하도록 막음
|
31 |
+
- adetailer 진행중에 total tqdm 진행바 업데이트를 멈춤
|
32 |
+
state.interrupted 상태에서 adetailer 과정을 중지함
|
33 |
+
- 컨트롤넷 process를 각 batch가 끝난 순간에만 호출하도록 변경
|
34 |
+
|
35 |
+
### 2023-05-25
|
36 |
+
|
37 |
+
- v23.5.18
|
38 |
+
- 컨트롤넷 관련 수정
|
39 |
+
- unit의 `input_mode`를 `SIMPLE`로 모두 변경
|
40 |
+
- 컨트롤넷 유넷 훅과 하이잭 함수들을 adetailer를 실행할 때에만 되돌리는 기능 추가
|
41 |
+
- adetailer 처리가 끝난 뒤 컨트롤넷 스크립트의 process를 다시 진행함. (batch count 2 이상일때의 문제 해결)
|
42 |
+
- 기본 활성 스크립트 목록에서 컨트롤넷을 뺌
|
43 |
+
|
44 |
+
### 2023-05-22
|
45 |
+
|
46 |
+
- v23.5.17
|
47 |
+
- 컨트롤넷 확장이 있으면 컨트롤넷 스크립트를 활성화함. (컨트롤넷 관련 문제 해결)
|
48 |
+
- 모든 컴포넌트에 elem_id 설정
|
49 |
+
- ui에 버전을 표시함
|
50 |
+
|
51 |
+
|
52 |
+
### 2023-05-19
|
53 |
+
|
54 |
+
- v23.5.16
|
55 |
+
- 추가한 옵션
|
56 |
+
- Mask min/max ratio
|
57 |
+
- Mask merge mode
|
58 |
+
- Restore faces after ADetailer
|
59 |
+
- 옵션들을 Accordion으로 묶음
|
60 |
+
|
61 |
+
### 2023-05-18
|
62 |
+
|
63 |
+
- v23.5.15
|
64 |
+
- 필요한 것만 임포트하도록 변경 (vae 로딩 오류 없어짐. 로딩 속도 빨라짐)
|
65 |
+
|
66 |
+
### 2023-05-17
|
67 |
+
|
68 |
+
- v23.5.14
|
69 |
+
- `[SKIP]`으로 ad prompt 일부를 건너뛰는 기능 추가
|
70 |
+
- bbox 정렬 옵션 추가
|
71 |
+
- sd_webui 타입힌트를 만들어냄
|
72 |
+
- enable checker와 관련된 api 오류 수정?
|
73 |
+
|
74 |
+
### 2023-05-15
|
75 |
+
|
76 |
+
- v23.5.13
|
77 |
+
- `[SEP]`으로 ad prompt를 분리하여 적용하는 기능 추가
|
78 |
+
- enable checker를 다시 pydantic으로 변경함
|
79 |
+
- ui 관련 함수를 adetailer.ui 폴더로 분리함
|
80 |
+
- controlnet을 사용할 때 모든 controlnet unit 비활성화
|
81 |
+
- adetailer 폴더가 없으면 만들게 함
|
82 |
+
|
83 |
+
### 2023-05-13
|
84 |
+
|
85 |
+
- v23.5.12
|
86 |
+
- `ad_enable`을 제외한 입력이 dict타입으로 들어오도록 변경
|
87 |
+
- web api로 사용할 때에 특히 사용하기 쉬움
|
88 |
+
- web api breaking change
|
89 |
+
- `mask_preprocess` 인자를 넣지 않았던 오류 수정 (PR #47)
|
90 |
+
- huggingface에서 모델을 다운로드하지 않는 옵션 추가 `--ad-no-huggingface`
|
91 |
+
|
92 |
+
### 2023-05-12
|
93 |
+
|
94 |
+
- v23.5.11
|
95 |
+
- `ultralytics` 알람 제거
|
96 |
+
- 필요없는 exif 인자 더 제거함
|
97 |
+
- `use separate steps` 옵션 추가
|
98 |
+
- ui 배치를 조정함
|
99 |
+
|
100 |
+
### 2023-05-09
|
101 |
+
|
102 |
+
- v23.5.10
|
103 |
+
- 선택한 스크립트만 ADetailer에 적용하는 옵션 추가, 기본값 `True`. 설정 탭에서 지정가능.
|
104 |
+
- 기본값: `dynamic_prompting,dynamic_thresholding,wildcards,wildcard_recursive`
|
105 |
+
- `person_yolov8s-seg.pt` 모델 추가
|
106 |
+
- `ultralytics`의 최소 버전을 `8.0.97`로 설정 (C:\\ 문제 해결된 버전)
|
107 |
+
|
108 |
+
### 2023-05-08
|
109 |
+
|
110 |
+
- v23.5.9
|
111 |
+
- 2가지 이상의 모델을 사용할 수 있음. 기본값: 2, 최대: 5
|
112 |
+
- segment 모델을 사용할 수 있게 함. `person_yolov8n-seg.pt` 추가
|
113 |
+
|
114 |
+
### 2023-05-07
|
115 |
+
|
116 |
+
- v23.5.8
|
117 |
+
- 프롬프트와 네거티브 프롬프트에 방향키 지원 (PR #24)
|
118 |
+
- `mask_preprocess`를 추가함. 이전 버전과 시드값이 달라질 가능성 있음!
|
119 |
+
- 이미지 처리가 일어났을 때에만 before이미지를 저장함
|
120 |
+
- 설정창의 레이블을 ADetailer 대신 더 적절하게 수정함
|
121 |
+
|
122 |
+
### 2023-05-06
|
123 |
+
|
124 |
+
- v23.5.7
|
125 |
+
- `ad_use_cfg_scale` 옵션 추가. cfg 스케일을 따로 사용할지 말지 결정함.
|
126 |
+
- `ad_enable` 기본값을 `True`에서 `False`로 변경
|
127 |
+
- `ad_model`의 기본값을 `None`에서 첫번째 모델로 변경
|
128 |
+
- 최소 2개의 입력(ad_enable, ad_model)만 들어오면 작동하게 변경.
|
129 |
+
|
130 |
+
- v23.5.7.post0
|
131 |
+
- `init_controlnet_ext`을 controlnet_exists == True일때에만 실행
|
132 |
+
- webui를 C드라이브 바로 밑에 설치한 사람들에게 `ultralytics` 경고 표시
|
133 |
+
|
134 |
+
### 2023-05-05 (어린이날)
|
135 |
+
|
136 |
+
- v23.5.5
|
137 |
+
- `Save images before ADetailer` 옵션 추가
|
138 |
+
- 입력으로 들어온 인자와 ALL_ARGS의 길이가 다르면 에러메세지
|
139 |
+
- README.md에 설치방법 추가
|
140 |
+
|
141 |
+
- v23.5.6
|
142 |
+
get_args에서 IndexError가 발생하면 자세한 에러메세지를 볼 수 있음
|
143 |
+
- AdetailerArgs에 extra_params 내장
|
144 |
+
- scripts_args를 딥카피함
|
145 |
+
- postprocess_image를 약간 분리함
|
146 |
+
|
147 |
+
- v23.5.6.post0
|
148 |
+
- `init_controlnet_ext`에서 에러메세지를 자세히 볼 수 있음
|
149 |
+
|
150 |
+
### 2023-05-04
|
151 |
+
|
152 |
+
- v23.5.4
|
153 |
+
- use pydantic for arguments validation
|
154 |
+
- revert: ad_model to `None` as default
|
155 |
+
- revert: `__future__` imports
|
156 |
+
- lazily import yolo and mediapipe
|
157 |
+
|
158 |
+
### 2023-05-03
|
159 |
+
|
160 |
+
- v23.5.3.post0
|
161 |
+
- remove `__future__` imports
|
162 |
+
- change to copy scripts and scripts args
|
163 |
+
|
164 |
+
- v23.5.3.post1
|
165 |
+
- change default ad_model from `None`
|
166 |
+
|
167 |
+
### 2023-05-02
|
168 |
+
|
169 |
+
- v23.5.3
|
170 |
+
- Remove `None` from model list and add `Enable ADetailer` checkbox.
|
171 |
+
- install.py `skip_install` fix.
|
extensions/adetailer/LICENSE.md
ADDED
@@ -0,0 +1,662 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
GNU AFFERO GENERAL PUBLIC LICENSE
|
3 |
+
Version 3, 19 November 2007
|
4 |
+
|
5 |
+
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
6 |
+
Everyone is permitted to copy and distribute verbatim copies
|
7 |
+
of this license document, but changing it is not allowed.
|
8 |
+
|
9 |
+
Preamble
|
10 |
+
|
11 |
+
The GNU Affero General Public License is a free, copyleft license for
|
12 |
+
software and other kinds of works, specifically designed to ensure
|
13 |
+
cooperation with the community in the case of network server software.
|
14 |
+
|
15 |
+
The licenses for most software and other practical works are designed
|
16 |
+
to take away your freedom to share and change the works. By contrast,
|
17 |
+
our General Public Licenses are intended to guarantee your freedom to
|
18 |
+
share and change all versions of a program--to make sure it remains free
|
19 |
+
software for all its users.
|
20 |
+
|
21 |
+
When we speak of free software, we are referring to freedom, not
|
22 |
+
price. Our General Public Licenses are designed to make sure that you
|
23 |
+
have the freedom to distribute copies of free software (and charge for
|
24 |
+
them if you wish), that you receive source code or can get it if you
|
25 |
+
want it, that you can change the software or use pieces of it in new
|
26 |
+
free programs, and that you know you can do these things.
|
27 |
+
|
28 |
+
Developers that use our General Public Licenses protect your rights
|
29 |
+
with two steps: (1) assert copyright on the software, and (2) offer
|
30 |
+
you this License which gives you legal permission to copy, distribute
|
31 |
+
and/or modify the software.
|
32 |
+
|
33 |
+
A secondary benefit of defending all users' freedom is that
|
34 |
+
improvements made in alternate versions of the program, if they
|
35 |
+
receive widespread use, become available for other developers to
|
36 |
+
incorporate. Many developers of free software are heartened and
|
37 |
+
encouraged by the resulting cooperation. However, in the case of
|
38 |
+
software used on network servers, this result may fail to come about.
|
39 |
+
The GNU General Public License permits making a modified version and
|
40 |
+
letting the public access it on a server without ever releasing its
|
41 |
+
source code to the public.
|
42 |
+
|
43 |
+
The GNU Affero General Public License is designed specifically to
|
44 |
+
ensure that, in such cases, the modified source code becomes available
|
45 |
+
to the community. It requires the operator of a network server to
|
46 |
+
provide the source code of the modified version running there to the
|
47 |
+
users of that server. Therefore, public use of a modified version, on
|
48 |
+
a publicly accessible server, gives the public access to the source
|
49 |
+
code of the modified version.
|
50 |
+
|
51 |
+
An older license, called the Affero General Public License and
|
52 |
+
published by Affero, was designed to accomplish similar goals. This is
|
53 |
+
a different license, not a version of the Affero GPL, but Affero has
|
54 |
+
released a new version of the Affero GPL which permits relicensing under
|
55 |
+
this license.
|
56 |
+
|
57 |
+
The precise terms and conditions for copying, distribution and
|
58 |
+
modification follow.
|
59 |
+
|
60 |
+
TERMS AND CONDITIONS
|
61 |
+
|
62 |
+
0. Definitions.
|
63 |
+
|
64 |
+
"This License" refers to version 3 of the GNU Affero General Public License.
|
65 |
+
|
66 |
+
"Copyright" also means copyright-like laws that apply to other kinds of
|
67 |
+
works, such as semiconductor masks.
|
68 |
+
|
69 |
+
"The Program" refers to any copyrightable work licensed under this
|
70 |
+
License. Each licensee is addressed as "you". "Licensees" and
|
71 |
+
"recipients" may be individuals or organizations.
|
72 |
+
|
73 |
+
To "modify" a work means to copy from or adapt all or part of the work
|
74 |
+
in a fashion requiring copyright permission, other than the making of an
|
75 |
+
exact copy. The resulting work is called a "modified version" of the
|
76 |
+
earlier work or a work "based on" the earlier work.
|
77 |
+
|
78 |
+
A "covered work" means either the unmodified Program or a work based
|
79 |
+
on the Program.
|
80 |
+
|
81 |
+
To "propagate" a work means to do anything with it that, without
|
82 |
+
permission, would make you directly or secondarily liable for
|
83 |
+
infringement under applicable copyright law, except executing it on a
|
84 |
+
computer or modifying a private copy. Propagation includes copying,
|
85 |
+
distribution (with or without modification), making available to the
|
86 |
+
public, and in some countries other activities as well.
|
87 |
+
|
88 |
+
To "convey" a work means any kind of propagation that enables other
|
89 |
+
parties to make or receive copies. Mere interaction with a user through
|
90 |
+
a computer network, with no transfer of a copy, is not conveying.
|
91 |
+
|
92 |
+
An interactive user interface displays "Appropriate Legal Notices"
|
93 |
+
to the extent that it includes a convenient and prominently visible
|
94 |
+
feature that (1) displays an appropriate copyright notice, and (2)
|
95 |
+
tells the user that there is no warranty for the work (except to the
|
96 |
+
extent that warranties are provided), that licensees may convey the
|
97 |
+
work under this License, and how to view a copy of this License. If
|
98 |
+
the interface presents a list of user commands or options, such as a
|
99 |
+
menu, a prominent item in the list meets this criterion.
|
100 |
+
|
101 |
+
1. Source Code.
|
102 |
+
|
103 |
+
The "source code" for a work means the preferred form of the work
|
104 |
+
for making modifications to it. "Object code" means any non-source
|
105 |
+
form of a work.
|
106 |
+
|
107 |
+
A "Standard Interface" means an interface that either is an official
|
108 |
+
standard defined by a recognized standards body, or, in the case of
|
109 |
+
interfaces specified for a particular programming language, one that
|
110 |
+
is widely used among developers working in that language.
|
111 |
+
|
112 |
+
The "System Libraries" of an executable work include anything, other
|
113 |
+
than the work as a whole, that (a) is included in the normal form of
|
114 |
+
packaging a Major Component, but which is not part of that Major
|
115 |
+
Component, and (b) serves only to enable use of the work with that
|
116 |
+
Major Component, or to implement a Standard Interface for which an
|
117 |
+
implementation is available to the public in source code form. A
|
118 |
+
"Major Component", in this context, means a major essential component
|
119 |
+
(kernel, window system, and so on) of the specific operating system
|
120 |
+
(if any) on which the executable work runs, or a compiler used to
|
121 |
+
produce the work, or an object code interpreter used to run it.
|
122 |
+
|
123 |
+
The "Corresponding Source" for a work in object code form means all
|
124 |
+
the source code needed to generate, install, and (for an executable
|
125 |
+
work) run the object code and to modify the work, including scripts to
|
126 |
+
control those activities. However, it does not include the work's
|
127 |
+
System Libraries, or general-purpose tools or generally available free
|
128 |
+
programs which are used unmodified in performing those activities but
|
129 |
+
which are not part of the work. For example, Corresponding Source
|
130 |
+
includes interface definition files associated with source files for
|
131 |
+
the work, and the source code for shared libraries and dynamically
|
132 |
+
linked subprograms that the work is specifically designed to require,
|
133 |
+
such as by intimate data communication or control flow between those
|
134 |
+
subprograms and other parts of the work.
|
135 |
+
|
136 |
+
The Corresponding Source need not include anything that users
|
137 |
+
can regenerate automatically from other parts of the Corresponding
|
138 |
+
Source.
|
139 |
+
|
140 |
+
The Corresponding Source for a work in source code form is that
|
141 |
+
same work.
|
142 |
+
|
143 |
+
2. Basic Permissions.
|
144 |
+
|
145 |
+
All rights granted under this License are granted for the term of
|
146 |
+
copyright on the Program, and are irrevocable provided the stated
|
147 |
+
conditions are met. This License explicitly affirms your unlimited
|
148 |
+
permission to run the unmodified Program. The output from running a
|
149 |
+
covered work is covered by this License only if the output, given its
|
150 |
+
content, constitutes a covered work. This License acknowledges your
|
151 |
+
rights of fair use or other equivalent, as provided by copyright law.
|
152 |
+
|
153 |
+
You may make, run and propagate covered works that you do not
|
154 |
+
convey, without conditions so long as your license otherwise remains
|
155 |
+
in force. You may convey covered works to others for the sole purpose
|
156 |
+
of having them make modifications exclusively for you, or provide you
|
157 |
+
with facilities for running those works, provided that you comply with
|
158 |
+
the terms of this License in conveying all material for which you do
|
159 |
+
not control copyright. Those thus making or running the covered works
|
160 |
+
for you must do so exclusively on your behalf, under your direction
|
161 |
+
and control, on terms that prohibit them from making any copies of
|
162 |
+
your copyrighted material outside their relationship with you.
|
163 |
+
|
164 |
+
Conveying under any other circumstances is permitted solely under
|
165 |
+
the conditions stated below. Sublicensing is not allowed; section 10
|
166 |
+
makes it unnecessary.
|
167 |
+
|
168 |
+
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
169 |
+
|
170 |
+
No covered work shall be deemed part of an effective technological
|
171 |
+
measure under any applicable law fulfilling obligations under article
|
172 |
+
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
173 |
+
similar laws prohibiting or restricting circumvention of such
|
174 |
+
measures.
|
175 |
+
|
176 |
+
When you convey a covered work, you waive any legal power to forbid
|
177 |
+
circumvention of technological measures to the extent such circumvention
|
178 |
+
is effected by exercising rights under this License with respect to
|
179 |
+
the covered work, and you disclaim any intention to limit operation or
|
180 |
+
modification of the work as a means of enforcing, against the work's
|
181 |
+
users, your or third parties' legal rights to forbid circumvention of
|
182 |
+
technological measures.
|
183 |
+
|
184 |
+
4. Conveying Verbatim Copies.
|
185 |
+
|
186 |
+
You may convey verbatim copies of the Program's source code as you
|
187 |
+
receive it, in any medium, provided that you conspicuously and
|
188 |
+
appropriately publish on each copy an appropriate copyright notice;
|
189 |
+
keep intact all notices stating that this License and any
|
190 |
+
non-permissive terms added in accord with section 7 apply to the code;
|
191 |
+
keep intact all notices of the absence of any warranty; and give all
|
192 |
+
recipients a copy of this License along with the Program.
|
193 |
+
|
194 |
+
You may charge any price or no price for each copy that you convey,
|
195 |
+
and you may offer support or warranty protection for a fee.
|
196 |
+
|
197 |
+
5. Conveying Modified Source Versions.
|
198 |
+
|
199 |
+
You may convey a work based on the Program, or the modifications to
|
200 |
+
produce it from the Program, in the form of source code under the
|
201 |
+
terms of section 4, provided that you also meet all of these conditions:
|
202 |
+
|
203 |
+
a) The work must carry prominent notices stating that you modified
|
204 |
+
it, and giving a relevant date.
|
205 |
+
|
206 |
+
b) The work must carry prominent notices stating that it is
|
207 |
+
released under this License and any conditions added under section
|
208 |
+
7. This requirement modifies the requirement in section 4 to
|
209 |
+
"keep intact all notices".
|
210 |
+
|
211 |
+
c) You must license the entire work, as a whole, under this
|
212 |
+
License to anyone who comes into possession of a copy. This
|
213 |
+
License will therefore apply, along with any applicable section 7
|
214 |
+
additional terms, to the whole of the work, and all its parts,
|
215 |
+
regardless of how they are packaged. This License gives no
|
216 |
+
permission to license the work in any other way, but it does not
|
217 |
+
invalidate such permission if you have separately received it.
|
218 |
+
|
219 |
+
d) If the work has interactive user interfaces, each must display
|
220 |
+
Appropriate Legal Notices; however, if the Program has interactive
|
221 |
+
interfaces that do not display Appropriate Legal Notices, your
|
222 |
+
work need not make them do so.
|
223 |
+
|
224 |
+
A compilation of a covered work with other separate and independent
|
225 |
+
works, which are not by their nature extensions of the covered work,
|
226 |
+
and which are not combined with it such as to form a larger program,
|
227 |
+
in or on a volume of a storage or distribution medium, is called an
|
228 |
+
"aggregate" if the compilation and its resulting copyright are not
|
229 |
+
used to limit the access or legal rights of the compilation's users
|
230 |
+
beyond what the individual works permit. Inclusion of a covered work
|
231 |
+
in an aggregate does not cause this License to apply to the other
|
232 |
+
parts of the aggregate.
|
233 |
+
|
234 |
+
6. Conveying Non-Source Forms.
|
235 |
+
|
236 |
+
You may convey a covered work in object code form under the terms
|
237 |
+
of sections 4 and 5, provided that you also convey the
|
238 |
+
machine-readable Corresponding Source under the terms of this License,
|
239 |
+
in one of these ways:
|
240 |
+
|
241 |
+
a) Convey the object code in, or embodied in, a physical product
|
242 |
+
(including a physical distribution medium), accompanied by the
|
243 |
+
Corresponding Source fixed on a durable physical medium
|
244 |
+
customarily used for software interchange.
|
245 |
+
|
246 |
+
b) Convey the object code in, or embodied in, a physical product
|
247 |
+
(including a physical distribution medium), accompanied by a
|
248 |
+
written offer, valid for at least three years and valid for as
|
249 |
+
long as you offer spare parts or customer support for that product
|
250 |
+
model, to give anyone who possesses the object code either (1) a
|
251 |
+
copy of the Corresponding Source for all the software in the
|
252 |
+
product that is covered by this License, on a durable physical
|
253 |
+
medium customarily used for software interchange, for a price no
|
254 |
+
more than your reasonable cost of physically performing this
|
255 |
+
conveying of source, or (2) access to copy the
|
256 |
+
Corresponding Source from a network server at no charge.
|
257 |
+
|
258 |
+
c) Convey individual copies of the object code with a copy of the
|
259 |
+
written offer to provide the Corresponding Source. This
|
260 |
+
alternative is allowed only occasionally and noncommercially, and
|
261 |
+
only if you received the object code with such an offer, in accord
|
262 |
+
with subsection 6b.
|
263 |
+
|
264 |
+
d) Convey the object code by offering access from a designated
|
265 |
+
place (gratis or for a charge), and offer equivalent access to the
|
266 |
+
Corresponding Source in the same way through the same place at no
|
267 |
+
further charge. You need not require recipients to copy the
|
268 |
+
Corresponding Source along with the object code. If the place to
|
269 |
+
copy the object code is a network server, the Corresponding Source
|
270 |
+
may be on a different server (operated by you or a third party)
|
271 |
+
that supports equivalent copying facilities, provided you maintain
|
272 |
+
clear directions next to the object code saying where to find the
|
273 |
+
Corresponding Source. Regardless of what server hosts the
|
274 |
+
Corresponding Source, you remain obligated to ensure that it is
|
275 |
+
available for as long as needed to satisfy these requirements.
|
276 |
+
|
277 |
+
e) Convey the object code using peer-to-peer transmission, provided
|
278 |
+
you inform other peers where the object code and Corresponding
|
279 |
+
Source of the work are being offered to the general public at no
|
280 |
+
charge under subsection 6d.
|
281 |
+
|
282 |
+
A separable portion of the object code, whose source code is excluded
|
283 |
+
from the Corresponding Source as a System Library, need not be
|
284 |
+
included in conveying the object code work.
|
285 |
+
|
286 |
+
A "User Product" is either (1) a "consumer product", which means any
|
287 |
+
tangible personal property which is normally used for personal, family,
|
288 |
+
or household purposes, or (2) anything designed or sold for incorporation
|
289 |
+
into a dwelling. In determining whether a product is a consumer product,
|
290 |
+
doubtful cases shall be resolved in favor of coverage. For a particular
|
291 |
+
product received by a particular user, "normally used" refers to a
|
292 |
+
typical or common use of that class of product, regardless of the status
|
293 |
+
of the particular user or of the way in which the particular user
|
294 |
+
actually uses, or expects or is expected to use, the product. A product
|
295 |
+
is a consumer product regardless of whether the product has substantial
|
296 |
+
commercial, industrial or non-consumer uses, unless such uses represent
|
297 |
+
the only significant mode of use of the product.
|
298 |
+
|
299 |
+
"Installation Information" for a User Product means any methods,
|
300 |
+
procedures, authorization keys, or other information required to install
|
301 |
+
and execute modified versions of a covered work in that User Product from
|
302 |
+
a modified version of its Corresponding Source. The information must
|
303 |
+
suffice to ensure that the continued functioning of the modified object
|
304 |
+
code is in no case prevented or interfered with solely because
|
305 |
+
modification has been made.
|
306 |
+
|
307 |
+
If you convey an object code work under this section in, or with, or
|
308 |
+
specifically for use in, a User Product, and the conveying occurs as
|
309 |
+
part of a transaction in which the right of possession and use of the
|
310 |
+
User Product is transferred to the recipient in perpetuity or for a
|
311 |
+
fixed term (regardless of how the transaction is characterized), the
|
312 |
+
Corresponding Source conveyed under this section must be accompanied
|
313 |
+
by the Installation Information. But this requirement does not apply
|
314 |
+
if neither you nor any third party retains the ability to install
|
315 |
+
modified object code on the User Product (for example, the work has
|
316 |
+
been installed in ROM).
|
317 |
+
|
318 |
+
The requirement to provide Installation Information does not include a
|
319 |
+
requirement to continue to provide support service, warranty, or updates
|
320 |
+
for a work that has been modified or installed by the recipient, or for
|
321 |
+
the User Product in which it has been modified or installed. Access to a
|
322 |
+
network may be denied when the modification itself materially and
|
323 |
+
adversely affects the operation of the network or violates the rules and
|
324 |
+
protocols for communication across the network.
|
325 |
+
|
326 |
+
Corresponding Source conveyed, and Installation Information provided,
|
327 |
+
in accord with this section must be in a format that is publicly
|
328 |
+
documented (and with an implementation available to the public in
|
329 |
+
source code form), and must require no special password or key for
|
330 |
+
unpacking, reading or copying.
|
331 |
+
|
332 |
+
7. Additional Terms.
|
333 |
+
|
334 |
+
"Additional permissions" are terms that supplement the terms of this
|
335 |
+
License by making exceptions from one or more of its conditions.
|
336 |
+
Additional permissions that are applicable to the entire Program shall
|
337 |
+
be treated as though they were included in this License, to the extent
|
338 |
+
that they are valid under applicable law. If additional permissions
|
339 |
+
apply only to part of the Program, that part may be used separately
|
340 |
+
under those permissions, but the entire Program remains governed by
|
341 |
+
this License without regard to the additional permissions.
|
342 |
+
|
343 |
+
When you convey a copy of a covered work, you may at your option
|
344 |
+
remove any additional permissions from that copy, or from any part of
|
345 |
+
it. (Additional permissions may be written to require their own
|
346 |
+
removal in certain cases when you modify the work.) You may place
|
347 |
+
additional permissions on material, added by you to a covered work,
|
348 |
+
for which you have or can give appropriate copyright permission.
|
349 |
+
|
350 |
+
Notwithstanding any other provision of this License, for material you
|
351 |
+
add to a covered work, you may (if authorized by the copyright holders of
|
352 |
+
that material) supplement the terms of this License with terms:
|
353 |
+
|
354 |
+
a) Disclaiming warranty or limiting liability differently from the
|
355 |
+
terms of sections 15 and 16 of this License; or
|
356 |
+
|
357 |
+
b) Requiring preservation of specified reasonable legal notices or
|
358 |
+
author attributions in that material or in the Appropriate Legal
|
359 |
+
Notices displayed by works containing it; or
|
360 |
+
|
361 |
+
c) Prohibiting misrepresentation of the origin of that material, or
|
362 |
+
requiring that modified versions of such material be marked in
|
363 |
+
reasonable ways as different from the original version; or
|
364 |
+
|
365 |
+
d) Limiting the use for publicity purposes of names of licensors or
|
366 |
+
authors of the material; or
|
367 |
+
|
368 |
+
e) Declining to grant rights under trademark law for use of some
|
369 |
+
trade names, trademarks, or service marks; or
|
370 |
+
|
371 |
+
f) Requiring indemnification of licensors and authors of that
|
372 |
+
material by anyone who conveys the material (or modified versions of
|
373 |
+
it) with contractual assumptions of liability to the recipient, for
|
374 |
+
any liability that these contractual assumptions directly impose on
|
375 |
+
those licensors and authors.
|
376 |
+
|
377 |
+
All other non-permissive additional terms are considered "further
|
378 |
+
restrictions" within the meaning of section 10. If the Program as you
|
379 |
+
received it, or any part of it, contains a notice stating that it is
|
380 |
+
governed by this License along with a term that is a further
|
381 |
+
restriction, you may remove that term. If a license document contains
|
382 |
+
a further restriction but permits relicensing or conveying under this
|
383 |
+
License, you may add to a covered work material governed by the terms
|
384 |
+
of that license document, provided that the further restriction does
|
385 |
+
not survive such relicensing or conveying.
|
386 |
+
|
387 |
+
If you add terms to a covered work in accord with this section, you
|
388 |
+
must place, in the relevant source files, a statement of the
|
389 |
+
additional terms that apply to those files, or a notice indicating
|
390 |
+
where to find the applicable terms.
|
391 |
+
|
392 |
+
Additional terms, permissive or non-permissive, may be stated in the
|
393 |
+
form of a separately written license, or stated as exceptions;
|
394 |
+
the above requirements apply either way.
|
395 |
+
|
396 |
+
8. Termination.
|
397 |
+
|
398 |
+
You may not propagate or modify a covered work except as expressly
|
399 |
+
provided under this License. Any attempt otherwise to propagate or
|
400 |
+
modify it is void, and will automatically terminate your rights under
|
401 |
+
this License (including any patent licenses granted under the third
|
402 |
+
paragraph of section 11).
|
403 |
+
|
404 |
+
However, if you cease all violation of this License, then your
|
405 |
+
license from a particular copyright holder is reinstated (a)
|
406 |
+
provisionally, unless and until the copyright holder explicitly and
|
407 |
+
finally terminates your license, and (b) permanently, if the copyright
|
408 |
+
holder fails to notify you of the violation by some reasonable means
|
409 |
+
prior to 60 days after the cessation.
|
410 |
+
|
411 |
+
Moreover, your license from a particular copyright holder is
|
412 |
+
reinstated permanently if the copyright holder notifies you of the
|
413 |
+
violation by some reasonable means, this is the first time you have
|
414 |
+
received notice of violation of this License (for any work) from that
|
415 |
+
copyright holder, and you cure the violation prior to 30 days after
|
416 |
+
your receipt of the notice.
|
417 |
+
|
418 |
+
Termination of your rights under this section does not terminate the
|
419 |
+
licenses of parties who have received copies or rights from you under
|
420 |
+
this License. If your rights have been terminated and not permanently
|
421 |
+
reinstated, you do not qualify to receive new licenses for the same
|
422 |
+
material under section 10.
|
423 |
+
|
424 |
+
9. Acceptance Not Required for Having Copies.
|
425 |
+
|
426 |
+
You are not required to accept this License in order to receive or
|
427 |
+
run a copy of the Program. Ancillary propagation of a covered work
|
428 |
+
occurring solely as a consequence of using peer-to-peer transmission
|
429 |
+
to receive a copy likewise does not require acceptance. However,
|
430 |
+
nothing other than this License grants you permission to propagate or
|
431 |
+
modify any covered work. These actions infringe copyright if you do
|
432 |
+
not accept this License. Therefore, by modifying or propagating a
|
433 |
+
covered work, you indicate your acceptance of this License to do so.
|
434 |
+
|
435 |
+
10. Automatic Licensing of Downstream Recipients.
|
436 |
+
|
437 |
+
Each time you convey a covered work, the recipient automatically
|
438 |
+
receives a license from the original licensors, to run, modify and
|
439 |
+
propagate that work, subject to this License. You are not responsible
|
440 |
+
for enforcing compliance by third parties with this License.
|
441 |
+
|
442 |
+
An "entity transaction" is a transaction transferring control of an
|
443 |
+
organization, or substantially all assets of one, or subdividing an
|
444 |
+
organization, or merging organizations. If propagation of a covered
|
445 |
+
work results from an entity transaction, each party to that
|
446 |
+
transaction who receives a copy of the work also receives whatever
|
447 |
+
licenses to the work the party's predecessor in interest had or could
|
448 |
+
give under the previous paragraph, plus a right to possession of the
|
449 |
+
Corresponding Source of the work from the predecessor in interest, if
|
450 |
+
the predecessor has it or can get it with reasonable efforts.
|
451 |
+
|
452 |
+
You may not impose any further restrictions on the exercise of the
|
453 |
+
rights granted or affirmed under this License. For example, you may
|
454 |
+
not impose a license fee, royalty, or other charge for exercise of
|
455 |
+
rights granted under this License, and you may not initiate litigation
|
456 |
+
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
457 |
+
any patent claim is infringed by making, using, selling, offering for
|
458 |
+
sale, or importing the Program or any portion of it.
|
459 |
+
|
460 |
+
11. Patents.
|
461 |
+
|
462 |
+
A "contributor" is a copyright holder who authorizes use under this
|
463 |
+
License of the Program or a work on which the Program is based. The
|
464 |
+
work thus licensed is called the contributor's "contributor version".
|
465 |
+
|
466 |
+
A contributor's "essential patent claims" are all patent claims
|
467 |
+
owned or controlled by the contributor, whether already acquired or
|
468 |
+
hereafter acquired, that would be infringed by some manner, permitted
|
469 |
+
by this License, of making, using, or selling its contributor version,
|
470 |
+
but do not include claims that would be infringed only as a
|
471 |
+
consequence of further modification of the contributor version. For
|
472 |
+
purposes of this definition, "control" includes the right to grant
|
473 |
+
patent sublicenses in a manner consistent with the requirements of
|
474 |
+
this License.
|
475 |
+
|
476 |
+
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
477 |
+
patent license under the contributor's essential patent claims, to
|
478 |
+
make, use, sell, offer for sale, import and otherwise run, modify and
|
479 |
+
propagate the contents of its contributor version.
|
480 |
+
|
481 |
+
In the following three paragraphs, a "patent license" is any express
|
482 |
+
agreement or commitment, however denominated, not to enforce a patent
|
483 |
+
(such as an express permission to practice a patent or covenant not to
|
484 |
+
sue for patent infringement). To "grant" such a patent license to a
|
485 |
+
party means to make such an agreement or commitment not to enforce a
|
486 |
+
patent against the party.
|
487 |
+
|
488 |
+
If you convey a covered work, knowingly relying on a patent license,
|
489 |
+
and the Corresponding Source of the work is not available for anyone
|
490 |
+
to copy, free of charge and under the terms of this License, through a
|
491 |
+
publicly available network server or other readily accessible means,
|
492 |
+
then you must either (1) cause the Corresponding Source to be so
|
493 |
+
available, or (2) arrange to deprive yourself of the benefit of the
|
494 |
+
patent license for this particular work, or (3) arrange, in a manner
|
495 |
+
consistent with the requirements of this License, to extend the patent
|
496 |
+
license to downstream recipients. "Knowingly relying" means you have
|
497 |
+
actual knowledge that, but for the patent license, your conveying the
|
498 |
+
covered work in a country, or your recipient's use of the covered work
|
499 |
+
in a country, would infringe one or more identifiable patents in that
|
500 |
+
country that you have reason to believe are valid.
|
501 |
+
|
502 |
+
If, pursuant to or in connection with a single transaction or
|
503 |
+
arrangement, you convey, or propagate by procuring conveyance of, a
|
504 |
+
covered work, and grant a patent license to some of the parties
|
505 |
+
receiving the covered work authorizing them to use, propagate, modify
|
506 |
+
or convey a specific copy of the covered work, then the patent license
|
507 |
+
you grant is automatically extended to all recipients of the covered
|
508 |
+
work and works based on it.
|
509 |
+
|
510 |
+
A patent license is "discriminatory" if it does not include within
|
511 |
+
the scope of its coverage, prohibits the exercise of, or is
|
512 |
+
conditioned on the non-exercise of one or more of the rights that are
|
513 |
+
specifically granted under this License. You may not convey a covered
|
514 |
+
work if you are a party to an arrangement with a third party that is
|
515 |
+
in the business of distributing software, under which you make payment
|
516 |
+
to the third party based on the extent of your activity of conveying
|
517 |
+
the work, and under which the third party grants, to any of the
|
518 |
+
parties who would receive the covered work from you, a discriminatory
|
519 |
+
patent license (a) in connection with copies of the covered work
|
520 |
+
conveyed by you (or copies made from those copies), or (b) primarily
|
521 |
+
for and in connection with specific products or compilations that
|
522 |
+
contain the covered work, unless you entered into that arrangement,
|
523 |
+
or that patent license was granted, prior to 28 March 2007.
|
524 |
+
|
525 |
+
Nothing in this License shall be construed as excluding or limiting
|
526 |
+
any implied license or other defenses to infringement that may
|
527 |
+
otherwise be available to you under applicable patent law.
|
528 |
+
|
529 |
+
12. No Surrender of Others' Freedom.
|
530 |
+
|
531 |
+
If conditions are imposed on you (whether by court order, agreement or
|
532 |
+
otherwise) that contradict the conditions of this License, they do not
|
533 |
+
excuse you from the conditions of this License. If you cannot convey a
|
534 |
+
covered work so as to satisfy simultaneously your obligations under this
|
535 |
+
License and any other pertinent obligations, then as a consequence you may
|
536 |
+
not convey it at all. For example, if you agree to terms that obligate you
|
537 |
+
to collect a royalty for further conveying from those to whom you convey
|
538 |
+
the Program, the only way you could satisfy both those terms and this
|
539 |
+
License would be to refrain entirely from conveying the Program.
|
540 |
+
|
541 |
+
13. Remote Network Interaction; Use with the GNU General Public License.
|
542 |
+
|
543 |
+
Notwithstanding any other provision of this License, if you modify the
|
544 |
+
Program, your modified version must prominently offer all users
|
545 |
+
interacting with it remotely through a computer network (if your version
|
546 |
+
supports such interaction) an opportunity to receive the Corresponding
|
547 |
+
Source of your version by providing access to the Corresponding Source
|
548 |
+
from a network server at no charge, through some standard or customary
|
549 |
+
means of facilitating copying of software. This Corresponding Source
|
550 |
+
shall include the Corresponding Source for any work covered by version 3
|
551 |
+
of the GNU General Public License that is incorporated pursuant to the
|
552 |
+
following paragraph.
|
553 |
+
|
554 |
+
Notwithstanding any other provision of this License, you have
|
555 |
+
permission to link or combine any covered work with a work licensed
|
556 |
+
under version 3 of the GNU General Public License into a single
|
557 |
+
combined work, and to convey the resulting work. The terms of this
|
558 |
+
License will continue to apply to the part which is the covered work,
|
559 |
+
but the work with which it is combined will remain governed by version
|
560 |
+
3 of the GNU General Public License.
|
561 |
+
|
562 |
+
14. Revised Versions of this License.
|
563 |
+
|
564 |
+
The Free Software Foundation may publish revised and/or new versions of
|
565 |
+
the GNU Affero General Public License from time to time. Such new versions
|
566 |
+
will be similar in spirit to the present version, but may differ in detail to
|
567 |
+
address new problems or concerns.
|
568 |
+
|
569 |
+
Each version is given a distinguishing version number. If the
|
570 |
+
Program specifies that a certain numbered version of the GNU Affero General
|
571 |
+
Public License "or any later version" applies to it, you have the
|
572 |
+
option of following the terms and conditions either of that numbered
|
573 |
+
version or of any later version published by the Free Software
|
574 |
+
Foundation. If the Program does not specify a version number of the
|
575 |
+
GNU Affero General Public License, you may choose any version ever published
|
576 |
+
by the Free Software Foundation.
|
577 |
+
|
578 |
+
If the Program specifies that a proxy can decide which future
|
579 |
+
versions of the GNU Affero General Public License can be used, that proxy's
|
580 |
+
public statement of acceptance of a version permanently authorizes you
|
581 |
+
to choose that version for the Program.
|
582 |
+
|
583 |
+
Later license versions may give you additional or different
|
584 |
+
permissions. However, no additional obligations are imposed on any
|
585 |
+
author or copyright holder as a result of your choosing to follow a
|
586 |
+
later version.
|
587 |
+
|
588 |
+
15. Disclaimer of Warranty.
|
589 |
+
|
590 |
+
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
591 |
+
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
592 |
+
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
593 |
+
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
594 |
+
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
595 |
+
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
596 |
+
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
597 |
+
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
598 |
+
|
599 |
+
16. Limitation of Liability.
|
600 |
+
|
601 |
+
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
602 |
+
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
603 |
+
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
604 |
+
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
605 |
+
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
606 |
+
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
607 |
+
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
608 |
+
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
609 |
+
SUCH DAMAGES.
|
610 |
+
|
611 |
+
17. Interpretation of Sections 15 and 16.
|
612 |
+
|
613 |
+
If the disclaimer of warranty and limitation of liability provided
|
614 |
+
above cannot be given local legal effect according to their terms,
|
615 |
+
reviewing courts shall apply local law that most closely approximates
|
616 |
+
an absolute waiver of all civil liability in connection with the
|
617 |
+
Program, unless a warranty or assumption of liability accompanies a
|
618 |
+
copy of the Program in return for a fee.
|
619 |
+
|
620 |
+
END OF TERMS AND CONDITIONS
|
621 |
+
|
622 |
+
How to Apply These Terms to Your New Programs
|
623 |
+
|
624 |
+
If you develop a new program, and you want it to be of the greatest
|
625 |
+
possible use to the public, the best way to achieve this is to make it
|
626 |
+
free software which everyone can redistribute and change under these terms.
|
627 |
+
|
628 |
+
To do so, attach the following notices to the program. It is safest
|
629 |
+
to attach them to the start of each source file to most effectively
|
630 |
+
state the exclusion of warranty; and each file should have at least
|
631 |
+
the "copyright" line and a pointer to where the full notice is found.
|
632 |
+
|
633 |
+
<one line to give the program's name and a brief idea of what it does.>
|
634 |
+
Copyright (C) <year> <name of author>
|
635 |
+
|
636 |
+
This program is free software: you can redistribute it and/or modify
|
637 |
+
it under the terms of the GNU Affero General Public License as published
|
638 |
+
by the Free Software Foundation, either version 3 of the License, or
|
639 |
+
(at your option) any later version.
|
640 |
+
|
641 |
+
This program is distributed in the hope that it will be useful,
|
642 |
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
643 |
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
644 |
+
GNU Affero General Public License for more details.
|
645 |
+
|
646 |
+
You should have received a copy of the GNU Affero General Public License
|
647 |
+
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
648 |
+
|
649 |
+
Also add information on how to contact you by electronic and paper mail.
|
650 |
+
|
651 |
+
If your software can interact with users remotely through a computer
|
652 |
+
network, you should also make sure that it provides a way for users to
|
653 |
+
get its source. For example, if your program is a web application, its
|
654 |
+
interface could display a "Source" link that leads users to an archive
|
655 |
+
of the code. There are many ways you could offer source, and different
|
656 |
+
solutions will be better for different programs; see section 13 for the
|
657 |
+
specific requirements.
|
658 |
+
|
659 |
+
You should also get your employer (if you work as a programmer) or school,
|
660 |
+
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
661 |
+
For more information on this, and how to apply and follow the GNU AGPL, see
|
662 |
+
<http://www.gnu.org/licenses/>.
|
extensions/adetailer/README.md
ADDED
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# !After Detailer
|
2 |
+
|
3 |
+
!After Detailer is an extension for stable diffusion webui, similar to Detection Detailer, except it uses ultralytics instead of mmdet.
|
4 |
+
|
5 |
+
## Install
|
6 |
+
|
7 |
+
(from Mikubill/sd-webui-controlnet)
|
8 |
+
|
9 |
+
1. Open "Extensions" tab.
|
10 |
+
2. Open "Install from URL" tab in the tab.
|
11 |
+
3. Enter `https://github.com/Bing-su/adetailer.git` to "URL for extension's git repository".
|
12 |
+
4. Press "Install" button.
|
13 |
+
5. Wait 5 seconds, and you will see the message "Installed into stable-diffusion-webui\extensions\adetailer. Use Installed tab to restart".
|
14 |
+
6. Go to "Installed" tab, click "Check for updates", and then click "Apply and restart UI". (The next time you can also use this method to update extensions.)
|
15 |
+
7. Completely restart A1111 webui including your terminal. (If you do not know what is a "terminal", you can reboot your computer: turn your computer off and turn it on again.)
|
16 |
+
|
17 |
+
You can now install it directly from the Extensions tab.
|
18 |
+
|
19 |
+
![image](https://i.imgur.com/g6GdRBT.png)
|
20 |
+
|
21 |
+
You **DON'T** need to download any model from huggingface.
|
22 |
+
|
23 |
+
## Options
|
24 |
+
|
25 |
+
| Model, Prompts | | |
|
26 |
+
| --------------------------------- | ------------------------------------- | ------------------------------------------------- |
|
27 |
+
| ADetailer model | Determine what to detect. | `None` = disable |
|
28 |
+
| ADetailer prompt, negative prompt | Prompts and negative prompts to apply | If left blank, it will use the same as the input. |
|
29 |
+
|
30 |
+
| Detection | | |
|
31 |
+
| ------------------------------------ | -------------------------------------------------------------------------------------------- | --- |
|
32 |
+
| Detection model confidence threshold | Only objects with a detection model confidence above this threshold are used for inpainting. | |
|
33 |
+
| Mask min/max ratio | Only use masks whose area is between those ratios for the area of the entire image. | |
|
34 |
+
|
35 |
+
If you want to exclude objects in the background, try setting the min ratio to around `0.01`.
|
36 |
+
|
37 |
+
| Mask Preprocessing | | |
|
38 |
+
| ------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- |
|
39 |
+
| Mask x, y offset | Moves the mask horizontally and vertically by | |
|
40 |
+
| Mask erosion (-) / dilation (+) | Enlarge or reduce the detected mask. | [opencv example](https://docs.opencv.org/4.7.0/db/df6/tutorial_erosion_dilatation.html) |
|
41 |
+
| Mask merge mode | `None`: Inpaint each mask<br/>`Merge`: Merge all masks and inpaint<br/>`Merge and Invert`: Merge all masks and Invert, then inpaint | |
|
42 |
+
|
43 |
+
Applied in this order: x, y offset → erosion/dilation → merge/invert.
|
44 |
+
|
45 |
+
#### Inpainting
|
46 |
+
|
47 |
+
![image](https://i.imgur.com/wyWlT1n.png)
|
48 |
+
|
49 |
+
Each option corresponds to a corresponding option on the inpaint tab.
|
50 |
+
|
51 |
+
## ControlNet Inpainting
|
52 |
+
|
53 |
+
You can use the ControlNet extension if you have ControlNet installed and ControlNet models.
|
54 |
+
|
55 |
+
Supports the `inpaint, scribble, lineart, openpose, tile` ControlNet models. Once you choose a model, the preprocessor is set automatically.
|
56 |
+
|
57 |
+
## Model
|
58 |
+
|
59 |
+
| Model | Target | mAP 50 | mAP 50-95 |
|
60 |
+
| --------------------- | --------------------- | ----------------------------- | ----------------------------- |
|
61 |
+
| face_yolov8n.pt | 2D / realistic face | 0.660 | 0.366 |
|
62 |
+
| face_yolov8s.pt | 2D / realistic face | 0.713 | 0.404 |
|
63 |
+
| hand_yolov8n.pt | 2D / realistic hand | 0.767 | 0.505 |
|
64 |
+
| person_yolov8n-seg.pt | 2D / realistic person | 0.782 (bbox)<br/>0.761 (mask) | 0.555 (bbox)<br/>0.460 (mask) |
|
65 |
+
| person_yolov8s-seg.pt | 2D / realistic person | 0.824 (bbox)<br/>0.809 (mask) | 0.605 (bbox)<br/>0.508 (mask) |
|
66 |
+
| mediapipe_face_full | realistic face | - | - |
|
67 |
+
| mediapipe_face_short | realistic face | - | - |
|
68 |
+
| mediapipe_face_mesh | realistic face | - | - |
|
69 |
+
|
70 |
+
The yolo models can be found on huggingface [Bingsu/adetailer](https://huggingface.co/Bingsu/adetailer).
|
71 |
+
|
72 |
+
### User Model
|
73 |
+
|
74 |
+
Put your [ultralytics](https://github.com/ultralytics/ultralytics) model in `webui/models/adetailer`. The model name should end with `.pt` or `.pth`.
|
75 |
+
|
76 |
+
It must be a bbox detection or segmentation model and use all labels.
|
77 |
+
|
78 |
+
### Dataset
|
79 |
+
|
80 |
+
Datasets used for training the yolo models are:
|
81 |
+
|
82 |
+
#### Face
|
83 |
+
|
84 |
+
- [Anime Face CreateML](https://universe.roboflow.com/my-workspace-mph8o/anime-face-createml)
|
85 |
+
- [xml2txt](https://universe.roboflow.com/0oooooo0/xml2txt-njqx1)
|
86 |
+
- [AN](https://universe.roboflow.com/sed-b8vkf/an-lfg5i)
|
87 |
+
- [wider face](http://shuoyang1213.me/WIDERFACE/index.html)
|
88 |
+
|
89 |
+
#### Hand
|
90 |
+
|
91 |
+
- [AnHDet](https://universe.roboflow.com/1-yshhi/anhdet)
|
92 |
+
- [hand-detection-fuao9](https://universe.roboflow.com/catwithawand/hand-detection-fuao9)
|
93 |
+
|
94 |
+
#### Person
|
95 |
+
|
96 |
+
- [coco2017](https://cocodataset.org/#home) (only person)
|
97 |
+
- [AniSeg](https://github.com/jerryli27/AniSeg)
|
98 |
+
- [skytnt/anime-segmentation](https://huggingface.co/datasets/skytnt/anime-segmentation)
|
99 |
+
|
100 |
+
## Example
|
101 |
+
|
102 |
+
![image](https://i.imgur.com/38RSxSO.png)
|
103 |
+
![image](https://i.imgur.com/2CYgjLx.png)
|
104 |
+
|
105 |
+
[![ko-fi](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/F1F1L7V2N)
|
extensions/adetailer/__pycache__/preload.cpython-310.pyc
ADDED
Binary file (433 Bytes). View file
|
|
extensions/adetailer/adetailer/__init__.py
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from .__version__ import __version__
from .args import AD_ENABLE, ALL_ARGS, ADetailerArgs, EnableChecker
from .common import PredictOutput, get_models
from .mediapipe import mediapipe_predict
from .ultralytics import ultralytics_predict

# Display name of the extension.
AFTER_DETAILER = "ADetailer"

# Public API of the ``adetailer`` package.
__all__ = [
    "__version__",
    "AD_ENABLE",
    "ADetailerArgs",
    "AFTER_DETAILER",
    "ALL_ARGS",
    "EnableChecker",
    "PredictOutput",
    "get_models",
    "mediapipe_predict",
    "ultralytics_predict",
]
|
extensions/adetailer/adetailer/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (573 Bytes). View file
|
|
extensions/adetailer/adetailer/__pycache__/__version__.cpython-310.pyc
ADDED
Binary file (190 Bytes). View file
|
|
extensions/adetailer/adetailer/__pycache__/args.cpython-310.pyc
ADDED
Binary file (6.12 kB). View file
|
|
extensions/adetailer/adetailer/__pycache__/common.cpython-310.pyc
ADDED
Binary file (3.45 kB). View file
|
|
extensions/adetailer/adetailer/__pycache__/mask.cpython-310.pyc
ADDED
Binary file (7.98 kB). View file
|
|
extensions/adetailer/adetailer/__pycache__/mediapipe.cpython-310.pyc
ADDED
Binary file (3.25 kB). View file
|
|
extensions/adetailer/adetailer/__pycache__/ui.cpython-310.pyc
ADDED
Binary file (10.4 kB). View file
|
|
extensions/adetailer/adetailer/__pycache__/ultralytics.cpython-310.pyc
ADDED
Binary file (1.86 kB). View file
|
|
extensions/adetailer/adetailer/__version__.py
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
# Package version string (appears to follow CalVer: YY.M.D[.postN]).
__version__ = "23.6.1.post0"
|
extensions/adetailer/adetailer/args.py
ADDED
@@ -0,0 +1,187 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from collections import UserList
|
4 |
+
from functools import cached_property, partial
|
5 |
+
from typing import Any, Literal, NamedTuple, Union
|
6 |
+
|
7 |
+
import pydantic
|
8 |
+
from pydantic import (
|
9 |
+
BaseModel,
|
10 |
+
Extra,
|
11 |
+
NonNegativeFloat,
|
12 |
+
NonNegativeInt,
|
13 |
+
PositiveInt,
|
14 |
+
confloat,
|
15 |
+
constr,
|
16 |
+
)
|
17 |
+
|
18 |
+
cn_model_regex = r".*(inpaint|tile|scribble|lineart|openpose).*|^None$"
|
19 |
+
|
20 |
+
|
21 |
+
class Arg(NamedTuple):
    """A single scriptable option: pydantic attribute name plus display name."""

    # Attribute name on ADetailerArgs, e.g. "ad_model".
    attr: str
    # Human-readable label, used as the key in extra_params() output.
    name: str
|
24 |
+
|
25 |
+
|
26 |
+
class ArgsList(UserList):
    """A list of ``(attr, name)`` pairs with cached column views.

    Each element is expected to be a 2-tuple (see ``Arg``); ``attrs`` and
    ``names`` expose the first and second columns respectively, computed
    once and cached for the lifetime of the list.
    """

    @cached_property
    def attrs(self) -> tuple[str, ...]:
        """Attribute names (first element of each pair), in list order."""
        # Annotation fixed: ``tuple[str]`` would mean a 1-tuple, but this
        # returns one string per list element.
        return tuple(attr for attr, _ in self)

    @cached_property
    def names(self) -> tuple[str, ...]:
        """Display names (second element of each pair), in list order."""
        return tuple(name for _, name in self)
|
34 |
+
|
35 |
+
|
36 |
+
class ADetailerArgs(BaseModel, extra=Extra.forbid):
    """All per-run ADetailer options, validated by pydantic.

    ``Extra.forbid`` rejects unknown fields, so stale/extra keys coming
    from the UI raise instead of being silently accepted.  Field names
    mirror the UI components; ranges are enforced by the constrained types.
    """

    ad_model: str = "None"  # "None" disables detection entirely
    ad_prompt: str = ""  # empty -> fall back to the main prompt
    ad_negative_prompt: str = ""  # empty -> fall back to the main negative prompt
    ad_confidence: confloat(ge=0.0, le=1.0) = 0.3
    ad_mask_min_ratio: confloat(ge=0.0, le=1.0) = 0.0
    ad_mask_max_ratio: confloat(ge=0.0, le=1.0) = 1.0
    ad_dilate_erode: int = 32  # sign selects dilation (+) vs erosion (-)
    ad_x_offset: int = 0
    ad_y_offset: int = 0
    ad_mask_merge_invert: Literal["None", "Merge", "Merge and Invert"] = "None"
    ad_mask_blur: NonNegativeInt = 4
    ad_denoising_strength: confloat(ge=0.0, le=1.0) = 0.4
    ad_inpaint_only_masked: bool = True
    ad_inpaint_only_masked_padding: NonNegativeInt = 0
    ad_use_inpaint_width_height: bool = False
    ad_inpaint_width: PositiveInt = 512
    ad_inpaint_height: PositiveInt = 512
    ad_use_steps: bool = False
    ad_steps: PositiveInt = 28
    ad_use_cfg_scale: bool = False
    ad_cfg_scale: NonNegativeFloat = 7.0
    ad_restore_face: bool = False
    # Only ControlNet models matching cn_model_regex (or "None") are accepted.
    ad_controlnet_model: constr(regex=cn_model_regex) = "None"
    ad_controlnet_weight: confloat(ge=0.0, le=1.0) = 1.0
    ad_controlnet_guidance_start: confloat(ge=0.0, le=1.0) = 0.0
    ad_controlnet_guidance_end: confloat(ge=0.0, le=1.0) = 1.0

    @staticmethod
    def ppop(
        p: dict[str, Any],
        key: str,
        pops: list[str] | None = None,
        cond: Any = None,
    ):
        """Conditionally pop *pops* (default: ``[key]``) from *p*.

        When *cond* is None the keys are removed if ``p[key]`` is falsy;
        otherwise they are removed if ``p[key] == cond``.  Used below to
        strip default-valued entries out of the infotext dict.
        """
        if pops is None:
            pops = [key]
        if key not in p:
            return
        value = p[key]
        # Reuses ``cond`` as the final boolean decision, not the comparand.
        cond = (not bool(value)) if cond is None else value == cond

        if cond:
            for k in pops:
                p.pop(k, None)

    def extra_params(self, suffix: str = ""):
        """Return a ``{display name: value}`` dict for the generation infotext.

        Entries still at their default value are dropped; an empty dict is
        returned when no detection model is selected.  *suffix* is appended
        to every key (used to distinguish multiple ADetailer units).
        """
        if self.ad_model == "None":
            return {}

        p = {name: getattr(self, attr) for attr, name in ALL_ARGS}
        ppop = partial(self.ppop, p)

        ppop("ADetailer prompt")
        ppop("ADetailer negative prompt")
        ppop("ADetailer mask min ratio", cond=0.0)
        ppop("ADetailer mask max ratio", cond=1.0)
        ppop("ADetailer x offset", cond=0)
        ppop("ADetailer y offset", cond=0)
        ppop("ADetailer mask merge/invert", cond="None")
        # When "inpaint only masked" is off, the padding value is meaningless.
        ppop("ADetailer inpaint only masked", ["ADetailer inpaint padding"])
        ppop(
            "ADetailer use inpaint width/height",
            [
                "ADetailer use inpaint width/height",
                "ADetailer inpaint width",
                "ADetailer inpaint height",
            ],
        )
        ppop(
            "ADetailer use separate steps",
            ["ADetailer use separate steps", "ADetailer steps"],
        )
        ppop(
            "ADetailer use separate CFG scale",
            ["ADetailer use separate CFG scale", "ADetailer CFG scale"],
        )
        ppop("ADetailer restore face")
        ppop(
            "ADetailer ControlNet model",
            [
                "ADetailer ControlNet model",
                "ADetailer ControlNet weight",
                "ADetailer ControlNet guidance start",
                "ADetailer ControlNet guidance end",
            ],
            cond="None",
        )
        ppop("ADetailer ControlNet weight", cond=1.0)
        ppop("ADetailer ControlNet guidance start", cond=0.0)
        ppop("ADetailer ControlNet guidance end", cond=1.0)

        if suffix:
            p = {k + suffix: v for k, v in p.items()}

        return p
|
132 |
+
|
133 |
+
|
134 |
+
class EnableChecker(BaseModel):
    """Inspects the first two script arguments to decide whether to run.

    NOTE(review): ``a0``/``a1`` appear to come in two calling conventions —
    either ``a0`` is already the args dict, or ``a0`` is the enable flag and
    ``a1`` is the args dict — confirm against the script caller.
    """

    a0: Union[bool, dict]
    a1: Any

    def is_enabled(self) -> bool:
        # ALL_ARGS[0].attr is "ad_model": enabled iff a real model is chosen.
        ad_model = ALL_ARGS[0].attr
        if isinstance(self.a0, dict):
            # a0 is already the args dict; no separate enable flag present.
            return self.a0.get(ad_model, "None") != "None"
        if not isinstance(self.a1, dict):
            return False
        # a0 is the enable checkbox, a1 is the args dict.
        return self.a0 and self.a1.get(ad_model, "None") != "None"
|
145 |
+
|
146 |
+
|
147 |
+
# (attribute, display name) pairs for every scriptable option.
# NOTE(review): the order presumably mirrors the order UI components are
# passed to the script — verify against the UI code before reordering.
_all_args = [
    ("ad_enable", "ADetailer enable"),
    ("ad_model", "ADetailer model"),
    ("ad_prompt", "ADetailer prompt"),
    ("ad_negative_prompt", "ADetailer negative prompt"),
    ("ad_confidence", "ADetailer confidence"),
    ("ad_mask_min_ratio", "ADetailer mask min ratio"),
    ("ad_mask_max_ratio", "ADetailer mask max ratio"),
    ("ad_x_offset", "ADetailer x offset"),
    ("ad_y_offset", "ADetailer y offset"),
    ("ad_dilate_erode", "ADetailer dilate/erode"),
    ("ad_mask_merge_invert", "ADetailer mask merge/invert"),
    ("ad_mask_blur", "ADetailer mask blur"),
    ("ad_denoising_strength", "ADetailer denoising strength"),
    ("ad_inpaint_only_masked", "ADetailer inpaint only masked"),
    ("ad_inpaint_only_masked_padding", "ADetailer inpaint padding"),
    ("ad_use_inpaint_width_height", "ADetailer use inpaint width/height"),
    ("ad_inpaint_width", "ADetailer inpaint width"),
    ("ad_inpaint_height", "ADetailer inpaint height"),
    ("ad_use_steps", "ADetailer use separate steps"),
    ("ad_steps", "ADetailer steps"),
    ("ad_use_cfg_scale", "ADetailer use separate CFG scale"),
    ("ad_cfg_scale", "ADetailer CFG scale"),
    ("ad_restore_face", "ADetailer restore face"),
    ("ad_controlnet_model", "ADetailer ControlNet model"),
    ("ad_controlnet_weight", "ADetailer ControlNet weight"),
    ("ad_controlnet_guidance_start", "ADetailer ControlNet guidance start"),
    ("ad_controlnet_guidance_end", "ADetailer ControlNet guidance end"),
]

# The enable flag is handled separately from the per-option args.
AD_ENABLE = Arg(*_all_args[0])
_args = [Arg(*args) for args in _all_args[1:]]
ALL_ARGS = ArgsList(_args)

# Dropdown choices for bounding-box sort order (indices match SortBy in mask.py).
BBOX_SORTBY = [
    "None",
    "Position (left to right)",
    "Position (center to edge)",
    "Area (large to small)",
]
# Dropdown choices for mask merge mode (indices match MergeInvert in mask.py).
MASK_MERGE_INVERT = ["None", "Merge", "Merge and Invert"]
|
extensions/adetailer/adetailer/common.py
ADDED
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from collections import OrderedDict
|
4 |
+
from dataclasses import dataclass, field
|
5 |
+
from pathlib import Path
|
6 |
+
from typing import Optional, Union
|
7 |
+
|
8 |
+
from huggingface_hub import hf_hub_download
|
9 |
+
from PIL import Image, ImageDraw
|
10 |
+
|
11 |
+
repo_id = "Bingsu/adetailer"
|
12 |
+
|
13 |
+
|
14 |
+
@dataclass
class PredictOutput:
    """Result of one detection-model prediction."""

    # Bounding boxes as [x1, y1, x2, y2], one per detection.
    bboxes: list[list[int | float]] = field(default_factory=list)
    # One PIL mask per detection, aligned with ``bboxes``.
    masks: list[Image.Image] = field(default_factory=list)
    # Optional visualization of the detections, if the predictor made one.
    preview: Optional[Image.Image] = None
|
19 |
+
|
20 |
+
|
21 |
+
def hf_download(file: str):
    """Fetch *file* from the ``repo_id`` Hugging Face repo.

    Returns the local cache path on success, or the sentinel string
    ``"INVALID"`` on any failure (offline, missing file, etc.) so callers
    can filter unavailable models out instead of crashing.
    """
    try:
        return hf_hub_download(repo_id, file)
    except Exception:
        # Best-effort download: any failure just marks the model invalid.
        return "INVALID"
|
27 |
+
|
28 |
+
|
29 |
+
def get_models(
    model_dir: Union[str, Path], huggingface: bool = True
) -> OrderedDict[str, Optional[str]]:
    """Build an ordered ``name -> path`` mapping of available detection models.

    Order: Hugging Face YOLO models first (entries whose download failed are
    dropped), then the mediapipe pseudo-models (path ``None`` — they need no
    file), then any local ``.pt``/``.pth`` files found under *model_dir*
    whose names do not shadow an existing entry.

    Parameters
    ----------
    model_dir: str | Path
        Directory searched recursively for user models; may not exist.
    huggingface: bool
        When True, include the bundled models downloaded from Hugging Face.
    """
    root = Path(model_dir)
    if root.is_dir():
        local_files = [
            f
            for f in root.rglob("*")
            if f.is_file() and f.suffix in (".pt", ".pth")
        ]
    else:
        local_files = []

    models = OrderedDict()
    if huggingface:
        for name in (
            "face_yolov8n.pt",
            "face_yolov8s.pt",
            "hand_yolov8n.pt",
            "person_yolov8n-seg.pt",
            "person_yolov8s-seg.pt",
        ):
            models[name] = hf_download(name)

    models["mediapipe_face_full"] = None
    models["mediapipe_face_short"] = None
    models["mediapipe_face_mesh"] = None

    # Drop models whose download failed (hf_download's "INVALID" sentinel).
    for name in [k for k, v in models.items() if v == "INVALID"]:
        del models[name]

    # Local models never override bundled entries of the same name.
    for f in local_files:
        models.setdefault(f.name, str(f))

    return models
|
71 |
+
|
72 |
+
|
73 |
+
def create_mask_from_bbox(
    bboxes: list[list[float]], shape: tuple[int, int]
) -> list[Image.Image]:
    """Render each bounding box as a filled white rectangle on a black mask.

    Parameters
    ----------
    bboxes: list[list[float]]
        Bounding boxes as ``[x1, y1, x2, y2]``.
    shape: tuple[int, int]
        (width, height) of each mask to create.

    Returns
    -------
    list[Image.Image]
        One grayscale ("L") mask per bounding box, in the same order.
    """
    result = []
    for box in bboxes:
        canvas = Image.new("L", shape, 0)
        ImageDraw.Draw(canvas).rectangle(box, fill=255)
        result.append(canvas)
    return result
|
98 |
+
|
99 |
+
|
100 |
+
def create_bbox_from_mask(
    masks: list[Image.Image], shape: tuple[int, int]
) -> list[list[int]]:
    """Compute the bounding box of each mask after resizing it to *shape*.

    Parameters
    ----------
    masks: list[Image.Image]
        Masks whose nonzero region defines the box.
    shape: tuple[int, int]
        (width, height) to resize each mask to before measuring.

    Returns
    -------
    list[list[int]]
        ``[x1, y1, x2, y2]`` per mask; masks with no nonzero pixels
        contribute no entry, so the result may be shorter than *masks*.
    """
    boxes = []
    for m in masks:
        box = m.resize(shape).getbbox()
        if box is not None:
            boxes.append(list(box))
    return boxes
|
extensions/adetailer/adetailer/mask.py
ADDED
@@ -0,0 +1,247 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from enum import IntEnum
|
4 |
+
from functools import partial, reduce
|
5 |
+
from math import dist
|
6 |
+
|
7 |
+
import cv2
|
8 |
+
import numpy as np
|
9 |
+
from PIL import Image, ImageChops
|
10 |
+
|
11 |
+
from adetailer.args import MASK_MERGE_INVERT
|
12 |
+
from adetailer.common import PredictOutput
|
13 |
+
|
14 |
+
|
15 |
+
class SortBy(IntEnum):
    # Bbox ordering strategies consumed by sort_bboxes().
    NONE = 0  # keep the detector's original order
    LEFT_TO_RIGHT = 1  # ascending left edge (x1)
    CENTER_TO_EDGE = 2  # nearest to the image center first
    AREA = 3  # largest box first
|
20 |
+
|
21 |
+
|
22 |
+
class MergeInvert(IntEnum):
    # Mask post-processing modes consumed by mask_merge_invert().
    NONE = 0  # leave the masks untouched
    MERGE = 1  # OR all masks into a single mask
    MERGE_INVERT = 2  # merge, then invert the merged mask
|
26 |
+
|
27 |
+
|
28 |
+
def _dilate(arr: np.ndarray, value: int) -> np.ndarray:
    """Dilate *arr* once with a ``value``x``value`` rectangular kernel."""
    rect = cv2.getStructuringElement(cv2.MORPH_RECT, (value, value))
    dilated = cv2.dilate(arr, rect, iterations=1)
    return dilated
|
31 |
+
|
32 |
+
|
33 |
+
def _erode(arr: np.ndarray, value: int) -> np.ndarray:
    """Erode *arr* once with a ``value``x``value`` rectangular kernel."""
    rect = cv2.getStructuringElement(cv2.MORPH_RECT, (value, value))
    eroded = cv2.erode(arr, rect, iterations=1)
    return eroded
|
36 |
+
|
37 |
+
|
38 |
+
def dilate_erode(img: Image.Image, value: int) -> Image.Image:
    """
    Grow or shrink the white region of a mask image.

    A positive *value* dilates the image by that kernel size, a negative one
    erodes it, and zero returns the input unchanged.

    Parameters
    ----------
    img: PIL.Image.Image
        the image to be processed
    value: int
        kernel size of dilation (> 0) or erosion (< 0)

    Returns
    -------
    PIL.Image.Image
        The image that has been dilated or eroded
    """
    if value == 0:
        return img

    data = np.array(img)
    if value > 0:
        data = _dilate(data, value)
    else:
        data = _erode(data, -value)

    return Image.fromarray(data)
|
63 |
+
|
64 |
+
|
65 |
+
def offset(img: Image.Image, x: int = 0, y: int = 0) -> Image.Image:
    """
    The offset function takes an image and offsets it by a given x(→) and y(↑) value.

    Parameters
    ----------
    img: Image.Image
        the (mask) image to offset
    x: int
        → positive values shift the content right
    y: int
        ↑ positive values shift the content up; PIL's y axis points down,
        hence the sign flip below

    Returns
    -------
    PIL.Image.Image
        A new image that is offset by x and y
    """
    return ImageChops.offset(img, x, -y)
|
84 |
+
|
85 |
+
|
86 |
+
def is_all_black(img: Image.Image) -> bool:
    """Return True when the mask contains no non-zero (white) pixel at all."""
    pixels = np.array(img)
    return cv2.countNonZero(pixels) == 0
|
89 |
+
|
90 |
+
|
91 |
+
def bbox_area(bbox: list[float]):
    """Area (width * height) of an ``[x1, y1, x2, y2]`` box."""
    x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]
    width = x2 - x1
    height = y2 - y1
    return width * height
|
93 |
+
|
94 |
+
|
95 |
+
def mask_preprocess(
    masks: list[Image.Image],
    kernel: int = 0,
    x_offset: int = 0,
    y_offset: int = 0,
    merge_invert: int | MergeInvert | str = MergeInvert.NONE,
) -> list[Image.Image]:
    """
    Offset, dilate/erode and finally merge/invert a list of masks.

    Parameters
    ----------
    masks: list[Image.Image]
        A list of masks
    kernel: int
        kernel size of dilation (> 0) or erosion (< 0)
    x_offset: int
        → horizontal offset in pixels
    y_offset: int
        ↑ vertical offset in pixels
    merge_invert: int | MergeInvert | str
        merge/invert mode applied last (see mask_merge_invert)

    Returns
    -------
    list[Image.Image]
        A list of processed masks; masks that became all-black after
        dilation/erosion are dropped
    """
    if not masks:
        return []

    if x_offset or y_offset:
        masks = [offset(mask, x_offset, y_offset) for mask in masks]

    if kernel:
        masks = [dilate_erode(mask, kernel) for mask in masks]
        masks = [mask for mask in masks if not is_all_black(mask)]

    return mask_merge_invert(masks, mode=merge_invert)
|
135 |
+
|
136 |
+
|
137 |
+
# Bbox sorting
|
138 |
+
def _key_left_to_right(bbox: list[float]) -> float:
|
139 |
+
"""
|
140 |
+
Left to right
|
141 |
+
|
142 |
+
Parameters
|
143 |
+
----------
|
144 |
+
bbox: list[float]
|
145 |
+
list of [x1, y1, x2, y2]
|
146 |
+
"""
|
147 |
+
return bbox[0]
|
148 |
+
|
149 |
+
|
150 |
+
def _key_center_to_edge(bbox: list[float], *, center: tuple[float, float]) -> float:
|
151 |
+
"""
|
152 |
+
Center to edge
|
153 |
+
|
154 |
+
Parameters
|
155 |
+
----------
|
156 |
+
bbox: list[float]
|
157 |
+
list of [x1, y1, x2, y2]
|
158 |
+
image: Image.Image
|
159 |
+
the image
|
160 |
+
"""
|
161 |
+
bbox_center = ((bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2)
|
162 |
+
return dist(center, bbox_center)
|
163 |
+
|
164 |
+
|
165 |
+
def _key_area(bbox: list[float]) -> float:
    """
    Sort key: negated area, so larger boxes sort first.

    Parameters
    ----------
    bbox: list[float]
        list of [x1, y1, x2, y2]
    """
    # Inlined bbox_area: (x2 - x1) * (y2 - y1), negated for descending sort.
    width = bbox[2] - bbox[0]
    height = bbox[3] - bbox[1]
    return -(width * height)
|
175 |
+
|
176 |
+
|
177 |
+
def sort_bboxes(
    pred: PredictOutput, order: int | SortBy = SortBy.NONE
) -> PredictOutput:
    """
    Reorder ``pred.bboxes`` and ``pred.masks`` (in lockstep) by *order*.

    Raises
    ------
    RuntimeError
        if *order* is not a recognized SortBy value.
    """
    # Nothing to do for NONE or fewer than two detections.
    if order == SortBy.NONE or len(pred.bboxes) <= 1:
        return pred

    if order == SortBy.LEFT_TO_RIGHT:
        sort_key = _key_left_to_right
    elif order == SortBy.CENTER_TO_EDGE:
        width, height = pred.preview.size
        sort_key = partial(_key_center_to_edge, center=(width / 2, height / 2))
    elif order == SortBy.AREA:
        sort_key = _key_area
    else:
        raise RuntimeError

    order_idx = sorted(
        range(len(pred.bboxes)), key=lambda i: sort_key(pred.bboxes[i])
    )
    pred.bboxes = [pred.bboxes[i] for i in order_idx]
    pred.masks = [pred.masks[i] for i in order_idx]
    return pred
|
199 |
+
|
200 |
+
|
201 |
+
# Filter by ratio
|
202 |
+
def is_in_ratio(bbox: list[float], low: float, high: float, orig_area: int) -> bool:
    """True when the box's area / *orig_area* lies within [low, high]."""
    # Inlined bbox_area: (x2 - x1) * (y2 - y1).
    area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
    ratio = area / orig_area
    return low <= ratio <= high
|
205 |
+
|
206 |
+
|
207 |
+
def filter_by_ratio(pred: PredictOutput, low: float, high: float) -> PredictOutput:
    """Drop detections whose bbox-to-image area ratio falls outside [low, high]."""
    if not pred.bboxes:
        return pred

    width, height = pred.preview.size
    image_area = width * height
    keep = [
        i
        for i in range(len(pred.bboxes))
        if is_in_ratio(pred.bboxes[i], low, high, image_area)
    ]
    pred.bboxes = [pred.bboxes[i] for i in keep]
    pred.masks = [pred.masks[i] for i in keep]
    return pred
|
218 |
+
|
219 |
+
|
220 |
+
# Merge / Invert
|
221 |
+
def mask_merge(masks: list[Image.Image]) -> list[Image.Image]:
    """OR all masks together and return the result as a one-element list."""
    merged = np.array(masks[0])
    for mask in masks[1:]:
        merged = cv2.bitwise_or(merged, np.array(mask))
    return [Image.fromarray(merged)]
|
225 |
+
|
226 |
+
|
227 |
+
def mask_invert(masks: list[Image.Image]) -> list[Image.Image]:
    """Invert every mask (white <-> black)."""
    inverted = []
    for mask in masks:
        inverted.append(ImageChops.invert(mask))
    return inverted
|
229 |
+
|
230 |
+
|
231 |
+
def mask_merge_invert(
    masks: list[Image.Image], mode: int | MergeInvert | str
) -> list[Image.Image]:
    """
    Apply a MergeInvert mode to *masks*.

    A string *mode* is resolved via ``MASK_MERGE_INVERT.index`` (raises
    ValueError for unknown names); an unrecognized numeric mode raises
    RuntimeError.
    """
    if isinstance(mode, str):
        mode = MASK_MERGE_INVERT.index(mode)

    if not masks or mode == MergeInvert.NONE:
        return masks
    if mode == MergeInvert.MERGE:
        return mask_merge(masks)
    if mode == MergeInvert.MERGE_INVERT:
        return mask_invert(mask_merge(masks))
    raise RuntimeError
|
extensions/adetailer/adetailer/mediapipe.py
ADDED
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from functools import partial
|
4 |
+
|
5 |
+
import numpy as np
|
6 |
+
from PIL import Image, ImageDraw
|
7 |
+
|
8 |
+
from adetailer import PredictOutput
|
9 |
+
from adetailer.common import create_bbox_from_mask, create_mask_from_bbox
|
10 |
+
|
11 |
+
|
12 |
+
def mediapipe_predict(
    model_type: str, image: Image.Image, confidence: float = 0.3
) -> PredictOutput:
    """Dispatch to the mediapipe face detector named by *model_type*."""
    dispatch = {
        "mediapipe_face_short": partial(mediapipe_face_detection, 0),
        "mediapipe_face_full": partial(mediapipe_face_detection, 1),
        "mediapipe_face_mesh": mediapipe_face_mesh,
    }
    try:
        detector = dispatch[model_type]
    except KeyError:
        raise RuntimeError(
            f"[-] ADetailer: Invalid mediapipe model type: {model_type}"
        ) from None
    return detector(image, confidence)
|
24 |
+
|
25 |
+
|
26 |
+
def mediapipe_face_detection(
    model_type: int, image: Image.Image, confidence: float = 0.3
) -> PredictOutput:
    """
    Detect faces with mediapipe's FaceDetection solution.

    *model_type* 0 selects the short-range model, 1 the full-range one.
    Returns an empty PredictOutput when no face is found.
    """
    import mediapipe as mp

    width, height = image.size

    face_detection = mp.solutions.face_detection
    draw_util = mp.solutions.drawing_utils

    arr = np.array(image)

    with face_detection.FaceDetection(
        model_selection=model_type, min_detection_confidence=confidence
    ) as detector:
        result = detector.process(arr)

    if result.detections is None:
        return PredictOutput()

    preview_arr = arr.copy()
    bboxes = []
    for detection in result.detections:
        # Annotate the preview in place.
        draw_util.draw_detection(preview_arr, detection)

        # mediapipe returns normalized coordinates; scale to pixels.
        rel = detection.location_data.relative_bounding_box
        x1 = rel.xmin * width
        y1 = rel.ymin * height
        x2 = x1 + rel.width * width
        y2 = y1 + rel.height * height
        bboxes.append([x1, y1, x2, y2])

    masks = create_mask_from_bbox(bboxes, image.size)
    preview = Image.fromarray(preview_arr)

    return PredictOutput(bboxes=bboxes, masks=masks, preview=preview)
|
66 |
+
|
67 |
+
|
68 |
+
def mediapipe_face_mesh(image: Image.Image, confidence: float = 0.3) -> PredictOutput:
    """Detect up to 20 faces with mediapipe FaceMesh and build convex-hull masks."""
    # Imported lazily so the extension loads even without these packages.
    import mediapipe as mp
    from scipy.spatial import ConvexHull

    mp_face_mesh = mp.solutions.face_mesh
    draw_util = mp.solutions.drawing_utils
    drawing_styles = mp.solutions.drawing_styles

    w, h = image.size

    with mp_face_mesh.FaceMesh(
        static_image_mode=True, max_num_faces=20, min_detection_confidence=confidence
    ) as face_mesh:
        arr = np.array(image)
        pred = face_mesh.process(arr)

        if pred.multi_face_landmarks is None:
            return PredictOutput()

        preview = arr.copy()
        masks = []

        for landmarks in pred.multi_face_landmarks:
            # Draw the face tesselation onto the preview array in place.
            draw_util.draw_landmarks(
                image=preview,
                landmark_list=landmarks,
                connections=mp_face_mesh.FACEMESH_TESSELATION,
                landmark_drawing_spec=None,
                connection_drawing_spec=drawing_styles.get_default_face_mesh_tesselation_style(),
            )

            # Landmarks are normalized; scale to pixels and take the convex
            # hull of all landmark points as the face outline.
            points = np.array([(land.x * w, land.y * h) for land in landmarks.landmark])
            hull = ConvexHull(points)
            vertices = hull.vertices
            outline = list(zip(points[vertices, 0], points[vertices, 1]))

            # Fill the hull polygon to get a per-face "L" mask.
            mask = Image.new("L", image.size, "black")
            draw = ImageDraw.Draw(mask)
            draw.polygon(outline, fill="white")
            masks.append(mask)

        bboxes = create_bbox_from_mask(masks, image.size)
        preview = Image.fromarray(preview)
        return PredictOutput(bboxes=bboxes, masks=masks, preview=preview)
|
extensions/adetailer/adetailer/ui.py
ADDED
@@ -0,0 +1,451 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from functools import partial
|
4 |
+
from types import SimpleNamespace
|
5 |
+
from typing import Any
|
6 |
+
|
7 |
+
import gradio as gr
|
8 |
+
|
9 |
+
from adetailer import AFTER_DETAILER, __version__
|
10 |
+
from adetailer.args import AD_ENABLE, ALL_ARGS, MASK_MERGE_INVERT
|
11 |
+
from controlnet_ext import controlnet_exists, get_cn_models
|
12 |
+
|
13 |
+
|
14 |
+
class Widgets(SimpleNamespace):
    """Namespace holding every gradio component of one ADetailer model tab."""

    def tolist(self):
        """Return the components in ALL_ARGS attribute order."""
        widgets = []
        for attr in ALL_ARGS.attrs:
            widgets.append(getattr(self, attr))
        return widgets
|
17 |
+
|
18 |
+
|
19 |
+
def gr_interactive(value: bool = True):
    """Return a gradio update that sets a component's `interactive` flag."""
    update = gr.update(interactive=value)
    return update
|
21 |
+
|
22 |
+
|
23 |
+
def ordinal(n: int) -> str:
    """Return *n* with its English ordinal suffix (1st, 2nd, 3rd, 4th, 11th, ...)."""
    # 11-13 always take "th" even though they end in 1/2/3.
    if 11 <= n % 100 <= 13:
        suf = "th"
    else:
        suf = {1: "st", 2: "nd", 3: "rd"}.get(n % 10, "th")
    return f"{n}{suf}"
|
26 |
+
|
27 |
+
|
28 |
+
def suffix(n: int, c: str = " ") -> str:
    """Label suffix for slot *n*: empty for the first slot, else e.g. " 2nd"."""
    if n == 0:
        return ""
    return c + ordinal(n + 1)
|
30 |
+
|
31 |
+
|
32 |
+
def on_widget_change(state: dict, value: Any, *, attr: str):
    """Record a single widget's new *value* under *attr* in the per-tab state."""
    state.update({attr: value})
    return state
|
35 |
+
|
36 |
+
|
37 |
+
def on_generate_click(state: dict, *values: Any):
    """Snapshot every widget value into *state* when generation starts."""
    # zip truncates to the shorter of (attrs, values), matching the original
    # pairwise assignment loop.
    state.update(zip(ALL_ARGS.attrs, values))
    return state
|
41 |
+
|
42 |
+
|
43 |
+
def elem_id(item_id: str, n: int, is_img2img: bool) -> str:
    """Build a unique gradio element id for slot *n* of the t2i/i2i script."""
    tab = "img2img" if is_img2img else "txt2img"
    return f"script_{tab}_adetailer_{item_id}{suffix(n, '_')}"
|
47 |
+
|
48 |
+
|
49 |
+
def adui(
    num_models: int,
    is_img2img: bool,
    model_list: list[str],
    t2i_button: gr.Button,
    i2i_button: gr.Button,
):
    """Build the top-level ADetailer accordion with one tab per model slot.

    Returns (components, infotext_fields); components is
    [enable_checkbox, state_dict_1, state_dict_2, ...].
    """
    states = []
    infotext_fields = []
    # Element ids for the shared (non-per-tab) widgets use slot 0.
    eid = partial(elem_id, n=0, is_img2img=is_img2img)

    with gr.Accordion(AFTER_DETAILER, open=False, elem_id=eid("ad_main_accordion")):
        with gr.Row():
            with gr.Column(scale=6):
                ad_enable = gr.Checkbox(
                    label="Enable ADetailer",
                    value=False,
                    visible=True,
                    elem_id=eid("ad_enable"),
                )

            with gr.Column(scale=1, min_width=180):
                gr.Markdown(
                    f"v{__version__}",
                    elem_id=eid("ad_version"),
                )

        infotext_fields.append((ad_enable, AD_ENABLE.name))

        with gr.Group(), gr.Tabs():
            for n in range(num_models):
                with gr.Tab(ordinal(n + 1)):
                    state, infofields = one_ui_group(
                        n=n,
                        is_img2img=is_img2img,
                        model_list=model_list,
                        t2i_button=t2i_button,
                        i2i_button=i2i_button,
                    )

                states.append(state)
                infotext_fields.extend(infofields)

    # components: [bool, dict, dict, ...]
    components = [ad_enable] + states
    return components, infotext_fields
|
95 |
+
|
96 |
+
|
97 |
+
def one_ui_group(
    n: int,
    is_img2img: bool,
    model_list: list[str],
    t2i_button: gr.Button,
    i2i_button: gr.Button,
):
    """Build all widgets for one model slot; returns (state, infotext_fields)."""
    w = Widgets()
    state = gr.State({})
    eid = partial(elem_id, n=n, is_img2img=is_img2img)

    with gr.Row():
        # The first slot defaults to a real model; later slots default to "None".
        model_choices = model_list + ["None"] if n == 0 else ["None"] + model_list

        w.ad_model = gr.Dropdown(
            label="ADetailer model" + suffix(n),
            choices=model_choices,
            value=model_choices[0],
            visible=True,
            type="value",
            elem_id=eid("ad_model"),
        )

    with gr.Group():
        with gr.Row(elem_id=eid("ad_toprow_prompt")):
            w.ad_prompt = gr.Textbox(
                label="ad_prompt" + suffix(n),
                show_label=False,
                lines=3,
                placeholder="ADetailer prompt" + suffix(n),
                elem_id=eid("ad_prompt"),
            )

        with gr.Row(elem_id=eid("ad_toprow_negative_prompt")):
            w.ad_negative_prompt = gr.Textbox(
                label="ad_negative_prompt" + suffix(n),
                show_label=False,
                lines=2,
                placeholder="ADetailer negative prompt" + suffix(n),
                elem_id=eid("ad_negative_prompt"),
            )

    with gr.Group():
        with gr.Accordion(
            "Detection", open=False, elem_id=eid("ad_detection_accordion")
        ):
            detection(w, n, is_img2img)

        with gr.Accordion(
            "Mask Preprocessing",
            open=False,
            elem_id=eid("ad_mask_preprocessing_accordion"),
        ):
            mask_preprocessing(w, n, is_img2img)

        with gr.Accordion(
            "Inpainting", open=False, elem_id=eid("ad_inpainting_accordion")
        ):
            inpainting(w, n, is_img2img)

    with gr.Group():
        controlnet(w, n, is_img2img)

    # Keep the state dict in sync with every widget change.
    for attr in ALL_ARGS.attrs:
        widget = getattr(w, attr)
        on_change = partial(on_widget_change, attr=attr)
        widget.change(
            fn=on_change, inputs=[state, widget], outputs=[state], queue=False
        )

    # Snapshot all widget values when the matching generate button is pressed.
    all_inputs = [state] + w.tolist()
    target_button = i2i_button if is_img2img else t2i_button
    target_button.click(
        fn=on_generate_click, inputs=all_inputs, outputs=state, queue=False
    )

    infotext_fields = [(getattr(w, attr), name + suffix(n)) for attr, name in ALL_ARGS]

    return state, infotext_fields
|
176 |
+
|
177 |
+
|
178 |
+
def detection(w: Widgets, n: int, is_img2img: bool):
    """Widgets for the Detection accordion: confidence and mask area-ratio filters."""
    eid = partial(elem_id, n=n, is_img2img=is_img2img)

    with gr.Row():
        with gr.Column():
            w.ad_confidence = gr.Slider(
                label="Detection model confidence threshold" + suffix(n),
                minimum=0.0,
                maximum=1.0,
                step=0.01,
                value=0.3,
                visible=True,
                elem_id=eid("ad_confidence"),
            )

        with gr.Column(variant="compact"):
            w.ad_mask_min_ratio = gr.Slider(
                label="Mask min area ratio" + suffix(n),
                minimum=0.0,
                maximum=1.0,
                step=0.001,
                value=0.0,
                visible=True,
                elem_id=eid("ad_mask_min_ratio"),
            )
            w.ad_mask_max_ratio = gr.Slider(
                label="Mask max area ratio" + suffix(n),
                minimum=0.0,
                maximum=1.0,
                step=0.001,
                value=1.0,
                visible=True,
                elem_id=eid("ad_mask_max_ratio"),
            )
|
212 |
+
|
213 |
+
|
214 |
+
def mask_preprocessing(w: Widgets, n: int, is_img2img: bool):
    """Widgets for the Mask Preprocessing accordion: offsets, dilation, merge mode."""
    eid = partial(elem_id, n=n, is_img2img=is_img2img)

    with gr.Group():
        with gr.Row():
            with gr.Column(variant="compact"):
                w.ad_x_offset = gr.Slider(
                    label="Mask x(→) offset" + suffix(n),
                    minimum=-200,
                    maximum=200,
                    step=1,
                    value=0,
                    visible=True,
                    elem_id=eid("ad_x_offset"),
                )
                w.ad_y_offset = gr.Slider(
                    label="Mask y(↑) offset" + suffix(n),
                    minimum=-200,
                    maximum=200,
                    step=1,
                    value=0,
                    visible=True,
                    elem_id=eid("ad_y_offset"),
                )

            with gr.Column(variant="compact"):
                w.ad_dilate_erode = gr.Slider(
                    label="Mask erosion (-) / dilation (+)" + suffix(n),
                    minimum=-128,
                    maximum=128,
                    step=4,
                    value=4,
                    visible=True,
                    elem_id=eid("ad_dilate_erode"),
                )

        with gr.Row():
            # Choices come from MASK_MERGE_INVERT; "None" disables merging.
            w.ad_mask_merge_invert = gr.Radio(
                label="Mask merge mode" + suffix(n),
                choices=MASK_MERGE_INVERT,
                value="None",
                elem_id=eid("ad_mask_merge_invert"),
            )
|
257 |
+
|
258 |
+
|
259 |
+
def inpainting(w: Widgets, n: int, is_img2img: bool):
    """Widgets for the Inpainting accordion: blur, denoising and optional
    per-detailer width/height, steps and CFG overrides.

    The override sliders are toggled interactive/non-interactive by their
    companion "Use separate ..." checkboxes.
    """
    eid = partial(elem_id, n=n, is_img2img=is_img2img)

    with gr.Group():
        with gr.Row():
            w.ad_mask_blur = gr.Slider(
                label="Inpaint mask blur" + suffix(n),
                minimum=0,
                maximum=64,
                step=1,
                value=4,
                visible=True,
                elem_id=eid("ad_mask_blur"),
            )

            w.ad_denoising_strength = gr.Slider(
                label="Inpaint denoising strength" + suffix(n),
                minimum=0.0,
                maximum=1.0,
                step=0.01,
                value=0.4,
                visible=True,
                elem_id=eid("ad_denoising_strength"),
            )

        with gr.Row():
            with gr.Column(variant="compact"):
                w.ad_inpaint_only_masked = gr.Checkbox(
                    label="Inpaint only masked" + suffix(n),
                    value=True,
                    visible=True,
                    # NOTE: legacy element id ("full_res") kept for compatibility.
                    elem_id=eid("ad_inpaint_full_res"),
                )
                w.ad_inpaint_only_masked_padding = gr.Slider(
                    label="Inpaint only masked padding, pixels" + suffix(n),
                    minimum=0,
                    maximum=256,
                    step=4,
                    value=32,
                    visible=True,
                    elem_id=eid("ad_inpaint_full_res_padding"),
                )

                w.ad_inpaint_only_masked.change(
                    gr_interactive,
                    inputs=w.ad_inpaint_only_masked,
                    outputs=w.ad_inpaint_only_masked_padding,
                    queue=False,
                )

            with gr.Column(variant="compact"):
                w.ad_use_inpaint_width_height = gr.Checkbox(
                    label="Use separate width/height" + suffix(n),
                    value=False,
                    visible=True,
                    elem_id=eid("ad_use_inpaint_width_height"),
                )

                w.ad_inpaint_width = gr.Slider(
                    label="inpaint width" + suffix(n),
                    minimum=64,
                    maximum=2048,
                    step=4,
                    value=512,
                    visible=True,
                    elem_id=eid("ad_inpaint_width"),
                )

                w.ad_inpaint_height = gr.Slider(
                    label="inpaint height" + suffix(n),
                    minimum=64,
                    maximum=2048,
                    step=4,
                    value=512,
                    visible=True,
                    elem_id=eid("ad_inpaint_height"),
                )

                # Both size sliders follow the checkbox state together.
                w.ad_use_inpaint_width_height.change(
                    lambda value: (gr_interactive(value), gr_interactive(value)),
                    inputs=w.ad_use_inpaint_width_height,
                    outputs=[w.ad_inpaint_width, w.ad_inpaint_height],
                    queue=False,
                )

        with gr.Row():
            with gr.Column(variant="compact"):
                w.ad_use_steps = gr.Checkbox(
                    label="Use separate steps" + suffix(n),
                    value=False,
                    visible=True,
                    elem_id=eid("ad_use_steps"),
                )

                w.ad_steps = gr.Slider(
                    label="ADetailer steps" + suffix(n),
                    minimum=1,
                    maximum=150,
                    step=1,
                    value=28,
                    visible=True,
                    elem_id=eid("ad_steps"),
                )

                w.ad_use_steps.change(
                    gr_interactive,
                    inputs=w.ad_use_steps,
                    outputs=w.ad_steps,
                    queue=False,
                )

            with gr.Column(variant="compact"):
                w.ad_use_cfg_scale = gr.Checkbox(
                    label="Use separate CFG scale" + suffix(n),
                    value=False,
                    visible=True,
                    elem_id=eid("ad_use_cfg_scale"),
                )

                w.ad_cfg_scale = gr.Slider(
                    label="ADetailer CFG scale" + suffix(n),
                    minimum=0.0,
                    maximum=30.0,
                    step=0.5,
                    value=7.0,
                    visible=True,
                    elem_id=eid("ad_cfg_scale"),
                )

                w.ad_use_cfg_scale.change(
                    gr_interactive,
                    inputs=w.ad_use_cfg_scale,
                    outputs=w.ad_cfg_scale,
                    queue=False,
                )

        with gr.Row():
            w.ad_restore_face = gr.Checkbox(
                label="Restore faces after ADetailer" + suffix(n),
                value=False,
                elem_id=eid("ad_restore_face"),
            )
|
401 |
+
|
402 |
+
|
403 |
+
def controlnet(w: Widgets, n: int, is_img2img: bool):
    """Widgets for the ControlNet panel: model, weight, guidance start/end.

    All widgets are non-interactive when the controlnet extension is absent.
    """
    eid = partial(elem_id, n=n, is_img2img=is_img2img)
    cn_models = ["None"] + get_cn_models()

    with gr.Row(variant="panel"):
        with gr.Column(variant="compact"):
            w.ad_controlnet_model = gr.Dropdown(
                label="ControlNet model" + suffix(n),
                choices=cn_models,
                value="None",
                visible=True,
                type="value",
                interactive=controlnet_exists,
                elem_id=eid("ad_controlnet_model"),
            )

            w.ad_controlnet_weight = gr.Slider(
                label="ControlNet weight" + suffix(n),
                minimum=0.0,
                maximum=1.0,
                step=0.01,
                value=1.0,
                visible=True,
                interactive=controlnet_exists,
                elem_id=eid("ad_controlnet_weight"),
            )

        with gr.Column(variant="compact"):
            w.ad_controlnet_guidance_start = gr.Slider(
                label="ControlNet guidance start" + suffix(n),
                minimum=0.0,
                maximum=1.0,
                step=0.01,
                value=0.0,
                visible=True,
                interactive=controlnet_exists,
                elem_id=eid("ad_controlnet_guidance_start"),
            )

            w.ad_controlnet_guidance_end = gr.Slider(
                label="ControlNet guidance end" + suffix(n),
                minimum=0.0,
                maximum=1.0,
                step=0.01,
                value=1.0,
                visible=True,
                interactive=controlnet_exists,
                elem_id=eid("ad_controlnet_guidance_end"),
            )
|
extensions/adetailer/adetailer/ultralytics.py
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from pathlib import Path
|
4 |
+
|
5 |
+
import cv2
|
6 |
+
from PIL import Image
|
7 |
+
|
8 |
+
from adetailer import PredictOutput
|
9 |
+
from adetailer.common import create_mask_from_bbox
|
10 |
+
|
11 |
+
|
12 |
+
def ultralytics_predict(
    model_path: str | Path,
    image: Image.Image,
    confidence: float = 0.3,
    device: str = "",
) -> PredictOutput:
    """
    Run a YOLO detector/segmenter on *image*.

    Returns an empty PredictOutput when nothing is detected; otherwise the
    bboxes, per-detection masks (segmentation output when the model provides
    it, else rectangles from the boxes) and an annotated preview image.
    """
    from ultralytics import YOLO

    model = YOLO(str(model_path))
    result = model(image, conf=confidence, device=device)[0]

    xyxy = result.boxes.xyxy.cpu().numpy()
    if xyxy.size == 0:
        return PredictOutput()
    bboxes = xyxy.tolist()

    if result.masks is None:
        masks = create_mask_from_bbox(bboxes, image.size)
    else:
        masks = mask_to_pil(result.masks.data, image.size)

    # result.plot() returns BGR; convert for PIL.
    annotated = cv2.cvtColor(result.plot(), cv2.COLOR_BGR2RGB)
    preview = Image.fromarray(annotated)

    return PredictOutput(bboxes=bboxes, masks=masks, preview=preview)
|
39 |
+
|
40 |
+
|
41 |
+
def mask_to_pil(masks, shape: tuple[int, int]) -> list[Image.Image]:
    """
    Convert a stack of float mask tensors into resized PIL "L" images.

    Parameters
    ----------
    masks: torch.Tensor, dtype=torch.float32, shape=(N, H, W).
        The device can be CUDA, but `to_pil_image` takes care of that.
    shape: tuple[int, int]
        (width, height) of the original image
    """
    from torchvision.transforms.functional import to_pil_image

    pil_masks = []
    for mask in masks:
        pil_masks.append(to_pil_image(mask, mode="L").resize(shape))
    return pil_masks
|
extensions/adetailer/controlnet_ext/__init__.py
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from .controlnet_ext import ControlNetExt, controlnet_exists, get_cn_models
|
2 |
+
|
3 |
+
__all__ = [
|
4 |
+
"ControlNetExt",
|
5 |
+
"controlnet_exists",
|
6 |
+
"get_cn_models",
|
7 |
+
]
|
extensions/adetailer/controlnet_ext/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (287 Bytes). View file
|
|
extensions/adetailer/controlnet_ext/__pycache__/controlnet_ext.cpython-310.pyc
ADDED
Binary file (4.02 kB). View file
|
|
extensions/adetailer/controlnet_ext/__pycache__/restore.cpython-310.pyc
ADDED
Binary file (1.76 kB). View file
|
|
extensions/adetailer/controlnet_ext/controlnet_ext.py
ADDED
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
import importlib
|
4 |
+
import re
|
5 |
+
from functools import lru_cache
|
6 |
+
from pathlib import Path
|
7 |
+
|
8 |
+
from modules import extensions, sd_models, shared
|
9 |
+
from modules.paths import data_path, models_path, script_path
|
10 |
+
|
11 |
+
# Webui extension directories.
ext_path = Path(data_path, "extensions")
ext_builtin_path = Path(script_path, "extensions-builtin")
# Filled in below when an active sd-webui-controlnet extension is found.
controlnet_exists = False
controlnet_path = None
cn_base_path = ""

for extension in extensions.active():
    if not extension.enabled:
        continue
    # For cases like sd-webui-controlnet-master
    if "sd-webui-controlnet" in extension.name:
        controlnet_exists = True
        controlnet_path = Path(extension.path)
        # Dotted import prefix, e.g. "extensions.sd-webui-controlnet".
        cn_base_path = ".".join(controlnet_path.parts[-2:])
        break

# Model-name substring -> preprocessor module ADetailer pairs with it
# (None means no preprocessor).
cn_model_module = {
    "inpaint": "inpaint_global_harmonious",
    "scribble": "t2ia_sketch_pidi",
    "lineart": "lineart_coarse",
    "openpose": "openpose_full",
    "tile": None,
}
# Matches any ControlNet model name containing one of the supported kinds.
cn_model_regex = re.compile("|".join(cn_model_module.keys()))
|
35 |
+
|
36 |
+
|
37 |
+
class ControlNetExt:
    """Thin wrapper around sd-webui-controlnet's `external_code` API."""

    def __init__(self):
        # "None" sentinel keeps the UI dropdown usable even when no
        # ControlNet models are available.
        self.cn_models = ["None"]
        self.cn_available = False
        self.external_cn = None

    def init_controlnet(self):
        """Import ControlNet's `external_code` module and list its models.

        Only models whose names match a supported family (see
        `cn_model_regex`) are kept.
        """
        module_path = f"{cn_base_path}.scripts.external_code"
        self.external_cn = importlib.import_module(module_path, "external_code")
        self.cn_available = True
        discovered = self.external_cn.get_models()
        self.cn_models += [name for name in discovered if cn_model_regex.search(name)]

    def update_scripts_args(
        self, p, model: str, weight: float, guidance_start: float, guidance_end: float
    ):
        """Attach one ControlNet unit for `model` to the processing object `p`.

        No-op when the model is "None" or ControlNet was never initialized.
        """
        if model == "None" or not self.cn_available:
            return

        # Preprocessor for the first model family whose key appears in the
        # model name; None when no family matches.
        module = next(
            (mod for family, mod in cn_model_module.items() if family in model),
            None,
        )

        unit = self.external_cn.ControlNetUnit(
            model=model,
            weight=weight,
            control_mode=self.external_cn.ControlMode.BALANCED,
            module=module,
            guidance_start=guidance_start,
            guidance_end=guidance_end,
            pixel_perfect=True,
        )
        self.external_cn.update_cn_script_in_processing(p, [unit])
|
76 |
+
|
77 |
+
|
78 |
+
def get_cn_model_dirs() -> list[Path]:
    """Return every directory that may contain ControlNet model files.

    Always includes `models/ControlNet`; the legacy directory inside the
    extension and any user-configured paths are appended when set.
    """
    # Legacy location bundled with the extension itself (pre-models_path).
    legacy_dir = controlnet_path.joinpath("models") if controlnet_path is not None else None
    # User-configured locations from the webui settings.
    opt_dir = shared.opts.data.get("control_net_models_path", "")
    cli_dir = shared.opts.data.get("controlnet_dir", "")

    dirs = [Path(models_path, "ControlNet")]
    dirs.extend(Path(d) for d in (legacy_dir, opt_dir, cli_dir) if d)
    return dirs
|
93 |
+
|
94 |
+
|
95 |
+
@lru_cache
def _get_cn_models() -> list[str]:
    """Scan the model directories and return ``"<stem> [<hash>]"`` labels.

    Approximates ControlNet's own
    ``list(global_state.cn_models_names.values())``, which we cannot import
    directly. Cached because directory scanning and hashing are expensive.
    """
    allowed_exts = (".pt", ".pth", ".ckpt", ".safetensors")
    # Optional substring filter configured on the ControlNet settings page.
    name_filter = shared.opts.data.get("control_net_models_name_filter", "")
    name_filter = name_filter.strip(" ").lower()

    found = []
    for root in get_cn_model_dirs():
        if not root.exists():
            continue
        for candidate in root.rglob("*"):
            if not candidate.is_file():
                continue
            if candidate.suffix not in allowed_exts:
                continue
            # Only model families this extension knows how to drive.
            if not cn_model_regex.search(candidate.name):
                continue
            if name_filter and name_filter not in candidate.name.lower():
                continue
            found.append(candidate)

    found.sort(key=lambda path: path.name)
    # Label format matches ControlNet's "<name> [<short hash>]".
    return [f"{p.stem} [{sd_models.model_hash(p)}]" for p in found]
|
129 |
+
|
130 |
+
|
131 |
+
def get_cn_models() -> list[str]:
    """Public accessor: cached model labels if ControlNet is installed, else []."""
    return _get_cn_models() if controlnet_exists else []
|