34d8b59e470cb8bb29febe9216fd3aad5fab95fd4ca35aa41b47b4eb754cdc8b
- extensions/microsoftexcel-supermerger/elemental_en.md +118 -0
- extensions/microsoftexcel-supermerger/elemental_ja.md +119 -0
- extensions/microsoftexcel-supermerger/install.py +7 -0
- extensions/microsoftexcel-supermerger/sample.txt +95 -0
- extensions/microsoftexcel-supermerger/scripts/__pycache__/supermerger.cpython-310.pyc +0 -0
- extensions/microsoftexcel-supermerger/scripts/mbwpresets.txt +39 -0
- extensions/microsoftexcel-supermerger/scripts/mbwpresets_master.txt +39 -0
- extensions/microsoftexcel-supermerger/scripts/mergers/__pycache__/mergers.cpython-310.pyc +0 -0
- extensions/microsoftexcel-supermerger/scripts/mergers/__pycache__/model_util.cpython-310.pyc +0 -0
- extensions/microsoftexcel-supermerger/scripts/mergers/__pycache__/pluslora.cpython-310.pyc +0 -0
- extensions/microsoftexcel-supermerger/scripts/mergers/__pycache__/xyplot.cpython-310.pyc +0 -0
- extensions/microsoftexcel-supermerger/scripts/mergers/mergers.py +699 -0
- extensions/microsoftexcel-supermerger/scripts/mergers/model_util.py +928 -0
- extensions/microsoftexcel-supermerger/scripts/mergers/pluslora.py +1298 -0
- extensions/microsoftexcel-supermerger/scripts/mergers/xyplot.py +513 -0
- extensions/microsoftexcel-supermerger/scripts/supermerger.py +552 -0
- extensions/microsoftexcel-tunnels/.gitignore +176 -0
- extensions/microsoftexcel-tunnels/.pre-commit-config.yaml +25 -0
- extensions/microsoftexcel-tunnels/LICENSE.md +22 -0
- extensions/microsoftexcel-tunnels/README.md +21 -0
- extensions/microsoftexcel-tunnels/__pycache__/preload.cpython-310.pyc +0 -0
- extensions/microsoftexcel-tunnels/install.py +4 -0
- extensions/microsoftexcel-tunnels/preload.py +21 -0
- extensions/microsoftexcel-tunnels/pyproject.toml +25 -0
- extensions/microsoftexcel-tunnels/scripts/__pycache__/ssh_tunnel.cpython-310.pyc +0 -0
- extensions/microsoftexcel-tunnels/scripts/__pycache__/try_cloudflare.cpython-310.pyc +0 -0
- extensions/microsoftexcel-tunnels/scripts/ssh_tunnel.py +81 -0
- extensions/microsoftexcel-tunnels/scripts/try_cloudflare.py +15 -0
- extensions/microsoftexcel-tunnels/ssh_tunnel.py +86 -0
- extensions/put extensions here.txt +0 -0
- extensions/sd-webui-lora-block-weight/README.md +350 -0
- extensions/sd-webui-lora-block-weight/scripts/Roboto-Regular.ttf +0 -0
- extensions/sd-webui-lora-block-weight/scripts/__pycache__/lora_block_weight.cpython-310.pyc +0 -0
- extensions/sd-webui-lora-block-weight/scripts/elempresets.txt +7 -0
- extensions/sd-webui-lora-block-weight/scripts/lbwpresets.txt +10 -0
- extensions/sd-webui-lora-block-weight/scripts/lora_block_weight.py +744 -0
- extensions/stable-diffusion-webui-composable-lora/.gitignore +129 -0
- extensions/stable-diffusion-webui-composable-lora/LICENSE +21 -0
- extensions/stable-diffusion-webui-composable-lora/README.md +25 -0
- extensions/stable-diffusion-webui-composable-lora/__pycache__/composable_lora.cpython-310.pyc +0 -0
- extensions/stable-diffusion-webui-composable-lora/composable_lora.py +165 -0
- extensions/stable-diffusion-webui-composable-lora/scripts/__pycache__/composable_lora_script.cpython-310.pyc +0 -0
- extensions/stable-diffusion-webui-composable-lora/scripts/composable_lora_script.py +57 -0
- extensions/stable-diffusion-webui-images-browser/.gitignore +6 -0
- extensions/stable-diffusion-webui-images-browser/README.md +61 -0
- extensions/stable-diffusion-webui-images-browser/install.py +9 -0
- extensions/stable-diffusion-webui-images-browser/javascript/image_browser.js +770 -0
- extensions/stable-diffusion-webui-images-browser/req_IR.txt +1 -0
- extensions/stable-diffusion-webui-images-browser/scripts/__pycache__/image_browser.cpython-310.pyc +0 -0
- extensions/stable-diffusion-webui-images-browser/scripts/image_browser.py +1717 -0
extensions/microsoftexcel-supermerger/elemental_en.md
ADDED
@@ -0,0 +1,118 @@
# Elemental Merge
- This is a block-by-block merge that goes beyond block-by-block merging.

In a block-by-block merge, the merge ratio can be changed for each of the 25 blocks, but each block in turn consists of multiple elements, and in principle the ratio can also be changed per element. There are more than 600 elements, though, and it was doubtful whether they could be handled by hand, but we implemented it anyway. Merging element by element out of the blue is not recommended; use it as a final adjustment when you run into a problem that block-by-block merging cannot solve.
The following images show the result of changing the elements in the OUT05 block. The leftmost image is without merging, the second changes all of OUT05 (i.e. a normal block-by-block merge), and the rest are elemental merges. As shown in the table below, attn2 and the like contain several more elements.
![](https://raw.githubusercontent.com/hako-mikan/sd-webui-supermerger/images/sample1.jpg)
## Usage
Note that elemental merging works for both normal and block-by-block merging; it is computed last, so it overwrites the values specified for a block-by-block merge.

Set it in the Elemental Merge text box. Note that if text is set here, it is applied automatically. Each element is listed in the table below, but it is not necessary to enter the full name of an element.
You can check whether the effect is applied properly by enabling the "print change" check. If this check is enabled, the applied elements are printed to the command prompt during the merge.

### Format
Blocks:Element:Ratio, Blocks:Element:Ratio,...
or
Blocks:Element:Ratio
Blocks:Element:Ratio
Blocks:Element:Ratio

Multiple specifications can be given by separating them with commas or newlines; commas and newlines may be mixed.
Blocks are specified in uppercase: BASE, IN00-IN11, M00, OUT00-OUT11. If the blocks field is left blank, the specification applies to all blocks. Multiple blocks can be specified by separating them with spaces.
Similarly, multiple elements can be specified by separating them with spaces.
Partial matching is used, so for example entering "attn" changes both attn1 and attn2, while entering "attn2" changes only attn2. To be more specific, enter "attn2.to_out" and so on.

OUT03 OUT04 OUT05:attn2 attn1.to_out:0.5

sets the ratio of elements containing attn2 or attn1.to_out in the OUT03, OUT04 and OUT05 blocks to 0.5.
If the element field is left blank, all elements in the specified blocks change, which has the same effect as a block-by-block merge.
If specifications overlap, the one entered later takes precedence.

OUT06:attn:0.5,OUT06:attn2.to_k:0.2

sets attn in the OUT06 block to 0.5, except for attn2.to_k, which is set to 0.2.

You can invert the scope by entering NOT first.
This can be set per blocks and per elements.

NOT OUT04:attn:1

sets the ratio 1 for attn in all blocks except OUT04.

OUT05:NOT attn proj:0.2

sets all elements except attn and proj in the OUT05 block to 0.2.
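To make these rules concrete, here is a minimal, hypothetical sketch (not the extension's own parser) of how a specification string could be resolved to a ratio for a single weight key. It reproduces the behaviour described above: blank fields match everything, element names match partially, NOT inverts the block or element scope, and later entries win.

```python
# Hypothetical helper, for illustration only: resolve the elemental-merge
# ratio for one weight key from a spec like "OUT06:attn:0.5,OUT06:attn2.to_k:0.2".
def resolve_ratio(spec: str, block: str, key: str, default: float) -> float:
    ratio = default
    entries = [e.strip() for e in spec.replace("\n", ",").split(",") if e.strip()]
    for entry in entries:                                   # later entries override earlier ones
        blocks_part, elements_part, value = entry.split(":")
        blk_tokens = blocks_part.split()
        blk_not = bool(blk_tokens) and blk_tokens[0] == "NOT"
        blk_names = blk_tokens[1:] if blk_not else blk_tokens
        elem_tokens = elements_part.split()
        elem_not = bool(elem_tokens) and elem_tokens[0] == "NOT"
        elem_names = elem_tokens[1:] if elem_not else elem_tokens

        block_hit = not blk_names or block in blk_names      # blank = all blocks
        if blk_not:
            block_hit = not block_hit
        elem_hit = not elem_names or any(e in key for e in elem_names)  # partial match
        if elem_not:
            elem_hit = not elem_hit
        if block_hit and elem_hit:
            ratio = float(value)
    return ratio

# resolve_ratio("OUT06:attn:0.5,OUT06:attn2.to_k:0.2",
#               "OUT06", "transformer_blocks.0.attn2.to_k.weight", 0.3)  -> 0.2
```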
## XY plot
Several XY plot types are available for elemental merging. Input examples can be found in sample.txt.
#### elemental
Creates XY plots for multiple elemental merges. Separate the entries from each other with blank lines.
The image at the top of this page is the result of executing sample1 of sample.txt.

#### pinpoint element
Creates an XY plot with different values for a specific element. Do the same as with Pinpoint Blocks, but for elements, and specify alpha for the opposite axis. Separate elements with a newline or comma.
The following image shows the result of running sample3 of sample.txt.
![](https://raw.githubusercontent.com/hako-mikan/sd-webui-supermerger/images/sample3.jpg)

#### effective elemental checker
Outputs the influence of each element as a difference. Optionally, an animated gif and a csv file can be output. The gif and csv files are created in a diff folder, which is created in the output folder under a folder named after ModelA and ModelB. If a file name already exists, the file is renamed before saving, but it is recommended to rename the diff folder to something appropriate because it gets confusing as the number of files grows.
Separate entries with a newline or comma. Use alpha for the opposite axis and enter a single value. This is useful for seeing the effect of an element, but you can also see the effect of a block by not specifying an element, and you may end up using it that way more often.
The following images show the result of running sample5 of sample.txt.
![](https://raw.githubusercontent.com/hako-mikan/sd-webui-supermerger/images/sample5-1.jpg)
![](https://raw.githubusercontent.com/hako-mikan/sd-webui-supermerger/images/sample5-2.jpg)
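The checker itself renders images (and optionally an animated gif and a csv) so you can see the visual impact of each element. As a much cruder, purely numerical companion, the sketch below (hypothetical, assuming two local safetensors checkpoints named modelA.safetensors and modelB.safetensors) ranks the weight keys by how much the two models actually differ, which can help decide which elements are worth checking at all.

```python
# Hypothetical sketch: mean absolute difference per weight key, written to a CSV.
import csv
from safetensors.torch import load_file

a = load_file("modelA.safetensors")   # assumed file names
b = load_file("modelB.safetensors")

rows = []
for key in a.keys() & b.keys():
    if a[key].dtype.is_floating_point and a[key].shape == b[key].shape:
        diff = (a[key].float() - b[key].float()).abs().mean().item()
        rows.append((key, diff))

rows.sort(key=lambda r: r[1], reverse=True)
with open("element_diff.csv", "w", newline="") as f:
    csv.writer(f).writerows([("key", "mean_abs_diff"), *rows])
```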
### List of elements
Basically, attn seems to be responsible for the face and clothing information. The IN07, OUT03, OUT04 and OUT05 blocks seem to have a particularly strong influence. Since the degree of influence often differs from block to block, changing the same element in multiple blocks at the same time does not seem to make much sense.
No element exists where the table is marked null.

||IN00|IN01|IN02|IN03|IN04|IN05|IN06|IN07|IN08|IN09|IN10|IN11|M00|M00|OUT00|OUT01|OUT02|OUT03|OUT04|OUT05|OUT06|OUT07|OUT08|OUT09|OUT10|OUT11
|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|
op.bias|null|null|null||null|null||null|null||null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null
op.weight|null|null|null||null|null||null|null||null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null
emb_layers.1.bias|null|||null|||null|||null|null|||||||||||||||
emb_layers.1.weight|null|||null|||null|||null|null|||||||||||||||
in_layers.0.bias|null|||null|||null|||null|null|||||||||||||||
in_layers.0.weight|null|||null|||null|||null|null|||||||||||||||
in_layers.2.bias|null|||null|||null|||null|null|||||||||||||||
in_layers.2.weight|null|||null|||null|||null|null|||||||||||||||
out_layers.0.bias|null|||null|||null|||null|null|||||||||||||||
out_layers.0.weight|null|||null|||null|||null|null|||||||||||||||
out_layers.3.bias|null|||null|||null|||null|null|||||||||||||||
out_layers.3.weight|null|||null|||null|||null|null|||||||||||||||
skip_connection.bias|null|null|null|null||null|null||null|null|null|null|null|null||||||||||||
skip_connection.weight|null|null|null|null||null|null||null|null|null|null|null|null||||||||||||
norm.bias|null|||null|||null|||null|null|null||null|null|null|null|||||||||
norm.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
proj_in.bias|null|||null|||null|||null|null|null||null|null|null|null|||||||||
proj_in.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
proj_out.bias|null|||null|||null|||null|null|null||null|null|null|null|||||||||
proj_out.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.attn1.to_k.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.attn1.to_out.0.bias|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.attn1.to_out.0.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.attn1.to_q.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.attn1.to_v.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.attn2.to_k.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.attn2.to_out.0.bias|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.attn2.to_out.0.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.attn2.to_q.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.attn2.to_v.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.ff.net.0.proj.bias|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.ff.net.0.proj.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.ff.net.2.bias|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.ff.net.2.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.norm1.bias|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.norm1.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.norm2.bias|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.norm2.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.norm3.bias|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.norm3.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
conv.bias|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null||null|null||null|null||null|null|null
conv.weight|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null||null|null||null|null||null|null|null
0.bias||null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|
0.weight||null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|
2.bias|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|
2.weight|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|
time_embed.0.weight||null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|
time_embed.0.bias||null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|
time_embed.2.weight||null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|
time_embed.2.bias||null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|
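For reference, the element names in this table are the tails of the UNet keys stored in a checkpoint. Assuming the usual block-weight mapping (IN00-IN11 = model.diffusion_model.input_blocks.0-11, M00 = middle_block, OUT00-OUT11 = output_blocks.0-11) and a local model.safetensors file, a list like the one above can be regenerated from any SD1.x checkpoint with a hypothetical sketch like the following; note that the raw key tails keep a leading sub-block index (e.g. 1.transformer_blocks.0.attn2.to_k.weight) that the table mostly drops, which is one reason partial matching is convenient.

```python
# Hypothetical sketch: list which elements exist in which UNet block of an SD1.x checkpoint.
from collections import defaultdict
from safetensors.torch import load_file

state = load_file("model.safetensors")        # assumed file name
elements = defaultdict(set)                   # block name -> element key tails

for key in state:
    if not key.startswith("model.diffusion_model."):
        continue                              # skip text encoder / VAE keys
    parts = key.split(".")
    if parts[2] in ("input_blocks", "output_blocks"):
        block = ("IN" if parts[2] == "input_blocks" else "OUT") + f"{int(parts[3]):02}"
        element = ".".join(parts[4:])
    elif parts[2] == "middle_block":
        block = "M00"
        element = ".".join(parts[3:])
    else:
        continue                              # time_embed and out.* are ignored in this sketch
    elements[block].add(element)

for block in sorted(elements):
    print(block, len(elements[block]), "elements")
```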
extensions/microsoftexcel-supermerger/elemental_ja.md
ADDED
@@ -0,0 +1,119 @@
# Elemental Merge
- A block-by-block merge that goes beyond block-by-block merging.

In a block-by-block merge, the merge ratio can be changed for each of the 25 blocks, but each block in turn consists of multiple elements, and in principle the ratio can also be changed per element. There are more than 600 elements, though, and it was doubtful whether they could be handled by hand, but we implemented it anyway. Merging element by element out of the blue is not recommended; use it as a final adjustment when a problem arises that block-by-block merging cannot solve.
The following images show the result of changing the elements of the OUT05 block. The leftmost image is without merging, the second changes all of OUT05 (i.e. a normal block-by-block merge), and the rest are elemental merges. As shown in the table below, attn2 and the like contain several more elements.
![](https://raw.githubusercontent.com/hako-mikan/sd-webui-supermerger/images/sample1.jpg)

## Usage
Elemental merging works for both normal and block-by-block merging; because it is computed last, note that it overwrites the values specified for a block-by-block merge.

Set it in Elemental Merge. Note that if text is set here, it is applied automatically. The elements are listed in the table below, but it is not necessary to enter the full name of an element.
You can check whether the effect is applied properly by enabling the "print change" check. If this check is enabled, the applied elements are printed to the command prompt during the merge.
Partial matching can be used for the specification.
### Format
Blocks:Element:Ratio, Blocks:Element:Ratio,...
or
Blocks:Element:Ratio
Blocks:Element:Ratio
Blocks:Element:Ratio

Multiple specifications can be given by separating them with commas or newlines; commas and newlines may be mixed.
Blocks are specified in uppercase: BASE, IN00-IN11, M00, OUT00-OUT11. If left blank, the specification applies to all blocks. Multiple blocks can be specified by separating them with spaces.
Similarly, multiple elements can be specified by separating them with spaces.
Matching is partial, so for example entering "attn" changes both attn1 and attn2, while "attn2" changes only attn2. To be more specific, enter "attn2.to_out" and so on.

OUT03 OUT04 OUT05:attn2 attn1.to_out:0.5

sets the ratio of elements containing attn2, as well as attn1.to_out, in the OUT03, OUT04 and OUT05 blocks to 0.5.
If the element field is left blank, all elements in the specified blocks change, which has the same effect as a block-by-block merge.
If specifications overlap, the one entered later takes precedence.

OUT06:attn:0.5,OUT06:attn2.to_k:0.2

sets attn in the OUT06 block to 0.5, except for attn2.to_k, which is set to 0.2.

Entering NOT first inverts the scope.
This can be set per blocks and per elements.

NOT OUT04:attn:1

sets the ratio 1 for attn in all blocks except OUT04.

OUT05:NOT attn proj:0.2

sets all elements except attn and proj in the OUT05 block to 0.2.

## XY plot
Several XY plot types are provided for elemental merging. Input examples can be found in sample.txt.
#### elemental
Creates an XY plot for multiple elemental merges. Separate the entries with blank lines.
The image at the top of this page is the result of running sample1 of sample.txt.

#### pinpoint element
Creates an XY plot that varies the value for a specific element. It does the same thing as Pinpoint Blocks, but for elements; specify alpha on the opposite axis. Separate elements with a newline or comma.
The following image is the result of running sample3 of sample.txt.
![](https://raw.githubusercontent.com/hako-mikan/sd-webui-supermerger/images/sample3.jpg)

#### effective elemental checker
Outputs the influence of each element as a difference. Optionally, an animated gif and a csv file can be output. The gif and csv files are created in a diff folder, which is created in the output folder under a folder named after ModelA and ModelB. If a file name already exists, the file is renamed before saving, but since this gets confusing as the number of files grows, it is recommended to rename the diff folder to something appropriate.
Separate entries with a newline or comma. Use alpha for the opposite axis and enter a single value. This is useful for seeing the effect of an element, but you can also see the effect of a block by not specifying an element, and you may well use it that way more often.
The following images are the result of running sample5 of sample.txt.
![](https://raw.githubusercontent.com/hako-mikan/sd-webui-supermerger/images/sample5-1.jpg)
![](https://raw.githubusercontent.com/hako-mikan/sd-webui-supermerger/images/sample5-2.jpg)
### List of elements
Basically, attn seems to carry the face and clothing information. The IN07, OUT03, OUT04 and OUT05 blocks seem to have a particularly strong influence. Since the degree of influence often differs from block to block, changing the same element in multiple blocks at the same time does not seem very meaningful.
No element exists where the table says null.

||IN00|IN01|IN02|IN03|IN04|IN05|IN06|IN07|IN08|IN09|IN10|IN11|M00|M00|OUT00|OUT01|OUT02|OUT03|OUT04|OUT05|OUT06|OUT07|OUT08|OUT09|OUT10|OUT11
|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|
op.bias|null|null|null||null|null||null|null||null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null
op.weight|null|null|null||null|null||null|null||null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null
emb_layers.1.bias|null|||null|||null|||null|null|||||||||||||||
emb_layers.1.weight|null|||null|||null|||null|null|||||||||||||||
in_layers.0.bias|null|||null|||null|||null|null|||||||||||||||
in_layers.0.weight|null|||null|||null|||null|null|||||||||||||||
in_layers.2.bias|null|||null|||null|||null|null|||||||||||||||
in_layers.2.weight|null|||null|||null|||null|null|||||||||||||||
out_layers.0.bias|null|||null|||null|||null|null|||||||||||||||
out_layers.0.weight|null|||null|||null|||null|null|||||||||||||||
out_layers.3.bias|null|||null|||null|||null|null|||||||||||||||
out_layers.3.weight|null|||null|||null|||null|null|||||||||||||||
skip_connection.bias|null|null|null|null||null|null||null|null|null|null|null|null||||||||||||
skip_connection.weight|null|null|null|null||null|null||null|null|null|null|null|null||||||||||||
norm.bias|null|||null|||null|||null|null|null||null|null|null|null|||||||||
norm.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
proj_in.bias|null|||null|||null|||null|null|null||null|null|null|null|||||||||
proj_in.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
proj_out.bias|null|||null|||null|||null|null|null||null|null|null|null|||||||||
proj_out.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.attn1.to_k.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.attn1.to_out.0.bias|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.attn1.to_out.0.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.attn1.to_q.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.attn1.to_v.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.attn2.to_k.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.attn2.to_out.0.bias|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.attn2.to_out.0.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.attn2.to_q.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.attn2.to_v.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.ff.net.0.proj.bias|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.ff.net.0.proj.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.ff.net.2.bias|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.ff.net.2.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.norm1.bias|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.norm1.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.norm2.bias|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.norm2.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.norm3.bias|null|||null|||null|||null|null|null||null|null|null|null|||||||||
transformer_blocks.0.norm3.weight|null|||null|||null|||null|null|null||null|null|null|null|||||||||
conv.bias|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null||null|null||null|null||null|null|null
conv.weight|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null||null|null||null|null||null|null|null
0.bias||null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|
0.weight||null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|
2.bias|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|
2.weight|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|
time_embed.0.weight||null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|
time_embed.0.bias||null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|
time_embed.2.weight||null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|
time_embed.2.bias||null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|null|
extensions/microsoftexcel-supermerger/install.py
ADDED
@@ -0,0 +1,7 @@
import launch

# Install scikit-learn (imported as "sklearn") if it is not present yet.
if not launch.is_installed("sklearn"):
    launch.run_pip("install scikit-learn", "scikit-learn")

# Install diffusers if it is not present yet.
if not launch.is_installed("diffusers"):
    launch.run_pip("install diffusers", "diffusers")
extensions/microsoftexcel-supermerger/sample.txt
ADDED
@@ -0,0 +1,95 @@
examples of XY plot
***************************************************************
for elemental
Each value is separated by a blank line
Elemental merge values are comma or newline delimited
Commas and newlines can also be mixed
You can insert a run without any elemental specification by putting two blank lines at the beginning

**sample1*******************************************************


OUT05::1

OUT05:layers:1

OUT05:attn1:1

OUT05:attn2:1

OUT05:ff.net:1
**sample2*******************************************************


IN07:attn1:0.5,IN07:attn2:0.2
OUT03:NOT attn1.to_k.weight:0.5
OUT03:attn1.to_k.weight:0.3

OUT04:NOT ff.net:0.5
OUT04:attn1.to_k.weight:0.3
OUT04:attn1.to_out.0.weight:0.3

OUT05:NOT ff.net:0.5
OUT05:attn1.to_k.weight:0.3
OUT05:attn1.to_out.0.weight:0.3
***************************************************************
***************************************************************
for pinpoint element
Each value is separated by a comma or newline
do not enter a ratio
**sample3******************************************************
IN07:,IN07:layers,IN07:attn1,IN07:attn2,IN07:ff.net
**sample4******************************************************
OUT04:NOT attn2.to_q
OUT04:attn1.to_k.weight
OUT04:ff.net
OUT04:attn
***************************************************************
***************************************************************
for effective elemental checker

Examine the effect of each block
Output results can be split by inserting "|" partway through
**sample5*************************************************************
IN00:,IN01:,IN02:,IN03:,IN04:,IN05:,IN06:,IN07:,IN08:,IN09:,IN10:,IN11:,M00:|OUT00:,OUT01:,OUT02:,OUT03:,OUT04:,OUT05:,OUT06:,OUT07:,OUT08:,OUT09:,OUT10:,OUT11:

Examine the effect of all elements in IN01
Below that corresponds to IN02 and later
**sample6*************************************************************
IN01:emb_layers.1.bias,IN01:emb_layers.1.weight,IN01:in_layers.0.bias,IN01:in_layers.0.weight,IN01:in_layers.2.bias,IN01:in_layers.2.weight,IN01:out_layers.0.bias,IN01:out_layers.0.weight,IN01:out_layers.3.bias,IN01:out_layers.3.weight,IN01:skip_connection.bias,IN01:skip_connection.weight,IN01:norm.bias,IN01:norm.weight,IN01:proj_in.bias,IN01:proj_in.weight,IN01:proj_out.bias,IN01:proj_out.weight,IN01:transformer_blocks.0.attn1.to_k.weight,IN01:transformer_blocks.0.attn1.to_out.0.bias,IN01:transformer_blocks.0.attn1.to_out.0.weight,IN01:transformer_blocks.0.attn1.to_q.weight,IN01:transformer_blocks.0.attn1.to_v.weight,IN01:transformer_blocks.0.attn2.to_k.weight,IN01:transformer_blocks.0.attn2.to_out.0.bias,IN01:transformer_blocks.0.attn2.to_out.0.weight,IN01:transformer_blocks.0.attn2.to_q.weight,IN01:transformer_blocks.0.attn2.to_v.weight,IN01:transformer_blocks.0.ff.net.0.proj.bias,IN01:transformer_blocks.0.ff.net.0.proj.weight,IN01:transformer_blocks.0.ff.net.2.bias,IN01:transformer_blocks.0.ff.net.2.weight,IN01:transformer_blocks.0.norm1.bias,IN01:transformer_blocks.0.norm1.weight,IN01:transformer_blocks.0.norm2.bias,IN01:transformer_blocks.0.norm2.weight,IN01:transformer_blocks.0.norm3.bias,IN01:transformer_blocks.0.norm3.weight
IN02:emb_layers.1.bias,IN02:emb_layers.1.weight,IN02:in_layers.0.bias,IN02:in_layers.0.weight,IN02:in_layers.2.bias,IN02:in_layers.2.weight,IN02:out_layers.0.bias,IN02:out_layers.0.weight,IN02:out_layers.3.bias,IN02:out_layers.3.weight,IN02:skip_connection.bias,IN02:skip_connection.weight,IN02:norm.bias,IN02:norm.weight,IN02:proj_in.bias,IN02:proj_in.weight,IN02:proj_out.bias,IN02:proj_out.weight,IN02:transformer_blocks.0.attn1.to_k.weight,IN02:transformer_blocks.0.attn1.to_out.0.bias,IN02:transformer_blocks.0.attn1.to_out.0.weight,IN02:transformer_blocks.0.attn1.to_q.weight,IN02:transformer_blocks.0.attn1.to_v.weight,IN02:transformer_blocks.0.attn2.to_k.weight,IN02:transformer_blocks.0.attn2.to_out.0.bias,IN02:transformer_blocks.0.attn2.to_out.0.weight,IN02:transformer_blocks.0.attn2.to_q.weight,IN02:transformer_blocks.0.attn2.to_v.weight,IN02:transformer_blocks.0.ff.net.0.proj.bias,IN02:transformer_blocks.0.ff.net.0.proj.weight,IN02:transformer_blocks.0.ff.net.2.bias,IN02:transformer_blocks.0.ff.net.2.weight,IN02:transformer_blocks.0.norm1.bias,IN02:transformer_blocks.0.norm1.weight,IN02:transformer_blocks.0.norm2.bias,IN02:transformer_blocks.0.norm2.weight,IN02:transformer_blocks.0.norm3.bias,IN02:transformer_blocks.0.norm3.weight
IN00:bias,IN00:weight,IN03:op.bias,IN03:op.weight,IN06:op.bias,IN06:op.weight,IN09:op.bias,IN09:op.weight
IN04:emb_layers.1.bias,IN04:emb_layers.1.weight,IN04:in_layers.0.bias,IN04:in_layers.0.weight,IN04:in_layers.2.bias,IN04:in_layers.2.weight,IN04:out_layers.0.bias,IN04:out_layers.0.weight,IN04:out_layers.3.bias,IN04:out_layers.3.weight,IN04:skip_connection.bias,IN04:skip_connection.weight,IN04:norm.bias,IN04:norm.weight,IN04:proj_in.bias,IN04:proj_in.weight,IN04:proj_out.bias,IN04:proj_out.weight,IN04:transformer_blocks.0.attn1.to_k.weight,IN04:transformer_blocks.0.attn1.to_out.0.bias,IN04:transformer_blocks.0.attn1.to_out.0.weight,IN04:transformer_blocks.0.attn1.to_q.weight,IN04:transformer_blocks.0.attn1.to_v.weight,IN04:transformer_blocks.0.attn2.to_k.weight,IN04:transformer_blocks.0.attn2.to_out.0.bias,IN04:transformer_blocks.0.attn2.to_out.0.weight,IN04:transformer_blocks.0.attn2.to_q.weight,IN04:transformer_blocks.0.attn2.to_v.weight,IN04:transformer_blocks.0.ff.net.0.proj.bias,IN04:transformer_blocks.0.ff.net.0.proj.weight,IN04:transformer_blocks.0.ff.net.2.bias,IN04:transformer_blocks.0.ff.net.2.weight,IN04:transformer_blocks.0.norm1.bias,IN04:transformer_blocks.0.norm1.weight,IN04:transformer_blocks.0.norm2.bias,IN04:transformer_blocks.0.norm2.weight,IN04:transformer_blocks.0.norm3.bias,IN04:transformer_blocks.0.norm3.weight
IN05:emb_layers.1.bias,IN05:emb_layers.1.weight,IN05:in_layers.0.bias,IN05:in_layers.0.weight,IN05:in_layers.2.bias,IN05:in_layers.2.weight,IN05:out_layers.0.bias,IN05:out_layers.0.weight,IN05:out_layers.3.bias,IN05:out_layers.3.weight,IN05:skip_connection.bias,IN05:skip_connection.weight,IN05:norm.bias,IN05:norm.weight,IN05:proj_in.bias,IN05:proj_in.weight,IN05:proj_out.bias,IN05:proj_out.weight,IN05:transformer_blocks.0.attn1.to_k.weight,IN05:transformer_blocks.0.attn1.to_out.0.bias,IN05:transformer_blocks.0.attn1.to_out.0.weight,IN05:transformer_blocks.0.attn1.to_q.weight,IN05:transformer_blocks.0.attn1.to_v.weight,IN05:transformer_blocks.0.attn2.to_k.weight,IN05:transformer_blocks.0.attn2.to_out.0.bias,IN05:transformer_blocks.0.attn2.to_out.0.weight,IN05:transformer_blocks.0.attn2.to_q.weight,IN05:transformer_blocks.0.attn2.to_v.weight,IN05:transformer_blocks.0.ff.net.0.proj.bias,IN05:transformer_blocks.0.ff.net.0.proj.weight,IN05:transformer_blocks.0.ff.net.2.bias,IN05:transformer_blocks.0.ff.net.2.weight,IN05:transformer_blocks.0.norm1.bias,IN05:transformer_blocks.0.norm1.weight,IN05:transformer_blocks.0.norm2.bias,IN05:transformer_blocks.0.norm2.weight,IN05:transformer_blocks.0.norm3.bias,IN05:transformer_blocks.0.norm3.weight
IN07:emb_layers.1.bias,IN07:emb_layers.1.weight,IN07:in_layers.0.bias,IN07:in_layers.0.weight,IN07:in_layers.2.bias,IN07:in_layers.2.weight,IN07:out_layers.0.bias,IN07:out_layers.0.weight,IN07:out_layers.3.bias,IN07:out_layers.3.weight,IN07:skip_connection.bias,IN07:skip_connection.weight,IN07:norm.bias,IN07:norm.weight,IN07:proj_in.bias,IN07:proj_in.weight,IN07:proj_out.bias,IN07:proj_out.weight,IN07:transformer_blocks.0.attn1.to_k.weight,IN07:transformer_blocks.0.attn1.to_out.0.bias,IN07:transformer_blocks.0.attn1.to_out.0.weight,IN07:transformer_blocks.0.attn1.to_q.weight,IN07:transformer_blocks.0.attn1.to_v.weight,IN07:transformer_blocks.0.attn2.to_k.weight,IN07:transformer_blocks.0.attn2.to_out.0.bias,IN07:transformer_blocks.0.attn2.to_out.0.weight,IN07:transformer_blocks.0.attn2.to_q.weight,IN07:transformer_blocks.0.attn2.to_v.weight,IN07:transformer_blocks.0.ff.net.0.proj.bias,IN07:transformer_blocks.0.ff.net.0.proj.weight,IN07:transformer_blocks.0.ff.net.2.bias,IN07:transformer_blocks.0.ff.net.2.weight,IN07:transformer_blocks.0.norm1.bias,IN07:transformer_blocks.0.norm1.weight,IN07:transformer_blocks.0.norm2.bias,IN07:transformer_blocks.0.norm2.weight,IN07:transformer_blocks.0.norm3.bias,IN07:transformer_blocks.0.norm3.weight
IN08:emb_layers.1.bias,IN08:emb_layers.1.weight,IN08:in_layers.0.bias,IN08:in_layers.0.weight,IN08:in_layers.2.bias,IN08:in_layers.2.weight,IN08:out_layers.0.bias,IN08:out_layers.0.weight,IN08:out_layers.3.bias,IN08:out_layers.3.weight,IN08:skip_connection.bias,IN08:skip_connection.weight,IN08:norm.bias,IN08:norm.weight,IN08:proj_in.bias,IN08:proj_in.weight,IN08:proj_out.bias,IN08:proj_out.weight,IN08:transformer_blocks.0.attn1.to_k.weight,IN08:transformer_blocks.0.attn1.to_out.0.bias,IN08:transformer_blocks.0.attn1.to_out.0.weight,IN08:transformer_blocks.0.attn1.to_q.weight,IN08:transformer_blocks.0.attn1.to_v.weight,IN08:transformer_blocks.0.attn2.to_k.weight,IN08:transformer_blocks.0.attn2.to_out.0.bias,IN08:transformer_blocks.0.attn2.to_out.0.weight,IN08:transformer_blocks.0.attn2.to_q.weight,IN08:transformer_blocks.0.attn2.to_v.weight,IN08:transformer_blocks.0.ff.net.0.proj.bias,IN08:transformer_blocks.0.ff.net.0.proj.weight,IN08:transformer_blocks.0.ff.net.2.bias,IN08:transformer_blocks.0.ff.net.2.weight,IN08:transformer_blocks.0.norm1.bias,IN08:transformer_blocks.0.norm1.weight,IN08:transformer_blocks.0.norm2.bias,IN08:transformer_blocks.0.norm2.weight,IN08:transformer_blocks.0.norm3.bias,IN08:transformer_blocks.0.norm3.weight
IN10:emb_layers.1.bias,IN10:emb_layers.1.weight,IN10:in_layers.0.bias,IN10:in_layers.0.weight,IN10:in_layers.2.bias,IN10:in_layers.2.weight,IN10:out_layers.0.bias,IN10:out_layers.0.weight,IN10:out_layers.3.bias,IN10:out_layers.3.weight,IN11:emb_layers.1.bias,IN11:emb_layers.1.weight,IN11:in_layers.0.bias,IN11:in_layers.0.weight,IN11:in_layers.2.bias,IN11:in_layers.2.weight,IN11:out_layers.0.bias,IN11:out_layers.0.weight,IN11:out_layers.3.bias,IN11:out_layers.3.weight
M00:0.emb_layers.1.bias,M00:0.emb_layers.1.weight,M00:0.in_layers.0.bias,M00:0.in_layers.0.weight,M00:0.in_layers.2.bias,M00:0.in_layers.2.weight,M00:0.out_layers.0.bias,M00:0.out_layers.0.weight,M00:0.out_layers.3.bias,M00:0.out_layers.3.weight,M00:1.norm.bias,M00:1.norm.weight,M00:1.proj_in.bias,M00:1.proj_in.weight,M00:1.proj_out.bias,M00:1.proj_out.weight,M00:1.transformer_blocks.0.attn1.to_k.weight,M00:1.transformer_blocks.0.attn1.to_out.0.bias,M00:1.transformer_blocks.0.attn1.to_out.0.weight,M00:1.transformer_blocks.0.attn1.to_q.weight,M00:1.transformer_blocks.0.attn1.to_v.weight,M00:1.transformer_blocks.0.attn2.to_k.weight,M00:1.transformer_blocks.0.attn2.to_out.0.bias,M00:1.transformer_blocks.0.attn2.to_out.0.weight,M00:1.transformer_blocks.0.attn2.to_q.weight,M00:1.transformer_blocks.0.attn2.to_v.weight,M00:1.transformer_blocks.0.ff.net.0.proj.bias,M00:1.transformer_blocks.0.ff.net.0.proj.weight,M00:1.transformer_blocks.0.ff.net.2.bias,M00:1.transformer_blocks.0.ff.net.2.weight,M00:1.transformer_blocks.0.norm1.bias,M00:1.transformer_blocks.0.norm1.weight,M00:1.transformer_blocks.0.norm2.bias,M00:1.transformer_blocks.0.norm2.weight,M00:1.transformer_blocks.0.norm3.bias,M00:1.transformer_blocks.0.norm3.weight,M00:2.emb_layers.1.bias,M00:2.emb_layers.1.weight,M00:2.in_layers.0.bias,M00:2.in_layers.0.weight,M00:2.in_layers.2.bias,M00:2.in_layers.2.weight,M00:2.out_layers.0.bias,M00:2.out_layers.0.weight,M00:2.out_layers.3.bias,M00:2.out_layers.3.weight
OUT00:emb_layers.1.bias,OUT00:emb_layers.1.weight,OUT00:in_layers.0.bias,OUT00:in_layers.0.weight,OUT00:in_layers.2.bias,OUT00:in_layers.2.weight,OUT00:out_layers.0.bias,OUT00:out_layers.0.weight,OUT00:out_layers.3.bias,OUT00:out_layers.3.weight,OUT00:skip_connection.bias,OUT00:skip_connection.weight,OUT01:emb_layers.1.bias,OUT01:emb_layers.1.weight,OUT01:in_layers.0.bias,OUT01:in_layers.0.weight,OUT01:in_layers.2.bias,OUT01:in_layers.2.weight,OUT01:out_layers.0.bias,OUT01:out_layers.0.weight,OUT01:out_layers.3.bias,OUT01:out_layers.3.weight,OUT01:skip_connection.bias,OUT01:skip_connection.weight,OUT02:emb_layers.1.bias,OUT02:emb_layers.1.weight,OUT02:in_layers.0.bias,OUT02:in_layers.0.weight,OUT02:in_layers.2.bias,OUT02:in_layers.2.weight,OUT02:out_layers.0.bias,OUT02:out_layers.0.weight,OUT02:out_layers.3.bias,OUT02:out_layers.3.weight,OUT02:skip_connection.bias,OUT02:skip_connection.weight,OUT02:conv.bias,OUT02:conv.weight
OUT03:emb_layers.1.bias,OUT03:emb_layers.1.weight,OUT03:in_layers.0.bias,OUT03:in_layers.0.weight,OUT03:in_layers.2.bias,OUT03:in_layers.2.weight,OUT03:out_layers.0.bias,OUT03:out_layers.0.weight,OUT03:out_layers.3.bias,OUT03:out_layers.3.weight,OUT03:skip_connection.bias,OUT03:skip_connection.weight,OUT03:norm.bias,OUT03:norm.weight,OUT03:proj_in.bias,OUT03:proj_in.weight,OUT03:proj_out.bias,OUT03:proj_out.weight,OUT03:transformer_blocks.0.attn1.to_k.weight,OUT03:transformer_blocks.0.attn1.to_out.0.bias,OUT03:transformer_blocks.0.attn1.to_out.0.weight,OUT03:transformer_blocks.0.attn1.to_q.weight,OUT03:transformer_blocks.0.attn1.to_v.weight,OUT03:transformer_blocks.0.attn2.to_k.weight,OUT03:transformer_blocks.0.attn2.to_out.0.bias,OUT03:transformer_blocks.0.attn2.to_out.0.weight,OUT03:transformer_blocks.0.attn2.to_q.weight,OUT03:transformer_blocks.0.attn2.to_v.weight,OUT03:transformer_blocks.0.ff.net.0.proj.bias,OUT03:transformer_blocks.0.ff.net.0.proj.weight,OUT03:transformer_blocks.0.ff.net.2.bias,OUT03:transformer_blocks.0.ff.net.2.weight,OUT03:transformer_blocks.0.norm1.bias,OUT03:transformer_blocks.0.norm1.weight,OUT03:transformer_blocks.0.norm2.bias,OUT03:transformer_blocks.0.norm2.weight,OUT03:transformer_blocks.0.norm3.bias,OUT03:transformer_blocks.0.norm3.weight
OUT04:emb_layers.1.bias,OUT04:emb_layers.1.weight,OUT04:in_layers.0.bias,OUT04:in_layers.0.weight,OUT04:in_layers.2.bias,OUT04:in_layers.2.weight,OUT04:out_layers.0.bias,OUT04:out_layers.0.weight,OUT04:out_layers.3.bias,OUT04:out_layers.3.weight,OUT04:skip_connection.bias,OUT04:skip_connection.weight,OUT04:norm.bias,OUT04:norm.weight,OUT04:proj_in.bias,OUT04:proj_in.weight,OUT04:proj_out.bias,OUT04:proj_out.weight,OUT04:transformer_blocks.0.attn1.to_k.weight,OUT04:transformer_blocks.0.attn1.to_out.0.bias,OUT04:transformer_blocks.0.attn1.to_out.0.weight,OUT04:transformer_blocks.0.attn1.to_q.weight,OUT04:transformer_blocks.0.attn1.to_v.weight,OUT04:transformer_blocks.0.attn2.to_k.weight,OUT04:transformer_blocks.0.attn2.to_out.0.bias,OUT04:transformer_blocks.0.attn2.to_out.0.weight,OUT04:transformer_blocks.0.attn2.to_q.weight,OUT04:transformer_blocks.0.attn2.to_v.weight,OUT04:transformer_blocks.0.ff.net.0.proj.bias,OUT04:transformer_blocks.0.ff.net.0.proj.weight,OUT04:transformer_blocks.0.ff.net.2.bias,OUT04:transformer_blocks.0.ff.net.2.weight,OUT04:transformer_blocks.0.norm1.bias,OUT04:transformer_blocks.0.norm1.weight,OUT04:transformer_blocks.0.norm2.bias,OUT04:transformer_blocks.0.norm2.weight,OUT04:transformer_blocks.0.norm3.bias,OUT04:transformer_blocks.0.norm3.weight
OUT05:emb_layers.1.bias,OUT05:emb_layers.1.weight,OUT05:in_layers.0.bias,OUT05:in_layers.0.weight,OUT05:in_layers.2.bias,OUT05:in_layers.2.weight,OUT05:out_layers.0.bias,OUT05:out_layers.0.weight,OUT05:out_layers.3.bias,OUT05:out_layers.3.weight,OUT05:skip_connection.bias,OUT05:skip_connection.weight,OUT05:norm.bias,OUT05:norm.weight,OUT05:proj_in.bias,OUT05:proj_in.weight,OUT05:proj_out.bias,OUT05:proj_out.weight,OUT05:transformer_blocks.0.attn1.to_k.weight,OUT05:transformer_blocks.0.attn1.to_out.0.bias,OUT05:transformer_blocks.0.attn1.to_out.0.weight,OUT05:transformer_blocks.0.attn1.to_q.weight,OUT05:transformer_blocks.0.attn1.to_v.weight,OUT05:transformer_blocks.0.attn2.to_k.weight,OUT05:transformer_blocks.0.attn2.to_out.0.bias,OUT05:transformer_blocks.0.attn2.to_out.0.weight,OUT05:transformer_blocks.0.attn2.to_q.weight,OUT05:transformer_blocks.0.attn2.to_v.weight,OUT05:transformer_blocks.0.ff.net.0.proj.bias,OUT05:transformer_blocks.0.ff.net.0.proj.weight,OUT05:transformer_blocks.0.ff.net.2.bias,OUT05:transformer_blocks.0.ff.net.2.weight,OUT05:transformer_blocks.0.norm1.bias,OUT05:transformer_blocks.0.norm1.weight,OUT05:transformer_blocks.0.norm2.bias,OUT05:transformer_blocks.0.norm2.weight,OUT05:transformer_blocks.0.norm3.bias,OUT05:transformer_blocks.0.norm3.weight,OUT05:conv.bias,OUT05:conv.weight
,OUT06:emb_layers.1.bias,OUT06:emb_layers.1.weight,OUT06:in_layers.0.bias,OUT06:in_layers.0.weight,OUT06:in_layers.2.bias,OUT06:in_layers.2.weight,OUT06:out_layers.0.bias,OUT06:out_layers.0.weight,OUT06:out_layers.3.bias,OUT06:out_layers.3.weight,OUT06:skip_connection.bias,OUT06:skip_connection.weight,OUT06:norm.bias,OUT06:norm.weight,OUT06:proj_in.bias,OUT06:proj_in.weight,OUT06:proj_out.bias,OUT06:proj_out.weight,OUT06:transformer_blocks.0.attn1.to_k.weight,OUT06:transformer_blocks.0.attn1.to_out.0.bias,OUT06:transformer_blocks.0.attn1.to_out.0.weight,OUT06:transformer_blocks.0.attn1.to_q.weight,OUT06:transformer_blocks.0.attn1.to_v.weight,OUT06:transformer_blocks.0.attn2.to_k.weight,OUT06:transformer_blocks.0.attn2.to_out.0.bias,OUT06:transformer_blocks.0.attn2.to_out.0.weight,OUT06:transformer_blocks.0.attn2.to_q.weight,OUT06:transformer_blocks.0.attn2.to_v.weight,OUT06:transformer_blocks.0.ff.net.0.proj.bias,OUT06:transformer_blocks.0.ff.net.0.proj.weight,OUT06:transformer_blocks.0.ff.net.2.bias,OUT06:transformer_blocks.0.ff.net.2.weight,OUT06:transformer_blocks.0.norm1.bias,OUT06:transformer_blocks.0.norm1.weight,OUT06:transformer_blocks.0.norm2.bias,OUT06:transformer_blocks.0.norm2.weight,OUT06:transformer_blocks.0.norm3.bias,OUT06:transformer_blocks.0.norm3.weight
OUT07:emb_layers.1.bias,OUT07:emb_layers.1.weight,OUT07:in_layers.0.bias,OUT07:in_layers.0.weight,OUT07:in_layers.2.bias,OUT07:in_layers.2.weight,OUT07:out_layers.0.bias,OUT07:out_layers.0.weight,OUT07:out_layers.3.bias,OUT07:out_layers.3.weight,OUT07:skip_connection.bias,OUT07:skip_connection.weight,OUT07:norm.bias,OUT07:norm.weight,OUT07:proj_in.bias,OUT07:proj_in.weight,OUT07:proj_out.bias,OUT07:proj_out.weight,OUT07:transformer_blocks.0.attn1.to_k.weight,OUT07:transformer_blocks.0.attn1.to_out.0.bias,OUT07:transformer_blocks.0.attn1.to_out.0.weight,OUT07:transformer_blocks.0.attn1.to_q.weight,OUT07:transformer_blocks.0.attn1.to_v.weight,OUT07:transformer_blocks.0.attn2.to_k.weight,OUT07:transformer_blocks.0.attn2.to_out.0.bias,OUT07:transformer_blocks.0.attn2.to_out.0.weight,OUT07:transformer_blocks.0.attn2.to_q.weight,OUT07:transformer_blocks.0.attn2.to_v.weight,OUT07:transformer_blocks.0.ff.net.0.proj.bias,OUT07:transformer_blocks.0.ff.net.0.proj.weight,OUT07:transformer_blocks.0.ff.net.2.bias,OUT07:transformer_blocks.0.ff.net.2.weight,OUT07:transformer_blocks.0.norm1.bias,OUT07:transformer_blocks.0.norm1.weight,OUT07:transformer_blocks.0.norm2.bias,OUT07:transformer_blocks.0.norm2.weight,OUT07:transformer_blocks.0.norm3.bias,OUT07:transformer_blocks.0.norm3.weight
,OUT08:emb_layers.1.bias,OUT08:emb_layers.1.weight,OUT08:in_layers.0.bias,OUT08:in_layers.0.weight,OUT08:in_layers.2.bias,OUT08:in_layers.2.weight,OUT08:out_layers.0.bias,OUT08:out_layers.0.weight,OUT08:out_layers.3.bias,OUT08:out_layers.3.weight,OUT08:skip_connection.bias,OUT08:skip_connection.weight,OUT08:norm.bias,OUT08:norm.weight,OUT08:proj_in.bias,OUT08:proj_in.weight,OUT08:proj_out.bias,OUT08:proj_out.weight,OUT08:transformer_blocks.0.attn1.to_k.weight,OUT08:transformer_blocks.0.attn1.to_out.0.bias,OUT08:transformer_blocks.0.attn1.to_out.0.weight,OUT08:transformer_blocks.0.attn1.to_q.weight,OUT08:transformer_blocks.0.attn1.to_v.weight,OUT08:transformer_blocks.0.attn2.to_k.weight,OUT08:transformer_blocks.0.attn2.to_out.0.bias,OUT08:transformer_blocks.0.attn2.to_out.0.weight,OUT08:transformer_blocks.0.attn2.to_q.weight,OUT08:transformer_blocks.0.attn2.to_v.weight,OUT08:transformer_blocks.0.ff.net.0.proj.bias,OUT08:transformer_blocks.0.ff.net.0.proj.weight,OUT08:transformer_blocks.0.ff.net.2.bias,OUT08:transformer_blocks.0.ff.net.2.weight,OUT08:transformer_blocks.0.norm1.bias,OUT08:transformer_blocks.0.norm1.weight,OUT08:transformer_blocks.0.norm2.bias,OUT08:transformer_blocks.0.norm2.weight,OUT08:transformer_blocks.0.norm3.bias,OUT08:transformer_blocks.0.norm3.weight,OUT08:conv.bias,OUT08:conv.weight
OUT09:emb_layers.1.bias,OUT09:emb_layers.1.weight,OUT09:in_layers.0.bias,OUT09:in_layers.0.weight,OUT09:in_layers.2.bias,OUT09:in_layers.2.weight,OUT09:out_layers.0.bias,OUT09:out_layers.0.weight,OUT09:out_layers.3.bias,OUT09:out_layers.3.weight,OUT09:skip_connection.bias,OUT09:skip_connection.weight,OUT09:norm.bias,OUT09:norm.weight,OUT09:proj_in.bias,OUT09:proj_in.weight,OUT09:proj_out.bias,OUT09:proj_out.weight,OUT09:transformer_blocks.0.attn1.to_k.weight,OUT09:transformer_blocks.0.attn1.to_out.0.bias,OUT09:transformer_blocks.0.attn1.to_out.0.weight,OUT09:transformer_blocks.0.attn1.to_q.weight,OUT09:transformer_blocks.0.attn1.to_v.weight,OUT09:transformer_blocks.0.attn2.to_k.weight,OUT09:transformer_blocks.0.attn2.to_out.0.bias,OUT09:transformer_blocks.0.attn2.to_out.0.weight,OUT09:transformer_blocks.0.attn2.to_q.weight,OUT09:transformer_blocks.0.attn2.to_v.weight,OUT09:transformer_blocks.0.ff.net.0.proj.bias,OUT09:transformer_blocks.0.ff.net.0.proj.weight,OUT09:transformer_blocks.0.ff.net.2.bias,OUT09:transformer_blocks.0.ff.net.2.weight,OUT09:transformer_blocks.0.norm1.bias,OUT09:transformer_blocks.0.norm1.weight,OUT09:transformer_blocks.0.norm2.bias,OUT09:transformer_blocks.0.norm2.weight,OUT09:transformer_blocks.0.norm3.bias,OUT09:transformer_blocks.0.norm3.weight
OUT10:emb_layers.1.bias,OUT10:emb_layers.1.weight,OUT10:in_layers.0.bias,OUT10:in_layers.0.weight,OUT10:in_layers.2.bias,OUT10:in_layers.2.weight,OUT10:out_layers.0.bias,OUT10:out_layers.0.weight,OUT10:out_layers.3.bias,OUT10:out_layers.3.weight,OUT10:skip_connection.bias,OUT10:skip_connection.weight,OUT10:norm.bias,OUT10:norm.weight,OUT10:proj_in.bias,OUT10:proj_in.weight,OUT10:proj_out.bias,OUT10:proj_out.weight,OUT10:transformer_blocks.0.attn1.to_k.weight,OUT10:transformer_blocks.0.attn1.to_out.0.bias,OUT10:transformer_blocks.0.attn1.to_out.0.weight,OUT10:transformer_blocks.0.attn1.to_q.weight,OUT10:transformer_blocks.0.attn1.to_v.weight,OUT10:transformer_blocks.0.attn2.to_k.weight,OUT10:transformer_blocks.0.attn2.to_out.0.bias,OUT10:transformer_blocks.0.attn2.to_out.0.weight,OUT10:transformer_blocks.0.attn2.to_q.weight,OUT10:transformer_blocks.0.attn2.to_v.weight,OUT10:transformer_blocks.0.ff.net.0.proj.bias,OUT10:transformer_blocks.0.ff.net.0.proj.weight,OUT10:transformer_blocks.0.ff.net.2.bias,OUT10:transformer_blocks.0.ff.net.2.weight,OUT10:transformer_blocks.0.norm1.bias,OUT10:transformer_blocks.0.norm1.weight,OUT10:transformer_blocks.0.norm2.bias,OUT10:transformer_blocks.0.norm2.weight,OUT10:transformer_blocks.0.norm3.bias,OUT10:transformer_blocks.0.norm3.weight
OUT11:emb_layers.1.bias,OUT11:emb_layers.1.weight,OUT11:in_layers.0.bias,OUT11:in_layers.0.weight,OUT11:in_layers.2.bias,OUT11:in_layers.2.weight,OUT11:out_layers.0.bias,OUT11:out_layers.0.weight,OUT11:out_layers.3.bias,OUT11:out_layers.3.weight,OUT11:skip_connection.bias,OUT11:skip_connection.weight,OUT11:norm.bias,OUT11:norm.weight,OUT11:proj_in.bias,OUT11:proj_in.weight,OUT11:proj_out.bias,OUT11:proj_out.weight,OUT11:transformer_blocks.0.attn1.to_k.weight,OUT11:transformer_blocks.0.attn1.to_out.0.bias,OUT11:transformer_blocks.0.attn1.to_out.0.weight,OUT11:transformer_blocks.0.attn1.to_q.weight,OUT11:transformer_blocks.0.attn1.to_v.weight,OUT11:transformer_blocks.0.attn2.to_k.weight,OUT11:transformer_blocks.0.attn2.to_out.0.bias,OUT11:transformer_blocks.0.attn2.to_out.0.weight,OUT11:transformer_blocks.0.attn2.to_q.weight,OUT11:transformer_blocks.0.attn2.to_v.weight,OUT11:transformer_blocks.0.ff.net.0.proj.bias,OUT11:transformer_blocks.0.ff.net.0.proj.weight,OUT11:transformer_blocks.0.ff.net.2.bias,OUT11:transformer_blocks.0.ff.net.2.weight,OUT11:transformer_blocks.0.norm1.bias,OUT11:transformer_blocks.0.norm1.weight,OUT11:transformer_blocks.0.norm2.bias,OUT11:transformer_blocks.0.norm2.weight,OUT11:transformer_blocks.0.norm3.bias,OUT11:transformer_blocks.0.norm3.weight,OUT11:0.bias,OUT11:0.weight,OUT11:2.bias,OUT11:2.weight
extensions/microsoftexcel-supermerger/scripts/__pycache__/supermerger.cpython-310.pyc
ADDED
Binary file (22.2 kB)
extensions/microsoftexcel-supermerger/scripts/mbwpresets.txt
ADDED
@@ -0,0 +1,39 @@
preset_name preset_weights
GRAD_V 0,1,0.9166666667,0.8333333333,0.75,0.6666666667,0.5833333333,0.5,0.4166666667,0.3333333333,0.25,0.1666666667,0.0833333333,0,0.0833333333,0.1666666667,0.25,0.3333333333,0.4166666667,0.5,0.5833333333,0.6666666667,0.75,0.8333333333,0.9166666667,1.0
GRAD_A 0,0,0.0833333333,0.1666666667,0.25,0.3333333333,0.4166666667,0.5,0.5833333333,0.6666666667,0.75,0.8333333333,0.9166666667,1.0,0.9166666667,0.8333333333,0.75,0.6666666667,0.5833333333,0.5,0.4166666667,0.3333333333,0.25,0.1666666667,0.0833333333,0
FLAT_25 0,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25
FLAT_75 0,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75
WRAP08 0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1
WRAP12 0,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1
WRAP14 0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1
WRAP16 0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1
MID12_50 0,0,0,0,0,0,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0,0,0,0,0,0
OUT07 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1
OUT12 0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1
OUT12_5 0,0,0,0,0,0,0,0,0,0,0,0,0,0.5,1,1,1,1,1,1,1,1,1,1,1,1
RING08_SOFT 0,0,0,0,0,0,0.5,1,1,1,0.5,0,0,0,0,0,0.5,1,1,1,0.5,0,0,0,0,0
RING08_5 0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0
RING10_5 0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0
RING10_3 0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,0,0,0,0,0
SMOOTHSTEP 0,0,0.00506365740740741,0.0196759259259259,0.04296875,0.0740740740740741,0.112123842592593,0.15625,0.205584490740741,0.259259259259259,0.31640625,0.376157407407407,0.437644675925926,0.5,0.562355324074074,0.623842592592592,0.68359375,0.740740740740741,0.794415509259259,0.84375,0.887876157407408,0.925925925925926,0.95703125,0.980324074074074,0.994936342592593,1
REVERSE-SMOOTHSTEP 0,1,0.994936342592593,0.980324074074074,0.95703125,0.925925925925926,0.887876157407407,0.84375,0.794415509259259,0.740740740740741,0.68359375,0.623842592592593,0.562355324074074,0.5,0.437644675925926,0.376157407407408,0.31640625,0.259259259259259,0.205584490740741,0.15625,0.112123842592592,0.0740740740740742,0.0429687499999996,0.0196759259259258,0.00506365740740744,0
SMOOTHSTEP*2 0,0,0.0101273148148148,0.0393518518518519,0.0859375,0.148148148148148,0.224247685185185,0.3125,0.411168981481482,0.518518518518519,0.6328125,0.752314814814815,0.875289351851852,1.,0.875289351851852,0.752314814814815,0.6328125,0.518518518518519,0.411168981481481,0.3125,0.224247685185184,0.148148148148148,0.0859375,0.0393518518518512,0.0101273148148153,0
R_SMOOTHSTEP*2 0,1,0.989872685185185,0.960648148148148,0.9140625,0.851851851851852,0.775752314814815,0.6875,0.588831018518519,0.481481481481481,0.3671875,0.247685185185185,0.124710648148148,0.,0.124710648148148,0.247685185185185,0.3671875,0.481481481481481,0.588831018518519,0.6875,0.775752314814816,0.851851851851852,0.9140625,0.960648148148149,0.989872685185185,1
SMOOTHSTEP*3 0,0,0.0151909722222222,0.0590277777777778,0.12890625,0.222222222222222,0.336371527777778,0.46875,0.616753472222222,0.777777777777778,0.94921875,0.871527777777778,0.687065972222222,0.5,0.312934027777778,0.128472222222222,0.0507812500000004,0.222222222222222,0.383246527777778,0.53125,0.663628472222223,0.777777777777778,0.87109375,0.940972222222222,0.984809027777777,1
R_SMOOTHSTEP*3 0,1,0.984809027777778,0.940972222222222,0.87109375,0.777777777777778,0.663628472222222,0.53125,0.383246527777778,0.222222222222222,0.05078125,0.128472222222222,0.312934027777778,0.5,0.687065972222222,0.871527777777778,0.94921875,0.777777777777778,0.616753472222222,0.46875,0.336371527777777,0.222222222222222,0.12890625,0.0590277777777777,0.0151909722222232,0
SMOOTHSTEP*4 0,0,0.0202546296296296,0.0787037037037037,0.171875,0.296296296296296,0.44849537037037,0.625,0.822337962962963,0.962962962962963,0.734375,0.49537037037037,0.249421296296296,0.,0.249421296296296,0.495370370370371,0.734375000000001,0.962962962962963,0.822337962962962,0.625,0.448495370370369,0.296296296296297,0.171875,0.0787037037037024,0.0202546296296307,0
R_SMOOTHSTEP*4 0,1,0.97974537037037,0.921296296296296,0.828125,0.703703703703704,0.55150462962963,0.375,0.177662037037037,0.0370370370370372,0.265625,0.50462962962963,0.750578703703704,1.,0.750578703703704,0.504629629629629,0.265624999999999,0.0370370370370372,0.177662037037038,0.375,0.551504629629631,0.703703703703703,0.828125,0.921296296296298,0.979745370370369,1
SMOOTHSTEP/2 0,0,0.0196759259259259,0.0740740740740741,0.15625,0.259259259259259,0.376157407407407,0.5,0.623842592592593,0.740740740740741,0.84375,0.925925925925926,0.980324074074074,1.,0.980324074074074,0.925925925925926,0.84375,0.740740740740741,0.623842592592593,0.5,0.376157407407407,0.259259259259259,0.15625,0.0740740740740741,0.0196759259259259,0
R_SMOOTHSTEP/2 0,1,0.980324074074074,0.925925925925926,0.84375,0.740740740740741,0.623842592592593,0.5,0.376157407407407,0.259259259259259,0.15625,0.0740740740740742,0.0196759259259256,0.,0.0196759259259256,0.0740740740740742,0.15625,0.259259259259259,0.376157407407407,0.5,0.623842592592593,0.740740740740741,0.84375,0.925925925925926,0.980324074074074,1
|
28 |
+
SMOOTHSTEP/3 0,0,0.04296875,0.15625,0.31640625,0.5,0.68359375,0.84375,0.95703125,1.,0.95703125,0.84375,0.68359375,0.5,0.31640625,0.15625,0.04296875,0.,0.04296875,0.15625,0.31640625,0.5,0.68359375,0.84375,0.95703125,1
|
29 |
+
R_SMOOTHSTEP/3 0,1,0.95703125,0.84375,0.68359375,0.5,0.31640625,0.15625,0.04296875,0.,0.04296875,0.15625,0.31640625,0.5,0.68359375,0.84375,0.95703125,1.,0.95703125,0.84375,0.68359375,0.5,0.31640625,0.15625,0.04296875,0
|
30 |
+
SMOOTHSTEP/4 0,0,0.0740740740740741,0.259259259259259,0.5,0.740740740740741,0.925925925925926,1.,0.925925925925926,0.740740740740741,0.5,0.259259259259259,0.0740740740740741,0.,0.0740740740740741,0.259259259259259,0.5,0.740740740740741,0.925925925925926,1.,0.925925925925926,0.740740740740741,0.5,0.259259259259259,0.0740740740740741,0
|
31 |
+
R_SMOOTHSTEP/4 0,1,0.925925925925926,0.740740740740741,0.5,0.259259259259259,0.0740740740740742,0.,0.0740740740740742,0.259259259259259,0.5,0.740740740740741,0.925925925925926,1.,0.925925925925926,0.740740740740741,0.5,0.259259259259259,0.0740740740740742,0.,0.0740740740740742,0.259259259259259,0.5,0.740740740740741,0.925925925925926,1
|
32 |
+
COSINE 0,1,0.995722430686905,0.982962913144534,0.961939766255643,0.933012701892219,0.896676670145617,0.853553390593274,0.80438071450436,0.75,0.691341716182545,0.62940952255126,0.565263096110026,0.5,0.434736903889974,0.37059047744874,0.308658283817455,0.25,0.195619285495639,0.146446609406726,0.103323329854382,0.0669872981077805,0.0380602337443566,0.0170370868554658,0.00427756931309475,0
|
33 |
+
REVERSE_COSINE 0,0,0.00427756931309475,0.0170370868554659,0.0380602337443566,0.0669872981077808,0.103323329854383,0.146446609406726,0.19561928549564,0.25,0.308658283817455,0.37059047744874,0.434736903889974,0.5,0.565263096110026,0.62940952255126,0.691341716182545,0.75,0.804380714504361,0.853553390593274,0.896676670145618,0.933012701892219,0.961939766255643,0.982962913144534,0.995722430686905,1
|
34 |
+
TRUE_CUBIC_HERMITE 0,0,0.199031876929012,0.325761959876543,0.424641927083333,0.498456790123457,0.549991560570988,0.58203125,0.597360869984568,0.598765432098765,0.589029947916667,0.570939429012346,0.547278886959876,0.520833333333333,0.49438777970679,0.470727237654321,0.45263671875,0.442901234567901,0.444305796682099,0.459635416666667,0.491675106095678,0.543209876543211,0.617024739583333,0.715904706790124,0.842634789737655,1
|
35 |
+
TRUE_REVERSE_CUBIC_HERMITE 0,1,0.800968123070988,0.674238040123457,0.575358072916667,0.501543209876543,0.450008439429012,0.41796875,0.402639130015432,0.401234567901235,0.410970052083333,0.429060570987654,0.452721113040124,0.479166666666667,0.50561222029321,0.529272762345679,0.54736328125,0.557098765432099,0.555694203317901,0.540364583333333,0.508324893904322,0.456790123456789,0.382975260416667,0.284095293209876,0.157365210262345,0
|
36 |
+
FAKE_CUBIC_HERMITE 0,0,0.157576195987654,0.28491512345679,0.384765625,0.459876543209877,0.512996720679012,0.546875,0.564260223765432,0.567901234567901,0.560546875,0.544945987654321,0.523847415123457,0.5,0.476152584876543,0.455054012345679,0.439453125,0.432098765432099,0.435739776234568,0.453125,0.487003279320987,0.540123456790124,0.615234375,0.71508487654321,0.842423804012347,1
|
37 |
+
FAKE_REVERSE_CUBIC_HERMITE 0,1,0.842423804012346,0.71508487654321,0.615234375,0.540123456790123,0.487003279320988,0.453125,0.435739776234568,0.432098765432099,0.439453125,0.455054012345679,0.476152584876543,0.5,0.523847415123457,0.544945987654321,0.560546875,0.567901234567901,0.564260223765432,0.546875,0.512996720679013,0.459876543209876,0.384765625,0.28491512345679,0.157576195987653,0
|
38 |
+
ALL_A 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
|
39 |
+
ALL_B 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
|
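The SMOOTHSTEP family of rows above follows the classic smoothstep polynomial. As a minimal sketch (not part of this repository), the plain SMOOTHSTEP row can be reproduced by sampling 3t^2 - 2t^3 at t = i/24 for the 25 U-Net blocks, with the leading BASE column pinned to 0; the reversed and *n / /n variants appear to apply the same curve to a reflected or frequency-scaled ramp. The column order (BASE, IN00-IN11, M00, OUT00-OUT11) is an assumption taken from the blockid list in mergers.py.

# Hypothetical helper, not part of the extension: regenerate the SMOOTHSTEP preset row.
def smoothstep(t: float) -> float:
    return 3 * t ** 2 - 2 * t ** 3

values = [0.0] + [smoothstep(i / 24) for i in range(25)]   # BASE column, then the 25 blocks
print("SMOOTHSTEP\t" + ",".join(f"{v:.15g}" for v in values))
# prints 0,0,0.00506365740740741,...,1 - matching the row above up to float formatting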
extensions/microsoftexcel-supermerger/scripts/mbwpresets_master.txt
ADDED
@@ -0,0 +1,39 @@
1 |
+
preset_name preset_weights
|
2 |
+
GRAD_V 0,1,0.9166666667,0.8333333333,0.75,0.6666666667,0.5833333333,0.5,0.4166666667,0.3333333333,0.25,0.1666666667,0.0833333333,0,0.0833333333,0.1666666667,0.25,0.3333333333,0.4166666667,0.5,0.5833333333,0.6666666667,0.75,0.8333333333,0.9166666667,1.0
|
3 |
+
GRAD_A 0,0,0.0833333333,0.1666666667,0.25,0.3333333333,0.4166666667,0.5,0.5833333333,0.6666666667,0.75,0.8333333333,0.9166666667,1.0,0.9166666667,0.8333333333,0.75,0.6666666667,0.5833333333,0.5,0.4166666667,0.3333333333,0.25,0.1666666667,0.0833333333,0
|
4 |
+
FLAT_25 0,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25,0.25
|
5 |
+
FLAT_75 0,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75,0.75
|
6 |
+
WRAP08 0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1
|
7 |
+
WRAP12 0,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1
|
8 |
+
WRAP14 0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1
|
9 |
+
WRAP16 0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1
|
10 |
+
MID12_50 0,0,0,0,0,0,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0,0,0,0,0,0
|
11 |
+
OUT07 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1
|
12 |
+
OUT12 0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1
|
13 |
+
OUT12_5 0,0,0,0,0,0,0,0,0,0,0,0,0,0.5,1,1,1,1,1,1,1,1,1,1,1,1
|
14 |
+
RING08_SOFT 0,0,0,0,0,0,0.5,1,1,1,0.5,0,0,0,0,0,0.5,1,1,1,0.5,0,0,0,0,0
|
15 |
+
RING08_5 0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0
|
16 |
+
RING10_5 0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0
|
17 |
+
RING10_3 0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,0,0,0,0,0
|
18 |
+
SMOOTHSTEP 0,0,0.00506365740740741,0.0196759259259259,0.04296875,0.0740740740740741,0.112123842592593,0.15625,0.205584490740741,0.259259259259259,0.31640625,0.376157407407407,0.437644675925926,0.5,0.562355324074074,0.623842592592592,0.68359375,0.740740740740741,0.794415509259259,0.84375,0.887876157407408,0.925925925925926,0.95703125,0.980324074074074,0.994936342592593,1
|
19 |
+
REVERSE-SMOOTHSTEP 0,1,0.994936342592593,0.980324074074074,0.95703125,0.925925925925926,0.887876157407407,0.84375,0.794415509259259,0.740740740740741,0.68359375,0.623842592592593,0.562355324074074,0.5,0.437644675925926,0.376157407407408,0.31640625,0.259259259259259,0.205584490740741,0.15625,0.112123842592592,0.0740740740740742,0.0429687499999996,0.0196759259259258,0.00506365740740744,0
|
20 |
+
SMOOTHSTEP*2 0,0,0.0101273148148148,0.0393518518518519,0.0859375,0.148148148148148,0.224247685185185,0.3125,0.411168981481482,0.518518518518519,0.6328125,0.752314814814815,0.875289351851852,1.,0.875289351851852,0.752314814814815,0.6328125,0.518518518518519,0.411168981481481,0.3125,0.224247685185184,0.148148148148148,0.0859375,0.0393518518518512,0.0101273148148153,0
|
21 |
+
R_SMOOTHSTEP*2 0,1,0.989872685185185,0.960648148148148,0.9140625,0.851851851851852,0.775752314814815,0.6875,0.588831018518519,0.481481481481481,0.3671875,0.247685185185185,0.124710648148148,0.,0.124710648148148,0.247685185185185,0.3671875,0.481481481481481,0.588831018518519,0.6875,0.775752314814816,0.851851851851852,0.9140625,0.960648148148149,0.989872685185185,1
|
22 |
+
SMOOTHSTEP*3 0,0,0.0151909722222222,0.0590277777777778,0.12890625,0.222222222222222,0.336371527777778,0.46875,0.616753472222222,0.777777777777778,0.94921875,0.871527777777778,0.687065972222222,0.5,0.312934027777778,0.128472222222222,0.0507812500000004,0.222222222222222,0.383246527777778,0.53125,0.663628472222223,0.777777777777778,0.87109375,0.940972222222222,0.984809027777777,1
|
23 |
+
R_SMOOTHSTEP*3 0,1,0.984809027777778,0.940972222222222,0.87109375,0.777777777777778,0.663628472222222,0.53125,0.383246527777778,0.222222222222222,0.05078125,0.128472222222222,0.312934027777778,0.5,0.687065972222222,0.871527777777778,0.94921875,0.777777777777778,0.616753472222222,0.46875,0.336371527777777,0.222222222222222,0.12890625,0.0590277777777777,0.0151909722222232,0
|
24 |
+
SMOOTHSTEP*4 0,0,0.0202546296296296,0.0787037037037037,0.171875,0.296296296296296,0.44849537037037,0.625,0.822337962962963,0.962962962962963,0.734375,0.49537037037037,0.249421296296296,0.,0.249421296296296,0.495370370370371,0.734375000000001,0.962962962962963,0.822337962962962,0.625,0.448495370370369,0.296296296296297,0.171875,0.0787037037037024,0.0202546296296307,0
|
25 |
+
R_SMOOTHSTEP*4 0,1,0.97974537037037,0.921296296296296,0.828125,0.703703703703704,0.55150462962963,0.375,0.177662037037037,0.0370370370370372,0.265625,0.50462962962963,0.750578703703704,1.,0.750578703703704,0.504629629629629,0.265624999999999,0.0370370370370372,0.177662037037038,0.375,0.551504629629631,0.703703703703703,0.828125,0.921296296296298,0.979745370370369,1
|
26 |
+
SMOOTHSTEP/2 0,0,0.0196759259259259,0.0740740740740741,0.15625,0.259259259259259,0.376157407407407,0.5,0.623842592592593,0.740740740740741,0.84375,0.925925925925926,0.980324074074074,1.,0.980324074074074,0.925925925925926,0.84375,0.740740740740741,0.623842592592593,0.5,0.376157407407407,0.259259259259259,0.15625,0.0740740740740741,0.0196759259259259,0
|
27 |
+
R_SMOOTHSTEP/2 0,1,0.980324074074074,0.925925925925926,0.84375,0.740740740740741,0.623842592592593,0.5,0.376157407407407,0.259259259259259,0.15625,0.0740740740740742,0.0196759259259256,0.,0.0196759259259256,0.0740740740740742,0.15625,0.259259259259259,0.376157407407407,0.5,0.623842592592593,0.740740740740741,0.84375,0.925925925925926,0.980324074074074,1
|
28 |
+
SMOOTHSTEP/3 0,0,0.04296875,0.15625,0.31640625,0.5,0.68359375,0.84375,0.95703125,1.,0.95703125,0.84375,0.68359375,0.5,0.31640625,0.15625,0.04296875,0.,0.04296875,0.15625,0.31640625,0.5,0.68359375,0.84375,0.95703125,1
|
29 |
+
R_SMOOTHSTEP/3 0,1,0.95703125,0.84375,0.68359375,0.5,0.31640625,0.15625,0.04296875,0.,0.04296875,0.15625,0.31640625,0.5,0.68359375,0.84375,0.95703125,1.,0.95703125,0.84375,0.68359375,0.5,0.31640625,0.15625,0.04296875,0
|
30 |
+
SMOOTHSTEP/4 0,0,0.0740740740740741,0.259259259259259,0.5,0.740740740740741,0.925925925925926,1.,0.925925925925926,0.740740740740741,0.5,0.259259259259259,0.0740740740740741,0.,0.0740740740740741,0.259259259259259,0.5,0.740740740740741,0.925925925925926,1.,0.925925925925926,0.740740740740741,0.5,0.259259259259259,0.0740740740740741,0
|
31 |
+
R_SMOOTHSTEP/4 0,1,0.925925925925926,0.740740740740741,0.5,0.259259259259259,0.0740740740740742,0.,0.0740740740740742,0.259259259259259,0.5,0.740740740740741,0.925925925925926,1.,0.925925925925926,0.740740740740741,0.5,0.259259259259259,0.0740740740740742,0.,0.0740740740740742,0.259259259259259,0.5,0.740740740740741,0.925925925925926,1
|
32 |
+
COSINE 0,1,0.995722430686905,0.982962913144534,0.961939766255643,0.933012701892219,0.896676670145617,0.853553390593274,0.80438071450436,0.75,0.691341716182545,0.62940952255126,0.565263096110026,0.5,0.434736903889974,0.37059047744874,0.308658283817455,0.25,0.195619285495639,0.146446609406726,0.103323329854382,0.0669872981077805,0.0380602337443566,0.0170370868554658,0.00427756931309475,0
|
33 |
+
REVERSE_COSINE 0,0,0.00427756931309475,0.0170370868554659,0.0380602337443566,0.0669872981077808,0.103323329854383,0.146446609406726,0.19561928549564,0.25,0.308658283817455,0.37059047744874,0.434736903889974,0.5,0.565263096110026,0.62940952255126,0.691341716182545,0.75,0.804380714504361,0.853553390593274,0.896676670145618,0.933012701892219,0.961939766255643,0.982962913144534,0.995722430686905,1
|
34 |
+
TRUE_CUBIC_HERMITE 0,0,0.199031876929012,0.325761959876543,0.424641927083333,0.498456790123457,0.549991560570988,0.58203125,0.597360869984568,0.598765432098765,0.589029947916667,0.570939429012346,0.547278886959876,0.520833333333333,0.49438777970679,0.470727237654321,0.45263671875,0.442901234567901,0.444305796682099,0.459635416666667,0.491675106095678,0.543209876543211,0.617024739583333,0.715904706790124,0.842634789737655,1
|
35 |
+
TRUE_REVERSE_CUBIC_HERMITE 0,1,0.800968123070988,0.674238040123457,0.575358072916667,0.501543209876543,0.450008439429012,0.41796875,0.402639130015432,0.401234567901235,0.410970052083333,0.429060570987654,0.452721113040124,0.479166666666667,0.50561222029321,0.529272762345679,0.54736328125,0.557098765432099,0.555694203317901,0.540364583333333,0.508324893904322,0.456790123456789,0.382975260416667,0.284095293209876,0.157365210262345,0
|
36 |
+
FAKE_CUBIC_HERMITE 0,0,0.157576195987654,0.28491512345679,0.384765625,0.459876543209877,0.512996720679012,0.546875,0.564260223765432,0.567901234567901,0.560546875,0.544945987654321,0.523847415123457,0.5,0.476152584876543,0.455054012345679,0.439453125,0.432098765432099,0.435739776234568,0.453125,0.487003279320987,0.540123456790124,0.615234375,0.71508487654321,0.842423804012347,1
|
37 |
+
FAKE_REVERSE_CUBIC_HERMITE 0,1,0.842423804012346,0.71508487654321,0.615234375,0.540123456790123,0.487003279320988,0.453125,0.435739776234568,0.432098765432099,0.439453125,0.455054012345679,0.476152584876543,0.5,0.523847415123457,0.544945987654321,0.560546875,0.567901234567901,0.564260223765432,0.546875,0.512996720679013,0.459876543209876,0.384765625,0.28491512345679,0.157576195987653,0
|
38 |
+
ALL_A 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
|
39 |
+
ALL_B 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
|
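For context, these preset rows are consumed by the wpreseter() helper in scripts/mergers/mergers.py (added later in this commit): when the MBW weights field contains a bare preset name instead of a comma-separated list, the name is looked up in this file (name and weights separated by ':' or a tab) and replaced by the stored weight string. A simplified, stand-alone sketch of that lookup (resolve_preset is a hypothetical name):

def resolve_preset(weights: str, presets_text: str) -> str:
    # Already a numeric weight list (or empty): return unchanged, as wpreseter does.
    if "," in weights or weights == "":
        return weights
    table = {}
    for line in presets_text.splitlines():
        for sep in (":", "\t"):
            if sep in line:
                name, values = line.split(sep, 1)
                table[name.strip()] = values
    return table.get(weights.strip(), weights)

# e.g. resolve_preset("GRAD_V", presets_text) -> "0,1,0.9166666667,...,1.0" (full 26-value string elided here)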
extensions/microsoftexcel-supermerger/scripts/mergers/__pycache__/mergers.cpython-310.pyc
ADDED
Binary file (19.9 kB).
extensions/microsoftexcel-supermerger/scripts/mergers/__pycache__/model_util.cpython-310.pyc
ADDED
Binary file (24.9 kB).
extensions/microsoftexcel-supermerger/scripts/mergers/__pycache__/pluslora.cpython-310.pyc
ADDED
Binary file (34.5 kB).
extensions/microsoftexcel-supermerger/scripts/mergers/__pycache__/xyplot.cpython-310.pyc
ADDED
Binary file (15.2 kB).
extensions/microsoftexcel-supermerger/scripts/mergers/mergers.py
ADDED
@@ -0,0 +1,699 @@
1 |
+
from linecache import clearcache
|
2 |
+
|
3 |
+
import os
|
4 |
+
import gc
|
5 |
+
import numpy as np
|
6 |
+
import os.path
|
7 |
+
import re
|
8 |
+
import torch
|
9 |
+
import tqdm
|
10 |
+
import datetime
|
11 |
+
import csv
|
12 |
+
import json
|
13 |
+
import torch.nn as nn
|
14 |
+
import scipy.ndimage
|
15 |
+
from scipy.ndimage.filters import median_filter as filter
|
16 |
+
from PIL import Image, ImageFont, ImageDraw
|
17 |
+
from tqdm import tqdm
|
18 |
+
from modules import shared, processing, sd_models, images, sd_samplers,scripts
|
19 |
+
from modules.ui import plaintext_to_html
|
20 |
+
from modules.shared import opts
|
21 |
+
from modules.processing import create_infotext,Processed
|
22 |
+
from modules.sd_models import load_model,checkpoints_loaded
|
23 |
+
from scripts.mergers.model_util import usemodelgen,filenamecutter,savemodel
|
24 |
+
|
25 |
+
from inspect import currentframe
|
26 |
+
|
27 |
+
stopmerge = False
|
28 |
+
|
29 |
+
def freezemtime():
|
30 |
+
global stopmerge
|
31 |
+
stopmerge = True
|
32 |
+
|
33 |
+
mergedmodel=[]
|
34 |
+
TYPESEG = ["none","alpha","beta (if Triple or Twice is not selected, Twice is automatically enabled)","alpha and beta","seed", "mbw alpha","mbw beta","mbw alpha and beta", "model_A","model_B","model_C","pinpoint blocks (alpha or beta must be selected for another axis)","elemental","pinpoint element","effective elemental checker","tensors","calcmode","prompt"]
|
35 |
+
TYPES = ["none","alpha","beta","alpha and beta","seed", "mbw alpha ","mbw beta","mbw alpha and beta", "model_A","model_B","model_C","pinpoint blocks","elemental","pinpoint element","effective","tensor","calcmode","prompt"]
|
36 |
+
MODES=["Weight" ,"Add" ,"Triple","Twice"]
|
37 |
+
SAVEMODES=["save model", "overwrite"]
|
38 |
+
#type[0:alpha,1:beta,2:seed,3:mbw,4:model_A,5:model_B,6:model_C]
|
39 |
+
#msettings=[0 weights_a,1 weights_b,2 model_a,3 model_b,4 model_c,5 base_alpha,6 base_beta,7 mode,8 useblocks,9 custom_name,10 save_sets,11 id_sets,12 wpresets]
|
40 |
+
#id sets "image", "PNG info","XY grid"
|
41 |
+
|
42 |
+
hear = False
|
43 |
+
hearm = False
|
44 |
+
non4 = [None]*4
|
45 |
+
|
46 |
+
def caster(news,hear):
|
47 |
+
if hear: print(news)
|
48 |
+
|
49 |
+
def casterr(*args,hear=hear):
|
50 |
+
if hear:
|
51 |
+
names = {id(v): k for k, v in currentframe().f_back.f_locals.items()}
|
52 |
+
print('\n'.join([names.get(id(arg), '???') + ' = ' + repr(arg) for arg in args]))
|
53 |
+
|
54 |
+
#msettings=[weights_a,weights_b,model_a,model_b,model_c,device,base_alpha,base_beta,mode,loranames,useblocks,custom_name,save_sets,id_sets,wpresets,deep]
|
55 |
+
def smergegen(weights_a,weights_b,model_a,model_b,model_c,base_alpha,base_beta,mode,
|
56 |
+
calcmode,useblocks,custom_name,save_sets,id_sets,wpresets,deep,tensor,
|
57 |
+
esettings,
|
58 |
+
prompt,nprompt,steps,sampler,cfg,seed,w,h,
|
59 |
+
hireson,hrupscaler,hr2ndsteps,denoise_str,hr_scale,batch_size,
|
60 |
+
currentmodel,imggen):
|
61 |
+
|
62 |
+
deepprint = True if "print change" in esettings else False
|
63 |
+
|
64 |
+
result,currentmodel,modelid,theta_0,metadata = smerge(
|
65 |
+
weights_a,weights_b,model_a,model_b,model_c,base_alpha,base_beta,mode,calcmode,
|
66 |
+
useblocks,custom_name,save_sets,id_sets,wpresets,deep,tensor,deepprint=deepprint
|
67 |
+
)
|
68 |
+
|
69 |
+
if "ERROR" in result or "STOPPED" in result:
|
70 |
+
return result,"not loaded",*non4
|
71 |
+
|
72 |
+
usemodelgen(theta_0,model_a,currentmodel)
|
73 |
+
|
74 |
+
save = True if SAVEMODES[0] in save_sets else False
|
75 |
+
|
76 |
+
result = savemodel(theta_0,currentmodel,custom_name,save_sets,model_a,metadata) if save else "Merged model loaded:"+currentmodel
|
77 |
+
del theta_0
|
78 |
+
gc.collect()
|
79 |
+
|
80 |
+
if imggen :
|
81 |
+
images = simggen(prompt,nprompt,steps,sampler,cfg,seed,w,h,hireson,hrupscaler,hr2ndsteps,denoise_str,hr_scale,batch_size,currentmodel,id_sets,modelid)
|
82 |
+
return result,currentmodel,*images[:4]
|
83 |
+
else:
|
84 |
+
return result,currentmodel
|
85 |
+
|
86 |
+
NUM_INPUT_BLOCKS = 12
|
87 |
+
NUM_MID_BLOCK = 1
|
88 |
+
NUM_OUTPUT_BLOCKS = 12
|
89 |
+
NUM_TOTAL_BLOCKS = NUM_INPUT_BLOCKS + NUM_MID_BLOCK + NUM_OUTPUT_BLOCKS
|
90 |
+
blockid=["BASE","IN00","IN01","IN02","IN03","IN04","IN05","IN06","IN07","IN08","IN09","IN10","IN11","M00","OUT00","OUT01","OUT02","OUT03","OUT04","OUT05","OUT06","OUT07","OUT08","OUT09","OUT10","OUT11"]
|
91 |
+
|
92 |
+
def smerge(weights_a,weights_b,model_a,model_b,model_c,base_alpha,base_beta,mode,calcmode,
|
93 |
+
useblocks,custom_name,save_sets,id_sets,wpresets,deep,tensor,deepprint = False):
|
94 |
+
caster("merge start",hearm)
|
95 |
+
global hear,mergedmodel,stopmerge
|
96 |
+
stopmerge = False
|
97 |
+
|
98 |
+
gc.collect()
|
99 |
+
|
100 |
+
# for from file
|
101 |
+
if type(useblocks) is str:
|
102 |
+
useblocks = True if useblocks =="True" else False
|
103 |
+
if type(base_alpha) == str:base_alpha = float(base_alpha)
|
104 |
+
if type(base_beta) == str:base_beta = float(base_beta)
|
105 |
+
|
106 |
+
weights_a_orig = weights_a
|
107 |
+
weights_b_orig = weights_b
|
108 |
+
|
109 |
+
# preset to weights
|
110 |
+
if wpresets != False and useblocks:
|
111 |
+
weights_a = wpreseter(weights_a,wpresets)
|
112 |
+
weights_b = wpreseter(weights_b,wpresets)
|
113 |
+
|
114 |
+
# mode select booleans
|
115 |
+
save = True if SAVEMODES[0] in save_sets else False
|
116 |
+
usebeta = MODES[2] in mode or MODES[3] in mode or calcmode == "tensor"
|
117 |
+
save_metadata = "save metadata" in save_sets
|
118 |
+
metadata = {"format": "pt"}
|
119 |
+
|
120 |
+
if not useblocks:
|
121 |
+
weights_a = weights_b = ""
|
122 |
+
#for save log and save current model
|
123 |
+
mergedmodel =[weights_a,weights_b,
|
124 |
+
hashfromname(model_a),hashfromname(model_b),hashfromname(model_c),
|
125 |
+
base_alpha,base_beta,mode,useblocks,custom_name,save_sets,id_sets,deep,calcmode,tensor].copy()
|
126 |
+
|
127 |
+
model_a = namefromhash(model_a)
|
128 |
+
model_b = namefromhash(model_b)
|
129 |
+
model_c = namefromhash(model_c)
|
130 |
+
theta_2 = {}
|
131 |
+
|
132 |
+
caster(mergedmodel,False)
|
133 |
+
|
134 |
+
if len(deep) > 0:
|
135 |
+
deep = deep.replace("\n",",")
|
136 |
+
deep = deep.split(",")
|
137 |
+
|
138 |
+
#format check
|
139 |
+
if model_a =="" or model_b =="" or ((not MODES[0] in mode) and model_c=="") :
|
140 |
+
return "ERROR: Necessary model is not selected",*non4
|
141 |
+
|
142 |
+
#for MBW text to list
|
143 |
+
if useblocks:
|
144 |
+
weights_a_t=weights_a.split(',',1)
|
145 |
+
weights_b_t=weights_b.split(',',1)
|
146 |
+
base_alpha = float(weights_a_t[0])
|
147 |
+
weights_a = [float(w) for w in weights_a_t[1].split(',')]
|
148 |
+
caster(f"from {weights_a_t}, alpha = {base_alpha},weights_a ={weights_a}",hearm)
|
149 |
+
if len(weights_a) != 25:return "ERROR: weights alpha must contain 26 values (base alpha + 25 block weights).",*non4
|
150 |
+
if usebeta:
|
151 |
+
base_beta = float(weights_b_t[0])
|
152 |
+
weights_b = [float(w) for w in weights_b_t[1].split(',')]
|
153 |
+
caster(f"from {weights_b_t}, beta = {base_beta},weights_a ={weights_b}",hearm)
|
154 |
+
if len(weights_b) != 25: return "ERROR: weights beta must contain 26 values (base beta + 25 block weights).",*non4
|
155 |
+
|
156 |
+
caster("model load start",hearm)
|
157 |
+
|
158 |
+
print(f" model A \t: {model_a}")
|
159 |
+
print(f" model B \t: {model_b}")
|
160 |
+
print(f" model C \t: {model_c}")
|
161 |
+
print(f" alpha,beta\t: {base_alpha,base_beta}")
|
162 |
+
print(f" weights_alpha\t: {weights_a}")
|
163 |
+
print(f" weights_beta\t: {weights_b}")
|
164 |
+
print(f" mode\t\t: {mode}")
|
165 |
+
print(f" MBW \t\t: {useblocks}")
|
166 |
+
print(f" CalcMode \t: {calcmode}")
|
167 |
+
print(f" Elemental \t: {deep}")
|
168 |
+
print(f" Tensors \t: {tensor}")
|
169 |
+
|
170 |
+
theta_1=load_model_weights_m(model_b,False,True,save).copy()
|
171 |
+
|
172 |
+
if MODES[1] in mode:#Add
|
173 |
+
if stopmerge: return "STOPPED", *non4
|
174 |
+
theta_2 = load_model_weights_m(model_c,False,False,save).copy()
|
175 |
+
for key in tqdm(theta_1.keys()):
|
176 |
+
if 'model' in key:
|
177 |
+
if key in theta_2:
|
178 |
+
t2 = theta_2.get(key, torch.zeros_like(theta_1[key]))
|
179 |
+
theta_1[key] = theta_1[key]- t2
|
180 |
+
else:
|
181 |
+
theta_1[key] = torch.zeros_like(theta_1[key])
|
182 |
+
del theta_2
|
183 |
+
|
184 |
+
if stopmerge: return "STOPPED", *non4
|
185 |
+
|
186 |
+
if calcmode == "tensor":
|
187 |
+
theta_t = load_model_weights_m(model_a,True,False,save).copy()
|
188 |
+
theta_0 ={}
|
189 |
+
for key in theta_t:
|
190 |
+
theta_0[key] = theta_t[key].clone()
|
191 |
+
del theta_t
|
192 |
+
else:
|
193 |
+
theta_0=load_model_weights_m(model_a,True,False,save).copy()
|
194 |
+
|
195 |
+
|
196 |
+
if MODES[2] in mode or MODES[3] in mode:#Triple or Twice
|
197 |
+
theta_2 = load_model_weights_m(model_c,False,False,save).copy()
|
198 |
+
|
199 |
+
alpha = base_alpha
|
200 |
+
beta = base_beta
|
201 |
+
|
202 |
+
re_inp = re.compile(r'\.input_blocks\.(\d+)\.') # 12
|
203 |
+
re_mid = re.compile(r'\.middle_block\.(\d+)\.') # 1
|
204 |
+
re_out = re.compile(r'\.output_blocks\.(\d+)\.') # 12
|
205 |
+
|
206 |
+
chckpoint_dict_skip_on_merge = ["cond_stage_model.transformer.text_model.embeddings.position_ids"]
|
207 |
+
count_target_of_basealpha = 0
|
208 |
+
|
209 |
+
if calcmode =="cosineA": #favors modelA's structure with details from B
|
210 |
+
if stopmerge: return "STOPPED", *non4
|
211 |
+
sim = torch.nn.CosineSimilarity(dim=0)
|
212 |
+
sims = np.array([], dtype=np.float64)
|
213 |
+
for key in (tqdm(theta_0.keys(), desc="Stage 0/2")):
|
214 |
+
# skip VAE model parameters to get better results
|
215 |
+
if "first_stage_model" in key: continue
|
216 |
+
if "model" in key and key in theta_1:
|
217 |
+
theta_0_norm = nn.functional.normalize(theta_0[key].to(torch.float32), p=2, dim=0)
|
218 |
+
theta_1_norm = nn.functional.normalize(theta_1[key].to(torch.float32), p=2, dim=0)
|
219 |
+
simab = sim(theta_0_norm, theta_1_norm)
|
220 |
+
sims = np.append(sims,simab.numpy())
|
221 |
+
sims = sims[~np.isnan(sims)]
|
222 |
+
sims = np.delete(sims, np.where(sims<np.percentile(sims, 1 ,method = 'midpoint')))
|
223 |
+
sims = np.delete(sims, np.where(sims>np.percentile(sims, 99 ,method = 'midpoint')))
|
224 |
+
|
225 |
+
if calcmode =="cosineB": #favors modelB's structure with details from A
|
226 |
+
if stopmerge: return "STOPPED", *non4
|
227 |
+
sim = torch.nn.CosineSimilarity(dim=0)
|
228 |
+
sims = np.array([], dtype=np.float64)
|
229 |
+
for key in (tqdm(theta_0.keys(), desc="Stage 0/2")):
|
230 |
+
# skip VAE model parameters to get better results
|
231 |
+
if "first_stage_model" in key: continue
|
232 |
+
if "model" in key and key in theta_1:
|
233 |
+
simab = sim(theta_0[key].to(torch.float32), theta_1[key].to(torch.float32))
|
234 |
+
dot_product = torch.dot(theta_0[key].view(-1).to(torch.float32), theta_1[key].view(-1).to(torch.float32))
|
235 |
+
magnitude_similarity = dot_product / (torch.norm(theta_0[key].to(torch.float32)) * torch.norm(theta_1[key].to(torch.float32)))
|
236 |
+
combined_similarity = (simab + magnitude_similarity) / 2.0
|
237 |
+
sims = np.append(sims, combined_similarity.numpy())
|
238 |
+
sims = sims[~np.isnan(sims)]
|
239 |
+
sims = np.delete(sims, np.where(sims < np.percentile(sims, 1, method='midpoint')))
|
240 |
+
sims = np.delete(sims, np.where(sims > np.percentile(sims, 99, method='midpoint')))
|
241 |
+
|
242 |
+
for key in tqdm(theta_0.keys(), desc="Stage 1/2"):
|
243 |
+
if stopmerge: return "STOPPED", *non4
|
244 |
+
if "model" in key and key in theta_1:
|
245 |
+
if usebeta and (not key in theta_2) and (not theta_2 == {}) :
|
246 |
+
continue
|
247 |
+
|
248 |
+
weight_index = -1
|
249 |
+
current_alpha = alpha
|
250 |
+
current_beta = beta
|
251 |
+
|
252 |
+
if key in chckpoint_dict_skip_on_merge:
|
253 |
+
continue
|
254 |
+
|
255 |
+
# check weighted and U-Net or not
|
256 |
+
if weights_a is not None and 'model.diffusion_model.' in key:
|
257 |
+
# check block index
|
258 |
+
weight_index = -1
|
259 |
+
|
260 |
+
if 'time_embed' in key:
|
261 |
+
weight_index = 0 # before input blocks
|
262 |
+
elif '.out.' in key:
|
263 |
+
weight_index = NUM_TOTAL_BLOCKS - 1 # after output blocks
|
264 |
+
else:
|
265 |
+
m = re_inp.search(key)
|
266 |
+
if m:
|
267 |
+
inp_idx = int(m.groups()[0])
|
268 |
+
weight_index = inp_idx
|
269 |
+
else:
|
270 |
+
m = re_mid.search(key)
|
271 |
+
if m:
|
272 |
+
weight_index = NUM_INPUT_BLOCKS
|
273 |
+
else:
|
274 |
+
m = re_out.search(key)
|
275 |
+
if m:
|
276 |
+
out_idx = int(m.groups()[0])
|
277 |
+
weight_index = NUM_INPUT_BLOCKS + NUM_MID_BLOCK + out_idx
|
278 |
+
|
279 |
+
if weight_index >= NUM_TOTAL_BLOCKS:
|
280 |
+
print(f"ERROR: illegal block index: {key}")
|
281 |
+
return f"ERROR: illegal block index: {key}",*non4
|
282 |
+
|
283 |
+
if weight_index >= 0 and useblocks:
|
284 |
+
current_alpha = weights_a[weight_index]
|
285 |
+
if usebeta: current_beta = weights_b[weight_index]
|
286 |
+
else:
|
287 |
+
count_target_of_basealpha = count_target_of_basealpha + 1
|
288 |
+
|
289 |
+
if len(deep) > 0:
|
290 |
+
skey = key + blockid[weight_index+1]
|
291 |
+
for d in deep:
|
292 |
+
if d.count(":") != 2 :continue
|
293 |
+
dbs,dws,dr = d.split(":")[0],d.split(":")[1],d.split(":")[2]
|
294 |
+
dbs,dws = dbs.split(" "), dws.split(" ")
|
295 |
+
dbn,dbs = (True,dbs[1:]) if dbs[0] == "NOT" else (False,dbs)
|
296 |
+
dwn,dws = (True,dws[1:]) if dws[0] == "NOT" else (False,dws)
|
297 |
+
flag = dbn
|
298 |
+
for db in dbs:
|
299 |
+
if db in skey:
|
300 |
+
flag = not dbn
|
301 |
+
if flag:flag = dwn
|
302 |
+
else:continue
|
303 |
+
for dw in dws:
|
304 |
+
if dw in skey:
|
305 |
+
flag = not dwn
|
306 |
+
if flag:
|
307 |
+
dr = float(dr)
|
308 |
+
if deepprint :print(dbs,dws,key,dr)
|
309 |
+
current_alpha = dr
|
310 |
+
|
311 |
+
if calcmode == "normal":
|
312 |
+
if MODES[1] in mode:#Add
|
313 |
+
caster(f"model A[{key}] + {current_alpha} + * (model B - model C)[{key}]",hear)
|
314 |
+
theta_0[key] = theta_0[key] + current_alpha * theta_1[key]
|
315 |
+
elif MODES[2] in mode:#Triple
|
316 |
+
caster(f"model A[{key}] + {1-current_alpha-current_beta} + model B[{key}]*{current_alpha} + model C[{key}]*{current_beta}",hear)
|
317 |
+
theta_0[key] = (1 - current_alpha-current_beta) * theta_0[key] + current_alpha * theta_1[key]+current_beta * theta_2[key]
|
318 |
+
elif MODES[3] in mode:#Twice
|
319 |
+
caster(f"model A[{key}] + {1-current_alpha} + * model B[{key}]*{alpha}",hear)
|
320 |
+
caster(f"model A+B[{key}] + {1-current_beta} + * model C[{key}]*{beta}",hear)
|
321 |
+
theta_0[key] = (1 - current_alpha) * theta_0[key] + current_alpha * theta_1[key]
|
322 |
+
theta_0[key] = (1 - current_beta) * theta_0[key] + current_beta * theta_2[key]
|
323 |
+
else:#Weight
|
324 |
+
if current_alpha == 1:
|
325 |
+
caster(f"alpha = 0,model A[{key}=model B[{key}",hear)
|
326 |
+
theta_0[key] = theta_1[key]
|
327 |
+
elif current_alpha !=0:
|
328 |
+
caster(f"model A[{key}] + {1-current_alpha} + * (model B)[{key}]*{alpha}",hear)
|
329 |
+
theta_0[key] = (1 - current_alpha) * theta_0[key] + current_alpha * theta_1[key]
|
330 |
+
|
331 |
+
elif calcmode == "cosineA": #favors modelA's structure with details from B
|
332 |
+
# skip VAE model parameters to get better results
|
333 |
+
if "first_stage_model" in key: continue
|
334 |
+
if "model" in key and key in theta_0:
|
335 |
+
# Normalize the vectors before merging
|
336 |
+
theta_0_norm = nn.functional.normalize(theta_0[key].to(torch.float32), p=2, dim=0)
|
337 |
+
theta_1_norm = nn.functional.normalize(theta_1[key].to(torch.float32), p=2, dim=0)
|
338 |
+
simab = sim(theta_0_norm, theta_1_norm)
|
339 |
+
dot_product = torch.dot(theta_0_norm.view(-1), theta_1_norm.view(-1))
|
340 |
+
magnitude_similarity = dot_product / (torch.norm(theta_0_norm) * torch.norm(theta_1_norm))
|
341 |
+
combined_similarity = (simab + magnitude_similarity) / 2.0
|
342 |
+
k = (combined_similarity - sims.min()) / (sims.max() - sims.min())
|
343 |
+
k = k - current_alpha
|
344 |
+
k = k.clip(min=.0,max=1.)
|
345 |
+
caster(f"model A[{key}] + {1-k} + * (model B)[{key}]*{k}",hear)
|
346 |
+
theta_0[key] = theta_1[key] * (1 - k) + theta_0[key] * k
|
347 |
+
|
348 |
+
elif calcmode == "cosineB": #favors modelB's structure with details from A
|
349 |
+
# skip VAE model parameters to get better results
|
350 |
+
if "first_stage_model" in key: continue
|
351 |
+
if "model" in key and key in theta_0:
|
352 |
+
simab = sim(theta_0[key].to(torch.float32), theta_1[key].to(torch.float32))
|
353 |
+
dot_product = torch.dot(theta_0[key].view(-1).to(torch.float32), theta_1[key].view(-1).to(torch.float32))
|
354 |
+
magnitude_similarity = dot_product / (torch.norm(theta_0[key].to(torch.float32)) * torch.norm(theta_1[key].to(torch.float32)))
|
355 |
+
combined_similarity = (simab + magnitude_similarity) / 2.0
|
356 |
+
k = (combined_similarity - sims.min()) / (sims.max() - sims.min())
|
357 |
+
k = k - current_alpha
|
358 |
+
k = k.clip(min=.0,max=1.)
|
359 |
+
caster(f"model A[{key}] + {1-k} + * (model B)[{key}]*{k}",hear)
|
360 |
+
theta_0[key] = theta_1[key] * (1 - k) + theta_0[key] * k
|
361 |
+
|
362 |
+
elif calcmode == "smoothAdd":
|
363 |
+
caster(f"model A[{key}] + {current_alpha} + * (model B - model C)[{key}]", hear)
|
364 |
+
# Apply median filter to the weight differences
|
365 |
+
filtered_diff = scipy.ndimage.median_filter(theta_1[key].to(torch.float32).cpu().numpy(), size=3)
|
366 |
+
# Apply Gaussian filter to the filtered differences
|
367 |
+
filtered_diff = scipy.ndimage.gaussian_filter(filtered_diff, sigma=1)
|
368 |
+
theta_1[key] = torch.tensor(filtered_diff)
|
369 |
+
# Add the filtered differences to the original weights
|
370 |
+
theta_0[key] = theta_0[key] + current_alpha * theta_1[key]
|
371 |
+
|
372 |
+
elif calcmode == "tensor":
|
373 |
+
dim = theta_0[key].dim()
|
374 |
+
if dim == 0 : continue
|
375 |
+
if current_alpha+current_beta <= 1 :
|
376 |
+
talphas = int(theta_0[key].shape[0]*(current_beta))
|
377 |
+
talphae = int(theta_0[key].shape[0]*(current_alpha+current_beta))
|
378 |
+
if dim == 1:
|
379 |
+
theta_0[key][talphas:talphae] = theta_1[key][talphas:talphae].clone()
|
380 |
+
|
381 |
+
elif dim == 2:
|
382 |
+
theta_0[key][talphas:talphae,:] = theta_1[key][talphas:talphae,:].clone()
|
383 |
+
|
384 |
+
elif dim == 3:
|
385 |
+
theta_0[key][talphas:talphae,:,:] = theta_1[key][talphas:talphae,:,:].clone()
|
386 |
+
|
387 |
+
elif dim == 4:
|
388 |
+
theta_0[key][talphas:talphae,:,:,:] = theta_1[key][talphas:talphae,:,:,:].clone()
|
389 |
+
|
390 |
+
else:
|
391 |
+
talphas = int(theta_0[key].shape[0]*(current_alpha+current_beta-1))
|
392 |
+
talphae = int(theta_0[key].shape[0]*(current_beta))
|
393 |
+
theta_t = theta_1[key].clone()
|
394 |
+
if dim == 1:
|
395 |
+
theta_t[talphas:talphae] = theta_0[key][talphas:talphae].clone()
|
396 |
+
|
397 |
+
elif dim == 2:
|
398 |
+
theta_t[talphas:talphae,:] = theta_0[key][talphas:talphae,:].clone()
|
399 |
+
|
400 |
+
elif dim == 3:
|
401 |
+
theta_t[talphas:talphae,:,:] = theta_0[key][talphas:talphae,:,:].clone()
|
402 |
+
|
403 |
+
elif dim == 4:
|
404 |
+
theta_t[talphas:talphae,:,:,:] = theta_0[key][talphas:talphae,:,:,:].clone()
|
405 |
+
theta_0[key] = theta_t
|
406 |
+
|
407 |
+
currentmodel = makemodelname(weights_a,weights_b,model_a, model_b,model_c, base_alpha,base_beta,useblocks,mode)
|
408 |
+
|
409 |
+
for key in tqdm(theta_1.keys(), desc="Stage 2/2"):
|
410 |
+
if key in chckpoint_dict_skip_on_merge:
|
411 |
+
continue
|
412 |
+
if "model" in key and key not in theta_0:
|
413 |
+
theta_0.update({key:theta_1[key]})
|
414 |
+
|
415 |
+
del theta_1
|
416 |
+
|
417 |
+
modelid = rwmergelog(currentmodel,mergedmodel)
|
418 |
+
|
419 |
+
caster(mergedmodel,False)
|
420 |
+
|
421 |
+
if save_metadata:
|
422 |
+
merge_recipe = {
|
423 |
+
"type": "sd-webui-supermerger",
|
424 |
+
"weights_alpha": weights_a if useblocks else None,
|
425 |
+
"weights_beta": weights_b if useblocks else None,
|
426 |
+
"weights_alpha_orig": weights_a_orig if useblocks else None,
|
427 |
+
"weights_beta_orig": weights_b_orig if useblocks else None,
|
428 |
+
"model_a": longhashfromname(model_a),
|
429 |
+
"model_b": longhashfromname(model_b),
|
430 |
+
"model_c": longhashfromname(model_c),
|
431 |
+
"base_alpha": base_alpha,
|
432 |
+
"base_beta": base_beta,
|
433 |
+
"mode": mode,
|
434 |
+
"mbw": useblocks,
|
435 |
+
"elemental_merge": deep,
|
436 |
+
"calcmode" : calcmode
|
437 |
+
}
|
438 |
+
metadata["sd_merge_recipe"] = json.dumps(merge_recipe)
|
439 |
+
metadata["sd_merge_models"] = {}
|
440 |
+
|
441 |
+
def add_model_metadata(checkpoint_name):
|
442 |
+
checkpoint_info = sd_models.get_closet_checkpoint_match(checkpoint_name)
|
443 |
+
checkpoint_info.calculate_shorthash()
|
444 |
+
metadata["sd_merge_models"][checkpoint_info.sha256] = {
|
445 |
+
"name": checkpoint_name,
|
446 |
+
"legacy_hash": checkpoint_info.hash
|
447 |
+
}
|
448 |
+
|
449 |
+
#metadata["sd_merge_models"].update(checkpoint_info.metadata.get("sd_merge_models", {}))
|
450 |
+
|
451 |
+
if model_a:
|
452 |
+
add_model_metadata(model_a)
|
453 |
+
if model_b:
|
454 |
+
add_model_metadata(model_b)
|
455 |
+
if model_c:
|
456 |
+
add_model_metadata(model_c)
|
457 |
+
|
458 |
+
metadata["sd_merge_models"] = json.dumps(metadata["sd_merge_models"])
|
459 |
+
|
460 |
+
return "",currentmodel,modelid,theta_0,metadata
|
461 |
+
def forkforker(filename):
|
462 |
+
try:
|
463 |
+
return sd_models.read_state_dict(filename,"cuda")
|
464 |
+
except:
|
465 |
+
return sd_models.read_state_dict(filename)
|
466 |
+
|
467 |
+
def load_model_weights_m(model,model_a,model_b,save):
|
468 |
+
checkpoint_info = sd_models.get_closet_checkpoint_match(model)
|
469 |
+
sd_model_name = checkpoint_info.model_name
|
470 |
+
|
471 |
+
cachenum = shared.opts.sd_checkpoint_cache
|
472 |
+
|
473 |
+
if save:
|
474 |
+
if model_a:
|
475 |
+
load_model(checkpoint_info)
|
476 |
+
print(f"Loading weights [{sd_model_name}] from file")
|
477 |
+
return forkforker(checkpoint_info.filename)
|
478 |
+
|
479 |
+
if checkpoint_info in checkpoints_loaded:
|
480 |
+
print(f"Loading weights [{sd_model_name}] from cache")
|
481 |
+
return checkpoints_loaded[checkpoint_info]
|
482 |
+
elif cachenum>0 and model_a:
|
483 |
+
load_model(checkpoint_info)
|
484 |
+
print(f"Loading weights [{sd_model_name}] from cache")
|
485 |
+
return checkpoints_loaded[checkpoint_info]
|
486 |
+
elif cachenum>1 and model_b:
|
487 |
+
load_model(checkpoint_info)
|
488 |
+
print(f"Loading weights [{sd_model_name}] from cache")
|
489 |
+
return checkpoints_loaded[checkpoint_info]
|
490 |
+
elif cachenum>2:
|
491 |
+
load_model(checkpoint_info)
|
492 |
+
print(f"Loading weights [{sd_model_name}] from cache")
|
493 |
+
return checkpoints_loaded[checkpoint_info]
|
494 |
+
else:
|
495 |
+
if model_a:
|
496 |
+
load_model(checkpoint_info)
|
497 |
+
print(f"Loading weights [{sd_model_name}] from file")
|
498 |
+
return forkforker(checkpoint_info.filename)
|
499 |
+
|
500 |
+
def makemodelname(weights_a,weights_b,model_a, model_b,model_c, alpha,beta,useblocks,mode):
|
501 |
+
model_a=filenamecutter(model_a)
|
502 |
+
model_b=filenamecutter(model_b)
|
503 |
+
model_c=filenamecutter(model_c)
|
504 |
+
|
505 |
+
if type(alpha) == str:alpha = float(alpha)
|
506 |
+
if type(beta)== str:beta = float(beta)
|
507 |
+
|
508 |
+
if useblocks:
|
509 |
+
if MODES[1] in mode:#add
|
510 |
+
currentmodel =f"{model_a} + ({model_b} - {model_c}) x alpha ({str(round(alpha,3))},{','.join(str(s) for s in weights_a)}"
|
511 |
+
elif MODES[2] in mode:#triple
|
512 |
+
currentmodel =f"{model_a} x (1-alpha-beta) + {model_b} x alpha + {model_c} x beta (alpha = {str(round(alpha,3))},{','.join(str(s) for s in weights_a)},beta = {beta},{','.join(str(s) for s in weights_b)})"
|
513 |
+
elif MODES[3] in mode:#twice
|
514 |
+
currentmodel =f"({model_a} x (1-alpha) + {model_b} x alpha)x(1-beta)+ {model_c} x beta ({str(round(alpha,3))},{','.join(str(s) for s in weights_a)})_({str(round(beta,3))},{','.join(str(s) for s in weights_b)})"
|
515 |
+
else:
|
516 |
+
currentmodel =f"{model_a} x (1-alpha) + {model_b} x alpha ({str(round(alpha,3))},{','.join(str(s) for s in weights_a)})"
|
517 |
+
else:
|
518 |
+
if MODES[1] in mode:#add
|
519 |
+
currentmodel =f"{model_a} + ({model_b} - {model_c}) x {str(round(alpha,3))}"
|
520 |
+
elif MODES[2] in mode:#triple
|
521 |
+
currentmodel =f"{model_a} x {str(round(1-alpha-beta,3))} + {model_b} x {str(round(alpha,3))} + {model_c} x {str(round(beta,3))}"
|
522 |
+
elif MODES[3] in mode:#twice
|
523 |
+
currentmodel =f"({model_a} x {str(round(1-alpha,3))} +{model_b} x {str(round(alpha,3))}) x {str(round(1-beta,3))} + {model_c} x {str(round(beta,3))}"
|
524 |
+
else:
|
525 |
+
currentmodel =f"{model_a} x {str(round(1-alpha,3))} + {model_b} x {str(round(alpha,3))}"
|
526 |
+
return currentmodel
|
527 |
+
|
528 |
+
path_root = scripts.basedir()
|
529 |
+
|
530 |
+
def rwmergelog(mergedname = "",settings= [],id = 0):
|
531 |
+
setting = settings.copy()
|
532 |
+
filepath = os.path.join(path_root, "mergehistory.csv")
|
533 |
+
is_file = os.path.isfile(filepath)
|
534 |
+
if not is_file:
|
535 |
+
with open(filepath, 'a') as f:
|
536 |
+
#msettings=[0 weights_a,1 weights_b,2 model_a,3 model_b,4 model_c,5 base_alpha,6 base_beta,7 mode,8 useblocks,9 custom_name,10 save_sets,11 id_sets, 12 deep 13 calcmode]
|
537 |
+
f.writelines('"ID","time","name","weights alpha","weights beta","model A","model B","model C","alpha","beta","mode","use MBW","plus lora","custum name","save setting","use ID"\n')
|
538 |
+
with open(filepath, 'r+') as f:
|
539 |
+
reader = csv.reader(f)
|
540 |
+
mlist = [raw for raw in reader]
|
541 |
+
if mergedname != "":
|
542 |
+
mergeid = len(mlist)
|
543 |
+
setting.insert(0,mergedname)
|
544 |
+
for i,x in enumerate(setting):
|
545 |
+
if "," in str(x):setting[i] = f'"{str(setting[i])}"'
|
546 |
+
text = ",".join(map(str, setting))
|
547 |
+
text=str(mergeid)+","+datetime.datetime.now().strftime('%Y.%m.%d %H.%M.%S.%f')[:-7]+"," + text + "\n"
|
548 |
+
f.writelines(text)
|
549 |
+
return mergeid
|
550 |
+
try:
|
551 |
+
out = mlist[int(id)]
|
552 |
+
except:
|
553 |
+
out = "ERROR: OUT of ID index"
|
554 |
+
return out
|
555 |
+
|
556 |
+
def draw_origin(grid, text,width,height,width_one):
|
557 |
+
grid_d= Image.new("RGB", (grid.width,grid.height), "white")
|
558 |
+
grid_d.paste(grid,(0,0))
|
559 |
+
def get_font(fontsize):
|
560 |
+
try:
|
561 |
+
from fonts.ttf import Roboto
|
562 |
+
try:
|
563 |
+
return ImageFont.truetype(opts.font or Roboto, fontsize)
|
564 |
+
except Exception:
|
565 |
+
return ImageFont.truetype(Roboto, fontsize)
|
566 |
+
except Exception:
|
567 |
+
try:
|
568 |
+
return ImageFont.truetype(shared.opts.font or 'javascript/roboto.ttf', fontsize)
|
569 |
+
except Exception:
|
570 |
+
return ImageFont.truetype('javascript/roboto.ttf', fontsize)
|
571 |
+
|
572 |
+
d= ImageDraw.Draw(grid_d)
|
573 |
+
color_active = (0, 0, 0)
|
574 |
+
fontsize = (width+height)//25
|
575 |
+
fnt = get_font(fontsize)
|
576 |
+
|
577 |
+
if grid.width != width_one:
|
578 |
+
while d.multiline_textsize(text, font=fnt)[0] > width_one*0.75 and fontsize > 0:
|
579 |
+
fontsize -=1
|
580 |
+
fnt = get_font(fontsize)
|
581 |
+
d.multiline_text((0,0), text, font=fnt, fill=color_active,align="center")
|
582 |
+
return grid_d
|
583 |
+
|
584 |
+
def wpreseter(w,presets):
|
585 |
+
if "," not in w and w != "":
|
586 |
+
presets=presets.splitlines()
|
587 |
+
wdict={}
|
588 |
+
for l in presets:
|
589 |
+
if ":" in l :
|
590 |
+
key = l.split(":",1)[0]
|
591 |
+
wdict[key.strip()]=l.split(":",1)[1]
|
592 |
+
if "\t" in l:
|
593 |
+
key = l.split("\t",1)[0]
|
594 |
+
wdict[key.strip()]=l.split("\t",1)[1]
|
595 |
+
if w.strip() in wdict:
|
596 |
+
name = w
|
597 |
+
w = wdict[w.strip()]
|
598 |
+
print(f"weights {name} imported from presets : {w}")
|
599 |
+
return w
|
600 |
+
|
601 |
+
def fullpathfromname(name):
|
602 |
+
if hash == "" or hash ==[]: return ""
|
603 |
+
checkpoint_info = sd_models.get_closet_checkpoint_match(name)
|
604 |
+
return checkpoint_info.filename
|
605 |
+
|
606 |
+
def namefromhash(hash):
|
607 |
+
if hash == "" or hash ==[]: return ""
|
608 |
+
checkpoint_info = sd_models.get_closet_checkpoint_match(hash)
|
609 |
+
return checkpoint_info.model_name
|
610 |
+
|
611 |
+
def hashfromname(name):
|
612 |
+
from modules import sd_models
|
613 |
+
if name == "" or name ==[]: return ""
|
614 |
+
checkpoint_info = sd_models.get_closet_checkpoint_match(name)
|
615 |
+
if checkpoint_info.shorthash is not None:
|
616 |
+
return checkpoint_info.shorthash
|
617 |
+
return checkpoint_info.calculate_shorthash()
|
618 |
+
|
619 |
+
def longhashfromname(name):
|
620 |
+
from modules import sd_models
|
621 |
+
if name == "" or name ==[]: return ""
|
622 |
+
checkpoint_info = sd_models.get_closet_checkpoint_match(name)
|
623 |
+
if checkpoint_info.sha256 is not None:
|
624 |
+
return checkpoint_info.sha256
|
625 |
+
checkpoint_info.calculate_shorthash()
|
626 |
+
return checkpoint_info.sha256
|
627 |
+
|
628 |
+
def simggen(prompt, nprompt, steps, sampler, cfg, seed, w, h,genoptions,hrupscaler,hr2ndsteps,denoise_str,hr_scale,batch_size,mergeinfo="",id_sets=[],modelid = "no id"):
|
629 |
+
shared.state.begin()
|
630 |
+
p = processing.StableDiffusionProcessingTxt2Img(
|
631 |
+
sd_model=shared.sd_model,
|
632 |
+
do_not_save_grid=True,
|
633 |
+
do_not_save_samples=True,
|
634 |
+
do_not_reload_embeddings=True,
|
635 |
+
)
|
636 |
+
p.batch_size = int(batch_size)
|
637 |
+
p.prompt = prompt
|
638 |
+
p.negative_prompt = nprompt
|
639 |
+
p.steps = steps
|
640 |
+
p.sampler_name = sd_samplers.samplers[sampler].name
|
641 |
+
p.cfg_scale = cfg
|
642 |
+
p.seed = seed
|
643 |
+
p.width = w
|
644 |
+
p.height = h
|
645 |
+
p.seed_resize_from_w=0
|
646 |
+
p.seed_resize_from_h=0
|
647 |
+
p.denoising_strength=None
|
648 |
+
|
649 |
+
#"Restore faces", "Tiling", "Hires. fix"
|
650 |
+
|
651 |
+
if "Hires. fix" in genoptions:
|
652 |
+
p.enable_hr = True
|
653 |
+
p.denoising_strength = denoise_str
|
654 |
+
p.hr_upscaler = hrupscaler
|
655 |
+
p.hr_second_pass_steps = hr2ndsteps
|
656 |
+
p.hr_scale = hr_scale
|
657 |
+
|
658 |
+
if "Tiling" in genoptions:
|
659 |
+
p.tiling = True
|
660 |
+
|
661 |
+
if "Restore faces" in genoptions:
|
662 |
+
p.restore_faces = True
|
663 |
+
|
664 |
+
if type(p.prompt) == list:
|
665 |
+
p.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, p.styles) for x in p.prompt]
|
666 |
+
else:
|
667 |
+
p.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(p.prompt, p.styles)]
|
668 |
+
|
669 |
+
if type(p.negative_prompt) == list:
|
670 |
+
p.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, p.styles) for x in p.negative_prompt]
|
671 |
+
else:
|
672 |
+
p.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(p.negative_prompt, p.styles)]
|
673 |
+
|
674 |
+
processed:Processed = processing.process_images(p)
|
675 |
+
if "image" in id_sets:
|
676 |
+
for i, image in enumerate(processed.images):
|
677 |
+
processed.images[i] = draw_origin(image, str(modelid),w,h,w)
|
678 |
+
|
679 |
+
if "PNG info" in id_sets:mergeinfo = mergeinfo + " ID " + str(modelid)
|
680 |
+
|
681 |
+
infotext = create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds)
|
682 |
+
if infotext.count("Steps: ")>1:
|
683 |
+
infotext = infotext[:infotext.rindex("Steps")]
|
684 |
+
|
685 |
+
infotexts = infotext.split(",")
|
686 |
+
for i,x in enumerate(infotexts):
|
687 |
+
if "Model:"in x:
|
688 |
+
infotexts[i] = " Model: "+mergeinfo.replace(","," ")
|
689 |
+
infotext= ",".join(infotexts)
|
690 |
+
|
691 |
+
for i, image in enumerate(processed.images):
|
692 |
+
images.save_image(image, opts.outdir_txt2img_samples, "",p.seed, p.prompt,shared.opts.samples_format, p=p,info=infotext)
|
693 |
+
|
694 |
+
if batch_size > 1:
|
695 |
+
grid = images.image_grid(processed.images, p.batch_size)
|
696 |
+
processed.images.insert(0, grid)
|
697 |
+
images.save_image(grid, opts.outdir_txt2img_grids, "grid", p.seed, p.prompt, opts.grid_format, info=infotext, short_filename=not opts.grid_extended_filename, p=p, grid=True)
|
698 |
+
shared.state.end()
|
699 |
+
return processed.images,infotext,plaintext_to_html(processed.info), plaintext_to_html(processed.comments),p
|
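To make the MBW path in smerge() above easier to follow, here is a minimal stand-alone sketch of how a checkpoint key is mapped to one of the 25 per-block weights; it mirrors the regex logic in mergers.py but is not part of the extension, and the function name block_weight_index is hypothetical. Index -1 means "not handled by a block weight" and falls back to base_alpha, which corresponds to the BASE column of the presets.

import re

NUM_INPUT_BLOCKS, NUM_MID_BLOCK, NUM_OUTPUT_BLOCKS = 12, 1, 12
NUM_TOTAL_BLOCKS = NUM_INPUT_BLOCKS + NUM_MID_BLOCK + NUM_OUTPUT_BLOCKS  # 25

re_inp = re.compile(r'\.input_blocks\.(\d+)\.')
re_mid = re.compile(r'\.middle_block\.(\d+)\.')
re_out = re.compile(r'\.output_blocks\.(\d+)\.')

def block_weight_index(key: str) -> int:
    if 'model.diffusion_model.' not in key:
        return -1                                  # not U-Net: use base_alpha (BASE)
    if 'time_embed' in key:
        return 0                                   # before the input blocks
    if '.out.' in key:
        return NUM_TOTAL_BLOCKS - 1                # after the output blocks
    m = re_inp.search(key)
    if m:
        return int(m.group(1))                     # IN00..IN11 -> 0..11
    if re_mid.search(key):
        return NUM_INPUT_BLOCKS                    # M00 -> 12
    m = re_out.search(key)
    if m:
        return NUM_INPUT_BLOCKS + NUM_MID_BLOCK + int(m.group(1))  # OUT00..OUT11 -> 13..24
    return -1                                      # anything else: base_alpha

# e.g. block_weight_index("model.diffusion_model.output_blocks.3.1.proj_in.weight") -> 16 (OUT03)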
extensions/microsoftexcel-supermerger/scripts/mergers/model_util.py
ADDED
@@ -0,0 +1,928 @@
1 |
+
import os
|
2 |
+
import torch
|
3 |
+
from transformers import CLIPTextModel, CLIPTextConfig
|
4 |
+
from safetensors.torch import load_file
|
5 |
+
import safetensors.torch
|
6 |
+
from modules.sd_models import read_state_dict
|
7 |
+
|
8 |
+
# Model parameters for the Diffusers version of Stable Diffusion
|
9 |
+
NUM_TRAIN_TIMESTEPS = 1000
|
10 |
+
BETA_START = 0.00085
|
11 |
+
BETA_END = 0.0120
|
12 |
+
|
13 |
+
UNET_PARAMS_MODEL_CHANNELS = 320
|
14 |
+
UNET_PARAMS_CHANNEL_MULT = [1, 2, 4, 4]
|
15 |
+
UNET_PARAMS_ATTENTION_RESOLUTIONS = [4, 2, 1]
|
16 |
+
UNET_PARAMS_IMAGE_SIZE = 64 # fixed from old invalid value `32`
|
17 |
+
UNET_PARAMS_IN_CHANNELS = 4
|
18 |
+
UNET_PARAMS_OUT_CHANNELS = 4
|
19 |
+
UNET_PARAMS_NUM_RES_BLOCKS = 2
|
20 |
+
UNET_PARAMS_CONTEXT_DIM = 768
|
21 |
+
UNET_PARAMS_NUM_HEADS = 8
|
22 |
+
|
23 |
+
VAE_PARAMS_Z_CHANNELS = 4
|
24 |
+
VAE_PARAMS_RESOLUTION = 256
|
25 |
+
VAE_PARAMS_IN_CHANNELS = 3
|
26 |
+
VAE_PARAMS_OUT_CH = 3
|
27 |
+
VAE_PARAMS_CH = 128
|
28 |
+
VAE_PARAMS_CH_MULT = [1, 2, 4, 4]
|
29 |
+
VAE_PARAMS_NUM_RES_BLOCKS = 2
|
30 |
+
|
31 |
+
# V2
|
32 |
+
V2_UNET_PARAMS_ATTENTION_HEAD_DIM = [5, 10, 20, 20]
|
33 |
+
V2_UNET_PARAMS_CONTEXT_DIM = 1024
|
34 |
+
|
35 |
+
# Reference models used to load the Diffusers configuration
|
36 |
+
DIFFUSERS_REF_MODEL_ID_V1 = "runwayml/stable-diffusion-v1-5"
|
37 |
+
DIFFUSERS_REF_MODEL_ID_V2 = "stabilityai/stable-diffusion-2-1"
|
38 |
+
|
39 |
+
|
40 |
+
# region StableDiffusion -> Diffusers conversion code
|
41 |
+
# Copied from convert_original_stable_diffusion_to_diffusers and modified (ASL 2.0)
|
42 |
+
|
43 |
+
|
44 |
+
def shave_segments(path, n_shave_prefix_segments=1):
    """
    Removes segments. Positive values shave the first segments, negative shave the last segments.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside resnets to the new naming scheme (local renaming)
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")

        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")

        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside resnets to the new naming scheme (local renaming)
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("nin_shortcut", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside attentions to the new naming scheme (local renaming)
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item

        # new_item = new_item.replace('norm.weight', 'group_norm.weight')
        # new_item = new_item.replace('norm.bias', 'group_norm.bias')

        # new_item = new_item.replace('proj_out.weight', 'proj_attn.weight')
        # new_item = new_item.replace('proj_out.bias', 'proj_attn.bias')

        # new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside attentions to the new naming scheme (local renaming)
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        new_item = new_item.replace("q.weight", "query.weight")
        new_item = new_item.replace("q.bias", "query.bias")

        new_item = new_item.replace("k.weight", "key.weight")
        new_item = new_item.replace("k.bias", "key.bias")

        new_item = new_item.replace("v.weight", "value.weight")
        new_item = new_item.replace("v.bias", "value.bias")

        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping

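# --- Illustrative sketch, not part of the original file ---
# A small, hedged example of what the helpers above do: shave_segments() trims a
# dotted key path, and renew_resnet_paths() maps LDM resnet key names onto the
# Diffusers naming scheme. The sample key is made up for illustration; the guard
# keeps this from running on import.
if __name__ == "__main__":
    sample_key = "input_blocks.1.0.in_layers.0.weight"
    print(shave_segments(sample_key, 3))      # -> "in_layers.0.weight"
    print(renew_resnet_paths([sample_key]))   # -> [{'old': ..., 'new': 'input_blocks.1.0.norm1.weight'}]
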
def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """
    This does the final conversion step: take locally converted weights and apply a global renaming
    to them. It splits attention layers, and takes into account additional replacements
    that may arise.

    Assigns the weights to the new checkpoint.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]


def conv_attn_to_linear(checkpoint):
    keys = list(checkpoint.keys())
    attn_keys = ["query.weight", "key.weight", "value.weight"]
    for key in keys:
        if ".".join(key.split(".")[-2:]) in attn_keys:
            if checkpoint[key].ndim > 2:
                checkpoint[key] = checkpoint[key][:, :, 0, 0]
        elif "proj_attn.weight" in key:
            if checkpoint[key].ndim > 2:
                checkpoint[key] = checkpoint[key][:, :, 0]


def linear_transformer_to_conv(checkpoint):
    keys = list(checkpoint.keys())
    tf_keys = ["proj_in.weight", "proj_out.weight"]
    for key in keys:
        if ".".join(key.split(".")[-2:]) in tf_keys:
            if checkpoint[key].ndim == 2:
                checkpoint[key] = checkpoint[key].unsqueeze(2).unsqueeze(2)

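# --- Illustrative sketch, not part of the original file ---
# Hedged example of the two shape fix-ups above: conv_attn_to_linear() strips the
# trailing 1x1 conv dims from VAE attention weights, while linear_transformer_to_conv()
# adds them back for the SD v2 UNet proj layers. The dummy checkpoints are made up;
# torch is assumed to be imported at the top of this module.
if __name__ == "__main__":
    vae_ckpt = {"decoder.mid_block.attentions.0.query.weight": torch.zeros(4, 4, 1, 1)}
    conv_attn_to_linear(vae_ckpt)
    print(vae_ckpt["decoder.mid_block.attentions.0.query.weight"].shape)  # torch.Size([4, 4])

    unet_ckpt = {"middle_block.1.proj_in.weight": torch.zeros(4, 4)}
    linear_transformer_to_conv(unet_ckpt)
    print(unet_ckpt["middle_block.1.proj_in.weight"].shape)  # torch.Size([4, 4, 1, 1])
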
def convert_ldm_unet_checkpoint(v2, checkpoint, config):
|
217 |
+
"""
|
218 |
+
Takes a state dict and a config, and returns a converted checkpoint.
|
219 |
+
"""
|
220 |
+
|
221 |
+
# extract state_dict for UNet
|
222 |
+
unet_state_dict = {}
|
223 |
+
unet_key = "model.diffusion_model."
|
224 |
+
keys = list(checkpoint.keys())
|
225 |
+
for key in keys:
|
226 |
+
if key.startswith(unet_key):
|
227 |
+
unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key)
|
228 |
+
|
229 |
+
new_checkpoint = {}
|
230 |
+
|
231 |
+
new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"]
|
232 |
+
new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"]
|
233 |
+
new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"]
|
234 |
+
new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"]
|
235 |
+
|
236 |
+
new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"]
|
237 |
+
new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"]
|
238 |
+
|
239 |
+
new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"]
|
240 |
+
new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"]
|
241 |
+
new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"]
|
242 |
+
new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"]
|
243 |
+
|
244 |
+
# Retrieves the keys for the input blocks only
|
245 |
+
num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer})
|
246 |
+
input_blocks = {
|
247 |
+
layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}." in key]
|
248 |
+
for layer_id in range(num_input_blocks)
|
249 |
+
}
|
250 |
+
|
251 |
+
# Retrieves the keys for the middle blocks only
|
252 |
+
num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer})
|
253 |
+
middle_blocks = {
|
254 |
+
layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}." in key]
|
255 |
+
for layer_id in range(num_middle_blocks)
|
256 |
+
}
|
257 |
+
|
258 |
+
# Retrieves the keys for the output blocks only
|
259 |
+
num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer})
|
260 |
+
output_blocks = {
|
261 |
+
layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}." in key]
|
262 |
+
for layer_id in range(num_output_blocks)
|
263 |
+
}
|
264 |
+
|
265 |
+
for i in range(1, num_input_blocks):
|
266 |
+
block_id = (i - 1) // (config["layers_per_block"] + 1)
|
267 |
+
layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1)
|
268 |
+
|
269 |
+
resnets = [
|
270 |
+
key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key
|
271 |
+
]
|
272 |
+
attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
|
273 |
+
|
274 |
+
if f"input_blocks.{i}.0.op.weight" in unet_state_dict:
|
275 |
+
new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop(
|
276 |
+
f"input_blocks.{i}.0.op.weight"
|
277 |
+
)
|
278 |
+
new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop(
|
279 |
+
f"input_blocks.{i}.0.op.bias"
|
280 |
+
)
|
281 |
+
|
282 |
+
paths = renew_resnet_paths(resnets)
|
283 |
+
meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
|
284 |
+
assign_to_checkpoint(
|
285 |
+
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
|
286 |
+
)
|
287 |
+
|
288 |
+
if len(attentions):
|
289 |
+
paths = renew_attention_paths(attentions)
|
290 |
+
meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"}
|
291 |
+
assign_to_checkpoint(
|
292 |
+
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
|
293 |
+
)
|
294 |
+
|
295 |
+
resnet_0 = middle_blocks[0]
|
296 |
+
attentions = middle_blocks[1]
|
297 |
+
resnet_1 = middle_blocks[2]
|
298 |
+
|
299 |
+
resnet_0_paths = renew_resnet_paths(resnet_0)
|
300 |
+
assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)
|
301 |
+
|
302 |
+
resnet_1_paths = renew_resnet_paths(resnet_1)
|
303 |
+
assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)
|
304 |
+
|
305 |
+
attentions_paths = renew_attention_paths(attentions)
|
306 |
+
meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"}
|
307 |
+
assign_to_checkpoint(
|
308 |
+
attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
|
309 |
+
)
|
310 |
+
|
311 |
+
for i in range(num_output_blocks):
|
312 |
+
block_id = i // (config["layers_per_block"] + 1)
|
313 |
+
layer_in_block_id = i % (config["layers_per_block"] + 1)
|
314 |
+
output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
|
315 |
+
output_block_list = {}
|
316 |
+
|
317 |
+
for layer in output_block_layers:
|
318 |
+
layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
|
319 |
+
if layer_id in output_block_list:
|
320 |
+
output_block_list[layer_id].append(layer_name)
|
321 |
+
else:
|
322 |
+
output_block_list[layer_id] = [layer_name]
|
323 |
+
|
324 |
+
if len(output_block_list) > 1:
|
325 |
+
resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
|
326 |
+
attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
|
327 |
+
|
328 |
+
resnet_0_paths = renew_resnet_paths(resnets)
|
329 |
+
paths = renew_resnet_paths(resnets)
|
330 |
+
|
331 |
+
meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
|
332 |
+
assign_to_checkpoint(
|
333 |
+
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
|
334 |
+
)
|
335 |
+
|
336 |
+
# original:
|
337 |
+
# if ["conv.weight", "conv.bias"] in output_block_list.values():
|
338 |
+
# index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
|
339 |
+
|
340 |
+
# make this independent of the order of bias and weight; there is probably a better way
|
341 |
+
for l in output_block_list.values():
|
342 |
+
l.sort()
|
343 |
+
|
344 |
+
if ["conv.bias", "conv.weight"] in output_block_list.values():
|
345 |
+
index = list(output_block_list.values()).index(["conv.bias", "conv.weight"])
|
346 |
+
new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[
|
347 |
+
f"output_blocks.{i}.{index}.conv.bias"
|
348 |
+
]
|
349 |
+
new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[
|
350 |
+
f"output_blocks.{i}.{index}.conv.weight"
|
351 |
+
]
|
352 |
+
|
353 |
+
# Clear attentions as they have been attributed above.
|
354 |
+
if len(attentions) == 2:
|
355 |
+
attentions = []
|
356 |
+
|
357 |
+
if len(attentions):
|
358 |
+
paths = renew_attention_paths(attentions)
|
359 |
+
meta_path = {
|
360 |
+
"old": f"output_blocks.{i}.1",
|
361 |
+
"new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
|
362 |
+
}
|
363 |
+
assign_to_checkpoint(
|
364 |
+
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
|
365 |
+
)
|
366 |
+
else:
|
367 |
+
resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
|
368 |
+
for path in resnet_0_paths:
|
369 |
+
old_path = ".".join(["output_blocks", str(i), path["old"]])
|
370 |
+
new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
|
371 |
+
|
372 |
+
new_checkpoint[new_path] = unet_state_dict[old_path]
|
373 |
+
|
374 |
+
# in SD v2 the 1x1 conv2d layers were changed to linear, so convert linear -> conv
|
375 |
+
if v2:
|
376 |
+
linear_transformer_to_conv(new_checkpoint)
|
377 |
+
|
378 |
+
return new_checkpoint
|
379 |
+
|
380 |
+
|
381 |
+
def convert_ldm_vae_checkpoint(checkpoint, config):
|
382 |
+
# extract state dict for VAE
|
383 |
+
vae_state_dict = {}
|
384 |
+
vae_key = "first_stage_model."
|
385 |
+
keys = list(checkpoint.keys())
|
386 |
+
for key in keys:
|
387 |
+
if key.startswith(vae_key):
|
388 |
+
vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key)
|
389 |
+
# if len(vae_state_dict) == 0:
|
390 |
+
# # the checkpoint passed in is the VAE state_dict, not a checkpoint loaded from a .ckpt
|
391 |
+
# vae_state_dict = checkpoint
|
392 |
+
|
393 |
+
new_checkpoint = {}
|
394 |
+
|
395 |
+
new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
|
396 |
+
new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
|
397 |
+
new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
|
398 |
+
new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
|
399 |
+
new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
|
400 |
+
new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
|
401 |
+
|
402 |
+
new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
|
403 |
+
new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
|
404 |
+
new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
|
405 |
+
new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
|
406 |
+
new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
|
407 |
+
new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
|
408 |
+
|
409 |
+
new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
|
410 |
+
new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
|
411 |
+
new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
|
412 |
+
new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
|
413 |
+
|
414 |
+
# Retrieves the keys for the encoder down blocks only
|
415 |
+
num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
|
416 |
+
down_blocks = {
|
417 |
+
layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
|
418 |
+
}
|
419 |
+
|
420 |
+
# Retrieves the keys for the decoder up blocks only
|
421 |
+
num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
|
422 |
+
up_blocks = {
|
423 |
+
layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
|
424 |
+
}
|
425 |
+
|
426 |
+
for i in range(num_down_blocks):
|
427 |
+
resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
|
428 |
+
|
429 |
+
if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
|
430 |
+
new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
|
431 |
+
f"encoder.down.{i}.downsample.conv.weight"
|
432 |
+
)
|
433 |
+
new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
|
434 |
+
f"encoder.down.{i}.downsample.conv.bias"
|
435 |
+
)
|
436 |
+
|
437 |
+
paths = renew_vae_resnet_paths(resnets)
|
438 |
+
meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
|
439 |
+
assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
|
440 |
+
|
441 |
+
mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
|
442 |
+
num_mid_res_blocks = 2
|
443 |
+
for i in range(1, num_mid_res_blocks + 1):
|
444 |
+
resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
|
445 |
+
|
446 |
+
paths = renew_vae_resnet_paths(resnets)
|
447 |
+
meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
|
448 |
+
assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
|
449 |
+
|
450 |
+
mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
|
451 |
+
paths = renew_vae_attention_paths(mid_attentions)
|
452 |
+
meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
|
453 |
+
assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
|
454 |
+
conv_attn_to_linear(new_checkpoint)
|
455 |
+
|
456 |
+
for i in range(num_up_blocks):
|
457 |
+
block_id = num_up_blocks - 1 - i
|
458 |
+
resnets = [
|
459 |
+
key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
|
460 |
+
]
|
461 |
+
|
462 |
+
if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
|
463 |
+
new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
|
464 |
+
f"decoder.up.{block_id}.upsample.conv.weight"
|
465 |
+
]
|
466 |
+
new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
|
467 |
+
f"decoder.up.{block_id}.upsample.conv.bias"
|
468 |
+
]
|
469 |
+
|
470 |
+
paths = renew_vae_resnet_paths(resnets)
|
471 |
+
meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
|
472 |
+
assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
|
473 |
+
|
474 |
+
mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
|
475 |
+
num_mid_res_blocks = 2
|
476 |
+
for i in range(1, num_mid_res_blocks + 1):
|
477 |
+
resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
|
478 |
+
|
479 |
+
paths = renew_vae_resnet_paths(resnets)
|
480 |
+
meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
|
481 |
+
assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
|
482 |
+
|
483 |
+
mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
|
484 |
+
paths = renew_vae_attention_paths(mid_attentions)
|
485 |
+
meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
|
486 |
+
assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
|
487 |
+
conv_attn_to_linear(new_checkpoint)
|
488 |
+
return new_checkpoint
|
489 |
+
|
490 |
+
|
491 |
+
def create_unet_diffusers_config(v2):
    """
    Creates a config for the diffusers based on the config of the LDM model.
    """
    # unet_params = original_config.model.params.unet_config.params

    block_out_channels = [UNET_PARAMS_MODEL_CHANNELS * mult for mult in UNET_PARAMS_CHANNEL_MULT]

    down_block_types = []
    resolution = 1
    for i in range(len(block_out_channels)):
        block_type = "CrossAttnDownBlock2D" if resolution in UNET_PARAMS_ATTENTION_RESOLUTIONS else "DownBlock2D"
        down_block_types.append(block_type)
        if i != len(block_out_channels) - 1:
            resolution *= 2

    up_block_types = []
    for i in range(len(block_out_channels)):
        block_type = "CrossAttnUpBlock2D" if resolution in UNET_PARAMS_ATTENTION_RESOLUTIONS else "UpBlock2D"
        up_block_types.append(block_type)
        resolution //= 2

    config = dict(
        sample_size=UNET_PARAMS_IMAGE_SIZE,
        in_channels=UNET_PARAMS_IN_CHANNELS,
        out_channels=UNET_PARAMS_OUT_CHANNELS,
        down_block_types=tuple(down_block_types),
        up_block_types=tuple(up_block_types),
        block_out_channels=tuple(block_out_channels),
        layers_per_block=UNET_PARAMS_NUM_RES_BLOCKS,
        cross_attention_dim=UNET_PARAMS_CONTEXT_DIM if not v2 else V2_UNET_PARAMS_CONTEXT_DIM,
        attention_head_dim=UNET_PARAMS_NUM_HEADS if not v2 else V2_UNET_PARAMS_ATTENTION_HEAD_DIM,
    )

    return config


def create_vae_diffusers_config():
    """
    Creates a config for the diffusers based on the config of the LDM model.
    """
    # vae_params = original_config.model.params.first_stage_config.params.ddconfig
    # _ = original_config.model.params.first_stage_config.params.embed_dim
    block_out_channels = [VAE_PARAMS_CH * mult for mult in VAE_PARAMS_CH_MULT]
    down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels)
    up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels)

    config = dict(
        sample_size=VAE_PARAMS_RESOLUTION,
        in_channels=VAE_PARAMS_IN_CHANNELS,
        out_channels=VAE_PARAMS_OUT_CH,
        down_block_types=tuple(down_block_types),
        up_block_types=tuple(up_block_types),
        block_out_channels=tuple(block_out_channels),
        latent_channels=VAE_PARAMS_Z_CHANNELS,
        layers_per_block=VAE_PARAMS_NUM_RES_BLOCKS,
    )
    return config
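# --- Illustrative sketch, not part of the original file ---
# Hedged example of how these config dicts are consumed later in this module: they
# are unpacked into the Diffusers model constructors. Assumes the `diffusers`
# package is installed; constructing the models allocates full-size weights.
if __name__ == "__main__":
    import diffusers

    unet_config = create_unet_diffusers_config(v2=False)
    vae_config = create_vae_diffusers_config()
    unet = diffusers.UNet2DConditionModel(**unet_config)
    vae = diffusers.AutoencoderKL(**vae_config)
    print(unet_config["block_out_channels"], vae_config["block_out_channels"])
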
|
550 |
+
|
551 |
+
def convert_ldm_clip_checkpoint_v1(checkpoint):
|
552 |
+
keys = list(checkpoint.keys())
|
553 |
+
text_model_dict = {}
|
554 |
+
for key in keys:
|
555 |
+
if key.startswith("cond_stage_model.transformer"):
|
556 |
+
text_model_dict[key[len("cond_stage_model.transformer."):]] = checkpoint[key]
|
557 |
+
return text_model_dict
|
558 |
+
|
559 |
+
|
560 |
+
def convert_ldm_clip_checkpoint_v2(checkpoint, max_length):
|
561 |
+
# the key layout is annoyingly different!
|
562 |
+
def convert_key(key):
|
563 |
+
if not key.startswith("cond_stage_model"):
|
564 |
+
return None
|
565 |
+
|
566 |
+
# common conversion
|
567 |
+
key = key.replace("cond_stage_model.model.transformer.", "text_model.encoder.")
|
568 |
+
key = key.replace("cond_stage_model.model.", "text_model.")
|
569 |
+
|
570 |
+
if "resblocks" in key:
|
571 |
+
# resblocks conversion
|
572 |
+
key = key.replace(".resblocks.", ".layers.")
|
573 |
+
if ".ln_" in key:
|
574 |
+
key = key.replace(".ln_", ".layer_norm")
|
575 |
+
elif ".mlp." in key:
|
576 |
+
key = key.replace(".c_fc.", ".fc1.")
|
577 |
+
key = key.replace(".c_proj.", ".fc2.")
|
578 |
+
elif '.attn.out_proj' in key:
|
579 |
+
key = key.replace(".attn.out_proj.", ".self_attn.out_proj.")
|
580 |
+
elif '.attn.in_proj' in key:
|
581 |
+
key = None # special case, handled later
|
582 |
+
else:
|
583 |
+
raise ValueError(f"unexpected key in SD: {key}")
|
584 |
+
elif '.positional_embedding' in key:
|
585 |
+
key = key.replace(".positional_embedding", ".embeddings.position_embedding.weight")
|
586 |
+
elif '.text_projection' in key:
|
587 |
+
key = None # not used???
|
588 |
+
elif '.logit_scale' in key:
|
589 |
+
key = None # not used???
|
590 |
+
elif '.token_embedding' in key:
|
591 |
+
key = key.replace(".token_embedding.weight", ".embeddings.token_embedding.weight")
|
592 |
+
elif '.ln_final' in key:
|
593 |
+
key = key.replace(".ln_final", ".final_layer_norm")
|
594 |
+
return key
|
595 |
+
|
596 |
+
keys = list(checkpoint.keys())
|
597 |
+
new_sd = {}
|
598 |
+
for key in keys:
|
599 |
+
# remove resblocks 23
|
600 |
+
if '.resblocks.23.' in key:
|
601 |
+
continue
|
602 |
+
new_key = convert_key(key)
|
603 |
+
if new_key is None:
|
604 |
+
continue
|
605 |
+
new_sd[new_key] = checkpoint[key]
|
606 |
+
|
607 |
+
# convert the attn weights
|
608 |
+
for key in keys:
|
609 |
+
if '.resblocks.23.' in key:
|
610 |
+
continue
|
611 |
+
if '.resblocks' in key and '.attn.in_proj_' in key:
|
612 |
+
# split into three
|
613 |
+
values = torch.chunk(checkpoint[key], 3)
|
614 |
+
|
615 |
+
key_suffix = ".weight" if "weight" in key else ".bias"
|
616 |
+
key_pfx = key.replace("cond_stage_model.model.transformer.resblocks.", "text_model.encoder.layers.")
|
617 |
+
key_pfx = key_pfx.replace("_weight", "")
|
618 |
+
key_pfx = key_pfx.replace("_bias", "")
|
619 |
+
key_pfx = key_pfx.replace(".attn.in_proj", ".self_attn.")
|
620 |
+
new_sd[key_pfx + "q_proj" + key_suffix] = values[0]
|
621 |
+
new_sd[key_pfx + "k_proj" + key_suffix] = values[1]
|
622 |
+
new_sd[key_pfx + "v_proj" + key_suffix] = values[2]
|
623 |
+
|
624 |
+
# rename or add position_ids
|
625 |
+
ANOTHER_POSITION_IDS_KEY = "text_model.encoder.text_model.embeddings.position_ids"
|
626 |
+
if ANOTHER_POSITION_IDS_KEY in new_sd:
|
627 |
+
# waifu diffusion v1.4
|
628 |
+
position_ids = new_sd[ANOTHER_POSITION_IDS_KEY]
|
629 |
+
del new_sd[ANOTHER_POSITION_IDS_KEY]
|
630 |
+
else:
|
631 |
+
position_ids = torch.Tensor([list(range(max_length))]).to(torch.int64)
|
632 |
+
|
633 |
+
new_sd["text_model.embeddings.position_ids"] = position_ids
|
634 |
+
return new_sd
|
635 |
+
|
636 |
+
def is_safetensors(path):
|
637 |
+
return os.path.splitext(path)[1].lower() == '.safetensors'
|
638 |
+
|
639 |
+
def load_checkpoint_with_text_encoder_conversion(ckpt_path):
|
640 |
+
# handle models that store the text encoder in a different layout (no 'text_model' prefix)
|
641 |
+
TEXT_ENCODER_KEY_REPLACEMENTS = [
|
642 |
+
('cond_stage_model.transformer.embeddings.', 'cond_stage_model.transformer.text_model.embeddings.'),
|
643 |
+
('cond_stage_model.transformer.encoder.', 'cond_stage_model.transformer.text_model.encoder.'),
|
644 |
+
('cond_stage_model.transformer.final_layer_norm.', 'cond_stage_model.transformer.text_model.final_layer_norm.')
|
645 |
+
]
|
646 |
+
|
647 |
+
state_dict = read_state_dict(ckpt_path)
|
648 |
+
|
649 |
+
key_reps = []
|
650 |
+
for rep_from, rep_to in TEXT_ENCODER_KEY_REPLACEMENTS:
|
651 |
+
for key in state_dict.keys():
|
652 |
+
if key.startswith(rep_from):
|
653 |
+
new_key = rep_to + key[len(rep_from):]
|
654 |
+
key_reps.append((key, new_key))
|
655 |
+
|
656 |
+
for key, new_key in key_reps:
|
657 |
+
state_dict[new_key] = state_dict[key]
|
658 |
+
del state_dict[key]
|
659 |
+
|
660 |
+
return state_dict
|
661 |
+
|
662 |
+
def to_half(sd):
    for key in sd.keys():
        if 'model' in key and sd[key].dtype == torch.float:
            sd[key] = sd[key].half()
    return sd

def savemodel(state_dict,currentmodel,fname,savesets,model_a,metadata={}):
    from modules import sd_models,shared
    if "fp16" in savesets:
        state_dict = to_half(state_dict)
        pre = "fp16"
    else:
        pre = ""
    ext = ".safetensors" if "safetensors" in savesets else ".ckpt"

    checkpoint_info = sd_models.get_closet_checkpoint_match(model_a)
    model_a_path = checkpoint_info.filename
    modeldir = os.path.split(model_a_path)[0]

    if not fname or fname == "":
        fname = currentmodel.replace(" ","").replace(",","_").replace("(","_").replace(")","_")+pre+ext
        if fname[0]=="_":fname = fname[1:]
    else:
        fname = fname if ext in fname else fname +pre+ext

    fname = os.path.join(modeldir, fname)

    if len(fname) > 255:
        fname = fname.replace(ext,"")
        fname = fname[:240]+ext

    # check if output file already exists
    if os.path.isfile(fname) and not "overwrite" in savesets:
        _err_msg = f"Output file ({fname}) existed and was not saved"
        print(_err_msg)
        return _err_msg

    print("Saving...")
    if ext == ".safetensors":
        safetensors.torch.save_file(state_dict, fname, metadata=metadata)
    else:
        torch.save(state_dict, fname)
    print("Done!")
    return "Merged model saved in "+fname

def filenamecutter(name,model_a = False):
    from modules import sd_models
    if name =="" or name ==[]: return
    checkpoint_info = sd_models.get_closet_checkpoint_match(name)
    name = os.path.splitext(checkpoint_info.filename)[0]

    if not model_a:
        name = os.path.basename(name)
    return name

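# --- Illustrative sketch, not part of the original file ---
# Hedged example of to_half(): float32 tensors whose key contains "model" are cast
# to float16 and everything else is left alone (this is what the "fp16" save option
# relies on). The tiny state dict is made up; torch is assumed to be imported at the
# top of this module.
if __name__ == "__main__":
    demo_sd = {
        "model.diffusion_model.demo.weight": torch.zeros(2, 2, dtype=torch.float32),
        "first_stage_demo.weight": torch.zeros(2, 2, dtype=torch.float32),
    }
    demo_sd = to_half(demo_sd)
    print(demo_sd["model.diffusion_model.demo.weight"].dtype)  # torch.float16
    print(demo_sd["first_stage_demo.weight"].dtype)            # torch.float32
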
# TODO: the dtype handling looks questionable and needs checking; building the text_encoder in the requested dtype is unverified
|
717 |
+
def load_models_from_stable_diffusion_checkpoint(v2, ckpt_path, dtype=None):
|
718 |
+
import diffusers
|
719 |
+
print("diffusers version : ",diffusers.__version__)
|
720 |
+
state_dict = load_checkpoint_with_text_encoder_conversion(ckpt_path)
|
721 |
+
if dtype is not None:
|
722 |
+
for k, v in state_dict.items():
|
723 |
+
if type(v) is torch.Tensor:
|
724 |
+
state_dict[k] = v.to(dtype)
|
725 |
+
|
726 |
+
# Convert the UNet2DConditionModel model.
|
727 |
+
unet_config = create_unet_diffusers_config(v2)
|
728 |
+
converted_unet_checkpoint = convert_ldm_unet_checkpoint(v2, state_dict, unet_config)
|
729 |
+
|
730 |
+
unet = diffusers.UNet2DConditionModel(**unet_config)
|
731 |
+
info = unet.load_state_dict(converted_unet_checkpoint)
|
732 |
+
print("loading u-net:", info)
|
733 |
+
|
734 |
+
# Convert the VAE model.
|
735 |
+
vae_config = create_vae_diffusers_config()
|
736 |
+
converted_vae_checkpoint = convert_ldm_vae_checkpoint(state_dict, vae_config)
|
737 |
+
|
738 |
+
vae = diffusers.AutoencoderKL(**vae_config)
|
739 |
+
info = vae.load_state_dict(converted_vae_checkpoint)
|
740 |
+
print("loading vae:", info)
|
741 |
+
|
742 |
+
# convert text_model
|
743 |
+
if v2:
|
744 |
+
converted_text_encoder_checkpoint = convert_ldm_clip_checkpoint_v2(state_dict, 77)
|
745 |
+
cfg = CLIPTextConfig(
|
746 |
+
vocab_size=49408,
|
747 |
+
hidden_size=1024,
|
748 |
+
intermediate_size=4096,
|
749 |
+
num_hidden_layers=23,
|
750 |
+
num_attention_heads=16,
|
751 |
+
max_position_embeddings=77,
|
752 |
+
hidden_act="gelu",
|
753 |
+
layer_norm_eps=1e-05,
|
754 |
+
dropout=0.0,
|
755 |
+
attention_dropout=0.0,
|
756 |
+
initializer_range=0.02,
|
757 |
+
initializer_factor=1.0,
|
758 |
+
pad_token_id=1,
|
759 |
+
bos_token_id=0,
|
760 |
+
eos_token_id=2,
|
761 |
+
model_type="clip_text_model",
|
762 |
+
projection_dim=512,
|
763 |
+
torch_dtype="float32",
|
764 |
+
transformers_version="4.25.0.dev0",
|
765 |
+
)
|
766 |
+
text_model = CLIPTextModel._from_config(cfg)
|
767 |
+
info = text_model.load_state_dict(converted_text_encoder_checkpoint)
|
768 |
+
else:
|
769 |
+
converted_text_encoder_checkpoint = convert_ldm_clip_checkpoint_v1(state_dict)
|
770 |
+
text_model = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
|
771 |
+
info = text_model.load_state_dict(converted_text_encoder_checkpoint)
|
772 |
+
print("loading text encoder:", info)
|
773 |
+
|
774 |
+
return text_model, vae, unet
|
775 |
+
|
776 |
+
def usemodelgen(theta_0,model_a,model_name):
|
777 |
+
from modules import lowvram, devices, sd_hijack,shared, sd_vae
|
778 |
+
sd_hijack.model_hijack.undo_hijack(shared.sd_model)
|
779 |
+
|
780 |
+
model = shared.sd_model
|
781 |
+
model.load_state_dict(theta_0, strict=False)
|
782 |
+
del theta_0
|
783 |
+
if shared.cmd_opts.opt_channelslast:
|
784 |
+
model.to(memory_format=torch.channels_last)
|
785 |
+
|
786 |
+
if not shared.cmd_opts.no_half:
|
787 |
+
vae = model.first_stage_model
|
788 |
+
|
789 |
+
# with --no-half-vae, remove VAE from model when doing half() to prevent its weights from being converted to float16
|
790 |
+
if shared.cmd_opts.no_half_vae:
|
791 |
+
model.first_stage_model = None
|
792 |
+
|
793 |
+
model.half()
|
794 |
+
model.first_stage_model = vae
|
795 |
+
|
796 |
+
devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
|
797 |
+
devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
|
798 |
+
devices.dtype_unet = model.model.diffusion_model.dtype
|
799 |
+
|
800 |
+
if hasattr(shared.cmd_opts,"upcast_sampling"):
|
801 |
+
devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16
|
802 |
+
else:
|
803 |
+
devices.unet_needs_upcast = devices.dtype == torch.float16 and devices.dtype_unet == torch.float16
|
804 |
+
|
805 |
+
model.first_stage_model.to(devices.dtype_vae)
|
806 |
+
sd_hijack.model_hijack.hijack(model)
|
807 |
+
|
808 |
+
model.logvar = shared.sd_model.logvar.to(devices.device)
|
809 |
+
|
810 |
+
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
|
811 |
+
setup_for_low_vram_s(model, shared.cmd_opts.medvram)
|
812 |
+
else:
|
813 |
+
model.to(shared.device)
|
814 |
+
|
815 |
+
model.eval()
|
816 |
+
|
817 |
+
shared.sd_model = model
|
818 |
+
try:
|
819 |
+
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True)
|
820 |
+
except:
|
821 |
+
pass
|
822 |
+
#shared.sd_model.sd_checkpoint_info.model_name = model_name
|
823 |
+
|
824 |
+
def _setvae():
|
825 |
+
sd_vae.delete_base_vae()
|
826 |
+
sd_vae.clear_loaded_vae()
|
827 |
+
vae_file, vae_source = sd_vae.resolve_vae(model_a)
|
828 |
+
sd_vae.load_vae(shared.sd_model, vae_file, vae_source)
|
829 |
+
|
830 |
+
try:
|
831 |
+
_setvae()
|
832 |
+
except:
|
833 |
+
print("ERROR:setting VAE skipped")
|
834 |
+
|
835 |
+
|
836 |
+
import torch
|
837 |
+
from modules import devices
|
838 |
+
|
839 |
+
module_in_gpu = None
|
840 |
+
cpu = torch.device("cpu")
|
841 |
+
|
842 |
+
|
843 |
+
def send_everything_to_cpu():
|
844 |
+
global module_in_gpu
|
845 |
+
|
846 |
+
if module_in_gpu is not None:
|
847 |
+
module_in_gpu.to(cpu)
|
848 |
+
|
849 |
+
module_in_gpu = None
|
850 |
+
|
851 |
+
def setup_for_low_vram_s(sd_model, use_medvram):
|
852 |
+
parents = {}
|
853 |
+
|
854 |
+
def send_me_to_gpu(module, _):
|
855 |
+
"""send this module to GPU; send whatever tracked module was previous in GPU to CPU;
|
856 |
+
we add this as forward_pre_hook to a lot of modules and this way all but one of them will
|
857 |
+
be in CPU
|
858 |
+
"""
|
859 |
+
global module_in_gpu
|
860 |
+
|
861 |
+
module = parents.get(module, module)
|
862 |
+
|
863 |
+
if module_in_gpu == module:
|
864 |
+
return
|
865 |
+
|
866 |
+
if module_in_gpu is not None:
|
867 |
+
module_in_gpu.to(cpu)
|
868 |
+
|
869 |
+
module.to(devices.device)
|
870 |
+
module_in_gpu = module
|
871 |
+
|
872 |
+
# see below for register_forward_pre_hook;
|
873 |
+
# first_stage_model does not use forward(), it uses encode/decode, so register_forward_pre_hook is
|
874 |
+
# useless here, and we just replace those methods
|
875 |
+
|
876 |
+
first_stage_model = sd_model.first_stage_model
|
877 |
+
first_stage_model_encode = sd_model.first_stage_model.encode
|
878 |
+
first_stage_model_decode = sd_model.first_stage_model.decode
|
879 |
+
|
880 |
+
def first_stage_model_encode_wrap(x):
|
881 |
+
send_me_to_gpu(first_stage_model, None)
|
882 |
+
return first_stage_model_encode(x)
|
883 |
+
|
884 |
+
def first_stage_model_decode_wrap(z):
|
885 |
+
send_me_to_gpu(first_stage_model, None)
|
886 |
+
return first_stage_model_decode(z)
|
887 |
+
|
888 |
+
# for SD1, cond_stage_model is CLIP and its NN is in the transformer field, but for SD2, it's open clip, and it's in the model field
|
889 |
+
if hasattr(sd_model.cond_stage_model, 'model'):
|
890 |
+
sd_model.cond_stage_model.transformer = sd_model.cond_stage_model.model
|
891 |
+
|
892 |
+
# remove four big modules, cond, first_stage, depth (if applicable), and unet from the model and then
|
893 |
+
# send the model to GPU. Then put modules back. the modules will be in CPU.
|
894 |
+
stored = sd_model.first_stage_model, getattr(sd_model, 'depth_model', None), sd_model.model
|
895 |
+
sd_model.first_stage_model, sd_model.depth_model, sd_model.model = None, None, None
|
896 |
+
sd_model.to(devices.device)
|
897 |
+
sd_model.first_stage_model, sd_model.depth_model, sd_model.model = stored
|
898 |
+
|
899 |
+
# register hooks for the first three models
|
900 |
+
sd_model.first_stage_model.register_forward_pre_hook(send_me_to_gpu)
|
901 |
+
sd_model.first_stage_model.encode = first_stage_model_encode_wrap
|
902 |
+
sd_model.first_stage_model.decode = first_stage_model_decode_wrap
|
903 |
+
if sd_model.depth_model:
|
904 |
+
sd_model.depth_model.register_forward_pre_hook(send_me_to_gpu)
|
905 |
+
|
906 |
+
if hasattr(sd_model.cond_stage_model, 'model'):
|
907 |
+
sd_model.cond_stage_model.model = sd_model.cond_stage_model.transformer
|
908 |
+
del sd_model.cond_stage_model.transformer
|
909 |
+
|
910 |
+
if use_medvram:
|
911 |
+
sd_model.model.register_forward_pre_hook(send_me_to_gpu)
|
912 |
+
else:
|
913 |
+
diff_model = sd_model.model.diffusion_model
|
914 |
+
|
915 |
+
# the third remaining model is still too big for 4 GB, so we also do the same for its submodules
|
916 |
+
# so that only one of them is in GPU at a time
|
917 |
+
stored = diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed
|
918 |
+
diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed = None, None, None, None
|
919 |
+
sd_model.model.to(devices.device)
|
920 |
+
diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed = stored
|
921 |
+
|
922 |
+
# install hooks for bits of third model
|
923 |
+
diff_model.time_embed.register_forward_pre_hook(send_me_to_gpu)
|
924 |
+
for block in diff_model.input_blocks:
|
925 |
+
block.register_forward_pre_hook(send_me_to_gpu)
|
926 |
+
diff_model.middle_block.register_forward_pre_hook(send_me_to_gpu)
|
927 |
+
for block in diff_model.output_blocks:
|
928 |
+
block.register_forward_pre_hook(send_me_to_gpu)
|
extensions/microsoftexcel-supermerger/scripts/mergers/pluslora.py
ADDED
@@ -0,0 +1,1298 @@
import re
from sklearn.linear_model import PassiveAggressiveClassifier
import torch
import math
import os
import gc
import gradio as gr
from torchmetrics import Precision
import modules.shared as shared
import gc
from safetensors.torch import load_file, save_file
from typing import List
from tqdm import tqdm
from modules import sd_models,scripts
from scripts.mergers.model_util import load_models_from_stable_diffusion_checkpoint,filenamecutter,savemodel
from modules.ui import create_refresh_button

def on_ui_tabs():
|
19 |
+
import lora
|
20 |
+
sml_path_root = scripts.basedir()
|
21 |
+
LWEIGHTSPRESETS="\
|
22 |
+
NONE:0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n\
|
23 |
+
ALL:1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1\n\
|
24 |
+
INS:1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0\n\
|
25 |
+
IND:1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0\n\
|
26 |
+
INALL:1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0\n\
|
27 |
+
MIDD:1,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0\n\
|
28 |
+
OUTD:1,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0\n\
|
29 |
+
OUTS:1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1\n\
|
30 |
+
OUTALL:1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1\n\
|
31 |
+
ALL0.5:0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5"
|
32 |
+
sml_filepath = os.path.join(sml_path_root,"scripts", "lbwpresets.txt")
|
33 |
+
sml_lbwpresets=""
|
34 |
+
try:
|
35 |
+
with open(sml_filepath,encoding="utf-8") as f:
|
36 |
+
sml_lbwpresets = f.read()
|
37 |
+
except OSError as e:
|
38 |
+
sml_lbwpresets=LWEIGHTSPRESETS
|
39 |
+
|
40 |
+
with gr.Blocks(analytics_enabled=False) :
|
41 |
+
sml_submit_result = gr.Textbox(label="Message")
|
42 |
+
with gr.Row().style(equal_height=False):
|
43 |
+
sml_cpmerge = gr.Button(elem_id="model_merger_merge", value="Merge to Checkpoint",variant='primary')
|
44 |
+
sml_makelora = gr.Button(elem_id="model_merger_merge", value="Make LoRA (alpha * A - beta * B)",variant='primary')
|
45 |
+
sml_model_a = gr.Dropdown(sd_models.checkpoint_tiles(),elem_id="model_converter_model_name",label="Checkpoint A",interactive=True)
|
46 |
+
create_refresh_button(sml_model_a, sd_models.list_models,lambda: {"choices": sd_models.checkpoint_tiles()},"refresh_checkpoint_Z")
|
47 |
+
sml_model_b = gr.Dropdown(sd_models.checkpoint_tiles(),elem_id="model_converter_model_name",label="Checkpoint B",interactive=True)
|
48 |
+
create_refresh_button(sml_model_b, sd_models.list_models,lambda: {"choices": sd_models.checkpoint_tiles()},"refresh_checkpoint_Z")
|
49 |
+
with gr.Row().style(equal_height=False):
|
50 |
+
sml_merge = gr.Button(elem_id="model_merger_merge", value="Merge LoRAs",variant='primary')
|
51 |
+
alpha = gr.Slider(label="alpha", minimum=-1.0, maximum=2, step=0.001, value=1)
|
52 |
+
beta = gr.Slider(label="beta", minimum=-1.0, maximum=2, step=0.001, value=1)
|
53 |
+
with gr.Row().style(equal_height=False):
|
54 |
+
sml_settings = gr.CheckboxGroup(["same to Strength", "overwrite"], label="settings")
|
55 |
+
precision = gr.Radio(label = "save precision",choices=["float","fp16","bf16"],value = "fp16",type="value")
|
56 |
+
with gr.Row().style(equal_height=False):
|
57 |
+
sml_dim = gr.Radio(label = "remake dimension",choices = ["no","auto",*[2**(x+2) for x in range(9)]],value = "no",type = "value")
|
58 |
+
sml_filename = gr.Textbox(label="filename(option)",lines=1,visible =True,interactive = True)
|
59 |
+
sml_loranames = gr.Textbox(label='LoRAname1:ratio1:Blocks1,LoRAname2:ratio2:Blocks2,...(":blocks" is option, not necessary)',lines=1,value="",visible =True)
|
60 |
+
sml_dims = gr.CheckboxGroup(label = "limit dimension",choices=[],value = [],type="value",interactive=True,visible = False)
|
61 |
+
with gr.Row().style(equal_height=False):
|
62 |
+
sml_calcdim = gr.Button(elem_id="calcloras", value="calculate dimension of LoRAs(It may take a few minutes if there are many LoRAs)",variant='primary')
|
63 |
+
sml_update = gr.Button(elem_id="calcloras", value="update list",variant='primary')
|
64 |
+
sml_loras = gr.CheckboxGroup(label = "Lora",choices=[x[0] for x in lora.available_loras.items()],type="value",interactive=True,visible = True)
|
65 |
+
sml_loraratios = gr.TextArea(label="",value=sml_lbwpresets,visible =True,interactive = True)
|
66 |
+
|
67 |
+
sml_merge.click(
|
68 |
+
fn=lmerge,
|
69 |
+
inputs=[sml_loranames,sml_loraratios,sml_settings,sml_filename,sml_dim,precision],
|
70 |
+
outputs=[sml_submit_result]
|
71 |
+
)
|
72 |
+
|
73 |
+
sml_makelora.click(
|
74 |
+
fn=makelora,
|
75 |
+
inputs=[sml_model_a,sml_model_b,sml_dim,sml_filename,sml_settings,alpha,beta,precision],
|
76 |
+
outputs=[sml_submit_result]
|
77 |
+
)
|
78 |
+
|
79 |
+
sml_cpmerge.click(
|
80 |
+
fn=pluslora,
|
81 |
+
inputs=[sml_loranames,sml_loraratios,sml_settings,sml_filename,sml_model_a,precision],
|
82 |
+
outputs=[sml_submit_result]
|
83 |
+
)
|
84 |
+
llist ={}
|
85 |
+
dlist =[]
|
86 |
+
dn = []
|
87 |
+
|
88 |
+
def updateloras():
|
89 |
+
lora.list_available_loras()
|
90 |
+
for n in lora.available_loras.items():
|
91 |
+
if n[0] not in llist:llist[n[0]] = ""
|
92 |
+
return gr.update(choices = [f"{x[0]}({x[1]})" for x in llist.items()])
|
93 |
+
|
94 |
+
sml_update.click(fn = updateloras,outputs = [sml_loras])
|
95 |
+
|
96 |
+
def calculatedim():
|
97 |
+
print("listing dimensions...")
|
98 |
+
for n in tqdm(lora.available_loras.items()):
|
99 |
+
if n[0] in llist:
|
100 |
+
if llist[n[0]] !="": continue
|
101 |
+
c_lora = lora.available_loras.get(n[0], None)
|
102 |
+
d,t = dimgetter(c_lora.filename)
|
103 |
+
if t == "LoCon" : d = f"{d}:{t}"
|
104 |
+
if d not in dlist:
|
105 |
+
if type(d) == int :dlist.append(d)
|
106 |
+
elif d not in dn: dn.append(d)
|
107 |
+
llist[n[0]] = d
|
108 |
+
dlist.sort()
|
109 |
+
return gr.update(choices = [f"{x[0]}({x[1]})" for x in llist.items()],value =[]),gr.update(visible =True,choices = [x for x in (dlist+dn)])
|
110 |
+
|
111 |
+
sml_calcdim.click(
|
112 |
+
fn=calculatedim,
|
113 |
+
inputs=[],
|
114 |
+
outputs=[sml_loras,sml_dims]
|
115 |
+
)
|
116 |
+
|
117 |
+
def dimselector(dims):
|
118 |
+
if dims ==[]:return gr.update(choices = [f"{x[0]}({x[1]})" for x in llist.items()])
|
119 |
+
rl=[]
|
120 |
+
for d in dims:
|
121 |
+
for i in llist.items():
|
122 |
+
if d == i[1]:rl.append(f"{i[0]}({i[1]})")
|
123 |
+
return gr.update(choices = [l for l in rl],value =[])
|
124 |
+
|
125 |
+
def llister(names):
|
126 |
+
if names ==[] : return ""
|
127 |
+
else:
|
128 |
+
for i,n in enumerate(names):
|
129 |
+
if "(" in n:names[i] = n[:n.rfind("(")]
|
130 |
+
return ":1.0,".join(names)+":1.0"
|
131 |
+
sml_loras.change(fn=llister,inputs=[sml_loras],outputs=[sml_loranames])
|
132 |
+
sml_dims.change(fn=dimselector,inputs=[sml_dims],outputs=[sml_loras])
|
133 |
+
|
134 |
+
def makelora(model_a,model_b,dim,saveto,settings,alpha,beta,precision):
|
135 |
+
print("make LoRA start")
|
136 |
+
if model_a == "" or model_b =="":
|
137 |
+
return "ERROR: No model Selected"
|
138 |
+
gc.collect()
|
139 |
+
|
140 |
+
if saveto =="" : saveto = makeloraname(model_a,model_b)
|
141 |
+
if not ".safetensors" in saveto :saveto += ".safetensors"
|
142 |
+
saveto = os.path.join(shared.cmd_opts.lora_dir,saveto)
|
143 |
+
|
144 |
+
dim = 128 if type(dim) != int else int(dim)
|
145 |
+
if os.path.isfile(saveto ) and not "overwrite" in settings:
|
146 |
+
_err_msg = f"Output file ({saveto}) existed and was not saved"
|
147 |
+
print(_err_msg)
|
148 |
+
return _err_msg
|
149 |
+
|
150 |
+
svd(fullpathfromname(model_a),fullpathfromname(model_b),False,dim,precision,saveto,alpha,beta)
|
151 |
+
return f"saved to {saveto}"
|
152 |
+
|
153 |
+
def lmerge(loranames,loraratioss,settings,filename,dim,precision):
|
154 |
+
import lora
|
155 |
+
loras_on_disk = [lora.available_loras.get(name, None) for name in loranames]
|
156 |
+
if any([x is None for x in loras_on_disk]):
|
157 |
+
lora.list_available_loras()
|
158 |
+
|
159 |
+
loras_on_disk = [lora.available_loras.get(name, None) for name in loranames]
|
160 |
+
|
161 |
+
lnames = [loranames] if "," not in loranames else loranames.split(",")
|
162 |
+
|
163 |
+
for i, n in enumerate(lnames):
|
164 |
+
lnames[i] = n.split(":")
|
165 |
+
|
166 |
+
loraratios=loraratioss.splitlines()
|
167 |
+
ldict ={}
|
168 |
+
|
169 |
+
for i,l in enumerate(loraratios):
|
170 |
+
if ":" not in l or not (l.count(",") == 16 or l.count(",") == 25) : continue
|
171 |
+
ldict[l.split(":")[0]]=l.split(":")[1]
|
172 |
+
|
173 |
+
ln = []
|
174 |
+
lr = []
|
175 |
+
ld = []
|
176 |
+
lt = []
|
177 |
+
dmax = 1
|
178 |
+
|
179 |
+
for i,n in enumerate(lnames):
|
180 |
+
if len(n) ==3:
|
181 |
+
if n[2].strip() in ldict:
|
182 |
+
ratio = [float(r)*float(n[1]) for r in ldict[n[2]].split(",")]
|
183 |
+
else:ratio = [float(n[1])]*17
|
184 |
+
else:ratio = [float(n[1])]*17
|
185 |
+
c_lora = lora.available_loras.get(n[0], None)
|
186 |
+
ln.append(c_lora.filename)
|
187 |
+
lr.append(ratio)
|
188 |
+
d,t = dimgetter(c_lora.filename)
|
189 |
+
lt.append(t)
|
190 |
+
ld.append(d)
|
191 |
+
if d != "LyCORIS":
|
192 |
+
if d > dmax : dmax = d
|
193 |
+
|
194 |
+
if filename =="":filename =loranames.replace(",","+").replace(":","_")
|
195 |
+
if not ".safetensors" in filename:filename += ".safetensors"
|
196 |
+
filename = os.path.join(shared.cmd_opts.lora_dir,filename)
|
197 |
+
|
198 |
+
dim = int(dim) if dim != "no" and dim != "auto" else 0
|
199 |
+
|
200 |
+
if "LyCORIS" in ld or "LoCon" in lt:
|
201 |
+
if len(ld) !=1:
|
202 |
+
return "multiple merge of LyCORIS is not supported"
|
203 |
+
sd = lycomerge(ln[0],lr[0])
|
204 |
+
elif dim > 0:
|
205 |
+
print("change demension to ", dim)
|
206 |
+
sd = merge_lora_models_dim(ln, lr, dim,settings)
|
207 |
+
elif "auto" in settings and ld.count(ld[0]) != len(ld):
|
208 |
+
print("change demension to ",dmax)
|
209 |
+
sd = merge_lora_models_dim(ln, lr, dmax,settings)
|
210 |
+
else:
|
211 |
+
sd = merge_lora_models(ln, lr,settings)
|
212 |
+
|
213 |
+
if os.path.isfile(filename) and not "overwrite" in settings:
|
214 |
+
_err_msg = f"Output file ({filename}) existed and was not saved"
|
215 |
+
print(_err_msg)
|
216 |
+
return _err_msg
|
217 |
+
|
218 |
+
save_to_file(filename,sd,sd, str_to_dtype(precision))
|
219 |
+
return "saved : "+filename
|
220 |
+
|
221 |
+
def pluslora(lnames,loraratios,settings,output,model,precision):
|
222 |
+
if model == []:
|
223 |
+
return "ERROR: No model Selected"
|
224 |
+
if lnames == "":
|
225 |
+
return "ERROR: No LoRA Selected"
|
226 |
+
|
227 |
+
print("plus LoRA start")
|
228 |
+
import lora
|
229 |
+
lnames = [lnames] if "," not in lnames else lnames.split(",")
|
230 |
+
|
231 |
+
for i, n in enumerate(lnames):
|
232 |
+
lnames[i] = n.split(":")
|
233 |
+
|
234 |
+
loraratios=loraratios.splitlines()
|
235 |
+
ldict ={}
|
236 |
+
|
237 |
+
for i,l in enumerate(loraratios):
|
238 |
+
if ":" not in l or not (l.count(",") == 16 or l.count(",") == 25) : continue
|
239 |
+
ldict[l.split(":")[0].strip()]=l.split(":")[1]
|
240 |
+
|
241 |
+
names=[]
|
242 |
+
filenames=[]
|
243 |
+
loratypes=[]
|
244 |
+
lweis=[]
|
245 |
+
|
246 |
+
for n in lnames:
|
247 |
+
if len(n) ==3:
|
248 |
+
if n[2].strip() in ldict:
|
249 |
+
ratio = [float(r)*float(n[1]) for r in ldict[n[2]].split(",")]
|
250 |
+
else:ratio = [float(n[1])]*17
|
251 |
+
else:ratio = [float(n[1])]*17
|
252 |
+
c_lora = lora.available_loras.get(n[0], None)
|
253 |
+
names.append(n[0])
|
254 |
+
filenames.append(c_lora.filename)
|
255 |
+
_,t = dimgetter(c_lora.filename)
|
256 |
+
if "LyCORIS" in t: return "LyCORIS merge is not supported"
|
257 |
+
lweis.append(ratio)
|
258 |
+
|
259 |
+
modeln=filenamecutter(model,True)
|
260 |
+
dname = modeln
|
261 |
+
for n in names:
|
262 |
+
dname = dname + "+"+n
|
263 |
+
|
264 |
+
checkpoint_info = sd_models.get_closet_checkpoint_match(model)
|
265 |
+
print(f"Loading {model}")
|
266 |
+
theta_0 = sd_models.read_state_dict(checkpoint_info.filename,"cpu")
|
267 |
+
|
268 |
+
keychanger = {}
|
269 |
+
for key in theta_0.keys():
|
270 |
+
if "model" in key:
|
271 |
+
skey = key.replace(".","_").replace("_weight","")
|
272 |
+
keychanger[skey.split("model_",1)[1]] = key
|
273 |
+
|
274 |
+
for name,filename, lwei in zip(names,filenames, lweis):
|
275 |
+
print(f"loading: {name}")
|
276 |
+
lora_sd = load_state_dict(filename, torch.float)
|
277 |
+
|
278 |
+
print(f"merging..." ,lwei)
|
279 |
+
for key in lora_sd.keys():
|
280 |
+
ratio = 1
|
281 |
+
|
282 |
+
fullkey = convert_diffusers_name_to_compvis(key)
|
283 |
+
|
284 |
+
for i,block in enumerate(LORABLOCKS):
|
285 |
+
if block in fullkey:
|
286 |
+
ratio = lwei[i]
|
287 |
+
|
288 |
+
msd_key, lora_key = fullkey.split(".", 1)
|
289 |
+
|
290 |
+
if "lora_down" in key:
|
291 |
+
up_key = key.replace("lora_down", "lora_up")
|
292 |
+
alpha_key = key[:key.index("lora_down")] + 'alpha'
|
293 |
+
|
294 |
+
# print(f"apply {key} to {module}")
|
295 |
+
|
296 |
+
down_weight = lora_sd[key].to(device="cpu")
|
297 |
+
up_weight = lora_sd[up_key].to(device="cpu")
|
298 |
+
|
299 |
+
dim = down_weight.size()[0]
|
300 |
+
alpha = lora_sd.get(alpha_key, dim)
|
301 |
+
scale = alpha / dim
|
302 |
+
# W <- W + ratio * (alpha / dim) * (U @ D)
|
303 |
+
weight = theta_0[keychanger[msd_key]].to(device="cpu")
|
304 |
+
|
305 |
+
if not len(down_weight.size()) == 4:
|
306 |
+
# linear
|
307 |
+
weight = weight + ratio * (up_weight @ down_weight) * scale
|
308 |
+
else:
|
309 |
+
# conv2d
|
310 |
+
weight = weight + ratio * (up_weight.squeeze(3).squeeze(2) @ down_weight.squeeze(3).squeeze(2)
|
311 |
+
).unsqueeze(2).unsqueeze(3) * scale
|
312 |
+
theta_0[keychanger[msd_key]] = torch.nn.Parameter(weight)
|
313 |
+
#usemodelgen(theta_0,model)
|
314 |
+
settings.append(precision)
|
315 |
+
result = savemodel(theta_0,dname,output,settings,model)
|
316 |
+
del theta_0
|
317 |
+
gc.collect()
|
318 |
+
return result
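Note: the loop above applies the standard LoRA merge update W' = W + ratio * (lora_up @ lora_down) * (alpha / dim) to every matching checkpoint weight. A minimal standalone sketch of that update for a single linear layer (tensor shapes and names below are illustrative assumptions, not part of this extension):

import torch

def apply_lora_to_linear(weight, lora_up, lora_down, alpha, ratio=1.0):
    """Return weight + ratio * (up @ down) * (alpha / dim) for a linear layer."""
    dim = lora_down.size(0)      # LoRA rank
    scale = alpha / dim          # same scaling used in the merge loop above
    return weight + ratio * (lora_up @ lora_down) * scale

# toy example: a 320x768 projection with a rank-4 LoRA
w = torch.randn(320, 768)
down = torch.randn(4, 768) * 0.01
up = torch.randn(320, 4) * 0.01
merged = apply_lora_to_linear(w, up, down, alpha=4.0, ratio=0.8)
assert merged.shape == w.shape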
|
319 |
+
|
320 |
+
def save_to_file(file_name, model, state_dict, dtype):
|
321 |
+
if dtype is not None:
|
322 |
+
for key in list(state_dict.keys()):
|
323 |
+
if type(state_dict[key]) == torch.Tensor:
|
324 |
+
state_dict[key] = state_dict[key].to(dtype)
|
325 |
+
|
326 |
+
if os.path.splitext(file_name)[1] == '.safetensors':
|
327 |
+
save_file(model, file_name)
|
328 |
+
else:
|
329 |
+
torch.save(model, file_name)
|
330 |
+
|
331 |
+
re_digits = re.compile(r"\d+")
|
332 |
+
|
333 |
+
re_unet_down_blocks = re.compile(r"lora_unet_down_blocks_(\d+)_attentions_(\d+)_(.+)")
|
334 |
+
re_unet_mid_blocks = re.compile(r"lora_unet_mid_block_attentions_(\d+)_(.+)")
|
335 |
+
re_unet_up_blocks = re.compile(r"lora_unet_up_blocks_(\d+)_attentions_(\d+)_(.+)")
|
336 |
+
|
337 |
+
re_unet_down_blocks_res = re.compile(r"lora_unet_down_blocks_(\d+)_resnets_(\d+)_(.+)")
|
338 |
+
re_unet_mid_blocks_res = re.compile(r"lora_unet_mid_block_resnets_(\d+)_(.+)")
|
339 |
+
re_unet_up_blocks_res = re.compile(r"lora_unet_up_blocks_(\d+)_resnets_(\d+)_(.+)")
|
340 |
+
|
341 |
+
re_unet_downsample = re.compile(r"lora_unet_down_blocks_(\d+)_downsamplers_0_conv(.+)")
|
342 |
+
re_unet_upsample = re.compile(r"lora_unet_up_blocks_(\d+)_upsamplers_0_conv(.+)")
|
343 |
+
|
344 |
+
re_text_block = re.compile(r"lora_te_text_model_encoder_layers_(\d+)_(.+)")
|
345 |
+
|
346 |
+
|
347 |
+
def convert_diffusers_name_to_compvis(key):
|
348 |
+
def match(match_list, regex):
|
349 |
+
r = re.match(regex, key)
|
350 |
+
if not r:
|
351 |
+
return False
|
352 |
+
|
353 |
+
match_list.clear()
|
354 |
+
match_list.extend([int(x) if re.match(re_digits, x) else x for x in r.groups()])
|
355 |
+
return True
|
356 |
+
|
357 |
+
m = []
|
358 |
+
|
359 |
+
if match(m, re_unet_down_blocks):
|
360 |
+
return f"diffusion_model_input_blocks_{1 + m[0] * 3 + m[1]}_1_{m[2]}"
|
361 |
+
|
362 |
+
if match(m, re_unet_mid_blocks):
|
363 |
+
return f"diffusion_model_middle_block_1_{m[1]}"
|
364 |
+
|
365 |
+
if match(m, re_unet_up_blocks):
|
366 |
+
return f"diffusion_model_output_blocks_{m[0] * 3 + m[1]}_1_{m[2]}"
|
367 |
+
|
368 |
+
if match(m, re_unet_down_blocks_res):
|
369 |
+
block = f"diffusion_model_input_blocks_{1 + m[0] * 3 + m[1]}_0_"
|
370 |
+
if m[2].startswith('conv1'):
|
371 |
+
return f"{block}in_layers_2{m[2][len('conv1'):]}"
|
372 |
+
elif m[2].startswith('conv2'):
|
373 |
+
return f"{block}out_layers_3{m[2][len('conv2'):]}"
|
374 |
+
elif m[2].startswith('time_emb_proj'):
|
375 |
+
return f"{block}emb_layers_1{m[2][len('time_emb_proj'):]}"
|
376 |
+
elif m[2].startswith('conv_shortcut'):
|
377 |
+
return f"{block}skip_connection{m[2][len('conv_shortcut'):]}"
|
378 |
+
|
379 |
+
if match(m, re_unet_mid_blocks_res):
|
380 |
+
block = f"diffusion_model_middle_block_{m[0]*2}_"
|
381 |
+
if m[1].startswith('conv1'):
|
382 |
+
return f"{block}in_layers_2{m[1][len('conv1'):]}"
|
383 |
+
elif m[1].startswith('conv2'):
|
384 |
+
return f"{block}out_layers_3{m[1][len('conv2'):]}"
|
385 |
+
elif m[1].startswith('time_emb_proj'):
|
386 |
+
return f"{block}emb_layers_1{m[1][len('time_emb_proj'):]}"
|
387 |
+
elif m[1].startswith('conv_shortcut'):
|
388 |
+
return f"{block}skip_connection{m[1][len('conv_shortcut'):]}"
|
389 |
+
|
390 |
+
if match(m, re_unet_up_blocks_res):
|
391 |
+
block = f"diffusion_model_output_blocks_{m[0] * 3 + m[1]}_0_"
|
392 |
+
if m[2].startswith('conv1'):
|
393 |
+
return f"{block}in_layers_2{m[2][len('conv1'):]}"
|
394 |
+
elif m[2].startswith('conv2'):
|
395 |
+
return f"{block}out_layers_3{m[2][len('conv2'):]}"
|
396 |
+
elif m[2].startswith('time_emb_proj'):
|
397 |
+
return f"{block}emb_layers_1{m[2][len('time_emb_proj'):]}"
|
398 |
+
elif m[2].startswith('conv_shortcut'):
|
399 |
+
return f"{block}skip_connection{m[2][len('conv_shortcut'):]}"
|
400 |
+
|
401 |
+
if match(m, re_unet_downsample):
|
402 |
+
return f"diffusion_model_input_blocks_{m[0]*3+3}_0_op{m[1]}"
|
403 |
+
|
404 |
+
if match(m, re_unet_upsample):
|
405 |
+
return f"diffusion_model_output_blocks_{m[0]*3 + 2}_{1+(m[0]!=0)}_conv{m[1]}"
|
406 |
+
|
407 |
+
if match(m, re_text_block):
|
408 |
+
return f"transformer_text_model_encoder_layers_{m[0]}_{m[1]}"
|
409 |
+
|
410 |
+
return key
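For reference, the regexes above remap diffusers-style LoRA module names onto the CompVis U-Net layout, e.g. down_blocks_N / attentions_M becomes input_blocks_{1 + N*3 + M}. A small usage sketch, assuming convert_diffusers_name_to_compvis is imported from this module:

# down_blocks_0, attentions_1  ->  input_blocks_{1 + 0*3 + 1} = input_blocks_2
key = "lora_unet_down_blocks_0_attentions_1_transformer_blocks_0_attn1_to_q.lora_down.weight"
print(convert_diffusers_name_to_compvis(key))
# diffusion_model_input_blocks_2_1_transformer_blocks_0_attn1_to_q.lora_down.weight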
|
411 |
+
|
412 |
+
CLAMP_QUANTILE = 0.99
|
413 |
+
MIN_DIFF = 1e-6
|
414 |
+
|
415 |
+
def str_to_dtype(p):
|
416 |
+
if p == 'float':
|
417 |
+
return torch.float
|
418 |
+
if p == 'fp16':
|
419 |
+
return torch.float16
|
420 |
+
if p == 'bf16':
|
421 |
+
return torch.bfloat16
|
422 |
+
return None
|
423 |
+
|
424 |
+
def svd(model_a,model_b,v2,dim,save_precision,save_to,alpha,beta):
|
425 |
+
save_dtype = str_to_dtype(save_precision)
|
426 |
+
|
427 |
+
if model_a == model_b:
|
428 |
+
text_encoder_t, _, unet_t = load_models_from_stable_diffusion_checkpoint(v2, model_a)
|
429 |
+
text_encoder_o, _, unet_o = text_encoder_t, _, unet_t
|
430 |
+
else:
|
431 |
+
print(f"loading SD model : {model_b}")
|
432 |
+
text_encoder_o, _, unet_o = load_models_from_stable_diffusion_checkpoint(v2, model_b)
|
433 |
+
|
434 |
+
print(f"loading SD model : {model_a}")
|
435 |
+
text_encoder_t, _, unet_t = load_models_from_stable_diffusion_checkpoint(v2, model_a)
|
436 |
+
|
437 |
+
# create LoRA network to extract weights: Use dim (rank) as alpha
|
438 |
+
lora_network_o = create_network(1.0, dim, dim, None, text_encoder_o, unet_o)
|
439 |
+
lora_network_t = create_network(1.0, dim, dim, None, text_encoder_t, unet_t)
|
440 |
+
assert len(lora_network_o.text_encoder_loras) == len(
|
441 |
+
lora_network_t.text_encoder_loras), f"model version is different (SD1.x vs SD2.x) / それぞれのモデルのバージョンが違います(SD1.xベースとSD2.xベース) "
|
442 |
+
# get diffs
|
443 |
+
diffs = {}
|
444 |
+
text_encoder_different = False
|
445 |
+
for i, (lora_o, lora_t) in enumerate(zip(lora_network_o.text_encoder_loras, lora_network_t.text_encoder_loras)):
|
446 |
+
lora_name = lora_o.lora_name
|
447 |
+
module_o = lora_o.org_module
|
448 |
+
module_t = lora_t.org_module
|
449 |
+
diff = alpha*module_t.weight - beta*module_o.weight
|
450 |
+
|
451 |
+
# Text Encoder might be same
|
452 |
+
if torch.max(torch.abs(diff)) > MIN_DIFF:
|
453 |
+
text_encoder_different = True
|
454 |
+
|
455 |
+
diff = diff.float()
|
456 |
+
diffs[lora_name] = diff
|
457 |
+
|
458 |
+
if not text_encoder_different:
|
459 |
+
print("Text encoder is same. Extract U-Net only.")
|
460 |
+
lora_network_o.text_encoder_loras = []
|
461 |
+
diffs = {}
|
462 |
+
|
463 |
+
for i, (lora_o, lora_t) in enumerate(zip(lora_network_o.unet_loras, lora_network_t.unet_loras)):
|
464 |
+
lora_name = lora_o.lora_name
|
465 |
+
module_o = lora_o.org_module
|
466 |
+
module_t = lora_t.org_module
|
467 |
+
diff = alpha*module_t.weight - beta*module_o.weight
|
468 |
+
diff = diff.float()
|
469 |
+
|
470 |
+
diffs[lora_name] = diff
|
471 |
+
|
472 |
+
# make LoRA with svd
|
473 |
+
print("calculating by svd")
|
474 |
+
rank = dim
|
475 |
+
lora_weights = {}
|
476 |
+
with torch.no_grad():
|
477 |
+
for lora_name, mat in tqdm(list(diffs.items())):
|
478 |
+
conv2d = (len(mat.size()) == 4)
|
479 |
+
if conv2d:
|
480 |
+
mat = mat.squeeze()
|
481 |
+
|
482 |
+
U, S, Vh = torch.linalg.svd(mat)
|
483 |
+
|
484 |
+
U = U[:, :rank]
|
485 |
+
S = S[:rank]
|
486 |
+
U = U @ torch.diag(S)
|
487 |
+
|
488 |
+
Vh = Vh[:rank, :]
|
489 |
+
|
490 |
+
dist = torch.cat([U.flatten(), Vh.flatten()])
|
491 |
+
hi_val = torch.quantile(dist, CLAMP_QUANTILE)
|
492 |
+
low_val = -hi_val
|
493 |
+
|
494 |
+
U = U.clamp(low_val, hi_val)
|
495 |
+
Vh = Vh.clamp(low_val, hi_val)
|
496 |
+
|
497 |
+
lora_weights[lora_name] = (U, Vh)
|
498 |
+
|
499 |
+
# make state dict for LoRA
|
500 |
+
lora_network_o.apply_to(text_encoder_o, unet_o, text_encoder_different, True) # to make state dict
|
501 |
+
lora_sd = lora_network_o.state_dict()
|
502 |
+
print(f"LoRA has {len(lora_sd)} weights.")
|
503 |
+
|
504 |
+
for key in list(lora_sd.keys()):
|
505 |
+
if "alpha" in key:
|
506 |
+
continue
|
507 |
+
|
508 |
+
lora_name = key.split('.')[0]
|
509 |
+
i = 0 if "lora_up" in key else 1
|
510 |
+
|
511 |
+
weights = lora_weights[lora_name][i]
|
512 |
+
# print(key, i, weights.size(), lora_sd[key].size())
|
513 |
+
if len(lora_sd[key].size()) == 4:
|
514 |
+
weights = weights.unsqueeze(2).unsqueeze(3)
|
515 |
+
|
516 |
+
assert weights.size() == lora_sd[key].size(), f"size unmatch: {key}"
|
517 |
+
lora_sd[key] = weights
|
518 |
+
|
519 |
+
# load state dict to LoRA and save it
|
520 |
+
info = lora_network_o.load_state_dict(lora_sd)
|
521 |
+
print(f"Loading extracted LoRA weights: {info}")
|
522 |
+
|
523 |
+
dir_name = os.path.dirname(save_to)
|
524 |
+
if dir_name and not os.path.exists(dir_name):
|
525 |
+
os.makedirs(dir_name, exist_ok=True)
|
526 |
+
|
527 |
+
# minimum metadata
|
528 |
+
metadata = {"ss_network_dim": str(dim), "ss_network_alpha": str(dim)}
|
529 |
+
|
530 |
+
lora_network_o.save_weights(save_to, save_dtype, metadata)
|
531 |
+
print(f"LoRA weights are saved to: {save_to}")
|
532 |
+
return save_to
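The extraction above factorises each weight difference with a truncated SVD and clamps outliers at the 0.99 quantile before building the LoRA state dict. A self-contained sketch of that low-rank factorisation step (shapes and names are illustrative assumptions):

import torch

CLAMP_QUANTILE = 0.99

def lowrank_factor(diff, rank):
    """Factor a 2-D weight difference into (up, down) so that up @ down ~ diff."""
    U, S, Vh = torch.linalg.svd(diff)
    U = U[:, :rank] @ torch.diag(S[:rank])   # absorb singular values into U
    Vh = Vh[:rank, :]
    # clamp extreme values, as above, to keep the extracted LoRA numerically tame
    hi = torch.quantile(torch.cat([U.flatten(), Vh.flatten()]), CLAMP_QUANTILE)
    return U.clamp(-hi, hi), Vh.clamp(-hi, hi)

diff = torch.randn(320, 768)
up, down = lowrank_factor(diff, rank=8)
print(up.shape, down.shape, (up @ down).shape)   # rank-8 approximation of diff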
|
533 |
+
|
534 |
+
def load_state_dict(file_name, dtype):
|
535 |
+
if os.path.splitext(file_name)[1] == '.safetensors':
|
536 |
+
sd = load_file(file_name)
|
537 |
+
else:
|
538 |
+
sd = torch.load(file_name, map_location='cpu')
|
539 |
+
for key in list(sd.keys()):
|
540 |
+
if type(sd[key]) == torch.Tensor:
|
541 |
+
sd[key] = sd[key].to(dtype)
|
542 |
+
return sd
|
543 |
+
|
544 |
+
def dimgetter(filename):
|
545 |
+
lora_sd = load_state_dict(filename, torch.float)
|
546 |
+
alpha = None
|
547 |
+
dim = None
|
548 |
+
type = None
|
549 |
+
|
550 |
+
if "lora_unet_down_blocks_0_resnets_0_conv1.lora_down.weight" in lora_sd.keys():
|
551 |
+
type = "LoCon"
|
552 |
+
|
553 |
+
for key, value in lora_sd.items():
|
554 |
+
|
555 |
+
if alpha is None and 'alpha' in key:
|
556 |
+
alpha = value
|
557 |
+
if dim is None and 'lora_down' in key and len(value.size()) == 2:
|
558 |
+
dim = value.size()[0]
|
559 |
+
if "hada_" in key:
|
560 |
+
dim,type = "LyCORIS","LyCORIS"
|
561 |
+
if alpha is not None and dim is not None:
|
562 |
+
break
|
563 |
+
if alpha is None:
|
564 |
+
alpha = dim
|
565 |
+
if type is None: type = "LoRA"
|
566 |
+
if dim :
|
567 |
+
return dim,type
|
568 |
+
else:
|
569 |
+
return "unknown","unknown"
|
570 |
+
|
571 |
+
def blockfromkey(key):
|
572 |
+
fullkey = convert_diffusers_name_to_compvis(key)
|
573 |
+
for i,n in enumerate(LORABLOCKS):
|
574 |
+
if n in fullkey: return i
|
575 |
+
return 0
|
576 |
+
|
577 |
+
def merge_lora_models_dim(models, ratios, new_rank,sets):
|
578 |
+
merged_sd = {}
|
579 |
+
fugou = 1
|
580 |
+
for model, ratios in zip(models, ratios):
|
581 |
+
merge_dtype = torch.float
|
582 |
+
lora_sd = load_state_dict(model, merge_dtype)
|
583 |
+
|
584 |
+
# merge
|
585 |
+
print(f"merging {model}: {ratios}")
|
586 |
+
for key in tqdm(list(lora_sd.keys())):
|
587 |
+
if 'lora_down' not in key:
|
588 |
+
continue
|
589 |
+
lora_module_name = key[:key.rfind(".lora_down")]
|
590 |
+
|
591 |
+
down_weight = lora_sd[key]
|
592 |
+
network_dim = down_weight.size()[0]
|
593 |
+
|
594 |
+
up_weight = lora_sd[lora_module_name + '.lora_up.weight']
|
595 |
+
alpha = lora_sd.get(lora_module_name + '.alpha', network_dim)
|
596 |
+
|
597 |
+
in_dim = down_weight.size()[1]
|
598 |
+
out_dim = up_weight.size()[0]
|
599 |
+
conv2d = len(down_weight.size()) == 4
|
600 |
+
# print(lora_module_name, network_dim, alpha, in_dim, out_dim)
|
601 |
+
|
602 |
+
# make original weight if not exist
|
603 |
+
if lora_module_name not in merged_sd:
|
604 |
+
weight = torch.zeros((out_dim, in_dim, 1, 1) if conv2d else (out_dim, in_dim), dtype=merge_dtype)
|
605 |
+
else:
|
606 |
+
weight = merged_sd[lora_module_name]
|
607 |
+
|
608 |
+
ratio = ratios[blockfromkey(key)]
|
609 |
+
if "same to Strength" in sets:
|
610 |
+
ratio, fugou = (ratio**0.5,1) if ratio > 0 else (abs(ratio)**0.5,-1)
|
611 |
+
#print(lora_module_name, ratio)
|
612 |
+
# W <- W + U * D
|
613 |
+
scale = (alpha / network_dim)
|
614 |
+
if not conv2d: # linear
|
615 |
+
weight = weight + ratio * (up_weight @ down_weight) * scale * fugou
|
616 |
+
else:
|
617 |
+
weight = weight + ratio * (up_weight.squeeze(3).squeeze(2) @ down_weight.squeeze(3).squeeze(2)
|
618 |
+
).unsqueeze(2).unsqueeze(3) * scale * fugou
|
619 |
+
|
620 |
+
merged_sd[lora_module_name] = weight
|
621 |
+
|
622 |
+
# extract from merged weights
|
623 |
+
print("extract new lora...")
|
624 |
+
merged_lora_sd = {}
|
625 |
+
with torch.no_grad():
|
626 |
+
for lora_module_name, mat in tqdm(list(merged_sd.items())):
|
627 |
+
conv2d = (len(mat.size()) == 4)
|
628 |
+
if conv2d:
|
629 |
+
mat = mat.squeeze()
|
630 |
+
|
631 |
+
U, S, Vh = torch.linalg.svd(mat)
|
632 |
+
|
633 |
+
U = U[:, :new_rank]
|
634 |
+
S = S[:new_rank]
|
635 |
+
U = U @ torch.diag(S)
|
636 |
+
|
637 |
+
Vh = Vh[:new_rank, :]
|
638 |
+
|
639 |
+
dist = torch.cat([U.flatten(), Vh.flatten()])
|
640 |
+
hi_val = torch.quantile(dist, CLAMP_QUANTILE)
|
641 |
+
low_val = -hi_val
|
642 |
+
|
643 |
+
U = U.clamp(low_val, hi_val)
|
644 |
+
Vh = Vh.clamp(low_val, hi_val)
|
645 |
+
|
646 |
+
up_weight = U
|
647 |
+
down_weight = Vh
|
648 |
+
|
649 |
+
if conv2d:
|
650 |
+
up_weight = up_weight.unsqueeze(2).unsqueeze(3)
|
651 |
+
down_weight = down_weight.unsqueeze(2).unsqueeze(3)
|
652 |
+
|
653 |
+
merged_lora_sd[lora_module_name + '.lora_up.weight'] = up_weight.to("cpu").contiguous()
|
654 |
+
merged_lora_sd[lora_module_name + '.lora_down.weight'] = down_weight.to("cpu").contiguous()
|
655 |
+
merged_lora_sd[lora_module_name + '.alpha'] = torch.tensor(new_rank)
|
656 |
+
|
657 |
+
return merged_lora_sd
|
658 |
+
|
659 |
+
def merge_lora_models(models, ratios,sets):
|
660 |
+
base_alphas = {} # alpha for merged model
|
661 |
+
base_dims = {}
|
662 |
+
merge_dtype = torch.float
|
663 |
+
merged_sd = {}
|
664 |
+
fugou = 1
|
665 |
+
for model, ratios in zip(models, ratios):
|
666 |
+
print(f"merging {model}: {ratios}")
|
667 |
+
lora_sd = load_state_dict(model, merge_dtype)
|
668 |
+
|
669 |
+
# get alpha and dim
|
670 |
+
alphas = {} # alpha for current model
|
671 |
+
dims = {} # dims for current model
|
672 |
+
for key in lora_sd.keys():
|
673 |
+
if 'alpha' in key:
|
674 |
+
lora_module_name = key[:key.rfind(".alpha")]
|
675 |
+
alpha = float(lora_sd[key].detach().numpy())
|
676 |
+
alphas[lora_module_name] = alpha
|
677 |
+
if lora_module_name not in base_alphas:
|
678 |
+
base_alphas[lora_module_name] = alpha
|
679 |
+
elif "lora_down" in key:
|
680 |
+
lora_module_name = key[:key.rfind(".lora_down")]
|
681 |
+
dim = lora_sd[key].size()[0]
|
682 |
+
dims[lora_module_name] = dim
|
683 |
+
if lora_module_name not in base_dims:
|
684 |
+
base_dims[lora_module_name] = dim
|
685 |
+
|
686 |
+
for lora_module_name in dims.keys():
|
687 |
+
if lora_module_name not in alphas:
|
688 |
+
alpha = dims[lora_module_name]
|
689 |
+
alphas[lora_module_name] = alpha
|
690 |
+
if lora_module_name not in base_alphas:
|
691 |
+
base_alphas[lora_module_name] = alpha
|
692 |
+
|
693 |
+
print(f"dim: {list(set(dims.values()))}, alpha: {list(set(alphas.values()))}")
|
694 |
+
|
695 |
+
# merge
|
696 |
+
print(f"merging...")
|
697 |
+
for key in lora_sd.keys():
|
698 |
+
if 'alpha' in key:
|
699 |
+
continue
|
700 |
+
if "lora_down" in key: dwon = True
|
701 |
+
lora_module_name = key[:key.rfind(".lora_")]
|
702 |
+
|
703 |
+
base_alpha = base_alphas[lora_module_name]
|
704 |
+
alpha = alphas[lora_module_name]
|
705 |
+
|
706 |
+
ratio = ratios[blockfromkey(key)]
|
707 |
+
if "same to Strength" in sets:
|
708 |
+
ratio, fugou = (ratio**0.5,1) if ratio > 0 else (abs(ratio)**0.5,-1)
|
709 |
+
|
710 |
+
if "lora_down" in key:
|
711 |
+
ratio = ratio * fugou
|
712 |
+
|
713 |
+
scale = math.sqrt(alpha / base_alpha) * ratio
|
714 |
+
|
715 |
+
if key in merged_sd:
|
716 |
+
assert merged_sd[key].size() == lora_sd[key].size(
|
717 |
+
), f"weights shape mismatch merging v1 and v2, different dims? / 重みのサイズが合いません。v1とv2、または次元数の異なるモデルはマージできません"
|
718 |
+
merged_sd[key] = merged_sd[key] + lora_sd[key] * scale
|
719 |
+
else:
|
720 |
+
merged_sd[key] = lora_sd[key] * scale
|
721 |
+
|
722 |
+
# set alpha to sd
|
723 |
+
for lora_module_name, alpha in base_alphas.items():
|
724 |
+
key = lora_module_name + ".alpha"
|
725 |
+
merged_sd[key] = torch.tensor(alpha)
|
726 |
+
|
727 |
+
print("merged model")
|
728 |
+
print(f"dim: {list(set(base_dims.values()))}, alpha: {list(set(base_alphas.values()))}")
|
729 |
+
|
730 |
+
return merged_sd
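Note on the scaling above: the per-block ratio is multiplied into both the lora_down and lora_up tensors of a module, so their product is effectively scaled by ratio squared; with "same to Strength" selected, sqrt(|ratio|) is used on each factor and the sign is kept in fugou (applied on the lora_down side), so the product ends up scaled by exactly ratio, i.e. the same effect as setting that value as the LoRA strength at generation time. A quick illustrative check of that identity:

import torch

ratio = -0.5
r, sign = (abs(ratio) ** 0.5, -1) if ratio < 0 else (ratio ** 0.5, 1)

up = torch.randn(320, 8)
down = torch.randn(8, 768)

merged = (up * r) @ (down * r * sign)   # what the "same to Strength" merge writes out
target = ratio * (up @ down)            # strength applied once at generation time
print(torch.allclose(merged, target, atol=1e-5))  # True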
|
731 |
+
|
732 |
+
def fullpathfromname(name):
|
733 |
+
if hash == "" or hash ==[]: return ""
|
734 |
+
checkpoint_info = sd_models.get_closet_checkpoint_match(name)
|
735 |
+
return checkpoint_info.filename
|
736 |
+
|
737 |
+
def makeloraname(model_a,model_b):
|
738 |
+
model_a=filenamecutter(model_a)
|
739 |
+
model_b=filenamecutter(model_b)
|
740 |
+
return "lora_"+model_a+"-"+model_b
|
741 |
+
|
742 |
+
def lycomerge(filename,ratios):
|
743 |
+
sd = load_state_dict(filename, torch.float)
|
744 |
+
|
745 |
+
if len(ratios) == 17:
|
746 |
+
r0 = 1
|
747 |
+
ratios = [ratios[0]] + [r0] + ratios[1:3]+ [r0] + ratios[3:5]+[r0] + ratios[5:7]+[r0,r0,r0] + [ratios[7]] + [r0,r0,r0] + ratios[8:]
|
748 |
+
|
749 |
+
print("LyCORIS: " , ratios)
|
750 |
+
|
751 |
+
keys_failed_to_match = []
|
752 |
+
|
753 |
+
for lkey, weight in sd.items():
|
754 |
+
ratio = 1
|
755 |
+
picked = False
|
756 |
+
if 'alpha' in lkey:
|
757 |
+
continue
|
758 |
+
|
759 |
+
fullkey = convert_diffusers_name_to_compvis(lkey)
|
760 |
+
key, lora_key = fullkey.split(".", 1)
|
761 |
+
|
762 |
+
for i,block in enumerate(LYCOBLOCKS):
|
763 |
+
if block in key:
|
764 |
+
ratio = ratios[i]
|
765 |
+
picked = True
|
766 |
+
if not picked: keys_failed_to_match.append(key)
|
767 |
+
|
768 |
+
sd[lkey] = weight * math.sqrt(abs(float(ratio)))
|
769 |
+
|
770 |
+
if "down" in lkey and ratio < 0:
|
771 |
+
sd[lkey] = sd[lkey] * -1
|
772 |
+
|
773 |
+
if len(keys_failed_to_match) > 0:
|
774 |
+
print(keys_failed_to_match)
|
775 |
+
|
776 |
+
return sd
|
777 |
+
|
778 |
+
LORABLOCKS=["encoder",
|
779 |
+
"diffusion_model_input_blocks_1_",
|
780 |
+
"diffusion_model_input_blocks_2_",
|
781 |
+
"diffusion_model_input_blocks_4_",
|
782 |
+
"diffusion_model_input_blocks_5_",
|
783 |
+
"diffusion_model_input_blocks_7_",
|
784 |
+
"diffusion_model_input_blocks_8_",
|
785 |
+
"diffusion_model_middle_block_",
|
786 |
+
"diffusion_model_output_blocks_3_",
|
787 |
+
"diffusion_model_output_blocks_4_",
|
788 |
+
"diffusion_model_output_blocks_5_",
|
789 |
+
"diffusion_model_output_blocks_6_",
|
790 |
+
"diffusion_model_output_blocks_7_",
|
791 |
+
"diffusion_model_output_blocks_8_",
|
792 |
+
"diffusion_model_output_blocks_9_",
|
793 |
+
"diffusion_model_output_blocks_10_",
|
794 |
+
"diffusion_model_output_blocks_11_"]
|
795 |
+
|
796 |
+
LYCOBLOCKS=["encoder",
|
797 |
+
"diffusion_model_input_blocks_0_",
|
798 |
+
"diffusion_model_input_blocks_1_",
|
799 |
+
"diffusion_model_input_blocks_2_",
|
800 |
+
"diffusion_model_input_blocks_3_",
|
801 |
+
"diffusion_model_input_blocks_4_",
|
802 |
+
"diffusion_model_input_blocks_5_",
|
803 |
+
"diffusion_model_input_blocks_6_",
|
804 |
+
"diffusion_model_input_blocks_7_",
|
805 |
+
"diffusion_model_input_blocks_8_",
|
806 |
+
"diffusion_model_input_blocks_9_",
|
807 |
+
"diffusion_model_input_blocks_10_",
|
808 |
+
"diffusion_model_input_blocks_11_",
|
809 |
+
"diffusion_model_middle_block_",
|
810 |
+
"diffusion_model_output_blocks_0_",
|
811 |
+
"diffusion_model_output_blocks_1_",
|
812 |
+
"diffusion_model_output_blocks_2_",
|
813 |
+
"diffusion_model_output_blocks_3_",
|
814 |
+
"diffusion_model_output_blocks_4_",
|
815 |
+
"diffusion_model_output_blocks_5_",
|
816 |
+
"diffusion_model_output_blocks_6_",
|
817 |
+
"diffusion_model_output_blocks_7_",
|
818 |
+
"diffusion_model_output_blocks_8_",
|
819 |
+
"diffusion_model_output_blocks_9_",
|
820 |
+
"diffusion_model_output_blocks_10_",
|
821 |
+
"diffusion_model_output_blocks_11_"]
|
822 |
+
|
823 |
+
class LoRAModule(torch.nn.Module):
|
824 |
+
"""
|
825 |
+
replaces forward method of the original Linear, instead of replacing the original Linear module.
|
826 |
+
"""
|
827 |
+
|
828 |
+
def __init__(self, lora_name, org_module: torch.nn.Module, multiplier=1.0, lora_dim=4, alpha=1):
|
829 |
+
"""if alpha == 0 or None, alpha is rank (no scaling)."""
|
830 |
+
super().__init__()
|
831 |
+
self.lora_name = lora_name
|
832 |
+
|
833 |
+
if org_module.__class__.__name__ == "Conv2d":
|
834 |
+
in_dim = org_module.in_channels
|
835 |
+
out_dim = org_module.out_channels
|
836 |
+
else:
|
837 |
+
in_dim = org_module.in_features
|
838 |
+
out_dim = org_module.out_features
|
839 |
+
|
840 |
+
# if limit_rank:
|
841 |
+
# self.lora_dim = min(lora_dim, in_dim, out_dim)
|
842 |
+
# if self.lora_dim != lora_dim:
|
843 |
+
# print(f"{lora_name} dim (rank) is changed to: {self.lora_dim}")
|
844 |
+
# else:
|
845 |
+
self.lora_dim = lora_dim
|
846 |
+
|
847 |
+
if org_module.__class__.__name__ == "Conv2d":
|
848 |
+
kernel_size = org_module.kernel_size
|
849 |
+
stride = org_module.stride
|
850 |
+
padding = org_module.padding
|
851 |
+
self.lora_down = torch.nn.Conv2d(in_dim, self.lora_dim, kernel_size, stride, padding, bias=False)
|
852 |
+
self.lora_up = torch.nn.Conv2d(self.lora_dim, out_dim, (1, 1), (1, 1), bias=False)
|
853 |
+
else:
|
854 |
+
self.lora_down = torch.nn.Linear(in_dim, self.lora_dim, bias=False)
|
855 |
+
self.lora_up = torch.nn.Linear(self.lora_dim, out_dim, bias=False)
|
856 |
+
|
857 |
+
if type(alpha) == torch.Tensor:
|
858 |
+
alpha = alpha.detach().float().numpy() # without casting, bf16 causes error
|
859 |
+
alpha = self.lora_dim if alpha is None or alpha == 0 else alpha
|
860 |
+
self.scale = alpha / self.lora_dim
|
861 |
+
self.register_buffer("alpha", torch.tensor(alpha)) # 定数として扱える
|
862 |
+
|
863 |
+
# same as microsoft's
|
864 |
+
torch.nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5))
|
865 |
+
torch.nn.init.zeros_(self.lora_up.weight)
|
866 |
+
|
867 |
+
self.multiplier = multiplier
|
868 |
+
self.org_module = org_module # remove in applying
|
869 |
+
self.region = None
|
870 |
+
self.region_mask = None
|
871 |
+
|
872 |
+
def apply_to(self):
|
873 |
+
self.org_forward = self.org_module.forward
|
874 |
+
self.org_module.forward = self.forward
|
875 |
+
del self.org_module
|
876 |
+
|
877 |
+
def merge_to(self, sd, dtype, device):
|
878 |
+
# get up/down weight
|
879 |
+
up_weight = sd["lora_up.weight"].to(torch.float).to(device)
|
880 |
+
down_weight = sd["lora_down.weight"].to(torch.float).to(device)
|
881 |
+
|
882 |
+
# extract weight from org_module
|
883 |
+
org_sd = self.org_module.state_dict()
|
884 |
+
weight = org_sd["weight"].to(torch.float)
|
885 |
+
|
886 |
+
# merge weight
|
887 |
+
if len(weight.size()) == 2:
|
888 |
+
# linear
|
889 |
+
weight = weight + self.multiplier * (up_weight @ down_weight) * self.scale
|
890 |
+
elif down_weight.size()[2:4] == (1, 1):
|
891 |
+
# conv2d 1x1
|
892 |
+
weight = (
|
893 |
+
weight
|
894 |
+
+ self.multiplier
|
895 |
+
* (up_weight.squeeze(3).squeeze(2) @ down_weight.squeeze(3).squeeze(2)).unsqueeze(2).unsqueeze(3)
|
896 |
+
* self.scale
|
897 |
+
)
|
898 |
+
else:
|
899 |
+
# conv2d 3x3
|
900 |
+
conved = torch.nn.functional.conv2d(down_weight.permute(1, 0, 2, 3), up_weight).permute(1, 0, 2, 3)
|
901 |
+
# print(conved.size(), weight.size(), module.stride, module.padding)
|
902 |
+
weight = weight + self.multiplier * conved * self.scale
|
903 |
+
|
904 |
+
# set weight to org_module
|
905 |
+
org_sd["weight"] = weight.to(dtype)
|
906 |
+
self.org_module.load_state_dict(org_sd)
|
907 |
+
|
908 |
+
def set_region(self, region):
|
909 |
+
self.region = region
|
910 |
+
self.region_mask = None
|
911 |
+
|
912 |
+
def forward(self, x):
|
913 |
+
if self.region is None:
|
914 |
+
return self.org_forward(x) + self.lora_up(self.lora_down(x)) * self.multiplier * self.scale
|
915 |
+
|
916 |
+
# regional LoRA FIXME same as additional-network extension
|
917 |
+
if x.size()[1] % 77 == 0:
|
918 |
+
# print(f"LoRA for context: {self.lora_name}")
|
919 |
+
self.region = None
|
920 |
+
return self.org_forward(x) + self.lora_up(self.lora_down(x)) * self.multiplier * self.scale
|
921 |
+
|
922 |
+
# calculate region mask first time
|
923 |
+
if self.region_mask is None:
|
924 |
+
if len(x.size()) == 4:
|
925 |
+
h, w = x.size()[2:4]
|
926 |
+
else:
|
927 |
+
seq_len = x.size()[1]
|
928 |
+
ratio = math.sqrt((self.region.size()[0] * self.region.size()[1]) / seq_len)
|
929 |
+
h = int(self.region.size()[0] / ratio + 0.5)
|
930 |
+
w = seq_len // h
|
931 |
+
|
932 |
+
r = self.region.to(x.device)
|
933 |
+
if r.dtype == torch.bfloat16:
|
934 |
+
r = r.to(torch.float)
|
935 |
+
r = r.unsqueeze(0).unsqueeze(1)
|
936 |
+
# print(self.lora_name, self.region.size(), x.size(), r.size(), h, w)
|
937 |
+
r = torch.nn.functional.interpolate(r, (h, w), mode="bilinear")
|
938 |
+
r = r.to(x.dtype)
|
939 |
+
|
940 |
+
if len(x.size()) == 3:
|
941 |
+
r = torch.reshape(r, (1, x.size()[1], -1))
|
942 |
+
|
943 |
+
self.region_mask = r
|
944 |
+
|
945 |
+
return self.org_forward(x) + self.lora_up(self.lora_down(x)) * self.multiplier * self.scale * self.region_mask
|
946 |
+
|
947 |
+
def create_network(multiplier, network_dim, network_alpha, vae, text_encoder, unet, **kwargs):
|
948 |
+
if network_dim is None:
|
949 |
+
network_dim = 4 # default
|
950 |
+
|
951 |
+
# extract dim/alpha for conv2d, and block dim
|
952 |
+
conv_dim = kwargs.get("conv_dim", None)
|
953 |
+
conv_alpha = kwargs.get("conv_alpha", None)
|
954 |
+
if conv_dim is not None:
|
955 |
+
conv_dim = int(conv_dim)
|
956 |
+
if conv_alpha is None:
|
957 |
+
conv_alpha = 1.0
|
958 |
+
else:
|
959 |
+
conv_alpha = float(conv_alpha)
|
960 |
+
|
961 |
+
"""
|
962 |
+
block_dims = kwargs.get("block_dims")
|
963 |
+
block_alphas = None
|
964 |
+
if block_dims is not None:
|
965 |
+
block_dims = [int(d) for d in block_dims.split(',')]
|
966 |
+
assert len(block_dims) == NUM_BLOCKS, f"Number of block dimensions is not same to {NUM_BLOCKS}"
|
967 |
+
block_alphas = kwargs.get("block_alphas")
|
968 |
+
if block_alphas is None:
|
969 |
+
block_alphas = [1] * len(block_dims)
|
970 |
+
else:
|
971 |
+
block_alphas = [int(a) for a in block_alphas(',')]
|
972 |
+
assert len(block_alphas) == NUM_BLOCKS, f"Number of block alphas is not same to {NUM_BLOCKS}"
|
973 |
+
conv_block_dims = kwargs.get("conv_block_dims")
|
974 |
+
conv_block_alphas = None
|
975 |
+
if conv_block_dims is not None:
|
976 |
+
conv_block_dims = [int(d) for d in conv_block_dims.split(',')]
|
977 |
+
assert len(conv_block_dims) == NUM_BLOCKS, f"Number of block dimensions is not same to {NUM_BLOCKS}"
|
978 |
+
conv_block_alphas = kwargs.get("conv_block_alphas")
|
979 |
+
if conv_block_alphas is None:
|
980 |
+
conv_block_alphas = [1] * len(conv_block_dims)
|
981 |
+
else:
|
982 |
+
conv_block_alphas = [int(a) for a in conv_block_alphas(',')]
|
983 |
+
assert len(conv_block_alphas) == NUM_BLOCKS, f"Number of block alphas is not same to {NUM_BLOCKS}"
|
984 |
+
"""
|
985 |
+
|
986 |
+
network = LoRANetwork(
|
987 |
+
text_encoder,
|
988 |
+
unet,
|
989 |
+
multiplier=multiplier,
|
990 |
+
lora_dim=network_dim,
|
991 |
+
alpha=network_alpha,
|
992 |
+
conv_lora_dim=conv_dim,
|
993 |
+
conv_alpha=conv_alpha,
|
994 |
+
)
|
995 |
+
return network
|
996 |
+
|
997 |
+
|
998 |
+
|
999 |
+
class LoRANetwork(torch.nn.Module):
|
1000 |
+
# is it possible to apply conv_in and conv_out?
|
1001 |
+
UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel", "Attention"]
|
1002 |
+
UNET_TARGET_REPLACE_MODULE_CONV2D_3X3 = ["ResnetBlock2D", "Downsample2D", "Upsample2D"]
|
1003 |
+
TEXT_ENCODER_TARGET_REPLACE_MODULE = ["CLIPAttention", "CLIPMLP"]
|
1004 |
+
LORA_PREFIX_UNET = "lora_unet"
|
1005 |
+
LORA_PREFIX_TEXT_ENCODER = "lora_te"
|
1006 |
+
|
1007 |
+
def __init__(
|
1008 |
+
self,
|
1009 |
+
text_encoder,
|
1010 |
+
unet,
|
1011 |
+
multiplier=1.0,
|
1012 |
+
lora_dim=4,
|
1013 |
+
alpha=1,
|
1014 |
+
conv_lora_dim=None,
|
1015 |
+
conv_alpha=None,
|
1016 |
+
modules_dim=None,
|
1017 |
+
modules_alpha=None,
|
1018 |
+
) -> None:
|
1019 |
+
super().__init__()
|
1020 |
+
self.multiplier = multiplier
|
1021 |
+
|
1022 |
+
self.lora_dim = lora_dim
|
1023 |
+
self.alpha = alpha
|
1024 |
+
self.conv_lora_dim = conv_lora_dim
|
1025 |
+
self.conv_alpha = conv_alpha
|
1026 |
+
|
1027 |
+
if modules_dim is not None:
|
1028 |
+
print(f"create LoRA network from weights")
|
1029 |
+
else:
|
1030 |
+
print(f"create LoRA network. base dim (rank): {lora_dim}, alpha: {alpha}")
|
1031 |
+
|
1032 |
+
self.apply_to_conv2d_3x3 = self.conv_lora_dim is not None
|
1033 |
+
if self.apply_to_conv2d_3x3:
|
1034 |
+
if self.conv_alpha is None:
|
1035 |
+
self.conv_alpha = self.alpha
|
1036 |
+
print(f"apply LoRA to Conv2d with kernel size (3,3). dim (rank): {self.conv_lora_dim}, alpha: {self.conv_alpha}")
|
1037 |
+
|
1038 |
+
# create module instances
|
1039 |
+
def create_modules(prefix, root_module: torch.nn.Module, target_replace_modules) -> List[LoRAModule]:
|
1040 |
+
loras = []
|
1041 |
+
for name, module in root_module.named_modules():
|
1042 |
+
if module.__class__.__name__ in target_replace_modules:
|
1043 |
+
# TODO get block index here
|
1044 |
+
for child_name, child_module in module.named_modules():
|
1045 |
+
is_linear = child_module.__class__.__name__ == "Linear"
|
1046 |
+
is_conv2d = child_module.__class__.__name__ == "Conv2d"
|
1047 |
+
is_conv2d_1x1 = is_conv2d and child_module.kernel_size == (1, 1)
|
1048 |
+
if is_linear or is_conv2d:
|
1049 |
+
lora_name = prefix + "." + name + "." + child_name
|
1050 |
+
lora_name = lora_name.replace(".", "_")
|
1051 |
+
|
1052 |
+
if modules_dim is not None:
|
1053 |
+
if lora_name not in modules_dim:
|
1054 |
+
continue # no LoRA module in this weights file
|
1055 |
+
dim = modules_dim[lora_name]
|
1056 |
+
alpha = modules_alpha[lora_name]
|
1057 |
+
else:
|
1058 |
+
if is_linear or is_conv2d_1x1:
|
1059 |
+
dim = self.lora_dim
|
1060 |
+
alpha = self.alpha
|
1061 |
+
elif self.apply_to_conv2d_3x3:
|
1062 |
+
dim = self.conv_lora_dim
|
1063 |
+
alpha = self.conv_alpha
|
1064 |
+
else:
|
1065 |
+
continue
|
1066 |
+
|
1067 |
+
lora = LoRAModule(lora_name, child_module, self.multiplier, dim, alpha)
|
1068 |
+
loras.append(lora)
|
1069 |
+
return loras
|
1070 |
+
|
1071 |
+
self.text_encoder_loras = create_modules(
|
1072 |
+
LoRANetwork.LORA_PREFIX_TEXT_ENCODER, text_encoder, LoRANetwork.TEXT_ENCODER_TARGET_REPLACE_MODULE
|
1073 |
+
)
|
1074 |
+
print(f"create LoRA for Text Encoder: {len(self.text_encoder_loras)} modules.")
|
1075 |
+
|
1076 |
+
# extend U-Net target modules if conv2d 3x3 is enabled, or load from weights
|
1077 |
+
target_modules = LoRANetwork.UNET_TARGET_REPLACE_MODULE
|
1078 |
+
if modules_dim is not None or self.conv_lora_dim is not None:
|
1079 |
+
target_modules += LoRANetwork.UNET_TARGET_REPLACE_MODULE_CONV2D_3X3
|
1080 |
+
|
1081 |
+
self.unet_loras = create_modules(LoRANetwork.LORA_PREFIX_UNET, unet, target_modules)
|
1082 |
+
print(f"create LoRA for U-Net: {len(self.unet_loras)} modules.")
|
1083 |
+
|
1084 |
+
self.weights_sd = None
|
1085 |
+
|
1086 |
+
# assertion
|
1087 |
+
names = set()
|
1088 |
+
for lora in self.text_encoder_loras + self.unet_loras:
|
1089 |
+
assert lora.lora_name not in names, f"duplicated lora name: {lora.lora_name}"
|
1090 |
+
names.add(lora.lora_name)
|
1091 |
+
|
1092 |
+
def set_multiplier(self, multiplier):
|
1093 |
+
self.multiplier = multiplier
|
1094 |
+
for lora in self.text_encoder_loras + self.unet_loras:
|
1095 |
+
lora.multiplier = self.multiplier
|
1096 |
+
|
1097 |
+
def load_weights(self, file):
|
1098 |
+
if os.path.splitext(file)[1] == ".safetensors":
|
1099 |
+
from safetensors.torch import load_file, safe_open
|
1100 |
+
|
1101 |
+
self.weights_sd = load_file(file)
|
1102 |
+
else:
|
1103 |
+
self.weights_sd = torch.load(file, map_location="cpu")
|
1104 |
+
|
1105 |
+
def apply_to(self, text_encoder, unet, apply_text_encoder=None, apply_unet=None):
|
1106 |
+
if self.weights_sd:
|
1107 |
+
weights_has_text_encoder = weights_has_unet = False
|
1108 |
+
for key in self.weights_sd.keys():
|
1109 |
+
if key.startswith(LoRANetwork.LORA_PREFIX_TEXT_ENCODER):
|
1110 |
+
weights_has_text_encoder = True
|
1111 |
+
elif key.startswith(LoRANetwork.LORA_PREFIX_UNET):
|
1112 |
+
weights_has_unet = True
|
1113 |
+
|
1114 |
+
if apply_text_encoder is None:
|
1115 |
+
apply_text_encoder = weights_has_text_encoder
|
1116 |
+
else:
|
1117 |
+
assert (
|
1118 |
+
apply_text_encoder == weights_has_text_encoder
|
1119 |
+
), f"text encoder weights: {weights_has_text_encoder} but text encoder flag: {apply_text_encoder} / 重みとText Encoderのフラグが矛盾しています"
|
1120 |
+
|
1121 |
+
if apply_unet is None:
|
1122 |
+
apply_unet = weights_has_unet
|
1123 |
+
else:
|
1124 |
+
assert (
|
1125 |
+
apply_unet == weights_has_unet
|
1126 |
+
), f"u-net weights: {weights_has_unet} but u-net flag: {apply_unet} / 重みとU-Netのフラグが矛盾しています"
|
1127 |
+
else:
|
1128 |
+
assert apply_text_encoder is not None and apply_unet is not None, f"internal error: flag not set"
|
1129 |
+
|
1130 |
+
if apply_text_encoder:
|
1131 |
+
print("enable LoRA for text encoder")
|
1132 |
+
else:
|
1133 |
+
self.text_encoder_loras = []
|
1134 |
+
|
1135 |
+
if apply_unet:
|
1136 |
+
print("enable LoRA for U-Net")
|
1137 |
+
else:
|
1138 |
+
self.unet_loras = []
|
1139 |
+
|
1140 |
+
for lora in self.text_encoder_loras + self.unet_loras:
|
1141 |
+
lora.apply_to()
|
1142 |
+
self.add_module(lora.lora_name, lora)
|
1143 |
+
|
1144 |
+
if self.weights_sd:
|
1145 |
+
# if some weights are not in state dict, it is ok because initial LoRA does nothing (lora_up is initialized by zeros)
|
1146 |
+
info = self.load_state_dict(self.weights_sd, False)
|
1147 |
+
print(f"weights are loaded: {info}")
|
1148 |
+
|
1149 |
+
# TODO refactor to common function with apply_to
|
1150 |
+
def merge_to(self, text_encoder, unet, dtype, device):
|
1151 |
+
assert self.weights_sd is not None, "weights are not loaded"
|
1152 |
+
|
1153 |
+
apply_text_encoder = apply_unet = False
|
1154 |
+
for key in self.weights_sd.keys():
|
1155 |
+
if key.startswith(LoRANetwork.LORA_PREFIX_TEXT_ENCODER):
|
1156 |
+
apply_text_encoder = True
|
1157 |
+
elif key.startswith(LoRANetwork.LORA_PREFIX_UNET):
|
1158 |
+
apply_unet = True
|
1159 |
+
|
1160 |
+
if apply_text_encoder:
|
1161 |
+
print("enable LoRA for text encoder")
|
1162 |
+
else:
|
1163 |
+
self.text_encoder_loras = []
|
1164 |
+
|
1165 |
+
if apply_unet:
|
1166 |
+
print("enable LoRA for U-Net")
|
1167 |
+
else:
|
1168 |
+
self.unet_loras = []
|
1169 |
+
|
1170 |
+
for lora in self.text_encoder_loras + self.unet_loras:
|
1171 |
+
sd_for_lora = {}
|
1172 |
+
for key in self.weights_sd.keys():
|
1173 |
+
if key.startswith(lora.lora_name):
|
1174 |
+
sd_for_lora[key[len(lora.lora_name) + 1 :]] = self.weights_sd[key]
|
1175 |
+
lora.merge_to(sd_for_lora, dtype, device)
|
1176 |
+
print(f"weights are merged")
|
1177 |
+
|
1178 |
+
def enable_gradient_checkpointing(self):
|
1179 |
+
# not supported
|
1180 |
+
pass
|
1181 |
+
|
1182 |
+
def prepare_optimizer_params(self, text_encoder_lr, unet_lr):
|
1183 |
+
def enumerate_params(loras):
|
1184 |
+
params = []
|
1185 |
+
for lora in loras:
|
1186 |
+
params.extend(lora.parameters())
|
1187 |
+
return params
|
1188 |
+
|
1189 |
+
self.requires_grad_(True)
|
1190 |
+
all_params = []
|
1191 |
+
|
1192 |
+
if self.text_encoder_loras:
|
1193 |
+
param_data = {"params": enumerate_params(self.text_encoder_loras)}
|
1194 |
+
if text_encoder_lr is not None:
|
1195 |
+
param_data["lr"] = text_encoder_lr
|
1196 |
+
all_params.append(param_data)
|
1197 |
+
|
1198 |
+
if self.unet_loras:
|
1199 |
+
param_data = {"params": enumerate_params(self.unet_loras)}
|
1200 |
+
if unet_lr is not None:
|
1201 |
+
param_data["lr"] = unet_lr
|
1202 |
+
all_params.append(param_data)
|
1203 |
+
|
1204 |
+
return all_params
|
1205 |
+
|
1206 |
+
def prepare_grad_etc(self, text_encoder, unet):
|
1207 |
+
self.requires_grad_(True)
|
1208 |
+
|
1209 |
+
def on_epoch_start(self, text_encoder, unet):
|
1210 |
+
self.train()
|
1211 |
+
|
1212 |
+
def get_trainable_params(self):
|
1213 |
+
return self.parameters()
|
1214 |
+
|
1215 |
+
def save_weights(self, file, dtype, metadata):
|
1216 |
+
if metadata is not None and len(metadata) == 0:
|
1217 |
+
metadata = None
|
1218 |
+
|
1219 |
+
state_dict = self.state_dict()
|
1220 |
+
|
1221 |
+
if dtype is not None:
|
1222 |
+
for key in list(state_dict.keys()):
|
1223 |
+
v = state_dict[key]
|
1224 |
+
v = v.detach().clone().to("cpu").to(dtype)
|
1225 |
+
state_dict[key] = v
|
1226 |
+
|
1227 |
+
if os.path.splitext(file)[1] == ".safetensors":
|
1228 |
+
from safetensors.torch import save_file
|
1229 |
+
|
1230 |
+
# Precalculate model hashes to save time on indexing
|
1231 |
+
if metadata is None:
|
1232 |
+
metadata = {}
|
1233 |
+
model_hash, legacy_hash = precalculate_safetensors_hashes(state_dict, metadata)
|
1234 |
+
metadata["sshs_model_hash"] = model_hash
|
1235 |
+
metadata["sshs_legacy_hash"] = legacy_hash
|
1236 |
+
|
1237 |
+
save_file(state_dict, file, metadata)
|
1238 |
+
else:
|
1239 |
+
torch.save(state_dict, file)
|
1240 |
+
|
1241 |
+
@staticmethod
|
1242 |
+
def set_regions(networks, image):
|
1243 |
+
image = image.astype(np.float32) / 255.0
|
1244 |
+
for i, network in enumerate(networks[:3]):
|
1245 |
+
# NOTE: consider averaging overlapping area
|
1246 |
+
region = image[:, :, i]
|
1247 |
+
if region.max() == 0:
|
1248 |
+
continue
|
1249 |
+
region = torch.tensor(region)
|
1250 |
+
network.set_region(region)
|
1251 |
+
|
1252 |
+
def set_region(self, region):
|
1253 |
+
for lora in self.unet_loras:
|
1254 |
+
lora.set_region(region)
|
1255 |
+
|
1256 |
+
from io import BytesIO
|
1257 |
+
import safetensors.torch
|
1258 |
+
import hashlib
|
1259 |
+
|
1260 |
+
def precalculate_safetensors_hashes(tensors, metadata):
|
1261 |
+
"""Precalculate the model hashes needed by sd-webui-additional-networks to
|
1262 |
+
save time on indexing the model later."""
|
1263 |
+
|
1264 |
+
# Because writing user metadata to the file can change the result of
|
1265 |
+
# sd_models.model_hash(), only retain the training metadata for purposes of
|
1266 |
+
# calculating the hash, as they are meant to be immutable
|
1267 |
+
metadata = {k: v for k, v in metadata.items() if k.startswith("ss_")}
|
1268 |
+
|
1269 |
+
bytes = safetensors.torch.save(tensors, metadata)
|
1270 |
+
b = BytesIO(bytes)
|
1271 |
+
|
1272 |
+
model_hash = addnet_hash_safetensors(b)
|
1273 |
+
legacy_hash = addnet_hash_legacy(b)
|
1274 |
+
return model_hash, legacy_hash
|
1275 |
+
|
1276 |
+
def addnet_hash_safetensors(b):
|
1277 |
+
"""New model hash used by sd-webui-additional-networks for .safetensors format files"""
|
1278 |
+
hash_sha256 = hashlib.sha256()
|
1279 |
+
blksize = 1024 * 1024
|
1280 |
+
|
1281 |
+
b.seek(0)
|
1282 |
+
header = b.read(8)
|
1283 |
+
n = int.from_bytes(header, "little")
|
1284 |
+
|
1285 |
+
offset = n + 8
|
1286 |
+
b.seek(offset)
|
1287 |
+
for chunk in iter(lambda: b.read(blksize), b""):
|
1288 |
+
hash_sha256.update(chunk)
|
1289 |
+
|
1290 |
+
return hash_sha256.hexdigest()
|
1291 |
+
|
1292 |
+
def addnet_hash_legacy(b):
|
1293 |
+
"""Old model hash used by sd-webui-additional-networks for .safetensors format files"""
|
1294 |
+
m = hashlib.sha256()
|
1295 |
+
|
1296 |
+
b.seek(0x100000)
|
1297 |
+
m.update(b.read(0x10000))
|
1298 |
+
return m.hexdigest()[0:8]
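For reference, the two helpers above reproduce the hashes sd-webui-additional-networks computes for .safetensors files: addnet_hash_safetensors skips the JSON header and hashes the remaining tensor data, while addnet_hash_legacy hashes 0x10000 bytes starting at offset 0x100000. A minimal usage sketch (the file path is a placeholder), assuming both helpers are in scope:

from io import BytesIO

# placeholder path; point this at a real .safetensors LoRA file
with open("my_lora.safetensors", "rb") as f:
    b = BytesIO(f.read())

print("model hash :", addnet_hash_safetensors(b))
print("legacy hash:", addnet_hash_legacy(b))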
|
extensions/microsoftexcel-supermerger/scripts/mergers/xyplot.py
ADDED
@@ -0,0 +1,513 @@
1 |
+
import random
|
2 |
+
import cv2
|
3 |
+
import numpy as np
|
4 |
+
import os
|
5 |
+
import copy
|
6 |
+
import csv
|
7 |
+
from PIL import Image
|
8 |
+
from modules import images
|
9 |
+
from modules.shared import opts
|
10 |
+
from scripts.mergers.mergers import TYPES,smerge,simggen,filenamecutter,draw_origin,wpreseter
|
11 |
+
from scripts.mergers.model_util import usemodelgen
|
12 |
+
|
13 |
+
hear = True
|
14 |
+
hearm = False
|
15 |
+
|
16 |
+
state_mergen = False
|
17 |
+
|
18 |
+
numadepth = []
|
19 |
+
|
20 |
+
def freezetime():
|
21 |
+
global state_mergen
|
22 |
+
state_mergen = True
|
23 |
+
|
24 |
+
def numanager(normalstart,xtype,xmen,ytype,ymen,esettings,
|
25 |
+
weights_a,weights_b,model_a,model_b,model_c,alpha,beta,mode,calcmode,useblocks,custom_name,save_sets,id_sets,wpresets,deep,tensor,
|
26 |
+
prompt,nprompt,steps,sampler,cfg,seed,w,h,
|
27 |
+
hireson,hrupscaler,hr2ndsteps,denoise_str,hr_scale,batch_size):
|
28 |
+
global numadepth
|
29 |
+
grids = []
|
30 |
+
sep = "|"
|
31 |
+
|
32 |
+
if sep in xmen:
|
33 |
+
xmens = xmen.split(sep)
|
34 |
+
xmen = xmens[0]
|
35 |
+
if seed =="-1": seed = str(random.randrange(4294967294))
|
36 |
+
for men in xmens[1:]:
|
37 |
+
numaker(xtype,men,ytype,ymen,esettings,
|
38 |
+
weights_a,weights_b,model_a,model_b,model_c,alpha,beta,mode,calcmode,useblocks,custom_name,save_sets,id_sets,wpresets,deep,tensor,
|
39 |
+
prompt,nprompt,steps,sampler,cfg,seed,w,h,
|
40 |
+
hireson,hrupscaler,hr2ndsteps,denoise_str,hr_scale,batch_size)
|
41 |
+
elif sep in ymen:
|
42 |
+
ymens = ymen.split(sep)
|
43 |
+
ymen = ymens[0]
|
44 |
+
if seed =="-1": seed = str(random.randrange(4294967294))
|
45 |
+
for men in ymens[1:]:
|
46 |
+
numaker(xtype,xmen,ytype,men,esettings,
|
47 |
+
weights_a,weights_b,model_a,model_b,model_c,alpha,beta,mode,calcmode,useblocks,custom_name,save_sets,id_sets,wpresets,deep,tensor,
|
48 |
+
prompt,nprompt,steps,sampler,cfg,seed,w,h,
|
49 |
+
hireson,hrupscaler,hr2ndsteps,denoise_str,hr_scale,batch_size)
|
50 |
+
|
51 |
+
if normalstart:
|
52 |
+
result,currentmodel,xyimage,a,b,c= sgenxyplot(xtype,xmen,ytype,ymen,esettings,
|
53 |
+
weights_a,weights_b,model_a,model_b,model_c,alpha,beta,mode,calcmode,
|
54 |
+
useblocks,custom_name,save_sets,id_sets,wpresets,deep,tensor,
|
55 |
+
prompt,nprompt,steps,sampler,cfg,seed,w,h,
|
56 |
+
hireson,hrupscaler,hr2ndsteps,denoise_str,hr_scale,batch_size)
|
57 |
+
if xyimage is not None:grids =[xyimage[0]]
|
58 |
+
else:print(result)
|
59 |
+
else:
|
60 |
+
if numadepth ==[]:
|
61 |
+
return "no reservation",*[None]*5
|
62 |
+
result=currentmodel=xyimage=a=b=c = None
|
63 |
+
|
64 |
+
while True:
|
65 |
+
for i,row in enumerate(numadepth):
|
66 |
+
if row[1] =="waiting":
|
67 |
+
numadepth[i][1] = "Operating"
|
68 |
+
try:
|
69 |
+
result,currentmodel,xyimage,a,b,c = sgenxyplot(*row[2:])
|
70 |
+
except Exception as e:
|
71 |
+
print(e)
|
72 |
+
numadepth[i][1] = "Error"
|
73 |
+
else:
|
74 |
+
if xyimage is not None:
|
75 |
+
grids.append(xyimage[0])
|
76 |
+
numadepth[i][1] = "Finished"
|
77 |
+
else:
|
78 |
+
print(result)
|
79 |
+
numadepth[i][1] = "Error"
|
80 |
+
wcounter = 0
|
81 |
+
for row in numadepth:
|
82 |
+
if row[1] != "waiting":
|
83 |
+
wcounter += 1
|
84 |
+
if wcounter == len(numadepth):
|
85 |
+
break
|
86 |
+
|
87 |
+
return result,currentmodel,grids,a,b,c
|
88 |
+
|
89 |
+
def numaker(xtype,xmen,ytype,ymen,esettings,
|
90 |
+
#msettings=[weights_a,weights_b,model_a,model_b,model_c,alpha,beta,mode,calcmode,useblocks,custom_name,save_sets,id_sets,wpresets]
|
91 |
+
weights_a,weights_b,model_a,model_b,model_c,alpha,beta,mode,calcmode,
|
92 |
+
useblocks,custom_name,save_sets,id_sets,wpresets,deep,tensor,
|
93 |
+
prompt,nprompt,steps,sampler,cfg,seed,w,h,
|
94 |
+
hireson,hrupscaler,hr2ndsteps,denoise_str,hr_scale,batch_size):
|
95 |
+
global numadepth
|
96 |
+
numadepth.append([len(numadepth)+1,"waiting",xtype,xmen,ytype,ymen,esettings,
|
97 |
+
weights_a,weights_b,model_a,model_b,model_c,alpha,beta,mode,calcmode,
|
98 |
+
useblocks,custom_name,save_sets,id_sets,wpresets,deep,tensor,
|
99 |
+
prompt,nprompt,steps,sampler,cfg,seed,w,h,
|
100 |
+
hireson,hrupscaler,hr2ndsteps,denoise_str,hr_scale,batch_size])
|
101 |
+
return numalistmaker(copy.deepcopy(numadepth))
|
102 |
+
|
103 |
+
def nulister(redel):
|
104 |
+
global numadepth
|
105 |
+
if redel == False:
|
106 |
+
return numalistmaker(copy.deepcopy(numadepth))
|
107 |
+
if redel ==-1:
|
108 |
+
numadepth = []
|
109 |
+
else:
|
110 |
+
try:del numadepth[int(redel-1)]
|
111 |
+
except Exception as e:print(e)
|
112 |
+
return numalistmaker(copy.deepcopy(numadepth))
|
113 |
+
|
114 |
+
def numalistmaker(numa):
|
115 |
+
if numa ==[]: return [["no data","",""],]
|
116 |
+
for i,r in enumerate(numa):
|
117 |
+
r[2] = TYPES[int(r[2])]
|
118 |
+
r[4] = TYPES[int(r[4])]
|
119 |
+
numa[i] = r[0:6]+r[8:11]+r[12:16]+r[6:8]
|
120 |
+
return numa
|
121 |
+
|
122 |
+
def caster(news,hear):
|
123 |
+
if hear: print(news)
|
124 |
+
|
125 |
+
def sgenxyplot(xtype,xmen,ytype,ymen,esettings,
|
126 |
+
weights_a,weights_b,model_a,model_b,model_c,alpha,beta,mode,calcmode,
|
127 |
+
useblocks,custom_name,save_sets,id_sets,wpresets,deep,tensor,
|
128 |
+
prompt,nprompt,steps,sampler,cfg,seed,w,h,
|
129 |
+
hireson,hrupscaler,hr2ndsteps,denoise_str,hr_scale,batch_size):
|
130 |
+
global hear
|
131 |
+
esettings = " ".join(esettings)
|
132 |
+
#type[0:none,1:alpha,2:beta,3:seed,4:mbw,5:model_A,6:model_B,7:model_C,8:pinpoint 9:deep]
|
133 |
+
xtype = TYPES[xtype]
|
134 |
+
ytype = TYPES[ytype]
|
135 |
+
if ytype == "none": ymen = ""
|
136 |
+
|
137 |
+
modes=["Weight" ,"Add" ,"Triple","Twice"]
|
138 |
+
xs=ys=0
|
139 |
+
weights_a_in=weights_b_in="0"
|
140 |
+
|
141 |
+
deepprint = True if "print change" in esettings else False
|
142 |
+
|
143 |
+
def castall(hear):
|
144 |
+
if hear :print(f"xmen:{xmen}, ymen:{ymen}, xtype:{xtype}, ytype:{ytype}, weights_a:{weights_a_in}, weights_b:{weights_b_in}, model_A:{model_a},model_B :{model_b}, model_C:{model_c}, alpha:{alpha},\
|
145 |
+
beta :{beta}, mode:{mode}, blocks:{useblocks}")
|
146 |
+
|
147 |
+
pinpoint = "pinpoint blocks" in xtype or "pinpoint blocks" in ytype
|
148 |
+
usebeta = modes[2] in mode or modes[3] in mode
|
149 |
+
|
150 |
+
#check and adjust format
|
151 |
+
print(f"XY plot start, mode:{mode}, X: {xtype}, Y: {ytype}, MBW: {useblocks}")
|
152 |
+
castall(hear)
|
153 |
+
None5 = [None,None,None,None,None]
|
154 |
+
if xmen =="": return "ERROR: parameter X is empty",*None5
|
155 |
+
if ymen =="" and not ytype=="none": return "ERROR: parameter Y is empty",*None5
|
156 |
+
if model_a ==[] and not ("model_A" in xtype or "model_A" in ytype):return f"ERROR: model_A is not selected",*None5
|
157 |
+
if model_b ==[] and not ("model_B" in xtype or "model_B" in ytype):return f"ERROR: model_B is not selected",*None5
|
158 |
+
if model_c ==[] and usebeta and not ("model_C" in xtype or "model_C" in ytype):return "ERROR: model_C is not selected",*None5
|
159 |
+
if xtype == ytype: return "ERROR: same type selected for X,Y",*None5
|
160 |
+
|
161 |
+
if useblocks:
|
162 |
+
weights_a_in=wpreseter(weights_a,wpresets)
|
163 |
+
weights_b_in=wpreseter(weights_b,wpresets)
|
164 |
+
|
165 |
+
#for X only plot, use same seed
|
166 |
+
if seed == -1: seed = int(random.randrange(4294967294))
|
167 |
+
|
168 |
+
#for XY plot, use same seed
|
169 |
+
def dicedealer(zs):
|
170 |
+
for i,z in enumerate(zs):
|
171 |
+
if z =="-1": zs[i] = str(random.randrange(4294967294))
|
172 |
+
print(f"the die was thrown : {zs}")
|
173 |
+
|
174 |
+
#adjust parameters; alpha/beta/models/seed: list of single values, mbw (no beta): list of texts, mbw (use beta): list of text pairs
|
175 |
+
def adjuster(zmen,ztype,aztype):
|
176 |
+
if "mbw" in ztype or "prompt" in ztype:#men separated by newline
|
177 |
+
zs = zmen.splitlines()
|
178 |
+
caster(zs,hear)
|
179 |
+
if "mbw alpha and beta" in ztype:
|
180 |
+
zs = [zs[i:i+2] for i in range(0,len(zs),2)]
|
181 |
+
caster(zs,hear)
|
182 |
+
elif "elemental" in ztype:
|
183 |
+
zs = zmen.split("\n\n")
|
184 |
+
else:
|
185 |
+
if "pinpoint element" in ztype:
|
186 |
+
zmen = zmen.replace("\n",",")
|
187 |
+
if "effective" in ztype:
|
188 |
+
zmen = ","+zmen
|
189 |
+
zmen = zmen.replace("\n",",")
|
190 |
+
zs = [z.strip() for z in zmen.split(',')]
|
191 |
+
caster(zs,hear)
|
192 |
+
if "alpha" in ztype and "effective" in aztype:
|
193 |
+
zs = [zs[0]]
|
194 |
+
if "seed" in ztype:dicedealer(zs)
|
195 |
+
if "alpha" == ztype or "beta" == ztype:
|
196 |
+
oz = []
|
197 |
+
for z in zs:
|
198 |
+
try:
|
199 |
+
float(z)
|
200 |
+
oz.append(z)
|
201 |
+
except:
|
202 |
+
pass
|
203 |
+
zs = oz
|
204 |
+
return zs
|
205 |
+
|
206 |
+
xs = adjuster(xmen,xtype,ytype)
|
207 |
+
ys = adjuster(ymen,ytype,xtype)
|
208 |
+
|
209 |
+
#in case beta selected but mode is Weight sum or Add or Diff
|
210 |
+
if ("beta" in xtype or "beta" in ytype) and (not usebeta and "tensor" not in calcmode):
|
211 |
+
mode = modes[3]
|
212 |
+
print(f"{modes[3]} mode automatically selected)")
|
213 |
+
|
214 |
+
#in case mbw or pinpoint selected but useblocks not checked
|
215 |
+
if ("mbw" in xtype or "pinpoint blocks" in xtype) and not useblocks:
|
216 |
+
useblocks = True
|
217 |
+
print(f"MBW mode enabled")
|
218 |
+
|
219 |
+
if ("mbw" in ytype or "pinpoint blocks" in ytype) and not useblocks:
|
220 |
+
useblocks = True
|
221 |
+
print(f"MBW mode enabled")
|
222 |
+
|
223 |
+
xyimage=[]
|
224 |
+
xcount =ycount=0
|
225 |
+
allcount = len(xs)*len(ys)
|
226 |
+
|
227 |
+
#for STOP XY button
|
228 |
+
flag = False
|
229 |
+
global state_mergen
|
230 |
+
state_mergen = False
|
231 |
+
|
232 |
+
#type[0:none,1:alpha,2:beta,3:seed,4:mbw,5:model_A,6:model_B,7:model_C,8:pinpoint ]
|
233 |
+
blockid=["BASE","IN00","IN01","IN02","IN03","IN04","IN05","IN06","IN07","IN08","IN09","IN10","IN11","M00","OUT00","OUT01","OUT02","OUT03","OUT04","OUT05","OUT06","OUT07","OUT08","OUT09","OUT10","OUT11"]
|
234 |
+
#format ,IN00 IN03,IN04-IN09,OUT4,OUT05
|
235 |
+
def weightsdealer(x,xtype,y,weights):
|
236 |
+
caster(f"weights from : {weights}",hear)
|
237 |
+
zz = x if "pinpoint blocks" in xtype else y
|
238 |
+
za = y if "pinpoint blocks" in xtype else x
|
239 |
+
zz = [z.strip() for z in zz.split(' ')]
|
240 |
+
weights_t = [w.strip() for w in weights.split(',')]
|
241 |
+
if zz[0]!="NOT":
|
242 |
+
flagger=[False]*26
|
243 |
+
changer = True
|
244 |
+
else:
|
245 |
+
flagger=[True]*26
|
246 |
+
changer = False
|
247 |
+
for z in zz:
|
248 |
+
if z =="NOT":continue
|
249 |
+
if "-" in z:
|
250 |
+
zt = [zt.strip() for zt in z.split('-')]
|
251 |
+
if blockid.index(zt[1]) > blockid.index(zt[0]):
|
252 |
+
flagger[blockid.index(zt[0]):blockid.index(zt[1])+1] = [changer]*(blockid.index(zt[1])-blockid.index(zt[0])+1)
|
253 |
+
else:
|
254 |
+
flagger[blockid.index(zt[1]):blockid.index(zt[0])+1] = [changer]*(blockid.index(zt[0])-blockid.index(zt[1])+1)
|
255 |
+
else:
|
256 |
+
flagger[blockid.index(z)] =changer
|
257 |
+
for i,f in enumerate(flagger):
|
258 |
+
if f:weights_t[i]=za
|
259 |
+
outext = ",".join(weights_t)
|
260 |
+
caster(f"weights changed: {outext}",hear)
|
261 |
+
return outext
|
262 |
+
|
263 |
+
def abdealer(z):
|
264 |
+
if " " in z:return z.split(" ")[0],z.split(" ")[1]
|
265 |
+
return z,z
|
266 |
+
|
267 |
+
def xydealer(z,zt,azt):
|
268 |
+
nonlocal alpha,beta,seed,weights_a_in,weights_b_in,model_a,model_b,model_c,deep,calcmode,prompt
|
269 |
+
if pinpoint or "pinpoint element" in zt or "effective" in zt:return
|
270 |
+
if "mbw" in zt:
|
271 |
+
def weightser(z):return z, z.split(',',1)[0]
|
272 |
+
if "mbw alpha and beta" in zt:
|
273 |
+
                weights_a_in,alpha = weightser(wpreseter(z[0],wpresets))
                weights_b_in,beta = weightser(wpreseter(z[1],wpresets))
                return
            elif "alpha" in zt:
                weights_a_in,alpha = weightser(wpreseter(z,wpresets))
                return
            else:
                weights_b_in,beta = weightser(wpreseter(z,wpresets))
                return
        if "and" in zt:
            alpha,beta = abdealer(z)
            return
        if "alpha" in zt and not "pinpoint element" in azt: alpha = z
        if "beta" in zt: beta = z
        if "seed" in zt: seed = int(z)
        if "model_A" in zt: model_a = z
        if "model_B" in zt: model_b = z
        if "model_C" in zt: model_c = z
        if "elemental" in zt: deep = z
        if "calcmode" in zt: calcmode = z
        if "prompt" in zt: prompt = z

    # plot start
    for y in ys:
        xydealer(y,ytype,xtype)
        xcount = 0
        for x in xs:
            xydealer(x,xtype,ytype)
            if ("alpha" in xtype or "alpha" in ytype) and pinpoint:
                weights_a_in = weightsdealer(x,xtype,y,weights_a)
                weights_b_in = weights_b
            if ("beta" in xtype or "beta" in ytype) and pinpoint:
                weights_b_in = weightsdealer(x,xtype,y,weights_b)
                weights_a_in = weights_a
            if "pinpoint element" in xtype or "effective" in xtype:
                deep_in = deep + "," + str(x) + ":" + str(y)
            elif "pinpoint element" in ytype or "effective" in ytype:
                deep_in = deep + "," + str(y) + ":" + str(x)
            else:
                deep_in = deep

            print(f"XY plot: X: {xtype}, {str(x)}, Y: {ytype}, {str(y)} ({xcount+ycount*len(xs)+1}/{allcount})")
            if not (xtype == "seed" and xcount > 0):
                _, currentmodel, modelid, theta_0, _ = smerge(weights_a_in, weights_b_in, model_a, model_b, model_c, float(alpha), float(beta), mode, calcmode,
                                                              useblocks, "", "", id_sets, False, deep_in, tensor, deepprint=deepprint)
                usemodelgen(theta_0, model_a, currentmodel)
            # simggen(prompt, nprompt, steps, sampler, cfg, seed, w, h, mergeinfo="", id_sets=[], modelid="no id"):
            image_temp = simggen(prompt, nprompt, steps, sampler, cfg, seed, w, h, hireson, hrupscaler, hr2ndsteps, denoise_str, hr_scale, batch_size, currentmodel, id_sets, modelid)
            xyimage.append(image_temp[0][0])
            xcount += 1
            if state_mergen:
                flag = True
                break
        ycount += 1
        if flag: break

    if flag and ycount == 1:
        xs = xs[:xcount]
        ys = [ys[0],]
        print(f"stopped at x={xcount},y={ycount}")
    elif flag:
        ys = ys[:ycount]
        print(f"stopped at x={xcount},y={ycount}")

    if "mbw alpha and beta" in xtype: xs = [f"alpha:({x[0]}),beta({x[1]})" for x in xs]
    if "mbw alpha and beta" in ytype: ys = [f"alpha:({y[0]}),beta({y[1]})" for y in ys]

    xs[0] = xtype + " = " + xs[0]  # draw X label
    if ytype != TYPES[0] or "model" in ytype: ys[0] = ytype + " = " + ys[0]  # draw Y label

    if ys == [""]: ys = [" "]

    if "effective" in xtype or "effective" in ytype:
        xyimage, xs, ys = effectivechecker(xyimage, xs, ys, model_a, model_b, esettings)

    if not "grid" in esettings:
        gridmodel = makegridmodelname(model_a, model_b, model_c, useblocks, mode, xtype, ytype, alpha, beta, weights_a, weights_b, usebeta)
        grid = smakegrid(xyimage, xs, ys, gridmodel, image_temp[4])
        xyimage.insert(0, grid)

    state_mergen = False
    return "Finished", currentmodel, xyimage, *image_temp[1:4]

def smakegrid(imgs, xs, ys, currentmodel, p):
    ver_texts = [[images.GridAnnotation(y)] for y in ys]
    hor_texts = [[images.GridAnnotation(x)] for x in xs]

    w, h = imgs[0].size
    grid = Image.new('RGB', size=(len(xs) * w, len(ys) * h), color='black')

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % len(xs) * w, i // len(xs) * h))

    grid = images.draw_grid_annotations(grid, w, h, hor_texts, ver_texts)
    grid = draw_origin(grid, currentmodel, w * len(xs), h * len(ys), w)
    if opts.grid_save:
        images.save_image(grid, opts.outdir_txt2img_grids, "xy_grid", extension=opts.grid_format, prompt=p.prompt, seed=p.seed, grid=True, p=p)

    return grid

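# Illustrative sketch (not part of xyplot.py): smakegrid pastes the generated
# images row-major, with column = i % len(xs) and row = i // len(xs). The
# placeholder tiles and sizes below are hypothetical and only verify that layout.
from PIL import Image

_xs = ["0.25", "0.5", "0.75"]          # three columns
_ys = ["alpha", "beta"]                # two rows
_w, _h = 64, 64                        # placeholder tile size
_imgs = [Image.new("RGB", (_w, _h), "gray") for _ in range(len(_xs) * len(_ys))]

_grid = Image.new("RGB", (len(_xs) * _w, len(_ys) * _h), "black")
for _i, _img in enumerate(_imgs):
    _grid.paste(_img, box=(_i % len(_xs) * _w, _i // len(_xs) * _h))
print(_grid.size)  # (192, 128)
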
def makegridmodelname(model_a, model_b, model_c, useblocks, mode, xtype, ytype, alpha, beta, wa, wb, usebeta):
    model_a = filenamecutter(model_a)
    model_b = filenamecutter(model_b)
    model_c = filenamecutter(model_c)

    if not usebeta: beta, wb = "not used", "not used"
    vals = ""
    modes = ["Weight", "Add", "Triple", "Twice"]

    if "mbw" in xtype:
        if "alpha" in xtype: wa = "X"
        if usebeta or " beta" in xtype: wb = "X"

    if "mbw" in ytype:
        if "alpha" in ytype: wa = "Y"
        if usebeta or " beta" in ytype: wb = "Y"

    wa = "alpha = " + wa
    wb = "beta = " + wb

    x = 50
    while len(wa) > x:
        wa = wa[:x] + '\n' + wa[x:]
        x = x + 50

    x = 50
    while len(wb) > x:
        wb = wb[:x] + '\n' + wb[x:]
        x = x + 50

    if "model" in xtype:
        if "A" in xtype: model_a = "model A"
        elif "B" in xtype: model_b = "model B"
        elif "C" in xtype: model_c = "model C"

    if "model" in ytype:
        if "A" in ytype: model_a = "model A"
        elif "B" in ytype: model_b = "model B"
        elif "C" in ytype: model_c = "model C"

    if modes[1] in mode:
        currentmodel = f"{model_a} \n {model_b} - {model_c})\n x alpha"
    elif modes[2] in mode:
        currentmodel = f"{model_a} x \n(1-alpha-beta) {model_b} x alpha \n+ {model_c} x beta"
    elif modes[3] in mode:
        currentmodel = f"({model_a} x(1-alpha) \n + {model_b} x alpha)*(1-beta)\n+ {model_c} x beta"
    else:
        currentmodel = f"{model_a} x (1-alpha) \n {model_b} x alpha"

    if "alpha" in xtype: alpha = "X"
    if "beta" in xtype: beta = "X"
    if "alpha" in ytype: alpha = "Y"
    if "beta" in ytype: beta = "Y"

    if "mbw" in xtype:
        if "alpha" in xtype: alpha = "X"
        if "beta" in xtype or usebeta: beta = "X"

    if "mbw" in ytype:
        if "alpha" in ytype: alpha = "Y"
        if "beta" in ytype or usebeta: beta = "Y"

    vals = f"\nalpha = {alpha},beta = {beta}" if not useblocks else f"\n{wa}\n{wb}"

    currentmodel = currentmodel + vals
    return currentmodel

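# Illustrative sketch (not part of xyplot.py): the caption strings built by
# makegridmodelname are broken with a newline every 50 characters by the
# while-loop above; the input string here is hypothetical.
_wa = "alpha = " + ",".join(["0.5"] * 26)
_x = 50
while len(_wa) > _x:
    _wa = _wa[:_x] + '\n' + _wa[_x:]
    _x = _x + 50
print(_wa)
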
def effectivechecker(imgs, xs, ys, model_a, model_b, esettings):
    diffs = []
    outnum = []
    im1 = np.array(imgs[0])

    model_a = filenamecutter(model_a)
    model_b = filenamecutter(model_b)
    dir = os.path.join(opts.outdir_txt2img_samples, f"{model_a+model_b}", "difgif")

    if "gif" in esettings:
        try:
            os.makedirs(dir)
        except FileExistsError:
            pass

    ls, ss = (xs.copy(), ys.copy()) if len(xs) > len(ys) else (ys.copy(), xs.copy())

    for i in range(len(imgs) - 1):
        im2 = np.array(imgs[i + 1])

        abs_diff = cv2.absdiff(im2, im1)

        abs_diff_t = cv2.threshold(abs_diff, 5, 255, cv2.THRESH_BINARY)[1]
        res = abs_diff_t.astype(np.uint8)
        percentage = (np.count_nonzero(res) * 100) / res.size
        abs_diff = cv2.bitwise_not(abs_diff)
        outnum.append(percentage)

        abs_diff = Image.fromarray(abs_diff)

        diffs.append(abs_diff)

        if "gif" in esettings:
            gifpath = gifpath_t = os.path.join(dir, ls[i + 1].replace(":", "_") + ".gif")

            is_file = os.path.isfile(gifpath)
            j = 0
            while is_file:
                gifpath = gifpath_t.replace(".gif", f"_{j}.gif")
                print(gifpath)
                is_file = os.path.isfile(gifpath)
                j = j + 1

            imgs[0].save(gifpath, save_all=True, append_images=[imgs[i + 1]], optimize=False, duration=1000, loop=0)

    nums = []
    outs = []

    ls = ls[1:]
    for i in range(len(ls)):
        nums.append([ls[i], outnum[i]])
        ls[i] = ls[i] + "\n Diff : " + str(round(outnum[i], 3)) + "%"

    if "csv" in esettings:
        try:
            os.makedirs(dir)
        except FileExistsError:
            pass
        filepath = os.path.join(dir, f"{model_a+model_b}.csv")
        with open(filepath, "a", newline="") as f:
            writer = csv.writer(f)
            writer.writerows(nums)

    if len(ys) > len(xs):
        for diff, img in zip(diffs, imgs[1:]):
            outs.append(diff)
            outs.append(img)
        outs.append(imgs[0])
        ss = ["diff", ss[0], "source"]
        return outs, ss, ls
    else:
        outs = [imgs[0]] * len(diffs) + imgs[1:] + diffs
        ss = ["source", ss[0], "diff"]
        return outs, ls, ss
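effectivechecker scores how much each merged variant changes the output: it takes the absolute per-pixel difference against the first image, thresholds it at 5, and reports the share of pixels that moved. Below is a minimal standalone sketch of that scoring, using two hypothetical solid-colour images (requires numpy, opencv-python and Pillow).

```python
import cv2
import numpy as np
from PIL import Image

base = Image.new("RGB", (64, 64), (10, 10, 10))     # stand-in for imgs[0]
variant = Image.new("RGB", (64, 64), (10, 40, 10))  # stand-in for imgs[i+1]

im1 = np.array(base)
im2 = np.array(variant)

abs_diff = cv2.absdiff(im2, im1)
# pixels that changed by more than 5, same threshold as effectivechecker
mask = cv2.threshold(abs_diff, 5, 255, cv2.THRESH_BINARY)[1].astype(np.uint8)
percentage = np.count_nonzero(mask) * 100 / mask.size
print(f"Diff : {round(percentage, 3)}%")  # ~33.3% here (only green changed)
```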
extensions/microsoftexcel-supermerger/scripts/supermerger.py
ADDED
@@ -0,0 +1,552 @@
1 |
+
import argparse
|
2 |
+
import gc
|
3 |
+
import os
|
4 |
+
import os.path
|
5 |
+
import re
|
6 |
+
import shutil
|
7 |
+
from importlib import reload
|
8 |
+
from pprint import pprint
|
9 |
+
import gradio as gr
|
10 |
+
from modules import (devices, script_callbacks, scripts, sd_hijack, sd_models,sd_vae, shared)
|
11 |
+
from modules.scripts import basedir
|
12 |
+
from modules.sd_models import checkpoints_loaded
|
13 |
+
from modules.shared import opts
|
14 |
+
from modules.ui import create_output_panel, create_refresh_button
|
15 |
+
import scripts.mergers.mergers
|
16 |
+
import scripts.mergers.pluslora
|
17 |
+
import scripts.mergers.xyplot
|
18 |
+
reload(scripts.mergers.mergers) # update without restarting web-ui.bat
|
19 |
+
reload(scripts.mergers.xyplot)
|
20 |
+
reload(scripts.mergers.pluslora)
|
21 |
+
import csv
|
22 |
+
import scripts.mergers.pluslora as pluslora
|
23 |
+
from scripts.mergers.mergers import (TYPESEG, freezemtime, rwmergelog, simggen,smergegen)
|
24 |
+
from scripts.mergers.xyplot import freezetime, nulister, numaker, numanager
|
25 |
+
|
26 |
+
gensets=argparse.Namespace()
|
27 |
+
|
28 |
+
def on_ui_train_tabs(params):
|
29 |
+
txt2img_preview_params=params.txt2img_preview_params
|
30 |
+
gensets.txt2img_preview_params=txt2img_preview_params
|
31 |
+
return None
|
32 |
+
|
33 |
+
path_root = basedir()
|
34 |
+
|
35 |
+
def on_ui_tabs():
|
36 |
+
weights_presets=""
|
37 |
+
userfilepath = os.path.join(path_root, "scripts","mbwpresets.txt")
|
38 |
+
if os.path.isfile(userfilepath):
|
39 |
+
try:
|
40 |
+
with open(userfilepath) as f:
|
41 |
+
weights_presets = f.read()
|
42 |
+
filepath = userfilepath
|
43 |
+
except OSError as e:
|
44 |
+
pass
|
45 |
+
else:
|
46 |
+
filepath = os.path.join(path_root, "scripts","mbwpresets_master.txt")
|
47 |
+
try:
|
48 |
+
with open(filepath) as f:
|
49 |
+
weights_presets = f.read()
|
50 |
+
shutil.copyfile(filepath, userfilepath)
|
51 |
+
except OSError as e:
|
52 |
+
pass
|
53 |
+
|
54 |
+
with gr.Blocks() as supermergerui:
|
55 |
+
with gr.Tab("Merge"):
|
56 |
+
with gr.Row().style(equal_height=False):
|
57 |
+
with gr.Column(scale = 3):
|
58 |
+
gr.HTML(value="<p>Merge models and load it for generation</p>")
|
59 |
+
|
60 |
+
with gr.Row():
|
61 |
+
model_a = gr.Dropdown(sd_models.checkpoint_tiles(),elem_id="model_converter_model_name",label="Model A",interactive=True)
|
62 |
+
create_refresh_button(model_a, sd_models.list_models,lambda: {"choices": sd_models.checkpoint_tiles()},"refresh_checkpoint_Z")
|
63 |
+
|
64 |
+
model_b = gr.Dropdown(sd_models.checkpoint_tiles(),elem_id="model_converter_model_name",label="Model B",interactive=True)
|
65 |
+
create_refresh_button(model_b, sd_models.list_models,lambda: {"choices": sd_models.checkpoint_tiles()},"refresh_checkpoint_Z")
|
66 |
+
|
67 |
+
model_c = gr.Dropdown(sd_models.checkpoint_tiles(),elem_id="model_converter_model_name",label="Model C",interactive=True)
|
68 |
+
create_refresh_button(model_c, sd_models.list_models,lambda: {"choices": sd_models.checkpoint_tiles()},"refresh_checkpoint_Z")
|
69 |
+
|
70 |
+
mode = gr.Radio(label = "Merge Mode",choices = ["Weight sum:A*(1-alpha)+B*alpha", "Add difference:A+(B-C)*alpha",
|
71 |
+
"Triple sum:A*(1-alpha-beta)+B*alpha+C*beta",
|
72 |
+
"sum Twice:(A*(1-alpha)+B*alpha)*(1-beta)+C*beta",
|
73 |
+
], value = "Weight sum:A*(1-alpha)+B*alpha")
|
74 |
+
calcmode = gr.Radio(label = "Calcutation Mode",choices = ["normal", "cosineA", "cosineB", "smoothAdd","tensor"], value = "normal")
|
75 |
+
with gr.Row():
|
76 |
+
useblocks = gr.Checkbox(label="use MBW")
|
77 |
+
base_alpha = gr.Slider(label="alpha", minimum=-1.0, maximum=2, step=0.001, value=0.5)
|
78 |
+
base_beta = gr.Slider(label="beta", minimum=-1.0, maximum=2, step=0.001, value=0.25)
|
79 |
+
#weights = gr.Textbox(label="weights,base alpha,IN00,IN02,...IN11,M00,OUT00,...,OUT11",lines=2,value="0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5")
|
80 |
+
|
81 |
+
with gr.Row():
|
82 |
+
merge = gr.Button(elem_id="model_merger_merge", value="Merge!",variant='primary')
|
83 |
+
mergeandgen = gr.Button(elem_id="model_merger_merge", value="Merge&Gen",variant='primary')
|
84 |
+
gen = gr.Button(elem_id="model_merger_merge", value="Gen",variant='primary')
|
85 |
+
stopmerge = gr.Button(elem_id="stopmerge", value="Stop",variant='primary')
|
86 |
+
with gr.Row():
|
87 |
+
with gr.Column(scale = 4):
|
88 |
+
save_sets = gr.CheckboxGroup(["save model", "overwrite","safetensors","fp16","save metadata"], value=["safetensors"], label="save settings")
|
89 |
+
with gr.Column(scale = 2):
|
90 |
+
id_sets = gr.CheckboxGroup(["image", "PNG info"], label="write merged model ID to")
|
91 |
+
with gr.Row():
|
92 |
+
with gr.Column(min_width = 50, scale=2):
|
93 |
+
with gr.Row():
|
94 |
+
custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="model_converter_custom_name")
|
95 |
+
mergeid = gr.Textbox(label="merge from ID", elem_id="model_converter_custom_name",value = "-1")
|
96 |
+
with gr.Column(min_width = 50, scale=1):
|
97 |
+
with gr.Row():s_reverse= gr.Button(value="Set from ID(-1 for last)",variant='primary')
|
98 |
+
|
99 |
+
with gr.Accordion("Restore faces, Tiling, Hires. fix, Batch size",open = False):
|
100 |
+
batch_size = denois_str = gr.Slider(minimum=0, maximum=8, step=1, label='Batch size', value=1, elem_id="sm_txt2img_batch_size")
|
101 |
+
genoptions = gr.CheckboxGroup(label = "Gen Options",choices=["Restore faces", "Tiling", "Hires. fix"], visible = True,interactive=True,type="value")
|
102 |
+
with gr.Row(elem_id="txt2img_hires_fix_row1", variant="compact"):
|
103 |
+
hrupscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode)
|
104 |
+
hr2ndsteps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps")
|
105 |
+
denois_str = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength")
|
106 |
+
hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale")
|
107 |
+
|
108 |
+
hiresfix = [genoptions,hrupscaler,hr2ndsteps,denois_str,hr_scale]
|
109 |
+
|
110 |
+
with gr.Accordion("Elemental Merge",open = False):
|
111 |
+
with gr.Row():
|
112 |
+
esettings1 = gr.CheckboxGroup(label = "settings",choices=["print change"],type="value",interactive=True)
|
113 |
+
with gr.Row():
|
114 |
+
deep = gr.Textbox(label="Blocks:Element:Ratio,Blocks:Element:Ratio,...",lines=2,value="")
|
115 |
+
|
116 |
+
with gr.Accordion("Tensor Merge",open = False,visible=False):
|
117 |
+
tensor = gr.Textbox(label="Blocks:Tensors",lines=2,value="")
|
118 |
+
|
119 |
+
with gr.Row():
|
120 |
+
x_type = gr.Dropdown(label="X type", choices=[x for x in TYPESEG], value="alpha", type="index")
|
121 |
+
x_randseednum = gr.Number(value=3, label="number of -1", interactive=True, visible = True)
|
122 |
+
xgrid = gr.Textbox(label="Sequential Merge Parameters",lines=3,value="0.25,0.5,0.75")
|
123 |
+
y_type = gr.Dropdown(label="Y type", choices=[y for y in TYPESEG], value="none", type="index")
|
124 |
+
ygrid = gr.Textbox(label="Y grid (Disabled if blank)",lines=3,value="",visible =False)
|
125 |
+
with gr.Row():
|
126 |
+
gengrid = gr.Button(elem_id="model_merger_merge", value="Sequential XY Merge and Generation",variant='primary')
|
127 |
+
stopgrid = gr.Button(elem_id="model_merger_merge", value="Stop XY",variant='primary')
|
128 |
+
s_reserve1 = gr.Button(value="Reserve XY Plot",variant='primary')
|
129 |
+
dtrue = gr.Checkbox(value = True, visible = False)
|
130 |
+
dfalse = gr.Checkbox(value = False,visible = False)
|
131 |
+
dummy_t = gr.Textbox(value = "",visible = False)
|
132 |
+
blockid=["BASE","IN00","IN01","IN02","IN03","IN04","IN05","IN06","IN07","IN08","IN09","IN10","IN11","M00","OUT00","OUT01","OUT02","OUT03","OUT04","OUT05","OUT06","OUT07","OUT08","OUT09","OUT10","OUT11"]
|
133 |
+
|
134 |
+
with gr.Column(scale = 2):
|
135 |
+
currentmodel = gr.Textbox(label="Current Model",lines=1,value="")
|
136 |
+
submit_result = gr.Textbox(label="Message")
|
137 |
+
mgallery, mgeninfo, mhtmlinfo, mhtmllog = create_output_panel("txt2img", opts.outdir_txt2img_samples)
|
138 |
+
with gr.Row(visible = False) as row_inputers:
|
139 |
+
inputer = gr.Textbox(label="",lines=1,value="")
|
140 |
+
addtox = gr.Button(value="Add to Sequence X")
|
141 |
+
addtoy = gr.Button(value="Add to Sequence Y")
|
142 |
+
with gr.Row(visible = False) as row_blockids:
|
143 |
+
blockids = gr.CheckboxGroup(label = "block IDs",choices=[x for x in blockid],type="value",interactive=True)
|
144 |
+
with gr.Row(visible = False) as row_calcmode:
|
145 |
+
calcmodes = gr.CheckboxGroup(label = "calcmode",choices=["normal", "cosineA", "cosineB", "smoothAdd","tensor"],type="value",interactive=True)
|
146 |
+
with gr.Row(visible = False) as row_checkpoints:
|
147 |
+
checkpoints = gr.CheckboxGroup(label = "checkpoint",choices=[x.model_name for x in sd_models.checkpoints_list.values()],type="value",interactive=True)
|
148 |
+
with gr.Row(visible = False) as row_esets:
|
149 |
+
esettings = gr.CheckboxGroup(label = "effective chekcer settings",choices=["save csv","save anime gif","not save grid","print change"],type="value",interactive=True)
|
150 |
+
|
151 |
+
with gr.Tab("Weights Setting"):
|
152 |
+
with gr.Row():
|
153 |
+
setalpha = gr.Button(elem_id="copytogen", value="set to alpha",variant='primary')
|
154 |
+
readalpha = gr.Button(elem_id="copytogen", value="read from alpha",variant='primary')
|
155 |
+
setbeta = gr.Button(elem_id="copytogen", value="set to beta",variant='primary')
|
156 |
+
readbeta = gr.Button(elem_id="copytogen", value="read from beta",variant='primary')
|
157 |
+
setx = gr.Button(elem_id="copytogen", value="set to X",variant='primary')
|
158 |
+
with gr.Row():
|
159 |
+
weights_a = gr.Textbox(label="weights for alpha, base alpha,IN00,IN02,...IN11,M00,OUT00,...,OUT11",value = "0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5")
|
160 |
+
weights_b = gr.Textbox(label="weights,for beta, base beta,IN00,IN02,...IN11,M00,OUT00,...,OUT11",value = "0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2")
|
161 |
+
with gr.Row():
|
162 |
+
base= gr.Slider(label="Base", minimum=0, maximum=1, step =0.01, value=0.5)
|
163 |
+
in00 = gr.Slider(label="IN00", minimum=0, maximum=1, step=0.01, value=0.5)
|
164 |
+
in01 = gr.Slider(label="IN01", minimum=0, maximum=1, step=0.01, value=0.5)
|
165 |
+
in02 = gr.Slider(label="IN02", minimum=0, maximum=1, step=0.01, value=0.5)
|
166 |
+
in03 = gr.Slider(label="IN03", minimum=0, maximum=1, step=0.01, value=0.5)
|
167 |
+
with gr.Row():
|
168 |
+
in04 = gr.Slider(label="IN04", minimum=0, maximum=1, step=0.01, value=0.5)
|
169 |
+
in05 = gr.Slider(label="IN05", minimum=0, maximum=1, step=0.01, value=0.5)
|
170 |
+
in06 = gr.Slider(label="IN06", minimum=0, maximum=1, step=0.01, value=0.5)
|
171 |
+
in07 = gr.Slider(label="IN07", minimum=0, maximum=1, step=0.01, value=0.5)
|
172 |
+
in08 = gr.Slider(label="IN08", minimum=0, maximum=1, step=0.01, value=0.5)
|
173 |
+
in09 = gr.Slider(label="IN09", minimum=0, maximum=1, step=0.01, value=0.5)
|
174 |
+
with gr.Row():
|
175 |
+
in10 = gr.Slider(label="IN10", minimum=0, maximum=1, step=0.01, value=0.5)
|
176 |
+
in11 = gr.Slider(label="IN11", minimum=0, maximum=1, step=0.01, value=0.5)
|
177 |
+
mi00 = gr.Slider(label="M00", minimum=0, maximum=1, step=0.01, value=0.5)
|
178 |
+
ou00 = gr.Slider(label="OUT00", minimum=0, maximum=1, step=0.01, value=0.5)
|
179 |
+
ou01 = gr.Slider(label="OUT01", minimum=0, maximum=1, step=0.01, value=0.5)
|
180 |
+
ou02 = gr.Slider(label="OUT02", minimum=0, maximum=1, step=0.01, value=0.5)
|
181 |
+
with gr.Row():
|
182 |
+
ou03 = gr.Slider(label="OUT03", minimum=0, maximum=1, step=0.01, value=0.5)
|
183 |
+
ou04 = gr.Slider(label="OUT04", minimum=0, maximum=1, step=0.01, value=0.5)
|
184 |
+
ou05 = gr.Slider(label="OUT05", minimum=0, maximum=1, step=0.01, value=0.5)
|
185 |
+
ou06 = gr.Slider(label="OUT06", minimum=0, maximum=1, step=0.01, value=0.5)
|
186 |
+
ou07 = gr.Slider(label="OUT07", minimum=0, maximum=1, step=0.01, value=0.5)
|
187 |
+
ou08 = gr.Slider(label="OUT08", minimum=0, maximum=1, step=0.01, value=0.5)
|
188 |
+
with gr.Row():
|
189 |
+
ou09 = gr.Slider(label="OUT09", minimum=0, maximum=1, step=0.01, value=0.5)
|
190 |
+
ou10 = gr.Slider(label="OUT10", minimum=0, maximum=1, step=0.01, value=0.5)
|
191 |
+
ou11 = gr.Slider(label="OUT11", minimum=0, maximum=1, step=0.01, value=0.5)
|
192 |
+
with gr.Tab("Weights Presets"):
|
193 |
+
with gr.Row():
|
194 |
+
s_reloadtext = gr.Button(value="Reload Presets",variant='primary')
|
195 |
+
s_reloadtags = gr.Button(value="Reload Tags",variant='primary')
|
196 |
+
s_savetext = gr.Button(value="Save Presets",variant='primary')
|
197 |
+
s_openeditor = gr.Button(value="Open TextEditor",variant='primary')
|
198 |
+
weightstags= gr.Textbox(label="available",lines = 2,value=tagdicter(weights_presets),visible =True,interactive =True)
|
199 |
+
wpresets= gr.TextArea(label="",value=weights_presets,visible =True,interactive = True)
|
200 |
+
|
201 |
+
with gr.Tab("Reservation"):
|
202 |
+
with gr.Row():
|
203 |
+
s_reserve = gr.Button(value="Reserve XY Plot",variant='primary')
|
204 |
+
s_reloadreserve = gr.Button(value="Reloat List",variant='primary')
|
205 |
+
s_startreserve = gr.Button(value="Start XY plot",variant='primary')
|
206 |
+
s_delreserve = gr.Button(value="Delete list(-1 for all)",variant='primary')
|
207 |
+
s_delnum = gr.Number(value=1, label="Delete num : ", interactive=True, visible = True,precision =0)
|
208 |
+
with gr.Row():
|
209 |
+
numaframe = gr.Dataframe(
|
210 |
+
headers=["No.","status","xtype","xmenber", "ytype","ymenber","model A","model B","model C","alpha","beta","mode","use MBW","weights alpha","weights beta"],
|
211 |
+
row_count=5,)
|
212 |
+
# with gr.Tab("manual"):
|
213 |
+
# with gr.Row():
|
214 |
+
# gr.HTML(value="<p> exampls: Change base alpha from 0.1 to 0.9 <br>0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9<br>If you want to display the original model as well for comparison<br>0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1</p>")
|
215 |
+
# gr.HTML(value="<p> For block-by-block merging <br>0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5<br>1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1<br>0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1</p>")
|
216 |
+
|
217 |
+
with gr.Row():
|
218 |
+
|
219 |
+
currentcache = gr.Textbox(label="Current Cache")
|
220 |
+
loadcachelist = gr.Button(elem_id="model_merger_merge", value="Reload Cache List",variant='primary')
|
221 |
+
unloadmodel = gr.Button(value="unload model",variant='primary')
|
222 |
+
|
223 |
+
|
224 |
+
# main ui end
|
225 |
+
|
226 |
+
with gr.Tab("LoRA", elem_id="tab_lora"):
|
227 |
+
pluslora.on_ui_tabs()
|
228 |
+
|
229 |
+
with gr.Tab("History", elem_id="tab_history"):
|
230 |
+
|
231 |
+
with gr.Row():
|
232 |
+
load_history = gr.Button(value="load_history",variant='primary')
|
233 |
+
searchwrods = gr.Textbox(label="",lines=1,value="")
|
234 |
+
search = gr.Button(value="search")
|
235 |
+
searchmode = gr.Radio(label = "Search Mode",choices = ["or","and"], value = "or",type = "value")
|
236 |
+
with gr.Row():
|
237 |
+
history = gr.Dataframe(
|
238 |
+
headers=["ID","Time","Name","Weights alpha","Weights beta","Model A","Model B","Model C","alpha","beta","Mode","use MBW","custum name","save setting","use ID"],
|
239 |
+
)
|
240 |
+
|
241 |
+
with gr.Tab("Elements", elem_id="tab_deep"):
|
242 |
+
with gr.Row():
|
243 |
+
smd_model_a = gr.Dropdown(sd_models.checkpoint_tiles(),elem_id="model_converter_model_name",label="Checkpoint A",interactive=True)
|
244 |
+
create_refresh_button(smd_model_a, sd_models.list_models,lambda: {"choices": sd_models.checkpoint_tiles()},"refresh_checkpoint_Z")
|
245 |
+
smd_loadkeys = gr.Button(value="load keys",variant='primary')
|
246 |
+
with gr.Row():
|
247 |
+
keys = gr.Dataframe(headers=["No.","block","key"],)
|
248 |
+
|
249 |
+
with gr.Tab("Metadeta", elem_id="tab_metadata"):
|
250 |
+
with gr.Row():
|
251 |
+
meta_model_a = gr.Dropdown(sd_models.checkpoint_tiles(),elem_id="model_converter_model_name",label="read metadata",interactive=True)
|
252 |
+
create_refresh_button(meta_model_a, sd_models.list_models,lambda: {"choices": sd_models.checkpoint_tiles()},"refresh_checkpoint_Z")
|
253 |
+
smd_loadmetadata = gr.Button(value="load keys",variant='primary')
|
254 |
+
with gr.Row():
|
255 |
+
metadata = gr.TextArea()
|
256 |
+
|
257 |
+
smd_loadmetadata.click(
|
258 |
+
fn=loadmetadata,
|
259 |
+
inputs=[meta_model_a],
|
260 |
+
outputs=[metadata]
|
261 |
+
)
|
262 |
+
|
263 |
+
smd_loadkeys.click(
|
264 |
+
fn=loadkeys,
|
265 |
+
inputs=[smd_model_a],
|
266 |
+
outputs=[keys]
|
267 |
+
)
|
268 |
+
|
269 |
+
def unload():
|
270 |
+
if shared.sd_model == None: return "already unloaded"
|
271 |
+
sd_hijack.model_hijack.undo_hijack(shared.sd_model)
|
272 |
+
shared.sd_model = None
|
273 |
+
gc.collect()
|
274 |
+
devices.torch_gc()
|
275 |
+
return "model unloaded"
|
276 |
+
|
277 |
+
unloadmodel.click(fn=unload,outputs=[submit_result])
|
278 |
+
|
279 |
+
load_history.click(fn=load_historyf,outputs=[history ])
|
280 |
+
|
281 |
+
msettings=[weights_a,weights_b,model_a,model_b,model_c,base_alpha,base_beta,mode,calcmode,useblocks,custom_name,save_sets,id_sets,wpresets,deep,tensor]
|
282 |
+
imagegal = [mgallery,mgeninfo,mhtmlinfo,mhtmllog]
|
283 |
+
xysettings=[x_type,xgrid,y_type,ygrid,esettings]
|
284 |
+
|
285 |
+
s_reverse.click(fn = reversparams,
|
286 |
+
inputs =mergeid,
|
287 |
+
outputs = [submit_result,*msettings[0:8],*msettings[9:13],deep,calcmode]
|
288 |
+
)
|
289 |
+
|
290 |
+
merge.click(
|
291 |
+
fn=smergegen,
|
292 |
+
inputs=[*msettings,esettings1,*gensets.txt2img_preview_params,*hiresfix,batch_size,currentmodel,dfalse],
|
293 |
+
outputs=[submit_result,currentmodel]
|
294 |
+
)
|
295 |
+
|
296 |
+
mergeandgen.click(
|
297 |
+
fn=smergegen,
|
298 |
+
inputs=[*msettings,esettings1,*gensets.txt2img_preview_params,*hiresfix,batch_size,currentmodel,dtrue],
|
299 |
+
outputs=[submit_result,currentmodel,*imagegal]
|
300 |
+
)
|
301 |
+
|
302 |
+
gen.click(
|
303 |
+
fn=simggen,
|
304 |
+
inputs=[*gensets.txt2img_preview_params,*hiresfix,batch_size,currentmodel,id_sets],
|
305 |
+
outputs=[*imagegal],
|
306 |
+
)
|
307 |
+
|
308 |
+
s_reserve.click(
|
309 |
+
fn=numaker,
|
310 |
+
inputs=[*xysettings,*msettings,*gensets.txt2img_preview_params,*hiresfix,batch_size],
|
311 |
+
outputs=[numaframe]
|
312 |
+
)
|
313 |
+
|
314 |
+
s_reserve1.click(
|
315 |
+
fn=numaker,
|
316 |
+
inputs=[*xysettings,*msettings,*gensets.txt2img_preview_params,*hiresfix,batch_size],
|
317 |
+
outputs=[numaframe]
|
318 |
+
)
|
319 |
+
|
320 |
+
gengrid.click(
|
321 |
+
fn=numanager,
|
322 |
+
inputs=[dtrue,*xysettings,*msettings,*gensets.txt2img_preview_params,*hiresfix,batch_size],
|
323 |
+
outputs=[submit_result,currentmodel,*imagegal],
|
324 |
+
)
|
325 |
+
|
326 |
+
s_startreserve.click(
|
327 |
+
fn=numanager,
|
328 |
+
inputs=[dfalse,*xysettings,*msettings,*gensets.txt2img_preview_params,*hiresfix,batch_size],
|
329 |
+
outputs=[submit_result,currentmodel,*imagegal],
|
330 |
+
)
|
331 |
+
|
332 |
+
search.click(fn = searchhistory,inputs=[searchwrods,searchmode],outputs=[history])
|
333 |
+
|
334 |
+
s_reloadreserve.click(fn=nulister,inputs=[dfalse],outputs=[numaframe])
|
335 |
+
s_delreserve.click(fn=nulister,inputs=[s_delnum],outputs=[numaframe])
|
336 |
+
loadcachelist.click(fn=load_cachelist,inputs=[],outputs=[currentcache])
|
337 |
+
addtox.click(fn=lambda x:gr.Textbox.update(value = x),inputs=[inputer],outputs=[xgrid])
|
338 |
+
addtoy.click(fn=lambda x:gr.Textbox.update(value = x),inputs=[inputer],outputs=[ygrid])
|
339 |
+
|
340 |
+
stopgrid.click(fn=freezetime)
|
341 |
+
stopmerge.click(fn=freezemtime)
|
342 |
+
|
343 |
+
checkpoints.change(fn=lambda x:",".join(x),inputs=[checkpoints],outputs=[inputer])
|
344 |
+
blockids.change(fn=lambda x:" ".join(x),inputs=[blockids],outputs=[inputer])
|
345 |
+
calcmodes.change(fn=lambda x:",".join(x),inputs=[calcmodes],outputs=[inputer])
|
346 |
+
|
347 |
+
menbers = [base,in00,in01,in02,in03,in04,in05,in06,in07,in08,in09,in10,in11,mi00,ou00,ou01,ou02,ou03,ou04,ou05,ou06,ou07,ou08,ou09,ou10,ou11]
|
348 |
+
|
349 |
+
setalpha.click(fn=slider2text,inputs=menbers,outputs=[weights_a])
|
350 |
+
setbeta.click(fn=slider2text,inputs=menbers,outputs=[weights_b])
|
351 |
+
setx.click(fn=add_to_seq,inputs=[xgrid,weights_a],outputs=[xgrid])
|
352 |
+
|
353 |
+
readalpha.click(fn=text2slider,inputs=weights_a,outputs=menbers)
|
354 |
+
readbeta.click(fn=text2slider,inputs=weights_b,outputs=menbers)
|
355 |
+
|
356 |
+
x_type.change(fn=showxy,inputs=[x_type,y_type], outputs=[row_blockids,row_checkpoints,row_inputers,ygrid,row_esets,row_calcmode])
|
357 |
+
y_type.change(fn=showxy,inputs=[x_type,y_type], outputs=[row_blockids,row_checkpoints,row_inputers,ygrid,row_esets,row_calcmode])
|
358 |
+
x_randseednum.change(fn=makerand,inputs=[x_randseednum],outputs=[xgrid])
|
359 |
+
|
360 |
+
import subprocess
|
361 |
+
def openeditors():
|
362 |
+
subprocess.Popen(['start', filepath], shell=True)
|
363 |
+
|
364 |
+
def reloadpresets():
|
365 |
+
try:
|
366 |
+
with open(filepath) as f:
|
367 |
+
return f.read()
|
368 |
+
except OSError as e:
|
369 |
+
pass
|
370 |
+
|
371 |
+
def savepresets(text):
|
372 |
+
with open(filepath,mode = 'w') as f:
|
373 |
+
f.write(text)
|
374 |
+
|
375 |
+
s_reloadtext.click(fn=reloadpresets,inputs=[],outputs=[wpresets])
|
376 |
+
s_reloadtags.click(fn=tagdicter,inputs=[wpresets],outputs=[weightstags])
|
377 |
+
s_savetext.click(fn=savepresets,inputs=[wpresets],outputs=[])
|
378 |
+
s_openeditor.click(fn=openeditors,inputs=[],outputs=[])
|
379 |
+
|
380 |
+
return (supermergerui, "SuperMerger", "supermerger"),
|
381 |
+
|
382 |
+
msearch = []
|
383 |
+
mlist=[]
|
384 |
+
|
385 |
+
def loadmetadata(model):
|
386 |
+
import json
|
387 |
+
checkpoint_info = sd_models.get_closet_checkpoint_match(model)
|
388 |
+
if ".safetensors" not in checkpoint_info.filename: return "no metadata(not safetensors)"
|
389 |
+
sdict = sd_models.read_metadata_from_safetensors(checkpoint_info.filename)
|
390 |
+
if sdict == {}: return "no metadata"
|
391 |
+
return json.dumps(sdict,indent=4)
|
392 |
+
|
393 |
+
def load_historyf():
|
394 |
+
filepath = os.path.join(path_root,"mergehistory.csv")
|
395 |
+
global mlist,msearch
|
396 |
+
msearch = []
|
397 |
+
mlist=[]
|
398 |
+
try:
|
399 |
+
with open(filepath, 'r') as f:
|
400 |
+
reader = csv.reader(f)
|
401 |
+
mlist = [raw for raw in reader]
|
402 |
+
mlist = mlist[1:]
|
403 |
+
for m in mlist:
|
404 |
+
msearch.append(" ".join(m))
|
405 |
+
maxlen = len(mlist[-1][0])
|
406 |
+
for i,m in enumerate(mlist):
|
407 |
+
mlist[i][0] = mlist[i][0].zfill(maxlen)
|
408 |
+
return mlist
|
409 |
+
except:
|
410 |
+
return [["no data","",""],]
|
411 |
+
|
412 |
+
def searchhistory(words,searchmode):
|
413 |
+
outs =[]
|
414 |
+
ando = "and" in searchmode
|
415 |
+
words = words.split(" ") if " " in words else [words]
|
416 |
+
for i, m in enumerate(msearch):
|
417 |
+
hit = ando
|
418 |
+
for w in words:
|
419 |
+
if ando:
|
420 |
+
if w not in m:hit = False
|
421 |
+
else:
|
422 |
+
if w in m:hit = True
|
423 |
+
print(i,len(mlist))
|
424 |
+
if hit :outs.append(mlist[i])
|
425 |
+
|
426 |
+
if outs == []:return [["no result","",""],]
|
427 |
+
return outs
|
428 |
+
|
429 |
+
#msettings=[0 weights_a,1 weights_b,2 model_a,3 model_b,4 model_c,5 base_alpha,6 base_beta,7 mode,8 useblocks,9 custom_name,10 save_sets,11 id_sets,12 wpresets]
|
430 |
+
|
431 |
+
def reversparams(id):
|
432 |
+
def selectfromhash(hash):
|
433 |
+
for model in sd_models.checkpoint_tiles():
|
434 |
+
if hash in model:
|
435 |
+
return model
|
436 |
+
return ""
|
437 |
+
try:
|
438 |
+
idsets = rwmergelog(id = id)
|
439 |
+
except:
|
440 |
+
return [gr.update(value = "ERROR: history file could not open"),*[gr.update() for x in range(14)]]
|
441 |
+
if type(idsets) == str:
|
442 |
+
print("ERROR")
|
443 |
+
return [gr.update(value = idsets),*[gr.update() for x in range(14)]]
|
444 |
+
if idsets[0] == "ID":return [gr.update(value ="ERROR: no history"),*[gr.update() for x in range(14)]]
|
445 |
+
mgs = idsets[3:]
|
446 |
+
if mgs[0] == "":mgs[0] = "0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5"
|
447 |
+
if mgs[1] == "":mgs[1] = "0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2"
|
448 |
+
mgs[2] = selectfromhash(mgs[2]) if len(mgs[2]) > 5 else ""
|
449 |
+
mgs[3] = selectfromhash(mgs[3]) if len(mgs[3]) > 5 else ""
|
450 |
+
mgs[4] = selectfromhash(mgs[4]) if len(mgs[4]) > 5 else ""
|
451 |
+
mgs[8] = True if mgs[8] =="True" else False
|
452 |
+
mgs[10] = mgs[10].replace("[","").replace("]","").replace("'", "")
|
453 |
+
mgs[10] = [x.strip() for x in mgs[10].split(",")]
|
454 |
+
mgs[11] = mgs[11].replace("[","").replace("]","").replace("'", "")
|
455 |
+
mgs[11] = [x.strip() for x in mgs[11].split(",")]
|
456 |
+
while len(mgs) < 14:
|
457 |
+
mgs.append("")
|
458 |
+
mgs[13] = "normal" if mgs[13] == "" else mgs[13]
|
459 |
+
return [gr.update(value = "setting loaded") ,*[gr.update(value = x) for x in mgs[0:14]]]
|
460 |
+
|
461 |
+
def add_to_seq(seq,maker):
|
462 |
+
return gr.Textbox.update(value = maker if seq=="" else seq+"\r\n"+maker)
|
463 |
+
|
464 |
+
def load_cachelist():
|
465 |
+
text = ""
|
466 |
+
for x in checkpoints_loaded.keys():
|
467 |
+
text = text +"\r\n"+ x.model_name
|
468 |
+
return text.replace("\r\n","",1)
|
469 |
+
|
470 |
+
def makerand(num):
|
471 |
+
text = ""
|
472 |
+
for x in range(int(num)):
|
473 |
+
text = text +"-1,"
|
474 |
+
text = text[:-1]
|
475 |
+
return text
|
476 |
+
|
477 |
+
#row_blockids,row_checkpoints,row_inputers,ygrid
|
478 |
+
def showxy(x,y):
|
479 |
+
flags =[False]*6
|
480 |
+
t = TYPESEG
|
481 |
+
txy = t[x] + t[y]
|
482 |
+
if "model" in txy : flags[1] = flags[2] = True
|
483 |
+
if "pinpoint" in txy : flags[0] = flags[2] = True
|
484 |
+
if "effective" in txy or "element" in txy : flags[4] = True
|
485 |
+
if "calcmode" in txy : flags[5] = True
|
486 |
+
if not "none" in t[y] : flags[3] = flags[2] = True
|
487 |
+
return [gr.update(visible = x) for x in flags]
|
488 |
+
|
489 |
+
def text2slider(text):
|
490 |
+
vals = [t.strip() for t in text.split(",")]
|
491 |
+
return [gr.update(value = float(v)) for v in vals]
|
492 |
+
|
493 |
+
def slider2text(a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z):
|
494 |
+
numbers = [a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z]
|
495 |
+
numbers = [str(x) for x in numbers]
|
496 |
+
return gr.update(value = ",".join(numbers) )
|
497 |
+
|
498 |
+
def tagdicter(presets):
|
499 |
+
presets=presets.splitlines()
|
500 |
+
wdict={}
|
501 |
+
for l in presets:
|
502 |
+
w=[]
|
503 |
+
if ":" in l :
|
504 |
+
key = l.split(":",1)[0]
|
505 |
+
w = l.split(":",1)[1]
|
506 |
+
if "\t" in l:
|
507 |
+
key = l.split("\t",1)[0]
|
508 |
+
w = l.split("\t",1)[1]
|
509 |
+
if len([w for w in w.split(",")]) == 26:
|
510 |
+
wdict[key.strip()]=w
|
511 |
+
return ",".join(list(wdict.keys()))
|
512 |
+
|
513 |
+
def loadkeys(model_a):
|
514 |
+
checkpoint_info = sd_models.get_closet_checkpoint_match(model_a)
|
515 |
+
sd = sd_models.read_state_dict(checkpoint_info.filename,"cpu")
|
516 |
+
keys = []
|
517 |
+
for i, key in enumerate(sd.keys()):
|
518 |
+
re_inp = re.compile(r'\.input_blocks\.(\d+)\.') # 12
|
519 |
+
re_mid = re.compile(r'\.middle_block\.(\d+)\.') # 1
|
520 |
+
re_out = re.compile(r'\.output_blocks\.(\d+)\.') # 12
|
521 |
+
|
522 |
+
weight_index = -1
|
523 |
+
blockid=["BASE","IN00","IN01","IN02","IN03","IN04","IN05","IN06","IN07","IN08","IN09","IN10","IN11","M00","OUT00","OUT01","OUT02","OUT03","OUT04","OUT05","OUT06","OUT07","OUT08","OUT09","OUT10","OUT11","Not Merge"]
|
524 |
+
|
525 |
+
NUM_INPUT_BLOCKS = 12
|
526 |
+
NUM_MID_BLOCK = 1
|
527 |
+
NUM_OUTPUT_BLOCKS = 12
|
528 |
+
NUM_TOTAL_BLOCKS = NUM_INPUT_BLOCKS + NUM_MID_BLOCK + NUM_OUTPUT_BLOCKS
|
529 |
+
|
530 |
+
if 'time_embed' in key:
|
531 |
+
weight_index = -2 # before input blocks
|
532 |
+
elif '.out.' in key:
|
533 |
+
weight_index = NUM_TOTAL_BLOCKS - 1 # after output blocks
|
534 |
+
else:
|
535 |
+
m = re_inp.search(key)
|
536 |
+
if m:
|
537 |
+
inp_idx = int(m.groups()[0])
|
538 |
+
weight_index = inp_idx
|
539 |
+
else:
|
540 |
+
m = re_mid.search(key)
|
541 |
+
if m:
|
542 |
+
weight_index = NUM_INPUT_BLOCKS
|
543 |
+
else:
|
544 |
+
m = re_out.search(key)
|
545 |
+
if m:
|
546 |
+
out_idx = int(m.groups()[0])
|
547 |
+
weight_index = NUM_INPUT_BLOCKS + NUM_MID_BLOCK + out_idx
|
548 |
+
keys.append([i,blockid[weight_index+1],key])
|
549 |
+
return keys
|
550 |
+
|
551 |
+
script_callbacks.on_ui_tabs(on_ui_tabs)
|
552 |
+
script_callbacks.on_ui_train_tabs(on_ui_train_tabs)
|
extensions/microsoftexcel-tunnels/.gitignore
ADDED
@@ -0,0 +1,176 @@
# Created by https://www.toptal.com/developers/gitignore/api/python
# Edit at https://www.toptal.com/developers/gitignore?templates=python

### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

### Python Patch ###
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
poetry.toml

# ruff
.ruff_cache/

# End of https://www.toptal.com/developers/gitignore/api/python

id_rsa
id_rsa.pub
extensions/microsoftexcel-tunnels/.pre-commit-config.yaml
ADDED
@@ -0,0 +1,25 @@
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: trailing-whitespace
        args: [--markdown-linebreak-ext=md]
      - id: end-of-file-fixer

  - repo: https://github.com/asottile/pyupgrade
    rev: v3.3.1
    hooks:
      - id: pyupgrade
        args: [--py310-plus]

  - repo: https://github.com/psf/black
    rev: 23.1.0
    hooks:
      - id: black

  - repo: https://github.com/charliermarsh/ruff-pre-commit
    # Ruff version.
    rev: "v0.0.244"
    hooks:
      - id: ruff
        args: [--fix]
extensions/microsoftexcel-tunnels/LICENSE.md
ADDED
@@ -0,0 +1,22 @@
The MIT License (MIT)

Copyright (c) 2023 Bingsu

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
extensions/microsoftexcel-tunnels/README.md
ADDED
@@ -0,0 +1,21 @@
# sd-webui-tunnels

Tunneling extension for [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui)

## Usage

### [cloudflared](https://try.cloudflare.com/)

Add `--cloudflared` to the command-line options.

### [localhost.run](https://localhost.run/)

Add `--localhostrun` to the command-line options.

### [remote.moe](https://github.com/fasmide/remotemoe)

Add `--remotemoe` to the command-line options.

The advantage of `remote.moe` is that, as long as the same ssh key is used, the same URL is generated.

The ssh keys for `localhost.run` and `remote.moe` are created with the name `id_rsa` in the script's root folder. If that location is not writable, the key is created in a temporary folder instead, so a different URL is generated each time.
extensions/microsoftexcel-tunnels/__pycache__/preload.cpython-310.pyc
ADDED
Binary file (623 Bytes).
extensions/microsoftexcel-tunnels/install.py
ADDED
@@ -0,0 +1,4 @@
import launch

if not launch.is_installed("pycloudflared"):
    launch.run_pip("install pycloudflared", "pycloudflared")
extensions/microsoftexcel-tunnels/preload.py
ADDED
@@ -0,0 +1,21 @@
import argparse


def preload(parser: argparse.ArgumentParser):
    parser.add_argument(
        "--cloudflared",
        action="store_true",
        help="use trycloudflare, alternative to gradio --share",
    )

    parser.add_argument(
        "--localhostrun",
        action="store_true",
        help="use localhost.run, alternative to gradio --share",
    )

    parser.add_argument(
        "--remotemoe",
        action="store_true",
        help="use remote.moe, alternative to gradio --share",
    )
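The webui calls each extension's `preload()` hook before it parses command-line arguments, so the three tunnel switches become ordinary boolean options. A minimal sketch of exercising the hook outside the webui follows; the parser and argv used here are illustrative only.

```python
import argparse

parser = argparse.ArgumentParser()
preload(parser)  # registers --cloudflared, --localhostrun and --remotemoe

args = parser.parse_args(["--cloudflared"])
print(args.cloudflared, args.localhostrun, args.remotemoe)  # True False False
```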
extensions/microsoftexcel-tunnels/pyproject.toml
ADDED
@@ -0,0 +1,25 @@
[project]
name = "sd-webui-tunnels"
version = "23.2.1"
description = "Tunneling extension for automatic1111 sd-webui"
authors = [
    {name = "dowon", email = "ks2515@naver.com"},
]
requires-python = ">=3.8"
readme = "README.md"
license = {text = "MIT"}

[project.urls]
repository = "https://github.com/Bing-su/sd-webui-tunnels"

[tool.isort]
profile = "black"
known_first_party = ["modules", "launch"]

[tool.ruff]
select = ["A", "B", "C4", "E", "F", "I001", "N", "PT", "UP", "W"]
ignore = ["B008", "B905", "E501"]
unfixable = ["F401"]

[tool.ruff.isort]
known-first-party = ["modules", "launch"]
extensions/microsoftexcel-tunnels/scripts/__pycache__/ssh_tunnel.cpython-310.pyc
ADDED
Binary file (2.34 kB).
extensions/microsoftexcel-tunnels/scripts/__pycache__/try_cloudflare.cpython-310.pyc
ADDED
Binary file (597 Bytes).
extensions/microsoftexcel-tunnels/scripts/ssh_tunnel.py
ADDED
@@ -0,0 +1,81 @@
import atexit
import re
import shlex
import subprocess
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Union
from gradio import strings
import os

from modules.shared import cmd_opts

LOCALHOST_RUN = "localhost.run"
REMOTE_MOE = "remote.moe"
localhostrun_pattern = re.compile(r"(?P<url>https?://\S+\.lhr\.life)")
remotemoe_pattern = re.compile(r"(?P<url>https?://\S+\.remote\.moe)")


def gen_key(path: Union[str, Path]) -> None:
    path = Path(path)
    arg_string = f'ssh-keygen -t rsa -b 4096 -N "" -q -f {path.as_posix()}'
    args = shlex.split(arg_string)
    subprocess.run(args, check=True)
    path.chmod(0o600)


def ssh_tunnel(host: str = LOCALHOST_RUN) -> None:
    ssh_name = "id_rsa"
    ssh_path = Path(__file__).parent.parent / ssh_name

    tmp = None
    if not ssh_path.exists():
        try:
            gen_key(ssh_path)
        # write permission error or etc
        except subprocess.CalledProcessError:
            tmp = TemporaryDirectory()
            ssh_path = Path(tmp.name) / ssh_name
            gen_key(ssh_path)

    port = cmd_opts.port if cmd_opts.port else 7860

    arg_string = f"ssh -R 80:127.0.0.1:{port} -o StrictHostKeyChecking=no -i {ssh_path.as_posix()} {host}"
    args = shlex.split(arg_string)

    tunnel = subprocess.Popen(
        args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8"
    )

    atexit.register(tunnel.terminate)
    if tmp is not None:
        atexit.register(tmp.cleanup)

    tunnel_url = ""
    lines = 27 if host == LOCALHOST_RUN else 5
    pattern = localhostrun_pattern if host == LOCALHOST_RUN else remotemoe_pattern

    for _ in range(lines):
        line = tunnel.stdout.readline()
        if line.startswith("Warning"):
            print(line, end="")

        url_match = pattern.search(line)
        if url_match:
            tunnel_url = url_match.group("url")
            break
    else:
        raise RuntimeError(f"Failed to run {host}")

    # print(f" * Running on {tunnel_url}")
    os.environ['webui_url'] = tunnel_url
    colab_url = os.getenv('colab_url')
    strings.en["SHARE_LINK_MESSAGE"] = f"Running on public URL (recommended): {tunnel_url}"


if cmd_opts.localhostrun:
    print("localhost.run detected, trying to connect...")
    ssh_tunnel(LOCALHOST_RUN)

if cmd_opts.remotemoe:
    print("remote.moe detected, trying to connect...")
    ssh_tunnel(REMOTE_MOE)
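`ssh_tunnel()` discovers the public address by scanning the ssh process output for the first line that matches one of the two precompiled URL patterns. A small standalone sketch of that matching follows; the sample output lines are hypothetical, not real service output.

```python
import re

localhostrun_pattern = re.compile(r"(?P<url>https?://\S+\.lhr\.life)")
remotemoe_pattern = re.compile(r"(?P<url>https?://\S+\.remote\.moe)")

sample_lines = [
    "Warning: Permanently added 'localhost.run' to the list of known hosts.",
    "Connect to https://abc123.lhr.life and enjoy",
    "your tunnel is http://example.remote.moe",
]

for line in sample_lines:
    for pattern in (localhostrun_pattern, remotemoe_pattern):
        match = pattern.search(line)
        if match:
            print(match.group("url"))
```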
extensions/microsoftexcel-tunnels/scripts/try_cloudflare.py
ADDED
@@ -0,0 +1,15 @@
# credit to camenduru senpai
from pycloudflared import try_cloudflare

from modules.shared import cmd_opts

from gradio import strings

import os

if cmd_opts.cloudflared:
    print("cloudflared detected, trying to connect...")
    port = cmd_opts.port if cmd_opts.port else 7860
    tunnel_url = try_cloudflare(port=port, verbose=False)
    os.environ['webui_url'] = tunnel_url.tunnel
    strings.en["PUBLIC_SHARE_TRUE"] = f"Running on public URL: {tunnel_url.tunnel}"
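As the script above reads it, `try_cloudflare` from pycloudflared returns an object whose `tunnel` attribute holds the public URL. A minimal standalone sketch, assuming pycloudflared is installed and a local app is already listening on port 7860:

```python
from pycloudflared import try_cloudflare

urls = try_cloudflare(port=7860, verbose=False)
print("public URL:", urls.tunnel)  # forwards to http://127.0.0.1:7860
```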
extensions/microsoftexcel-tunnels/ssh_tunnel.py
ADDED
@@ -0,0 +1,86 @@
import atexit
import re
import shlex
import subprocess
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Union
from gradio import strings
import os

from modules.shared import cmd_opts

LOCALHOST_RUN = "localhost.run"
REMOTE_MOE = "remote.moe"
localhostrun_pattern = re.compile(r"(?P<url>https?://\S+\.lhr\.life)")
remotemoe_pattern = re.compile(r"(?P<url>https?://\S+\.remote\.moe)")


def gen_key(path: Union[str, Path]) -> None:
    path = Path(path)
    arg_string = f'ssh-keygen -t rsa -b 4096 -N "" -q -f {path.as_posix()}'
    args = shlex.split(arg_string)
    subprocess.run(args, check=True)
    path.chmod(0o600)


def ssh_tunnel(host: str = LOCALHOST_RUN) -> None:
    ssh_name = "id_rsa"
    ssh_path = Path(__file__).parent.parent / ssh_name

    tmp = None
    if not ssh_path.exists():
        try:
            gen_key(ssh_path)
        # write permission error or etc
        except subprocess.CalledProcessError:
            tmp = TemporaryDirectory()
            ssh_path = Path(tmp.name) / ssh_name
            gen_key(ssh_path)

    port = cmd_opts.port if cmd_opts.port else 7860

    arg_string = f"ssh -R 80:127.0.0.1:{port} -o StrictHostKeyChecking=no -i {ssh_path.as_posix()} {host}"
    args = shlex.split(arg_string)

    tunnel = subprocess.Popen(
        args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding="utf-8"
    )

    atexit.register(tunnel.terminate)
    if tmp is not None:
        atexit.register(tmp.cleanup)

    tunnel_url = ""
    lines = 27 if host == LOCALHOST_RUN else 5
    pattern = localhostrun_pattern if host == LOCALHOST_RUN else remotemoe_pattern

    for _ in range(lines):
        line = tunnel.stdout.readline()
        if line.startswith("Warning"):
            print(line, end="")

        url_match = pattern.search(line)
        if url_match:
            tunnel_url = url_match.group("url")
            break
    else:
        raise RuntimeError(f"Failed to run {host}")

    # print(f" * Running on {tunnel_url}")
    os.environ['webui_url'] = tunnel_url
    colab_url = os.getenv('colab_url')
    strings.en["SHARE_LINK_MESSAGE"] = f"Public WebUI Colab URL: {tunnel_url}"


def googleusercontent_tunnel():
    colab_url = os.getenv('colab_url')
    strings.en["SHARE_LINK_MESSAGE"] = f"WebUI Colab URL: {colab_url}"

if cmd_opts.localhostrun:
    print("localhost.run detected, trying to connect...")
    ssh_tunnel(LOCALHOST_RUN)

if cmd_opts.remotemoe:
    print("remote.moe detected, trying to connect...")
    ssh_tunnel(REMOTE_MOE)
extensions/put extensions here.txt
ADDED
File without changes
extensions/sd-webui-lora-block-weight/README.md
ADDED
@@ -0,0 +1,350 @@
# LoRA Block Weight
- custom script for [AUTOMATIC1111's stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
- When applying a LoRA, its strength can be set block by block.

### Updates
2023.5.24.2000(JST)
- changed the directory for presets (extensions/sd-webui-lora-block-weight/scripts/)

2023.5.12.2100(JST)
- changed the syntax for LyCORIS

2023.04.14.2000(JST)
- support LyCORIS (a1111-sd-webui-lycoris)

2023.03.20.2030(JST)
- Comment lines can now be added to presets
- XYZ plot now supports hires.fix

2023.03.16.2030(JST)
- Support [LyCORIS](https://github.com/KohakuBlueleaf/LyCORIS)

To use LyCORIS, the separate [LyCORIS extension](https://github.com/KohakuBlueleaf/a1111-sd-webui-locon) is required.

A parallel explanation (originally in Japanese) is given in the [second half](#概要).

# Overview
LoRA is a powerful tool, but it is sometimes difficult to control and can affect areas that you do not want it to affect. This script lets you set the weights block by block, which may help you get the image you want.

## Usage
Place lora_block_weight.py in the script folder, or install the extension from the Extensions tab in the web UI. After installing, restart web-ui.bat.

### Active
Check this box to activate the extension.

### Prompt
In the prompt box, enter the LoRA you wish to use as usual. Then type ":" after the strength value and enter either the weights or an identifier. Identifiers can be edited in the Weights setting.
```
<lora:"lora name":1:0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0>
<lora:"lora name":1:0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0> (a1111-sd-webui-locon, etc.)
<lora:"lora name":1:IN02>
<lyco:"lora name":1:1:lbw=IN02> (a1111-sd-webui-lycoris)
<lyco:"lora name":1:1:lbw=1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0> (a1111-sd-webui-lycoris)
```
For LyCORIS used through a1111-sd-webui-lycoris the syntax is different:
`lbw=IN02` is used, and the other arguments (unet weight, etc.) follow the LyCORIS syntax.
a1111-sd-webui-lycoris is still under development, so this syntax may change.

The LoRA strength is still in effect and applies to all blocks.
Identifiers are case-sensitive.
For LyCORIS, full-model blocks are used, so you need to input 26 weights.
You can also use a LoRA preset for LyCORIS; in that case the weight of blocks not present in the LoRA is set to 1.
If a line does not follow the above format, the preset treats it as a comment line.

### Weights Setting
Enter an identifier and its weights.
Unlike the full model, a LoRA is divided into 17 blocks, including the text encoder. Therefore, enter 17 values.
BASE, IN, OUT, etc. are the blocks equivalent to those of the full model.

|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|
|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|
|BASE|IN01|IN02|IN04|IN05|IN07|IN08|MID|OUT03|OUT04|OUT05|OUT06|OUT07|OUT08|OUT09|OUT10|OUT11|

LyCORIS, etc.
|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|
|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|
|BASE|IN00|IN01|IN02|IN03|IN04|IN05|IN06|IN07|IN08|IN09|IN10|IN11|MID|OUT00|OUT01|OUT02|OUT03|OUT04|OUT05|OUT06|OUT07|OUT08|OUT09|OUT10|OUT11|

### Special Values (Random)
Normally a numerical value must be entered for each block, but entering `R` or `U` inserts a random value instead.
R : a value with 3 decimal places from 0 to 1
U : a value with 3 decimal places from -1.5 to 1.5

For example, with ROUT:1,1,1,1,1,1,1,1,R,R,R,R,R,R,R,R,R
only the OUT blocks are randomized.
The randomized values are printed to the command prompt when the image is generated.

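A minimal sketch of how the `R` and `U` placeholders could be resolved into numbers (the ranges follow the description above; the helper name is made up):

```python
import random

def resolve_special(token: str) -> float:
    # Hypothetical helper: turn the R/U placeholders into numbers as described above.
    if token == "R":
        return round(random.random(), 3)            # 0 to 1, three decimal places
    if token == "U":
        return round(random.uniform(-1.5, 1.5), 3)  # -1.5 to 1.5, three decimal places
    return float(token)

weights = [resolve_special(t) for t in "1,1,1,1,1,1,1,1,R,R,R,R,R,R,R,R,R".split(",")]
print(weights)  # the nine OUT entries come out randomized
```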
### Special Values (Dynamic)
The special value `X` may also be included to use a dynamic weight specified in the LoRA syntax. It is activated by adding an extra weight value after the chosen `Original Weight` identifier.

For example, with ROUT:X,1,1,1,1,1,1,1,1,1,1,1,X,X,X,X,X and a prompt containing \<lora:my_lora:0.5:ROUT:0.7\>, the `X` weights in ROUT are replaced with `0.7` at runtime.

> NOTE: If you select an `Original Weight` tag that has a dynamic weight (`X`) and you do not specify a value in the LoRA syntax, it will default to `1`.

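A sketch of how a dynamic `X` entry could be substituted at runtime with the extra weight given in the LoRA syntax (the helper name is made up; the default of 1 matches the note above):

```python
def resolve_dynamic(token: str, extra_weight=None) -> float:
    # Hypothetical helper: "X" picks up the extra weight from a prompt such as
    # <lora:my_lora:0.5:ROUT:0.7>, defaulting to 1 when none is given (see note above).
    if token == "X":
        return float(extra_weight) if extra_weight is not None else 1.0
    return float(token)

preset = "X,1,1,1,1,1,1,1,1,1,1,1,X,X,X,X,X".split(",")
resolved = [resolve_dynamic(t, 0.7) for t in preset]  # the X entries become 0.7
```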
### Save Presets

The "Save Presets" button saves the text in the current text box. It is usually easier to edit in a text editor, so use the "Open TextEditor" button to open one, edit the text there, and then reload.
The text box above the Weights setting lists the currently available identifiers; it is useful for copying and pasting into an XY plot. An entry must contain 17 weights to appear in the list.

### Fun Usage
Used in conjunction with the XY plot, it is possible to examine the impact of each block.
![xy_grid-0017-4285963917](https://user-images.githubusercontent.com/122196982/215341315-493ce5f9-1d6e-4990-a38c-6937e78c6b46.jpg)

The setting values are as follows.
NOT:0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
ALL:1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
INS:1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0
IND:1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0
INALL:1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0
MIDD:1,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0
OUTD:1,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0
OUTS:1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1
OUTALL:1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1

## XYZ Plotting Function
The optimal values can be searched for by changing the value of each block individually.
### Usage
Check "Active" to activate the function. If a Script (such as the XYZ plot built into Automatic1111) is enabled, the Script takes precedence.
Hires. fix is not supported. Batch size is fixed to 1. Batch count should be set to 1.
Enter XYZ as the identifier of the LoRA whose weights you want to change. It works even if the preset contains no value corresponding to XYZ; if such a value is present, it is used as the initial value.
Entering ZYX gives the inverted values automatically.
This feature makes it possible to match the weights of two LoRAs.
Entering XYZ for LoRA1 and ZYX for LoRA2, you get
LoRA1 1,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0
LoRA2 0,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1
### Axis type
#### values
Sets the weights of the blocks to be changed. Enter the values separated by commas, e.g. "0,0.25,0.5,0.75,1".

#### Block ID
If a block ID is entered, only that block changes to the value specified by values. As with the other types, use commas to separate entries. Multiple blocks can be changed at the same time by separating them with a space or a hyphen (a hyphen gives a range). A leading NOT inverts the selection, so NOT IN09-OUT02 changes all blocks except IN09-OUT02.

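A sketch of how a Block ID entry such as `NOT IN09-OUT02` could be expanded against the 26-block list (the identifiers follow the changeable-block list shown in the extension's XYZ plot panel, where the middle block is written `M00`; the helper function is illustrative only):

```python
BLOCKID = ["BASE","IN00","IN01","IN02","IN03","IN04","IN05","IN06","IN07","IN08","IN09",
           "IN10","IN11","M00","OUT00","OUT01","OUT02","OUT03","OUT04","OUT05","OUT06",
           "OUT07","OUT08","OUT09","OUT10","OUT11"]

def expand_block_ids(entry: str) -> list:
    # Illustrative helper: expand hyphen ranges such as "IN09-OUT02" and invert
    # the selection when the entry starts with "NOT".
    parts = entry.split()
    invert = parts[0] == "NOT"
    if invert:
        parts = parts[1:]
    selected = set()
    for part in parts:
        if "-" in part:
            a, b = (BLOCKID.index(x) for x in part.split("-"))
            selected.update(BLOCKID[min(a, b):max(a, b) + 1])
        else:
            selected.add(part)
    return [b for b in BLOCKID if (b in selected) != invert]

print(expand_block_ids("NOT IN09-OUT02"))  # every block except IN09 ... OUT02
```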
#### seed
Changes the seed; intended to be specified on the Z axis.

#### Original Weights
Specifies the initial weights of each block; enter an identifier registered in the presets. If Original Weights is enabled, the value entered for XYZ is ignored.

### Input example
X : values, value : 1,0.25,0.5,0.75,1
Y : Block ID, value : BASE,IN01-IN08,IN05-OUT05,OUT03-OUT11,NOT OUT03-OUT11
Z : Original Weights, value : NONE,ALL0.5,ALL

In this case, an XY plot is created for each of the initial values NONE, ALL0.5 and ALL.
If you select seed for Z and enter -1,-1,-1, the XY plot is created three times with different seeds.

### Effective Block Analyzer
This function checks which blocks are contributing strongly. The effect of a block is visualized and quantified by setting the strength of the other blocks to 1, decreasing the strength of the block you want to examine, and taking the difference between the two images.
#### Range
If you enter 0.5,1, all initial values are set to 1 and only the target block is calculated at 0.5. Normally 0.5 makes a visible difference, but for some LoRAs the difference can be hard to see; in that case set 0.5 to 0 or to a negative value.

#### settings
##### diff color
Specifies the background color of the difference image.

##### change X-Y
Swaps the X and Y axes. By default, Block is assigned to the Y axis.

##### Threshold
Sets the threshold above which a pixel counts as changed when computing the difference. The default is usually fine, but lower it if you want to detect subtle differences in color, etc.

#### Blocks
Enter the blocks to be examined, using the same format as for XYZ plots.

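Roughly, the analyzer's Diff value is the percentage of pixels whose absolute difference from the reference image exceeds the threshold. A minimal sketch of that computation (assuming OpenCV and NumPy, which the bundled script itself imports):

```python
import cv2
import numpy as np

def diff_percentage(reference: np.ndarray, variant: np.ndarray, thresh: int = 20) -> float:
    # Percentage of pixels whose absolute difference exceeds the threshold.
    abs_diff = cv2.absdiff(reference, variant)
    mask = cv2.threshold(abs_diff, thresh, 255, cv2.THRESH_BINARY)[1].astype(np.uint8)
    return np.count_nonzero(mask) * 100 / mask.size
```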
For more information on block-by-block merging, see

https://github.com/bbc-mc/sdweb-merge-block-weighted-gui

# 概要
LoRA is a powerful tool, but it can be difficult to control and may affect areas you do not want it to affect. With this script, the degree to which a LoRA is applied can be set for each U-Net block, which may help you get closer to the image you are looking for.

## Usage
Place lora_block_weight.py in the script folder. After installing, restart web-ui.bat.

### Active
Check this box to enable the extension.

### Prompt
In the prompt box, enter the LoRA you want to use as usual. After the strength value, type ":" and enter either the weights or an identifier. Identifiers are edited in the Weights setting.
```
<lora:"lora name":1:0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0>
<lora:"lora name":1:0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0> (a1111-sd-webui-locon, etc.)
<lyco:"lora name":1:1:lbw=IN02> (a1111-sd-webui-lycoris)
<lyco:"lora name":1:1:lbw=1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0> (a1111-sd-webui-lycoris)
```
The LoRA strength is still in effect and applies to all blocks. Identifiers are case-sensitive.
LoRA presets can also be used for LyCORIS; in that case the weights of blocks not used by the LoRA are set to 1.
Lines not in the above format are treated as comment lines in the presets.
When using the a1111-sd-webui-lycoris version of LyCORIS the syntax is different: use `lbw=IN02`. The order of the arguments does not matter; follow the LyCORIS syntax for the rest (see the LyCORIS documentation for details) and enter an identifier. The a1111-sd-webui-lycoris extension is still under development, so this syntax may change.

### Weights setting
Enter an identifier and its weights.
Unlike the full model, a LoRA is divided into 17 blocks, including the text encoder, so enter 17 values.
BASE, IN, OUT, etc. are the blocks corresponding to the full model.

|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|
|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|
|BASE|IN01|IN02|IN04|IN05|IN07|IN08|MID|OUT03|OUT04|OUT05|OUT06|OUT07|OUT08|OUT09|OUT10|OUT11|

For LyCORIS, etc.
|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20|21|22|23|24|25|26|
|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|
|BASE|IN00|IN01|IN02|IN03|IN04|IN05|IN06|IN07|IN08|IN09|IN10|IN11|MID|OUT00|OUT01|OUT02|OUT03|OUT04|OUT05|OUT06|OUT07|OUT08|OUT09|OUT10|OUT11|

### Special values
Normally a numerical value must be entered for each block, but entering R or U inserts a random value instead.
R : a value with 3 decimal places from 0 to 1
U : a value with 3 decimal places from -1.5 to 1.5

For example, with ROUT:1,1,1,1,1,1,1,1,R,R,R,R,R,R,R,R,R
only the OUT blocks are randomized.
The randomized values are printed to the command prompt when the image is generated.

The save button saves the text currently in the text box. It is easier to work in a text editor, so open one with the "Open TextEditor" button, edit the text there, and reload.
The text box above the Weights setting lists the currently available identifiers; it is handy for copying and pasting into an XY plot. An entry must contain 17 values to appear in the list.

### Fun usage
Combined with the XY plot, you can examine the influence of each block.
![xy_grid-0017-4285963917](https://user-images.githubusercontent.com/122196982/215341315-493ce5f9-1d6e-4990-a38c-6937e78c6b46.jpg)

The setting values are as follows.
NOT:0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
ALL:1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
INS:1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0
IND:1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0
INALL:1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0
MIDD:1,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0
OUTD:1,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0
OUTS:1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1
OUTALL:1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1

## XYZ plotting function
You can search for the best values by changing each block's value individually.
### Usage
Check Active to enable it. If a Script (such as the XYZ plot built into Automatic1111) is enabled, that Script takes precedence, so select None there.
Hires. fix is not supported. Batch size is fixed to 1. Set Batch count to 1.
Enter XYZ as the identifier of the LoRA you want to vary: \<lora:"lora name":1:XYZ>. It works even if the presets contain no value for XYZ; in that case all weights start from 0. If a value for XYZ is present, it is used as the initial value.
Entering ZYX gives the opposite values of XYZ, which is useful for matching the weights of two LoRAs.
For example, entering XYZ for LoRA1 and ZYX for LoRA2 gives
LoRA1 1,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0
LoRA2 0,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1
### Axis types
#### values
Sets the weights of the blocks to be changed. Enter comma-separated values such as "0,0.25,0.5,0.75,1".

#### Block ID
If a block ID is entered, only that block changes to the value specified by values. As with the other types, separate entries with commas. Multiple blocks can be changed at once by separating them with spaces or hyphens. A leading NOT inverts the selection: NOT IN09-OUT02 changes everything except IN09-OUT02. NOT only takes effect at the beginning of an entry. IN08-M00-OUT03 is treated as one continuous range.

#### Seed
Changes the seed; intended to be specified on the Z axis.

#### Original Weights
Specifies the initial weights of each block. Enter an identifier registered in the presets. If Original Weights is enabled, the value entered for XYZ is ignored.

### Input example
X : values, values : 1,0.25,0.5,0.75,1
Y : Block ID, values : BASE,IN01-IN08,IN05-OUT05,OUT03-OUT11,NOT OUT03-OUT11
Z : Original Weights, values : NONE,ALL0.5,ALL

In this case, an XY plot is created for each of the initial values NONE, ALL0.5 and ALL.
If you select Seed for Z and enter -1,-1,-1, the XY plot is created three times with different seeds.

### Effective Block Analyzer
This function determines which blocks are contributing strongly. It visualizes and quantifies the effect of a block by setting the strength of all other blocks to 1, lowering the strength of the block under examination, and taking the difference.
#### Range
If you enter 0.5,1, all initial values are set to 1 and only the target block is calculated at 0.5. Usually 0.5 produces a visible difference, but for some LoRAs it may not; in that case set 0.5 to 0 or a negative value.

#### Settings
##### diff color
Specifies the background color of the difference image.

##### change X-Y
Swaps the X and Y axes. By default, Block is assigned to the Y axis.

##### Threshold
Sets the threshold above which a pixel counts as changed when computing the difference. The default is usually fine, but lower it to detect subtle differences in color, etc.

#### Blocks
Enter the blocks to examine. The same format as for the XYZ plot can be used.

For block-by-block merging, see the link below.

### elemental
For details, see [here](https://github.com/hako-mikan/sd-webui-supermerger/blob/main/elemental_ja.md).
#### Usage
In the Elemental tab, define identifiers in the same way as block presets. In the prompt, the elemental identifier is entered after the block identifier:
\<lora:"lora name":1:IN04:ATTNON>
ATTNON:

The format is
identifier:blocks:elements:weight
Elements are matched by partial (substring) match: attn1 matches only attn1, while attn matches both attn1 and attn2. Multiple blocks or elements can be specified by separating them with spaces.
When print change is enabled, the elements that matched are printed to the command prompt.

ALL0:::0
sets the weight of every element to zero.
IN1:IN00-IN11::1
sets every element in the IN blocks to 1.
ATTNON::attn:1
sets attn in every block to 1.

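A sketch of the matching rule described above: the element field is compared as a substring against each module key, so `attn` matches both `attn1` and `attn2` while `attn2` does not match an `attn1` key (the module key below is illustrative):

```python
def element_matches(spec_elements: str, module_key: str) -> bool:
    # Elements are space-separated and matched as substrings of the module key.
    return any(elem in module_key for elem in spec_elements.split())

key = "lora_unet_middle_block_1_transformer_blocks_0_attn1_to_q"  # illustrative key
print(element_matches("attn", key))   # True  (attn matches attn1 and attn2)
print(element_matches("attn2", key))  # False (this key belongs to attn1)
```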
#### XYZ plot
In the elements field of the XYZ plot, comma-separated entries can be plotted.
In that case, specify the LoRA as
\<lora:"lora name":1:XYZ:XYZ>
and enter, in the elements field, something like
IN05-OUT05:attn:0,IN05-OUT05:attn:0.5,IN05-OUT05:attn:1
Running this varies only the attn elements from IN05 to OUT05.
The initial values can be changed by editing the XYZ entry. By default the elemental XYZ preset is XYZ:::1, which sets every element in every block to 1; changing it to XYZ:encoder::1 lets you evaluate with only the text encoder enabled.

https://github.com/bbc-mc/sdweb-merge-block-weighted-gui

### Updates
2023.02.16 2040(JST)
- Fixed an issue where Original Weights could not be set for X or Y.
- Fixed an error in the Effective Weight Analyzer when X or Y of the XYZ plot was not set to values or Block ID.

2023.02.08 2120(JST)
- Fixed an issue where block weights remained applied during normal use after having been applied once.
- Added a feature to identify effective blocks with one click.

2023.02.08 0050(JST)
- Fixed an issue where the seed was not fixed in some environments.

2023.02.07 2015(JST)
- Fixed an issue where negative weights did not work correctly.

2023.02.07 1250(JST)
- Changed the behavior when the XYZ plot is Active (the Script selected in the main UI takes precedence).

2023.02.06 2000(JST)
- Feature added: XYZ plotting.

2023.01.31 0200(JST)
- Feature added: random values.
- Fixed: weights now also work for negative values.
extensions/sd-webui-lora-block-weight/scripts/Roboto-Regular.ttf
ADDED
Binary file (306 kB)

extensions/sd-webui-lora-block-weight/scripts/__pycache__/lora_block_weight.cpython-310.pyc
ADDED
Binary file (24.7 kB)
extensions/sd-webui-lora-block-weight/scripts/elempresets.txt
ADDED
@@ -0,0 +1,7 @@
ATTNDEEPON:IN05-OUT05:attn:1

ATTNDEEPOFF:IN05-OUT05:attn:0

PROJDEEPOFF:IN05-OUT05:proj:0

XYZ:::1
extensions/sd-webui-lora-block-weight/scripts/lbwpresets.txt
ADDED
@@ -0,0 +1,10 @@
NONE:0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
ALL:1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1
INS:1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0
IND:1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0
INALL:1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0
MIDD:1,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0
OUTD:1,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0
OUTS:1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1
OUTALL:1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1
ALL0.5:0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5
extensions/sd-webui-lora-block-weight/scripts/lora_block_weight.py
ADDED
@@ -0,0 +1,744 @@
1 |
+
import cv2
|
2 |
+
import os
|
3 |
+
import gc
|
4 |
+
import re
|
5 |
+
import sys
|
6 |
+
import torch
|
7 |
+
import shutil
|
8 |
+
import math
|
9 |
+
import numpy as np
|
10 |
+
import gradio as gr
|
11 |
+
import os.path
|
12 |
+
import random
|
13 |
+
from pprint import pprint
|
14 |
+
import modules.ui
|
15 |
+
import modules.scripts as scripts
|
16 |
+
from PIL import Image, ImageFont, ImageDraw
|
17 |
+
import modules.shared as shared
|
18 |
+
from modules import devices, sd_models, images,extra_networks
|
19 |
+
from modules.shared import opts, state
|
20 |
+
from modules.processing import process_images, Processed
|
21 |
+
|
22 |
+
lxyz = ""
|
23 |
+
lzyx = ""
|
24 |
+
prompts = ""
|
25 |
+
xyelem = ""
|
26 |
+
princ = False
|
27 |
+
|
28 |
+
BLOCKID=["BASE","IN00","IN01","IN02","IN03","IN04","IN05","IN06","IN07","IN08","IN09","IN10","IN11","M00","OUT00","OUT01","OUT02","OUT03","OUT04","OUT05","OUT06","OUT07","OUT08","OUT09","OUT10","OUT11"]
|
29 |
+
|
30 |
+
BLOCKS=["encoder",
|
31 |
+
"diffusion_model_input_blocks_0_",
|
32 |
+
"diffusion_model_input_blocks_1_",
|
33 |
+
"diffusion_model_input_blocks_2_",
|
34 |
+
"diffusion_model_input_blocks_3_",
|
35 |
+
"diffusion_model_input_blocks_4_",
|
36 |
+
"diffusion_model_input_blocks_5_",
|
37 |
+
"diffusion_model_input_blocks_6_",
|
38 |
+
"diffusion_model_input_blocks_7_",
|
39 |
+
"diffusion_model_input_blocks_8_",
|
40 |
+
"diffusion_model_input_blocks_9_",
|
41 |
+
"diffusion_model_input_blocks_10_",
|
42 |
+
"diffusion_model_input_blocks_11_",
|
43 |
+
"diffusion_model_middle_block_",
|
44 |
+
"diffusion_model_output_blocks_0_",
|
45 |
+
"diffusion_model_output_blocks_1_",
|
46 |
+
"diffusion_model_output_blocks_2_",
|
47 |
+
"diffusion_model_output_blocks_3_",
|
48 |
+
"diffusion_model_output_blocks_4_",
|
49 |
+
"diffusion_model_output_blocks_5_",
|
50 |
+
"diffusion_model_output_blocks_6_",
|
51 |
+
"diffusion_model_output_blocks_7_",
|
52 |
+
"diffusion_model_output_blocks_8_",
|
53 |
+
"diffusion_model_output_blocks_9_",
|
54 |
+
"diffusion_model_output_blocks_10_",
|
55 |
+
"diffusion_model_output_blocks_11_"]
|
56 |
+
|
57 |
+
loopstopper = True
|
58 |
+
|
59 |
+
ATYPES =["none","Block ID","values","seed","Original Weights","elements"]
|
60 |
+
|
61 |
+
DEF_WEIGHT_PRESET = "\
|
62 |
+
NONE:0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0\n\
|
63 |
+
ALL:1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1\n\
|
64 |
+
INS:1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0\n\
|
65 |
+
IND:1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0\n\
|
66 |
+
INALL:1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0\n\
|
67 |
+
MIDD:1,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0\n\
|
68 |
+
OUTD:1,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0\n\
|
69 |
+
OUTS:1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1\n\
|
70 |
+
OUTALL:1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1\n\
|
71 |
+
ALL0.5:0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5"
|
72 |
+
|
73 |
+
class Script(modules.scripts.Script):
|
74 |
+
def title(self):
|
75 |
+
return "LoRA Block Weight"
|
76 |
+
|
77 |
+
def show(self, is_img2img):
|
78 |
+
return modules.scripts.AlwaysVisible
|
79 |
+
|
80 |
+
def ui(self, is_img2img):
|
81 |
+
import lora
|
82 |
+
LWEIGHTSPRESETS = DEF_WEIGHT_PRESET
|
83 |
+
|
84 |
+
runorigin = scripts.scripts_txt2img.run
|
85 |
+
runorigini = scripts.scripts_img2img.run
|
86 |
+
|
87 |
+
path_root = scripts.basedir()
|
88 |
+
extpath = os.path.join(path_root,"extensions","sd-webui-lora-block-weight","scripts", "lbwpresets.txt")
|
89 |
+
extpathe = os.path.join(path_root,"extensions","sd-webui-lora-block-weight","scripts", "elempresets.txt")
|
90 |
+
filepath = os.path.join(path_root,"scripts", "lbwpresets.txt")
|
91 |
+
filepathe = os.path.join(path_root,"scripts", "elempresets.txt")
|
92 |
+
|
93 |
+
if os.path.isfile(filepath) and not os.path.isfile(extpath):
|
94 |
+
shutil.move(filepath,extpath)
|
95 |
+
|
96 |
+
if os.path.isfile(filepathe) and not os.path.isfile(extpathe):
|
97 |
+
shutil.move(filepathe,extpathe)
|
98 |
+
|
99 |
+
lbwpresets=""
|
100 |
+
|
101 |
+
try:
|
102 |
+
with open(extpath,encoding="utf-8") as f:
|
103 |
+
lbwpresets = f.read()
|
104 |
+
except OSError as e:
|
105 |
+
lbwpresets=LWEIGHTSPRESETS
|
106 |
+
if not os.path.isfile(extpath):
|
107 |
+
try:
|
108 |
+
with open(extpath,mode = 'w',encoding="utf-8") as f:
|
109 |
+
f.write(lbwpresets)
|
110 |
+
except:
|
111 |
+
pass
|
112 |
+
|
113 |
+
try:
|
114 |
+
with open(extpathe,encoding="utf-8") as f:
|
115 |
+
elempresets = f.read()
|
116 |
+
except OSError as e:
|
117 |
+
elempresets=ELEMPRESETS
|
118 |
+
if not os.path.isfile(extpathe):
|
119 |
+
try:
|
120 |
+
with open(extpathe,mode = 'w',encoding="utf-8") as f:
|
121 |
+
f.write(elempresets)
|
122 |
+
except:
|
123 |
+
pass
|
124 |
+
|
125 |
+
loraratios=lbwpresets.splitlines()
|
126 |
+
lratios={}
|
127 |
+
for i,l in enumerate(loraratios):
|
128 |
+
if ":" not in l or not (l.count(",") == 16 or l.count(",") == 25) : continue
|
129 |
+
lratios[l.split(":")[0]]=l.split(":")[1]
|
130 |
+
ratiostags = [k for k in lratios.keys()]
|
131 |
+
ratiostags = ",".join(ratiostags)
|
132 |
+
|
133 |
+
with gr.Accordion("LoRA Block Weight",open = False):
|
134 |
+
with gr.Row():
|
135 |
+
with gr.Column(min_width = 50, scale=1):
|
136 |
+
lbw_useblocks = gr.Checkbox(value = True,label="Active",interactive =True,elem_id="lbw_active")
|
137 |
+
with gr.Column(scale=5):
|
138 |
+
bw_ratiotags= gr.TextArea(label="",value=ratiostags,visible =True,interactive =True,elem_id="lbw_ratios")
|
139 |
+
with gr.Accordion("XYZ plot",open = False):
|
140 |
+
gr.HTML(value='<p style= "word-wrap:break-word;">changeable blocks : BASE,IN00,IN01,IN02,IN03,IN04,IN05,IN06,IN07,IN08,IN09,IN10,IN11,M00,OUT00,OUT01,OUT02,OUT03,OUT04,OUT05,OUT06,OUT07,OUT08,OUT09,OUT10,OUT11</p>')
|
141 |
+
xyzsetting = gr.Radio(label = "Active",choices = ["Disable","XYZ plot","Effective Block Analyzer"], value ="Disable",type = "index")
|
142 |
+
with gr.Row(visible = False) as esets:
|
143 |
+
diffcol = gr.Radio(label = "diff image color",choices = ["black","white"], value ="black",type = "value",interactive =True)
|
144 |
+
revxy = gr.Checkbox(value = False,label="change X-Y",interactive =True,elem_id="lbw_changexy")
|
145 |
+
thresh = gr.Textbox(label="difference threshold",lines=1,value="20",interactive =True,elem_id="diff_thr")
|
146 |
+
xtype = gr.Dropdown(label="X Types ", choices=[x for x in ATYPES], value=ATYPES [2],interactive =True,elem_id="lbw_xtype")
|
147 |
+
xmen = gr.Textbox(label="X Values ",lines=1,value="0,0.25,0.5,0.75,1",interactive =True,elem_id="lbw_xmen")
|
148 |
+
ytype = gr.Dropdown(label="Y Types ", choices=[y for y in ATYPES], value=ATYPES [1],interactive =True,elem_id="lbw_ytype")
|
149 |
+
ymen = gr.Textbox(label="Y Values " ,lines=1,value="IN05-OUT05",interactive =True,elem_id="lbw_ymen")
|
150 |
+
ztype = gr.Dropdown(label="Z type ", choices=[z for z in ATYPES], value=ATYPES[0],interactive =True,elem_id="lbw_ztype")
|
151 |
+
zmen = gr.Textbox(label="Z values ",lines=1,value="",interactive =True,elem_id="lbw_zmen")
|
152 |
+
|
153 |
+
exmen = gr.Textbox(label="Range",lines=1,value="0.5,1",interactive =True,elem_id="lbw_exmen",visible = False)
|
154 |
+
eymen = gr.Textbox(label="Blocks" ,lines=1,value="BASE,IN00,IN01,IN02,IN03,IN04,IN05,IN06,IN07,IN08,IN09,IN10,IN11,M00,OUT00,OUT01,OUT02,OUT03,OUT04,OUT05,OUT06,OUT07,OUT08,OUT09,OUT10,OUT11",interactive =True,elem_id="lbw_eymen",visible = False)
|
155 |
+
ecount = gr.Number(value=1, label="number of seed", interactive=True, visible = True)
|
156 |
+
|
157 |
+
with gr.Accordion("Weights setting",open = True):
|
158 |
+
with gr.Row():
|
159 |
+
reloadtext = gr.Button(value="Reload Presets",variant='primary',elem_id="lbw_reload")
|
160 |
+
reloadtags = gr.Button(value="Reload Tags",variant='primary',elem_id="lbw_reload")
|
161 |
+
savetext = gr.Button(value="Save Presets",variant='primary',elem_id="lbw_savetext")
|
162 |
+
openeditor = gr.Button(value="Open TextEditor",variant='primary',elem_id="lbw_openeditor")
|
163 |
+
lbw_loraratios = gr.TextArea(label="",value=lbwpresets,visible =True,interactive = True,elem_id="lbw_ratiospreset")
|
164 |
+
|
165 |
+
with gr.Accordion("Elemental",open = False):
|
166 |
+
with gr.Row():
|
167 |
+
e_reloadtext = gr.Button(value="Reload Presets",variant='primary',elem_id="lbw_reload")
|
168 |
+
e_savetext = gr.Button(value="Save Presets",variant='primary',elem_id="lbw_savetext")
|
169 |
+
e_openeditor = gr.Button(value="Open TextEditor",variant='primary',elem_id="lbw_openeditor")
|
170 |
+
elemsets = gr.Checkbox(value = False,label="print change",interactive =True,elem_id="lbw_print_change")
|
171 |
+
elemental = gr.TextArea(label="Identifer:BlockID:Elements:Ratio,...,separated by empty line ",value = elempresets,interactive =True,elem_id="element")
|
172 |
+
|
173 |
+
d_true = gr.Checkbox(value = True,visible = False)
|
174 |
+
d_false = gr.Checkbox(value = False,visible = False)
|
175 |
+
|
176 |
+
import subprocess
|
177 |
+
def openeditors(b):
|
178 |
+
path = extpath if b else extpathe
|
179 |
+
subprocess.Popen(['start', path], shell=True)
|
180 |
+
|
181 |
+
def reloadpresets(isweight):
|
182 |
+
if isweight:
|
183 |
+
try:
|
184 |
+
with open(extpath,encoding="utf-8") as f:
|
185 |
+
return f.read()
|
186 |
+
except OSError as e:
|
187 |
+
pass
|
188 |
+
else:
|
189 |
+
try:
|
190 |
+
with open(extpath,encoding="utf-8") as f:
|
191 |
+
return f.read()
|
192 |
+
except OSError as e:
|
193 |
+
pass
|
194 |
+
|
195 |
+
def tagdicter(presets):
|
196 |
+
presets=presets.splitlines()
|
197 |
+
wdict={}
|
198 |
+
for l in presets:
|
199 |
+
if ":" not in l or not (l.count(",") == 16 or l.count(",") == 25) : continue
|
200 |
+
w=[]
|
201 |
+
if ":" in l :
|
202 |
+
key = l.split(":",1)[0]
|
203 |
+
w = l.split(":",1)[1]
|
204 |
+
if len([w for w in w.split(",")]) == 17 or len([w for w in w.split(",")]) ==26:
|
205 |
+
wdict[key.strip()]=w
|
206 |
+
return ",".join(list(wdict.keys()))
|
207 |
+
|
208 |
+
def savepresets(text,isweight):
|
209 |
+
if isweight:
|
210 |
+
with open(extpath,mode = 'w',encoding="utf-8") as f:
|
211 |
+
f.write(text)
|
212 |
+
else:
|
213 |
+
with open(extpathe,mode = 'w',encoding="utf-8") as f:
|
214 |
+
f.write(text)
|
215 |
+
|
216 |
+
reloadtext.click(fn=reloadpresets,inputs=[d_true],outputs=[lbw_loraratios])
|
217 |
+
reloadtags.click(fn=tagdicter,inputs=[lbw_loraratios],outputs=[bw_ratiotags])
|
218 |
+
savetext.click(fn=savepresets,inputs=[lbw_loraratios,d_true],outputs=[])
|
219 |
+
openeditor.click(fn=openeditors,inputs=[d_true],outputs=[])
|
220 |
+
|
221 |
+
e_reloadtext.click(fn=reloadpresets,inputs=[d_false],outputs=[elemental])
|
222 |
+
e_savetext.click(fn=savepresets,inputs=[elemental,d_false],outputs=[])
|
223 |
+
e_openeditor.click(fn=openeditors,inputs=[d_false],outputs=[])
|
224 |
+
|
225 |
+
def urawaza(active):
|
226 |
+
if active > 0:
|
227 |
+
for obj in scripts.scripts_txt2img.alwayson_scripts:
|
228 |
+
if "lora_block_weight" in obj.filename:
|
229 |
+
scripts.scripts_txt2img.selectable_scripts.append(obj)
|
230 |
+
scripts.scripts_txt2img.titles.append("LoRA Block Weight")
|
231 |
+
for obj in scripts.scripts_img2img.alwayson_scripts:
|
232 |
+
if "lora_block_weight" in obj.filename:
|
233 |
+
scripts.scripts_img2img.selectable_scripts.append(obj)
|
234 |
+
scripts.scripts_img2img.titles.append("LoRA Block Weight")
|
235 |
+
scripts.scripts_txt2img.run = newrun
|
236 |
+
scripts.scripts_img2img.run = newrun
|
237 |
+
if active == 1:return [*[gr.update(visible = True) for x in range(6)],*[gr.update(visible = False) for x in range(4)]]
|
238 |
+
else:return [*[gr.update(visible = False) for x in range(6)],*[gr.update(visible = True) for x in range(4)]]
|
239 |
+
else:
|
240 |
+
scripts.scripts_txt2img.run = runorigin
|
241 |
+
scripts.scripts_img2img.run = runorigini
|
242 |
+
return [*[gr.update(visible = True) for x in range(6)],*[gr.update(visible = False) for x in range(4)]]
|
243 |
+
|
244 |
+
xyzsetting.change(fn=urawaza,inputs=[xyzsetting],outputs =[xtype,xmen,ytype,ymen,ztype,zmen,exmen,eymen,ecount,esets])
|
245 |
+
|
246 |
+
return lbw_loraratios,lbw_useblocks,xyzsetting,xtype,xmen,ytype,ymen,ztype,zmen,exmen,eymen,ecount,diffcol,thresh,revxy,elemental,elemsets
|
247 |
+
|
248 |
+
def process(self, p, loraratios,useblocks,xyzsetting,xtype,xmen,ytype,ymen,ztype,zmen,exmen,eymen,ecount,diffcol,thresh,revxy,elemental,elemsets):
|
249 |
+
#print("self =",self,"p =",p,"presets =",loraratios,"useblocks =",useblocks,"xyzsettings =",xyzsetting,"xtype =",xtype,"xmen =",xmen,"ytype =",ytype,"ymen =",ymen,"ztype =",ztype,"zmen =",zmen)
|
250 |
+
#Note that this does not use the default arg syntax because the default args are supposed to be at the end of the function
|
251 |
+
if(loraratios == None):
|
252 |
+
loraratios = DEF_WEIGHT_PRESET
|
253 |
+
if(useblocks == None):
|
254 |
+
useblocks = True
|
255 |
+
|
256 |
+
if useblocks:
|
257 |
+
loraratios=loraratios.splitlines()
|
258 |
+
elemental = elemental.split("\n\n") if elemental is not None else []
|
259 |
+
lratios={}
|
260 |
+
elementals={}
|
261 |
+
for l in loraratios:
|
262 |
+
if ":" not in l or not (l.count(",") == 16 or l.count(",") == 25) : continue
|
263 |
+
l0=l.split(":",1)[0]
|
264 |
+
lratios[l0.strip()]=l.split(":",1)[1]
|
265 |
+
for e in elemental:
|
266 |
+
e0=e.split(":",1)[0]
|
267 |
+
elementals[e0.strip()]=e.split(":",1)[1]
|
268 |
+
if elemsets : print(xyelem)
|
269 |
+
if xyzsetting and "XYZ" in p.prompt:
|
270 |
+
lratios["XYZ"] = lxyz
|
271 |
+
lratios["ZYX"] = lzyx
|
272 |
+
if xyelem != "":
|
273 |
+
if "XYZ" in elementals.keys():
|
274 |
+
elementals["XYZ"] = elementals["XYZ"] + ","+ xyelem
|
275 |
+
else:
|
276 |
+
elementals["XYZ"] = xyelem
|
277 |
+
self.lratios = lratios
|
278 |
+
self.elementals = elementals
|
279 |
+
global princ
|
280 |
+
princ = elemsets
|
281 |
+
return
|
282 |
+
|
283 |
+
def before_process_batch(self, p, loraratios,useblocks,xyzsetting,xtype,xmen,ytype,ymen,ztype,zmen,exmen,eymen,ecount,diffcol,thresh,revxy,elemental,elemsets,**kwargs):
|
284 |
+
if useblocks:
|
285 |
+
global prompts
|
286 |
+
prompts = kwargs["prompts"].copy()
|
287 |
+
|
288 |
+
def process_batch(self, p, loraratios,useblocks,xyzsetting,xtype,xmen,ytype,ymen,ztype,zmen,exmen,eymen,ecount,diffcol,thresh,revxy,elemental,elemsets,**kwargs):
|
289 |
+
if useblocks:
|
290 |
+
o_prompts = [p.prompt]
|
291 |
+
for prompt in prompts:
|
292 |
+
if "<lora" in prompt or "<lyco" in prompt:
|
293 |
+
o_prompts = prompts.copy()
|
294 |
+
loradealer(o_prompts ,self.lratios,self.elementals)
|
295 |
+
|
296 |
+
def postprocess(self, p, processed, *args):
|
297 |
+
import lora
|
298 |
+
lora.loaded_loras.clear()
|
299 |
+
global lxyz,lzyx,xyelem
|
300 |
+
lxyz = lzyx = xyelem = ""
|
301 |
+
gc.collect()
|
302 |
+
|
303 |
+
def run(self,p,presets,useblocks,xyzsetting,xtype,xmen,ytype,ymen,ztype,zmen,exmen,eymen,ecount,diffcol,thresh,revxy,elemental,elemsets):
|
304 |
+
if xyzsetting >0:
|
305 |
+
import lora
|
306 |
+
loraratios=presets.splitlines()
|
307 |
+
lratios={}
|
308 |
+
for l in loraratios:
|
309 |
+
if ":" not in l or not (l.count(",") == 16 or l.count(",") == 25) : continue
|
310 |
+
l0=l.split(":",1)[0]
|
311 |
+
lratios[l0.strip()]=l.split(":",1)[1]
|
312 |
+
|
313 |
+
if "XYZ" in p.prompt:
|
314 |
+
base = lratios["XYZ"] if "XYZ" in lratios.keys() else "1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1"
|
315 |
+
else: return
|
316 |
+
|
317 |
+
if xyzsetting > 1:
|
318 |
+
xmen,ymen = exmen,eymen
|
319 |
+
xtype,ytype = "values","ID"
|
320 |
+
ebase = xmen.split(",")[1]
|
321 |
+
ebase = [ebase.strip()]*26
|
322 |
+
base = ",".join(ebase)
|
323 |
+
ztype = ""
|
324 |
+
if ecount > 1:
|
325 |
+
ztype = "seed"
|
326 |
+
zmen = ",".join([str(random.randrange(4294967294)) for x in range(int(ecount))])
|
327 |
+
|
328 |
+
#ATYPES =["none","Block ID","values","seed","Base Weights"]
|
329 |
+
|
330 |
+
def dicedealer(am):
|
331 |
+
for i,a in enumerate(am):
|
332 |
+
if a =="-1": am[i] = str(random.randrange(4294967294))
|
333 |
+
print(f"the die was thrown : {am}")
|
334 |
+
|
335 |
+
if p.seed == -1: p.seed = str(random.randrange(4294967294))
|
336 |
+
|
337 |
+
#print(f"xs:{xmen},ys:{ymen},zs:{zmen}")
|
338 |
+
|
339 |
+
def adjuster(a,at):
|
340 |
+
if "none" in at:a = ""
|
341 |
+
a = [a.strip() for a in a.split(',')]
|
342 |
+
if "seed" in at:dicedealer(a)
|
343 |
+
return a
|
344 |
+
|
345 |
+
xs = adjuster(xmen,xtype)
|
346 |
+
ys = adjuster(ymen,ytype)
|
347 |
+
zs = adjuster(zmen,ztype)
|
348 |
+
|
349 |
+
ids = alpha =seed = ""
|
350 |
+
p.batch_size = 1
|
351 |
+
|
352 |
+
print(f"xs:{xs},ys:{ys},zs:{zs}")
|
353 |
+
|
354 |
+
images = []
|
355 |
+
|
356 |
+
def weightsdealer(alpha,ids,base):
|
357 |
+
blockid17=["BASE","IN01","IN02","IN04","IN05","IN07","IN08","M00","OUT03","OUT04","OUT05","OUT06","OUT07","OUT08","OUT09","OUT10","OUT11"]
|
358 |
+
blockid26=["BASE","IN00","IN01","IN02","IN03","IN04","IN05","IN06","IN07","IN08","IN09","IN10","IN11","M00","OUT00","OUT01","OUT02","OUT03","OUT04","OUT05","OUT06","OUT07","OUT08","OUT09","OUT10","OUT11"]
|
359 |
+
#print(f"weights from : {base}")
|
360 |
+
ids = [z.strip() for z in ids.split(' ')]
|
361 |
+
weights_t = [w.strip() for w in base.split(',')]
|
362 |
+
blockid = blockid17 if len(weights_t) ==17 else blockid26
|
363 |
+
if ids[0]!="NOT":
|
364 |
+
flagger=[False]*len(weights_t)
|
365 |
+
changer = True
|
366 |
+
else:
|
367 |
+
flagger=[True]*len(weights_t)
|
368 |
+
changer = False
|
369 |
+
for id in ids:
|
370 |
+
if id =="NOT":continue
|
371 |
+
if "-" in id:
|
372 |
+
it = [it.strip() for it in id.split('-')]
|
373 |
+
if blockid.index(it[1]) > blockid.index(it[0]):
|
374 |
+
flagger[blockid.index(it[0]):blockid.index(it[1])+1] = [changer]*(blockid.index(it[1])-blockid.index(it[0])+1)
|
375 |
+
else:
|
376 |
+
flagger[blockid.index(it[1]):blockid.index(it[0])+1] = [changer]*(blockid.index(it[0])-blockid.index(it[1])+1)
|
377 |
+
else:
|
378 |
+
flagger[blockid.index(id)] =changer
|
379 |
+
for i,f in enumerate(flagger):
|
380 |
+
if f:weights_t[i]=alpha
|
381 |
+
outext = ",".join(weights_t)
|
382 |
+
#print(f"weights changed: {outext}")
|
383 |
+
return outext
|
384 |
+
|
385 |
+
def xyzdealer(a,at):
|
386 |
+
nonlocal ids,alpha,p,base,c_base
|
387 |
+
if "ID" in at:return
|
388 |
+
if "values" in at:alpha = a
|
389 |
+
if "seed" in at:
|
390 |
+
p.seed = int(a)
|
391 |
+
if "Weights" in at:base =c_base = lratios[a]
|
392 |
+
if "elements" in at:
|
393 |
+
global xyelem
|
394 |
+
xyelem = a
|
395 |
+
|
396 |
+
grids = []
|
397 |
+
images =[]
|
398 |
+
|
399 |
+
totalcount = len(xs)*len(ys)*len(zs) if xyzsetting < 2 else len(xs)*len(ys)*len(zs) //2 +1
|
400 |
+
shared.total_tqdm.updateTotal(totalcount)
|
401 |
+
xc = yc =zc = 0
|
402 |
+
state.job_count = totalcount
|
403 |
+
totalcount = len(xs)*len(ys)*len(zs)
|
404 |
+
c_base = base
|
405 |
+
|
406 |
+
for z in zs:
|
407 |
+
images = []
|
408 |
+
yc = 0
|
409 |
+
xyzdealer(z,ztype)
|
410 |
+
for y in ys:
|
411 |
+
xc = 0
|
412 |
+
xyzdealer(y,ytype)
|
413 |
+
for x in xs:
|
414 |
+
xyzdealer(x,xtype)
|
415 |
+
if "ID" in xtype:
|
416 |
+
if "values" in ytype:c_base = weightsdealer(y,x,base)
|
417 |
+
if "values" in ztype:c_base = weightsdealer(z,x,base)
|
418 |
+
if "ID" in ytype:
|
419 |
+
if "values" in xtype:c_base = weightsdealer(x,y,base)
|
420 |
+
if "values" in ztype:c_base = weightsdealer(z,y,base)
|
421 |
+
if "ID" in ztype:
|
422 |
+
if "values" in xtype:c_base = weightsdealer(x,z,base)
|
423 |
+
if "values" in ytype:c_base = weightsdealer(y,z,base)
|
424 |
+
|
425 |
+
print(f"X:{xtype}, {x},Y: {ytype},{y}, Z:{ztype},{z}, base:{c_base} ({len(xs)*len(ys)*zc + yc*len(xs) +xc +1}/{totalcount})")
|
426 |
+
|
427 |
+
global lxyz,lzyx
|
428 |
+
lxyz = c_base
|
429 |
+
|
430 |
+
cr_base = c_base.split(",")
|
431 |
+
cr_base_t=[]
|
432 |
+
for x in cr_base:
|
433 |
+
if not identifier(x):
|
434 |
+
cr_base_t.append(str(1-float(x)))
|
435 |
+
else:
|
436 |
+
cr_base_t.append(x)
|
437 |
+
lzyx = ",".join(cr_base_t)
|
438 |
+
|
439 |
+
if not(xc == 1 and not (yc ==0 ) and xyzsetting >1):
|
440 |
+
lora.loaded_loras.clear()
|
441 |
+
processed:Processed = process_images(p)
|
442 |
+
images.append(processed.images[0])
|
443 |
+
xc += 1
|
444 |
+
yc += 1
|
445 |
+
zc += 1
|
446 |
+
origin = loranames(processed.all_prompts) + ", "+ znamer(ztype,z,base)
|
447 |
+
images,xst,yst = effectivechecker(images,xs.copy(),ys.copy(),diffcol,thresh,revxy) if xyzsetting >1 else (images,xs.copy(),ys.copy())
|
448 |
+
grids.append(smakegrid(images,xst,yst,origin,p))
|
449 |
+
processed.images= grids
|
450 |
+
lora.loaded_loras.clear()
|
451 |
+
return processed
|
452 |
+
|
453 |
+
def identifier(char):
|
454 |
+
return char[0] in ["R", "U", "X"]
|
455 |
+
|
456 |
+
def znamer(at,a,base):
|
457 |
+
if "ID" in at:return f"Block : {a}"
|
458 |
+
if "values" in at:return f"value : {a}"
|
459 |
+
if "seed" in at:return f"seed : {a}"
|
460 |
+
if "Weights" in at:return f"original weights :\n {base}"
|
461 |
+
else: return ""
|
462 |
+
|
463 |
+
def loranames(all_prompts):
|
464 |
+
_, extra_network_data = extra_networks.parse_prompts(all_prompts[0:1])
|
465 |
+
calledloras = extra_network_data["lora"] if "lyco" not in extra_network_data.keys() else extra_network_data["lyco"]
|
466 |
+
names = ""
|
467 |
+
for called in calledloras:
|
468 |
+
if len(called.items) <3:continue
|
469 |
+
names += called.items[0]
|
470 |
+
return names
|
471 |
+
|
472 |
+
def lycodealer(called):
|
473 |
+
for item in called.items[1:]:
|
474 |
+
if "lbw" in item:
|
475 |
+
called.items[2] = item.split("=")[1]
|
476 |
+
return called
|
477 |
+
|
478 |
+
def loradealer(prompts,lratios,elementals):
|
479 |
+
_, extra_network_data = extra_networks.parse_prompts(prompts)
|
480 |
+
moduletypes = extra_network_data.keys()
|
481 |
+
|
482 |
+
for ltype in moduletypes:
|
483 |
+
lorans = []
|
484 |
+
lorars = []
|
485 |
+
multipliers = []
|
486 |
+
elements = []
|
487 |
+
if not (ltype == "lora" or ltype == "lyco") : continue
|
488 |
+
for called in extra_network_data[ltype]:
|
489 |
+
if ltype == "lyco":
|
490 |
+
called = lycodealer(called)
|
491 |
+
multiple = float(called.items[1])
|
492 |
+
multipliers.append(multiple)
|
493 |
+
if len(called.items) <3:
|
494 |
+
continue
|
495 |
+
lorans.append(called.items[0])
|
496 |
+
if called.items[2] in lratios or called.items[2].count(",") ==16 or called.items[2].count(",") ==25:
|
497 |
+
wei = lratios[called.items[2]] if called.items[2] in lratios else called.items[2]
|
498 |
+
ratios = [w.strip() for w in wei.split(",")]
|
499 |
+
for i,r in enumerate(ratios):
|
500 |
+
if r =="R":
|
501 |
+
ratios[i] = round(random.random(),3)
|
502 |
+
elif r == "U":
|
503 |
+
ratios[i] = round(random.uniform(-0.5,1.5),3)
|
504 |
+
elif r[0] == "X":
|
505 |
+
base = called.items[3] if len(called.items) >= 4 else 1
|
506 |
+
ratios[i] = getinheritedweight(base, r)
|
507 |
+
else:
|
508 |
+
ratios[i] = float(r)
|
509 |
+
print(f"LoRA Block weight ({ltype}): {called.items[0]}: {multiple} x {[x for x in ratios]}")
|
510 |
+
if len(ratios)==17:
|
511 |
+
ratios = [ratios[0]] + [1] + ratios[1:3]+ [1] + ratios[3:5]+[1] + ratios[5:7]+[1,1,1] + [ratios[7]] + [1,1,1] + ratios[8:]
|
512 |
+
lorars.append(ratios)
|
513 |
+
if len(called.items) > 3:
|
514 |
+
if called.items[3] in elementals:
|
515 |
+
elements.append(elementals[called.items[3]])
|
516 |
+
else:
|
517 |
+
elements.append(called.items[3])
|
518 |
+
else:
|
519 |
+
elements.append("")
|
520 |
+
if len(lorars) > 0: load_loras_blocks(lorans,lorars,multipliers,elements,ltype)
|
521 |
+
|
522 |
+
def isfloat(t):
|
523 |
+
try:
|
524 |
+
float(t)
|
525 |
+
return True
|
526 |
+
except:
|
527 |
+
return False
|
528 |
+
|
529 |
+
re_inherited_weight = re.compile(r"X([+-])?([\d.]+)?")
|
530 |
+
|
531 |
+
def getinheritedweight(weight, offset):
|
532 |
+
match = re_inherited_weight.search(offset)
|
533 |
+
if match.group(1) == "+":
|
534 |
+
return float(weight) + float(match.group(2))
|
535 |
+
elif match.group(1) == "-":
|
536 |
+
return float(weight) - float(match.group(2))
|
537 |
+
else:
|
538 |
+
return float(weight)
|
539 |
+
|
540 |
+
def load_loras_blocks(names, lwei,multipliers,elements = [],ltype = "lora"):
|
541 |
+
if "lora" == ltype:
|
542 |
+
print(names,lwei,elements)
|
543 |
+
import lora
|
544 |
+
for l, loaded in enumerate(lora.loaded_loras):
|
545 |
+
for n, name in enumerate(names):
|
546 |
+
if name == loaded.name:
|
547 |
+
lbw(lora.loaded_loras[l],lwei[n],elements[n])
|
548 |
+
lora.loaded_loras[l].name = lora.loaded_loras[l].name +"added_by_lora_block_weight"+ str(random.random())
|
549 |
+
|
550 |
+
elif "lyco" == ltype:
|
551 |
+
import lycoris as lycomo
|
552 |
+
for l, loaded in enumerate(lycomo.loaded_lycos):
|
553 |
+
for n, name in enumerate(names):
|
554 |
+
if name == loaded.name:
|
555 |
+
lbw(lycomo.loaded_lycos[l],lwei[n],elements[n])
|
556 |
+
lycomo.loaded_lycos[l].name = lycomo.loaded_lycos[l].name +"added_by_lora_block_weight"+ str(random.random())
|
557 |
+
|
558 |
+
def smakegrid(imgs,xs,ys,currentmodel,p):
|
559 |
+
ver_texts = [[images.GridAnnotation(y)] for y in ys]
|
560 |
+
hor_texts = [[images.GridAnnotation(x)] for x in xs]
|
561 |
+
|
562 |
+
w, h = imgs[0].size
|
563 |
+
grid = Image.new('RGB', size=(len(xs) * w, len(ys) * h), color='black')
|
564 |
+
|
565 |
+
for i, img in enumerate(imgs):
|
566 |
+
grid.paste(img, box=(i % len(xs) * w, i // len(xs) * h))
|
567 |
+
|
568 |
+
grid = images.draw_grid_annotations(grid,w, h, hor_texts, ver_texts)
|
569 |
+
grid = draw_origin(grid, currentmodel,w*len(xs),h*len(ys),w)
|
570 |
+
if opts.grid_save:
|
571 |
+
images.save_image(grid, opts.outdir_txt2img_grids, "xy_grid", extension=opts.grid_format, prompt=p.prompt, seed=p.seed, grid=True, p=p)
|
572 |
+
|
573 |
+
return grid
|
574 |
+
|
575 |
+
def get_font(fontsize):
|
576 |
+
path_root = scripts.basedir()
|
577 |
+
fontpath = os.path.join(path_root,"extensions","sd-webui-lora-block-weight","scripts", "Roboto-Regular.ttf")
|
578 |
+
try:
|
579 |
+
return ImageFont.truetype(opts.font or fontpath, fontsize)
|
580 |
+
except Exception:
|
581 |
+
return ImageFont.truetype(fontpath, fontsize)
|
582 |
+
|
583 |
+
def draw_origin(grid, text,width,height,width_one):
|
584 |
+
grid_d= Image.new("RGB", (grid.width,grid.height), "white")
|
585 |
+
grid_d.paste(grid,(0,0))
|
586 |
+
|
587 |
+
d= ImageDraw.Draw(grid_d)
|
588 |
+
color_active = (0, 0, 0)
|
589 |
+
fontsize = (width+height)//25
|
590 |
+
fnt = get_font(fontsize)
|
591 |
+
|
592 |
+
if grid.width != width_one:
|
593 |
+
while d.multiline_textsize(text, font=fnt)[0] > width_one*0.75 and fontsize > 0:
|
594 |
+
fontsize -=1
|
595 |
+
fnt = get_font(fontsize)
|
596 |
+
d.multiline_text((0,0), text, font=fnt, fill=color_active,align="center")
|
597 |
+
return grid_d
|
598 |
+
|
599 |
+
def newrun(p, *args):
|
600 |
+
script_index = args[0]
|
601 |
+
|
602 |
+
if args[0] ==0:
|
603 |
+
script = None
|
604 |
+
for obj in scripts.scripts_txt2img.alwayson_scripts:
|
605 |
+
if "lora_block_weight" in obj.filename:
|
606 |
+
script = obj
|
607 |
+
script_args = args[script.args_from:script.args_to]
|
608 |
+
else:
|
609 |
+
script = scripts.scripts_txt2img.selectable_scripts[script_index-1]
|
610 |
+
|
611 |
+
if script is None:
|
612 |
+
return None
|
613 |
+
|
614 |
+
script_args = args[script.args_from:script.args_to]
|
615 |
+
|
616 |
+
processed = script.run(p, *script_args)
|
617 |
+
|
618 |
+
shared.total_tqdm.clear()
|
619 |
+
|
620 |
+
return processed
|
621 |
+
|
622 |
+
def effectivechecker(imgs,ss,ls,diffcol,thresh,revxy):
|
623 |
+
diffs = []
|
624 |
+
outnum =[]
|
625 |
+
imgs[0],imgs[1] = imgs[1],imgs[0]
|
626 |
+
im1 = np.array(imgs[0])
|
627 |
+
|
628 |
+
for i in range(len(imgs)-1):
|
629 |
+
im2 = np.array(imgs[i+1])
|
630 |
+
|
631 |
+
abs_diff = cv2.absdiff(im2 , im1)
|
632 |
+
|
633 |
+
abs_diff_t = cv2.threshold(abs_diff, int(thresh), 255, cv2.THRESH_BINARY)[1]
|
634 |
+
res = abs_diff_t.astype(np.uint8)
|
635 |
+
percentage = (np.count_nonzero(res) * 100)/ res.size
|
636 |
+
if "white" in diffcol: abs_diff = cv2.bitwise_not(abs_diff)
|
637 |
+
outnum.append(percentage)
|
638 |
+
|
639 |
+
abs_diff = Image.fromarray(abs_diff)
|
640 |
+
|
641 |
+
diffs.append(abs_diff)
|
642 |
+
|
643 |
+
outs = []
|
644 |
+
for i in range(len(ls)):
|
645 |
+
ls[i] = ls[i] + "\n Diff : " + str(round(outnum[i],3)) + "%"
|
646 |
+
|
647 |
+
if not revxy:
|
648 |
+
for diff,img in zip(diffs,imgs[1:]):
|
649 |
+
outs.append(diff)
|
650 |
+
outs.append(img)
|
651 |
+
outs.append(imgs[0])
|
652 |
+
ss = ["diff",ss[0],"source"]
|
653 |
+
return outs,ss,ls
|
654 |
+
else:
|
655 |
+
outs = [imgs[0]]*len(diffs) + imgs[1:]+ diffs
|
656 |
+
ss = ["source",ss[0],"diff"]
|
657 |
+
return outs,ls,ss
|
658 |
+
|
659 |
+
def lbw(lora,lwei,elemental):
|
660 |
+
elemental = elemental.split(",")
|
661 |
+
for key in lora.modules.keys():
|
662 |
+
ratio = 1
|
663 |
+
picked = False
|
664 |
+
errormodules = []
|
665 |
+
|
666 |
+
for i,block in enumerate(BLOCKS):
|
667 |
+
if block in key:
|
668 |
+
ratio = lwei[i]
|
669 |
+
picked = True
|
670 |
+
currentblock = i
|
671 |
+
|
672 |
+
if not picked:
|
673 |
+
errormodules.append(key)
|
674 |
+
|
675 |
+
if len(elemental) > 0:
|
676 |
+
skey = key + BLOCKID[currentblock]
|
677 |
+
for d in elemental:
|
678 |
+
if d.count(":") != 2 :continue
|
679 |
+
dbs,dws,dr = (hyphener(d.split(":")[0]),d.split(":")[1],d.split(":")[2])
|
680 |
+
dbs,dws = (dbs.split(" "), dws.split(" "))
|
681 |
+
dbn,dbs = (True,dbs[1:]) if dbs[0] == "NOT" else (False,dbs)
|
682 |
+
dwn,dws = (True,dws[1:]) if dws[0] == "NOT" else (False,dws)
|
683 |
+
flag = dbn
|
684 |
+
for db in dbs:
|
685 |
+
if db in skey:
|
686 |
+
flag = not dbn
|
687 |
+
if flag:flag = dwn
|
688 |
+
else:continue
|
689 |
+
for dw in dws:
|
690 |
+
if dw in skey:
|
691 |
+
flag = not dwn
|
692 |
+
if flag:
|
693 |
+
dr = float(dr)
|
694 |
+
if princ :print(dbs,dws,key,dr)
|
695 |
+
ratio = dr
|
696 |
+
|
697 |
+
ltype = type(lora.modules[key]).__name__
|
698 |
+
set = False
|
699 |
+
if ltype in LORAANDSOON.keys():
|
700 |
+
setattr(lora.modules[key],LORAANDSOON[ltype],torch.nn.Parameter(getattr(lora.modules[key],LORAANDSOON[ltype]) * ratio))
|
701 |
+
#print(ltype)
|
702 |
+
set = True
|
703 |
+
else:
|
704 |
+
if hasattr(lora.modules[key],"up_model"):
|
705 |
+
lora.modules[key].up_model.weight= torch.nn.Parameter(lora.modules[key].up_model.weight *ratio)
|
706 |
+
#print("LoRA using LoCON")
|
707 |
+
set = True
|
708 |
+
else:
|
709 |
+
lora.modules[key].up.weight= torch.nn.Parameter(lora.modules[key].up.weight *ratio)
|
710 |
+
#print("LoRA")
|
711 |
+
set = True
|
712 |
+
if not set :
|
713 |
+
print("unkwon LoRA")
|
714 |
+
|
715 |
+
lora.name = lora.name +"added_by_lora_block_weight"+ str(random.random())
|
716 |
+
if len(errormodules) > 0:
|
717 |
+
print(errormodules)
|
718 |
+
return lora
|
719 |
+
|
720 |
+
LORAANDSOON = {
|
721 |
+
"LoraHadaModule" : "w1a",
|
722 |
+
"LycoHadaModule" : "w1a",
|
723 |
+
"FullModule" : "weight",
|
724 |
+
"IA3Module" : "w",
|
725 |
+
"LoraKronModule" : "w1",
|
726 |
+
"LycoKronModule" : "w1",
|
727 |
+
}
|
728 |
+
|
729 |
+
def hyphener(t):
|
730 |
+
t = t.split(" ")
|
731 |
+
for i,e in enumerate(t):
|
732 |
+
if "-" in e:
|
733 |
+
e = e.split("-")
|
734 |
+
if BLOCKID.index(e[1]) > BLOCKID.index(e[0]):
|
735 |
+
t[i] = " ".join(BLOCKID[BLOCKID.index(e[0]):BLOCKID.index(e[1])+1])
|
736 |
+
else:
|
737 |
+
t[i] = " ".join(BLOCKID[BLOCKID.index(e[1]):BLOCKID.index(e[0])+1])
|
738 |
+
return " ".join(t)
|
739 |
+
|
740 |
+
ELEMPRESETS="\
|
741 |
+
ATTNDEEPON:IN05-OUT05:attn:1\n\n\
|
742 |
+
ATTNDEEPOFF:IN05-OUT05:attn:0\n\n\
|
743 |
+
PROJDEEPOFF:IN05-OUT05:proj:0\n\n\
|
744 |
+
XYZ:::1"
|
extensions/stable-diffusion-webui-composable-lora/.gitignore
ADDED
@@ -0,0 +1,129 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
extensions/stable-diffusion-webui-composable-lora/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2023 opparco

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
extensions/stable-diffusion-webui-composable-lora/README.md
ADDED
@@ -0,0 +1,25 @@
# Composable LoRA
This extension replaces the built-in LoRA forward procedure.

## Features
### Compatible with Composable-Diffusion
By associating LoRA's insertion position in the prompt with "AND" syntax, LoRA's scope of influence is limited to a specific subprompt.

### Eliminate the impact on negative prompts
With the built-in LoRA, negative prompts are always affected by LoRA. This often has a negative impact on the output.
So this extension offers options to eliminate the negative effects.

## How to use
### Enabled
When checked, Composable LoRA is enabled.

### Use Lora in uc text model encoder
Enable LoRA for uncondition (negative prompt) text model encoder.
With this disabled, you can expect better output.

### Use Lora in uc diffusion model
Enable LoRA for uncondition (negative prompt) diffusion model (denoiser).
With this disabled, you can expect better output.

## compatibilities
--always-batch-cond-uncond must be enabled with --medvram or --lowvram
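Editor's note, not part of the README above: a brief usage illustration of the AND scoping it describes. The LoRA names below are made up, not shipped with the extension. With Composable LoRA enabled, a prompt such as

    a cat <lora:cat_style:0.8> AND a dog <lora:dog_style:0.7>

applies cat_style only to the first subprompt and dog_style only to the second, instead of both LoRAs influencing the whole image.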
extensions/stable-diffusion-webui-composable-lora/__pycache__/composable_lora.cpython-310.pyc
ADDED
Binary file (3.2 kB).
extensions/stable-diffusion-webui-composable-lora/composable_lora.py
ADDED
@@ -0,0 +1,165 @@
from typing import List, Dict
import re
import torch

from modules import extra_networks, shared

re_AND = re.compile(r"\bAND\b")


def load_prompt_loras(prompt: str):
    prompt_loras.clear()
    subprompts = re_AND.split(prompt)
    tmp_prompt_loras = []
    for i, subprompt in enumerate(subprompts):
        loras = {}
        _, extra_network_data = extra_networks.parse_prompt(subprompt)
        for params in extra_network_data['lora']:
            name = params.items[0]
            multiplier = float(params.items[1]) if len(params.items) > 1 else 1.0
            loras[name] = multiplier

        tmp_prompt_loras.append(loras)
    prompt_loras.extend(tmp_prompt_loras * num_batches)


def reset_counters():
    global text_model_encoder_counter
    global diffusion_model_counter

    # reset counter to uc head
    text_model_encoder_counter = -1
    diffusion_model_counter = 0


def lora_forward(compvis_module, input, res):
    global text_model_encoder_counter
    global diffusion_model_counter

    import lora

    if len(lora.loaded_loras) == 0:
        return res

    lora_layer_name: str | None = getattr(compvis_module, 'lora_layer_name', None)
    if lora_layer_name is None:
        return res

    num_loras = len(lora.loaded_loras)
    if text_model_encoder_counter == -1:
        text_model_encoder_counter = len(prompt_loras) * num_loras

    # print(f"lora.forward lora_layer_name={lora_layer_name} in.shape={input.shape} res.shape={res.shape} num_batches={num_batches} num_prompts={num_prompts}")

    for lora in lora.loaded_loras:
        module = lora.modules.get(lora_layer_name, None)
        if module is None:
            continue

        if shared.opts.lora_apply_to_outputs and res.shape == input.shape:
            patch = module.up(module.down(res))
        else:
            patch = module.up(module.down(input))

        alpha = module.alpha / module.up.weight.shape[1] if module.alpha else 1.0

        num_prompts = len(prompt_loras)

        # print(f"lora.name={lora.name} lora.mul={lora.multiplier} alpha={alpha} pat.shape={patch.shape}")

        if enabled:
            if lora_layer_name.startswith("transformer_"):  # "transformer_text_model_encoder_"
                #
                if 0 <= text_model_encoder_counter // num_loras < len(prompt_loras):
                    # c
                    loras = prompt_loras[text_model_encoder_counter // num_loras]
                    multiplier = loras.get(lora.name, 0.0)
                    if multiplier != 0.0:
                        # print(f"c #{text_model_encoder_counter // num_loras} lora.name={lora.name} mul={multiplier}")
                        res += multiplier * alpha * patch
                else:
                    # uc
                    if opt_uc_text_model_encoder and lora.multiplier != 0.0:
                        # print(f"uc #{text_model_encoder_counter // num_loras} lora.name={lora.name} lora.mul={lora.multiplier}")
                        res += lora.multiplier * alpha * patch

                if lora_layer_name.endswith("_11_mlp_fc2"):  # last lora_layer_name of text_model_encoder
                    text_model_encoder_counter += 1
                    # c1 c1 c2 c2 .. .. uc uc
                    if text_model_encoder_counter == (len(prompt_loras) + num_batches) * num_loras:
                        text_model_encoder_counter = 0

            elif lora_layer_name.startswith("diffusion_model_"):  # "diffusion_model_"

                if res.shape[0] == num_batches * num_prompts + num_batches:
                    # tensor.shape[1] == uncond.shape[1]
                    tensor_off = 0
                    uncond_off = num_batches * num_prompts
                    for b in range(num_batches):
                        # c
                        for p, loras in enumerate(prompt_loras):
                            multiplier = loras.get(lora.name, 0.0)
                            if multiplier != 0.0:
                                # print(f"tensor #{b}.{p} lora.name={lora.name} mul={multiplier}")
                                res[tensor_off] += multiplier * alpha * patch[tensor_off]
                            tensor_off += 1

                        # uc
                        if opt_uc_diffusion_model and lora.multiplier != 0.0:
                            # print(f"uncond lora.name={lora.name} lora.mul={lora.multiplier}")
                            res[uncond_off] += lora.multiplier * alpha * patch[uncond_off]
                        uncond_off += 1
                else:
                    # tensor.shape[1] != uncond.shape[1]
                    cur_num_prompts = res.shape[0]
                    base = (diffusion_model_counter // cur_num_prompts) // num_loras * cur_num_prompts
                    if 0 <= base < len(prompt_loras):
                        # c
                        for off in range(cur_num_prompts):
                            loras = prompt_loras[base + off]
                            multiplier = loras.get(lora.name, 0.0)
                            if multiplier != 0.0:
                                # print(f"c #{base + off} lora.name={lora.name} mul={multiplier}", lora_layer_name=lora_layer_name)
                                res[off] += multiplier * alpha * patch[off]
                    else:
                        # uc
                        if opt_uc_diffusion_model and lora.multiplier != 0.0:
                            # print(f"uc {lora_layer_name} lora.name={lora.name} lora.mul={lora.multiplier}")
                            res += lora.multiplier * alpha * patch

                    if lora_layer_name.endswith("_11_1_proj_out"):  # last lora_layer_name of diffusion_model
                        diffusion_model_counter += cur_num_prompts
                        # c1 c2 .. uc
                        if diffusion_model_counter >= (len(prompt_loras) + num_batches) * num_loras:
                            diffusion_model_counter = 0
            else:
                # default
                if lora.multiplier != 0.0:
                    # print(f"default {lora_layer_name} lora.name={lora.name} lora.mul={lora.multiplier}")
                    res += lora.multiplier * alpha * patch
        else:
            # default
            if lora.multiplier != 0.0:
                # print(f"DEFAULT {lora_layer_name} lora.name={lora.name} lora.mul={lora.multiplier}")
                res += lora.multiplier * alpha * patch

    return res


def lora_Linear_forward(self, input):
    return lora_forward(self, input, torch.nn.Linear_forward_before_lora(self, input))


def lora_Conv2d_forward(self, input):
    return lora_forward(self, input, torch.nn.Conv2d_forward_before_lora(self, input))


enabled = False
opt_uc_text_model_encoder = False
opt_uc_diffusion_model = False
verbose = True

num_batches: int = 0
prompt_loras: List[Dict[str, float]] = []
text_model_encoder_counter: int = -1
diffusion_model_counter: int = 0
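Editor's note, not part of the diff: to make the data flow above concrete, load_prompt_loras splits the positive prompt on AND, reads each subprompt's <lora:...> networks via extra_networks.parse_prompt, and stores one {name: multiplier} dict per subprompt, repeated once per batch. A hedged sketch of the resulting structure; the prompt and LoRA names are illustrative only, not from the repository.

```python
# Illustrative only -- the shape of prompt_loras after load_prompt_loras(),
# assuming num_batches == 2 and this hypothetical prompt.
prompt = "a cat <lora:cat_style:0.8> AND a dog <lora:dog_style:0.7>"

# composable_lora.load_prompt_loras(prompt) would then leave prompt_loras as:
expected = [
    {"cat_style": 0.8}, {"dog_style": 0.7},   # batch 1: one dict per AND subprompt
    {"cat_style": 0.8}, {"dog_style": 0.7},   # batch 2: same dicts repeated
]
```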
extensions/stable-diffusion-webui-composable-lora/scripts/__pycache__/composable_lora_script.cpython-310.pyc
ADDED
Binary file (2.33 kB).
extensions/stable-diffusion-webui-composable-lora/scripts/composable_lora_script.py
ADDED
@@ -0,0 +1,57 @@
#
# Composable-Diffusion with Lora
#
import torch
import gradio as gr

import composable_lora
import modules.scripts as scripts
from modules import script_callbacks
from modules.processing import StableDiffusionProcessing


def unload():
    torch.nn.Linear.forward = torch.nn.Linear_forward_before_lora
    torch.nn.Conv2d.forward = torch.nn.Conv2d_forward_before_lora


if not hasattr(torch.nn, 'Linear_forward_before_lora'):
    torch.nn.Linear_forward_before_lora = torch.nn.Linear.forward

if not hasattr(torch.nn, 'Conv2d_forward_before_lora'):
    torch.nn.Conv2d_forward_before_lora = torch.nn.Conv2d.forward

torch.nn.Linear.forward = composable_lora.lora_Linear_forward
torch.nn.Conv2d.forward = composable_lora.lora_Conv2d_forward

script_callbacks.on_script_unloaded(unload)


class ComposableLoraScript(scripts.Script):
    def title(self):
        return "Composable Lora"

    def show(self, is_img2img):
        return scripts.AlwaysVisible

    def ui(self, is_img2img):
        with gr.Group():
            with gr.Accordion("Composable Lora", open=False):
                enabled = gr.Checkbox(value=False, label="Enabled")
                opt_uc_text_model_encoder = gr.Checkbox(value=False, label="Use Lora in uc text model encoder")
                opt_uc_diffusion_model = gr.Checkbox(value=False, label="Use Lora in uc diffusion model")

        return [enabled, opt_uc_text_model_encoder, opt_uc_diffusion_model]

    def process(self, p: StableDiffusionProcessing, enabled: bool, opt_uc_text_model_encoder: bool, opt_uc_diffusion_model: bool):
        composable_lora.enabled = enabled
        composable_lora.opt_uc_text_model_encoder = opt_uc_text_model_encoder
        composable_lora.opt_uc_diffusion_model = opt_uc_diffusion_model

        composable_lora.num_batches = p.batch_size

        prompt = p.all_prompts[0]
        composable_lora.load_prompt_loras(prompt)

    def process_batch(self, p: StableDiffusionProcessing, *args, **kwargs):
        composable_lora.reset_counters()
extensions/stable-diffusion-webui-images-browser/.gitignore
ADDED
@@ -0,0 +1,6 @@
.DS_Store
path_recorder.txt
__pycache__
*.json
*.sqlite3
*.log
extensions/stable-diffusion-webui-images-browser/README.md
ADDED
@@ -0,0 +1,61 @@
## stable-diffusion-webui-images-browser

A custom extension for [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui).

This is an image browser for browsing past generated pictures, viewing their generation information, sending that information to txt2img, img2img and others, collecting images into your "favorites" folder and deleting the images you no longer need.

## Installation

The extension can be installed directly from within the **Extensions** tab within the Webui.

You can also install it manually by running the following command from within the webui directory:

    git clone https://github.com/AlUlkesh/stable-diffusion-webui-images-browser/ extensions/stable-diffusion-webui-images-browser

and restart your stable-diffusion-webui, then you can see the new tab "Image Browser".

Please be aware that when scanning a directory for the first time, the png-cache will be built. This can take several minutes, depending on the amount of images.

## Recent updates
- Image Reward scoring
- Size tooltip for thumbnails
- Optimized images in the thumbnail interface
- Send to ControlNet
- Hidable UI components
- Send to openOutpaint
- Regex search
- Maximum aesthetic_score filter
- Save ranking to EXIF option
- Maintenance tab
- Custom tabs
- Copy/Move to directory
- Keybindings
- Additional sorting and filtering by EXIF data including .txt file information
- Recycle bin option
- Add/Remove from saved directories, via buttons
- New dropdown with subdirs
- Option to not show the images from subdirs
- Refresh button
- Sort order
- View and save favorites with individual folder depth
- Now also supports jpg

Please also check the [discussions](https://github.com/AlUlkesh/stable-diffusion-webui-images-browser/discussions) for major update information.

## Keybindings
| Key | Explanation |
|---------|-------------|
| `0-5` | Ranks the current image, with 0 being the last option (None) |
| `F` | Adds the current image to Favorites |
| `R` | Refreshes the image gallery |
| `Delete` | Deletes the current image |
| `Ctrl + Arrow Left` | Goes to the previous page of images |
| `Ctrl + Arrow Right` | Goes to the next page of images |

(Ctrl can be changed in settings)

## Credit

Credit goes to the original maintainer of this extension: https://github.com/yfszzx and to major contributors https://github.com/Klace and https://github.com/EllangoK

Image Reward: https://github.com/THUDM/ImageReward
extensions/stable-diffusion-webui-images-browser/install.py
ADDED
@@ -0,0 +1,9 @@
import launch
import os

if not launch.is_installed("send2trash"):
    launch.run_pip("install Send2Trash", "Send2Trash requirement for image browser")

if not launch.is_installed("ImageReward"):
    req_IR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "req_IR.txt")
    launch.run_pip(f'install -r "{req_IR}" --no-deps image-reward', 'ImageReward requirement for image browser')
extensions/stable-diffusion-webui-images-browser/javascript/image_browser.js
ADDED
@@ -0,0 +1,770 @@
1 |
+
let image_browser_state = "free"
|
2 |
+
let image_browser_webui_ready = false
|
3 |
+
let image_browser_started = false
|
4 |
+
let image_browser_console_log = ""
|
5 |
+
let image_browser_debug = false
|
6 |
+
let image_browser_img_show_in_progress = false
|
7 |
+
|
8 |
+
function image_browser_delay(ms){return new Promise(resolve => setTimeout(resolve, ms))}
|
9 |
+
|
10 |
+
onUiLoaded(image_browser_start_it_up)
|
11 |
+
|
12 |
+
async function image_browser_wait_for_webui() {
|
13 |
+
if (image_browser_debug) console.log("image_browser_wait_for_webui:start")
|
14 |
+
await image_browser_delay(500)
|
15 |
+
sd_model = gradioApp().getElementById("setting_sd_model_checkpoint")
|
16 |
+
if (!sd_model.querySelector(".eta-bar")) {
|
17 |
+
image_browser_webui_ready = true
|
18 |
+
image_browser_start()
|
19 |
+
} else {
|
20 |
+
// Set timeout for MutationObserver
|
21 |
+
const startTime = Date.now()
|
22 |
+
// 40 seconds in milliseconds
|
23 |
+
const timeout = 40000
|
24 |
+
const webuiObserver = new MutationObserver(function(mutationsList) {
|
25 |
+
if (image_browser_debug) console.log("webuiObserver:start")
|
26 |
+
let found = false
|
27 |
+
outerLoop: for (let i = 0; i < mutationsList.length; i++) {
|
28 |
+
let mutation = mutationsList[i];
|
29 |
+
if (mutation.type === "childList") {
|
30 |
+
for (let j = 0; j < mutation.removedNodes.length; j++) {
|
31 |
+
let node = mutation.removedNodes[j];
|
32 |
+
if (node.nodeType === Node.ELEMENT_NODE && node.classList.contains("eta-bar")) {
|
33 |
+
found = true
|
34 |
+
break outerLoop;
|
35 |
+
}
|
36 |
+
}
|
37 |
+
}
|
38 |
+
}
|
39 |
+
if (found || (Date.now() - startTime > timeout)) {
|
40 |
+
image_browser_webui_ready = true
|
41 |
+
webuiObserver.disconnect()
|
42 |
+
if (image_browser_debug) console.log("webuiObserver:end")
|
43 |
+
image_browser_start()
|
44 |
+
}
|
45 |
+
})
|
46 |
+
webuiObserver.observe(gradioApp(), { childList:true, subtree:true })
|
47 |
+
}
|
48 |
+
if (image_browser_debug) console.log("image_browser_wait_for_webui:end")
|
49 |
+
}
|
50 |
+
|
51 |
+
async function image_browser_start_it_up() {
|
52 |
+
if (image_browser_debug) console.log("image_browser_start_it_up:start")
|
53 |
+
container = gradioApp().getElementById("image_browser_tabs_container")
|
54 |
+
let controls = container.querySelectorAll('[id*="_control_"]')
|
55 |
+
controls.forEach(function(control) {
|
56 |
+
control.style.pointerEvents = "none"
|
57 |
+
control.style.cursor = "not-allowed"
|
58 |
+
control.style.opacity = "0.65"
|
59 |
+
})
|
60 |
+
let warnings = container.querySelectorAll('[id*="_warning_box"]')
|
61 |
+
warnings.forEach(function(warning) {
|
62 |
+
warning.innerHTML = '<p style="font-weight: bold;">Waiting for webui...'
|
63 |
+
})
|
64 |
+
|
65 |
+
image_browser_wait_for_webui()
|
66 |
+
if (image_browser_debug) console.log("image_browser_start_it_up:end")
|
67 |
+
}
|
68 |
+
|
69 |
+
async function image_browser_lock(reason) {
|
70 |
+
if (image_browser_debug) console.log("image_browser_lock:start")
|
71 |
+
// Wait until lock removed
|
72 |
+
let i = 0
|
73 |
+
while (image_browser_state != "free") {
|
74 |
+
await image_browser_delay(200)
|
75 |
+
i = i + 1
|
76 |
+
if (i === 150) {
|
77 |
+
throw new Error("Still locked after 30 seconds. Please Reload UI.")
|
78 |
+
}
|
79 |
+
}
|
80 |
+
// Lock
|
81 |
+
image_browser_state = reason
|
82 |
+
if (image_browser_debug) console.log("image_browser_lock:end")
|
83 |
+
}
|
84 |
+
|
85 |
+
async function image_browser_unlock() {
|
86 |
+
if (image_browser_debug) console.log("image_browser_unlock:start")
|
87 |
+
image_browser_state = "free"
|
88 |
+
if (image_browser_debug) console.log("image_browser_unlock:end")
|
89 |
+
}
|
90 |
+
|
91 |
+
const image_browser_click_image = async function() {
|
92 |
+
if (image_browser_debug) console.log("image_browser_click_image:start")
|
93 |
+
await image_browser_lock("image_browser_click_image")
|
94 |
+
const tab_base_tag = image_browser_current_tab()
|
95 |
+
const container = gradioApp().getElementById(tab_base_tag + "_image_browser_container")
|
96 |
+
let child = this
|
97 |
+
let index = 0
|
98 |
+
while((child = child.previousSibling) != null) {
|
99 |
+
index = index + 1
|
100 |
+
}
|
101 |
+
const set_btn = container.querySelector(".image_browser_set_index")
|
102 |
+
let curr_idx
|
103 |
+
try {
|
104 |
+
curr_idx = set_btn.getAttribute("img_index")
|
105 |
+
} catch (e) {
|
106 |
+
curr_idx = -1
|
107 |
+
}
|
108 |
+
if (curr_idx != index) {
|
109 |
+
set_btn.setAttribute("img_index", index)
|
110 |
+
}
|
111 |
+
await image_browser_unlock()
|
112 |
+
set_btn.click()
|
113 |
+
if (image_browser_debug) console.log("image_browser_click_image:end")
|
114 |
+
}
|
115 |
+
|
116 |
+
async function image_browser_get_current_img(tab_base_tag, img_index, page_index, filenames, turn_page_switch, image_gallery) {
|
117 |
+
if (image_browser_debug) console.log("image_browser_get_current_img:start")
|
118 |
+
await image_browser_lock("image_browser_get_current_img")
|
119 |
+
img_index = gradioApp().getElementById(tab_base_tag + '_image_browser_set_index').getAttribute("img_index")
|
120 |
+
gradioApp().dispatchEvent(new Event("image_browser_get_current_img"))
|
121 |
+
await image_browser_unlock()
|
122 |
+
if (image_browser_debug) console.log("image_browser_get_current_img:end")
|
123 |
+
return [
|
124 |
+
tab_base_tag,
|
125 |
+
img_index,
|
126 |
+
page_index,
|
127 |
+
filenames,
|
128 |
+
turn_page_switch,
|
129 |
+
image_gallery
|
130 |
+
]
|
131 |
+
}
|
132 |
+
|
133 |
+
async function image_browser_refresh_current_page_preview() {
|
134 |
+
if (image_browser_debug) console.log("image_browser_refresh_current_page_preview:start")
|
135 |
+
await image_browser_delay(200)
|
136 |
+
const preview_div = gradioApp().querySelector('.preview')
|
137 |
+
if (preview_div === null) {
|
138 |
+
if (image_browser_debug) console.log("image_browser_refresh_current_page_preview:end")
|
139 |
+
return
|
140 |
+
}
|
141 |
+
const tab_base_tag = image_browser_current_tab()
|
142 |
+
const gallery = gradioApp().querySelector(`#${tab_base_tag}_image_browser`)
|
143 |
+
const set_btn = gallery.querySelector(".image_browser_set_index")
|
144 |
+
const curr_idx = parseInt(set_btn.getAttribute("img_index"))
|
145 |
+
// no loading animation, so click immediately
|
146 |
+
const gallery_items = gallery.querySelectorAll(".thumbnail-item")
|
147 |
+
const curr_image = gallery_items[curr_idx]
|
148 |
+
curr_image.click()
|
149 |
+
if (image_browser_debug) console.log("image_browser_refresh_current_page_preview:end")
|
150 |
+
}
|
151 |
+
|
152 |
+
async function image_browser_turnpage(tab_base_tag) {
|
153 |
+
if (image_browser_debug) console.log("image_browser_turnpage:start")
|
154 |
+
while (!image_browser_started) {
|
155 |
+
await image_browser_delay(200)
|
156 |
+
}
|
157 |
+
const gallery = gradioApp().querySelector(`#${tab_base_tag}_image_browser`)
|
158 |
+
let clear
|
159 |
+
try {
|
160 |
+
clear = gallery.querySelector("button[aria-label='Clear']")
|
161 |
+
if (clear) {
|
162 |
+
clear.click()
|
163 |
+
}
|
164 |
+
} catch (e) {
|
165 |
+
console.error(e)
|
166 |
+
}
|
167 |
+
if (image_browser_debug) console.log("image_browser_turnpage:end")
|
168 |
+
}
|
169 |
+
|
170 |
+
const image_browser_get_current_img_handler = (del_img_btn) => {
|
171 |
+
if (image_browser_debug) console.log("image_browser_get_current_img_handler:start")
|
172 |
+
// Prevent delete button spam
|
173 |
+
del_img_btn.style.pointerEvents = "auto"
|
174 |
+
del_img_btn.style.cursor = "default"
|
175 |
+
del_img_btn.style.opacity = "1"
|
176 |
+
if (image_browser_debug) console.log("image_browser_get_current_img_handler:end")
|
177 |
+
}
|
178 |
+
|
179 |
+
async function image_browser_select_image(tab_base_tag, img_index, select_image) {
|
180 |
+
if (image_browser_debug) console.log("image_browser_select_image:start")
|
181 |
+
if (select_image) {
|
182 |
+
await image_browser_lock("image_browser_select_image")
|
183 |
+
const del_img_btn = gradioApp().getElementById(tab_base_tag + "_image_browser_del_img_btn")
|
184 |
+
// Prevent delete button spam
|
185 |
+
del_img_btn.style.pointerEvents = "none"
|
186 |
+
del_img_btn.style.cursor = "not-allowed"
|
187 |
+
del_img_btn.style.opacity = "0.65"
|
188 |
+
|
189 |
+
const gallery = gradioApp().getElementById(tab_base_tag + "_image_browser_gallery")
|
190 |
+
const gallery_items = gallery.querySelectorAll(".thumbnail-item")
|
191 |
+
if (img_index >= gallery_items.length || gallery_items.length == 0) {
|
192 |
+
const refreshBtn = gradioApp().getElementById(tab_base_tag + "_image_browser_renew_page")
|
193 |
+
refreshBtn.dispatchEvent(new Event("click"))
|
194 |
+
} else {
|
195 |
+
const curr_image = gallery_items[img_index]
|
196 |
+
curr_image.click()
|
197 |
+
}
|
198 |
+
await image_browser_unlock()
|
199 |
+
|
200 |
+
// Prevent delete button spam
|
201 |
+
gradioApp().removeEventListener("image_browser_get_current_img", () => image_browser_get_current_img_handler(del_img_btn))
|
202 |
+
gradioApp().addEventListener("image_browser_get_current_img", () => image_browser_get_current_img_handler(del_img_btn))
|
203 |
+
}
|
204 |
+
if (image_browser_debug) console.log("image_browser_select_image:end")
|
205 |
+
}
|
206 |
+
|
207 |
+
async function image_browser_gototab(tabname) {
|
208 |
+
if (image_browser_debug) console.log("image_browser_gototab:start")
|
209 |
+
await image_browser_lock("image_browser_gototab")
|
210 |
+
|
211 |
+
tabNav = gradioApp().querySelector(".tab-nav")
|
212 |
+
const tabNavChildren = tabNav.children
|
213 |
+
let tabNavButtonNum
|
214 |
+
if (typeof tabname === "number") {
|
215 |
+
let buttonCnt = 0
|
216 |
+
for (let i = 0; i < tabNavChildren.length; i++) {
|
217 |
+
if (tabNavChildren[i].tagName === "BUTTON") {
|
218 |
+
if (buttonCnt === tabname) {
|
219 |
+
tabNavButtonNum = i
|
220 |
+
break
|
221 |
+
}
|
222 |
+
buttonCnt++
|
223 |
+
}
|
224 |
+
}
|
225 |
+
} else {
|
226 |
+
for (let i = 0; i < tabNavChildren.length; i++) {
|
227 |
+
if (tabNavChildren[i].tagName === "BUTTON" && tabNavChildren[i].textContent.trim() === tabname) {
|
228 |
+
tabNavButtonNum = i
|
229 |
+
break
|
230 |
+
}
|
231 |
+
}
|
232 |
+
}
|
233 |
+
let tabNavButton = tabNavChildren[tabNavButtonNum]
|
234 |
+
tabNavButton.click()
|
235 |
+
|
236 |
+
// Wait for click-action to complete
|
237 |
+
const startTime = Date.now()
|
238 |
+
// 60 seconds in milliseconds
|
239 |
+
const timeout = 60000
|
240 |
+
|
241 |
+
await image_browser_delay(100)
|
242 |
+
while (!tabNavButton.classList.contains("selected")) {
|
243 |
+
tabNavButton = tabNavChildren[tabNavButtonNum]
|
244 |
+
if (Date.now() - startTime > timeout) {
|
245 |
+
throw new Error("image_browser_gototab: 60 seconds have passed")
|
246 |
+
}
|
247 |
+
await image_browser_delay(200)
|
248 |
+
}
|
249 |
+
|
250 |
+
await image_browser_unlock()
|
251 |
+
if (image_browser_debug) console.log("image_browser_gototab:end")
|
252 |
+
}
|
253 |
+
|
254 |
+
async function image_browser_get_image_for_ext(tab_base_tag, image_index) {
|
255 |
+
if (image_browser_debug) console.log("image_browser_get_image_for_ext:start")
|
256 |
+
const image_browser_image = gradioApp().querySelectorAll(`#${tab_base_tag}_image_browser_gallery .thumbnail-item`)[image_index]
|
257 |
+
|
258 |
+
const canvas = document.createElement("canvas")
|
259 |
+
const image = document.createElement("img")
|
260 |
+
image.src = image_browser_image.querySelector("img").src
|
261 |
+
|
262 |
+
await image.decode()
|
263 |
+
|
264 |
+
canvas.width = image.width
|
265 |
+
canvas.height = image.height
|
266 |
+
|
267 |
+
canvas.getContext("2d").drawImage(image, 0, 0)
|
268 |
+
|
269 |
+
if (image_browser_debug) console.log("image_browser_get_image_for_ext:end")
|
270 |
+
return canvas.toDataURL()
|
271 |
+
}
|
272 |
+
|
273 |
+
function image_browser_openoutpaint_send(tab_base_tag, image_index, image_browser_prompt, image_browser_neg_prompt, name = "WebUI Resource") {
|
274 |
+
if (image_browser_debug) console.log("image_browser_openoutpaint_send:start")
|
275 |
+
image_browser_get_image_for_ext(tab_base_tag, image_index)
|
276 |
+
.then((dataURL) => {
|
277 |
+
// Send to openOutpaint
|
278 |
+
openoutpaint_send_image(dataURL, name)
|
279 |
+
|
280 |
+
// Send prompt to openOutpaint
|
281 |
+
const tab = get_uiCurrentTabContent().id
|
282 |
+
|
283 |
+
const prompt = image_browser_prompt
|
284 |
+
const negPrompt = image_browser_neg_prompt
|
285 |
+
openoutpaint.frame.contentWindow.postMessage({
|
286 |
+
key: openoutpaint.key,
|
287 |
+
type: "openoutpaint/set-prompt",
|
288 |
+
prompt,
|
289 |
+
negPrompt,
|
290 |
+
})
|
291 |
+
|
292 |
+
// Change Tab
|
293 |
+
image_browser_gototab("openOutpaint")
|
294 |
+
})
|
295 |
+
if (image_browser_debug) console.log("image_browser_openoutpaint_send:end")
|
296 |
+
}
|
297 |
+
|
298 |
+
async function image_browser_controlnet_send(toTabNum, tab_base_tag, image_index, controlnetNum, controlnetType) {
|
299 |
+
if (image_browser_debug) console.log("image_browser_controlnet_send:start")
|
300 |
+
// Logic originally based on github.com/fkunn1326/openpose-editor
|
301 |
+
const dataURL = await image_browser_get_image_for_ext(tab_base_tag, image_index)
|
302 |
+
const blob = await (await fetch(dataURL)).blob()
|
303 |
+
const dt = new DataTransfer()
|
304 |
+
dt.items.add(new File([blob], "ImageBrowser.png", { type: blob.type }))
|
305 |
+
const list = dt.files
|
306 |
+
|
307 |
+
await image_browser_gototab(toTabNum)
|
308 |
+
const current_tabid = image_browser_webui_current_tab()
|
309 |
+
const current_tab = current_tabid.replace("tab_", "")
|
310 |
+
const tab_controlnet = gradioApp().getElementById(current_tab + "_controlnet")
|
311 |
+
let accordion = tab_controlnet.querySelector("#controlnet > .label-wrap > .icon")
|
312 |
+
if (accordion.style.transform.includes("rotate(90deg)")) {
|
313 |
+
accordion.click()
|
314 |
+
// Wait for click-action to complete
|
315 |
+
const startTime = Date.now()
|
316 |
+
// 60 seconds in milliseconds
|
317 |
+
const timeout = 60000
|
318 |
+
|
319 |
+
await image_browser_delay(100)
|
320 |
+
while (accordion.style.transform.includes("rotate(90deg)")) {
|
321 |
+
accordion = tab_controlnet.querySelector("#controlnet > .label-wrap > .icon")
|
322 |
+
if (Date.now() - startTime > timeout) {
|
323 |
+
throw new Error("image_browser_controlnet_send/accordion: 60 seconds have passed")
|
324 |
+
}
|
325 |
+
await image_browser_delay(200)
|
326 |
+
}
|
327 |
+
}
|
328 |
+
|
329 |
+
let inputImage
|
330 |
+
let inputContainer
|
331 |
+
if (controlnetType == "single") {
|
332 |
+
inputImage = gradioApp().getElementById(current_tab + "_controlnet_ControlNet_input_image")
|
333 |
+
} else {
|
334 |
+
const tabs = gradioApp().getElementById(current_tab + "_controlnet_tabs")
|
335 |
+
const tab_num = (parseInt(controlnetNum) + 1).toString()
|
336 |
+
tab_button = tabs.querySelector(".tab-nav > button:nth-child(" + tab_num + ")")
|
337 |
+
tab_button.click()
|
338 |
+
// Wait for click-action to complete
|
339 |
+
const startTime = Date.now()
|
340 |
+
// 60 seconds in milliseconds
|
341 |
+
const timeout = 60000
|
342 |
+
|
343 |
+
await image_browser_delay(100)
|
344 |
+
while (!tab_button.classList.contains("selected")) {
|
345 |
+
tab_button = tabs.querySelector(".tab-nav > button:nth-child(" + tab_num + ")")
|
346 |
+
if (Date.now() - startTime > timeout) {
|
347 |
+
throw new Error("image_browser_controlnet_send/tabs: 60 seconds have passed")
|
348 |
+
}
|
349 |
+
await image_browser_delay(200)
|
350 |
+
}
|
351 |
+
inputImage = gradioApp().getElementById(current_tab + "_controlnet_ControlNet-" + controlnetNum.toString() + "_input_image")
|
352 |
+
}
|
353 |
+
try {
|
354 |
+
inputContainer = inputImage.querySelector('div[data-testid="image"]')
|
355 |
+
} catch (e) {}
|
356 |
+
|
357 |
+
const input = inputContainer.querySelector("input[type='file']")
|
358 |
+
|
359 |
+
let clear
|
360 |
+
try {
|
361 |
+
clear = inputContainer.querySelector("button[aria-label='Clear']")
|
362 |
+
if (clear) {
|
363 |
+
clear.click()
|
364 |
+
}
|
365 |
+
} catch (e) {
|
366 |
+
console.error(e)
|
367 |
+
}
|
368 |
+
|
369 |
+
try {
|
370 |
+
// Wait for click-action to complete
|
371 |
+
const startTime = Date.now()
|
372 |
+
// 60 seconds in milliseconds
|
373 |
+
const timeout = 60000
|
374 |
+
while (clear) {
|
375 |
+
clear = inputContainer.querySelector("button[aria-label='Clear']")
|
376 |
+
if (Date.now() - startTime > timeout) {
|
377 |
+
throw new Error("image_browser_controlnet_send/clear: 60 seconds have passed")
|
378 |
+
}
|
379 |
+
await image_browser_delay(200)
|
380 |
+
}
|
381 |
+
} catch (e) {
|
382 |
+
console.error(e)
|
383 |
+
}
|
384 |
+
|
385 |
+
input.value = ""
|
386 |
+
input.files = list
|
387 |
+
const event = new Event("change", { "bubbles": true, "composed": true })
|
388 |
+
input.dispatchEvent(event)
|
389 |
+
if (image_browser_debug) console.log("image_browser_controlnet_send:end")
|
390 |
+
}
|
391 |
+
|
392 |
+
function image_browser_controlnet_send_txt2img(tab_base_tag, image_index, controlnetNum, controlnetType) {
|
393 |
+
image_browser_controlnet_send(0, tab_base_tag, image_index, controlnetNum, controlnetType)
|
394 |
+
}
|
395 |
+
|
396 |
+
function image_browser_controlnet_send_img2img(tab_base_tag, image_index, controlnetNum, controlnetType) {
|
397 |
+
image_browser_controlnet_send(1, tab_base_tag, image_index, controlnetNum, controlnetType)
|
398 |
+
}
|
399 |
+
|
400 |
+
function image_browser_class_add(tab_base_tag) {
|
401 |
+
gradioApp().getElementById(tab_base_tag + '_image_browser').classList.add("image_browser_container")
|
402 |
+
gradioApp().getElementById(tab_base_tag + '_image_browser_set_index').classList.add("image_browser_set_index")
|
403 |
+
gradioApp().getElementById(tab_base_tag + '_image_browser_del_img_btn').classList.add("image_browser_del_img_btn")
|
404 |
+
gradioApp().getElementById(tab_base_tag + '_image_browser_gallery').classList.add("image_browser_gallery")
|
405 |
+
}
|
406 |
+
|
407 |
+
function btnClickHandler(tab_base_tag, btn) {
|
408 |
+
if (image_browser_debug) console.log("btnClickHandler:start")
|
409 |
+
const tabs_box = gradioApp().getElementById("image_browser_tabs_container")
|
410 |
+
if (!tabs_box.classList.contains(tab_base_tag)) {
|
411 |
+
gradioApp().getElementById(tab_base_tag + "_image_browser_renew_page").click()
|
412 |
+
tabs_box.classList.add(tab_base_tag)
|
413 |
+
}
|
414 |
+
if (image_browser_debug) console.log("btnClickHandler:end")
|
415 |
+
}
|
416 |
+
|
417 |
+
function image_browser_init() {
|
418 |
+
if (image_browser_debug) console.log("image_browser_init:start")
|
419 |
+
const tab_base_tags = gradioApp().getElementById("image_browser_tab_base_tags_list")
|
420 |
+
if (tab_base_tags) {
|
421 |
+
const image_browser_tab_base_tags_list = tab_base_tags.querySelector("textarea").value.split(",")
|
422 |
+
image_browser_tab_base_tags_list.forEach(function(tab_base_tag) {
|
423 |
+
image_browser_class_add(tab_base_tag)
|
424 |
+
})
|
425 |
+
|
426 |
+
const tab_btns = gradioApp().getElementById("image_browser_tabs_container").querySelector("div").querySelectorAll("button")
|
427 |
+
tab_btns.forEach(function(btn, i) {
|
428 |
+
const tab_base_tag = image_browser_tab_base_tags_list[i]
|
429 |
+
btn.setAttribute("tab_base_tag", tab_base_tag)
|
430 |
+
btn.removeEventListener('click', () => btnClickHandler(tab_base_tag, btn))
|
431 |
+
btn.addEventListener('click', () => btnClickHandler(tab_base_tag, btn))
|
432 |
+
})
|
433 |
+
//preload
|
434 |
+
if (gradioApp().getElementById("image_browser_preload").querySelector("input").checked) {
|
435 |
+
setTimeout(function(){tab_btns[0].click()}, 100)
|
436 |
+
}
|
437 |
+
}
|
438 |
+
image_browser_keydown()
|
439 |
+
|
440 |
+
const image_browser_swipe = gradioApp().getElementById("image_browser_swipe").getElementsByTagName("input")[0]
|
441 |
+
if (image_browser_swipe.checked) {
|
442 |
+
image_browser_touch()
|
443 |
+
}
|
444 |
+
if (image_browser_debug) console.log("image_browser_init:end")
|
445 |
+
}
|
446 |
+
|
447 |
+
async function image_browser_wait_for_gallery_btn(tab_base_tag){
|
448 |
+
if (image_browser_debug) console.log("image_browser_wait_for_gallery_btn:start")
|
449 |
+
await image_browser_delay(100)
|
450 |
+
while (!gradioApp().getElementById(image_browser_current_tab() + "_image_browser_gallery").getElementsByClassName("thumbnail-item")) {
|
451 |
+
await image_browser_delay(200)
|
452 |
+
}
|
453 |
+
if (image_browser_debug) console.log("image_browser_wait_for_gallery_btn:end")
|
454 |
+
}
|
455 |
+
|
456 |
+
function image_browser_hijack_console_log() {
|
457 |
+
(function () {
|
458 |
+
const oldLog = console.log
|
459 |
+
console.log = function (message) {
|
460 |
+
const formattedTime = new Date().toISOString().slice(0, -5).replace(/[TZ]/g, ' ').trim().replace(/\s+/g, '-').replace(/:/g, '-')
|
461 |
+
image_browser_console_log = image_browser_console_log + formattedTime + " " + "image_browser.js: " + message + "\n"
|
462 |
+
oldLog.apply(console, arguments)
|
463 |
+
}
|
464 |
+
})()
|
465 |
+
image_browser_debug = true
|
466 |
+
}
|
467 |
+
|
468 |
+
function get_js_logs() {
|
469 |
+
log_to_py = image_browser_console_log
|
470 |
+
image_browser_console_log = ""
|
471 |
+
return log_to_py
|
472 |
+
}
|
473 |
+
|
474 |
+
function isNumeric(str) {
|
475 |
+
if (typeof str != "string") return false
|
476 |
+
return !isNaN(str) && !isNaN(parseFloat(str))
|
477 |
+
}
|
478 |
+
|
479 |
+
function image_browser_start() {
|
480 |
+
if (image_browser_debug) console.log("image_browser_start:start")
|
481 |
+
image_browser_init()
|
482 |
+
const mutationObserver = new MutationObserver(function(mutationsList) {
|
483 |
+
const tab_base_tags = gradioApp().getElementById("image_browser_tab_base_tags_list")
|
484 |
+
if (tab_base_tags) {
|
485 |
+
const image_browser_tab_base_tags_list = tab_base_tags.querySelector("textarea").value.split(",")
|
486 |
+
image_browser_tab_base_tags_list.forEach(function(tab_base_tag) {
|
487 |
+
image_browser_class_add(tab_base_tag)
|
488 |
+
const tab_gallery_items = gradioApp().querySelectorAll('#' + tab_base_tag + '_image_browser .thumbnail-item')
|
489 |
+
|
490 |
+
const image_browser_img_info_json = gradioApp().getElementById(tab_base_tag + "_image_browser_img_info").querySelector('[data-testid="textbox"]').value
|
491 |
+
const image_browser_img_info = JSON.parse(image_browser_img_info_json)
|
492 |
+
const filenames = Object.keys(image_browser_img_info)
|
493 |
+
|
494 |
+
tab_gallery_items.forEach(function(gallery_item, i) {
|
495 |
+
gallery_item.removeEventListener('click', image_browser_click_image, true)
|
496 |
+
gallery_item.addEventListener('click', image_browser_click_image, true)
|
497 |
+
|
498 |
+
const filename = filenames[i]
|
499 |
+
try {
|
500 |
+
let x = image_browser_img_info[filename].x
|
501 |
+
let y = image_browser_img_info[filename].y
|
502 |
+
if (isNumeric(x) && isNumeric(y)) {
|
503 |
+
gallery_item.title = x + "x" + y
|
504 |
+
}
|
505 |
+
} catch (e) {}
|
506 |
+
|
507 |
+
document.onkeyup = async function(e) {
|
508 |
+
if (!image_browser_active()) {
|
509 |
+
if (image_browser_debug) console.log("image_browser_start:end")
|
510 |
+
return
|
511 |
+
}
|
512 |
+
const current_tab = image_browser_current_tab()
|
513 |
+
image_browser_wait_for_gallery_btn(current_tab).then(() => {
|
514 |
+
let gallery_btn
|
515 |
+
gallery_btn = gradioApp().getElementById(current_tab + "_image_browser_gallery").querySelector(".thumbnail-item .selected")
|
516 |
+
gallery_btn = gallery_btn && gallery_btn.length > 0 ? gallery_btn[0] : null
|
517 |
+
if (gallery_btn) {
|
518 |
+
image_browser_click_image.call(gallery_btn)
|
519 |
+
}
|
520 |
+
})
|
521 |
+
}
|
522 |
+
})
|
523 |
+
|
524 |
+
const cls_btn = gradioApp().getElementById(tab_base_tag + '_image_browser_gallery').querySelector("svg")
|
525 |
+
if (cls_btn) {
|
526 |
+
cls_btn.removeEventListener('click', () => image_browser_renew_page(tab_base_tag), false)
|
527 |
+
cls_btn.addEventListener('click', () => image_browser_renew_page(tab_base_tag), false)
|
528 |
+
}
|
529 |
+
})
|
530 |
+
const debug_level_option = gradioApp().getElementById("image_browser_debug_level_option").querySelector("textarea").value
|
531 |
+
if ((debug_level_option == 'javascript' || debug_level_option == 'capture') && !image_browser_debug) {
|
532 |
+
image_browser_hijack_console_log()
|
533 |
+
}
|
534 |
+
}
|
535 |
+
})
|
536 |
+
mutationObserver.observe(gradioApp(), { childList:true, subtree:true })
|
537 |
+
image_browser_started = true
|
538 |
+
image_browser_activate_controls()
|
539 |
+
if (image_browser_debug) console.log("image_browser_start:end")
|
540 |
+
}
|
541 |
+
|
542 |
+
async function image_browser_activate_controls() {
|
543 |
+
if (image_browser_debug) console.log("image_browser_activate_controls:start")
|
544 |
+
await image_browser_delay(500)
|
545 |
+
container = gradioApp().getElementById("image_browser_tabs_container")
|
546 |
+
let controls = container.querySelectorAll('[id*="_control_"]')
|
547 |
+
controls.forEach(function(control) {
|
548 |
+
control.style.pointerEvents = "auto"
|
549 |
+
control.style.cursor = "default"
|
550 |
+
control.style.opacity = "1"
|
551 |
+
})
|
552 |
+
let warnings = container.querySelectorAll('[id*="_warning_box"]')
|
553 |
+
warnings.forEach(function(warning) {
|
554 |
+
warning.innerHTML = "<p> "
|
555 |
+
})
|
556 |
+
if (image_browser_debug) console.log("image_browser_activate_controls:end")
|
557 |
+
}
|
558 |
+
|
559 |
+
function image_browser_img_show_progress_update() {
|
560 |
+
image_browser_img_show_in_progress = false
|
561 |
+
}
|
562 |
+
|
563 |
+
function image_browser_renew_page(tab_base_tag) {
|
564 |
+
if (image_browser_debug) console.log("image_browser_renew_page:start")
|
565 |
+
gradioApp().getElementById(tab_base_tag + '_image_browser_renew_page').click()
|
566 |
+
if (image_browser_debug) console.log("image_browser_renew_page:end")
|
567 |
+
}
|
568 |
+
|
569 |
+
function image_browser_current_tab() {
|
570 |
+
if (image_browser_debug) console.log("image_browser_current_tab:start")
|
571 |
+
const tabs = gradioApp().getElementById("image_browser_tabs_container").querySelectorAll('[id$="_image_browser_container"]')
|
572 |
+
const tab_base_tags = gradioApp().getElementById("image_browser_tab_base_tags_list")
|
573 |
+
const image_browser_tab_base_tags_list = tab_base_tags.querySelector("textarea").value.split(",").sort((a, b) => b.length - a.length)
|
574 |
+
for (const element of tabs) {
|
575 |
+
if (element.style.display === "block") {
|
576 |
+
const id = element.id
|
577 |
+
const tab_base_tag = image_browser_tab_base_tags_list.find(element => id.startsWith(element)) || null
|
578 |
+
if (image_browser_debug) console.log("image_browser_current_tab:end")
|
579 |
+
return tab_base_tag
|
580 |
+
}
|
581 |
+
}
|
582 |
+
if (image_browser_debug) console.log("image_browser_current_tab:end")
|
583 |
+
}
|
584 |
+
|
585 |
+
function image_browser_webui_current_tab() {
|
586 |
+
if (image_browser_debug) console.log("image_browser_webui_current_tab:start")
|
587 |
+
const tabs = gradioApp().getElementById("tabs").querySelectorAll('[id^="tab_"]')
|
588 |
+
let id
|
589 |
+
for (const element of tabs) {
|
590 |
+
if (element.style.display === "block") {
|
591 |
+
id = element.id
|
592 |
+
break
|
593 |
+
}
|
594 |
+
}
|
595 |
+
if (image_browser_debug) console.log("image_browser_webui_current_tab:end")
|
596 |
+
return id
|
597 |
+
}
|
598 |
+
|
599 |
+
function image_browser_active() {
|
600 |
+
if (image_browser_debug) console.log("image_browser_active:start")
|
601 |
+
const ext_active = gradioApp().getElementById("tab_image_browser")
|
602 |
+
if (image_browser_debug) console.log("image_browser_active:end")
|
603 |
+
return ext_active && ext_active.style.display !== "none"
|
604 |
+
}
|
605 |
+
|
606 |
+
async function image_browser_delete_key(tab_base_tag) {
|
607 |
+
// Wait for img_show to end
|
608 |
+
const startTime = Date.now()
|
609 |
+
// 60 seconds in milliseconds
|
610 |
+
const timeout = 60000
|
611 |
+
|
612 |
+
await image_browser_delay(100)
|
613 |
+
while (image_browser_img_show_in_progress) {
|
614 |
+
if (Date.now() - startTime > timeout) {
|
615 |
+
throw new Error("image_browser_delete_key: 60 seconds have passed")
|
616 |
+
}
|
617 |
+
await image_browser_delay(200)
|
618 |
+
}
|
619 |
+
|
620 |
+
const deleteBtn = gradioApp().getElementById(tab_base_tag + "_image_browser_del_img_btn")
|
621 |
+
deleteBtn.dispatchEvent(new Event("click"))
|
622 |
+
}
|
623 |
+
|
624 |
+
function image_browser_keydown() {
|
625 |
+
if (image_browser_debug) console.log("image_browser_keydown:start")
|
626 |
+
gradioApp().addEventListener("keydown", function(event) {
|
627 |
+
// If we are not on the Image Browser Extension, dont listen for keypresses
|
628 |
+
if (!image_browser_active()) {
|
629 |
+
if (image_browser_debug) console.log("image_browser_keydown:end")
|
630 |
+
return
|
631 |
+
}
|
632 |
+
|
633 |
+
// If the user is typing in an input field, dont listen for keypresses
|
634 |
+
let target
|
635 |
+
if (!event.composed) { // We shouldn't get here as the Shadow DOM is always active, but just in case
|
636 |
+
target = event.target
|
637 |
+
} else {
|
638 |
+
target = event.composedPath()[0]
|
639 |
+
}
|
640 |
+
if (!target || target.nodeName === "INPUT" || target.nodeName === "TEXTAREA") {
|
641 |
+
if (image_browser_debug) console.log("image_browser_keydown:end")
|
642 |
+
return
|
643 |
+
}
|
644 |
+
|
645 |
+
const tab_base_tag = image_browser_current_tab()
|
646 |
+
|
647 |
+
// Listens for keypresses 0-5 and updates the corresponding ranking (0 is the last option, None)
|
648 |
+
if (event.code >= "Digit0" && event.code <= "Digit5") {
|
649 |
+
const selectedValue = event.code.charAt(event.code.length - 1)
|
650 |
+
const radioInputs = gradioApp().getElementById(tab_base_tag + "_control_image_browser_ranking").getElementsByTagName("input")
|
651 |
+
for (const input of radioInputs) {
|
652 |
+
if (input.value === selectedValue || (selectedValue === '0' && input === radioInputs[radioInputs.length - 1])) {
|
653 |
+
input.checked = true
|
654 |
+
input.dispatchEvent(new Event("change"))
|
655 |
+
break
|
656 |
+
}
|
657 |
+
}
|
658 |
+
}
|
659 |
+
|
660 |
+
const mod_keys = gradioApp().querySelector(`#${tab_base_tag}_image_browser_mod_keys textarea`).value
|
661 |
+
let modifiers_pressed = false
|
662 |
+
if (mod_keys.indexOf("C") !== -1 && mod_keys.indexOf("S") !== -1) {
|
663 |
+
if (event.ctrlKey && event.shiftKey) {
|
664 |
+
modifiers_pressed = true
|
665 |
+
}
|
666 |
+
} else if (mod_keys.indexOf("S") !== -1) {
|
667 |
+
if (!event.ctrlKey && event.shiftKey) {
|
668 |
+
modifiers_pressed = true
|
669 |
+
}
|
670 |
+
} else {
|
671 |
+
if (event.ctrlKey && !event.shiftKey) {
|
672 |
+
modifiers_pressed = true
|
673 |
+
}
|
674 |
+
}
|
675 |
+
|
676 |
+
let modifiers_none = false
|
677 |
+
if (!event.ctrlKey && !event.shiftKey && !event.altKey && !event.metaKey) {
|
678 |
+
modifiers_none = true
|
679 |
+
}
|
680 |
+
|
681 |
+
if (event.code == "KeyF" && modifiers_none) {
|
682 |
+
if (tab_base_tag == "image_browser_tab_favorites") {
|
683 |
+
if (image_browser_debug) console.log("image_browser_keydown:end")
|
684 |
+
return
|
685 |
+
}
|
686 |
+
const favoriteBtn = gradioApp().getElementById(tab_base_tag + "_image_browser_favorites_btn")
|
687 |
+
favoriteBtn.dispatchEvent(new Event("click"))
|
688 |
+
}
|
689 |
+
|
690 |
+
if (event.code == "KeyR" && modifiers_none) {
|
691 |
+
            const refreshBtn = gradioApp().getElementById(tab_base_tag + "_image_browser_renew_page")
            refreshBtn.dispatchEvent(new Event("click"))
        }

        if (event.code == "Delete" && modifiers_none) {
            image_browser_delete_key(tab_base_tag)
        }

        if (event.code == "ArrowLeft" && modifiers_pressed) {
            const prevBtn = gradioApp().getElementById(tab_base_tag + "_control_image_browser_prev_page")
            prevBtn.dispatchEvent(new Event("click"))
        }

        if (event.code == "ArrowLeft" && modifiers_none) {
            image_browser_img_show_in_progress = true
            const tab_base_tag = image_browser_current_tab()
            const set_btn = gradioApp().querySelector(`#${tab_base_tag}_image_browser .image_browser_set_index`)
            const curr_idx = parseInt(set_btn.getAttribute("img_index"))
            set_btn.setAttribute("img_index", curr_idx - 1)
            image_browser_refresh_current_page_preview()
        }

        if (event.code == "ArrowRight" && modifiers_pressed) {
            const nextBtn = gradioApp().getElementById(tab_base_tag + "_control_image_browser_next_page")
            nextBtn.dispatchEvent(new Event("click"))
        }

        if (event.code == "ArrowRight" && modifiers_none) {
            image_browser_img_show_in_progress = true
            const tab_base_tag = image_browser_current_tab()
            const set_btn = gradioApp().querySelector(`#${tab_base_tag}_image_browser .image_browser_set_index`)
            const curr_idx = parseInt(set_btn.getAttribute("img_index"))
            set_btn.setAttribute("img_index", curr_idx + 1)
            image_browser_refresh_current_page_preview()
        }
    })
    if (image_browser_debug) console.log("image_browser_keydown:end")
}

function image_browser_touch() {
    if (image_browser_debug) console.log("image_browser_touch:start")
    let touchStartX = 0
    let touchEndX = 0
    gradioApp().addEventListener("touchstart", function(event) {
        if (!image_browser_active()) {
            if (image_browser_debug) console.log("image_browser_touch:end")
            return
        }
        touchStartX = event.touches[0].clientX;
    })
    gradioApp().addEventListener("touchend", function(event) {
        if (!image_browser_active()) {
            if (image_browser_debug) console.log("image_browser_touch:end")
            return
        }
        touchEndX = event.changedTouches[0].clientX
        const touchDiffX = touchStartX - touchEndX
        if (touchDiffX > 50) {
            const tab_base_tag = image_browser_current_tab()
            const set_btn = gradioApp().querySelector(`#${tab_base_tag}_image_browser .image_browser_set_index`)
            const curr_idx = parseInt(set_btn.getAttribute("img_index"))
            if (curr_idx >= 1) {
                set_btn.setAttribute("img_index", curr_idx - 1)
                image_browser_refresh_current_page_preview()
            }
        } else if (touchDiffX < -50) {
            const tab_base_tag = image_browser_current_tab()
            const gallery = gradioApp().querySelector(`#${tab_base_tag}_image_browser`)
            const gallery_items = gallery.querySelectorAll(".thumbnail-item")
            const thumbnails = gallery_items.length / 2
            const set_btn = gradioApp().querySelector(`#${tab_base_tag}_image_browser .image_browser_set_index`)
            const curr_idx = parseInt(set_btn.getAttribute("img_index"))
            if (curr_idx + 1 < thumbnails) {
                set_btn.setAttribute("img_index", curr_idx + 1)
                image_browser_refresh_current_page_preview()
            }
        }
    })
    if (image_browser_debug) console.log("image_browser_touch:end")
}
extensions/stable-diffusion-webui-images-browser/req_IR.txt
ADDED
@@ -0,0 +1 @@
fairscale
extensions/stable-diffusion-webui-images-browser/scripts/__pycache__/image_browser.cpython-310.pyc
ADDED
Binary file (56.3 kB)
extensions/stable-diffusion-webui-images-browser/scripts/image_browser.py
ADDED
@@ -0,0 +1,1717 @@
import gradio as gr
import csv
import importlib
import json
import logging
import math
import os
import platform
import random
import re
import shutil
import stat
import subprocess as sp
import sys
import tempfile
import time
import torch
import traceback
import hashlib
import modules.extras
import modules.images
import modules.ui
from datetime import datetime
from modules import paths, shared, script_callbacks, scripts, images
from modules.shared import opts, cmd_opts
from modules.ui_common import plaintext_to_html
from modules.ui_components import ToolButton, DropdownMulti
from PIL import Image, UnidentifiedImageError
from packaging import version
from pathlib import Path
from typing import List, Tuple
from itertools import chain
from io import StringIO

try:
    from scripts.wib import wib_db
except ModuleNotFoundError:
    sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "scripts")))
    from wib import wib_db

try:
    from send2trash import send2trash
    send2trash_installed = True
except ImportError:
    print("Image Browser: send2trash is not installed. recycle bin cannot be used.")
    send2trash_installed = False

try:
    import ImageReward
    image_reward_installed = True
except ImportError:
    print("Image Browser: ImageReward is not installed, cannot be used.")
    image_reward_installed = False

# Force reload wib_db, as it doesn't get reloaded otherwise, if an extension update is started from webui
importlib.reload(wib_db)

yappi_do = False

components_list = ["Sort by", "Filename keyword search", "EXIF keyword search", "Ranking Filter", "Aesthestic Score", "Generation Info", "File Name", "File Time", "Open Folder", "Send to buttons", "Copy to directory", "Gallery Controls Bar", "Ranking Bar", "Delete Bar", "Additional Generation Info"]

num_of_imgs_per_page = 0
loads_files_num = 0
image_ext_list = [".png", ".jpg", ".jpeg", ".bmp", ".gif", ".webp", ".svg"]
finfo_aes = {}
finfo_image_reward = {}
exif_cache = {}
finfo_exif = {}
aes_cache = {}
image_reward_cache = {}
none_select = "Nothing selected"
refresh_symbol = '\U0001f504' # 🔄
up_symbol = '\U000025b2' # ▲
down_symbol = '\U000025bc' # ▼
caution_symbol = '\U000026a0' # ⚠
folder_symbol = '\U0001f4c2' # 📂
current_depth = 0
init = True
copy_move = ["Move", "Copy"]
copied_moved = ["Moved", "Copied"]
np = "negative_prompt: "
openoutpaint = False
controlnet = False
js_dummy_return = None
log_file = os.path.join(scripts.basedir(), "image_browser.log")
image_reward_model = None

def check_image_browser_active_tabs():
    # Check if Maintenance tab has been added to settings in addition to as a mandatory tab. If so, remove.
    if hasattr(opts, "image_browser_active_tabs"):
        active_tabs_no_maint = re.sub(r",\s*Maintenance", "", opts.image_browser_active_tabs)
        if len(active_tabs_no_maint) != len(opts.image_browser_active_tabs):
            shared.opts.__setattr__("image_browser_active_tabs", active_tabs_no_maint)
            shared.opts.save(shared.config_filename)

favorite_tab_name = "Favorites"
default_tab_options = ["txt2img", "img2img", "txt2img-grids", "img2img-grids", "Extras", favorite_tab_name, "Others"]
check_image_browser_active_tabs()
tabs_list = [tab.strip() for tab in chain.from_iterable(csv.reader(StringIO(opts.image_browser_active_tabs))) if tab] if hasattr(opts, "image_browser_active_tabs") else default_tab_options
try:
    if opts.image_browser_enable_maint:
        tabs_list.append("Maintenance") # mandatory tab
except AttributeError:
    tabs_list.append("Maintenance") # mandatory tab

path_maps = {
    "txt2img": opts.outdir_samples or opts.outdir_txt2img_samples,
    "img2img": opts.outdir_samples or opts.outdir_img2img_samples,
    "txt2img-grids": opts.outdir_grids or opts.outdir_txt2img_grids,
    "img2img-grids": opts.outdir_grids or opts.outdir_img2img_grids,
    "Extras": opts.outdir_samples or opts.outdir_extras_samples,
    favorite_tab_name: opts.outdir_save
}

class ImageBrowserTab():

    seen_base_tags = set()

    def __init__(self, name: str):
        self.name: str = os.path.basename(name) if os.path.isdir(name) else name
        self.path: str = os.path.realpath(path_maps.get(name, name))
        self.base_tag: str = f"image_browser_tab_{self.get_unique_base_tag(self.remove_invalid_html_tag_chars(self.name).lower())}"

    def remove_invalid_html_tag_chars(self, tag: str) -> str:
        # Removes any character that is not a letter, a digit, a hyphen, or an underscore
        removed = re.sub(r'[^a-zA-Z0-9\-_]', '', tag)
        return removed

    def get_unique_base_tag(self, base_tag: str) -> str:
        counter = 1
        while base_tag in self.seen_base_tags:
            match = re.search(r'_(\d+)$', base_tag)
            if match:
                counter = int(match.group(1)) + 1
                base_tag = re.sub(r'_(\d+)$', f"_{counter}", base_tag)
            else:
                base_tag = f"{base_tag}_{counter}"
            counter += 1
        self.seen_base_tags.add(base_tag)
        return base_tag

    def __str__(self):
        return f"Name: {self.name} / Path: {self.path} / Base tag: {self.base_tag} / Seen base tags: {self.seen_base_tags}"

tabs_list = [ImageBrowserTab(tab) for tab in tabs_list]

debug_level_types = ["none", "warning log", "debug log", "javascript log", "capture logs to file"]

debug_levels_list = []
for i in range(len(debug_level_types)):
    level = debug_level_types[i].split(" ")[0]
    text = str(i) + " - " + debug_level_types[i]
    debug_levels_list.append((level, text))

def debug_levels(arg_value=None, arg_level=None, arg_text=None):
    if arg_value is not None:
        return arg_value, debug_levels_list[arg_value]
    elif arg_level is not None:
        for i, (level, text) in enumerate(debug_levels_list):
            if level == arg_level:
                return i, debug_levels_list[i]
    elif arg_text is not None:
        for i, (level, text) in enumerate(debug_levels_list):
            if text == arg_text:
                return i, debug_levels_list[i]

# Logging
logger = None
def restart_debug(parameter):
    global logger
    logger = logging.getLogger(__name__)
    logger.disabled = False
    logger_mode = logging.ERROR
    level_value = 0
    capture_level_value = 99
    if hasattr(opts, "image_browser_debug_level"):
        warning_level_value, (warning_level, warning_level_text) = debug_levels(arg_level="warning")
        debug_level_value, (debug_level, debug_level_text) = debug_levels(arg_level="debug")
        capture_level_value, (capture_level, capture_level_text) = debug_levels(arg_level="capture")
        level_value, (level, level_text) = debug_levels(arg_text=opts.image_browser_debug_level)
        if level_value >= debug_level_value:
            logger_mode = logging.DEBUG
        elif level_value >= warning_level_value:
            logger_mode = logging.WARNING
    logger.setLevel(logger_mode)
    if (logger.hasHandlers()):
        logger.handlers.clear()
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logger_mode)
    formatter = logging.Formatter(f'%(asctime)s image_browser.py: %(message)s', datefmt='%Y-%m-%d-%H:%M:%S')
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)
    if level_value >= capture_level_value:
        try:
            os.unlink(log_file)
        except FileNotFoundError:
            pass
        file_handler = logging.FileHandler(log_file)
        file_handler.setLevel(logger_mode)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    logger.warning(f"debug_level: {level_value}")
    # Debug logging
    if logger.getEffectiveLevel() == logging.DEBUG:
        if parameter != "startup":
            logging.disable(logging.NOTSET)

        logger.debug(f"{sys.executable} {sys.version}")
        logger.debug(f"{platform.system()} {platform.version()}")
        try:
            git = os.environ.get('GIT', "git")
            webui_commit_hash = os.popen(f"{git} rev-parse HEAD").read().strip()
            sm_hashes = os.popen(f"{git} submodule").read()
            sm_hashes_lines = sm_hashes.splitlines()
            image_browser_commit_hash = f"image_browser_commit_hash not found: {sm_hashes}"
            for sm_hashes_line in sm_hashes_lines:
                if "images-browser" in sm_hashes_line.lower():
                    image_browser_commit_hash = sm_hashes_line[1:41]
                    break
        except Exception as e:
            webui_commit_hash = e
            image_browser_commit_hash = e
        logger.debug(f"Webui {webui_commit_hash}")
        logger.debug(f"Image Browser {image_browser_commit_hash}")
        logger.debug(f"Gradio {gr.__version__}")
        logger.debug(f"{paths.script_path}")
        with open(cmd_opts.ui_config_file, "r") as f:
            logger.debug(f.read())
        with open(cmd_opts.ui_settings_file, "r") as f:
            logger.debug(f.read())
        logger.debug(os.path.realpath(__file__))
        logger.debug([str(tab) for tab in tabs_list])
    maint_last_msg = "Debug restarted"

    return parameter, maint_last_msg

restart_debug("startup")

def delete_recycle(filename):
    if opts.image_browser_delete_recycle and send2trash_installed:
        send2trash(filename)
    else:
        file = Path(filename)
        file.unlink()
    return

def img_path_subdirs_get(img_path):
    subdirs = []
    subdirs.append(none_select)
    for item in os.listdir(img_path):
        item_path = os.path.join(img_path, item)
        if os.path.isdir(item_path):
            subdirs.append(item_path)
    return gr.update(choices=subdirs)

def img_path_add_remove(img_dir, path_recorder, add_remove, img_path_depth):
    img_dir = os.path.realpath(img_dir)
    if add_remove == "add" or (add_remove == "remove" and img_dir in path_recorder):
        if add_remove == "add":
            path_recorder[img_dir] = {
                "depth": int(img_path_depth),
                "path_display": f"{img_dir} [{int(img_path_depth)}]"
            }
            wib_db.update_path_recorder(img_dir, path_recorder[img_dir]["depth"], path_recorder[img_dir]["path_display"])
        else:
            del path_recorder[img_dir]
            wib_db.delete_path_recorder(img_dir)
    path_recorder_formatted = [value.get("path_display") for key, value in path_recorder.items()]
    path_recorder_formatted = sorted(path_recorder_formatted, key=lambda x: natural_keys(x.lower()))

    if add_remove == "remove":
        selected = path_recorder[list(path_recorder.keys())[0]]["path_display"]
    else:
        selected = path_recorder[img_dir]["path_display"]
    return path_recorder, gr.update(choices=path_recorder_formatted, value=selected)

def sort_order_flip(turn_page_switch, sort_order):
    if sort_order == up_symbol:
        sort_order = down_symbol
    else:
        sort_order = up_symbol
    return 1, -turn_page_switch, sort_order

def read_path_recorder():
    path_recorder = wib_db.load_path_recorder()
    path_recorder_formatted = [value.get("path_display") for key, value in path_recorder.items()]
    path_recorder_formatted = sorted(path_recorder_formatted, key=lambda x: natural_keys(x.lower()))
    path_recorder_unformatted = list(path_recorder.keys())
    path_recorder_unformatted = sorted(path_recorder_unformatted, key=lambda x: natural_keys(x.lower()))

    return path_recorder, path_recorder_formatted, path_recorder_unformatted

def pure_path(path):
    if path == []:
        return path, 0
    match = re.search(r" \[(\d+)\]$", path)
    if match:
        path = path[:match.start()]
        depth = int(match.group(1))
    else:
        depth = 0
    path = os.path.realpath(path)
    return path, depth

def browser2path(img_path_browser):
    img_path, _ = pure_path(img_path_browser)
    return img_path

def totxt(file):
    base, _ = os.path.splitext(file)
    file_txt = base + '.txt'

    return file_txt

def tab_select():
    path_recorder, path_recorder_formatted, path_recorder_unformatted = read_path_recorder()
    return path_recorder, gr.update(choices=path_recorder_unformatted)

def js_logs_output(js_log):
    logger.debug(f"js_log: {js_log}")
    return js_log

def ranking_filter_settings(page_index, turn_page_switch, ranking_filter):
    if ranking_filter == "Min-max":
        interactive = True
    else:
        interactive = False
    page_index = 1
    turn_page_switch = -turn_page_switch
    return page_index, turn_page_switch, gr.update(interactive=interactive), gr.update(interactive=interactive)

def reduplicative_file_move(src, dst):
    def same_name_file(basename, path):
        name, ext = os.path.splitext(basename)
        f_list = os.listdir(path)
        max_num = 0
        for f in f_list:
            if len(f) <= len(basename):
                continue
            f_ext = f[-len(ext):] if len(ext) > 0 else ""
            if f[:len(name)] == name and f_ext == ext:
                if f[len(name)] == "(" and f[-len(ext)-1] == ")":
                    number = f[len(name)+1:-len(ext)-1]
                    if number.isdigit():
                        if int(number) > max_num:
                            max_num = int(number)
        return f"{name}({max_num + 1}){ext}"
    name = os.path.basename(src)
    save_name = os.path.join(dst, name)
    src_txt_exists = False
    if opts.image_browser_txt_files:
        src_txt = totxt(src)
        if os.path.exists(src_txt):
            src_txt_exists = True
    if not os.path.exists(save_name):
        if opts.image_browser_copy_image:
            shutil.copy2(src, dst)
            if opts.image_browser_txt_files and src_txt_exists:
                shutil.copy2(src_txt, dst)
        else:
            shutil.move(src, dst)
            if opts.image_browser_txt_files and src_txt_exists:
                shutil.move(src_txt, dst)
    else:
        name = same_name_file(name, dst)
        if opts.image_browser_copy_image:
            shutil.copy2(src, os.path.join(dst, name))
            if opts.image_browser_txt_files and src_txt_exists:
                shutil.copy2(src_txt, totxt(os.path.join(dst, name)))
        else:
            shutil.move(src, os.path.join(dst, name))
            if opts.image_browser_txt_files and src_txt_exists:
                shutil.move(src_txt, totxt(os.path.join(dst, name)))

def save_image(file_name, filenames, page_index, turn_page_switch, dest_path):
    if file_name is not None and os.path.exists(file_name):
        reduplicative_file_move(file_name, dest_path)
        message = f"<div style='color:#999'>{copied_moved[opts.image_browser_copy_image]} to {dest_path}</div>"
        if not opts.image_browser_copy_image:
            # Force page refresh with checking filenames
            filenames = []
            turn_page_switch = -turn_page_switch
    else:
        message = "<div style='color:#999'>Image not found (may have been already moved)</div>"

    return message, filenames, page_index, turn_page_switch

def delete_image(tab_base_tag_box, delete_num, name, filenames, image_index, visible_num, delete_confirm, turn_page_switch, image_page_list):
    logger.debug("delete_image")
    refresh = False
    delete_num = int(delete_num)
    image_index = int(image_index)
    visible_num = int(visible_num)
    image_page_list = json.loads(image_page_list)
    new_file_list = []
    new_image_page_list = []
    if name == "":
        refresh = True
    else:
        try:
            index_files = list(filenames).index(name)

            index_on_page = image_page_list.index(name)
        except ValueError as e:
            print(traceback.format_exc(), file=sys.stderr)
            # Something went wrong, force a page refresh
            refresh = True
        if not refresh:
            if not delete_confirm:
                delete_num = min(visible_num - index_on_page, delete_num)
            new_file_list = filenames[:index_files] + filenames[index_files + delete_num:]
            new_image_page_list = image_page_list[:index_on_page] + image_page_list[index_on_page + delete_num:]

            for i in range(index_files, index_files + delete_num):
                if os.path.exists(filenames[i]):
                    if opts.image_browser_delete_message:
                        print(f"Deleting file {filenames[i]}")
                    delete_recycle(filenames[i])
                    visible_num -= 1
                    if opts.image_browser_txt_files:
                        txt_file = totxt(filenames[i])
                        if os.path.exists(txt_file):
                            delete_recycle(txt_file)
                else:
                    print(f"File does not exist {filenames[i]}")
                    # If we reach this point (which we shouldn't), things are messed up, better force a page refresh
                    refresh = True

    if refresh:
        turn_page_switch = -turn_page_switch
        select_image = False
    else:
        select_image = True

    return new_file_list, 1, turn_page_switch, visible_num, new_image_page_list, select_image, json.dumps(new_image_page_list)

def traverse_all_files(curr_path, image_list, tab_base_tag_box, img_path_depth) -> List[Tuple[str, os.stat_result, str, int]]:
    global current_depth
    logger.debug(f"curr_path: {curr_path}")
    if curr_path == "":
        return image_list
    f_list = [(os.path.join(curr_path, entry.name), entry.stat()) for entry in os.scandir(curr_path)]
    for f_info in f_list:
        fname, fstat = f_info
        if os.path.splitext(fname)[1] in image_ext_list:
            image_list.append(f_info)
        elif stat.S_ISDIR(fstat.st_mode):
            if (opts.image_browser_with_subdirs and tab_base_tag_box != "image_browser_tab_others") or (tab_base_tag_box == "image_browser_tab_others" and img_path_depth != 0 and (current_depth < img_path_depth or img_path_depth < 0)):
                current_depth = current_depth + 1
                image_list = traverse_all_files(fname, image_list, tab_base_tag_box, img_path_depth)
                current_depth = current_depth - 1
    return image_list

def cache_exif(fileinfos):
    global finfo_exif, exif_cache, finfo_aes, aes_cache, finfo_image_reward, image_reward_cache

    if yappi_do:
        import yappi
        import pandas as pd
        yappi.set_clock_type("wall")
        yappi.start()

    cache_exif_start = time.time()
    new_exif = 0
    new_aes = 0
    conn, cursor = wib_db.transaction_begin()
    for fi_info in fileinfos:
        if any(fi_info[0].endswith(ext) for ext in image_ext_list):
            found_exif = False
            found_aes = False
            if fi_info[0] in exif_cache:
                finfo_exif[fi_info[0]] = exif_cache[fi_info[0]]
                found_exif = True
            if fi_info[0] in aes_cache:
                finfo_aes[fi_info[0]] = aes_cache[fi_info[0]]
                found_aes = True
            if fi_info[0] in image_reward_cache:
                finfo_image_reward[fi_info[0]] = image_reward_cache[fi_info[0]]
            if not found_exif or not found_aes:
                finfo_exif[fi_info[0]] = "0"
                exif_cache[fi_info[0]] = "0"
                finfo_aes[fi_info[0]] = "0"
                aes_cache[fi_info[0]] = "0"
                try:
                    image = Image.open(fi_info[0])
                    (_, allExif, allExif_html) = modules.extras.run_pnginfo(image)
                    image.close()
                except SyntaxError:
                    allExif = False
                    logger.warning(f"Extension and content don't match: {fi_info[0]}")
                except UnidentifiedImageError as e:
                    allExif = False
                    logger.warning(f"UnidentifiedImageError: {e}")
                except Image.DecompressionBombError as e:
                    allExif = False
                    logger.warning(f"DecompressionBombError: {e}: {fi_info[0]}")
                except PermissionError as e:
                    allExif = False
                    logger.warning(f"PermissionError: {e}: {fi_info[0]}")
                except FileNotFoundError as e:
                    allExif = False
                    logger.warning(f"FileNotFoundError: {e}: {fi_info[0]}")
                except OSError as e:
                    if e.errno == 22:
                        logger.warning(f"Caught OSError with error code 22: {fi_info[0]}")
                    else:
                        raise
                if allExif:
                    finfo_exif[fi_info[0]] = allExif
                    exif_cache[fi_info[0]] = allExif
                    wib_db.update_exif_data(conn, fi_info[0], allExif)
                    new_exif = new_exif + 1

                    m = re.search("(?:aesthetic_score:|Score:) (\d+.\d+)", allExif)
                    if m:
                        aes_value = m.group(1)
                    else:
                        aes_value = "0"
                    finfo_aes[fi_info[0]] = aes_value
                    aes_cache[fi_info[0]] = aes_value
                    wib_db.update_exif_data_by_key(conn, fi_info[0], "aesthetic_score", aes_value)
                    new_aes = new_aes + 1
                else:
                    try:
                        filename = os.path.splitext(fi_info[0])[0] + ".txt"
                        geninfo = ""
                        with open(filename) as f:
                            for line in f:
                                geninfo += line
                        finfo_exif[fi_info[0]] = geninfo
                        exif_cache[fi_info[0]] = geninfo
                        wib_db.update_exif_data_by_key(conn, fi_info[0], geninfo)
                        new_exif = new_exif + 1

                        m = re.search("(?:aesthetic_score:|Score:) (\d+.\d+)", geninfo)
                        if m:
                            aes_value = m.group(1)
                        else:
                            aes_value = "0"
                        finfo_aes[fi_info[0]] = aes_value
                        aes_cache[fi_info[0]] = aes_value
                        wib_db.update_exif_data_by_key(conn, fi_info[0], "aesthetic_score", aes_value)
                        new_aes = new_aes + 1
                    except Exception:
                        logger.warning(f"cache_exif: No EXIF in image or txt file for {fi_info[0]}")
                        # Saved with defaults to not scan it again next time
                        finfo_exif[fi_info[0]] = "0"
                        exif_cache[fi_info[0]] = "0"
                        allExif = "0"
                        wib_db.update_exif_data(conn, fi_info[0], allExif)
                        new_exif = new_exif + 1

                        aes_value = "0"
                        finfo_aes[fi_info[0]] = aes_value
                        aes_cache[fi_info[0]] = aes_value
                        wib_db.update_exif_data_by_key(conn, fi_info[0], "aesthetic_score", aes_value)
                        new_aes = new_aes + 1
    wib_db.transaction_end(conn, cursor)

    if yappi_do:
        yappi.stop()
        pd.set_option('display.float_format', lambda x: '%.6f' % x)
        yappi_stats = yappi.get_func_stats().strip_dirs()
        data = [(s.name, s.ncall, s.tsub, s.ttot, s.ttot/s.ncall) for s in yappi_stats]
        df = pd.DataFrame(data, columns=['name', 'ncall', 'tsub', 'ttot', 'tavg'])
        print(df.to_string(index=False))
        yappi.get_thread_stats().print_all()

    cache_exif_end = time.time()
    logger.debug(f"cache_exif: {new_exif}/{len(fileinfos)} cache_aes: {new_aes}/{len(fileinfos)} {round(cache_exif_end - cache_exif_start, 1)} seconds")

def exif_rebuild(maint_wait):
    global finfo_exif, exif_cache, finfo_aes, aes_cache, finfo_image_reward, image_reward_cache
    if opts.image_browser_scan_exif:
        logger.debug("Rebuild start")
        exif_dirs = wib_db.get_exif_dirs()
        finfo_aes = {}
        finfo_image_reward = {}
        exif_cache = {}
        finfo_exif = {}
        aes_cache = {}
        image_reward_cache = {}
        for key, value in exif_dirs.items():
            if os.path.exists(key):
                print(f"Rebuilding {key}")
                fileinfos = traverse_all_files(key, [], "", 0)
                cache_exif(fileinfos)
        logger.debug("Rebuild end")
        maint_last_msg = "Rebuild finished"
    else:
        maint_last_msg = "Exif cache not enabled in settings"

    return maint_wait, maint_last_msg

def exif_delete_0(maint_wait):
    global finfo_exif, exif_cache, finfo_aes, aes_cache
    if opts.image_browser_scan_exif:
        conn, cursor = wib_db.transaction_begin()
        wib_db.delete_exif_0(cursor)
        wib_db.transaction_end(conn, cursor)
        finfo_aes = {}
        finfo_exif = {}
        exif_cache = wib_db.load_exif_data(exif_cache)
        aes_cache = wib_db.load_aes_data(aes_cache)
        maint_last_msg = "Delete finished"
    else:
        maint_last_msg = "Exif cache not enabled in settings"

    return maint_wait, maint_last_msg

def exif_update_dirs(maint_update_dirs_from, maint_update_dirs_to, maint_wait):
    global exif_cache, aes_cache, image_reward_cache
    if maint_update_dirs_from == "":
        maint_last_msg = "From is empty"
    elif maint_update_dirs_to == "":
        maint_last_msg = "To is empty"
    else:
        maint_update_dirs_from = os.path.realpath(maint_update_dirs_from)
        maint_update_dirs_to = os.path.realpath(maint_update_dirs_to)
        rows = 0
        conn, cursor = wib_db.transaction_begin()
        wib_db.update_path_recorder_mult(cursor, maint_update_dirs_from, maint_update_dirs_to)
        rows = rows + cursor.rowcount
        wib_db.update_exif_data_mult(cursor, maint_update_dirs_from, maint_update_dirs_to)
        rows = rows + cursor.rowcount
        wib_db.update_ranking_mult(cursor, maint_update_dirs_from, maint_update_dirs_to)
        rows = rows + cursor.rowcount
        wib_db.transaction_end(conn, cursor)
        if rows == 0:
            maint_last_msg = "No rows updated"
        else:
            maint_last_msg = f"{rows} rows updated. Please reload UI!"

    return maint_wait, maint_last_msg

def reapply_ranking(path_recorder, maint_wait):
    dirs = {}

    for tab in tabs_list:
        if os.path.exists(tab.path):
            dirs[tab.path] = tab.path

    for key in path_recorder:
        if os.path.exists(key):
            dirs[key] = key

    conn, cursor = wib_db.transaction_begin()

    # Traverse all known dirs, check if missing rankings are due to moved files
    for key in dirs.keys():
        fileinfos = traverse_all_files(key, [], "", 0)
        for (file, _) in fileinfos:
            # Is there a ranking for this full filepath
            ranking_by_file = wib_db.get_ranking_by_file(cursor, file)
            if ranking_by_file is None:
                name = os.path.basename(file)
                (ranking_by_name, alternate_hash) = wib_db.get_ranking_by_name(cursor, name)
                # Is there a ranking only for the filename
                if ranking_by_name is not None:
                    hash = wib_db.get_hash(file)
                    (alternate_file, alternate_ranking) = ranking_by_name
                    if alternate_ranking is not None:
                        (alternate_hash,) = alternate_hash
                    # Does the found filename's file have no hash or the same hash?
                    if alternate_hash is None or hash == alternate_hash:
                        if os.path.exists(alternate_file):
                            # Insert ranking as a copy of the found filename's ranking
                            wib_db.insert_ranking(cursor, file, alternate_ranking, hash)
                        else:
                            # Replace ranking of the found filename
                            wib_db.replace_ranking(cursor, file, alternate_file, hash)

    wib_db.transaction_end(conn, cursor)
    maint_last_msg = "Rankings reapplied"

    return maint_wait, maint_last_msg

def atof(text):
    try:
        retval = float(text)
    except ValueError:
        retval = text
    return retval

def natural_keys(text):
    '''
    alist.sort(key=natural_keys) sorts in human order
    http://nedbatchelder.com/blog/200712/human_sorting.html
    (See Toothy's implementation in the comments)
    float regex comes from https://stackoverflow.com/a/12643073/190597
    '''
    return [ atof(c) for c in re.split(r'[+-]?([0-9]+(?:[.][0-9]*)?|[.][0-9]+)', text) ]

def open_folder(path):
    if os.path.exists(path):
        # Code from ui_common.py
        if not shared.cmd_opts.hide_ui_dir_config:
            if platform.system() == "Windows":
                os.startfile(path)
            elif platform.system() == "Darwin":
                sp.Popen(["open", path])
            elif "microsoft-standard-WSL2" in platform.uname().release:
                sp.Popen(["wsl-open", path])
            else:
                sp.Popen(["xdg-open", path])

def check_ext(ext):
    found = False
    scripts_list = scripts.list_scripts("scripts", ".py")
    for scriptfile in scripts_list:
        if ext in scriptfile.basedir.lower():
            found = True
            break
    return found

def exif_search(needle, haystack, use_regex, case_sensitive):
    found = False
    if use_regex:
        if case_sensitive:
            pattern = re.compile(needle, re.DOTALL)
        else:
            pattern = re.compile(needle, re.DOTALL | re.IGNORECASE)
        if pattern.search(haystack) is not None:
            found = True
    else:
        if not case_sensitive:
            haystack = haystack.lower()
            needle = needle.lower()
        if needle in haystack:
            found = True
    return found

def get_all_images(dir_name, sort_by, sort_order, keyword, tab_base_tag_box, img_path_depth, ranking_filter, ranking_filter_min, ranking_filter_max, aes_filter_min, aes_filter_max, score_type, exif_keyword, negative_prompt_search, use_regex, case_sensitive):
    global current_depth
    logger.debug("get_all_images")
    current_depth = 0
    fileinfos = traverse_all_files(dir_name, [], tab_base_tag_box, img_path_depth)
    keyword = keyword.strip(" ")

    if opts.image_browser_scan_exif:
        cache_exif(fileinfos)

    if len(keyword) != 0:
        fileinfos = [x for x in fileinfos if keyword.lower() in x[0].lower()]
        filenames = [finfo[0] for finfo in fileinfos]

    if opts.image_browser_scan_exif:
        conn, cursor = wib_db.transaction_begin()
        if len(exif_keyword) != 0:
            if use_regex:
                regex_error = False
                try:
                    test_re = re.compile(exif_keyword, re.DOTALL)
                except re.error as e:
                    regex_error = True
                    print(f"Regex error: {e}")
            if (use_regex and not regex_error) or not use_regex:
                if negative_prompt_search == "Yes":
                    fileinfos = [x for x in fileinfos if exif_search(exif_keyword, finfo_exif[x[0]], use_regex, case_sensitive)]
                else:
                    result = []
                    for file_info in fileinfos:
                        file_name = file_info[0]
                        file_exif = finfo_exif[file_name]
                        file_exif_lc = file_exif.lower()
                        start_index = file_exif_lc.find(np)
                        end_index = file_exif.find("\n", start_index)
                        if negative_prompt_search == "Only":
                            start_index = start_index + len(np)
                            sub_string = file_exif[start_index:end_index].strip()
                            if exif_search(exif_keyword, sub_string, use_regex, case_sensitive):
                                result.append(file_info)
                        else:
                            sub_string = file_exif[start_index:end_index].strip()
                            file_exif = file_exif.replace(sub_string, "")

                            if exif_search(exif_keyword, file_exif, use_regex, case_sensitive):
                                result.append(file_info)
                    fileinfos = result
                filenames = [finfo[0] for finfo in fileinfos]
        wib_db.fill_work_files(cursor, fileinfos)
        if len(aes_filter_min) != 0 or len(aes_filter_max) != 0:
            try:
                aes_filter_min_num = float(aes_filter_min)
            except ValueError:
                aes_filter_min_num = sys.float_info.min
            try:
                aes_filter_max_num = float(aes_filter_max)
            except ValueError:
                aes_filter_max_num = sys.float_info.max

            fileinfos = wib_db.filter_aes(cursor, fileinfos, aes_filter_min_num, aes_filter_max_num, score_type)
            filenames = [finfo[0] for finfo in fileinfos]
        if ranking_filter != "All":
            ranking_filter_min_num = 1
            ranking_filter_max_num = 5
            if ranking_filter == "Min-max":
                try:
                    ranking_filter_min_num = int(ranking_filter_min)
                except ValueError:
                    ranking_filter_min_num = 0
                try:
                    ranking_filter_max_num = int(ranking_filter_max)
                except ValueError:
                    ranking_filter_max_num = 0
                if ranking_filter_min_num < 1:
                    ranking_filter_min_num = 1
                if ranking_filter_max_num < 1 or ranking_filter_max_num > 5:
                    ranking_filter_max_num = 5

            fileinfos = wib_db.filter_ranking(cursor, fileinfos, ranking_filter, ranking_filter_min_num, ranking_filter_max_num)
            filenames = [finfo[0] for finfo in fileinfos]

        wib_db.transaction_end(conn, cursor)

    if sort_by == "date":
        if sort_order == up_symbol:
            fileinfos = sorted(fileinfos, key=lambda x: x[1].st_mtime)
        else:
            fileinfos = sorted(fileinfos, key=lambda x: -x[1].st_mtime)
        filenames = [finfo[0] for finfo in fileinfos]
    elif sort_by == "path name":
        if sort_order == up_symbol:
            fileinfos = sorted(fileinfos)
        else:
            fileinfos = sorted(fileinfos, reverse=True)
        filenames = [finfo[0] for finfo in fileinfos]
    elif sort_by == "random":
        random.shuffle(fileinfos)
        filenames = [finfo[0] for finfo in fileinfos]
    elif sort_by == "ranking":
        finfo_ranked = {}
        for fi_info in fileinfos:
            finfo_ranked[fi_info[0]], _ = get_ranking(fi_info[0])
        if sort_order == up_symbol:
            fileinfos = dict(sorted(finfo_ranked.items(), key=lambda x: (x[1], x[0])))
        else:
            fileinfos = dict(reversed(sorted(finfo_ranked.items(), key=lambda x: (x[1], x[0]))))
        filenames = [finfo for finfo in fileinfos]
    else:
        sort_values = {}
        exif_info = dict(finfo_exif)
        if exif_info:
            for k, v in exif_info.items():
                match = re.search(r'(?<='+ sort_by + ":" ').*?(?=(,|$))', v, flags=re.DOTALL|re.IGNORECASE)
                if match:
                    sort_values[k] = match.group().strip()
                else:
                    sort_values[k] = "0"
            if sort_by == "aesthetic_score" or sort_by == "ImageRewardScore" or sort_by == "cfg scale":
                sort_float = True
            else:
                sort_float = False

            if sort_order == down_symbol:
                if sort_float:
                    fileinfos = [x for x in fileinfos if sort_values[x[0]] != "0"]
                    fileinfos.sort(key=lambda x: float(sort_values[x[0]]), reverse=True)
                    fileinfos = dict(fileinfos)
                else:
                    fileinfos = dict(reversed(sorted(fileinfos, key=lambda x: natural_keys(sort_values[x[0]]))))
            else:
                if sort_float:
                    fileinfos = [x for x in fileinfos if sort_values[x[0]] != "0"]
                    fileinfos.sort(key=lambda x: float(sort_values[x[0]]))
                    fileinfos = dict(fileinfos)
                else:
                    fileinfos = dict(sorted(fileinfos, key=lambda x: natural_keys(sort_values[x[0]])))
            filenames = [finfo for finfo in fileinfos]
        else:
            filenames = [finfo for finfo in fileinfos]
    return filenames

def get_image_thumbnail(image_list):
    logger.debug("get_image_thumbnail")
    optimized_cache = os.path.join(tempfile.gettempdir(),"optimized")
    os.makedirs(optimized_cache,exist_ok=True)
    thumbnail_list = []
    for image_path in image_list:
        image_path_hash = hashlib.md5(image_path.encode("utf-8")).hexdigest()
        cache_image_path = os.path.join(optimized_cache, image_path_hash + ".jpg")
        if os.path.isfile(cache_image_path):
            thumbnail_list.append(cache_image_path)
        else:
            try:
                image = Image.open(image_path)
            except OSError:
                # If PIL cannot open the image, use the original path
                thumbnail_list.append(image_path)
                continue
            width, height = image.size
            left = (width - min(width, height)) / 2
            top = (height - min(width, height)) / 2
            right = (width + min(width, height)) / 2
            bottom = (height + min(width, height)) / 2
            thumbnail = image.crop((left, top, right, bottom))
            thumbnail.thumbnail((opts.image_browser_thumbnail_size, opts.image_browser_thumbnail_size))
            if thumbnail.mode != "RGB":
                thumbnail = thumbnail.convert("RGB")
            try:
                thumbnail.save(cache_image_path, "JPEG")
                thumbnail_list.append(cache_image_path)
            except FileNotFoundError:
                # Cannot save cache, use PIL object
                thumbnail_list.append(thumbnail)
    return thumbnail_list

def set_tooltip_info(image_list):
    image_browser_img_info = {}
    conn, cursor = wib_db.transaction_begin()
    for filename in image_list:
        x, y = wib_db.select_x_y(cursor, filename)
        image_browser_img_info[filename] = {"x": x, "y": y}
    wib_db.transaction_end(conn, cursor)
    image_browser_img_info_json = json.dumps(image_browser_img_info)
    return image_browser_img_info_json

def get_image_page(img_path, page_index, filenames, keyword, sort_by, sort_order, tab_base_tag_box, img_path_depth, ranking_filter, ranking_filter_min, ranking_filter_max, aes_filter_min, aes_filter_max, score_type, exif_keyword, negative_prompt_search, use_regex, case_sensitive, image_reward_button):
    logger.debug("get_image_page")
    if img_path == "":
        return [], page_index, [], "", "", "", 0, "", None, "", "[]", image_reward_button

    # Set temp_dir from webui settings, so gradio uses it
    if shared.opts.temp_dir != "":
        tempfile.tempdir = shared.opts.temp_dir

    img_path, _ = pure_path(img_path)
    filenames = get_all_images(img_path, sort_by, sort_order, keyword, tab_base_tag_box, img_path_depth, ranking_filter, ranking_filter_min, ranking_filter_max, aes_filter_min, aes_filter_max, score_type, exif_keyword, negative_prompt_search, use_regex, case_sensitive)
    page_index = int(page_index)
    length = len(filenames)
    max_page_index = math.ceil(length / num_of_imgs_per_page)
    page_index = max_page_index if page_index == -1 else page_index
    page_index = 1 if page_index < 1 else page_index
    page_index = max_page_index if page_index > max_page_index else page_index
    idx_frm = (page_index - 1) * num_of_imgs_per_page
    image_list = filenames[idx_frm:idx_frm + num_of_imgs_per_page]

    if opts.image_browser_scan_exif and opts.image_browser_img_tooltips:
        image_browser_img_info = set_tooltip_info(image_list)
    else:
        image_browser_img_info = "[]"

    if opts.image_browser_use_thumbnail:
        thumbnail_list = get_image_thumbnail(image_list)
    else:
        thumbnail_list = image_list

    visible_num = num_of_imgs_per_page if idx_frm + num_of_imgs_per_page < length else length % num_of_imgs_per_page
    visible_num = num_of_imgs_per_page if visible_num == 0 else visible_num

    load_info = "<div style='color:#999' align='center'>"
    load_info += f"{length} images in this directory, divided into {int((length + 1) // num_of_imgs_per_page + 1)} pages"
    load_info += "</div>"

    return filenames, gr.update(value=page_index, label=f"Page Index ({page_index}/{max_page_index})"), thumbnail_list, "", "", "", visible_num, load_info, None, json.dumps(image_list), image_browser_img_info, gr.update(visible=True)

def get_current_file(tab_base_tag_box, num, page_index, filenames):
    file = filenames[int(num) + int((page_index - 1) * num_of_imgs_per_page)]
    return file

def show_image_info(tab_base_tag_box, num, page_index, filenames, turn_page_switch, image_gallery):
    logger.debug(f"show_image_info: tab_base_tag_box, num, page_index, len(filenames), num_of_imgs_per_page: {tab_base_tag_box}, {num}, {page_index}, {len(filenames)}, {num_of_imgs_per_page}")
    if len(filenames) == 0:
        # This should only happen if webui was stopped and started again and the user clicks on one of the still displayed images.
        # The state with the filenames will be empty then. In that case we return None to prevent further errors and force a page refresh.
        turn_page_switch = -turn_page_switch
        file = None
        tm = None
        info = ""
    else:
        file_num = int(num) + int(
            (page_index - 1) * num_of_imgs_per_page)
        if file_num >= len(filenames):
            # Last image to the right is deleted, page refresh
            turn_page_switch = -turn_page_switch
            file = None
            tm = None
            info = ""
        else:
            file = filenames[file_num]
            tm = "<div style='color:#999' align='right'>" + time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(os.path.getmtime(file))) + "</div>"
            try:
                with Image.open(file) as image:
                    _, geninfo, info = modules.extras.run_pnginfo(image)
            except UnidentifiedImageError as e:
                info = ""
                logger.warning(f"UnidentifiedImageError: {e}")
    if opts.image_browser_use_thumbnail:
        image_gallery = [image['name'] for image in image_gallery]
        image_gallery[int(num)] = filenames[file_num]
        return file, tm, num, file, turn_page_switch, info, image_gallery
    else:
        return file, tm, num, file, turn_page_switch, info

def change_dir(img_dir, path_recorder, load_switch, img_path_browser, img_path_depth, img_path):
    warning = None
    img_path, _ = pure_path(img_path)
    img_path_depth_org = img_path_depth
    if img_dir == none_select:
        return warning, gr.update(visible=False), img_path_browser, path_recorder, load_switch, img_path, img_path_depth
    else:
        img_dir, img_path_depth = pure_path(img_dir)
    if warning is None:
        try:
            if os.path.exists(img_dir):
                try:
                    f = os.listdir(img_dir)
                except:
                    warning = f"'{img_dir} is not a directory"
            else:
                warning = "The directory does not exist"
        except:
            warning = "The format of the directory is incorrect"
    if warning is None:
        return "", gr.update(visible=True), img_path_browser, path_recorder, img_dir, img_dir, img_path_depth
    else:
        return warning, gr.update(visible=False), img_path_browser, path_recorder, load_switch, img_path, img_path_depth_org

def update_move_text_one(btn):
    btn_text = " ".join(btn.split()[1:])
    return f"{copy_move[opts.image_browser_copy_image]} {btn_text}"

def update_move_text(favorites_btn, to_dir_btn):
    return update_move_text_one(favorites_btn), update_move_text_one(to_dir_btn)

def get_ranking(filename):
    ranking_value = wib_db.select_ranking(filename)
    return ranking_value, None

def img_file_name_changed(img_file_name, favorites_btn, to_dir_btn):
    ranking_current, ranking = get_ranking(img_file_name)
    favorites_btn, to_dir_btn = update_move_text(favorites_btn, to_dir_btn)

    return ranking_current, ranking, "", favorites_btn, to_dir_btn

def update_exif(img_file_name, key, value):
    image = Image.open(img_file_name)
    geninfo, items = images.read_info_from_image(image)
    if geninfo is not None:
        if f"{key}: " in geninfo:
            if value == "None":
                geninfo = re.sub(f', {key}: \d+(\.\d+)*', '', geninfo)
            else:
                geninfo = re.sub(f'{key}: \d+(\.\d+)*', f'{key}: {value}', geninfo)
        else:
            geninfo = f'{geninfo}, {key}: {value}'

    original_time = os.path.getmtime(img_file_name)
    images.save_image(image, os.path.dirname(img_file_name), "", extension=os.path.splitext(img_file_name)[1][1:], info=geninfo, forced_filename=os.path.splitext(os.path.basename(img_file_name))[0], save_to_dirs=False)
    os.utime(img_file_name, (original_time, original_time))
    return geninfo

def update_ranking(img_file_name, ranking_current, ranking, img_file_info):
    # ranking = None is different than ranking = "None"! None means no radio button selected. "None" means radio button called "None" selected.
    if ranking is None:
        return ranking_current, None, img_file_info

    saved_ranking, _ = get_ranking(img_file_name)
    if saved_ranking != ranking:
        wib_db.update_ranking(img_file_name, ranking)
        if opts.image_browser_ranking_pnginfo and any(img_file_name.endswith(ext) for ext in image_ext_list):
            img_file_info = update_exif(img_file_name, "Ranking", ranking)
    return ranking, None, img_file_info

def generate_image_reward(filenames, turn_page_switch, aes_filter_min, aes_filter_max):
    global image_reward_model
    if image_reward_model is None:
        image_reward_model = ImageReward.load("ImageReward-v1.0")
    conn, cursor = wib_db.transaction_begin()
    for filename in filenames:
        saved_image_reward_score, saved_image_reward_prompt = wib_db.select_image_reward_score(cursor, filename)
        if saved_image_reward_score is None and saved_image_reward_prompt is not None:
            try:
                with torch.no_grad():
                    image_reward_score = image_reward_model.score(saved_image_reward_prompt, filename)
                image_reward_score = f"{image_reward_score:.2f}"
                try:
                    logger.warning(f"Generated ImageRewardScore: {image_reward_score} for {filename}")
                except UnicodeEncodeError:
                    pass
                wib_db.update_image_reward_score(cursor, filename, image_reward_score)
                if any(filename.endswith(ext) for ext in image_ext_list):
                    img_file_info = update_exif(filename, "ImageRewardScore", image_reward_score)
            except UnidentifiedImageError as e:
                logger.warning(f"UnidentifiedImageError: {e}")
    wib_db.transaction_end(conn, cursor)
    return -turn_page_switch, aes_filter_min, aes_filter_max

def create_tab(tab: ImageBrowserTab, current_gr_tab: gr.Tab):
    global init, exif_cache, aes_cache, image_reward_cache, openoutpaint, controlnet, js_dummy_return
    dir_name = None
    others_dir = False
    maint = False
    standard_ui = True
    path_recorder = {}
    path_recorder_formatted = []
    path_recorder_unformatted = []

    if init:
        db_version = wib_db.check()
        logger.debug(f"db_version: {db_version}")
        exif_cache = wib_db.load_exif_data(exif_cache)
        aes_cache = wib_db.load_exif_data_by_key(aes_cache, "aesthetic_score", "Score")
        image_reward_cache = wib_db.load_exif_data_by_key(image_reward_cache, "ImageRewardScore", "ImageRewardScore")
        init = False

    path_recorder, path_recorder_formatted, path_recorder_unformatted = read_path_recorder()
    openoutpaint = check_ext("openoutpaint")
    controlnet = check_ext("controlnet")

    if tab.name == "Others":
        others_dir = True
        standard_ui = False
    elif tab.name == "Maintenance":
        maint = True
        standard_ui = False
    else:
        dir_name = tab.path

    if standard_ui:
        dir_name = str(Path(dir_name))
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)

    with gr.Row():
        path_recorder = gr.State(path_recorder)
        with gr.Column(scale=10):
            warning_box = gr.HTML("<p> ", elem_id=f"{tab.base_tag}_image_browser_warning_box")
        with gr.Column(scale=5, visible=(tab.name==favorite_tab_name)):
            gr.HTML(f"<p>Favorites path from settings: {opts.outdir_save}")

    with gr.Row(visible=others_dir):
        with gr.Column(scale=10):
            suffix = "" if others_dir else tab.name
            img_path = gr.Textbox(dir_name, label="Images directory"+suffix, placeholder="Input images directory", interactive=others_dir)
        with gr.Column(scale=1):
            img_path_depth = gr.Number(value="0", label="Sub directory depth")
        with gr.Column(scale=1):
            img_path_save_button = gr.Button(value="Add to / replace in saved directories")

    with gr.Row(visible=others_dir):
        with gr.Column(scale=10):
            img_path_browser = gr.Dropdown(choices=path_recorder_formatted, label="Saved directories")
        with gr.Column(scale=1):
            img_path_remove_button = gr.Button(value="Remove from saved directories")

    with gr.Row(visible=others_dir):
        with gr.Column(scale=10):
            img_path_subdirs = gr.Dropdown(choices=[none_select], value=none_select, label="Sub directories", interactive=True, elem_id=f"{tab.base_tag}_img_path_subdirs")
        with gr.Column(scale=1):
            img_path_subdirs_button = gr.Button(value="Get sub directories")

    with gr.Row(visible=standard_ui, elem_id=f"{tab.base_tag}_image_browser") as main_panel:
        with gr.Column():
            with gr.Row():
                with gr.Column(scale=2):
                    with gr.Row(elem_id=f"{tab.base_tag}_image_browser_gallery_controls") as gallery_controls_panel:
                        with gr.Column(scale=2, min_width=20):
                            first_page = gr.Button("First Page", elem_id=f"{tab.base_tag}_control_image_browser_first_page")
                        with gr.Column(scale=2, min_width=20):
                            prev_page = gr.Button("Prev Page", elem_id=f"{tab.base_tag}_control_image_browser_prev_page")
                        with gr.Column(scale=2, min_width=20):
                            page_index = gr.Number(value=1, label="Page Index", elem_id=f"{tab.base_tag}_control_image_browser_page_index")
                        with gr.Column(scale=1, min_width=20):
                            refresh_index_button = ToolButton(value=refresh_symbol, elem_id=f"{tab.base_tag}_control_image_browser_refresh_index")
                        with gr.Column(scale=2, min_width=20):
                            next_page = gr.Button("Next Page", elem_id=f"{tab.base_tag}_control_image_browser_next_page")
                        with gr.Column(scale=2, min_width=20):
                            end_page = gr.Button("End Page", elem_id=f"{tab.base_tag}_control_image_browser_end_page")
                    with gr.Row(visible=False) as ranking_panel:
                        with gr.Column(scale=1, min_width=20):
                            ranking_current = gr.Textbox(value="None", label="Current ranking", interactive=False)
                        with gr.Column(scale=4, min_width=20):
                            ranking = gr.Radio(choices=["1", "2", "3", "4", "5", "None"], label="Set ranking to", elem_id=f"{tab.base_tag}_control_image_browser_ranking", interactive=True)
                    with gr.Row():
                        image_gallery = gr.Gallery(show_label=False, elem_id=f"{tab.base_tag}_image_browser_gallery").style(grid=opts.image_browser_page_columns)
                    with gr.Row() as delete_panel:
                        with gr.Column(scale=1):
                            delete_num = gr.Number(value=1, interactive=True, label="delete next", elem_id=f"{tab.base_tag}_image_browser_del_num")
                            delete_confirm = gr.Checkbox(value=False, label="also delete off-screen images")
                        with gr.Column(scale=3):
                            delete = gr.Button('Delete', elem_id=f"{tab.base_tag}_image_browser_del_img_btn")
                    with gr.Row() as info_add_panel:
                        with gr.Accordion("Additional Generation Info", open=False):
                            img_file_info_add = gr.HTML()

                with gr.Column(scale=1):
                    with gr.Row() as sort_panel:
                        sort_by = gr.Dropdown(value="date", choices=["path name", "date", "aesthetic_score", "ImageRewardScore", "random", "cfg scale", "steps", "seed", "sampler", "size", "model", "model hash", "ranking"], label="Sort by")
                        sort_order = ToolButton(value=down_symbol)
                    with gr.Row() as filename_search_panel:
                        filename_keyword_search = gr.Textbox(value="", label="Filename keyword search")
                    with gr.Box() as exif_search_panel:
                        with gr.Row():
                            exif_keyword_search = gr.Textbox(value="", label="EXIF keyword search")
                            negative_prompt_search = gr.Radio(value="No", choices=["No", "Yes", "Only"], label="Search negative prompt", interactive=True)
                        with gr.Row():
                            case_sensitive = gr.Checkbox(value=False, label="case sensitive")
                            use_regex = gr.Checkbox(value=False, label=r"regex - e.g. ^(?!.*Hires).*$")
                    with gr.Box() as ranking_filter_panel:
                        with gr.Row():
                            ranking_filter = gr.Radio(value="All", choices=["All", "1", "2", "3", "4", "5", "None", "Min-max"], label="Ranking filter", interactive=True)
                        with gr.Row():
                            with gr.Column(scale=2, min_width=20):
                                ranking_filter_min = gr.Textbox(value="1", label="Minimum ranking", interactive=False)
                            with gr.Column(scale=2, min_width=20):
                                ranking_filter_max = gr.Textbox(value="5", label="Maximum ranking", interactive=False)
                            with gr.Column(scale=4, min_width=20):
                                gr.Textbox(value="Choose Min-max to activate these controls", label="", interactive=False)
                    with gr.Box() as aesthetic_score_filter_panel:
                        with gr.Row():
                            with gr.Column(scale=4, min_width=20):
                                score_type = gr.Dropdown(value=opts.image_browser_scoring_type, choices=["aesthetic_score", "ImageReward Score"], label="Scoring type", interactive=True)
                            with gr.Column(scale=2, min_width=20):
                                image_reward_button = gr.Button(value="Generate ImageReward Scores for all images", interactive=image_reward_installed, visible=False)
                        with gr.Row():
                            aes_filter_min = gr.Textbox(value="", label="Minimum score")
                            aes_filter_max = gr.Textbox(value="", label="Maximum score")
                    with gr.Row() as generation_info_panel:
                        img_file_info = gr.Textbox(label="Generation Info", interactive=False, lines=6, elem_id=f"{tab.base_tag}_image_browser_file_info")
                    with gr.Row() as filename_panel:
                        img_file_name = gr.Textbox(value="", label="File Name", interactive=False)
                    with gr.Row() as filetime_panel:
                        img_file_time = gr.HTML()
                    with gr.Row() as open_folder_panel:
                        open_folder_button = gr.Button(folder_symbol, visible=standard_ui or others_dir)
                        gr.HTML(" ")
                        gr.HTML(" ")
                        gr.HTML(" ")
with gr.Row(elem_id=f"{tab.base_tag}_image_browser_button_panel", visible=False) as button_panel:
|
1231 |
+
with gr.Column():
|
1232 |
+
with gr.Row():
|
1233 |
+
if tab.name == favorite_tab_name:
|
1234 |
+
favorites_btn_show = False
|
1235 |
+
else:
|
1236 |
+
favorites_btn_show = True
|
1237 |
+
favorites_btn = gr.Button(f'{copy_move[opts.image_browser_copy_image]} to favorites', elem_id=f"{tab.base_tag}_image_browser_favorites_btn", visible=favorites_btn_show)
|
1238 |
+
try:
|
1239 |
+
send_to_buttons = modules.generation_parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"])
|
1240 |
+
except:
|
1241 |
+
pass
|
1242 |
+
sendto_openoutpaint = gr.Button("Send to openOutpaint", elem_id=f"{tab.base_tag}_image_browser_openoutpaint_btn", visible=openoutpaint)
|
1243 |
+
with gr.Row(visible=controlnet):
|
1244 |
+
sendto_controlnet_txt2img = gr.Button("Send to txt2img ControlNet", visible=controlnet)
|
1245 |
+
sendto_controlnet_img2img = gr.Button("Send to img2img ControlNet", visible=controlnet)
|
1246 |
+
controlnet_max = opts.data.get("control_net_max_models_num", 1)
|
1247 |
+
sendto_controlnet_num = gr.Dropdown([str(i) for i in range(controlnet_max)], label="ControlNet number", value="0", interactive=True, visible=(controlnet and controlnet_max > 1))
|
1248 |
+
if controlnet_max is None:
|
1249 |
+
sendto_controlnet_type = gr.Textbox(value="none", visible=False)
|
1250 |
+
elif controlnet_max == 1:
|
1251 |
+
sendto_controlnet_type = gr.Textbox(value="single", visible=False)
|
1252 |
+
else:
|
1253 |
+
sendto_controlnet_type = gr.Textbox(value="multiple", visible=False)
|
1254 |
+
with gr.Row(elem_id=f"{tab.base_tag}_image_browser_to_dir_panel", visible=False) as to_dir_panel:
|
1255 |
+
with gr.Box():
|
1256 |
+
with gr.Row():
|
1257 |
+
to_dir_path = gr.Textbox(label="Directory path")
|
1258 |
+
with gr.Row():
|
1259 |
+
to_dir_saved = gr.Dropdown(choices=path_recorder_unformatted, label="Saved directories")
|
1260 |
+
with gr.Row():
|
1261 |
+
to_dir_btn = gr.Button(f'{copy_move[opts.image_browser_copy_image]} to directory', elem_id=f"{tab.base_tag}_image_browser_to_dir_btn")
|
1262 |
+
|
1263 |
+
with gr.Row():
|
1264 |
+
collected_warning = gr.HTML()
|
1265 |
+
|
1266 |
+
with gr.Row(visible=False):
|
1267 |
+
renew_page = gr.Button("Renew Page", elem_id=f"{tab.base_tag}_image_browser_renew_page")
|
1268 |
+
visible_img_num = gr.Number()
|
1269 |
+
tab_base_tag_box = gr.Textbox(tab.base_tag)
|
1270 |
+
image_index = gr.Textbox(value=-1, elem_id=f"{tab.base_tag}_image_browser_image_index")
|
1271 |
+
set_index = gr.Button('set_index', elem_id=f"{tab.base_tag}_image_browser_set_index")
|
1272 |
+
filenames = gr.State([])
|
1273 |
+
hidden = gr.Image(type="pil", elem_id=f"{tab.base_tag}_image_browser_hidden_image")
|
1274 |
+
image_page_list = gr.Textbox(elem_id=f"{tab.base_tag}_image_browser_image_page_list")
|
1275 |
+
info1 = gr.Textbox()
|
1276 |
+
info2 = gr.Textbox()
|
1277 |
+
load_switch = gr.Textbox(value="load_switch", label="load_switch")
|
1278 |
+
to_dir_load_switch = gr.Textbox(value="to dir load_switch", label="to_dir_load_switch")
|
1279 |
+
turn_page_switch = gr.Number(value=1, label="turn_page_switch")
|
1280 |
+
select_image = gr.Number(value=1)
|
1281 |
+
img_path_add = gr.Textbox(value="add")
|
1282 |
+
img_path_remove = gr.Textbox(value="remove")
|
1283 |
+
favorites_path = gr.Textbox(value=opts.outdir_save)
|
1284 |
+
mod_keys = ""
|
1285 |
+
if opts.image_browser_mod_ctrl_shift:
|
1286 |
+
mod_keys = f"{mod_keys}CS"
|
1287 |
+
elif opts.image_browser_mod_shift:
|
1288 |
+
mod_keys = f"{mod_keys}S"
|
1289 |
+
image_browser_mod_keys = gr.Textbox(value=mod_keys, elem_id=f"{tab.base_tag}_image_browser_mod_keys")
|
1290 |
+
image_browser_prompt = gr.Textbox(elem_id=f"{tab.base_tag}_image_browser_prompt")
|
1291 |
+
image_browser_neg_prompt = gr.Textbox(elem_id=f"{tab.base_tag}_image_browser_neg_prompt")
|
1292 |
+
js_logs = gr.Textbox()
|
1293 |
+
image_browser_img_info = gr.Textbox(value="[]", elem_id=f"{tab.base_tag}_image_browser_img_info")
|
1294 |
+
|
1295 |
+
    # Maintenance tab
    with gr.Row(visible=maint):
        with gr.Column(scale=4):
            gr.HTML(f"{caution_symbol} Caution: You should only use these options if you know what you are doing. {caution_symbol}")
        with gr.Column(scale=3):
            maint_wait = gr.HTML("Status:")
        with gr.Column(scale=7):
            gr.HTML(" ")
    with gr.Row(visible=maint):
        maint_last_msg = gr.Textbox(label="Last message", interactive=False)
    with gr.Row(visible=maint):
        with gr.Column(scale=1):
            maint_exif_rebuild = gr.Button(value="Rebuild exif cache")
        with gr.Column(scale=1):
            maint_exif_delete_0 = gr.Button(value="Delete 0-entries from exif cache")
        with gr.Column(scale=10):
            gr.HTML(visible=False)
    with gr.Row(visible=maint):
        with gr.Column(scale=1):
            maint_update_dirs = gr.Button(value="Update directory names in database")
        with gr.Column(scale=10):
            maint_update_dirs_from = gr.Textbox(label="From (full path)")
        with gr.Column(scale=10):
            maint_update_dirs_to = gr.Textbox(label="to (full path)")
    with gr.Row(visible=maint):
        with gr.Column(scale=1):
            maint_reapply_ranking = gr.Button(value="Reapply ranking after moving files")
        with gr.Column(scale=10):
            gr.HTML(visible=False)
    with gr.Row(visible=maint):
        with gr.Column(scale=1):
            maint_restart_debug = gr.Button(value="Restart debug")
        with gr.Column(scale=10):
            gr.HTML(visible=False)
    with gr.Row(visible=maint):
        with gr.Column(scale=1):
            maint_get_js_logs = gr.Button(value="Get javascript logs")
        with gr.Column(scale=10):
            maint_show_logs = gr.Textbox(label="Javascript logs", lines=10, interactive=False)
    with gr.Row(visible=False):
        with gr.Column(scale=1):
            maint_rebuild_ranking = gr.Button(value="Rebuild ranking from exif info")
        with gr.Column(scale=10):
            gr.HTML(visible=False)

    # Hide components based on opts.image_browser_hidden_components
    hidden_component_map = {
        "Sort by": sort_panel,
        "Filename keyword search": filename_search_panel,
        "EXIF keyword search": exif_search_panel,
        "Ranking Filter": ranking_filter_panel,
        "Aesthestic Score": aesthetic_score_filter_panel,
        "Generation Info": generation_info_panel,
        "File Name": filename_panel,
        "File Time": filetime_panel,
        "Open Folder": open_folder_panel,
        "Send to buttons": button_panel,
        "Copy to directory": to_dir_panel,
        "Gallery Controls Bar": gallery_controls_panel,
        "Ranking Bar": ranking_panel,
        "Delete Bar": delete_panel,
        "Additional Generation Info": info_add_panel
    }

    if set(hidden_component_map.keys()) != set(components_list):
        logger.warning(f"Invalid items present in either hidden_component_map or components_list. Make sure when adding new components they are added to both.")

    override_hidden = set()
    if hasattr(opts, "image_browser_hidden_components"):
        for item in opts.image_browser_hidden_components:
            hidden_component_map[item].visible = False
            override_hidden.add(hidden_component_map[item])

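    # change directory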
    change_dir_outputs = [warning_box, main_panel, img_path_browser, path_recorder, load_switch, img_path, img_path_depth]
    img_path.submit(change_dir, inputs=[img_path, path_recorder, load_switch, img_path_browser, img_path_depth, img_path], outputs=change_dir_outputs, show_progress=opts.image_browser_show_progress)
    img_path_browser.change(change_dir, inputs=[img_path_browser, path_recorder, load_switch, img_path_browser, img_path_depth, img_path], outputs=change_dir_outputs, show_progress=opts.image_browser_show_progress)
    # img_path_browser.change(browser2path, inputs=[img_path_browser], outputs=[img_path])
    to_dir_saved.change(change_dir, inputs=[to_dir_saved, path_recorder, to_dir_load_switch, to_dir_saved, img_path_depth, to_dir_path], outputs=[warning_box, main_panel, to_dir_saved, path_recorder, to_dir_load_switch, to_dir_path, img_path_depth], show_progress=opts.image_browser_show_progress)

    #delete
    delete.click(
        fn=delete_image,
        inputs=[tab_base_tag_box, delete_num, img_file_name, filenames, image_index, visible_img_num, delete_confirm, turn_page_switch, image_page_list],
        outputs=[filenames, delete_num, turn_page_switch, visible_img_num, image_gallery, select_image, image_page_list],
        show_progress=opts.image_browser_show_progress
    ).then(
        fn=None,
        _js="image_browser_select_image",
        inputs=[tab_base_tag_box, image_index, select_image],
        outputs=[js_dummy_return],
        show_progress=opts.image_browser_show_progress
    )

    to_dir_btn.click(save_image, inputs=[img_file_name, filenames, page_index, turn_page_switch, to_dir_path], outputs=[collected_warning, filenames, page_index, turn_page_switch], show_progress=opts.image_browser_show_progress)
    #turn page
    first_page.click(lambda s:(1, -s), inputs=[turn_page_switch], outputs=[page_index, turn_page_switch], show_progress=opts.image_browser_show_progress)
    next_page.click(lambda p, s: (p + 1, -s), inputs=[page_index, turn_page_switch], outputs=[page_index, turn_page_switch], show_progress=opts.image_browser_show_progress)
    prev_page.click(lambda p, s: (p - 1, -s), inputs=[page_index, turn_page_switch], outputs=[page_index, turn_page_switch], show_progress=opts.image_browser_show_progress)
    end_page.click(lambda s: (-1, -s), inputs=[turn_page_switch], outputs=[page_index, turn_page_switch], show_progress=opts.image_browser_show_progress)
    load_switch.change(lambda s:(1, -s), inputs=[turn_page_switch], outputs=[page_index, turn_page_switch], show_progress=opts.image_browser_show_progress)
    filename_keyword_search.submit(lambda s:(1, -s), inputs=[turn_page_switch], outputs=[page_index, turn_page_switch], show_progress=opts.image_browser_show_progress)
    exif_keyword_search.submit(lambda s:(1, -s), inputs=[turn_page_switch], outputs=[page_index, turn_page_switch], show_progress=opts.image_browser_show_progress)
    ranking_filter_min.submit(lambda s:(1, -s), inputs=[turn_page_switch], outputs=[page_index, turn_page_switch], show_progress=opts.image_browser_show_progress)
    ranking_filter_max.submit(lambda s:(1, -s), inputs=[turn_page_switch], outputs=[page_index, turn_page_switch], show_progress=opts.image_browser_show_progress)
    aes_filter_min.submit(lambda s:(1, -s), inputs=[turn_page_switch], outputs=[page_index, turn_page_switch], show_progress=opts.image_browser_show_progress)
    aes_filter_max.submit(lambda s:(1, -s), inputs=[turn_page_switch], outputs=[page_index, turn_page_switch], show_progress=opts.image_browser_show_progress)
    sort_by.change(lambda s:(1, -s), inputs=[turn_page_switch], outputs=[page_index, turn_page_switch], show_progress=opts.image_browser_show_progress)
    page_index.submit(lambda s: -s, inputs=[turn_page_switch], outputs=[turn_page_switch], show_progress=opts.image_browser_show_progress)
    renew_page.click(lambda s: -s, inputs=[turn_page_switch], outputs=[turn_page_switch], show_progress=opts.image_browser_show_progress)
    refresh_index_button.click(lambda p, s:(p, -s), inputs=[page_index, turn_page_switch], outputs=[page_index, turn_page_switch], show_progress=opts.image_browser_show_progress)
    img_path_depth.change(lambda s: -s, inputs=[turn_page_switch], outputs=[turn_page_switch], show_progress=opts.image_browser_show_progress)

    hide_on_thumbnail_view = [delete_panel, button_panel, ranking_panel, to_dir_panel, info_add_panel]

    sort_order.click(
        fn=sort_order_flip,
        inputs=[turn_page_switch, sort_order],
        outputs=[page_index, turn_page_switch, sort_order],
        show_progress=opts.image_browser_show_progress
    )
    ranking_filter.change(
        fn=ranking_filter_settings,
        inputs=[page_index, turn_page_switch, ranking_filter],
        outputs=[page_index, turn_page_switch, ranking_filter_min, ranking_filter_max],
        show_progress=opts.image_browser_show_progress
    )

    # Others
    img_path_subdirs_button.click(
        fn=img_path_subdirs_get,
        inputs=[img_path],
        outputs=[img_path_subdirs],
        show_progress=opts.image_browser_show_progress
    )
    img_path_subdirs.change(
        fn=change_dir,
        inputs=[img_path_subdirs, path_recorder, load_switch, img_path_browser, img_path_depth, img_path],
        outputs=change_dir_outputs,
        show_progress=opts.image_browser_show_progress
    )
    img_path_save_button.click(
        fn=img_path_add_remove,
        inputs=[img_path, path_recorder, img_path_add, img_path_depth],
        outputs=[path_recorder, img_path_browser],
        show_progress=opts.image_browser_show_progress
    )
    img_path_remove_button.click(
        fn=img_path_add_remove,
        inputs=[img_path, path_recorder, img_path_remove, img_path_depth],
        outputs=[path_recorder, img_path_browser],
        show_progress=opts.image_browser_show_progress
    )
    maint_exif_rebuild.click(
        fn=exif_rebuild,
        inputs=[maint_wait],
        outputs=[maint_wait, maint_last_msg],
        show_progress=True
    )
    maint_exif_delete_0.click(
        fn=exif_delete_0,
        inputs=[maint_wait],
        outputs=[maint_wait, maint_last_msg],
        show_progress=True
    )
    maint_update_dirs.click(
        fn=exif_update_dirs,
        inputs=[maint_update_dirs_from, maint_update_dirs_to, maint_wait],
        outputs=[maint_wait, maint_last_msg],
        show_progress=True
    )
    maint_reapply_ranking.click(
        fn=reapply_ranking,
        inputs=[path_recorder, maint_wait],
        outputs=[maint_wait, maint_last_msg],
        show_progress=True
    )
    maint_restart_debug.click(
        fn=restart_debug,
        inputs=[maint_wait],
        outputs=[maint_wait, maint_last_msg],
        show_progress=True
    )
    maint_get_js_logs.click(
        fn=js_logs_output,
        _js="get_js_logs",
        inputs=[js_logs],
        outputs=[maint_show_logs],
        show_progress=True
    )

    # other functions
    if opts.image_browser_use_thumbnail:
        set_index_outputs = [img_file_name, img_file_time, image_index, hidden, turn_page_switch, img_file_info_add, image_gallery]
    else:
        set_index_outputs = [img_file_name, img_file_time, image_index, hidden, turn_page_switch, img_file_info_add]
    set_index.click(
        fn=show_image_info,
        _js="image_browser_get_current_img",
        inputs=[tab_base_tag_box, image_index, page_index, filenames, turn_page_switch, image_gallery],
        outputs=set_index_outputs,
        show_progress=opts.image_browser_show_progress
    ).then(
        fn=None,
        _js="image_browser_img_show_progress_update",
        inputs=[],
        outputs=[js_dummy_return],
        show_progress=opts.image_browser_show_progress
    )

    set_index.click(fn=lambda:(gr.update(visible=delete_panel not in override_hidden), gr.update(visible=button_panel not in override_hidden), gr.update(visible=ranking_panel not in override_hidden), gr.update(visible=to_dir_panel not in override_hidden), gr.update(visible=info_add_panel not in override_hidden)), inputs=None, outputs=hide_on_thumbnail_view, show_progress=opts.image_browser_show_progress)

    favorites_btn.click(save_image, inputs=[img_file_name, filenames, page_index, turn_page_switch, favorites_path], outputs=[collected_warning, filenames, page_index, turn_page_switch], show_progress=opts.image_browser_show_progress)
    img_file_name.change(img_file_name_changed, inputs=[img_file_name, favorites_btn, to_dir_btn], outputs=[ranking_current, ranking, collected_warning, favorites_btn, to_dir_btn], show_progress=opts.image_browser_show_progress)

    hidden.change(fn=run_pnginfo, inputs=[hidden, img_path, img_file_name], outputs=[info1, img_file_info, info2, image_browser_prompt, image_browser_neg_prompt], show_progress=opts.image_browser_show_progress)

    #ranking
    ranking.change(update_ranking, inputs=[img_file_name, ranking_current, ranking, img_file_info], outputs=[ranking_current, ranking, img_file_info], show_progress=opts.image_browser_show_progress)

    try:
        modules.generation_parameters_copypaste.bind_buttons(send_to_buttons, hidden, img_file_info)
    except:
        pass

    if standard_ui:
        current_gr_tab.select(
            fn=tab_select,
            inputs=[],
            outputs=[path_recorder, to_dir_saved],
            show_progress=opts.image_browser_show_progress
        )
        open_folder_button.click(
            fn=lambda: open_folder(dir_name),
            inputs=[],
            outputs=[],
            show_progress=opts.image_browser_show_progress
        )
    elif others_dir:
        open_folder_button.click(
            fn=open_folder,
            inputs=[img_path],
            outputs=[],
            show_progress=opts.image_browser_show_progress
        )
    if standard_ui or others_dir:
        turn_page_switch.change(
            fn=get_image_page,
            inputs=[img_path, page_index, filenames, filename_keyword_search, sort_by, sort_order, tab_base_tag_box, img_path_depth, ranking_filter, ranking_filter_min, ranking_filter_max, aes_filter_min, aes_filter_max, score_type, exif_keyword_search, negative_prompt_search, use_regex, case_sensitive, image_reward_button],
            outputs=[filenames, page_index, image_gallery, img_file_name, img_file_time, img_file_info, visible_img_num, warning_box, hidden, image_page_list, image_browser_img_info, image_reward_button],
            show_progress=opts.image_browser_show_progress
        ).then(
            fn=None,
            _js="image_browser_turnpage",
            inputs=[tab_base_tag_box],
            outputs=[js_dummy_return],
            show_progress=opts.image_browser_show_progress
        )
        turn_page_switch.change(fn=lambda:(gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)), inputs=None, outputs=hide_on_thumbnail_view, show_progress=opts.image_browser_show_progress)
        sendto_openoutpaint.click(
            fn=None,
            inputs=[tab_base_tag_box, image_index, image_browser_prompt, image_browser_neg_prompt],
            outputs=[js_dummy_return],
            _js="image_browser_openoutpaint_send",
            show_progress=opts.image_browser_show_progress
        )
        sendto_controlnet_txt2img.click(
            fn=None,
            inputs=[tab_base_tag_box, image_index, sendto_controlnet_num, sendto_controlnet_type],
            outputs=[js_dummy_return],
            _js="image_browser_controlnet_send_txt2img",
            show_progress=opts.image_browser_show_progress
        )
        sendto_controlnet_img2img.click(
            fn=None,
            inputs=[tab_base_tag_box, image_index, sendto_controlnet_num, sendto_controlnet_type],
            outputs=[js_dummy_return],
            _js="image_browser_controlnet_send_img2img",
            show_progress=opts.image_browser_show_progress
        )
        image_reward_button.click(
            fn=generate_image_reward,
            inputs=[filenames, turn_page_switch, aes_filter_min, aes_filter_max],
            outputs=[turn_page_switch, aes_filter_min, aes_filter_max],
            show_progress=True
        )

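# run_pnginfo extracts the generation parameters from the selected image (or a
# matching .txt sidecar file) and, when openOutpaint is installed, also returns
# the prompts stored for it in the database.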
def run_pnginfo(image, image_path, image_file_name):
    if image is None:
        return '', '', '', '', ''
    try:
        geninfo, items = images.read_info_from_image(image)
        items = {**{'parameters': geninfo}, **items}

        info = ''
        for key, text in items.items():
            info += f"""
<div>
<p><b>{plaintext_to_html(str(key))}</b></p>
<p>{plaintext_to_html(str(text))}</p>
</div>
""".strip()+"\n"
    except UnidentifiedImageError as e:
        geninfo = None
        info = ""

    if geninfo is None:
        try:
            filename = os.path.splitext(image_file_name)[0] + ".txt"
            geninfo = ""
            with open(filename) as f:
                for line in f:
                    geninfo += line
        except Exception:
            logger.warning(f"run_pnginfo: No EXIF in image or txt file")

    if openoutpaint:
        prompt, neg_prompt = wib_db.select_prompts(image_file_name)
        if prompt == "0":
            prompt = ""
        if neg_prompt == "0":
            neg_prompt = ""
    else:
        prompt = ""
        neg_prompt = ""

    return '', geninfo, info, prompt, neg_prompt


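# on_ui_tabs builds the extension's UI: one sub-tab per entry in tabs_list plus
# a few hidden helper components, wrapped in a Gradio version check.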
def on_ui_tabs():
    global num_of_imgs_per_page, loads_files_num, js_dummy_return
    num_of_imgs_per_page = int(opts.image_browser_page_columns * opts.image_browser_page_rows)
    loads_files_num = int(opts.image_browser_pages_perload * num_of_imgs_per_page)
    with gr.Blocks(analytics_enabled=False) as image_browser:
        gradio_needed = "3.23.0"
        if version.parse(gr.__version__) < version.parse(gradio_needed):
            gr.HTML(f'<p style="color: red; font-weight: bold;">You are running Gradio version {gr.__version__}. This version of the extension requires at least Gradio version {gradio_needed}.</p><p style="color: red; font-weight: bold;">For more details see <a href="https://github.com/AlUlkesh/stable-diffusion-webui-images-browser/issues/116#issuecomment-1493259585" target="_blank">https://github.com/AlUlkesh/stable-diffusion-webui-images-browser/issues/116#issuecomment-1493259585</a></p>')
        else:
            with gr.Tabs(elem_id="image_browser_tabs_container") as tabs:
                js_dummy_return = gr.Textbox(interactive=False, visible=False)
                for i, tab in enumerate(tabs_list):
                    with gr.Tab(tab.name, elem_id=f"{tab.base_tag}_image_browser_container") as current_gr_tab:
                        with gr.Blocks(analytics_enabled=False):
                            create_tab(tab, current_gr_tab)
            gr.Checkbox(value=opts.image_browser_preload, elem_id="image_browser_preload", visible=False)
            gr.Textbox(",".join( [tab.base_tag for tab in tabs_list] ), elem_id="image_browser_tab_base_tags_list", visible=False)
            gr.Checkbox(value=opts.image_browser_swipe, elem_id=f"image_browser_swipe", visible=False)

            javascript_level_value, (javascript_level, javascript_level_text) = debug_levels(arg_level="javascript")
            level_value, (level, level_text) = debug_levels(arg_text=opts.image_browser_debug_level)
            if level_value >= javascript_level_value:
                debug_level_option = level
            else:
                debug_level_option = ""
            gr.Textbox(value=debug_level_option, elem_id="image_browser_debug_level_option", visible=False)

    return (image_browser, "Image Browser", "image_browser"),

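# move_setting migrates a value stored under a legacy option name to its current
# name and removes the old entry; it returns the updated count of migrated options.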
def move_setting(cur_setting_name, old_setting_name, option_info, section, added):
    try:
        old_value = shared.opts.__getattr__(old_setting_name)
    except AttributeError:
        old_value = None
    try:
        new_value = shared.opts.__getattr__(cur_setting_name)
    except AttributeError:
        new_value = None
    if old_value is not None and new_value is None:
        # Add new option
        shared.opts.add_option(cur_setting_name, shared.OptionInfo(*option_info, section=section))
        shared.opts.__setattr__(cur_setting_name, old_value)
        added = added + 1
        # Remove old option
        shared.opts.data.pop(old_setting_name, None)

    return added

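# on_ui_settings registers the extension's options in the webui settings page,
# migrating any values saved under historic option names first.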
def on_ui_settings():
    # [current setting_name], [old setting_name], [default], [label], [component], [component_args]
    active_tabs_description = f"List of active tabs (separated by commas). Available options are {', '.join(default_tab_options)}. Custom folders are also supported by specifying their path."
    debug_level_choices = []
    for i in range(len(debug_level_types)):
        level_value, (level, level_text) = debug_levels(arg_value=i)
        debug_level_choices.append(level_text)

    image_browser_options = [
        ("image_browser_active_tabs", None, ", ".join(default_tab_options), active_tabs_description),
        ("image_browser_hidden_components", None, [], "Select components to hide", DropdownMulti, lambda: {"choices": components_list}),
        ("image_browser_with_subdirs", "images_history_with_subdirs", True, "Include images in sub directories"),
        ("image_browser_preload", "images_history_preload", False, "Preload images at startup for first tab"),
        ("image_browser_copy_image", "images_copy_image", False, "Move buttons copy instead of move"),
        ("image_browser_delete_message", "images_delete_message", True, "Print image deletion messages to the console"),
        ("image_browser_txt_files", "images_txt_files", True, "Move/Copy/Delete matching .txt files"),
        ("image_browser_debug_level", None, debug_level_choices[0], "Debug level", gr.Dropdown, lambda: {"choices": debug_level_choices}),
        ("image_browser_delete_recycle", "images_delete_recycle", True, "Use recycle bin when deleting images"),
        ("image_browser_scan_exif", "images_scan_exif", True, "Scan Exif-/.txt-data (initially slower, but required for many features to work)"),
        ("image_browser_mod_shift", None, False, "Change CTRL keybindings to SHIFT"),
        ("image_browser_mod_ctrl_shift", None, False, "or to CTRL+SHIFT"),
        ("image_browser_enable_maint", None, True, "Enable Maintenance tab"),
        ("image_browser_ranking_pnginfo", None, False, "Save ranking in image's pnginfo"),
        ("image_browser_page_columns", "images_history_page_columns", 6, "Number of columns on the page"),
        ("image_browser_page_rows", "images_history_page_rows", 6, "Number of rows on the page"),
        ("image_browser_pages_perload", "images_history_pages_perload", 20, "Minimum number of pages per load"),
        ("image_browser_use_thumbnail", None, False, "Use optimized images in the thumbnail interface (significantly reduces the amount of data transferred)"),
        ("image_browser_thumbnail_size", None, 200, "Size of the thumbnails (px)"),
        ("image_browser_swipe", None, False, "Swipe left/right navigates to the next image"),
        ("image_browser_img_tooltips", None, True, "Enable thumbnail tooltips"),
        ("image_browser_scoring_type", None, "aesthetic_score", "Default scoring type", gr.Dropdown, lambda: {"choices": ["aesthetic_score", "ImageReward Score"]}),
        ("image_browser_show_progress", None, True, "Show progress indicator"),
    ]

    section = ('image-browser', "Image Browser")
    # Move historic setting names to current names
    added = 0
    for cur_setting_name, old_setting_name, *option_info in image_browser_options:
        if old_setting_name is not None:
            added = move_setting(cur_setting_name, old_setting_name, option_info, section, added)
    if added > 0:
        shared.opts.save(shared.config_filename)

    for cur_setting_name, _, *option_info in image_browser_options:
        shared.opts.add_option(cur_setting_name, shared.OptionInfo(*option_info, section=section))

script_callbacks.on_ui_settings(on_ui_settings)
script_callbacks.on_ui_tabs(on_ui_tabs)