John6666 committed on
Commit
9ee2570
0 Parent(s):

Super-squash branch 'main' using huggingface_hub

Browse files
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Votepurchase Multiple Model (SD1.5/SDXL Text-to-Image)
3
+ emoji: 🖼🖼️📦
4
+ colorFrom: purple
5
+ colorTo: red
6
+ sdk: gradio
7
+ sdk_version: 4.41.0
8
+ app_file: app.py
9
+ license: mit
10
+ short_description: Text-to-Image
11
+ pinned: true
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,449 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import spaces
2
+ import gradio as gr
3
+ import numpy as np
4
+
5
+ # DiffuseCraft
6
+ from dc import (
7
+ infer,
8
+ _infer,
9
+ pass_result,
10
+ get_diffusers_model_list,
11
+ get_samplers,
12
+ get_vaes,
13
+ enable_model_recom_prompt,
14
+ enable_diffusers_model_detail,
15
+ get_t2i_model_info,
16
+ get_all_lora_tupled_list,
17
+ update_loras,
18
+ apply_lora_prompt,
19
+ download_my_lora,
20
+ search_civitai_lora,
21
+ select_civitai_lora,
22
+ search_civitai_lora_json,
23
+ preset_quality,
24
+ preset_styles,
25
+ process_style_prompt,
26
+ )
27
+ # Translator
28
+ from llmdolphin import (
29
+ dolphin_respond_auto,
30
+ dolphin_parse_simple,
31
+ get_llm_formats,
32
+ get_dolphin_model_format,
33
+ get_dolphin_models,
34
+ get_dolphin_model_info,
35
+ select_dolphin_model,
36
+ select_dolphin_format,
37
+ get_dolphin_sysprompt,
38
+ )
39
+ # Tagger
40
+ from tagger.v2 import (
41
+ v2_upsampling_prompt,
42
+ V2_ALL_MODELS,
43
+ )
44
+ from tagger.utils import (
45
+ gradio_copy_text,
46
+ gradio_copy_prompt,
47
+ COPY_ACTION_JS,
48
+ V2_ASPECT_RATIO_OPTIONS,
49
+ V2_RATING_OPTIONS,
50
+ V2_LENGTH_OPTIONS,
51
+ V2_IDENTITY_OPTIONS
52
+ )
53
+ from tagger.tagger import (
54
+ predict_tags_wd,
55
+ convert_danbooru_to_e621_prompt,
56
+ remove_specific_prompt,
57
+ insert_recom_prompt,
58
+ compose_prompt_to_copy,
59
+ translate_prompt,
60
+ select_random_character,
61
+ )
62
+ from tagger.fl2sd3longcap import (
63
+ predict_tags_fl2_sd3,
64
+ )
65
def description_ui():
    """Render the static demo header (title, pipeline summary, model credits) as Markdown."""
    header_md = """
## Danbooru Tags Transformer V2 Demo with WD Tagger & SD3 Long Captioner
(Image =>) Prompt => Upsampled longer prompt
- Mod of p1atdev's [Danbooru Tags Transformer V2 Demo](https://huggingface.co/spaces/p1atdev/danbooru-tags-transformer-v2) and [WD Tagger with 🤗 transformers](https://huggingface.co/spaces/p1atdev/wd-tagger-transformers).
- Models: p1atdev's [wd-swinv2-tagger-v3-hf](https://huggingface.co/p1atdev/wd-swinv2-tagger-v3-hf), [dart-v2-moe-sft](https://huggingface.co/p1atdev/dart-v2-moe-sft), [dart-v2-sft](https://huggingface.co/p1atdev/dart-v2-sft)\
, gokaygokay's [Florence-2-SD3-Captioner](https://huggingface.co/gokaygokay/Florence-2-SD3-Captioner)
"""
    gr.Markdown(header_md)
75
+
76
+
77
# Upper bound for the seed slider: the largest 32-bit signed integer.
MAX_SEED = np.iinfo(np.int32).max
# Maximum width/height in pixels offered by the size sliders.
MAX_IMAGE_SIZE = 1216

# Page-level CSS: center the outer container and cap the main column width;
# "#model-info" centers the model-info Markdown under the model dropdown.
css = """
#container {
margin: 0 auto;
}
#col-container {
margin: 0 auto;
max-width: 520px;
}
#model-info { text-align: center; }
"""
90
+
91
# Top-level Gradio app: two tabs — a text-to-image generator (backed by dc.py)
# and a tagger/prompt-upsampler (backed by the tagger package).
# NOTE(review): original indentation was lost in the diff scrape; the nesting
# below is reconstructed from the context managers — confirm against upstream.
with gr.Blocks(css=css, fill_width=True, elem_id="container") as demo:
    with gr.Tab("Image Generator"):
        with gr.Column(elem_id="col-container"):

            # Primary prompt box; submitting it triggers generation (see gr.on below).
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                lines=1,
                max_lines=8,
                placeholder="Enter your prompt",
                container=False,
            )

            with gr.Row():
                run_button = gr.Button("Run")
                # Runs LLM translation of the prompt first, then generates.
                run_translate_button = gr.Button("Translate")

            result = gr.Image(label="Result", show_label=False, interactive=False, show_download_button=True, show_share_button=False, container=True)

            with gr.Accordion("Advanced Settings", open=False):

                negative_prompt = gr.Text(
                    label="Negative prompt",
                    lines=1,
                    max_lines=6,
                    placeholder="Enter a negative prompt",
                    value="(low quality, worst quality:1.2), very displeasing, watermark, signature, ugly"
                )

                seed = gr.Slider(
                    label="Seed",
                    minimum=0,
                    maximum=MAX_SEED,
                    step=1,
                    value=0,
                )

                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

                with gr.Row():
                    width = gr.Slider(
                        label="Width",
                        minimum=256,
                        maximum=MAX_IMAGE_SIZE,
                        step=32,
                        value=1024,#832,
                    )

                    height = gr.Slider(
                        label="Height",
                        minimum=256,
                        maximum=MAX_IMAGE_SIZE,
                        step=32,
                        value=1024,#1216,
                    )

                with gr.Row():
                    guidance_scale = gr.Slider(
                        label="Guidance scale",
                        minimum=0.0,
                        maximum=30.0,
                        step=0.1,
                        value=7,
                    )

                    num_inference_steps = gr.Slider(
                        label="Number of inference steps",
                        minimum=1,
                        maximum=100,
                        step=1,
                        value=28,
                    )

                # Model selection; free-text repo_ids are allowed (allow_custom_value).
                with gr.Group():
                    model_name = gr.Dropdown(label="Model", info="You can enter a huggingface model repo_id to want to use.", choices=get_diffusers_model_list(), value=get_diffusers_model_list()[0], allow_custom_value=True, interactive=True)
                    model_info = gr.Markdown(elem_id="model-info")
                    model_detail = gr.Checkbox(label="Show detail of model in list", value=False)

                sampler = gr.Dropdown(label="Sampler", choices=get_samplers(), value="Euler a")

                chatbot = gr.Chatbot(likeable=False, render_markdown=False, visible=False) # component for auto-translation

                # Five identical LoRA slots: dropdown + weight slider + hidden
                # example-prompt/copy/markdown widgets revealed by update_loras.
                with gr.Accordion("LoRA", open=True, visible=True):
                    with gr.Group():
                        lora1 = gr.Dropdown(label="LoRA 1", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True)
                        lora1_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 1: weight")
                        with gr.Row():
                            lora1_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
                            lora1_copy = gr.Button(value="Copy example to prompt", visible=False)
                            lora1_md = gr.Markdown(value="", visible=False)
                    with gr.Group():
                        lora2 = gr.Dropdown(label="LoRA 2", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True)
                        lora2_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 2: weight")
                        with gr.Row():
                            lora2_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
                            lora2_copy = gr.Button(value="Copy example to prompt", visible=False)
                            lora2_md = gr.Markdown(value="", visible=False)
                    with gr.Group():
                        lora3 = gr.Dropdown(label="LoRA 3", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True)
                        lora3_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 3: weight")
                        with gr.Row():
                            lora3_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
                            lora3_copy = gr.Button(value="Copy example to prompt", visible=False)
                            lora3_md = gr.Markdown(value="", visible=False)
                    with gr.Group():
                        lora4 = gr.Dropdown(label="LoRA 4", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True)
                        lora4_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 4: weight")
                        with gr.Row():
                            lora4_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
                            lora4_copy = gr.Button(value="Copy example to prompt", visible=False)
                            lora4_md = gr.Markdown(value="", visible=False)
                    with gr.Group():
                        lora5 = gr.Dropdown(label="LoRA 5", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True)
                        lora5_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 5: weight")
                        with gr.Row():
                            lora5_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
                            lora5_copy = gr.Button(value="Copy example to prompt", visible=False)
                            lora5_md = gr.Markdown(value="", visible=False)
                    # Civitai search / direct-URL download for LoRA files.
                    with gr.Accordion("From URL", open=True, visible=True):
                        with gr.Row():
                            lora_search_civitai_query = gr.Textbox(label="Query", placeholder="oomuro sakurako...", lines=1)
                            lora_search_civitai_basemodel = gr.CheckboxGroup(label="Search LoRA for", choices=["Pony", "SD 1.5", "SDXL 1.0"], value=["Pony", "SDXL 1.0"])
                        lora_search_civitai_submit = gr.Button("Search on Civitai")
                        lora_search_civitai_result = gr.Dropdown(label="Search Results", choices=[("", "")], value="", allow_custom_value=True, visible=False)
                        # Hidden JSON used purely to expose the search as an API endpoint.
                        lora_search_civitai_json = gr.JSON(value={}, visible=False)
                        lora_search_civitai_desc = gr.Markdown(value="", visible=False)
                        lora_download_url = gr.Textbox(label="URL", placeholder="http://...my_lora_url.safetensors", lines=1)
                        lora_download = gr.Button("Get and set LoRA and apply to prompt")

                vae_model = gr.Dropdown(label="VAE Model", choices=get_vaes(), value=get_vaes()[0])
                recom_prompt = gr.Checkbox(label="Recommended prompt", value=True)
                quality_selector = gr.Dropdown(label="Quality Tags Presets", interactive=True, choices=list(preset_quality.keys()), value="None")
                style_selector = gr.Dropdown(label="Style Preset", interactive=True, choices=list(preset_styles.keys()), value="None")

                # Parameters forwarded to the llmdolphin translation model.
                with gr.Accordion("Translation Settings", open=False):
                    chat_model = gr.Dropdown(choices=get_dolphin_models(), value=get_dolphin_models()[0][1], allow_custom_value=True, label="Model")
                    chat_model_info = gr.Markdown(value=get_dolphin_model_info(get_dolphin_models()[0][1]), label="Model info")
                    chat_format = gr.Dropdown(choices=get_llm_formats(), value=get_dolphin_model_format(get_dolphin_models()[0][1]), label="Message format")
                    chat_sysmsg = gr.Textbox(value=get_dolphin_sysprompt(), label="System message")
                    chat_tokens = gr.Slider(minimum=1, maximum=4096, value=512, step=1, label="Max tokens")
                    chat_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
                    chat_topp = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
                    chat_topk = gr.Slider(minimum=0, maximum=100, value=40, step=1, label="Top-k")
                    chat_rp = gr.Slider(minimum=0.0, maximum=2.0, value=1.1, step=0.1, label="Repetition penalty")

            examples = gr.Examples(
                examples = [
                    ["souryuu asuka langley, 1girl, neon genesis evangelion, plugsuit, pilot suit, red bodysuit, sitting, crossing legs, black eye patch, cat hat, throne, symmetrical, looking down, from bottom, looking at viewer, outdoors"],
                    ["sailor moon, magical girl transformation, sparkles and ribbons, soft pastel colors, crescent moon motif, starry night sky background, shoujo manga style"],
                    ["kafuu chino, 1girl, solo"],
                    ["1girl"],
                    ["beautiful sunset"],
                ],
                inputs=[prompt],
            )

            # --- Event wiring: plain generation path (Run button / prompt submit). ---
            gr.on( #lambda x: None, inputs=None, outputs=result).then(
                triggers=[run_button.click, prompt.submit],
                fn=infer,
                inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
                    guidance_scale, num_inference_steps, model_name,
                    lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
                    sampler, vae_model],
                outputs=[result],
                queue=True,
                show_progress="full",
                show_api=True,
            )

            # Translate-then-generate path: _infer is a no-op API stub, then the
            # dolphin LLM rewrites the prompt, then the real infer runs.
            gr.on( #lambda x: None, inputs=None, outputs=result).then(
                triggers=[run_translate_button.click],
                fn=_infer, # dummy fn for api
                inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
                    guidance_scale, num_inference_steps, model_name,
                    lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
                    sampler, vae_model],
                outputs=[result],
                queue=False,
                show_api=True,
                api_name="infer_translate",
            ).success(
                fn=dolphin_respond_auto,
                inputs=[prompt, chatbot],
                outputs=[chatbot],
                queue=True,
                show_progress="full",
                show_api=False,
            ).success(
                fn=dolphin_parse_simple,
                inputs=[prompt, chatbot],
                outputs=[prompt],
                queue=False,
                show_api=False,
            ).success(
                fn=infer,
                inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
                    guidance_scale, num_inference_steps, model_name,
                    lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
                    sampler, vae_model],
                outputs=[result],
                queue=True,
                show_progress="full",
                show_api=False,
            ).success(lambda: None, None, chatbot, queue=False, show_api=False)\
                .success(pass_result, [result], [result], queue=False, show_api=False) # dummy fn for api

            # Any LoRA selection/weight change re-renders prompt tags + info widgets.
            gr.on(
                triggers=[lora1.change, lora1_wt.change, lora2.change, lora2_wt.change, lora3.change, lora3_wt.change,
                    lora4.change, lora4_wt.change, lora5.change, lora5_wt.change],
                fn=update_loras,
                inputs=[prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt],
                outputs=[prompt, lora1, lora1_wt, lora1_info, lora1_copy, lora1_md,
                    lora2, lora2_wt, lora2_info, lora2_copy, lora2_md, lora3, lora3_wt, lora3_info, lora3_copy, lora3_md,
                    lora4, lora4_wt, lora4_info, lora4_copy, lora4_md, lora5, lora5_wt, lora5_info, lora5_copy, lora5_md],
                queue=False,
                trigger_mode="once",
                show_api=False,
            )
            lora1_copy.click(apply_lora_prompt, [prompt, lora1_info], [prompt], queue=False, show_api=False)
            lora2_copy.click(apply_lora_prompt, [prompt, lora2_info], [prompt], queue=False, show_api=False)
            lora3_copy.click(apply_lora_prompt, [prompt, lora3_info], [prompt], queue=False, show_api=False)
            lora4_copy.click(apply_lora_prompt, [prompt, lora4_info], [prompt], queue=False, show_api=False)
            lora5_copy.click(apply_lora_prompt, [prompt, lora5_info], [prompt], queue=False, show_api=False)

            gr.on(
                triggers=[lora_search_civitai_submit.click, lora_search_civitai_query.submit],
                fn=search_civitai_lora,
                inputs=[lora_search_civitai_query, lora_search_civitai_basemodel],
                outputs=[lora_search_civitai_result, lora_search_civitai_desc, lora_search_civitai_submit, lora_search_civitai_query],
                scroll_to_output=True,
                queue=True,
                show_api=False,
            )
            lora_search_civitai_json.change(search_civitai_lora_json, [lora_search_civitai_query, lora_search_civitai_basemodel], [lora_search_civitai_json], queue=True, show_api=True) # fn for api
            lora_search_civitai_result.change(select_civitai_lora, [lora_search_civitai_result], [lora_download_url, lora_search_civitai_desc], scroll_to_output=True, queue=False, show_api=False)
            gr.on(
                triggers=[lora_download.click, lora_download_url.submit],
                fn=download_my_lora,
                inputs=[lora_download_url,lora1, lora2, lora3, lora4, lora5],
                outputs=[lora1, lora2, lora3, lora4, lora5],
                scroll_to_output=True,
                queue=True,
                show_api=False,
            )

            recom_prompt.change(enable_model_recom_prompt, [recom_prompt], [recom_prompt], queue=False, show_api=False)
            gr.on(
                triggers=[quality_selector.change, style_selector.change],
                fn=process_style_prompt,
                inputs=[prompt, negative_prompt, style_selector, quality_selector],
                outputs=[prompt, negative_prompt],
                queue=False,
                trigger_mode="once",
            )

            model_detail.change(enable_diffusers_model_detail, [model_detail, model_name], [model_detail, model_name], queue=False, show_api=False)
            model_name.change(get_t2i_model_info, [model_name], [model_info], queue=False, show_api=False)

            # Changing the translation model/format also clears the hidden chatbot state.
            chat_model.change(select_dolphin_model, [chat_model], [chat_model, chat_format, chat_model_info], queue=True, show_progress="full", show_api=False)\
                .success(lambda: None, None, chatbot, queue=False, show_api=False)
            chat_format.change(select_dolphin_format, [chat_format], [chat_format], queue=False, show_api=False)\
                .success(lambda: None, None, chatbot, queue=False, show_api=False)

    # Tagger
    with gr.Tab("Tags Transformer with Tagger"):
        with gr.Column():
            # Image -> tags pipeline inputs.
            with gr.Group():
                input_image = gr.Image(label="Input image", type="pil", sources=["upload", "clipboard"], height=256)
                with gr.Accordion(label="Advanced options", open=False):
                    general_threshold = gr.Slider(label="Threshold", minimum=0.0, maximum=1.0, value=0.3, step=0.01, interactive=True)
                    character_threshold = gr.Slider(label="Character threshold", minimum=0.0, maximum=1.0, value=0.8, step=0.01, interactive=True)
                    input_tag_type = gr.Radio(label="Convert tags to", info="danbooru for Animagine, e621 for Pony.", choices=["danbooru", "e621"], value="danbooru")
                    # NOTE(review): this rebinds the name `recom_prompt` from the first tab;
                    # the generator tab's checkbox handlers were wired before this point, so
                    # behavior is unchanged, but the shadowing is confusing — consider renaming.
                    recom_prompt = gr.Radio(label="Insert reccomended prompt", choices=["None", "Animagine", "Pony"], value="None", interactive=True)
                    image_algorithms = gr.CheckboxGroup(["Use WD Tagger", "Use Florence-2-SD3-Long-Captioner"], label="Algorithms", value=["Use WD Tagger"])
                    keep_tags = gr.Radio(label="Remove tags leaving only the following", choices=["body", "dress", "all"], value="all")
                generate_from_image_btn = gr.Button(value="GENERATE TAGS FROM IMAGE", size="lg", variant="primary")
            # Manual tag entry + upsampler options.
            with gr.Group():
                with gr.Row():
                    input_character = gr.Textbox(label="Character tags", placeholder="hatsune miku")
                    input_copyright = gr.Textbox(label="Copyright tags", placeholder="vocaloid")
                    random_character = gr.Button(value="Random character 🎲", size="sm")
                input_general = gr.TextArea(label="General tags", lines=4, placeholder="1girl, ...", value="")
                input_tags_to_copy = gr.Textbox(value="", visible=False)
                with gr.Row():
                    copy_input_btn = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
                    copy_prompt_btn_input = gr.Button(value="Copy to primary prompt", size="sm", interactive=False)
                translate_input_prompt_button = gr.Button(value="Translate prompt to English", size="sm", variant="secondary")
                tag_type = gr.Radio(label="Output tag conversion", info="danbooru for Animagine, e621 for Pony.", choices=["danbooru", "e621"], value="e621", visible=False)
                input_rating = gr.Radio(label="Rating", choices=list(V2_RATING_OPTIONS), value="explicit")
                with gr.Accordion(label="Advanced options", open=False):
                    input_aspect_ratio = gr.Radio(label="Aspect ratio", info="The aspect ratio of the image.", choices=list(V2_ASPECT_RATIO_OPTIONS), value="square")
                    input_length = gr.Radio(label="Length", info="The total length of the tags.", choices=list(V2_LENGTH_OPTIONS), value="very_long")
                    input_identity = gr.Radio(label="Keep identity", info="How strictly to keep the identity of the character or subject. If you specify the detail of subject in the prompt, you should choose `strict`. Otherwise, choose `none` or `lax`. `none` is very creative but sometimes ignores the input prompt.", choices=list(V2_IDENTITY_OPTIONS), value="lax")
                    input_ban_tags = gr.Textbox(label="Ban tags", info="Tags to ban from the output.", placeholder="alternate costumen, ...", value="censored")
                    # NOTE(review): also shadows `model_name` from the generator tab (wired above).
                    model_name = gr.Dropdown(label="Model", choices=list(V2_ALL_MODELS.keys()), value=list(V2_ALL_MODELS.keys())[0])
                dummy_np = gr.Textbox(label="Negative prompt", value="", visible=False)
                recom_animagine = gr.Textbox(label="Animagine reccomended prompt", value="Animagine", visible=False)
                recom_pony = gr.Textbox(label="Pony reccomended prompt", value="Pony", visible=False)
                generate_btn = gr.Button(value="GENERATE TAGS", size="lg", variant="primary")
            # Outputs: danbooru-style and Pony/e621-style tag strings.
            with gr.Row():
                with gr.Group():
                    output_text = gr.TextArea(label="Output tags", interactive=False, show_copy_button=True)
                    with gr.Row():
                        copy_btn = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
                        copy_prompt_btn = gr.Button(value="Copy to primary prompt", size="sm", interactive=False)
                with gr.Group():
                    output_text_pony = gr.TextArea(label="Output tags (Pony e621 style)", interactive=False, show_copy_button=True)
                    with gr.Row():
                        copy_btn_pony = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
                        copy_prompt_btn_pony = gr.Button(value="Copy to primary prompt", size="sm", interactive=False)

            random_character.click(select_random_character, [input_copyright, input_character], [input_copyright, input_character], queue=False, show_api=False)

            translate_input_prompt_button.click(translate_prompt, [input_general], [input_general], queue=False, show_api=False)
            translate_input_prompt_button.click(translate_prompt, [input_character], [input_character], queue=False, show_api=False)
            translate_input_prompt_button.click(translate_prompt, [input_copyright], [input_copyright], queue=False, show_api=False)

            # Image -> tags chain: clear fields, WD tagger, Florence-2 captioner,
            # tag filtering/conversion, then enable the copy-to-prompt button.
            generate_from_image_btn.click(
                lambda: ("", "", ""), None, [input_copyright, input_character, input_general], queue=False, show_api=False,
            ).success(
                predict_tags_wd,
                [input_image, input_general, image_algorithms, general_threshold, character_threshold],
                [input_copyright, input_character, input_general, copy_input_btn],
                show_api=False,
            ).success(
                predict_tags_fl2_sd3, [input_image, input_general, image_algorithms], [input_general], show_api=False,
            ).success(
                remove_specific_prompt, [input_general, keep_tags], [input_general], queue=False, show_api=False,
            ).success(
                convert_danbooru_to_e621_prompt, [input_general, input_tag_type], [input_general], queue=False, show_api=False,
            ).success(
                insert_recom_prompt, [input_general, dummy_np, recom_prompt], [input_general, dummy_np], queue=False, show_api=False,
            ).success(lambda: gr.update(interactive=True), None, [copy_prompt_btn_input], queue=False, show_api=False)
            copy_input_btn.click(compose_prompt_to_copy, [input_character, input_copyright, input_general], [input_tags_to_copy], show_api=False)\
                .success(gradio_copy_text, [input_tags_to_copy], js=COPY_ACTION_JS, show_api=False)
            copy_prompt_btn_input.click(compose_prompt_to_copy, inputs=[input_character, input_copyright, input_general], outputs=[input_tags_to_copy], show_api=False)\
                .success(gradio_copy_prompt, inputs=[input_tags_to_copy], outputs=[prompt], show_api=False)

            # Tags -> upsampled prompt chain, then enable all four copy buttons.
            generate_btn.click(
                v2_upsampling_prompt,
                [model_name, input_copyright, input_character, input_general,
                    input_rating, input_aspect_ratio, input_length, input_identity, input_ban_tags],
                [output_text],
                show_api=False,
            ).success(
                convert_danbooru_to_e621_prompt, [output_text, tag_type], [output_text_pony], queue=False, show_api=False,
            ).success(
                insert_recom_prompt, [output_text, dummy_np, recom_animagine], [output_text, dummy_np], queue=False, show_api=False,
            ).success(
                insert_recom_prompt, [output_text_pony, dummy_np, recom_pony], [output_text_pony, dummy_np], queue=False, show_api=False,
            ).success(lambda: (gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True)),
                None, [copy_btn, copy_btn_pony, copy_prompt_btn, copy_prompt_btn_pony], queue=False, show_api=False)
            copy_btn.click(gradio_copy_text, [output_text], js=COPY_ACTION_JS, show_api=False)
            copy_btn_pony.click(gradio_copy_text, [output_text_pony], js=COPY_ACTION_JS, show_api=False)
            copy_prompt_btn.click(gradio_copy_prompt, inputs=[output_text], outputs=[prompt], show_api=False)
            copy_prompt_btn_pony.click(gradio_copy_prompt, inputs=[output_text_pony], outputs=[prompt], show_api=False)

demo.queue()
demo.launch()
dc.py ADDED
@@ -0,0 +1,1328 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import spaces
2
+ import os
3
+ from stablepy import Model_Diffusers
4
+ from stablepy.diffusers_vanilla.model import scheduler_names
5
+ from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
6
+ import torch
7
+ import re
8
+ import shutil
9
+ import random
10
+ from stablepy import (
11
+ CONTROLNET_MODEL_IDS,
12
+ VALID_TASKS,
13
+ T2I_PREPROCESSOR_NAME,
14
+ FLASH_LORA,
15
+ SCHEDULER_CONFIG_MAP,
16
+ scheduler_names,
17
+ IP_ADAPTER_MODELS,
18
+ IP_ADAPTERS_SD,
19
+ IP_ADAPTERS_SDXL,
20
+ REPO_IMAGE_ENCODER,
21
+ ALL_PROMPT_WEIGHT_OPTIONS,
22
+ SD15_TASKS,
23
+ SDXL_TASKS,
24
+ )
25
+ import urllib.parse
26
+ import gradio as gr
27
+ from PIL import Image
28
+ import IPython.display
29
+ import time, json
30
+ from IPython.utils import capture
31
+ import logging
32
+ logging.getLogger("diffusers").setLevel(logging.ERROR)
33
+ import diffusers
34
+ diffusers.utils.logging.set_verbosity(40)
35
+ import warnings
36
+ warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
37
+ warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
38
+ warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
39
+ from stablepy import logger
40
+ logger.setLevel(logging.CRITICAL)
41
+
42
+ from env import (
43
+ hf_token,
44
+ hf_read_token, # to use only for private repos
45
+ CIVITAI_API_KEY,
46
+ HF_LORA_PRIVATE_REPOS1,
47
+ HF_LORA_PRIVATE_REPOS2,
48
+ HF_LORA_ESSENTIAL_PRIVATE_REPO,
49
+ HF_VAE_PRIVATE_REPO,
50
+ HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO,
51
+ HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO,
52
+ directory_models,
53
+ directory_loras,
54
+ directory_vaes,
55
+ directory_embeds,
56
+ directory_embeds_sdxl,
57
+ directory_embeds_positive_sdxl,
58
+ load_diffusers_format_model,
59
+ download_model_list,
60
+ download_lora_list,
61
+ download_vae_list,
62
+ download_embeds,
63
+ )
64
+
65
# ControlNet task -> list of preprocessors selectable for that task in the UI.
# "None" means the input image is passed through without preprocessing.
preprocessor_controlnet = {
    "openpose": [
        "Openpose",
        "None",
    ],
    "scribble": [
        "HED",
        "Pidinet",
        "None",
    ],
    "softedge": [
        "Pidinet",
        "HED",
        "HED safe",
        "Pidinet safe",
        "None",
    ],
    "segmentation": [
        "UPerNet",
        "None",
    ],
    "depth": [
        "DPT",
        "Midas",
        "None",
    ],
    "normalbae": [
        "NormalBae",
        "None",
    ],
    "lineart": [
        "Lineart",
        "Lineart coarse",
        "Lineart (anime)",
        "None",
        "None (anime)",
    ],
    "shuffle": [
        "ContentShuffle",
        "None",
    ],
    "canny": [
        "Canny"
    ],
    "mlsd": [
        "MLSD"
    ],
    "ip2p": [
        "ip2p"
    ],
}

# Human-readable task label (shown in the UI) -> stablepy task identifier.
# T2I-Adapter entries are disabled: they lack step-callback support in the
# pinned diffusers version (see inline notes).
task_stablepy = {
    'txt2img': 'txt2img',
    'img2img': 'img2img',
    'inpaint': 'inpaint',
    # 'canny T2I Adapter': 'sdxl_canny_t2i', # NO HAVE STEP CALLBACK PARAMETERS SO NOT WORKS WITH DIFFUSERS 0.29.0
    # 'sketch T2I Adapter': 'sdxl_sketch_t2i',
    # 'lineart T2I Adapter': 'sdxl_lineart_t2i',
    # 'depth-midas T2I Adapter': 'sdxl_depth-midas_t2i',
    # 'openpose T2I Adapter': 'sdxl_openpose_t2i',
    'openpose ControlNet': 'openpose',
    'canny ControlNet': 'canny',
    'mlsd ControlNet': 'mlsd',
    'scribble ControlNet': 'scribble',
    'softedge ControlNet': 'softedge',
    'segmentation ControlNet': 'segmentation',
    'depth ControlNet': 'depth',
    'normalbae ControlNet': 'normalbae',
    'lineart ControlNet': 'lineart',
    # 'lineart_anime ControlNet': 'lineart_anime',
    'shuffle ControlNet': 'shuffle',
    'ip2p ControlNet': 'ip2p',
    'optical pattern ControlNet': 'pattern',
    'tile realistic': 'sdxl_tile_realistic',
}

# Ordered task labels for the task dropdown (dict insertion order preserved).
task_model_list = list(task_stablepy.keys())
143
+
144
+
145
def download_things(directory, url, hf_token="", civitai_api_key=""):
    """Download *url* into *directory*, dispatching on the hosting service.

    - Google Drive links are fetched with ``gdown`` (cwd is temporarily
      changed so the file lands in *directory*).
    - Hugging Face links use ``aria2c``; ``/blob/`` URLs are rewritten to
      ``/resolve/`` and an Authorization header is sent when *hf_token*
      is non-empty.
    - Civitai links require *civitai_api_key* (appended as ``?token=``);
      without a key an error message is printed and nothing is downloaded.
    - Any other URL is fetched directly with ``aria2c``.

    Fix: every interpolated value (URL, directory, output filename, auth
    header) is now quoted with ``shlex.quote`` before being embedded in the
    ``os.system`` command line, so shell metacharacters in a URL or path
    can no longer break the command or inject extra shell commands.
    """
    import shlex  # local import so this fix is self-contained

    q = shlex.quote
    url = url.strip()

    if "drive.google.com" in url:
        original_dir = os.getcwd()
        os.chdir(directory)
        os.system(f"gdown --fuzzy {q(url)}")
        os.chdir(original_dir)
    elif "huggingface.co" in url:
        url = url.replace("?download=true", "")
        if "/blob/" in url:
            url = url.replace("/blob/", "/resolve/")
        filename = url.split('/')[-1]
        if hf_token:
            user_header = q(f"Authorization: Bearer {hf_token}")
            os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {q(url)} -d {q(directory)} -o {q(filename)}")
        else:
            os.system(f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {q(url)} -d {q(directory)} -o {q(filename)}")
    elif "civitai.com" in url:
        if "?" in url:
            url = url.split("?")[0]
        if civitai_api_key:
            url = url + f"?token={civitai_api_key}"
            os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {q(directory)} {q(url)}")
        else:
            print("\033[91mYou need an API key to download Civitai models.\033[0m")
    else:
        os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {q(directory)} {q(url)}")
173
+
174
+
175
def get_model_list(directory_path):
    """Return the paths of model weight files found directly in *directory_path*.

    Only files whose extension is one of the known weight formats is kept
    (no recursion into subdirectories). Each accepted path is echoed in
    blue so progress is visible in notebook logs.

    Fix: removed the unused ``name_without_extension`` local and the dead
    commented-out tuple-append variant.
    """
    valid_extensions = {'.ckpt', '.pt', '.pth', '.safetensors', '.bin'}
    model_list = []

    for filename in os.listdir(directory_path):
        if os.path.splitext(filename)[1] in valid_extensions:
            file_path = os.path.join(directory_path, filename)
            model_list.append(file_path)
            print('\033[34mFILE: ' + file_path + '\033[0m')
    return model_list
187
+
188
+
189
def process_string(input_string):
    """Split a ``"repo/name"`` string into ``(name, full_string)``.

    Returns ``None`` when the input does not contain exactly one slash.
    """
    try:
        _repo, name = input_string.split('/')
    except ValueError:  # zero slashes or more than one -> not a repo id
        return None
    return (name, input_string)
199
+
200
+ ## BEGIN MOD
201
+ from modutils import (
202
+ to_list,
203
+ list_uniq,
204
+ list_sub,
205
+ get_model_id_list,
206
+ get_tupled_embed_list,
207
+ get_tupled_model_list,
208
+ get_lora_model_list,
209
+ download_private_repo,
210
+ )
211
+
212
# Comma-joined URL lists consumed by the download loops below.
# - **Download Models**
download_model = ", ".join(download_model_list)
# - **Download VAEs**
download_vae = ", ".join(download_vae_list)
# - **Download LoRAs**
download_lora = ", ".join(download_lora_list)

#download_private_repo(HF_LORA_ESSENTIAL_PRIVATE_REPO, directory_loras, True)
download_private_repo(HF_VAE_PRIVATE_REPO, directory_vaes, False)

# Merge locally known model ids into the diffusers-format model list, de-duplicated.
load_diffusers_format_model = list_uniq(load_diffusers_format_model + get_model_id_list())
## END MOD

# NOTE(review): these overwrite the CIVITAI_API_KEY / hf_token values imported
# from env above with the process environment (None when unset) — presumably
# intentional so Space secrets take precedence; confirm against env.py.
CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
hf_token = os.environ.get("HF_TOKEN")
227
+
228
# Download stuffs: fetch any model / VAE / LoRA file not already present
# locally. The existence check uses the literal ./<kind>/ path while the
# download target is the configured directory, exactly as before.
for subdir, target_dir, joined_urls in (
    ("models", directory_models, download_model),
    ("vaes", directory_vaes, download_vae),
    ("loras", directory_loras, download_lora),
):
    for url in (item.strip() for item in joined_urls.split(',')):
        if not os.path.exists(f"./{subdir}/{url.split('/')[-1]}"):
            download_things(target_dir, url, hf_token, CIVITAI_API_KEY)

# Download Embeddings (path literal "embedings" kept as-is from the original)
for url_embed in download_embeds:
    if not os.path.exists(f"./embedings/{url_embed.split('/')[-1]}"):
        download_things(directory_embeds, url_embed, hf_token, CIVITAI_API_KEY)
243
+
244
# Build list models: scan the download directories and assemble the choices
# offered in the UI.
embed_list = get_model_list(directory_embeds)
model_list = get_model_list(directory_models)
# Diffusers-format repo ids come first, local checkpoint files after.
model_list = load_diffusers_format_model + model_list
## BEGIN MOD
lora_model_list = get_lora_model_list()
vae_model_list = get_model_list(directory_vaes)
vae_model_list.insert(0, "None")  # "None" sentinel = use the model's baked-in VAE

#download_private_repo(HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO, directory_embeds_sdxl, False)
#download_private_repo(HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO, directory_embeds_positive_sdxl, False)
# SDXL embeddings combine the negative and positive embed directories.
embed_sdxl_list = get_model_list(directory_embeds_sdxl) + get_model_list(directory_embeds_positive_sdxl)
256
+
257
def get_embed_list(pipeline_name):
    """Return the tupled embedding list matching the active pipeline class."""
    if pipeline_name == "StableDiffusionXLPipeline":
        source = embed_sdxl_list
    else:
        source = embed_list
    return get_tupled_embed_list(source)
259
+
260
+
261
+ ## END MOD
262
+
263
+ print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
264
+
265
+ upscaler_dict_gui = {
266
+ None : None,
267
+ "Lanczos" : "Lanczos",
268
+ "Nearest" : "Nearest",
269
+ "RealESRGAN_x4plus" : "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
270
+ "RealESRNet_x4plus" : "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth",
271
+ "RealESRGAN_x4plus_anime_6B": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
272
+ "RealESRGAN_x2plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
273
+ "realesr-animevideov3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
274
+ "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
275
+ "realesr-general-wdn-x4v3" : "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
276
+ "4x-UltraSharp" : "https:&#