John6666 committed on
Commit
26e6b0a
•
1 Parent(s): c81fdb0

Upload 24 files

README.md CHANGED
@@ -1,12 +1,14 @@
- ---
- title: Votepurchase Crash
- emoji: 🐨
- colorFrom: pink
- colorTo: indigo
- sdk: gradio
- sdk_version: 4.42.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: Votepurchase Multiple Model (SD1.5/SDXL Text-to-Image)
+ emoji: 🖼🖼️📦
+ colorFrom: purple
+ colorTo: red
+ sdk: gradio
+ sdk_version: 4.41.0
+ app_file: app.py
+ license: mit
+ short_description: Text-to-Image
+ pinned: true
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,373 @@
+ import spaces
+ import gradio as gr
+ import numpy as np
+
+ # DiffuseCraft
+ from dc import (infer, _infer, pass_result, get_diffusers_model_list, get_samplers,
+                 get_vaes, enable_model_recom_prompt, enable_diffusers_model_detail,
+                 get_t2i_model_info, get_all_lora_tupled_list, update_loras,
+                 apply_lora_prompt, download_my_lora, search_civitai_lora,
+                 select_civitai_lora, search_civitai_lora_json,
+                 preset_quality, preset_styles, process_style_prompt)
+ # Translator
+ from llmdolphin import (dolphin_respond_auto, dolphin_parse_simple,
+                         get_llm_formats, get_dolphin_model_format, get_dolphin_models,
+                         get_dolphin_model_info, select_dolphin_model, select_dolphin_format, get_dolphin_sysprompt)
+ # Tagger
+ from tagger.v2 import v2_upsampling_prompt, V2_ALL_MODELS
+ from tagger.utils import (gradio_copy_text, gradio_copy_prompt, COPY_ACTION_JS,
+                           V2_ASPECT_RATIO_OPTIONS, V2_RATING_OPTIONS, V2_LENGTH_OPTIONS, V2_IDENTITY_OPTIONS)
+ from tagger.tagger import (predict_tags_wd, convert_danbooru_to_e621_prompt,
+                            remove_specific_prompt, insert_recom_prompt, compose_prompt_to_copy,
+                            translate_prompt, select_random_character)
+ from tagger.fl2sd3longcap import predict_tags_fl2_sd3
+
+
+ def description_ui():
+     gr.Markdown(
+         """
+ ## Danbooru Tags Transformer V2 Demo with WD Tagger & SD3 Long Captioner
+ (Image =>) Prompt => Upsampled longer prompt
+ - Mod of p1atdev's [Danbooru Tags Transformer V2 Demo](https://huggingface.co/spaces/p1atdev/danbooru-tags-transformer-v2) and [WD Tagger with 🤗 transformers](https://huggingface.co/spaces/p1atdev/wd-tagger-transformers).
+ - Models: p1atdev's [wd-swinv2-tagger-v3-hf](https://huggingface.co/p1atdev/wd-swinv2-tagger-v3-hf), [dart-v2-moe-sft](https://huggingface.co/p1atdev/dart-v2-moe-sft), [dart-v2-sft](https://huggingface.co/p1atdev/dart-v2-sft)\
+ , gokaygokay's [Florence-2-SD3-Captioner](https://huggingface.co/gokaygokay/Florence-2-SD3-Captioner)
+         """
+     )
+
+
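+ # Upper bounds for the seed slider and the image width/height sliders.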
+ MAX_SEED = np.iinfo(np.int32).max
+ MAX_IMAGE_SIZE = 1216
+
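+ # Custom CSS: center the layout and fix the sizes of the result image and the LoRA dropdowns.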
+ css = """
+ #container { margin: 0 auto !important; }
+ #col-container { margin: 0 auto !important; }
+ #result { display: inline-block; max-width: 520px; max-height: 520px; width: 520px; height: 520px; text-align: center; margin: 0px auto !important; }
+ .lora { display: inline-block; min-width: 480px !important; }
+ #model-info { text-align: center !important; }
+ """
+
+ with gr.Blocks(css=css, fill_width=True, elem_id="container") as demo:
+     with gr.Tab("Image Generator"):
+         with gr.Column(elem_id="col-container"):
+             with gr.Row():
+                 prompt = gr.Text(label="Prompt", show_label=False, lines=1, max_lines=8, placeholder="Enter your prompt", container=False)
+
+             with gr.Row():
+                 run_button = gr.Button("Run")
+                 run_translate_button = gr.Button("Translate")
+
+             result = gr.Image(label="Result", elem_id="result", show_label=False, interactive=False,
+                               show_download_button=True, show_share_button=False, container=True)
+
+             with gr.Accordion("Advanced Settings", open=False):
+                 with gr.Row():
+                     negative_prompt = gr.Text(label="Negative prompt", lines=1, max_lines=6, placeholder="Enter a negative prompt",
+                                               value="(low quality, worst quality:1.2), very displeasing, watermark, signature, ugly")
+
+                 with gr.Row():
+                     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
+                     randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+                 with gr.Row():
+                     width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)  # 832
+                     height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)  # 1216
+                     guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=30.0, step=0.1, value=7)
+                     num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=100, step=1, value=28)
+
+                 with gr.Row():
+                     with gr.Column(scale=4):
+                         model_name = gr.Dropdown(label="Model", info="You can enter a Hugging Face model repo_id that you want to use.",
+                                                  choices=get_diffusers_model_list(), value=get_diffusers_model_list()[0],
+                                                  allow_custom_value=True, interactive=True, min_width=320)
+                         model_info = gr.Markdown(elem_id="model-info")
+                     with gr.Column(scale=1):
+                         model_detail = gr.Checkbox(label="Show detail of model in list", value=False)
+
+                 with gr.Row():
+                     sampler = gr.Dropdown(label="Sampler", choices=get_samplers(), value="Euler a")
+                     vae_model = gr.Dropdown(label="VAE Model", choices=get_vaes(), value=get_vaes()[0])
+
+                 with gr.Accordion("LoRA", open=True, visible=True):
+                     with gr.Row():
+                         with gr.Column():
+                             with gr.Row():
+                                 lora1 = gr.Dropdown(label="LoRA 1", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
+                                 lora1_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 1: weight")
+                             with gr.Row():
+                                 lora1_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
+                                 lora1_copy = gr.Button(value="Copy example to prompt", visible=False)
+                                 lora1_md = gr.Markdown(value="", visible=False)
+                         with gr.Column():
+                             with gr.Row():
+                                 lora2 = gr.Dropdown(label="LoRA 2", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
+                                 lora2_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 2: weight")
+                             with gr.Row():
+                                 lora2_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
+                                 lora2_copy = gr.Button(value="Copy example to prompt", visible=False)
+                                 lora2_md = gr.Markdown(value="", visible=False)
+                         with gr.Column():
+                             with gr.Row():
+                                 lora3 = gr.Dropdown(label="LoRA 3", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
+                                 lora3_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 3: weight")
+                             with gr.Row():
+                                 lora3_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
+                                 lora3_copy = gr.Button(value="Copy example to prompt", visible=False)
+                                 lora3_md = gr.Markdown(value="", visible=False)
+                         with gr.Column():
+                             with gr.Row():
+                                 lora4 = gr.Dropdown(label="LoRA 4", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
+                                 lora4_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 4: weight")
+                             with gr.Row():
+                                 lora4_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
+                                 lora4_copy = gr.Button(value="Copy example to prompt", visible=False)
+                                 lora4_md = gr.Markdown(value="", visible=False)
+                         with gr.Column():
+                             with gr.Row():
+                                 lora5 = gr.Dropdown(label="LoRA 5", choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
+                                 lora5_wt = gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label="LoRA 5: weight")
+                             with gr.Row():
+                                 lora5_info = gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
+                                 lora5_copy = gr.Button(value="Copy example to prompt", visible=False)
+                                 lora5_md = gr.Markdown(value="", visible=False)
+                     with gr.Accordion("From URL", open=True, visible=True):
+                         with gr.Row():
+                             lora_search_civitai_query = gr.Textbox(label="Query", placeholder="oomuro sakurako...", lines=1)
+                             lora_search_civitai_basemodel = gr.CheckboxGroup(label="Search LoRA for", choices=["Pony", "SD 1.5", "SDXL 1.0"], value=["Pony", "SDXL 1.0"])
+                             lora_search_civitai_submit = gr.Button("Search on Civitai")
+                         with gr.Row():
+                             lora_search_civitai_result = gr.Dropdown(label="Search Results", choices=[("", "")], value="", allow_custom_value=True, visible=False)
+                             lora_search_civitai_json = gr.JSON(value={}, visible=False)
+                             lora_search_civitai_desc = gr.Markdown(value="", visible=False)
+                         lora_download_url = gr.Textbox(label="URL", placeholder="http://...my_lora_url.safetensors", lines=1)
+                         lora_download = gr.Button("Get and set LoRA and apply to prompt")
+
+                 with gr.Row():
+                     recom_prompt = gr.Checkbox(label="Recommended prompt", value=True)
+                     quality_selector = gr.Radio(label="Quality Tag Presets", interactive=True, choices=list(preset_quality.keys()), value="None")
+                     style_selector = gr.Radio(label="Style Presets", interactive=True, choices=list(preset_styles.keys()), value="None")
+
+                 with gr.Accordion("Translation Settings", open=False):
+                     chatbot = gr.Chatbot(likeable=False, render_markdown=False, visible=False)  # component for auto-translation
+                     chat_model = gr.Dropdown(choices=get_dolphin_models(), value=get_dolphin_models()[0][1], allow_custom_value=True, label="Model")
+                     chat_model_info = gr.Markdown(value=get_dolphin_model_info(get_dolphin_models()[0][1]), label="Model info")
+                     chat_format = gr.Dropdown(choices=get_llm_formats(), value=get_dolphin_model_format(get_dolphin_models()[0][1]), label="Message format")
+                     with gr.Row():
+                         chat_tokens = gr.Slider(minimum=1, maximum=4096, value=512, step=1, label="Max tokens")
+                         chat_temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
+                         chat_topp = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
+                         chat_topk = gr.Slider(minimum=0, maximum=100, value=40, step=1, label="Top-k")
+                         chat_rp = gr.Slider(minimum=0.0, maximum=2.0, value=1.1, step=0.1, label="Repetition penalty")
+                     chat_sysmsg = gr.Textbox(value=get_dolphin_sysprompt(), label="System message")
+
+             examples = gr.Examples(
+                 examples=[
+                     ["souryuu asuka langley, 1girl, neon genesis evangelion, plugsuit, pilot suit, red bodysuit, sitting, crossing legs, black eye patch, cat hat, throne, symmetrical, looking down, from bottom, looking at viewer, outdoors"],
+                     ["sailor moon, magical girl transformation, sparkles and ribbons, soft pastel colors, crescent moon motif, starry night sky background, shoujo manga style"],
+                     ["kafuu chino, 1girl, solo"],
+                     ["1girl"],
+                     ["beautiful sunset"],
+                 ],
+                 inputs=[prompt],
+             )
+
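+     # Run generation on the Run button or on prompt submit.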
+     gr.on(  # lambda x: None, inputs=None, outputs=result).then(
+         triggers=[run_button.click, prompt.submit],
+         fn=infer,
+         inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
+                 guidance_scale, num_inference_steps, model_name,
+                 lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
+                 sampler, vae_model],
+         outputs=[result],
+         queue=True,
+         show_progress="full",
+         show_api=True,
+     )
+
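+     # Translate-then-generate chain: the LLM translates the prompt in a hidden chatbot,
+     # writes it back into the prompt box, then generation runs and the chatbot is cleared.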
+     gr.on(  # lambda x: None, inputs=None, outputs=result).then(
+         triggers=[run_translate_button.click],
+         fn=_infer,  # dummy fn for api
+         inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
+                 guidance_scale, num_inference_steps, model_name,
+                 lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
+                 sampler, vae_model],
+         outputs=[result],
+         queue=False,
+         show_api=True,
+         api_name="infer_translate",
+     ).success(
+         fn=dolphin_respond_auto,
+         inputs=[prompt, chatbot],
+         outputs=[chatbot],
+         queue=True,
+         show_progress="full",
+         show_api=False,
+     ).success(
+         fn=dolphin_parse_simple,
+         inputs=[prompt, chatbot],
+         outputs=[prompt],
+         queue=False,
+         show_api=False,
+     ).success(
+         fn=infer,
+         inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
+                 guidance_scale, num_inference_steps, model_name,
+                 lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
+                 sampler, vae_model],
+         outputs=[result],
+         queue=True,
+         show_progress="full",
+         show_api=False,
+     ).success(lambda: None, None, chatbot, queue=False, show_api=False)\
+     .success(pass_result, [result], [result], queue=False, show_api=False)  # dummy fn for api
+
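+     # Keep the prompt and the per-LoRA info boxes in sync when any LoRA or weight changes.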
+     gr.on(
+         triggers=[lora1.change, lora1_wt.change, lora2.change, lora2_wt.change, lora3.change, lora3_wt.change,
+                   lora4.change, lora4_wt.change, lora5.change, lora5_wt.change],
+         fn=update_loras,
+         inputs=[prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt],
+         outputs=[prompt, lora1, lora1_wt, lora1_info, lora1_copy, lora1_md,
+                  lora2, lora2_wt, lora2_info, lora2_copy, lora2_md, lora3, lora3_wt, lora3_info, lora3_copy, lora3_md,
+                  lora4, lora4_wt, lora4_info, lora4_copy, lora4_md, lora5, lora5_wt, lora5_info, lora5_copy, lora5_md],
+         queue=False,
+         trigger_mode="once",
+         show_api=False,
+     )
+     lora1_copy.click(apply_lora_prompt, [prompt, lora1_info], [prompt], queue=False, show_api=False)
+     lora2_copy.click(apply_lora_prompt, [prompt, lora2_info], [prompt], queue=False, show_api=False)
+     lora3_copy.click(apply_lora_prompt, [prompt, lora3_info], [prompt], queue=False, show_api=False)
+     lora4_copy.click(apply_lora_prompt, [prompt, lora4_info], [prompt], queue=False, show_api=False)
+     lora5_copy.click(apply_lora_prompt, [prompt, lora5_info], [prompt], queue=False, show_api=False)
+
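+     # Civitai LoRA search, selection, and download wiring.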
+     gr.on(
+         triggers=[lora_search_civitai_submit.click, lora_search_civitai_query.submit],
+         fn=search_civitai_lora,
+         inputs=[lora_search_civitai_query, lora_search_civitai_basemodel],
+         outputs=[lora_search_civitai_result, lora_search_civitai_desc, lora_search_civitai_submit, lora_search_civitai_query],
+         scroll_to_output=True,
+         queue=True,
+         show_api=False,
+     )
+     lora_search_civitai_json.change(search_civitai_lora_json, [lora_search_civitai_query, lora_search_civitai_basemodel], [lora_search_civitai_json], queue=True, show_api=True)  # fn for api
+     lora_search_civitai_result.change(select_civitai_lora, [lora_search_civitai_result], [lora_download_url, lora_search_civitai_desc], scroll_to_output=True, queue=False, show_api=False)
+     gr.on(
+         triggers=[lora_download.click, lora_download_url.submit],
+         fn=download_my_lora,
+         inputs=[lora_download_url, lora1, lora2, lora3, lora4, lora5],
+         outputs=[lora1, lora2, lora3, lora4, lora5],
+         scroll_to_output=True,
+         queue=True,
+         show_api=False,
+     )
+
+     recom_prompt.change(enable_model_recom_prompt, [recom_prompt], [recom_prompt], queue=False, show_api=False)
+     gr.on(
+         triggers=[quality_selector.change, style_selector.change],
+         fn=process_style_prompt,
+         inputs=[prompt, negative_prompt, style_selector, quality_selector],
+         outputs=[prompt, negative_prompt],
+         queue=False,
+         trigger_mode="once",
+     )
+
+     model_detail.change(enable_diffusers_model_detail, [model_detail, model_name], [model_detail, model_name], queue=False, show_api=False)
+     model_name.change(get_t2i_model_info, [model_name], [model_info], queue=False, show_api=False)
+
+     chat_model.change(select_dolphin_model, [chat_model], [chat_model, chat_format, chat_model_info], queue=True, show_progress="full", show_api=False)\
+         .success(lambda: None, None, chatbot, queue=False, show_api=False)
+     chat_format.change(select_dolphin_format, [chat_format], [chat_format], queue=False, show_api=False)\
+         .success(lambda: None, None, chatbot, queue=False, show_api=False)
+
+     # Tagger
+     with gr.Tab("Tags Transformer with Tagger"):
+         with gr.Column():
+             with gr.Group():
+                 input_image = gr.Image(label="Input image", type="pil", sources=["upload", "clipboard"], height=256)
+                 with gr.Accordion(label="Advanced options", open=False):
+                     general_threshold = gr.Slider(label="Threshold", minimum=0.0, maximum=1.0, value=0.3, step=0.01, interactive=True)
+                     character_threshold = gr.Slider(label="Character threshold", minimum=0.0, maximum=1.0, value=0.8, step=0.01, interactive=True)
+                     input_tag_type = gr.Radio(label="Convert tags to", info="danbooru for Animagine, e621 for Pony.", choices=["danbooru", "e621"], value="danbooru")
+                     recom_prompt = gr.Radio(label="Insert recommended prompt", choices=["None", "Animagine", "Pony"], value="None", interactive=True)
+                     image_algorithms = gr.CheckboxGroup(["Use WD Tagger", "Use Florence-2-SD3-Long-Captioner"], label="Algorithms", value=["Use WD Tagger"])
+                     keep_tags = gr.Radio(label="Remove tags leaving only the following", choices=["body", "dress", "all"], value="all")
+                 generate_from_image_btn = gr.Button(value="GENERATE TAGS FROM IMAGE", size="lg", variant="primary")
+             with gr.Group():
+                 with gr.Row():
+                     input_character = gr.Textbox(label="Character tags", placeholder="hatsune miku")
+                     input_copyright = gr.Textbox(label="Copyright tags", placeholder="vocaloid")
+                     random_character = gr.Button(value="Random character 🎲", size="sm")
+                 input_general = gr.TextArea(label="General tags", lines=4, placeholder="1girl, ...", value="")
+                 input_tags_to_copy = gr.Textbox(value="", visible=False)
+                 with gr.Row():
+                     copy_input_btn = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
+                     copy_prompt_btn_input = gr.Button(value="Copy to primary prompt", size="sm", interactive=False)
+                 translate_input_prompt_button = gr.Button(value="Translate prompt to English", size="sm", variant="secondary")
+                 tag_type = gr.Radio(label="Output tag conversion", info="danbooru for Animagine, e621 for Pony.", choices=["danbooru", "e621"], value="e621", visible=False)
+                 input_rating = gr.Radio(label="Rating", choices=list(V2_RATING_OPTIONS), value="explicit")
+                 with gr.Accordion(label="Advanced options", open=False):
+                     input_aspect_ratio = gr.Radio(label="Aspect ratio", info="The aspect ratio of the image.", choices=list(V2_ASPECT_RATIO_OPTIONS), value="square")
+                     input_length = gr.Radio(label="Length", info="The total length of the tags.", choices=list(V2_LENGTH_OPTIONS), value="very_long")
+                     input_identity = gr.Radio(label="Keep identity", info="How strictly to keep the identity of the character or subject. If you specify the details of the subject in the prompt, you should choose `strict`. Otherwise, choose `none` or `lax`. `none` is very creative but sometimes ignores the input prompt.", choices=list(V2_IDENTITY_OPTIONS), value="lax")
+                     input_ban_tags = gr.Textbox(label="Ban tags", info="Tags to ban from the output.", placeholder="alternate costume, ...", value="censored")
+                     model_name = gr.Dropdown(label="Model", choices=list(V2_ALL_MODELS.keys()), value=list(V2_ALL_MODELS.keys())[0])
+                     dummy_np = gr.Textbox(label="Negative prompt", value="", visible=False)
+                     recom_animagine = gr.Textbox(label="Animagine recommended prompt", value="Animagine", visible=False)
+                     recom_pony = gr.Textbox(label="Pony recommended prompt", value="Pony", visible=False)
+                 generate_btn = gr.Button(value="GENERATE TAGS", size="lg", variant="primary")
+             with gr.Row():
+                 with gr.Group():
+                     output_text = gr.TextArea(label="Output tags", interactive=False, show_copy_button=True)
+                     with gr.Row():
+                         copy_btn = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
+                         copy_prompt_btn = gr.Button(value="Copy to primary prompt", size="sm", interactive=False)
+                 with gr.Group():
+                     output_text_pony = gr.TextArea(label="Output tags (Pony e621 style)", interactive=False, show_copy_button=True)
+                     with gr.Row():
+                         copy_btn_pony = gr.Button(value="Copy to clipboard", size="sm", interactive=False)
+                         copy_prompt_btn_pony = gr.Button(value="Copy to primary prompt", size="sm", interactive=False)
+
+     random_character.click(select_random_character, [input_copyright, input_character], [input_copyright, input_character], queue=False, show_api=False)
+
+     translate_input_prompt_button.click(translate_prompt, [input_general], [input_general], queue=False, show_api=False)
+     translate_input_prompt_button.click(translate_prompt, [input_character], [input_character], queue=False, show_api=False)
+     translate_input_prompt_button.click(translate_prompt, [input_copyright], [input_copyright], queue=False, show_api=False)
+
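+     # Image-to-tags flow: clear the tag fields, run the selected taggers, then post-process the tags.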
+     generate_from_image_btn.click(
+         lambda: ("", "", ""), None, [input_copyright, input_character, input_general], queue=False, show_api=False,
+     ).success(
+         predict_tags_wd,
+         [input_image, input_general, image_algorithms, general_threshold, character_threshold],
+         [input_copyright, input_character, input_general, copy_input_btn],
+         show_api=False,
+     ).success(
+         predict_tags_fl2_sd3, [input_image, input_general, image_algorithms], [input_general], show_api=False,
+     ).success(
+         remove_specific_prompt, [input_general, keep_tags], [input_general], queue=False, show_api=False,
+     ).success(
+         convert_danbooru_to_e621_prompt, [input_general, input_tag_type], [input_general], queue=False, show_api=False,
+     ).success(
+         insert_recom_prompt, [input_general, dummy_np, recom_prompt], [input_general, dummy_np], queue=False, show_api=False,
+     ).success(lambda: gr.update(interactive=True), None, [copy_prompt_btn_input], queue=False, show_api=False)
+     copy_input_btn.click(compose_prompt_to_copy, [input_character, input_copyright, input_general], [input_tags_to_copy], show_api=False)\
+         .success(gradio_copy_text, [input_tags_to_copy], js=COPY_ACTION_JS, show_api=False)
+     copy_prompt_btn_input.click(compose_prompt_to_copy, inputs=[input_character, input_copyright, input_general], outputs=[input_tags_to_copy], show_api=False)\
+         .success(gradio_copy_prompt, inputs=[input_tags_to_copy], outputs=[prompt], show_api=False)
+
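+     # Tag-upsampling flow: generate tags, derive the e621-style variant, and insert the recommended prompts.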
+     generate_btn.click(
+         v2_upsampling_prompt,
+         [model_name, input_copyright, input_character, input_general,
+          input_rating, input_aspect_ratio, input_length, input_identity, input_ban_tags],
+         [output_text],
+         show_api=False,
+     ).success(
+         convert_danbooru_to_e621_prompt, [output_text, tag_type], [output_text_pony], queue=False, show_api=False,
+     ).success(
+         insert_recom_prompt, [output_text, dummy_np, recom_animagine], [output_text, dummy_np], queue=False, show_api=False,
+     ).success(
+         insert_recom_prompt, [output_text_pony, dummy_np, recom_pony], [output_text_pony, dummy_np], queue=False, show_api=False,
+     ).success(lambda: (gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True)),
+               None, [copy_btn, copy_btn_pony, copy_prompt_btn, copy_prompt_btn_pony], queue=False, show_api=False)
+     copy_btn.click(gradio_copy_text, [output_text], js=COPY_ACTION_JS, show_api=False)
+     copy_btn_pony.click(gradio_copy_text, [output_text_pony], js=COPY_ACTION_JS, show_api=False)
+     copy_prompt_btn.click(gradio_copy_prompt, inputs=[output_text], outputs=[prompt], show_api=False)
+     copy_prompt_btn_pony.click(gradio_copy_prompt, inputs=[output_text_pony], outputs=[prompt], show_api=False)
+
+ demo.queue()
+ demo.launch()
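+ # The `infer` and `infer_translate` endpoints above are exposed over the Gradio API
+ # (show_api=True). A minimal client sketch, assuming a hypothetical Space id:
+ #   from gradio_client import Client
+ #   client = Client("John6666/votepurchase-multiple-model")  # hypothetical Space id
+ #   client.predict(..., api_name="/infer_translate")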
dc.py ADDED
@@ -0,0 +1,1328 @@
+ import spaces
+ import os
+ from stablepy import Model_Diffusers
+ from stablepy.diffusers_vanilla.model import scheduler_names
+ from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
+ import torch
+ import re
+ import shutil
+ import random
+ from stablepy import (
+     CONTROLNET_MODEL_IDS,
+     VALID_TASKS,
+     T2I_PREPROCESSOR_NAME,
+     FLASH_LORA,
+     SCHEDULER_CONFIG_MAP,
+     scheduler_names,
+     IP_ADAPTER_MODELS,
+     IP_ADAPTERS_SD,
+     IP_ADAPTERS_SDXL,
+     REPO_IMAGE_ENCODER,
+     ALL_PROMPT_WEIGHT_OPTIONS,
+     SD15_TASKS,
+     SDXL_TASKS,
+ )
+ import urllib.parse
+ import gradio as gr
+ from PIL import Image
+ import IPython.display
+ import time, json
+ from IPython.utils import capture
+ import logging
+ logging.getLogger("diffusers").setLevel(logging.ERROR)
+ import diffusers
+ diffusers.utils.logging.set_verbosity(40)
+ import warnings
+ warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
+ warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
+ warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
+ from stablepy import logger
+ logger.setLevel(logging.CRITICAL)
+
+ from env import (
+     hf_token,
+     hf_read_token,  # to use only for private repos
+     CIVITAI_API_KEY,
+     HF_LORA_PRIVATE_REPOS1,
+     HF_LORA_PRIVATE_REPOS2,
+     HF_LORA_ESSENTIAL_PRIVATE_REPO,
+     HF_VAE_PRIVATE_REPO,
+     HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO,
+     HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO,
+     directory_models,
+     directory_loras,
+     directory_vaes,
+     directory_embeds,
+     directory_embeds_sdxl,
+     directory_embeds_positive_sdxl,
+     load_diffusers_format_model,
+     download_model_list,
+     download_lora_list,
+     download_vae_list,
+     download_embeds,
+ )
+
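+ # Preprocessor choices offered for each ControlNet task.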
+ preprocessor_controlnet = {
+     "openpose": [
+         "Openpose",
+         "None",
+     ],
+     "scribble": [
+         "HED",
+         "Pidinet",
+         "None",
+     ],
+     "softedge": [
+         "Pidinet",
+         "HED",
+         "HED safe",
+         "Pidinet safe",
+         "None",
+     ],
+     "segmentation": [
+         "UPerNet",
+         "None",
+     ],
+     "depth": [
+         "DPT",
+         "Midas",
+         "None",
+     ],
+     "normalbae": [
+         "NormalBae",
+         "None",
+     ],
+     "lineart": [
+         "Lineart",
+         "Lineart coarse",
+         "Lineart (anime)",
+         "None",
+         "None (anime)",
+     ],
+     "shuffle": [
+         "ContentShuffle",
+         "None",
+     ],
+     "canny": [
+         "Canny"
+     ],
+     "mlsd": [
+         "MLSD"
+     ],
+     "ip2p": [
+         "ip2p"
+     ],
+ }
+
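+ # Map GUI task labels to stablepy task names.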
+ task_stablepy = {
+     'txt2img': 'txt2img',
+     'img2img': 'img2img',
+     'inpaint': 'inpaint',
+     # T2I Adapters have no step-callback parameters, so they don't work with diffusers 0.29.0:
+     # 'canny T2I Adapter': 'sdxl_canny_t2i',
+     # 'sketch T2I Adapter': 'sdxl_sketch_t2i',
+     # 'lineart T2I Adapter': 'sdxl_lineart_t2i',
+     # 'depth-midas T2I Adapter': 'sdxl_depth-midas_t2i',
+     # 'openpose T2I Adapter': 'sdxl_openpose_t2i',
+     'openpose ControlNet': 'openpose',
+     'canny ControlNet': 'canny',
+     'mlsd ControlNet': 'mlsd',
+     'scribble ControlNet': 'scribble',
+     'softedge ControlNet': 'softedge',
+     'segmentation ControlNet': 'segmentation',
+     'depth ControlNet': 'depth',
+     'normalbae ControlNet': 'normalbae',
+     'lineart ControlNet': 'lineart',
+     # 'lineart_anime ControlNet': 'lineart_anime',
+     'shuffle ControlNet': 'shuffle',
+     'ip2p ControlNet': 'ip2p',
+     'optical pattern ControlNet': 'pattern',
+     'tile realistic': 'sdxl_tile_realistic',
+ }
+
+ task_model_list = list(task_stablepy.keys())
+
+
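+ # Download a file into `directory`, dispatching on the URL host:
+ # gdown for Google Drive, aria2c for Hugging Face, Civitai, and plain URLs.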
+ def download_things(directory, url, hf_token="", civitai_api_key=""):
+     url = url.strip()
+
+     if "drive.google.com" in url:
+         original_dir = os.getcwd()
+         os.chdir(directory)
+         os.system(f"gdown --fuzzy {url}")
+         os.chdir(original_dir)
+     elif "huggingface.co" in url:
+         url = url.replace("?download=true", "")
+         # url = urllib.parse.quote(url, safe=':/')  # fix encoding
+         if "/blob/" in url:
+             url = url.replace("/blob/", "/resolve/")
+         user_header = f'"Authorization: Bearer {hf_token}"'
+         if hf_token:
+             os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
+         else:
+             os.system(f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
+     elif "civitai.com" in url:
+         if "?" in url:
+             url = url.split("?")[0]
+         if civitai_api_key:
+             url = url + f"?token={civitai_api_key}"
+             os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
+         else:
+             print("\033[91mYou need an API key to download Civitai models.\033[0m")
+     else:
+         os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
+
+
+ def get_model_list(directory_path):
+     model_list = []
+     valid_extensions = {'.ckpt', '.pt', '.pth', '.safetensors', '.bin'}
+
+     for filename in os.listdir(directory_path):
+         if os.path.splitext(filename)[1] in valid_extensions:
+             name_without_extension = os.path.splitext(filename)[0]
+             file_path = os.path.join(directory_path, filename)
+             # model_list.append((name_without_extension, file_path))
+             model_list.append(file_path)
+             print('\033[34mFILE: ' + file_path + '\033[0m')
+     return model_list
+
+
+ def process_string(input_string):
+     parts = input_string.split('/')
+
+     if len(parts) == 2:
+         first_element = parts[1]
+         complete_string = input_string
+         result = (first_element, complete_string)
+         return result
+     else:
+         return None
+
+
+ ## BEGIN MOD
+ from modutils import (
+     to_list,
+     list_uniq,
+     list_sub,
+     get_model_id_list,
+     get_tupled_embed_list,
+     get_tupled_model_list,
+     get_lora_model_list,
+     download_private_repo,
+ )
+
+ # - **Download Models**
+ download_model = ", ".join(download_model_list)
+ # - **Download VAEs**
+ download_vae = ", ".join(download_vae_list)
+ # - **Download LoRAs**
+ download_lora = ", ".join(download_lora_list)
+
+ #download_private_repo(HF_LORA_ESSENTIAL_PRIVATE_REPO, directory_loras, True)
+ download_private_repo(HF_VAE_PRIVATE_REPO, directory_vaes, False)
+
+ load_diffusers_format_model = list_uniq(load_diffusers_format_model + get_model_id_list())
+ ## END MOD
+
+ CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
+ hf_token = os.environ.get("HF_TOKEN")
+
+ # Download files
+ for url in [url.strip() for url in download_model.split(',')]:
+     if not os.path.exists(f"./models/{url.split('/')[-1]}"):
+         download_things(directory_models, url, hf_token, CIVITAI_API_KEY)
+ for url in [url.strip() for url in download_vae.split(',')]:
+     if not os.path.exists(f"./vaes/{url.split('/')[-1]}"):
+         download_things(directory_vaes, url, hf_token, CIVITAI_API_KEY)
+ for url in [url.strip() for url in download_lora.split(',')]:
+     if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
+         download_things(directory_loras, url, hf_token, CIVITAI_API_KEY)
+
+ # Download Embeddings
+ for url_embed in download_embeds:
+     if not os.path.exists(f"./embedings/{url_embed.split('/')[-1]}"):
+         download_things(directory_embeds, url_embed, hf_token, CIVITAI_API_KEY)
+
+ # Build model lists
+ embed_list = get_model_list(directory_embeds)
+ model_list = get_model_list(directory_models)
+ model_list = load_diffusers_format_model + model_list
+ ## BEGIN MOD
+ lora_model_list = get_lora_model_list()
+ vae_model_list = get_model_list(directory_vaes)
+ vae_model_list.insert(0, "None")
+
+ #download_private_repo(HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO, directory_embeds_sdxl, False)
+ #download_private_repo(HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO, directory_embeds_positive_sdxl, False)
+ embed_sdxl_list = get_model_list(directory_embeds_sdxl) + get_model_list(directory_embeds_positive_sdxl)
+
+ def get_embed_list(pipeline_name):
+     return get_tupled_embed_list(embed_sdxl_list if pipeline_name == "StableDiffusionXLPipeline" else embed_list)
+
+
+ ## END MOD
+
+ print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
+
+ upscaler_dict_gui = {
+     None: None,
+     "Lanczos": "Lanczos",
+     "Nearest": "Nearest",
+     "RealESRGAN_x4plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
+     "RealESRNet_x4plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth",
+     "RealESRGAN_x4plus_anime_6B": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
+     "RealESRGAN_x2plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
+     "realesr-animevideov3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
+     "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
+     "realesr-general-wdn-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
+     "4x-UltraSharp": "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
+     "4x_foolhardy_Remacri": "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
+     "Remacri4xExtraSmoother": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
+     "AnimeSharp4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
+     "lollypop": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/lollypop.pth",
+     "RealisticRescaler4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/RealisticRescaler%204x.pth",
+     "NickelbackFS4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/NickelbackFS%204x.pth"
+ }
+
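+ # Parse a generation-parameters string ("<prompt> Negative prompt: <neg> Steps: ...")
+ # into a dict of prompt, neg_prompt, and the remaining key/value settings.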
+ def extract_parameters(input_string):
+     parameters = {}
+     input_string = input_string.replace("\n", "")
+
+     if "Negative prompt:" not in input_string:
+         print("Negative prompt not detected")
+         parameters["prompt"] = input_string
+         return parameters
+
+     parm = input_string.split("Negative prompt:")
+     parameters["prompt"] = parm[0]
+     if "Steps:" not in parm[1]:
+         print("Steps not detected")
+         parameters["neg_prompt"] = parm[1]
+         return parameters
+     parm = parm[1].split("Steps:")
+     parameters["neg_prompt"] = parm[0]
+     input_string = "Steps:" + parm[1]
+
+     # Extracting Steps
+     steps_match = re.search(r'Steps: (\d+)', input_string)
+     if steps_match:
+         parameters['Steps'] = int(steps_match.group(1))
+
+     # Extracting Size
+     size_match = re.search(r'Size: (\d+x\d+)', input_string)
+     if size_match:
+         parameters['Size'] = size_match.group(1)
+         width, height = map(int, parameters['Size'].split('x'))
+         parameters['width'] = width
+         parameters['height'] = height
+
+     # Extracting other parameters
+     other_parameters = re.findall(r'(\w+): (.*?)(?=, \w+|$)', input_string)
+     for param in other_parameters:
+         parameters[param[0]] = param[1].strip('"')
+
+     return parameters
+
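+ # A minimal usage sketch with a hypothetical input string:
+ #   extract_parameters("1girl Negative prompt: lowres Steps: 28, Size: 896x1152")
+ #   -> {'prompt': '1girl ', 'neg_prompt': ' lowres ', 'Steps': '28',
+ #       'Size': '896x1152', 'width': 896, 'height': 1152}
+ # (the generic key/value pass re-writes 'Steps' as a string)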
+
+ ## BEGIN MOD
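+ # GuiSD wraps a stablepy Model_Diffusers pipeline and exposes the load/infer entry points used by the UI.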
+ class GuiSD:
+     def __init__(self):
+         self.model = None
+
+         print("Loading model...")
+         self.model = Model_Diffusers(
+             base_model_id="cagliostrolab/animagine-xl-3.1",
+             task_name="txt2img",
+             vae_model=None,
+             type_model_precision=torch.float16,
+             retain_task_model_in_cache=False,
+         )
+
+     def infer_short(self, model, pipe_params, progress=gr.Progress(track_tqdm=True)):
+         progress(0, desc="Start inference...")
+         images, image_list = model(**pipe_params)
+         progress(1, desc="Inference completed.")
+         if not isinstance(images, list): images = [images]
+         img = []
+         for image in images:
+             img.append((image, None))
+         return img
+
+     def load_new_model(self, model_name, vae_model, task, progress=gr.Progress(track_tqdm=True)):
+
+         yield f"Loading model: {model_name}"
+
+         vae_model = vae_model if vae_model != "None" else None
+
+         if model_name in model_list:
+             model_is_xl = "xl" in model_name.lower()
+             sdxl_in_vae = vae_model and "sdxl" in vae_model.lower()
+             model_type = "SDXL" if model_is_xl else "SD 1.5"
+             incompatible_vae = (model_is_xl and vae_model and not sdxl_in_vae) or (not model_is_xl and sdxl_in_vae)
+
+             if incompatible_vae:
+                 vae_model = None
+
+         self.model.load_pipe(
+             model_name,
+             task_name=task_stablepy[task],
+             vae_model=vae_model if vae_model != "None" else None,
+             type_model_precision=torch.float16,
+             retain_task_model_in_cache=False,
+         )
+         yield f"Model loaded: {model_name}"
+
+     @spaces.GPU
+     def generate_pipeline(
+         self,
+         prompt,
+         neg_prompt,
+         num_images,
+         steps,
+         cfg,
+         clip_skip,
+         seed,
+         lora1,
+         lora_scale1,
+         lora2,
+         lora_scale2,
+         lora3,
+         lora_scale3,
+         lora4,
+         lora_scale4,
+         lora5,
+         lora_scale5,
+         sampler,
+         img_height,
+         img_width,
+         model_name,
+         vae_model,
+         task,
+         image_control,
+         preprocessor_name,
+         preprocess_resolution,
+         image_resolution,
+         style_prompt,  # list []
+         style_json_file,
+         image_mask,
+         strength,
+         low_threshold,
+         high_threshold,
+         value_threshold,
+         distance_threshold,
+         controlnet_output_scaling_in_unet,
+         controlnet_start_threshold,
+         controlnet_stop_threshold,
+         textual_inversion,
+         syntax_weights,
+         upscaler_model_path,
+         upscaler_increases_size,
+         esrgan_tile,
+         esrgan_tile_overlap,
+         hires_steps,
+         hires_denoising_strength,
+         hires_sampler,
+         hires_prompt,
+         hires_negative_prompt,
+         hires_before_adetailer,
+         hires_after_adetailer,
+         loop_generation,
+         leave_progress_bar,
+         disable_progress_bar,
+         image_previews,
+         display_images,
+         save_generated_images,
+         image_storage_location,
+         retain_compel_previous_load,
+         retain_detailfix_model_previous_load,
+         retain_hires_model_previous_load,
+         t2i_adapter_preprocessor,
+         t2i_adapter_conditioning_scale,
+         t2i_adapter_conditioning_factor,
+         xformers_memory_efficient_attention,
+         freeu,
+         generator_in_cpu,
+         adetailer_inpaint_only,
+         adetailer_verbose,
+         adetailer_sampler,
+         adetailer_active_a,
+         prompt_ad_a,
+         negative_prompt_ad_a,
+         strength_ad_a,
+         face_detector_ad_a,
+         person_detector_ad_a,
+         hand_detector_ad_a,
+         mask_dilation_a,
+         mask_blur_a,
+         mask_padding_a,
+         adetailer_active_b,
+         prompt_ad_b,
+         negative_prompt_ad_b,
+         strength_ad_b,
+         face_detector_ad_b,
+         person_detector_ad_b,
+         hand_detector_ad_b,
+         mask_dilation_b,
+         mask_blur_b,
+         mask_padding_b,
+         retain_task_cache_gui,
+         image_ip1,
+         mask_ip1,
+         model_ip1,
+         mode_ip1,
+         scale_ip1,
+         image_ip2,
+         mask_ip2,
+         model_ip2,
+         mode_ip2,
+         scale_ip2,
+         progress=gr.Progress(track_tqdm=True),
+     ):
+         progress(0, desc="Preparing inference...")
+
+         vae_model = vae_model if vae_model != "None" else None
+         loras_list = [lora1, lora2, lora3, lora4, lora5]
+         vae_msg = f"VAE: {vae_model}" if vae_model else ""
+         msg_lora = []
+
+         ## BEGIN MOD
+         prompt, neg_prompt = insert_model_recom_prompt(prompt, neg_prompt, model_name)
+         global lora_model_list
+         lora_model_list = get_lora_model_list()
+         ## END MOD
+
+         if model_name in model_list:
+             model_is_xl = "xl" in model_name.lower()
+             sdxl_in_vae = vae_model and "sdxl" in vae_model.lower()
+             model_type = "SDXL" if model_is_xl else "SD 1.5"
+             incompatible_vae = (model_is_xl and vae_model and not sdxl_in_vae) or (not model_is_xl and sdxl_in_vae)
+
+             if incompatible_vae:
+                 msg_inc_vae = (
+                     f"The selected VAE is for a { 'SD 1.5' if model_is_xl else 'SDXL' } model, but you"
+                     f" are using a { model_type } model. The default VAE "
+                     "will be used."
+                 )
+                 gr.Info(msg_inc_vae)
+                 vae_msg = msg_inc_vae
+                 vae_model = None
+
+             for la in loras_list:
+                 if la is not None and la != "None" and la in lora_model_list:
+                     print(la)
+                     lora_type = ("animetarot" in la.lower() or "Hyper-SD15-8steps".lower() in la.lower())
+                     if (model_is_xl and lora_type) or (not model_is_xl and not lora_type):
+                         msg_inc_lora = f"The LoRA {la} is for { 'SD 1.5' if model_is_xl else 'SDXL' }, but you are using { model_type }."
+                         gr.Info(msg_inc_lora)
+                         msg_lora.append(msg_inc_lora)
+
+         task = task_stablepy[task]
+
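+         # Per-slot parameter lists for the IP-Adapter inputs.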
+         params_ip_img = []
+         params_ip_msk = []
+         params_ip_model = []
+         params_ip_mode = []
+         params_ip_scale = []