Commit 10a9d50 by John6666
Parent: 21671c5

Upload 23 files

Files changed (8):
  1. app.py +276 -64
  2. constants.py +77 -82
  3. dc.py +169 -394
  4. image_processor.py +130 -0
  5. llmdolphin.py +167 -0
  6. modutils.py +66 -22
  7. requirements.txt +3 -4
  8. utils.py +5 -1
app.py CHANGED
@@ -8,7 +8,10 @@ from dc import (infer, _infer, pass_result, get_diffusers_model_list, get_sample
     preset_quality, preset_styles, process_style_prompt, get_all_lora_tupled_list, update_loras, apply_lora_prompt,
     download_my_lora, search_civitai_lora, update_civitai_selection, select_civitai_lora, search_civitai_lora_json,
     get_t2i_model_info, get_civitai_tag, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
-    SCHEDULE_TYPE_OPTIONS, SCHEDULE_PREDICTION_TYPE_OPTIONS)
+    SCHEDULE_TYPE_OPTIONS, SCHEDULE_PREDICTION_TYPE_OPTIONS, preprocessor_tab, SDXL_TASK, TASK_MODEL_LIST,
+    PROMPT_W_OPTIONS, POST_PROCESSING_SAMPLER, IP_ADAPTERS_SD, IP_ADAPTERS_SDXL, DIFFUSERS_CONTROLNET_MODEL,
+    TASK_AND_PREPROCESSORS, update_task_options, change_preprocessor_choices, get_ti_choices,
+    update_textual_inversion, set_textual_inversion_prompt, create_mask_now)
 # Translator
 from llmdolphin import (dolphin_respond_auto, dolphin_parse_simple,
     get_llm_formats, get_dolphin_model_format, get_dolphin_models,
@@ -34,7 +37,8 @@ def description_ui():
 
 
 MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1216
+MAX_IMAGE_SIZE = 4096
+MIN_IMAGE_SIZE = 256
 
 css = """
 #container { margin: 0 auto; !important; }
@@ -60,54 +64,62 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
         auto_trans = gr.Checkbox(label="Auto translate to English", value=False, scale=2)
 
     result = gr.Image(label="Result", elem_id="result", format="png", type="filepath", show_label=False, interactive=False,
-        show_download_button=True, show_share_button=False, container=True)
+        show_download_button=True, show_share_button=False, container=True)
+
     with gr.Accordion("History", open=False):
+        history_files = gr.Files(interactive=False, visible=False)
        history_gallery = gr.Gallery(label="History", columns=6, object_fit="contain", format="png", interactive=False, show_share_button=False,
            show_download_button=True)
-        history_files = gr.Files(interactive=False, visible=False)
        history_clear_button = gr.Button(value="Clear History", variant="secondary")
        history_clear_button.click(lambda: ([], []), None, [history_gallery, history_files], queue=False, show_api=False)
 
-    with gr.Accordion("Advanced Settings", open=False):
-        with gr.Row():
-            negative_prompt = gr.Text(label="Negative prompt", lines=1, max_lines=6, placeholder="Enter a negative prompt",
-                value="(low quality, worst quality:1.2), very displeasing, watermark, signature, ugly")
-
-        with gr.Row():
-            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-            gpu_duration = gr.Number(minimum=5, maximum=240, value=59, show_label=False, container=False, info="GPU time duration (seconds)")
-
-        with gr.Row():
-            width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024) # 832
-            height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024) # 1216
-            guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=30.0, step=0.1, value=7)
-            num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=100, step=1, value=28)
-            pag_scale = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=0.0, label="PAG Scale")
-            clip_skip = gr.Checkbox(value=True, label="Layer 2 Clip Skip")
-            free_u = gr.Checkbox(value=False, label="FreeU")
-
-        with gr.Row():
-            with gr.Column(scale=4):
-                model_name = gr.Dropdown(label="Model", info="You can enter a huggingface model repo_id to want to use.",
-                    choices=get_diffusers_model_list(), value=get_diffusers_model_list()[0],
-                    allow_custom_value=True, interactive=True, min_width=320)
-                model_info = gr.Markdown(elem_classes="info")
-            with gr.Column(scale=1):
-                model_detail = gr.Checkbox(label="Show detail of model in list", value=False)
-
-        with gr.Row():
-            sampler = gr.Dropdown(label="Sampler", choices=get_samplers(), value="Euler")
-            schedule_type = gr.Dropdown(label="Schedule type", choices=SCHEDULE_TYPE_OPTIONS, value=SCHEDULE_TYPE_OPTIONS[0])
-            schedule_prediction_type = gr.Dropdown(label="Discrete Sampling Type", choices=SCHEDULE_PREDICTION_TYPE_OPTIONS, value=SCHEDULE_PREDICTION_TYPE_OPTIONS[0])
-            vae_model = gr.Dropdown(label="VAE Model", choices=get_vaes(), value=get_vaes()[0])
-
-        with gr.Accordion("LoRA", open=True, visible=True):
-            def lora_dropdown(label):
-                return gr.Dropdown(label=label, choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320)
-
-            def lora_scale_slider(label):
-                return gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label=label)
+    with gr.Accordion("Advanced Settings", open=True):
+        task = gr.Dropdown(label="Task", choices=SDXL_TASK, value=TASK_MODEL_LIST[0])
+        with gr.Tab("Model & Prompt"):
+            with gr.Row():
+                negative_prompt = gr.Text(label="Negative prompt", lines=1, max_lines=6, placeholder="Enter a negative prompt", show_copy_button=True,
+                    value="(low quality, worst quality:1.2), very displeasing, watermark, signature, ugly")
+            with gr.Row():
+                with gr.Column(scale=4):
+                    model_name = gr.Dropdown(label="Model", info="You can enter a huggingface model repo_id to want to use.",
+                        choices=get_diffusers_model_list(), value=get_diffusers_model_list()[0],
+                        allow_custom_value=True, interactive=True, min_width=320)
+                    model_info = gr.Markdown(elem_classes="info")
+                with gr.Column(scale=1):
+                    model_detail = gr.Checkbox(label="Show detail of model in list", value=False)
+            with gr.Row():
+                quality_selector = gr.Radio(label="Quality Tag Presets", interactive=True, choices=list(preset_quality.keys()), value="None", scale=3)
+                style_selector = gr.Radio(label="Style Presets", interactive=True, choices=list(preset_styles.keys()), value="None", scale=3)
+                recom_prompt = gr.Checkbox(label="Recommended prompt", value=True, scale=1)
 
+        with gr.Tab("Generation Settings"):
+            with gr.Row():
+                seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
+                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                gpu_duration = gr.Slider(label="GPU time duration (seconds)", minimum=5, maximum=240, value=59)
+            with gr.Row():
+                width = gr.Slider(label="Width", minimum=MIN_IMAGE_SIZE, maximum=MAX_IMAGE_SIZE, step=32, value=1024) # 832
+                height = gr.Slider(label="Height", minimum=MIN_IMAGE_SIZE, maximum=MAX_IMAGE_SIZE, step=32, value=1024) # 1216
+                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=30.0, step=0.1, value=7)
+                guidance_rescale = gr.Slider(label="CFG rescale", value=0., step=0.01, minimum=0., maximum=1.5)
+            with gr.Row():
+                num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=100, step=1, value=28)
+                pag_scale = gr.Slider(minimum=0.0, maximum=10.0, step=0.1, value=0.0, label="PAG Scale")
+                clip_skip = gr.Checkbox(value=True, label="Layer 2 Clip Skip")
+                free_u = gr.Checkbox(value=False, label="FreeU")
+            with gr.Row():
+                sampler = gr.Dropdown(label="Sampler", choices=get_samplers(), value="Euler")
+                schedule_type = gr.Dropdown(label="Schedule type", choices=SCHEDULE_TYPE_OPTIONS, value=SCHEDULE_TYPE_OPTIONS[0])
+                schedule_prediction_type = gr.Dropdown(label="Discrete Sampling Type", choices=SCHEDULE_PREDICTION_TYPE_OPTIONS, value=SCHEDULE_PREDICTION_TYPE_OPTIONS[0])
+                vae_model = gr.Dropdown(label="VAE Model", choices=get_vaes(), value=get_vaes()[0])
+                prompt_syntax = gr.Dropdown(label="Prompt Syntax", choices=PROMPT_W_OPTIONS, value=PROMPT_W_OPTIONS[1][1])
+
+        with gr.Tab("LoRA"):
+            def lora_dropdown(label, visible=True):
+                return gr.Dropdown(label=label, choices=get_all_lora_tupled_list(), value="", allow_custom_value=True, elem_classes="lora", min_width=320, visible=visible)
+
+            def lora_scale_slider(label, visible=True):
+                return gr.Slider(minimum=-2, maximum=2, step=0.01, value=1.00, label=label, visible=visible)
 
             def lora_textbox():
                 return gr.Textbox(label="", info="Example of prompt:", value="", show_copy_button=True, interactive=False, visible=False)
@@ -153,6 +165,22 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
                     lora5_info = lora_textbox()
                     lora5_copy = gr.Button(value="Copy example to prompt", visible=False)
                     lora5_md = gr.Markdown(value="", visible=False)
+            with gr.Column():
+                with gr.Row():
+                    lora6 = lora_dropdown("LoRA 6", visible=False)
+                    lora6_wt = lora_scale_slider("LoRA 6: weight", visible=False)
+                with gr.Row():
+                    lora6_info = lora_textbox()
+                    lora6_copy = gr.Button(value="Copy example to prompt", visible=False)
+                    lora6_md = gr.Markdown(value="", visible=False)
+            with gr.Column():
+                with gr.Row():
+                    lora7 = lora_dropdown("LoRA 7", visible=False)
+                    lora7_wt = lora_scale_slider("LoRA 7: weight", visible=False)
+                with gr.Row():
+                    lora7_info = lora_textbox()
+                    lora7_copy = gr.Button(value="Copy example to prompt", visible=False)
+                    lora7_md = gr.Markdown(value="", visible=False)
             with gr.Accordion("From URL", open=True, visible=True):
                 with gr.Row():
                     lora_search_civitai_basemodel = gr.CheckboxGroup(label="Search LoRA for", choices=CIVITAI_BASEMODEL, value=["Pony", "Illustrious", "SDXL 1.0"])
@@ -171,13 +199,143 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
                 lora_search_civitai_result = gr.Dropdown(label="Search Results", choices=[("", "")], value="", allow_custom_value=True, visible=False)
                 lora_download_url = gr.Textbox(label="LoRA's download URL", placeholder="https://civitai.com/api/download/models/28907", info="It has to be .safetensors files, and you can also download them from Hugging Face.", lines=1)
                 lora_download = gr.Button("Get and set LoRA and apply to prompt")
-
-        with gr.Row():
-            quality_selector = gr.Radio(label="Quality Tag Presets", interactive=True, choices=list(preset_quality.keys()), value="None", scale=3)
-            style_selector = gr.Radio(label="Style Presets", interactive=True, choices=list(preset_styles.keys()), value="None", scale=3)
-            recom_prompt = gr.Checkbox(label="Recommended prompt", value=True, scale=1)
 
-        with gr.Accordion("Translation Settings", open=False):
+        with gr.Tab("ControlNet / Img2img / Inpaint"):
+            with gr.Row():
+                #image_control = gr.Image(label="Image ControlNet / Inpaint / Img2img", type="filepath", height=384, sources=["upload", "clipboard", "webcam"], show_share_button=False)
+                image_control = gr.ImageEditor(label="Image ControlNet / Inpaint / Img2img", type="filepath", sources=["upload", "clipboard", "webcam"], image_mode='RGB',
+                    show_share_button=False, show_fullscreen_button=False, layers=False, canvas_size=(384, 384), width=384, height=512,
+                    brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed", default_size=32), eraser=gr.Eraser(default_size="32"))
+                image_mask = gr.Image(label="Image Mask", type="filepath", height=384, sources=["upload", "clipboard"], show_share_button=False)
+            with gr.Row():
+                strength = gr.Slider(minimum=0.01, maximum=1.0, step=0.01, value=0.55, label="Strength",
+                    info="This option adjusts the level of changes for img2img and inpainting.")
+                image_resolution = gr.Slider(minimum=64, maximum=2048, step=64, value=1024, label="Image Resolution",
+                    info="The maximum proportional size of the generated image based on the uploaded image.")
+            with gr.Row():
+                controlnet_model = gr.Dropdown(label="ControlNet model", choices=DIFFUSERS_CONTROLNET_MODEL, value=DIFFUSERS_CONTROLNET_MODEL[0])
+                control_net_output_scaling = gr.Slider(minimum=0, maximum=5.0, step=0.1, value=1, label="ControlNet Output Scaling in UNet")
+                control_net_start_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, value=0, label="ControlNet Start Threshold (%)")
+                control_net_stop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="ControlNet Stop Threshold (%)")
+            with gr.Row():
+                preprocessor_name = gr.Dropdown(label="Preprocessor Name", choices=TASK_AND_PREPROCESSORS["canny"])
+                preprocess_resolution = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocessor Resolution")
+                low_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="'CANNY' low threshold")
+                high_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="'CANNY' high threshold")
+            with gr.Row():
+                value_threshold = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
+                distance_threshold = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
+                recolor_gamma_correction = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
+                tile_blur_sigma = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'TILE' blur sigma")
+
+        with gr.Tab("IP-Adapter"):
+            IP_MODELS = sorted(list(set(IP_ADAPTERS_SD + IP_ADAPTERS_SDXL)))
+            MODE_IP_OPTIONS = ["original", "style", "layout", "style+layout"]
+            with gr.Accordion("IP-Adapter 1", open=True, visible=True):
+                with gr.Row():
+                    #image_ip1 = gr.Image(label="IP Image", type="filepath", height=384, sources=["upload", "clipboard"], show_share_button=False)
+                    image_ip1 = gr.ImageEditor(label="IP Image", type="filepath", sources=["upload", "clipboard", "webcam"], image_mode='RGB',
+                        show_share_button=False, show_fullscreen_button=False, layers=False, canvas_size=(384, 384), width=384, height=512,
+                        brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed", default_size=32), eraser=gr.Eraser(default_size="32"))
+                    mask_ip1 = gr.Image(label="IP Mask (optional)", type="filepath", height=384, sources=["upload", "clipboard"], show_share_button=False)
+                with gr.Row():
+                    model_ip1 = gr.Dropdown(value="plus_face", label="Model", choices=IP_MODELS)
+                    mode_ip1 = gr.Dropdown(value="original", label="Mode", choices=MODE_IP_OPTIONS)
+                    scale_ip1 = gr.Slider(minimum=0., maximum=2., step=0.01, value=0.7, label="Scale")
+            with gr.Accordion("IP-Adapter 2", open=True, visible=True):
+                with gr.Row():
+                    #image_ip2 = gr.Image(label="IP Image", type="filepath", height=384, sources=["upload", "clipboard"], show_share_button=False)
+                    image_ip2 = gr.ImageEditor(label="IP Image", type="filepath", sources=["upload", "clipboard", "webcam"], image_mode='RGB',
+                        show_share_button=False, show_fullscreen_button=False, layers=False, canvas_size=(384, 384), width=384, height=512,
+                        brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed", default_size=32), eraser=gr.Eraser(default_size="32"))
+                    mask_ip2 = gr.Image(label="IP Mask (optional)", type="filepath", height=384, sources=["upload", "clipboard"], show_share_button=False)
+                with gr.Row():
+                    model_ip2 = gr.Dropdown(value="base", label="Model", choices=IP_MODELS)
+                    mode_ip2 = gr.Dropdown(value="style", label="Mode", choices=MODE_IP_OPTIONS)
+                    scale_ip2 = gr.Slider(minimum=0., maximum=2., step=0.01, value=0.7, label="Scale")
+
+        with gr.Tab("Inpaint Mask Maker"):
+            with gr.Row():
+                with gr.Column():
+                    image_base = gr.ImageEditor(sources=["upload", "clipboard", "webcam"],
+                        brush=gr.Brush(default_size="32", color_mode="fixed", colors=["rgba(0, 0, 0, 1)", "rgba(0, 0, 0, 0.1)", "rgba(255, 255, 255, 0.1)"]),
+                        eraser=gr.Eraser(default_size="32"), show_share_button=False, show_fullscreen_button=False,
+                        canvas_size=(384, 384), width=384, height=512)
+                    invert_mask = gr.Checkbox(value=False, label="Invert mask")
+                    cm_btn = gr.Button("Create mask")
+                with gr.Column():
+                    img_source = gr.Image(interactive=False, height=384, show_share_button=False)
+                    img_result = gr.Image(label="Mask image", show_label=True, interactive=False, height=384, show_share_button=False)
+                    cm_btn_send = gr.Button("Send to ControlNet / Img2img / Inpaint")
+                    cm_btn_send_ip1 = gr.Button("Send to IP-Adapter 1")
+                    cm_btn_send_ip2 = gr.Button("Send to IP-Adapter 2")
+            cm_btn.click(create_mask_now, [image_base, invert_mask], [img_source, img_result], show_api=False)
+            def send_img(img_source, img_result):
+                return img_source, img_result
+            cm_btn_send.click(send_img, [img_source, img_result], [image_control, image_mask], queue=False, show_api=False)
+            cm_btn_send_ip1.click(send_img, [img_source, img_result], [image_ip1, mask_ip1], queue=False, show_api=False)
+            cm_btn_send_ip2.click(send_img, [img_source, img_result], [image_ip2, mask_ip2], queue=False, show_api=False)
+
+        with gr.Tab("Hires fix"):
+            with gr.Row():
+                upscaler_model_path = gr.Dropdown(label="Upscaler", choices=UPSCALER_KEYS, value=UPSCALER_KEYS[0])
+                upscaler_increases_size = gr.Slider(minimum=1.1, maximum=4., step=0.1, value=1.2, label="Upscale by")
+                esrgan_tile = gr.Slider(minimum=0, value=0, maximum=500, step=1, label="ESRGAN Tile")
+                esrgan_tile_overlap = gr.Slider(minimum=1, maximum=200, step=1, value=8, label="ESRGAN Tile Overlap")
+            with gr.Row():
+                hires_steps = gr.Slider(minimum=0, value=30, maximum=100, step=1, label="Hires Steps")
+                hires_denoising_strength = gr.Slider(minimum=0.1, maximum=1.0, step=0.01, value=0.55, label="Hires Denoising Strength")
+                hires_sampler = gr.Dropdown(label="Hires Sampler", choices=POST_PROCESSING_SAMPLER, value=POST_PROCESSING_SAMPLER[0])
+                hires_schedule_list = ["Use same schedule type"] + SCHEDULE_TYPE_OPTIONS
+                hires_schedule_type = gr.Dropdown(label="Hires Schedule type", choices=hires_schedule_list, value=hires_schedule_list[0])
+                hires_guidance_scale = gr.Slider(minimum=-1., maximum=30., step=0.5, value=-1., label="Hires CFG", info="If the value is -1, the main CFG will be used")
+            with gr.Row():
+                hires_prompt = gr.Textbox(label="Hires Prompt", placeholder="Main prompt will be use", lines=3)
+                hires_negative_prompt = gr.Textbox(label="Hires Negative Prompt", placeholder="Main negative prompt will be use", lines=3)
+
+        with gr.Tab("Detailfix"):
+            with gr.Row():
+                # Adetailer Inpaint Only
+                adetailer_inpaint_only = gr.Checkbox(label="Inpaint only", value=True)
+                # Adetailer Verbose
+                adetailer_verbose = gr.Checkbox(label="Verbose", value=False)
+                # Adetailer Sampler
+                adetailer_sampler = gr.Dropdown(label="Adetailer sampler:", choices=POST_PROCESSING_SAMPLER, value=POST_PROCESSING_SAMPLER[0])
+            with gr.Row():
+                with gr.Accordion("Detailfix A", open=True, visible=True):
+                    # Adetailer A
+                    adetailer_active_a = gr.Checkbox(label="Enable Adetailer A", value=False)
+                    prompt_ad_a = gr.Textbox(label="Main prompt", placeholder="Main prompt will be use", lines=3)
+                    negative_prompt_ad_a = gr.Textbox(label="Negative prompt", placeholder="Main negative prompt will be use", lines=3)
+                    with gr.Row():
+                        strength_ad_a = gr.Number(label="Strength:", value=0.35, step=0.01, minimum=0.01, maximum=1.0)
+                        face_detector_ad_a = gr.Checkbox(label="Face detector", value=False)
+                        person_detector_ad_a = gr.Checkbox(label="Person detector", value=True)
+                        hand_detector_ad_a = gr.Checkbox(label="Hand detector", value=False)
+                    with gr.Row():
+                        mask_dilation_a = gr.Number(label="Mask dilation:", value=4, minimum=1)
+                        mask_blur_a = gr.Number(label="Mask blur:", value=4, minimum=1)
+                        mask_padding_a = gr.Number(label="Mask padding:", value=32, minimum=1)
+                with gr.Accordion("Detailfix B", open=True, visible=True):
+                    # Adetailer B
+                    adetailer_active_b = gr.Checkbox(label="Enable Adetailer B", value=False)
+                    prompt_ad_b = gr.Textbox(label="Main prompt", placeholder="Main prompt will be use", lines=3)
+                    negative_prompt_ad_b = gr.Textbox(label="Negative prompt", placeholder="Main negative prompt will be use", lines=3)
+                    with gr.Row():
+                        strength_ad_b = gr.Number(label="Strength:", value=0.35, step=0.01, minimum=0.01, maximum=1.0)
+                        face_detector_ad_b = gr.Checkbox(label="Face detector", value=False)
+                        person_detector_ad_b = gr.Checkbox(label="Person detector", value=True)
+                        hand_detector_ad_b = gr.Checkbox(label="Hand detector", value=False)
+                    with gr.Row():
+                        mask_dilation_b = gr.Number(label="Mask dilation:", value=4, minimum=1)
+                        mask_blur_b = gr.Number(label="Mask blur:", value=4, minimum=1)
+                        mask_padding_b = gr.Number(label="Mask padding:", value=32, minimum=1)
+
+        with gr.Tab("Textual inversion"):
+            active_textual_inversion = gr.Checkbox(value=False, label="Active Textual Inversion in prompt")
+            use_textual_inversion = gr.CheckboxGroup(choices=get_ti_choices(model_name.value) if active_textual_inversion.value else [], value=None, label="Use Textual Invertion in prompt")
+
+        with gr.Tab("Translation Settings"):
             chatbot = gr.Chatbot(render_markdown=False, visible=False) # component for auto-translation
             chat_model = gr.Dropdown(choices=get_dolphin_models(), value=get_dolphin_models()[0][1], allow_custom_value=True, label="Model")
             chat_model_info = gr.Markdown(value=get_dolphin_model_info(get_dolphin_models()[0][1]), label="Model info")
@@ -202,14 +360,34 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
         cache_examples=False,
     )
 
+    model_name.change(update_task_options, [model_name, task], [task], queue=False, show_api=False)
+    task.change(change_preprocessor_choices, [task], [preprocessor_name], queue=False, show_api=False)
+    active_textual_inversion.change(update_textual_inversion, [active_textual_inversion, model_name], [use_textual_inversion], queue=False, show_api=False)
+    model_name.change(update_textual_inversion, [active_textual_inversion, model_name], [use_textual_inversion], queue=False, show_api=False)
+    use_textual_inversion.change(set_textual_inversion_prompt, [use_textual_inversion, prompt, negative_prompt, prompt_syntax], [prompt, negative_prompt])
+
     gr.on( #lambda x: None, inputs=None, outputs=result).then(
         triggers=[run_button.click, prompt.submit],
         fn=infer,
         inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
            guidance_scale, num_inference_steps, model_name,
-           lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
-           sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type,
-           clip_skip, pag_scale, free_u, gpu_duration, recom_prompt],
+           lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt,
+           lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt, task, prompt_syntax,
+           sampler, vae_model, schedule_type, schedule_prediction_type,
+           clip_skip, pag_scale, free_u, guidance_rescale,
+           image_control, image_mask, strength, image_resolution,
+           controlnet_model, control_net_output_scaling, control_net_start_threshold, control_net_stop_threshold,
+           preprocessor_name, preprocess_resolution, low_threshold, high_threshold,
+           value_threshold, distance_threshold, recolor_gamma_correction, tile_blur_sigma,
+           image_ip1, mask_ip1, model_ip1, mode_ip1, scale_ip1,
+           image_ip2, mask_ip2, model_ip2, mode_ip2, scale_ip2,
+           upscaler_model_path, upscaler_increases_size, esrgan_tile, esrgan_tile_overlap, hires_steps, hires_denoising_strength,
+           hires_sampler, hires_schedule_type, hires_guidance_scale, hires_prompt, hires_negative_prompt,
+           adetailer_inpaint_only, adetailer_verbose, adetailer_sampler, adetailer_active_a,
+           prompt_ad_a, negative_prompt_ad_a, strength_ad_a, face_detector_ad_a, person_detector_ad_a, hand_detector_ad_a,
+           mask_dilation_a, mask_blur_a, mask_padding_a, adetailer_active_b, prompt_ad_b, negative_prompt_ad_b, strength_ad_b,
+           face_detector_ad_b, person_detector_ad_b, hand_detector_ad_b, mask_dilation_b, mask_blur_b, mask_padding_b,
+           active_textual_inversion, gpu_duration, auto_trans, recom_prompt],
        outputs=[result],
        queue=True,
        show_progress="full",
@@ -221,9 +399,23 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
        fn=_infer, # dummy fn for api
        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
            guidance_scale, num_inference_steps, model_name,
-           lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
-           sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type,
-           clip_skip, pag_scale, free_u, gpu_duration, recom_prompt],
+           lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt,
+           lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt, task, prompt_syntax,
+           sampler, vae_model, schedule_type, schedule_prediction_type,
+           clip_skip, pag_scale, free_u, guidance_rescale,
+           image_control, image_mask, strength, image_resolution,
+           controlnet_model, control_net_output_scaling, control_net_start_threshold, control_net_stop_threshold,
+           preprocessor_name, preprocess_resolution, low_threshold, high_threshold,
+           value_threshold, distance_threshold, recolor_gamma_correction, tile_blur_sigma,
+           image_ip1, mask_ip1, model_ip1, mode_ip1, scale_ip1,
+           image_ip2, mask_ip2, model_ip2, mode_ip2, scale_ip2,
+           upscaler_model_path, upscaler_increases_size, esrgan_tile, esrgan_tile_overlap, hires_steps, hires_denoising_strength,
+           hires_sampler, hires_schedule_type, hires_guidance_scale, hires_prompt, hires_negative_prompt,
+           adetailer_inpaint_only, adetailer_verbose, adetailer_sampler, adetailer_active_a,
+           prompt_ad_a, negative_prompt_ad_a, strength_ad_a, face_detector_ad_a, person_detector_ad_a, hand_detector_ad_a,
+           mask_dilation_a, mask_blur_a, mask_padding_a, adetailer_active_b, prompt_ad_b, negative_prompt_ad_b, strength_ad_b,
+           face_detector_ad_b, person_detector_ad_b, hand_detector_ad_b, mask_dilation_b, mask_blur_b, mask_padding_b,
+           active_textual_inversion, gpu_duration, auto_trans, recom_prompt],
        outputs=[result],
        queue=False,
        show_api=True,
@@ -245,9 +437,23 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
        fn=infer,
        inputs=[prompt, negative_prompt, seed, randomize_seed, width, height,
            guidance_scale, num_inference_steps, model_name,
-           lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt,
-           sampler, vae_model, auto_trans, schedule_type, schedule_prediction_type,
-           clip_skip, pag_scale, free_u, gpu_duration, recom_prompt],
+           lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt,
+           lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt, task, prompt_syntax,
+           sampler, vae_model, schedule_type, schedule_prediction_type,
+           clip_skip, pag_scale, free_u, guidance_rescale,
+           image_control, image_mask, strength, image_resolution,
+           controlnet_model, control_net_output_scaling, control_net_start_threshold, control_net_stop_threshold,
+           preprocessor_name, preprocess_resolution, low_threshold, high_threshold,
+           value_threshold, distance_threshold, recolor_gamma_correction, tile_blur_sigma,
+           image_ip1, mask_ip1, model_ip1, mode_ip1, scale_ip1,
+           image_ip2, mask_ip2, model_ip2, mode_ip2, scale_ip2,
+           upscaler_model_path, upscaler_increases_size, esrgan_tile, esrgan_tile_overlap, hires_steps, hires_denoising_strength,
+           hires_sampler, hires_schedule_type, hires_guidance_scale, hires_prompt, hires_negative_prompt,
+           adetailer_inpaint_only, adetailer_verbose, adetailer_sampler, adetailer_active_a,
+           prompt_ad_a, negative_prompt_ad_a, strength_ad_a, face_detector_ad_a, person_detector_ad_a, hand_detector_ad_a,
+           mask_dilation_a, mask_blur_a, mask_padding_a, adetailer_active_b, prompt_ad_b, negative_prompt_ad_b, strength_ad_b,
+           face_detector_ad_b, person_detector_ad_b, hand_detector_ad_b, mask_dilation_b, mask_blur_b, mask_padding_b,
+           active_textual_inversion, gpu_duration, auto_trans, recom_prompt],
        outputs=[result],
        queue=True,
        show_progress="full",
@@ -259,12 +465,13 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
 
     gr.on(
        triggers=[lora1.change, lora1_wt.change, lora2.change, lora2_wt.change, lora3.change, lora3_wt.change,
-           lora4.change, lora4_wt.change, lora5.change, lora5_wt.change],
+           lora4.change, lora4_wt.change, lora5.change, lora5_wt.change, lora6.change, lora6_wt.change, lora7.change, lora7_wt.change, prompt_syntax.change],
        fn=update_loras,
-       inputs=[prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt],
+       inputs=[prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt],
        outputs=[prompt, lora1, lora1_wt, lora1_info, lora1_copy, lora1_md,
            lora2, lora2_wt, lora2_info, lora2_copy, lora2_md, lora3, lora3_wt, lora3_info, lora3_copy, lora3_md,
-           lora4, lora4_wt, lora4_info, lora4_copy, lora4_md, lora5, lora5_wt, lora5_info, lora5_copy, lora5_md],
+           lora4, lora4_wt, lora4_info, lora4_copy, lora4_md, lora5, lora5_wt, lora5_info, lora5_copy, lora5_md,
+           lora6, lora6_wt, lora6_info, lora6_copy, lora6_md, lora7, lora7_wt, lora7_info, lora7_copy, lora7_md],
        queue=False,
        trigger_mode="once",
        show_api=False,
@@ -274,6 +481,8 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
     lora3_copy.click(apply_lora_prompt, [prompt, lora3_info], [prompt], queue=False, show_api=False)
     lora4_copy.click(apply_lora_prompt, [prompt, lora4_info], [prompt], queue=False, show_api=False)
     lora5_copy.click(apply_lora_prompt, [prompt, lora5_info], [prompt], queue=False, show_api=False)
+    lora6_copy.click(apply_lora_prompt, [prompt, lora6_info], [prompt], queue=False, show_api=False)
+    lora7_copy.click(apply_lora_prompt, [prompt, lora7_info], [prompt], queue=False, show_api=False)
 
     gr.on(
        triggers=[lora_search_civitai_submit.click, lora_search_civitai_query.submit],
@@ -289,8 +498,8 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
     gr.on(
        triggers=[lora_download.click, lora_download_url.submit],
        fn=download_my_lora,
-       inputs=[lora_download_url,lora1, lora2, lora3, lora4, lora5],
-       outputs=[lora1, lora2, lora3, lora4, lora5],
+       inputs=[lora_download_url, lora1, lora2, lora3, lora4, lora5, lora6, lora7],
+       outputs=[lora1, lora2, lora3, lora4, lora5, lora6, lora7],
        scroll_to_output=True,
        queue=True,
        show_api=False,
@@ -404,7 +613,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
     ).success(
        insert_recom_prompt, [output_text_pony, dummy_np, recom_pony], [output_text_pony, dummy_np], queue=False, show_api=False,
     ).success(lambda: (gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=True)),
-       None, [copy_btn, copy_btn_pony, copy_prompt_btn, copy_prompt_btn_pony], queue=False, show_api=False)
+       None, [copy_btn, copy_btn_pony, copy_prompt_btn, copy_prompt_btn_pony], queue=False, show_api=False)
     copy_btn.click(gradio_copy_text, [output_text], js=COPY_ACTION_JS, show_api=False)
     copy_btn_pony.click(gradio_copy_text, [output_text_pony], js=COPY_ACTION_JS, show_api=False)
     copy_prompt_btn.click(gradio_copy_prompt, inputs=[output_text], outputs=[prompt], show_api=False)
@@ -441,6 +650,9 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
        outputs=[result_up_tab],
     )
 
+    with gr.Tab("Preprocessor", render=True):
+        preprocessor_tab()
+
     gr.LoginButton()
     gr.DuplicateButton(value="Duplicate Space for private use (This demo does not work on CPU. Requires GPU Space)")
 
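A note on the wiring pattern above: app.py binds one handler to several triggers through gr.on() and passes component values positionally, which is why every new control the commit adds must also be appended to each inputs list in order. Below is a minimal, self-contained sketch of that pattern; it is not code from this commit, and the echo() handler and component names are stand-ins for illustration.

import gradio as gr

def echo(prompt, steps):
    # stand-in for infer(); receives values in the same order as the inputs list
    return f"{prompt} ({int(steps)} steps)"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    steps = gr.Slider(1, 100, value=28, step=1, label="Steps")
    run_button = gr.Button("Run")
    result = gr.Textbox(label="Result")
    # Same pattern as the diff: one handler, several triggers; because arguments
    # are positional, any control added to the UI is appended to inputs, in order.
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=echo,
        inputs=[prompt, steps],
        outputs=[result],
        queue=True,
    )

if __name__ == "__main__":
    demo.launch()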
constants.py CHANGED
@@ -17,7 +17,7 @@ DOWNLOAD_LORA = "https://huggingface.co/Leopain/color/resolve/main/Coloring_book
 
 LOAD_DIFFUSERS_FORMAT_MODEL = [
     'stabilityai/stable-diffusion-xl-base-1.0',
-    'Laxhar/noobai-XL-1.0',
+    'Laxhar/noobai-XL-1.1',
     'black-forest-labs/FLUX.1-dev',
     'John6666/blue-pencil-flux1-v021-fp8-flux',
     'John6666/wai-ani-flux-v10forfp8-fp8-flux',
@@ -31,6 +31,7 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'terminusresearch/FluxBooru-v0.3',
     'ostris/OpenFLUX.1',
     'shuttleai/shuttle-3-diffusion',
+    'Laxhar/noobai-XL-1.0',
     'John6666/noobai-xl-nai-xl-epsilonpred10version-sdxl',
     'Laxhar/noobai-XL-0.77',
     'John6666/noobai-xl-nai-xl-epsilonpred075version-sdxl',
@@ -40,9 +41,13 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'John6666/noobaiiter-xl-vpred-v075-sdxl',
     'John6666/ntr-mix-illustrious-xl-noob-xl-v40-sdxl',
     'John6666/ntr-mix-illustrious-xl-noob-xl-ntrmix35-sdxl',
+    'John6666/ntr-mix-illustrious-xl-noob-xl-v777-sdxl',
+    'John6666/ntr-mix-illustrious-xl-noob-xl-v777forlora-sdxl',
     'John6666/haruki-mix-illustrious-v10-sdxl',
     'John6666/noobreal-v10-sdxl',
     'John6666/complicated-noobai-merge-vprediction-sdxl',
+    'Laxhar/noobai-XL-Vpred-0.65s',
+    'Laxhar/noobai-XL-Vpred-0.65',
     'Laxhar/noobai-XL-Vpred-0.6',
     'John6666/noobai-xl-nai-xl-vpred05version-sdxl',
     'John6666/noobai-fusion2-vpred-itercomp-v1-sdxl',
@@ -56,6 +61,7 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'John6666/wai-nsfw-illustrious-v70-sdxl',
     'John6666/illustrious-pony-mix-v3-sdxl',
     'John6666/nova-anime-xl-illustriousv10-sdxl',
+    'John6666/nova-orange-xl-v30-sdxl',
     'John6666/silvermoon-mix03-illustrious-v10-sdxl',
     'eienmojiki/Anything-XL',
     'eienmojiki/Starry-XL-v5.2',
@@ -82,9 +88,8 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'John6666/prefect-pony-xl-v4-sdxl',
     'John6666/mala-anime-mix-nsfw-pony-xl-v5-sdxl',
     'John6666/wai-ani-nsfw-ponyxl-v10-sdxl',
-    'John6666/wai-ani-nsfw-ponyxl-v9-sdxl',
     'John6666/wai-real-mix-v11-sdxl',
-    'John6666/babes-by-stable-yogi-ponyv3-sdxl',
+    'John6666/wai-shuffle-pdxl-v2-sdxl',
     'John6666/wai-c-v6-sdxl',
     'John6666/iniverse-mix-xl-sfwnsfw-pony-guofeng-v43-sdxl',
     'John6666/sifw-annihilation-xl-v2-sdxl',
@@ -114,7 +119,7 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'digiplay/DarkSushi2.5D_v1',
     'digiplay/darkphoenix3D_v1.1',
     'digiplay/BeenYouLiteL11_diffusers',
-    'Yntec/RevAnimatedV2Rebirth',
+    'GraydientPlatformAPI/rev-animated2',
     'youknownothing/cyberrealistic_v50',
     'youknownothing/deliberate-v6',
     'GraydientPlatformAPI/deliberate-cyber3',
@@ -142,7 +147,7 @@ DOWNLOAD_EMBEDS = [
     'https://huggingface.co/datasets/Nerfgun3/bad_prompt/blob/main/bad_prompt_version2.pt',
     # 'https://huggingface.co/embed/negative/resolve/main/EasyNegativeV2.safetensors',
     # 'https://huggingface.co/embed/negative/resolve/main/bad-hands-5.pt',
-]
+]
 
 CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
 HF_TOKEN = os.environ.get("HF_READ_TOKEN")
@@ -155,79 +160,6 @@ DIRECTORY_EMBEDS = 'embedings'
 CACHE_HF = "/home/user/.cache/huggingface/hub/"
 STORAGE_ROOT = "/home/user/"
 
-PREPROCESSOR_CONTROLNET = {
-    "openpose": [
-        "Openpose",
-        "None",
-    ],
-    "scribble": [
-        "HED",
-        "PidiNet",
-        "None",
-    ],
-    "softedge": [
-        "PidiNet",
-        "HED",
-        "HED safe",
-        "PidiNet safe",
-        "None",
-    ],
-    "segmentation": [
-        "UPerNet",
-        "None",
-    ],
-    "depth": [
-        "DPT",
-        "Midas",
-        "None",
-    ],
-    "normalbae": [
-        "NormalBae",
-        "None",
-    ],
-    "lineart": [
-        "Lineart",
-        "Lineart coarse",
-        "Lineart (anime)",
-        "None",
-        "None (anime)",
-    ],
-    "lineart_anime": [
-        "Lineart",
-        "Lineart coarse",
-        "Lineart (anime)",
-        "None",
-        "None (anime)",
-    ],
-    "shuffle": [
-        "ContentShuffle",
-        "None",
-    ],
-    "canny": [
-        "Canny",
-        "None",
-    ],
-    "mlsd": [
-        "MLSD",
-        "None",
-    ],
-    "ip2p": [
-        "ip2p"
-    ],
-    "recolor": [
-        "Recolor luminance",
-        "Recolor intensity",
-        "None",
-    ],
-    "tile": [
-        "Mild Blur",
-        "Moderate Blur",
-        "Heavy Blur",
-        "None",
-    ],
-
-}
-
 TASK_STABLEPY = {
     'txt2img': 'txt2img',
     'img2img': 'img2img',
@@ -284,11 +216,74 @@ UPSCALER_DICT_GUI = {
 
 UPSCALER_KEYS = list(UPSCALER_DICT_GUI.keys())
 
+DIFFUSERS_CONTROLNET_MODEL = [
+    "Automatic",
+
+    "xinsir/controlnet-union-sdxl-1.0",
+    "xinsir/anime-painter",
+    "Eugeoter/noob-sdxl-controlnet-canny",
+    "Eugeoter/noob-sdxl-controlnet-lineart_anime",
+    "Eugeoter/noob-sdxl-controlnet-depth",
+    "Eugeoter/noob-sdxl-controlnet-normal",
+    "Eugeoter/noob-sdxl-controlnet-softedge_hed",
+    "Eugeoter/noob-sdxl-controlnet-scribble_pidinet",
+    "Eugeoter/noob-sdxl-controlnet-scribble_hed",
+    "Eugeoter/noob-sdxl-controlnet-manga_line",
+    "Eugeoter/noob-sdxl-controlnet-lineart_realistic",
+    "Eugeoter/noob-sdxl-controlnet-depth_midas-v1-1",
+    "dimitribarbot/controlnet-openpose-sdxl-1.0-safetensors",
+    "r3gm/controlnet-openpose-sdxl-1.0-fp16",
+    "r3gm/controlnet-canny-scribble-integrated-sdxl-v2-fp16",
+    "r3gm/controlnet-union-sdxl-1.0-fp16",
+    "r3gm/controlnet-lineart-anime-sdxl-fp16",
+    "r3gm/control_v1p_sdxl_qrcode_monster_fp16",
+    "r3gm/controlnet-tile-sdxl-1.0-fp16",
+    "r3gm/controlnet-recolor-sdxl-fp16",
+    "r3gm/controlnet-openpose-twins-sdxl-1.0-fp16",
+    "r3gm/controlnet-qr-pattern-sdxl-fp16",
+    "brad-twinkl/controlnet-union-sdxl-1.0-promax",
+    "Yakonrus/SDXL_Controlnet_Tile_Realistic_v2",
+    "TheMistoAI/MistoLine",
+    "briaai/BRIA-2.3-ControlNet-Recoloring",
+    "briaai/BRIA-2.3-ControlNet-Canny",
+
+    "lllyasviel/control_v11p_sd15_openpose",
+    "lllyasviel/control_v11p_sd15_canny",
+    "lllyasviel/control_v11p_sd15_mlsd",
+    "lllyasviel/control_v11p_sd15_scribble",
+    "lllyasviel/control_v11p_sd15_softedge",
+    "lllyasviel/control_v11p_sd15_seg",
+    "lllyasviel/control_v11f1p_sd15_depth",
+    "lllyasviel/control_v11p_sd15_normalbae",
+    "lllyasviel/control_v11p_sd15_lineart",
+    "lllyasviel/control_v11p_sd15s2_lineart_anime",
+    "lllyasviel/control_v11e_sd15_shuffle",
+    "lllyasviel/control_v11e_sd15_ip2p",
+    "lllyasviel/control_v11p_sd15_inpaint",
+    "monster-labs/control_v1p_sd15_qrcode_monster",
+    "lllyasviel/control_v11f1e_sd15_tile",
+    "latentcat/control_v1p_sd15_brightness",
+    "yuanqiuye/qrcode_controlnet_v3",
+
+    "Shakker-Labs/FLUX.1-dev-ControlNet-Union-Pro",
+    # "Shakker-Labs/FLUX.1-dev-ControlNet-Pose",
+    # "Shakker-Labs/FLUX.1-dev-ControlNet-Depth",
+    # "jasperai/Flux.1-dev-Controlnet-Upscaler",
+    # "jasperai/Flux.1-dev-Controlnet-Depth",
+    # "jasperai/Flux.1-dev-Controlnet-Surface-Normals",
+    # "XLabs-AI/flux-controlnet-canny-diffusers",
+    # "XLabs-AI/flux-controlnet-hed-diffusers",
+    # "XLabs-AI/flux-controlnet-depth-diffusers",
+    # "InstantX/FLUX.1-dev-Controlnet-Union",
+    # "InstantX/FLUX.1-dev-Controlnet-Canny",
+]
+
 PROMPT_W_OPTIONS = [
     ("Compel format: (word)weight", "Compel"),
     ("Classic format: (word:weight)", "Classic"),
     ("Classic-original format: (word:weight)", "Classic-original"),
     ("Classic-no_norm format: (word:weight)", "Classic-no_norm"),
+    ("Classic-sd_embed format: (word:weight)", "Classic-sd_embed"),
     ("Classic-ignore", "Classic-ignore"),
     ("None", "None"),
 ]
@@ -371,7 +366,7 @@ EXAMPLES_GUI = [
     1.0, # cn scale
     0.0, # cn start
     1.0, # cn end
-    "Classic",
+    "Classic-no_norm",
     "Nearest",
     45,
     False,
@@ -384,7 +379,7 @@ EXAMPLES_GUI = [
     -1,
     "None",
     0.33,
-    "FlowMatchEuler",
+    "FlowMatch Euler",
     1152,
     896,
     "black-forest-labs/FLUX.1-dev",
@@ -408,7 +403,7 @@ EXAMPLES_GUI = [
     -1,
     "None",
     0.33,
-    "DPM++ 2M SDE Lu",
+    "DPM++ 2M SDE Ef",
     1024,
     1024,
     "John6666/epicrealism-xl-v10kiss2-sdxl",
@@ -491,7 +486,7 @@ EXAMPLES_GUI = [
     1.0, # cn scale
     0.0, # cn start
     0.9, # cn end
-    "Compel",
+    "Classic-original",
     "Latent (antialiased)",
     46,
     False,
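For reference, the (label, value) tuples in PROMPT_W_OPTIONS are what make value=PROMPT_W_OPTIONS[1][1] in app.py resolve to "Classic": Gradio dropdowns display the first tuple element and emit the second as the value. A small sketch under that assumption (the list is trimmed for brevity; the commit uses the full table above):

import gradio as gr

PROMPT_W_OPTIONS = [
    ("Compel format: (word)weight", "Compel"),
    ("Classic format: (word:weight)", "Classic"),
    ("Classic-sd_embed format: (word:weight)", "Classic-sd_embed"),
]

with gr.Blocks() as demo:
    # Gradio shows the first tuple element as the label and returns the second
    # as the value, so PROMPT_W_OPTIONS[1][1] == "Classic" is the default here.
    prompt_syntax = gr.Dropdown(
        label="Prompt Syntax",
        choices=PROMPT_W_OPTIONS,
        value=PROMPT_W_OPTIONS[1][1],
    )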
dc.py CHANGED
@@ -5,9 +5,9 @@ from stablepy import (
     SCHEDULE_TYPE_OPTIONS,
     SCHEDULE_PREDICTION_TYPE_OPTIONS,
     check_scheduler_compatibility,
 )
 from constants import (
-    PREPROCESSOR_CONTROLNET,
     TASK_STABLEPY,
     TASK_MODEL_LIST,
     UPSCALER_DICT_GUI,
@@ -17,6 +17,7 @@ from constants import (
     SDXL_TASK,
     MODEL_TYPE_TASK,
     POST_PROCESSING_SAMPLER,
 )
 from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
@@ -42,29 +43,27 @@ from utils import (
     html_template_message,
     escape_html,
 )
 from datetime import datetime
 import gradio as gr
 import logging
 import diffusers
 import warnings
 from stablepy import logger
 # import urllib.parse
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True
 # os.environ["PYTORCH_NO_CUDA_MEMORY_CACHING"] = "1"
 print(os.getenv("SPACES_ZERO_GPU"))
 
 ## BEGIN MOD
-import gradio as gr
-import logging
 logging.getLogger("diffusers").setLevel(logging.ERROR)
-import diffusers
 diffusers.utils.logging.set_verbosity(40)
-import warnings
 warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
 warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
 warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
-from stablepy import logger
 logger.setLevel(logging.DEBUG)
 
 from env import (
@@ -120,8 +119,8 @@ vae_model_list = get_model_list(DIRECTORY_VAES)
 vae_model_list.insert(0, "BakedVAE")
 vae_model_list.insert(0, "None")
 
-#download_private_repo(HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO, DIRECTORY_EMBEDS_SDXL, False)
-#download_private_repo(HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO, DIRECTORY_EMBEDS_POSITIVE_SDXL, False)
 embed_sdxl_list = get_model_list(DIRECTORY_EMBEDS_SDXL) + get_model_list(DIRECTORY_EMBEDS_POSITIVE_SDXL)
 
 def get_embed_list(pipeline_name):
@@ -130,6 +129,16 @@ def get_embed_list(pipeline_name):
 
 print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
 
 ## BEGIN MOD
 class GuiSD:
     def __init__(self, stream=True):
@@ -139,7 +148,7 @@ class GuiSD:
         self.last_load = datetime.now()
         self.inventory = []
 
-    def update_storage_models(self, storage_floor_gb=32, required_inventory_for_purge=3):
         while get_used_storage_gb() > storage_floor_gb:
             if len(self.inventory) < required_inventory_for_purge:
                 break
@@ -153,23 +162,12 @@ class GuiSD:
         ] + [model_name]
         print(self.inventory)
 
-    def infer_short(self, model, pipe_params, progress=gr.Progress(track_tqdm=True)):
-        #progress(0, desc="Start inference...")
-        images, seed, image_list, metadata = model(**pipe_params)
-        #progress(1, desc="Inference completed.")
-        if not isinstance(images, list): images = [images]
-        images = save_images(images, metadata)
-        img = []
-        for image in images:
-            img.append((image, None))
-        return img
 
-    def load_new_model(self, model_name, vae_model, task, progress=gr.Progress(track_tqdm=True)):
 
         self.update_storage_models()
 
-        # download link model > model_name
-
         vae_model = vae_model if vae_model != "None" else None
         model_type = get_model_type(model_name)
         dtype_model = torch.bfloat16 if model_type == "FLUX" else torch.float16
@@ -221,17 +219,19 @@ class GuiSD:
                 vae_model=vae_model,
                 type_model_precision=dtype_model,
                 retain_task_model_in_cache=False,
                 device="cpu",
             )
         else:
-
             if self.model.base_model_id != model_name:
                 load_now_time = datetime.now()
                 elapsed_time = max((load_now_time - self.last_load).total_seconds(), 0)
 
-                if elapsed_time <= 8:
                     print("Waiting for the previous model's time ops...")
-                    time.sleep(8-elapsed_time)
 
                 self.model.device = torch.device("cpu")
                 self.model.load_pipe(
@@ -240,6 +240,7 @@ class GuiSD:
                     vae_model=vae_model,
                     type_model_precision=dtype_model,
                     retain_task_model_in_cache=False,
                 )
 
             end_time = time.time()
@@ -276,6 +277,10 @@ class GuiSD:
             lora_scale4,
             lora5,
             lora_scale5,
             sampler,
             schedule_type,
             schedule_prediction_type,
@@ -296,6 +301,8 @@ class GuiSD:
             high_threshold,
             value_threshold,
             distance_threshold,
             controlnet_output_scaling_in_unet,
             controlnet_start_threshold,
             controlnet_stop_threshold,
@@ -312,6 +319,9 @@ class GuiSD:
             hires_negative_prompt,
             hires_before_adetailer,
             hires_after_adetailer,
             loop_generation,
             leave_progress_bar,
             disable_progress_bar,
@@ -353,6 +363,7 @@ class GuiSD:
             mask_blur_b,
             mask_padding_b,
             retain_task_cache_gui,
             image_ip1,
             mask_ip1,
             model_ip1,
@@ -369,7 +380,7 @@ class GuiSD:
         yield info_state, gr.update(), gr.update()
 
         vae_model = vae_model if vae_model != "None" else None
-        loras_list = [lora1, lora2, lora3, lora4, lora5]
         vae_msg = f"VAE: {vae_model}" if vae_model else ""
         msg_lora = ""
 
@@ -478,6 +489,8 @@ class GuiSD:
             "high_threshold": high_threshold,
             "value_threshold": value_threshold,
             "distance_threshold": distance_threshold,
             "lora_A": lora1 if lora1 != "None" else None,
             "lora_scale_A": lora_scale1,
             "lora_B": lora2 if lora2 != "None" else None,
@@ -488,6 +501,10 @@ class GuiSD:
             "lora_scale_D": lora_scale4,
             "lora_E": lora5 if lora5 != "None" else None,
             "lora_scale_E": lora_scale5,
             ## BEGIN MOD
             "textual_inversion": get_embed_list(self.model.class_name) if textual_inversion else [],
             ## END MOD
@@ -531,6 +548,8 @@ class GuiSD:
             "hires_sampler": hires_sampler,
             "hires_before_adetailer": hires_before_adetailer,
             "hires_after_adetailer": hires_after_adetailer,
             "ip_adapter_image": params_ip_img,
             "ip_adapter_mask": params_ip_msk,
             "ip_adapter_model": params_ip_model,
@@ -538,13 +557,15 @@ class GuiSD:
             "ip_adapter_scale": params_ip_scale,
         }
 
         self.model.device = torch.device("cuda:0")
-        if hasattr(self.model.pipe, "transformer") and loras_list != ["None"] * 5:
             self.model.pipe.transformer.to(self.model.device)
             print("transformer to cuda")
 
-        #return self.infer_short(self.model, pipe_params), info_state
-
         actual_progress = 0
         info_images = gr.update()
         for img, [seed, image_path, metadata] in self.model(**pipe_params):
@@ -569,7 +590,7 @@ class GuiSD:
             if msg_lora:
                 info_images += msg_lora
 
-            info_images = info_images + "<br>" + "GENERATION DATA:<br>" + escape_html(metadata[0]) + "<br>-------<br>"
 
             download_links = "<br>".join(
                 [
@@ -604,37 +625,38 @@ def dummy_gpu():
 
 
 def sd_gen_generate_pipeline(*args):
-
     gpu_duration_arg = int(args[-1]) if args[-1] else 59
     verbose_arg = int(args[-2])
     load_lora_cpu = args[-3]
     generation_args = args[:-3]
     lora_list = [
         None if item == "None" or item == "" else item # MOD
-        for item in [args[7], args[9], args[11], args[13], args[15]]
     ]
-    lora_status = [None] * 5
 
     msg_load_lora = "Updating LoRAs in GPU..."
     if load_lora_cpu:
-        msg_load_lora = "Updating LoRAs in CPU (Slow but saves GPU usage)..."
 
-    if lora_list != sd_gen.model.lora_memory and lora_list != [None] * 5:
         yield msg_load_lora, gr.update(), gr.update()
 
     # Load lora in CPU
     if load_lora_cpu:
-        lora_status = sd_gen.model.lora_merge(
             lora_A=lora_list[0], lora_scale_A=args[8],
             lora_B=lora_list[1], lora_scale_B=args[10],
             lora_C=lora_list[2], lora_scale_C=args[12],
             lora_D=lora_list[3], lora_scale_D=args[14],
             lora_E=lora_list[4], lora_scale_E=args[16],
         )
         print(lora_status)
 
-    sampler_name = args[17]
-    schedule_type_name = args[18]
     _, _, msg_sampler = check_scheduler_compatibility(
         sd_gen.model.class_name, sampler_name, schedule_type_name
     )
@@ -648,7 +670,7 @@ def sd_gen_generate_pipeline(*args):
         elif status is not None:
             gr.Warning(f"Failed to load LoRA: {lora}")
 
-    if lora_status == [None] * 5 and sd_gen.model.lora_memory != [None] * 5 and load_lora_cpu:
         lora_cache_msg = ", ".join(
             str(x) for x in sd_gen.model.lora_memory if x is not None
         )
@@ -664,7 +686,6 @@ def sd_gen_generate_pipeline(*args):
 
     # yield from sd_gen.generate_pipeline(*generation_args)
     yield from dynamic_gpu_duration(
-        #return dynamic_gpu_duration(
         sd_gen.generate_pipeline,
        gpu_duration_arg,
        *generation_args,
@@ -706,6 +727,7 @@ def esrgan_upscale(image, upscaler_name, upscaler_size):
     return image_path
 
 
 dynamic_gpu_duration.zerogpu = True
 sd_gen_generate_pipeline.zerogpu = True
 sd_gen = GuiSD()
@@ -718,30 +740,69 @@ import numpy as np
 import random
 import json
 import shutil
-from modutils import (safe_float, escape_lora_basename, to_lora_key, to_lora_path,
-    get_local_model_list, get_private_lora_model_lists, get_valid_lora_name, get_state, set_state,
     get_valid_lora_path, get_valid_lora_wt, get_lora_info, CIVITAI_SORT, CIVITAI_PERIOD, CIVITAI_BASEMODEL,
-    normalize_prompt_list, get_civitai_info, search_lora_on_civitai, translate_to_en, get_t2i_model_info, get_civitai_tag, save_image_history)
 
 
 #@spaces.GPU
 def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
         model_name=load_diffusers_format_model[0], lora1=None, lora1_wt=1.0, lora2=None, lora2_wt=1.0,
-        lora3=None, lora3_wt=1.0, lora4=None, lora4_wt=1.0, lora5=None, lora5_wt=1.0,
-        sampler="Euler", vae=None, translate=False, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
732
- clip_skip=True, pag_scale=0.0, free_u=False, gpu_duration=59, recom_prompt=True, progress=gr.Progress(track_tqdm=True)):
 
 
 
 
 
 
 
 
 
 
 
 
733
  MAX_SEED = np.iinfo(np.int32).max
734
 
 
 
 
 
 
 
 
 
 
 
 
 
 
735
  image_previews = True
 
 
 
 
 
 
 
 
 
 
 
 
 
736
  load_lora_cpu = False
737
  verbose_info = False
738
- filename_pattern = "model,seed"
739
 
740
  images: list[tuple[PIL.Image.Image, str | None]] = []
741
  progress(0, desc="Preparing...")
742
 
743
  if randomize_seed: seed = random.randint(0, MAX_SEED)
744
-
745
  generator = torch.Generator().manual_seed(seed).seed()
746
 
747
  if translate:
@@ -750,31 +811,38 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
750
 
751
  prompt, negative_prompt = insert_model_recom_prompt(prompt, negative_prompt, model_name, recom_prompt)
752
  progress(0.5, desc="Preparing...")
753
- lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt = \
754
- set_prompt_loras(prompt, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt)
755
  lora1 = get_valid_lora_path(lora1)
756
  lora2 = get_valid_lora_path(lora2)
757
  lora3 = get_valid_lora_path(lora3)
758
  lora4 = get_valid_lora_path(lora4)
759
  lora5 = get_valid_lora_path(lora5)
 
 
760
  progress(1, desc="Preparation completed. Starting inference...")
761
 
762
  progress(0, desc="Loading model...")
763
- for _ in sd_gen.load_new_model(model_name, vae, TASK_MODEL_LIST[0]):
764
  pass
765
  progress(1, desc="Model loaded.")
766
  progress(0, desc="Starting Inference...")
767
  for info_state, stream_images, info_images in sd_gen_generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
768
  guidance_scale, clip_skip, generator, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
769
- lora4, lora4_wt, lora5, lora5_wt, sampler, schedule_type, schedule_prediction_type,
770
- height, width, model_name, vae, TASK_MODEL_LIST[0], None, "Canny", 512, 1024,
771
- None, None, None, 0.35, 100, 200, 0.1, 0.1, 1.0, 0., 1., False, "Classic", None,
772
- 1.0, 100, 10, 30, 0.55, "Use same sampler", "", "",
773
- False, True, 1, True, False, image_previews, False, False, filename_pattern, "./images", False, False, False, True, 1, 0.55,
774
- False, free_u, False, True, False, "Use same sampler", False, "", "", 0.35, True, True, False, 4, 4, 32,
775
- False, "", "", 0.35, True, True, False, 4, 4, 32,
776
- True, None, None, "plus_face", "original", 0.7, None, None, "base", "style", 0.7, pag_scale,
777
- load_lora_cpu, verbose_info, gpu_duration
 
 
 
 
 
778
  ):
779
  images = stream_images if isinstance(stream_images, list) else images
780
  progress(1, desc="Inference completed.")
@@ -786,9 +854,21 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
786
  #@spaces.GPU
787
  def _infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps,
788
  model_name=load_diffusers_format_model[0], lora1=None, lora1_wt=1.0, lora2=None, lora2_wt=1.0,
789
- lora3=None, lora3_wt=1.0, lora4=None, lora4_wt=1.0, lora5=None, lora5_wt=1.0,
790
- sampler="Euler", vae=None, translate=False, schedule_type=SCHEDULE_TYPE_OPTIONS[0], schedule_prediction_type=SCHEDULE_PREDICTION_TYPE_OPTIONS[0],
791
- clip_skip=True, pag_scale=0.0, free_u=False, gpu_duration=59, recom_prompt=True, progress=gr.Progress(track_tqdm=True)):
 
 
 
 
 
 
 
 
 
 
 
 
792
  return gr.update()
793
 
794
 
@@ -808,6 +888,32 @@ def get_vaes():
808
  return vae_model_list
809
 
810
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
811
  cached_diffusers_model_tupled_list = get_tupled_model_list(load_diffusers_format_model)
812
  def get_diffusers_model_list(state: dict = {}):
813
  show_diffusers_model_list_detail = get_state(state, "show_diffusers_model_list_detail")
@@ -831,337 +937,6 @@ def enable_diffusers_model_detail(is_enable: bool = False, model_name: str = "",
831
  return gr.update(value=is_enable), gr.update(value=new_value, choices=get_diffusers_model_list(state)), state
832
 
833
 
834
- def load_model_prompt_dict():
835
- dict = {}
836
- try:
837
- with open('model_dict.json', encoding='utf-8') as f:
838
- dict = json.load(f)
839
- except Exception:
840
- pass
841
- return dict
842
-
843
-
844
- model_prompt_dict = load_model_prompt_dict()
845
-
846
-
847
- animagine_ps = to_list("masterpiece, best quality, very aesthetic, absurdres")
848
- animagine_nps = to_list("lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
849
- pony_ps = to_list("score_9, score_8_up, score_7_up, masterpiece, best quality, very aesthetic, absurdres")
850
- pony_nps = to_list("source_pony, score_6, score_5, score_4, busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends")
851
- other_ps = to_list("anime artwork, anime style, studio anime, highly detailed, cinematic photo, 35mm photograph, film, bokeh, professional, 4k, highly detailed")
852
- other_nps = to_list("photo, deformed, black and white, realism, disfigured, low contrast, drawing, painting, crayon, sketch, graphite, impressionist, noisy, blurry, soft, deformed, ugly")
853
- default_ps = to_list("highly detailed, masterpiece, best quality, very aesthetic, absurdres")
854
- default_nps = to_list("score_6, score_5, score_4, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]")
855
- def insert_model_recom_prompt(prompt: str = "", neg_prompt: str = "", model_name: str = "None", model_recom_prompt_enabled = True):
856
- if not model_recom_prompt_enabled or not model_name: return prompt, neg_prompt
857
- prompts = to_list(prompt)
858
- neg_prompts = to_list(neg_prompt)
859
- prompts = list_sub(prompts, animagine_ps + pony_ps + other_ps)
860
- neg_prompts = list_sub(neg_prompts, animagine_nps + pony_nps + other_nps)
861
- last_empty_p = [""] if not prompts and type != "None" else []
862
- last_empty_np = [""] if not neg_prompts and type != "None" else []
863
- ps = []
864
- nps = []
865
- if model_name in model_prompt_dict.keys():
866
- ps = to_list(model_prompt_dict[model_name]["prompt"])
867
- nps = to_list(model_prompt_dict[model_name]["negative_prompt"])
868
- else:
869
- ps = default_ps
870
- nps = default_nps
871
- prompts = prompts + ps
872
- neg_prompts = neg_prompts + nps
873
- prompt = ", ".join(list_uniq(prompts) + last_empty_p)
874
- neg_prompt = ", ".join(list_uniq(neg_prompts) + last_empty_np)
875
- return prompt, neg_prompt
876
-
877
-
878
- private_lora_dict = {}
879
- try:
880
- with open('lora_dict.json', encoding='utf-8') as f:
881
- d = json.load(f)
882
- for k, v in d.items():
883
- private_lora_dict[escape_lora_basename(k)] = v
884
- except Exception:
885
- pass
886
-
887
-
888
- private_lora_model_list = get_private_lora_model_lists()
889
- loras_dict = {"None": ["", "", "", "", ""], "": ["", "", "", "", ""]} | private_lora_dict.copy()
890
- loras_url_to_path_dict = {} # {"URL to download": "local filepath", ...}
891
- civitai_last_results = {} # {"URL to download": {search results}, ...}
892
- all_lora_list = []
893
-
894
-
895
- def get_all_lora_list():
896
- global all_lora_list
897
- loras = get_lora_model_list()
898
- all_lora_list = loras.copy()
899
- return loras
900
-
901
-
902
- def get_all_lora_tupled_list():
903
- global loras_dict
904
- models = get_all_lora_list()
905
- if not models: return []
906
- tupled_list = []
907
- for model in models:
908
- #if not model: continue # to avoid GUI-related bug
909
- basename = Path(model).stem
910
- key = to_lora_key(model)
911
- items = None
912
- if key in loras_dict.keys():
913
- items = loras_dict.get(key, None)
914
- else:
915
- items = get_civitai_info(model)
916
- if items != None:
917
- loras_dict[key] = items
918
- name = basename
919
- value = model
920
- if items and items[2] != "":
921
- if items[1] == "Pony":
922
- name = f"{basename} (for {items[1]}🐴, {items[2]})"
923
- else:
924
- name = f"{basename} (for {items[1]}, {items[2]})"
925
- tupled_list.append((name, value))
926
- return tupled_list
927
-
928
-
929
- def update_lora_dict(path: str):
930
- global loras_dict
931
- key = to_lora_key(path)
932
- if key in loras_dict.keys(): return
933
- items = get_civitai_info(path)
934
- if items == None: return
935
- loras_dict[key] = items
936
-
937
-
938
- def download_lora(dl_urls: str):
939
- global loras_url_to_path_dict
940
- dl_path = ""
941
- before = get_local_model_list(DIRECTORY_LORAS)
942
- urls = []
943
- for url in [url.strip() for url in dl_urls.split(',')]:
944
- local_path = f"{DIRECTORY_LORAS}/{url.split('/')[-1]}"
945
- if not Path(local_path).exists():
946
- download_things(DIRECTORY_LORAS, url, HF_TOKEN, CIVITAI_API_KEY)
947
- urls.append(url)
948
- after = get_local_model_list(DIRECTORY_LORAS)
949
- new_files = list_sub(after, before)
950
- i = 0
951
- for file in new_files:
952
- path = Path(file)
953
- if path.exists():
954
- new_path = Path(f'{path.parent.name}/{escape_lora_basename(path.stem)}{path.suffix}')
955
- path.resolve().rename(new_path.resolve())
956
- loras_url_to_path_dict[urls[i]] = str(new_path)
957
- update_lora_dict(str(new_path))
958
- dl_path = str(new_path)
959
- i += 1
960
- return dl_path
961
-
962
-
963
- def copy_lora(path: str, new_path: str):
964
- if path == new_path: return new_path
965
- cpath = Path(path)
966
- npath = Path(new_path)
967
- if cpath.exists():
968
- try:
969
- shutil.copy(str(cpath.resolve()), str(npath.resolve()))
970
- except Exception:
971
- return None
972
- update_lora_dict(str(npath))
973
- return new_path
974
- else:
975
- return None
976
-
977
-
978
- def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: str, lora5: str):
979
- path = download_lora(dl_urls)
980
- if path:
981
- if not lora1 or lora1 == "None":
982
- lora1 = path
983
- elif not lora2 or lora2 == "None":
984
- lora2 = path
985
- elif not lora3 or lora3 == "None":
986
- lora3 = path
987
- elif not lora4 or lora4 == "None":
988
- lora4 = path
989
- elif not lora5 or lora5 == "None":
990
- lora5 = path
991
- choices = get_all_lora_tupled_list()
992
- return gr.update(value=lora1, choices=choices), gr.update(value=lora2, choices=choices), gr.update(value=lora3, choices=choices),\
993
- gr.update(value=lora4, choices=choices), gr.update(value=lora5, choices=choices)
994
-
995
-
996
- def set_prompt_loras(prompt, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
997
- import re
998
- lora1 = get_valid_lora_name(lora1, model_name)
999
- lora2 = get_valid_lora_name(lora2, model_name)
1000
- lora3 = get_valid_lora_name(lora3, model_name)
1001
- lora4 = get_valid_lora_name(lora4, model_name)
1002
- lora5 = get_valid_lora_name(lora5, model_name)
1003
- if not "<lora" in prompt: return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
1004
- lora1_wt = get_valid_lora_wt(prompt, lora1, lora1_wt)
1005
- lora2_wt = get_valid_lora_wt(prompt, lora2, lora2_wt)
1006
- lora3_wt = get_valid_lora_wt(prompt, lora3, lora3_wt)
1007
- lora4_wt = get_valid_lora_wt(prompt, lora4, lora4_wt)
1008
- lora5_wt = get_valid_lora_wt(prompt, lora5, lora5_wt)
1009
- on1, label1, tag1, md1 = get_lora_info(lora1)
1010
- on2, label2, tag2, md2 = get_lora_info(lora2)
1011
- on3, label3, tag3, md3 = get_lora_info(lora3)
1012
- on4, label4, tag4, md4 = get_lora_info(lora4)
1013
- on5, label5, tag5, md5 = get_lora_info(lora5)
1014
- lora_paths = [lora1, lora2, lora3, lora4, lora5]
1015
- prompts = prompt.split(",") if prompt else []
1016
- for p in prompts:
1017
- p = str(p).strip()
1018
- if "<lora" in p:
1019
- result = re.findall(r'<lora:(.+?):(.+?)>', p)
1020
- if not result: continue
1021
- key = result[0][0]
1022
- wt = result[0][1]
1023
- path = to_lora_path(key)
1024
- if not key in loras_dict.keys() or not path:
1025
- path = get_valid_lora_name(path)
1026
- if not path or path == "None": continue
1027
- if path in lora_paths:
1028
- continue
1029
- elif not on1:
1030
- lora1 = path
1031
- lora_paths = [lora1, lora2, lora3, lora4, lora5]
1032
- lora1_wt = safe_float(wt)
1033
- on1 = True
1034
- elif not on2:
1035
- lora2 = path
1036
- lora_paths = [lora1, lora2, lora3, lora4, lora5]
1037
- lora2_wt = safe_float(wt)
1038
- on2 = True
1039
- elif not on3:
1040
- lora3 = path
1041
- lora_paths = [lora1, lora2, lora3, lora4, lora5]
1042
- lora3_wt = safe_float(wt)
1043
- on3 = True
1044
- elif not on4:
1045
- lora4 = path
1046
- lora_paths = [lora1, lora2, lora3, lora4, lora5]
1047
- lora4_wt = safe_float(wt)
1048
- on4, label4, tag4, md4 = get_lora_info(lora4)
1049
- elif not on5:
1050
- lora5 = path
1051
- lora_paths = [lora1, lora2, lora3, lora4, lora5]
1052
- lora5_wt = safe_float(wt)
1053
- on5 = True
1054
- return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
1055
-
1056
-
1057
- def apply_lora_prompt(prompt: str, lora_info: str):
1058
- if lora_info == "None": return gr.update(value=prompt)
1059
- tags = prompt.split(",") if prompt else []
1060
- prompts = normalize_prompt_list(tags)
1061
- lora_tag = lora_info.replace("/",",")
1062
- lora_tags = lora_tag.split(",") if str(lora_info) != "None" else []
1063
- lora_prompts = normalize_prompt_list(lora_tags)
1064
- empty = [""]
1065
- prompt = ", ".join(list_uniq(prompts + lora_prompts) + empty)
1066
- return gr.update(value=prompt)
1067
-
1068
-
1069
- def update_loras(prompt, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
1070
- import re
1071
- on1, label1, tag1, md1 = get_lora_info(lora1)
1072
- on2, label2, tag2, md2 = get_lora_info(lora2)
1073
- on3, label3, tag3, md3 = get_lora_info(lora3)
1074
- on4, label4, tag4, md4 = get_lora_info(lora4)
1075
- on5, label5, tag5, md5 = get_lora_info(lora5)
1076
- lora_paths = [lora1, lora2, lora3, lora4, lora5]
1077
- prompts = prompt.split(",") if prompt else []
1078
- output_prompts = []
1079
- for p in prompts:
1080
- p = str(p).strip()
1081
- if "<lora" in p:
1082
- result = re.findall(r'<lora:(.+?):(.+?)>', p)
1083
- if not result: continue
1084
- key = result[0][0]
1085
- wt = result[0][1]
1086
- path = to_lora_path(key)
1087
- if not key in loras_dict.keys() or not path: continue
1088
- if path in lora_paths:
1089
- output_prompts.append(f"<lora:{to_lora_key(path)}:{safe_float(wt):.2f}>")
1090
- elif p:
1091
- output_prompts.append(p)
1092
- lora_prompts = []
1093
- if on1: lora_prompts.append(f"<lora:{to_lora_key(lora1)}:{lora1_wt:.2f}>")
1094
- if on2: lora_prompts.append(f"<lora:{to_lora_key(lora2)}:{lora2_wt:.2f}>")
1095
- if on3: lora_prompts.append(f"<lora:{to_lora_key(lora3)}:{lora3_wt:.2f}>")
1096
- if on4: lora_prompts.append(f"<lora:{to_lora_key(lora4)}:{lora4_wt:.2f}>")
1097
- if on5: lora_prompts.append(f"<lora:{to_lora_key(lora5)}:{lora5_wt:.2f}>")
1098
- output_prompt = ", ".join(list_uniq(output_prompts + lora_prompts + [""]))
1099
- choices = get_all_lora_tupled_list()
1100
- return gr.update(value=output_prompt), gr.update(value=lora1, choices=choices), gr.update(value=lora1_wt),\
1101
- gr.update(value=tag1, label=label1, visible=on1), gr.update(visible=on1), gr.update(value=md1, visible=on1),\
1102
- gr.update(value=lora2, choices=choices), gr.update(value=lora2_wt),\
1103
- gr.update(value=tag2, label=label2, visible=on2), gr.update(visible=on2), gr.update(value=md2, visible=on2),\
1104
- gr.update(value=lora3, choices=choices), gr.update(value=lora3_wt),\
1105
- gr.update(value=tag3, label=label3, visible=on3), gr.update(visible=on3), gr.update(value=md3, visible=on3),\
1106
- gr.update(value=lora4, choices=choices), gr.update(value=lora4_wt),\
1107
- gr.update(value=tag4, label=label4, visible=on4), gr.update(visible=on4), gr.update(value=md4, visible=on4),\
1108
- gr.update(value=lora5, choices=choices), gr.update(value=lora5_wt),\
1109
- gr.update(value=tag5, label=label5, visible=on5), gr.update(visible=on5), gr.update(value=md5, visible=on5)
1110
-
1111
-
1112
- def search_civitai_lora(query, base_model=[], sort=CIVITAI_SORT[0], period=CIVITAI_PERIOD[0], tag="", user="", gallery=[]):
1113
- global civitai_last_results, civitai_last_choices, civitai_last_gallery
1114
- civitai_last_choices = [("", "")]
1115
- civitai_last_gallery = []
1116
- civitai_last_results = {}
1117
- items = search_lora_on_civitai(query, base_model, 100, sort, period, tag, user)
1118
- if not items: return gr.update(choices=[("", "")], value="", visible=False),\
1119
- gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
1120
- civitai_last_results = {}
1121
- choices = []
1122
- gallery = []
1123
- for item in items:
1124
- base_model_name = "Pony🐴" if item['base_model'] == "Pony" else item['base_model']
1125
- name = f"{item['name']} (for {base_model_name} / By: {item['creator']} / Tags: {', '.join(item['tags'])})"
1126
- value = item['dl_url']
1127
- choices.append((name, value))
1128
- gallery.append((item['img_url'], name))
1129
- civitai_last_results[value] = item
1130
- if not choices: return gr.update(choices=[("", "")], value="", visible=False),\
1131
- gr.update(value="", visible=False), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
1132
- civitai_last_choices = choices
1133
- civitai_last_gallery = gallery
1134
- result = civitai_last_results.get(choices[0][1], "None")
1135
- md = result['md'] if result else ""
1136
- return gr.update(choices=choices, value=choices[0][1], visible=True), gr.update(value=md, visible=True),\
1137
- gr.update(visible=True), gr.update(visible=True), gr.update(value=gallery)
1138
-
1139
-
1140
- def update_civitai_selection(evt: gr.SelectData):
1141
- try:
1142
- selected_index = evt.index
1143
- selected = civitai_last_choices[selected_index][1]
1144
- return gr.update(value=selected)
1145
- except Exception:
1146
- return gr.update(visible=True)
1147
-
1148
-
1149
- def select_civitai_lora(search_result):
1150
- if not "http" in search_result: return gr.update(value=""), gr.update(value="None", visible=True)
1151
- result = civitai_last_results.get(search_result, "None")
1152
- md = result['md'] if result else ""
1153
- return gr.update(value=search_result), gr.update(value=md, visible=True)
1154
-
1155
-
1156
- def search_civitai_lora_json(query, base_model):
1157
- results = {}
1158
- items = search_lora_on_civitai(query, base_model)
1159
- if not items: return gr.update(value=results)
1160
- for item in items:
1161
- results[item['dl_url']] = item
1162
- return gr.update(value=results)
1163
-
1164
-
1165
  quality_prompt_list = [
1166
  {
1167
  "name": "None",
 
    SCHEDULE_TYPE_OPTIONS,
    SCHEDULE_PREDICTION_TYPE_OPTIONS,
    check_scheduler_compatibility,
+    TASK_AND_PREPROCESSORS,
)
from constants import (
    TASK_STABLEPY,
    TASK_MODEL_LIST,
    UPSCALER_DICT_GUI,

    SDXL_TASK,
    MODEL_TYPE_TASK,
    POST_PROCESSING_SAMPLER,
+    DIFFUSERS_CONTROLNET_MODEL,

)
from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES

    html_template_message,
    escape_html,
)
+from image_processor import preprocessor_tab
from datetime import datetime
import gradio as gr
import logging
import diffusers
import warnings
from stablepy import logger
+from diffusers import FluxPipeline
# import urllib.parse

ImageFile.LOAD_TRUNCATED_IMAGES = True
+torch.backends.cuda.matmul.allow_tf32 = True
# os.environ["PYTORCH_NO_CUDA_MEMORY_CACHING"] = "1"
print(os.getenv("SPACES_ZERO_GPU"))

## BEGIN MOD
logging.getLogger("diffusers").setLevel(logging.ERROR)
diffusers.utils.logging.set_verbosity(40)
warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
logger.setLevel(logging.DEBUG)

from env import (

vae_model_list.insert(0, "BakedVAE")
vae_model_list.insert(0, "None")

+download_private_repo(HF_SDXL_EMBEDS_NEGATIVE_PRIVATE_REPO, DIRECTORY_EMBEDS_SDXL, False)
+download_private_repo(HF_SDXL_EMBEDS_POSITIVE_PRIVATE_REPO, DIRECTORY_EMBEDS_POSITIVE_SDXL, False)
embed_sdxl_list = get_model_list(DIRECTORY_EMBEDS_SDXL) + get_model_list(DIRECTORY_EMBEDS_POSITIVE_SDXL)

def get_embed_list(pipeline_name):

print('\033[33m🏁 Download and listing of valid models completed.\033[0m')

+flux_repo = "camenduru/FLUX.1-dev-diffusers"
+flux_pipe = FluxPipeline.from_pretrained(
+    flux_repo,
+    transformer=None,
+    torch_dtype=torch.bfloat16,
+).to("cuda")
+components = flux_pipe.components
+components.pop("transformer", None)
+delete_model(flux_repo)
+
## BEGIN MOD
class GuiSD:
    def __init__(self, stream=True):

        self.last_load = datetime.now()
        self.inventory = []

+    def update_storage_models(self, storage_floor_gb=24, required_inventory_for_purge=3):
        while get_used_storage_gb() > storage_floor_gb:
            if len(self.inventory) < required_inventory_for_purge:
                break

        ] + [model_name]
        print(self.inventory)
image_processor.py ADDED
@@ -0,0 +1,130 @@
+import spaces
+import gradio as gr
+from stablepy import Preprocessor
+
+PREPROCESSOR_TASKS_LIST = [
+    "Canny",
+    "Openpose",
+    "DPT",
+    "Midas",
+    "ZoeDepth",
+    "DepthAnything",
+    "HED",
+    "PidiNet",
+    "TEED",
+    "Lineart",
+    "LineartAnime",
+    "Anyline",
+    "Lineart standard",
+    "SegFormer",
+    "UPerNet",
+    "ContentShuffle",
+    "Recolor",
+    "Blur",
+    "MLSD",
+    "NormalBae",
+]
+
+preprocessor = Preprocessor()
+
+
+def process_inputs(
+    image,
+    name,
+    resolution,
+    preprocessor_resolution,
+    low_threshold,
+    high_threshold,
+    value_threshold,
+    distance_threshold,
+    recolor_mode,
+    recolor_gamma_correction,
+    blur_k_size,
+    pre_openpose_extra,
+    hed_scribble,
+    pre_pidinet_safe,
+    pre_lineart_coarse,
+    use_cuda,
+):
+    if not image:
+        raise ValueError("Upload an image to use the preprocessor.")
+
+    preprocessor.load(name, False)
+
+    params = dict(
+        image_resolution=resolution,
+        detect_resolution=preprocessor_resolution,
+        low_threshold=low_threshold,
+        high_threshold=high_threshold,
+        thr_v=value_threshold,
+        thr_d=distance_threshold,
+        mode=recolor_mode,
+        gamma_correction=recolor_gamma_correction,
+        blur_sigma=blur_k_size,
+        hand_and_face=pre_openpose_extra,
+        scribble=hed_scribble,
+        safe=pre_pidinet_safe,
+        coarse=pre_lineart_coarse,
+    )
+
+    if use_cuda:
+        @spaces.GPU(duration=15)
+        def wrapped_func():
+            preprocessor.to("cuda")
+            return preprocessor(image, **params)
+        return wrapped_func()
+
+    return preprocessor(image, **params)
+
+
+def preprocessor_tab():
+    with gr.Row():
+        with gr.Column():
+            pre_image = gr.Image(label="Image", type="pil", sources=["upload"])
+            pre_options = gr.Dropdown(label="Preprocessor", choices=PREPROCESSOR_TASKS_LIST, value=PREPROCESSOR_TASKS_LIST[0])
+            pre_img_resolution = gr.Slider(
+                minimum=64, maximum=4096, step=64, value=1024, label="Image Resolution",
+                info="The maximum proportional size of the generated image based on the uploaded image."
+            )
+            pre_start = gr.Button(value="PROCESS IMAGE", variant="primary")
+            with gr.Accordion("Advanced Settings", open=False):
+                with gr.Column():
+                    pre_processor_resolution = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocessor Resolution")
+                    pre_low_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="'CANNY' low threshold")
+                    pre_high_threshold = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="'CANNY' high threshold")
+                    pre_value_threshold = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1, label="'MLSD' Hough value threshold")
+                    pre_distance_threshold = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1, label="'MLSD' Hough distance threshold")
+                    pre_recolor_mode = gr.Dropdown(label="'RECOLOR' mode", choices=["luminance", "intensity"], value="luminance")
+                    pre_recolor_gamma_correction = gr.Number(minimum=0., maximum=25., value=1., step=0.001, label="'RECOLOR' gamma correction")
+                    pre_blur_k_size = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'BLUR' sigma")
+                    pre_openpose_extra = gr.Checkbox(value=True, label="'OPENPOSE' face and hand")
+                    pre_hed_scribble = gr.Checkbox(value=False, label="'HED' scribble")
+                    pre_pidinet_safe = gr.Checkbox(value=False, label="'PIDINET' safe")
+                    pre_lineart_coarse = gr.Checkbox(value=False, label="'LINEART' coarse")
+                    pre_use_cuda = gr.Checkbox(value=False, label="Use CUDA")
+
+        with gr.Column():
+            pre_result = gr.Image(label="Result", type="pil", interactive=False, format="png")
+
+    pre_start.click(
+        fn=process_inputs,
+        inputs=[
+            pre_image,
+            pre_options,
+            pre_img_resolution,
+            pre_processor_resolution,
+            pre_low_threshold,
+            pre_high_threshold,
+            pre_value_threshold,
+            pre_distance_threshold,
+            pre_recolor_mode,
+            pre_recolor_gamma_correction,
+            pre_blur_k_size,
+            pre_openpose_extra,
+            pre_hed_scribble,
+            pre_pidinet_safe,
+            pre_lineart_coarse,
+            pre_use_cuda,
+        ],
+        outputs=[pre_result],
+    )
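
Since `preprocessor_tab()` only declares its components and wires its own button callback, it can be mounted inside any Blocks context; the `from image_processor import preprocessor_tab` line in dc.py above uses it exactly that way. A small usage sketch (the tab label and standalone launch are our choices, not the commit's):

import gradio as gr
from image_processor import preprocessor_tab

with gr.Blocks() as demo:
    # mount the preprocessor UI as its own tab
    with gr.Tab("Preprocessor"):
        preprocessor_tab()

if __name__ == "__main__":
    demo.launch()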
llmdolphin.py CHANGED
@@ -72,13 +72,179 @@ llm_models = {
    "Rocinante-12B-v2h-Q4_K_M.gguf": ["BeaverAI/Rocinante-12B-v2h-GGUF", MessagesFormatterType.MISTRAL],
    "Mistral-Nemo-12B-ArliAI-RPMax-v1.1.i1-Q4_K_M.gguf": ["mradermacher/Mistral-Nemo-12B-ArliAI-RPMax-v1.1-i1-GGUF", MessagesFormatterType.MISTRAL],
    "Pans_Gutenbergum_V0.1.Q4_K_M.gguf": ["mradermacher/Pans_Gutenbergum_V0.1-GGUF", MessagesFormatterType.MISTRAL],
    "ChronoStar-Unleashed-v0.1.i1-Q4_K_M.gguf": ["mradermacher/ChronoStar-Unleashed-v0.1-i1-GGUF", MessagesFormatterType.MISTRAL],
    "Trinas_Nectar-8B-model_stock.i1-Q4_K_M.gguf": ["mradermacher/Trinas_Nectar-8B-model_stock-i1-GGUF", MessagesFormatterType.MISTRAL],
    "ChatWaifu_Magnum_V0.2.Q4_K_M.gguf": ["mradermacher/ChatWaifu_Magnum_V0.2-GGUF", MessagesFormatterType.MISTRAL],
    "ChatWaifu_12B_v2.0.Q5_K_M.gguf": ["mradermacher/ChatWaifu_12B_v2.0-GGUF", MessagesFormatterType.MISTRAL],
    "ChatWaifu_22B_v2.0_preview.Q4_K_S.gguf": ["mradermacher/ChatWaifu_22B_v2.0_preview-GGUF", MessagesFormatterType.MISTRAL],
    "ChatWaifu_v1.4.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.4-GGUF", MessagesFormatterType.MISTRAL],
    "ChatWaifu_v1.3.1.Q4_K_M.gguf": ["mradermacher/ChatWaifu_v1.3.1-GGUF", MessagesFormatterType.MISTRAL],
    "SeQwence-14B-v5.Q4_K_S.gguf": ["mradermacher/SeQwence-14B-v5-GGUF", MessagesFormatterType.OPEN_CHAT],
    "L3.1-8B-Dark-Planet-Slush.i1-Q4_K_M.gguf": ["mradermacher/L3.1-8B-Dark-Planet-Slush-i1-GGUF", MessagesFormatterType.LLAMA_3],
    "QwenSlerp12-7B.Q5_K_M.gguf": ["mradermacher/QwenSlerp12-7B-GGUF", MessagesFormatterType.OPEN_CHAT],
@@ -980,6 +1146,7 @@ llm_models = {
    "Japanese-TextGen-Kage-v0.1.2-2x7B-NSFW_iMat_Ch200_IQ4_XS.gguf": ["dddump/Japanese-TextGen-Kage-v0.1.2-2x7B-NSFW-gguf", MessagesFormatterType.VICUNA],
    "ChatWaifu_v1.2.1.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.2.1-GGUF", MessagesFormatterType.MISTRAL],
    "ChatWaifu_v1.1.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.1-GGUF", MessagesFormatterType.MISTRAL],
    "Ninja-V2-7B_Q4_K_M.gguf": ["Local-Novel-LLM-project/Ninja-V2-7B-GGUF", MessagesFormatterType.VICUNA],
    "Yamase-12B.Q4_K_M.gguf": ["mradermacher/Yamase-12B-GGUF", MessagesFormatterType.MISTRAL],
    "borea-phi-3.5-mini-instruct-common.Q5_K_M.gguf": ["keitokei1994/Borea-Phi-3.5-mini-Instruct-Common-GGUF", MessagesFormatterType.PHI_3],

    "Rocinante-12B-v2h-Q4_K_M.gguf": ["BeaverAI/Rocinante-12B-v2h-GGUF", MessagesFormatterType.MISTRAL],
    "Mistral-Nemo-12B-ArliAI-RPMax-v1.1.i1-Q4_K_M.gguf": ["mradermacher/Mistral-Nemo-12B-ArliAI-RPMax-v1.1-i1-GGUF", MessagesFormatterType.MISTRAL],
    "Pans_Gutenbergum_V0.1.Q4_K_M.gguf": ["mradermacher/Pans_Gutenbergum_V0.1-GGUF", MessagesFormatterType.MISTRAL],
+    "AbominationScience-12B-v4.i1-Q4_K_M.gguf": ["mradermacher/AbominationScience-12B-v4-i1-GGUF", MessagesFormatterType.MISTRAL],
    "ChronoStar-Unleashed-v0.1.i1-Q4_K_M.gguf": ["mradermacher/ChronoStar-Unleashed-v0.1-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Chatty-Harry_V3.0.i1-Q4_K_M.gguf": ["mradermacher/Chatty-Harry_V3.0-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Tora-12B.i1-Q4_K_M.gguf": ["mradermacher/Tora-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "ChatML-Nemo-Pro-V2.i1-Q4_K_M.gguf": ["mradermacher/ChatML-Nemo-Pro-V2-i1-GGUF", MessagesFormatterType.MISTRAL],
    "Trinas_Nectar-8B-model_stock.i1-Q4_K_M.gguf": ["mradermacher/Trinas_Nectar-8B-model_stock-i1-GGUF", MessagesFormatterType.MISTRAL],
    "ChatWaifu_Magnum_V0.2.Q4_K_M.gguf": ["mradermacher/ChatWaifu_Magnum_V0.2-GGUF", MessagesFormatterType.MISTRAL],
    "ChatWaifu_12B_v2.0.Q5_K_M.gguf": ["mradermacher/ChatWaifu_12B_v2.0-GGUF", MessagesFormatterType.MISTRAL],
    "ChatWaifu_22B_v2.0_preview.Q4_K_S.gguf": ["mradermacher/ChatWaifu_22B_v2.0_preview-GGUF", MessagesFormatterType.MISTRAL],
    "ChatWaifu_v1.4.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.4-GGUF", MessagesFormatterType.MISTRAL],
    "ChatWaifu_v1.3.1.Q4_K_M.gguf": ["mradermacher/ChatWaifu_v1.3.1-GGUF", MessagesFormatterType.MISTRAL],
+    "Lamarck-14B-v0.2-experimental.Q4_K_M.gguf": ["mradermacher/Lamarck-14B-v0.2-experimental-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Llama3.1-Reddit-Writer-8B.Q5_K_M.gguf": ["mradermacher/Llama3.1-Reddit-Writer-8B-GGUF", MessagesFormatterType.LLAMA_3],
+    "Franken-MistressMaid-10.5B-v2.i1-Q4_K_M.gguf": ["mradermacher/Franken-MistressMaid-10.5B-v2-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Mercury_In_Retrograde-ALT-8b-Model-Stock.i1-Q4_K_M.gguf": ["mradermacher/Mercury_In_Retrograde-ALT-8b-Model-Stock-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "Virtuoso-Small.i1-Q4_K_M.gguf": ["mradermacher/Virtuoso-Small-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Tuldur-8B.Q4_K_M.gguf": ["mradermacher/Tuldur-8B-GGUF", MessagesFormatterType.LLAMA_3],
+    "Orbita-v0.1.i1-Q4_K_M.gguf": ["mradermacher/Orbita-v0.1-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Violet_Eris-BMO-12B.i1-Q4_K_M.gguf": ["mradermacher/Violet_Eris-BMO-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Mistral-Darwin-7b-v0.1.i1-Q5_K_M.gguf": ["mradermacher/Mistral-Darwin-7b-v0.1-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "PrimaSumika-10.7B-128k.Q4_K_M.gguf": ["mradermacher/PrimaSumika-10.7B-128k-GGUF", MessagesFormatterType.MISTRAL],
+    "L3-Umbral-Mind-RP-v2-8B.i1-Q5_K_M.gguf": ["mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "Arch-Function-7B.i1-Q5_K_M.gguf": ["mradermacher/Arch-Function-7B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Llama-3-Nerdy-RP-8B.i1-Q5_K_M.gguf": ["mradermacher/Llama-3-Nerdy-RP-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "magnum-twilight-12b.i1-Q4_K_M.gguf": ["mradermacher/magnum-twilight-12b-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Qwen2.5-Ultimate-14B-Instruct.i1-Q4_K_M.gguf": ["mradermacher/Qwen2.5-Ultimate-14B-Instruct-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "ContaLLM-Beauty-8B-Instruct.i1-Q5_K_M.gguf": ["mradermacher/ContaLLM-Beauty-8B-Instruct-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "Eidolon-v3.1-14B-deconditioned.Q4_K_M.gguf": ["mradermacher/Eidolon-v3.1-14B-deconditioned-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "ZEUS-8B-V2L2.i1-Q5_K_M.gguf": ["mradermacher/ZEUS-8B-V2L2-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "Rocinante-Prism_V2.0.Q4_K_M.gguf": ["mradermacher/Rocinante-Prism_V2.0-GGUF", MessagesFormatterType.MISTRAL],
+    "Rocinante-Prism_V2.1.Q4_K_M.gguf": ["mradermacher/Rocinante-Prism_V2.1-GGUF", MessagesFormatterType.MISTRAL],
+    "Virtuoso-Small-Q4_K_M.gguf": ["bartowski/Virtuoso-Small-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "ZEUS-8B-V2.i1-Q5_K_M.gguf": ["mradermacher/ZEUS-8B-V2-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "Lamarck-14B-v0.1-experimental.i1-Q4_K_M.gguf": ["mradermacher/Lamarck-14B-v0.1-experimental-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "patricide-12B-Unslop-Mell.Q4_K_M.gguf": ["mradermacher/patricide-12B-Unslop-Mell-GGUF", MessagesFormatterType.MISTRAL],
+    "Eidolon-v3.1-14B.Q4_K_M.gguf": ["mradermacher/Eidolon-v3.1-14B-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Frigg-v1.4-8b-HIGH-FANTASY.Q5_K_M.gguf": ["mradermacher/Frigg-v1.4-8b-HIGH-FANTASY-GGUF", MessagesFormatterType.LLAMA_3],
+    "Thor-v1.4-8b-DARK-FICTION.i1-Q5_K_M.gguf": ["mradermacher/Thor-v1.4-8b-DARK-FICTION-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "QwenMosaic-7B.i1-Q5_K_M.gguf": ["mradermacher/QwenMosaic-7B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Qwen2.5-7B-Spanish-0.2.i1-Q5_K_M.gguf": ["mradermacher/Qwen2.5-7B-Spanish-0.2-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "IceDrunkenCherryRP-7b-Q5_K_M.gguf": ["bartowski/IceDrunkenCherryRP-7b-GGUF", MessagesFormatterType.MISTRAL],
+    "SmolLumi-8B-Instruct.i1-Q5_K_M.gguf": ["mradermacher/SmolLumi-8B-Instruct-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "Capt-Dark-Science-12B.i1-Q4_K_S.gguf": ["mradermacher/Capt-Dark-Science-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Thor-v1.4-8b-DARK-FANTASY.i1-Q4_K_M.gguf": ["mradermacher/Thor-v1.4-8b-DARK-FANTASY-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "Mayo.Q5_K_M.gguf": ["mradermacher/Mayo-GGUF", MessagesFormatterType.MISTRAL],
+    "Ella-9B.i1-Q4_K_M.gguf": ["mradermacher/Ella-9B-i1-GGUF", MessagesFormatterType.ALPACA],
+    "miscii-14b-1028-Q4_K_M.gguf": ["bartowski/miscii-14b-1028-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "SeaMarco-o1-7B-v1.Q4_K_M.gguf": ["mradermacher/SeaMarco-o1-7B-v1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "L3.1-RP-Hero-InBetween-8B.i1-Q5_K_M.gguf": ["mradermacher/L3.1-RP-Hero-InBetween-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "SeQwence-14Bv4.Q4_K_M.gguf": ["mradermacher/SeQwence-14Bv4-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "QwenStock2-14B.Q4_K_M.gguf": ["mradermacher/QwenStock2-14B-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Dark-Science-12B.i1-Q4_K_S.gguf": ["mradermacher/Dark-Science-12B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Liberated-Qwen1.5-14B.Q4_K_M.gguf": ["mradermacher/Liberated-Qwen1.5-14B-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Blur-7b-slerp-v1.4.Q5_K_M.gguf": ["mradermacher/Blur-7b-slerp-v1.4-GGUF", MessagesFormatterType.MISTRAL],
+    "Llama-3-linear-8B.Q5_K_M.gguf": ["mradermacher/Llama-3-linear-8B-GGUF", MessagesFormatterType.LLAMA_3],
+    "SAINEMO-reMIX.i1-Q4_K_M.gguf": ["mradermacher/SAINEMO-reMIX-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Blur-7b-slerp-v1.44.Q5_K_M.gguf": ["mradermacher/Blur-7b-slerp-v1.44-GGUF", MessagesFormatterType.MISTRAL],
+    "QwenStock1-14B.Q4_K_S.gguf": ["mradermacher/QwenStock1-14B-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Matryoshka-8B-LINEAR.i1-Q4_K_S.gguf": ["mradermacher/Matryoshka-8B-LINEAR-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "8b-Base-mixed-1.Q5_K_M.gguf": ["mradermacher/8b-Base-mixed-1-GGUF", MessagesFormatterType.LLAMA_3],
+    "MT-Gen3-IMM-gemma-2-9B.Q4_K_M.gguf": ["mradermacher/MT-Gen3-IMM-gemma-2-9B-GGUF", MessagesFormatterType.ALPACA],
+    "MT-Gen3-IF-gemma-2-MT4g2S5-9B.Q4_K_M.gguf": ["mradermacher/MT-Gen3-IF-gemma-2-MT4g2S5-9B-GGUF", MessagesFormatterType.ALPACA],
+    "Nemo-DPO-v11.Q4_K_M.gguf": ["mradermacher/Nemo-DPO-v11-GGUF", MessagesFormatterType.MISTRAL],
+    "SeQwence-14B-EvolMerge.i1-Q4_K_M.gguf": ["mradermacher/SeQwence-14B-EvolMerge-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Q2.5-14B-Evalternagar.Q4_K_M.gguf": ["mradermacher/Q2.5-14B-Evalternagar-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "RP-SAINEMO.i1-Q4_K_M.gguf": ["mradermacher/RP-SAINEMO-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Freyja-v4.95-New-writer7-7b-NON-FICTION.i1-Q4_K_M.gguf": ["mradermacher/Freyja-v4.95-New-writer7-7b-NON-FICTION-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "WIP-Acacia-8B-Model_Stock.Q5_K_M.gguf": ["mradermacher/WIP-Acacia-8B-Model_Stock-GGUF", MessagesFormatterType.LLAMA_3],
+    "Freyja-v4.95-Sao10K-7b-NON-FICTION.i1-Q4_K_M.gguf": ["mradermacher/Freyja-v4.95-Sao10K-7b-NON-FICTION-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "MT-Merge2-MUB-gemma-2-9B.Q4_K_M.gguf": ["mradermacher/MT-Merge2-MUB-gemma-2-9B-GGUF", MessagesFormatterType.ALPACA],
+    "llama3.1-8b-instruct-political-subreddits.i1-Q5_K_M.gguf": ["mradermacher/llama3.1-8b-instruct-political-subreddits-i1-GGUF", MessagesFormatterType.LLAMA_3],
146
+ "Freyja-v4.95-Undi95-7b-NON-FICTION.i1-Q5_K_M.gguf": ["mradermacher/Freyja-v4.95-Undi95-7b-NON-FICTION-i1-GGUF", MessagesFormatterType.LLAMA_3],
147
+ "Freyja-v4.95-mix-7b-NON-FICTION.i1-Q4_K_M.gguf": ["mradermacher/Freyja-v4.95-mix-7b-NON-FICTION-i1-GGUF", MessagesFormatterType.LLAMA_3],
148
+ "Condensed_Milk-8B-Model_Stock.Q4_K_S.gguf": ["mradermacher/Condensed_Milk-8B-Model_Stock-GGUF", MessagesFormatterType.LLAMA_3],
149
+ "Freyja-v4.95-maldv-7b-NON-FICTION.i1-Q4_K_S.gguf": ["mradermacher/Freyja-v4.95-maldv-7b-NON-FICTION-i1-GGUF", MessagesFormatterType.LLAMA_3],
150
+ "Freyja-v4.95-New-writer-7b-NON-FICTION.i1-Q4_K_M.gguf": ["mradermacher/Freyja-v4.95-New-writer-7b-NON-FICTION-i1-GGUF", MessagesFormatterType.LLAMA_3],
151
+ "Freyja-v4.95-Dark-Planet-7b-NON-FICTION.Q5_K_M.gguf": ["mradermacher/Freyja-v4.95-Dark-Planet-7b-NON-FICTION-GGUF", MessagesFormatterType.LLAMA_3],
152
+ "AgoraMix-14B-stock-v0.1.i1-Q4_K_M.gguf": ["mradermacher/AgoraMix-14B-stock-v0.1-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
153
+ "Keiana-L3-Test6.2-8B-18.Q5_K_M.gguf": ["mradermacher/Keiana-L3-Test6.2-8B-18-GGUF", MessagesFormatterType.LLAMA_3],
154
+ "MFANNv0.25.i1-Q5_K_M.gguf": ["mradermacher/MFANNv0.25-i1-GGUF", MessagesFormatterType.LLAMA_3],
155
+ "Qwestion-14B.i1-Q4_K_M.gguf": ["mradermacher/Qwestion-14B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
156
+ "SeQwence-14B-EvolMergev1.i1-Q4_K_M.gguf": ["mradermacher/SeQwence-14B-EvolMergev1-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
157
+ "Frigg-v1.35-8b-HIGH-FANTASY-1024k.i1-Q5_K_M.gguf": ["mradermacher/Frigg-v1.35-8b-HIGH-FANTASY-1024k-i1-GGUF", MessagesFormatterType.LLAMA_3],
158
+ "Odin-v1.0-8b-FICTION-1024k.i1-Q4_K_M.gguf": ["mradermacher/Odin-v1.0-8b-FICTION-1024k-i1-GGUF", MessagesFormatterType.LLAMA_3],
159
+ "Marco-01-slerp6-7B.Q5_K_M.gguf": ["mradermacher/Marco-01-slerp6-7B-GGUF", MessagesFormatterType.OPEN_CHAT],
160
+ "Alita99-8B-LINEAR.Q5_K_M.gguf": ["mradermacher/Alita99-8B-LINEAR-GGUF", MessagesFormatterType.LLAMA_3],
161
+ "Thor-v1.35-8b-DARK-FANTASY-1024k.Q5_K_M.gguf": ["mradermacher/Thor-v1.35-8b-DARK-FANTASY-1024k-GGUF", MessagesFormatterType.LLAMA_3],
162
+ "Thor-v1.3a-8b-FANTASY-1024k.Q5_K_M.gguf": ["mradermacher/Thor-v1.3a-8b-FANTASY-1024k-GGUF", MessagesFormatterType.LLAMA_3],
163
+ "LemonP_ALT-8B-Model_Stock.Q5_K_M.gguf": ["mradermacher/LemonP_ALT-8B-Model_Stock-GGUF", MessagesFormatterType.LLAMA_3],
164
+ "Loki-v2.75-8b-EROTICA-1024k.Q5_K_M.gguf": ["mradermacher/Loki-v2.75-8b-EROTICA-1024k-GGUF", MessagesFormatterType.LLAMA_3],
165
+ "marco-o1-uncensored.Q5_K_M.gguf": ["mradermacher/marco-o1-uncensored-GGUF", MessagesFormatterType.OPEN_CHAT],
166
+ "SeQwence-14Bv1.i1-Q4_K_M.gguf": ["mradermacher/SeQwence-14Bv1-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
167
+ "MT5-Gen2-GP-gemma-2-MT1RGDv0.1-9B.Q4_K_M.gguf": ["mradermacher/MT5-Gen2-GP-gemma-2-MT1RGDv0.1-9B-GGUF", MessagesFormatterType.ALPACA],
168
+ "MT5-Gen2-IF-gemma-2-MT1RAv0.1-9B.Q4_K_M.gguf": ["mradermacher/MT5-Gen2-IF-gemma-2-MT1RAv0.1-9B-GGUF", MessagesFormatterType.ALPACA],
169
+ "SeQwence-14Bv2.i1-Q4_K_M.gguf": ["mradermacher/SeQwence-14Bv2-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
170
+ "ChaiML-Nemo-DPO-V8.i1-Q4_K_M.gguf": ["mradermacher/ChaiML-Nemo-DPO-V8-i1-GGUF", MessagesFormatterType.MISTRAL],
171
+ "MT4-Gen2-MAMU-gemma-2-9B.Q4_K_M.gguf": ["mradermacher/MT4-Gen2-MAMU-gemma-2-9B-GGUF", MessagesFormatterType.ALPACA],
172
+ "MT4-Gen2-IF-gemma-2-MT5MT1-9B.Q4_K_M.gguf": ["mradermacher/MT4-Gen2-IF-gemma-2-MT5MT1-9B-GGUF", MessagesFormatterType.ALPACA],
173
+ "Chronos-Prism_V1.0.i1-Q4_K_M.gguf": ["mradermacher/Chronos-Prism_V1.0-i1-GGUF", MessagesFormatterType.MISTRAL],
174
+ "Marco-01-slerp5-7B.Q5_K_M.gguf": ["mradermacher/Marco-01-slerp5-7B-GGUF", MessagesFormatterType.OPEN_CHAT],
175
+ "RQwen-v0.1.Q4_K_M.gguf": ["mradermacher/RQwen-v0.1-GGUF", MessagesFormatterType.OPEN_CHAT],
176
+ "Teleut-7b.i1-Q5_K_M.gguf": ["mradermacher/Teleut-7b-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
177
+ "RP-Naughty-v1.0f-8b.i1-Q5_K_M.gguf": ["mradermacher/RP-Naughty-v1.0f-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
178
+ "MT4-Gen2-GBMAMU-gemma-2-9B.Q4_K_M.gguf": ["mradermacher/MT4-Gen2-GBMAMU-gemma-2-9B-GGUF", MessagesFormatterType.ALPACA],
179
+ "RP-Naughty-v1.1-8b.Q5_K_M.gguf": ["mradermacher/RP-Naughty-v1.1-8b-GGUF", MessagesFormatterType.LLAMA_3],
180
+ "RP-Naughty-v1.0e-8b.Q5_K_M.gguf": ["mradermacher/RP-Naughty-v1.0e-8b-GGUF", MessagesFormatterType.LLAMA_3],
181
+ "RP-Naughty-v1.1b-8b.Q5_K_M.gguf": ["mradermacher/RP-Naughty-v1.1b-8b-GGUF", MessagesFormatterType.LLAMA_3],
182
+ "RP-Naughty-v1.1a-8b.Q5_K_M.gguf": ["mradermacher/RP-Naughty-v1.1a-8b-GGUF", MessagesFormatterType.LLAMA_3],
183
+ "Marco-01-slerp4-7B.Q5_K_M.gguf": ["mradermacher/Marco-01-slerp4-7B-GGUF", MessagesFormatterType.OPEN_CHAT],
184
+ "RP-Naughty-v1.0b-8b.Q5_K_M.gguf": ["mradermacher/RP-Naughty-v1.0b-8b-GGUF", MessagesFormatterType.LLAMA_3],
185
+ "RP-Naughty-v1.0c-8b.Q5_K_M.gguf": ["mradermacher/RP-Naughty-v1.0c-8b-GGUF", MessagesFormatterType.LLAMA_3],
186
+ "IceDrunkenCherryRP-7b.i1-Q5_K_M.gguf": ["mradermacher/IceDrunkenCherryRP-7b-i1-GGUF", MessagesFormatterType.ALPACA],
187
+ "Thor-v1.2-8b-1024k.i1-Q5_K_M.gguf": ["mradermacher/Thor-v1.2-8b-1024k-i1-GGUF", MessagesFormatterType.LLAMA_3],
188
+ "RP-Naughty-v1.0d-8b.Q5_K_M.gguf": ["mradermacher/RP-Naughty-v1.0d-8b-GGUF", MessagesFormatterType.LLAMA_3],
189
+ "Cakrawala-8B.i1-Q4_K_S.gguf": ["mradermacher/Cakrawala-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
190
+ "Thor-v1.1e-8b-1024k.Q5_K_M.gguf": ["mradermacher/Thor-v1.1e-8b-1024k-GGUF", MessagesFormatterType.LLAMA_3],
191
+ "RP-Naughty-v1.0a-8b.Q5_K_M.gguf": ["mradermacher/RP-Naughty-v1.0a-8b-GGUF", MessagesFormatterType.LLAMA_3],
192
+ "Qwen2.5-14B-Mixed-Instruct.Q4_K_M.gguf": ["mradermacher/Qwen2.5-14B-Mixed-Instruct-GGUF", MessagesFormatterType.OPEN_CHAT],
193
+ "WestKunai-Hermes-10.7b-test.Q4_K_M.gguf": ["mradermacher/WestKunai-Hermes-10.7b-test-GGUF", MessagesFormatterType.MISTRAL],
194
+ "Tulu-3.1-8B-SuperNova.i1-Q4_K_M.gguf": ["mradermacher/Tulu-3.1-8B-SuperNova-i1-GGUF", MessagesFormatterType.LLAMA_3],
195
+ "Loki-v2.6-8b-1024k.i1-Q5_K_M.gguf": ["mradermacher/Loki-v2.6-8b-1024k-i1-GGUF", MessagesFormatterType.LLAMA_3],
196
+ "Kosmos-8B-v1.i1-Q5_K_M.gguf": ["mradermacher/Kosmos-8B-v1-i1-GGUF", MessagesFormatterType.LLAMA_3],
197
+ "Qwen2.5-7B-Instruct-DPO-v01.Q5_K_M.gguf": ["mradermacher/Qwen2.5-7B-Instruct-DPO-v01-GGUF", MessagesFormatterType.OPEN_CHAT],
198
+ "HomerCreativeAnvita-Mix-Qw7B.i1-Q5_K_M.gguf": ["mradermacher/HomerCreativeAnvita-Mix-Qw7B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
199
+ "EVA-Tissint-v1.2-14B.i1-Q4_K_M.gguf": ["mradermacher/EVA-Tissint-v1.2-14B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
200
+ "Llama-3.1-Tulu-3-8B-abliterated.i1-Q5_K_M.gguf": ["mradermacher/Llama-3.1-Tulu-3-8B-abliterated-i1-GGUF", MessagesFormatterType.LLAMA_3],
201
+ "Mistral-Nemo-12B-ArliAI-RPMax-v1.2.i1-Q4_K_M.gguf": ["mradermacher/Mistral-Nemo-12B-ArliAI-RPMax-v1.2-i1-GGUF", MessagesFormatterType.MISTRAL],
202
+ "EVA-Tissint-v1.2-14B.i1-Q4_K_M.gguf": ["mradermacher/EVA-Tissint-v1.2-14B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
203
+ "Intelligence-7.i1-Q5_K_M.gguf": ["mradermacher/Intelligence-7-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
204
+ "Ice0.40-20.11-RP.i1-Q5_K_M.gguf": ["mradermacher/Ice0.40-20.11-RP-i1-GGUF", MessagesFormatterType.MISTRAL],
205
+ "Llama-3.1-8B-ArliAI-RPMax-v1.2.i1-Q5_K_M.gguf": ["mradermacher/Llama-3.1-8B-ArliAI-RPMax-v1.2-i1-GGUF", MessagesFormatterType.LLAMA_3],
206
+ "SzilviaB_DarkSlushNeuralDaredevil-8b-abliterated.i1-Q5_K_M.gguf": ["mradermacher/SzilviaB_DarkSlushNeuralDaredevil-8b-abliterated-i1-GGUF", MessagesFormatterType.LLAMA_3],
207
+ "Qwen2.5-7B-HomerCreative-Mix.i1-Q5_K_M.gguf": ["mradermacher/Qwen2.5-7B-HomerCreative-Mix-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
208
+ "MN-Instruct-2407-14.7B-BRAINSTORM-10x-FORM-3.i1-Q4_K_M.gguf": ["mradermacher/MN-Instruct-2407-14.7B-BRAINSTORM-10x-FORM-3-i1-GGUF", MessagesFormatterType.MISTRAL],
209
+ "MN-Instruct-2407-13.35B-BRAINSTORM-5x-FORM-11.Q4_K_M.gguf": ["mradermacher/MN-Instruct-2407-13.35B-BRAINSTORM-5x-FORM-11-GGUF", MessagesFormatterType.MISTRAL],
210
+ "NeuralDarkDevil-8b-abliterated.i1-Q5_K_M.gguf": ["mradermacher/NeuralDarkDevil-8b-abliterated-i1-GGUF", MessagesFormatterType.LLAMA_3],
211
+ "DarkNeuralDaredevilUnholy-8b.i1-Q5_K_M.gguf": ["mradermacher/DarkNeuralDaredevilUnholy-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
212
+ "DarkAuraUnholy-Uncensored-OAS-8b.i1-Q5_K_M.gguf": ["mradermacher/DarkAuraUnholy-Uncensored-OAS-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
213
+ "DarkUnholyDareDevil-abliterated-8b.i1-Q5_K_M.gguf": ["mradermacher/DarkUnholyDareDevil-abliterated-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
214
+ "DarkDareDevilAura-abliterated-uncensored-OAS-8b.i1-Q5_K_M.gguf": ["mradermacher/DarkDareDevilAura-abliterated-uncensored-OAS-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
215
+ "DarkNeuralDareDevil-Eight-Orbs-Of-Power-8b.i1-Q5_K_M.gguf": ["mradermacher/DarkNeuralDareDevil-Eight-Orbs-Of-Power-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
216
+ "Mistral-NeuralDPO-v0.4.Q5_K_M.gguf": ["mradermacher/Mistral-NeuralDPO-v0.4-GGUF", MessagesFormatterType.MISTRAL],
217
+ "Hermes-Instruct-7B-v0.2.i1-Q5_K_M.gguf": ["mradermacher/Hermes-Instruct-7B-v0.2-i1-GGUF", MessagesFormatterType.MISTRAL],
218
+ "Llama-3.1-Tulu-3-8B-DPO-Q5_K_M.gguf": ["bartowski/Llama-3.1-Tulu-3-8B-DPO-GGUF", MessagesFormatterType.LLAMA_3],
219
+ "Platyboros-Instruct-7B.i1-Q5_K_M.gguf": ["mradermacher/Platyboros-Instruct-7B-i1-GGUF", MessagesFormatterType.MISTRAL],
220
+ "hermes-llama3-roleplay-2000-v3.i1-Q5_K_M.gguf": ["mradermacher/hermes-llama3-roleplay-2000-v3-i1-GGUF", MessagesFormatterType.LLAMA_3],
221
+ "Hermes-Instruct-7B-100K.i1-Q5_K_M.gguf": ["mradermacher/Hermes-Instruct-7B-100K-i1-GGUF", MessagesFormatterType.MISTRAL],
222
+ "SeQwence-14B.i1-Q4_K_M.gguf": ["mradermacher/SeQwence-14B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
223
+ "Llama-3.1-Tulu-3-8B-Q5_K_M.gguf": ["bartowski/Llama-3.1-Tulu-3-8B-GGUF", MessagesFormatterType.LLAMA_3],
224
+ "Ministral-8B-Instruct-2410.Q5_K_M.gguf": ["mradermacher/Ministral-8B-Instruct-2410-GGUF", MessagesFormatterType.MISTRAL],
225
+ "Loki-v2.6-8b-1024k.Q4_K_M.gguf": ["mradermacher/Loki-v2.6-8b-1024k-GGUF", MessagesFormatterType.LLAMA_3],
226
+ "DarkUnholyPlanet-OAS-8b.i1-Q5_K_M.gguf": ["mradermacher/DarkUnholyPlanet-OAS-8b-i1-GGUF", MessagesFormatterType.LLAMA_3],
227
+ "Qwen2.5-7B-HomerAnvita-NerdMix.i1-Q4_K_M.gguf": ["mradermacher/Qwen2.5-7B-HomerAnvita-NerdMix-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
228
+ "DarkUnholyDareDevil-8b-abliterated.i1-Q4_K_M.gguf": ["mradermacher/DarkUnholyDareDevil-8b-abliterated-i1-GGUF", MessagesFormatterType.LLAMA_3],
229
+ "LLama3.1-Hawkish-Theia-Fireball-8B.i1-Q5_K_M.gguf": ["mradermacher/LLama3.1-Hawkish-Theia-Fireball-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
230
+ "MT3-Gen2-MU-gemma-2-GQv1-9B.Q4_K_M.gguf": ["mradermacher/MT3-Gen2-MU-gemma-2-GQv1-9B-GGUF", MessagesFormatterType.ALPACA],
231
+ "MT3-Gen2-GMM-gemma-2-9B.Q4_K_M.gguf": ["mradermacher/MT3-Gen2-GMM-gemma-2-9B-GGUF", MessagesFormatterType.ALPACA],
232
+ "Platyboros-Instruct-7B.Q5_K_M.gguf": ["mradermacher/Platyboros-Instruct-7B-GGUF", MessagesFormatterType.MISTRAL],
233
+ "Fuselage-8B.Q5_K_M.gguf": ["mradermacher/Fuselage-8B-GGUF", MessagesFormatterType.LLAMA_3],
234
+ "Kudzerk-8B.Q5_K_M.gguf": ["mradermacher/Kudzerk-8B-GGUF", MessagesFormatterType.LLAMA_3],
235
+ "Qwen2.5-Coder-7B-Instruct-abliterated-TIES-v2.0.i1-Q5_K_M.gguf": ["mradermacher/Qwen2.5-Coder-7B-Instruct-abliterated-TIES-v2.0-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
236
+ "B-NIMITA-L3-8B-v0.02.Q5_K_M.gguf": ["mradermacher/B-NIMITA-L3-8B-v0.02-GGUF", MessagesFormatterType.LLAMA_3],
237
+ "L3.1-Aspire-Heart-Matrix-8B.Q5_K_M.gguf": ["mradermacher/L3.1-Aspire-Heart-Matrix-8B-GGUF", MessagesFormatterType.LLAMA_3],
238
+ "HomerSlerp1-7B.Q5_K_M.gguf": ["mradermacher/HomerSlerp1-7B-GGUF", MessagesFormatterType.OPEN_CHAT],
239
+ "MN-Slush.i1-Q4_K_M.gguf": ["mradermacher/MN-Slush-i1-GGUF", MessagesFormatterType.MISTRAL],
240
+ "HomerSlerp2-7B.i1-Q4_K_M.gguf": ["mradermacher/HomerSlerp2-7B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
241
+ "LLaMA-Mesh-Q5_K_M.gguf": ["bartowski/LLaMA-Mesh-GGUF", MessagesFormatterType.LLAMA_3],
242
+ "BgGPT-Gemma-2-9B-IT-v1.0.i1-Q4_K_M.gguf": ["mradermacher/BgGPT-Gemma-2-9B-IT-v1.0-i1-GGUF", MessagesFormatterType.ALPACA],
243
+ "Ice0.37-19.11-RP-orpo-1.i1-Q5_K_M.gguf": ["mradermacher/Ice0.37-19.11-RP-orpo-1-i1-GGUF", MessagesFormatterType.MISTRAL],
244
+ "CursedMatrix-8B-v9.i1-Q5_K_M.gguf": ["mradermacher/CursedMatrix-8B-v9-i1-GGUF", MessagesFormatterType.LLAMA_3],
245
+ "Cakrawala-8B.i1-Q5_K_M.gguf": ["mradermacher/Cakrawala-8B-i1-GGUF", MessagesFormatterType.LLAMA_3],
246
+ "JerseyDevil-14b.i1-Q4_K_M.gguf": ["mradermacher/JerseyDevil-14b-i1-GGUF", MessagesFormatterType.SOLAR],
247
+ "Llama-3.1-Jamet-8B-MK.I.i1-Q5_K_M.gguf": ["mradermacher/Llama-3.1-Jamet-8B-MK.I-i1-GGUF", MessagesFormatterType.LLAMA_3],
248
  "SeQwence-14B-v5.Q4_K_S.gguf": ["mradermacher/SeQwence-14B-v5-GGUF", MessagesFormatterType.OPEN_CHAT],
249
  "L3.1-8B-Dark-Planet-Slush.i1-Q4_K_M.gguf": ["mradermacher/L3.1-8B-Dark-Planet-Slush-i1-GGUF", MessagesFormatterType.LLAMA_3],
250
  "QwenSlerp12-7B.Q5_K_M.gguf": ["mradermacher/QwenSlerp12-7B-GGUF", MessagesFormatterType.OPEN_CHAT],
 
1146
  "Japanese-TextGen-Kage-v0.1.2-2x7B-NSFW_iMat_Ch200_IQ4_XS.gguf": ["dddump/Japanese-TextGen-Kage-v0.1.2-2x7B-NSFW-gguf", MessagesFormatterType.VICUNA],
1147
  "ChatWaifu_v1.2.1.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.2.1-GGUF", MessagesFormatterType.MISTRAL],
1148
  "ChatWaifu_v1.1.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.1-GGUF", MessagesFormatterType.MISTRAL],
1149
+ "ChatWaifu_v1.0.i1-Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.0-i1-GGUF", MessagesFormatterType.MISTRAL],
1150
  "Ninja-V2-7B_Q4_K_M.gguf": ["Local-Novel-LLM-project/Ninja-V2-7B-GGUF", MessagesFormatterType.VICUNA],
1151
  "Yamase-12B.Q4_K_M.gguf": ["mradermacher/Yamase-12B-GGUF", MessagesFormatterType.MISTRAL],
1152
  "borea-phi-3.5-mini-instruct-common.Q5_K_M.gguf": ["keitokei1994/Borea-Phi-3.5-mini-Instruct-Common-GGUF", MessagesFormatterType.PHI_3],
modutils.py CHANGED
@@ -172,7 +172,7 @@ class ModelInformation:
  self.download_url = json_data.get("downloadUrl", "")
  self.model_url = f"https://civitai.com/models/{self.model_id}?modelVersionId={self.model_version_id}"
  self.filename_url = next(
- (v.get("name", "") for v in json_data.get("files", []) if str(self.model_version_id) in v.get("downloadUrl", "")), ""
+ (v.get("name", "") for v in reversed(json_data.get("files", [])) if str(self.model_version_id) in v.get("downloadUrl", "")), ""
  )
  self.filename_url = self.filename_url if self.filename_url else ""
  self.description = json_data.get("description", "")
@@ -302,6 +302,10 @@ def safe_float(input):
  return output


+ def valid_model_name(model_name: str):
+ return model_name.split(" ")[0]
+
+
  def save_images(images: list[Image.Image], metadatas: list[str]):
  from PIL import PngImagePlugin
  import uuid
@@ -566,7 +570,8 @@ private_lora_model_list = get_private_lora_model_lists()

  def get_civitai_info(path):
  global civitai_not_exists_list
- if path in set(civitai_not_exists_list): return ["", "", "", "", ""]
+ default = ["", "", "", "", ""]
+ if path in set(civitai_not_exists_list): return default
  if not Path(path).exists(): return None
  user_agent = get_user_agent()
  headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
@@ -584,12 +589,12 @@ def get_civitai_info(path):
  r = session.get(url, params=params, headers=headers, stream=True, timeout=(3.0, 15))
  except Exception as e:
  print(e)
- return ["", "", "", "", ""]
+ return default
  if not r.ok: return None
  json = r.json()
  if not 'baseModel' in json:
  civitai_not_exists_list.append(path)
- return ["", "", "", "", ""]
+ return default
  items = []
  items.append(" / ".join(json['trainedWords']))
  items.append(json['baseModel'])
@@ -690,7 +695,7 @@ def copy_lora(path: str, new_path: str):
  return None


- def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: str, lora5: str):
+ def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: str, lora5: str, lora6: str, lora7: str):
  path = download_lora(dl_urls)
  if path:
  if not lora1 or lora1 == "None":
@@ -703,9 +708,13 @@ def download_my_lora(dl_urls: str, lora1: str, lora2: str, lora3: str, lora4: str, lora5: str):
  lora4 = path
  elif not lora5 or lora5 == "None":
  lora5 = path
+ #elif not lora6 or lora6 == "None":
+ # lora6 = path
+ #elif not lora7 or lora7 == "None":
+ # lora7 = path
  choices = get_all_lora_tupled_list()
  return gr.update(value=lora1, choices=choices), gr.update(value=lora2, choices=choices), gr.update(value=lora3, choices=choices),\
- gr.update(value=lora4, choices=choices), gr.update(value=lora5, choices=choices)
+ gr.update(value=lora4, choices=choices), gr.update(value=lora5, choices=choices), gr.update(value=lora6, choices=choices), gr.update(value=lora7, choices=choices)


  def get_valid_lora_name(query: str, model_name: str):
@@ -745,25 +754,31 @@ def get_valid_lora_wt(prompt: str, lora_path: str, lora_wt: float):
  return wt


- def set_prompt_loras(prompt, prompt_syntax, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
- if not "Classic" in str(prompt_syntax): return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
+ def set_prompt_loras(prompt, prompt_syntax, model_name, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt):
+ if not "Classic" in str(prompt_syntax): return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt
  lora1 = get_valid_lora_name(lora1, model_name)
  lora2 = get_valid_lora_name(lora2, model_name)
  lora3 = get_valid_lora_name(lora3, model_name)
  lora4 = get_valid_lora_name(lora4, model_name)
  lora5 = get_valid_lora_name(lora5, model_name)
- if not "<lora" in prompt: return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
+ #lora6 = get_valid_lora_name(lora6, model_name)
+ #lora7 = get_valid_lora_name(lora7, model_name)
+ if not "<lora" in prompt: return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt
  lora1_wt = get_valid_lora_wt(prompt, lora1, lora1_wt)
  lora2_wt = get_valid_lora_wt(prompt, lora2, lora2_wt)
  lora3_wt = get_valid_lora_wt(prompt, lora3, lora3_wt)
  lora4_wt = get_valid_lora_wt(prompt, lora4, lora4_wt)
  lora5_wt = get_valid_lora_wt(prompt, lora5, lora5_wt)
+ #lora6_wt = get_valid_lora_wt(prompt, lora6, lora6_wt)
+ #lora7_wt = get_valid_lora_wt(prompt, lora7, lora7_wt)
  on1, label1, tag1, md1 = get_lora_info(lora1)
  on2, label2, tag2, md2 = get_lora_info(lora2)
  on3, label3, tag3, md3 = get_lora_info(lora3)
  on4, label4, tag4, md4 = get_lora_info(lora4)
  on5, label5, tag5, md5 = get_lora_info(lora5)
- lora_paths = [lora1, lora2, lora3, lora4, lora5]
+ #on6, label6, tag6, md6 = get_lora_info(lora6)
+ #on7, label7, tag7, md7 = get_lora_info(lora7)
+ lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
  prompts = prompt.split(",") if prompt else []
  for p in prompts:
  p = str(p).strip()
@@ -780,30 +795,40 @@ def set_prompt_loras(prompt, prompt_syntax, model_name, lora1, lora1_wt, lora2,
  continue
  elif not on1:
  lora1 = path
- lora_paths = [lora1, lora2, lora3, lora4, lora5]
+ lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
  lora1_wt = safe_float(wt)
  on1 = True
  elif not on2:
  lora2 = path
- lora_paths = [lora1, lora2, lora3, lora4, lora5]
+ lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
  lora2_wt = safe_float(wt)
  on2 = True
  elif not on3:
  lora3 = path
- lora_paths = [lora1, lora2, lora3, lora4, lora5]
+ lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
  lora3_wt = safe_float(wt)
  on3 = True
  elif not on4:
  lora4 = path
- lora_paths = [lora1, lora2, lora3, lora4, lora5]
+ lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
  lora4_wt = safe_float(wt)
  on4 = True
  elif not on5:
  lora5 = path
- lora_paths = [lora1, lora2, lora3, lora4, lora5]
+ lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
  lora5_wt = safe_float(wt)
  on5 = True
- return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt
+ #elif not on6:
+ # lora6 = path
+ # lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
+ # lora6_wt = safe_float(wt)
+ # on6 = True
+ #elif not on7:
+ # lora7 = path
+ # lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]
+ # lora7_wt = safe_float(wt)
+ # on7 = True
+ return lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt


  def get_lora_info(lora_path: str):
@@ -864,13 +889,15 @@ def apply_lora_prompt(prompt: str = "", lora_info: str = ""):
  return gr.update(value=prompt)


- def update_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt):
+ def update_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt, lora4, lora4_wt, lora5, lora5_wt, lora6, lora6_wt, lora7, lora7_wt):
  on1, label1, tag1, md1 = get_lora_info(lora1)
  on2, label2, tag2, md2 = get_lora_info(lora2)
  on3, label3, tag3, md3 = get_lora_info(lora3)
  on4, label4, tag4, md4 = get_lora_info(lora4)
  on5, label5, tag5, md5 = get_lora_info(lora5)
- lora_paths = [lora1, lora2, lora3, lora4, lora5]
+ on6, label6, tag6, md6 = get_lora_info(lora6)
+ on7, label7, tag7, md7 = get_lora_info(lora7)
+ lora_paths = [lora1, lora2, lora3, lora4, lora5, lora6, lora7]

  output_prompt = prompt
  if "Classic" in str(prompt_syntax):
@@ -895,6 +922,8 @@ def update_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3,
  if on3: lora_prompts.append(f"<lora:{to_lora_key(lora3)}:{lora3_wt:.2f}>")
  if on4: lora_prompts.append(f"<lora:{to_lora_key(lora4)}:{lora4_wt:.2f}>")
  if on5: lora_prompts.append(f"<lora:{to_lora_key(lora5)}:{lora5_wt:.2f}>")
+ #if on6: lora_prompts.append(f"<lora:{to_lora_key(lora6)}:{lora6_wt:.2f}>")
+ #if on7: lora_prompts.append(f"<lora:{to_lora_key(lora7)}:{lora7_wt:.2f}>")
  output_prompt = ", ".join(list_uniq(output_prompts + lora_prompts + [""]))
  choices = get_all_lora_tupled_list()

@@ -907,7 +936,11 @@ def update_loras(prompt, prompt_syntax, lora1, lora1_wt, lora2, lora2_wt, lora3,
  gr.update(value=lora4, choices=choices), gr.update(value=lora4_wt),\
  gr.update(value=tag4, label=label4, visible=on4), gr.update(visible=on4), gr.update(value=md4, visible=on4),\
  gr.update(value=lora5, choices=choices), gr.update(value=lora5_wt),\
- gr.update(value=tag5, label=label5, visible=on5), gr.update(visible=on5), gr.update(value=md5, visible=on5)
+ gr.update(value=tag5, label=label5, visible=on5), gr.update(visible=on5), gr.update(value=md5, visible=on5),\
+ gr.update(value=lora6, choices=choices), gr.update(value=lora6_wt),\
+ gr.update(value=tag6, label=label6, visible=on6), gr.update(visible=on6), gr.update(value=md6, visible=on6),\
+ gr.update(value=lora7, choices=choices), gr.update(value=lora7_wt),\
+ gr.update(value=tag7, label=label7, visible=on7), gr.update(visible=on7), gr.update(value=md7, visible=on7)


  def get_my_lora(link_url, romanize):
@@ -926,7 +959,6 @@ def get_my_lora(link_url, romanize):
  path.resolve().rename(new_path.resolve())
  update_lora_dict(str(new_path))
  l_path = str(new_path)
- new_lora_model_list = get_lora_model_list()
  new_lora_tupled_list = get_all_lora_tupled_list()
  msg_lora = "Downloaded"
  if l_name:
@@ -943,6 +975,10 @@ def get_my_lora(link_url, romanize):
  choices=new_lora_tupled_list
  ), gr.update(
  choices=new_lora_tupled_list
+ ), gr.update(
+ choices=new_lora_tupled_list
+ ), gr.update(
+ choices=new_lora_tupled_list
  ), gr.update(
  value=msg_lora
  )
@@ -975,12 +1011,19 @@ def move_file_lora(filepaths):
  choices=new_lora_tupled_list
  ), gr.update(
  choices=new_lora_tupled_list
+ ), gr.update(
+ choices=new_lora_tupled_list
+ ), gr.update(
+ choices=new_lora_tupled_list
  )


- CIVITAI_SORT = ["Highest Rated", "Most Downloaded", "Newest"]
+ CIVITAI_SORT = ["Highest Rated", "Most Downloaded", "Most Liked", "Most Discussed", "Most Collected", "Most Buzz", "Newest"]
  CIVITAI_PERIOD = ["AllTime", "Year", "Month", "Week", "Day"]
- CIVITAI_BASEMODEL = ["Pony", "Illustrious", "SDXL 1.0", "SD 1.5", "Flux.1 D", "Flux.1 S"]
+ CIVITAI_BASEMODEL = ["Pony", "Illustrious", "SDXL 1.0", "SD 1.5", "Flux.1 D", "Flux.1 S"] # , "SD 3.5"
+ CIVITAI_TYPE = ["Checkpoint", "TextualInversion", "Hypernetwork", "AestheticGradient", "LORA", "LoCon", "DoRA",
+ "Controlnet", "Upscaler", "MotionModule", "VAE", "Poses", "Wildcards", "Workflows", "Other"]
+ CIVITAI_FILETYPE = ["Model", "VAE", "Config", "Training Data"]


  def get_civitai_info(path):
@@ -1025,6 +1068,7 @@ def search_lora_on_civitai(query: str, allow_model: list[str] = ["Pony", "SDXL 1.0"],
  sort: str = "Highest Rated", period: str = "AllTime", tag: str = "", user: str = "", page: int = 1):
  user_agent = get_user_agent()
  headers = {'User-Agent': user_agent, 'content-type': 'application/json'}
+ if CIVITAI_API_KEY: headers['Authorization'] = f'Bearer {CIVITAI_API_KEY}'
  base_url = 'https://civitai.com/api/v1/models'
  params = {'types': ['LORA'], 'sort': sort, 'period': period, 'limit': limit, 'page': int(page), 'nsfw': 'true'}
  if query: params["query"] = query
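Note on the new Authorization header in search_lora_on_civitai: Civitai's API expects a plain Bearer token, which is what the f-string above sends. A standalone sketch of the request the function builds (the token is assumed here to come from the environment, and the parameter defaults are illustrative, not the app's actual configuration):

# A standalone sketch of the Civitai LORA search request; CIVITAI_API_KEY is
# an optional API token, assumed here to be read from the environment.
import os
import requests

CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY", "")

def search_civitai_loras(query: str, page: int = 1, limit: int = 100):
    headers = {'User-Agent': 'Mozilla/5.0', 'content-type': 'application/json'}
    if CIVITAI_API_KEY: headers['Authorization'] = f'Bearer {CIVITAI_API_KEY}'
    params = {'types': ['LORA'], 'sort': 'Highest Rated', 'period': 'AllTime',
              'limit': limit, 'page': int(page), 'nsfw': 'true'}
    if query: params['query'] = query
    r = requests.get('https://civitai.com/api/v1/models', params=params,
                     headers=headers, timeout=(3.0, 15))
    r.raise_for_status()
    return r.json().get('items', [])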
requirements.txt CHANGED
@@ -1,10 +1,8 @@
- spaces
+ git+https://github.com/R3gm/stablepy.git@a9fe2dc # -b refactor_sampler_fix
  accelerate
  diffusers
  invisible_watermark
  transformers
- xformers
- git+https://github.com/R3gm/stablepy.git@8edabb0 # -b refactor_sampler_fix
  torch==2.2.0
  numpy<2
  gdown
@@ -22,4 +20,5 @@ translatepy
  timm
  wrapt-timeout-decorator
  sentencepiece
- unidecode
+ unidecode
+ ultralytics==8.3.47
utils.py CHANGED
@@ -62,7 +62,7 @@ class ModelInformation:
  self.download_url = json_data.get("downloadUrl", "")
  self.model_url = f"https://civitai.com/models/{self.model_id}?modelVersionId={self.model_version_id}"
  self.filename_url = next(
- (v.get("name", "") for v in json_data.get("files", []) if str(self.model_version_id) in v.get("downloadUrl", "")), ""
+ (v.get("name", "") for v in reversed(json_data.get("files", [])) if str(self.model_version_id) in v.get("downloadUrl", "")), ""
  )
  self.filename_url = self.filename_url if self.filename_url else ""
  self.description = json_data.get("description", "")
@@ -274,6 +274,10 @@ def get_my_lora(link_url, romanize):
  choices=new_lora_model_list
  ), gr.update(
  choices=new_lora_model_list
+ ), gr.update(
+ choices=new_lora_model_list
+ ), gr.update(
+ choices=new_lora_model_list
  ), gr.update(
  value=msg_lora
  )
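The reversed() change in ModelInformation (mirrored in modutils.py above) is easy to miss: when several files in a Civitai model version match the version id, the last one now wins instead of the first. A small self-contained sketch of that selection logic (the sample data is made up):

# Sketch of the filename selection before/after this commit; sample data is made up.
json_data = {"files": [
    {"name": "model.safetensors", "downloadUrl": "https://civitai.com/api/download/models/123"},
    {"name": "model.fp16.safetensors", "downloadUrl": "https://civitai.com/api/download/models/123"},
]}
model_version_id = 123

first_match = next((v.get("name", "") for v in json_data.get("files", [])
                    if str(model_version_id) in v.get("downloadUrl", "")), "")
last_match = next((v.get("name", "") for v in reversed(json_data.get("files", []))
                   if str(model_version_id) in v.get("downloadUrl", "")), "")
print(first_match)  # "model.safetensors"       (old behavior)
print(last_match)   # "model.fp16.safetensors"  (new behavior)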