r3gm committed on
Commit 9071247
1 Parent(s): 92ac6a2

Update app.py

Files changed (1)
  1. app.py +323 -116
app.py CHANGED
@@ -1,28 +1,3 @@
- task_stablepy = {
-     'txt2img': 'txt2img',
-     'img2img': 'img2img',
-     'inpaint': 'inpaint',
-     'sdxl_canny T2I Adapter': 'sdxl_canny',
-     'sdxl_sketch T2I Adapter': 'sdxl_sketch',
-     'sdxl_lineart T2I Adapter': 'sdxl_lineart',
-     'sdxl_depth-midas T2I Adapter': 'sdxl_depth-midas',
-     'sdxl_openpose T2I Adapter': 'sdxl_openpose',
-     'sd_openpose ControlNet': 'openpose',
-     'sd_canny ControlNet': 'canny',
-     'sd_mlsd ControlNet': 'mlsd',
-     'sd_scribble ControlNet': 'scribble',
-     'sd_softedge ControlNet': 'softedge',
-     'sd_segmentation ControlNet': 'segmentation',
-     'sd_depth ControlNet': 'depth',
-     'sd_normalbae ControlNet': 'normalbae',
-     'sd_lineart ControlNet': 'lineart',
-     'sd_lineart_anime ControlNet': 'lineart_anime',
-     'sd_shuffle ControlNet': 'shuffle',
-     'sd_ip2p ControlNet': 'ip2p',
- }
-
- task_model_list = list(task_stablepy.keys())
-
  #######################
  # UTILS
  #######################
@@ -34,7 +9,22 @@ from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
  import torch
  import re
  import shutil
-
 
  preprocessor_controlnet = {
      "openpose": [
@@ -88,6 +78,33 @@ preprocessor_controlnet = {
      ]
  }
 
 
  def download_things(directory, url, hf_token="", civitai_api_key=""):
      url = url.strip()
@@ -154,9 +171,9 @@ os.makedirs(directory_vaes, exist_ok=True)
  # - **Download SD 1.5 Models**
  download_model = "https://huggingface.co/frankjoshua/toonyou_beta6/resolve/main/toonyou_beta6.safetensors"
  # - **Download VAEs**
- download_vae = "https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/resolve/main/sdxl.vae.safetensors?download=true, https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/resolve/main/sdxl_vae-fp16fix-c-1.1-b-0.5.safetensors?download=true, https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/resolve/main/sdxl_vae-fp16fix-blessed.safetensors?download=true, https://huggingface.co/digiplay/VAE/resolve/main/vividReal_v20.safetensors?download=true, https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/kl-f8-anime2_fp16.safetensors?download=true, https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/ClearVAE_V2.3_fp16.safetensors?download=true, https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/vae-ft-mse-840000-ema-pruned_fp16.safetensors?download=true, https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/blessed2_fp16.safetensors?download=true"
  # - **Download LoRAs**
- download_lora = "https://civitai.com/api/download/models/135867, https://civitai.com/api/download/models/135931, https://civitai.com/api/download/models/177492, https://civitai.com/api/download/models/145907, https://huggingface.co/Linaqruf/anime-detailer-xl-lora/resolve/main/anime-detailer-xl.safetensors?download=true, https://huggingface.co/Linaqruf/style-enhancer-xl-lora/resolve/main/style-enhancer-xl.safetensors?download=true, https://civitai.com/api/download/models/28609"
  load_diffusers_format_model = [
      'stabilityai/stable-diffusion-xl-base-1.0',
      'misri/epicrealismXL_v7FinalDestination',
@@ -165,7 +182,6 @@ load_diffusers_format_model = [
      'cagliostrolab/animagine-xl-3.1',
      'misri/kohakuXLEpsilon_rev1',
      'kitty7779/ponyDiffusionV6XL',
-     'runwayml/stable-diffusion-v1-5',
      'digiplay/majicMIX_realistic_v6',
      'digiplay/majicMIX_realistic_v7',
      'digiplay/DreamShaper_8',
@@ -191,16 +207,9 @@ for url in [url.strip() for url in download_lora.split(',')]:
  directory_embeds = 'embedings'
  os.makedirs(directory_embeds, exist_ok=True)
  download_embeds = [
-     'https://huggingface.co/datasets/Nerfgun3/bad_prompt/resolve/main/bad_prompt.pt',
      'https://huggingface.co/datasets/Nerfgun3/bad_prompt/blob/main/bad_prompt_version2.pt',
-     'https://huggingface.co/embed/EasyNegative/resolve/main/EasyNegative.safetensors',
      'https://huggingface.co/embed/negative/resolve/main/EasyNegativeV2.safetensors',
      'https://huggingface.co/embed/negative/resolve/main/bad-hands-5.pt',
-     'https://huggingface.co/embed/negative/resolve/main/bad-artist.pt',
-     'https://huggingface.co/embed/negative/resolve/main/ng_deepnegative_v1_75t.pt',
-     'https://huggingface.co/embed/negative/resolve/main/bad-artist-anime.pt',
-     'https://huggingface.co/embed/negative/resolve/main/bad-image-v2-39000.pt',
-     'https://huggingface.co/embed/negative/resolve/main/verybadimagenegative_v1.3.pt',
  ]
 
  for url_embed in download_embeds:
@@ -299,21 +308,51 @@ warnings.filterwarnings(action="ignore", category=FutureWarning, module="transfo
  from stablepy import logger
  logger.setLevel(logging.DEBUG)
 
-
  class GuiSD:
-     def __init__(self):
          self.model = None
-
-     @spaces.GPU
-     def infer_short(self, model, pipe_params):
-         images, image_list = model(**pipe_params)
-         return images
 
      @spaces.GPU(duration=120)
      def infer(self, model, pipe_params):
          images, image_list = model(**pipe_params)
          return images
 
      def generate_pipeline(
          self,
          prompt,
@@ -406,8 +445,23 @@ class GuiSD:
          mask_dilation_b,
          mask_blur_b,
          mask_padding_b,
      ):
 
          vae_model = vae_model if vae_model != "None" else None
          loras_list = [lora1, lora2, lora3, lora4, lora5]
 
@@ -427,23 +481,45 @@ class GuiSD:
 
          for la in loras_list:
              if la is not None and la != "None":
-                 lora_type = "animetarot" in la.lower()
                  if (model_is_xl and lora_type) or (not model_is_xl and not lora_type):
                      gr.Info(f"The LoRA {la} is for { 'SD 1.5' if model_is_xl else 'SDXL' }, but you are using { model_type }.")
 
          task = task_stablepy[task]
 
          # First load
          model_precision = torch.float16
          if not self.model:
-             from stablepy import Model_Diffusers
 
              print("Loading model...")
-             self.model = Model_Diffusers(
                  base_model_id=model_name,
                  task_name=task,
                  vae_model=vae_model if vae_model != "None" else None,
-                 type_model_precision=model_precision
              )
 
          if task != "txt2img" and not image_control:
@@ -473,7 +549,8 @@ class GuiSD:
                  model_name,
                  task_name=task,
                  vae_model=vae_model if vae_model != "None" else None,
-                 type_model_precision=model_precision
              )
 
          if textual_inversion and self.model.class_name == "StableDiffusionXLPipeline":
@@ -575,27 +652,30 @@ class GuiSD:
              "hires_negative_prompt": hires_negative_prompt,
              "hires_sampler": hires_sampler,
              "hires_before_adetailer": hires_before_adetailer,
-             "hires_after_adetailer": hires_after_adetailer
          }
 
          # print(pipe_params)
 
-         if (
-             (img_height > 1700 and img_width > 1700)
-             or (num_images > 1 and img_height>1048 and img_width>1048)
-             or (num_images > 1 and upscaler_model)
-             or (num_images > 1 and adetailer_active_a or num_images > 1 and adetailer_active_b)
-             or (num_images > 1 and steps>50)
-             or (adetailer_active_a and adetailer_active_b)
-             or (upscaler_model and upscaler_increases_size > 1.7)
-             or (steps > 75)
-             or (image_resolution > 1048)
-         ):
-             print("Inference 2")
-             return self.infer(self.model, pipe_params)
 
-         print("Inference 1")
-         return self.infer_short(self.model, pipe_params)
 
 
  sd_gen = GuiSD()
@@ -605,11 +685,8 @@ CSS ="""
  #component-0 { height: 100%; }
  #gallery { flex-grow: 1; }
  """
-
- sdxl_task = task_model_list[:3] + task_model_list[3:8]
- sd_task = task_model_list[:3] + task_model_list[8:]
-
-
  def update_task_options(model_name, task_name):
      if model_name in model_list:
          if "xl" in model_name.lower():
@@ -641,14 +718,20 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
  model_name_gui = gr.Dropdown(label="Model", choices=model_list, value=model_list[0], allow_custom_value=True)
  prompt_gui = gr.Textbox(lines=5, placeholder="Enter prompt", label="Prompt")
  neg_prompt_gui = gr.Textbox(lines=3, placeholder="Enter Neg prompt", label="Negative prompt")
  generate_button = gr.Button(value="GENERATE", variant="primary")
-
  model_name_gui.change(
      update_task_options,
      [model_name_gui, task_gui],
      [task_gui],
  )
 
  result_images = gr.Gallery(
      label="Generated images",
      show_label=False,
@@ -662,20 +745,147 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
      selected_index=50,
  )
 
  with gr.Column(scale=1):
      steps_gui = gr.Slider(minimum=1, maximum=100, step=1, value=30, label="Steps")
      cfg_gui = gr.Slider(minimum=0, maximum=30, step=0.5, value=7.5, label="CFG")
      sampler_gui = gr.Dropdown(label="Sampler", choices=scheduler_names, value="Euler a")
      img_width_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=1024, label="Img Width")
      img_height_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=1024, label="Img Height")
-     clip_skip_gui = gr.Checkbox(value=True, label="Layer 2 Clip Skip")
-     free_u_gui = gr.Checkbox(value=True, label="FreeU")
      seed_gui = gr.Number(minimum=-1, maximum=9999999999, value=-1, label="Seed")
      num_images_gui = gr.Slider(minimum=1, maximum=4, step=1, value=1, label="Images")
-     prompt_s_options = [("Compel (default) format: (word)weight", "Compel"), ("Classic (sd1.5 long prompts) format: (word:weight)", "Classic")]
      prompt_syntax_gui = gr.Dropdown(label="Prompt Syntax", choices=prompt_s_options, value=prompt_s_options[0][1])
      vae_model_gui = gr.Dropdown(label="VAE Model", choices=vae_model_list)
-
      with gr.Accordion("ControlNet / Img2img / Inpaint", open=False, visible=True):
          image_control = gr.Image(label="Image ControlNet/Inpaint/Img2img", type="filepath")
          image_mask_gui = gr.Image(label="Image Mask", type="filepath")
@@ -685,7 +895,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
  )
  image_resolution_gui = gr.Slider(minimum=64, maximum=2048, step=64, value=1024, label="Image Resolution")
  preprocessor_name_gui = gr.Dropdown(label="Preprocessor Name", choices=preprocessor_controlnet["canny"])
-
  def change_preprocessor_choices(task):
      task = task_stablepy[task]
      if task in preprocessor_controlnet.keys():
@@ -693,7 +903,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
      else:
          choices_task = preprocessor_controlnet["canny"]
      return gr.update(choices=choices_task, value=choices_task[0])
-
  task_gui.change(
      change_preprocessor_choices,
      [task_gui],
@@ -713,25 +923,13 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
  adapter_conditioning_scale_gui = gr.Slider(minimum=0, maximum=5., step=0.1, value=1, label="Adapter Conditioning Scale")
  adapter_conditioning_factor_gui = gr.Slider(minimum=0, maximum=1., step=0.01, value=0.55, label="Adapter Conditioning Factor (%)")
 
- with gr.Accordion("LoRA", open=False, visible=True):
-     lora1_gui = gr.Dropdown(label="Lora1", choices=lora_model_list)
-     lora_scale_1_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 1")
-     lora2_gui = gr.Dropdown(label="Lora2", choices=lora_model_list)
-     lora_scale_2_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 2")
-     lora3_gui = gr.Dropdown(label="Lora3", choices=lora_model_list)
-     lora_scale_3_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 3")
-     lora4_gui = gr.Dropdown(label="Lora4", choices=lora_model_list)
-     lora_scale_4_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 4")
-     lora5_gui = gr.Dropdown(label="Lora5", choices=lora_model_list)
-     lora_scale_5_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 5")
-
  with gr.Accordion("Styles", open=False, visible=True):
-
      try:
          style_names_found = sd_gen.model.STYLE_NAMES
      except:
          style_names_found = STYLE_NAMES
-
      style_prompt_gui = gr.Dropdown(
          style_names_found,
          multiselect=True,
@@ -746,42 +944,28 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
      if not sd_gen.model:
          gr.Info("First load the model")
          return gr.update(value=None, choices=STYLE_NAMES)
-
      sd_gen.model.load_style_file(json)
      gr.Info(f"{len(sd_gen.model.STYLE_NAMES)} styles loaded")
      return gr.update(value=None, choices=sd_gen.model.STYLE_NAMES)
 
  style_button.click(load_json_style_file, [style_json_gui], [style_prompt_gui])
-
  with gr.Accordion("Textual inversion", open=False, visible=False):
      active_textual_inversion_gui = gr.Checkbox(value=False, label="Active Textual Inversion in prompt")
 
- with gr.Accordion("Hires fix", open=False, visible=True):
-
-     upscaler_keys = list(upscaler_dict_gui.keys())
-
-     upscaler_model_path_gui = gr.Dropdown(label="Upscaler", choices=upscaler_keys, value=upscaler_keys[0])
-     upscaler_increases_size_gui = gr.Slider(minimum=1.1, maximum=6., step=0.1, value=1.5, label="Upscale by")
-     esrgan_tile_gui = gr.Slider(minimum=0, value=100, maximum=500, step=1, label="ESRGAN Tile")
-     esrgan_tile_overlap_gui = gr.Slider(minimum=1, maximum=200, step=1, value=10, label="ESRGAN Tile Overlap")
-     hires_steps_gui = gr.Slider(minimum=0, value=30, maximum=100, step=1, label="Hires Steps")
-     hires_denoising_strength_gui = gr.Slider(minimum=0.1, maximum=1.0, step=0.01, value=0.55, label="Hires Denoising Strength")
-     hires_sampler_gui = gr.Dropdown(label="Hires Sampler", choices=["Use same sampler"] + scheduler_names[:-1], value="Use same sampler")
-     hires_prompt_gui = gr.Textbox(label="Hires Prompt", placeholder="Main prompt will be use", lines=3)
-     hires_negative_prompt_gui = gr.Textbox(label="Hires Negative Prompt", placeholder="Main negative prompt will be use", lines=3)
-
  with gr.Accordion("Detailfix", open=False, visible=True):
 
      # Adetailer Inpaint Only
      adetailer_inpaint_only_gui = gr.Checkbox(label="Inpaint only", value=True)
-
      # Adetailer Verbose
      adetailer_verbose_gui = gr.Checkbox(label="Verbose", value=False)
-
      # Adetailer Sampler
      adetailer_sampler_options = ["Use same sampler"] + scheduler_names[:-1]
      adetailer_sampler_gui = gr.Dropdown(label="Adetailer sampler:", choices=adetailer_sampler_options, value="Use same sampler")
-
      with gr.Accordion("Detailfix A", open=False, visible=True):
          # Adetailer A
          adetailer_active_a_gui = gr.Checkbox(label="Enable Adetailer A", value=False)
@@ -794,7 +978,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
          mask_dilation_a_gui = gr.Number(label="Mask dilation:", value=4, minimum=1)
          mask_blur_a_gui = gr.Number(label="Mask blur:", value=4, minimum=1)
          mask_padding_a_gui = gr.Number(label="Mask padding:", value=32, minimum=1)
-
      with gr.Accordion("Detailfix B", open=False, visible=True):
          # Adetailer B
          adetailer_active_b_gui = gr.Checkbox(label="Enable Adetailer B", value=False)
@@ -809,16 +993,17 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
          mask_padding_b_gui = gr.Number(label="Mask padding:", value=32, minimum=1)
 
  with gr.Accordion("Other settings", open=False, visible=True):
      hires_before_adetailer_gui = gr.Checkbox(value=False, label="Hires Before Adetailer")
      hires_after_adetailer_gui = gr.Checkbox(value=True, label="Hires After Adetailer")
      generator_in_cpu_gui = gr.Checkbox(value=False, label="Generator in CPU")
 
  with gr.Accordion("More settings", open=False, visible=False):
      loop_generation_gui = gr.Slider(minimum=1, value=1, label="Loop Generation")
      leave_progress_bar_gui = gr.Checkbox(value=True, label="Leave Progress Bar")
      disable_progress_bar_gui = gr.Checkbox(value=False, label="Disable Progress Bar")
-     image_previews_gui = gr.Checkbox(value=False, label="Image Previews")
-     display_images_gui = gr.Checkbox(value=False, label="Display Images")
      save_generated_images_gui = gr.Checkbox(value=False, label="Save Generated Images")
      image_storage_location_gui = gr.Textbox(value="./images", label="Image Storage Location")
      retain_compel_previous_load_gui = gr.Checkbox(value=False, label="Retain Compel Previous Load")
@@ -938,7 +1123,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
  1024,
  "misri/epicrealismXL_v7FinalDestination",
  None, # vae
- "sdxl_canny T2I Adapter",
  "image.webp", # img conttol
  "Canny", # preprocessor
  1024, # preproc resolution
@@ -1067,7 +1252,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
  512,
  "digiplay/majicMIX_realistic_v7",
  None, # vae
- "sd_canny ControlNet",
  "image.webp", # img conttol
  "Canny", # preprocessor
  512, # preproc resolution
@@ -1176,7 +1361,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
  brush=gr.Brush(
      default_size="16", # or leave it as 'auto'
      color_mode="fixed", # 'fixed' hides the user swatches and colorpicker, 'defaults' shows it
-     #default_color="black", # html names are supported
      colors=[
          "rgba(0, 0, 0, 1)", # rgb(a)
          "rgba(0, 0, 0, 0.1)",
@@ -1200,6 +1385,16 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
  btn_send.click(send_img, [img_source, img_result], [image_control, image_mask_gui])
 
  generate_button.click(
      fn=sd_gen.generate_pipeline,
      inputs=[
          prompt_gui,
@@ -1292,9 +1487,21 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
          mask_dilation_b_gui,
          mask_blur_b_gui,
          mask_padding_b_gui,
      ],
-     outputs=[result_images],
      queue=True,
  )
 
  app.queue()
  #######################
  # UTILS
  #######################
 
  import torch
  import re
  import shutil
+ import random
+ from stablepy import (
+     CONTROLNET_MODEL_IDS,
+     VALID_TASKS,
+     T2I_PREPROCESSOR_NAME,
+     FLASH_LORA,
+     SCHEDULER_CONFIG_MAP,
+     scheduler_names,
+     IP_ADAPTER_MODELS,
+     IP_ADAPTERS_SD,
+     IP_ADAPTERS_SDXL,
+     REPO_IMAGE_ENCODER,
+     ALL_PROMPT_WEIGHT_OPTIONS,
+     SD15_TASKS,
+     SDXL_TASKS,
+ )
 
  preprocessor_controlnet = {
      "openpose": [
 
      ]
  }
 
+ task_stablepy = {
+     'txt2img': 'txt2img',
+     'img2img': 'img2img',
+     'inpaint': 'inpaint',
+     # 'canny T2I Adapter': 'sdxl_canny_t2i', # NO HAVE STEP CALLBACK PARAMETERS SO NOT WORKS WITH DIFFUSERS 0.29.0
+     # 'sketch T2I Adapter': 'sdxl_sketch_t2i',
+     # 'lineart T2I Adapter': 'sdxl_lineart_t2i',
+     # 'depth-midas T2I Adapter': 'sdxl_depth-midas_t2i',
+     # 'openpose T2I Adapter': 'sdxl_openpose_t2i',
+     'openpose ControlNet': 'openpose',
+     'canny ControlNet': 'canny',
+     'mlsd ControlNet': 'mlsd',
+     'scribble ControlNet': 'scribble',
+     'softedge ControlNet': 'softedge',
+     'segmentation ControlNet': 'segmentation',
+     'depth ControlNet': 'depth',
+     'normalbae ControlNet': 'normalbae',
+     'lineart ControlNet': 'lineart',
+     'lineart_anime ControlNet': 'lineart_anime',
+     'shuffle ControlNet': 'shuffle',
+     'ip2p ControlNet': 'ip2p',
+     'optical pattern ControlNet': 'pattern',
+     'tile realistic': 'sdxl_tile_realistic',
+ }
+
+ task_model_list = list(task_stablepy.keys())
+
 
  def download_things(directory, url, hf_token="", civitai_api_key=""):
      url = url.strip()
 
  # - **Download SD 1.5 Models**
  download_model = "https://huggingface.co/frankjoshua/toonyou_beta6/resolve/main/toonyou_beta6.safetensors"
  # - **Download VAEs**
+ download_vae = "https://huggingface.co/madebyollin/sdxl-vae-fp16-fix/resolve/main/sdxl.vae.safetensors?download=true, https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/resolve/main/sdxl_vae-fp16fix-c-1.1-b-0.5.safetensors?download=true, https://huggingface.co/nubby/blessed-sdxl-vae-fp16-fix/resolve/main/sdxl_vae-fp16fix-blessed.safetensors?download=true, https://huggingface.co/digiplay/VAE/resolve/main/vividReal_v20.safetensors?download=true, https://huggingface.co/fp16-guy/anything_kl-f8-anime2_vae-ft-mse-840000-ema-pruned_blessed_clearvae_fp16_cleaned/resolve/main/vae-ft-mse-840000-ema-pruned_fp16.safetensors?download=true"
  # - **Download LoRAs**
+ download_lora = "https://civitai.com/api/download/models/135867, https://civitai.com/api/download/models/135931, https://civitai.com/api/download/models/177492, https://civitai.com/api/download/models/145907, https://huggingface.co/Linaqruf/anime-detailer-xl-lora/resolve/main/anime-detailer-xl.safetensors?download=true, https://huggingface.co/Linaqruf/style-enhancer-xl-lora/resolve/main/style-enhancer-xl.safetensors?download=true, https://civitai.com/api/download/models/28609, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SD15-8steps-CFG-lora.safetensors?download=true, https://huggingface.co/ByteDance/Hyper-SD/resolve/main/Hyper-SDXL-8steps-CFG-lora.safetensors?download=true"
  load_diffusers_format_model = [
      'stabilityai/stable-diffusion-xl-base-1.0',
      'misri/epicrealismXL_v7FinalDestination',
 
      'cagliostrolab/animagine-xl-3.1',
      'misri/kohakuXLEpsilon_rev1',
      'kitty7779/ponyDiffusionV6XL',
      'digiplay/majicMIX_realistic_v6',
      'digiplay/majicMIX_realistic_v7',
      'digiplay/DreamShaper_8',
 
  directory_embeds = 'embedings'
  os.makedirs(directory_embeds, exist_ok=True)
  download_embeds = [
      'https://huggingface.co/datasets/Nerfgun3/bad_prompt/blob/main/bad_prompt_version2.pt',
      'https://huggingface.co/embed/negative/resolve/main/EasyNegativeV2.safetensors',
      'https://huggingface.co/embed/negative/resolve/main/bad-hands-5.pt',
  ]
 
  for url_embed in download_embeds:
 
  from stablepy import logger
  logger.setLevel(logging.DEBUG)
 
  class GuiSD:
+     def __init__(self, stream=True):
          self.model = None
+
+         print("Loading model...")
+         self.model = Model_Diffusers(
+             base_model_id="cagliostrolab/animagine-xl-3.1",
+             task_name="txt2img",
+             vae_model=None,
+             type_model_precision=torch.float16,
+             retain_task_model_in_cache=False,
+         )
 
      @spaces.GPU(duration=120)
      def infer(self, model, pipe_params):
          images, image_list = model(**pipe_params)
          return images
 
+     def load_new_model(self, model_name, vae_model, task, progress=gr.Progress(track_tqdm=True)):
+
+         yield f"Loading model: {model_name}"
+
+         vae_model = vae_model if vae_model != "None" else None
+
+         if model_name in model_list:
+             model_is_xl = "xl" in model_name.lower()
+             sdxl_in_vae = vae_model and "sdxl" in vae_model.lower()
+             model_type = "SDXL" if model_is_xl else "SD 1.5"
+             incompatible_vae = (model_is_xl and vae_model and not sdxl_in_vae) or (not model_is_xl and sdxl_in_vae)
+
+             if incompatible_vae:
+                 vae_model = None
+
+         self.model.load_pipe(
+             model_name,
+             task_name=task_stablepy[task],
+             vae_model=vae_model if vae_model != "None" else None,
+             type_model_precision=torch.float16,
+             retain_task_model_in_cache=False,
+         )
+         yield f"Model loaded: {model_name} {vae_model if vae_model else ''}"
+
+     @spaces.GPU
      def generate_pipeline(
          self,
          prompt,
 
          mask_dilation_b,
          mask_blur_b,
          mask_padding_b,
+         retain_task_cache_gui,
+         image_ip1,
+         mask_ip1,
+         model_ip1,
+         mode_ip1,
+         scale_ip1,
+         image_ip2,
+         mask_ip2,
+         model_ip2,
+         mode_ip2,
+         scale_ip2,
+         # progress=gr.Progress(track_tqdm=True),
+         # progress=gr.Progress()
      ):
 
+         # progress(0.01, desc="Loading model...")
+
          vae_model = vae_model if vae_model != "None" else None
          loras_list = [lora1, lora2, lora3, lora4, lora5]
 
 
          for la in loras_list:
              if la is not None and la != "None":
+                 print(la)
+                 lora_type = ("animetarot" in la.lower() or "Hyper-SD15-8steps".lower() in la.lower())
                  if (model_is_xl and lora_type) or (not model_is_xl and not lora_type):
                      gr.Info(f"The LoRA {la} is for { 'SD 1.5' if model_is_xl else 'SDXL' }, but you are using { model_type }.")
 
          task = task_stablepy[task]
 
+         params_ip_img = []
+         params_ip_msk = []
+         params_ip_model = []
+         params_ip_mode = []
+         params_ip_scale = []
+
+         all_adapters = [
+             (image_ip1, mask_ip1, model_ip1, mode_ip1, scale_ip1),
+             (image_ip2, mask_ip2, model_ip2, mode_ip2, scale_ip2),
+         ]
+
+         for imgip, mskip, modelip, modeip, scaleip in all_adapters:
+             if imgip:
+                 params_ip_img.append(imgip)
+                 if mskip:
+                     params_ip_msk.append(mskip)
+                 params_ip_model.append(modelip)
+                 params_ip_mode.append(modeip)
+                 params_ip_scale.append(scaleip)
+
          # First load
          model_precision = torch.float16
          if not self.model:
+             from modelstream import Model_Diffusers2
 
              print("Loading model...")
+             self.model = Model_Diffusers2(
                  base_model_id=model_name,
                  task_name=task,
                  vae_model=vae_model if vae_model != "None" else None,
+                 type_model_precision=model_precision,
+                 retain_task_model_in_cache=retain_task_cache_gui,
              )
 
          if task != "txt2img" and not image_control:
 
                  model_name,
                  task_name=task,
                  vae_model=vae_model if vae_model != "None" else None,
+                 type_model_precision=model_precision,
+                 retain_task_model_in_cache=retain_task_cache_gui,
              )
 
          if textual_inversion and self.model.class_name == "StableDiffusionXLPipeline":
 
              "hires_negative_prompt": hires_negative_prompt,
              "hires_sampler": hires_sampler,
              "hires_before_adetailer": hires_before_adetailer,
+             "hires_after_adetailer": hires_after_adetailer,
+             "ip_adapter_image": params_ip_img,
+             "ip_adapter_mask": params_ip_msk,
+             "ip_adapter_model": params_ip_model,
+             "ip_adapter_mode": params_ip_mode,
+             "ip_adapter_scale": params_ip_scale,
          }
 
          # print(pipe_params)
 
+         random_number = random.randint(1, 100)
+         if random_number < 25 and num_images < 3:
+             if not upscaler_model and steps < 45 and task in ["txt2img", "img2img"] and not adetailer_active_a and not adetailer_active_b:
+                 num_images *=2
+                 pipe_params["num_images"] = num_images
+                 gr.Info("Num images x 2 🎉")
 
+         # print("Inference 1")
+         # yield self.infer_short(self.model, pipe_params)
+         for img, seed, data in self.model(**pipe_params):
+             info_state = f"PROCESSING..."
+             if data:
+                 info_state = f"COMPLETE: seeds={str(seed)}"
+             yield img, info_state
 
 
  sd_gen = GuiSD()
 
  #component-0 { height: 100%; }
  #gallery { flex-grow: 1; }
  """
+ sdxl_task = [k for k, v in task_stablepy.items() if v in SDXL_TASKS ]
+ sd_task = [k for k, v in task_stablepy.items() if v in SD15_TASKS ]
 
  def update_task_options(model_name, task_name):
      if model_name in model_list:
          if "xl" in model_name.lower():
 
  model_name_gui = gr.Dropdown(label="Model", choices=model_list, value=model_list[0], allow_custom_value=True)
  prompt_gui = gr.Textbox(lines=5, placeholder="Enter prompt", label="Prompt")
  neg_prompt_gui = gr.Textbox(lines=3, placeholder="Enter Neg prompt", label="Negative prompt")
+ with gr.Row(equal_height=False):
+     set_params_gui = gr.Button(value="↙️")
+     clear_prompt_gui = gr.Button(value="🗑️")
+     set_random_seed = gr.Button(value="🎲")
  generate_button = gr.Button(value="GENERATE", variant="primary")
+
  model_name_gui.change(
      update_task_options,
      [model_name_gui, task_gui],
      [task_gui],
  )
 
+ load_model_gui = gr.HTML()
+
  result_images = gr.Gallery(
      label="Generated images",
      show_label=False,
 
      selected_index=50,
  )
 
+ actual_task_info = gr.HTML()
+
  with gr.Column(scale=1):
      steps_gui = gr.Slider(minimum=1, maximum=100, step=1, value=30, label="Steps")
      cfg_gui = gr.Slider(minimum=0, maximum=30, step=0.5, value=7.5, label="CFG")
      sampler_gui = gr.Dropdown(label="Sampler", choices=scheduler_names, value="Euler a")
      img_width_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=1024, label="Img Width")
      img_height_gui = gr.Slider(minimum=64, maximum=4096, step=8, value=1024, label="Img Height")
      seed_gui = gr.Number(minimum=-1, maximum=9999999999, value=-1, label="Seed")
+     with gr.Row():
+         clip_skip_gui = gr.Checkbox(value=True, label="Layer 2 Clip Skip")
+         free_u_gui = gr.Checkbox(value=True, label="FreeU")
+
+     with gr.Row(equal_height=False):
+
+         def run_set_params_gui(base_prompt):
+             valid_receptors = {  # default values
+                 "prompt": gr.update(value=base_prompt),
+                 "neg_prompt": gr.update(value=""),
+                 "Steps": gr.update(value=30),
+                 "width": gr.update(value=1024),
+                 "height": gr.update(value=1024),
+                 "Seed": gr.update(value=-1),
+                 "Sampler": gr.update(value="Euler a"),
+                 "scale": gr.update(value=7.5),  # cfg
+                 "skip": gr.update(value=True),
+             }
+             valid_keys = list(valid_receptors.keys())
+
+             parameters = extract_parameters(base_prompt)
+             for key, val in parameters.items():
+                 # print(val)
+                 if key in valid_keys:
+                     if key == "Sampler":
+                         if val not in scheduler_names:
+                             continue
+                     elif key == "skip":
+                         if int(val) >= 2:
+                             val = True
+                     if key == "prompt":
+                         if ">" in val and "<" in val:
+                             val = re.sub(r'<[^>]+>', '', val)
+                             print("Removed LoRA written in the prompt")
+                     if key in ["prompt", "neg_prompt"]:
+                         val = val.strip()
+                     if key in ["Steps", "width", "height", "Seed"]:
+                         val = int(val)
+                     if key == "scale":
+                         val = float(val)
+                     if key == "Seed":
+                         continue
+                     valid_receptors[key] = gr.update(value=val)
+                     # print(val, type(val))
+             # print(valid_receptors)
+             return [value for value in valid_receptors.values()]
+
+         set_params_gui.click(
+             run_set_params_gui, [prompt_gui], [
+                 prompt_gui,
+                 neg_prompt_gui,
+                 steps_gui,
+                 img_width_gui,
+                 img_height_gui,
+                 seed_gui,
+                 sampler_gui,
+                 cfg_gui,
+                 clip_skip_gui,
+             ],
+         )
+
+         def run_clear_prompt_gui():
+             return gr.update(value=""), gr.update(value="")
+         clear_prompt_gui.click(
+             run_clear_prompt_gui, [], [prompt_gui, neg_prompt_gui]
+         )
+
+         def run_set_random_seed():
+             return -1
+         set_random_seed.click(
+             run_set_random_seed, [], seed_gui
+         )
+
      num_images_gui = gr.Slider(minimum=1, maximum=4, step=1, value=1, label="Images")
+     prompt_s_options = [
+         ("Compel format: (word)weight", "Compel"),
+         ("Classic format: (word:weight)", "Classic"),
+         ("Classic-original format: (word:weight)", "Classic-original"),
+         ("Classic-no_norm format: (word:weight)", "Classic-no_norm"),
+         ("Classic-ignore", "Classic-ignore"),
+         ("None", "None"),
+     ]
      prompt_syntax_gui = gr.Dropdown(label="Prompt Syntax", choices=prompt_s_options, value=prompt_s_options[0][1])
      vae_model_gui = gr.Dropdown(label="VAE Model", choices=vae_model_list)
+
+ with gr.Accordion("Hires fix", open=False, visible=True):
+
+     upscaler_keys = list(upscaler_dict_gui.keys())
+
+     upscaler_model_path_gui = gr.Dropdown(label="Upscaler", choices=upscaler_keys, value=upscaler_keys[0])
+     upscaler_increases_size_gui = gr.Slider(minimum=1.1, maximum=6., step=0.1, value=1.4, label="Upscale by")
+     esrgan_tile_gui = gr.Slider(minimum=0, value=100, maximum=500, step=1, label="ESRGAN Tile")
+     esrgan_tile_overlap_gui = gr.Slider(minimum=1, maximum=200, step=1, value=10, label="ESRGAN Tile Overlap")
+     hires_steps_gui = gr.Slider(minimum=0, value=30, maximum=100, step=1, label="Hires Steps")
+     hires_denoising_strength_gui = gr.Slider(minimum=0.1, maximum=1.0, step=0.01, value=0.55, label="Hires Denoising Strength")
+     hires_sampler_gui = gr.Dropdown(label="Hires Sampler", choices=["Use same sampler"] + scheduler_names[:-1], value="Use same sampler")
+     hires_prompt_gui = gr.Textbox(label="Hires Prompt", placeholder="Main prompt will be use", lines=3)
+     hires_negative_prompt_gui = gr.Textbox(label="Hires Negative Prompt", placeholder="Main negative prompt will be use", lines=3)
+
+ with gr.Accordion("LoRA", open=False, visible=True):
+     lora1_gui = gr.Dropdown(label="Lora1", choices=lora_model_list)
+     lora_scale_1_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 1")
+     lora2_gui = gr.Dropdown(label="Lora2", choices=lora_model_list)
+     lora_scale_2_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 2")
+     lora3_gui = gr.Dropdown(label="Lora3", choices=lora_model_list)
+     lora_scale_3_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 3")
+     lora4_gui = gr.Dropdown(label="Lora4", choices=lora_model_list)
+     lora_scale_4_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 4")
+     lora5_gui = gr.Dropdown(label="Lora5", choices=lora_model_list)
+     lora_scale_5_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 5")
+
+ with gr.Accordion("IP-Adapter", open=False, visible=True):##############
+
+     IP_MODELS = sorted(list(set(IP_ADAPTERS_SD + IP_ADAPTERS_SDXL)))
+     MODE_IP_OPTIONS = ["original", "style", "layout", "style+layout"]
+
+     with gr.Accordion("IP-Adapter 1", open=False, visible=True):
+         image_ip1 = gr.Image(label="IP Image", type="filepath")
+         mask_ip1 = gr.Image(label="IP Mask", type="filepath")
+         model_ip1 = gr.Dropdown(value="plus_face", label="Model", choices=IP_MODELS)
+         mode_ip1 = gr.Dropdown(value="original", label="Mode", choices=MODE_IP_OPTIONS)
+         scale_ip1 = gr.Slider(minimum=0., maximum=2., step=0.01, value=0.7, label="Scale")
+     with gr.Accordion("IP-Adapter 2", open=False, visible=True):
+         image_ip2 = gr.Image(label="IP Image", type="filepath")
+         mask_ip2 = gr.Image(label="IP Mask (optional)", type="filepath")
+         model_ip2 = gr.Dropdown(value="base", label="Model", choices=IP_MODELS)
+         mode_ip2 = gr.Dropdown(value="style", label="Mode", choices=MODE_IP_OPTIONS)
+         scale_ip2 = gr.Slider(minimum=0., maximum=2., step=0.01, value=0.7, label="Scale")
+
  with gr.Accordion("ControlNet / Img2img / Inpaint", open=False, visible=True):
      image_control = gr.Image(label="Image ControlNet/Inpaint/Img2img", type="filepath")
      image_mask_gui = gr.Image(label="Image Mask", type="filepath")
 
  )
  image_resolution_gui = gr.Slider(minimum=64, maximum=2048, step=64, value=1024, label="Image Resolution")
  preprocessor_name_gui = gr.Dropdown(label="Preprocessor Name", choices=preprocessor_controlnet["canny"])
+
  def change_preprocessor_choices(task):
      task = task_stablepy[task]
      if task in preprocessor_controlnet.keys():
 
      else:
          choices_task = preprocessor_controlnet["canny"]
      return gr.update(choices=choices_task, value=choices_task[0])
+
  task_gui.change(
      change_preprocessor_choices,
      [task_gui],
 
  adapter_conditioning_scale_gui = gr.Slider(minimum=0, maximum=5., step=0.1, value=1, label="Adapter Conditioning Scale")
  adapter_conditioning_factor_gui = gr.Slider(minimum=0, maximum=1., step=0.01, value=0.55, label="Adapter Conditioning Factor (%)")
 
  with gr.Accordion("Styles", open=False, visible=True):
+
      try:
          style_names_found = sd_gen.model.STYLE_NAMES
      except:
          style_names_found = STYLE_NAMES
+
      style_prompt_gui = gr.Dropdown(
          style_names_found,
          multiselect=True,
 
      if not sd_gen.model:
          gr.Info("First load the model")
          return gr.update(value=None, choices=STYLE_NAMES)
+
      sd_gen.model.load_style_file(json)
      gr.Info(f"{len(sd_gen.model.STYLE_NAMES)} styles loaded")
      return gr.update(value=None, choices=sd_gen.model.STYLE_NAMES)
 
  style_button.click(load_json_style_file, [style_json_gui], [style_prompt_gui])
+
  with gr.Accordion("Textual inversion", open=False, visible=False):
      active_textual_inversion_gui = gr.Checkbox(value=False, label="Active Textual Inversion in prompt")
 
  with gr.Accordion("Detailfix", open=False, visible=True):
 
      # Adetailer Inpaint Only
      adetailer_inpaint_only_gui = gr.Checkbox(label="Inpaint only", value=True)
+
      # Adetailer Verbose
      adetailer_verbose_gui = gr.Checkbox(label="Verbose", value=False)
+
      # Adetailer Sampler
      adetailer_sampler_options = ["Use same sampler"] + scheduler_names[:-1]
      adetailer_sampler_gui = gr.Dropdown(label="Adetailer sampler:", choices=adetailer_sampler_options, value="Use same sampler")
+
      with gr.Accordion("Detailfix A", open=False, visible=True):
          # Adetailer A
          adetailer_active_a_gui = gr.Checkbox(label="Enable Adetailer A", value=False)
 
          mask_dilation_a_gui = gr.Number(label="Mask dilation:", value=4, minimum=1)
          mask_blur_a_gui = gr.Number(label="Mask blur:", value=4, minimum=1)
          mask_padding_a_gui = gr.Number(label="Mask padding:", value=32, minimum=1)
+
      with gr.Accordion("Detailfix B", open=False, visible=True):
          # Adetailer B
          adetailer_active_b_gui = gr.Checkbox(label="Enable Adetailer B", value=False)
 
          mask_padding_b_gui = gr.Number(label="Mask padding:", value=32, minimum=1)
 
  with gr.Accordion("Other settings", open=False, visible=True):
+     image_previews_gui = gr.Checkbox(value=True, label="Image Previews")
      hires_before_adetailer_gui = gr.Checkbox(value=False, label="Hires Before Adetailer")
      hires_after_adetailer_gui = gr.Checkbox(value=True, label="Hires After Adetailer")
      generator_in_cpu_gui = gr.Checkbox(value=False, label="Generator in CPU")
 
  with gr.Accordion("More settings", open=False, visible=False):
      loop_generation_gui = gr.Slider(minimum=1, value=1, label="Loop Generation")
+     retain_task_cache_gui = gr.Checkbox(value=False, label="Retain task model in cache")
      leave_progress_bar_gui = gr.Checkbox(value=True, label="Leave Progress Bar")
      disable_progress_bar_gui = gr.Checkbox(value=False, label="Disable Progress Bar")
+     display_images_gui = gr.Checkbox(value=True, label="Display Images")
      save_generated_images_gui = gr.Checkbox(value=False, label="Save Generated Images")
      image_storage_location_gui = gr.Textbox(value="./images", label="Image Storage Location")
      retain_compel_previous_load_gui = gr.Checkbox(value=False, label="Retain Compel Previous Load")
 
  1024,
  "misri/epicrealismXL_v7FinalDestination",
  None, # vae
+ "canny ControlNet",
  "image.webp", # img conttol
  "Canny", # preprocessor
  1024, # preproc resolution
 
  512,
  "digiplay/majicMIX_realistic_v7",
  None, # vae
+ "openpose ControlNet",
  "image.webp", # img conttol
  "Canny", # preprocessor
  512, # preproc resolution
 
  brush=gr.Brush(
      default_size="16", # or leave it as 'auto'
      color_mode="fixed", # 'fixed' hides the user swatches and colorpicker, 'defaults' shows it
+     # default_color="black", # html names are supported
      colors=[
          "rgba(0, 0, 0, 1)", # rgb(a)
          "rgba(0, 0, 0, 0.1)",
 
  btn_send.click(send_img, [img_source, img_result], [image_control, image_mask_gui])
 
  generate_button.click(
+     fn=sd_gen.load_new_model,
+     inputs=[
+         model_name_gui,
+         vae_model_gui,
+         task_gui
+     ],
+     outputs=[load_model_gui],
+     queue=True,
+     show_progress="minimal",
+ ).success(
      fn=sd_gen.generate_pipeline,
      inputs=[
          prompt_gui,
 
          mask_dilation_b_gui,
          mask_blur_b_gui,
          mask_padding_b_gui,
+         retain_task_cache_gui,
+         image_ip1,
+         mask_ip1,
+         model_ip1,
+         mode_ip1,
+         scale_ip1,
+         image_ip2,
+         mask_ip2,
+         model_ip2,
+         mode_ip2,
+         scale_ip2,
      ],
+     outputs=[result_images, actual_task_info],
      queue=True,
+     show_progress="minimal",
  )
 
  app.queue()
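
For reference, below is a minimal, self-contained sketch (not taken from app.py; the names load_stub, generate_stub, load_status, result, and task_info are illustrative) of the two-stage Gradio event pattern this commit wires onto generate_button: a .click() handler that streams model-loading status, chained with .success() to a generator that yields partial results plus a status string. It assumes Gradio 4.x.

import time
import gradio as gr


def load_stub(model_name):
    # Plays the role of GuiSD.load_new_model: a generator that streams status text.
    yield f"Loading model: {model_name}"
    time.sleep(0.5)  # stand-in for the real pipeline load
    yield f"Model loaded: {model_name}"


def generate_stub(prompt):
    # Plays the role of GuiSD.generate_pipeline: yields (result, status) pairs,
    # ending with a final "COMPLETE" status once generation is done.
    for step in range(1, 4):
        time.sleep(0.5)
        yield f"{prompt} (step {step})", "PROCESSING..."
    yield f"{prompt} (done)", "COMPLETE"


with gr.Blocks() as demo:
    model_name = gr.Dropdown(["model-a", "model-b"], value="model-a", label="Model")
    prompt = gr.Textbox(label="Prompt")
    load_status = gr.HTML()   # counterpart of load_model_gui
    result = gr.Textbox(label="Result")
    task_info = gr.HTML()     # counterpart of actual_task_info
    btn = gr.Button("GENERATE", variant="primary")

    # Stage 1 streams loading status; stage 2 runs only if stage 1 succeeds.
    btn.click(
        load_stub, [model_name], [load_status],
        queue=True, show_progress="minimal",
    ).success(
        generate_stub, [prompt], [result, task_info],
        queue=True, show_progress="minimal",
    )

demo.queue()

if __name__ == "__main__":
    demo.launch()

Because both handlers are generators, Gradio streams each yield into the bound output components, which is how the updated app can show "PROCESSING..." and then "COMPLETE: seeds=..." without waiting for the whole batch to finish.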