zerhero committed on
Commit
82452b3
1 Parent(s): 706551f
Files changed (2)
  1. README.md +3 -3
  2. app.py +430 -372
README.md CHANGED
@@ -1,6 +1,6 @@
1
  ---
2
- title: 🧩 DiffuseCraft
3
- emoji: 🧩🖼️
4
  colorFrom: red
5
  colorTo: pink
6
  sdk: gradio
@@ -8,7 +8,7 @@ sdk_version: 4.31.3
8
  app_file: app.py
9
  pinned: true
10
  license: mit
11
- short_description: Stunning images using stable diffusion.
12
  ---
13
 
14
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: 😈️ Ivan's DiffuseCraft
3
+ emoji: 😈️
4
  colorFrom: red
5
  colorTo: pink
6
  sdk: gradio
 
8
  app_file: app.py
9
  pinned: true
10
  license: mit
11
+ short_description: (ivan) Stunning images using stable diffusion.
12
  ---
13
 
14
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
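For reference, piecing the `+` and context lines above back together, the updated Space frontmatter after this commit should read roughly as follows (the `sdk_version` value is taken only from the hunk context line and is assumed to be unchanged):

```yaml
---
title: 😈️ Ivan's DiffuseCraft
emoji: 😈️
colorFrom: red
colorTo: pink
sdk: gradio
sdk_version: 4.31.3  # assumed unchanged; shown only in the hunk context
app_file: app.py
pinned: true
license: mit
short_description: (ivan) Stunning images using stable diffusion.
---
```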
app.py CHANGED
@@ -25,55 +25,55 @@ from stablepy import (
25
  import urllib.parse
26
 
27
  preprocessor_controlnet = {
28
- "openpose": [
29
- "Openpose",
30
- "None",
31
- ],
32
- "scribble": [
33
- "HED",
34
- "Pidinet",
35
- "None",
36
- ],
37
- "softedge": [
38
- "Pidinet",
39
- "HED",
40
- "HED safe",
41
- "Pidinet safe",
42
- "None",
43
- ],
44
- "segmentation": [
45
- "UPerNet",
46
- "None",
47
- ],
48
- "depth": [
49
- "DPT",
50
- "Midas",
51
- "None",
52
- ],
53
- "normalbae": [
54
- "NormalBae",
55
- "None",
56
- ],
57
- "lineart": [
58
- "Lineart",
59
- "Lineart coarse",
60
- "Lineart (anime)",
61
- "None",
62
- "None (anime)",
63
- ],
64
- "shuffle": [
65
- "ContentShuffle",
66
- "None",
67
- ],
68
- "canny": [
69
- "Canny"
70
- ],
71
- "mlsd": [
72
- "MLSD"
73
- ],
74
- "ip2p": [
75
- "ip2p"
76
- ]
77
  }
78
 
79
  task_stablepy = {
@@ -106,7 +106,7 @@ task_model_list = list(task_stablepy.keys())
106
 
107
  def download_things(directory, url, hf_token="", civitai_api_key=""):
108
  url = url.strip()
109
-
110
  if "drive.google.com" in url:
111
  original_dir = os.getcwd()
112
  os.chdir(directory)
@@ -119,15 +119,18 @@ def download_things(directory, url, hf_token="", civitai_api_key=""):
119
  url = url.replace("/blob/", "/resolve/")
120
  user_header = f'"Authorization: Bearer {hf_token}"'
121
  if hf_token:
122
- os.system(f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
 
123
  else:
124
- os.system (f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
 
125
  elif "civitai.com" in url:
126
  if "?" in url:
127
  url = url.split("?")[0]
128
  if civitai_api_key:
129
  url = url + f"?token={civitai_api_key}"
130
- os.system(f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
 
131
  else:
132
  print("\033[91mYou need an API key to download Civitai models.\033[0m")
133
  else:
@@ -136,7 +139,7 @@ def download_things(directory, url, hf_token="", civitai_api_key=""):
136
 
137
  def get_model_list(directory_path):
138
  model_list = []
139
- valid_extensions = {'.ckpt' , '.pt', '.pth', '.safetensors', '.bin'}
140
 
141
  for filename in os.listdir(directory_path):
142
  if os.path.splitext(filename)[1] in valid_extensions:
@@ -228,7 +231,7 @@ download_embeds = [
228
  'https://huggingface.co/datasets/Nerfgun3/bad_prompt/blob/main/bad_prompt_version2.pt',
229
  'https://huggingface.co/embed/negative/resolve/main/EasyNegativeV2.safetensors',
230
  'https://huggingface.co/embed/negative/resolve/main/bad-hands-5.pt',
231
- ]
232
 
233
  for url_embed in download_embeds:
234
  if not os.path.exists(f"./embedings/{url_embed.split('/')[-1]}"):
@@ -243,13 +246,14 @@ lora_model_list.insert(0, "None")
243
  vae_model_list = get_model_list(directory_vaes)
244
  vae_model_list.insert(0, "None")
245
 
 
246
  def get_my_lora(link_url):
247
  for url in [url.strip() for url in link_url.split(',')]:
248
  if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
249
  download_things(directory_loras, url, hf_token, CIVITAI_API_KEY)
250
  new_lora_model_list = get_model_list(directory_loras)
251
  new_lora_model_list.insert(0, "None")
252
-
253
  return gr.update(
254
  choices=new_lora_model_list
255
  ), gr.update(
@@ -262,26 +266,27 @@ def get_my_lora(link_url):
262
  choices=new_lora_model_list
263
  ),
264
 
 
265
  print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
266
 
267
  upscaler_dict_gui = {
268
- None : None,
269
- "Lanczos" : "Lanczos",
270
- "Nearest" : "Nearest",
271
- "RealESRGAN_x4plus" : "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
272
- "RealESRNet_x4plus" : "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth",
273
  "RealESRGAN_x4plus_anime_6B": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
274
  "RealESRGAN_x2plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
275
  "realesr-animevideov3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
276
  "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
277
- "realesr-general-wdn-x4v3" : "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
278
- "4x-UltraSharp" : "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
279
- "4x_foolhardy_Remacri" : "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
280
- "Remacri4xExtraSmoother" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
281
- "AnimeSharp4x" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
282
- "lollypop" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/lollypop.pth",
283
- "RealisticRescaler4x" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/RealisticRescaler%204x.pth",
284
- "NickelbackFS4x" : "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/NickelbackFS%204x.pth"
285
  }
286
 
287
 
@@ -335,16 +340,21 @@ import IPython.display
335
  import time, json
336
  from IPython.utils import capture
337
  import logging
 
338
  logging.getLogger("diffusers").setLevel(logging.ERROR)
339
  import diffusers
 
340
  diffusers.utils.logging.set_verbosity(40)
341
  import warnings
 
342
  warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
343
  warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
344
  warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
345
  from stablepy import logger
 
346
  logger.setLevel(logging.DEBUG)
347
 
 
348
  def info_html(json_data, title, subtitle):
349
  return f"""
350
  <div style='padding: 0; border-radius: 10px;'>
@@ -356,10 +366,11 @@ def info_html(json_data, title, subtitle):
356
  </div>
357
  """
358
 
 
359
  class GuiSD:
360
  def __init__(self, stream=True):
361
  self.model = None
362
-
363
  print("Loading model...")
364
  self.model = Model_Diffusers(
365
  base_model_id="cagliostrolab/animagine-xl-3.1",
@@ -372,7 +383,7 @@ class GuiSD:
372
  def load_new_model(self, model_name, vae_model, task, progress=gr.Progress(track_tqdm=True)):
373
 
374
  yield f"Loading model: {model_name}"
375
-
376
  vae_model = vae_model if vae_model != "None" else None
377
 
378
  if model_name in model_list:
@@ -384,7 +395,6 @@ class GuiSD:
384
  if incompatible_vae:
385
  vae_model = None
386
 
387
-
388
  self.model.load_pipe(
389
  model_name,
390
  task_name=task_stablepy[task],
@@ -393,119 +403,118 @@ class GuiSD:
393
  retain_task_model_in_cache=False,
394
  )
395
  yield f"Model loaded: {model_name}"
396
-
397
  @spaces.GPU
398
  def generate_pipeline(
399
- self,
400
- prompt,
401
- neg_prompt,
402
- num_images,
403
- steps,
404
- cfg,
405
- clip_skip,
406
- seed,
407
- lora1,
408
- lora_scale1,
409
- lora2,
410
- lora_scale2,
411
- lora3,
412
- lora_scale3,
413
- lora4,
414
- lora_scale4,
415
- lora5,
416
- lora_scale5,
417
- sampler,
418
- img_height,
419
- img_width,
420
- model_name,
421
- vae_model,
422
- task,
423
- image_control,
424
- preprocessor_name,
425
- preprocess_resolution,
426
- image_resolution,
427
- style_prompt, # list []
428
- style_json_file,
429
- image_mask,
430
- strength,
431
- low_threshold,
432
- high_threshold,
433
- value_threshold,
434
- distance_threshold,
435
- controlnet_output_scaling_in_unet,
436
- controlnet_start_threshold,
437
- controlnet_stop_threshold,
438
- textual_inversion,
439
- syntax_weights,
440
- upscaler_model_path,
441
- upscaler_increases_size,
442
- esrgan_tile,
443
- esrgan_tile_overlap,
444
- hires_steps,
445
- hires_denoising_strength,
446
- hires_sampler,
447
- hires_prompt,
448
- hires_negative_prompt,
449
- hires_before_adetailer,
450
- hires_after_adetailer,
451
- loop_generation,
452
- leave_progress_bar,
453
- disable_progress_bar,
454
- image_previews,
455
- display_images,
456
- save_generated_images,
457
- image_storage_location,
458
- retain_compel_previous_load,
459
- retain_detailfix_model_previous_load,
460
- retain_hires_model_previous_load,
461
- t2i_adapter_preprocessor,
462
- t2i_adapter_conditioning_scale,
463
- t2i_adapter_conditioning_factor,
464
- xformers_memory_efficient_attention,
465
- freeu,
466
- generator_in_cpu,
467
- adetailer_inpaint_only,
468
- adetailer_verbose,
469
- adetailer_sampler,
470
- adetailer_active_a,
471
- prompt_ad_a,
472
- negative_prompt_ad_a,
473
- strength_ad_a,
474
- face_detector_ad_a,
475
- person_detector_ad_a,
476
- hand_detector_ad_a,
477
- mask_dilation_a,
478
- mask_blur_a,
479
- mask_padding_a,
480
- adetailer_active_b,
481
- prompt_ad_b,
482
- negative_prompt_ad_b,
483
- strength_ad_b,
484
- face_detector_ad_b,
485
- person_detector_ad_b,
486
- hand_detector_ad_b,
487
- mask_dilation_b,
488
- mask_blur_b,
489
- mask_padding_b,
490
- retain_task_cache_gui,
491
- image_ip1,
492
- mask_ip1,
493
- model_ip1,
494
- mode_ip1,
495
- scale_ip1,
496
- image_ip2,
497
- mask_ip2,
498
- model_ip2,
499
- mode_ip2,
500
- scale_ip2,
501
  ):
502
-
503
  vae_model = vae_model if vae_model != "None" else None
504
  loras_list = [lora1, lora2, lora3, lora4, lora5]
505
  vae_msg = f"VAE: {vae_model}" if vae_model else ""
506
  msg_lora = []
507
 
508
-
509
  if model_name in model_list:
510
  model_is_xl = "xl" in model_name.lower()
511
  sdxl_in_vae = vae_model and "sdxl" in vae_model.lower()
@@ -514,8 +523,8 @@ class GuiSD:
514
 
515
  if incompatible_vae:
516
  msg_inc_vae = (
517
- f"The selected VAE is for a { 'SD 1.5' if model_is_xl else 'SDXL' } model, but you"
518
- f" are using a { model_type } model. The default VAE "
519
  "will be used."
520
  )
521
  gr.Info(msg_inc_vae)
@@ -527,7 +536,7 @@ class GuiSD:
527
  print(la)
528
  lora_type = ("animetarot" in la.lower() or "Hyper-SD15-8steps".lower() in la.lower())
529
  if (model_is_xl and lora_type) or (not model_is_xl and not lora_type):
530
- msg_inc_lora = f"The LoRA {la} is for { 'SD 1.5' if model_is_xl else 'SDXL' }, but you are using { model_type }."
531
  gr.Info(msg_inc_lora)
532
  msg_lora.append(msg_inc_lora)
533
 
@@ -568,7 +577,10 @@ class GuiSD:
568
  )
569
 
570
  if task != "txt2img" and not image_control:
571
- raise ValueError("No control image found: To use this function, you have to upload an image in 'Image ControlNet/Inpaint/Img2img'")
 
 
 
572
 
573
  if task == "inpaint" and not image_mask:
574
  raise ValueError("No mask image found: Specify one in 'Image Mask'")
@@ -602,31 +614,31 @@ class GuiSD:
602
  print("No Textual inversion for SDXL")
603
 
604
  adetailer_params_A = {
605
- "face_detector_ad" : face_detector_ad_a,
606
- "person_detector_ad" : person_detector_ad_a,
607
- "hand_detector_ad" : hand_detector_ad_a,
608
  "prompt": prompt_ad_a,
609
- "negative_prompt" : negative_prompt_ad_a,
610
- "strength" : strength_ad_a,
611
  # "image_list_task" : None,
612
- "mask_dilation" : mask_dilation_a,
613
- "mask_blur" : mask_blur_a,
614
- "mask_padding" : mask_padding_a,
615
- "inpaint_only" : adetailer_inpaint_only,
616
- "sampler" : adetailer_sampler,
617
  }
618
 
619
  adetailer_params_B = {
620
- "face_detector_ad" : face_detector_ad_b,
621
- "person_detector_ad" : person_detector_ad_b,
622
- "hand_detector_ad" : hand_detector_ad_b,
623
  "prompt": prompt_ad_b,
624
- "negative_prompt" : negative_prompt_ad_b,
625
- "strength" : strength_ad_b,
626
  # "image_list_task" : None,
627
- "mask_dilation" : mask_dilation_b,
628
- "mask_blur" : mask_blur_b,
629
- "mask_padding" : mask_padding_b,
630
  }
631
  pipe_params = {
632
  "prompt": prompt,
@@ -709,8 +721,9 @@ class GuiSD:
709
 
710
  random_number = random.randint(1, 100)
711
  if random_number < 25 and num_images < 3:
712
- if not upscaler_model and steps < 45 and task in ["txt2img", "img2img"] and not adetailer_active_a and not adetailer_active_b:
713
- num_images *=2
714
  pipe_params["num_images"] = num_images
715
  gr.Info("Num images x 2 🎉")
716
 
@@ -731,13 +744,15 @@ class GuiSD:
731
 
732
  sd_gen = GuiSD()
733
 
734
- CSS ="""
735
  .contain { display: flex; flex-direction: column; }
736
  #component-0 { height: 100%; }
737
  #gallery { flex-grow: 1; }
738
  """
739
- sdxl_task = [k for k, v in task_stablepy.items() if v in SDXL_TASKS ]
740
- sd_task = [k for k, v in task_stablepy.items() if v in SD15_TASKS ]
741
  def update_task_options(model_name, task_name):
742
  if model_name in model_list:
743
  if "xl" in model_name.lower():
@@ -754,7 +769,7 @@ def update_task_options(model_name, task_name):
754
 
755
 
756
  with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
757
- gr.Markdown("# 🧩 DiffuseCraft")
758
  gr.Markdown(
759
  f"""
760
  ### This demo uses [diffusers](https://github.com/huggingface/diffusers) to perform different tasks in image generation.
@@ -766,7 +781,8 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
766
  with gr.Column(scale=2):
767
 
768
  task_gui = gr.Dropdown(label="Task", choices=sdxl_task, value=task_model_list[0])
769
- model_name_gui = gr.Dropdown(label="Model", choices=model_list, value=model_list[0], allow_custom_value=True)
770
  prompt_gui = gr.Textbox(lines=5, placeholder="Enter prompt", label="Prompt")
771
  neg_prompt_gui = gr.Textbox(lines=3, placeholder="Enter Neg prompt", label="Negative prompt")
772
  with gr.Row(equal_height=False):
@@ -774,7 +790,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
774
  clear_prompt_gui = gr.Button(value="🗑️")
775
  set_random_seed = gr.Button(value="🎲")
776
  generate_button = gr.Button(value="GENERATE", variant="primary")
777
-
778
  model_name_gui.change(
779
  update_task_options,
780
  [model_name_gui, task_gui],
@@ -782,7 +798,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
782
  )
783
 
784
  load_model_gui = gr.HTML()
785
-
786
  result_images = gr.Gallery(
787
  label="Generated images",
788
  show_label=False,
@@ -797,7 +813,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
797
  )
798
 
799
  actual_task_info = gr.HTML()
800
-
801
  with gr.Column(scale=1):
802
  steps_gui = gr.Slider(minimum=1, maximum=100, step=1, value=30, label="Steps")
803
  cfg_gui = gr.Slider(minimum=0, maximum=30, step=0.5, value=7.5, label="CFG")
@@ -810,8 +826,6 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
810
  free_u_gui = gr.Checkbox(value=True, label="FreeU")
811
 
812
  with gr.Row(equal_height=False):
813
-
814
-
815
 
816
  def run_set_params_gui(base_prompt):
817
  valid_receptors = { # default values
@@ -822,7 +836,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
822
  "height": gr.update(value=1024),
823
  "Seed": gr.update(value=-1),
824
  "Sampler": gr.update(value="Euler a"),
825
- "scale": gr.update(value=7.5), # cfg
826
  "skip": gr.update(value=True),
827
  }
828
  valid_keys = list(valid_receptors.keys())
@@ -832,15 +846,15 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
832
  # print(val)
833
  if key in valid_keys:
834
  if key == "Sampler":
835
- if val not in scheduler_names:
836
- continue
837
  elif key == "skip":
838
- if int(val) >= 2:
839
- val = True
840
  if key == "prompt":
841
- if ">" in val and "<" in val:
842
- val = re.sub(r'<[^>]+>', '', val)
843
- print("Removed LoRA written in the prompt")
844
  if key in ["prompt", "neg_prompt"]:
845
  val = val.strip()
846
  if key in ["Steps", "width", "height", "Seed"]:
@@ -854,8 +868,9 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
854
  # print(valid_receptors)
855
  return [value for value in valid_receptors.values()]
856
 
 
857
  set_params_gui.click(
858
- run_set_params_gui, [prompt_gui],[
859
  prompt_gui,
860
  neg_prompt_gui,
861
  steps_gui,
@@ -867,16 +882,21 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
867
  clip_skip_gui,
868
  ],
869
  )
870
-
871
-
872
  def run_clear_prompt_gui():
873
  return gr.update(value=""), gr.update(value="")
874
  clear_prompt_gui.click(
875
  run_clear_prompt_gui, [], [prompt_gui, neg_prompt_gui]
876
  )
877
 
 
878
  def run_set_random_seed():
879
  return -1
880
  set_random_seed.click(
881
  run_set_random_seed, [], seed_gui
882
  )
@@ -890,22 +910,30 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
890
  ("Classic-ignore", "Classic-ignore"),
891
  ("None", "None"),
892
  ]
893
- prompt_syntax_gui = gr.Dropdown(label="Prompt Syntax", choices=prompt_s_options, value=prompt_s_options[0][1])
894
  vae_model_gui = gr.Dropdown(label="VAE Model", choices=vae_model_list)
895
 
896
  with gr.Accordion("Hires fix", open=False, visible=True):
897
 
898
  upscaler_keys = list(upscaler_dict_gui.keys())
899
 
900
- upscaler_model_path_gui = gr.Dropdown(label="Upscaler", choices=upscaler_keys, value=upscaler_keys[0])
901
- upscaler_increases_size_gui = gr.Slider(minimum=1.1, maximum=6., step=0.1, value=1.4, label="Upscale by")
902
  esrgan_tile_gui = gr.Slider(minimum=0, value=100, maximum=500, step=1, label="ESRGAN Tile")
903
- esrgan_tile_overlap_gui = gr.Slider(minimum=1, maximum=200, step=1, value=10, label="ESRGAN Tile Overlap")
904
  hires_steps_gui = gr.Slider(minimum=0, value=30, maximum=100, step=1, label="Hires Steps")
905
- hires_denoising_strength_gui = gr.Slider(minimum=0.1, maximum=1.0, step=0.01, value=0.55, label="Hires Denoising Strength")
906
- hires_sampler_gui = gr.Dropdown(label="Hires Sampler", choices=["Use same sampler"] + scheduler_names[:-1], value="Use same sampler")
907
  hires_prompt_gui = gr.Textbox(label="Hires Prompt", placeholder="Main prompt will be use", lines=3)
908
- hires_negative_prompt_gui = gr.Textbox(label="Hires Negative Prompt", placeholder="Main negative prompt will be use", lines=3)
909
 
910
  with gr.Accordion("LoRA", open=False, visible=True):
911
  lora1_gui = gr.Dropdown(label="Lora1", choices=lora_model_list)
@@ -928,7 +956,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
928
  [lora1_gui, lora2_gui, lora3_gui, lora4_gui, lora5_gui]
929
  )
930
 
931
- with gr.Accordion("IP-Adapter", open=False, visible=True):##############
932
 
933
  IP_MODELS = sorted(list(set(IP_ADAPTERS_SD + IP_ADAPTERS_SDXL)))
934
  MODE_IP_OPTIONS = ["original", "style", "layout", "style+layout"]
@@ -953,8 +981,11 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
953
  minimum=0.01, maximum=1.0, step=0.01, value=0.55, label="Strength",
954
  info="This option adjusts the level of changes for img2img and inpainting."
955
  )
956
- image_resolution_gui = gr.Slider(minimum=64, maximum=2048, step=64, value=1024, label="Image Resolution")
957
- preprocessor_name_gui = gr.Dropdown(label="Preprocessor Name", choices=preprocessor_controlnet["canny"])
958
 
959
  def change_preprocessor_choices(task):
960
  task = task_stablepy[task]
@@ -964,24 +995,35 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
964
  choices_task = preprocessor_controlnet["canny"]
965
  return gr.update(choices=choices_task, value=choices_task[0])
966
 
 
967
  task_gui.change(
968
  change_preprocessor_choices,
969
  [task_gui],
970
  [preprocessor_name_gui],
971
  )
972
- preprocess_resolution_gui = gr.Slider(minimum=64, maximum=2048, step=64, value=512, label="Preprocess Resolution")
973
- low_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=100, label="Canny low threshold")
974
- high_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=200, label="Canny high threshold")
975
- value_threshold_gui = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1, label="Hough value threshold (MLSD)")
976
- distance_threshold_gui = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1, label="Hough distance threshold (MLSD)")
977
- control_net_output_scaling_gui = gr.Slider(minimum=0, maximum=5.0, step=0.1, value=1, label="ControlNet Output Scaling in UNet")
978
- control_net_start_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=0, label="ControlNet Start Threshold (%)")
979
- control_net_stop_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="ControlNet Stop Threshold (%)")
980
 
981
  with gr.Accordion("T2I adapter", open=False, visible=True):
982
  t2i_adapter_preprocessor_gui = gr.Checkbox(value=True, label="T2i Adapter Preprocessor")
983
- adapter_conditioning_scale_gui = gr.Slider(minimum=0, maximum=5., step=0.1, value=1, label="Adapter Conditioning Scale")
984
- adapter_conditioning_factor_gui = gr.Slider(minimum=0, maximum=1., step=0.01, value=0.55, label="Adapter Conditioning Factor (%)")
985
 
986
  with gr.Accordion("Styles", open=False, visible=True):
987
 
@@ -1000,6 +1042,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1000
  style_json_gui = gr.File(label="Style JSON File")
1001
  style_button = gr.Button("Load styles")
1002
 
 
1003
  def load_json_style_file(json):
1004
  if not sd_gen.model:
1005
  gr.Info("First load the model")
@@ -1009,7 +1052,8 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1009
  gr.Info(f"{len(sd_gen.model.STYLE_NAMES)} styles loaded")
1010
  return gr.update(value=None, choices=sd_gen.model.STYLE_NAMES)
1011
 
1012
- style_button.click(load_json_style_file, [style_json_gui], [style_prompt_gui])
1013
 
1014
  with gr.Accordion("Textual inversion", open=False, visible=False):
1015
  active_textual_inversion_gui = gr.Checkbox(value=False, label="Active Textual Inversion in prompt")
@@ -1024,14 +1068,18 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1024
 
1025
  # Adetailer Sampler
1026
  adetailer_sampler_options = ["Use same sampler"] + scheduler_names[:-1]
1027
- adetailer_sampler_gui = gr.Dropdown(label="Adetailer sampler:", choices=adetailer_sampler_options, value="Use same sampler")
1028
 
1029
  with gr.Accordion("Detailfix A", open=False, visible=True):
1030
  # Adetailer A
1031
  adetailer_active_a_gui = gr.Checkbox(label="Enable Adetailer A", value=False)
1032
- prompt_ad_a_gui = gr.Textbox(label="Main prompt", placeholder="Main prompt will be use", lines=3)
1033
- negative_prompt_ad_a_gui = gr.Textbox(label="Negative prompt", placeholder="Main negative prompt will be use", lines=3)
1034
- strength_ad_a_gui = gr.Number(label="Strength:", value=0.35, step=0.01, minimum=0.01, maximum=1.0)
1035
  face_detector_ad_a_gui = gr.Checkbox(label="Face detector", value=True)
1036
  person_detector_ad_a_gui = gr.Checkbox(label="Person detector", value=True)
1037
  hand_detector_ad_a_gui = gr.Checkbox(label="Hand detector", value=False)
@@ -1042,9 +1090,12 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1042
  with gr.Accordion("Detailfix B", open=False, visible=True):
1043
  # Adetailer B
1044
  adetailer_active_b_gui = gr.Checkbox(label="Enable Adetailer B", value=False)
1045
- prompt_ad_b_gui = gr.Textbox(label="Main prompt", placeholder="Main prompt will be use", lines=3)
1046
- negative_prompt_ad_b_gui = gr.Textbox(label="Negative prompt", placeholder="Main negative prompt will be use", lines=3)
1047
- strength_ad_b_gui = gr.Number(label="Strength:", value=0.35, step=0.01, minimum=0.01, maximum=1.0)
1048
  face_detector_ad_b_gui = gr.Checkbox(label="Face detector", value=True)
1049
  person_detector_ad_b_gui = gr.Checkbox(label="Person detector", value=True)
1050
  hand_detector_ad_b_gui = gr.Checkbox(label="Hand detector", value=False)
@@ -1067,9 +1118,12 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1067
  save_generated_images_gui = gr.Checkbox(value=False, label="Save Generated Images")
1068
  image_storage_location_gui = gr.Textbox(value="./images", label="Image Storage Location")
1069
  retain_compel_previous_load_gui = gr.Checkbox(value=False, label="Retain Compel Previous Load")
1070
- retain_detailfix_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Detailfix Model Previous Load")
1071
- retain_hires_model_previous_load_gui = gr.Checkbox(value=False, label="Retain Hires Model Previous Load")
1072
- xformers_memory_efficient_attention_gui = gr.Checkbox(value=False, label="Xformers Memory Efficient Attention")
1073
 
1074
  with gr.Accordion("Examples and help", open=False, visible=True):
1075
  gr.Markdown(
@@ -1106,7 +1160,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1106
  None,
1107
  1.0,
1108
  None,
1109
- 1.0,
1110
  None,
1111
  1.0,
1112
  None,
@@ -1115,24 +1169,24 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1115
  1152,
1116
  896,
1117
  "cagliostrolab/animagine-xl-3.1",
1118
- None, # vae
1119
  "txt2img",
1120
- None, # img conttol
1121
- "Canny", # preprocessor
1122
- 512, # preproc resolution
1123
- 1024, # img resolution
1124
- None, # Style prompt
1125
- None, # Style json
1126
- None, # img Mask
1127
- 0.35, # strength
1128
- 100, # low th canny
1129
- 200, # high th canny
1130
- 0.1, # value mstd
1131
- 0.1, # distance mstd
1132
- 1.0, # cn scale
1133
- 0., # cn start
1134
- 1., # cn end
1135
- False, # ti
1136
  "Classic",
1137
  "Nearest",
1138
  ],
@@ -1149,7 +1203,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1149
  None,
1150
  1.0,
1151
  None,
1152
- 1.0,
1153
  None,
1154
  1.0,
1155
  None,
@@ -1158,24 +1212,24 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1158
  1024,
1159
  1024,
1160
  "kitty7779/ponyDiffusionV6XL",
1161
- None, # vae
1162
  "txt2img",
1163
- None, # img conttol
1164
- "Canny", # preprocessor
1165
- 512, # preproc resolution
1166
- 1024, # img resolution
1167
- None, # Style prompt
1168
- None, # Style json
1169
- None, # img Mask
1170
- 0.35, # strength
1171
- 100, # low th canny
1172
- 200, # high th canny
1173
- 0.1, # value mstd
1174
- 0.1, # distance mstd
1175
- 1.0, # cn scale
1176
- 0., # cn start
1177
- 1., # cn end
1178
- False, # ti
1179
  "Classic",
1180
  "Nearest",
1181
  ],
@@ -1192,7 +1246,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1192
  None,
1193
  1.0,
1194
  None,
1195
- 1.0,
1196
  None,
1197
  1.0,
1198
  None,
@@ -1201,24 +1255,24 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1201
  1024,
1202
  1024,
1203
  "misri/epicrealismXL_v7FinalDestination",
1204
- None, # vae
1205
  "canny ControlNet",
1206
- "image.webp", # img conttol
1207
- "Canny", # preprocessor
1208
- 1024, # preproc resolution
1209
- 1024, # img resolution
1210
- None, # Style prompt
1211
- None, # Style json
1212
- None, # img Mask
1213
- 0.35, # strength
1214
- 100, # low th canny
1215
- 200, # high th canny
1216
- 0.1, # value mstd
1217
- 0.1, # distance mstd
1218
- 1.0, # cn scale
1219
- 0., # cn start
1220
- 1., # cn end
1221
- False, # ti
1222
  "Classic",
1223
  None,
1224
  ],
@@ -1235,7 +1289,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1235
  None,
1236
  1.0,
1237
  None,
1238
- 1.0,
1239
  None,
1240
  1.0,
1241
  None,
@@ -1244,24 +1298,24 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1244
  1024,
1245
  1024,
1246
  "misri/juggernautXL_juggernautX",
1247
- None, # vae
1248
  "optical pattern ControlNet",
1249
- "spiral_no_transparent.png", # img conttol
1250
- "Canny", # preprocessor
1251
- 512, # preproc resolution
1252
- 1024, # img resolution
1253
- None, # Style prompt
1254
- None, # Style json
1255
- None, # img Mask
1256
- 0.35, # strength
1257
- 100, # low th canny
1258
- 200, # high th canny
1259
- 0.1, # value mstd
1260
- 0.1, # distance mstd
1261
- 1.0, # cn scale
1262
- 0.05, # cn start
1263
- 0.75, # cn end
1264
- False, # ti
1265
  "Classic",
1266
  None,
1267
  ],
@@ -1278,7 +1332,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1278
  None,
1279
  1.0,
1280
  None,
1281
- 1.0,
1282
  None,
1283
  1.0,
1284
  None,
@@ -1287,24 +1341,24 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1287
  1024,
1288
  1024,
1289
  "cagliostrolab/animagine-xl-3.1",
1290
- None, # vae
1291
  "lineart ControlNet",
1292
- "color_image.png", # img conttol
1293
- "Lineart", # preprocessor
1294
- 512, # preproc resolution
1295
- 896, # img resolution
1296
- None, # Style prompt
1297
- None, # Style json
1298
- None, # img Mask
1299
- 0.35, # strength
1300
- 100, # low th canny
1301
- 200, # high th canny
1302
- 0.1, # value mstd
1303
- 0.1, # distance mstd
1304
- 1.0, # cn scale
1305
- 0., # cn start
1306
- 1., # cn end
1307
- False, # ti
1308
  "Compel",
1309
  None,
1310
  ],
@@ -1321,7 +1375,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1321
  None,
1322
  1.0,
1323
  None,
1324
- 1.0,
1325
  None,
1326
  1.0,
1327
  None,
@@ -1330,24 +1384,24 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1330
  512,
1331
  512,
1332
  "digiplay/majicMIX_realistic_v7",
1333
- None, # vae
1334
  "openpose ControlNet",
1335
- "image.webp", # img conttol
1336
- "Canny", # preprocessor
1337
- 512, # preproc resolution
1338
- 1024, # img resolution
1339
- None, # Style prompt
1340
- None, # Style json
1341
- None, # img Mask
1342
- 0.35, # strength
1343
- 100, # low th canny
1344
- 200, # high th canny
1345
- 0.1, # value mstd
1346
- 0.1, # distance mstd
1347
- 1.0, # cn scale
1348
- 0., # cn start
1349
- 0.9, # cn end
1350
- False, # ti
1351
  "Compel",
1352
  "Nearest",
1353
  ],
@@ -1402,7 +1456,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1402
 
1403
  with gr.Tab("Inpaint mask maker", render=True):
1404
 
1405
- def create_mask_now(img, invert):
1406
  import numpy as np
1407
  import time
1408
 
@@ -1429,6 +1483,7 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1429
 
1430
  return img["background"], rgb_mask
1431
 
 
1432
  with gr.Row():
1433
  with gr.Column(scale=2):
1434
  # image_base = gr.ImageEditor(label="Base image", show_label=True, brush=gr.Brush(colors=["#000000"]))
@@ -1438,15 +1493,15 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1438
  # enable crop (or disable it)
1439
  # transforms=["crop"],
1440
  brush=gr.Brush(
1441
- default_size="16", # or leave it as 'auto'
1442
- color_mode="fixed", # 'fixed' hides the user swatches and colorpicker, 'defaults' shows it
1443
- # default_color="black", # html names are supported
1444
- colors=[
1445
- "rgba(0, 0, 0, 1)", # rgb(a)
1446
- "rgba(0, 0, 0, 0.1)",
1447
- "rgba(255, 255, 255, 0.1)",
1448
- # "hsl(360, 120, 120)" # in fact any valid colorstring
1449
- ]
1450
  ),
1451
  eraser=gr.Eraser(default_size="16")
1452
  )
@@ -1459,10 +1514,13 @@ with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
1459
 
1460
  btn.click(create_mask_now, [image_base, invert_mask], [img_source, img_result])
1461
 
 
1462
  def send_img(img_source, img_result):
1463
  return img_source, img_result
 
 
1464
  btn_send.click(send_img, [img_source, img_result], [image_control, image_mask_gui])
1465
-
1466
  generate_button.click(
1467
  fn=sd_gen.load_new_model,
1468
  inputs=[
 
25
  import urllib.parse
26
 
27
  preprocessor_controlnet = {
28
+ "openpose": [
29
+ "Openpose",
30
+ "None",
31
+ ],
32
+ "scribble": [
33
+ "HED",
34
+ "Pidinet",
35
+ "None",
36
+ ],
37
+ "softedge": [
38
+ "Pidinet",
39
+ "HED",
40
+ "HED safe",
41
+ "Pidinet safe",
42
+ "None",
43
+ ],
44
+ "segmentation": [
45
+ "UPerNet",
46
+ "None",
47
+ ],
48
+ "depth": [
49
+ "DPT",
50
+ "Midas",
51
+ "None",
52
+ ],
53
+ "normalbae": [
54
+ "NormalBae",
55
+ "None",
56
+ ],
57
+ "lineart": [
58
+ "Lineart",
59
+ "Lineart coarse",
60
+ "Lineart (anime)",
61
+ "None",
62
+ "None (anime)",
63
+ ],
64
+ "shuffle": [
65
+ "ContentShuffle",
66
+ "None",
67
+ ],
68
+ "canny": [
69
+ "Canny"
70
+ ],
71
+ "mlsd": [
72
+ "MLSD"
73
+ ],
74
+ "ip2p": [
75
+ "ip2p"
76
+ ]
77
  }
78
 
79
  task_stablepy = {
 
106
 
107
  def download_things(directory, url, hf_token="", civitai_api_key=""):
108
  url = url.strip()
109
+
110
  if "drive.google.com" in url:
111
  original_dir = os.getcwd()
112
  os.chdir(directory)
 
119
  url = url.replace("/blob/", "/resolve/")
120
  user_header = f'"Authorization: Bearer {hf_token}"'
121
  if hf_token:
122
+ os.system(
123
+ f"aria2c --console-log-level=error --summary-interval=10 --header={user_header} -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
124
  else:
125
+ os.system(
126
+ f"aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 {url} -d {directory} -o {url.split('/')[-1]}")
127
  elif "civitai.com" in url:
128
  if "?" in url:
129
  url = url.split("?")[0]
130
  if civitai_api_key:
131
  url = url + f"?token={civitai_api_key}"
132
+ os.system(
133
+ f"aria2c --console-log-level=error --summary-interval=10 -c -x 16 -k 1M -s 16 -d {directory} {url}")
134
  else:
135
  print("\033[91mYou need an API key to download Civitai models.\033[0m")
136
  else:
 
139
 
140
  def get_model_list(directory_path):
141
  model_list = []
142
+ valid_extensions = {'.ckpt', '.pt', '.pth', '.safetensors', '.bin'}
143
 
144
  for filename in os.listdir(directory_path):
145
  if os.path.splitext(filename)[1] in valid_extensions:
 
231
  'https://huggingface.co/datasets/Nerfgun3/bad_prompt/blob/main/bad_prompt_version2.pt',
232
  'https://huggingface.co/embed/negative/resolve/main/EasyNegativeV2.safetensors',
233
  'https://huggingface.co/embed/negative/resolve/main/bad-hands-5.pt',
234
+ ]
235
 
236
  for url_embed in download_embeds:
237
  if not os.path.exists(f"./embedings/{url_embed.split('/')[-1]}"):
 
246
  vae_model_list = get_model_list(directory_vaes)
247
  vae_model_list.insert(0, "None")
248
 
249
+
250
  def get_my_lora(link_url):
251
  for url in [url.strip() for url in link_url.split(',')]:
252
  if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
253
  download_things(directory_loras, url, hf_token, CIVITAI_API_KEY)
254
  new_lora_model_list = get_model_list(directory_loras)
255
  new_lora_model_list.insert(0, "None")
256
+
257
  return gr.update(
258
  choices=new_lora_model_list
259
  ), gr.update(
 
266
  choices=new_lora_model_list
267
  ),
268
 
269
+
270
  print('\033[33m🏁 Download and listing of valid models completed.\033[0m')
271
 
272
  upscaler_dict_gui = {
273
+ None: None,
274
+ "Lanczos": "Lanczos",
275
+ "Nearest": "Nearest",
276
+ "RealESRGAN_x4plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
277
+ "RealESRNet_x4plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth",
278
  "RealESRGAN_x4plus_anime_6B": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
279
  "RealESRGAN_x2plus": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
280
  "realesr-animevideov3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
281
  "realesr-general-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
282
+ "realesr-general-wdn-x4v3": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth",
283
+ "4x-UltraSharp": "https://huggingface.co/Shandypur/ESRGAN-4x-UltraSharp/resolve/main/4x-UltraSharp.pth",
284
+ "4x_foolhardy_Remacri": "https://huggingface.co/FacehugmanIII/4x_foolhardy_Remacri/resolve/main/4x_foolhardy_Remacri.pth",
285
+ "Remacri4xExtraSmoother": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/Remacri%204x%20ExtraSmoother.pth",
286
+ "AnimeSharp4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/AnimeSharp%204x.pth",
287
+ "lollypop": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/lollypop.pth",
288
+ "RealisticRescaler4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/RealisticRescaler%204x.pth",
289
+ "NickelbackFS4x": "https://huggingface.co/hollowstrawberry/upscalers-backup/resolve/main/ESRGAN/NickelbackFS%204x.pth"
290
  }
291
 
292
 
 
340
  import time, json
341
  from IPython.utils import capture
342
  import logging
343
+
344
  logging.getLogger("diffusers").setLevel(logging.ERROR)
345
  import diffusers
346
+
347
  diffusers.utils.logging.set_verbosity(40)
348
  import warnings
349
+
350
  warnings.filterwarnings(action="ignore", category=FutureWarning, module="diffusers")
351
  warnings.filterwarnings(action="ignore", category=UserWarning, module="diffusers")
352
  warnings.filterwarnings(action="ignore", category=FutureWarning, module="transformers")
353
  from stablepy import logger
354
+
355
  logger.setLevel(logging.DEBUG)
356
 
357
+
358
  def info_html(json_data, title, subtitle):
359
  return f"""
360
  <div style='padding: 0; border-radius: 10px;'>
 
366
  </div>
367
  """
368
 
369
+
370
  class GuiSD:
371
  def __init__(self, stream=True):
372
  self.model = None
373
+
374
  print("Loading model...")
375
  self.model = Model_Diffusers(
376
  base_model_id="cagliostrolab/animagine-xl-3.1",
 
383
  def load_new_model(self, model_name, vae_model, task, progress=gr.Progress(track_tqdm=True)):
384
 
385
  yield f"Loading model: {model_name}"
386
+
387
  vae_model = vae_model if vae_model != "None" else None
388
 
389
  if model_name in model_list:
 
395
  if incompatible_vae:
396
  vae_model = None
397
 
 
398
  self.model.load_pipe(
399
  model_name,
400
  task_name=task_stablepy[task],
 
403
  retain_task_model_in_cache=False,
404
  )
405
  yield f"Model loaded: {model_name}"
406
+
407
  @spaces.GPU
408
  def generate_pipeline(
409
+ self,
410
+ prompt,
411
+ neg_prompt,
412
+ num_images,
413
+ steps,
414
+ cfg,
415
+ clip_skip,
416
+ seed,
417
+ lora1,
418
+ lora_scale1,
419
+ lora2,
420
+ lora_scale2,
421
+ lora3,
422
+ lora_scale3,
423
+ lora4,
424
+ lora_scale4,
425
+ lora5,
426
+ lora_scale5,
427
+ sampler,
428
+ img_height,
429
+ img_width,
430
+ model_name,
431
+ vae_model,
432
+ task,
433
+ image_control,
434
+ preprocessor_name,
435
+ preprocess_resolution,
436
+ image_resolution,
437
+ style_prompt, # list []
438
+ style_json_file,
439
+ image_mask,
440
+ strength,
441
+ low_threshold,
442
+ high_threshold,
443
+ value_threshold,
444
+ distance_threshold,
445
+ controlnet_output_scaling_in_unet,
446
+ controlnet_start_threshold,
447
+ controlnet_stop_threshold,
448
+ textual_inversion,
449
+ syntax_weights,
450
+ upscaler_model_path,
451
+ upscaler_increases_size,
452
+ esrgan_tile,
453
+ esrgan_tile_overlap,
454
+ hires_steps,
455
+ hires_denoising_strength,
456
+ hires_sampler,
457
+ hires_prompt,
458
+ hires_negative_prompt,
459
+ hires_before_adetailer,
460
+ hires_after_adetailer,
461
+ loop_generation,
462
+ leave_progress_bar,
463
+ disable_progress_bar,
464
+ image_previews,
465
+ display_images,
466
+ save_generated_images,
467
+ image_storage_location,
468
+ retain_compel_previous_load,
469
+ retain_detailfix_model_previous_load,
470
+ retain_hires_model_previous_load,
471
+ t2i_adapter_preprocessor,
472
+ t2i_adapter_conditioning_scale,
473
+ t2i_adapter_conditioning_factor,
474
+ xformers_memory_efficient_attention,
475
+ freeu,
476
+ generator_in_cpu,
477
+ adetailer_inpaint_only,
478
+ adetailer_verbose,
479
+ adetailer_sampler,
480
+ adetailer_active_a,
481
+ prompt_ad_a,
482
+ negative_prompt_ad_a,
483
+ strength_ad_a,
484
+ face_detector_ad_a,
485
+ person_detector_ad_a,
486
+ hand_detector_ad_a,
487
+ mask_dilation_a,
488
+ mask_blur_a,
489
+ mask_padding_a,
490
+ adetailer_active_b,
491
+ prompt_ad_b,
492
+ negative_prompt_ad_b,
493
+ strength_ad_b,
494
+ face_detector_ad_b,
495
+ person_detector_ad_b,
496
+ hand_detector_ad_b,
497
+ mask_dilation_b,
498
+ mask_blur_b,
499
+ mask_padding_b,
500
+ retain_task_cache_gui,
501
+ image_ip1,
502
+ mask_ip1,
503
+ model_ip1,
504
+ mode_ip1,
505
+ scale_ip1,
506
+ image_ip2,
507
+ mask_ip2,
508
+ model_ip2,
509
+ mode_ip2,
510
+ scale_ip2,
511
  ):
512
+
513
  vae_model = vae_model if vae_model != "None" else None
514
  loras_list = [lora1, lora2, lora3, lora4, lora5]
515
  vae_msg = f"VAE: {vae_model}" if vae_model else ""
516
  msg_lora = []
517
 
 
518
  if model_name in model_list:
519
  model_is_xl = "xl" in model_name.lower()
520
  sdxl_in_vae = vae_model and "sdxl" in vae_model.lower()
 
523
 
524
  if incompatible_vae:
525
  msg_inc_vae = (
526
+ f"The selected VAE is for a {'SD 1.5' if model_is_xl else 'SDXL'} model, but you"
527
+ f" are using a {model_type} model. The default VAE "
528
  "will be used."
529
  )
530
  gr.Info(msg_inc_vae)
 
536
  print(la)
537
  lora_type = ("animetarot" in la.lower() or "Hyper-SD15-8steps".lower() in la.lower())
538
  if (model_is_xl and lora_type) or (not model_is_xl and not lora_type):
539
+ msg_inc_lora = f"The LoRA {la} is for {'SD 1.5' if model_is_xl else 'SDXL'}, but you are using {model_type}."
540
  gr.Info(msg_inc_lora)
541
  msg_lora.append(msg_inc_lora)
542
 
 
577
  )
578
 
579
  if task != "txt2img" and not image_control:
580
+ raise ValueError(
581
+ "No control image found: To use this function, "
582
+ "you have to upload an image in 'Image ControlNet/Inpaint/Img2img'"
583
+ )
584
 
585
  if task == "inpaint" and not image_mask:
586
  raise ValueError("No mask image found: Specify one in 'Image Mask'")
 
614
  print("No Textual inversion for SDXL")
615
 
616
  adetailer_params_A = {
617
+ "face_detector_ad": face_detector_ad_a,
618
+ "person_detector_ad": person_detector_ad_a,
619
+ "hand_detector_ad": hand_detector_ad_a,
620
  "prompt": prompt_ad_a,
621
+ "negative_prompt": negative_prompt_ad_a,
622
+ "strength": strength_ad_a,
623
  # "image_list_task" : None,
624
+ "mask_dilation": mask_dilation_a,
625
+ "mask_blur": mask_blur_a,
626
+ "mask_padding": mask_padding_a,
627
+ "inpaint_only": adetailer_inpaint_only,
628
+ "sampler": adetailer_sampler,
629
  }
630
 
631
  adetailer_params_B = {
632
+ "face_detector_ad": face_detector_ad_b,
633
+ "person_detector_ad": person_detector_ad_b,
634
+ "hand_detector_ad": hand_detector_ad_b,
635
  "prompt": prompt_ad_b,
636
+ "negative_prompt": negative_prompt_ad_b,
637
+ "strength": strength_ad_b,
638
  # "image_list_task" : None,
639
+ "mask_dilation": mask_dilation_b,
640
+ "mask_blur": mask_blur_b,
641
+ "mask_padding": mask_padding_b,
642
  }
643
  pipe_params = {
644
  "prompt": prompt,
 
721
 
722
  random_number = random.randint(1, 100)
723
  if random_number < 25 and num_images < 3:
724
+ if not upscaler_model and steps < 45 and task in ["txt2img",
725
+ "img2img"] and not adetailer_active_a and not adetailer_active_b:
726
+ num_images *= 2
727
  pipe_params["num_images"] = num_images
728
  gr.Info("Num images x 2 🎉")
729
 
 
744
 
745
  sd_gen = GuiSD()
746
 
747
+ CSS = """
748
  .contain { display: flex; flex-direction: column; }
749
  #component-0 { height: 100%; }
750
  #gallery { flex-grow: 1; }
751
  """
752
+ sdxl_task = [k for k, v in task_stablepy.items() if v in SDXL_TASKS]
753
+ sd_task = [k for k, v in task_stablepy.items() if v in SD15_TASKS]
754
+
755
+
756
  def update_task_options(model_name, task_name):
757
  if model_name in model_list:
758
  if "xl" in model_name.lower():
 
769
 
770
 
771
  with gr.Blocks(theme="NoCrypt/miku", css=CSS) as app:
772
+ gr.Markdown("# 🧩 (Ivan) DiffuseCraft")
773
  gr.Markdown(
774
  f"""
775
  ### This demo uses [diffusers](https://github.com/huggingface/diffusers) to perform different tasks in image generation.
 
781
  with gr.Column(scale=2):
782
 
783
  task_gui = gr.Dropdown(label="Task", choices=sdxl_task, value=task_model_list[0])
784
+ model_name_gui = gr.Dropdown(label="Model", choices=model_list, value=model_list[0],
785
+ allow_custom_value=True)
786
  prompt_gui = gr.Textbox(lines=5, placeholder="Enter prompt", label="Prompt")
787
  neg_prompt_gui = gr.Textbox(lines=3, placeholder="Enter Neg prompt", label="Negative prompt")
788
  with gr.Row(equal_height=False):
 
790
  clear_prompt_gui = gr.Button(value="🗑️")
791
  set_random_seed = gr.Button(value="🎲")
792
  generate_button = gr.Button(value="GENERATE", variant="primary")
793
+
794
  model_name_gui.change(
795
  update_task_options,
796
  [model_name_gui, task_gui],
 
798
  )
799
 
800
  load_model_gui = gr.HTML()
801
+
802
  result_images = gr.Gallery(
803
  label="Generated images",
804
  show_label=False,
 
813
  )
814
 
815
  actual_task_info = gr.HTML()
816
+
817
  with gr.Column(scale=1):
818
  steps_gui = gr.Slider(minimum=1, maximum=100, step=1, value=30, label="Steps")
819
  cfg_gui = gr.Slider(minimum=0, maximum=30, step=0.5, value=7.5, label="CFG")
 
826
  free_u_gui = gr.Checkbox(value=True, label="FreeU")
827
 
828
  with gr.Row(equal_height=False):
 
 
829
 
830
  def run_set_params_gui(base_prompt):
831
  valid_receptors = { # default values
 
836
  "height": gr.update(value=1024),
837
  "Seed": gr.update(value=-1),
838
  "Sampler": gr.update(value="Euler a"),
839
+ "scale": gr.update(value=7.5), # cfg
840
  "skip": gr.update(value=True),
841
  }
842
  valid_keys = list(valid_receptors.keys())
 
846
  # print(val)
847
  if key in valid_keys:
848
  if key == "Sampler":
849
+ if val not in scheduler_names:
850
+ continue
851
  elif key == "skip":
852
+ if int(val) >= 2:
853
+ val = True
854
  if key == "prompt":
855
+ if ">" in val and "<" in val:
856
+ val = re.sub(r'<[^>]+>', '', val)
857
+ print("Removed LoRA written in the prompt")
858
  if key in ["prompt", "neg_prompt"]:
859
  val = val.strip()
860
  if key in ["Steps", "width", "height", "Seed"]:
 
868
  # print(valid_receptors)
869
  return [value for value in valid_receptors.values()]
870
 
871
+
872
  set_params_gui.click(
873
+ run_set_params_gui, [prompt_gui], [
874
  prompt_gui,
875
  neg_prompt_gui,
876
  steps_gui,
 
882
  clip_skip_gui,
883
  ],
884
  )
885
+
886
+
887
  def run_clear_prompt_gui():
888
  return gr.update(value=""), gr.update(value="")
889
+
890
+
891
  clear_prompt_gui.click(
892
  run_clear_prompt_gui, [], [prompt_gui, neg_prompt_gui]
893
  )
894
 
895
+
896
  def run_set_random_seed():
897
  return -1
898
+
899
+
900
  set_random_seed.click(
901
  run_set_random_seed, [], seed_gui
902
  )
 
910
  ("Classic-ignore", "Classic-ignore"),
911
  ("None", "None"),
912
  ]
913
+ prompt_syntax_gui = gr.Dropdown(label="Prompt Syntax", choices=prompt_s_options,
914
+ value=prompt_s_options[0][1])
915
  vae_model_gui = gr.Dropdown(label="VAE Model", choices=vae_model_list)
916
 
917
  with gr.Accordion("Hires fix", open=False, visible=True):
918
 
919
  upscaler_keys = list(upscaler_dict_gui.keys())
920
 
921
+ upscaler_model_path_gui = gr.Dropdown(label="Upscaler", choices=upscaler_keys,
922
+ value=upscaler_keys[0])
923
+ upscaler_increases_size_gui = gr.Slider(minimum=1.1, maximum=6., step=0.1, value=1.4,
924
+ label="Upscale by")
925
  esrgan_tile_gui = gr.Slider(minimum=0, value=100, maximum=500, step=1, label="ESRGAN Tile")
926
+ esrgan_tile_overlap_gui = gr.Slider(minimum=1, maximum=200, step=1, value=10,
927
+ label="ESRGAN Tile Overlap")
928
  hires_steps_gui = gr.Slider(minimum=0, value=30, maximum=100, step=1, label="Hires Steps")
929
+ hires_denoising_strength_gui = gr.Slider(minimum=0.1, maximum=1.0, step=0.01, value=0.55,
930
+ label="Hires Denoising Strength")
931
+ hires_sampler_gui = gr.Dropdown(label="Hires Sampler",
932
+ choices=["Use same sampler"] + scheduler_names[:-1],
933
+ value="Use same sampler")
934
  hires_prompt_gui = gr.Textbox(label="Hires Prompt", placeholder="Main prompt will be use", lines=3)
935
+ hires_negative_prompt_gui = gr.Textbox(label="Hires Negative Prompt",
936
+ placeholder="Main negative prompt will be use", lines=3)
937
 
938
  with gr.Accordion("LoRA", open=False, visible=True):
939
  lora1_gui = gr.Dropdown(label="Lora1", choices=lora_model_list)
 
956
  [lora1_gui, lora2_gui, lora3_gui, lora4_gui, lora5_gui]
957
  )
958
 
959
+ with gr.Accordion("IP-Adapter", open=False, visible=True): ##############
960
 
961
  IP_MODELS = sorted(list(set(IP_ADAPTERS_SD + IP_ADAPTERS_SDXL)))
962
  MODE_IP_OPTIONS = ["original", "style", "layout", "style+layout"]
 
981
  minimum=0.01, maximum=1.0, step=0.01, value=0.55, label="Strength",
982
  info="This option adjusts the level of changes for img2img and inpainting."
983
  )
984
+ image_resolution_gui = gr.Slider(minimum=64, maximum=2048, step=64, value=1024,
985
+ label="Image Resolution")
986
+ preprocessor_name_gui = gr.Dropdown(label="Preprocessor Name",
987
+ choices=preprocessor_controlnet["canny"])
988
+
989
 
990
  def change_preprocessor_choices(task):
991
  task = task_stablepy[task]
 
995
  choices_task = preprocessor_controlnet["canny"]
996
  return gr.update(choices=choices_task, value=choices_task[0])
997
 
998
+
999
  task_gui.change(
1000
  change_preprocessor_choices,
1001
  [task_gui],
1002
  [preprocessor_name_gui],
1003
  )
1004
+ preprocess_resolution_gui = gr.Slider(minimum=64, maximum=2048, step=64, value=512,
1005
+ label="Preprocess Resolution")
1006
+ low_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=100,
1007
+ label="Canny low threshold")
1008
+ high_threshold_gui = gr.Slider(minimum=1, maximum=255, step=1, value=200,
1009
+ label="Canny high threshold")
1010
+ value_threshold_gui = gr.Slider(minimum=1, maximum=2.0, step=0.01, value=0.1,
1011
+ label="Hough value threshold (MLSD)")
1012
+ distance_threshold_gui = gr.Slider(minimum=1, maximum=20.0, step=0.01, value=0.1,
1013
+ label="Hough distance threshold (MLSD)")
1014
+ control_net_output_scaling_gui = gr.Slider(minimum=0, maximum=5.0, step=0.1, value=1,
1015
+ label="ControlNet Output Scaling in UNet")
1016
+ control_net_start_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=0,
1017
+ label="ControlNet Start Threshold (%)")
1018
+ control_net_stop_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=1,
1019
+ label="ControlNet Stop Threshold (%)")
1020
 
1021
  with gr.Accordion("T2I adapter", open=False, visible=True):
1022
  t2i_adapter_preprocessor_gui = gr.Checkbox(value=True, label="T2i Adapter Preprocessor")
1023
+ adapter_conditioning_scale_gui = gr.Slider(minimum=0, maximum=5., step=0.1, value=1,
1024
+ label="Adapter Conditioning Scale")
1025
+ adapter_conditioning_factor_gui = gr.Slider(minimum=0, maximum=1., step=0.01, value=0.55,
1026
+ label="Adapter Conditioning Factor (%)")
1027
 
1028
  with gr.Accordion("Styles", open=False, visible=True):
1029
 
 
1042
  style_json_gui = gr.File(label="Style JSON File")
1043
  style_button = gr.Button("Load styles")
1044
 
1045
+
1046
  def load_json_style_file(json):
1047
  if not sd_gen.model:
1048
  gr.Info("First load the model")
 
1052
  gr.Info(f"{len(sd_gen.model.STYLE_NAMES)} styles loaded")
1053
  return gr.update(value=None, choices=sd_gen.model.STYLE_NAMES)
1054
 
1055
+
1056
+ style_button.click(load_json_style_file, [style_json_gui], [style_prompt_gui])
1057
 
1058
  with gr.Accordion("Textual inversion", open=False, visible=False):
1059
  active_textual_inversion_gui = gr.Checkbox(value=False, label="Active Textual Inversion in prompt")
 
1068
 
1069
  # Adetailer Sampler
1070
  adetailer_sampler_options = ["Use same sampler"] + scheduler_names[:-1]
1071
+ adetailer_sampler_gui = gr.Dropdown(label="Adetailer sampler:", choices=adetailer_sampler_options,
1072
+ value="Use same sampler")
1073
 
1074
  with gr.Accordion("Detailfix A", open=False, visible=True):
1075
  # Adetailer A
1076
  adetailer_active_a_gui = gr.Checkbox(label="Enable Adetailer A", value=False)
1077
+ prompt_ad_a_gui = gr.Textbox(label="Main prompt", placeholder="Main prompt will be use",
1078
+ lines=3)
1079
+ negative_prompt_ad_a_gui = gr.Textbox(label="Negative prompt",
1080
+ placeholder="Main negative prompt will be use", lines=3)
1081
+ strength_ad_a_gui = gr.Number(label="Strength:", value=0.35, step=0.01, minimum=0.01,
1082
+ maximum=1.0)
1083
  face_detector_ad_a_gui = gr.Checkbox(label="Face detector", value=True)
1084
  person_detector_ad_a_gui = gr.Checkbox(label="Person detector", value=True)
1085
  hand_detector_ad_a_gui = gr.Checkbox(label="Hand detector", value=False)
 
1090
  with gr.Accordion("Detailfix B", open=False, visible=True):
1091
  # Adetailer B
1092
  adetailer_active_b_gui = gr.Checkbox(label="Enable Adetailer B", value=False)
1093
+ prompt_ad_b_gui = gr.Textbox(label="Main prompt", placeholder="Main prompt will be use",
1094
+ lines=3)
1095
+ negative_prompt_ad_b_gui = gr.Textbox(label="Negative prompt",
1096
+ placeholder="Main negative prompt will be use", lines=3)
1097
+ strength_ad_b_gui = gr.Number(label="Strength:", value=0.35, step=0.01, minimum=0.01,
1098
+ maximum=1.0)
1099
  face_detector_ad_b_gui = gr.Checkbox(label="Face detector", value=True)
1100
  person_detector_ad_b_gui = gr.Checkbox(label="Person detector", value=True)
1101
  hand_detector_ad_b_gui = gr.Checkbox(label="Hand detector", value=False)
 
1118
  save_generated_images_gui = gr.Checkbox(value=False, label="Save Generated Images")
1119
  image_storage_location_gui = gr.Textbox(value="./images", label="Image Storage Location")
1120
  retain_compel_previous_load_gui = gr.Checkbox(value=False, label="Retain Compel Previous Load")
1121
+ retain_detailfix_model_previous_load_gui = gr.Checkbox(value=False,
1122
+ label="Retain Detailfix Model Previous Load")
1123
+ retain_hires_model_previous_load_gui = gr.Checkbox(value=False,
1124
+ label="Retain Hires Model Previous Load")
1125
+ xformers_memory_efficient_attention_gui = gr.Checkbox(value=False,
1126
+ label="Xformers Memory Efficient Attention")
1127
 
1128
  with gr.Accordion("Examples and help", open=False, visible=True):
1129
  gr.Markdown(
 
1160
  None,
1161
  1.0,
1162
  None,
1163
+ 1.0,
1164
  None,
1165
  1.0,
1166
  None,
 
1169
  1152,
1170
  896,
1171
  "cagliostrolab/animagine-xl-3.1",
1172
+ None, # vae
1173
  "txt2img",
1174
+ None, # img conttol
1175
+ "Canny", # preprocessor
1176
+ 512, # preproc resolution
1177
+ 1024, # img resolution
1178
+ None, # Style prompt
1179
+ None, # Style json
1180
+ None, # img Mask
1181
+ 0.35, # strength
1182
+ 100, # low th canny
1183
+ 200, # high th canny
1184
+ 0.1, # value mstd
1185
+ 0.1, # distance mstd
1186
+ 1.0, # cn scale
1187
+ 0., # cn start
1188
+ 1., # cn end
1189
+ False, # ti
1190
  "Classic",
1191
  "Nearest",
1192
  ],
 
1203
  None,
1204
  1.0,
1205
  None,
1206
+ 1.0,
1207
  None,
1208
  1.0,
1209
  None,
 
1212
  1024,
1213
  1024,
1214
  "kitty7779/ponyDiffusionV6XL",
1215
+ None, # vae
1216
  "txt2img",
1217
+ None, # img conttol
1218
+ "Canny", # preprocessor
1219
+ 512, # preproc resolution
1220
+ 1024, # img resolution
1221
+ None, # Style prompt
1222
+ None, # Style json
1223
+ None, # img Mask
1224
+ 0.35, # strength
1225
+ 100, # low th canny
1226
+ 200, # high th canny
1227
+ 0.1, # value mstd
1228
+ 0.1, # distance mstd
1229
+ 1.0, # cn scale
1230
+ 0., # cn start
1231
+ 1., # cn end
1232
+ False, # ti
1233
  "Classic",
1234
  "Nearest",
1235
  ],
 
1246
  None,
1247
  1.0,
1248
  None,
1249
+ 1.0,
1250
  None,
1251
  1.0,
1252
  None,
 
1255
  1024,
1256
  1024,
1257
  "misri/epicrealismXL_v7FinalDestination",
1258
+ None, # vae
1259
  "canny ControlNet",
1260
+ "image.webp", # img conttol
1261
+ "Canny", # preprocessor
1262
+ 1024, # preproc resolution
1263
+ 1024, # img resolution
1264
+ None, # Style prompt
1265
+ None, # Style json
1266
+ None, # img Mask
1267
+ 0.35, # strength
1268
+ 100, # low th canny
1269
+ 200, # high th canny
1270
+ 0.1, # value mstd
1271
+ 0.1, # distance mstd
1272
+ 1.0, # cn scale
1273
+ 0., # cn start
1274
+ 1., # cn end
1275
+ False, # ti
1276
  "Classic",
1277
  None,
1278
  ],
 
1289
  None,
1290
  1.0,
1291
  None,
1292
+ 1.0,
1293
  None,
1294
  1.0,
1295
  None,
 
1298
  1024,
1299
  1024,
1300
  "misri/juggernautXL_juggernautX",
1301
+ None, # vae
1302
  "optical pattern ControlNet",
1303
+ "spiral_no_transparent.png", # img conttol
1304
+ "Canny", # preprocessor
1305
+ 512, # preproc resolution
1306
+ 1024, # img resolution
1307
+ None, # Style prompt
1308
+ None, # Style json
1309
+ None, # img Mask
1310
+ 0.35, # strength
1311
+ 100, # low th canny
1312
+ 200, # high th canny
1313
+ 0.1, # value mstd
1314
+ 0.1, # distance mstd
1315
+ 1.0, # cn scale
1316
+ 0.05, # cn start
1317
+ 0.75, # cn end
1318
+ False, # ti
1319
  "Classic",
1320
  None,
1321
  ],
 
1332
  None,
1333
  1.0,
1334
  None,
1335
+ 1.0,
1336
  None,
1337
  1.0,
1338
  None,
 
1341
  1024,
1342
  1024,
1343
  "cagliostrolab/animagine-xl-3.1",
1344
+ None, # vae
1345
  "lineart ControlNet",
1346
+ "color_image.png", # img conttol
1347
+ "Lineart", # preprocessor
1348
+ 512, # preproc resolution
1349
+ 896, # img resolution
1350
+ None, # Style prompt
1351
+ None, # Style json
1352
+ None, # img Mask
1353
+ 0.35, # strength
1354
+ 100, # low th canny
1355
+ 200, # high th canny
1356
+ 0.1, # value mstd
1357
+ 0.1, # distance mstd
1358
+ 1.0, # cn scale
1359
+ 0., # cn start
1360
+ 1., # cn end
1361
+ False, # ti
1362
  "Compel",
1363
  None,
1364
  ],
 
1375
  None,
1376
  1.0,
1377
  None,
1378
+ 1.0,
1379
  None,
1380
  1.0,
1381
  None,
 
1384
  512,
1385
  512,
1386
  "digiplay/majicMIX_realistic_v7",
1387
+ None, # vae
1388
  "openpose ControlNet",
1389
+ "image.webp", # img conttol
1390
+ "Canny", # preprocessor
1391
+ 512, # preproc resolution
1392
+ 1024, # img resolution
1393
+ None, # Style prompt
1394
+ None, # Style json
1395
+ None, # img Mask
1396
+ 0.35, # strength
1397
+ 100, # low th canny
1398
+ 200, # high th canny
1399
+ 0.1, # value mstd
1400
+ 0.1, # distance mstd
1401
+ 1.0, # cn scale
1402
+ 0., # cn start
1403
+ 0.9, # cn end
1404
+ False, # ti
1405
  "Compel",
1406
  "Nearest",
1407
  ],
 
1456
 
1457
  with gr.Tab("Inpaint mask maker", render=True):
1458
 
1459
+ def create_mask_now(img, invert):
1460
  import numpy as np
1461
  import time
1462
 
 
1483
 
1484
  return img["background"], rgb_mask
1485
 
1486
+
1487
  with gr.Row():
1488
  with gr.Column(scale=2):
1489
  # image_base = gr.ImageEditor(label="Base image", show_label=True, brush=gr.Brush(colors=["#000000"]))
 
1493
  # enable crop (or disable it)
1494
  # transforms=["crop"],
1495
  brush=gr.Brush(
1496
+ default_size="16", # or leave it as 'auto'
1497
+ color_mode="fixed", # 'fixed' hides the user swatches and colorpicker, 'defaults' shows it
1498
+ # default_color="black", # html names are supported
1499
+ colors=[
1500
+ "rgba(0, 0, 0, 1)", # rgb(a)
1501
+ "rgba(0, 0, 0, 0.1)",
1502
+ "rgba(255, 255, 255, 0.1)",
1503
+ # "hsl(360, 120, 120)" # in fact any valid colorstring
1504
+ ]
1505
  ),
1506
  eraser=gr.Eraser(default_size="16")
1507
  )
 
1514
 
1515
  btn.click(create_mask_now, [image_base, invert_mask], [img_source, img_result])
1516
 
1517
+
1518
  def send_img(img_source, img_result):
1519
  return img_source, img_result
1520
+
1521
+
1522
  btn_send.click(send_img, [img_source, img_result], [image_control, image_mask_gui])
1523
+
1524
  generate_button.click(
1525
  fn=sd_gen.load_new_model,
1526
  inputs=[